tree-wide: fix assorted typos all over the place
drivers/gpu/drm/ttm/ttm_bo_util.c
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

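/*
 * Release the drm_mm node backing the buffer's current placement, if any.
 * The node is returned to the allocator under the global LRU spinlock.
 */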
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (old_mem->mm_node) {
		spin_lock(&bo->glob->lru_lock);
		drm_mm_put_block(old_mem->mm_node);
		spin_unlock(&bo->glob->lru_lock);
	}
	old_mem->mm_node = NULL;
}

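/*
 * Move a buffer using only the TTM page tables: unbind from a non-system
 * placement (releasing the old node), adjust the caching attributes of
 * the TTM pages, then bind to the new placement unless it is system
 * memory.
 */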
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	uint32_t save_flags = old_mem->placement;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
		save_flags = old_mem->placement;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

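/*
 * Map the bus-address aperture of a memory region into kernel virtual
 * address space. Memory types the driver has premapped (no
 * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP) reuse man->io_addr; anything else is
 * ioremapped write-combined or uncached according to the placement
 * flags. ttm_mem_reg_iounmap() below undoes the ioremap case only.
 */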
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
	if (ret || bus_size == 0)
		return ret;

	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(bus_base + bus_offset, bus_size);
		else
			addr = ioremap_nocache(bus_base + bus_offset, bus_size);
		if (!addr)
			return -ENOMEM;
	}
	*virtual = addr;
	return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
		iounmap(virtual);
}

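/*
 * Copy one page between two iomapped apertures, one 32-bit word at a
 * time, using the io accessors so the copy is safe on architectures
 * where I/O memory must not be dereferenced directly.
 */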
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

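/*
 * Copy one page from an iomapped aperture into a TTM page. On x86 the
 * destination page is mapped with kmap_atomic_prot(); elsewhere vmap()
 * is used when a non-default page protection is required, and plain
 * kmap() otherwise.
 */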
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm_tt_get_page(ttm, page);
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

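/*
 * The inverse of ttm_copy_io_ttm_page(): copy one TTM page out to an
 * iomapped aperture, with the same per-architecture mapping strategy.
 */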
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm_tt_get_page(ttm, page);
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

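/*
 * Fallback move implemented as a CPU copy. Both the old and the new
 * placement are iomapped if needed, and the pages are copied one at a
 * time. When source and destination live in the same memory type and
 * the ranges may overlap, the copy runs backwards (dir == -1) so data
 * is not overwritten before it has been read, as in memmove().
 */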
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	uint32_t save_flags = old_mem->placement;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else {
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		}
		if (ret)
			goto out1;
	}
	mb();
out2:
	ttm_bo_free_old_node(bo);

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	spin_lock_init(&fbo->lock);
	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	fbo->vm_node = NULL;

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	if (fbo->mem.mm_node)
		fbo->mem.mm_node->private = (void *)fbo;
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}

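/*
 * Translate TTM caching flags into a pgprot_t for CPU mappings, with
 * per-architecture handling of write-combined and uncached placements.
 */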
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

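/*
 * Kmap helper for buffers in iomapped memory: record whether the
 * mapping is premapped or a fresh ioremap so that ttm_bo_kunmap()
 * can undo it correctly.
 */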
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long bus_base,
			  unsigned long bus_offset,
			  unsigned long bus_size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bus_base + bus_offset,
						  bus_size);
		else
			map->virtual = ioremap_nocache(bus_base + bus_offset,
						       bus_size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

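/*
 * Kmap helper for buffers backed by TTM pages. A single cached page
 * can be kmapped directly; anything else is populated first and then
 * vmapped so the mapping is virtually contiguous and carries the
 * desired page protection.
 */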
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	struct page *d;
	int i;

	BUG_ON(!ttm);
	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm_tt_get_page(ttm, start_page);
		if (!map->page)
			return -ENOMEM;
		map->virtual = kmap(map->page);
	} else {
		/*
		 * Populate the part we're mapping.
		 */
		for (i = start_page; i < start_page + num_pages; ++i) {
			d = ttm_tt_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

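/*
 * Map part of a buffer object into kernel address space, dispatching
 * to ttm_bo_kmap_ttm() for system memory and ttm_bo_ioremap() for
 * memory behind a PCI aperture. The mapping is recorded in @map and
 * must be released with ttm_bo_kunmap().
 */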
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	int ret;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
				&bus_offset, &bus_size);
	if (ret)
		return ret;
	if (bus_size == 0) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		bus_offset += start_page << PAGE_SHIFT;
		bus_size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

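/*
 * Unmap a mapping set up by ttm_bo_kmap().
 *
 * Illustrative usage sketch (editorial, not part of the original file);
 * ttm_kmap_obj_virtual() is the accessor declared in ttm_bo_api.h:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	int ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *
 *	if (ret == 0) {
 *		void *virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *
 *		if (!is_iomem)
 *			memset(virtual, 0, bo->num_pages << PAGE_SHIFT);
 *		ttm_bo_kunmap(&map);
 *	}
 */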
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

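/*
 * Compute the page frame number and page protection needed to map the
 * buffer page containing @dst_offset, taking the pfn either from the
 * PCI aperture or from the backing TTM page.
 */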
int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
		    unsigned long dst_offset,
		    unsigned long *pfn, pgprot_t *prot)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;

	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
				&bus_size);
	if (ret)
		return -EINVAL;
	if (bus_size != 0)
		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
	else if (!bo->ttm)
		return -EINVAL;
	else
		*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
						   dst_offset >> PAGE_SHIFT));
	*prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);

	return 0;
}

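/*
 * Finish an accelerated (GPU) move. On eviction, wait for the move
 * fence and release the old node immediately. Otherwise transfer the
 * old placement to a ghost buffer object that holds it until the GPU
 * is done, so ordinary moves can be pipelined without stalling.
 */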
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	uint32_t save_flags = old_mem->placement;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bo->lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		ttm_bo_free_old_node(bo);
		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
	} else {
		/*
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/*
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);