drm/nv50: fix iommu errors caused by device reading from address 0
drivers/gpu/drm/nouveau/nouveau_sgdma.c
#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

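/* Per-backend state: the DMA addresses of the currently populated pages
 * and, once bound, the first PTE this backend occupies in the shared
 * GART page table. */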
struct nouveau_sgdma_be {
        struct ttm_backend backend;
        struct drm_device *dev;

        dma_addr_t *pages;
        unsigned nr_pages;

        unsigned pte_start;
        bool bound;
};

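/* Map the backing pages for DMA through the PCI layer.  On a mapping
 * failure, everything mapped so far is torn down via ->clear(). */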
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
                       struct page **pages, struct page *dummy_read_page)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;

        NV_DEBUG(nvbe->dev, "num_pages = %lu\n", num_pages);

        if (nvbe->pages)
                return -EINVAL;

        nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
        if (!nvbe->pages)
                return -ENOMEM;

        nvbe->nr_pages = 0;
        while (num_pages--) {
                nvbe->pages[nvbe->nr_pages] =
                        pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
                                     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                if (pci_dma_mapping_error(dev->pdev,
                                          nvbe->pages[nvbe->nr_pages])) {
                        be->func->clear(be);
                        return -EFAULT;
                }

                nvbe->nr_pages++;
        }

        return 0;
}

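/* Undo populate: unbind from the GART if necessary, then unmap and free
 * the DMA address array. */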
static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev;

        if (nvbe && nvbe->pages) {
                dev = nvbe->dev;
                NV_DEBUG(dev, "\n");

                if (nvbe->bound)
                        be->func->unbind(be);

                while (nvbe->nr_pages--) {
                        pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
                kfree(nvbe->pages);
                nvbe->pages = NULL;
                nvbe->nr_pages = 0;
        }
}

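/* Convert a byte offset within the GART aperture to a PTE index.
 * Pre-NV50 PTEs are single 32-bit words starting after the two-word
 * ctxdma header, hence the "+ 2"; NV50 PTEs are two words each, hence
 * the shift. */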
static inline unsigned
nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);

        if (dev_priv->card_type < NV_50)
                return pte + 2;

        return pte << 1;
}

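/* Write the DMA address of every populated page into the GART page
 * table, splitting each CPU page into NV_CTXDMA_PAGE_SIZE-sized entries
 * when PAGE_SIZE is larger. */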
static int
nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);

        dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
        pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
        nvbe->pte_start = pte;
        for (i = 0; i < nvbe->nr_pages; i++) {
                dma_addr_t dma_offset = nvbe->pages[i];
                uint32_t offset_l = lower_32_bits(dma_offset);
                uint32_t offset_h = upper_32_bits(dma_offset);

                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
                        if (dev_priv->card_type < NV_50)
                                nv_wo32(dev, gpuobj, pte++, offset_l | 3);
                        else {
                                nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
                                nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
                        }

                        dma_offset += NV_CTXDMA_PAGE_SIZE;
                }
        }
        dev_priv->engine.instmem.finish_access(nvbe->dev);

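        /* The writes to 0x100c80 below appear to flush VM/TLB state on
         * NV50 so the updated PTEs take effect; the register is
         * undocumented, so treat this description as a best guess. */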
        if (dev_priv->card_type == NV_50) {
                nv_wr32(dev, 0x100c80, 0x00050001);
                if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
                        NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
                        NV_ERROR(dev, "0x100c80 = 0x%08x\n",
                                                nv_rd32(dev, 0x100c80));
                        return -EBUSY;
                }

                nv_wr32(dev, 0x100c80, 0x00000001);
                if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
                        NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
                        NV_ERROR(dev, "0x100c80 = 0x%08x\n",
                                                nv_rd32(dev, 0x100c80));
                        return -EBUSY;
                }
        }

        nvbe->bound = true;
        return 0;
}

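/* Unbind does not simply zero the PTEs: it points them back at the
 * dummy page.  This is the fix named in the commit title: a PTE left at
 * 0 let the device read from bus address 0, triggering IOMMU errors. */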
static int
nouveau_sgdma_unbind(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_device *dev = nvbe->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        unsigned i, j, pte;

        NV_DEBUG(dev, "\n");

        if (!nvbe->bound)
                return 0;

        dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
        pte = nvbe->pte_start;
        for (i = 0; i < nvbe->nr_pages; i++) {
                dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;

                for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
                        if (dev_priv->card_type < NV_50)
                                nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
                        else {
                                nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
                                nv_wo32(dev, gpuobj, pte++, 0x00000000);
                        }

                        dma_offset += NV_CTXDMA_PAGE_SIZE;
                }
        }
        dev_priv->engine.instmem.finish_access(nvbe->dev);

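        /* Same presumed VM/TLB flush sequence as in nouveau_sgdma_bind(). */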
        if (dev_priv->card_type == NV_50) {
                nv_wr32(dev, 0x100c80, 0x00050001);
                if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
                        NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
                        NV_ERROR(dev, "0x100c80 = 0x%08x\n",
                                                nv_rd32(dev, 0x100c80));
                        return -EBUSY;
                }

                nv_wr32(dev, 0x100c80, 0x00000001);
                if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
                        NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
                        NV_ERROR(dev, "0x100c80 = 0x%08x\n",
                                                nv_rd32(dev, 0x100c80));
                        return -EBUSY;
                }
        }

        nvbe->bound = false;
        return 0;
}

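/* Final teardown of a backend: clear any remaining mappings, then free
 * the structure itself. */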
static void
nouveau_sgdma_destroy(struct ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

        if (nvbe) {
                NV_DEBUG(nvbe->dev, "\n");

                if (nvbe->pages)
                        be->func->clear(be);
                kfree(nvbe);
        }
}

static struct ttm_backend_func nouveau_sgdma_backend = {
        .populate               = nouveau_sgdma_populate,
        .clear                  = nouveau_sgdma_clear,
        .bind                   = nouveau_sgdma_bind,
        .unbind                 = nouveau_sgdma_unbind,
        .destroy                = nouveau_sgdma_destroy
};

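/* Allocate a TTM backend instance.  Returns NULL if the shared GART
 * ctxdma has not been created by nouveau_sgdma_init() yet. */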
struct ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_sgdma_be *nvbe;

        if (!dev_priv->gart_info.sg_ctxdma)
                return NULL;

        nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
        if (!nvbe)
                return NULL;

        nvbe->dev = dev;

        nvbe->backend.func      = &nouveau_sgdma_backend;

        return &nvbe->backend;
}

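/* Build the shared GART page table.  Pre-NV50 this is a 64MiB ctxdma
 * (two header words plus one word per 4KiB page); NV50 uses one entire
 * 512MiB VM page table with two words per entry.  Every entry starts
 * out pointing at a dummy page, so the device never follows a PTE to
 * bus address 0. */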
int
nouveau_sgdma_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = NULL;
        uint32_t aper_size, obj_size;
        int i, ret;

        if (dev_priv->card_type < NV_50) {
                aper_size = (64 * 1024 * 1024);
                obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
                obj_size += 8; /* ctxdma header */
        } else {
                /* 1 entire VM page table */
                aper_size = (512 * 1024 * 1024);
                obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
        }

        ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
                                      NVOBJ_FLAG_ALLOW_NO_REFS |
                                      NVOBJ_FLAG_ZERO_ALLOC |
                                      NVOBJ_FLAG_ZERO_FREE, &gpuobj);
        if (ret) {
                NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
                return ret;
        }

        dev_priv->gart_info.sg_dummy_page =
                alloc_page(GFP_KERNEL|__GFP_DMA32);
        if (!dev_priv->gart_info.sg_dummy_page) {
                nouveau_gpuobj_del(dev, &gpuobj);
                return -ENOMEM;
        }
        set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
        dev_priv->gart_info.sg_dummy_bus =
                pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

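        /* Fill the entire page table with the dummy page's bus address. */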
        dev_priv->engine.instmem.prepare_access(dev, true);
        if (dev_priv->card_type < NV_50) {
                /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA does this,
                 * and it's confirmed to work on c51.  Perhaps that means
                 * NV_DMA_TARGET_PCIE on those cards? */
                nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
                                       (1 << 12) /* PT present */ |
                                       (0 << 13) /* PT *not* linear */ |
                                       (NV_DMA_ACCESS_RW  << 14) |
                                       (NV_DMA_TARGET_PCI << 16));
                nv_wo32(dev, gpuobj, 1, aper_size - 1);
                for (i = 2; i < 2 + (aper_size >> 12); i++) {
                        nv_wo32(dev, gpuobj, i,
                                    dev_priv->gart_info.sg_dummy_bus | 3);
                }
        } else {
                for (i = 0; i < obj_size; i += 8) {
                        nv_wo32(dev, gpuobj, (i+0)/4,
                                    dev_priv->gart_info.sg_dummy_bus | 0x21);
                        nv_wo32(dev, gpuobj, (i+4)/4, 0);
                }
        }
        dev_priv->engine.instmem.finish_access(dev);

        dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
        dev_priv->gart_info.aper_base = 0;
        dev_priv->gart_info.aper_size = aper_size;
        dev_priv->gart_info.sg_ctxdma = gpuobj;
        return 0;
}

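/* Release everything nouveau_sgdma_init() created: the dummy page, its
 * DMA mapping, and the GART ctxdma. */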
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (dev_priv->gart_info.sg_dummy_page) {
                pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                unlock_page(dev_priv->gart_info.sg_dummy_page);
                __free_page(dev_priv->gart_info.sg_dummy_page);
                dev_priv->gart_info.sg_dummy_page = NULL;
                dev_priv->gart_info.sg_dummy_bus = 0;
        }

        nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
}

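/* Look up the bus address a GART offset is currently mapped to by
 * reading the PTE back out of the ctxdma.  Only implemented for
 * pre-NV50 cards. */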
int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
        int pte;

        pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
        if (dev_priv->card_type < NV_50) {
                instmem->prepare_access(dev, false);
                *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
                instmem->finish_access(dev);
                return 0;
        }

        NV_ERROR(dev, "Unimplemented on NV50\n");
        return -EINVAL;
}