/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_drm.h"
#include "r100_track.h"
#include "r300d.h"
#include "rv350d.h"
#include "r300_reg_safe.h"

/* This file gathers functions specific to: r300,r350,rv350,rv370,rv380
 *
 * GPU Errata:
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 *   over MMIO to flush the host path read cache; this leads to a hard lockup.
 *   However, scheduling such a write on the ring seems harmless; I suspect
 *   the CP read collides with the flush somehow, or maybe with the MC, hard
 *   to tell. (Jerome Glisse)
 */

/*
 * rv370,rv380 PCIE GART
 */
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
        uint32_t tmp;
        int i;

        /* Workaround for a HW bug: do the flush 2 times */
        for (i = 0; i < 2; i++) {
                tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
                WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
                (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
                WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        }
        mb();
}

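/*
 * Each GART page-table entry is one 32-bit word per GPU page: bits 23:0
 * hold system-address bits 31:8 and bits 31:24 hold address bits 39:32,
 * which fits in 32 bits because pages are 4KB aligned. The 0xc OR'ed in
 * below sets control bits in the now-unused low nibble, presumably the
 * valid/snoop flags (an assumption; the register docs are not at hand).
 */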
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
        void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

        /* valid page indices are 0..num_gpu_pages-1 */
        if (i < 0 || i >= rdev->gart.num_gpu_pages) {
                return -EINVAL;
        }
        addr = (lower_32_bits(addr) >> 8) |
               ((upper_32_bits(addr) & 0xff) << 24) |
               0xc;
        /* on x86 we want this to be CPU endian, on powerpc
         * without HW swappers it'll get swapped on the way
         * into VRAM - so no need for cpu_to_le32 on VRAM tables */
        writel(addr, ((void __iomem *)ptr) + (i * 4));
        return 0;
}

int rv370_pcie_gart_init(struct radeon_device *rdev)
{
        int r;

        if (rdev->gart.table.vram.robj) {
                WARN(1, "RV370 PCIE GART already initialized.\n");
                return 0;
        }
        /* Initialize common gart structure */
        r = radeon_gart_init(rdev);
        if (r)
                return r;
        r = rv370_debugfs_pcie_gart_info_init(rdev);
        if (r)
                DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
        rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
        return radeon_gart_table_vram_alloc(rdev);
}

int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
        uint32_t table_addr;
        uint32_t tmp;
        int r;

        if (rdev->gart.table.vram.robj == NULL) {
                dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
        r = radeon_gart_table_vram_pin(rdev);
        if (r)
                return r;
        /* discard memory requests outside of the configured range */
        tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
        tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE;
        WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
        WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
        WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
        table_addr = rdev->gart.table_addr;
        WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
        /* FIXME: setup default page */
        WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
        WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
        /* Clear error */
        WREG32_PCIE(0x18, 0);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        tmp |= RADEON_PCIE_TX_GART_EN;
        tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        rv370_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
                 (unsigned)(rdev->mc.gtt_size >> 20), table_addr);
        rdev->gart.ready = true;
        return 0;
}

void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
        u32 tmp;
        int r;

        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
        if (rdev->gart.table.vram.robj) {
                r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(rdev->gart.table.vram.robj);
                        radeon_bo_unpin(rdev->gart.table.vram.robj);
                        radeon_bo_unreserve(rdev->gart.table.vram.robj);
                }
        }
}

void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
        rv370_pcie_gart_disable(rdev);
        radeon_gart_table_vram_free(rdev);
        radeon_gart_fini(rdev);
}

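/*
 * Note: the HDP read-cache invalidate below is emitted through the ring
 * (HOST_PATH_CNTL written from a CP packet) instead of via MMIO; see the
 * errata note at the top of this file for why the MMIO path hard-locks.
 */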
void r300_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
{
        /* Whoever calls radeon_fence_emit should call ring_lock and ask
         * for enough space (today the callers are IB schedule and buffer move) */
        /* Write SC registers so SC & US assert idle */
        radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
        radeon_ring_write(rdev, 0);
        /* Flush 3D cache */
        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_ZC_FLUSH);
        /* Wait until IDLE & CLEAN */
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
                                 RADEON_WAIT_2D_IDLECLEAN |
                                 RADEON_WAIT_DMA_GUI_IDLE));
        radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
        radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
                                RADEON_HDP_READ_BUFFER_INVALIDATE);
        radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
        radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
        /* Emit fence sequence & fire IRQ */
        radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
        radeon_ring_write(rdev, fence->seq);
        radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
        radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}

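/*
 * Each DMA packet below moves at most 0x1FFFFF bytes (a 21-bit size
 * field, judging from the mask used here), hence the chunking loop.
 * Bits 30 and 31 of the final dword look like transfer-control flags;
 * that reading is an assumption based on this code, not on register docs.
 */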
int r300_copy_dma(struct radeon_device *rdev,
                  uint64_t src_offset,
                  uint64_t dst_offset,
                  unsigned num_pages,
                  struct radeon_fence *fence)
{
        uint32_t size;
        uint32_t cur_size;
        int i, num_loops;
        int r = 0;

        /* radeon pitch is /64 */
        size = num_pages << PAGE_SHIFT;
        num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
        r = radeon_ring_lock(rdev, num_loops * 4 + 64);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                return r;
        }
        /* Must wait for 2D idle & clean before DMA or hangs might happen */
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN);
        for (i = 0; i < num_loops; i++) {
                cur_size = size;
                if (cur_size > 0x1FFFFF) {
                        cur_size = 0x1FFFFF;
                }
                size -= cur_size;
                radeon_ring_write(rdev, PACKET0(0x720, 2));
                radeon_ring_write(rdev, src_offset);
                radeon_ring_write(rdev, dst_offset);
                radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
                src_offset += cur_size;
                dst_offset += cur_size;
        }
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
        if (fence) {
                r = radeon_fence_emit(rdev, fence);
        }
        radeon_ring_unlock_commit(rdev);
        return r;
}

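/*
 * The pipe-count selection below mirrors the switch in r300_gpu_init();
 * the two must stay in sync since both program GB_TILE_CONFIG.
 */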
void r300_ring_start(struct radeon_device *rdev)
{
        unsigned gb_tile_config;
        int r;

        /* Sub pixel 1/12 so we can have 4K rendering according to doc */
        gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
        switch (rdev->num_gb_pipes) {
        case 2:
                gb_tile_config |= R300_PIPE_COUNT_R300;
                break;
        case 3:
                gb_tile_config |= R300_PIPE_COUNT_R420_3P;
                break;
        case 4:
                gb_tile_config |= R300_PIPE_COUNT_R420;
                break;
        case 1:
        default:
                gb_tile_config |= R300_PIPE_COUNT_RV350;
                break;
        }

        r = radeon_ring_lock(rdev, 64);
        if (r) {
                return;
        }
        radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
        radeon_ring_write(rdev,
                          RADEON_ISYNC_ANY2D_IDLE3D |
                          RADEON_ISYNC_ANY3D_IDLE2D |
                          RADEON_ISYNC_WAIT_IDLEGUI |
                          RADEON_ISYNC_CPSCRATCH_IDLEGUI);
        radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
        radeon_ring_write(rdev, gb_tile_config);
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_3D_IDLECLEAN);
        radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
        radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
        radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_3D_IDLECLEAN);
        radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
        radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
        radeon_ring_write(rdev,
                          ((6 << R300_MS_X0_SHIFT) |
                           (6 << R300_MS_Y0_SHIFT) |
                           (6 << R300_MS_X1_SHIFT) |
                           (6 << R300_MS_Y1_SHIFT) |
                           (6 << R300_MS_X2_SHIFT) |
                           (6 << R300_MS_Y2_SHIFT) |
                           (6 << R300_MSBD0_Y_SHIFT) |
                           (6 << R300_MSBD0_X_SHIFT)));
        radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
        radeon_ring_write(rdev,
                          ((6 << R300_MS_X3_SHIFT) |
                           (6 << R300_MS_Y3_SHIFT) |
                           (6 << R300_MS_X4_SHIFT) |
                           (6 << R300_MS_Y4_SHIFT) |
                           (6 << R300_MS_X5_SHIFT) |
                           (6 << R300_MS_Y5_SHIFT) |
                           (6 << R300_MSBD1_SHIFT)));
        radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
        radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
        radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
        radeon_ring_write(rdev,
                          R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
        radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
        radeon_ring_write(rdev,
                          R300_GEOMETRY_ROUND_NEAREST |
                          R300_COLOR_ROUND_NEAREST);
        radeon_ring_unlock_commit(rdev);
}

void r300_errata(struct radeon_device *rdev)
{
        rdev->pll_errata = 0;

        if (rdev->family == CHIP_R300 &&
            (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
                rdev->pll_errata |= CHIP_ERRATA_R300_CG;
        }
}

int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        uint32_t tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32(RADEON_MC_STATUS);
                if (tmp & R300_MC_IDLE) {
                        return 0;
                }
                DRM_UDELAY(1);
        }
        return -1;
}

void r300_gpu_init(struct radeon_device *rdev)
{
        uint32_t gb_tile_config, tmp;

        r100_hdp_reset(rdev);
        /* FIXME: does rv380 have only one pipe? */
        if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
                /* r300,r350 */
                rdev->num_gb_pipes = 2;
        } else {
                /* rv350,rv370,rv380 */
                rdev->num_gb_pipes = 1;
        }
        rdev->num_z_pipes = 1;
        gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
        switch (rdev->num_gb_pipes) {
        case 2:
                gb_tile_config |= R300_PIPE_COUNT_R300;
                break;
        case 3:
                gb_tile_config |= R300_PIPE_COUNT_R420_3P;
                break;
        case 4:
                gb_tile_config |= R300_PIPE_COUNT_R420;
                break;
        default:
        case 1:
                gb_tile_config |= R300_PIPE_COUNT_RV350;
                break;
        }
        WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait for GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }

        tmp = RREG32(R300_DST_PIPE_CONFIG);
        WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

        WREG32(R300_RB2D_DSTCACHE_MODE,
               R300_DC_AUTOFLUSH_ENABLE |
               R300_DC_DC_DISABLE_IGNORE_PE);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait for GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        if (r300_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait for MC idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
                 rdev->num_gb_pipes, rdev->num_z_pipes);
}

int r300_ga_reset(struct radeon_device *rdev)
{
        uint32_t tmp;
        bool reinit_cp;
        int i;

        reinit_cp = rdev->cp.ready;
        rdev->cp.ready = false;
        for (i = 0; i < rdev->usec_timeout; i++) {
                WREG32(RADEON_CP_CSQ_MODE, 0);
                WREG32(RADEON_CP_CSQ_CNTL, 0);
                WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
                (void)RREG32(RADEON_RBBM_SOFT_RESET);
                udelay(200);
                WREG32(RADEON_RBBM_SOFT_RESET, 0);
                /* Wait to prevent race in RBBM_STATUS */
                mdelay(1);
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (tmp & ((1 << 20) | (1 << 26))) {
                        DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
                        /* GA still busy, soft reset it */
                        WREG32(0x429C, 0x200);
                        WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
                        WREG32(R300_RE_SCISSORS_TL, 0);
                        WREG32(R300_RE_SCISSORS_BR, 0);
                        WREG32(0x24AC, 0);
                }
                /* Wait to prevent race in RBBM_STATUS */
                mdelay(1);
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (!(tmp & ((1 << 20) | (1 << 26)))) {
                        break;
                }
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (!(tmp & ((1 << 20) | (1 << 26)))) {
                        DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n",
                                 tmp);
                        if (reinit_cp) {
                                return r100_cp_init(rdev, rdev->cp.ring_size);
                        }
                        return 0;
                }
                DRM_UDELAY(1);
        }
        tmp = RREG32(RADEON_RBBM_STATUS);
        DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
        return -1;
}

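/*
 * Resets cascade from least to most intrusive, keyed off the busy bits in
 * RBBM_STATUS: HDP is always reset, while RB2D, GA and CP are only reset
 * when their respective status bits report them busy.
 */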
int r300_gpu_reset(struct radeon_device *rdev)
{
        uint32_t status;

        /* reset order likely matters */
        status = RREG32(RADEON_RBBM_STATUS);
        /* reset HDP */
        r100_hdp_reset(rdev);
        /* reset rb2d */
        if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
                r100_rb2d_reset(rdev);
        }
        /* reset GA */
        if (status & ((1 << 20) | (1 << 26))) {
                r300_ga_reset(rdev);
        }
        /* reset CP */
        status = RREG32(RADEON_RBBM_STATUS);
        if (status & (1 << 16)) {
                r100_cp_reset(rdev);
        }
        /* Check if GPU is idle */
        status = RREG32(RADEON_RBBM_STATUS);
        if (status & RADEON_RBBM_ACTIVE) {
                DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
                return -1;
        }
        DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
        return 0;
}


/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_vram_info(struct radeon_device *rdev)
{
        uint32_t tmp;

        /* DDR for all cards after R300, including IGPs */
        rdev->mc.vram_is_ddr = true;

        tmp = RREG32(RADEON_MEM_CNTL);
        tmp &= R300_MEM_NUM_CHANNELS_MASK;
        switch (tmp) {
        case 0: rdev->mc.vram_width = 64; break;
        case 1: rdev->mc.vram_width = 128; break;
        case 2: rdev->mc.vram_width = 256; break;
        default: rdev->mc.vram_width = 128; break;
        }

        r100_vram_init_sizes(rdev);
}

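/*
 * PCIE lane helpers: IGPs and non-PCIE (PCI/AGP) cards have no
 * reconfigurable link, hence the early returns below.
 */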
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
        uint32_t link_width_cntl, mask;

        if (rdev->flags & RADEON_IS_IGP)
                return;

        if (!(rdev->flags & RADEON_IS_PCIE))
                return;

        /* FIXME wait for idle */

        switch (lanes) {
        case 0:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
                break;
        case 1:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
                break;
        case 2:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
                break;
        case 4:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
                break;
        case 8:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
                break;
        case 12:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
                break;
        case 16:
        default:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
                break;
        }

        link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

        if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
            (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
                return;

        link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
                             RADEON_PCIE_LC_RECONFIG_NOW |
                             RADEON_PCIE_LC_RECONFIG_LATER |
                             RADEON_PCIE_LC_SHORT_RECONFIG_EN);
        link_width_cntl |= mask;
        WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
        WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
                                                     RADEON_PCIE_LC_RECONFIG_NOW));

        /* wait for lane set to complete */
        link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
        while (link_width_cntl == 0xffffffff)
                link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
}

int rv370_get_pcie_lanes(struct radeon_device *rdev)
{
        u32 link_width_cntl;

        if (rdev->flags & RADEON_IS_IGP)
                return 0;

        if (!(rdev->flags & RADEON_IS_PCIE))
                return 0;

        /* FIXME wait for idle */

        link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

        switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
        case RADEON_PCIE_LC_LINK_WIDTH_X0:
                return 0;
        case RADEON_PCIE_LC_LINK_WIDTH_X1:
                return 1;
        case RADEON_PCIE_LC_LINK_WIDTH_X2:
                return 2;
        case RADEON_PCIE_LC_LINK_WIDTH_X4:
                return 4;
        case RADEON_PCIE_LC_LINK_WIDTH_X8:
                return 8;
        case RADEON_PCIE_LC_LINK_WIDTH_X16:
        default:
                return 16;
        }
}

#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t tmp;

        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
        seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
        seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
        seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
        seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
        seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
        seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
        return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
        {"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
        return 0;
#endif
}

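/*
 * Command-stream checking: r300_packet0_check() validates a single
 * register write from a type-0 packet. Registers carrying GPU addresses
 * are patched with the relocation's GPU offset; the others update the
 * r100_cs_track state that r100_cs_track_check() later uses to verify
 * that draws stay inside the bound buffers.
 */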
static int r300_packet0_check(struct radeon_cs_parser *p,
                struct radeon_cs_packet *pkt,
                unsigned idx, unsigned reg)
{
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        uint32_t tmp, tile_flags = 0;
        unsigned i;
        int r;
        u32 idx_value;

        ib = p->ib->ptr;
        track = (struct r100_cs_track *)p->track;
        idx_value = radeon_get_ib_value(p, idx);

        switch (reg) {
        case AVIVO_D1MODE_VLINE_START_END:
        case RADEON_CRTC_GUI_TRIG_VLINE:
                r = r100_cs_packet_parse_vline(p);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                        idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                break;
        case RADEON_DST_PITCH_OFFSET:
        case RADEON_SRC_PITCH_OFFSET:
                r = r100_reloc_pitch_offset(p, pkt, idx, reg);
                if (r)
                        return r;
                break;
        case R300_RB3D_COLOROFFSET0:
        case R300_RB3D_COLOROFFSET1:
        case R300_RB3D_COLOROFFSET2:
        case R300_RB3D_COLOROFFSET3:
                i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                        idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                track->cb[i].robj = reloc->robj;
                track->cb[i].offset = idx_value;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case R300_ZB_DEPTHOFFSET:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                        idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                track->zb.robj = reloc->robj;
                track->zb.offset = idx_value;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case R300_TX_OFFSET_0:
        case R300_TX_OFFSET_0+4:
        case R300_TX_OFFSET_0+8:
        case R300_TX_OFFSET_0+12:
        case R300_TX_OFFSET_0+16:
        case R300_TX_OFFSET_0+20:
        case R300_TX_OFFSET_0+24:
        case R300_TX_OFFSET_0+28:
        case R300_TX_OFFSET_0+32:
        case R300_TX_OFFSET_0+36:
        case R300_TX_OFFSET_0+40:
        case R300_TX_OFFSET_0+44:
        case R300_TX_OFFSET_0+48:
        case R300_TX_OFFSET_0+52:
        case R300_TX_OFFSET_0+56:
        case R300_TX_OFFSET_0+60:
                i = (reg - R300_TX_OFFSET_0) >> 2;
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                        idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }

                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                        tile_flags |= R300_TXO_MACRO_TILE;
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= R300_TXO_MICRO_TILE;

                tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
                tmp |= tile_flags;
                ib[idx] = tmp;
                track->textures[i].robj = reloc->robj;
                break;
        /* Tracked registers */
        case 0x2084:
                /* VAP_VF_CNTL */
                track->vap_vf_cntl = idx_value;
                break;
        case 0x20B4:
                /* VAP_VTX_SIZE */
                track->vtx_size = idx_value & 0x7F;
                break;
        case 0x2134:
                /* VAP_VF_MAX_VTX_INDX */
                track->max_indx = idx_value & 0x00FFFFFFUL;
                break;
        case 0x43E4:
                /* SC_SCISSOR1 */
                track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
                if (p->rdev->family < CHIP_RV515) {
                        track->maxy -= 1440;
                }
                break;
        case 0x4E00:
                /* RB3D_CCTL */
                track->num_cb = ((idx_value >> 5) & 0x3) + 1;
                break;
        case 0x4E38:
        case 0x4E3C:
        case 0x4E40:
        case 0x4E44:
                /* RB3D_COLORPITCH0 */
                /* RB3D_COLORPITCH1 */
                /* RB3D_COLORPITCH2 */
                /* RB3D_COLORPITCH3 */
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }

                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                        tile_flags |= R300_COLOR_TILE_ENABLE;
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= R300_COLOR_MICROTILE_ENABLE;

                tmp = idx_value & ~(0x7 << 16);
                tmp |= tile_flags;
                ib[idx] = tmp;

                i = (reg - 0x4E38) >> 2;
                track->cb[i].pitch = idx_value & 0x3FFE;
                switch (((idx_value >> 21) & 0xF)) {
                case 9:
                case 11:
                case 12:
                        track->cb[i].cpp = 1;
                        break;
                case 3:
                case 4:
                case 13:
                case 15:
                        track->cb[i].cpp = 2;
                        break;
                case 6:
                        track->cb[i].cpp = 4;
                        break;
                case 10:
                        track->cb[i].cpp = 8;
                        break;
                case 7:
                        track->cb[i].cpp = 16;
                        break;
                default:
                        DRM_ERROR("Invalid color buffer format (%d) !\n",
                                  ((idx_value >> 21) & 0xF));
                        return -EINVAL;
                }
                break;
        case 0x4F00:
                /* ZB_CNTL */
                if (idx_value & 2) {
                        track->z_enabled = true;
                } else {
                        track->z_enabled = false;
                }
                break;
        case 0x4F10:
                /* ZB_FORMAT */
                switch ((idx_value & 0xF)) {
                case 0:
                case 1:
                        track->zb.cpp = 2;
                        break;
                case 2:
                        track->zb.cpp = 4;
                        break;
                default:
                        DRM_ERROR("Invalid z buffer format (%d) !\n",
                                  (idx_value & 0xF));
                        return -EINVAL;
                }
                break;
        case 0x4F24:
                /* ZB_DEPTHPITCH */
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }

                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                        tile_flags |= R300_DEPTHMACROTILE_ENABLE;
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= R300_DEPTHMICROTILE_TILED;

                tmp = idx_value & ~(0x7 << 16);
                tmp |= tile_flags;
                ib[idx] = tmp;

                track->zb.pitch = idx_value & 0x3FFC;
                break;
        case 0x4104:
                for (i = 0; i < 16; i++) {
                        bool enabled;

                        enabled = !!(idx_value & (1 << i));
                        track->textures[i].enabled = enabled;
                }
                break;
        case 0x44C0:
        case 0x44C4:
        case 0x44C8:
        case 0x44CC:
        case 0x44D0:
        case 0x44D4:
        case 0x44D8:
        case 0x44DC:
        case 0x44E0:
        case 0x44E4:
        case 0x44E8:
        case 0x44EC:
        case 0x44F0:
        case 0x44F4:
        case 0x44F8:
        case 0x44FC:
                /* TX_FORMAT1_[0-15] */
                i = (reg - 0x44C0) >> 2;
                tmp = (idx_value >> 25) & 0x3;
                track->textures[i].tex_coord_type = tmp;
                switch ((idx_value & 0x1F)) {
                case R300_TX_FORMAT_X8:
                case R300_TX_FORMAT_Y4X4:
                case R300_TX_FORMAT_Z3Y3X2:
                        track->textures[i].cpp = 1;
                        break;
                case R300_TX_FORMAT_X16:
                case R300_TX_FORMAT_Y8X8:
                case R300_TX_FORMAT_Z5Y6X5:
                case R300_TX_FORMAT_Z6Y5X5:
                case R300_TX_FORMAT_W4Z4Y4X4:
                case R300_TX_FORMAT_W1Z5Y5X5:
                case R300_TX_FORMAT_D3DMFT_CxV8U8:
                case R300_TX_FORMAT_B8G8_B8G8:
                case R300_TX_FORMAT_G8R8_G8B8:
                        track->textures[i].cpp = 2;
                        break;
                case R300_TX_FORMAT_Y16X16:
                case R300_TX_FORMAT_Z11Y11X10:
                case R300_TX_FORMAT_Z10Y11X11:
                case R300_TX_FORMAT_W8Z8Y8X8:
                case R300_TX_FORMAT_W2Z10Y10X10:
                case 0x17:
                case R300_TX_FORMAT_FL_I32:
                case 0x1e:
                        track->textures[i].cpp = 4;
                        break;
                case R300_TX_FORMAT_W16Z16Y16X16:
                case R300_TX_FORMAT_FL_R16G16B16A16:
                case R300_TX_FORMAT_FL_I32A32:
                        track->textures[i].cpp = 8;
                        break;
                case R300_TX_FORMAT_FL_R32G32B32A32:
                        track->textures[i].cpp = 16;
                        break;
                case R300_TX_FORMAT_DXT1:
                        track->textures[i].cpp = 1;
                        track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
                        break;
                case R300_TX_FORMAT_ATI2N:
                        if (p->rdev->family < CHIP_R420) {
                                DRM_ERROR("Invalid texture format %u\n",
                                          (idx_value & 0x1F));
                                return -EINVAL;
                        }
                        /* The same rules apply as for DXT3/5. */
                        /* Pass through. */
                case R300_TX_FORMAT_DXT3:
                case R300_TX_FORMAT_DXT5:
                        track->textures[i].cpp = 1;
                        track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
                        break;
                default:
                        DRM_ERROR("Invalid texture format %u\n",
                                  (idx_value & 0x1F));
                        return -EINVAL;
                }
                break;
        case 0x4400:
        case 0x4404:
        case 0x4408:
        case 0x440C:
        case 0x4410:
        case 0x4414:
        case 0x4418:
        case 0x441C:
        case 0x4420:
        case 0x4424:
        case 0x4428:
        case 0x442C:
        case 0x4430:
        case 0x4434:
        case 0x4438:
        case 0x443C:
                /* TX_FILTER0_[0-15] */
                i = (reg - 0x4400) >> 2;
                tmp = idx_value & 0x7;
                if (tmp == 2 || tmp == 4 || tmp == 6) {
                        track->textures[i].roundup_w = false;
                }
                tmp = (idx_value >> 3) & 0x7;
                if (tmp == 2 || tmp == 4 || tmp == 6) {
                        track->textures[i].roundup_h = false;
                }
                break;
        case 0x4500:
        case 0x4504:
        case 0x4508:
        case 0x450C:
        case 0x4510:
        case 0x4514:
        case 0x4518:
        case 0x451C:
        case 0x4520:
        case 0x4524:
        case 0x4528:
        case 0x452C:
        case 0x4530:
        case 0x4534:
        case 0x4538:
        case 0x453C:
                /* TX_FORMAT2_[0-15] */
                i = (reg - 0x4500) >> 2;
                tmp = idx_value & 0x3FFF;
                track->textures[i].pitch = tmp + 1;
                if (p->rdev->family >= CHIP_RV515) {
                        tmp = ((idx_value >> 15) & 1) << 11;
                        track->textures[i].width_11 = tmp;
                        tmp = ((idx_value >> 16) & 1) << 11;
                        track->textures[i].height_11 = tmp;

                        /* ATI1N */
                        if (idx_value & (1 << 14)) {
                                /* The same rules apply as for DXT1. */
                                track->textures[i].compress_format =
                                        R100_TRACK_COMP_DXT1;
                        }
                } else if (idx_value & (1 << 14)) {
                        DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
                        return -EINVAL;
                }
                break;
        case 0x4480:
        case 0x4484:
        case 0x4488:
        case 0x448C:
        case 0x4490:
        case 0x4494:
        case 0x4498:
        case 0x449C:
        case 0x44A0:
        case 0x44A4:
        case 0x44A8:
        case 0x44AC:
        case 0x44B0:
        case 0x44B4:
        case 0x44B8:
        case 0x44BC:
                /* TX_FORMAT0_[0-15] */
                i = (reg - 0x4480) >> 2;
                tmp = idx_value & 0x7FF;
                track->textures[i].width = tmp + 1;
                tmp = (idx_value >> 11) & 0x7FF;
                track->textures[i].height = tmp + 1;
                tmp = (idx_value >> 26) & 0xF;
                track->textures[i].num_levels = tmp;
                tmp = idx_value & (1 << 31);
                track->textures[i].use_pitch = !!tmp;
                tmp = (idx_value >> 22) & 0xF;
                track->textures[i].txdepth = tmp;
                break;
        case R300_ZB_ZPASS_ADDR:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                        idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case 0x4e0c:
                /* RB3D_COLOR_CHANNEL_MASK */
                track->color_channel_mask = idx_value;
                break;
        case 0x4d1c:
                /* ZB_BW_CNTL */
                track->fastfill = !!(idx_value & (1 << 2));
                break;
        case 0x4e04:
                /* RB3D_BLENDCNTL */
                track->blend_read_enable = !!(idx_value & (1 << 2));
                break;
        case 0x4be8:
                /* valid register only on RV530 */
                if (p->rdev->family == CHIP_RV530)
                        break;
                /* fallthrough do not move */
        default:
                printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
                       reg, idx);
                return -EINVAL;
        }
        return 0;
}

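/*
 * Type-3 packets carry the actual draw commands; every draw path below
 * re-runs r100_cs_track_check() so buffer bounds are validated against
 * the most recently tracked state.
 */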
static int r300_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
{
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        unsigned idx;
        int r;

        ib = p->ib->ptr;
        idx = pkt->idx + 1;
        track = (struct r100_cs_track *)p->track;
        switch (pkt->opcode) {
        case PACKET3_3D_LOAD_VBPNTR:
                r = r100_packet3_load_vbpntr(p, pkt, idx);
                if (r)
                        return r;
                break;
        case PACKET3_INDX_BUFFER:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
                r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
                if (r) {
                        return r;
                }
                break;
        /* Draw packet */
        case PACKET3_3D_DRAW_IMMD:
                /* Number of dwords is vtx_size * (num_vertices - 1);
                 * PRIM_WALK must be equal to 3: vertex data is embedded
                 * in the command stream */
                if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                        return -EINVAL;
                }
                track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                track->immd_dwords = pkt->count - 1;
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_IMMD_2:
                /* Number of dwords is vtx_size * (num_vertices - 1);
                 * PRIM_WALK must be equal to 3: vertex data is embedded
                 * in the command stream */
                if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                        return -EINVAL;
                }
                track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                track->immd_dwords = pkt->count;
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_VBUF:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_VBUF_2:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_INDX:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_INDX_2:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_NOP:
                break;
        default:
                DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
                return -EINVAL;
        }
        return 0;
}

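/*
 * Main CS entry point: walk the indirect buffer one packet at a time.
 * Each iteration consumes pkt.count + 2 dwords (one header dword plus
 * count + 1 payload dwords), which is why p->idx advances by that much.
 */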
int r300_cs_parse(struct radeon_cs_parser *p)
{
        struct radeon_cs_packet pkt;
        struct r100_cs_track *track;
        int r;

        track = kzalloc(sizeof(*track), GFP_KERNEL);
        if (track == NULL)
                return -ENOMEM;
        r100_cs_track_clear(p->rdev, track);
        p->track = track;
        do {
                r = r100_cs_packet_parse(p, &pkt, p->idx);
                if (r) {
                        return r;
                }
                p->idx += pkt.count + 2;
                switch (pkt.type) {
                case PACKET_TYPE0:
                        r = r100_cs_parse_packet0(p, &pkt,
                                                  p->rdev->config.r300.reg_safe_bm,
                                                  p->rdev->config.r300.reg_safe_bm_size,
                                                  &r300_packet0_check);
                        break;
                case PACKET_TYPE2:
                        break;
                case PACKET_TYPE3:
                        r = r300_packet3_check(p, &pkt);
                        break;
                default:
                        DRM_ERROR("Unknown packet type %d !\n", pkt.type);
                        return -EINVAL;
                }
                if (r) {
                        return r;
                }
        } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
        return 0;
}

void r300_set_reg_safe(struct radeon_device *rdev)
{
        rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
        rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
}

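/*
 * MC programming: the framebuffer and AGP apertures are (re)located
 * while all MC clients are stopped, then the client state is restored.
 */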
void r300_mc_program(struct radeon_device *rdev)
{
        struct r100_mc_save save;
        int r;

        r = r100_debugfs_mc_info_init(rdev);
        if (r) {
                dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
        }

        /* Stop all MC clients */
        r100_mc_stop(rdev, &save);
        if (rdev->flags & RADEON_IS_AGP) {
                WREG32(R_00014C_MC_AGP_LOCATION,
                        S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
                        S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
                WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
                WREG32(R_00015C_AGP_BASE_2,
                        upper_32_bits(rdev->mc.agp_base) & 0xff);
        } else {
                WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
                WREG32(R_000170_AGP_BASE, 0);
                WREG32(R_00015C_AGP_BASE_2, 0);
        }
        /* Wait for MC idle */
        if (r300_mc_wait_for_idle(rdev))
                DRM_INFO("Failed to wait MC idle before programming MC.\n");
        /* Program the MC; the address space is limited to 32 bits */
        WREG32(R_000148_MC_FB_LOCATION,
                S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
                S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
        r100_mc_resume(rdev, &save);
}

void r300_clock_startup(struct radeon_device *rdev)
{
        u32 tmp;

        if (radeon_dynclks != -1 && radeon_dynclks)
                radeon_legacy_set_clock_gating(rdev, 1);
        /* We need to force on some of the blocks */
        tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
        tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
        if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
                tmp |= S_00000D_FORCE_VAP(1);
        WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}

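/*
 * Startup ordering below matters: the MC is programmed before the GART
 * is enabled, and the GART comes up before the CP ring and IBs, since
 * those buffers are normally reached through the GART (an assumption
 * about the allocation domain, but consistent with the order used here).
 */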
static int r300_startup(struct radeon_device *rdev)
{
        int r;

        /* set common regs */
        r100_set_common_regs(rdev);
        /* program mc */
        r300_mc_program(rdev);
        /* Resume clock */
        r300_clock_startup(rdev);
        /* Initialize GPU configuration (# pipes, ...) */
        r300_gpu_init(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_enable(rdev);
                if (r)
                        return r;
        }

        if (rdev->family == CHIP_R300 ||
            rdev->family == CHIP_R350 ||
            rdev->family == CHIP_RV350)
                r100_enable_bm(rdev);

        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_enable(rdev);
                if (r)
                        return r;
        }
        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
        r = r100_wb_init(rdev);
        if (r)
                dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
                return r;
        }
        return 0;
}

int r300_resume(struct radeon_device *rdev)
{
        /* Make sure the GART is not working */
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_disable(rdev);
        /* Resume clock before doing reset */
        r300_clock_startup(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_gpu_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* post */
        radeon_combios_asic_init(rdev->ddev);
        /* Resume clock after posting */
        r300_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        return r300_startup(rdev);
}

int r300_suspend(struct radeon_device *rdev)
{
        r100_cp_disable(rdev);
        r100_wb_disable(rdev);
        r100_irq_disable(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_disable(rdev);
        return 0;
}

void r300_fini(struct radeon_device *rdev)
{
        r100_cp_fini(rdev);
        r100_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_fini(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_irq_kms_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
}

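/*
 * Full device bring-up. Note that a failure in r300_startup() does not
 * fail r300_init() itself: acceleration is torn down and accel_working
 * cleared, but 0 is returned so KMS can still drive the card unaccelerated.
 */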
int r300_init(struct radeon_device *rdev)
{
        int r;

        /* Disable VGA */
        r100_vga_render_disable(rdev);
        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* TODO: disabling VGA needs to use a VGA request */
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                dev_err(rdev->dev, "Expecting combios for R300 family GPU\n");
                return -EINVAL;
        } else {
                r = radeon_combios_init(rdev);
                if (r)
                        return r;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_gpu_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* check if card is posted */
        if (radeon_boot_test_post_card(rdev) == false)
                return -EINVAL;
        /* Set asic errata */
        r300_errata(rdev);
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* Initialize power management */
        radeon_pm_init(rdev);
        /* Get vram information */
        r300_vram_info(rdev);
        /* Initialize memory controller (also test AGP) */
        r = r420_mc_init(rdev);
        if (r)
                return r;
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
                return r;
        r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r)
                return r;
        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_init(rdev);
                if (r)
                        return r;
        }
        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_init(rdev);
                if (r)
                        return r;
        }
        r300_set_reg_safe(rdev);
        rdev->accel_working = true;
        r = r300_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, so stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
                r100_wb_fini(rdev);
                r100_ib_fini(rdev);
                radeon_irq_kms_fini(rdev);
                if (rdev->flags & RADEON_IS_PCIE)
                        rv370_pcie_gart_fini(rdev);
                if (rdev->flags & RADEON_IS_PCI)
                        r100_pci_gart_fini(rdev);
                radeon_agp_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}