1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/firmware.h>
29 #include <linux/platform_device.h>
30 #include <linux/slab.h>
31 #include "drmP.h"
32 #include "radeon.h"
33 #include "radeon_asic.h"
34 #include "radeon_drm.h"
35 #include "rv770d.h"
36 #include "atom.h"
37 #include "avivod.h"
38
39 #define R700_PFP_UCODE_SIZE 848
40 #define R700_PM4_UCODE_SIZE 1360
41
42 static void rv770_gpu_init(struct radeon_device *rdev);
43 void rv770_fini(struct radeon_device *rdev);
44
45
46 /*
47  * GART
48  */
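/* Enable the PCIE GART: configure the VM L2 cache and L1 TLBs, point VM
 * context 0 at the GART page table in VRAM, route faults to the dummy
 * page, and flush the TLB before marking the GART ready.
 */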
49 int rv770_pcie_gart_enable(struct radeon_device *rdev)
50 {
51         u32 tmp;
52         int r, i;
53
54         if (rdev->gart.table.vram.robj == NULL) {
55                 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
56                 return -EINVAL;
57         }
58         r = radeon_gart_table_vram_pin(rdev);
59         if (r)
60                 return r;
61         radeon_gart_restore(rdev);
62         /* Setup L2 cache */
63         WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
64                                 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
65                                 EFFECTIVE_L2_QUEUE_SIZE(7));
66         WREG32(VM_L2_CNTL2, 0);
67         WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
68         /* Setup TLB control */
69         tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
70                 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
71                 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
72                 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
73         WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
74         WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
75         WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
76         WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
77         WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
78         WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
79         WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
80         WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
81         WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
82         WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
83         WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
84                                 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
85         WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
86                         (u32)(rdev->dummy_page.addr >> 12));
87         for (i = 1; i < 7; i++)
88                 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
89
90         r600_pcie_gart_tlb_flush(rdev);
91         rdev->gart.ready = true;
92         return 0;
93 }
94
95 void rv770_pcie_gart_disable(struct radeon_device *rdev)
96 {
97         u32 tmp;
98         int i, r;
99
100         /* Disable all tables */
101         for (i = 0; i < 7; i++)
102                 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
103
104         /* Setup L2 cache */
105         WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
106                                 EFFECTIVE_L2_QUEUE_SIZE(7));
107         WREG32(VM_L2_CNTL2, 0);
108         WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
109         /* Setup TLB control */
110         tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
111         WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
112         WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
113         WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
114         WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
115         WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
116         WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
117         WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
118         if (rdev->gart.table.vram.robj) {
119                 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
120                 if (likely(r == 0)) {
121                         radeon_bo_kunmap(rdev->gart.table.vram.robj);
122                         radeon_bo_unpin(rdev->gart.table.vram.robj);
123                         radeon_bo_unreserve(rdev->gart.table.vram.robj);
124                 }
125         }
126 }
127
128 void rv770_pcie_gart_fini(struct radeon_device *rdev)
129 {
130         radeon_gart_fini(rdev);
131         rv770_pcie_gart_disable(rdev);
132         radeon_gart_table_vram_free(rdev);
133 }
134
135
136 void rv770_agp_enable(struct radeon_device *rdev)
137 {
138         u32 tmp;
139         int i;
140
141         /* Setup L2 cache */
142         WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
143                                 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
144                                 EFFECTIVE_L2_QUEUE_SIZE(7));
145         WREG32(VM_L2_CNTL2, 0);
146         WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
147         /* Setup TLB control */
148         tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
149                 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
150                 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
151                 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
152         WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
153         WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
154         WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
155         WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
156         WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
157         WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
158         WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
159         for (i = 0; i < 7; i++)
160                 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
161 }
162
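/* Program the memory controller: stop the MC, set up the system aperture,
 * framebuffer location and AGP window, then resume the MC and disable the
 * VGA renderer so it cannot overwrite our VRAM objects.
 */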
163 static void rv770_mc_program(struct radeon_device *rdev)
164 {
165         struct rv515_mc_save save;
166         u32 tmp;
167         int i, j;
168
169         /* Initialize HDP */
170         for (i = 0, j = 0; i < 32; i++, j += 0x18) {
171                 WREG32((0x2c14 + j), 0x00000000);
172                 WREG32((0x2c18 + j), 0x00000000);
173                 WREG32((0x2c1c + j), 0x00000000);
174                 WREG32((0x2c20 + j), 0x00000000);
175                 WREG32((0x2c24 + j), 0x00000000);
176         }
177         WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
178
179         rv515_mc_stop(rdev, &save);
180         if (r600_mc_wait_for_idle(rdev)) {
181                 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
182         }
183         /* Lock out access through the VGA aperture */
184         WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
185         /* Update configuration */
186         if (rdev->flags & RADEON_IS_AGP) {
187                 if (rdev->mc.vram_start < rdev->mc.gtt_start) {
188                         /* VRAM before AGP */
189                         WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
190                                 rdev->mc.vram_start >> 12);
191                         WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
192                                 rdev->mc.gtt_end >> 12);
193                 } else {
194                         /* VRAM after AGP */
195                         WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
196                                 rdev->mc.gtt_start >> 12);
197                         WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
198                                 rdev->mc.vram_end >> 12);
199                 }
200         } else {
201                 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
202                         rdev->mc.vram_start >> 12);
203                 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
204                         rdev->mc.vram_end >> 12);
205         }
206         WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
207         tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
208         tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
209         WREG32(MC_VM_FB_LOCATION, tmp);
210         WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
211         WREG32(HDP_NONSURFACE_INFO, (2 << 7));
212         WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
213         if (rdev->flags & RADEON_IS_AGP) {
214                 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
215                 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
216                 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
217         } else {
218                 WREG32(MC_VM_AGP_BASE, 0);
219                 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
220                 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
221         }
222         if (r600_mc_wait_for_idle(rdev)) {
223                 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
224         }
225         rv515_mc_resume(rdev, &save);
226         /* we need to own VRAM, so turn off the VGA renderer here
227          * to stop it from overwriting our objects */
228         rv515_vga_render_disable(rdev);
229 }
230
231
232 /*
233  * CP.
234  */
235 void r700_cp_stop(struct radeon_device *rdev)
236 {
237         WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
238 }
239
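/* Load the CP microcode: halt the CP, soft-reset it, then stream the
 * big-endian PFP and ME (PM4) images into the ucode RAMs one dword at a
 * time through their ADDR/DATA register pairs.
 */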
240 static int rv770_cp_load_microcode(struct radeon_device *rdev)
241 {
242         const __be32 *fw_data;
243         int i;
244
245         if (!rdev->me_fw || !rdev->pfp_fw)
246                 return -EINVAL;
247
248         r700_cp_stop(rdev);
249         WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
250
251         /* Reset cp */
252         WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
253         RREG32(GRBM_SOFT_RESET);
254         mdelay(15);
255         WREG32(GRBM_SOFT_RESET, 0);
256
257         fw_data = (const __be32 *)rdev->pfp_fw->data;
258         WREG32(CP_PFP_UCODE_ADDR, 0);
259         for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
260                 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
261         WREG32(CP_PFP_UCODE_ADDR, 0);
262
263         fw_data = (const __be32 *)rdev->me_fw->data;
264         WREG32(CP_ME_RAM_WADDR, 0);
265         for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
266                 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
267
268         WREG32(CP_PFP_UCODE_ADDR, 0);
269         WREG32(CP_ME_RAM_WADDR, 0);
270         WREG32(CP_ME_RAM_RADDR, 0);
271         return 0;
272 }
273
274 void r700_cp_fini(struct radeon_device *rdev)
275 {
276         r700_cp_stop(rdev);
277         radeon_ring_fini(rdev);
278 }
279
280 /*
281  * Core functions
282  */
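/* Compute the tile-pipe-to-backend map: every tile pipe gets a 2-bit field
 * selecting one of the enabled render backends.  Backends are assigned
 * round-robin, and on RV770/RV730 the pipes are walked in a swizzled order;
 * the packed result is fed into GB_TILING_CONFIG via BACKEND_MAP().
 */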
283 static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
284                                              u32 num_tile_pipes,
285                                              u32 num_backends,
286                                              u32 backend_disable_mask)
287 {
288         u32 backend_map = 0;
289         u32 enabled_backends_mask;
290         u32 enabled_backends_count;
291         u32 cur_pipe;
292         u32 swizzle_pipe[R7XX_MAX_PIPES];
293         u32 cur_backend;
294         u32 i;
295         bool force_no_swizzle;
296
297         if (num_tile_pipes > R7XX_MAX_PIPES)
298                 num_tile_pipes = R7XX_MAX_PIPES;
299         if (num_tile_pipes < 1)
300                 num_tile_pipes = 1;
301         if (num_backends > R7XX_MAX_BACKENDS)
302                 num_backends = R7XX_MAX_BACKENDS;
303         if (num_backends < 1)
304                 num_backends = 1;
305
306         enabled_backends_mask = 0;
307         enabled_backends_count = 0;
308         for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
309                 if (((backend_disable_mask >> i) & 1) == 0) {
310                         enabled_backends_mask |= (1 << i);
311                         ++enabled_backends_count;
312                 }
313                 if (enabled_backends_count == num_backends)
314                         break;
315         }
316
317         if (enabled_backends_count == 0) {
318                 enabled_backends_mask = 1;
319                 enabled_backends_count = 1;
320         }
321
322         if (enabled_backends_count != num_backends)
323                 num_backends = enabled_backends_count;
324
325         switch (rdev->family) {
326         case CHIP_RV770:
327         case CHIP_RV730:
328                 force_no_swizzle = false;
329                 break;
330         case CHIP_RV710:
331         case CHIP_RV740:
332         default:
333                 force_no_swizzle = true;
334                 break;
335         }
336
337         memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
338         switch (num_tile_pipes) {
339         case 1:
340                 swizzle_pipe[0] = 0;
341                 break;
342         case 2:
343                 swizzle_pipe[0] = 0;
344                 swizzle_pipe[1] = 1;
345                 break;
346         case 3:
347                 if (force_no_swizzle) {
348                         swizzle_pipe[0] = 0;
349                         swizzle_pipe[1] = 1;
350                         swizzle_pipe[2] = 2;
351                 } else {
352                         swizzle_pipe[0] = 0;
353                         swizzle_pipe[1] = 2;
354                         swizzle_pipe[2] = 1;
355                 }
356                 break;
357         case 4:
358                 if (force_no_swizzle) {
359                         swizzle_pipe[0] = 0;
360                         swizzle_pipe[1] = 1;
361                         swizzle_pipe[2] = 2;
362                         swizzle_pipe[3] = 3;
363                 } else {
364                         swizzle_pipe[0] = 0;
365                         swizzle_pipe[1] = 2;
366                         swizzle_pipe[2] = 3;
367                         swizzle_pipe[3] = 1;
368                 }
369                 break;
370         case 5:
371                 if (force_no_swizzle) {
372                         swizzle_pipe[0] = 0;
373                         swizzle_pipe[1] = 1;
374                         swizzle_pipe[2] = 2;
375                         swizzle_pipe[3] = 3;
376                         swizzle_pipe[4] = 4;
377                 } else {
378                         swizzle_pipe[0] = 0;
379                         swizzle_pipe[1] = 2;
380                         swizzle_pipe[2] = 4;
381                         swizzle_pipe[3] = 1;
382                         swizzle_pipe[4] = 3;
383                 }
384                 break;
385         case 6:
386                 if (force_no_swizzle) {
387                         swizzle_pipe[0] = 0;
388                         swizzle_pipe[1] = 1;
389                         swizzle_pipe[2] = 2;
390                         swizzle_pipe[3] = 3;
391                         swizzle_pipe[4] = 4;
392                         swizzle_pipe[5] = 5;
393                 } else {
394                         swizzle_pipe[0] = 0;
395                         swizzle_pipe[1] = 2;
396                         swizzle_pipe[2] = 4;
397                         swizzle_pipe[3] = 5;
398                         swizzle_pipe[4] = 3;
399                         swizzle_pipe[5] = 1;
400                 }
401                 break;
402         case 7:
403                 if (force_no_swizzle) {
404                         swizzle_pipe[0] = 0;
405                         swizzle_pipe[1] = 1;
406                         swizzle_pipe[2] = 2;
407                         swizzle_pipe[3] = 3;
408                         swizzle_pipe[4] = 4;
409                         swizzle_pipe[5] = 5;
410                         swizzle_pipe[6] = 6;
411                 } else {
412                         swizzle_pipe[0] = 0;
413                         swizzle_pipe[1] = 2;
414                         swizzle_pipe[2] = 4;
415                         swizzle_pipe[3] = 6;
416                         swizzle_pipe[4] = 3;
417                         swizzle_pipe[5] = 1;
418                         swizzle_pipe[6] = 5;
419                 }
420                 break;
421         case 8:
422                 if (force_no_swizzle) {
423                         swizzle_pipe[0] = 0;
424                         swizzle_pipe[1] = 1;
425                         swizzle_pipe[2] = 2;
426                         swizzle_pipe[3] = 3;
427                         swizzle_pipe[4] = 4;
428                         swizzle_pipe[5] = 5;
429                         swizzle_pipe[6] = 6;
430                         swizzle_pipe[7] = 7;
431                 } else {
432                         swizzle_pipe[0] = 0;
433                         swizzle_pipe[1] = 2;
434                         swizzle_pipe[2] = 4;
435                         swizzle_pipe[3] = 6;
436                         swizzle_pipe[4] = 3;
437                         swizzle_pipe[5] = 1;
438                         swizzle_pipe[6] = 7;
439                         swizzle_pipe[7] = 5;
440                 }
441                 break;
442         }
443
444         cur_backend = 0;
445         for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
446                 while (((1 << cur_backend) & enabled_backends_mask) == 0)
447                         cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
448
449                 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
450
451                 cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
452         }
453
454         return backend_map;
455 }
456
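/* Program the chip-specific defaults for the 3D engine: tiling and
 * pipe/backend configuration, SMX/SX/DB tweaks and SQ resource limits.
 * The SQ/VGT values written here are only defaults; the 2D/3D drivers
 * are expected to adjust them as needed.
 */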
457 static void rv770_gpu_init(struct radeon_device *rdev)
458 {
459         int i, j, num_qd_pipes;
460         u32 ta_aux_cntl;
461         u32 sx_debug_1;
462         u32 smx_dc_ctl0;
463         u32 db_debug3;
464         u32 num_gs_verts_per_thread;
465         u32 vgt_gs_per_es;
466         u32 gs_prim_buffer_depth = 0;
467         u32 sq_ms_fifo_sizes;
468         u32 sq_config;
469         u32 sq_thread_resource_mgmt;
470         u32 hdp_host_path_cntl;
471         u32 sq_dyn_gpr_size_simd_ab_0;
472         u32 backend_map;
473         u32 gb_tiling_config = 0;
474         u32 cc_rb_backend_disable = 0;
475         u32 cc_gc_shader_pipe_config = 0;
476         u32 mc_arb_ramcfg;
477         u32 db_debug4;
478
479         /* setup chip specs */
480         switch (rdev->family) {
481         case CHIP_RV770:
482                 rdev->config.rv770.max_pipes = 4;
483                 rdev->config.rv770.max_tile_pipes = 8;
484                 rdev->config.rv770.max_simds = 10;
485                 rdev->config.rv770.max_backends = 4;
486                 rdev->config.rv770.max_gprs = 256;
487                 rdev->config.rv770.max_threads = 248;
488                 rdev->config.rv770.max_stack_entries = 512;
489                 rdev->config.rv770.max_hw_contexts = 8;
490                 rdev->config.rv770.max_gs_threads = 16 * 2;
491                 rdev->config.rv770.sx_max_export_size = 128;
492                 rdev->config.rv770.sx_max_export_pos_size = 16;
493                 rdev->config.rv770.sx_max_export_smx_size = 112;
494                 rdev->config.rv770.sq_num_cf_insts = 2;
495
496                 rdev->config.rv770.sx_num_of_sets = 7;
497                 rdev->config.rv770.sc_prim_fifo_size = 0xF9;
498                 rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
499                 rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
500                 break;
501         case CHIP_RV730:
502                 rdev->config.rv770.max_pipes = 2;
503                 rdev->config.rv770.max_tile_pipes = 4;
504                 rdev->config.rv770.max_simds = 8;
505                 rdev->config.rv770.max_backends = 2;
506                 rdev->config.rv770.max_gprs = 128;
507                 rdev->config.rv770.max_threads = 248;
508                 rdev->config.rv770.max_stack_entries = 256;
509                 rdev->config.rv770.max_hw_contexts = 8;
510                 rdev->config.rv770.max_gs_threads = 16 * 2;
511                 rdev->config.rv770.sx_max_export_size = 256;
512                 rdev->config.rv770.sx_max_export_pos_size = 32;
513                 rdev->config.rv770.sx_max_export_smx_size = 224;
514                 rdev->config.rv770.sq_num_cf_insts = 2;
515
516                 rdev->config.rv770.sx_num_of_sets = 7;
517                 rdev->config.rv770.sc_prim_fifo_size = 0xf9;
518                 rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
519                 rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
520                 if (rdev->config.rv770.sx_max_export_pos_size > 16) {
521                         rdev->config.rv770.sx_max_export_pos_size -= 16;
522                         rdev->config.rv770.sx_max_export_smx_size += 16;
523                 }
524                 break;
525         case CHIP_RV710:
526                 rdev->config.rv770.max_pipes = 2;
527                 rdev->config.rv770.max_tile_pipes = 2;
528                 rdev->config.rv770.max_simds = 2;
529                 rdev->config.rv770.max_backends = 1;
530                 rdev->config.rv770.max_gprs = 256;
531                 rdev->config.rv770.max_threads = 192;
532                 rdev->config.rv770.max_stack_entries = 256;
533                 rdev->config.rv770.max_hw_contexts = 4;
534                 rdev->config.rv770.max_gs_threads = 8 * 2;
535                 rdev->config.rv770.sx_max_export_size = 128;
536                 rdev->config.rv770.sx_max_export_pos_size = 16;
537                 rdev->config.rv770.sx_max_export_smx_size = 112;
538                 rdev->config.rv770.sq_num_cf_insts = 1;
539
540                 rdev->config.rv770.sx_num_of_sets = 7;
541                 rdev->config.rv770.sc_prim_fifo_size = 0x40;
542                 rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
543                 rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
544                 break;
545         case CHIP_RV740:
546                 rdev->config.rv770.max_pipes = 4;
547                 rdev->config.rv770.max_tile_pipes = 4;
548                 rdev->config.rv770.max_simds = 8;
549                 rdev->config.rv770.max_backends = 4;
550                 rdev->config.rv770.max_gprs = 256;
551                 rdev->config.rv770.max_threads = 248;
552                 rdev->config.rv770.max_stack_entries = 512;
553                 rdev->config.rv770.max_hw_contexts = 8;
554                 rdev->config.rv770.max_gs_threads = 16 * 2;
555                 rdev->config.rv770.sx_max_export_size = 256;
556                 rdev->config.rv770.sx_max_export_pos_size = 32;
557                 rdev->config.rv770.sx_max_export_smx_size = 224;
558                 rdev->config.rv770.sq_num_cf_insts = 2;
559
560                 rdev->config.rv770.sx_num_of_sets = 7;
561                 rdev->config.rv770.sc_prim_fifo_size = 0x100;
562                 rdev->config.rv770.sc_hiz_tile_fifo_size = 0x30;
563                 rdev->config.rv770.sc_earlyz_tile_fifo_fize = 0x130;
564
565                 if (rdev->config.rv770.sx_max_export_pos_size > 16) {
566                         rdev->config.rv770.sx_max_export_pos_size -= 16;
567                         rdev->config.rv770.sx_max_export_smx_size += 16;
568                 }
569                 break;
570         default:
571                 break;
572         }
573
574         /* Initialize HDP */
575         j = 0;
576         for (i = 0; i < 32; i++) {
577                 WREG32((0x2c14 + j), 0x00000000);
578                 WREG32((0x2c18 + j), 0x00000000);
579                 WREG32((0x2c1c + j), 0x00000000);
580                 WREG32((0x2c20 + j), 0x00000000);
581                 WREG32((0x2c24 + j), 0x00000000);
582                 j += 0x18;
583         }
584
585         WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
586
587         /* setup tiling, simd, pipe config */
588         mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
589
590         switch (rdev->config.rv770.max_tile_pipes) {
591         case 1:
592         default:
593                 gb_tiling_config |= PIPE_TILING(0);
594                 break;
595         case 2:
596                 gb_tiling_config |= PIPE_TILING(1);
597                 break;
598         case 4:
599                 gb_tiling_config |= PIPE_TILING(2);
600                 break;
601         case 8:
602                 gb_tiling_config |= PIPE_TILING(3);
603                 break;
604         }
605         rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
606
607         if (rdev->family == CHIP_RV770)
608                 gb_tiling_config |= BANK_TILING(1);
609         else
610                 gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
611         rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
612
613         gb_tiling_config |= GROUP_SIZE(0);
614         rdev->config.rv770.tiling_group_size = 256;
615
616         if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
617                 gb_tiling_config |= ROW_TILING(3);
618                 gb_tiling_config |= SAMPLE_SPLIT(3);
619         } else {
620                 gb_tiling_config |=
621                         ROW_TILING(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
622                 gb_tiling_config |=
623                         SAMPLE_SPLIT(((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT));
624         }
625
626         gb_tiling_config |= BANK_SWAPS(1);
627
628         cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
629         cc_rb_backend_disable |=
630                 BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
631
632         cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
633         cc_gc_shader_pipe_config |=
634                 INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
635         cc_gc_shader_pipe_config |=
636                 INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
637
638         if (rdev->family == CHIP_RV740)
639                 backend_map = 0x28;
640         else
641                 backend_map = r700_get_tile_pipe_to_backend_map(rdev,
642                                                                 rdev->config.rv770.max_tile_pipes,
643                                                                 (R7XX_MAX_BACKENDS -
644                                                                  r600_count_pipe_bits((cc_rb_backend_disable &
645                                                                                        R7XX_MAX_BACKENDS_MASK) >> 16)),
646                                                                 (cc_rb_backend_disable >> 16));
647         gb_tiling_config |= BACKEND_MAP(backend_map);
648
649
650         WREG32(GB_TILING_CONFIG, gb_tiling_config);
651         WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
652         WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
653
654         WREG32(CC_RB_BACKEND_DISABLE,      cc_rb_backend_disable);
655         WREG32(CC_GC_SHADER_PIPE_CONFIG,   cc_gc_shader_pipe_config);
656         WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
657         WREG32(CC_SYS_RB_BACKEND_DISABLE,  cc_rb_backend_disable);
658
659         WREG32(CGTS_SYS_TCC_DISABLE, 0);
660         WREG32(CGTS_TCC_DISABLE, 0);
661         WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
662         WREG32(CGTS_USER_TCC_DISABLE, 0);
663
664         num_qd_pipes =
665                 R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
666         WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
667         WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
668
669         /* set HW defaults for 3D engine */
670         WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
671                                      ROQ_IB2_START(0x2b)));
672
673         WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
674
675         ta_aux_cntl = RREG32(TA_CNTL_AUX);
676         WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);
677
678         sx_debug_1 = RREG32(SX_DEBUG_1);
679         sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
680         WREG32(SX_DEBUG_1, sx_debug_1);
681
682         smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
683         smx_dc_ctl0 &= ~CACHE_DEPTH(0x1ff);
684         smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
685         WREG32(SMX_DC_CTL0, smx_dc_ctl0);
686
687         if (rdev->family != CHIP_RV740)
688                 WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
689                                        GS_FLUSH_CTL(4) |
690                                        ACK_FLUSH_CTL(3) |
691                                        SYNC_FLUSH_CTL));
692
693         db_debug3 = RREG32(DB_DEBUG3);
694         db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
695         switch (rdev->family) {
696         case CHIP_RV770:
697         case CHIP_RV740:
698                 db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
699                 break;
700         case CHIP_RV710:
701         case CHIP_RV730:
702         default:
703                 db_debug3 |= DB_CLK_OFF_DELAY(2);
704                 break;
705         }
706         WREG32(DB_DEBUG3, db_debug3);
707
708         if (rdev->family != CHIP_RV770) {
709                 db_debug4 = RREG32(DB_DEBUG4);
710                 db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
711                 WREG32(DB_DEBUG4, db_debug4);
712         }
713
714         WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
715                                         POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
716                                         SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));
717
718         WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
719                                  SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
720                                  SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));
721
722         WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
723
724         WREG32(VGT_NUM_INSTANCES, 1);
725
726         WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
727
728         WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
729
730         WREG32(CP_PERFMON_CNTL, 0);
731
732         sq_ms_fifo_sizes = (CACHE_FIFO_SIZE(16 * rdev->config.rv770.sq_num_cf_insts) |
733                             DONE_FIFO_HIWATER(0xe0) |
734                             ALU_UPDATE_FIFO_HIWATER(0x8));
735         switch (rdev->family) {
736         case CHIP_RV770:
737         case CHIP_RV730:
738         case CHIP_RV710:
739                 sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
740                 break;
741         case CHIP_RV740:
742         default:
743                 sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
744                 break;
745         }
746         WREG32(SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes);
747
748         /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
749          * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
750          */
751         sq_config = RREG32(SQ_CONFIG);
752         sq_config &= ~(PS_PRIO(3) |
753                        VS_PRIO(3) |
754                        GS_PRIO(3) |
755                        ES_PRIO(3));
756         sq_config |= (DX9_CONSTS |
757                       VC_ENABLE |
758                       EXPORT_SRC_C |
759                       PS_PRIO(0) |
760                       VS_PRIO(1) |
761                       GS_PRIO(2) |
762                       ES_PRIO(3));
763         if (rdev->family == CHIP_RV710)
764                 /* no vertex cache */
765                 sq_config &= ~VC_ENABLE;
766
767         WREG32(SQ_CONFIG, sq_config);
768
769         WREG32(SQ_GPR_RESOURCE_MGMT_1,  (NUM_PS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
770                                          NUM_VS_GPRS((rdev->config.rv770.max_gprs * 24)/64) |
771                                          NUM_CLAUSE_TEMP_GPRS(((rdev->config.rv770.max_gprs * 24)/64)/2)));
772
773         WREG32(SQ_GPR_RESOURCE_MGMT_2,  (NUM_GS_GPRS((rdev->config.rv770.max_gprs * 7)/64) |
774                                          NUM_ES_GPRS((rdev->config.rv770.max_gprs * 7)/64)));
775
776         sq_thread_resource_mgmt = (NUM_PS_THREADS((rdev->config.rv770.max_threads * 4)/8) |
777                                    NUM_VS_THREADS((rdev->config.rv770.max_threads * 2)/8) |
778                                    NUM_ES_THREADS((rdev->config.rv770.max_threads * 1)/8));
779         if (((rdev->config.rv770.max_threads * 1) / 8) > rdev->config.rv770.max_gs_threads)
780                 sq_thread_resource_mgmt |= NUM_GS_THREADS(rdev->config.rv770.max_gs_threads);
781         else
782                 sq_thread_resource_mgmt |= NUM_GS_THREADS((rdev->config.rv770.max_gs_threads * 1)/8);
783         WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
784
785         WREG32(SQ_STACK_RESOURCE_MGMT_1, (NUM_PS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
786                                                      NUM_VS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));
787
788         WREG32(SQ_STACK_RESOURCE_MGMT_2, (NUM_GS_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4) |
789                                                      NUM_ES_STACK_ENTRIES((rdev->config.rv770.max_stack_entries * 1)/4)));
790
791         sq_dyn_gpr_size_simd_ab_0 = (SIMDA_RING0((rdev->config.rv770.max_gprs * 38)/64) |
792                                      SIMDA_RING1((rdev->config.rv770.max_gprs * 38)/64) |
793                                      SIMDB_RING0((rdev->config.rv770.max_gprs * 38)/64) |
794                                      SIMDB_RING1((rdev->config.rv770.max_gprs * 38)/64));
795
796         WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0);
797         WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0);
798         WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0);
799         WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0);
800         WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0);
801         WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0);
802         WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0);
803         WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0);
804
805         WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
806                                           FORCE_EOV_MAX_REZ_CNT(255)));
807
808         if (rdev->family == CHIP_RV710)
809                 WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(TC_ONLY) |
810                                                 AUTO_INVLD_EN(ES_AND_GS_AUTO)));
811         else
812                 WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(VC_AND_TC) |
813                                                 AUTO_INVLD_EN(ES_AND_GS_AUTO)));
814
815         switch (rdev->family) {
816         case CHIP_RV770:
817         case CHIP_RV730:
818         case CHIP_RV740:
819                 gs_prim_buffer_depth = 384;
820                 break;
821         case CHIP_RV710:
822                 gs_prim_buffer_depth = 128;
823                 break;
824         default:
825                 break;
826         }
827
828         num_gs_verts_per_thread = rdev->config.rv770.max_pipes * 16;
829         vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread;
830         /* Max value for this is 256 */
831         if (vgt_gs_per_es > 256)
832                 vgt_gs_per_es = 256;
833
834         WREG32(VGT_ES_PER_GS, 128);
835         WREG32(VGT_GS_PER_ES, vgt_gs_per_es);
836         WREG32(VGT_GS_PER_VS, 2);
837
838         /* more default values. 2D/3D driver should adjust as needed */
839         WREG32(VGT_GS_VERTEX_REUSE, 16);
840         WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
841         WREG32(VGT_STRMOUT_EN, 0);
842         WREG32(SX_MISC, 0);
843         WREG32(PA_SC_MODE_CNTL, 0);
844         WREG32(PA_SC_EDGERULE, 0xaaaaaaaa);
845         WREG32(PA_SC_AA_CONFIG, 0);
846         WREG32(PA_SC_CLIPRECT_RULE, 0xffff);
847         WREG32(PA_SC_LINE_STIPPLE, 0);
848         WREG32(SPI_INPUT_Z, 0);
849         WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
850         WREG32(CB_COLOR7_FRAG, 0);
851
852         /* clear render buffer base addresses */
853         WREG32(CB_COLOR0_BASE, 0);
854         WREG32(CB_COLOR1_BASE, 0);
855         WREG32(CB_COLOR2_BASE, 0);
856         WREG32(CB_COLOR3_BASE, 0);
857         WREG32(CB_COLOR4_BASE, 0);
858         WREG32(CB_COLOR5_BASE, 0);
859         WREG32(CB_COLOR6_BASE, 0);
860         WREG32(CB_COLOR7_BASE, 0);
861
862         WREG32(TCP_CNTL, 0);
863
864         hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
865         WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
866
867         WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
868
869         WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
870                                           NUM_CLIP_SEQ(3)));
871
872 }
873
874 int rv770_mc_init(struct radeon_device *rdev)
875 {
876         u32 tmp;
877         int chansize, numchan;
878
879         /* Get VRAM information */
880         rdev->mc.vram_is_ddr = true;
881         tmp = RREG32(MC_ARB_RAMCFG);
882         if (tmp & CHANSIZE_OVERRIDE) {
883                 chansize = 16;
884         } else if (tmp & CHANSIZE_MASK) {
885                 chansize = 64;
886         } else {
887                 chansize = 32;
888         }
889         tmp = RREG32(MC_SHARED_CHMAP);
890         switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
891         case 0:
892         default:
893                 numchan = 1;
894                 break;
895         case 1:
896                 numchan = 2;
897                 break;
898         case 2:
899                 numchan = 4;
900                 break;
901         case 3:
902                 numchan = 8;
903                 break;
904         }
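        /* total memory bus width in bits = channel count * channel width */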
905         rdev->mc.vram_width = numchan * chansize;
906         /* Could the aperture size report 0? */
907         rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
908         rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
909         /* Setup GPU memory space */
910         rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
911         rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
912         rdev->mc.visible_vram_size = rdev->mc.aper_size;
913         /* FIXME remove this once we support unmappable VRAM */
914         if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
915                 rdev->mc.mc_vram_size = rdev->mc.aper_size;
916                 rdev->mc.real_vram_size = rdev->mc.aper_size;
917         }
918         r600_vram_gtt_location(rdev, &rdev->mc);
919         radeon_update_bandwidth_info(rdev);
920
921         return 0;
922 }
923
924 static int rv770_startup(struct radeon_device *rdev)
925 {
926         int r;
927
928         if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
929                 r = r600_init_microcode(rdev);
930                 if (r) {
931                         DRM_ERROR("Failed to load firmware!\n");
932                         return r;
933                 }
934         }
935
936         rv770_mc_program(rdev);
937         if (rdev->flags & RADEON_IS_AGP) {
938                 rv770_agp_enable(rdev);
939         } else {
940                 r = rv770_pcie_gart_enable(rdev);
941                 if (r)
942                         return r;
943         }
944         rv770_gpu_init(rdev);
945         r = r600_blit_init(rdev);
946         if (r) {
947                 r600_blit_fini(rdev);
948                 rdev->asic->copy = NULL;
949                 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
950         }
951         /* pin copy shader into vram */
952         if (rdev->r600_blit.shader_obj) {
953                 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
954                 if (unlikely(r != 0))
955                         return r;
956                 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
957                                 &rdev->r600_blit.shader_gpu_addr);
958                 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
959                 if (r) {
960                         DRM_ERROR("failed to pin blit object %d\n", r);
961                         return r;
962                 }
963         }
964         /* Enable IRQ */
965         r = r600_irq_init(rdev);
966         if (r) {
967                 DRM_ERROR("radeon: IH init failed (%d).\n", r);
968                 radeon_irq_kms_fini(rdev);
969                 return r;
970         }
971         r600_irq_set(rdev);
972
973         r = radeon_ring_init(rdev, rdev->cp.ring_size);
974         if (r)
975                 return r;
976         r = rv770_cp_load_microcode(rdev);
977         if (r)
978                 return r;
979         r = r600_cp_resume(rdev);
980         if (r)
981                 return r;
982         /* the write back buffer is not vital, so don't worry about failure */
983         r600_wb_enable(rdev);
984         return 0;
985 }
986
987 int rv770_resume(struct radeon_device *rdev)
988 {
989         int r;
990
991         /* Do not reset the GPU before posting; on rv770 hw, unlike on r500 hw,
992          * posting will perform the necessary tasks to bring the GPU back into
993          * good shape.
994          */
995         /* post card */
996         atom_asic_init(rdev->mode_info.atom_context);
997         /* Initialize clocks */
998         r = radeon_clocks_init(rdev);
999         if (r) {
1000                 return r;
1001         }
1002
1003         r = rv770_startup(rdev);
1004         if (r) {
1005                 DRM_ERROR("rv770 startup failed on resume\n");
1006                 return r;
1007         }
1008
1009         r = r600_ib_test(rdev);
1010         if (r) {
1011                 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
1012                 return r;
1013         }
1014
1015         r = r600_audio_init(rdev);
1016         if (r) {
1017                 dev_err(rdev->dev, "radeon: audio init failed\n");
1018                 return r;
1019         }
1020
1021         return r;
1022
1023 }
1024
1025 int rv770_suspend(struct radeon_device *rdev)
1026 {
1027         int r;
1028
1029         r600_audio_fini(rdev);
1030         /* FIXME: we should wait for ring to be empty */
1031         r700_cp_stop(rdev);
1032         rdev->cp.ready = false;
1033         r600_irq_suspend(rdev);
1034         r600_wb_disable(rdev);
1035         rv770_pcie_gart_disable(rdev);
1036         /* unpin shader bo */
1037         if (rdev->r600_blit.shader_obj) {
1038                 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1039                 if (likely(r == 0)) {
1040                         radeon_bo_unpin(rdev->r600_blit.shader_obj);
1041                         radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1042                 }
1043         }
1044         return 0;
1045 }
1046
1047 /* The plan is to move initialization into this function and use
1048  * helper functions so that radeon_device_init does pretty much
1049  * nothing more than call asic specific functions. This should
1050  * also allow us to remove a bunch of callback functions such as
1051  * vram_info.
1052  */
1053 int rv770_init(struct radeon_device *rdev)
1054 {
1055         int r;
1056
1057         r = radeon_dummy_page_init(rdev);
1058         if (r)
1059                 return r;
1060         /* This doesn't do much */
1061         r = radeon_gem_init(rdev);
1062         if (r)
1063                 return r;
1064         /* Read BIOS */
1065         if (!radeon_get_bios(rdev)) {
1066                 if (ASIC_IS_AVIVO(rdev))
1067                         return -EINVAL;
1068         }
1069         /* Must be an ATOMBIOS */
1070         if (!rdev->is_atom_bios) {
1071                 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
1072                 return -EINVAL;
1073         }
1074         r = radeon_atombios_init(rdev);
1075         if (r)
1076                 return r;
1077         /* Post card if necessary */
1078         if (!r600_card_posted(rdev)) {
1079                 if (!rdev->bios) {
1080                         dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
1081                         return -EINVAL;
1082                 }
1083                 DRM_INFO("GPU not posted. posting now...\n");
1084                 atom_asic_init(rdev->mode_info.atom_context);
1085         }
1086         /* Initialize scratch registers */
1087         r600_scratch_init(rdev);
1088         /* Initialize surface registers */
1089         radeon_surface_init(rdev);
1090         /* Initialize clocks */
1091         radeon_get_clock_info(rdev->ddev);
1092         r = radeon_clocks_init(rdev);
1093         if (r)
1094                 return r;
1095         /* Initialize power management */
1096         radeon_pm_init(rdev);
1097         /* Fence driver */
1098         r = radeon_fence_driver_init(rdev);
1099         if (r)
1100                 return r;
1101         /* initialize AGP */
1102         if (rdev->flags & RADEON_IS_AGP) {
1103                 r = radeon_agp_init(rdev);
1104                 if (r)
1105                         radeon_agp_disable(rdev);
1106         }
1107         r = rv770_mc_init(rdev);
1108         if (r)
1109                 return r;
1110         /* Memory manager */
1111         r = radeon_bo_init(rdev);
1112         if (r)
1113                 return r;
1114
1115         r = radeon_irq_kms_init(rdev);
1116         if (r)
1117                 return r;
1118
1119         rdev->cp.ring_obj = NULL;
1120         r600_ring_init(rdev, 1024 * 1024);
1121
1122         rdev->ih.ring_obj = NULL;
1123         r600_ih_ring_init(rdev, 64 * 1024);
1124
1125         r = r600_pcie_gart_init(rdev);
1126         if (r)
1127                 return r;
1128
1129         rdev->accel_working = true;
1130         r = rv770_startup(rdev);
1131         if (r) {
1132                 dev_err(rdev->dev, "disabling GPU acceleration\n");
1133                 r700_cp_fini(rdev);
1134                 r600_wb_fini(rdev);
1135                 r600_irq_fini(rdev);
1136                 radeon_irq_kms_fini(rdev);
1137                 rv770_pcie_gart_fini(rdev);
1138                 rdev->accel_working = false;
1139         }
1140         if (rdev->accel_working) {
1141                 r = radeon_ib_pool_init(rdev);
1142                 if (r) {
1143                         dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
1144                         rdev->accel_working = false;
1145                 } else {
1146                         r = r600_ib_test(rdev);
1147                         if (r) {
1148                                 dev_err(rdev->dev, "IB test failed (%d).\n", r);
1149                                 rdev->accel_working = false;
1150                         }
1151                 }
1152         }
1153
1154         r = r600_audio_init(rdev);
1155         if (r) {
1156                 dev_err(rdev->dev, "radeon: audio init failed\n");
1157                 return r;
1158         }
1159
1160         return 0;
1161 }
1162
1163 void rv770_fini(struct radeon_device *rdev)
1164 {
1165         radeon_pm_fini(rdev);
1166         r600_blit_fini(rdev);
1167         r700_cp_fini(rdev);
1168         r600_wb_fini(rdev);
1169         r600_irq_fini(rdev);
1170         radeon_irq_kms_fini(rdev);
1171         rv770_pcie_gart_fini(rdev);
1172         radeon_gem_fini(rdev);
1173         radeon_fence_driver_fini(rdev);
1174         radeon_clocks_fini(rdev);
1175         radeon_agp_fini(rdev);
1176         radeon_bo_fini(rdev);
1177         radeon_atombios_fini(rdev);
1178         kfree(rdev->bios);
1179         rdev->bios = NULL;
1180         radeon_dummy_page_fini(rdev);
1181 }