drm/radeon/kms: setup MC/VRAM the same way for suspend/resume
drivers/gpu/drm/radeon/rv515.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "rv515r.h"
#include "radeon.h"
#include "radeon_share.h"

/* rv515 depends on : */
void r100_hdp_reset(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
int r100_rb2d_reset(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
int rv370_pcie_gart_enable(struct radeon_device *rdev);
void rv370_pcie_gart_disable(struct radeon_device *rdev);
void r420_pipes_init(struct radeon_device *rdev);
void rs600_mc_disable_clients(struct radeon_device *rdev);
void rs600_disable_vga(struct radeon_device *rdev);

/* This file gathers functions specific to:
 * rv515
 *
 * Some of these functions might be used by newer ASICs.
 */
int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
void rv515_gpu_init(struct radeon_device *rdev);
int rv515_mc_wait_for_idle(struct radeon_device *rdev);


/*
 * MC
 */
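/*
 * Bring up the memory controller: choose where VRAM and GTT live in the
 * GPU address space (the AGP aperture supplies the GTT base when AGP is
 * usable, otherwise radeon_mc_setup() picks the layout), then program
 * MC_FB_LOCATION, HDP_FB_LOCATION and MC_AGP_LOCATION to match.  Running
 * the same programming on first init and on resume is what keeps the
 * MC/VRAM setup consistent across suspend/resume.
 */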
int rv515_mc_init(struct radeon_device *rdev)
{
        uint32_t tmp;
        int r;

        if (r100_debugfs_rbbm_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for RBBM !\n");
        }
        if (rv515_debugfs_pipes_info_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for pipes !\n");
        }
        if (rv515_debugfs_ga_info_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for GA !\n");
        }

        rv515_gpu_init(rdev);
        rv370_pcie_gart_disable(rdev);

        /* Setup GPU memory space */
        rdev->mc.vram_location = 0xFFFFFFFFUL;
        rdev->mc.gtt_location = 0xFFFFFFFFUL;
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r) {
                        printk(KERN_WARNING "[drm] Disabling AGP\n");
                        rdev->flags &= ~RADEON_IS_AGP;
                        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
                } else {
                        rdev->mc.gtt_location = rdev->mc.agp_base;
                }
        }
        r = radeon_mc_setup(rdev);
        if (r) {
                return r;
        }

        /* Program GPU memory space */
        rs600_mc_disable_clients(rdev);
        if (rv515_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait MC idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        /* Write VRAM size in case we are limiting it */
        WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
        tmp = REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
        WREG32(0x134, tmp);
        tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
        tmp = REG_SET(MC_FB_TOP, tmp >> 16);
        tmp |= REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
        WREG32_MC(MC_FB_LOCATION, tmp);
        WREG32(HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
        WREG32(0x310, rdev->mc.vram_location);
        if (rdev->flags & RADEON_IS_AGP) {
                tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
                tmp = REG_SET(MC_AGP_TOP, tmp >> 16);
                tmp |= REG_SET(MC_AGP_START, rdev->mc.gtt_location >> 16);
                WREG32_MC(MC_AGP_LOCATION, tmp);
                WREG32_MC(MC_AGP_BASE, rdev->mc.agp_base);
                WREG32_MC(MC_AGP_BASE_2, 0);
        } else {
                WREG32_MC(MC_AGP_LOCATION, 0x0FFFFFFF);
                WREG32_MC(MC_AGP_BASE, 0);
                WREG32_MC(MC_AGP_BASE_2, 0);
        }
        return 0;
}

void rv515_mc_fini(struct radeon_device *rdev)
{
        rv370_pcie_gart_disable(rdev);
        radeon_gart_table_vram_free(rdev);
        radeon_gart_fini(rdev);
}


/*
 * Global GPU functions
 */
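/*
 * Queue the initial engine state on the CP ring: ISYNC/WAIT_UNTIL
 * synchronisation, pipe selection and enable (GB_SELECT/GB_ENABLE),
 * destination and Z cache flushes, multisample positions and the GA
 * polygon/rounding modes.
 */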
void rv515_ring_start(struct radeon_device *rdev)
{
        int r;

        r = radeon_ring_lock(rdev, 64);
        if (r) {
                return;
        }
        radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
        radeon_ring_write(rdev,
                          ISYNC_ANY2D_IDLE3D |
                          ISYNC_ANY3D_IDLE2D |
                          ISYNC_WAIT_IDLEGUI |
                          ISYNC_CPSCRATCH_IDLEGUI);
        radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
        radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
        radeon_ring_write(rdev, PACKET0(0x170C, 0));
        radeon_ring_write(rdev, 1 << 31);
        radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(0x42C8, 0));
        radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
        radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
        radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
        radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
        radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
        radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
        radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
        radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0));
        radeon_ring_write(rdev,
                          ((6 << MS_X0_SHIFT) |
                           (6 << MS_Y0_SHIFT) |
                           (6 << MS_X1_SHIFT) |
                           (6 << MS_Y1_SHIFT) |
                           (6 << MS_X2_SHIFT) |
                           (6 << MS_Y2_SHIFT) |
                           (6 << MSBD0_Y_SHIFT) |
                           (6 << MSBD0_X_SHIFT)));
        radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0));
        radeon_ring_write(rdev,
                          ((6 << MS_X3_SHIFT) |
                           (6 << MS_Y3_SHIFT) |
                           (6 << MS_X4_SHIFT) |
                           (6 << MS_Y4_SHIFT) |
                           (6 << MS_X5_SHIFT) |
                           (6 << MS_Y5_SHIFT) |
                           (6 << MSBD1_SHIFT)));
        radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0));
        radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
        radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0));
        radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
        radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0));
        radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
        radeon_ring_write(rdev, PACKET0(0x20C8, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_unlock_commit(rdev);
}

void rv515_errata(struct radeon_device *rdev)
{
        rdev->pll_errata = 0;
}

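/*
 * Poll the MC_STATUS idle bit; returns 0 once the memory controller is
 * idle, or -1 after rdev->usec_timeout microseconds without it going idle.
 */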
int rv515_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        uint32_t tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32_MC(MC_STATUS);
                if (tmp & MC_STATUS_IDLE) {
                        return 0;
                }
                DRM_UDELAY(1);
        }
        return -1;
}

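/*
 * Basic engine setup: reset HDP and the 2D backend, disable VGA, run the
 * common pipe initialisation, then derive a pipe setup value from
 * GB_PIPE_SELECT (0x402C) and the current pipe select in 0x170C and write
 * it to PLL register 0x000D, waiting for the GUI and MC to go idle.
 */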
void rv515_gpu_init(struct radeon_device *rdev)
{
        unsigned pipe_select_current, gb_pipe_select, tmp;

        r100_hdp_reset(rdev);
        r100_rb2d_reset(rdev);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "resetting GPU. Bad things might happen.\n");
        }

        rs600_disable_vga(rdev);

        r420_pipes_init(rdev);
        gb_pipe_select = RREG32(0x402C);
        tmp = RREG32(0x170C);
        pipe_select_current = (tmp >> 2) & 3;
        tmp = (1 << pipe_select_current) |
              (((gb_pipe_select >> 8) & 0xF) << 4);
        WREG32_PLL(0x000D, tmp);
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "resetting GPU. Bad things might happen.\n");
        }
        if (rv515_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait MC idle while "
                       "programming pipes. Bad things might happen.\n");
        }
}

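/*
 * Soft-reset the geometry engines through RBBM_SOFT_RESET, retrying until
 * RBBM_STATUS no longer reports the engine busy (bits 20 and 26).  The CP
 * is stopped first and re-initialised afterwards if it was running.
 */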
int rv515_ga_reset(struct radeon_device *rdev)
{
        uint32_t tmp;
        bool reinit_cp;
        int i;

        reinit_cp = rdev->cp.ready;
        rdev->cp.ready = false;
        for (i = 0; i < rdev->usec_timeout; i++) {
                WREG32(CP_CSQ_MODE, 0);
                WREG32(CP_CSQ_CNTL, 0);
                WREG32(RBBM_SOFT_RESET, 0x32005);
                (void)RREG32(RBBM_SOFT_RESET);
                udelay(200);
                WREG32(RBBM_SOFT_RESET, 0);
                /* Wait to prevent race in RBBM_STATUS */
                mdelay(1);
                tmp = RREG32(RBBM_STATUS);
                if (tmp & ((1 << 20) | (1 << 26))) {
                        DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
                        /* GA still busy, soft reset it */
                        WREG32(0x429C, 0x200);
                        WREG32(VAP_PVS_STATE_FLUSH_REG, 0);
                        WREG32(0x43E0, 0);
                        WREG32(0x43E4, 0);
                        WREG32(0x24AC, 0);
                }
                /* Wait to prevent race in RBBM_STATUS */
                mdelay(1);
                tmp = RREG32(RBBM_STATUS);
                if (!(tmp & ((1 << 20) | (1 << 26)))) {
                        break;
                }
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(RBBM_STATUS);
                if (!(tmp & ((1 << 20) | (1 << 26)))) {
                        DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n",
                                 tmp);
                        DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
                        DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
                        DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
                        if (reinit_cp) {
                                return r100_cp_init(rdev, rdev->cp.ring_size);
                        }
                        return 0;
                }
                DRM_UDELAY(1);
        }
        tmp = RREG32(RBBM_STATUS);
        DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
        return -1;
}

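/*
 * Full GPU reset: always reset HDP, then reset the 2D block, the GA and
 * the CP only if their busy bits are set in RBBM_STATUS, and finally fail
 * if the GPU still reports itself busy (bit 31).
 */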
int rv515_gpu_reset(struct radeon_device *rdev)
{
        uint32_t status;

        /* reset order likely matters */
        status = RREG32(RBBM_STATUS);
        /* reset HDP */
        r100_hdp_reset(rdev);
        /* reset rb2d */
        if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
                r100_rb2d_reset(rdev);
        }
        /* reset GA */
        if (status & ((1 << 20) | (1 << 26))) {
                rv515_ga_reset(rdev);
        }
        /* reset CP */
        status = RREG32(RBBM_STATUS);
        if (status & (1 << 16)) {
                r100_cp_reset(rdev);
        }
        /* Check if GPU is idle */
        status = RREG32(RBBM_STATUS);
        if (status & (1 << 31)) {
                DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
                return -1;
        }
        DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
        return 0;
}


/*
 * VRAM info
 */
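/*
 * VRAM is always DDR on rv515; the bus width is derived from the memory
 * channel count field in RV515_MC_CNTL (a field value of 0 selects
 * 64 bits, anything else 128 bits).
 */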
static void rv515_vram_get_type(struct radeon_device *rdev)
{
        uint32_t tmp;

        rdev->mc.vram_width = 128;
        rdev->mc.vram_is_ddr = true;
        tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK;
        switch (tmp) {
        case 0:
                rdev->mc.vram_width = 64;
                break;
        case 1:
                rdev->mc.vram_width = 128;
                break;
        default:
                rdev->mc.vram_width = 128;
                break;
        }
}

void rv515_vram_info(struct radeon_device *rdev)
{
        fixed20_12 a;

        rv515_vram_get_type(rdev);

        r100_vram_init_sizes(rdev);
        /* FIXME: we should enforce default clock in case GPU is not in
         * default setup
         */
        a.full = rfixed_const(100);
        rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
        rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
}


/*
 * Indirect register accessors
 */
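/*
 * MC and PCIE registers are not memory mapped directly; they are reached
 * through index/data register pairs (MC_IND_INDEX/MC_IND_DATA and
 * PCIE_INDEX/PCIE_DATA).  For MC accesses, the high bits written with the
 * index (0x7f0000 for reads, 0xff0000 for writes) appear to carry the
 * access-enable/write-enable flags.
 */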
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
        uint32_t r;

        WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
        r = RREG32(MC_IND_DATA);
        WREG32(MC_IND_INDEX, 0);
        return r;
}

void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
        WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
        WREG32(MC_IND_DATA, (v));
        WREG32(MC_IND_INDEX, 0);
}

uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
        uint32_t r;

        WREG32(PCIE_INDEX, ((reg) & 0x7ff));
        (void)RREG32(PCIE_INDEX);
        r = RREG32(PCIE_DATA);
        return r;
}

void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
        WREG32(PCIE_INDEX, ((reg) & 0x7ff));
        (void)RREG32(PCIE_INDEX);
        WREG32(PCIE_DATA, (v));
        (void)RREG32(PCIE_DATA);
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t tmp;

        tmp = RREG32(GB_PIPE_SELECT);
        seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
        tmp = RREG32(SU_REG_DEST);
        seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp);
        tmp = RREG32(GB_TILE_CONFIG);
        seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
        tmp = RREG32(DST_PIPE_CONFIG);
        seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
        return 0;
}

static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t tmp;

        tmp = RREG32(0x2140);
        seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
        radeon_gpu_reset(rdev);
        tmp = RREG32(0x425C);
        seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
        return 0;
}

static struct drm_info_list rv515_pipes_info_list[] = {
        {"rv515_pipes_info", rv515_debugfs_pipes_info, 0, NULL},
};

static struct drm_info_list rv515_ga_info_list[] = {
        {"rv515_ga_info", rv515_debugfs_ga_info, 0, NULL},
};
#endif

int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
#else
        return 0;
#endif
}

int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
#else
        return 0;
#endif
}


/*
 * Asic initialization
 */
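/*
 * Bitmap consumed by the command stream checker: one bit per register in
 * the 2D/3D register range, with set bits marking registers that userspace
 * may program without extra validation.
 */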
static const unsigned r500_reg_safe_bm[219] = {
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
        0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
        0xF0000038, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0x1FFFFC78, 0xFFFFE000, 0xFFFFFFFE, 0xFFFFFFFF,
        0x38CF8F50, 0xFFF88082, 0xFF0000FC, 0xFAE009FF,
        0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
        0xFFFF8CFC, 0xFFFFC1FF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x0003FC01, 0x3FFFFCF8, 0xFE800B19, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
};

int rv515_init(struct radeon_device *rdev)
{
        rdev->config.r300.reg_safe_bm = r500_reg_safe_bm;
        rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm);
        return 0;
}

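/*
 * Load a fixed set of TV scaler settings.  0x6578/0x657C behave as an
 * index/data pair here, so the long WREG32 sequence below is simply a
 * table of (index, value) writes, presumably filling the scaler filter
 * coefficient RAM.
 */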
void atom_rv515_force_tv_scaler(struct radeon_device *rdev)
{

        WREG32(0x659C, 0x0);
        WREG32(0x6594, 0x705);
        WREG32(0x65A4, 0x10001);
        WREG32(0x65D8, 0x0);
        WREG32(0x65B0, 0x0);
        WREG32(0x65C0, 0x0);
        WREG32(0x65D4, 0x0);
        WREG32(0x6578, 0x0);
        WREG32(0x657C, 0x841880A8);
        WREG32(0x6578, 0x1);
        WREG32(0x657C, 0x84208680);
        WREG32(0x6578, 0x2);
        WREG32(0x657C, 0xBFF880B0);
        WREG32(0x6578, 0x100);
        WREG32(0x657C, 0x83D88088);
        WREG32(0x6578, 0x101);
        WREG32(0x657C, 0x84608680);
        WREG32(0x6578, 0x102);
        WREG32(0x657C, 0xBFF080D0);
        WREG32(0x6578, 0x200);
        WREG32(0x657C, 0x83988068);
        WREG32(0x6578, 0x201);
        WREG32(0x657C, 0x84A08680);
        WREG32(0x6578, 0x202);
        WREG32(0x657C, 0xBFF080F8);
        WREG32(0x6578, 0x300);
        WREG32(0x657C, 0x83588058);
        WREG32(0x6578, 0x301);
        WREG32(0x657C, 0x84E08660);
        WREG32(0x6578, 0x302);
        WREG32(0x657C, 0xBFF88120);
        WREG32(0x6578, 0x400);
        WREG32(0x657C, 0x83188040);
        WREG32(0x6578, 0x401);
        WREG32(0x657C, 0x85008660);
        WREG32(0x6578, 0x402);
        WREG32(0x657C, 0xBFF88150);
        WREG32(0x6578, 0x500);
        WREG32(0x657C, 0x82D88030);
        WREG32(0x6578, 0x501);
        WREG32(0x657C, 0x85408640);
        WREG32(0x6578, 0x502);
        WREG32(0x657C, 0xBFF88180);
        WREG32(0x6578, 0x600);
        WREG32(0x657C, 0x82A08018);
        WREG32(0x6578, 0x601);
        WREG32(0x657C, 0x85808620);
        WREG32(0x6578, 0x602);
        WREG32(0x657C, 0xBFF081B8);
        WREG32(0x6578, 0x700);
        WREG32(0x657C, 0x82608010);
        WREG32(0x6578, 0x701);
        WREG32(0x657C, 0x85A08600);
        WREG32(0x6578, 0x702);
        WREG32(0x657C, 0x800081F0);
        WREG32(0x6578, 0x800);
        WREG32(0x657C, 0x8228BFF8);
        WREG32(0x6578, 0x801);
        WREG32(0x657C, 0x85E085E0);
        WREG32(0x6578, 0x802);
        WREG32(0x657C, 0xBFF88228);
        WREG32(0x6578, 0x10000);
        WREG32(0x657C, 0x82A8BF00);
        WREG32(0x6578, 0x10001);
        WREG32(0x657C, 0x82A08CC0);
        WREG32(0x6578, 0x10002);
        WREG32(0x657C, 0x8008BEF8);
        WREG32(0x6578, 0x10100);
        WREG32(0x657C, 0x81F0BF28);
        WREG32(0x6578, 0x10101);
        WREG32(0x657C, 0x83608CA0);
        WREG32(0x6578, 0x10102);
        WREG32(0x657C, 0x8018BED0);
        WREG32(0x6578, 0x10200);
        WREG32(0x657C, 0x8148BF38);
        WREG32(0x6578, 0x10201);
        WREG32(0x657C, 0x84408C80);
        WREG32(0x6578, 0x10202);
        WREG32(0x657C, 0x8008BEB8);
        WREG32(0x6578, 0x10300);
        WREG32(0x657C, 0x80B0BF78);
        WREG32(0x6578, 0x10301);
        WREG32(0x657C, 0x85008C20);
        WREG32(0x6578, 0x10302);
        WREG32(0x657C, 0x8020BEA0);
        WREG32(0x6578, 0x10400);
        WREG32(0x657C, 0x8028BF90);
        WREG32(0x6578, 0x10401);
        WREG32(0x657C, 0x85E08BC0);
        WREG32(0x6578, 0x10402);
        WREG32(0x657C, 0x8018BE90);
        WREG32(0x6578, 0x10500);
        WREG32(0x657C, 0xBFB8BFB0);
        WREG32(0x6578, 0x10501);
        WREG32(0x657C, 0x86C08B40);
        WREG32(0x6578, 0x10502);
        WREG32(0x657C, 0x8010BE90);
        WREG32(0x6578, 0x10600);
        WREG32(0x657C, 0xBF58BFC8);
        WREG32(0x6578, 0x10601);
        WREG32(0x657C, 0x87A08AA0);
        WREG32(0x6578, 0x10602);
        WREG32(0x657C, 0x8010BE98);
        WREG32(0x6578, 0x10700);
        WREG32(0x657C, 0xBF10BFF0);
        WREG32(0x6578, 0x10701);
        WREG32(0x657C, 0x886089E0);
        WREG32(0x6578, 0x10702);
        WREG32(0x657C, 0x8018BEB0);
        WREG32(0x6578, 0x10800);
        WREG32(0x657C, 0xBED8BFE8);
        WREG32(0x6578, 0x10801);
        WREG32(0x657C, 0x89408940);
        WREG32(0x6578, 0x10802);
        WREG32(0x657C, 0xBFE8BED8);
        WREG32(0x6578, 0x20000);
        WREG32(0x657C, 0x80008000);
        WREG32(0x6578, 0x20001);
        WREG32(0x657C, 0x90008000);
        WREG32(0x6578, 0x20002);
        WREG32(0x657C, 0x80008000);
        WREG32(0x6578, 0x20003);
        WREG32(0x657C, 0x80008000);
        WREG32(0x6578, 0x20100);
        WREG32(0x657C, 0x80108000);
        WREG32(0x6578, 0x20101);
        WREG32(0x657C, 0x8FE0BF70);
        WREG32(0x6578, 0x20102);
        WREG32(0x657C, 0xBFE880C0);
        WREG32(0x6578, 0x20103);
        WREG32(0x657C, 0x80008000);
        WREG32(0x6578, 0x20200);
        WREG32(0x657C, 0x8018BFF8);
        WREG32(0x6578, 0x20201);
        WREG32(0x657C, 0x8F80BF08);
        WREG32(0x6578, 0x20202);
        WREG32(0x657C, 0xBFD081A0);
        WREG32(0x6578, 0x20203);
        WREG32(0x657C, 0xBFF88000);
        WREG32(0x6578, 0x20300);
        WREG32(0x657C, 0x80188000);
        WREG32(0x6578, 0x20301);
        WREG32(0x657C, 0x8EE0BEC0);
        WREG32(0x6578, 0x20302);
        WREG32(0x657C, 0xBFB082A0);
        WREG32(0x6578, 0x20303);
        WREG32(0x657C, 0x80008000);
        WREG32(0x6578, 0x20400);
        WREG32(0x657C, 0x80188000);
        WREG32(0x6578, 0x20401);
        WREG32(0x657C, 0x8E00BEA0);
        WREG32(0x6578, 0x20402);
        WREG32(0x657C, 0xBF8883C0);
        WREG32(0x6578, 0x20403);
        WREG32(0x657C, 0x80008000);
        WREG32(0x6578, 0x20500);
        WREG32(0x657C, 0x80188000);
        WREG32(0x6578, 0x20501);
        WREG32(0x657C, 0x8D00BE90);
        WREG32(0x6578, 0x20502);
        WREG32(0x657C, 0xBF588500);
        WREG32(0x6578, 0x20503);
        WREG32(0x657C, 0x80008008);
        WREG32(0x6578, 0x20600);
        WREG32(0x657C, 0x80188000);
        WREG32(0x6578, 0x20601);
        WREG32(0x657C, 0x8BC0BE98);
        WREG32(0x6578, 0x20602);
        WREG32(0x657C, 0xBF308660);
        WREG32(0x6578, 0x20603);
        WREG32(0x657C, 0x80008008);
        WREG32(0x6578, 0x20700);
        WREG32(0x657C, 0x80108000);
        WREG32(0x6578, 0x20701);
        WREG32(0x657C, 0x8A80BEB0);
        WREG32(0x6578, 0x20702);
        WREG32(0x657C, 0xBF0087C0);
        WREG32(0x6578, 0x20703);
        WREG32(0x657C, 0x80008008);
        WREG32(0x6578, 0x20800);
        WREG32(0x657C, 0x80108000);
        WREG32(0x6578, 0x20801);
        WREG32(0x657C, 0x8920BED0);
        WREG32(0x6578, 0x20802);
        WREG32(0x657C, 0xBED08920);
        WREG32(0x6578, 0x20803);
        WREG32(0x657C, 0x80008010);
        WREG32(0x6578, 0x30000);
        WREG32(0x657C, 0x90008000);
        WREG32(0x6578, 0x30001);
        WREG32(0x657C, 0x80008000);
        WREG32(0x6578, 0x30100);
        WREG32(0x657C, 0x8FE0BF90);
        WREG32(0x6578, 0x30101);
        WREG32(0x657C, 0xBFF880A0);
        WREG32(0x6578, 0x30200);
        WREG32(0x657C, 0x8F60BF40);
        WREG32(0x6578, 0x30201);
        WREG32(0x657C, 0xBFE88180);
        WREG32(0x6578, 0x30300);
        WREG32(0x657C, 0x8EC0BF00);
        WREG32(0x6578, 0x30301);
        WREG32(0x657C, 0xBFC88280);
        WREG32(0x6578, 0x30400);
        WREG32(0x657C, 0x8DE0BEE0);
        WREG32(0x6578, 0x30401);
        WREG32(0x657C, 0xBFA083A0);
        WREG32(0x6578, 0x30500);
        WREG32(0x657C, 0x8CE0BED0);
        WREG32(0x6578, 0x30501);
        WREG32(0x657C, 0xBF7884E0);
        WREG32(0x6578, 0x30600);
        WREG32(0x657C, 0x8BA0BED8);
        WREG32(0x6578, 0x30601);
        WREG32(0x657C, 0xBF508640);
        WREG32(0x6578, 0x30700);
        WREG32(0x657C, 0x8A60BEE8);
        WREG32(0x6578, 0x30701);
        WREG32(0x657C, 0xBF2087A0);
        WREG32(0x6578, 0x30800);
        WREG32(0x657C, 0x8900BF00);
        WREG32(0x6578, 0x30801);
        WREG32(0x657C, 0xBF008900);
}

struct rv515_watermark {
        u32        lb_request_fifo_depth;
        fixed20_12 num_line_pair;
        fixed20_12 estimated_width;
        fixed20_12 worst_case_latency;
        fixed20_12 consumption_rate;
        fixed20_12 active_time;
        fixed20_12 dbpp;
        fixed20_12 priority_mark_max;
        fixed20_12 priority_mark;
        fixed20_12 sclk;
};

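/*
 * Per-CRTC display watermark computation.  From the mode's pixel clock and
 * the scaler ratios it derives how fast the display consumes data, compares
 * that with how quickly the MC can deliver line-buffer chunks, and boils
 * the result down to a request FIFO depth and a priority mark that
 * rv515_bandwidth_avivo_update() then programs into the hardware.
 */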
void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
                                  struct radeon_crtc *crtc,
                                  struct rv515_watermark *wm)
{
        struct drm_display_mode *mode = &crtc->base.mode;
        fixed20_12 a, b, c;
        fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
        fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;

        if (!crtc->base.enabled) {
                /* FIXME: wouldn't it be better to set the priority mark to maximum */
                wm->lb_request_fifo_depth = 4;
                return;
        }

        if (crtc->vsc.full > rfixed_const(2))
                wm->num_line_pair.full = rfixed_const(2);
        else
                wm->num_line_pair.full = rfixed_const(1);

        b.full = rfixed_const(mode->crtc_hdisplay);
        c.full = rfixed_const(256);
        a.full = rfixed_mul(wm->num_line_pair, b);
        request_fifo_depth.full = rfixed_div(a, c);
        if (a.full < rfixed_const(4)) {
                wm->lb_request_fifo_depth = 4;
        } else {
                wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
        }

        /* Determine consumption rate
         *  pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
         *  vtaps = number of vertical taps,
         *  vsc = vertical scaling ratio, defined as source/destination
         *  hsc = horizontal scaling ratio, defined as source/destination
         */
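        /*
         * Worked example (illustrative only): a 1080p60 mode has a pixel
         * clock of 148500 kHz, so pclk = 1000 / (148500 / 1000) ~= 6.7 ns
         * per pixel and, with no scaling (rmx off), the consumption rate
         * comes out at 1 / 6.7 ~= 0.15 pixels per ns.
         */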
        a.full = rfixed_const(mode->clock);
        b.full = rfixed_const(1000);
        a.full = rfixed_div(a, b);
        pclk.full = rfixed_div(b, a);
        if (crtc->rmx_type != RMX_OFF) {
                b.full = rfixed_const(2);
                if (crtc->vsc.full > b.full)
                        b.full = crtc->vsc.full;
                b.full = rfixed_mul(b, crtc->hsc);
                c.full = rfixed_const(2);
                b.full = rfixed_div(b, c);
                consumption_time.full = rfixed_div(pclk, b);
        } else {
                consumption_time.full = pclk.full;
        }
        a.full = rfixed_const(1);
        wm->consumption_rate.full = rfixed_div(a, consumption_time);


        /* Determine line time
         *  LineTime = total time for one line of display
         *  htotal = total number of horizontal pixels
         *  pclk = pixel clock period(ns)
         */
        a.full = rfixed_const(crtc->base.mode.crtc_htotal);
        line_time.full = rfixed_mul(a, pclk);

        /* Determine active time
         *  ActiveTime = time of active region of display within one line,
         *  hactive = total number of horizontal active pixels
         *  htotal = total number of horizontal pixels
         */
        a.full = rfixed_const(crtc->base.mode.crtc_htotal);
        b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
        wm->active_time.full = rfixed_mul(line_time, b);
        wm->active_time.full = rfixed_div(wm->active_time, a);

        /* Determine chunk time
         * ChunkTime = the time it takes the DCP to send one chunk of data
         * to the LB which consists of pipeline delay and inter chunk gap
         * sclk = system clock(MHz)
         */
        a.full = rfixed_const(600 * 1000);
        chunk_time.full = rfixed_div(a, rdev->pm.sclk);
        read_delay_latency.full = rfixed_const(1000);

        /* Determine the worst case latency
         * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
         * WorstCaseLatency = worst case time from urgent to when the MC starts
         *                    to return data
         * READ_DELAY_IDLE_MAX = constant of 1us
         * ChunkTime = time it takes the DCP to send one chunk of data to the LB
         *             which consists of pipeline delay and inter chunk gap
         */
        if (rfixed_trunc(wm->num_line_pair) > 1) {
                a.full = rfixed_const(3);
                wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
                wm->worst_case_latency.full += read_delay_latency.full;
        } else {
                wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full;
        }

        /* Determine the tolerable latency
         * TolerableLatency = Any given request has only 1 line time
         *                    for the data to be returned
         * LBRequestFifoDepth = Number of chunk requests the LB can
         *                      put into the request FIFO for a display
         *  LineTime = total time for one line of display
         *  ChunkTime = the time it takes the DCP to send one chunk
         *              of data to the LB which consists of
         *  pipeline delay and inter chunk gap
         */
        if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
                tolerable_latency.full = line_time.full;
        } else {
                tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
                tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
                tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
                tolerable_latency.full = line_time.full - tolerable_latency.full;
        }
        /* We assume worst case 32 bits (4 bytes) */
        wm->dbpp.full = rfixed_const(2 * 16);

        /* Determine the maximum priority mark
         *  width = viewport width in pixels
         */
        a.full = rfixed_const(16);
        wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
        wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);

        /* Determine estimated width */
        estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
        estimated_width.full = rfixed_div(estimated_width, consumption_time);
        if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
                wm->priority_mark.full = rfixed_const(10);
        } else {
                a.full = rfixed_const(16);
                wm->priority_mark.full = rfixed_div(estimated_width, a);
                wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
        }
}

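/*
 * Combine the per-CRTC watermarks: program the line-buffer request depth
 * for both displays and then, depending on which CRTCs are enabled, derive
 * the D1/D2 priority marks from fill rate vs. consumption rate (unused
 * displays get MODE_PRIORITY_OFF).
 */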
void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
{
        struct drm_display_mode *mode0 = NULL;
        struct drm_display_mode *mode1 = NULL;
        struct rv515_watermark wm0;
        struct rv515_watermark wm1;
        u32 tmp;
        fixed20_12 priority_mark02, priority_mark12, fill_rate;
        fixed20_12 a, b;

        if (rdev->mode_info.crtcs[0]->base.enabled)
                mode0 = &rdev->mode_info.crtcs[0]->base.mode;
        if (rdev->mode_info.crtcs[1]->base.enabled)
                mode1 = &rdev->mode_info.crtcs[1]->base.mode;
        rs690_line_buffer_adjust(rdev, mode0, mode1);

        rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
        rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);

        tmp = wm0.lb_request_fifo_depth;
        tmp |= wm1.lb_request_fifo_depth << 16;
        WREG32(LB_MAX_REQ_OUTSTANDING, tmp);

        if (mode0 && mode1) {
                if (rfixed_trunc(wm0.dbpp) > 64)
                        a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
                else
                        a.full = wm0.num_line_pair.full;
                if (rfixed_trunc(wm1.dbpp) > 64)
                        b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
                else
                        b.full = wm1.num_line_pair.full;
                a.full += b.full;
                fill_rate.full = rfixed_div(wm0.sclk, a);
                if (wm0.consumption_rate.full > fill_rate.full) {
                        b.full = wm0.consumption_rate.full - fill_rate.full;
                        b.full = rfixed_mul(b, wm0.active_time);
                        a.full = rfixed_const(16);
                        b.full = rfixed_div(b, a);
                        a.full = rfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
                        priority_mark02.full = a.full + b.full;
                } else {
                        a.full = rfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
                        b.full = rfixed_const(16 * 1000);
                        priority_mark02.full = rfixed_div(a, b);
                }
                if (wm1.consumption_rate.full > fill_rate.full) {
                        b.full = wm1.consumption_rate.full - fill_rate.full;
                        b.full = rfixed_mul(b, wm1.active_time);
                        a.full = rfixed_const(16);
                        b.full = rfixed_div(b, a);
                        a.full = rfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
                        priority_mark12.full = a.full + b.full;
                } else {
                        a.full = rfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
                        b.full = rfixed_const(16 * 1000);
                        priority_mark12.full = rfixed_div(a, b);
                }
                if (wm0.priority_mark.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark.full;
                if (rfixed_trunc(priority_mark02) < 0)
                        priority_mark02.full = 0;
                if (wm0.priority_mark_max.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark_max.full;
                if (wm1.priority_mark.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark.full;
                if (rfixed_trunc(priority_mark12) < 0)
                        priority_mark12.full = 0;
                if (wm1.priority_mark_max.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark_max.full;
                WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
                WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
                WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
                WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
        } else if (mode0) {
                if (rfixed_trunc(wm0.dbpp) > 64)
                        a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
                else
                        a.full = wm0.num_line_pair.full;
                fill_rate.full = rfixed_div(wm0.sclk, a);
                if (wm0.consumption_rate.full > fill_rate.full) {
                        b.full = wm0.consumption_rate.full - fill_rate.full;
                        b.full = rfixed_mul(b, wm0.active_time);
                        a.full = rfixed_const(16);
                        b.full = rfixed_div(b, a);
                        a.full = rfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
                        priority_mark02.full = a.full + b.full;
                } else {
                        a.full = rfixed_mul(wm0.worst_case_latency,
                                                wm0.consumption_rate);
                        b.full = rfixed_const(16);
                        priority_mark02.full = rfixed_div(a, b);
                }
                if (wm0.priority_mark.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark.full;
                if (rfixed_trunc(priority_mark02) < 0)
                        priority_mark02.full = 0;
                if (wm0.priority_mark_max.full > priority_mark02.full)
                        priority_mark02.full = wm0.priority_mark_max.full;
                WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
                WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
                WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
                WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
        } else {
                if (rfixed_trunc(wm1.dbpp) > 64)
                        a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair);
                else
                        a.full = wm1.num_line_pair.full;
                fill_rate.full = rfixed_div(wm1.sclk, a);
                if (wm1.consumption_rate.full > fill_rate.full) {
                        b.full = wm1.consumption_rate.full - fill_rate.full;
                        b.full = rfixed_mul(b, wm1.active_time);
                        a.full = rfixed_const(16);
                        b.full = rfixed_div(b, a);
                        a.full = rfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
                        priority_mark12.full = a.full + b.full;
                } else {
                        a.full = rfixed_mul(wm1.worst_case_latency,
                                                wm1.consumption_rate);
                        b.full = rfixed_const(16 * 1000);
                        priority_mark12.full = rfixed_div(a, b);
                }
                if (wm1.priority_mark.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark.full;
                if (rfixed_trunc(priority_mark12) < 0)
                        priority_mark12.full = 0;
                if (wm1.priority_mark_max.full > priority_mark12.full)
                        priority_mark12.full = wm1.priority_mark_max.full;
                WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
                WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
                WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
                WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
        }
}

void rv515_bandwidth_update(struct radeon_device *rdev)
{
        uint32_t tmp;
        struct drm_display_mode *mode0 = NULL;
        struct drm_display_mode *mode1 = NULL;

        if (rdev->mode_info.crtcs[0]->base.enabled)
                mode0 = &rdev->mode_info.crtcs[0]->base.mode;
        if (rdev->mode_info.crtcs[1]->base.enabled)
                mode1 = &rdev->mode_info.crtcs[1]->base.mode;
        /*
         * Set display0/1 priority up in the memory controller for
         * the active modes if the user specifies HIGH for the
         * displaypriority option.
         */
        if (rdev->disp_priority == 2) {
                tmp = RREG32_MC(MC_MISC_LAT_TIMER);
                tmp &= ~MC_DISP1R_INIT_LAT_MASK;
                tmp &= ~MC_DISP0R_INIT_LAT_MASK;
                if (mode1)
                        tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
                if (mode0)
                        tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
                WREG32_MC(MC_MISC_LAT_TIMER, tmp);
        }
        rv515_bandwidth_avivo_update(rdev);
}