2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/slab.h>
29 #include <linux/seq_file.h>
30 #include <linux/firmware.h>
31 #include <linux/platform_device.h>
33 #include "radeon_drm.h"
35 #include "radeon_asic.h"
36 #include "radeon_mode.h"
41 #define PFP_UCODE_SIZE 576
42 #define PM4_UCODE_SIZE 1792
43 #define RLC_UCODE_SIZE 768
44 #define R700_PFP_UCODE_SIZE 848
45 #define R700_PM4_UCODE_SIZE 1360
46 #define R700_RLC_UCODE_SIZE 1024
47 #define EVERGREEN_PFP_UCODE_SIZE 1120
48 #define EVERGREEN_PM4_UCODE_SIZE 1376
49 #define EVERGREEN_RLC_UCODE_SIZE 768
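/* The microcode sizes above are expressed in 32-bit dwords; r600_init_microcode()
 * below converts them to the byte sizes expected from request_firmware(). */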
52 MODULE_FIRMWARE("radeon/R600_pfp.bin");
53 MODULE_FIRMWARE("radeon/R600_me.bin");
54 MODULE_FIRMWARE("radeon/RV610_pfp.bin");
55 MODULE_FIRMWARE("radeon/RV610_me.bin");
56 MODULE_FIRMWARE("radeon/RV630_pfp.bin");
57 MODULE_FIRMWARE("radeon/RV630_me.bin");
58 MODULE_FIRMWARE("radeon/RV620_pfp.bin");
59 MODULE_FIRMWARE("radeon/RV620_me.bin");
60 MODULE_FIRMWARE("radeon/RV635_pfp.bin");
61 MODULE_FIRMWARE("radeon/RV635_me.bin");
62 MODULE_FIRMWARE("radeon/RV670_pfp.bin");
63 MODULE_FIRMWARE("radeon/RV670_me.bin");
64 MODULE_FIRMWARE("radeon/RS780_pfp.bin");
65 MODULE_FIRMWARE("radeon/RS780_me.bin");
66 MODULE_FIRMWARE("radeon/RV770_pfp.bin");
67 MODULE_FIRMWARE("radeon/RV770_me.bin");
68 MODULE_FIRMWARE("radeon/RV730_pfp.bin");
69 MODULE_FIRMWARE("radeon/RV730_me.bin");
70 MODULE_FIRMWARE("radeon/RV710_pfp.bin");
71 MODULE_FIRMWARE("radeon/RV710_me.bin");
72 MODULE_FIRMWARE("radeon/R600_rlc.bin");
73 MODULE_FIRMWARE("radeon/R700_rlc.bin");
74 MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
75 MODULE_FIRMWARE("radeon/CEDAR_me.bin");
76 MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
77 MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
78 MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
79 MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
80 MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
81 MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
82 MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
83 MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
84 MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
85 MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
87 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
89 /* r600,rv610,rv630,rv620,rv635,rv670 */
90 int r600_mc_wait_for_idle(struct radeon_device *rdev);
91 void r600_gpu_init(struct radeon_device *rdev);
92 void r600_fini(struct radeon_device *rdev);
93 void r600_irq_disable(struct radeon_device *rdev);
95 void r600_get_power_state(struct radeon_device *rdev,
96 enum radeon_pm_action action)
100 rdev->pm.can_upclock = true;
101 rdev->pm.can_downclock = true;
103 /* power state array is low to high, default is first */
104 if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
105 int min_power_state_index = 0;
107 if (rdev->pm.num_power_states > 2)
108 min_power_state_index = 1;
111 case PM_ACTION_MINIMUM:
112 rdev->pm.requested_power_state_index = min_power_state_index;
113 rdev->pm.requested_clock_mode_index = 0;
114 rdev->pm.can_downclock = false;
116 case PM_ACTION_DOWNCLOCK:
117 if (rdev->pm.current_power_state_index == min_power_state_index) {
118 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
119 rdev->pm.can_downclock = false;
121 if (rdev->pm.active_crtc_count > 1) {
122 for (i = 0; i < rdev->pm.num_power_states; i++) {
123 if (rdev->pm.power_state[i].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)
125 else if (i >= rdev->pm.current_power_state_index) {
126 rdev->pm.requested_power_state_index =
127 rdev->pm.current_power_state_index;
130 rdev->pm.requested_power_state_index = i;
135 rdev->pm.requested_power_state_index =
136 rdev->pm.current_power_state_index - 1;
138 rdev->pm.requested_clock_mode_index = 0;
140 case PM_ACTION_UPCLOCK:
141 if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
142 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
143 rdev->pm.can_upclock = false;
145 if (rdev->pm.active_crtc_count > 1) {
146 for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
147 if (rdev->pm.power_state[i].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)
149 else if (i <= rdev->pm.current_power_state_index) {
150 rdev->pm.requested_power_state_index =
151 rdev->pm.current_power_state_index;
154 rdev->pm.requested_power_state_index = i;
159 rdev->pm.requested_power_state_index =
160 rdev->pm.current_power_state_index + 1;
162 rdev->pm.requested_clock_mode_index = 0;
164 case PM_ACTION_DEFAULT:
165 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
166 rdev->pm.requested_clock_mode_index = 0;
167 rdev->pm.can_upclock = false;
171 DRM_ERROR("Requested mode for undefined action\n");
175 /* XXX select a power state based on AC/DC, single/dualhead, etc. */
176 /* for now just select the first power state and switch between clock modes */
177 /* power state array is low to high, default is first (0) */
178 if (rdev->pm.active_crtc_count > 1) {
179 rdev->pm.requested_power_state_index = -1;
180 /* start at 1 as we don't want the default mode */
181 for (i = 1; i < rdev->pm.num_power_states; i++) {
182 if (rdev->pm.power_state[i].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)
184 else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
185 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
186 rdev->pm.requested_power_state_index = i;
190 /* if nothing selected, grab the default state. */
191 if (rdev->pm.requested_power_state_index == -1)
192 rdev->pm.requested_power_state_index = 0;
194 rdev->pm.requested_power_state_index = 1;
197 case PM_ACTION_MINIMUM:
198 rdev->pm.requested_clock_mode_index = 0;
199 rdev->pm.can_downclock = false;
201 case PM_ACTION_DOWNCLOCK:
202 if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
203 if (rdev->pm.current_clock_mode_index == 0) {
204 rdev->pm.requested_clock_mode_index = 0;
205 rdev->pm.can_downclock = false;
207 rdev->pm.requested_clock_mode_index =
208 rdev->pm.current_clock_mode_index - 1;
210 rdev->pm.requested_clock_mode_index = 0;
211 rdev->pm.can_downclock = false;
214 case PM_ACTION_UPCLOCK:
215 if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
216 if (rdev->pm.current_clock_mode_index ==
217 (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
218 rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
219 rdev->pm.can_upclock = false;
221 rdev->pm.requested_clock_mode_index =
222 rdev->pm.current_clock_mode_index + 1;
224 rdev->pm.requested_clock_mode_index =
225 rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
226 rdev->pm.can_upclock = false;
229 case PM_ACTION_DEFAULT:
230 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
231 rdev->pm.requested_clock_mode_index = 0;
232 rdev->pm.can_upclock = false;
236 DRM_ERROR("Requested mode for undefined action\n");
241 DRM_INFO("Requested: e: %d m: %d p: %d\n",
242 rdev->pm.power_state[rdev->pm.requested_power_state_index].
243 clock_info[rdev->pm.requested_clock_mode_index].sclk,
244 rdev->pm.power_state[rdev->pm.requested_power_state_index].
245 clock_info[rdev->pm.requested_clock_mode_index].mclk,
246 rdev->pm.power_state[rdev->pm.requested_power_state_index].
250 void r600_set_power_state(struct radeon_device *rdev)
254 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
255 (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
258 if (radeon_gui_idle(rdev)) {
260 sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
261 clock_info[rdev->pm.requested_clock_mode_index].sclk;
262 if (sclk > rdev->clock.default_sclk)
263 sclk = rdev->clock.default_sclk;
265 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
266 clock_info[rdev->pm.requested_clock_mode_index].mclk;
267 if (mclk > rdev->clock.default_mclk)
268 mclk = rdev->clock.default_mclk;
269 /* don't change the mclk with multiple crtcs */
270 if (rdev->pm.active_crtc_count > 1)
271 mclk = rdev->clock.default_mclk;
279 /* set engine clock */
280 if (sclk != rdev->pm.current_sclk) {
281 radeon_sync_with_vblank(rdev);
282 radeon_pm_debug_check_in_vbl(rdev, false);
283 radeon_set_engine_clock(rdev, sclk);
284 radeon_pm_debug_check_in_vbl(rdev, true);
285 rdev->pm.current_sclk = sclk;
286 DRM_INFO("Setting: e: %d\n", sclk);
290 /* set memory clock */
291 if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
292 radeon_sync_with_vblank(rdev);
293 radeon_pm_debug_check_in_vbl(rdev, false);
294 radeon_set_memory_clock(rdev, mclk);
295 radeon_pm_debug_check_in_vbl(rdev, true);
296 rdev->pm.current_mclk = mclk;
297 DRM_INFO("Setting: m: %d\n", mclk);
301 rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
302 rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
304 DRM_INFO("GUI not idle!!!\n");
307 void r600_pm_misc(struct radeon_device *rdev)
312 bool r600_gui_idle(struct radeon_device *rdev)
314 if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
320 /* hpd for digital panel detect/disconnect */
321 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
323 bool connected = false;
325 if (ASIC_IS_DCE3(rdev)) {
328 if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
332 if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
336 if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
340 if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
345 if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
349 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
358 if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
362 if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
366 if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
376 void r600_hpd_set_polarity(struct radeon_device *rdev,
377 enum radeon_hpd_id hpd)
380 bool connected = r600_hpd_sense(rdev, hpd);
382 if (ASIC_IS_DCE3(rdev)) {
385 tmp = RREG32(DC_HPD1_INT_CONTROL);
387 tmp &= ~DC_HPDx_INT_POLARITY;
389 tmp |= DC_HPDx_INT_POLARITY;
390 WREG32(DC_HPD1_INT_CONTROL, tmp);
393 tmp = RREG32(DC_HPD2_INT_CONTROL);
395 tmp &= ~DC_HPDx_INT_POLARITY;
397 tmp |= DC_HPDx_INT_POLARITY;
398 WREG32(DC_HPD2_INT_CONTROL, tmp);
401 tmp = RREG32(DC_HPD3_INT_CONTROL);
403 tmp &= ~DC_HPDx_INT_POLARITY;
405 tmp |= DC_HPDx_INT_POLARITY;
406 WREG32(DC_HPD3_INT_CONTROL, tmp);
409 tmp = RREG32(DC_HPD4_INT_CONTROL);
411 tmp &= ~DC_HPDx_INT_POLARITY;
413 tmp |= DC_HPDx_INT_POLARITY;
414 WREG32(DC_HPD4_INT_CONTROL, tmp);
417 tmp = RREG32(DC_HPD5_INT_CONTROL);
419 tmp &= ~DC_HPDx_INT_POLARITY;
421 tmp |= DC_HPDx_INT_POLARITY;
422 WREG32(DC_HPD5_INT_CONTROL, tmp);
426 tmp = RREG32(DC_HPD6_INT_CONTROL);
428 tmp &= ~DC_HPDx_INT_POLARITY;
430 tmp |= DC_HPDx_INT_POLARITY;
431 WREG32(DC_HPD6_INT_CONTROL, tmp);
439 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
441 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
443 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
444 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
447 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
449 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
451 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
452 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
455 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
457 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
459 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
460 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
468 void r600_hpd_init(struct radeon_device *rdev)
470 struct drm_device *dev = rdev->ddev;
471 struct drm_connector *connector;
473 if (ASIC_IS_DCE3(rdev)) {
474 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
475 if (ASIC_IS_DCE32(rdev))
478 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
479 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
480 switch (radeon_connector->hpd.hpd) {
482 WREG32(DC_HPD1_CONTROL, tmp);
483 rdev->irq.hpd[0] = true;
486 WREG32(DC_HPD2_CONTROL, tmp);
487 rdev->irq.hpd[1] = true;
490 WREG32(DC_HPD3_CONTROL, tmp);
491 rdev->irq.hpd[2] = true;
494 WREG32(DC_HPD4_CONTROL, tmp);
495 rdev->irq.hpd[3] = true;
499 WREG32(DC_HPD5_CONTROL, tmp);
500 rdev->irq.hpd[4] = true;
503 WREG32(DC_HPD6_CONTROL, tmp);
504 rdev->irq.hpd[5] = true;
511 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
512 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
513 switch (radeon_connector->hpd.hpd) {
515 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
516 rdev->irq.hpd[0] = true;
519 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
520 rdev->irq.hpd[1] = true;
523 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
524 rdev->irq.hpd[2] = true;
531 if (rdev->irq.installed)
535 void r600_hpd_fini(struct radeon_device *rdev)
537 struct drm_device *dev = rdev->ddev;
538 struct drm_connector *connector;
540 if (ASIC_IS_DCE3(rdev)) {
541 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
542 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
543 switch (radeon_connector->hpd.hpd) {
545 WREG32(DC_HPD1_CONTROL, 0);
546 rdev->irq.hpd[0] = false;
549 WREG32(DC_HPD2_CONTROL, 0);
550 rdev->irq.hpd[1] = false;
553 WREG32(DC_HPD3_CONTROL, 0);
554 rdev->irq.hpd[2] = false;
557 WREG32(DC_HPD4_CONTROL, 0);
558 rdev->irq.hpd[3] = false;
562 WREG32(DC_HPD5_CONTROL, 0);
563 rdev->irq.hpd[4] = false;
566 WREG32(DC_HPD6_CONTROL, 0);
567 rdev->irq.hpd[5] = false;
574 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
575 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
576 switch (radeon_connector->hpd.hpd) {
578 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
579 rdev->irq.hpd[0] = false;
582 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
583 rdev->irq.hpd[1] = false;
586 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
587 rdev->irq.hpd[2] = false;
599 void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
604 /* flush hdp cache so updates hit vram */
605 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
607 WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
608 WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
609 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
610 for (i = 0; i < rdev->usec_timeout; i++) {
612 tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
613 tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
615 printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
625 int r600_pcie_gart_init(struct radeon_device *rdev)
629 if (rdev->gart.table.vram.robj) {
630 WARN(1, "R600 PCIE GART already initialized.\n");
633 /* Initialize common gart structure */
634 r = radeon_gart_init(rdev);
637 rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
638 return radeon_gart_table_vram_alloc(rdev);
641 int r600_pcie_gart_enable(struct radeon_device *rdev)
646 if (rdev->gart.table.vram.robj == NULL) {
647 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
650 r = radeon_gart_table_vram_pin(rdev);
653 radeon_gart_restore(rdev);
656 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
657 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
658 EFFECTIVE_L2_QUEUE_SIZE(7));
659 WREG32(VM_L2_CNTL2, 0);
660 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
661 /* Setup TLB control */
662 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
663 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
664 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
665 ENABLE_WAIT_L2_QUERY;
666 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
667 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
668 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
669 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
670 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
671 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
672 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
673 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
674 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
675 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
676 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
677 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
678 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
679 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
680 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
681 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
682 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
683 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
684 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
685 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
686 (u32)(rdev->dummy_page.addr >> 12));
687 for (i = 1; i < 7; i++)
688 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
690 r600_pcie_gart_tlb_flush(rdev);
691 rdev->gart.ready = true;
695 void r600_pcie_gart_disable(struct radeon_device *rdev)
700 /* Disable all tables */
701 for (i = 0; i < 7; i++)
702 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
704 /* Disable L2 cache */
705 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
706 EFFECTIVE_L2_QUEUE_SIZE(7));
707 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
708 /* Setup L1 TLB control */
709 tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
710 ENABLE_WAIT_L2_QUERY;
711 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
712 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
713 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
714 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
715 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
716 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
717 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
718 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
719 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
720 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
721 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
722 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
723 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
724 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
725 if (rdev->gart.table.vram.robj) {
726 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
727 if (likely(r == 0)) {
728 radeon_bo_kunmap(rdev->gart.table.vram.robj);
729 radeon_bo_unpin(rdev->gart.table.vram.robj);
730 radeon_bo_unreserve(rdev->gart.table.vram.robj);
735 void r600_pcie_gart_fini(struct radeon_device *rdev)
737 radeon_gart_fini(rdev);
738 r600_pcie_gart_disable(rdev);
739 radeon_gart_table_vram_free(rdev);
742 void r600_agp_enable(struct radeon_device *rdev)
748 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
749 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
750 EFFECTIVE_L2_QUEUE_SIZE(7));
751 WREG32(VM_L2_CNTL2, 0);
752 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
753 /* Setup TLB control */
754 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
755 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
756 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
757 ENABLE_WAIT_L2_QUERY;
758 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
759 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
760 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
761 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
762 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
763 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
764 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
765 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
766 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
767 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
768 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
769 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
770 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
771 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
772 for (i = 0; i < 7; i++)
773 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
776 int r600_mc_wait_for_idle(struct radeon_device *rdev)
781 for (i = 0; i < rdev->usec_timeout; i++) {
783 tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
791 static void r600_mc_program(struct radeon_device *rdev)
793 struct rv515_mc_save save;
798 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
799 WREG32((0x2c14 + j), 0x00000000);
800 WREG32((0x2c18 + j), 0x00000000);
801 WREG32((0x2c1c + j), 0x00000000);
802 WREG32((0x2c20 + j), 0x00000000);
803 WREG32((0x2c24 + j), 0x00000000);
805 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
807 rv515_mc_stop(rdev, &save);
808 if (r600_mc_wait_for_idle(rdev)) {
809 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
811 /* Lockout access through VGA aperture (doesn't exist before R600) */
812 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
813 /* Update configuration */
814 if (rdev->flags & RADEON_IS_AGP) {
815 if (rdev->mc.vram_start < rdev->mc.gtt_start) {
816 /* VRAM before AGP */
817 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
818 rdev->mc.vram_start >> 12);
819 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
820 rdev->mc.gtt_end >> 12);
823 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
824 rdev->mc.gtt_start >> 12);
825 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
826 rdev->mc.vram_end >> 12);
829 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
830 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
832 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
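/* MC_VM_FB_LOCATION packs the VRAM top (upper 16 bits) and base (lower
 * 16 bits) in 16MB units, hence the >> 24 shifts below. */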
833 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
834 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
835 WREG32(MC_VM_FB_LOCATION, tmp);
836 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
837 WREG32(HDP_NONSURFACE_INFO, (2 << 7));
838 WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
839 if (rdev->flags & RADEON_IS_AGP) {
840 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
841 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
842 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
844 WREG32(MC_VM_AGP_BASE, 0);
845 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
846 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
848 if (r600_mc_wait_for_idle(rdev)) {
849 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
851 rv515_mc_resume(rdev, &save);
852 /* we need to own VRAM, so turn off the VGA renderer here
853 * to stop it overwriting our objects */
854 rv515_vga_render_disable(rdev);
858 * r600_vram_gtt_location - try to find VRAM & GTT location
859 * @rdev: radeon device structure holding all necessary information
860 * @mc: memory controller structure holding memory information
862 * Function will try to place VRAM at the same address as in the CPU (PCI)
863 * address space, as some GPUs seem to have issues when we reprogram it to a
864 * different address space.
866 * If there is not enough space to fit the invisible VRAM after the
867 * aperture, then we limit the VRAM size to the aperture.
869 * If we are using AGP, then place VRAM adjacent to the AGP aperture, as we need
870 * them to be contiguous from the GPU point of view so that we can program the GPU to
871 * catch accesses outside of them (weird GPU policy, see ??).
873 * This function never fails; the worst case is limiting VRAM or GTT.
875 * Note: GTT start, end and size should be initialized before calling this
876 * function on an AGP platform.
878 void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
880 u64 size_bf, size_af;
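	/* size_bf/size_af: space available before/after the AGP aperture in the
	 * GPU address space; VRAM is placed on whichever side has more room. */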
882 if (mc->mc_vram_size > 0xE0000000) {
883 /* leave room for at least 512M GTT */
884 dev_warn(rdev->dev, "limiting VRAM\n");
885 mc->real_vram_size = 0xE0000000;
886 mc->mc_vram_size = 0xE0000000;
888 if (rdev->flags & RADEON_IS_AGP) {
889 size_bf = mc->gtt_start;
890 size_af = 0xFFFFFFFF - mc->gtt_end + 1;
891 if (size_bf > size_af) {
892 if (mc->mc_vram_size > size_bf) {
893 dev_warn(rdev->dev, "limiting VRAM\n");
894 mc->real_vram_size = size_bf;
895 mc->mc_vram_size = size_bf;
897 mc->vram_start = mc->gtt_start - mc->mc_vram_size;
899 if (mc->mc_vram_size > size_af) {
900 dev_warn(rdev->dev, "limiting VRAM\n");
901 mc->real_vram_size = size_af;
902 mc->mc_vram_size = size_af;
904 mc->vram_start = mc->gtt_end;
906 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
907 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
908 mc->mc_vram_size >> 20, mc->vram_start,
909 mc->vram_end, mc->real_vram_size >> 20);
912 if (rdev->flags & RADEON_IS_IGP)
913 base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
914 radeon_vram_location(rdev, &rdev->mc, base);
915 radeon_gtt_location(rdev, mc);
919 int r600_mc_init(struct radeon_device *rdev)
922 int chansize, numchan;
924 /* Get VRAM information */
925 rdev->mc.vram_is_ddr = true;
926 tmp = RREG32(RAMCFG);
927 if (tmp & CHANSIZE_OVERRIDE) {
929 } else if (tmp & CHANSIZE_MASK) {
935 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
950 rdev->mc.vram_width = numchan * chansize;
951 /* Could the aperture size report 0? */
952 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
953 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
954 /* Setup GPU memory space */
955 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
956 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
957 rdev->mc.visible_vram_size = rdev->mc.aper_size;
958 r600_vram_gtt_location(rdev, &rdev->mc);
960 if (rdev->flags & RADEON_IS_IGP)
961 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
962 radeon_update_bandwidth_info(rdev);
966 /* We don't check whether the GPU really needs a reset; we simply do the
967 * reset. It's up to the caller to determine if the GPU needs one. We
968 * might add a helper function to check that.
970 int r600_gpu_soft_reset(struct radeon_device *rdev)
972 struct rv515_mc_save save;
973 u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
974 S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
975 S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
976 S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
977 S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
978 S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
979 S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
980 S_008010_GUI_ACTIVE(1);
981 u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
982 S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
983 S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
984 S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
985 S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
986 S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
987 S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
988 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
991 dev_info(rdev->dev, "GPU softreset\n");
992 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
993 RREG32(R_008010_GRBM_STATUS));
994 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
995 RREG32(R_008014_GRBM_STATUS2));
996 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
997 RREG32(R_000E50_SRBM_STATUS));
998 rv515_mc_stop(rdev, &save);
999 if (r600_mc_wait_for_idle(rdev)) {
1000 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1002 /* Disable CP parsing/prefetching */
1003 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1004 /* Check if any of the rendering block is busy and reset it */
1005 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
1006 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
1007 tmp = S_008020_SOFT_RESET_CR(1) |
1008 S_008020_SOFT_RESET_DB(1) |
1009 S_008020_SOFT_RESET_CB(1) |
1010 S_008020_SOFT_RESET_PA(1) |
1011 S_008020_SOFT_RESET_SC(1) |
1012 S_008020_SOFT_RESET_SMX(1) |
1013 S_008020_SOFT_RESET_SPI(1) |
1014 S_008020_SOFT_RESET_SX(1) |
1015 S_008020_SOFT_RESET_SH(1) |
1016 S_008020_SOFT_RESET_TC(1) |
1017 S_008020_SOFT_RESET_TA(1) |
1018 S_008020_SOFT_RESET_VC(1) |
1019 S_008020_SOFT_RESET_VGT(1);
1020 dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1021 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1022 RREG32(R_008020_GRBM_SOFT_RESET);
1024 WREG32(R_008020_GRBM_SOFT_RESET, 0);
1026 /* Reset CP (we always reset CP) */
1027 tmp = S_008020_SOFT_RESET_CP(1);
1028 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1029 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1030 RREG32(R_008020_GRBM_SOFT_RESET);
1032 WREG32(R_008020_GRBM_SOFT_RESET, 0);
1033 /* Wait a little for things to settle down */
1035 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
1036 RREG32(R_008010_GRBM_STATUS));
1037 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
1038 RREG32(R_008014_GRBM_STATUS2));
1039 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
1040 RREG32(R_000E50_SRBM_STATUS));
1041 rv515_mc_resume(rdev, &save);
1045 bool r600_gpu_is_lockup(struct radeon_device *rdev)
1052 srbm_status = RREG32(R_000E50_SRBM_STATUS);
1053 grbm_status = RREG32(R_008010_GRBM_STATUS);
1054 grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
1055 if (!G_008010_GUI_ACTIVE(grbm_status)) {
1056 r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
1059 /* force CP activities */
1060 r = radeon_ring_lock(rdev, 2);
1063 radeon_ring_write(rdev, 0x80000000);
1064 radeon_ring_write(rdev, 0x80000000);
1065 radeon_ring_unlock_commit(rdev);
1067 rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
1068 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
1071 int r600_asic_reset(struct radeon_device *rdev)
1073 return r600_gpu_soft_reset(rdev);
1076 static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
1078 u32 backend_disable_mask)
1080 u32 backend_map = 0;
1081 u32 enabled_backends_mask;
1082 u32 enabled_backends_count;
1084 u32 swizzle_pipe[R6XX_MAX_PIPES];
1088 if (num_tile_pipes > R6XX_MAX_PIPES)
1089 num_tile_pipes = R6XX_MAX_PIPES;
1090 if (num_tile_pipes < 1)
1092 if (num_backends > R6XX_MAX_BACKENDS)
1093 num_backends = R6XX_MAX_BACKENDS;
1094 if (num_backends < 1)
1097 enabled_backends_mask = 0;
1098 enabled_backends_count = 0;
1099 for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
1100 if (((backend_disable_mask >> i) & 1) == 0) {
1101 enabled_backends_mask |= (1 << i);
1102 ++enabled_backends_count;
1104 if (enabled_backends_count == num_backends)
1108 if (enabled_backends_count == 0) {
1109 enabled_backends_mask = 1;
1110 enabled_backends_count = 1;
1113 if (enabled_backends_count != num_backends)
1114 num_backends = enabled_backends_count;
1116 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
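	/* The swizzle tables below reorder tile pipe indices; for the 6/7/8 pipe
	 * cases even indices come first, then odd ones, presumably to spread
	 * adjacent pipes across different backends. */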
1117 switch (num_tile_pipes) {
1119 swizzle_pipe[0] = 0;
1122 swizzle_pipe[0] = 0;
1123 swizzle_pipe[1] = 1;
1126 swizzle_pipe[0] = 0;
1127 swizzle_pipe[1] = 1;
1128 swizzle_pipe[2] = 2;
1131 swizzle_pipe[0] = 0;
1132 swizzle_pipe[1] = 1;
1133 swizzle_pipe[2] = 2;
1134 swizzle_pipe[3] = 3;
1137 swizzle_pipe[0] = 0;
1138 swizzle_pipe[1] = 1;
1139 swizzle_pipe[2] = 2;
1140 swizzle_pipe[3] = 3;
1141 swizzle_pipe[4] = 4;
1144 swizzle_pipe[0] = 0;
1145 swizzle_pipe[1] = 2;
1146 swizzle_pipe[2] = 4;
1147 swizzle_pipe[3] = 5;
1148 swizzle_pipe[4] = 1;
1149 swizzle_pipe[5] = 3;
1152 swizzle_pipe[0] = 0;
1153 swizzle_pipe[1] = 2;
1154 swizzle_pipe[2] = 4;
1155 swizzle_pipe[3] = 6;
1156 swizzle_pipe[4] = 1;
1157 swizzle_pipe[5] = 3;
1158 swizzle_pipe[6] = 5;
1161 swizzle_pipe[0] = 0;
1162 swizzle_pipe[1] = 2;
1163 swizzle_pipe[2] = 4;
1164 swizzle_pipe[3] = 6;
1165 swizzle_pipe[4] = 1;
1166 swizzle_pipe[5] = 3;
1167 swizzle_pipe[6] = 5;
1168 swizzle_pipe[7] = 7;
1173 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
1174 while (((1 << cur_backend) & enabled_backends_mask) == 0)
1175 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
1177 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
1179 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
1185 int r600_count_pipe_bits(uint32_t val)
1189 for (i = 0; i < 32; i++) {
1196 void r600_gpu_init(struct radeon_device *rdev)
1201 u32 cc_rb_backend_disable;
1202 u32 cc_gc_shader_pipe_config;
1206 u32 sq_gpr_resource_mgmt_1 = 0;
1207 u32 sq_gpr_resource_mgmt_2 = 0;
1208 u32 sq_thread_resource_mgmt = 0;
1209 u32 sq_stack_resource_mgmt_1 = 0;
1210 u32 sq_stack_resource_mgmt_2 = 0;
1212 /* FIXME: implement */
1213 switch (rdev->family) {
1215 rdev->config.r600.max_pipes = 4;
1216 rdev->config.r600.max_tile_pipes = 8;
1217 rdev->config.r600.max_simds = 4;
1218 rdev->config.r600.max_backends = 4;
1219 rdev->config.r600.max_gprs = 256;
1220 rdev->config.r600.max_threads = 192;
1221 rdev->config.r600.max_stack_entries = 256;
1222 rdev->config.r600.max_hw_contexts = 8;
1223 rdev->config.r600.max_gs_threads = 16;
1224 rdev->config.r600.sx_max_export_size = 128;
1225 rdev->config.r600.sx_max_export_pos_size = 16;
1226 rdev->config.r600.sx_max_export_smx_size = 128;
1227 rdev->config.r600.sq_num_cf_insts = 2;
1231 rdev->config.r600.max_pipes = 2;
1232 rdev->config.r600.max_tile_pipes = 2;
1233 rdev->config.r600.max_simds = 3;
1234 rdev->config.r600.max_backends = 1;
1235 rdev->config.r600.max_gprs = 128;
1236 rdev->config.r600.max_threads = 192;
1237 rdev->config.r600.max_stack_entries = 128;
1238 rdev->config.r600.max_hw_contexts = 8;
1239 rdev->config.r600.max_gs_threads = 4;
1240 rdev->config.r600.sx_max_export_size = 128;
1241 rdev->config.r600.sx_max_export_pos_size = 16;
1242 rdev->config.r600.sx_max_export_smx_size = 128;
1243 rdev->config.r600.sq_num_cf_insts = 2;
1249 rdev->config.r600.max_pipes = 1;
1250 rdev->config.r600.max_tile_pipes = 1;
1251 rdev->config.r600.max_simds = 2;
1252 rdev->config.r600.max_backends = 1;
1253 rdev->config.r600.max_gprs = 128;
1254 rdev->config.r600.max_threads = 192;
1255 rdev->config.r600.max_stack_entries = 128;
1256 rdev->config.r600.max_hw_contexts = 4;
1257 rdev->config.r600.max_gs_threads = 4;
1258 rdev->config.r600.sx_max_export_size = 128;
1259 rdev->config.r600.sx_max_export_pos_size = 16;
1260 rdev->config.r600.sx_max_export_smx_size = 128;
1261 rdev->config.r600.sq_num_cf_insts = 1;
1264 rdev->config.r600.max_pipes = 4;
1265 rdev->config.r600.max_tile_pipes = 4;
1266 rdev->config.r600.max_simds = 4;
1267 rdev->config.r600.max_backends = 4;
1268 rdev->config.r600.max_gprs = 192;
1269 rdev->config.r600.max_threads = 192;
1270 rdev->config.r600.max_stack_entries = 256;
1271 rdev->config.r600.max_hw_contexts = 8;
1272 rdev->config.r600.max_gs_threads = 16;
1273 rdev->config.r600.sx_max_export_size = 128;
1274 rdev->config.r600.sx_max_export_pos_size = 16;
1275 rdev->config.r600.sx_max_export_smx_size = 128;
1276 rdev->config.r600.sq_num_cf_insts = 2;
1282 /* Initialize HDP */
1283 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
1284 WREG32((0x2c14 + j), 0x00000000);
1285 WREG32((0x2c18 + j), 0x00000000);
1286 WREG32((0x2c1c + j), 0x00000000);
1287 WREG32((0x2c20 + j), 0x00000000);
1288 WREG32((0x2c24 + j), 0x00000000);
1291 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
1295 ramcfg = RREG32(RAMCFG);
1296 switch (rdev->config.r600.max_tile_pipes) {
1298 tiling_config |= PIPE_TILING(0);
1301 tiling_config |= PIPE_TILING(1);
1304 tiling_config |= PIPE_TILING(2);
1307 tiling_config |= PIPE_TILING(3);
1312 rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
1313 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1314 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1315 tiling_config |= GROUP_SIZE(0);
1316 rdev->config.r600.tiling_group_size = 256;
1317 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1319 tiling_config |= ROW_TILING(3);
1320 tiling_config |= SAMPLE_SPLIT(3);
1322 tiling_config |= ROW_TILING(tmp);
1323 tiling_config |= SAMPLE_SPLIT(tmp);
1325 tiling_config |= BANK_SWAPS(1);
1327 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1328 cc_rb_backend_disable |=
1329 BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
1331 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
1332 cc_gc_shader_pipe_config |=
1333 INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
1334 cc_gc_shader_pipe_config |=
1335 INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
1337 backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
1338 (R6XX_MAX_BACKENDS -
1339 r600_count_pipe_bits((cc_rb_backend_disable &
1340 R6XX_MAX_BACKENDS_MASK) >> 16)),
1341 (cc_rb_backend_disable >> 16));
1343 tiling_config |= BACKEND_MAP(backend_map);
1344 WREG32(GB_TILING_CONFIG, tiling_config);
1345 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1346 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1349 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1350 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1351 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1353 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1354 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1355 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1357 /* Setup some CP states */
1358 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
1359 WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
1361 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
1362 SYNC_WALKER | SYNC_ALIGNER));
1363 /* Setup various GPU states */
1364 if (rdev->family == CHIP_RV670)
1365 WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
1367 tmp = RREG32(SX_DEBUG_1);
1368 tmp |= SMX_EVENT_RELEASE;
1369 if ((rdev->family > CHIP_R600))
1370 tmp |= ENABLE_NEW_SMX_ADDRESS;
1371 WREG32(SX_DEBUG_1, tmp);
1373 if (((rdev->family) == CHIP_R600) ||
1374 ((rdev->family) == CHIP_RV630) ||
1375 ((rdev->family) == CHIP_RV610) ||
1376 ((rdev->family) == CHIP_RV620) ||
1377 ((rdev->family) == CHIP_RS780) ||
1378 ((rdev->family) == CHIP_RS880)) {
1379 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
1381 WREG32(DB_DEBUG, 0);
1383 WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
1384 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
1386 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1387 WREG32(VGT_NUM_INSTANCES, 0);
1389 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
1390 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
1392 tmp = RREG32(SQ_MS_FIFO_SIZES);
1393 if (((rdev->family) == CHIP_RV610) ||
1394 ((rdev->family) == CHIP_RV620) ||
1395 ((rdev->family) == CHIP_RS780) ||
1396 ((rdev->family) == CHIP_RS880)) {
1397 tmp = (CACHE_FIFO_SIZE(0xa) |
1398 FETCH_FIFO_HIWATER(0xa) |
1399 DONE_FIFO_HIWATER(0xe0) |
1400 ALU_UPDATE_FIFO_HIWATER(0x8));
1401 } else if (((rdev->family) == CHIP_R600) ||
1402 ((rdev->family) == CHIP_RV630)) {
1403 tmp &= ~DONE_FIFO_HIWATER(0xff);
1404 tmp |= DONE_FIFO_HIWATER(0x4);
1406 WREG32(SQ_MS_FIFO_SIZES, tmp);
1408 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
1409 * should be adjusted as needed by the 2D/3D drivers. This just sets default values
1411 sq_config = RREG32(SQ_CONFIG);
1412 sq_config &= ~(PS_PRIO(3) |
1416 sq_config |= (DX9_CONSTS |
1423 if ((rdev->family) == CHIP_R600) {
1424 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
1426 NUM_CLAUSE_TEMP_GPRS(4));
1427 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
1429 sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
1430 NUM_VS_THREADS(48) |
1433 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
1434 NUM_VS_STACK_ENTRIES(128));
1435 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
1436 NUM_ES_STACK_ENTRIES(0));
1437 } else if (((rdev->family) == CHIP_RV610) ||
1438 ((rdev->family) == CHIP_RV620) ||
1439 ((rdev->family) == CHIP_RS780) ||
1440 ((rdev->family) == CHIP_RS880)) {
1441 /* no vertex cache */
1442 sq_config &= ~VC_ENABLE;
1444 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1446 NUM_CLAUSE_TEMP_GPRS(2));
1447 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1449 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1450 NUM_VS_THREADS(78) |
1452 NUM_ES_THREADS(31));
1453 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1454 NUM_VS_STACK_ENTRIES(40));
1455 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1456 NUM_ES_STACK_ENTRIES(16));
1457 } else if (((rdev->family) == CHIP_RV630) ||
1458 ((rdev->family) == CHIP_RV635)) {
1459 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1461 NUM_CLAUSE_TEMP_GPRS(2));
1462 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
1464 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1465 NUM_VS_THREADS(78) |
1467 NUM_ES_THREADS(31));
1468 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
1469 NUM_VS_STACK_ENTRIES(40));
1470 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
1471 NUM_ES_STACK_ENTRIES(16));
1472 } else if ((rdev->family) == CHIP_RV670) {
1473 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
1475 NUM_CLAUSE_TEMP_GPRS(2));
1476 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
1478 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
1479 NUM_VS_THREADS(78) |
1481 NUM_ES_THREADS(31));
1482 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
1483 NUM_VS_STACK_ENTRIES(64));
1484 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
1485 NUM_ES_STACK_ENTRIES(64));
1488 WREG32(SQ_CONFIG, sq_config);
1489 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
1490 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
1491 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
1492 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
1493 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
1495 if (((rdev->family) == CHIP_RV610) ||
1496 ((rdev->family) == CHIP_RV620) ||
1497 ((rdev->family) == CHIP_RS780) ||
1498 ((rdev->family) == CHIP_RS880)) {
1499 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
1501 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
1504 /* More default values. 2D/3D driver should adjust as needed */
1505 WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
1506 S1_X(0x4) | S1_Y(0xc)));
1507 WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
1508 S1_X(0x2) | S1_Y(0x2) |
1509 S2_X(0xa) | S2_Y(0x6) |
1510 S3_X(0x6) | S3_Y(0xa)));
1511 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
1512 S1_X(0x4) | S1_Y(0xc) |
1513 S2_X(0x1) | S2_Y(0x6) |
1514 S3_X(0xa) | S3_Y(0xe)));
1515 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
1516 S5_X(0x0) | S5_Y(0x0) |
1517 S6_X(0xb) | S6_Y(0x4) |
1518 S7_X(0x7) | S7_Y(0x8)));
1520 WREG32(VGT_STRMOUT_EN, 0);
1521 tmp = rdev->config.r600.max_pipes * 16;
1522 switch (rdev->family) {
1538 WREG32(VGT_ES_PER_GS, 128);
1539 WREG32(VGT_GS_PER_ES, tmp);
1540 WREG32(VGT_GS_PER_VS, 2);
1541 WREG32(VGT_GS_VERTEX_REUSE, 16);
1543 /* more default values. 2D/3D driver should adjust as needed */
1544 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1545 WREG32(VGT_STRMOUT_EN, 0);
1547 WREG32(PA_SC_MODE_CNTL, 0);
1548 WREG32(PA_SC_AA_CONFIG, 0);
1549 WREG32(PA_SC_LINE_STIPPLE, 0);
1550 WREG32(SPI_INPUT_Z, 0);
1551 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
1552 WREG32(CB_COLOR7_FRAG, 0);
1554 /* Clear render buffer base addresses */
1555 WREG32(CB_COLOR0_BASE, 0);
1556 WREG32(CB_COLOR1_BASE, 0);
1557 WREG32(CB_COLOR2_BASE, 0);
1558 WREG32(CB_COLOR3_BASE, 0);
1559 WREG32(CB_COLOR4_BASE, 0);
1560 WREG32(CB_COLOR5_BASE, 0);
1561 WREG32(CB_COLOR6_BASE, 0);
1562 WREG32(CB_COLOR7_BASE, 0);
1563 WREG32(CB_COLOR7_FRAG, 0);
1565 switch (rdev->family) {
1570 tmp = TC_L2_SIZE(8);
1574 tmp = TC_L2_SIZE(4);
1577 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
1580 tmp = TC_L2_SIZE(0);
1583 WREG32(TC_CNTL, tmp);
1585 tmp = RREG32(HDP_HOST_PATH_CNTL);
1586 WREG32(HDP_HOST_PATH_CNTL, tmp);
1588 tmp = RREG32(ARB_POP);
1589 tmp |= ENABLE_TC128;
1590 WREG32(ARB_POP, tmp);
1592 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1593 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
1595 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
1600 * Indirect register accessors
1602 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
1606 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1607 (void)RREG32(PCIE_PORT_INDEX);
1608 r = RREG32(PCIE_PORT_DATA);
1612 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1614 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1615 (void)RREG32(PCIE_PORT_INDEX);
1616 WREG32(PCIE_PORT_DATA, (v));
1617 (void)RREG32(PCIE_PORT_DATA);
1623 void r600_cp_stop(struct radeon_device *rdev)
1625 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1628 int r600_init_microcode(struct radeon_device *rdev)
1630 struct platform_device *pdev;
1631 const char *chip_name;
1632 const char *rlc_chip_name;
1633 size_t pfp_req_size, me_req_size, rlc_req_size;
1639 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
1642 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1646 switch (rdev->family) {
1649 rlc_chip_name = "R600";
1652 chip_name = "RV610";
1653 rlc_chip_name = "R600";
1656 chip_name = "RV630";
1657 rlc_chip_name = "R600";
1660 chip_name = "RV620";
1661 rlc_chip_name = "R600";
1664 chip_name = "RV635";
1665 rlc_chip_name = "R600";
1668 chip_name = "RV670";
1669 rlc_chip_name = "R600";
1673 chip_name = "RS780";
1674 rlc_chip_name = "R600";
1677 chip_name = "RV770";
1678 rlc_chip_name = "R700";
1682 chip_name = "RV730";
1683 rlc_chip_name = "R700";
1686 chip_name = "RV710";
1687 rlc_chip_name = "R700";
1690 chip_name = "CEDAR";
1691 rlc_chip_name = "CEDAR";
1694 chip_name = "REDWOOD";
1695 rlc_chip_name = "REDWOOD";
1698 chip_name = "JUNIPER";
1699 rlc_chip_name = "JUNIPER";
1703 chip_name = "CYPRESS";
1704 rlc_chip_name = "CYPRESS";
1709 if (rdev->family >= CHIP_CEDAR) {
1710 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
1711 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
1712 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
1713 } else if (rdev->family >= CHIP_RV770) {
1714 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
1715 me_req_size = R700_PM4_UCODE_SIZE * 4;
1716 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
1718 pfp_req_size = PFP_UCODE_SIZE * 4;
1719 me_req_size = PM4_UCODE_SIZE * 12;
1720 rlc_req_size = RLC_UCODE_SIZE * 4;
1723 DRM_INFO("Loading %s Microcode\n", chip_name);
1725 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1726 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
1729 if (rdev->pfp_fw->size != pfp_req_size) {
1731 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
1732 rdev->pfp_fw->size, fw_name);
1737 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
1738 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
1741 if (rdev->me_fw->size != me_req_size) {
1743 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
1744 rdev->me_fw->size, fw_name);
1748 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
1749 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
1752 if (rdev->rlc_fw->size != rlc_req_size) {
1754 "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
1755 rdev->rlc_fw->size, fw_name);
1760 platform_device_unregister(pdev);
1765 "r600_cp: Failed to load firmware \"%s\"\n",
1767 release_firmware(rdev->pfp_fw);
1768 rdev->pfp_fw = NULL;
1769 release_firmware(rdev->me_fw);
1771 release_firmware(rdev->rlc_fw);
1772 rdev->rlc_fw = NULL;
1777 static int r600_cp_load_microcode(struct radeon_device *rdev)
1779 const __be32 *fw_data;
1782 if (!rdev->me_fw || !rdev->pfp_fw)
1787 WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
1790 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
1791 RREG32(GRBM_SOFT_RESET);
1793 WREG32(GRBM_SOFT_RESET, 0);
1795 WREG32(CP_ME_RAM_WADDR, 0);
1797 fw_data = (const __be32 *)rdev->me_fw->data;
1798 WREG32(CP_ME_RAM_WADDR, 0);
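	/* The ME image is streamed in as PM4_UCODE_SIZE * 3 dwords through
	 * CP_ME_RAM_DATA, matching the PM4_UCODE_SIZE * 12 byte size checked
	 * in r600_init_microcode(). */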
1799 for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
1800 WREG32(CP_ME_RAM_DATA,
1801 be32_to_cpup(fw_data++));
1803 fw_data = (const __be32 *)rdev->pfp_fw->data;
1804 WREG32(CP_PFP_UCODE_ADDR, 0);
1805 for (i = 0; i < PFP_UCODE_SIZE; i++)
1806 WREG32(CP_PFP_UCODE_DATA,
1807 be32_to_cpup(fw_data++));
1809 WREG32(CP_PFP_UCODE_ADDR, 0);
1810 WREG32(CP_ME_RAM_WADDR, 0);
1811 WREG32(CP_ME_RAM_RADDR, 0);
1815 int r600_cp_start(struct radeon_device *rdev)
1820 r = radeon_ring_lock(rdev, 7);
1822 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1825 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
1826 radeon_ring_write(rdev, 0x1);
1827 if (rdev->family >= CHIP_CEDAR) {
1828 radeon_ring_write(rdev, 0x0);
1829 radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
1830 } else if (rdev->family >= CHIP_RV770) {
1831 radeon_ring_write(rdev, 0x0);
1832 radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
1834 radeon_ring_write(rdev, 0x3);
1835 radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
1837 radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1838 radeon_ring_write(rdev, 0);
1839 radeon_ring_write(rdev, 0);
1840 radeon_ring_unlock_commit(rdev);
1843 WREG32(R_0086D8_CP_ME_CNTL, cp_me);
1847 int r600_cp_resume(struct radeon_device *rdev)
1854 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
1855 RREG32(GRBM_SOFT_RESET);
1857 WREG32(GRBM_SOFT_RESET, 0);
1859 /* Set ring buffer size */
1860 rb_bufsz = drm_order(rdev->cp.ring_size / 8);
1861 tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1863 tmp |= BUF_SWAP_32BIT;
1865 WREG32(CP_RB_CNTL, tmp);
1866 WREG32(CP_SEM_WAIT_TIMER, 0x4);
1868 /* Set the write pointer delay */
1869 WREG32(CP_RB_WPTR_DELAY, 0);
1871 /* Initialize the ring buffer's read and write pointers */
1872 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1873 WREG32(CP_RB_RPTR_WR, 0);
1874 WREG32(CP_RB_WPTR, 0);
1875 WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
1876 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
1878 WREG32(CP_RB_CNTL, tmp);
1880 WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
1881 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
1883 rdev->cp.rptr = RREG32(CP_RB_RPTR);
1884 rdev->cp.wptr = RREG32(CP_RB_WPTR);
1886 r600_cp_start(rdev);
1887 rdev->cp.ready = true;
1888 r = radeon_ring_test(rdev);
1890 rdev->cp.ready = false;
1896 void r600_cp_commit(struct radeon_device *rdev)
1898 WREG32(CP_RB_WPTR, rdev->cp.wptr);
1899 (void)RREG32(CP_RB_WPTR);
1902 void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
1906 /* Align ring size */
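	/* The computation below rounds the ring size up to a power of two (in
	 * bytes); r600_cp_resume() re-derives drm_order() from it to program
	 * CP_RB_CNTL. */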
1907 rb_bufsz = drm_order(ring_size / 8);
1908 ring_size = (1 << (rb_bufsz + 1)) * 4;
1909 rdev->cp.ring_size = ring_size;
1910 rdev->cp.align_mask = 16 - 1;
1913 void r600_cp_fini(struct radeon_device *rdev)
1916 radeon_ring_fini(rdev);
1921 * GPU scratch register helper functions.
1923 void r600_scratch_init(struct radeon_device *rdev)
1927 rdev->scratch.num_reg = 7;
1928 for (i = 0; i < rdev->scratch.num_reg; i++) {
1929 rdev->scratch.free[i] = true;
1930 rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
1934 int r600_ring_test(struct radeon_device *rdev)
1941 r = radeon_scratch_get(rdev, &scratch);
1943 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
1946 WREG32(scratch, 0xCAFEDEAD);
1947 r = radeon_ring_lock(rdev, 3);
1949 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1950 radeon_scratch_free(rdev, scratch);
1953 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1954 radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
1955 radeon_ring_write(rdev, 0xDEADBEEF);
1956 radeon_ring_unlock_commit(rdev);
1957 for (i = 0; i < rdev->usec_timeout; i++) {
1958 tmp = RREG32(scratch);
1959 if (tmp == 0xDEADBEEF)
1963 if (i < rdev->usec_timeout) {
1964 DRM_INFO("ring test succeeded in %d usecs\n", i);
1966 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
1970 radeon_scratch_free(rdev, scratch);
1974 void r600_wb_disable(struct radeon_device *rdev)
1978 WREG32(SCRATCH_UMSK, 0);
1979 if (rdev->wb.wb_obj) {
1980 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
1981 if (unlikely(r != 0))
1983 radeon_bo_kunmap(rdev->wb.wb_obj);
1984 radeon_bo_unpin(rdev->wb.wb_obj);
1985 radeon_bo_unreserve(rdev->wb.wb_obj);
1989 void r600_wb_fini(struct radeon_device *rdev)
1991 r600_wb_disable(rdev);
1992 if (rdev->wb.wb_obj) {
1993 radeon_bo_unref(&rdev->wb.wb_obj);
1995 rdev->wb.wb_obj = NULL;
1999 int r600_wb_enable(struct radeon_device *rdev)
2003 if (rdev->wb.wb_obj == NULL) {
2004 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
2005 RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
2007 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
2010 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
2011 if (unlikely(r != 0)) {
2015 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
2016 &rdev->wb.gpu_addr);
2018 radeon_bo_unreserve(rdev->wb.wb_obj);
2019 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
2023 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
2024 radeon_bo_unreserve(rdev->wb.wb_obj);
2026 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
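	/* Writeback buffer layout (a single GPU page): the scratch register
	 * writeback area starts at the page base (SCRATCH_ADDR takes a
	 * 256-byte-aligned address) and the CP read pointer is written back
	 * at offset 1024. */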
2031 WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
2032 WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
2033 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
2034 WREG32(SCRATCH_UMSK, 0xff);
2038 void r600_fence_ring_emit(struct radeon_device *rdev,
2039 struct radeon_fence *fence)
2041 /* Also consider EVENT_WRITE_EOP; it handles the interrupts + timestamps + events */
2043 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
2044 radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
2045 /* wait for 3D idle clean */
2046 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2047 radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2048 radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2049 /* Emit fence sequence & fire IRQ */
2050 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2051 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2052 radeon_ring_write(rdev, fence->seq);
2053 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2054 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
2055 radeon_ring_write(rdev, RB_INT_STAT);
2058 int r600_copy_blit(struct radeon_device *rdev,
2059 uint64_t src_offset, uint64_t dst_offset,
2060 unsigned num_pages, struct radeon_fence *fence)
2064 mutex_lock(&rdev->r600_blit.mutex);
2065 rdev->r600_blit.vb_ib = NULL;
2066 r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
2068 if (rdev->r600_blit.vb_ib)
2069 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
2070 mutex_unlock(&rdev->r600_blit.mutex);
2073 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
2074 r600_blit_done_copy(rdev, fence);
2075 mutex_unlock(&rdev->r600_blit.mutex);
2079 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2080 uint32_t tiling_flags, uint32_t pitch,
2081 uint32_t offset, uint32_t obj_size)
2083 /* FIXME: implement */
2087 void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2089 /* FIXME: implement */
2093 bool r600_card_posted(struct radeon_device *rdev)
2097 /* first check CRTCs */
2098 reg = RREG32(D1CRTC_CONTROL) |
2099 RREG32(D2CRTC_CONTROL);
2103 /* then check MEM_SIZE, in case the crtcs are off */
2104 if (RREG32(CONFIG_MEMSIZE))
2110 int r600_startup(struct radeon_device *rdev)
2114 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2115 r = r600_init_microcode(rdev);
2117 DRM_ERROR("Failed to load firmware!\n");
2122 r600_mc_program(rdev);
2123 if (rdev->flags & RADEON_IS_AGP) {
2124 r600_agp_enable(rdev);
2126 r = r600_pcie_gart_enable(rdev);
2130 r600_gpu_init(rdev);
2131 r = r600_blit_init(rdev);
2133 r600_blit_fini(rdev);
2134 rdev->asic->copy = NULL;
2135 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
2137 /* pin copy shader into vram */
2138 if (rdev->r600_blit.shader_obj) {
2139 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
2140 if (unlikely(r != 0))
2142 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
2143 &rdev->r600_blit.shader_gpu_addr);
2144 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
2146 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
2151 r = r600_irq_init(rdev);
2153 DRM_ERROR("radeon: IH init failed (%d).\n", r);
2154 radeon_irq_kms_fini(rdev);
2159 r = radeon_ring_init(rdev, rdev->cp.ring_size);
2162 r = r600_cp_load_microcode(rdev);
2165 r = r600_cp_resume(rdev);
2168 /* write back buffers are not vital so don't worry about failure */
2169 r600_wb_enable(rdev);
2173 void r600_vga_set_state(struct radeon_device *rdev, bool state)
2177 temp = RREG32(CONFIG_CNTL);
2178 if (state == false) {
2184 WREG32(CONFIG_CNTL, temp);
2187 int r600_resume(struct radeon_device *rdev)
2191 /* Do not reset the GPU before posting; on r600 hw, unlike on r500 hw,
2192 * posting will perform the necessary tasks to bring the GPU back into a good
2196 atom_asic_init(rdev->mode_info.atom_context);
2197 /* Initialize clocks */
2198 r = radeon_clocks_init(rdev);
2203 r = r600_startup(rdev);
2205 DRM_ERROR("r600 startup failed on resume\n");
2209 r = r600_ib_test(rdev);
2211 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
2215 r = r600_audio_init(rdev);
2217 DRM_ERROR("radeon: audio resume failed\n");
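/* Suspend: tear down audio, stop the CP, quiesce the IH, disable write back
 * and the PCIE GART, and unpin the blit shader object. */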
2224 int r600_suspend(struct radeon_device *rdev)
2228 r600_audio_fini(rdev);
2229 /* FIXME: we should wait for ring to be empty */
2231 rdev->cp.ready = false;
2232 r600_irq_suspend(rdev);
2233 r600_wb_disable(rdev);
2234 r600_pcie_gart_disable(rdev);
2235 /* unpin shaders bo */
2236 if (rdev->r600_blit.shader_obj) {
2237 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
2239 radeon_bo_unpin(rdev->r600_blit.shader_obj);
2240 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
2246 /* The plan is to move initialization into this function and use
2247 * helper functions so that radeon_device_init does little more
2248 * than call the asic specific functions. This should also
2249 * allow us to remove a bunch of callback functions
2252 int r600_init(struct radeon_device *rdev)
2256 r = radeon_dummy_page_init(rdev);
2259 if (r600_debugfs_mc_info_init(rdev)) {
2260 DRM_ERROR("Failed to register debugfs file for mc!\n");
2262 /* This doesn't do much */
2263 r = radeon_gem_init(rdev);
2267 if (!radeon_get_bios(rdev)) {
2268 if (ASIC_IS_AVIVO(rdev))
2271 /* Must be an ATOMBIOS */
2272 if (!rdev->is_atom_bios) {
2273 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
2276 r = radeon_atombios_init(rdev);
2279 /* Post card if necessary */
2280 if (!r600_card_posted(rdev)) {
2282 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2285 DRM_INFO("GPU not posted, posting now...\n");
2286 atom_asic_init(rdev->mode_info.atom_context);
2288 /* Initialize scratch registers */
2289 r600_scratch_init(rdev);
2290 /* Initialize surface registers */
2291 radeon_surface_init(rdev);
2292 /* Initialize clocks */
2293 radeon_get_clock_info(rdev->ddev);
2294 r = radeon_clocks_init(rdev);
2297 /* Initialize power management */
2298 radeon_pm_init(rdev);
2300 r = radeon_fence_driver_init(rdev);
2303 if (rdev->flags & RADEON_IS_AGP) {
2304 r = radeon_agp_init(rdev);
2306 radeon_agp_disable(rdev);
2308 r = r600_mc_init(rdev);
2311 /* Memory manager */
2312 r = radeon_bo_init(rdev);
2316 r = radeon_irq_kms_init(rdev);
2320 rdev->cp.ring_obj = NULL;
2321 r600_ring_init(rdev, 1024 * 1024);
2323 rdev->ih.ring_obj = NULL;
2324 r600_ih_ring_init(rdev, 64 * 1024);
2326 r = r600_pcie_gart_init(rdev);
2330 rdev->accel_working = true;
2331 r = r600_startup(rdev);
2333 dev_err(rdev->dev, "disabling GPU acceleration\n");
2336 r600_irq_fini(rdev);
2337 radeon_irq_kms_fini(rdev);
2338 r600_pcie_gart_fini(rdev);
2339 rdev->accel_working = false;
2341 if (rdev->accel_working) {
2342 r = radeon_ib_pool_init(rdev);
2344 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2345 rdev->accel_working = false;
2347 r = r600_ib_test(rdev);
2349 dev_err(rdev->dev, "IB test failed (%d).\n", r);
2350 rdev->accel_working = false;
2355 r = r600_audio_init(rdev);
2357 return r; /* TODO error handling */
2361 void r600_fini(struct radeon_device *rdev)
2363 radeon_pm_fini(rdev);
2364 r600_audio_fini(rdev);
2365 r600_blit_fini(rdev);
2368 r600_irq_fini(rdev);
2369 radeon_irq_kms_fini(rdev);
2370 r600_pcie_gart_fini(rdev);
2371 radeon_agp_fini(rdev);
2372 radeon_gem_fini(rdev);
2373 radeon_fence_driver_fini(rdev);
2374 radeon_clocks_fini(rdev);
2375 radeon_bo_fini(rdev);
2376 radeon_atombios_fini(rdev);
2379 radeon_dummy_page_fini(rdev);
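/* Dispatch an indirect buffer on the CP ring by emitting an INDIRECT_BUFFER
 * packet with the IB GPU address and its length in dwords. */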
2386 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2388 /* FIXME: implement */
2389 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2390 radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
2391 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
2392 radeon_ring_write(rdev, ib->length_dw);
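/* Basic IB sanity test: write 0xDEADBEEF to a scratch register from an
 * indirect buffer, wait on the IB fence, then poll the register until the
 * value shows up or rdev->usec_timeout expires. */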
2395 int r600_ib_test(struct radeon_device *rdev)
2397 struct radeon_ib *ib;
2403 r = radeon_scratch_get(rdev, &scratch);
2405 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2408 WREG32(scratch, 0xCAFEDEAD);
2409 r = radeon_ib_get(rdev, &ib);
2411 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2414 ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2415 ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2416 ib->ptr[2] = 0xDEADBEEF;
2417 ib->ptr[3] = PACKET2(0);
2418 ib->ptr[4] = PACKET2(0);
2419 ib->ptr[5] = PACKET2(0);
2420 ib->ptr[6] = PACKET2(0);
2421 ib->ptr[7] = PACKET2(0);
2422 ib->ptr[8] = PACKET2(0);
2423 ib->ptr[9] = PACKET2(0);
2424 ib->ptr[10] = PACKET2(0);
2425 ib->ptr[11] = PACKET2(0);
2426 ib->ptr[12] = PACKET2(0);
2427 ib->ptr[13] = PACKET2(0);
2428 ib->ptr[14] = PACKET2(0);
2429 ib->ptr[15] = PACKET2(0);
2431 r = radeon_ib_schedule(rdev, ib);
2433 radeon_scratch_free(rdev, scratch);
2434 radeon_ib_free(rdev, &ib);
2435 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2438 r = radeon_fence_wait(ib->fence, false);
2440 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2443 for (i = 0; i < rdev->usec_timeout; i++) {
2444 tmp = RREG32(scratch);
2445 if (tmp == 0xDEADBEEF)
2449 if (i < rdev->usec_timeout) {
2450 DRM_INFO("ib test succeeded in %u usecs\n", i);
2452 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2456 radeon_scratch_free(rdev, scratch);
2457 radeon_ib_free(rdev, &ib);
2464 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty much
2465 * the same as the CP ring buffer, but in reverse. Rather than the CPU
2466 * writing to the ring and the GPU consuming, the GPU writes to the ring
2467 * and the host consumes. As the host irq handler processes interrupts, it
2468 * increments the rptr. When the rptr catches up with the wptr, all the
2469 * current interrupts have been processed.
2472 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2476 /* Align ring size */
2477 rb_bufsz = drm_order(ring_size / 4);
2478 ring_size = (1 << rb_bufsz) * 4;
2479 rdev->ih.ring_size = ring_size;
2480 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
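/* Allocate, pin and kmap the IH ring buffer object in GTT on first use. */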
2484 static int r600_ih_ring_alloc(struct radeon_device *rdev)
2488 /* Allocate ring buffer */
2489 if (rdev->ih.ring_obj == NULL) {
2490 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
2492 RADEON_GEM_DOMAIN_GTT,
2493 &rdev->ih.ring_obj);
2495 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2498 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2499 if (unlikely(r != 0))
2501 r = radeon_bo_pin(rdev->ih.ring_obj,
2502 RADEON_GEM_DOMAIN_GTT,
2503 &rdev->ih.gpu_addr);
2505 radeon_bo_unreserve(rdev->ih.ring_obj);
2506 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
2509 r = radeon_bo_kmap(rdev->ih.ring_obj,
2510 (void **)&rdev->ih.ring);
2511 radeon_bo_unreserve(rdev->ih.ring_obj);
2513 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
2520 static void r600_ih_ring_fini(struct radeon_device *rdev)
2523 if (rdev->ih.ring_obj) {
2524 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2525 if (likely(r == 0)) {
2526 radeon_bo_kunmap(rdev->ih.ring_obj);
2527 radeon_bo_unpin(rdev->ih.ring_obj);
2528 radeon_bo_unreserve(rdev->ih.ring_obj);
2530 radeon_bo_unref(&rdev->ih.ring_obj);
2531 rdev->ih.ring = NULL;
2532 rdev->ih.ring_obj = NULL;
2536 void r600_rlc_stop(struct radeon_device *rdev)
2539 if ((rdev->family >= CHIP_RV770) &&
2540 (rdev->family <= CHIP_RV740)) {
2541 /* r7xx asics need to soft reset RLC before halting */
2542 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2543 RREG32(SRBM_SOFT_RESET);
2545 WREG32(SRBM_SOFT_RESET, 0);
2546 RREG32(SRBM_SOFT_RESET);
2549 WREG32(RLC_CNTL, 0);
2552 static void r600_rlc_start(struct radeon_device *rdev)
2554 WREG32(RLC_CNTL, RLC_ENABLE);
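/* Load the RLC microcode: halt the RLC, clear its host bridge registers,
 * write the family specific ucode words through RLC_UCODE_ADDR/RLC_UCODE_DATA
 * and restart the RLC. */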
2557 static int r600_rlc_init(struct radeon_device *rdev)
2560 const __be32 *fw_data;
2565 r600_rlc_stop(rdev);
2567 WREG32(RLC_HB_BASE, 0);
2568 WREG32(RLC_HB_CNTL, 0);
2569 WREG32(RLC_HB_RPTR, 0);
2570 WREG32(RLC_HB_WPTR, 0);
2571 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2572 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2573 WREG32(RLC_MC_CNTL, 0);
2574 WREG32(RLC_UCODE_CNTL, 0);
2576 fw_data = (const __be32 *)rdev->rlc_fw->data;
2577 if (rdev->family >= CHIP_CEDAR) {
2578 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
2579 WREG32(RLC_UCODE_ADDR, i);
2580 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2582 } else if (rdev->family >= CHIP_RV770) {
2583 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2584 WREG32(RLC_UCODE_ADDR, i);
2585 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2588 for (i = 0; i < RLC_UCODE_SIZE; i++) {
2589 WREG32(RLC_UCODE_ADDR, i);
2590 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2593 WREG32(RLC_UCODE_ADDR, 0);
2595 r600_rlc_start(rdev);
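/* Enable the IH: set ENABLE_INTR in IH_CNTL and IH_RB_ENABLE in IH_RB_CNTL
 * and mark the IH as enabled. r600_disable_interrupts() below does the
 * inverse and also resets the ring pointers. */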
2600 static void r600_enable_interrupts(struct radeon_device *rdev)
2602 u32 ih_cntl = RREG32(IH_CNTL);
2603 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2605 ih_cntl |= ENABLE_INTR;
2606 ih_rb_cntl |= IH_RB_ENABLE;
2607 WREG32(IH_CNTL, ih_cntl);
2608 WREG32(IH_RB_CNTL, ih_rb_cntl);
2609 rdev->ih.enabled = true;
2612 void r600_disable_interrupts(struct radeon_device *rdev)
2614 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2615 u32 ih_cntl = RREG32(IH_CNTL);
2617 ih_rb_cntl &= ~IH_RB_ENABLE;
2618 ih_cntl &= ~ENABLE_INTR;
2619 WREG32(IH_RB_CNTL, ih_rb_cntl);
2620 WREG32(IH_CNTL, ih_cntl);
2621 /* set rptr, wptr to 0 */
2622 WREG32(IH_RB_RPTR, 0);
2623 WREG32(IH_RB_WPTR, 0);
2624 rdev->ih.enabled = false;
2629 static void r600_disable_interrupt_state(struct radeon_device *rdev)
2633 WREG32(CP_INT_CNTL, 0);
2634 WREG32(GRBM_INT_CNTL, 0);
2635 WREG32(DxMODE_INT_MASK, 0);
2636 if (ASIC_IS_DCE3(rdev)) {
2637 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2638 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
2639 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2640 WREG32(DC_HPD1_INT_CONTROL, tmp);
2641 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2642 WREG32(DC_HPD2_INT_CONTROL, tmp);
2643 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2644 WREG32(DC_HPD3_INT_CONTROL, tmp);
2645 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2646 WREG32(DC_HPD4_INT_CONTROL, tmp);
2647 if (ASIC_IS_DCE32(rdev)) {
2648 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2649 WREG32(DC_HPD5_INT_CONTROL, tmp);
2650 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2651 WREG32(DC_HPD6_INT_CONTROL, tmp);
2654 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2655 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2656 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2657 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2658 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2659 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2660 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2661 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
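/* Set up the IH: allocate the ring, load the RLC, program INTERRUPT_CNTL and
 * the IH ring base/size/control registers, then enable the IH with every
 * interrupt source still masked off. */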
2665 int r600_irq_init(struct radeon_device *rdev)
2669 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2672 ret = r600_ih_ring_alloc(rdev);
2677 r600_disable_interrupts(rdev);
2680 ret = r600_rlc_init(rdev);
2682 r600_ih_ring_fini(rdev);
2686 /* setup interrupt control */
2687 /* set dummy read address to ring address */
2688 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
2689 interrupt_cntl = RREG32(INTERRUPT_CNTL);
2690 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
2691 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
2693 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
2694 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
2695 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
2696 WREG32(INTERRUPT_CNTL, interrupt_cntl);
2698 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
2699 rb_bufsz = drm_order(rdev->ih.ring_size / 4);
2701 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
2702 IH_WPTR_OVERFLOW_CLEAR |
2704 /* WPTR writeback, not yet */
2705 /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
2706 WREG32(IH_RB_WPTR_ADDR_LO, 0);
2707 WREG32(IH_RB_WPTR_ADDR_HI, 0);
2709 WREG32(IH_RB_CNTL, ih_rb_cntl);
2711 /* set rptr, wptr to 0 */
2712 WREG32(IH_RB_RPTR, 0);
2713 WREG32(IH_RB_WPTR, 0);
2715 /* Default settings for IH_CNTL (disabled at first) */
2716 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
2717 /* RPTR_REARM only works if msi's are enabled */
2718 if (rdev->msi_enabled)
2719 ih_cntl |= RPTR_REARM;
2722 ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
2724 WREG32(IH_CNTL, ih_cntl);
2726 /* force the active interrupt state to all disabled */
2727 if (rdev->family >= CHIP_CEDAR)
2728 evergreen_disable_interrupt_state(rdev);
2730 r600_disable_interrupt_state(rdev);
2733 r600_enable_interrupts(rdev);
2738 void r600_irq_suspend(struct radeon_device *rdev)
2740 r600_irq_disable(rdev);
2741 r600_rlc_stop(rdev);
2744 void r600_irq_fini(struct radeon_device *rdev)
2746 r600_irq_suspend(rdev);
2747 r600_ih_ring_fini(rdev);
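/* Program the interrupt enable registers (CP, vblank, HPD, HDMI, GUI idle)
 * according to which sources are currently requested in rdev->irq. */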
2750 int r600_irq_set(struct radeon_device *rdev)
2752 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
2754 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
2755 u32 grbm_int_cntl = 0;
2758 if (!rdev->irq.installed) {
2759 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
2762 /* don't enable anything if the ih is disabled */
2763 if (!rdev->ih.enabled) {
2764 r600_disable_interrupts(rdev);
2765 /* force the active interrupt state to all disabled */
2766 r600_disable_interrupt_state(rdev);
2770 hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
2771 if (ASIC_IS_DCE3(rdev)) {
2772 hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
2773 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2774 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2775 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2776 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
2777 if (ASIC_IS_DCE32(rdev)) {
2778 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
2779 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
2782 hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
2783 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2784 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2785 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2788 if (rdev->irq.sw_int) {
2789 DRM_DEBUG("r600_irq_set: sw int\n");
2790 cp_int_cntl |= RB_INT_ENABLE;
2792 if (rdev->irq.crtc_vblank_int[0]) {
2793 DRM_DEBUG("r600_irq_set: vblank 0\n");
2794 mode_int |= D1MODE_VBLANK_INT_MASK;
2796 if (rdev->irq.crtc_vblank_int[1]) {
2797 DRM_DEBUG("r600_irq_set: vblank 1\n");
2798 mode_int |= D2MODE_VBLANK_INT_MASK;
2800 if (rdev->irq.hpd[0]) {
2801 DRM_DEBUG("r600_irq_set: hpd 1\n");
2802 hpd1 |= DC_HPDx_INT_EN;
2804 if (rdev->irq.hpd[1]) {
2805 DRM_DEBUG("r600_irq_set: hpd 2\n");
2806 hpd2 |= DC_HPDx_INT_EN;
2808 if (rdev->irq.hpd[2]) {
2809 DRM_DEBUG("r600_irq_set: hpd 3\n");
2810 hpd3 |= DC_HPDx_INT_EN;
2812 if (rdev->irq.hpd[3]) {
2813 DRM_DEBUG("r600_irq_set: hpd 4\n");
2814 hpd4 |= DC_HPDx_INT_EN;
2816 if (rdev->irq.hpd[4]) {
2817 DRM_DEBUG("r600_irq_set: hpd 5\n");
2818 hpd5 |= DC_HPDx_INT_EN;
2820 if (rdev->irq.hpd[5]) {
2821 DRM_DEBUG("r600_irq_set: hpd 6\n");
2822 hpd6 |= DC_HPDx_INT_EN;
2824 if (rdev->irq.hdmi[0]) {
2825 DRM_DEBUG("r600_irq_set: hdmi 1\n");
2826 hdmi1 |= R600_HDMI_INT_EN;
2828 if (rdev->irq.hdmi[1]) {
2829 DRM_DEBUG("r600_irq_set: hdmi 2\n");
2830 hdmi2 |= R600_HDMI_INT_EN;
2832 if (rdev->irq.gui_idle) {
2833 DRM_DEBUG("gui idle\n");
2834 grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
2837 WREG32(CP_INT_CNTL, cp_int_cntl);
2838 WREG32(DxMODE_INT_MASK, mode_int);
2839 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
2840 WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
2841 if (ASIC_IS_DCE3(rdev)) {
2842 WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
2843 WREG32(DC_HPD1_INT_CONTROL, hpd1);
2844 WREG32(DC_HPD2_INT_CONTROL, hpd2);
2845 WREG32(DC_HPD3_INT_CONTROL, hpd3);
2846 WREG32(DC_HPD4_INT_CONTROL, hpd4);
2847 if (ASIC_IS_DCE32(rdev)) {
2848 WREG32(DC_HPD5_INT_CONTROL, hpd5);
2849 WREG32(DC_HPD6_INT_CONTROL, hpd6);
2852 WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
2853 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
2854 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
2855 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
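/* Latch the display interrupt status registers for the caller and acknowledge
 * any pending vblank, vline, HPD and HDMI interrupts. */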
2861 static inline void r600_irq_ack(struct radeon_device *rdev,
2864 u32 *disp_int_cont2)
2868 if (ASIC_IS_DCE3(rdev)) {
2869 *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
2870 *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
2871 *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
2873 *disp_int = RREG32(DISP_INTERRUPT_STATUS);
2874 *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
2875 *disp_int_cont2 = 0;
2878 if (*disp_int & LB_D1_VBLANK_INTERRUPT)
2879 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
2880 if (*disp_int & LB_D1_VLINE_INTERRUPT)
2881 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
2882 if (*disp_int & LB_D2_VBLANK_INTERRUPT)
2883 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
2884 if (*disp_int & LB_D2_VLINE_INTERRUPT)
2885 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
2886 if (*disp_int & DC_HPD1_INTERRUPT) {
2887 if (ASIC_IS_DCE3(rdev)) {
2888 tmp = RREG32(DC_HPD1_INT_CONTROL);
2889 tmp |= DC_HPDx_INT_ACK;
2890 WREG32(DC_HPD1_INT_CONTROL, tmp);
2892 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
2893 tmp |= DC_HPDx_INT_ACK;
2894 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2897 if (*disp_int & DC_HPD2_INTERRUPT) {
2898 if (ASIC_IS_DCE3(rdev)) {
2899 tmp = RREG32(DC_HPD2_INT_CONTROL);
2900 tmp |= DC_HPDx_INT_ACK;
2901 WREG32(DC_HPD2_INT_CONTROL, tmp);
2903 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
2904 tmp |= DC_HPDx_INT_ACK;
2905 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2908 if (*disp_int_cont & DC_HPD3_INTERRUPT) {
2909 if (ASIC_IS_DCE3(rdev)) {
2910 tmp = RREG32(DC_HPD3_INT_CONTROL);
2911 tmp |= DC_HPDx_INT_ACK;
2912 WREG32(DC_HPD3_INT_CONTROL, tmp);
2914 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
2915 tmp |= DC_HPDx_INT_ACK;
2916 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2919 if (*disp_int_cont & DC_HPD4_INTERRUPT) {
2920 tmp = RREG32(DC_HPD4_INT_CONTROL);
2921 tmp |= DC_HPDx_INT_ACK;
2922 WREG32(DC_HPD4_INT_CONTROL, tmp);
2924 if (ASIC_IS_DCE32(rdev)) {
2925 if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
2926 tmp = RREG32(DC_HPD5_INT_CONTROL);
2927 tmp |= DC_HPDx_INT_ACK;
2928 WREG32(DC_HPD5_INT_CONTROL, tmp);
2930 if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
2931 tmp = RREG32(DC_HPD6_INT_CONTROL);
2932 tmp |= DC_HPDx_INT_ACK;
2933 WREG32(DC_HPD6_INT_CONTROL, tmp);
2936 if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
2937 WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
2939 if (ASIC_IS_DCE3(rdev)) {
2940 if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
2941 WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
2944 if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
2945 WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
2950 void r600_irq_disable(struct radeon_device *rdev)
2952 u32 disp_int, disp_int_cont, disp_int_cont2;
2954 r600_disable_interrupts(rdev);
2955 /* Wait and acknowledge irq */
2957 r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
2958 r600_disable_interrupt_state(rdev);
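/* Fetch the current IH write pointer, handling ring overflow by advancing the
 * read pointer to the oldest vector that has not been overwritten. */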
2961 static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
2965 /* XXX use writeback */
2966 wptr = RREG32(IH_RB_WPTR);
2968 if (wptr & RB_OVERFLOW) {
2969 /* When a ring buffer overflow happens, start parsing interrupts
2970 * from the last non-overwritten vector (wptr + 16). Hopefully
2971 * this should allow us to catch up.
2973 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
2974 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
2975 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
2976 tmp = RREG32(IH_RB_CNTL);
2977 tmp |= IH_WPTR_OVERFLOW_CLEAR;
2978 WREG32(IH_RB_CNTL, tmp);
2980 return (wptr & rdev->ih.ptr_mask);
2984 * Each IV ring entry is 128 bits:
2985 * [7:0] - interrupt source id
2987 * [59:32] - interrupt source data
2988 * [127:60] - reserved
2990 * The basic interrupt vector entries
2991 * are decoded as follows:
2992 * src_id src_data description
2997 * 19 0 FP Hot plug detection A
2998 * 19 1 FP Hot plug detection B
2999 * 19 2 DAC A auto-detection
3000 * 19 3 DAC B auto-detection
3006 * 181 - EOP Interrupt
3009 * Note, these are based on r600 and may need to be
3010 * adjusted or added to on newer asics
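 *
 * A sketch of how one vector is consumed (this mirrors r600_irq_process
 * below; all names come from this file):
 *   ring_index = rptr / 4;
 *   src_id   = rdev->ih.ring[ring_index] & 0xff;
 *   src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
 *   rptr     = (rptr + 16) & rdev->ih.ptr_mask;  (each entry is 16 bytes)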
3013 int r600_irq_process(struct radeon_device *rdev)
3015 u32 wptr = r600_get_ih_wptr(rdev);
3016 u32 rptr = rdev->ih.rptr;
3017 u32 src_id, src_data;
3018 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
3019 unsigned long flags;
3020 bool queue_hotplug = false;
3022 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3023 if (!rdev->ih.enabled)
3026 spin_lock_irqsave(&rdev->ih.lock, flags);
3029 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3032 if (rdev->shutdown) {
3033 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3038 /* display interrupts */
3039 r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
3041 rdev->ih.wptr = wptr;
3042 while (rptr != wptr) {
3043 /* wptr/rptr are in bytes! */
3044 ring_index = rptr / 4;
3045 src_id = rdev->ih.ring[ring_index] & 0xff;
3046 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
3049 case 1: /* D1 vblank/vline */
3051 case 0: /* D1 vblank */
3052 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
3053 drm_handle_vblank(rdev->ddev, 0);
3054 rdev->pm.vblank_sync = true;
3055 wake_up(&rdev->irq.vblank_queue);
3056 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3057 DRM_DEBUG("IH: D1 vblank\n");
3060 case 1: /* D1 vline */
3061 if (disp_int & LB_D1_VLINE_INTERRUPT) {
3062 disp_int &= ~LB_D1_VLINE_INTERRUPT;
3063 DRM_DEBUG("IH: D1 vline\n");
3067 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3071 case 5: /* D2 vblank/vline */
3073 case 0: /* D2 vblank */
3074 if (disp_int & LB_D2_VBLANK_INTERRUPT) {
3075 drm_handle_vblank(rdev->ddev, 1);
3076 rdev->pm.vblank_sync = true;
3077 wake_up(&rdev->irq.vblank_queue);
3078 disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3079 DRM_DEBUG("IH: D2 vblank\n");
3082 case 1: /* D2 vline */
3083 if (disp_int & LB_D2_VLINE_INTERRUPT) {
3084 disp_int &= ~LB_D2_VLINE_INTERRUPT;
3085 DRM_DEBUG("IH: D2 vline\n");
3089 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3093 case 19: /* HPD/DAC hotplug */
3096 if (disp_int & DC_HPD1_INTERRUPT) {
3097 disp_int &= ~DC_HPD1_INTERRUPT;
3098 queue_hotplug = true;
3099 DRM_DEBUG("IH: HPD1\n");
3103 if (disp_int & DC_HPD2_INTERRUPT) {
3104 disp_int &= ~DC_HPD2_INTERRUPT;
3105 queue_hotplug = true;
3106 DRM_DEBUG("IH: HPD2\n");
3110 if (disp_int_cont & DC_HPD3_INTERRUPT) {
3111 disp_int_cont &= ~DC_HPD3_INTERRUPT;
3112 queue_hotplug = true;
3113 DRM_DEBUG("IH: HPD3\n");
3117 if (disp_int_cont & DC_HPD4_INTERRUPT) {
3118 disp_int_cont &= ~DC_HPD4_INTERRUPT;
3119 queue_hotplug = true;
3120 DRM_DEBUG("IH: HPD4\n");
3124 if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
3125 disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
3126 queue_hotplug = true;
3127 DRM_DEBUG("IH: HPD5\n");
3131 if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
3132 disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
3133 queue_hotplug = true;
3134 DRM_DEBUG("IH: HPD6\n");
3138 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3143 DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
3144 r600_audio_schedule_polling(rdev);
3146 case 176: /* CP_INT in ring buffer */
3147 case 177: /* CP_INT in IB1 */
3148 case 178: /* CP_INT in IB2 */
3149 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3150 radeon_fence_process(rdev);
3152 case 181: /* CP EOP event */
3153 DRM_DEBUG("IH: CP EOP\n");
3155 case 233: /* GUI IDLE */
3156 DRM_DEBUG("IH: GUI idle\n");
3157 rdev->pm.gui_idle = true;
3158 wake_up(&rdev->irq.idle_queue);
3161 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3165 /* wptr/rptr are in bytes! */
3167 rptr &= rdev->ih.ptr_mask;
3169 /* make sure wptr hasn't changed while processing */
3170 wptr = r600_get_ih_wptr(rdev);
3171 if (wptr != rdev->ih.wptr)
3174 queue_work(rdev->wq, &rdev->hotplug_work);
3175 rdev->ih.rptr = rptr;
3176 WREG32(IH_RB_RPTR, rdev->ih.rptr);
3177 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3184 #if defined(CONFIG_DEBUG_FS)
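/* Dump the CP ring state: hardware and driver copies of the read/write
 * pointers plus the outstanding ring contents. */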
3186 static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
3188 struct drm_info_node *node = (struct drm_info_node *) m->private;
3189 struct drm_device *dev = node->minor->dev;
3190 struct radeon_device *rdev = dev->dev_private;
3191 unsigned count, i, j;
3193 radeon_ring_free_size(rdev);
3194 count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
3195 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
3196 seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
3197 seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
3198 seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
3199 seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
3200 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
3201 seq_printf(m, "%u dwords in ring\n", count);
3203 for (j = 0; j <= count; j++) {
3204 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
3205 i = (i + 1) & rdev->cp.ptr_mask;
3210 static int r600_debugfs_mc_info(struct seq_file *m, void *data)
3212 struct drm_info_node *node = (struct drm_info_node *) m->private;
3213 struct drm_device *dev = node->minor->dev;
3214 struct radeon_device *rdev = dev->dev_private;
3216 DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
3217 DREG32_SYS(m, rdev, VM_L2_STATUS);
3221 static struct drm_info_list r600_mc_info_list[] = {
3222 {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
3223 {"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
3227 int r600_debugfs_mc_info_init(struct radeon_device *rdev)
3229 #if defined(CONFIG_DEBUG_FS)
3230 return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
3237 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
3238 * rdev: radeon device structure
3239 * bo: buffer object struct which userspace is waiting for idle
3241 * Some R6XX/R7XX chips don't seem to take into account the HDP flush
3242 * performed through the ring buffer; this leads to corruption in rendering,
3243 * see http://bugzilla.kernel.org/show_bug.cgi?id=15186. To avoid this we
3244 * perform the HDP flush directly by writing the register through MMIO.
3246 void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3248 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);