/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
void r600_get_power_state(struct radeon_device *rdev,
			  enum radeon_pm_action action)
{
	int i;

	rdev->pm.can_upclock = true;
	rdev->pm.can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (action) {
		case PM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.can_downclock = false;
			break;
		case PM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index - 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case PM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case PM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.can_upclock = false;
			break;
		case PM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			break;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (action) {
		case PM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.can_downclock = false;
			break;
		case PM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.can_downclock = false;
			}
			break;
		case PM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.can_upclock = false;
			}
			break;
		case PM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.can_upclock = false;
			break;
		case PM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			break;
		}
	}

	DRM_INFO("Requested: e: %d m: %d p: %d\n",
		 rdev->pm.power_state[rdev->pm.requested_power_state_index].
		 clock_info[rdev->pm.requested_clock_mode_index].sclk,
		 rdev->pm.power_state[rdev->pm.requested_power_state_index].
		 clock_info[rdev->pm.requested_clock_mode_index].mclk,
		 rdev->pm.power_state[rdev->pm.requested_power_state_index].
		 pcie_lanes);
}
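
/* Apply the power state chosen by r600_get_power_state(): once the GUI
 * block is idle, reprogram the engine and memory clocks.  Both clocks are
 * clamped to the defaults so dynamic switching never overclocks the board,
 * and the memory clock is left alone while more than one crtc is active.
 */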
void r600_set_power_state(struct radeon_device *rdev)
{
	u32 sclk, mclk;

	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
		return;

	if (radeon_gui_idle(rdev)) {
		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].sclk;
		if (sclk > rdev->clock.default_sclk)
			sclk = rdev->clock.default_sclk;

		mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
			clock_info[rdev->pm.requested_clock_mode_index].mclk;
		if (mclk > rdev->clock.default_mclk)
			mclk = rdev->clock.default_mclk;
		/* don't change the mclk with multiple crtcs */
		if (rdev->pm.active_crtc_count > 1)
			mclk = rdev->clock.default_mclk;

		/* set engine clock */
		if (sclk != rdev->pm.current_sclk) {
			radeon_sync_with_vblank(rdev);
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_engine_clock(rdev, sclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_sclk = sclk;
			DRM_INFO("Setting: e: %d\n", sclk);
		}

		/* set memory clock */
		if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
			radeon_sync_with_vblank(rdev);
			radeon_pm_debug_check_in_vbl(rdev, false);
			radeon_set_memory_clock(rdev, mclk);
			radeon_pm_debug_check_in_vbl(rdev, true);
			rdev->pm.current_mclk = mclk;
			DRM_INFO("Setting: m: %d\n", mclk);
		}

		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
	} else
		DRM_INFO("GUI not idle!!!\n");
}
bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
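
/* Point VM context 0 at the page table in VRAM: enable the L2 cache and
 * the per-client L1 TLBs, program the GTT start/end and table base, set a
 * dummy page as the protection-fault fallback, then flush the TLB so the
 * mapping takes effect.
 */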
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
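
/* Reprogram the memory controller apertures while the MC is stopped: the
 * system aperture has to cover both VRAM and the AGP/GTT range, and
 * MC_VM_FB_LOCATION encodes the VRAM start/end in 16MB units.
 */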
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address in the GPU's
 * address space as in the CPU (PCI) address space, as some GPUs have
 * issues when it is reprogrammed to a different address.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then place VRAM adjacent to the AGP aperture, as we
 * need them to be contiguous from the GPU's point of view so that we can
 * program the GPU to catch accesses outside them (weird GPU policy, see ??).
 *
 * This function never fails; in the worst case VRAM or GTT is limited.
 *
 * Note: GTT start, end, and size should be initialized before calling this
 * function on AGP platforms.
 */
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP)
			base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
		radeon_vram_location(rdev, &rdev->mc, base);
		radeon_gtt_location(rdev, mc);
	}
}
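
/* Probe the memory controller: derive the VRAM bus width from the RAMCFG
 * channel size/count fields, read the VRAM size from CONFIG_MEMSIZE, and
 * then lay out the VRAM and GTT apertures.
 */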
int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP)
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	radeon_update_bandwidth_info(rdev);
	return 0;
}
/* We don't check whether the GPU really needs a reset; we simply do the
 * reset, and it's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering blocks is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		udelay(50);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	mdelay(1);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_resume(rdev, &save);
	return 0;
}
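
/* Lockup detection: if the GUI block is idle the GPU is not hung and the
 * tracking state is refreshed; otherwise push two NOP dwords through the
 * ring and let the shared CP-progress helper decide whether the read
 * pointer is still advancing.
 */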
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;
	int r;

	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}
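
/* Build the 2-bit-per-pipe map that assigns each tile pipe to an enabled
 * render backend: walk the pipes in swizzle order and hand out enabled
 * backends round-robin, skipping any backend set in the disable mask.
 */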
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_backend;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_pipe;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}
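
/* Count the set bits in the low 32 bits; used to turn the pipe/backend
 * disable masks read from the chip into unit counts.
 */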
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}
void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 backend_map;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE(0);
	rdev->config.r600.tiling_group_size = 256;
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	cc_rb_backend_disable |=
		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
							(R6XX_MAX_BACKENDS -
							 r600_count_pipe_bits((cc_rb_backend_disable &
									       R6XX_MAX_BACKENDS_MASK) >> 16)),
							(cc_rb_backend_disable >> 16));

	tiling_config |= BACKEND_MAP(backend_map);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256)
		tmp = 256;
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}
/*
 * Indirect register accessors
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}
/*
 * CP & Ring
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}
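
/* Fetch the PFP, ME and RLC microcode images from userspace via
 * request_firmware() and sanity-check their sizes against the expected
 * values for the chip generation (r6xx, r7xx or evergreen).
 */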
int r600_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600:
		chip_name = "R600";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV610:
		chip_name = "RV610";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV630:
		chip_name = "RV630";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV620:
		chip_name = "RV620";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV635:
		chip_name = "RV635";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV670:
		chip_name = "RV670";
		rlc_chip_name = "R600";
		break;
	case CHIP_RS780:
	case CHIP_RS880:
		chip_name = "RS780";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV770:
		chip_name = "RV770";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV710:
		chip_name = "RV710";
		rlc_chip_name = "R700";
		break;
	case CHIP_CEDAR:
		chip_name = "CEDAR";
		rlc_chip_name = "CEDAR";
		break;
	case CHIP_REDWOOD:
		chip_name = "REDWOOD";
		rlc_chip_name = "REDWOOD";
		break;
	case CHIP_JUNIPER:
		chip_name = "JUNIPER";
		rlc_chip_name = "JUNIPER";
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		chip_name = "CYPRESS";
		rlc_chip_name = "CYPRESS";
		break;
	default:
		BUG();
	}

	if (rdev->family >= CHIP_CEDAR) {
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
	} else if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
		rlc_req_size = RLC_UCODE_SIZE * 4;
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
	}
	return err;
}
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
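
/* Bring the CP out of halt: emit the ME_INITIALIZE packet that tells the
 * microcode how many HW contexts this generation has, then clear the halt
 * bits in CP_ME_CNTL so the prefetch and micro engines start running.
 */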
int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family >= CHIP_CEDAR) {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
	} else if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}
int r600_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);
	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}
void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}
void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}
void r600_cp_fini(struct radeon_device *rdev)
{
	r600_cp_stop(rdev);
	radeon_ring_fini(rdev);
}
/*
 * GPU scratch register helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
	}
}
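
/* Basic ring sanity test: seed a scratch register with 0xCAFEDEAD, emit a
 * SET_CONFIG_REG packet that writes 0xDEADBEEF to it through the ring, and
 * poll until the CP has executed the packet or the timeout expires.
 */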
int r600_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}
void r600_wb_disable(struct radeon_device *rdev)
{
	int r;

	WREG32(SCRATCH_UMSK, 0);
	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
}
void r600_wb_fini(struct radeon_device *rdev)
{
	r600_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
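
/* Allocate (once) and pin a GTT page for CP writeback, then point the
 * scratch and ring read-pointer writeback addresses at it and unmask the
 * scratch registers so the CP actually writes them back.
 */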
int r600_wb_enable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
				RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			r600_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				&rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			r600_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			r600_wb_fini(rdev);
			return r;
		}
	}
	WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
	WREG32(SCRATCH_UMSK, 0xff);
	return 0;
}
void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Also consider EVENT_WRITE_EOP.  It handles the interrupts + timestamps + events */

	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
	/* wait for 3D idle clean */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, fence->seq);
	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
	radeon_ring_write(rdev, RB_INT_STAT);
}
int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_pages, struct radeon_fence *fence)
{
	int r;

	mutex_lock(&rdev->r600_blit.mutex);
	rdev->r600_blit.vb_ib = NULL;
	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
	if (r) {
		if (rdev->r600_blit.vb_ib)
			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
		mutex_unlock(&rdev->r600_blit.mutex);
		return r;
	}
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
	r600_blit_done_copy(rdev, fence);
	mutex_unlock(&rdev->r600_blit.mutex);
	return 0;
}
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}
bool r600_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	reg = RREG32(D1CRTC_CONTROL) |
		RREG32(D2CRTC_CONTROL);
	if (reg & CRTC_EN)
		return true;

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
		return true;

	return false;
}
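
/* One-time hardware bring-up shared by init and resume: program the MC,
 * enable AGP or the PCIE GART, initialize the GPU state, blitter, IH and
 * CP, and finally enable writeback.
 */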
int r600_startup(struct radeon_device *rdev)
{
	int r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);
	r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}
	/* pin copy shader into vram */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
				&rdev->r600_blit.shader_gpu_addr);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
			return r;
		}
	}
	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;
	/* the write back buffer is not vital so don't worry about failure */
	r600_wb_enable(rdev);
	return 0;
}
void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}
2182 int r600_resume(struct radeon_device *rdev)
2186 /* Do not reset GPU before posting; on r600 hw, unlike r500 hw,
2187 * posting will perform the necessary tasks to bring the GPU back
2188 * into good shape.
2189 */
2190 /* post card */
2191 atom_asic_init(rdev->mode_info.atom_context);
2192 /* Initialize clocks */
2193 r = radeon_clocks_init(rdev);
2198 r = r600_startup(rdev);
2199 if (r) {
2200 DRM_ERROR("r600 startup failed on resume\n");
2201 return r;
2202 }
2204 r = r600_ib_test(rdev);
2205 if (r) {
2206 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
2207 return r;
2208 }
2210 r = r600_audio_init(rdev);
2211 if (r) {
2212 DRM_ERROR("radeon: audio resume failed\n");
2213 return r;
2214 }
2219 int r600_suspend(struct radeon_device *rdev)
2223 r600_audio_fini(rdev);
2224 /* FIXME: we should wait for ring to be empty */
2226 rdev->cp.ready = false;
2227 r600_irq_suspend(rdev);
2228 r600_wb_disable(rdev);
2229 r600_pcie_gart_disable(rdev);
2230 /* unpin shaders bo */
2231 if (rdev->r600_blit.shader_obj) {
2232 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
2233 if (!r) {
2234 radeon_bo_unpin(rdev->r600_blit.shader_obj);
2235 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
2236 }
2241 /* The plan is to move initialization into this function and use
2242 * helper functions so that radeon_device_init does little more
2243 * than call ASIC-specific functions. This should also allow the
2244 * removal of a bunch of callback functions, like vram_info.
2245 */
2247 int r600_init(struct radeon_device *rdev)
2251 r = radeon_dummy_page_init(rdev);
2254 if (r600_debugfs_mc_info_init(rdev)) {
2255 DRM_ERROR("Failed to register debugfs file for mc!\n");
2257 /* This doesn't do much */
2258 r = radeon_gem_init(rdev);
2262 if (!radeon_get_bios(rdev)) {
2263 if (ASIC_IS_AVIVO(rdev))
2266 /* Must be an ATOMBIOS */
2267 if (!rdev->is_atom_bios) {
2268 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
2271 r = radeon_atombios_init(rdev);
2274 /* Post card if necessary */
2275 if (!r600_card_posted(rdev)) {
2277 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2280 DRM_INFO("GPU not posted. Posting now...\n");
2281 atom_asic_init(rdev->mode_info.atom_context);
2283 /* Initialize scratch registers */
2284 r600_scratch_init(rdev);
2285 /* Initialize surface registers */
2286 radeon_surface_init(rdev);
2287 /* Initialize clocks */
2288 radeon_get_clock_info(rdev->ddev);
2289 r = radeon_clocks_init(rdev);
2292 /* Initialize power management */
2293 radeon_pm_init(rdev);
2295 r = radeon_fence_driver_init(rdev);
2298 if (rdev->flags & RADEON_IS_AGP) {
2299 r = radeon_agp_init(rdev);
2301 radeon_agp_disable(rdev);
2303 r = r600_mc_init(rdev);
2306 /* Memory manager */
2307 r = radeon_bo_init(rdev);
2311 r = radeon_irq_kms_init(rdev);
2315 rdev->cp.ring_obj = NULL;
2316 r600_ring_init(rdev, 1024 * 1024);
2318 rdev->ih.ring_obj = NULL;
2319 r600_ih_ring_init(rdev, 64 * 1024);
2321 r = r600_pcie_gart_init(rdev);
2325 rdev->accel_working = true;
2326 r = r600_startup(rdev);
2328 dev_err(rdev->dev, "disabling GPU acceleration\n");
2331 r600_irq_fini(rdev);
2332 radeon_irq_kms_fini(rdev);
2333 r600_pcie_gart_fini(rdev);
2334 rdev->accel_working = false;
2336 if (rdev->accel_working) {
2337 r = radeon_ib_pool_init(rdev);
2339 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2340 rdev->accel_working = false;
2342 r = r600_ib_test(rdev);
2344 dev_err(rdev->dev, "IB test failed (%d).\n", r);
2345 rdev->accel_working = false;
2350 r = r600_audio_init(rdev);
2352 return r; /* TODO error handling */
2356 void r600_fini(struct radeon_device *rdev)
2358 radeon_pm_fini(rdev);
2359 r600_audio_fini(rdev);
2360 r600_blit_fini(rdev);
2363 r600_irq_fini(rdev);
2364 radeon_irq_kms_fini(rdev);
2365 r600_pcie_gart_fini(rdev);
2366 radeon_agp_fini(rdev);
2367 radeon_gem_fini(rdev);
2368 radeon_fence_driver_fini(rdev);
2369 radeon_clocks_fini(rdev);
2370 radeon_bo_fini(rdev);
2371 radeon_atombios_fini(rdev);
2374 radeon_dummy_page_fini(rdev);
2381 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2383 /* emit an INDIRECT_BUFFER packet pointing at the IB's GPU address */
2384 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2385 radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
2386 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
2387 radeon_ring_write(rdev, ib->length_dw);
2390 int r600_ib_test(struct radeon_device *rdev)
2392 struct radeon_ib *ib;
2398 r = radeon_scratch_get(rdev, &scratch);
2400 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2403 WREG32(scratch, 0xCAFEDEAD);
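/* The test seeds the scratch register with 0xCAFEDEAD above, then has the
 * IB built below overwrite it with 0xDEADBEEF; reading the new value back
 * from the CPU proves the CP fetched and executed the indirect buffer.
 */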
2404 r = radeon_ib_get(rdev, &ib);
2406 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2409 ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2410 ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2411 ib->ptr[2] = 0xDEADBEEF;
2412 ib->ptr[3] = PACKET2(0);
2413 ib->ptr[4] = PACKET2(0);
2414 ib->ptr[5] = PACKET2(0);
2415 ib->ptr[6] = PACKET2(0);
2416 ib->ptr[7] = PACKET2(0);
2417 ib->ptr[8] = PACKET2(0);
2418 ib->ptr[9] = PACKET2(0);
2419 ib->ptr[10] = PACKET2(0);
2420 ib->ptr[11] = PACKET2(0);
2421 ib->ptr[12] = PACKET2(0);
2422 ib->ptr[13] = PACKET2(0);
2423 ib->ptr[14] = PACKET2(0);
2424 ib->ptr[15] = PACKET2(0);
2426 r = radeon_ib_schedule(rdev, ib);
2428 radeon_scratch_free(rdev, scratch);
2429 radeon_ib_free(rdev, &ib);
2430 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2433 r = radeon_fence_wait(ib->fence, false);
2435 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2438 for (i = 0; i < rdev->usec_timeout; i++) {
2439 tmp = RREG32(scratch);
2440 if (tmp == 0xDEADBEEF)
2441 break;
2442 DRM_UDELAY(1);
2443 }
2444 if (i < rdev->usec_timeout) {
2445 DRM_INFO("ib test succeeded in %u usecs\n", i);
2447 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2451 radeon_scratch_free(rdev, scratch);
2452 radeon_ib_free(rdev, &ib);
2459 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty
2460 * much the same as the CP ring buffer, but in reverse. Rather than the
2461 * CPU writing to the ring and the GPU consuming, the GPU writes to the
2462 * ring and the host consumes. As the host irq handler processes
2463 * interrupts, it increments the rptr. When the rptr catches up with the
2464 * wptr, all the current interrupts have been processed.
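*
* A minimal consumer sketch (illustrative only; r600_irq_process below is
* the real implementation): fetch the wptr, walk the 16-byte vectors until
* the rptr catches up with it, then publish the new rptr back to the
* hardware:
*
*	wptr = r600_get_ih_wptr(rdev);
*	rptr = rdev->ih.rptr;
*	while (rptr != wptr) {
*		... decode rdev->ih.ring[rptr / 4] ...
*		rptr = (rptr + 16) & rdev->ih.ptr_mask;
*	}
*	WREG32(IH_RB_RPTR, rptr);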
2467 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2471 /* Align ring size */
2472 rb_bufsz = drm_order(ring_size / 4);
2473 ring_size = (1 << rb_bufsz) * 4;
2474 rdev->ih.ring_size = ring_size;
2475 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
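/* Example: a 64 KiB request gives rb_bufsz = drm_order(16384) = 14, so
 * ring_size stays (1 << 14) * 4 = 64 KiB and ptr_mask = 0xFFFF, letting
 * the byte-based rptr/wptr wrap with a simple AND.
 */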
2479 static int r600_ih_ring_alloc(struct radeon_device *rdev)
2483 /* Allocate ring buffer */
2484 if (rdev->ih.ring_obj == NULL) {
2485 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
2487 RADEON_GEM_DOMAIN_GTT,
2488 &rdev->ih.ring_obj);
2490 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2493 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2494 if (unlikely(r != 0))
2496 r = radeon_bo_pin(rdev->ih.ring_obj,
2497 RADEON_GEM_DOMAIN_GTT,
2498 &rdev->ih.gpu_addr);
2500 radeon_bo_unreserve(rdev->ih.ring_obj);
2501 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
2504 r = radeon_bo_kmap(rdev->ih.ring_obj,
2505 (void **)&rdev->ih.ring);
2506 radeon_bo_unreserve(rdev->ih.ring_obj);
2508 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
2515 static void r600_ih_ring_fini(struct radeon_device *rdev)
2518 if (rdev->ih.ring_obj) {
2519 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2520 if (likely(r == 0)) {
2521 radeon_bo_kunmap(rdev->ih.ring_obj);
2522 radeon_bo_unpin(rdev->ih.ring_obj);
2523 radeon_bo_unreserve(rdev->ih.ring_obj);
2525 radeon_bo_unref(&rdev->ih.ring_obj);
2526 rdev->ih.ring = NULL;
2527 rdev->ih.ring_obj = NULL;
2531 void r600_rlc_stop(struct radeon_device *rdev)
2534 if ((rdev->family >= CHIP_RV770) &&
2535 (rdev->family <= CHIP_RV740)) {
2536 /* r7xx asics need to soft reset RLC before halting */
2537 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2538 RREG32(SRBM_SOFT_RESET);
2540 WREG32(SRBM_SOFT_RESET, 0);
2541 RREG32(SRBM_SOFT_RESET);
2544 WREG32(RLC_CNTL, 0);
2547 static void r600_rlc_start(struct radeon_device *rdev)
2549 WREG32(RLC_CNTL, RLC_ENABLE);
2552 static int r600_rlc_init(struct radeon_device *rdev)
2555 const __be32 *fw_data;
2560 r600_rlc_stop(rdev);
2562 WREG32(RLC_HB_BASE, 0);
2563 WREG32(RLC_HB_CNTL, 0);
2564 WREG32(RLC_HB_RPTR, 0);
2565 WREG32(RLC_HB_WPTR, 0);
2566 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2567 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2568 WREG32(RLC_MC_CNTL, 0);
2569 WREG32(RLC_UCODE_CNTL, 0);
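/* load the RLC microcode: the blob is stored big-endian in the firmware
 * image and is written one dword at a time through the indexed
 * RLC_UCODE_ADDR/RLC_UCODE_DATA register pair below */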
2571 fw_data = (const __be32 *)rdev->rlc_fw->data;
2572 if (rdev->family >= CHIP_CEDAR) {
2573 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
2574 WREG32(RLC_UCODE_ADDR, i);
2575 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2577 } else if (rdev->family >= CHIP_RV770) {
2578 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2579 WREG32(RLC_UCODE_ADDR, i);
2580 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2583 for (i = 0; i < RLC_UCODE_SIZE; i++) {
2584 WREG32(RLC_UCODE_ADDR, i);
2585 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2588 WREG32(RLC_UCODE_ADDR, 0);
2590 r600_rlc_start(rdev);
2595 static void r600_enable_interrupts(struct radeon_device *rdev)
2597 u32 ih_cntl = RREG32(IH_CNTL);
2598 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2600 ih_cntl |= ENABLE_INTR;
2601 ih_rb_cntl |= IH_RB_ENABLE;
2602 WREG32(IH_CNTL, ih_cntl);
2603 WREG32(IH_RB_CNTL, ih_rb_cntl);
2604 rdev->ih.enabled = true;
2607 void r600_disable_interrupts(struct radeon_device *rdev)
2609 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2610 u32 ih_cntl = RREG32(IH_CNTL);
2612 ih_rb_cntl &= ~IH_RB_ENABLE;
2613 ih_cntl &= ~ENABLE_INTR;
2614 WREG32(IH_RB_CNTL, ih_rb_cntl);
2615 WREG32(IH_CNTL, ih_cntl);
2616 /* set rptr, wptr to 0 */
2617 WREG32(IH_RB_RPTR, 0);
2618 WREG32(IH_RB_WPTR, 0);
2619 rdev->ih.enabled = false;
2624 static void r600_disable_interrupt_state(struct radeon_device *rdev)
2628 WREG32(CP_INT_CNTL, 0);
2629 WREG32(GRBM_INT_CNTL, 0);
2630 WREG32(DxMODE_INT_MASK, 0);
2631 if (ASIC_IS_DCE3(rdev)) {
2632 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2633 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
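/* clear the HPD interrupt enables while preserving the programmed
 * polarity bits */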
2634 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2635 WREG32(DC_HPD1_INT_CONTROL, tmp);
2636 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2637 WREG32(DC_HPD2_INT_CONTROL, tmp);
2638 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2639 WREG32(DC_HPD3_INT_CONTROL, tmp);
2640 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2641 WREG32(DC_HPD4_INT_CONTROL, tmp);
2642 if (ASIC_IS_DCE32(rdev)) {
2643 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2644 WREG32(DC_HPD5_INT_CONTROL, tmp);
2645 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2646 WREG32(DC_HPD6_INT_CONTROL, tmp);
2649 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2650 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2651 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2652 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2653 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2654 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2655 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2656 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2660 int r600_irq_init(struct radeon_device *rdev)
2664 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2667 ret = r600_ih_ring_alloc(rdev);
2672 r600_disable_interrupts(rdev);
2675 ret = r600_rlc_init(rdev);
2677 r600_ih_ring_fini(rdev);
2681 /* setup interrupt control */
2682 /* set dummy read address to ring address */
2683 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
2684 interrupt_cntl = RREG32(INTERRUPT_CNTL);
2685 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
2686 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
2687 */
2688 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
2689 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
2690 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
2691 WREG32(INTERRUPT_CNTL, interrupt_cntl);
2693 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
2694 rb_bufsz = drm_order(rdev->ih.ring_size / 4);
2696 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
2697 IH_WPTR_OVERFLOW_CLEAR |
2698 (rb_bufsz << 1));
2699 /* WPTR writeback, not yet */
2700 /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
2701 WREG32(IH_RB_WPTR_ADDR_LO, 0);
2702 WREG32(IH_RB_WPTR_ADDR_HI, 0);
2704 WREG32(IH_RB_CNTL, ih_rb_cntl);
2706 /* set rptr, wptr to 0 */
2707 WREG32(IH_RB_RPTR, 0);
2708 WREG32(IH_RB_WPTR, 0);
2710 /* Default settings for IH_CNTL (disabled at first) */
2711 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
2712 /* RPTR_REARM only works if msi's are enabled */
2713 if (rdev->msi_enabled)
2714 ih_cntl |= RPTR_REARM;
2716 #ifdef __BIG_ENDIAN
2717 ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
2718 #endif
2719 WREG32(IH_CNTL, ih_cntl);
2721 /* force the active interrupt state to all disabled */
2722 if (rdev->family >= CHIP_CEDAR)
2723 evergreen_disable_interrupt_state(rdev);
2725 r600_disable_interrupt_state(rdev);
2728 r600_enable_interrupts(rdev);
2733 void r600_irq_suspend(struct radeon_device *rdev)
2735 r600_irq_disable(rdev);
2736 r600_rlc_stop(rdev);
2739 void r600_irq_fini(struct radeon_device *rdev)
2741 r600_irq_suspend(rdev);
2742 r600_ih_ring_fini(rdev);
2745 int r600_irq_set(struct radeon_device *rdev)
2747 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
2748 u32 mode_int = 0;
2749 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
2750 u32 grbm_int_cntl = 0;
2751 u32 hdmi1, hdmi2;
2753 if (!rdev->irq.installed) {
2754 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
2757 /* don't enable anything if the ih is disabled */
2758 if (!rdev->ih.enabled) {
2759 r600_disable_interrupts(rdev);
2760 /* force the active interrupt state to all disabled */
2761 r600_disable_interrupt_state(rdev);
2765 hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
2766 if (ASIC_IS_DCE3(rdev)) {
2767 hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
2768 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2769 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2770 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2771 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
2772 if (ASIC_IS_DCE32(rdev)) {
2773 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
2774 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
2777 hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
2778 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2779 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2780 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2783 if (rdev->irq.sw_int) {
2784 DRM_DEBUG("r600_irq_set: sw int\n");
2785 cp_int_cntl |= RB_INT_ENABLE;
2787 if (rdev->irq.crtc_vblank_int[0]) {
2788 DRM_DEBUG("r600_irq_set: vblank 0\n");
2789 mode_int |= D1MODE_VBLANK_INT_MASK;
2791 if (rdev->irq.crtc_vblank_int[1]) {
2792 DRM_DEBUG("r600_irq_set: vblank 1\n");
2793 mode_int |= D2MODE_VBLANK_INT_MASK;
2795 if (rdev->irq.hpd[0]) {
2796 DRM_DEBUG("r600_irq_set: hpd 1\n");
2797 hpd1 |= DC_HPDx_INT_EN;
2799 if (rdev->irq.hpd[1]) {
2800 DRM_DEBUG("r600_irq_set: hpd 2\n");
2801 hpd2 |= DC_HPDx_INT_EN;
2803 if (rdev->irq.hpd[2]) {
2804 DRM_DEBUG("r600_irq_set: hpd 3\n");
2805 hpd3 |= DC_HPDx_INT_EN;
2807 if (rdev->irq.hpd[3]) {
2808 DRM_DEBUG("r600_irq_set: hpd 4\n");
2809 hpd4 |= DC_HPDx_INT_EN;
2811 if (rdev->irq.hpd[4]) {
2812 DRM_DEBUG("r600_irq_set: hpd 5\n");
2813 hpd5 |= DC_HPDx_INT_EN;
2815 if (rdev->irq.hpd[5]) {
2816 DRM_DEBUG("r600_irq_set: hpd 6\n");
2817 hpd6 |= DC_HPDx_INT_EN;
2819 if (rdev->irq.hdmi[0]) {
2820 DRM_DEBUG("r600_irq_set: hdmi 1\n");
2821 hdmi1 |= R600_HDMI_INT_EN;
2823 if (rdev->irq.hdmi[1]) {
2824 DRM_DEBUG("r600_irq_set: hdmi 2\n");
2825 hdmi2 |= R600_HDMI_INT_EN;
2827 if (rdev->irq.gui_idle) {
2828 DRM_DEBUG("gui idle\n");
2829 grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
2832 WREG32(CP_INT_CNTL, cp_int_cntl);
2833 WREG32(DxMODE_INT_MASK, mode_int);
2834 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
2835 WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
2836 if (ASIC_IS_DCE3(rdev)) {
2837 WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
2838 WREG32(DC_HPD1_INT_CONTROL, hpd1);
2839 WREG32(DC_HPD2_INT_CONTROL, hpd2);
2840 WREG32(DC_HPD3_INT_CONTROL, hpd3);
2841 WREG32(DC_HPD4_INT_CONTROL, hpd4);
2842 if (ASIC_IS_DCE32(rdev)) {
2843 WREG32(DC_HPD5_INT_CONTROL, hpd5);
2844 WREG32(DC_HPD6_INT_CONTROL, hpd6);
2847 WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
2848 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
2849 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
2850 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
2856 static inline void r600_irq_ack(struct radeon_device *rdev,
2857 u32 *disp_int,
2858 u32 *disp_int_cont,
2859 u32 *disp_int_cont2)
2863 if (ASIC_IS_DCE3(rdev)) {
2864 *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
2865 *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
2866 *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
2868 *disp_int = RREG32(DISP_INTERRUPT_STATUS);
2869 *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
2870 *disp_int_cont2 = 0;
2873 if (*disp_int & LB_D1_VBLANK_INTERRUPT)
2874 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
2875 if (*disp_int & LB_D1_VLINE_INTERRUPT)
2876 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
2877 if (*disp_int & LB_D2_VBLANK_INTERRUPT)
2878 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
2879 if (*disp_int & LB_D2_VLINE_INTERRUPT)
2880 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
2881 if (*disp_int & DC_HPD1_INTERRUPT) {
2882 if (ASIC_IS_DCE3(rdev)) {
2883 tmp = RREG32(DC_HPD1_INT_CONTROL);
2884 tmp |= DC_HPDx_INT_ACK;
2885 WREG32(DC_HPD1_INT_CONTROL, tmp);
2887 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
2888 tmp |= DC_HPDx_INT_ACK;
2889 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2892 if (*disp_int & DC_HPD2_INTERRUPT) {
2893 if (ASIC_IS_DCE3(rdev)) {
2894 tmp = RREG32(DC_HPD2_INT_CONTROL);
2895 tmp |= DC_HPDx_INT_ACK;
2896 WREG32(DC_HPD2_INT_CONTROL, tmp);
2898 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
2899 tmp |= DC_HPDx_INT_ACK;
2900 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2903 if (*disp_int_cont & DC_HPD3_INTERRUPT) {
2904 if (ASIC_IS_DCE3(rdev)) {
2905 tmp = RREG32(DC_HPD3_INT_CONTROL);
2906 tmp |= DC_HPDx_INT_ACK;
2907 WREG32(DC_HPD3_INT_CONTROL, tmp);
2909 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
2910 tmp |= DC_HPDx_INT_ACK;
2911 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2914 if (*disp_int_cont & DC_HPD4_INTERRUPT) {
2915 tmp = RREG32(DC_HPD4_INT_CONTROL);
2916 tmp |= DC_HPDx_INT_ACK;
2917 WREG32(DC_HPD4_INT_CONTROL, tmp);
2919 if (ASIC_IS_DCE32(rdev)) {
2920 if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
2921 tmp = RREG32(DC_HPD5_INT_CONTROL);
2922 tmp |= DC_HPDx_INT_ACK;
2923 WREG32(DC_HPD5_INT_CONTROL, tmp);
2925 if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
2926 tmp = RREG32(DC_HPD6_INT_CONTROL);
2927 tmp |= DC_HPDx_INT_ACK;
2928 WREG32(DC_HPD6_INT_CONTROL, tmp);
2931 if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
2932 WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
2934 if (ASIC_IS_DCE3(rdev)) {
2935 if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
2936 WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
2939 if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
2940 WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
2945 void r600_irq_disable(struct radeon_device *rdev)
2947 u32 disp_int, disp_int_cont, disp_int_cont2;
2949 r600_disable_interrupts(rdev);
2950 /* Wait and acknowledge irq */
2951 mdelay(1);
2952 r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
2953 r600_disable_interrupt_state(rdev);
2956 static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
2960 /* XXX use writeback */
2961 wptr = RREG32(IH_RB_WPTR);
2963 if (wptr & RB_OVERFLOW) {
2964 /* When a ring buffer overflow happens, start parsing interrupts
2965 * from the last vector that was not overwritten (wptr + 16).
2966 * Hopefully this should allow us to catch up.
2967 */
2968 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
2969 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
2970 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
2971 tmp = RREG32(IH_RB_CNTL);
2972 tmp |= IH_WPTR_OVERFLOW_CLEAR;
2973 WREG32(IH_RB_CNTL, tmp);
2975 return (wptr & rdev->ih.ptr_mask);
2979 * Each IV ring entry is 128 bits:
2980 * [7:0] - interrupt source id
2981 * [31:8] - reserved
2982 * [59:32] - interrupt source data
2983 * [127:60] - reserved
2985 * The basic interrupt vector entries
2986 * are decoded as follows:
2987 * src_id src_data description
2988 * 1 0 D1 vblank
2989 * 1 1 D1 vline
2990 * 5 0 D2 vblank
2991 * 5 1 D2 vline
2992 * 19 0 FP Hot plug detection A
2993 * 19 1 FP Hot plug detection B
2994 * 19 2 DAC A auto-detection
2995 * 19 3 DAC B auto-detection
3001 * 181 - EOP Interrupt
3002 * 233 - GUI Idle
3004 * Note, these are based on r600 and may need to be
3005 * adjusted or added to on newer asics
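*/

/* Illustrative decode of one IV ring entry per the layout above. The
 * struct and helper names are hypothetical sketches, not driver API;
 * r600_irq_process below performs the same masking inline. */
struct r600_iv_entry_sketch {
	u32 src_id;	/* bits [7:0] of the first dword */
	u32 src_data;	/* bits [59:32] of the entry, i.e. [27:0] of dword 1 */
};

static inline struct r600_iv_entry_sketch
r600_decode_iv_sketch(const volatile u32 *ring, u32 rptr)
{
	struct r600_iv_entry_sketch e;
	u32 ring_index = rptr / 4;	/* rptr is in bytes */

	e.src_id = ring[ring_index] & 0xff;
	e.src_data = ring[ring_index + 1] & 0xfffffff;
	return e;
}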
3008 int r600_irq_process(struct radeon_device *rdev)
3010 u32 wptr = r600_get_ih_wptr(rdev);
3011 u32 rptr = rdev->ih.rptr;
3012 u32 src_id, src_data;
3013 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
3014 unsigned long flags;
3015 bool queue_hotplug = false;
3017 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3018 if (!rdev->ih.enabled)
3021 spin_lock_irqsave(&rdev->ih.lock, flags);
3023 if (rptr == wptr) {
3024 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3025 return IRQ_NONE;
3026 }
3027 if (rdev->shutdown) {
3028 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3029 return IRQ_NONE;
3030 }
3032 restart_ih:
3033 /* display interrupts */
3034 r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
3036 rdev->ih.wptr = wptr;
3037 while (rptr != wptr) {
3038 /* wptr/rptr are in bytes! */
3039 ring_index = rptr / 4;
3040 src_id = rdev->ih.ring[ring_index] & 0xff;
3041 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
3044 case 1: /* D1 vblank/vline */
3046 case 0: /* D1 vblank */
3047 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
3048 drm_handle_vblank(rdev->ddev, 0);
3049 rdev->pm.vblank_sync = true;
3050 wake_up(&rdev->irq.vblank_queue);
3051 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3052 DRM_DEBUG("IH: D1 vblank\n");
3055 case 1: /* D1 vline */
3056 if (disp_int & LB_D1_VLINE_INTERRUPT) {
3057 disp_int &= ~LB_D1_VLINE_INTERRUPT;
3058 DRM_DEBUG("IH: D1 vline\n");
3062 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3066 case 5: /* D2 vblank/vline */
3068 case 0: /* D2 vblank */
3069 if (disp_int & LB_D2_VBLANK_INTERRUPT) {
3070 drm_handle_vblank(rdev->ddev, 1);
3071 rdev->pm.vblank_sync = true;
3072 wake_up(&rdev->irq.vblank_queue);
3073 disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3074 DRM_DEBUG("IH: D2 vblank\n");
3077 case 1: /* D2 vline */
3078 if (disp_int & LB_D2_VLINE_INTERRUPT) {
3079 disp_int &= ~LB_D2_VLINE_INTERRUPT;
3080 DRM_DEBUG("IH: D2 vline\n");
3084 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3088 case 19: /* HPD/DAC hotplug */
3091 if (disp_int & DC_HPD1_INTERRUPT) {
3092 disp_int &= ~DC_HPD1_INTERRUPT;
3093 queue_hotplug = true;
3094 DRM_DEBUG("IH: HPD1\n");
3098 if (disp_int & DC_HPD2_INTERRUPT) {
3099 disp_int &= ~DC_HPD2_INTERRUPT;
3100 queue_hotplug = true;
3101 DRM_DEBUG("IH: HPD2\n");
3105 if (disp_int_cont & DC_HPD3_INTERRUPT) {
3106 disp_int_cont &= ~DC_HPD3_INTERRUPT;
3107 queue_hotplug = true;
3108 DRM_DEBUG("IH: HPD3\n");
3112 if (disp_int_cont & DC_HPD4_INTERRUPT) {
3113 disp_int_cont &= ~DC_HPD4_INTERRUPT;
3114 queue_hotplug = true;
3115 DRM_DEBUG("IH: HPD4\n");
3119 if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
3120 disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
3121 queue_hotplug = true;
3122 DRM_DEBUG("IH: HPD5\n");
3126 if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
3127 disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
3128 queue_hotplug = true;
3129 DRM_DEBUG("IH: HPD6\n");
3133 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3138 DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
3139 r600_audio_schedule_polling(rdev);
3141 case 176: /* CP_INT in ring buffer */
3142 case 177: /* CP_INT in IB1 */
3143 case 178: /* CP_INT in IB2 */
3144 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3145 radeon_fence_process(rdev);
3147 case 181: /* CP EOP event */
3148 DRM_DEBUG("IH: CP EOP\n");
3150 case 233: /* GUI IDLE */
3151 DRM_DEBUG("IH: CP EOP\n");
3152 rdev->pm.gui_idle = true;
3153 wake_up(&rdev->irq.idle_queue);
3156 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3160 /* wptr/rptr are in bytes! */
3162 rptr &= rdev->ih.ptr_mask;
3164 /* make sure wptr hasn't changed while processing */
3165 wptr = r600_get_ih_wptr(rdev);
3166 if (wptr != rdev->ih.wptr)
3167 goto restart_ih;
3168 if (queue_hotplug)
3169 queue_work(rdev->wq, &rdev->hotplug_work);
3170 rdev->ih.rptr = rptr;
3171 WREG32(IH_RB_RPTR, rdev->ih.rptr);
3172 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3179 #if defined(CONFIG_DEBUG_FS)
3181 static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
3183 struct drm_info_node *node = (struct drm_info_node *) m->private;
3184 struct drm_device *dev = node->minor->dev;
3185 struct radeon_device *rdev = dev->dev_private;
3186 unsigned count, i, j;
3188 radeon_ring_free_size(rdev);
3189 count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
3190 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
3191 seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
3192 seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
3193 seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
3194 seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
3195 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
3196 seq_printf(m, "%u dwords in ring\n", count);
3197 i = rdev->cp.rptr;
3198 for (j = 0; j <= count; j++) {
3199 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
3200 i = (i + 1) & rdev->cp.ptr_mask;
3205 static int r600_debugfs_mc_info(struct seq_file *m, void *data)
3207 struct drm_info_node *node = (struct drm_info_node *) m->private;
3208 struct drm_device *dev = node->minor->dev;
3209 struct radeon_device *rdev = dev->dev_private;
3211 DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
3212 DREG32_SYS(m, rdev, VM_L2_STATUS);
3216 static struct drm_info_list r600_mc_info_list[] = {
3217 {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
3218 {"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
3222 int r600_debugfs_mc_info_init(struct radeon_device *rdev)
3224 #if defined(CONFIG_DEBUG_FS)
3225 return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
3232 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
3233 * @rdev: radeon device structure
3234 * @bo: buffer object struct which userspace is waiting for idle
3236 * Some R6XX/R7XX ASICs don't seem to take into account an HDP flush
3237 * performed through the ring buffer; this leads to corruption in
3238 * rendering (see http://bugzilla.kernel.org/show_bug.cgi?id=15186).
3239 * To avoid it, perform the HDP flush directly via an MMIO register write.
3241 void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3243 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);