X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fgpu%2Fdrm%2Fradeon%2Fr300.c;h=7e9f95653cbef1ebaf434d7e5a62a8dadb28f908;hb=d594e46ace22afa1621254f6f669e65430048153;hp=9a5798544b42b43e7c0f562c54067ce759bf6f68;hpb=17e15b0c719b5ec0b344d3ebe3787b48315a0218;p=safe%2Fjmp%2Flinux-2.6

diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 9a57985..7e9f956 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -36,7 +36,15 @@
 #include "rv350d.h"
 #include "r300_reg_safe.h"
 
-/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 */
+/* This file gathers functions specific to: r300,r350,rv350,rv370,rv380
+ *
+ * GPU Errata:
+ * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
+ *   using MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
+ *   However, scheduling such a write on the ring seems harmless; I suspect
+ *   the CP read collides with the flush somehow, or maybe the MC, hard to
+ *   tell. (Jerome Glisse)
+ */
 
 /*
  * rv370,rv380 PCIE GART
@@ -109,18 +117,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
 	r = radeon_gart_table_vram_pin(rdev);
 	if (r)
 		return r;
+	radeon_gart_restore(rdev);
 	/* discard memory request outside of configured range */
 	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
-	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
-	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE;
+	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
+	tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
 	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
 	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
 	table_addr = rdev->gart.table_addr;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
 	/* FIXME: setup default page */
-	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
+	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
 	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
 	/* Clear error */
 	WREG32_PCIE(0x18, 0);
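A note on the END_LO arithmetic in the hunk above: the patch switches from gtt_location/gtt_size to the new gtt_start/gtt_end fields. Assuming gtt_end is stored as an inclusive address (gtt_start + gtt_size - 1) and RADEON_GPU_PAGE_MASK is RADEON_GPU_PAGE_SIZE - 1 (4 KiB GPU pages), masking off the low bits yields exactly the value the old expression computed: the base address of the last GART page. A standalone sketch of the equivalence, with example numbers:

        #include <assert.h>
        #include <stdint.h>

        #define RADEON_GPU_PAGE_SIZE 4096u                      /* assumed 4 KiB */
        #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)

        int main(void)
        {
                uint32_t gtt_start = 0x10000000;                /* example placement */
                uint32_t gtt_size  = 32u << 20;                 /* example 32 MiB GTT */
                uint32_t gtt_end   = gtt_start + gtt_size - 1;  /* inclusive, assumed */

                uint32_t old_end = gtt_start + gtt_size - RADEON_GPU_PAGE_SIZE;
                uint32_t new_end = gtt_end & ~RADEON_GPU_PAGE_MASK;

                assert(old_end == new_end);  /* both name the last page's base */
                return 0;
        }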
@@ -137,14 +146,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
 
 void rv370_pcie_gart_disable(struct radeon_device *rdev)
 {
-	uint32_t tmp;
+	u32 tmp;
+	int r;
 
 	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
 	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
 	if (rdev->gart.table.vram.robj) {
-		radeon_object_kunmap(rdev->gart.table.vram.robj);
-		radeon_object_unpin(rdev->gart.table.vram.robj);
+		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->gart.table.vram.robj);
+			radeon_bo_unpin(rdev->gart.table.vram.robj);
+			radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		}
 	}
 }
 
@@ -161,18 +175,25 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
 	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today callers are ib schedule and buffer move) */
 	/* Write SC register so SC & US assert idle */
-	radeon_ring_write(rdev, PACKET0(0x43E0, 0));
+	radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
 	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, PACKET0(0x43E4, 0));
+	radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
 	radeon_ring_write(rdev, 0);
 	/* Flush 3D cache */
-	radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
-	radeon_ring_write(rdev, (2 << 0));
-	radeon_ring_write(rdev, PACKET0(0x4F18, 0));
-	radeon_ring_write(rdev, (1 << 0));
+	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
+	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(rdev, R300_ZC_FLUSH);
 	/* Wait until IDLE & CLEAN */
-	radeon_ring_write(rdev, PACKET0(0x1720, 0));
-	radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
+	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
+				 RADEON_WAIT_2D_IDLECLEAN |
+				 RADEON_WAIT_DMA_GUI_IDLE));
+	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
+				RADEON_HDP_READ_BUFFER_INVALIDATE);
+	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
 	/* Emit fence sequence & fire IRQ */
 	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
 	radeon_ring_write(rdev, fence->seq);
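The last four ring writes above are the workaround for the HOST_PATH_CNTL erratum described in the new comment at the top of the file: the HDP read-cache flush is emitted as CP packets instead of being written over MMIO. A sketch of the two variants side by side, using only identifiers that appear in this patch (the two function names are mine, not the driver's):

        /* MMIO variant: what the errata note says can hardlock r300-family
         * chips, presumably because the flush races with in-flight CP (or
         * MC) reads. */
        static void hdp_flush_mmio_unsafe(struct radeon_device *rdev)
        {
                WREG32(RADEON_HOST_PATH_CNTL, rdev->config.r300.hdp_cntl |
                                              RADEON_HDP_READ_BUFFER_INVALIDATE);
                WREG32(RADEON_HOST_PATH_CNTL, rdev->config.r300.hdp_cntl);
        }

        /* Ring variant: the same two register writes, but scheduled on the
         * CP ring so they are ordered against the CP's own traffic. This is
         * what r300_fence_ring_emit() now does; hdp_cntl is the value cached
         * from RADEON_HOST_PATH_CNTL in r300_startup() later in this patch. */
        static void hdp_flush_ring_safe(struct radeon_device *rdev)
        {
                radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
                radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
                                        RADEON_HDP_READ_BUFFER_INVALIDATE);
                radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
                radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
        }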
@@ -180,50 +201,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
 }
 
-int r300_copy_dma(struct radeon_device *rdev,
-		  uint64_t src_offset,
-		  uint64_t dst_offset,
-		  unsigned num_pages,
-		  struct radeon_fence *fence)
-{
-	uint32_t size;
-	uint32_t cur_size;
-	int i, num_loops;
-	int r = 0;
-
-	/* radeon pitch is /64 */
-	size = num_pages << PAGE_SHIFT;
-	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
-	r = radeon_ring_lock(rdev, num_loops * 4 + 64);
-	if (r) {
-		DRM_ERROR("radeon: moving bo (%d).\n", r);
-		return r;
-	}
-	/* Must wait for 2D idle & clean before DMA or hangs might happen */
-	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-	radeon_ring_write(rdev, (1 << 16));
-	for (i = 0; i < num_loops; i++) {
-		cur_size = size;
-		if (cur_size > 0x1FFFFF) {
-			cur_size = 0x1FFFFF;
-		}
-		size -= cur_size;
-		radeon_ring_write(rdev, PACKET0(0x720, 2));
-		radeon_ring_write(rdev, src_offset);
-		radeon_ring_write(rdev, dst_offset);
-		radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
-		src_offset += cur_size;
-		dst_offset += cur_size;
-	}
-	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-	radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
-	if (fence) {
-		r = radeon_fence_emit(rdev, fence);
-	}
-	radeon_ring_unlock_commit(rdev);
-	return r;
-}
-
 void r300_ring_start(struct radeon_device *rdev)
 {
 	unsigned gb_tile_config;
@@ -263,8 +240,8 @@ void r300_ring_start(struct radeon_device *rdev)
 	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
-	radeon_ring_write(rdev, PACKET0(0x170C, 0));
-	radeon_ring_write(rdev, 1 << 31);
+	radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
+	radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
 	radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
 	radeon_ring_write(rdev, 0);
 	radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
@@ -331,8 +308,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
 
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		/* read MC_STATUS */
-		tmp = RREG32(0x0150);
-		if (tmp & (1 << 4)) {
+		tmp = RREG32(RADEON_MC_STATUS);
+		if (tmp & R300_MC_IDLE) {
 			return 0;
 		}
 		DRM_UDELAY(1);
@@ -377,8 +354,8 @@ void r300_gpu_init(struct radeon_device *rdev)
		       "programming pipes. Bad things might happen.\n");
 	}
 
-	tmp = RREG32(0x170C);
-	WREG32(0x170C, tmp | (1 << 31));
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
+	WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
 
 	WREG32(R300_RB2D_DSTCACHE_MODE,
	       R300_DC_AUTOFLUSH_ENABLE |
@@ -419,8 +396,8 @@ int r300_ga_reset(struct radeon_device *rdev)
 			/* GA still busy soft reset it */
 			WREG32(0x429C, 0x200);
 			WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
-			WREG32(0x43E0, 0);
-			WREG32(0x43E4, 0);
+			WREG32(R300_RE_SCISSORS_TL, 0);
+			WREG32(R300_RE_SCISSORS_BR, 0);
 			WREG32(0x24AC, 0);
 		}
 		/* Wait to prevent race in RBBM_STATUS */
@@ -470,7 +447,7 @@ int r300_gpu_reset(struct radeon_device *rdev)
 	}
 	/* Check if GPU is idle */
 	status = RREG32(RADEON_RBBM_STATUS);
-	if (status & (1 << 31)) {
+	if (status & RADEON_RBBM_ACTIVE) {
 		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
 		return -1;
 	}
@@ -482,20 +459,23 @@
 
 /*
  * r300,r350,rv350,rv380 VRAM info
  */
-void r300_vram_info(struct radeon_device *rdev)
+void r300_mc_init(struct radeon_device *rdev)
 {
 	uint32_t tmp;
 
 	/* DDR for all cards after R300 & IGP */
 	rdev->mc.vram_is_ddr = true;
 	tmp = RREG32(RADEON_MEM_CNTL);
-	if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
-		rdev->mc.vram_width = 128;
-	} else {
-		rdev->mc.vram_width = 64;
+	tmp &= R300_MEM_NUM_CHANNELS_MASK;
+	switch (tmp) {
+	case 0: rdev->mc.vram_width = 64; break;
+	case 1: rdev->mc.vram_width = 128; break;
+	case 2: rdev->mc.vram_width = 256; break;
+	default: rdev->mc.vram_width = 128; break;
 	}
-	r100_vram_init_sizes(rdev);
+	if (!(rdev->flags & RADEON_IS_AGP))
+		radeon_gtt_location(rdev, &rdev->mc);
 }
 
 void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
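The r300_mc_init() hunk above replaces the old binary 64/128-bit guess with a decode of the memory-channel field. A standalone sketch of the mapping (the mask value 0x3 is my assumption for R300_MEM_NUM_CHANNELS_MASK; the case-to-width mapping is taken verbatim from the hunk):

        #include <stdint.h>

        static unsigned r300_vram_width(uint32_t mem_cntl)
        {
                switch (mem_cntl & 0x3) {  /* R300_MEM_NUM_CHANNELS_MASK, assumed 0x3 */
                case 0:  return 64;
                case 1:  return 128;
                case 2:  return 256;
                default: return 128;       /* reserved encoding: assume the common case */
                }
        }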
@@ -557,6 +537,37 @@
 
 }
 
+int rv370_get_pcie_lanes(struct radeon_device *rdev)
+{
+	u32 link_width_cntl;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return 0;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return 0;
+
+	/* FIXME wait for idle */
+
+	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+	case RADEON_PCIE_LC_LINK_WIDTH_X0:
+		return 0;
+	case RADEON_PCIE_LC_LINK_WIDTH_X1:
+		return 1;
+	case RADEON_PCIE_LC_LINK_WIDTH_X2:
+		return 2;
+	case RADEON_PCIE_LC_LINK_WIDTH_X4:
+		return 4;
+	case RADEON_PCIE_LC_LINK_WIDTH_X8:
+		return 8;
+	case RADEON_PCIE_LC_LINK_WIDTH_X16:
+	default:
+		return 16;
+	}
+}
+
 #if defined(CONFIG_DEBUG_FS)
 static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
 {
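rv370_get_pcie_lanes() above is the read-back counterpart of the existing rv370_set_pcie_lanes(). A hypothetical caller, not part of this patch, might use it to renegotiate the link width only when it actually differs:

        /* Hypothetical power-management helper, for illustration only. */
        static void r300_pm_set_lanes(struct radeon_device *rdev, int want)
        {
                int have = rv370_get_pcie_lanes(rdev);

                /* 0 means IGP or non-PCIE, where lane width is meaningless. */
                if (have == 0 || have == want)
                        return;
                rv370_set_pcie_lanes(rdev, want);
        }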
@@ -681,7 +692,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			r100_cs_dump_packet(p, pkt);
 			return r;
 		}
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+
+		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+			tile_flags |= R300_TXO_MACRO_TILE;
+		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+			tile_flags |= R300_TXO_MICRO_TILE;
+
+		tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
+		tmp |= tile_flags;
+		ib[idx] = tmp;
 		track->textures[i].robj = reloc->robj;
 		break;
 	/* Tracked registers */
@@ -847,7 +866,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		case R300_TX_FORMAT_Z6Y5X5:
 		case R300_TX_FORMAT_W4Z4Y4X4:
 		case R300_TX_FORMAT_W1Z5Y5X5:
-		case R300_TX_FORMAT_DXT1:
 		case R300_TX_FORMAT_D3DMFT_CxV8U8:
 		case R300_TX_FORMAT_B8G8_B8G8:
 		case R300_TX_FORMAT_G8R8_G8B8:
@@ -861,8 +879,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		case 0x17:
 		case R300_TX_FORMAT_FL_I32:
 		case 0x1e:
-		case R300_TX_FORMAT_DXT3:
-		case R300_TX_FORMAT_DXT5:
 			track->textures[i].cpp = 4;
 			break;
 		case R300_TX_FORMAT_W16Z16Y16X16:
@@ -873,6 +889,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		case R300_TX_FORMAT_FL_R32G32B32A32:
 			track->textures[i].cpp = 16;
 			break;
+		case R300_TX_FORMAT_DXT1:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+			break;
+		case R300_TX_FORMAT_ATI2N:
+			if (p->rdev->family < CHIP_R420) {
+				DRM_ERROR("Invalid texture format %u\n",
+					  (idx_value & 0x1F));
+				return -EINVAL;
+			}
+			/* The same rules apply as for DXT3/5. */
+			/* Pass through. */
+		case R300_TX_FORMAT_DXT3:
+		case R300_TX_FORMAT_DXT5:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+			break;
 		default:
 			DRM_ERROR("Invalid texture format %u\n",
				  (idx_value & 0x1F));
@@ -932,6 +965,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			track->textures[i].width_11 = tmp;
 			tmp = ((idx_value >> 16) & 1) << 11;
 			track->textures[i].height_11 = tmp;
+
+			/* ATI1N */
+			if (idx_value & (1 << 14)) {
+				/* The same rules apply as for DXT1. */
+				track->textures[i].compress_format =
+					R100_TRACK_COMP_DXT1;
+			}
+		} else if (idx_value & (1 << 14)) {
+			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
+			return -EINVAL;
 		}
 		break;
 	case 0x4480:
@@ -973,6 +1016,18 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		}
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
+	case 0x4e0c:
+		/* RB3D_COLOR_CHANNEL_MASK */
+		track->color_channel_mask = idx_value;
+		break;
+	case 0x4d1c:
+		/* ZB_BW_CNTL */
+		track->fastfill = !!(idx_value & (1 << 2));
+		break;
+	case 0x4e04:
+		/* RB3D_BLENDCNTL */
+		track->blend_read_enable = !!(idx_value & (1 << 2));
+		break;
 	case 0x4be8:
 		/* valid register only on RV530 */
 		if (p->rdev->family == CHIP_RV530)
@@ -1181,6 +1236,9 @@ static int r300_startup(struct radeon_device *rdev)
 {
 	int r;
 
+	/* set common regs */
+	r100_set_common_regs(rdev);
+	/* program mc */
 	r300_mc_program(rdev);
 	/* Resume clock */
 	r300_clock_startup(rdev);
@@ -1205,8 +1263,8 @@ static int r300_startup(struct radeon_device *rdev)
 		return r;
 	}
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	r100_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
@@ -1243,6 +1301,8 @@ int r300_resume(struct radeon_device *rdev)
 	radeon_combios_asic_init(rdev->ddev);
 	/* Resume clock after posting */
 	r300_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return r300_startup(rdev);
 }
 
@@ -1260,7 +1320,6 @@ int r300_suspend(struct radeon_device *rdev)
 
 void r300_fini(struct radeon_device *rdev)
 {
-	r300_suspend(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -1269,9 +1328,10 @@ void r300_fini(struct radeon_device *rdev)
 		rv370_pcie_gart_fini(rdev);
 	if (rdev->flags & RADEON_IS_PCI)
 		r100_pci_gart_fini(rdev);
+	radeon_agp_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -1309,20 +1369,23 @@ int r300_init(struct radeon_device *rdev)
			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
-	if (!radeon_card_posted(rdev) && rdev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		radeon_combios_asic_init(rdev->ddev);
-	}
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
 	/* Set asic errata */
 	r300_errata(rdev);
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
-	/* Get vram informations */
-	r300_vram_info(rdev);
-	/* Initialize memory controller (also test AGP) */
-	r = r420_mc_init(rdev);
-	if (r)
-		return r;
+	/* Initialize power management */
+	radeon_pm_init(rdev);
+	/* initialize AGP */
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r) {
+			radeon_agp_disable(rdev);
+		}
+	}
+	/* initialize memory controller */
+	r300_mc_init(rdev);
 	/* Fence driver */
 	r = radeon_fence_driver_init(rdev);
 	if (r)
@@ -1331,7 +1394,7 @@ int r300_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	if (rdev->flags & RADEON_IS_PCIE) {
@@ -1350,15 +1413,15 @@ int r300_init(struct radeon_device *rdev)
 	if (r) {
 		/* Something went wrong with the accel init; stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		r300_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		if (rdev->flags & RADEON_IS_PCIE)
 			rv370_pcie_gart_fini(rdev);
 		if (rdev->flags & RADEON_IS_PCI)
 			r100_pci_gart_fini(rdev);
-		radeon_irq_kms_fini(rdev);
+		radeon_agp_fini(rdev);
 		rdev->accel_working = false;
 	}
 	return 0;
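Finally, on the texture-format hunks in the CS checker above: DXT1 and DXT3/5 are moved out of the plain cpp switch because block compression breaks the bytes = width * height * cpp assumption. DXT1 packs a 4x4 texel block into 8 bytes and DXT3/DXT5 (and ATI2N) into 16, which is why the checker now records cpp = 1 plus a compress_format and corrects the size accounting elsewhere. A standalone sketch of the block arithmetic (the helper name is mine, not the driver's):

        #include <stdio.h>

        enum compress_format { COMP_DXT1, COMP_DXT35 };

        /* Minimum byte size of one compressed mip level. */
        static unsigned compressed_size(unsigned w, unsigned h,
                                        enum compress_format fmt)
        {
                unsigned blocks_w = (w + 3) / 4;  /* round up to whole 4x4 blocks */
                unsigned blocks_h = (h + 3) / 4;
                unsigned block_bytes = (fmt == COMP_DXT1) ? 8 : 16;

                return blocks_w * blocks_h * block_bytes;
        }

        int main(void)
        {
                /* 256x256 DXT1: 64*64 blocks * 8  = 32768 bytes (w*h/2) */
                printf("%u\n", compressed_size(256, 256, COMP_DXT1));
                /* 256x256 DXT5: 64*64 blocks * 16 = 65536 bytes (w*h)   */
                printf("%u\n", compressed_size(256, 256, COMP_DXT35));
                return 0;
        }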