diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 60d1593..261e98a 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -26,6 +26,7 @@
  * Jerome Glisse
  */
 #include <linux/seq_file.h>
+#include <linux/slab.h>
 #include "drmP.h"
 #include "radeon_drm.h"
 #include "radeon_reg.h"
@@ -34,6 +35,36 @@
 
 int radeon_debugfs_ib_init(struct radeon_device *rdev);
 
+void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
+{
+	struct radeon_ib *ib, *n;
+
+	list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
+		list_del(&ib->list);
+		vfree(ib->ptr);
+		kfree(ib);
+	}
+}
+
+void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ib *bib;
+
+	bib = kmalloc(sizeof(*bib), GFP_KERNEL);
+	if (bib == NULL)
+		return;
+	bib->ptr = vmalloc(ib->length_dw * 4);
+	if (bib->ptr == NULL) {
+		kfree(bib);
+		return;
+	}
+	memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
+	bib->length_dw = ib->length_dw;
+	mutex_lock(&rdev->ib_pool.mutex);
+	list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
+	mutex_unlock(&rdev->ib_pool.mutex);
+}
+
 /*
  * IB.
  */
@@ -41,58 +72,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
 {
 	struct radeon_fence *fence;
 	struct radeon_ib *nib;
-	unsigned long i;
-	int r = 0;
+	int r = 0, i, c;
 
 	*ib = NULL;
 	r = radeon_fence_create(rdev, &fence);
 	if (r) {
-		DRM_ERROR("failed to create fence for new IB\n");
+		dev_err(rdev->dev, "failed to create fence for new IB\n");
 		return r;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
-	if (i < RADEON_IB_POOL_SIZE) {
-		set_bit(i, rdev->ib_pool.alloc_bm);
-		rdev->ib_pool.ibs[i].length_dw = 0;
-		*ib = &rdev->ib_pool.ibs[i];
-		goto out;
-	}
-	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
-		/* we go do nothings here */
-		DRM_ERROR("all IB allocated none scheduled.\n");
-		r = -EINVAL;
-		goto out;
+	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+		i &= (RADEON_IB_POOL_SIZE - 1);
+		if (rdev->ib_pool.ibs[i].free) {
+			nib = &rdev->ib_pool.ibs[i];
+			break;
+		}
 	}
-	/* get the first ib on the scheduled list */
-	nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
-			 struct radeon_ib, list);
-	if (nib->fence == NULL) {
-		/* we go do nothings here */
-		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
-		r = -EINVAL;
-		goto out;
+	if (nib == NULL) {
+		/* This should never happen, it means we allocated all
+		 * IB and haven't scheduled one yet, return EBUSY to
+		 * userspace hoping that on ioctl recall we get better
+		 * luck
+		 */
+		dev_err(rdev->dev, "no free indirect buffer !\n");
+		mutex_unlock(&rdev->ib_pool.mutex);
+		radeon_fence_unref(&fence);
+		return -EBUSY;
 	}
-	r = radeon_fence_wait(nib->fence, false);
-	if (r) {
-		DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
-			  (unsigned long)nib->gpu_addr, nib->length_dw);
-		DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
-		goto out;
+	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+	nib->free = false;
+	if (nib->fence) {
+		mutex_unlock(&rdev->ib_pool.mutex);
+		r = radeon_fence_wait(nib->fence, false);
+		if (r) {
+			dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+			mutex_lock(&rdev->ib_pool.mutex);
+			nib->free = true;
+			mutex_unlock(&rdev->ib_pool.mutex);
+			radeon_fence_unref(&fence);
+			return r;
+		}
+		mutex_lock(&rdev->ib_pool.mutex);
 	}
 	radeon_fence_unref(&nib->fence);
+	nib->fence = fence;
 	nib->length_dw = 0;
-	list_del(&nib->list);
-	INIT_LIST_HEAD(&nib->list);
-	*ib = nib;
-out:
 	mutex_unlock(&rdev->ib_pool.mutex);
-	if (r) {
-		radeon_fence_unref(&fence);
-	} else {
-		(*ib)->fence = fence;
-	}
-	return r;
+	*ib = nib;
+	return 0;
 }
 
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
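
The reworked radeon_ib_get() above drops the old alloc_bm bitmap in favour of a
round-robin scan: head_id remembers where the previous allocation stopped, and
the "i &= RADEON_IB_POOL_SIZE - 1" mask wraps the index, which is only valid
because the pool size is a power of two. A minimal standalone sketch of that
scan, with illustrative names rather than the driver's API:

    #include <stdbool.h>
    #include <stdio.h>

    #define IB_POOL_SIZE 16 /* must stay a power of two for the mask trick */

    struct ib_slot {
        bool free;
    };

    /* Scan at most IB_POOL_SIZE entries starting at *head, wrapping with a
     * bitmask instead of a modulo.  On success the slot is claimed and *head
     * advances past it, so callers recycle the oldest slots first. */
    static int ib_pool_find_free(struct ib_slot *pool, unsigned *head)
    {
        unsigned i = *head;

        for (unsigned c = 0; c < IB_POOL_SIZE; c++, i++) {
            i &= IB_POOL_SIZE - 1;
            if (pool[i].free) {
                pool[i].free = false;
                *head = (i + 1) & (IB_POOL_SIZE - 1);
                return (int)i;
            }
        }
        return -1; /* every slot busy: the driver returns -EBUSY instead */
    }

    int main(void)
    {
        struct ib_slot pool[IB_POOL_SIZE];
        unsigned head = 0;

        for (unsigned i = 0; i < IB_POOL_SIZE; i++)
            pool[i].free = true;
        printf("first slot: %d\n", ib_pool_find_free(pool, &head)); /* 0 */
        printf("next slot:  %d\n", ib_pool_find_free(pool, &head)); /* 1 */
        return 0;
    }
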
@@ -103,55 +131,36 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 	if (tmp == NULL) {
 		return;
 	}
-	mutex_lock(&rdev->ib_pool.mutex);
-	if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
-		/* IB is scheduled & not signaled don't do anythings */
-		mutex_unlock(&rdev->ib_pool.mutex);
-		return;
-	}
-	list_del(&tmp->list);
-	INIT_LIST_HEAD(&tmp->list);
-	if (tmp->fence) {
+	if (!tmp->fence->emited)
 		radeon_fence_unref(&tmp->fence);
-	}
-	tmp->length_dw = 0;
-	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+	mutex_lock(&rdev->ib_pool.mutex);
+	tmp->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 }
 
-static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib)
-{
-	while ((ib->length_dw & rdev->cp.align_mask)) {
-		ib->ptr[ib->length_dw++] = PACKET2(0);
-	}
-}
-
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 {
 	int r = 0;
 
-	mutex_lock(&rdev->ib_pool.mutex);
-	radeon_ib_align(rdev, ib);
 	if (!ib->length_dw || !rdev->cp.ready) {
 		/* TODO: Nothings in the ib we should report. */
-		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
 		return -EINVAL;
 	}
+	/* 64 dwords should be enough for fence too */
 	r = radeon_ring_lock(rdev, 64);
 	if (r) {
 		DRM_ERROR("radeon: scheduling IB failled (%d).\n", r);
-		mutex_unlock(&rdev->ib_pool.mutex);
 		return r;
 	}
-	radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
-	radeon_ring_write(rdev, ib->gpu_addr);
-	radeon_ring_write(rdev, ib->length_dw);
+	radeon_ring_ib_execute(rdev, ib);
 	radeon_fence_emit(rdev, ib->fence);
-	radeon_ring_unlock_commit(rdev);
-	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+	mutex_lock(&rdev->ib_pool.mutex);
+	/* once scheduled IB is considered free and protected by the fence */
+	ib->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
+	radeon_ring_unlock_commit(rdev);
 	return 0;
 }
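
radeon_ib_schedule() now flips ib->free under the pool mutex as soon as the IB
is written to the ring: the slot becomes immediately reclaimable, and the fence
emitted just before is the only thing guarding it, because radeon_ib_get()
waits on that fence before recycling the slot. A toy model of this
fence-protected lifecycle (hypothetical names; a real fence signals from the
GPU interrupt, not from a flag):

    #include <stdbool.h>
    #include <stdio.h>

    struct fence { bool emitted; bool signaled; };
    struct ib { struct fence *fence; bool free; };

    /* Scheduling emits the fence and immediately marks the slot free. */
    static void ib_schedule(struct ib *ib)
    {
        ib->fence->emitted = true;
        ib->free = true;
    }

    /* radeon_ib_get() would block on the fence here; we only test it. */
    static bool ib_safe_to_recycle(const struct ib *ib)
    {
        return ib->free && ib->fence->signaled;
    }

    int main(void)
    {
        struct fence f = { false, false };
        struct ib ib = { &f, false };

        ib_schedule(&ib);
        printf("recycle right away:  %d\n", ib_safe_to_recycle(&ib)); /* 0 */
        f.signaled = true; /* the GPU passed the fence command */
        printf("recycle after signal: %d\n", ib_safe_to_recycle(&ib)); /* 1 */
        return 0;
    }
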
@@ -162,21 +171,28 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 	int i;
 	int r = 0;
 
+	if (rdev->ib_pool.robj)
+		return 0;
+	INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
 	/* Allocate 1M object buffer */
-	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
-	r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
-				 true, RADEON_GEM_DOMAIN_GTT,
-				 false, &rdev->ib_pool.robj);
+	r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
+				true, RADEON_GEM_DOMAIN_GTT,
+				&rdev->ib_pool.robj);
 	if (r) {
 		DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
 		return r;
 	}
-	r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
+	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
 	if (r) {
+		radeon_bo_unreserve(rdev->ib_pool.robj);
 		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
 		return r;
 	}
-	r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
+	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
+	radeon_bo_unreserve(rdev->ib_pool.robj);
 	if (r) {
 		DRM_ERROR("radeon: failed to map ib poll (%d).\n", r);
 		return r;
@@ -189,9 +205,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 		rdev->ib_pool.ibs[i].ptr = ptr + offset;
 		rdev->ib_pool.ibs[i].idx = i;
 		rdev->ib_pool.ibs[i].length_dw = 0;
-		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+		rdev->ib_pool.ibs[i].free = true;
 	}
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+	rdev->ib_pool.head_id = 0;
 	rdev->ib_pool.ready = true;
 	DRM_INFO("radeon: ib pool ready.\n");
 	if (radeon_debugfs_ib_init(rdev)) {
@@ -202,73 +218,27 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 
 void radeon_ib_pool_fini(struct radeon_device *rdev)
 {
+	int r;
+	struct radeon_bo *robj;
+
 	if (!rdev->ib_pool.ready) {
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
-	if (rdev->ib_pool.robj) {
-		radeon_object_kunmap(rdev->ib_pool.robj);
-		radeon_object_unref(&rdev->ib_pool.robj);
-		rdev->ib_pool.robj = NULL;
-	}
+	radeon_ib_bogus_cleanup(rdev);
+	robj = rdev->ib_pool.robj;
+	rdev->ib_pool.robj = NULL;
 	mutex_unlock(&rdev->ib_pool.mutex);
-}
-
-int radeon_ib_test(struct radeon_device *rdev)
-{
-	struct radeon_ib *ib;
-	uint32_t scratch;
-	uint32_t tmp = 0;
-	unsigned i;
-	int r;
-
-	r = radeon_scratch_get(rdev, &scratch);
-	if (r) {
-		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
-		return r;
-	}
-	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ib_get(rdev, &ib);
-	if (r) {
-		return r;
-	}
-	ib->ptr[0] = PACKET0(scratch, 0);
-	ib->ptr[1] = 0xDEADBEEF;
-	ib->ptr[2] = PACKET2(0);
-	ib->ptr[3] = PACKET2(0);
-	ib->ptr[4] = PACKET2(0);
-	ib->ptr[5] = PACKET2(0);
-	ib->ptr[6] = PACKET2(0);
-	ib->ptr[7] = PACKET2(0);
-	ib->length_dw = 8;
-	r = radeon_ib_schedule(rdev, ib);
-	if (r) {
-		radeon_scratch_free(rdev, scratch);
-		radeon_ib_free(rdev, &ib);
-		return r;
-	}
-	r = radeon_fence_wait(ib->fence, false);
-	if (r) {
-		return r;
-	}
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = RREG32(scratch);
-		if (tmp == 0xDEADBEEF) {
-			break;
+	if (robj) {
+		r = radeon_bo_reserve(robj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(robj);
+			radeon_bo_unpin(robj);
+			radeon_bo_unreserve(robj);
 		}
-		DRM_UDELAY(1);
+		radeon_bo_unref(&robj);
 	}
-	if (i < rdev->usec_timeout) {
-		DRM_INFO("ib test succeeded in %u usecs\n", i);
-	} else {
-		DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
-			  scratch, tmp);
-		r = -EINVAL;
-	}
-	radeon_scratch_free(rdev, scratch);
-	radeon_ib_free(rdev, &ib);
-	return r;
 }
@@ -277,7 +247,10 @@ int radeon_ib_test(struct radeon_device *rdev)
  */
 void radeon_ring_free_size(struct radeon_device *rdev)
 {
-	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+	if (rdev->family >= CHIP_R600)
+		rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+	else
+		rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
 	/* This works because ring_size is a power of 2 */
 	rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
 	rdev->cp.ring_free_dw -= rdev->cp.wptr;
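
The free-size computation kept just below the new rptr read relies on the ring
size being a power of two: adding the ring size before subtracting wptr keeps
the intermediate value non-negative, the mask folds it back into range, and a
result of zero (rptr == wptr) means the ring is empty, not full. A standalone
version of the same arithmetic, with an illustrative signature; callers like
radeon_ring_alloc() still reserve one extra dword so the write pointer never
lands exactly on the read pointer:

    #include <stdio.h>

    /* ring_dw is the ring size in dwords and must be a power of two */
    static unsigned ring_free_dw(unsigned rptr, unsigned wptr, unsigned ring_dw)
    {
        unsigned free = rptr + ring_dw - wptr;

        free &= ring_dw - 1; /* cheap modulo, power-of-two sizes only */
        if (free == 0)       /* rptr == wptr: the ring is completely empty */
            free = ring_dw;
        return free;
    }

    int main(void)
    {
        /* 256-dword ring, writer at 100, reader at 40 */
        printf("%u free dwords\n", ring_free_dw(40, 100, 256)); /* 196 */
        return 0;
    }
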
@@ -287,31 +260,41 @@ void radeon_ring_free_size(struct radeon_device *rdev)
 	}
 }
 
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
 {
 	int r;
 
 	/* Align requested size with padding so unlock_commit can
 	 * pad safely */
 	ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
-	mutex_lock(&rdev->cp.mutex);
 	while (ndw > (rdev->cp.ring_free_dw - 1)) {
 		radeon_ring_free_size(rdev);
 		if (ndw < rdev->cp.ring_free_dw) {
 			break;
 		}
 		r = radeon_fence_wait_next(rdev);
-		if (r) {
-			mutex_unlock(&rdev->cp.mutex);
+		if (r)
 			return r;
-		}
 	}
 	rdev->cp.count_dw = ndw;
 	rdev->cp.wptr_old = rdev->cp.wptr;
 	return 0;
 }
 
-void radeon_ring_unlock_commit(struct radeon_device *rdev)
+int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+{
+	int r;
+
+	mutex_lock(&rdev->cp.mutex);
+	r = radeon_ring_alloc(rdev, ndw);
+	if (r) {
+		mutex_unlock(&rdev->cp.mutex);
+		return r;
+	}
+	return 0;
+}
+
+void radeon_ring_commit(struct radeon_device *rdev)
 {
 	unsigned count_dw_pad;
 	unsigned i;
@@ -320,58 +303,22 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev)
 	count_dw_pad = (rdev->cp.align_mask + 1) -
 		       (rdev->cp.wptr & rdev->cp.align_mask);
 	for (i = 0; i < count_dw_pad; i++) {
-		radeon_ring_write(rdev, PACKET2(0));
+		radeon_ring_write(rdev, 2 << 30);
 	}
 	DRM_MEMORYBARRIER();
-	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
-	(void)RREG32(RADEON_CP_RB_WPTR);
-	mutex_unlock(&rdev->cp.mutex);
+	radeon_cp_commit(rdev);
 }
 
-void radeon_ring_unlock_undo(struct radeon_device *rdev)
+void radeon_ring_unlock_commit(struct radeon_device *rdev)
 {
-	rdev->cp.wptr = rdev->cp.wptr_old;
+	radeon_ring_commit(rdev);
 	mutex_unlock(&rdev->cp.mutex);
 }
 
-int radeon_ring_test(struct radeon_device *rdev)
+void radeon_ring_unlock_undo(struct radeon_device *rdev)
 {
-	uint32_t scratch;
-	uint32_t tmp = 0;
-	unsigned i;
-	int r;
-
-	r = radeon_scratch_get(rdev, &scratch);
-	if (r) {
-		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
-		return r;
-	}
-	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ring_lock(rdev, 2);
-	if (r) {
-		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
-		radeon_scratch_free(rdev, scratch);
-		return r;
-	}
-	radeon_ring_write(rdev, PACKET0(scratch, 0));
-	radeon_ring_write(rdev, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev);
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = RREG32(scratch);
-		if (tmp == 0xDEADBEEF) {
-			break;
-		}
-		DRM_UDELAY(1);
-	}
-	if (i < rdev->usec_timeout) {
-		DRM_INFO("ring test succeeded in %d usecs\n", i);
-	} else {
-		DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n",
-			  scratch, tmp);
-		r = -EINVAL;
-	}
-	radeon_scratch_free(rdev, scratch);
-	return r;
+	rdev->cp.wptr = rdev->cp.wptr_old;
+	mutex_unlock(&rdev->cp.mutex);
 }
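
radeon_ring_alloc(), split out above so that radeon_ring_lock() is nothing but
mutex-plus-alloc, rounds the request up to the fetch alignment and then loops:
refresh the free count, and if there is still not enough room, wait on the next
fence so retired GPU work advances rptr. A compilable sketch of that loop with
stubbed-out hardware calls (every name here is a stand-in, not the driver's):

    #include <stdio.h>

    struct ring {
        unsigned wptr, wptr_old, count_dw, ring_free_dw, align_mask;
    };

    /* Stand-ins for re-reading rptr and for radeon_fence_wait_next();
     * this fake "GPU" retires 32 dwords per fence wait. */
    static void ring_update_free_dw(struct ring *cp) { (void)cp; }
    static int fence_wait_next(struct ring *cp) { cp->ring_free_dw += 32; return 0; }

    static int ring_alloc(struct ring *cp, unsigned ndw)
    {
        int r;

        /* Round up so the commit path can pad to the fetch alignment
         * without overflowing this reservation. */
        ndw = (ndw + cp->align_mask) & ~cp->align_mask;
        while (ndw > (cp->ring_free_dw - 1)) {
            ring_update_free_dw(cp);
            if (ndw < cp->ring_free_dw)
                break;
            r = fence_wait_next(cp);
            if (r)
                return r;
        }
        cp->count_dw = ndw;
        cp->wptr_old = cp->wptr; /* checkpoint for the unlock_undo path */
        return 0;
    }

    int main(void)
    {
        struct ring cp = { .wptr = 0, .ring_free_dw = 16, .align_mask = 15 };

        if (ring_alloc(&cp, 20) == 0)
            printf("reserved %u dwords\n", cp.count_dw); /* 32 after padding */
        return 0;
    }
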
- DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r); - mutex_unlock(&rdev->cp.mutex); + dev_err(rdev->dev, "(%d) ring map failed\n", r); return r; } } @@ -414,15 +360,24 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) void radeon_ring_fini(struct radeon_device *rdev) { + int r; + struct radeon_bo *ring_obj; + mutex_lock(&rdev->cp.mutex); - if (rdev->cp.ring_obj) { - radeon_object_kunmap(rdev->cp.ring_obj); - radeon_object_unpin(rdev->cp.ring_obj); - radeon_object_unref(&rdev->cp.ring_obj); - rdev->cp.ring = NULL; - rdev->cp.ring_obj = NULL; - } + ring_obj = rdev->cp.ring_obj; + rdev->cp.ring = NULL; + rdev->cp.ring_obj = NULL; mutex_unlock(&rdev->cp.mutex); + + if (ring_obj) { + r = radeon_bo_reserve(ring_obj, false); + if (likely(r == 0)) { + radeon_bo_kunmap(ring_obj); + radeon_bo_unpin(ring_obj); + radeon_bo_unreserve(ring_obj); + } + radeon_bo_unref(&ring_obj); + } } @@ -439,7 +394,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data) if (ib == NULL) { return 0; } - seq_printf(m, "IB %04lu\n", ib->idx); + seq_printf(m, "IB %04u\n", ib->idx); seq_printf(m, "IB fence %p\n", ib->fence); seq_printf(m, "IB size %05u dwords\n", ib->length_dw); for (i = 0; i < ib->length_dw; i++) { @@ -448,15 +403,49 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data) return 0; } +static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct radeon_device *rdev = node->info_ent->data; + struct radeon_ib *ib; + unsigned i; + + mutex_lock(&rdev->ib_pool.mutex); + if (list_empty(&rdev->ib_pool.bogus_ib)) { + mutex_unlock(&rdev->ib_pool.mutex); + seq_printf(m, "no bogus IB recorded\n"); + return 0; + } + ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list); + list_del_init(&ib->list); + mutex_unlock(&rdev->ib_pool.mutex); + seq_printf(m, "IB size %05u dwords\n", ib->length_dw); + for (i = 0; i < ib->length_dw; i++) { + seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]); + } + vfree(ib->ptr); + kfree(ib); + return 0; +} + static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE]; static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32]; + +static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = { + {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL}, +}; #endif int radeon_debugfs_ib_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) unsigned i; + int r; + radeon_debugfs_ib_bogus_info_list[0].data = rdev; + r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1); + if (r) + return r; for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i); radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];