X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fgpu%2Fdrm%2Fi915%2Fi915_gem.c;h=4a24c90fb940d0d96e9937588e2437bcf3f51960;hb=8e7d2b2c6ecd3c21a54b877eae3d5be48292e6b5;hp=9ac73dd1b422db554758061d7d8786403bf64b9e;hpb=6dbe2772d6af067845bab57be490c302f4490ac7;p=safe%2Fjmp%2Flinux-2.6 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9ac73dd..4a24c90 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -30,51 +30,74 @@ #include "i915_drm.h" #include "i915_drv.h" #include - -static int -i915_gem_object_set_domain(struct drm_gem_object *obj, - uint32_t read_domains, - uint32_t write_domain); -static int -i915_gem_object_set_domain_range(struct drm_gem_object *obj, - uint64_t offset, - uint64_t size, - uint32_t read_domains, - uint32_t write_domain); -static int -i915_gem_set_domain(struct drm_gem_object *obj, - struct drm_file *file_priv, - uint32_t read_domains, - uint32_t write_domain); -static int i915_gem_object_get_page_list(struct drm_gem_object *obj); -static void i915_gem_object_free_page_list(struct drm_gem_object *obj); +#include + +#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) + +static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); +static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); +static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); +static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, + int write); +static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, + uint64_t offset, + uint64_t size); +static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); +static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, + unsigned alignment); +static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write); +static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); +static int i915_gem_evict_something(struct drm_device *dev); +static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv); + +int i915_gem_do_init(struct drm_device *dev, unsigned long start, + unsigned long end) +{ + drm_i915_private_t *dev_priv = dev->dev_private; -static void -i915_gem_cleanup_ringbuffer(struct drm_device *dev); + if (start >= end || + (start & (PAGE_SIZE - 1)) != 0 || + (end & (PAGE_SIZE - 1)) != 0) { + return -EINVAL; + } + + drm_mm_init(&dev_priv->mm.gtt_space, start, + end - start); + + dev->gtt_total = (uint32_t) (end - start); + + return 0; +} int i915_gem_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_init *args = data; + int ret; mutex_lock(&dev->struct_mutex); + ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end); + mutex_unlock(&dev->struct_mutex); - if (args->gtt_start >= args->gtt_end || - (args->gtt_start & (PAGE_SIZE - 1)) != 0 || - (args->gtt_end & (PAGE_SIZE - 1)) != 0) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } + return ret; +} - drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start, - args->gtt_end - args->gtt_start); +int +i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_get_aperture *args = data; - dev->gtt_total = (uint32_t) (args->gtt_end - 
args->gtt_start); + if (!(dev->driver->driver_features & DRIVER_GEM)) + return -ENODEV; - mutex_unlock(&dev->struct_mutex); + args->aper_size = dev->gtt_total; + args->aper_available_size = (args->aper_size - + atomic_read(&dev->pin_memory)); return 0; } @@ -111,6 +134,306 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, return 0; } +static inline int +fast_shmem_read(struct page **pages, + loff_t page_base, int page_offset, + char __user *data, + int length) +{ + char __iomem *vaddr; + int unwritten; + + vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); + if (vaddr == NULL) + return -ENOMEM; + unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length); + kunmap_atomic(vaddr, KM_USER0); + + if (unwritten) + return -EFAULT; + + return 0; +} + +static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) +{ + drm_i915_private_t *dev_priv = obj->dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + + return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && + obj_priv->tiling_mode != I915_TILING_NONE; +} + +static inline int +slow_shmem_copy(struct page *dst_page, + int dst_offset, + struct page *src_page, + int src_offset, + int length) +{ + char *dst_vaddr, *src_vaddr; + + dst_vaddr = kmap_atomic(dst_page, KM_USER0); + if (dst_vaddr == NULL) + return -ENOMEM; + + src_vaddr = kmap_atomic(src_page, KM_USER1); + if (src_vaddr == NULL) { + kunmap_atomic(dst_vaddr, KM_USER0); + return -ENOMEM; + } + + memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length); + + kunmap_atomic(src_vaddr, KM_USER1); + kunmap_atomic(dst_vaddr, KM_USER0); + + return 0; +} + +static inline int +slow_shmem_bit17_copy(struct page *gpu_page, + int gpu_offset, + struct page *cpu_page, + int cpu_offset, + int length, + int is_read) +{ + char *gpu_vaddr, *cpu_vaddr; + + /* Use the unswizzled path if this page isn't affected. */ + if ((page_to_phys(gpu_page) & (1 << 17)) == 0) { + if (is_read) + return slow_shmem_copy(cpu_page, cpu_offset, + gpu_page, gpu_offset, length); + else + return slow_shmem_copy(gpu_page, gpu_offset, + cpu_page, cpu_offset, length); + } + + gpu_vaddr = kmap_atomic(gpu_page, KM_USER0); + if (gpu_vaddr == NULL) + return -ENOMEM; + + cpu_vaddr = kmap_atomic(cpu_page, KM_USER1); + if (cpu_vaddr == NULL) { + kunmap_atomic(gpu_vaddr, KM_USER0); + return -ENOMEM; + } + + /* Copy the data, XORing A6 with A17 (1). The user already knows he's + * XORing with the other bits (A9 for Y, A9 and A10 for X) + */ + while (length > 0) { + int cacheline_end = ALIGN(gpu_offset + 1, 64); + int this_length = min(cacheline_end - gpu_offset, length); + int swizzled_gpu_offset = gpu_offset ^ 64; + + if (is_read) { + memcpy(cpu_vaddr + cpu_offset, + gpu_vaddr + swizzled_gpu_offset, + this_length); + } else { + memcpy(gpu_vaddr + swizzled_gpu_offset, + cpu_vaddr + cpu_offset, + this_length); + } + cpu_offset += this_length; + gpu_offset += this_length; + length -= this_length; + } + + kunmap_atomic(cpu_vaddr, KM_USER1); + kunmap_atomic(gpu_vaddr, KM_USER0); + + return 0; +} + +/** + * This is the fast shmem pread path, which attempts to copy_from_user directly + * from the backing pages of the object to the user's address space. On a + * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow(). 
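+ *
+ * Worked example, assuming PAGE_SIZE == 4096: a read of 6000 bytes at
+ * offset 3000 is split by the loop below into 1096 bytes from page 0
+ * (page_offset 3000), 4096 bytes from page 1 and 808 bytes from page 2.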
+ */ +static int +i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pread *args, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + ssize_t remain; + loff_t offset, page_base; + char __user *user_data; + int page_offset, page_length; + int ret; + + user_data = (char __user *) (uintptr_t) args->data_ptr; + remain = args->size; + + mutex_lock(&dev->struct_mutex); + + ret = i915_gem_object_get_pages(obj); + if (ret != 0) + goto fail_unlock; + + ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, + args->size); + if (ret != 0) + goto fail_put_pages; + + obj_priv = obj->driver_private; + offset = args->offset; + + while (remain > 0) { + /* Operation in this page + * + * page_base = page offset within aperture + * page_offset = offset within page + * page_length = bytes to copy for this page + */ + page_base = (offset & ~(PAGE_SIZE-1)); + page_offset = offset & (PAGE_SIZE-1); + page_length = remain; + if ((page_offset + remain) > PAGE_SIZE) + page_length = PAGE_SIZE - page_offset; + + ret = fast_shmem_read(obj_priv->pages, + page_base, page_offset, + user_data, page_length); + if (ret) + goto fail_put_pages; + + remain -= page_length; + user_data += page_length; + offset += page_length; + } + +fail_put_pages: + i915_gem_object_put_pages(obj); +fail_unlock: + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +/** + * This is the fallback shmem pread path, which allocates temporary storage + * in kernel space to copy_to_user into outside of the struct_mutex, so we + * can copy out of the object's backing pages while holding the struct mutex + * and not take page faults. + */ +static int +i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pread *args, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct mm_struct *mm = current->mm; + struct page **user_pages; + ssize_t remain; + loff_t offset, pinned_pages, i; + loff_t first_data_page, last_data_page, num_pages; + int shmem_page_index, shmem_page_offset; + int data_page_index, data_page_offset; + int page_length; + int ret; + uint64_t data_ptr = args->data_ptr; + int do_bit17_swizzling; + + remain = args->size; + + /* Pin the user pages containing the data. We can't fault while + * holding the struct mutex, yet we want to hold it while + * dereferencing the user data. 
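+	 *
+	 * E.g., assuming PAGE_SIZE == 4096, a 100-byte read whose data_ptr
+	 * begins 50 bytes before a page boundary spans two user pages, so
+	 * first/last_data_page below cause both to be pinned.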
+ */ + first_data_page = data_ptr / PAGE_SIZE; + last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; + num_pages = last_data_page - first_data_page + 1; + + user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); + if (user_pages == NULL) + return -ENOMEM; + + down_read(&mm->mmap_sem); + pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, + num_pages, 1, 0, user_pages, NULL); + up_read(&mm->mmap_sem); + if (pinned_pages < num_pages) { + ret = -EFAULT; + goto fail_put_user_pages; + } + + do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); + + mutex_lock(&dev->struct_mutex); + + ret = i915_gem_object_get_pages(obj); + if (ret != 0) + goto fail_unlock; + + ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, + args->size); + if (ret != 0) + goto fail_put_pages; + + obj_priv = obj->driver_private; + offset = args->offset; + + while (remain > 0) { + /* Operation in this page + * + * shmem_page_index = page number within shmem file + * shmem_page_offset = offset within page in shmem file + * data_page_index = page number in get_user_pages return + * data_page_offset = offset with data_page_index page. + * page_length = bytes to copy for this page + */ + shmem_page_index = offset / PAGE_SIZE; + shmem_page_offset = offset & ~PAGE_MASK; + data_page_index = data_ptr / PAGE_SIZE - first_data_page; + data_page_offset = data_ptr & ~PAGE_MASK; + + page_length = remain; + if ((shmem_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - shmem_page_offset; + if ((data_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - data_page_offset; + + if (do_bit17_swizzling) { + ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], + shmem_page_offset, + user_pages[data_page_index], + data_page_offset, + page_length, + 1); + } else { + ret = slow_shmem_copy(user_pages[data_page_index], + data_page_offset, + obj_priv->pages[shmem_page_index], + shmem_page_offset, + page_length); + } + if (ret) + goto fail_put_pages; + + remain -= page_length; + data_ptr += page_length; + offset += page_length; + } + +fail_put_pages: + i915_gem_object_put_pages(obj); +fail_unlock: + mutex_unlock(&dev->struct_mutex); +fail_put_user_pages: + for (i = 0; i < pinned_pages; i++) { + SetPageDirty(user_pages[i]); + page_cache_release(user_pages[i]); + } + drm_free_large(user_pages); + + return ret; +} + /** * Reads data from the object referenced by handle. 
* @@ -123,8 +446,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_pread *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; - ssize_t read; - loff_t offset; int ret; obj = drm_gem_object_lookup(dev, file_priv, args->handle); @@ -142,50 +463,103 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - mutex_lock(&dev->struct_mutex); - - ret = i915_gem_object_set_domain_range(obj, args->offset, args->size, - I915_GEM_DOMAIN_CPU, 0); - if (ret != 0) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return ret; + if (i915_gem_object_needs_bit17_swizzle(obj)) { + ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); + } else { + ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); + if (ret != 0) + ret = i915_gem_shmem_pread_slow(dev, obj, args, + file_priv); } - offset = args->offset; + drm_gem_object_unreference(obj); - read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr, - args->size, &offset); - if (read != args->size) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - if (read < 0) - return read; - else - return -EINVAL; - } + return ret; +} - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); +/* This is the fast write path which cannot handle + * page faults in the source data + */ + +static inline int +fast_user_write(struct io_mapping *mapping, + loff_t page_base, int page_offset, + char __user *user_data, + int length) +{ + char *vaddr_atomic; + unsigned long unwritten; + vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); + unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, + user_data, length); + io_mapping_unmap_atomic(vaddr_atomic); + if (unwritten) + return -EFAULT; + return 0; +} + +/* Here's the write path which can sleep for + * page faults + */ + +static inline int +slow_kernel_write(struct io_mapping *mapping, + loff_t gtt_base, int gtt_offset, + struct page *user_page, int user_offset, + int length) +{ + char *src_vaddr, *dst_vaddr; + unsigned long unwritten; + + dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base); + src_vaddr = kmap_atomic(user_page, KM_USER1); + unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset, + src_vaddr + user_offset, + length); + kunmap_atomic(src_vaddr, KM_USER1); + io_mapping_unmap_atomic(dst_vaddr); + if (unwritten) + return -EFAULT; return 0; } +static inline int +fast_shmem_write(struct page **pages, + loff_t page_base, int page_offset, + char __user *data, + int length) +{ + char __iomem *vaddr; + unsigned long unwritten; + + vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); + if (vaddr == NULL) + return -ENOMEM; + unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length); + kunmap_atomic(vaddr, KM_USER0); + + if (unwritten) + return -EFAULT; + return 0; +} + +/** + * This is the fast pwrite path, where we copy the data directly from the + * user into the GTT, uncached. 
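+ *
+ * Sketch of the flow: pin the object, move it to the GTT write domain,
+ * then copy page by page through the write-combined aperture mapping
+ * (fast_user_write). A fault on the user source returns -EFAULT so the
+ * ioctl can retry via the slow path.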
+ */ static int -i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, - struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) +i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) { struct drm_i915_gem_object *obj_priv = obj->driver_private; + drm_i915_private_t *dev_priv = dev->dev_private; ssize_t remain; - loff_t offset; + loff_t offset, page_base; char __user *user_data; - char __iomem *vaddr; - char *vaddr_atomic; - int i, o, l; - int ret = 0; - unsigned long pfn; - unsigned long unwritten; + int page_offset, page_length; + int ret; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; @@ -199,79 +573,40 @@ i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, mutex_unlock(&dev->struct_mutex); return ret; } - ret = i915_gem_set_domain(obj, file_priv, - I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT); + ret = i915_gem_object_set_to_gtt_domain(obj, 1); if (ret) goto fail; obj_priv = obj->driver_private; offset = obj_priv->gtt_offset + args->offset; - obj_priv->dirty = 1; while (remain > 0) { /* Operation in this page * - * i = page number - * o = offset within page - * l = bytes to copy + * page_base = page offset within aperture + * page_offset = offset within page + * page_length = bytes to copy for this page */ - i = offset >> PAGE_SHIFT; - o = offset & (PAGE_SIZE-1); - l = remain; - if ((o + l) > PAGE_SIZE) - l = PAGE_SIZE - o; - - pfn = (dev->agp->base >> PAGE_SHIFT) + i; - -#ifdef CONFIG_HIGHMEM - /* This is a workaround for the low performance of iounmap - * (approximate 10% cpu cost on normal 3D workloads). - * kmap_atomic on HIGHMEM kernels happens to let us map card - * memory without taking IPIs. When the vmap rework lands - * we should be able to dump this hack. + page_base = (offset & ~(PAGE_SIZE-1)); + page_offset = offset & (PAGE_SIZE-1); + page_length = remain; + if ((page_offset + remain) > PAGE_SIZE) + page_length = PAGE_SIZE - page_offset; + + ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base, + page_offset, user_data, page_length); + + /* If we get a fault while copying data, then (presumably) our + * source page isn't available. Return the error and we'll + * retry in the slow path. 
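+		 * The -EFAULT propagates out to i915_gem_pwrite_ioctl(),
+		 * which pins the user pages and retries with
+		 * i915_gem_gtt_pwrite_slow().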
*/ - vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0); -#if WATCH_PWRITE - DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n", - i, o, l, pfn, vaddr_atomic); -#endif - unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o, - user_data, l); - kunmap_atomic(vaddr_atomic, KM_USER0); - - if (unwritten) -#endif /* CONFIG_HIGHMEM */ - { - vaddr = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE); -#if WATCH_PWRITE - DRM_INFO("pwrite slow i %d o %d l %d " - "pfn %ld vaddr %p\n", - i, o, l, pfn, vaddr); -#endif - if (vaddr == NULL) { - ret = -EFAULT; - goto fail; - } - unwritten = __copy_from_user(vaddr + o, user_data, l); -#if WATCH_PWRITE - DRM_INFO("unwritten %ld\n", unwritten); -#endif - iounmap(vaddr); - if (unwritten) { - ret = -EFAULT; - goto fail; - } - } + if (ret) + goto fail; - remain -= l; - user_data += l; - offset += l; + remain -= page_length; + user_data += page_length; + offset += page_length; } -#if WATCH_PWRITE && 1 - i915_gem_clflush_object(obj); - i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0); - i915_gem_clflush_object(obj); -#endif fail: i915_gem_object_unpin(obj); @@ -280,40 +615,296 @@ fail: return ret; } +/** + * This is the fallback GTT pwrite path, which uses get_user_pages to pin + * the memory and maps it using kmap_atomic for copying. + * + * This code resulted in x11perf -rgb10text consuming about 10% more CPU + * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). + */ static int -i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj, - struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) +i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) { + struct drm_i915_gem_object *obj_priv = obj->driver_private; + drm_i915_private_t *dev_priv = dev->dev_private; + ssize_t remain; + loff_t gtt_page_base, offset; + loff_t first_data_page, last_data_page, num_pages; + loff_t pinned_pages, i; + struct page **user_pages; + struct mm_struct *mm = current->mm; + int gtt_page_offset, data_page_offset, data_page_index, page_length; int ret; - loff_t offset; - ssize_t written; + uint64_t data_ptr = args->data_ptr; + + remain = args->size; + + /* Pin the user pages containing the data. We can't fault while + * holding the struct mutex, and all of the pwrite implementations + * want to hold it while dereferencing the user data. 
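+	 *
+	 * Note the pages are pinned for reading only (the write argument
+	 * of get_user_pages() below is 0); this path never writes to the
+	 * user buffer.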
+ */ + first_data_page = data_ptr / PAGE_SIZE; + last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; + num_pages = last_data_page - first_data_page + 1; + + user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); + if (user_pages == NULL) + return -ENOMEM; + + down_read(&mm->mmap_sem); + pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, + num_pages, 0, 0, user_pages, NULL); + up_read(&mm->mmap_sem); + if (pinned_pages < num_pages) { + ret = -EFAULT; + goto out_unpin_pages; + } mutex_lock(&dev->struct_mutex); + ret = i915_gem_object_pin(obj, 0); + if (ret) + goto out_unlock; - ret = i915_gem_set_domain(obj, file_priv, - I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU); - if (ret) { - mutex_unlock(&dev->struct_mutex); - return ret; + ret = i915_gem_object_set_to_gtt_domain(obj, 1); + if (ret) + goto out_unpin_object; + + obj_priv = obj->driver_private; + offset = obj_priv->gtt_offset + args->offset; + + while (remain > 0) { + /* Operation in this page + * + * gtt_page_base = page offset within aperture + * gtt_page_offset = offset within page in aperture + * data_page_index = page number in get_user_pages return + * data_page_offset = offset with data_page_index page. + * page_length = bytes to copy for this page + */ + gtt_page_base = offset & PAGE_MASK; + gtt_page_offset = offset & ~PAGE_MASK; + data_page_index = data_ptr / PAGE_SIZE - first_data_page; + data_page_offset = data_ptr & ~PAGE_MASK; + + page_length = remain; + if ((gtt_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - gtt_page_offset; + if ((data_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - data_page_offset; + + ret = slow_kernel_write(dev_priv->mm.gtt_mapping, + gtt_page_base, gtt_page_offset, + user_pages[data_page_index], + data_page_offset, + page_length); + + /* If we get a fault while copying data, then (presumably) our + * source page isn't available. Return the error and we'll + * retry in the slow path. + */ + if (ret) + goto out_unpin_object; + + remain -= page_length; + offset += page_length; + data_ptr += page_length; } +out_unpin_object: + i915_gem_object_unpin(obj); +out_unlock: + mutex_unlock(&dev->struct_mutex); +out_unpin_pages: + for (i = 0; i < pinned_pages; i++) + page_cache_release(user_pages[i]); + drm_free_large(user_pages); + + return ret; +} + +/** + * This is the fast shmem pwrite path, which attempts to directly + * copy_from_user into the kmapped pages backing the object. 
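+ *
+ * As in the pread fast path, fast_shmem_write() uses
+ * __copy_from_user_inatomic(), which fails rather than faulting while
+ * struct_mutex is held; on -EFAULT the ioctl falls back to
+ * i915_gem_shmem_pwrite_slow().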
+ */ +static int +i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + ssize_t remain; + loff_t offset, page_base; + char __user *user_data; + int page_offset, page_length; + int ret; + + user_data = (char __user *) (uintptr_t) args->data_ptr; + remain = args->size; + + mutex_lock(&dev->struct_mutex); + + ret = i915_gem_object_get_pages(obj); + if (ret != 0) + goto fail_unlock; + + ret = i915_gem_object_set_to_cpu_domain(obj, 1); + if (ret != 0) + goto fail_put_pages; + + obj_priv = obj->driver_private; offset = args->offset; + obj_priv->dirty = 1; - written = vfs_write(obj->filp, - (char __user *)(uintptr_t) args->data_ptr, - args->size, &offset); - if (written != args->size) { - mutex_unlock(&dev->struct_mutex); - if (written < 0) - return written; - else - return -EINVAL; + while (remain > 0) { + /* Operation in this page + * + * page_base = page offset within aperture + * page_offset = offset within page + * page_length = bytes to copy for this page + */ + page_base = (offset & ~(PAGE_SIZE-1)); + page_offset = offset & (PAGE_SIZE-1); + page_length = remain; + if ((page_offset + remain) > PAGE_SIZE) + page_length = PAGE_SIZE - page_offset; + + ret = fast_shmem_write(obj_priv->pages, + page_base, page_offset, + user_data, page_length); + if (ret) + goto fail_put_pages; + + remain -= page_length; + user_data += page_length; + offset += page_length; } +fail_put_pages: + i915_gem_object_put_pages(obj); +fail_unlock: mutex_unlock(&dev->struct_mutex); - return 0; + return ret; +} + +/** + * This is the fallback shmem pwrite path, which uses get_user_pages to pin + * the memory and maps it using kmap_atomic for copying. + * + * This avoids taking mmap_sem for faulting on the user's address while the + * struct_mutex is held. + */ +static int +i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct mm_struct *mm = current->mm; + struct page **user_pages; + ssize_t remain; + loff_t offset, pinned_pages, i; + loff_t first_data_page, last_data_page, num_pages; + int shmem_page_index, shmem_page_offset; + int data_page_index, data_page_offset; + int page_length; + int ret; + uint64_t data_ptr = args->data_ptr; + int do_bit17_swizzling; + + remain = args->size; + + /* Pin the user pages containing the data. We can't fault while + * holding the struct mutex, and all of the pwrite implementations + * want to hold it while dereferencing the user data. 
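+	 *
+	 * num_pages below counts the user pages spanned by the source
+	 * buffer; e.g. a page-aligned, PAGE_SIZE-sized write pins exactly
+	 * one page.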
+ */ + first_data_page = data_ptr / PAGE_SIZE; + last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; + num_pages = last_data_page - first_data_page + 1; + + user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); + if (user_pages == NULL) + return -ENOMEM; + + down_read(&mm->mmap_sem); + pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, + num_pages, 0, 0, user_pages, NULL); + up_read(&mm->mmap_sem); + if (pinned_pages < num_pages) { + ret = -EFAULT; + goto fail_put_user_pages; + } + + do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); + + mutex_lock(&dev->struct_mutex); + + ret = i915_gem_object_get_pages(obj); + if (ret != 0) + goto fail_unlock; + + ret = i915_gem_object_set_to_cpu_domain(obj, 1); + if (ret != 0) + goto fail_put_pages; + + obj_priv = obj->driver_private; + offset = args->offset; + obj_priv->dirty = 1; + + while (remain > 0) { + /* Operation in this page + * + * shmem_page_index = page number within shmem file + * shmem_page_offset = offset within page in shmem file + * data_page_index = page number in get_user_pages return + * data_page_offset = offset with data_page_index page. + * page_length = bytes to copy for this page + */ + shmem_page_index = offset / PAGE_SIZE; + shmem_page_offset = offset & ~PAGE_MASK; + data_page_index = data_ptr / PAGE_SIZE - first_data_page; + data_page_offset = data_ptr & ~PAGE_MASK; + + page_length = remain; + if ((shmem_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - shmem_page_offset; + if ((data_page_offset + page_length) > PAGE_SIZE) + page_length = PAGE_SIZE - data_page_offset; + + if (do_bit17_swizzling) { + ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], + shmem_page_offset, + user_pages[data_page_index], + data_page_offset, + page_length, + 0); + } else { + ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], + shmem_page_offset, + user_pages[data_page_index], + data_page_offset, + page_length); + } + if (ret) + goto fail_put_pages; + + remain -= page_length; + data_ptr += page_length; + offset += page_length; + } + +fail_put_pages: + i915_gem_object_put_pages(obj); +fail_unlock: + mutex_unlock(&dev->struct_mutex); +fail_put_user_pages: + for (i = 0; i < pinned_pages; i++) + page_cache_release(user_pages[i]); + drm_free_large(user_pages); + + return ret; } /** @@ -351,11 +942,24 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * pread/pwrite currently are reading and writing from the CPU * perspective, requiring manual detiling by the client. 
*/ - if (obj_priv->tiling_mode == I915_TILING_NONE && - dev->gtt_total != 0) - ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); - else - ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); + if (obj_priv->phys_obj) + ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); + else if (obj_priv->tiling_mode == I915_TILING_NONE && + dev->gtt_total != 0) { + ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); + if (ret == -EFAULT) { + ret = i915_gem_gtt_pwrite_slow(dev, obj, args, + file_priv); + } + } else if (i915_gem_object_needs_bit17_swizzle(obj)) { + ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv); + } else { + ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv); + if (ret == -EFAULT) { + ret = i915_gem_shmem_pwrite_slow(dev, obj, args, + file_priv); + } + } #if WATCH_PWRITE if (ret) @@ -368,7 +972,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, } /** - * Called when user space prepares to use an object + * Called when user space prepares to use an object with the CPU, either + * through the mmap ioctl's mapping or a GTT mapping. */ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, @@ -376,11 +981,26 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_set_domain *args = data; struct drm_gem_object *obj; + uint32_t read_domains = args->read_domains; + uint32_t write_domain = args->write_domain; int ret; if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; + /* Only handle setting domains to types used by the CPU. */ + if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) + return -EINVAL; + + if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) + return -EINVAL; + + /* Having something in the write domain implies it's in the read + * domain, and only that read domain. Enforce that in the request. + */ + if (write_domain != 0 && read_domains != write_domain) + return -EINVAL; + obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) return -EBADF; @@ -388,10 +1008,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, mutex_lock(&dev->struct_mutex); #if WATCH_BUF DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", - obj, obj->size, args->read_domains, args->write_domain); + obj, obj->size, read_domains, write_domain); #endif - ret = i915_gem_set_domain(obj, file_priv, - args->read_domains, args->write_domain); + if (read_domains & I915_GEM_DOMAIN_GTT) { + ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); + + /* Silently promote "you're not bound, there was nothing to do" + * to success, since the client was just asking us to + * make sure everything was done. 
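+		 * (i915_gem_object_set_to_gtt_domain() returns -EINVAL
+		 * when the object has no GTT space bound.)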
+ */ + if (ret == -EINVAL) + ret = 0; + } else { + ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); + } + drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); return ret; @@ -426,10 +1057,9 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, obj_priv = obj->driver_private; /* Pinned buffers may be scanout, so flush the cache */ - if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) { - i915_gem_clflush_object(obj); - drm_agp_chipset_flush(dev); - } + if (obj_priv->pin_count) + i915_gem_object_flush_cpu_write_domain(obj); + drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); return ret; @@ -476,34 +1106,314 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, return 0; } +/** + * i915_gem_fault - fault a page into the GTT + * vma: VMA in question + * vmf: fault info + * + * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped + * from userspace. The fault handler takes care of binding the object to + * the GTT (if needed), allocating and programming a fence register (again, + * only if needed based on whether the old reg is still valid or the object + * is tiled) and inserting a new PTE into the faulting process. + * + * Note that the faulting process may involve evicting existing objects + * from the GTT and/or fence registers to make room. So performance may + * suffer if the GTT working set is large or there are few fence registers + * left. + */ +int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct drm_gem_object *obj = vma->vm_private_data; + struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + pgoff_t page_offset; + unsigned long pfn; + int ret = 0; + bool write = !!(vmf->flags & FAULT_FLAG_WRITE); + + /* We don't use vmf->pgoff since that has the fake offset */ + page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> + PAGE_SHIFT; + + /* Now bind it into the GTT if needed */ + mutex_lock(&dev->struct_mutex); + if (!obj_priv->gtt_space) { + ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment); + if (ret) { + mutex_unlock(&dev->struct_mutex); + return VM_FAULT_SIGBUS; + } + list_add(&obj_priv->list, &dev_priv->mm.inactive_list); + } + + /* Need a new fence register? */ + if (obj_priv->fence_reg == I915_FENCE_REG_NONE && + obj_priv->tiling_mode != I915_TILING_NONE) { + ret = i915_gem_object_get_fence_reg(obj, write); + if (ret) { + mutex_unlock(&dev->struct_mutex); + return VM_FAULT_SIGBUS; + } + } + + pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + + page_offset; + + /* Finally, remap it using the new GTT offset */ + ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); + + mutex_unlock(&dev->struct_mutex); + + switch (ret) { + case -ENOMEM: + case -EAGAIN: + return VM_FAULT_OOM; + case -EFAULT: + case -EINVAL: + return VM_FAULT_SIGBUS; + default: + return VM_FAULT_NOPAGE; + } +} + +/** + * i915_gem_create_mmap_offset - create a fake mmap offset for an object + * @obj: obj in question + * + * GEM memory mapping works by handing back to userspace a fake mmap offset + * it can use in a subsequent mmap(2) call. The DRM core code then looks + * up the object based on the offset and sets up the various memory mapping + * structures. + * + * This routine allocates and attaches a fake offset for @obj. 
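+ *
+ * The offset comes from the drm_mm allocator behind mm->offset_manager;
+ * the node is hashed into mm->offset_hash so drm_gem_mmap() can map the
+ * offset back to this object, and the value handed to userspace is the
+ * hash key shifted left by PAGE_SHIFT.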
+ */ +static int +i915_gem_create_mmap_offset(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct drm_gem_mm *mm = dev->mm_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_map_list *list; + struct drm_local_map *map; + int ret = 0; + + /* Set the object up for mmap'ing */ + list = &obj->map_list; + list->map = drm_calloc(1, sizeof(struct drm_map_list), + DRM_MEM_DRIVER); + if (!list->map) + return -ENOMEM; + + map = list->map; + map->type = _DRM_GEM; + map->size = obj->size; + map->handle = obj; + + /* Get a DRM GEM mmap offset allocated... */ + list->file_offset_node = drm_mm_search_free(&mm->offset_manager, + obj->size / PAGE_SIZE, 0, 0); + if (!list->file_offset_node) { + DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); + ret = -ENOMEM; + goto out_free_list; + } + + list->file_offset_node = drm_mm_get_block(list->file_offset_node, + obj->size / PAGE_SIZE, 0); + if (!list->file_offset_node) { + ret = -ENOMEM; + goto out_free_list; + } + + list->hash.key = list->file_offset_node->start; + if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) { + DRM_ERROR("failed to add to map hash\n"); + goto out_free_mm; + } + + /* By now we should be all set, any drm_mmap request on the offset + * below will get to our mmap & fault handler */ + obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT; + + return 0; + +out_free_mm: + drm_mm_put_block(list->file_offset_node); +out_free_list: + drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER); + + return ret; +} + static void -i915_gem_object_free_page_list(struct drm_gem_object *obj) +i915_gem_free_mmap_offset(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_gem_mm *mm = dev->mm_private; + struct drm_map_list *list; + + list = &obj->map_list; + drm_ht_remove_item(&mm->offset_hash, &list->hash); + + if (list->file_offset_node) { + drm_mm_put_block(list->file_offset_node); + list->file_offset_node = NULL; + } + + if (list->map) { + drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER); + list->map = NULL; + } + + obj_priv->mmap_offset = 0; +} + +/** + * i915_gem_get_gtt_alignment - return required GTT alignment for an object + * @obj: object to check + * + * Return the required GTT alignment for an object, taking into account + * potential fence register mapping if needed. + */ +static uint32_t +i915_gem_get_gtt_alignment(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int start, i; + + /* + * Minimum alignment is 4k (GTT page size), but might be greater + * if a fence register is needed for the object. + */ + if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE) + return 4096; + + /* + * Previous chips need to be aligned to the size of the smallest + * fence register that can contain the object. + */ + if (IS_I9XX(dev)) + start = 1024*1024; + else + start = 512*1024; + + for (i = start; i < obj->size; i <<= 1) + ; + + return i; +} + +/** + * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing + * @dev: DRM device + * @data: GTT mapping ioctl data + * @file_priv: GEM object info + * + * Simply returns the fake offset to userspace so it can mmap it. + * The mmap call will end up in drm_gem_mmap(), which will set things + * up so we can get faults in the handler above. 
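+ *
+ * Userspace therefore maps an object in two steps: this ioctl to fetch
+ * args->offset, then an mmap(2) of the DRM fd at that offset (the exact
+ * wrapper used is up to libdrm).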
+ * + * The fault handler will take care of binding the object into the GTT + * (since it may have been evicted to make room for something), allocating + * a fence register, and mapping the appropriate aperture address into + * userspace. + */ +int +i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_mmap_gtt *args = data; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + int ret; + + if (!(dev->driver->driver_features & DRIVER_GEM)) + return -ENODEV; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) + return -EBADF; + + mutex_lock(&dev->struct_mutex); + + obj_priv = obj->driver_private; + + if (!obj_priv->mmap_offset) { + ret = i915_gem_create_mmap_offset(obj); + if (ret) { + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return ret; + } + } + + args->offset = obj_priv->mmap_offset; + + obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj); + + /* Make sure the alignment is correct for fence regs etc */ + if (obj_priv->agp_mem && + (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) { + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + + /* + * Pull it into the GTT so that we have a page list (makes the + * initial fault faster and any subsequent flushing possible). + */ + if (!obj_priv->agp_mem) { + ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment); + if (ret) { + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return ret; + } + list_add(&obj_priv->list, &dev_priv->mm.inactive_list); + } + + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +void +i915_gem_object_put_pages(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv = obj->driver_private; int page_count = obj->size / PAGE_SIZE; int i; - if (obj_priv->page_list == NULL) + BUG_ON(obj_priv->pages_refcount == 0); + + if (--obj_priv->pages_refcount != 0) return; + if (obj_priv->tiling_mode != I915_TILING_NONE) + i915_gem_object_save_bit_17_swizzle(obj); for (i = 0; i < page_count; i++) - if (obj_priv->page_list[i] != NULL) { + if (obj_priv->pages[i] != NULL) { if (obj_priv->dirty) - set_page_dirty(obj_priv->page_list[i]); - mark_page_accessed(obj_priv->page_list[i]); - page_cache_release(obj_priv->page_list[i]); + set_page_dirty(obj_priv->pages[i]); + mark_page_accessed(obj_priv->pages[i]); + page_cache_release(obj_priv->pages[i]); } obj_priv->dirty = 0; - drm_free(obj_priv->page_list, - page_count * sizeof(struct page *), - DRM_MEM_DRIVER); - obj_priv->page_list = NULL; + drm_free_large(obj_priv->pages); + obj_priv->pages = NULL; } static void -i915_gem_object_move_to_active(struct drm_gem_object *obj) +i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) { struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -515,10 +1425,24 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj) obj_priv->active = 1; } /* Move from whatever list we were on to the tail of execution. 
*/ + spin_lock(&dev_priv->mm.active_list_lock); list_move_tail(&obj_priv->list, &dev_priv->mm.active_list); + spin_unlock(&dev_priv->mm.active_list_lock); + obj_priv->last_rendering_seqno = seqno; } +static void +i915_gem_object_move_to_flushing(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + + BUG_ON(!obj_priv->active); + list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); + obj_priv->last_rendering_seqno = 0; +} static void i915_gem_object_move_to_inactive(struct drm_gem_object *obj) @@ -533,6 +1457,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) else list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + obj_priv->last_rendering_seqno = 0; if (obj_priv->active) { obj_priv->active = 0; drm_gem_object_unreference(obj); @@ -581,10 +1506,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains) request->seqno = seqno; request->emitted_jiffies = jiffies; - request->flush_domains = flush_domains; was_empty = list_empty(&dev_priv->mm.request_list); list_add_tail(&request->list, &dev_priv->mm.request_list); + /* Associate any objects on the flushing list matching the write + * domain we're flushing with our flush. + */ + if (flush_domains != 0) { + struct drm_i915_gem_object *obj_priv, *next; + + list_for_each_entry_safe(obj_priv, next, + &dev_priv->mm.flushing_list, list) { + struct drm_gem_object *obj = obj_priv->obj; + + if ((obj->write_domain & flush_domains) == + obj->write_domain) { + obj->write_domain = 0; + i915_gem_object_move_to_active(obj, seqno); + } + } + + } + if (was_empty && !dev_priv->mm.suspended) schedule_delayed_work(&dev_priv->mm.retire_work, HZ); return seqno; @@ -627,6 +1570,7 @@ i915_gem_retire_request(struct drm_device *dev, /* Move any buffers on the active list that are no longer referenced * by the ringbuffer to the flushing/inactive lists as appropriate. */ + spin_lock(&dev_priv->mm.active_list_lock); while (!list_empty(&dev_priv->mm.active_list)) { struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; @@ -641,37 +1585,31 @@ i915_gem_retire_request(struct drm_device *dev, * this seqno. */ if (obj_priv->last_rendering_seqno != request->seqno) - return; + goto out; + #if WATCH_LRU DRM_INFO("%s: retire %d moves to inactive list %p\n", __func__, request->seqno, obj); #endif - if (obj->write_domain != 0) { - list_move_tail(&obj_priv->list, - &dev_priv->mm.flushing_list); - } else { + if (obj->write_domain != 0) + i915_gem_object_move_to_flushing(obj); + else { + /* Take a reference on the object so it won't be + * freed while the spinlock is held. The list + * protection for this spinlock is safe when breaking + * the lock like this since the next thing we do + * is just get the head of the list again. + */ + drm_gem_object_reference(obj); i915_gem_object_move_to_inactive(obj); + spin_unlock(&dev_priv->mm.active_list_lock); + drm_gem_object_unreference(obj); + spin_lock(&dev_priv->mm.active_list_lock); } } - - if (request->flush_domains != 0) { - struct drm_i915_gem_object *obj_priv, *next; - - /* Clear the write domain and activity from any buffers - * that are just waiting for a flush matching the one retired. 
- */ - list_for_each_entry_safe(obj_priv, next, - &dev_priv->mm.flushing_list, list) { - struct drm_gem_object *obj = obj_priv->obj; - - if (obj->write_domain & request->flush_domains) { - obj->write_domain = 0; - i915_gem_object_move_to_inactive(obj); - } - } - - } +out: + spin_unlock(&dev_priv->mm.active_list_lock); } /** @@ -700,6 +1638,9 @@ i915_gem_retire_requests(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; uint32_t seqno; + if (!dev_priv->hw_status_page) + return; + seqno = i915_get_gem_seqno(dev); while (!list_empty(&dev_priv->mm.request_list)) { @@ -748,11 +1689,20 @@ static int i915_wait_request(struct drm_device *dev, uint32_t seqno) { drm_i915_private_t *dev_priv = dev->dev_private; + u32 ier; int ret = 0; BUG_ON(seqno == 0); if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { + ier = I915_READ(IER); + if (!ier) { + DRM_ERROR("something (likely vbetool) disabled " + "interrupts, re-enabling\n"); + i915_driver_irq_preinstall(dev); + i915_driver_irq_postinstall(dev); + } + dev_priv->mm.waiting_gem_seqno = seqno; i915_user_irq_get(dev); ret = wait_event_interruptible(dev_priv->irq_queue, @@ -863,25 +1813,10 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj) struct drm_i915_gem_object *obj_priv = obj->driver_private; int ret; - /* If there are writes queued to the buffer, flush and - * create a new seqno to wait for. + /* This function only exists to support waiting for existing rendering, + * not for emitting required flushes. */ - if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) { - uint32_t write_domain = obj->write_domain; -#if WATCH_BUF - DRM_INFO("%s: flushing object %p from write domain %08x\n", - __func__, obj, write_domain); -#endif - i915_gem_flush(dev, 0, write_domain); - - i915_gem_object_move_to_active(obj); - obj_priv->last_rendering_seqno = i915_add_request(dev, - write_domain); - BUG_ON(obj_priv->last_rendering_seqno == 0); -#if WATCH_LRU - DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj); -#endif - } + BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0); /* If there is rendering queued on the buffer being evicted, wait for * it. @@ -902,11 +1837,12 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj) /** * Unbinds an object from the GTT aperture. */ -static int +int i915_gem_object_unbind(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = obj->driver_private; + loff_t offset; int ret = 0; #if WATCH_BUF @@ -921,24 +1857,16 @@ i915_gem_object_unbind(struct drm_gem_object *obj) return -EINVAL; } - /* Wait for any rendering to complete - */ - ret = i915_gem_object_wait_rendering(obj); - if (ret) { - DRM_ERROR("wait_rendering failed: %d\n", ret); - return ret; - } - /* Move the object to the CPU domain to ensure that * any possible CPU writes while it's not in the GTT * are flushed when we go to remap it. This will * also ensure that all pending GPU writes are finished * before we unbind. 
*/ - ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU, - I915_GEM_DOMAIN_CPU); + ret = i915_gem_object_set_to_cpu_domain(obj, 1); if (ret) { - DRM_ERROR("set_domain failed: %d\n", ret); + if (ret != -ERESTARTSYS) + DRM_ERROR("set_domain failed: %d\n", ret); return ret; } @@ -950,7 +1878,15 @@ i915_gem_object_unbind(struct drm_gem_object *obj) BUG_ON(obj_priv->active); - i915_gem_object_free_page_list(obj); + /* blow away mappings if mapped through GTT */ + offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT; + if (dev->dev_mapping) + unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1); + + if (obj_priv->fence_reg != I915_FENCE_REG_NONE) + i915_gem_clear_fence_reg(obj); + + i915_gem_object_put_pages(obj); if (obj_priv->gtt_space) { atomic_dec(&dev->gtt_count); @@ -1054,7 +1990,22 @@ i915_gem_evict_something(struct drm_device *dev) } static int -i915_gem_object_get_page_list(struct drm_gem_object *obj) +i915_gem_evict_everything(struct drm_device *dev) +{ + int ret; + + for (;;) { + ret = i915_gem_evict_something(dev); + if (ret != 0) + break; + } + if (ret == -ENOMEM) + return 0; + return ret; +} + +int +i915_gem_object_get_pages(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv = obj->driver_private; int page_count, i; @@ -1063,18 +2014,18 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj) struct page *page; int ret; - if (obj_priv->page_list) + if (obj_priv->pages_refcount++ != 0) return 0; /* Get the list of pages out of our struct file. They'll be pinned * at this point until we release them. */ page_count = obj->size / PAGE_SIZE; - BUG_ON(obj_priv->page_list != NULL); - obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *), - DRM_MEM_DRIVER); - if (obj_priv->page_list == NULL) { + BUG_ON(obj_priv->pages != NULL); + obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *)); + if (obj_priv->pages == NULL) { DRM_ERROR("Faled to allocate page list\n"); + obj_priv->pages_refcount--; return -ENOMEM; } @@ -1085,15 +2036,281 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj) if (IS_ERR(page)) { ret = PTR_ERR(page); DRM_ERROR("read_mapping_page failed: %d\n", ret); - i915_gem_object_free_page_list(obj); + i915_gem_object_put_pages(obj); return ret; } - obj_priv->page_list[i] = page; + obj_priv->pages[i] = page; + } + + if (obj_priv->tiling_mode != I915_TILING_NONE) + i915_gem_object_do_bit_17_swizzle(obj); + + return 0; +} + +static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) +{ + struct drm_gem_object *obj = reg->obj; + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int regnum = obj_priv->fence_reg; + uint64_t val; + + val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & + 0xfffff000) << 32; + val |= obj_priv->gtt_offset & 0xfffff000; + val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; + if (obj_priv->tiling_mode == I915_TILING_Y) + val |= 1 << I965_FENCE_TILING_Y_SHIFT; + val |= I965_FENCE_REG_VALID; + + I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); +} + +static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) +{ + struct drm_gem_object *obj = reg->obj; + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int regnum = obj_priv->fence_reg; + int tile_width; + uint32_t fence_reg, val; + uint32_t pitch_val; + + if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || 
+ (obj_priv->gtt_offset & (obj->size - 1))) { + WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n", + __func__, obj_priv->gtt_offset, obj->size); + return; + } + + if (obj_priv->tiling_mode == I915_TILING_Y && + HAS_128_BYTE_Y_TILING(dev)) + tile_width = 128; + else + tile_width = 512; + + /* Note: pitch better be a power of two tile widths */ + pitch_val = obj_priv->stride / tile_width; + pitch_val = ffs(pitch_val) - 1; + + val = obj_priv->gtt_offset; + if (obj_priv->tiling_mode == I915_TILING_Y) + val |= 1 << I830_FENCE_TILING_Y_SHIFT; + val |= I915_FENCE_SIZE_BITS(obj->size); + val |= pitch_val << I830_FENCE_PITCH_SHIFT; + val |= I830_FENCE_REG_VALID; + + if (regnum < 8) + fence_reg = FENCE_REG_830_0 + (regnum * 4); + else + fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4); + I915_WRITE(fence_reg, val); +} + +static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) +{ + struct drm_gem_object *obj = reg->obj; + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int regnum = obj_priv->fence_reg; + uint32_t val; + uint32_t pitch_val; + uint32_t fence_size_bits; + + if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) || + (obj_priv->gtt_offset & (obj->size - 1))) { + WARN(1, "%s: object 0x%08x not 512K or size aligned\n", + __func__, obj_priv->gtt_offset); + return; + } + + pitch_val = (obj_priv->stride / 128) - 1; + WARN_ON(pitch_val & ~0x0000000f); + val = obj_priv->gtt_offset; + if (obj_priv->tiling_mode == I915_TILING_Y) + val |= 1 << I830_FENCE_TILING_Y_SHIFT; + fence_size_bits = I830_FENCE_SIZE_BITS(obj->size); + WARN_ON(fence_size_bits & ~0x00000f00); + val |= fence_size_bits; + val |= pitch_val << I830_FENCE_PITCH_SHIFT; + val |= I830_FENCE_REG_VALID; + + I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); + +} + +/** + * i915_gem_object_get_fence_reg - set up a fence reg for an object + * @obj: object to map through a fence reg + * @write: object is about to be written + * + * When mapping objects through the GTT, userspace wants to be able to write + * to them without having to worry about swizzling if the object is tiled. + * + * This function walks the fence regs looking for a free one for @obj, + * stealing one if it can't find any. + * + * It then sets up the reg based on the object's properties: address, pitch + * and tiling format. 
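+ *
+ * The register encoding is generation-specific: see
+ * i965_write_fence_reg(), i915_write_fence_reg() and
+ * i830_write_fence_reg() above for the three layouts.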
+ */ +static int +i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_i915_fence_reg *reg = NULL; + struct drm_i915_gem_object *old_obj_priv = NULL; + int i, ret, avail; + + switch (obj_priv->tiling_mode) { + case I915_TILING_NONE: + WARN(1, "allocating a fence for non-tiled object?\n"); + break; + case I915_TILING_X: + if (!obj_priv->stride) + return -EINVAL; + WARN((obj_priv->stride & (512 - 1)), + "object 0x%08x is X tiled but has non-512B pitch\n", + obj_priv->gtt_offset); + break; + case I915_TILING_Y: + if (!obj_priv->stride) + return -EINVAL; + WARN((obj_priv->stride & (128 - 1)), + "object 0x%08x is Y tiled but has non-128B pitch\n", + obj_priv->gtt_offset); + break; + } + + /* First try to find a free reg */ +try_again: + avail = 0; + for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { + reg = &dev_priv->fence_regs[i]; + if (!reg->obj) + break; + + old_obj_priv = reg->obj->driver_private; + if (!old_obj_priv->pin_count) + avail++; + } + + /* None available, try to steal one or wait for a user to finish */ + if (i == dev_priv->num_fence_regs) { + uint32_t seqno = dev_priv->mm.next_gem_seqno; + loff_t offset; + + if (avail == 0) + return -ENOMEM; + + for (i = dev_priv->fence_reg_start; + i < dev_priv->num_fence_regs; i++) { + uint32_t this_seqno; + + reg = &dev_priv->fence_regs[i]; + old_obj_priv = reg->obj->driver_private; + + if (old_obj_priv->pin_count) + continue; + + /* i915 uses fences for GPU access to tiled buffers */ + if (IS_I965G(dev) || !old_obj_priv->active) + break; + + /* find the seqno of the first available fence */ + this_seqno = old_obj_priv->last_rendering_seqno; + if (this_seqno != 0 && + reg->obj->write_domain == 0 && + i915_seqno_passed(seqno, this_seqno)) + seqno = this_seqno; + } + + /* + * Now things get ugly... we have to wait for one of the + * objects to finish before trying again. + */ + if (i == dev_priv->num_fence_regs) { + if (seqno == dev_priv->mm.next_gem_seqno) { + i915_gem_flush(dev, + I915_GEM_GPU_DOMAINS, + I915_GEM_GPU_DOMAINS); + seqno = i915_add_request(dev, + I915_GEM_GPU_DOMAINS); + if (seqno == 0) + return -ENOMEM; + } + + ret = i915_wait_request(dev, seqno); + if (ret) + return ret; + goto try_again; + } + + BUG_ON(old_obj_priv->active || + (reg->obj->write_domain & I915_GEM_GPU_DOMAINS)); + + /* + * Zap this virtual mapping so we can set up a fence again + * for this object next time we need it. + */ + offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT; + if (dev->dev_mapping) + unmap_mapping_range(dev->dev_mapping, offset, + reg->obj->size, 1); + old_obj_priv->fence_reg = I915_FENCE_REG_NONE; } + + obj_priv->fence_reg = i; + reg->obj = obj; + + if (IS_I965G(dev)) + i965_write_fence_reg(reg); + else if (IS_I9XX(dev)) + i915_write_fence_reg(reg); + else + i830_write_fence_reg(reg); + return 0; } /** + * i915_gem_clear_fence_reg - clear out fence register info + * @obj: object to clear + * + * Zeroes out the fence register itself and clears out the associated + * data structures in dev_priv and obj_priv. 
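+ *
+ * The offset math mirrors the write side: 965 uses 64-bit fence
+ * registers at FENCE_REG_965_0 + reg * 8, while earlier parts use
+ * 32-bit registers 0-7 at FENCE_REG_830_0 and 8+ at FENCE_REG_945_8.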
+ */ +static void +i915_gem_clear_fence_reg(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + + if (IS_I965G(dev)) + I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); + else { + uint32_t fence_reg; + + if (obj_priv->fence_reg < 8) + fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4; + else + fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - + 8) * 4; + + I915_WRITE(fence_reg, 0); + } + + dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL; + obj_priv->fence_reg = I915_FENCE_REG_NONE; +} + +/** * Finds free space in the GTT aperture and binds the object there. */ static int @@ -1105,9 +2322,11 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) struct drm_mm_node *free_space; int page_count, ret; + if (dev_priv->mm.suspended) + return -EBUSY; if (alignment == 0) - alignment = PAGE_SIZE; - if (alignment & (PAGE_SIZE - 1)) { + alignment = i915_gem_get_gtt_alignment(obj); + if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) { DRM_ERROR("Invalid object alignment requested %u\n", alignment); return -EINVAL; } @@ -1124,79 +2343,220 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) } } if (obj_priv->gtt_space == NULL) { + bool lists_empty; + /* If the gtt is empty and we're still having trouble * fitting our object in, we're out of memory. */ #if WATCH_LRU DRM_INFO("%s: GTT full, evicting something\n", __func__); #endif - if (list_empty(&dev_priv->mm.inactive_list) && - list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->mm.active_list)) { + spin_lock(&dev_priv->mm.active_list_lock); + lists_empty = (list_empty(&dev_priv->mm.inactive_list) && + list_empty(&dev_priv->mm.flushing_list) && + list_empty(&dev_priv->mm.active_list)); + spin_unlock(&dev_priv->mm.active_list_lock); + if (lists_empty) { DRM_ERROR("GTT full, but LRU list empty\n"); return -ENOMEM; } - ret = i915_gem_evict_something(dev); - if (ret != 0) { - DRM_ERROR("Failed to evict a buffer %d\n", ret); - return ret; - } - goto search_free; - } + ret = i915_gem_evict_something(dev); + if (ret != 0) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Failed to evict a buffer %d\n", ret); + return ret; + } + goto search_free; + } + +#if WATCH_BUF + DRM_INFO("Binding object of size %d at 0x%08x\n", + obj->size, obj_priv->gtt_offset); +#endif + ret = i915_gem_object_get_pages(obj); + if (ret) { + drm_mm_put_block(obj_priv->gtt_space); + obj_priv->gtt_space = NULL; + return ret; + } + + page_count = obj->size / PAGE_SIZE; + /* Create an AGP memory structure pointing at our pages, and bind it + * into the GTT. + */ + obj_priv->agp_mem = drm_agp_bind_pages(dev, + obj_priv->pages, + page_count, + obj_priv->gtt_offset, + obj_priv->agp_type); + if (obj_priv->agp_mem == NULL) { + i915_gem_object_put_pages(obj); + drm_mm_put_block(obj_priv->gtt_space); + obj_priv->gtt_space = NULL; + return -ENOMEM; + } + atomic_inc(&dev->gtt_count); + atomic_add(obj->size, &dev->gtt_memory); + + /* Assert that the object is not currently in any GPU domain. 
As it + * wasn't in the GTT, there shouldn't be any way it could have been in + * a GPU cache + */ + BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); + BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); + + return 0; +} + +void +i915_gem_clflush_object(struct drm_gem_object *obj) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + + /* If we don't have a page list set up, then we're not pinned + * to GPU, and we can ignore the cache flush because it'll happen + * again at bind time. + */ + if (obj_priv->pages == NULL) + return; + + drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); +} + +/** Flushes any GPU write domain for the object if it's dirty. */ +static void +i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + uint32_t seqno; + + if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) + return; + + /* Queue the GPU write cache flushing we need. */ + i915_gem_flush(dev, 0, obj->write_domain); + seqno = i915_add_request(dev, obj->write_domain); + obj->write_domain = 0; + i915_gem_object_move_to_active(obj, seqno); +} + +/** Flushes the GTT write domain for the object if it's dirty. */ +static void +i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) +{ + if (obj->write_domain != I915_GEM_DOMAIN_GTT) + return; + + /* No actual flushing is required for the GTT write domain. Writes + * to it immediately go to main memory as far as we know, so there's + * no chipset flush. It also doesn't land in render cache. + */ + obj->write_domain = 0; +} + +/** Flushes the CPU write domain for the object if it's dirty. */ +static void +i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + + if (obj->write_domain != I915_GEM_DOMAIN_CPU) + return; + + i915_gem_clflush_object(obj); + drm_agp_chipset_flush(dev); + obj->write_domain = 0; +} + +/** + * Moves a single object to the GTT read, and possibly write domain. + * + * This function returns when the move is complete, including waiting on + * flushes to occur. + */ +int +i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int ret; + + /* Not valid to be called on unbound objects. */ + if (obj_priv->gtt_space == NULL) + return -EINVAL; -#if WATCH_BUF - DRM_INFO("Binding object of size %d at 0x%08x\n", - obj->size, obj_priv->gtt_offset); -#endif - ret = i915_gem_object_get_page_list(obj); - if (ret) { - drm_mm_put_block(obj_priv->gtt_space); - obj_priv->gtt_space = NULL; + i915_gem_object_flush_gpu_write_domain(obj); + /* Wait on any GPU rendering and flushing to occur. */ + ret = i915_gem_object_wait_rendering(obj); + if (ret != 0) return ret; - } - page_count = obj->size / PAGE_SIZE; - /* Create an AGP memory structure pointing at our pages, and bind it - * into the GTT. + /* If we're writing through the GTT domain, then CPU and GPU caches + * will need to be invalidated at next use. */ - obj_priv->agp_mem = drm_agp_bind_pages(dev, - obj_priv->page_list, - page_count, - obj_priv->gtt_offset, - obj_priv->agp_type); - if (obj_priv->agp_mem == NULL) { - i915_gem_object_free_page_list(obj); - drm_mm_put_block(obj_priv->gtt_space); - obj_priv->gtt_space = NULL; - return -ENOMEM; - } - atomic_inc(&dev->gtt_count); - atomic_add(obj->size, &dev->gtt_memory); + if (write) + obj->read_domains &= I915_GEM_DOMAIN_GTT; - /* Assert that the object is not currently in any GPU domain. 
As it - * wasn't in the GTT, there shouldn't be any way it could have been in - * a GPU cache + i915_gem_object_flush_cpu_write_domain(obj); + + /* It should now be out of any other write domains, and we can update + * the domain values for our changes. */ - BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); - BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); + BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); + obj->read_domains |= I915_GEM_DOMAIN_GTT; + if (write) { + obj->write_domain = I915_GEM_DOMAIN_GTT; + obj_priv->dirty = 1; + } return 0; } -void -i915_gem_clflush_object(struct drm_gem_object *obj) +/** + * Moves a single object to the CPU read, and possibly write domain. + * + * This function returns when the move is complete, including waiting on + * flushes to occur. + */ +static int +i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) { - struct drm_i915_gem_object *obj_priv = obj->driver_private; + int ret; - /* If we don't have a page list set up, then we're not pinned - * to GPU, and we can ignore the cache flush because it'll happen - * again at bind time. + i915_gem_object_flush_gpu_write_domain(obj); + /* Wait on any GPU rendering and flushing to occur. */ + ret = i915_gem_object_wait_rendering(obj); + if (ret != 0) + return ret; + + i915_gem_object_flush_gtt_write_domain(obj); + + /* If we have a partially-valid cache of the object in the CPU, + * finish invalidating it and free the per-page flags. */ - if (obj_priv->page_list == NULL) - return; + i915_gem_object_set_to_full_cpu_read_domain(obj); + + /* Flush the CPU cache if it's still invalid. */ + if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { + i915_gem_clflush_object(obj); + + obj->read_domains |= I915_GEM_DOMAIN_CPU; + } + + /* It should now be out of any other write domains, and we can update + * the domain values for our changes. + */ + BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); + + /* If we're writing through the CPU, then the GPU read domains will + * need to be invalidated at next use. 
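
The set_to_*_domain() helpers above all follow one bookkeeping rule: making an object writable in a domain invalidates every other cached read copy, while a plain read just adds a bit. A simplified user-space model (DOM_* bits stand in for the I915_GEM_DOMAIN_* values):

	#include <stdint.h>

	#define DOM_CPU (1u << 0)
	#define DOM_GTT (1u << 1)

	struct domains { uint32_t read, write; };

	static void set_to_domain(struct domains *d, uint32_t dom, int write)
	{
		d->read |= dom;
		if (write) {
			d->read = dom;	/* other cached copies become stale */
			d->write = dom;
		}
	}
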
+ */ + if (write) { + obj->read_domains &= I915_GEM_DOMAIN_CPU; + obj->write_domain = I915_GEM_DOMAIN_CPU; + } - drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); + return 0; } /* @@ -1310,29 +2670,29 @@ i915_gem_clflush_object(struct drm_gem_object *obj) * MI_FLUSH * drm_agp_chipset_flush */ -static int -i915_gem_object_set_domain(struct drm_gem_object *obj, - uint32_t read_domains, - uint32_t write_domain) +static void +i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = obj->driver_private; uint32_t invalidate_domains = 0; uint32_t flush_domains = 0; - int ret; + + BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU); + BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU); #if WATCH_BUF DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", __func__, obj, - obj->read_domains, read_domains, - obj->write_domain, write_domain); + obj->read_domains, obj->pending_read_domains, + obj->write_domain, obj->pending_write_domain); #endif /* * If the object isn't moving to a new write domain, * let the object stay in multiple read domains */ - if (write_domain == 0) - read_domains |= obj->read_domains; + if (obj->pending_write_domain == 0) + obj->pending_read_domains |= obj->read_domains; else obj_priv->dirty = 1; @@ -1342,49 +2702,34 @@ i915_gem_object_set_domain(struct drm_gem_object *obj, * any read domains which differ from the old * write domain */ - if (obj->write_domain && obj->write_domain != read_domains) { + if (obj->write_domain && + obj->write_domain != obj->pending_read_domains) { flush_domains |= obj->write_domain; - invalidate_domains |= read_domains & ~obj->write_domain; + invalidate_domains |= + obj->pending_read_domains & ~obj->write_domain; } /* * Invalidate any read caches which may have * stale data. That is, any new read domains. */ - invalidate_domains |= read_domains & ~obj->read_domains; + invalidate_domains |= obj->pending_read_domains & ~obj->read_domains; if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) { #if WATCH_BUF DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", __func__, flush_domains, invalidate_domains); #endif - /* - * If we're invaliding the CPU cache and flushing a GPU cache, - * then pause for rendering so that the GPU caches will be - * flushed before the cpu cache is invalidated - */ - if ((invalidate_domains & I915_GEM_DOMAIN_CPU) && - (flush_domains & ~(I915_GEM_DOMAIN_CPU | - I915_GEM_DOMAIN_GTT))) { - ret = i915_gem_object_wait_rendering(obj); - if (ret) - return ret; - } i915_gem_clflush_object(obj); } - if ((write_domain | flush_domains) != 0) - obj->write_domain = write_domain; - - /* If we're invalidating the CPU domain, clear the per-page CPU - * domain list as well. + /* The actual obj->write_domain will be updated with + * pending_write_domain after we emit the accumulated flush for all + * of our domain changes in execbuffers (which clears objects' + * write_domains). So if we have a current write domain that we + * aren't changing, set pending_write_domain to that. 
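
The accumulation above can be read as a pure function of old and pending domains: flush the old write domain when it is not among the new readers, and invalidate any newly added read domains. A sketch under those definitions (parameter names are illustrative):

	#include <stdint.h>

	static void accumulate(uint32_t old_read, uint32_t old_write,
			       uint32_t new_read, uint32_t new_write,
			       uint32_t *invalidate, uint32_t *flush)
	{
		if (new_write == 0)
			new_read |= old_read;	/* stay readable in old domains */
		if (old_write && old_write != new_read) {
			*flush |= old_write;
			*invalidate |= new_read & ~old_write;
		}
		*invalidate |= new_read & ~old_read;
	}
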
*/ - if (obj_priv->page_cpu_valid != NULL && - (write_domain != 0 || - read_domains & I915_GEM_DOMAIN_CPU)) { - drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE, - DRM_MEM_DRIVER); - obj_priv->page_cpu_valid = NULL; - } - obj->read_domains = read_domains; + if (flush_domains == 0 && obj->pending_write_domain == 0) + obj->pending_write_domain = obj->write_domain; + obj->read_domains = obj->pending_read_domains; dev->invalidate_domains |= invalidate_domains; dev->flush_domains |= flush_domains; @@ -1394,90 +2739,108 @@ i915_gem_object_set_domain(struct drm_gem_object *obj, obj->read_domains, obj->write_domain, dev->invalidate_domains, dev->flush_domains); #endif - return 0; } /** - * Set the read/write domain on a range of the object. + * Moves the object from a partially CPU read to a full one. * - * Currently only implemented for CPU reads, otherwise drops to normal - * i915_gem_object_set_domain(). + * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(), + * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). */ -static int -i915_gem_object_set_domain_range(struct drm_gem_object *obj, - uint64_t offset, - uint64_t size, - uint32_t read_domains, - uint32_t write_domain) +static void +i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv = obj->driver_private; - int ret, i; - if (obj->read_domains & I915_GEM_DOMAIN_CPU) - return 0; + if (!obj_priv->page_cpu_valid) + return; - if (read_domains != I915_GEM_DOMAIN_CPU || - write_domain != 0) - return i915_gem_object_set_domain(obj, - read_domains, write_domain); + /* If we're partially in the CPU read domain, finish moving it in. + */ + if (obj->read_domains & I915_GEM_DOMAIN_CPU) { + int i; - /* Wait on any GPU rendering to the object to be flushed. */ - if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) { - ret = i915_gem_object_wait_rendering(obj); - if (ret) - return ret; + for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { + if (obj_priv->page_cpu_valid[i]) + continue; + drm_clflush_pages(obj_priv->pages + i, 1); + } } + /* Free the page_cpu_valid mappings which are now stale, whether + * or not we've got I915_GEM_DOMAIN_CPU. + */ + drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE, + DRM_MEM_DRIVER); + obj_priv->page_cpu_valid = NULL; +} + +/** + * Set the CPU read domain on a range of the object. + * + * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's + * not entirely valid. The page_cpu_valid member of the object flags which + * pages have been flushed, and will be respected by + * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping + * of the whole object. + * + * This function returns when the move is complete, including waiting on + * flushes to occur. + */ +static int +i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, + uint64_t offset, uint64_t size) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int i, ret; + + if (offset == 0 && size == obj->size) + return i915_gem_object_set_to_cpu_domain(obj, 0); + + i915_gem_object_flush_gpu_write_domain(obj); + /* Wait on any GPU rendering and flushing to occur. */ + ret = i915_gem_object_wait_rendering(obj); + if (ret != 0) + return ret; + i915_gem_object_flush_gtt_write_domain(obj); + + /* If we're already fully in the CPU read domain, we're done. 
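
The per-page loops above walk an inclusive page range covering [offset, offset + size), using the usual "last byte" form for the upper bound. Modeled standalone with a 4 KiB page:

	#include <stdint.h>

	#define PAGE_SZ 4096u

	static void mark_valid(uint8_t *page_valid, uint64_t offset, uint64_t size)
	{
		for (uint64_t i = offset / PAGE_SZ;
		     i <= (offset + size - 1) / PAGE_SZ; i++)
			page_valid[i] = 1;	/* this page has been flushed */
	}
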
*/ + if (obj_priv->page_cpu_valid == NULL && + (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0) + return 0; + + /* Otherwise, create/clear the per-page CPU read domain flag if we're + * newly adding I915_GEM_DOMAIN_CPU + */ if (obj_priv->page_cpu_valid == NULL) { obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, DRM_MEM_DRIVER); - } + if (obj_priv->page_cpu_valid == NULL) + return -ENOMEM; + } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) + memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE); /* Flush the cache on any pages that are still invalid from the CPU's * perspective. */ - for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) { + for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; + i++) { if (obj_priv->page_cpu_valid[i]) continue; - drm_clflush_pages(obj_priv->page_list + i, 1); + drm_clflush_pages(obj_priv->pages + i, 1); obj_priv->page_cpu_valid[i] = 1; } - return 0; -} - -/** - * Once all of the objects have been set in the proper domain, - * perform the necessary flush and invalidate operations. - * - * Returns the write domains flushed, for use in flush tracking. - */ -static uint32_t -i915_gem_dev_set_domain(struct drm_device *dev) -{ - uint32_t flush_domains = dev->flush_domains; - - /* - * Now that all the buffers are synced to the proper domains, - * flush and invalidate the collected domains + /* It should now be out of any other write domains, and we can update + * the domain values for our changes. */ - if (dev->invalidate_domains | dev->flush_domains) { -#if WATCH_EXEC - DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", - __func__, - dev->invalidate_domains, - dev->flush_domains); -#endif - i915_gem_flush(dev, - dev->invalidate_domains, - dev->flush_domains); - dev->invalidate_domains = 0; - dev->flush_domains = 0; - } + BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); - return flush_domains; + obj->read_domains |= I915_GEM_DOMAIN_CPU; + + return 0; } /** @@ -1486,15 +2849,14 @@ i915_gem_dev_set_domain(struct drm_device *dev) static int i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, struct drm_file *file_priv, - struct drm_i915_gem_exec_object *entry) + struct drm_i915_gem_exec_object *entry, + struct drm_i915_gem_relocation_entry *relocs) { struct drm_device *dev = obj->dev; - struct drm_i915_gem_relocation_entry reloc; - struct drm_i915_gem_relocation_entry __user *relocs; + drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = obj->driver_private; int i, ret; - uint32_t last_reloc_offset = -1; - void __iomem *reloc_page = NULL; + void __iomem *reloc_page; /* Choose the GTT offset for our buffer and put it there. */ ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); @@ -1503,25 +2865,18 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, entry->offset = obj_priv->gtt_offset; - relocs = (struct drm_i915_gem_relocation_entry __user *) - (uintptr_t) entry->relocs_ptr; /* Apply the relocations, using the GTT aperture to avoid cache * flushing requirements. 
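
Each relocation that follows patches one 32-bit slot inside the batch with the target object's final GTT address plus a delta. Modeled on a plain buffer (in the driver the store goes through an atomic GTT mapping rather than a direct pointer):

	#include <stdint.h>
	#include <string.h>

	static void apply_reloc(uint8_t *batch, uint32_t offset,
				uint32_t target_gtt_offset, uint32_t delta)
	{
		uint32_t val = target_gtt_offset + delta;

		memcpy(batch + offset, &val, sizeof(val));	/* offset is 4-byte aligned */
	}
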
*/ for (i = 0; i < entry->relocation_count; i++) { + struct drm_i915_gem_relocation_entry *reloc= &relocs[i]; struct drm_gem_object *target_obj; struct drm_i915_gem_object *target_obj_priv; uint32_t reloc_val, reloc_offset; uint32_t __iomem *reloc_entry; - ret = copy_from_user(&reloc, relocs + i, sizeof(reloc)); - if (ret != 0) { - i915_gem_object_unpin(obj); - return ret; - } - target_obj = drm_gem_object_lookup(obj->dev, file_priv, - reloc.target_handle); + reloc->target_handle); if (target_obj == NULL) { i915_gem_object_unpin(obj); return -EBADF; @@ -1533,39 +2888,53 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, */ if (target_obj_priv->gtt_space == NULL) { DRM_ERROR("No GTT space found for object %d\n", - reloc.target_handle); + reloc->target_handle); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; } - if (reloc.offset > obj->size - 4) { + if (reloc->offset > obj->size - 4) { DRM_ERROR("Relocation beyond object bounds: " "obj %p target %d offset %d size %d.\n", - obj, reloc.target_handle, - (int) reloc.offset, (int) obj->size); + obj, reloc->target_handle, + (int) reloc->offset, (int) obj->size); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; } - if (reloc.offset & 3) { + if (reloc->offset & 3) { DRM_ERROR("Relocation not 4-byte aligned: " "obj %p target %d offset %d.\n", - obj, reloc.target_handle, - (int) reloc.offset); + obj, reloc->target_handle, + (int) reloc->offset); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); return -EINVAL; } - if (reloc.write_domain && target_obj->pending_write_domain && - reloc.write_domain != target_obj->pending_write_domain) { + if (reloc->write_domain & I915_GEM_DOMAIN_CPU || + reloc->read_domains & I915_GEM_DOMAIN_CPU) { + DRM_ERROR("reloc with read/write CPU domains: " + "obj %p target %d offset %d " + "read %08x write %08x", + obj, reloc->target_handle, + (int) reloc->offset, + reloc->read_domains, + reloc->write_domain); + drm_gem_object_unreference(target_obj); + i915_gem_object_unpin(obj); + return -EINVAL; + } + + if (reloc->write_domain && target_obj->pending_write_domain && + reloc->write_domain != target_obj->pending_write_domain) { DRM_ERROR("Write domain conflict: " "obj %p target %d offset %d " "new %08x old %08x\n", - obj, reloc.target_handle, - (int) reloc.offset, - reloc.write_domain, + obj, reloc->target_handle, + (int) reloc->offset, + reloc->write_domain, target_obj->pending_write_domain); drm_gem_object_unreference(target_obj); i915_gem_object_unpin(obj); @@ -1578,91 +2947,60 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, "presumed %08x delta %08x\n", __func__, obj, - (int) reloc.offset, - (int) reloc.target_handle, - (int) reloc.read_domains, - (int) reloc.write_domain, + (int) reloc->offset, + (int) reloc->target_handle, + (int) reloc->read_domains, + (int) reloc->write_domain, (int) target_obj_priv->gtt_offset, - (int) reloc.presumed_offset, - reloc.delta); + (int) reloc->presumed_offset, + reloc->delta); #endif - target_obj->pending_read_domains |= reloc.read_domains; - target_obj->pending_write_domain |= reloc.write_domain; + target_obj->pending_read_domains |= reloc->read_domains; + target_obj->pending_write_domain |= reloc->write_domain; /* If the relocation already has the right value in it, no * more work needs to be done. 
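
The checks in the hunk above collect into a single predicate: the slot must fit inside the object, must be 4-byte aligned, and may not name the CPU domain, since relocations are consumed by the GPU. A user-space model (DOM_CPU stands in for I915_GEM_DOMAIN_CPU):

	#include <stdbool.h>
	#include <stdint.h>

	#define DOM_CPU (1u << 0)

	static bool reloc_ok(uint64_t offset, uint64_t obj_size,
			     uint32_t read_domains, uint32_t write_domain)
	{
		if (offset > obj_size - 4)	/* must fit a 32-bit slot */
			return false;
		if (offset & 3)			/* must be 4-byte aligned */
			return false;
		if ((read_domains | write_domain) & DOM_CPU)
			return false;		/* GPU relocs never target the CPU domain */
		return true;
	}
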
*/ - if (target_obj_priv->gtt_offset == reloc.presumed_offset) { + if (target_obj_priv->gtt_offset == reloc->presumed_offset) { drm_gem_object_unreference(target_obj); continue; } - /* Now that we're going to actually write some data in, - * make sure that any rendering using this buffer's contents - * is completed. - */ - i915_gem_object_wait_rendering(obj); - - /* As we're writing through the gtt, flush - * any CPU writes before we write the relocations - */ - if (obj->write_domain & I915_GEM_DOMAIN_CPU) { - i915_gem_clflush_object(obj); - drm_agp_chipset_flush(dev); - obj->write_domain = 0; + ret = i915_gem_object_set_to_gtt_domain(obj, 1); + if (ret != 0) { + drm_gem_object_unreference(target_obj); + i915_gem_object_unpin(obj); + return -EINVAL; } /* Map the page containing the relocation we're going to * perform. */ - reloc_offset = obj_priv->gtt_offset + reloc.offset; - if (reloc_page == NULL || - (last_reloc_offset & ~(PAGE_SIZE - 1)) != - (reloc_offset & ~(PAGE_SIZE - 1))) { - if (reloc_page != NULL) - iounmap(reloc_page); - - reloc_page = ioremap_wc(dev->agp->base + - (reloc_offset & - ~(PAGE_SIZE - 1)), - PAGE_SIZE); - last_reloc_offset = reloc_offset; - if (reloc_page == NULL) { - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -ENOMEM; - } - } - + reloc_offset = obj_priv->gtt_offset + reloc->offset; + reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, + (reloc_offset & + ~(PAGE_SIZE - 1))); reloc_entry = (uint32_t __iomem *)(reloc_page + - (reloc_offset & (PAGE_SIZE - 1))); - reloc_val = target_obj_priv->gtt_offset + reloc.delta; + (reloc_offset & (PAGE_SIZE - 1))); + reloc_val = target_obj_priv->gtt_offset + reloc->delta; #if WATCH_BUF DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", - obj, (unsigned int) reloc.offset, + obj, (unsigned int) reloc->offset, readl(reloc_entry), reloc_val); #endif writel(reloc_val, reloc_entry); + io_mapping_unmap_atomic(reloc_page); - /* Write the updated presumed offset for this entry back out - * to the user. + /* The updated presumed offset for this entry will be + * copied back out to the user. 
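
The relocation copy-in path shown below sums per-buffer relocation counts before allocating, and guards the sum against unsigned wraparound with the "sum + n < sum" idiom. The same check, standalone:

	#include <stdint.h>

	static int total_relocs(const uint32_t *counts, uint32_t nbufs,
				uint32_t *out)
	{
		uint32_t sum = 0;

		for (uint32_t i = 0; i < nbufs; i++) {
			if (sum + counts[i] < sum)	/* unsigned wraparound */
				return -1;
			sum += counts[i];
		}
		*out = sum;
		return 0;
	}
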
*/ - reloc.presumed_offset = target_obj_priv->gtt_offset; - ret = copy_to_user(relocs + i, &reloc, sizeof(reloc)); - if (ret != 0) { - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return ret; - } + reloc->presumed_offset = target_obj_priv->gtt_offset; drm_gem_object_unreference(target_obj); } - if (reloc_page != NULL) - iounmap(reloc_page); - #if WATCH_BUF if (0) i915_gem_dump_object(obj, 128, __func__, ~0); @@ -1675,11 +3013,10 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, static int i915_dispatch_gem_execbuffer(struct drm_device *dev, struct drm_i915_gem_execbuffer *exec, + struct drm_clip_rect *cliprects, uint64_t exec_offset) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *) - (uintptr_t) exec->cliprects_ptr; int nbox = exec->num_cliprects; int i = 0, count; uint32_t exec_start, exec_len; @@ -1700,7 +3037,7 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev, for (i = 0; i < count; i++) { if (i < nbox) { - int ret = i915_emit_box(dev, boxes, i, + int ret = i915_emit_box(dev, cliprects, i, exec->DR1, exec->DR4); if (ret) return ret; @@ -1756,6 +3093,79 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) return ret; } +static int +i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, + uint32_t buffer_count, + struct drm_i915_gem_relocation_entry **relocs) +{ + uint32_t reloc_count = 0, reloc_index = 0, i; + int ret; + + *relocs = NULL; + for (i = 0; i < buffer_count; i++) { + if (reloc_count + exec_list[i].relocation_count < reloc_count) + return -EINVAL; + reloc_count += exec_list[i].relocation_count; + } + + *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); + if (*relocs == NULL) + return -ENOMEM; + + for (i = 0; i < buffer_count; i++) { + struct drm_i915_gem_relocation_entry __user *user_relocs; + + user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; + + ret = copy_from_user(&(*relocs)[reloc_index], + user_relocs, + exec_list[i].relocation_count * + sizeof(**relocs)); + if (ret != 0) { + drm_free_large(*relocs); + *relocs = NULL; + return -EFAULT; + } + + reloc_index += exec_list[i].relocation_count; + } + + return 0; +} + +static int +i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list, + uint32_t buffer_count, + struct drm_i915_gem_relocation_entry *relocs) +{ + uint32_t reloc_count = 0, i; + int ret = 0; + + for (i = 0; i < buffer_count; i++) { + struct drm_i915_gem_relocation_entry __user *user_relocs; + int unwritten; + + user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; + + unwritten = copy_to_user(user_relocs, + &relocs[reloc_count], + exec_list[i].relocation_count * + sizeof(*relocs)); + + if (unwritten) { + ret = -EFAULT; + goto err; + } + + reloc_count += exec_list[i].relocation_count; + } + +err: + drm_free_large(relocs); + + return ret; +} + int i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -1766,9 +3176,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_i915_gem_exec_object *exec_list = NULL; struct drm_gem_object **object_list = NULL; struct drm_gem_object *batch_obj; - int ret, i, pinned = 0; + struct drm_i915_gem_object *obj_priv; + struct drm_clip_rect *cliprects = NULL; + struct drm_i915_gem_relocation_entry *relocs; + int ret, ret2, i, pinned = 0; uint64_t exec_offset; - uint32_t seqno, flush_domains; + uint32_t seqno, flush_domains, reloc_index; + int pin_tries; #if WATCH_EXEC 
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", @@ -1780,10 +3194,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, return -EINVAL; } /* Copy in the exec list from userland */ - exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count, - DRM_MEM_DRIVER); - object_list = drm_calloc(sizeof(*object_list), args->buffer_count, - DRM_MEM_DRIVER); + exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count); + object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count); if (exec_list == NULL || object_list == NULL) { DRM_ERROR("Failed to allocate exec or object list " "for %d buffers\n", @@ -1801,6 +3213,28 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, goto pre_mutex_err; } + if (args->num_cliprects != 0) { + cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects), + DRM_MEM_DRIVER); + if (cliprects == NULL) + goto pre_mutex_err; + + ret = copy_from_user(cliprects, + (struct drm_clip_rect __user *) + (uintptr_t) args->cliprects_ptr, + sizeof(*cliprects) * args->num_cliprects); + if (ret != 0) { + DRM_ERROR("copy %d cliprects failed: %d\n", + args->num_cliprects, ret); + goto pre_mutex_err; + } + } + + ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count, + &relocs); + if (ret != 0) + goto pre_mutex_err; + mutex_lock(&dev->struct_mutex); i915_verify_inactive(dev, __FILE__, __LINE__); @@ -1808,23 +3242,18 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, if (dev_priv->mm.wedged) { DRM_ERROR("Execbuf while wedged\n"); mutex_unlock(&dev->struct_mutex); - return -EIO; + ret = -EIO; + goto pre_mutex_err; } if (dev_priv->mm.suspended) { DRM_ERROR("Execbuf while VT-switched.\n"); mutex_unlock(&dev->struct_mutex); - return -EBUSY; + ret = -EBUSY; + goto pre_mutex_err; } - /* Zero the gloabl flush/invalidate flags. 
These - * will be modified as each object is bound to the - * gtt - */ - dev->invalidate_domains = 0; - dev->flush_domains = 0; - - /* Look up object handles and perform the relocations */ + /* Look up object handles */ for (i = 0; i < args->buffer_count; i++) { object_list[i] = drm_gem_object_lookup(dev, file_priv, exec_list[i].handle); @@ -1835,16 +3264,53 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, goto err; } - object_list[i]->pending_read_domains = 0; - object_list[i]->pending_write_domain = 0; - ret = i915_gem_object_pin_and_relocate(object_list[i], - file_priv, - &exec_list[i]); - if (ret) { - DRM_ERROR("object bind and relocate failed %d\n", ret); + obj_priv = object_list[i]->driver_private; + if (obj_priv->in_execbuffer) { + DRM_ERROR("Object %p appears more than once in object list\n", + object_list[i]); + ret = -EBADF; + goto err; + } + obj_priv->in_execbuffer = true; + } + + /* Pin and relocate */ + for (pin_tries = 0; ; pin_tries++) { + ret = 0; + reloc_index = 0; + + for (i = 0; i < args->buffer_count; i++) { + object_list[i]->pending_read_domains = 0; + object_list[i]->pending_write_domain = 0; + ret = i915_gem_object_pin_and_relocate(object_list[i], + file_priv, + &exec_list[i], + &relocs[reloc_index]); + if (ret) + break; + pinned = i + 1; + reloc_index += exec_list[i].relocation_count; + } + /* success */ + if (ret == 0) + break; + + /* error other than GTT full, or we've already tried again */ + if (ret != -ENOMEM || pin_tries >= 1) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Failed to pin buffers %d\n", ret); goto err; } - pinned = i + 1; + + /* unpin all of our buffers */ + for (i = 0; i < pinned; i++) + i915_gem_object_unpin(object_list[i]); + pinned = 0; + + /* evict everyone we can from the aperture */ + ret = i915_gem_evict_everything(dev); + if (ret) + goto err; } /* Set the pending read domains for the batch buffer to COMMAND */ @@ -1854,35 +3320,44 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, i915_verify_inactive(dev, __FILE__, __LINE__); + /* Zero the global flush/invalidate flags. These + * will be modified as new domains are computed + * for each object + */ + dev->invalidate_domains = 0; + dev->flush_domains = 0; + + for (i = 0; i < args->buffer_count; i++) { + struct drm_gem_object *obj = object_list[i]; + + /* Compute new gpu domains and update invalidate/flush */ + i915_gem_object_set_to_gpu_domain(obj); + } + + i915_verify_inactive(dev, __FILE__, __LINE__); + + if (dev->invalidate_domains | dev->flush_domains) { +#if WATCH_EXEC + DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", + __func__, + dev->invalidate_domains, + dev->flush_domains); +#endif + i915_gem_flush(dev, + dev->invalidate_domains, + dev->flush_domains); + if (dev->flush_domains) + (void)i915_add_request(dev, dev->flush_domains); + } + for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; - struct drm_i915_gem_object *obj_priv = obj->driver_private; - - if (obj_priv->gtt_space == NULL) { - /* We evicted the buffer in the process of validating - * our set of buffers in. We could try to recover by - * kicking them everything out and trying again from - * the start. 
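
The pin loop above replaces that recovery gap with an explicit retry: pin everything, and on GTT exhaustion unpin what succeeded, evict, and try exactly once more. Its shape, with hypothetical callbacks standing in for the driver functions:

	#include <errno.h>

	static int pin_all(int (*pin)(int), void (*unpin)(int),
			   int (*evict)(void), int nbufs)
	{
		for (int tries = 0; ; tries++) {
			int i, ret = 0;

			for (i = 0; i < nbufs; i++)
				if ((ret = pin(i)))
					break;
			if (ret == 0)
				return 0;		/* all pinned */
			if (ret != -ENOMEM || tries >= 1)
				return ret;		/* hard failure */
			while (i-- > 0)
				unpin(i);		/* roll back partial work */
			if ((ret = evict()))
				return ret;
		}
	}
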
- */ - ret = -ENOMEM; - goto err; - } - /* make sure all previous memory operations have passed */ - ret = i915_gem_object_set_domain(obj, - obj->pending_read_domains, - obj->pending_write_domain); - if (ret) - goto err; + obj->write_domain = obj->pending_write_domain; } i915_verify_inactive(dev, __FILE__, __LINE__); - /* Flush/invalidate caches and chipset buffer */ - flush_domains = i915_gem_dev_set_domain(dev); - - i915_verify_inactive(dev, __FILE__, __LINE__); - #if WATCH_COHERENCY for (i = 0; i < args->buffer_count; i++) { i915_gem_object_check_coherency(object_list[i], @@ -1893,16 +3368,14 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, exec_offset = exec_list[args->buffer_count - 1].offset; #if WATCH_EXEC - i915_gem_dump_object(object_list[args->buffer_count - 1], + i915_gem_dump_object(batch_obj, args->batch_len, __func__, ~0); #endif - (void)i915_add_request(dev, flush_domains); - /* Exec the batchbuffer */ - ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); + ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset); if (ret) { DRM_ERROR("dispatch failed %d\n", ret); goto err; @@ -1928,10 +3401,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, i915_file_priv->mm.last_gem_seqno = seqno; for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; - struct drm_i915_gem_object *obj_priv = obj->driver_private; - i915_gem_object_move_to_active(obj); - obj_priv->last_rendering_seqno = seqno; + i915_gem_object_move_to_active(obj, seqno); #if WATCH_LRU DRM_INFO("%s: move to exec list %p\n", __func__, obj); #endif @@ -1942,29 +3413,52 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, i915_verify_inactive(dev, __FILE__, __LINE__); - /* Copy the new buffer offsets back to the user's exec list. */ - ret = copy_to_user((struct drm_i915_relocation_entry __user *) - (uintptr_t) args->buffers_ptr, - exec_list, - sizeof(*exec_list) * args->buffer_count); - if (ret) - DRM_ERROR("failed to copy %d exec entries " - "back to user (%d)\n", - args->buffer_count, ret); err: - if (object_list != NULL) { - for (i = 0; i < pinned; i++) - i915_gem_object_unpin(object_list[i]); + for (i = 0; i < pinned; i++) + i915_gem_object_unpin(object_list[i]); - for (i = 0; i < args->buffer_count; i++) - drm_gem_object_unreference(object_list[i]); + for (i = 0; i < args->buffer_count; i++) { + if (object_list[i]) { + obj_priv = object_list[i]->driver_private; + obj_priv->in_execbuffer = false; + } + drm_gem_object_unreference(object_list[i]); } + mutex_unlock(&dev->struct_mutex); + if (!ret) { + /* Copy the new buffer offsets back to the user's exec list. */ + ret = copy_to_user((struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, + exec_list, + sizeof(*exec_list) * args->buffer_count); + if (ret) { + ret = -EFAULT; + DRM_ERROR("failed to copy %d exec entries " + "back to user (%d)\n", + args->buffer_count, ret); + } + } + + /* Copy the updated relocations out regardless of current error + * state. Failure to update the relocs would mean that the next + * time userland calls execbuf, it would do so with presumed offset + * state that didn't match the actual object state. 
+ */ + ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count, + relocs); + if (ret2 != 0) { + DRM_ERROR("Failed to copy relocations back out: %d\n", ret2); + + if (ret == 0) + ret = ret2; + } + pre_mutex_err: - drm_free(object_list, sizeof(*object_list) * args->buffer_count, - DRM_MEM_DRIVER); - drm_free(exec_list, sizeof(*exec_list) * args->buffer_count, + drm_free_large(object_list); + drm_free_large(exec_list); + drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects, DRM_MEM_DRIVER); return ret; @@ -1981,7 +3475,23 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) if (obj_priv->gtt_space == NULL) { ret = i915_gem_object_bind_to_gtt(obj, alignment); if (ret != 0) { - DRM_ERROR("Failure to bind: %d", ret); + if (ret != -EBUSY && ret != -ERESTARTSYS) + DRM_ERROR("Failure to bind: %d\n", ret); + return ret; + } + } + /* + * Pre-965 chips need a fence register set up in order to + * properly handle tiled surfaces. + */ + if (!IS_I965G(dev) && + obj_priv->fence_reg == I915_FENCE_REG_NONE && + obj_priv->tiling_mode != I915_TILING_NONE) { + ret = i915_gem_object_get_fence_reg(obj, true); + if (ret != 0) { + if (ret != -EBUSY && ret != -ERESTARTSYS) + DRM_ERROR("Failure to install fence: %d\n", + ret); return ret; } } @@ -2052,21 +3562,29 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, } obj_priv = obj->driver_private; - ret = i915_gem_object_pin(obj, args->alignment); - if (ret != 0) { + if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { + DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", + args->handle); drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); - return ret; + return -EINVAL; + } + + obj_priv->user_pin_count++; + obj_priv->pin_filp = file_priv; + if (obj_priv->user_pin_count == 1) { + ret = i915_gem_object_pin(obj, args->alignment); + if (ret != 0) { + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return ret; + } } /* XXX - flush the CPU caches for pinned objects * as the X server doesn't manage domains yet */ - if (obj->write_domain & I915_GEM_DOMAIN_CPU) { - i915_gem_clflush_object(obj); - drm_agp_chipset_flush(dev); - obj->write_domain = 0; - } + i915_gem_object_flush_cpu_write_domain(obj); args->offset = obj_priv->gtt_offset; drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); @@ -2080,6 +3598,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_pin *args = data; struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; mutex_lock(&dev->struct_mutex); @@ -2091,7 +3610,19 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, return -EBADF; } - i915_gem_object_unpin(obj); + obj_priv = obj->driver_private; + if (obj_priv->pin_filp != file_priv) { + DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", + args->handle); + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + obj_priv->user_pin_count--; + if (obj_priv->user_pin_count == 0) { + obj_priv->pin_filp = NULL; + i915_gem_object_unpin(obj); + } drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); @@ -2115,8 +3646,22 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, return -EBADF; } + /* Update the active list for the hardware's current position. + * Otherwise this only updates on a delayed timer or when irqs are + * actually unmasked, and our working set ends up being larger than + * required. 
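
The pin ioctl above refcounts pins per file: only the 0 -> 1 transition does real GTT work, and only the owning file may unpin. A compact model of that ownership rule (struct and field names are illustrative):

	#include <errno.h>
	#include <stddef.h>

	struct pinnable { void *pin_filp; int user_pin_count; };

	static int user_pin(struct pinnable *o, void *filp, int (*hw_pin)(void))
	{
		if (o->pin_filp && o->pin_filp != filp)
			return -EINVAL;		/* pinned by another file */
		o->pin_filp = filp;
		if (o->user_pin_count++ == 0)
			return hw_pin();	/* first pin does the real work */
		return 0;
	}
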
+ */ + i915_gem_retire_requests(dev); + obj_priv = obj->driver_private; - args->busy = obj_priv->active; + /* Don't count being on the flushing list against the object being + * done. Otherwise, a buffer left on the flushing list but not getting + * flushed (because nobody's flushing that domain) won't ever return + * unbusy and get reused by libdrm's bo cache. The other expected + * consumer of this interface, OpenGL's occlusion queries, also specs + * that the objects get unbusy "eventually" without any interference. + */ + args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0; drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); @@ -2151,46 +3696,32 @@ int i915_gem_init_object(struct drm_gem_object *obj) obj->driver_private = obj_priv; obj_priv->obj = obj; + obj_priv->fence_reg = I915_FENCE_REG_NONE; INIT_LIST_HEAD(&obj_priv->list); + return 0; } void i915_gem_free_object(struct drm_gem_object *obj) { + struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = obj->driver_private; while (obj_priv->pin_count > 0) i915_gem_object_unpin(obj); + if (obj_priv->phys_obj) + i915_gem_detach_phys_object(dev, obj); + i915_gem_object_unbind(obj); + i915_gem_free_mmap_offset(obj); + drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); + kfree(obj_priv->bit_17); drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); } -static int -i915_gem_set_domain(struct drm_gem_object *obj, - struct drm_file *file_priv, - uint32_t read_domains, - uint32_t write_domain) -{ - struct drm_device *dev = obj->dev; - int ret; - uint32_t flush_domains; - - BUG_ON(!mutex_is_locked(&dev->struct_mutex)); - - ret = i915_gem_object_set_domain(obj, read_domains, write_domain); - if (ret) - return ret; - flush_domains = i915_gem_dev_set_domain(obj->dev); - - if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) - (void) i915_add_request(dev, flush_domains); - - return 0; -} - /** Unbinds all objects that are on the given buffer list. */ static int i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) @@ -2224,7 +3755,7 @@ i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) return 0; } -static int +int i915_gem_idle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -2255,8 +3786,7 @@ i915_gem_idle(struct drm_device *dev) */ i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT), ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); - seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU | - I915_GEM_DOMAIN_GTT)); + seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU); if (seqno == 0) { mutex_unlock(&dev->struct_mutex); @@ -2285,29 +3815,54 @@ i915_gem_idle(struct drm_device *dev) i915_gem_retire_requests(dev); - /* Active and flushing should now be empty as we've - * waited for a sequence higher than any pending execbuffer - */ - BUG_ON(!list_empty(&dev_priv->mm.active_list)); - BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); + spin_lock(&dev_priv->mm.active_list_lock); + if (!dev_priv->mm.wedged) { + /* Active and flushing should now be empty as we've + * waited for a sequence higher than any pending execbuffer + */ + WARN_ON(!list_empty(&dev_priv->mm.active_list)); + WARN_ON(!list_empty(&dev_priv->mm.flushing_list)); + /* Request should now be empty as we've also waited + * for the last request in the list + */ + WARN_ON(!list_empty(&dev_priv->mm.request_list)); + } - /* Request should now be empty as we've also waited - * for the last request in the list + /* Empty the active and flushing lists to inactive. 
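
The drain loops below follow the usual kernel "pop the head until the list is empty" shape. The same pattern over a minimal singly linked list, with retire() standing in for move_to_inactive:

	#include <stddef.h>

	struct node { struct node *next; };

	static void drain(struct node **head, void (*retire)(struct node *))
	{
		while (*head != NULL) {
			struct node *n = *head;

			*head = n->next;	/* unlink the first entry */
			retire(n);		/* move_to_inactive analogue */
		}
	}
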
If there's + * anything left at this point, it means that we're wedged and + * nothing good's going to happen by leaving them there. So strip + * the GPU domains and just stuff them onto inactive. */ - BUG_ON(!list_empty(&dev_priv->mm.request_list)); + while (!list_empty(&dev_priv->mm.active_list)) { + struct drm_i915_gem_object *obj_priv; + + obj_priv = list_first_entry(&dev_priv->mm.active_list, + struct drm_i915_gem_object, + list); + obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS; + i915_gem_object_move_to_inactive(obj_priv->obj); + } + spin_unlock(&dev_priv->mm.active_list_lock); - /* Move all buffers out of the GTT. */ + while (!list_empty(&dev_priv->mm.flushing_list)) { + struct drm_i915_gem_object *obj_priv; + + obj_priv = list_first_entry(&dev_priv->mm.flushing_list, + struct drm_i915_gem_object, + list); + obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS; + i915_gem_object_move_to_inactive(obj_priv->obj); + } + + + /* Move all inactive buffers out of the GTT. */ ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list); + WARN_ON(!list_empty(&dev_priv->mm.inactive_list)); if (ret) { mutex_unlock(&dev->struct_mutex); return ret; } - BUG_ON(!list_empty(&dev_priv->mm.active_list)); - BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); - BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); - BUG_ON(!list_empty(&dev_priv->mm.request_list)); - i915_gem_cleanup_ringbuffer(dev); mutex_unlock(&dev->struct_mutex); @@ -2344,10 +3899,11 @@ i915_gem_init_hws(struct drm_device *dev) dev_priv->status_gfx_addr = obj_priv->gtt_offset; - dev_priv->hw_status_page = kmap(obj_priv->page_list[0]); + dev_priv->hw_status_page = kmap(obj_priv->pages[0]); if (dev_priv->hw_status_page == NULL) { DRM_ERROR("Failed to map status page.\n"); memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); + i915_gem_object_unpin(obj); drm_gem_object_unreference(obj); return -EINVAL; } @@ -2360,12 +3916,38 @@ i915_gem_init_hws(struct drm_device *dev) return 0; } -static int +static void +i915_gem_cleanup_hws(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + + if (dev_priv->hws_obj == NULL) + return; + + obj = dev_priv->hws_obj; + obj_priv = obj->driver_private; + + kunmap(obj_priv->pages[0]); + i915_gem_object_unpin(obj); + drm_gem_object_unreference(obj); + dev_priv->hws_obj = NULL; + + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); + dev_priv->hw_status_page = NULL; + + /* Write high address into HWS_PGA when disabling. */ + I915_WRITE(HWS_PGA, 0x1ffff000); +} + +int i915_gem_init_ringbuffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; + drm_i915_ring_buffer_t *ring = &dev_priv->ring; int ret; u32 head; @@ -2376,6 +3958,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev) obj = drm_gem_object_alloc(dev, 128 * 1024); if (obj == NULL) { DRM_ERROR("Failed to allocate ringbuffer\n"); + i915_gem_cleanup_hws(dev); return -ENOMEM; } obj_priv = obj->driver_private; @@ -2383,28 +3966,31 @@ i915_gem_init_ringbuffer(struct drm_device *dev) ret = i915_gem_object_pin(obj, 4096); if (ret != 0) { drm_gem_object_unreference(obj); + i915_gem_cleanup_hws(dev); return ret; } /* Set up the kernel mapping for the ring. 
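
The ring bookkeeping set up below keeps Size a power of two, so "tail_mask = Size - 1" works as a wrap mask, and computes free space as head minus tail with a wrap correction. A standalone model of that space calculation (the 8-byte slack matches the hunk that follows):

	#include <stdint.h>

	static int32_t ring_space(uint32_t head, uint32_t tail, uint32_t size)
	{
		int32_t space = (int32_t)(head - (tail + 8));

		if (space < 0)
			space += (int32_t)size;	/* tail wrapped past head */
		return space;
	}
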
*/ - dev_priv->ring.Size = obj->size; - dev_priv->ring.tail_mask = obj->size - 1; + ring->Size = obj->size; + ring->tail_mask = obj->size - 1; - dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset; - dev_priv->ring.map.size = obj->size; - dev_priv->ring.map.type = 0; - dev_priv->ring.map.flags = 0; - dev_priv->ring.map.mtrr = 0; + ring->map.offset = dev->agp->base + obj_priv->gtt_offset; + ring->map.size = obj->size; + ring->map.type = 0; + ring->map.flags = 0; + ring->map.mtrr = 0; - drm_core_ioremap_wc(&dev_priv->ring.map, dev); - if (dev_priv->ring.map.handle == NULL) { + drm_core_ioremap_wc(&ring->map, dev); + if (ring->map.handle == NULL) { DRM_ERROR("Failed to map ringbuffer.\n"); memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); + i915_gem_object_unpin(obj); drm_gem_object_unreference(obj); + i915_gem_cleanup_hws(dev); return -EINVAL; } - dev_priv->ring.ring_obj = obj; - dev_priv->ring.virtual_start = dev_priv->ring.map.handle; + ring->ring_obj = obj; + ring->virtual_start = ring->map.handle; /* Stop the ring if it's running. */ I915_WRITE(PRB0_CTL, 0); @@ -2452,12 +4038,20 @@ i915_gem_init_ringbuffer(struct drm_device *dev) } /* Update our cache of the ring state */ - i915_kernel_lost_context(dev); + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + i915_kernel_lost_context(dev); + else { + ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; + ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; + ring->space = ring->head - (ring->tail + 8); + if (ring->space < 0) + ring->space += ring->Size; + } return 0; } -static void +void i915_gem_cleanup_ringbuffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -2472,20 +4066,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) dev_priv->ring.ring_obj = NULL; memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); - if (dev_priv->hws_obj != NULL) { - struct drm_gem_object *obj = dev_priv->hws_obj; - struct drm_i915_gem_object *obj_priv = obj->driver_private; - - kunmap(obj_priv->page_list[0]); - i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); - dev_priv->hws_obj = NULL; - memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); - dev_priv->hw_status_page = NULL; - - /* Write high address into HWS_PGA when disabling. 
*/ - I915_WRITE(HWS_PGA, 0x1ffff000); - } + i915_gem_cleanup_hws(dev); } int @@ -2495,21 +4076,30 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, drm_i915_private_t *dev_priv = dev->dev_private; int ret; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return 0; + if (dev_priv->mm.wedged) { DRM_ERROR("Reenabling wedged hardware, good luck\n"); dev_priv->mm.wedged = 0; } + mutex_lock(&dev->struct_mutex); + dev_priv->mm.suspended = 0; + ret = i915_gem_init_ringbuffer(dev); - if (ret != 0) + if (ret != 0) { + mutex_unlock(&dev->struct_mutex); return ret; + } - mutex_lock(&dev->struct_mutex); + spin_lock(&dev_priv->mm.active_list_lock); BUG_ON(!list_empty(&dev_priv->mm.active_list)); + spin_unlock(&dev_priv->mm.active_list_lock); + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); BUG_ON(!list_empty(&dev_priv->mm.request_list)); - dev_priv->mm.suspended = 0; mutex_unlock(&dev->struct_mutex); drm_irq_install(dev); @@ -2523,6 +4113,9 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, { int ret; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return 0; + ret = i915_gem_idle(dev); drm_irq_uninstall(dev); @@ -2534,6 +4127,9 @@ i915_gem_lastclose(struct drm_device *dev) { int ret; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return; + ret = i915_gem_idle(dev); if (ret) DRM_ERROR("failed to idle hardware: %d\n", ret); @@ -2544,15 +4140,199 @@ i915_gem_load(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + spin_lock_init(&dev_priv->mm.active_list_lock); INIT_LIST_HEAD(&dev_priv->mm.active_list); INIT_LIST_HEAD(&dev_priv->mm.flushing_list); INIT_LIST_HEAD(&dev_priv->mm.inactive_list); INIT_LIST_HEAD(&dev_priv->mm.request_list); INIT_DELAYED_WORK(&dev_priv->mm.retire_work, i915_gem_retire_work_handler); - INIT_WORK(&dev_priv->mm.vblank_work, - i915_gem_vblank_work_handler); dev_priv->mm.next_gem_seqno = 1; + /* Old X drivers will take 0-2 for front, back, depth buffers */ + dev_priv->fence_reg_start = 3; + + if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + dev_priv->num_fence_regs = 16; + else + dev_priv->num_fence_regs = 8; + i915_gem_detect_bit_6_swizzle(dev); } + +/* + * Create a physically contiguous memory object for this object + * e.g. 
for cursor + overlay regs + */ +int i915_gem_init_phys_object(struct drm_device *dev, + int id, int size) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_phys_object *phys_obj; + int ret; + + if (dev_priv->mm.phys_objs[id - 1] || !size) + return 0; + + phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER); + if (!phys_obj) + return -ENOMEM; + + phys_obj->id = id; + + phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff); + if (!phys_obj->handle) { + ret = -ENOMEM; + goto kfree_obj; + } +#ifdef CONFIG_X86 + set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); +#endif + + dev_priv->mm.phys_objs[id - 1] = phys_obj; + + return 0; +kfree_obj: + drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER); + return ret; +} + +void i915_gem_free_phys_object(struct drm_device *dev, int id) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_phys_object *phys_obj; + + if (!dev_priv->mm.phys_objs[id - 1]) + return; + + phys_obj = dev_priv->mm.phys_objs[id - 1]; + if (phys_obj->cur_obj) { + i915_gem_detach_phys_object(dev, phys_obj->cur_obj); + } + +#ifdef CONFIG_X86 + set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); +#endif + drm_pci_free(dev, phys_obj->handle); + kfree(phys_obj); + dev_priv->mm.phys_objs[id - 1] = NULL; +} + +void i915_gem_free_all_phys_object(struct drm_device *dev) +{ + int i; + + for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++) + i915_gem_free_phys_object(dev, i); +} + +void i915_gem_detach_phys_object(struct drm_device *dev, + struct drm_gem_object *obj) +{ + struct drm_i915_gem_object *obj_priv; + int i; + int ret; + int page_count; + + obj_priv = obj->driver_private; + if (!obj_priv->phys_obj) + return; + + ret = i915_gem_object_get_pages(obj); + if (ret) + goto out; + + page_count = obj->size / PAGE_SIZE; + + for (i = 0; i < page_count; i++) { + char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0); + char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); + + memcpy(dst, src, PAGE_SIZE); + kunmap_atomic(dst, KM_USER0); + } + drm_clflush_pages(obj_priv->pages, page_count); + drm_agp_chipset_flush(dev); +out: + obj_priv->phys_obj->cur_obj = NULL; + obj_priv->phys_obj = NULL; +} + +int +i915_gem_attach_phys_object(struct drm_device *dev, + struct drm_gem_object *obj, int id) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + int ret = 0; + int page_count; + int i; + + if (id > I915_MAX_PHYS_OBJECT) + return -EINVAL; + + obj_priv = obj->driver_private; + + if (obj_priv->phys_obj) { + if (obj_priv->phys_obj->id == id) + return 0; + i915_gem_detach_phys_object(dev, obj); + } + + + /* create a new object */ + if (!dev_priv->mm.phys_objs[id - 1]) { + ret = i915_gem_init_phys_object(dev, id, + obj->size); + if (ret) { + DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); + goto out; + } + } + + /* bind to the object */ + obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; + obj_priv->phys_obj->cur_obj = obj; + + ret = i915_gem_object_get_pages(obj); + if (ret) { + DRM_ERROR("failed to get page list\n"); + goto out; + } + + page_count = obj->size / PAGE_SIZE; + + for (i = 0; i < page_count; i++) { + char *src = kmap_atomic(obj_priv->pages[i], KM_USER0); + char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); + + memcpy(dst, src, PAGE_SIZE); + kunmap_atomic(src, KM_USER0); + } + + return 0; +out: + return 
ret; +} + +static int +i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + void *obj_addr; + int ret; + char __user *user_data; + + user_data = (char __user *) (uintptr_t) args->data_ptr; + obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; + + DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size); + ret = copy_from_user(obj_addr, user_data, args->size); + if (ret) + return -EFAULT; + + drm_agp_chipset_flush(dev); + return 0; +}
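
i915_gem_phys_pwrite() above leans on copy_from_user()'s contract: the return value is the number of bytes it could not copy, so any nonzero result maps to -EFAULT. A user-space stand-in that makes the convention explicit (copy_from_user_model is a hypothetical substitute for the kernel primitive):

	#include <errno.h>
	#include <stddef.h>
	#include <string.h>

	/* Stand-in for the kernel primitive: returns bytes NOT copied. */
	static size_t copy_from_user_model(void *dst, const void *src, size_t n)
	{
		memcpy(dst, src, n);	/* a real fault would leave a remainder */
		return 0;
	}

	static int phys_write(void *dst, const void *user_src, size_t n)
	{
		if (copy_from_user_model(dst, user_src, n))
			return -EFAULT;	/* partial copy means a bad user pointer */
		return 0;
	}
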