on_each_cpu(): kill unused 'retry' parameter
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 5ff457b..eaa1a35 100644
@@ -37,6 +37,7 @@
 #include <linux/vmalloc.h>
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
+#include <linux/sched.h>
 #include <asm/io.h>
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
@@ -51,28 +52,6 @@ int agp_memory_reserved;
  */
 EXPORT_SYMBOL_GPL(agp_memory_reserved);
 
-#if defined(CONFIG_X86)
-int map_page_into_agp(struct page *page)
-{
-       int i;
-       i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
-       /* Caller's responsibility to call global_flush_tlb() for
-        * performance reasons */
-       return i;
-}
-EXPORT_SYMBOL_GPL(map_page_into_agp);
-
-int unmap_page_from_agp(struct page *page)
-{
-       int i;
-       i = change_page_attr(page, 1, PAGE_KERNEL);
-       /* Caller's responsibility to call global_flush_tlb() for
-        * performance reasons */
-       return i;
-}
-EXPORT_SYMBOL_GPL(unmap_page_from_agp);
-#endif
-
 /*
  * Generic routines for handling agp_memory structures -
  * They use the basic page allocation routines to do the brunt of the work.
@@ -101,6 +80,70 @@ static int agp_get_key(void)
        return -1;
 }
 
+void agp_flush_chipset(struct agp_bridge_data *bridge)
+{
+       if (bridge->driver->chipset_flush)
+               bridge->driver->chipset_flush(bridge);
+}
+EXPORT_SYMBOL(agp_flush_chipset);
+
+/*
+ * Use kmalloc if possible for the page list. Otherwise fall back to
+ * vmalloc. This speeds things up and also saves memory for small AGP
+ * regions.
+ */
+
+void agp_alloc_page_array(size_t size, struct agp_memory *mem)
+{
+       mem->memory = NULL;
+       mem->vmalloc_flag = false;
+
+       if (size <= 2*PAGE_SIZE)
+               mem->memory = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
+       if (mem->memory == NULL) {
+               mem->memory = vmalloc(size);
+               mem->vmalloc_flag = true;
+       }
+}
+EXPORT_SYMBOL(agp_alloc_page_array);
+
+void agp_free_page_array(struct agp_memory *mem)
+{
+       if (mem->vmalloc_flag) {
+               vfree(mem->memory);
+       } else {
+               kfree(mem->memory);
+       }
+}
+EXPORT_SYMBOL(agp_free_page_array);
+
+
+static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
+{
+       struct agp_memory *new;
+       unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
+
+       new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
+       if (new == NULL)
+               return NULL;
+
+       new->key = agp_get_key();
+
+       if (new->key < 0) {
+               kfree(new);
+               return NULL;
+       }
+
+       agp_alloc_page_array(alloc_size, new);
+
+       if (new->memory == NULL) {
+               agp_free_key(new->key);
+               kfree(new);
+               return NULL;
+       }
+       new->num_scratch_pages = 0;
+       return new;
+}
 
 struct agp_memory *agp_create_memory(int scratch_pages)
 {
@@ -116,7 +159,8 @@ struct agp_memory *agp_create_memory(int scratch_pages)
                kfree(new);
                return NULL;
        }
-       new->memory = vmalloc(PAGE_SIZE * scratch_pages);
+
+       agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
 
        if (new->memory == NULL) {
                agp_free_key(new->key);
@@ -124,6 +168,7 @@ struct agp_memory *agp_create_memory(int scratch_pages)
                return NULL;
        }
        new->num_scratch_pages = scratch_pages;
+       new->type = AGP_NORMAL_MEMORY;
        return new;
 }
 EXPORT_SYMBOL(agp_create_memory);
@@ -143,21 +188,31 @@ void agp_free_memory(struct agp_memory *curr)
        if (curr == NULL)
                return;
 
-       if (curr->is_bound == TRUE)
+       if (curr->is_bound)
                agp_unbind_memory(curr);
 
+       if (curr->type >= AGP_USER_TYPES) {
+               agp_generic_free_by_type(curr);
+               return;
+       }
+
        if (curr->type != 0) {
                curr->bridge->driver->free_by_type(curr);
                return;
        }
        if (curr->page_count != 0) {
                for (i = 0; i < curr->page_count; i++) {
-                       curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]));
+                       curr->memory[i] = (unsigned long)gart_to_virt(curr->memory[i]);
+                       curr->bridge->driver->agp_destroy_page((void *)curr->memory[i],
+                                                              AGP_PAGE_DESTROY_UNMAP);
+               }
+               for (i = 0; i < curr->page_count; i++) {
+                       curr->bridge->driver->agp_destroy_page((void *)curr->memory[i],
+                                                              AGP_PAGE_DESTROY_FREE);
                }
-               flush_agp_mappings();
        }
        agp_free_key(curr->key);
-       vfree(curr->memory);
+       agp_free_page_array(curr);
        kfree(curr);
 }
 EXPORT_SYMBOL(agp_free_memory);
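
For context, this is roughly how a consumer drives the lifecycle that ends in agp_free_memory(). A hedged sketch using only the exported backend API (error handling trimmed; the page count and pg_start are arbitrary):

    #include <linux/agp_backend.h>
    #include <linux/errno.h>
    #include <linux/pci.h>

    static int example_agp_use(struct pci_dev *pdev, off_t pg_start)
    {
            struct agp_bridge_data *bridge;
            struct agp_memory *mem;

            bridge = agp_backend_acquire(pdev);
            if (!bridge)
                    return -ENODEV;

            mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
            if (mem && agp_bind_memory(mem, pg_start) == 0) {
                    /* the GART aperture now maps the 16 pages */
                    agp_unbind_memory(mem);
            }

            agp_free_memory(mem);   /* NULL-safe; unbinds first if still bound */
            agp_backend_release(bridge);
            return 0;
    }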
@@ -188,6 +243,13 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
        if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
                return NULL;
 
+       if (type >= AGP_USER_TYPES) {
+               new = agp_generic_alloc_user(page_count, type);
+               if (new)
+                       new->bridge = bridge;
+               return new;
+       }
+
        if (type != 0) {
                new = bridge->driver->alloc_by_type(page_count, type);
                if (new)
@@ -214,8 +276,6 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
        }
        new->bridge = bridge;
 
-       flush_agp_mappings();
-
        return new;
 }
 EXPORT_SYMBOL(agp_allocate_memory);
@@ -354,20 +414,20 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
        if (curr == NULL)
                return -EINVAL;
 
-       if (curr->is_bound == TRUE) {
+       if (curr->is_bound) {
                printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
                return -EINVAL;
        }
-       if (curr->is_flushed == FALSE) {
+       if (!curr->is_flushed) {
                curr->bridge->driver->cache_flush();
-               curr->is_flushed = TRUE;
+               curr->is_flushed = true;
        }
        ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
 
        if (ret_val != 0)
                return ret_val;
 
-       curr->is_bound = TRUE;
+       curr->is_bound = true;
        curr->pg_start = pg_start;
        return 0;
 }
@@ -389,7 +449,7 @@ int agp_unbind_memory(struct agp_memory *curr)
        if (curr == NULL)
                return -EINVAL;
 
-       if (curr->is_bound != TRUE) {
+       if (!curr->is_bound) {
                printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
                return -EINVAL;
        }
@@ -399,7 +459,7 @@ int agp_unbind_memory(struct agp_memory *curr)
        if (ret_val != 0)
                return ret_val;
 
-       curr->is_bound = FALSE;
+       curr->is_bound = false;
        curr->pg_start = 0;
        return 0;
 }
@@ -419,6 +479,31 @@ static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
                *requested_mode &= ~AGP2_RESERVED_MASK;
        }
 
+       /*
+        * Some dumb bridges are programmed to disobey the AGP2 spec.
+        * This is likely a BIOS misprogramming rather than poweron default, or
+        * it would be a lot more common.
+        * https://bugs.freedesktop.org/show_bug.cgi?id=8816
+        * AGPv2 spec 6.1.9 states:
+        *   The RATE field indicates the data transfer rates supported by this
+        *   device. A.G.P. devices must report all that apply.
+        * Fix them up as best we can.
+        */
+       switch (*bridge_agpstat & 7) {
+       case 4:
+               *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
+               printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
+                       "Fixing up support for x2 & x1\n");
+               break;
+       case 2:
+               *bridge_agpstat |= AGPSTAT2_1X;
+               printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
+                       "Fixing up support for x1\n");
+               break;
+       default:
+               break;
+       }
+
        /* Check the speed bits make sense. Only one should be set. */
        tmp = *requested_mode & 7;
        switch (tmp) {
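
A worked example of the fixup above, assuming the AGPSTAT2_* rate bits as this driver's agp.h defines them (x1 = bit 0, x2 = bit 1, x4 = bit 2): a conforming x4 bridge reports 0x7 in the low three bits, so one that reports only 0x4 is patched back to 0x7 to keep the slower rates negotiable.

    static u32 fixup_rate_bits(u32 agpstat)
    {
            if ((agpstat & 7) == AGPSTAT2_4X)       /* buggy: claims x4 only */
                    agpstat |= AGPSTAT2_2X | AGPSTAT2_1X;
            return agpstat;                         /* 0x4 becomes 0x7 */
    }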
@@ -672,7 +757,7 @@ u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode
 EXPORT_SYMBOL(agp_collect_device_status);
 
 
-void agp_device_command(u32 bridge_agpstat, int agp_v3)
+void agp_device_command(u32 bridge_agpstat, bool agp_v3)
 {
        struct pci_dev *device = NULL;
        int mode;
@@ -736,7 +821,7 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
                        /* If we have 3.5, we can do the isoch stuff. */
                        if (bridge->minor_version >= 5)
                                agp_3_5_enable(bridge);
-                       agp_device_command(bridge_agpstat, TRUE);
+                       agp_device_command(bridge_agpstat, true);
                        return;
                } else {
                    /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/
@@ -753,7 +838,7 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
        }
 
        /* AGP v<3 */
-       agp_device_command(bridge_agpstat, FALSE);
+       agp_device_command(bridge_agpstat, false);
 }
 EXPORT_SYMBOL(agp_generic_enable);
 
@@ -850,9 +935,14 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
        agp_gatt_table = (void *)table;
 
        bridge->driver->cache_flush();
+#ifdef CONFIG_X86
+       set_memory_uc((unsigned long)table, 1 << page_order);
+       bridge->gatt_table = (void *)table;
+#else
        bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
                                        (PAGE_SIZE * (1 << page_order)));
        bridge->driver->cache_flush();
+#endif
 
        if (bridge->gatt_table == NULL) {
                for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
@@ -909,7 +999,11 @@ int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
         * called, then all agp memory is deallocated and removed
         * from the table. */
 
+#ifdef CONFIG_X86
+       set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
+#else
        iounmap(bridge->gatt_table);
+#endif
        table = (char *) bridge->gatt_table_real;
        table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
 
@@ -935,11 +1029,15 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
        off_t j;
        void *temp;
        struct agp_bridge_data *bridge;
+       int mask_type;
 
        bridge = mem->bridge;
        if (!bridge)
                return -EINVAL;
 
+       if (mem->page_count == 0)
+               return 0;
+
        temp = bridge->current_size;
 
        switch (bridge->driver->size_type) {
@@ -967,7 +1065,11 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
        num_entries -= agp_memory_reserved/PAGE_SIZE;
        if (num_entries < 0) num_entries = 0;
 
-       if (type != 0 || mem->type != 0) {
+       if (type != mem->type)
+               return -EINVAL;
+
+       mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+       if (mask_type != 0) {
                /* The generic routines know nothing of memory types */
                return -EINVAL;
        }
@@ -984,15 +1086,16 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
                j++;
        }
 
-       if (mem->is_flushed == FALSE) {
+       if (!mem->is_flushed) {
                bridge->driver->cache_flush();
-               mem->is_flushed = TRUE;
+               mem->is_flushed = true;
        }
 
        for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-               writel(bridge->driver->mask_memory(bridge, mem->memory[i], mem->type), bridge->gatt_table+j);
-               readl(bridge->gatt_table+j);    /* PCI Posting. */
+               writel(bridge->driver->mask_memory(bridge, mem->memory[i], mask_type),
+                      bridge->gatt_table+j);
        }
+       readl(bridge->gatt_table+j-1);  /* PCI Posting. */
 
        bridge->driver->tlb_flush(mem);
        return 0;
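
The loop above now issues a single readl() after all the writel() calls instead of one per entry. The idiom in isolation, as a hedged sketch (the function and its parameters are illustrative): MMIO writes may be posted, so reading back the last slot written forces everything ahead of it out to the device before the TLB flush that follows.

    #include <linux/io.h>
    #include <linux/types.h>

    static void fill_table(u32 __iomem *gatt, const u32 *entries,
                           unsigned int n, unsigned int start)
    {
            unsigned int i;

            if (!n)
                    return;

            for (i = 0; i < n; i++)
                    writel(entries[i], gatt + start + i);

            readl(gatt + start + n - 1);    /* flush posted writes */
    }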
@@ -1004,12 +1107,20 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 {
        size_t i;
        struct agp_bridge_data *bridge;
+       int mask_type;
 
        bridge = mem->bridge;
        if (!bridge)
                return -EINVAL;
 
-       if (type != 0 || mem->type != 0) {
+       if (mem->page_count == 0)
+               return 0;
+
+       if (type != mem->type)
+               return -EINVAL;
+
+       mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+       if (mask_type != 0) {
                /* The generic routines know nothing of memory types */
                return -EINVAL;
        }
@@ -1017,31 +1128,48 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
        /* AK: bogus, should encode addresses > 4GB */
        for (i = pg_start; i < (mem->page_count + pg_start); i++) {
                writel(bridge->scratch_page, bridge->gatt_table+i);
-               readl(bridge->gatt_table+i);    /* PCI Posting. */
        }
+       readl(bridge->gatt_table+i-1);  /* PCI Posting. */
 
-       global_cache_flush();
        bridge->driver->tlb_flush(mem);
        return 0;
 }
 EXPORT_SYMBOL(agp_generic_remove_memory);
 
-
 struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
 {
        return NULL;
 }
 EXPORT_SYMBOL(agp_generic_alloc_by_type);
 
-
 void agp_generic_free_by_type(struct agp_memory *curr)
 {
-       vfree(curr->memory);
+       agp_free_page_array(curr);
        agp_free_key(curr->key);
        kfree(curr);
 }
 EXPORT_SYMBOL(agp_generic_free_by_type);
 
+struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
+{
+       struct agp_memory *new;
+       int i;
+       int pages;
+
+       pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
+       new = agp_create_user_memory(page_count);
+       if (new == NULL)
+               return NULL;
+
+       for (i = 0; i < page_count; i++)
+               new->memory[i] = 0;
+       new->page_count = 0;
+       new->type = type;
+       new->num_scratch_pages = pages;
+
+       return new;
+}
+EXPORT_SYMBOL(agp_generic_alloc_user);
 
 /*
  * Basic Page Allocation Routines -
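
agp_generic_alloc_user() above deliberately returns an empty page array (every slot zero, page_count still 0): the caller owns the backing pages and fills the array in before binding. A hedged sketch of such a caller, roughly what the DRM layer does for AGP_USER_MEMORY; wrap_user_pages() and its arguments are illustrative:

    #include <linux/agp_backend.h>
    #include <asm/io.h>

    static struct agp_memory *wrap_user_pages(struct agp_bridge_data *bridge,
                                              struct page **pages, size_t n)
    {
            struct agp_memory *mem;
            size_t i;

            /* type >= AGP_USER_TYPES routes to agp_generic_alloc_user() */
            mem = agp_allocate_memory(bridge, n, AGP_USER_MEMORY);
            if (!mem)
                    return NULL;

            for (i = 0; i < n; i++)
                    /* some architectures apply a GART address translation here */
                    mem->memory[i] = (unsigned long)page_to_phys(pages[i]);
            mem->page_count = n;

            return mem;     /* ready for agp_bind_memory() */
    }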
@@ -1061,14 +1189,13 @@ void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
        map_page_into_agp(page);
 
        get_page(page);
-       SetPageLocked(page);
        atomic_inc(&agp_bridge->current_memory_agp);
        return page_address(page);
 }
 EXPORT_SYMBOL(agp_generic_alloc_page);
 
 
-void agp_generic_destroy_page(void *addr)
+void agp_generic_destroy_page(void *addr, int flags)
 {
        struct page *page;
 
@@ -1076,11 +1203,14 @@ void agp_generic_destroy_page(void *addr)
                return;
 
        page = virt_to_page(addr);
-       unmap_page_from_agp(page);
-       put_page(page);
-       unlock_page(page);
-       free_page((unsigned long)addr);
-       atomic_dec(&agp_bridge->current_memory_agp);
+       if (flags & AGP_PAGE_DESTROY_UNMAP)
+               unmap_page_from_agp(page);
+
+       if (flags & AGP_PAGE_DESTROY_FREE) {
+               put_page(page);
+               free_page((unsigned long)addr);
+               atomic_dec(&agp_bridge->current_memory_agp);
+       }
 }
 EXPORT_SYMBOL(agp_generic_destroy_page);
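
The two flags let a caller with many pages (such as agp_free_memory() above) run every unmap, with its costly cache-attribute changes, before any page is returned to the allocator. When only one page is being torn down there is nothing to batch, so both phases can be requested in one call. A sketch, assuming a driver-internal context where the bridge's hooks are reachable:

    static void drop_one_page(struct agp_bridge_data *bridge, void *addr)
    {
            /* unmap and free in one shot; no batch to amortize */
            bridge->driver->agp_destroy_page(addr,
                            AGP_PAGE_DESTROY_UNMAP | AGP_PAGE_DESTROY_FREE);
    }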
 
@@ -1119,7 +1249,7 @@ static void ipi_handler(void *null)
 
 void global_cache_flush(void)
 {
-       if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0)
+       if (on_each_cpu(ipi_handler, NULL, 1) != 0)
                panic(PFX "timed out waiting for the other CPUs!\n");
 }
 EXPORT_SYMBOL(global_cache_flush);
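
This hunk is the change the commit title refers to: on_each_cpu() lost its never-used 'retry' argument, so the signature is now on_each_cpu(func, info, wait). A minimal sketch of a caller in the new style, mirroring the ipi_handler()/global_cache_flush() pair in this file:

    #include <linux/smp.h>
    #include <asm/agp.h>    /* flush_agp_cache() */

    static void cache_flush_handler(void *unused)
    {
            flush_agp_cache();      /* runs on each online CPU */
    }

    static void flush_caches_everywhere(void)
    {
            /* wait == 1: do not return until every CPU has run the handler */
            on_each_cpu(cache_flush_handler, NULL, 1);
    }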
@@ -1135,6 +1265,15 @@ unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
 }
 EXPORT_SYMBOL(agp_generic_mask_memory);
 
+int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
+                                 int type)
+{
+       if (type >= AGP_USER_TYPES)
+               return 0;
+       return type;
+}
+EXPORT_SYMBOL(agp_generic_type_to_mask_type);
+
 /*
  * These functions are implemented according to the AGPv3 spec,
  * which covers implementation details that had previously been
@@ -1201,7 +1340,7 @@ void agp3_generic_cleanup(void)
 }
 EXPORT_SYMBOL(agp3_generic_cleanup);
 
-struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
+const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
 {
        {4096, 1048576, 10,0x000},
        {2048,  524288, 9, 0x800},