agp/intel-agp: Set dma_mask for capable chipsets before agp_add_bridge()

diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 148d7e3..10e1f03 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -46,6 +46,8 @@
 #define PCI_DEVICE_ID_INTEL_Q35_IG          0x29B2
 #define PCI_DEVICE_ID_INTEL_Q33_HB          0x29D0
 #define PCI_DEVICE_ID_INTEL_Q33_IG          0x29D2
+#define PCI_DEVICE_ID_INTEL_B43_HB          0x2E40
+#define PCI_DEVICE_ID_INTEL_B43_IG          0x2E42
 #define PCI_DEVICE_ID_INTEL_GM45_HB         0x2A40
 #define PCI_DEVICE_ID_INTEL_GM45_IG         0x2A42
 #define PCI_DEVICE_ID_INTEL_IGD_E_HB        0x2E00
@@ -59,6 +61,7 @@
 #define PCI_DEVICE_ID_INTEL_IGDNG_D_HB     0x0040
 #define PCI_DEVICE_ID_INTEL_IGDNG_D_IG     0x0042
 #define PCI_DEVICE_ID_INTEL_IGDNG_M_HB     0x0044
+#define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB    0x0062
 #define PCI_DEVICE_ID_INTEL_IGDNG_M_IG     0x0046
 
 /* cover 915 and 945 variants */
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB)
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB)
 
 extern int agp_memory_reserved;
 
@@ -196,41 +201,39 @@ static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 }
 
+static void intel_agp_free_sglist(struct agp_memory *mem)
+{
+       struct sg_table st;
+
+       st.sgl = mem->sg_list;
+       st.orig_nents = st.nents = mem->page_count;
+
+       sg_free_table(&st);
+
+       mem->sg_list = NULL;
+       mem->num_sg = 0;
+}
+
 static int intel_agp_map_memory(struct agp_memory *mem)
 {
+       struct sg_table st;
        struct scatterlist *sg;
        int i;
 
        DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
 
-       if ((mem->page_count * sizeof(*mem->sg_list)) < 2*PAGE_SIZE)
-               mem->sg_list = kcalloc(mem->page_count, sizeof(*mem->sg_list),
-                                      GFP_KERNEL);
-
-       if (mem->sg_list == NULL) {
-               mem->sg_list = vmalloc(mem->page_count * sizeof(*mem->sg_list));
-               mem->sg_vmalloc_flag = 1;
-       }
-
-       if (!mem->sg_list) {
-               mem->sg_vmalloc_flag = 0;
+       if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
                return -ENOMEM;
-       }
-       sg_init_table(mem->sg_list, mem->page_count);
 
-       sg = mem->sg_list;
+       mem->sg_list = sg = st.sgl;
+
        for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
                sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
 
        mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
                                 mem->page_count, PCI_DMA_BIDIRECTIONAL);
-       if (!mem->num_sg) {
-               if (mem->sg_vmalloc_flag)
-                       vfree(mem->sg_list);
-               else
-                       kfree(mem->sg_list);
-               mem->sg_list = NULL;
-               mem->sg_vmalloc_flag = 0;
+       if (unlikely(!mem->num_sg)) {
+               intel_agp_free_sglist(mem);
                return -ENOMEM;
        }
        return 0;
@@ -242,13 +245,7 @@ static void intel_agp_unmap_memory(struct agp_memory *mem)
 
        pci_unmap_sg(intel_private.pcidev, mem->sg_list,
                     mem->page_count, PCI_DMA_BIDIRECTIONAL);
-       if (mem->sg_vmalloc_flag)
-               vfree(mem->sg_list);
-       else
-               kfree(mem->sg_list);
-       mem->sg_vmalloc_flag = 0;
-       mem->sg_list = NULL;
-       mem->num_sg = 0;
+       intel_agp_free_sglist(mem);
 }
 
 static void intel_agp_insert_sg_entries(struct agp_memory *mem,
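
The two hunks above convert intel_agp_map_memory() and intel_agp_unmap_memory() from a hand-rolled scatterlist allocation (kcalloc() with a vmalloc() fallback tracked by sg_vmalloc_flag) to the generic sg_table API, which chains scatterlist pages internally. Because struct agp_memory only carries sg_list and num_sg, the new intel_agp_free_sglist() helper rebuilds a struct sg_table on the stack just to hand it to sg_free_table(). The fragment below is a minimal sketch of the same allocate/map/unmap/free lifecycle using the pci_map_sg()/pci_unmap_sg() wrappers of this kernel generation; the pdev, pages and npages names are illustrative placeholders, not taken from this driver.

    /* Sketch only: assumes the caller supplies a PCI device and an array
     * of already-allocated pages; not part of intel-agp.c itself.
     */
    #include <linux/pci.h>
    #include <linux/scatterlist.h>

    static int example_map_pages(struct pci_dev *pdev, struct page **pages,
                                 int npages, struct sg_table *st)
    {
            struct scatterlist *sg;
            int i, nents;

            /* sg_alloc_table() allocates and chains scatterlist pages as
             * needed, which is what replaces the kcalloc()/vmalloc() dance. */
            if (sg_alloc_table(st, npages, GFP_KERNEL))
                    return -ENOMEM;

            for_each_sg(st->sgl, sg, npages, i)
                    sg_set_page(sg, pages[i], PAGE_SIZE, 0);

            nents = pci_map_sg(pdev, st->sgl, npages, PCI_DMA_BIDIRECTIONAL);
            if (!nents) {
                    sg_free_table(st);
                    return -ENOMEM;
            }
            return nents;
    }

    static void example_unmap_pages(struct pci_dev *pdev, struct sg_table *st,
                                    int npages)
    {
            pci_unmap_sg(pdev, st->sgl, npages, PCI_DMA_BIDIRECTIONAL);
            sg_free_table(st);
    }

The sketch keeps the whole struct sg_table around, which is what makes its free path a plain sg_free_table() call; the driver instead stores only the sgl pointer inside struct agp_memory and rebuilds the table when freeing.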
@@ -296,7 +293,7 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
 
        for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                writel(agp_bridge->driver->mask_memory(agp_bridge,
-                              phys_to_gart(page_to_phys(mem->pages[i])), mask_type),
+                               page_to_phys(mem->pages[i]), mask_type),
                       intel_private.gtt+j);
        }
 
@@ -478,8 +475,7 @@ static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
                        global_cache_flush();
                for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                        writel(agp_bridge->driver->mask_memory(agp_bridge,
-                                                              phys_to_gart(page_to_phys(mem->pages[i])),
-                                                              mask_type),
+                                       page_to_phys(mem->pages[i]), mask_type),
                               intel_private.registers+I810_PTE_BASE+(j*4));
                }
                readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
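
The phys_to_gart() calls dropped here and in the intel_agp_insert_sg_entries hunk above (and once more in the intel_i830_insert_entries hunk further down) were identity conversions on x86, so removing the wrapper leaves the value written into each GTT entry unchanged. For reference, the definition being retired looked roughly like this in the x86 asm/agp.h of the time (quoted from memory, so treat it as approximate):

    /* arch/x86/include/asm/agp.h, before the wrapper was removed */
    #define phys_to_gart(x) (x)
    #define gart_to_phys(x) (x)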
@@ -811,23 +807,39 @@ static void intel_i830_setup_flush(void)
        if (!intel_private.i8xx_page)
                return;
 
-       /* make page uncached */
-       map_page_into_agp(intel_private.i8xx_page);
-
        intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
        if (!intel_private.i8xx_flush_page)
                intel_i830_fini_flush();
 }
 
+static void
+do_wbinvd(void *null)
+{
+       wbinvd();
+}
+
+/* The chipset_flush interface needs to get data that has already been
+ * flushed out of the CPU all the way out to main memory, because the GPU
+ * doesn't snoop those buffers.
+ *
+ * The 8xx series doesn't have the same lovely interface for flushing the
+ * chipset write buffers that the later chips do. According to the 865
+ * specs, it's 64 octwords, or 1KB.  So, to get those previous things in
+ * that buffer out, we just fill 1KB and clflush it out, on the assumption
+ * that it'll push whatever was in there out.  It appears to work.
+ */
 static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
 {
        unsigned int *pg = intel_private.i8xx_flush_page;
-       int i;
 
-       for (i = 0; i < 256; i += 2)
-               *(pg + i) = i;
+       memset(pg, 0, 1024);
 
-       wmb();
+       if (cpu_has_clflush) {
+               clflush_cache_range(pg, 1024);
+       } else {
+               if (on_each_cpu(do_wbinvd, NULL, 1) != 0)
+                       printk(KERN_ERR "Timed out waiting for cache flush.\n");
+       }
 }
 
 /* The intel i830 automatically initializes the agp aperture during POST.
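
The 1KB figure in the new comment is simply 64 octwords x 16 bytes = 1024 bytes, which is why intel_i830_chipset_flush() now fills and clflushes exactly 1024 bytes. The wbinvd() fallback is broadcast with on_each_cpu() because wbinvd only writes back and invalidates the caches of the CPU it executes on; every online CPU has to run it for the flush to be complete. A bare sketch of that broadcast pattern, with illustrative names and assuming the same x86 headers intel-agp.c already pulls in for wbinvd():

    #include <linux/smp.h>          /* on_each_cpu() */

    static void flush_one_cpu(void *unused)
    {
            wbinvd();               /* flushes only the local CPU's caches */
    }

    static void flush_all_cpu_caches(void)
    {
            /* last argument == 1: wait until every CPU has finished */
            on_each_cpu(flush_one_cpu, NULL, 1);
    }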
@@ -985,7 +997,7 @@ static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
 
        for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                writel(agp_bridge->driver->mask_memory(agp_bridge,
-                              phys_to_gart(page_to_phys(mem->pages[i])), mask_type),
+                               page_to_phys(mem->pages[i]), mask_type),
                       intel_private.registers+I810_PTE_BASE+(j*4));
        }
        readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
@@ -1342,8 +1354,10 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
        case PCI_DEVICE_ID_INTEL_Q45_HB:
        case PCI_DEVICE_ID_INTEL_G45_HB:
        case PCI_DEVICE_ID_INTEL_G41_HB:
+       case PCI_DEVICE_ID_INTEL_B43_HB:
        case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
        case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
+       case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
                *gtt_offset = *gtt_size = MB(2);
                break;
        default:
@@ -2335,12 +2349,16 @@ static const struct intel_driver_description {
            "Q45/Q43", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
            "G45/G43", NULL, &intel_i965_driver },
+       { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0,
+           "B43", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
            "G41", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0,
            "IGDNG/D", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
            "IGDNG/M", NULL, &intel_i965_driver },
+       { PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
+           "IGDNG/MA", NULL, &intel_i965_driver },
        { 0, 0, 0, NULL, NULL, NULL }
 };
 
@@ -2432,6 +2450,11 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
                                &bridge->mode);
        }
 
+       if (bridge->driver->mask_memory == intel_i965_mask_memory)
+               if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
+                       dev_err(&intel_private.pcidev->dev,
+                               "set gfx device dma mask 36bit failed!\n");
+
        pci_set_drvdata(pdev, bridge);
        return agp_add_bridge(bridge);
 }
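
This hunk is the change the subject line describes: chipsets driven through intel_i965_mask_memory() can encode a 36-bit physical address in their GTT entries, so the graphics device's DMA mask is widened to 36 bits, and doing it before agp_add_bridge() means the mappings created during bridge registration (the scratch page, and later any pci_map_sg() of inserted memory) already see the wider mask. DMA_BIT_MASK(36) is just the low-36-bit mask; roughly, from <linux/dma-mapping.h>:

    #include <linux/dma-mapping.h>

    /* DMA_BIT_MASK(n) expands (approximately) to
     *   ((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1)
     * so the mask set above is:
     */
    u64 gfx_dma_mask = DMA_BIT_MASK(36);    /* 0x0000000fffffffffULL, 64GB */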
@@ -2454,15 +2477,6 @@ static int agp_intel_resume(struct pci_dev *pdev)
        struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
        int ret_val;
 
-       pci_restore_state(pdev);
-
-       /* We should restore our graphics device's config space,
-        * as host bridge (00:00) resumes before graphics device (02:00),
-        * then our access to its pci space can work right.
-        */
-       if (intel_private.pcidev)
-               pci_restore_state(intel_private.pcidev);
-
        if (bridge->driver == &intel_generic_driver)
                intel_configure();
        else if (bridge->driver == &intel_850_driver)
@@ -2542,8 +2556,10 @@ static struct pci_device_id agp_intel_pci_table[] = {
        ID(PCI_DEVICE_ID_INTEL_Q45_HB),
        ID(PCI_DEVICE_ID_INTEL_G45_HB),
        ID(PCI_DEVICE_ID_INTEL_G41_HB),
+       ID(PCI_DEVICE_ID_INTEL_B43_HB),
        ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
        ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
+       ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
        { }
 };
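
For context on the two new table entries at the end: each ID() line in agp_intel_pci_table expands to a struct pci_device_id matching an Intel host bridge by device ID, so listing B43 and the second IGDNG mobile bridge here is what allows the module to bind to those chipsets in the first place. The macro, defined earlier in intel-agp.c, looks roughly like this (quoted from memory, treat as approximate):

    #define ID(x)                                           \
            {                                               \
            .class = (PCI_CLASS_BRIDGE_HOST << 8),          \
            .class_mask = ~0,                               \
            .vendor = PCI_VENDOR_ID_INTEL,                  \
            .device = x,                                    \
            .subvendor = PCI_ANY_ID,                        \
            .subdevice = PCI_ANY_ID,                        \
            }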