x86 gart: don't complain if no AMD GART found
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index d2b46b4..a35eaa3 100644
 #include <linux/bitops.h>
 #include <linux/kdebug.h>
 #include <linux/scatterlist.h>
+#include <linux/iommu-helper.h>
+#include <linux/sysdev.h>
+#include <linux/io.h>
 #include <asm/atomic.h>
-#include <asm/io.h>
 #include <asm/mtrr.h>
 #include <asm/pgtable.h>
 #include <asm/proto.h>
+#include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/cacheflush.h>
 #include <asm/swiotlb.h>
@@ -64,9 +67,6 @@ static u32 gart_unmapped_entry;
        (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
 #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
 
-#define to_pages(addr, size) \
-       (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
-
 #define EMERGENCY_PAGES 32 /* = 128KB */
 
 #ifdef CONFIG_AGP
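The removed to_pages() macro is superseded by iommu_num_pages() from the new
linux/iommu-helper.h include. A sketch of what that helper computes,
equivalent to the macro it replaces (illustrative, not part of the patch):

/* Pages spanned by the byte range [addr, addr + len), for a given
 * IOMMU page size; matches the semantics of the removed to_pages(). */
static unsigned long example_iommu_num_pages(unsigned long addr,
					     unsigned long len,
					     unsigned long io_page_size)
{
	unsigned long size = (addr & (io_page_size - 1)) + len;

	return DIV_ROUND_UP(size, io_page_size);
}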
@@ -80,30 +80,38 @@ AGPEXTERN int agp_memory_reserved;
 AGPEXTERN __u32 *agp_gatt_table;
 
 static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
-static int need_flush;         /* global flush state. set for each gart wrap */
+static bool need_flush;                /* global flush state. set for each gart wrap */
 
-static unsigned long alloc_iommu(int size)
+static unsigned long alloc_iommu(struct device *dev, int size,
+                                unsigned long align_mask)
 {
        unsigned long offset, flags;
+       unsigned long boundary_size;
+       unsigned long base_index;
+
+       base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
+                          PAGE_SIZE) >> PAGE_SHIFT;
+       boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
+                             PAGE_SIZE) >> PAGE_SHIFT;
 
        spin_lock_irqsave(&iommu_bitmap_lock, flags);
-       offset = find_next_zero_string(iommu_gart_bitmap, next_bit,
-                                       iommu_pages, size);
+       offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
+                                 size, base_index, boundary_size, align_mask);
        if (offset == -1) {
-               need_flush = 1;
-               offset = find_next_zero_string(iommu_gart_bitmap, 0,
-                                               iommu_pages, size);
+               need_flush = true;
+               offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
+                                         size, base_index, boundary_size,
+                                         align_mask);
        }
        if (offset != -1) {
-               set_bit_string(iommu_gart_bitmap, offset, size);
                next_bit = offset+size;
                if (next_bit >= iommu_pages) {
                        next_bit = 0;
-                       need_flush = 1;
+                       need_flush = true;
                }
        }
        if (iommu_fullflush)
-               need_flush = 1;
+               need_flush = true;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 
        return offset;
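alloc_iommu() now feeds the device's DMA segment boundary into
iommu_area_alloc(), so allocations can no longer straddle a boundary the
device cannot cross. A worked sketch of the boundary math under the default
mask (values illustrative, not part of the patch):

	/* Assuming 4 KiB pages and the default segment boundary mask
	 * of 4 GiB - 1, as returned by dma_get_seg_boundary(dev): */
	unsigned long long seg_mask = 0xffffffffULL;
	unsigned long boundary_size = ALIGN(seg_mask + 1, PAGE_SIZE)
					>> PAGE_SHIFT;
	/* boundary_size == 0x100000 pages: iommu_area_alloc() will never
	 * hand out a range that straddles a 4 GiB bus-address boundary. */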
@@ -114,7 +122,9 @@ static void free_iommu(unsigned long offset, int size)
        unsigned long flags;
 
        spin_lock_irqsave(&iommu_bitmap_lock, flags);
-       __clear_bit_string(iommu_gart_bitmap, offset, size);
+       iommu_area_free(iommu_gart_bitmap, offset, size);
+       if (offset >= next_bit)
+               next_bit = offset + size;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 }
 
@@ -128,7 +138,7 @@ static void flush_gart(void)
        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        if (need_flush) {
                k8_flush_garts();
-               need_flush = 0;
+               need_flush = false;
        }
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 }
@@ -167,7 +177,8 @@ static void dump_leak(void)
               iommu_leak_pages);
        for (i = 0; i < iommu_leak_pages; i += 2) {
                printk(KERN_DEBUG "%lu: ", iommu_pages-i);
-               printk_address((unsigned long) iommu_leak_tab[iommu_pages-i]);
+               printk_address((unsigned long) iommu_leak_tab[iommu_pages-i],
+                               0);
                printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
        }
        printk(KERN_DEBUG "\n");
@@ -189,9 +200,7 @@ static void iommu_full(struct device *dev, size_t size, int dir)
         * out. Hopefully no network devices use single mappings that big.
         */
 
-       printk(KERN_ERR
-               "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
-               size, dev->bus_id);
+       dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);
 
        if (size > PAGE_SIZE*EMERGENCY_PAGES) {
                if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
@@ -208,34 +217,24 @@ static void iommu_full(struct device *dev, size_t size, int dir)
 static inline int
 need_iommu(struct device *dev, unsigned long addr, size_t size)
 {
-       u64 mask = *dev->dma_mask;
-       int high = addr + size > mask;
-       int mmu = high;
-
-       if (force_iommu)
-               mmu = 1;
-
-       return mmu;
+       return force_iommu ||
+               !is_buffer_dma_capable(*dev->dma_mask, addr, size);
 }
 
 static inline int
 nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
 {
-       u64 mask = *dev->dma_mask;
-       int high = addr + size > mask;
-       int mmu = high;
-
-       return mmu;
+       return !is_buffer_dma_capable(*dev->dma_mask, addr, size);
 }
 
 /* Map a single contiguous physical area into the IOMMU.
  * Caller needs to check if the iommu is needed and flush.
  */
 static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
-                               size_t size, int dir)
+                               size_t size, int dir, unsigned long align_mask)
 {
-       unsigned long npages = to_pages(phys_mem, size);
-       unsigned long iommu_page = alloc_iommu(npages);
+       unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
+       unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
        int i;
 
        if (iommu_page == -1) {
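The open-coded mask comparisons in need_iommu()/nonforced_iommu() collapse
into is_buffer_dma_capable(). A sketch of that helper, consistent with its
use here (assumed, not quoted from the header):

/* A buffer is DMA-capable iff it lies entirely below the device's
 * DMA mask, i.e. the device can address every byte of it directly. */
static inline bool is_buffer_dma_capable(u64 mask, dma_addr_t addr,
					 size_t size)
{
	return addr + size <= mask;
}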
@@ -255,30 +254,20 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
        return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
 }
 
-static dma_addr_t
-gart_map_simple(struct device *dev, char *buf, size_t size, int dir)
-{
-       dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
-
-       flush_gart();
-
-       return map;
-}
-
 /* Map a single area into the IOMMU */
 static dma_addr_t
-gart_map_single(struct device *dev, void *addr, size_t size, int dir)
+gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
 {
-       unsigned long phys_mem, bus;
+       unsigned long bus;
 
        if (!dev)
-               dev = &fallback_dev;
+               dev = &x86_dma_fallback_dev;
 
-       phys_mem = virt_to_phys(addr);
-       if (!need_iommu(dev, phys_mem, size))
-               return phys_mem;
+       if (!need_iommu(dev, paddr, size))
+               return paddr;
 
-       bus = gart_map_simple(dev, addr, size, dir);
+       bus = dma_map_area(dev, paddr, size, dir, 0);
+       flush_gart();
 
        return bus;
 }
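With gart_map_simple() folded away, gart_map_single() is reached through the
usual streaming-DMA entry points. A hypothetical driver-side usage (pdev,
buf, and len are illustrative names, not from the patch):

	dma_addr_t bus;

	/* With gart_dma_ops installed, this lands in gart_map_single(). */
	bus = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	/* ... device performs DMA ... */
	dma_unmap_single(&pdev->dev, bus, len, DMA_TO_DEVICE);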
@@ -298,7 +287,7 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
                return;
 
        iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
-       npages = to_pages(dma_addr, size);
+       npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
                CLEAR_LEAK(iommu_page + i);
@@ -337,7 +326,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
                unsigned long addr = sg_phys(s);
 
                if (nonforced_iommu(dev, addr, s->length)) {
-                       addr = dma_map_area(dev, addr, s->length, dir);
+                       addr = dma_map_area(dev, addr, s->length, dir, 0);
                        if (addr == bad_dma_address) {
                                if (i > 0)
                                        gart_unmap_sg(dev, sg, i, dir);
@@ -355,10 +344,11 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 }
 
 /* Map multiple scatterlist entries contiguously into the first. */
-static int __dma_map_cont(struct scatterlist *start, int nelems,
-                         struct scatterlist *sout, unsigned long pages)
+static int __dma_map_cont(struct device *dev, struct scatterlist *start,
+                         int nelems, struct scatterlist *sout,
+                         unsigned long pages)
 {
-       unsigned long iommu_start = alloc_iommu(pages);
+       unsigned long iommu_start = alloc_iommu(dev, pages, 0);
        unsigned long iommu_page = iommu_start;
        struct scatterlist *s;
        int i;
@@ -380,7 +370,7 @@ static int __dma_map_cont(struct scatterlist *start, int nelems,
                }
 
                addr = phys_addr;
-               pages = to_pages(s->offset, s->length);
+               pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
                while (pages--) {
                        iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
                        SET_LEAK(iommu_page);
@@ -394,8 +384,8 @@ static int __dma_map_cont(struct scatterlist *start, int nelems,
 }
 
 static inline int
-dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
-            unsigned long pages, int need)
+dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
+            struct scatterlist *sout, unsigned long pages, int need)
 {
        if (!need) {
                BUG_ON(nelems != 1);
@@ -403,7 +393,7 @@ dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
                sout->dma_length = start->length;
                return 0;
        }
-       return __dma_map_cont(start, nelems, sout, pages);
+       return __dma_map_cont(dev, start, nelems, sout, pages);
 }
 
 /*
@@ -416,16 +406,20 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
        struct scatterlist *s, *ps, *start_sg, *sgmap;
        int need = 0, nextneed, i, out, start;
        unsigned long pages = 0;
+       unsigned int seg_size;
+       unsigned int max_seg_size;
 
        if (nents == 0)
                return 0;
 
        if (!dev)
-               dev = &fallback_dev;
+               dev = &x86_dma_fallback_dev;
 
        out = 0;
        start = 0;
        start_sg = sgmap = sg;
+       seg_size = 0;
+       max_seg_size = dma_get_max_seg_size(dev);
        ps = NULL; /* shut up gcc */
        for_each_sg(sg, s, nents, i) {
                dma_addr_t addr = sg_phys(s);
@@ -443,11 +437,13 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
                         * offset.
                         */
                        if (!iommu_merge || !nextneed || !need || s->offset ||
+                           (s->length + seg_size > max_seg_size) ||
                            (ps->offset + ps->length) % PAGE_SIZE) {
-                               if (dma_map_cont(start_sg, i - start, sgmap,
-                                                 pages, need) < 0)
+                               if (dma_map_cont(dev, start_sg, i - start,
+                                                sgmap, pages, need) < 0)
                                        goto error;
                                out++;
+                               seg_size = 0;
                                sgmap = sg_next(sgmap);
                                pages = 0;
                                start = i;
@@ -455,11 +451,12 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
                        }
                }
 
+               seg_size += s->length;
                need = nextneed;
-               pages += to_pages(s->offset, s->length);
+               pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
                ps = s;
        }
-       if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
+       if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
                goto error;
        out++;
        flush_gart();
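The new seg_size/max_seg_size bookkeeping caps merged segments at the
device's dma_get_max_seg_size(). A sketch of the rule the added check
enforces (gart_can_merge is a hypothetical name, not in the patch):

/* Only fold another entry into the running merged segment while the
 * combined length stays within the device's segment-size limit. */
static inline bool gart_can_merge(unsigned int seg_size,
				  unsigned int new_len,
				  unsigned int max_seg_size)
{
	return seg_size + new_len <= max_seg_size;
}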
@@ -488,6 +485,46 @@ error:
        return 0;
 }
 
+/* allocate and map a coherent mapping */
+static void *
+gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
+                   gfp_t flag)
+{
+       dma_addr_t paddr;
+       unsigned long align_mask;
+       struct page *page;
+
+       if (force_iommu && !(flag & GFP_DMA)) {
+               flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+               page = alloc_pages(flag | __GFP_ZERO, get_order(size));
+               if (!page)
+                       return NULL;
+
+               align_mask = (1UL << get_order(size)) - 1;
+               paddr = dma_map_area(dev, page_to_phys(page), size,
+                                    DMA_BIDIRECTIONAL, align_mask);
+
+               flush_gart();
+               if (paddr != bad_dma_address) {
+                       *dma_addr = paddr;
+                       return page_address(page);
+               }
+               __free_pages(page, get_order(size));
+       } else
+               return dma_generic_alloc_coherent(dev, size, dma_addr, flag);
+
+       return NULL;
+}
+
+/* free a coherent mapping */
+static void
+gart_free_coherent(struct device *dev, size_t size, void *vaddr,
+                  dma_addr_t dma_addr)
+{
+       gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
+       free_pages((unsigned long)vaddr, get_order(size));
+}
+
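gart_alloc_coherent() passes an align_mask so the coherent buffer stays
naturally aligned in GART space. A worked sketch of that computation,
assuming 4 KiB pages (values illustrative):

	size_t size = 16384;	/* hypothetical 16 KiB coherent request */
	unsigned long align_mask = (1UL << get_order(size)) - 1; /* == 3 */

	/* alloc_iommu() may then only return GART page indexes with
	 * (offset & 3) == 0, i.e. multiples of four pages. */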
 static int no_agp;
 
 static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -501,7 +538,7 @@ static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
        }
 
        a = aper + iommu_size;
-       iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;
+       iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;
 
        if (iommu_size < 64*1024*1024) {
                printk(KERN_WARNING
@@ -518,8 +555,8 @@ static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
        unsigned aper_size = 0, aper_base_32, aper_order;
        u64 aper_base;
 
-       pci_read_config_dword(dev, 0x94, &aper_base_32);
-       pci_read_config_dword(dev, 0x90, &aper_order);
+       pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
+       pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
        aper_order = (aper_order >> 1) & 7;
 
        aper_base = aper_base_32 & 0x7fff;
@@ -533,6 +570,77 @@ static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
        return aper_base;
 }
 
+static void enable_gart_translations(void)
+{
+       int i;
+
+       for (i = 0; i < num_k8_northbridges; i++) {
+               struct pci_dev *dev = k8_northbridges[i];
+
+               enable_gart_translation(dev, __pa(agp_gatt_table));
+       }
+}
+
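enable_gart_translation() is assumed to come from asm/gart.h; a sketch
reconstructed from the open-coded register sequence this patch removes from
init_k8_gatt() further down:

/* Point AMD64_GARTTABLEBASE at the GATT and set GARTEN while allowing
 * both CPU and I/O accesses through the GART. */
static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
{
	u32 tmp, ctl;

	tmp = (u32)(addr >> 12) << 4;		/* GATT base, bits 31:4 */
	pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);

	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
	ctl |= GARTEN;				/* bit 0: enable GART */
	ctl &= ~(DISGARTCPU | DISGARTIO);	/* bits 4,5: allow accesses */
	pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
}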
+/*
+ * If fix_up_north_bridges is set, the north bridges have to be fixed up on
+ * resume in the same way as they are handled in gart_iommu_hole_init().
+ */
+static bool fix_up_north_bridges;
+static u32 aperture_order;
+static u32 aperture_alloc;
+
+void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
+{
+       fix_up_north_bridges = true;
+       aperture_order = aper_order;
+       aperture_alloc = aper_alloc;
+}
+
+static int gart_resume(struct sys_device *dev)
+{
+       printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");
+
+       if (fix_up_north_bridges) {
+               int i;
+
+               printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");
+
+               for (i = 0; i < num_k8_northbridges; i++) {
+                       struct pci_dev *dev = k8_northbridges[i];
+
+                       /*
+                        * Don't enable translations just yet.  That is the next
+                        * step.  Restore the pre-suspend aperture settings.
+                        */
+                       pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
+                                               aperture_order << 1);
+                       pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
+                                               aperture_alloc >> 25);
+               }
+       }
+
+       enable_gart_translations();
+
+       return 0;
+}
+
+static int gart_suspend(struct sys_device *dev, pm_message_t state)
+{
+       return 0;
+}
+
+static struct sysdev_class gart_sysdev_class = {
+       .name = "gart",
+       .suspend = gart_suspend,
+       .resume = gart_resume,
+};
+
+static struct sys_device device_gart = {
+       .id     = 0,
+       .cls    = &gart_sysdev_class,
+};
+
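The resume path rewrites the aperture registers with the same encoding that
read_aperture() decodes above. A worked sketch of that encoding (values
hypothetical, not from the patch):

	u32 aper_order = 1;		/* size = 32 MiB << order == 64 MiB */
	u32 aper_alloc = 0xa0000000;	/* hypothetical aperture base */

	pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, aper_order << 1);
	pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aper_alloc >> 25);
	/* CTL holds the 3-bit size field at bits 3:1; BASE stores bits
	 * 39:25 of the physical address, i.e. the base in 32 MiB units. */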
 /*
  * Private Northbridge GATT initialization in case we cannot use the
  * AGP driver for some reason.
@@ -543,7 +651,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
        unsigned aper_base, new_aper_base;
        struct pci_dev *dev;
        void *gatt;
-       int i;
+       int i, error;
 
        printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
        aper_size = aper_base = info->aper_size = 0;
@@ -567,60 +675,45 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
        info->aper_size = aper_size >> 20;
 
        gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
-       gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
+       gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                       get_order(gatt_size));
        if (!gatt)
                panic("Cannot allocate GATT table");
-       if (change_page_attr_addr((unsigned long)gatt, gatt_size >> PAGE_SHIFT,
-                                 PAGE_KERNEL_NOCACHE))
+       if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
                panic("Could not set GART PTEs to uncacheable pages");
-       global_flush_tlb();
 
-       memset(gatt, 0, gatt_size);
        agp_gatt_table = gatt;
 
-       for (i = 0; i < num_k8_northbridges; i++) {
-               u32 gatt_reg;
-               u32 ctl;
-
-               dev = k8_northbridges[i];
-               gatt_reg = __pa(gatt) >> 12;
-               gatt_reg <<= 4;
-               pci_write_config_dword(dev, 0x98, gatt_reg);
-               pci_read_config_dword(dev, 0x90, &ctl);
+       enable_gart_translations();
 
-               ctl |= 1;
-               ctl &= ~((1<<4) | (1<<5));
+       error = sysdev_class_register(&gart_sysdev_class);
+       if (!error)
+               error = sysdev_register(&device_gart);
+       if (error)
+               panic("Could not register gart_sysdev -- "
+                     "would corrupt data on next suspend");
 
-               pci_write_config_dword(dev, 0x90, ctl);
-       }
        flush_gart();
 
        printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
               aper_base, aper_size>>10);
+
        return 0;
 
  nommu:
        /* Should not happen anymore */
-       printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
-              KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
+       printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
+              KERN_WARNING "falling back to iommu=soft.\n");
        return -1;
 }
 
-extern int agp_amd64_init(void);
-
-static const struct dma_mapping_ops gart_dma_ops = {
-       .mapping_error                  = NULL,
+static struct dma_mapping_ops gart_dma_ops = {
        .map_single                     = gart_map_single,
-       .map_simple                     = gart_map_simple,
        .unmap_single                   = gart_unmap_single,
-       .sync_single_for_cpu            = NULL,
-       .sync_single_for_device         = NULL,
-       .sync_single_range_for_cpu      = NULL,
-       .sync_single_range_for_device   = NULL,
-       .sync_sg_for_cpu                = NULL,
-       .sync_sg_for_device             = NULL,
        .map_sg                         = gart_map_sg,
        .unmap_sg                       = gart_unmap_sg,
+       .alloc_coherent                 = gart_alloc_coherent,
+       .free_coherent                  = gart_free_coherent,
 };
 
 void gart_iommu_shutdown(void)
@@ -635,11 +728,11 @@ void gart_iommu_shutdown(void)
                u32 ctl;
 
                dev = k8_northbridges[i];
-               pci_read_config_dword(dev, 0x90, &ctl);
+               pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
 
-               ctl &= ~1;
+               ctl &= ~GARTEN;
 
-               pci_write_config_dword(dev, 0x90, ctl);
+               pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
        }
 }
 
@@ -647,14 +740,13 @@ void __init gart_iommu_init(void)
 {
        struct agp_kern_info info;
        unsigned long iommu_start;
-       unsigned long aper_size;
+       unsigned long aper_base, aper_size;
+       unsigned long start_pfn, end_pfn;
        unsigned long scratch;
        long i;
 
-       if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
-               printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
+       if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
                return;
-       }
 
 #ifndef CONFIG_AGP_AMD64
        no_agp = 1;
@@ -674,35 +766,40 @@ void __init gart_iommu_init(void)
                return;
 
        if (no_iommu ||
-           (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
+           (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
            !gart_iommu_aperture ||
            (no_agp && init_k8_gatt(&info) < 0)) {
-               if (end_pfn > MAX_DMA32_PFN) {
-                       printk(KERN_ERR "WARNING more than 4GB of memory "
-                                       "but GART IOMMU not available.\n"
-                              KERN_ERR "WARNING 32bit PCI may malfunction.\n");
+               if (max_pfn > MAX_DMA32_PFN) {
+                       printk(KERN_WARNING "More than 4GB of memory "
+                              "but GART IOMMU not available.\n");
+                       printk(KERN_WARNING "falling back to iommu=soft.\n");
                }
                return;
        }
 
+       /* need to map that range */
+       aper_size = info.aper_size << 20;
+       aper_base = info.aper_base;
+       end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
+       if (end_pfn > max_low_pfn_mapped) {
+               start_pfn = (aper_base>>PAGE_SHIFT);
+               init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+       }
+
        printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
-       aper_size = info.aper_size * 1024 * 1024;
        iommu_size = check_iommu_size(info.aper_base, aper_size);
        iommu_pages = iommu_size >> PAGE_SHIFT;
 
-       iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
+       iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                      get_order(iommu_pages/8));
        if (!iommu_gart_bitmap)
                panic("Cannot allocate iommu bitmap\n");
-       memset(iommu_gart_bitmap, 0, iommu_pages/8);
 
 #ifdef CONFIG_IOMMU_LEAK
        if (leak_trace) {
-               iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
+               iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
                                  get_order(iommu_pages*sizeof(void *)));
-               if (iommu_leak_tab)
-                       memset(iommu_leak_tab, 0, iommu_pages * 8);
-               else
+               if (!iommu_leak_tab)
                        printk(KERN_DEBUG
                               "PCI-DMA: Cannot allocate leak trace area\n");
        }
@@ -712,7 +809,7 @@ void __init gart_iommu_init(void)
         * Out of IOMMU space handling.
         * Reserve some invalid pages at the beginning of the GART.
         */
-       set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
+       iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
 
        agp_memory_reserved = iommu_size;
        printk(KERN_INFO
@@ -733,13 +830,23 @@ void __init gart_iommu_init(void)
         * the backing memory. The GART address is only used by PCI
         * devices.
         */
-       clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);
+       set_memory_np((unsigned long)__va(iommu_bus_base),
+                               iommu_size >> PAGE_SHIFT);
+       /*
+        * Tricky. The GART table remaps the physical memory range,
+        * so the CPU won't notice potential aliases, and if the memory
+        * is remapped to UC later on, we might surprise the PCI devices
+        * with a stray writeout of a cacheline. So play it safe and
+        * do an explicit, full-scale wbinvd() _after_ having marked all
+        * the pages as Not-Present:
+        */
+       wbinvd();
 
        /*
-        * Try to workaround a bug (thanks to BenH)
+        * Try to work around a bug (thanks to BenH):
         * Set unmapped entries to a scratch page instead of 0.
         * Any prefetches that hit unmapped entries won't get a bus abort
-        * then.
+        * then. (P2P bridge may be prefetching on DMA reads).
         */
        scratch = get_zeroed_page(GFP_KERNEL);
        if (!scratch)
@@ -760,7 +867,8 @@ void __init gart_parse_options(char *p)
        if (!strncmp(p, "leak", 4)) {
                leak_trace = 1;
                p += 4;
-               if (*p == '=') ++p;
+               if (*p == '=')
+                       ++p;
                if (isdigit(*p) && get_option(&p, &arg))
                        iommu_leak_pages = arg;
        }