iommu-helper: use bitmap library
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index ba7ad83..34de53b 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -5,7 +5,7 @@
  * This allows the use of PCI devices that only support 32-bit addresses on
  * systems with more than 4GB of memory.
  *
- * See Documentation/DMA-mapping.txt for the interface specification.
+ * See Documentation/PCI/PCI-DMA-mapping.txt for the interface specification.
  *
  * Copyright 2002 Andi Kleen, SuSE Labs.
  * Subject to the GNU General Public License v2 only.
 #include <linux/agp_backend.h>
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/sched.h>
 #include <linux/string.h>
 #include <linux/spinlock.h>
 #include <linux/pci.h>
 #include <linux/module.h>
 #include <linux/topology.h>
 #include <linux/interrupt.h>
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
 #include <linux/kdebug.h>
 #include <linux/scatterlist.h>
 #include <linux/iommu-helper.h>
@@ -38,6 +39,7 @@
 #include <asm/swiotlb.h>
 #include <asm/dma.h>
 #include <asm/k8.h>
+#include <asm/x86_init.h>
 
 static unsigned long iommu_bus_base;   /* GART remapping area (physical) */
 static unsigned long iommu_size;       /* size of remapping area in bytes */
@@ -45,6 +47,8 @@ static unsigned long iommu_pages;     /* .. and in pages */
 
 static u32 *iommu_gatt_base;           /* Remapping table */
 
+static dma_addr_t bad_dma_addr;
+
 /*
  * If this is disabled the IOMMU will use an optimized flushing strategy
  * of only flushing when a mapping is reused. With it true the GART is
@@ -52,7 +56,7 @@ static u32 *iommu_gatt_base;          /* Remapping table */
  * to trigger bugs with some popular PCI cards, in particular 3ware (but
  * has also been seen with Qlogic at least).
  */
-int iommu_fullflush = 1;
+static int iommu_fullflush = 1;
 
 /* Allocation bitmap for the remapping area: */
 static DEFINE_SPINLOCK(iommu_bitmap_lock);
@@ -91,7 +95,7 @@ static unsigned long alloc_iommu(struct device *dev, int size,
 
        base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
                           PAGE_SIZE) >> PAGE_SHIFT;
-       boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
+       boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
                              PAGE_SIZE) >> PAGE_SHIFT;
 
        spin_lock_irqsave(&iommu_bitmap_lock, flags);
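For intuition about the boundary math above: dma_get_seg_boundary() returns a mask such as 0xffffffff for a device whose mappings must not cross a 4 GB boundary. A minimal userspace sketch of the computation, assuming 4 KB pages and reimplementing ALIGN() locally rather than pulling in kernel headers:

```c
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
/* same rounding the kernel's ALIGN() macro performs */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long long)(a) - 1))

int main(void)
{
	unsigned long long seg_boundary = 0xffffffffULL; /* 4 GB - 1 mask */

	/* boundary expressed in IOMMU pages, as in alloc_iommu() */
	unsigned long long boundary_size =
		ALIGN(seg_boundary + 1, PAGE_SIZE) >> PAGE_SHIFT;

	printf("boundary_size = %llu pages (0x%llx)\n",
	       boundary_size, boundary_size);   /* 1048576 pages == 4 GB */
	return 0;
}
```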
@@ -122,7 +126,7 @@ static void free_iommu(unsigned long offset, int size)
        unsigned long flags;
 
        spin_lock_irqsave(&iommu_bitmap_lock, flags);
-       iommu_area_free(iommu_gart_bitmap, offset, size);
+       bitmap_clear(iommu_gart_bitmap, offset, size);
        if (offset >= next_bit)
                next_bit = offset + size;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
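The core of this patch is replacing the iommu-helper calls with the generic bitmap library: iommu_area_free() becomes bitmap_clear() here, and iommu_area_reserve() becomes bitmap_set() further down. A userspace model of the two helpers' semantics; the set/clear loops below are simplified stand-ins, not the kernel implementations:

```c
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_WORDS  4

/* stand-in for the kernel's bitmap_set(): set a run of bits */
static void bitmap_set(unsigned long *map, unsigned start, unsigned len)
{
	for (unsigned i = start; i < start + len; i++)
		map[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

/* stand-in for the kernel's bitmap_clear(): clear a run of bits */
static void bitmap_clear(unsigned long *map, unsigned start, unsigned len)
{
	for (unsigned i = start; i < start + len; i++)
		map[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
}

int main(void)
{
	unsigned long gart_bitmap[BITMAP_WORDS] = { 0 };

	bitmap_set(gart_bitmap, 0, 32);    /* reserve a run, EMERGENCY_PAGES-style */
	bitmap_clear(gart_bitmap, 8, 16);  /* free a 16-page mapping at offset 8 */

	printf("word 0 = 0x%lx\n", gart_bitmap[0]);  /* prints 0xff0000ff */
	return 0;
}
```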
@@ -144,48 +148,21 @@ static void flush_gart(void)
 }
 
 #ifdef CONFIG_IOMMU_LEAK
-
-#define SET_LEAK(x)                                                    \
-       do {                                                            \
-               if (iommu_leak_tab)                                     \
-                       iommu_leak_tab[x] = __builtin_return_address(0);\
-       } while (0)
-
-#define CLEAR_LEAK(x)                                                  \
-       do {                                                            \
-               if (iommu_leak_tab)                                     \
-                       iommu_leak_tab[x] = NULL;                       \
-       } while (0)
-
 /* Debugging aid for drivers that don't free their IOMMU tables */
-static void **iommu_leak_tab;
 static int leak_trace;
 static int iommu_leak_pages = 20;
 
 static void dump_leak(void)
 {
-       int i;
        static int dump;
 
-       if (dump || !iommu_leak_tab)
+       if (dump)
                return;
        dump = 1;
-       show_stack(NULL, NULL);
 
-       /* Very crude. dump some from the end of the table too */
-       printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
-              iommu_leak_pages);
-       for (i = 0; i < iommu_leak_pages; i += 2) {
-               printk(KERN_DEBUG "%lu: ", iommu_pages-i);
-               printk_address((unsigned long) iommu_leak_tab[iommu_pages-i],
-                               0);
-               printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
-       }
-       printk(KERN_DEBUG "\n");
+       show_stack(NULL, NULL);
+       debug_dma_dump_mappings(NULL);
 }
-#else
-# define SET_LEAK(x)
-# define CLEAR_LEAK(x)
 #endif
 
 static void iommu_full(struct device *dev, size_t size, int dir)
@@ -217,14 +194,13 @@ static void iommu_full(struct device *dev, size_t size, int dir)
 static inline int
 need_iommu(struct device *dev, unsigned long addr, size_t size)
 {
-       return force_iommu ||
-               !is_buffer_dma_capable(*dev->dma_mask, addr, size);
+       return force_iommu || !dma_capable(dev, addr, size);
 }
 
 static inline int
 nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
 {
-       return !is_buffer_dma_capable(*dev->dma_mask, addr, size);
+       return !dma_capable(dev, addr, size);
 }
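dma_capable() hides the dereference of dev->dma_mask that the old is_buffer_dma_capable() forced on every caller. A simplified model of the check it performs; the real inline lives in the DMA mapping headers, and this sketch shows only the mask test:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;

struct device {
	uint64_t *dma_mask;   /* e.g. points to 0xffffffff for a 32-bit device */
};

/* simplified model: the buffer is usable without remapping only if its
 * last byte still falls inside the device's addressable range */
static bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;
	return addr + size - 1 <= *dev->dma_mask;
}
```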
 
 /* Map a single contiguous physical area into the IOMMU.
@@ -243,22 +219,24 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
                if (panic_on_overflow)
                        panic("dma_map_area overflow %lu bytes\n", size);
                iommu_full(dev, size, dir);
-               return bad_dma_address;
+               return bad_dma_addr;
        }
 
        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
-               SET_LEAK(iommu_page + i);
                phys_mem += PAGE_SIZE;
        }
        return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
 }
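The handle returned above is the GART window base plus the allocated slot, with the buffer's in-page offset preserved; the loop advanced phys_mem only by whole pages, so its low bits still carry the original offset. A worked example with hypothetical addresses:

```c
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	/* hypothetical values: a GART window at 3 GB, slot 5, offset 0x234 */
	unsigned long iommu_bus_base = 0xc0000000UL;
	unsigned long iommu_page     = 5;
	unsigned long phys_mem       = 0x12345234UL;

	/* same composition as the return statement in dma_map_area() */
	unsigned long handle = iommu_bus_base + iommu_page * PAGE_SIZE +
			       (phys_mem & ~PAGE_MASK);

	printf("dma handle = 0x%lx\n", handle);  /* prints 0xc0005234 */
	return 0;
}
```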
 
 /* Map a single area into the IOMMU */
-static dma_addr_t
-gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
+static dma_addr_t gart_map_page(struct device *dev, struct page *page,
+                               unsigned long offset, size_t size,
+                               enum dma_data_direction dir,
+                               struct dma_attrs *attrs)
 {
        unsigned long bus;
+       phys_addr_t paddr = page_to_phys(page) + offset;
 
        if (!dev)
                dev = &x86_dma_fallback_dev;
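gart_map_single() can disappear because the generic DMA layer expresses a single mapping as a page mapping: virt_to_page() plus offset_in_page() recover exactly the physical address the old path used. A toy userspace model of that equivalence; struct page and the conversions are mocked here, assuming an identity mapping:

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* toy stand-in for the kernel's struct page machinery */
struct page { uintptr_t pfn; };

static uintptr_t page_to_phys(struct page *p)
{
	return p->pfn << PAGE_SHIFT;
}

/* the page+offset form the new ->map_page hook receives */
static uintptr_t map_page_model(struct page *page, unsigned long offset)
{
	return page_to_phys(page) + offset;  /* as in gart_map_page() */
}

int main(void)
{
	/* a "virtual" address, identity-mapped for the sake of the model */
	uintptr_t vaddr = 0x12345678;

	struct page pg = { .pfn = vaddr >> PAGE_SHIFT };  /* virt_to_page() */
	unsigned long offset = vaddr & (PAGE_SIZE - 1);   /* offset_in_page() */

	/* map_single(vaddr) and map_page(page, offset) target the same bytes */
	printf("phys = 0x%lx\n", (unsigned long)map_page_model(&pg, offset));
	return 0;
}
```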
@@ -275,8 +253,9 @@ gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
 /*
  * Free a DMA mapping.
  */
-static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
-                             size_t size, int direction)
+static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
+                           size_t size, enum dma_data_direction dir,
+                           struct dma_attrs *attrs)
 {
        unsigned long iommu_page;
        int npages;
@@ -290,7 +269,6 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
        npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
-               CLEAR_LEAK(iommu_page + i);
        }
        free_iommu(iommu_page, npages);
 }
@@ -298,8 +276,8 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
 /*
  * Wrapper for gart_unmap_page() working with scatterlists.
  */
-static void
-gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+                         enum dma_data_direction dir, struct dma_attrs *attrs)
 {
        struct scatterlist *s;
        int i;
@@ -307,7 +285,7 @@ gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
        for_each_sg(sg, s, nents, i) {
                if (!s->dma_length || !s->length)
                        break;
-               gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
+               gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
        }
 }
 
@@ -319,7 +297,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
        int i;
 
 #ifdef CONFIG_IOMMU_DEBUG
-       printk(KERN_DEBUG "dma_map_sg overflow\n");
+       pr_debug("dma_map_sg overflow\n");
 #endif
 
        for_each_sg(sg, s, nents, i) {
@@ -327,9 +305,9 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 
                if (nonforced_iommu(dev, addr, s->length)) {
                        addr = dma_map_area(dev, addr, s->length, dir, 0);
-                       if (addr == bad_dma_address) {
+                       if (addr == bad_dma_addr) {
                                if (i > 0)
-                                       gart_unmap_sg(dev, sg, i, dir);
+                                       gart_unmap_sg(dev, sg, i, dir, NULL);
                                nents = 0;
                                sg[0].dma_length = 0;
                                break;
@@ -373,7 +351,6 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
                pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
                while (pages--) {
                        iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
-                       SET_LEAK(iommu_page);
                        addr += PAGE_SIZE;
                        iommu_page++;
                }
@@ -400,8 +377,8 @@ dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
  * DMA map all entries in a scatterlist.
  * Merge chunks that have page-aligned sizes into one contiguous mapping.
  */
-static int
-gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+                      enum dma_data_direction dir, struct dma_attrs *attrs)
 {
        struct scatterlist *s, *ps, *start_sg, *sgmap;
        int need = 0, nextneed, i, out, start;
@@ -415,12 +392,14 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
        if (!dev)
                dev = &x86_dma_fallback_dev;
 
-       out = 0;
-       start = 0;
-       start_sg = sgmap = sg;
-       seg_size = 0;
-       max_seg_size = dma_get_max_seg_size(dev);
-       ps = NULL; /* shut up gcc */
+       out             = 0;
+       start           = 0;
+       start_sg        = sg;
+       sgmap           = sg;
+       seg_size        = 0;
+       max_seg_size    = dma_get_max_seg_size(dev);
+       ps              = NULL; /* shut up gcc */
+
        for_each_sg(sg, s, nents, i) {
                dma_addr_t addr = sg_phys(s);
 
@@ -443,11 +422,12 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
                                                 sgmap, pages, need) < 0)
                                        goto error;
                                out++;
-                               seg_size = 0;
-                               sgmap = sg_next(sgmap);
-                               pages = 0;
-                               start = i;
-                               start_sg = s;
+
+                               seg_size        = 0;
+                               sgmap           = sg_next(sgmap);
+                               pages           = 0;
+                               start           = i;
+                               start_sg        = s;
                        }
                }
 
@@ -468,7 +448,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 
 error:
        flush_gart();
-       gart_unmap_sg(dev, sg, out, dir);
+       gart_unmap_sg(dev, sg, out, dir, NULL);
 
        /* When it was forced or merged try again in a dumb way */
        if (force_iommu || iommu_merge) {
@@ -481,7 +461,7 @@ error:
 
        iommu_full(dev, pages << PAGE_SHIFT, dir);
        for_each_sg(sg, s, nents, i)
-               s->dma_address = bad_dma_address;
+               s->dma_address = bad_dma_addr;
        return 0;
 }
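From a driver's point of view the contract of gart_map_sg() is unchanged: it returns the number of merged DMA segments, possibly fewer than nents, or 0 on failure. A sketch of typical caller code (submit_buffer is a hypothetical driver function; the device and scatterlist are assumed to be set up elsewhere):

```c
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch only: the GART may merge entries, so the device must be
 * programmed from the returned count, not the original nents. */
static int submit_buffer(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, count;

	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!count)
		return -ENOMEM;    /* all entries were set to bad_dma_addr */

	for_each_sg(sgl, sg, count, i)
		pr_info("seg %d: 0x%llx len %u\n", i,
			(unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}
```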
 
@@ -505,7 +485,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
                                     DMA_BIDIRECTIONAL, align_mask);
 
                flush_gart();
-               if (paddr != bad_dma_address) {
+               if (paddr != bad_dma_addr) {
                        *dma_addr = paddr;
                        return page_address(page);
                }
@@ -521,10 +501,15 @@ static void
 gart_free_coherent(struct device *dev, size_t size, void *vaddr,
                   dma_addr_t dma_addr)
 {
-       gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
+       gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
        free_pages((unsigned long)vaddr, get_order(size));
 }
 
+static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+       return (dma_addr == bad_dma_addr);
+}
+
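gart_mapping_error() is what lets the generic dma_mapping_error() work for GART handles; bad_dma_addr, the reserved base of the remapping window, is the sentinel it compares against. Typical driver-side usage, sketched with a hypothetical helper:

```c
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

/* Sketch: every dma_map_page()/dma_map_single() result must be checked
 * through dma_mapping_error(), which lands in gart_mapping_error() here. */
static int map_one_page(struct device *dev, struct page *page)
{
	dma_addr_t handle;

	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;    /* the GART was full or the area overflowed */

	/* ... hand 'handle' to the hardware ... */

	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
	return 0;
}
```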
 static int no_agp;
 
 static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -541,7 +526,7 @@ static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
        iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;
 
        if (iommu_size < 64*1024*1024) {
-               printk(KERN_WARNING
+               pr_warning(
                        "PCI-DMA: Warning: Small IOMMU %luMB."
                        " Consider increasing the AGP aperture in BIOS\n",
                                iommu_size >> 20);
@@ -596,28 +581,32 @@ void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
        aperture_alloc = aper_alloc;
 }
 
-static int gart_resume(struct sys_device *dev)
+static void gart_fixup_northbridges(struct sys_device *dev)
 {
-       printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");
+       int i;
 
-       if (fix_up_north_bridges) {
-               int i;
+       if (!fix_up_north_bridges)
+               return;
 
-               printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");
+       pr_info("PCI-DMA: Restoring GART aperture settings\n");
 
-               for (i = 0; i < num_k8_northbridges; i++) {
-                       struct pci_dev *dev = k8_northbridges[i];
+       for (i = 0; i < num_k8_northbridges; i++) {
+               struct pci_dev *dev = k8_northbridges[i];
 
-                       /*
-                        * Don't enable translations just yet.  That is the next
-                        * step.  Restore the pre-suspend aperture settings.
-                        */
-                       pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
-                                               aperture_order << 1);
-                       pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
-                                               aperture_alloc >> 25);
-               }
+               /*
+                * Don't enable translations just yet.  That is the next
+                * step.  Restore the pre-suspend aperture settings.
+                */
+               pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, aperture_order << 1);
+               pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
        }
+}
+
+static int gart_resume(struct sys_device *dev)
+{
+       pr_info("PCI-DMA: Resuming GART IOMMU\n");
+
+       gart_fixup_northbridges(dev);
 
        enable_gart_translations();
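The shifts written back in gart_fixup_northbridges() follow how the K8 northbridge encodes the aperture: the control register carries the size order in the bits above the GartEn bit (hence << 1), and the base register holds the physical base in 32 MB units (hence >> 25). A worked example, assuming a 64 MB aperture at physical 2 GB (the exact field positions are per the AMD BKDG as best recalled; treat them as an assumption):

```c
#include <stdio.h>

int main(void)
{
	/* hypothetical pre-suspend values: 64 MB aperture at 2 GB */
	unsigned int aperture_order = 1;           /* 32 MB << order = 64 MB */
	unsigned int aperture_alloc = 0x80000000;  /* physical base */

	/* what gart_fixup_northbridges() writes back */
	unsigned int ctl  = aperture_order << 1;   /* size field, GartEn clear */
	unsigned int base = aperture_alloc >> 25;  /* base in 32 MB units */

	printf("AMD64_GARTAPERTURECTL  = 0x%x\n", ctl);   /* 0x2 */
	printf("AMD64_GARTAPERTUREBASE = 0x%x\n", base);  /* 0x40 */
	return 0;
}
```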
 
@@ -630,15 +619,14 @@ static int gart_suspend(struct sys_device *dev, pm_message_t state)
 }
 
 static struct sysdev_class gart_sysdev_class = {
-       .name = "gart",
-       .suspend = gart_suspend,
-       .resume = gart_resume,
+       .name           = "gart",
+       .suspend        = gart_suspend,
+       .resume         = gart_resume,
 
 };
 
 static struct sys_device device_gart = {
-       .id     = 0,
-       .cls    = &gart_sysdev_class,
+       .cls            = &gart_sysdev_class,
 };
 
 /*
@@ -653,7 +641,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
        void *gatt;
        int i, error;
 
-       printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
+       pr_info("PCI-DMA: Disabling AGP.\n");
+
        aper_size = aper_base = info->aper_size = 0;
        dev = NULL;
        for (i = 0; i < num_k8_northbridges; i++) {
@@ -671,6 +660,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
        }
        if (!aper_base)
                goto nommu;
+
        info->aper_base = aper_base;
        info->aper_size = aper_size >> 20;
 
@@ -684,8 +674,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 
        agp_gatt_table = gatt;
 
-       enable_gart_translations();
-
        error = sysdev_class_register(&gart_sysdev_class);
        if (!error)
                error = sysdev_register(&device_gart);
@@ -695,33 +683,35 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 
        flush_gart();
 
-       printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
+       pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
               aper_base, aper_size>>10);
 
        return 0;
 
  nommu:
        /* Should not happen anymore */
-       printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
-              KERN_WARNING "falling back to iommu=soft.\n");
+       pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
+              "falling back to iommu=soft.\n");
        return -1;
 }
 
-static struct dma_mapping_ops gart_dma_ops = {
-       .map_single                     = gart_map_single,
-       .unmap_single                   = gart_unmap_single,
+static struct dma_map_ops gart_dma_ops = {
        .map_sg                         = gart_map_sg,
        .unmap_sg                       = gart_unmap_sg,
+       .map_page                       = gart_map_page,
+       .unmap_page                     = gart_unmap_page,
        .alloc_coherent                 = gart_alloc_coherent,
        .free_coherent                  = gart_free_coherent,
+       .mapping_error                  = gart_mapping_error,
 };
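struct dma_map_ops is the dispatch table the generic DMA API indirects through; once dma_ops points at gart_dma_ops (see the end of gart_iommu_init() below), every dma_map_page() call in every driver lands in gart_map_page(). A toy model of that indirection, with illustrative names only:

```c
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;
struct device;

/* toy dispatch table mirroring the shape of struct dma_map_ops */
struct dma_map_ops_model {
	dma_addr_t (*map_page)(struct device *dev, void *page,
			       unsigned long offset, size_t size);
	int (*mapping_error)(struct device *dev, dma_addr_t addr);
};

static dma_addr_t gart_map_page_model(struct device *dev, void *page,
				      unsigned long offset, size_t size)
{
	(void)dev; (void)page; (void)size;
	return 0xc0000000ULL + offset;    /* pretend GART handle */
}

static int gart_mapping_error_model(struct device *dev, dma_addr_t addr)
{
	(void)dev;
	return addr == 0xc0000000ULL;     /* bad_dma_addr-style sentinel */
}

static struct dma_map_ops_model gart_ops = {
	.map_page      = gart_map_page_model,
	.mapping_error = gart_mapping_error_model,
};

/* the single global the generic layer indirects through */
static struct dma_map_ops_model *dma_ops = &gart_ops;

int main(void)
{
	dma_addr_t h = dma_ops->map_page(NULL, NULL, 0x234, 4096);
	printf("handle 0x%llx, error=%d\n", (unsigned long long)h,
	       dma_ops->mapping_error(NULL, h));
	return 0;
}
```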
 
-void gart_iommu_shutdown(void)
+static void gart_iommu_shutdown(void)
 {
        struct pci_dev *dev;
        int i;
 
-       if (no_agp && (dma_ops != &gart_dma_ops))
+       /* don't shut it down if AGP is installed */
+       if (!no_agp)
                return;
 
        for (i = 0; i < num_k8_northbridges; i++) {
@@ -736,7 +726,7 @@ void gart_iommu_shutdown(void)
        }
 }
 
-void __init gart_iommu_init(void)
+int __init gart_iommu_init(void)
 {
        struct agp_kern_info info;
        unsigned long iommu_start;
@@ -745,10 +735,8 @@ void __init gart_iommu_init(void)
        unsigned long scratch;
        long i;
 
-       if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
-               printk(KERN_INFO "PCI-GART: No AMD GART found.\n");
-               return;
-       }
+       if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
+               return 0;
 
 #ifndef CONFIG_AGP_AMD64
        no_agp = 1;
@@ -760,35 +748,28 @@ void __init gart_iommu_init(void)
                (agp_copy_info(agp_bridge, &info) < 0);
 #endif
 
-       if (swiotlb)
-               return;
-
-       /* Did we detect a different HW IOMMU? */
-       if (iommu_detected && !gart_iommu_aperture)
-               return;
-
        if (no_iommu ||
            (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
            !gart_iommu_aperture ||
            (no_agp && init_k8_gatt(&info) < 0)) {
                if (max_pfn > MAX_DMA32_PFN) {
-                       printk(KERN_WARNING "More than 4GB of memory "
-                              "but GART IOMMU not available.\n");
-                       printk(KERN_WARNING "falling back to iommu=soft.\n");
+                       pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
+                       pr_warning("falling back to iommu=soft.\n");
                }
-               return;
+               return 0;
        }
 
        /* need to map that range */
-       aper_size = info.aper_size << 20;
-       aper_base = info.aper_base;
-       end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
+       aper_size       = info.aper_size << 20;
+       aper_base       = info.aper_base;
+       end_pfn         = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
+
        if (end_pfn > max_low_pfn_mapped) {
                start_pfn = (aper_base>>PAGE_SHIFT);
                init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
        }
 
-       printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
+       pr_info("PCI-DMA: using GART IOMMU.\n");
        iommu_size = check_iommu_size(info.aper_base, aper_size);
        iommu_pages = iommu_size >> PAGE_SHIFT;
 
@@ -799,11 +780,11 @@ void __init gart_iommu_init(void)
 
 #ifdef CONFIG_IOMMU_LEAK
        if (leak_trace) {
-               iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
-                                 get_order(iommu_pages*sizeof(void *)));
-               if (!iommu_leak_tab)
-                       printk(KERN_DEBUG
-                              "PCI-DMA: Cannot allocate leak trace area\n");
+               int ret;
+
+               ret = dma_debug_resize_entries(iommu_pages);
+               if (ret)
+                       pr_debug("PCI-DMA: Cannot trace all the entries\n");
        }
 #endif
 
@@ -811,17 +792,16 @@ void __init gart_iommu_init(void)
         * Out of IOMMU space handling.
         * Reserve some invalid pages at the beginning of the GART.
         */
-       iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
+       bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
 
-       agp_memory_reserved = iommu_size;
-       printk(KERN_INFO
-              "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
+       pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
               iommu_size >> 20);
 
-       iommu_start = aper_size - iommu_size;
-       iommu_bus_base = info.aper_base + iommu_start;
-       bad_dma_address = iommu_bus_base;
-       iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
+       agp_memory_reserved     = iommu_size;
+       iommu_start             = aper_size - iommu_size;
+       iommu_bus_base          = info.aper_base + iommu_start;
+       bad_dma_addr            = iommu_bus_base;
+       iommu_gatt_base         = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
 
        /*
         * Unmap the IOMMU part of the GART. The alias of the page is
@@ -845,6 +825,14 @@ void __init gart_iommu_init(void)
        wbinvd();
 
        /*
+        * Now all caches are flushed and we can safely enable
+        * GART hardware.  Doing it early leaves the possibility
+        * of stale cache entries that can lead to GART PTE
+        * errors.
+        */
+       enable_gart_translations();
+
+       /*
         * Try to work around a bug (thanks to BenH):
         * Set unmapped entries to a scratch page instead of 0.
         * Any prefetches that hit unmapped entries won't get a bus abort
@@ -859,6 +847,10 @@ void __init gart_iommu_init(void)
 
        flush_gart();
        dma_ops = &gart_dma_ops;
+       x86_platform.iommu_shutdown = gart_iommu_shutdown;
+       swiotlb = 0;
+
+       return 0;
 }
 
 void __init gart_parse_options(char *p)
@@ -877,7 +869,7 @@ void __init gart_parse_options(char *p)
 #endif
        if (isdigit(*p) && get_option(&p, &arg))
                iommu_size = arg;
-       if (!strncmp(p, "fullflush", 8))
+       if (!strncmp(p, "fullflush", 9))
                iommu_fullflush = 1;
        if (!strncmp(p, "nofullflush", 11))
                iommu_fullflush = 0;
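The fullflush length fix matters because strncmp() with n = 8 compares only "fullflus", so a mistyped option such as "fullflusX" would also enable full flushing; n = 9 requires the whole word to match. A quick demonstration:

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *good = "fullflush";
	const char *typo = "fullflusX";

	printf("n=8: good=%d typo=%d\n",
	       !strncmp(good, "fullflush", 8), !strncmp(typo, "fullflush", 8));
	/* n=8: good=1 typo=1  -- the typo matches too */

	printf("n=9: good=%d typo=%d\n",
	       !strncmp(good, "fullflush", 9), !strncmp(typo, "fullflush", 9));
	/* n=9: good=1 typo=0 */
	return 0;
}
```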