include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
[safe/jmp/linux-2.6] / arch / powerpc / platforms / iseries / iommu.c
index 2c3dbcd..ce61cea 100644 (file)
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
 #include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/slab.h>
 
 #include <asm/iommu.h>
+#include <asm/vio.h>
 #include <asm/tce.h>
 #include <asm/machdep.h>
 #include <asm/abs_addr.h>
 #include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/iseries/hv_call_xm.h>
+#include <asm/iseries/hv_call_event.h>
 #include <asm/iseries/iommu.h>
 
-static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
-               unsigned long uaddr, enum dma_data_direction direction)
+static int tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
+               unsigned long uaddr, enum dma_data_direction direction,
+               struct dma_attrs *attrs)
 {
        u64 rc;
        u64 tce, rpn;
 
-       index <<= TCE_PAGE_FACTOR;
-       npages <<= TCE_PAGE_FACTOR;
-
        while (npages--) {
                rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
                tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
@@ -64,24 +67,22 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
 
                rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, tce);
                if (rc)
-                       panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n",
+                       panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
                                        rc);
                index++;
                uaddr += TCE_PAGE_SIZE;
        }
+       return 0;
 }
 
 static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
 {
        u64 rc;
 
-       npages <<= TCE_PAGE_FACTOR;
-       index <<= TCE_PAGE_FACTOR;
-
        while (npages--) {
                rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
                if (rc)
-                       panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n",
+                       panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
                                        rc);
                index++;
        }
@@ -120,12 +121,10 @@ void iommu_table_getparms_iSeries(unsigned long busno,
 {
        struct iommu_table_cb *parms;
 
-       parms = kmalloc(sizeof(*parms), GFP_KERNEL);
+       parms = kzalloc(sizeof(*parms), GFP_KERNEL);
        if (parms == NULL)
                panic("PCI_DMA: TCE Table Allocation failed.");
 
-       memset(parms, 0, sizeof(*parms));
-
        parms->itc_busno = busno;
        parms->itc_slotno = slotno;
        parms->itc_virtbus = virtbus;
@@ -136,10 +135,9 @@ void iommu_table_getparms_iSeries(unsigned long busno,
                panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
 
        /* itc_size is in pages worth of table, it_size is in # of entries */
-       tbl->it_size = ((parms->itc_size * TCE_PAGE_SIZE) /
-                       TCE_ENTRY_SIZE) >> TCE_PAGE_FACTOR;
+       tbl->it_size = (parms->itc_size * TCE_PAGE_SIZE) / TCE_ENTRY_SIZE;
        tbl->it_busno = parms->itc_busno;
-       tbl->it_offset = parms->itc_offset >> TCE_PAGE_FACTOR;
+       tbl->it_offset = parms->itc_offset;
        tbl->it_index = parms->itc_index;
        tbl->it_blocksize = 1;
        tbl->it_type = virtbus ? TCE_VB : TCE_PCI;
@@ -168,18 +166,21 @@ static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
                    (it->it_type == TCE_PCI) &&
                    (it->it_offset == tbl->it_offset) &&
                    (it->it_index == tbl->it_index) &&
-                   (it->it_size == tbl->it_size))
+                   (it->it_size == tbl->it_size)) {
+                       of_node_put(node);
                        return it;
+               }
        }
        return NULL;
 }
 
 
-void iommu_devnode_init_iSeries(struct device_node *dn)
+static void pci_dma_dev_setup_iseries(struct pci_dev *pdev)
 {
        struct iommu_table *tbl;
+       struct device_node *dn = pci_device_to_OF_node(pdev);
        struct pci_dn *pdn = PCI_DN(dn);
-       u32 *lsn = (u32 *)get_property(dn, "linux,logical-slot-number", NULL);
+       const u32 *lsn = of_get_property(dn, "linux,logical-slot-number", NULL);
 
        BUG_ON(lsn == NULL);
 
@@ -193,19 +194,67 @@ void iommu_devnode_init_iSeries(struct device_node *dn)
                pdn->iommu_table = iommu_init_table(tbl, -1);
        else
                kfree(tbl);
+       set_iommu_table_base(&pdev->dev, pdn->iommu_table);
 }
+#else
+#define pci_dma_dev_setup_iseries      NULL
 #endif
 
-static void iommu_dev_setup_iSeries(struct pci_dev *dev) { }
-static void iommu_bus_setup_iSeries(struct pci_bus *bus) { }
+static struct iommu_table veth_iommu_table;
+static struct iommu_table vio_iommu_table;
+
+void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag)
+{
+       return iommu_alloc_coherent(NULL, &vio_iommu_table, size, dma_handle,
+                               DMA_BIT_MASK(32), flag, -1);
+}
+EXPORT_SYMBOL_GPL(iseries_hv_alloc);
+
+void iseries_hv_free(size_t size, void *vaddr, dma_addr_t dma_handle)
+{
+       iommu_free_coherent(&vio_iommu_table, size, vaddr, dma_handle);
+}
+EXPORT_SYMBOL_GPL(iseries_hv_free);
+
+dma_addr_t iseries_hv_map(void *vaddr, size_t size,
+                       enum dma_data_direction direction)
+{
+       return iommu_map_page(NULL, &vio_iommu_table, virt_to_page(vaddr),
+                             (unsigned long)vaddr % PAGE_SIZE, size,
+                             DMA_BIT_MASK(32), direction, NULL);
+}
+
+void iseries_hv_unmap(dma_addr_t dma_handle, size_t size,
+                       enum dma_data_direction direction)
+{
+       iommu_unmap_page(&vio_iommu_table, dma_handle, size, direction, NULL);
+}
+
+void __init iommu_vio_init(void)
+{
+       iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);
+       veth_iommu_table.it_size /= 2;
+       vio_iommu_table = veth_iommu_table;
+       vio_iommu_table.it_offset += veth_iommu_table.it_size;
+
+       if (!iommu_init_table(&veth_iommu_table, -1))
+               printk("Virtual Bus VETH TCE table failed.\n");
+       if (!iommu_init_table(&vio_iommu_table, -1))
+               printk("Virtual Bus VIO TCE table failed.\n");
+}
+
+struct iommu_table *vio_build_iommu_table_iseries(struct vio_dev *dev)
+{
+       if (strcmp(dev->type, "network") == 0)
+               return &veth_iommu_table;
+       return &vio_iommu_table;
+}
 
 void iommu_init_early_iSeries(void)
 {
        ppc_md.tce_build = tce_build_iSeries;
        ppc_md.tce_free  = tce_free_iSeries;
 
-       ppc_md.iommu_dev_setup = iommu_dev_setup_iSeries;
-       ppc_md.iommu_bus_setup = iommu_bus_setup_iSeries;
-
-       pci_iommu_init();
+       ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_iseries;
+       set_pci_dma_ops(&dma_iommu_ops);
 }