/* Fallback functions when the main IOMMU code is not compiled in. This
code is roughly equivalent to i386. */
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <linux/pci.h>
+#include <linux/mm.h>

-#include <asm/iommu.h>
#include <asm/processor.h>
+#include <asm/iommu.h>
#include <asm/dma.h>
static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
- if (hwdev && bus + size > *hwdev->dma_mask) {
- if (*hwdev->dma_mask >= DMA_32BIT_MASK)
+ if (hwdev && !dma_capable(hwdev, bus, size)) {
+ if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
		printk(KERN_ERR
		    "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
			name, (long long)bus, size,
			(long long)*hwdev->dma_mask);
	return 0;
}
return 1;
}
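
/*
 * Illustrative sketch, not part of this patch: dma_capable() replaces the
 * open-coded mask comparison above. In this era it lives in the arch
 * dma-mapping headers and boils down to roughly:
 */
static inline bool dma_capable_sketch(struct device *dev, dma_addr_t addr,
				      size_t size)
{
	if (!dev->dma_mask)
		return false;
	return addr + size - 1 <= *dev->dma_mask;
}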
-static dma_addr_t
-nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size,
- int direction)
+static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
- dma_addr_t bus = paddr;
+ dma_addr_t bus = page_to_phys(page) + offset;
WARN_ON(size == 0);
- if (!check_addr("map_single", hwdev, bus, size))
- return bad_dma_address;
+ if (!check_addr("map_single", dev, bus, size))
+ return DMA_ERROR_CODE;
flush_write_buffers();
return bus;
}
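
/*
 * Illustrative sketch, not part of this patch: with the old ->map_single
 * hook gone, the generic dma-mapping layer builds dma_map_single() on top
 * of ->map_page(), roughly like this (helper name is hypothetical):
 */
static inline dma_addr_t map_single_via_page_sketch(struct device *dev,
						    void *ptr, size_t size,
						    enum dma_data_direction dir)
{
	return nommu_map_page(dev, virt_to_page(ptr),
			      (unsigned long)ptr & ~PAGE_MASK, size, dir, NULL);
}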
-
/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, int direction)
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		BUG_ON(!sg_page(s));
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;
		s->dma_length = s->length;
	}
	flush_write_buffers();
	return nents;
}
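
/*
 * Illustrative sketch, not part of this patch: a driver consumes the
 * mapped list through sg_dma_address()/sg_dma_length(); here
 * hw_queue_buffer() is a hypothetical stand-in for device setup:
 */
static void map_sg_usage_sketch(struct device *dev, struct scatterlist *sglist,
				int nents)
{
	struct scatterlist *s;
	int i, mapped;

	mapped = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
	for_each_sg(sglist, s, mapped, i)
		hw_queue_buffer(sg_dma_address(s), sg_dma_length(s));
}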
-static void *
-nommu_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_addr, gfp_t gfp)
-{
- unsigned long dma_mask;
- int node;
- struct page *page;
-
- gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
- gfp |= __GFP_ZERO;
-
- dma_mask = hwdev->coherent_dma_mask;
- if (!dma_mask)
- dma_mask = *(hwdev->dma_mask);
-
- if (dma_mask < DMA_24BIT_MASK)
- return NULL;
-
- node = dev_to_node(hwdev);
-
-#ifdef CONFIG_X86_64
- if (dma_mask <= DMA_32BIT_MASK)
- gfp |= GFP_DMA32;
-#endif
-
- /* No alloc-free penalty for ISA devices */
- if (dma_mask == DMA_24BIT_MASK)
- gfp |= GFP_DMA;
-
-again:
- page = alloc_pages_node(node, gfp, get_order(size));
- if (!page)
- return NULL;
-
- if ((page_to_phys(page) + size > dma_mask) && !(gfp & GFP_DMA)) {
- free_pages((unsigned long)page_address(page), get_order(size));
- gfp |= GFP_DMA;
- goto again;
- }
-
- *dma_addr = page_to_phys(page);
- if (check_addr("alloc_coherent", hwdev, *dma_addr, size)) {
- flush_write_buffers();
- return page_address(page);
- }
-
- free_pages((unsigned long)page_address(page), get_order(size));
-
- return NULL;
-}
-
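/*
 * Note, not part of this patch: the allocator removed above survives as
 * dma_generic_alloc_coherent() in arch/x86/kernel/pci-dma.c, with the
 * same strategy of deriving GFP zone flags from the DMA mask and retrying
 * with GFP_DMA on overflow. A sketch of the mask-to-zone step:
 */
static gfp_t dma_mask_to_gfp_sketch(u64 dma_mask, gfp_t gfp)
{
	if (dma_mask <= DMA_BIT_MASK(24))
		gfp |= GFP_DMA;		/* ISA-style 16MB limit */
#ifdef CONFIG_X86_64
	else if (dma_mask <= DMA_BIT_MASK(32))
		gfp |= GFP_DMA32;	/* keep allocations below 4GB */
#endif
	return gfp;
}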
static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_addr)
{
free_pages((unsigned long)vaddr, get_order(size));
}
-struct dma_mapping_ops nommu_dma_ops = {
- .alloc_coherent = nommu_alloc_coherent,
- .free_coherent = nommu_free_coherent,
- .map_single = nommu_map_single,
- .map_sg = nommu_map_sg,
- .is_phys = 1,
-};
-
-void __init no_iommu_init(void)
+static void nommu_sync_single_for_device(struct device *dev,
+					 dma_addr_t addr, size_t size,
+					 enum dma_data_direction dir)
{
-	if (dma_ops)
-		return;
-
-	force_iommu = 0; /* no HW IOMMU */
-	dma_ops = &nommu_dma_ops;
+	flush_write_buffers();
+}
+
+static void nommu_sync_sg_for_device(struct device *dev,
+				     struct scatterlist *sg, int nelems,
+				     enum dma_data_direction dir)
+{
+	flush_write_buffers();
}
+
+struct dma_map_ops nommu_dma_ops = {
+ .alloc_coherent = dma_generic_alloc_coherent,
+ .free_coherent = nommu_free_coherent,
+ .map_sg = nommu_map_sg,
+ .map_page = nommu_map_page,
+ .sync_single_for_device = nommu_sync_single_for_device,
+ .sync_sg_for_device = nommu_sync_sg_for_device,
+ .is_phys = 1,
+};
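
/*
 * Illustrative sketch, not part of this patch: callers never invoke
 * nommu_dma_ops directly; the generic layer dispatches through
 * struct dma_map_ops, roughly:
 */
static inline dma_addr_t dma_map_page_sketch(struct device *dev,
					     struct page *page,
					     size_t offset, size_t size,
					     enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	return ops->map_page(dev, page, offset, size, dir, NULL);
}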