#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>
/*
 * Per-backend DMA mapping operations; the active backend (swiotlb or an
 * IOMMU driver) installs its table in the global dma_ops pointer below.
 */
struct dma_mapping_ops {
        int             (*mapping_error)(struct device *dev,
                                         dma_addr_t dma_addr);
        void            *(*alloc_coherent)(struct device *dev, size_t size,
                                           dma_addr_t *dma_handle, gfp_t gfp);
        void            (*free_coherent)(struct device *dev, size_t size,
                                         void *vaddr, dma_addr_t dma_handle);
        dma_addr_t      (*map_single)(struct device *hwdev, unsigned long ptr,
                                      size_t size, int direction);
        void            (*unmap_single)(struct device *dev, dma_addr_t addr,
                                        size_t size, int direction);
        dma_addr_t      (*map_single_attrs)(struct device *dev, void *cpu_addr,
                                            size_t size, int direction,
                                            struct dma_attrs *attrs);
        void            (*unmap_single_attrs)(struct device *dev,
                                              dma_addr_t dma_addr,
                                              size_t size, int direction,
                                              struct dma_attrs *attrs);
        void            (*sync_single_for_cpu)(struct device *hwdev,
                                               dma_addr_t dma_handle,
                                               size_t size, int direction);
        void            (*sync_single_for_device)(struct device *hwdev,
                                                  dma_addr_t dma_handle,
                                                  size_t size, int direction);
        void            (*sync_single_range_for_cpu)(struct device *hwdev,
                                                     dma_addr_t dma_handle,
                                                     unsigned long offset,
                                                     size_t size, int direction);
        void            (*sync_single_range_for_device)(struct device *hwdev,
                                                        dma_addr_t dma_handle,
                                                        unsigned long offset,
                                                        size_t size, int direction);
        void            (*sync_sg_for_cpu)(struct device *hwdev,
                                           struct scatterlist *sg, int nelems,
                                           int direction);
        void            (*sync_sg_for_device)(struct device *hwdev,
                                              struct scatterlist *sg, int nelems,
                                              int direction);
        int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                                  int nents, int direction);
        void            (*unmap_sg)(struct device *hwdev,
                                    struct scatterlist *sg, int nents,
                                    int direction);
        int             (*map_sg_attrs)(struct device *dev,
                                        struct scatterlist *sg, int nents,
                                        int direction, struct dma_attrs *attrs);
        void            (*unmap_sg_attrs)(struct device *dev,
                                          struct scatterlist *sg, int nents,
                                          int direction,
                                          struct dma_attrs *attrs);
        int             (*dma_supported_op)(struct device *hwdev, u64 mask);
};
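/*
 * Illustrative only, not part of this header: a DMA backend (e.g. swiotlb
 * or a hardware IOMMU driver) fills in one of these tables and installs it
 * at boot. The my_* names below are hypothetical:
 *
 *      static struct dma_mapping_ops my_iommu_dma_ops = {
 *              .alloc_coherent   = my_alloc_coherent,
 *              .free_coherent    = my_free_coherent,
 *              .map_single_attrs = my_map_single_attrs,
 *              .mapping_error    = my_mapping_error,
 *      };
 *      ...
 *      dma_ops = &my_iommu_dma_ops;
 */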
extern struct dma_mapping_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *daddr, gfp_t gfp)
{
        return dma_ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *caddr, dma_addr_t daddr)
{
        dma_ops->free_coherent(dev, size, caddr, daddr);
}
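/*
 * Illustrative only: a driver typically allocates a long-lived coherent
 * buffer (e.g. a descriptor ring) once at probe time:
 *
 *      void *cpu_addr;
 *      dma_addr_t bus_addr;
 *
 *      cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &bus_addr, GFP_KERNEL);
 *      if (!cpu_addr)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, PAGE_SIZE, cpu_addr, bus_addr);
 */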
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
                                              void *caddr, size_t size,
                                              enum dma_data_direction dir,
                                              struct dma_attrs *attrs)
{
        return dma_ops->map_single_attrs(dev, caddr, size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
                                          size_t size,
                                          enum dma_data_direction dir,
                                          struct dma_attrs *attrs)
{
        dma_ops->unmap_single_attrs(dev, daddr, size, dir, attrs);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
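/*
 * Illustrative only: a streaming mapping for a single buffer. The result
 * must be checked with dma_mapping_error() (defined below) before use:
 *
 *      dma_addr_t daddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, daddr))
 *              return -ENOMEM;
 *      ... start the transfer, wait for completion ...
 *      dma_unmap_single(dev, daddr, len, DMA_TO_DEVICE);
 */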
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                   int nents, enum dma_data_direction dir,
                                   struct dma_attrs *attrs)
{
        return dma_ops->map_sg_attrs(dev, sgl, nents, dir, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
                                      struct scatterlist *sgl, int nents,
                                      enum dma_data_direction dir,
                                      struct dma_attrs *attrs)
{
        dma_ops->unmap_sg_attrs(dev, sgl, nents, dir, attrs);
}

#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
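/*
 * Illustrative only: mapping a scatterlist. The returned count may be
 * smaller than nents if entries were coalesced, and 0 means failure; the
 * unmap must still be called with the original nents:
 *
 *      int count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *      if (!count)
 *              return -ENOMEM;
 *      ... program the device via sg_dma_address()/sg_dma_len() ...
 *      dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */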
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
                                           size_t size,
                                           enum dma_data_direction dir)
{
        dma_ops->sync_single_for_cpu(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
                                       struct scatterlist *sgl,
                                       int nents, enum dma_data_direction dir)
{
        dma_ops->sync_sg_for_cpu(dev, sgl, nents, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                                              dma_addr_t daddr,
                                              size_t size,
                                              enum dma_data_direction dir)
{
        dma_ops->sync_single_for_device(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
                                          struct scatterlist *sgl,
                                          int nents,
                                          enum dma_data_direction dir)
{
        dma_ops->sync_sg_for_device(dev, sgl, nents, dir);
}
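/*
 * Illustrative only: a buffer that stays mapped across several transfers
 * must be handed back and forth between CPU and device explicitly:
 *
 *      dma_sync_single_for_cpu(dev, daddr, len, DMA_FROM_DEVICE);
 *      ... CPU may now read the data the device wrote ...
 *      dma_sync_single_for_device(dev, daddr, len, DMA_FROM_DEVICE);
 */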
static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
        return dma_ops->mapping_error(dev, daddr);
}
#define dma_map_page(dev, pg, off, size, dir)                           \
        dma_map_single(dev, page_address(pg) + (off), (size), (dir))
#define dma_unmap_page(dev, dma_addr, size, dir)                        \
        dma_unmap_single(dev, dma_addr, size, dir)
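/*
 * Illustrative only: dma_map_page() here simply maps the page's kernel
 * virtual address, which is always valid on ia64 because all RAM is
 * kernel-mapped (no highmem):
 *
 *      dma_addr_t daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
 *                                      DMA_TO_DEVICE);
 */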
/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */
#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)      \
        dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)   \
        dma_sync_single_for_device(dev, dma_handle, size, dir)
static inline int dma_supported(struct device *dev, u64 mask)
{
        return dma_ops->dma_supported_op(dev, mask);
}
static inline int
dma_set_mask (struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;
        *dev->dma_mask = mask;
        return 0;
}
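/*
 * Illustrative only: a driver for a device limited to 32-bit addressing
 * negotiates its mask at probe time:
 *
 *      if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *              return -EIO;
 */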
extern int dma_get_cache_alignment(void);
static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction dir)
{
        /*
         * IA-64 is cache-coherent, so this is mostly a no-op. However, we
         * do need to ensure that dma_cache_sync() enforces order, hence
         * the mb().
         */
        mb();
}
#define dma_is_consistent(d, h) (1)     /* all we do is coherent memory... */

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
        return dma_ops;
}

#endif /* _ASM_IA64_DMA_MAPPING_H */