x86: delete the arch-specific dma-mapping headers.
include/asm-x86/dma-mapping.h
#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

extern dma_addr_t bad_dma_address;	/* returned by failed mappings */
extern int iommu_merge;			/* allow merging of sg entries */
extern struct device fallback_dev;	/* used when no device is supplied */
extern int panic_on_overflow;		/* panic on IOMMU overflow instead of
					   failing the mapping */

struct dma_mapping_ops {
	int		(*mapping_error)(dma_addr_t dma_addr);
	void		*(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	/* like map_single, but doesn't check the device mask */
	dma_addr_t	(*map_simple)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int		(*dma_supported)(struct device *hwdev, u64 mask);
	int		is_phys;
};

extern const struct dma_mapping_ops *dma_ops;

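/*
 * Each backend (nommu, GART, Calgary, swiotlb) fills in one of these
 * tables and points dma_ops at it; NULL hooks are skipped by the
 * inline wrappers below.  A minimal sketch in the spirit of the nommu
 * backend (names here are invented for illustration, not part of this
 * patch):
 *
 *	static dma_addr_t sketch_map_single(struct device *hwdev,
 *			phys_addr_t paddr, size_t size, int direction)
 *	{
 *		return paddr;	// no IOMMU: bus address == physical
 *	}
 *
 *	static const struct dma_mapping_ops sketch_dma_ops = {
 *		.map_single	= sketch_map_single,
 *		.is_phys	= 1,
 *	};
 *
 *	// at init time:  dma_ops = &sketch_dma_ops;
 *
 * A real backend would also provide map_sg and, if it remaps or
 * bounces, the matching unmap and sync hooks.
 */
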
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dma_addr);

	return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

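/*
 * Coherent memory needs no sync calls for its lifetime.  A hedged
 * sketch of the usual probe-time pattern ("pdev", "ring" and the size
 * are invented for illustration):
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *		return -ENODEV;
 *	ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_dma,
 *				  GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	// ...hand ring_dma to the device, use "ring" from the CPU...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
 */
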
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, addr, size, direction);
}

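/*
 * Streaming mappings hand the buffer to the device until unmapped, and
 * every map must be checked with dma_mapping_error() before use.  A
 * hedged transmit-path sketch ("dev", "buf" and "len" are invented):
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -EIO;
 *	// ...point the hardware at "handle" and wait for completion...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
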
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->unmap_sg)
		dma_ops->unmap_sg(hwdev, sg, nents, direction);
}

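/*
 * map_sg may return fewer entries than it was handed when the IOMMU
 * merges adjacent buffers (see iommu_merge above), so the device is
 * programmed from the returned count while unmap takes the original
 * nents.  Hedged sketch; "sgl", "nents" and program_hw_slot() are
 * invented:
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	for_each_sg(sgl, sg, count, i)
 *		program_hw_slot(i, sg_dma_address(sg), sg_dma_len(sg));
 *	// ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */
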
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_cpu)
		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
					     direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_device)
		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
						direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
						   size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
						      offset, size, direction);
	flush_write_buffers();
}

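/*
 * The sync hooks pass ownership of a still-mapped buffer back and
 * forth: sync_for_cpu before the CPU reads, sync_for_device before the
 * hardware touches it again.  Hedged sketch for polling a
 * DMA_FROM_DEVICE status block ("status", "handle", "len" invented):
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	done = status->flags & STATUS_DONE;
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */
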
static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_single(dev, page_to_phys(page) + offset,
				   size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}

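/*
 * dma_map_page() is the same streaming map starting from a struct
 * page, e.g. one taken from the page cache.  Hedged sketch:
 *
 *	dma_addr_t handle = dma_map_page(dev, page, 0, PAGE_SIZE,
 *					 DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -EIO;
 *	// ...
 *	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
 */
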
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return boot_cpu_data.x86_clflush_size;
}

#define dma_is_consistent(d, h) (1)

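/*
 * The alignment value is typically used to pad streaming buffers so no
 * unrelated data shares a cache line with a DMA region.  Hedged
 * sketch ("len" invented):
 *
 *	size_t sz = ALIGN(len, dma_get_cache_alignment());
 *	void *buf = kmalloc(sz, GFP_KERNEL);
 */
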
#ifdef CONFIG_X86_32
#  define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);
extern int forbid_dac;
#endif /* CONFIG_X86_32 */
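
/*
 * Usage sketch for the 32-bit-only declared-memory interface, which
 * hands a chunk of device-local memory (e.g. on-card SRAM) to the
 * coherent allocator.  Addresses and size are invented; in this era
 * the call returns 0 on failure:
 *
 *	if (!dma_declare_coherent_memory(&pdev->dev, bus_addr, dev_addr,
 *					 0x2000, DMA_MEMORY_MAP))
 *		return -ENOMEM;
 *	// dma_alloc_coherent() on this device now comes from that region
 *	...
 *	dma_release_declared_memory(&pdev->dev);
 */
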
#endif