x86: unify dma_mapping_error
include/asm-x86/dma-mapping.h
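This change moves dma_mapping_error() into the shared x86 header: it dispatches through dma_ops->mapping_error when the active implementation provides that hook, and otherwise falls back to comparing the address against bad_dma_address.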
#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

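/*
 * Sentinel bus address: mapping implementations return this on failure,
 * and dma_mapping_error() below compares against it when no
 * ->mapping_error hook is provided.
 */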
extern dma_addr_t bad_dma_address;

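/*
 * Per-implementation DMA operations. Each backend (swiotlb, included
 * above, for example) provides one of these tables; the inline wrappers
 * below dispatch through the active instance.
 */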
struct dma_mapping_ops {
	int		(*mapping_error)(dma_addr_t dma_addr);
	void		*(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	/* like map_single, but doesn't check the device mask */
	dma_addr_t	(*map_simple)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int		(*dma_supported)(struct device *hwdev, u64 mask);
	int		is_phys;
};

extern const struct dma_mapping_ops *dma_ops;

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dma_addr);

	return (dma_addr == bad_dma_address);
}
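/*
 * Typical driver usage (a sketch; "buf" and "len" are placeholders):
 * every streaming mapping should be checked before the bus address is
 * handed to hardware.
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(bus))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */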

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
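/*
 * x86 is cache-coherent for DMA, so the "noncoherent" variants above are
 * plain aliases for the coherent allocator.
 */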

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

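/*
 * The remaining 32-bit/64-bit differences still live in the
 * subarchitecture headers pulled in below.
 */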
#ifdef CONFIG_X86_32
# include "dma-mapping_32.h"
#else
# include "dma-mapping_64.h"
#endif

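/*
 * Streaming-mapping wrappers. Each one checks the direction argument and
 * dispatches through dma_ops; note that ->map_single takes a phys_addr_t,
 * so the wrappers convert with virt_to_phys()/page_to_phys() first.
 */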
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, addr, size, direction);
}

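/*
 * Scatter-gather mapping. The return value is the number of DMA segments
 * actually mapped, which an IOMMU may make smaller than nents by merging
 * entries; 0 indicates failure.
 */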
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->unmap_sg)
		dma_ops->unmap_sg(hwdev, sg, nents, direction);
}

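/*
 * Ownership transfers for streaming mappings: the sync_*_for_cpu helpers
 * hand a mapped buffer back to the CPU, and the sync_*_for_device helpers
 * return it to the device. flush_write_buffers() drains pending CPU
 * stores on processors that need it and is a no-op elsewhere.
 */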
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_cpu)
		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
					     direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_device)
		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
						direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
						   size, direction);

	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
						      offset, size, direction);

	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}

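/*
 * Page-based mapping only needs the page's physical address, so it works
 * for highmem pages without a kernel mapping; it funnels into the same
 * ->map_single hook as dma_map_single().
 */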
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_single(dev, page_to_phys(page) + offset,
				   size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}

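/*
 * dma_cache_sync() is meant for memory from dma_alloc_noncoherent(); as
 * that memory is already coherent on x86 (see the aliases above), only a
 * write-buffer flush is required here.
 */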
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}
#endif