IB: expand ib_umem_get() prototype

diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 14159ff..6f7c096 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: uverbs_mem.c 2743 2005-06-28 22:27:59Z roland $
  */
 
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/sched.h>
 #include <linux/hugetlb.h>
+#include <linux/dma-attrs.h>
 
 #include "uverbs.h"
 
@@ -72,9 +71,10 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
  * @addr: userspace virtual address to start at
  * @size: length of region to pin
  * @access: IB_ACCESS_xxx flags for memory being pinned
+ * @dmasync: flush in-flight DMA when the memory region is written
  */
 struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
-                           size_t size, int access)
+                           size_t size, int access, int dmasync)
 {
        struct ib_umem *umem;
        struct page **page_list;
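Widening the prototype means every in-tree caller of ib_umem_get() has to be updated in the same patch. As a sketch of what that looks like (the foo_ driver and its mr struct below are hypothetical, not part of this diff), a reg_user_mr method that does not need the barrier semantics simply passes 0 for the new argument:

    /* Hypothetical caller: a driver's reg_user_mr method updated for the
     * widened prototype.  dmasync = 0 keeps the old mapping behavior. */
    struct ib_mr *foo_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata)
    {
            struct foo_mr *mr = kzalloc(sizeof(*mr), GFP_KERNEL);

            if (!mr)
                    return ERR_PTR(-ENOMEM);

            mr->umem = ib_umem_get(pd->uobject->context, start, length,
                                   access_flags, 0 /* dmasync */);
            if (IS_ERR(mr->umem)) {
                    kfree(mr);
                    return ERR_CAST(mr->umem);
            }
            return &mr->ibmr;
    }

Callers that map consumer-visible structures written by the HCA, such as userspace CQ buffers, are the ones expected to pass dmasync = 1.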
@@ -87,6 +87,10 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        int ret;
        int off;
        int i;
+       DEFINE_DMA_ATTRS(attrs);
+
+       if (dmasync)
+               dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 
        if (!can_do_mlock())
                return ERR_PTR(-EPERM);
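DEFINE_DMA_ATTRS() and dma_set_attr() come from the newly included <linux/dma-attrs.h>. In this era the attribute set is just a small bitmap of flags; roughly (a paraphrase of the header from memory, so details such as the NULL guard may differ slightly from the tree):

    /* Paraphrase of <linux/dma-attrs.h> circa 2.6.26; not verbatim. */
    enum dma_attr {
            DMA_ATTR_WRITE_BARRIER,
            DMA_ATTR_MAX,
    };

    struct dma_attrs {
            unsigned long flags[BITS_TO_LONGS(DMA_ATTR_MAX)];
    };

    #define DEFINE_DMA_ATTRS(x) struct dma_attrs x = { }

    static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
    {
            if (attrs == NULL)
                    return;
            BUG_ON(attr >= DMA_ATTR_MAX);
            __set_bit(attr, attrs->flags);   /* record the requested attribute */
    }

Whether DMA_ATTR_WRITE_BARRIER has any effect is platform-specific: most architectures ignore it, and only IOMMUs that can flush in-flight DMA before completing a write (SGI Altix was the motivating case for this series) act on it.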
@@ -144,7 +148,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        ret = 0;
        while (npages) {
                ret = get_user_pages(current, current->mm, cur_base,
-                                    min_t(int, npages,
+                                    min_t(unsigned long, npages,
                                           PAGE_SIZE / sizeof (struct page *)),
                                     1, !umem->writable, page_list, vma_list);
 
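The min_t() type fix matters because npages is an unsigned long derived from the user-supplied size, and forcing the comparison through int truncates it. A standalone illustration of the arithmetic (userspace C with GCC statement expressions; whether a real caller can reach this range in practice depends on the can_do_mlock() limits, so treat this as a demonstration of the cast only):

    /* Demonstrates the truncation bug fixed above: min_t(int, ...) casts an
     * unsigned long page count to int before comparing. */
    #include <stdio.h>

    #define min_t(type, x, y) ({            \
            type __x = (x);                 \
            type __y = (y);                 \
            __x < __y ? __x : __y; })

    int main(void)
    {
            unsigned long npages = 0x80000000UL;  /* 2^31 pages */

            /* Old code: (int)0x80000000 is INT_MIN on a typical LP64 box,
             * so the "minimum" is a huge negative count handed to
             * get_user_pages(). */
            printf("min_t(int, ...)           = %d\n",
                   min_t(int, npages, 512));

            /* Fixed code: the comparison stays in unsigned long and the
             * chunk size (512 == PAGE_SIZE / sizeof(struct page *) with 4K
             * pages on 64-bit) wins as expected. */
            printf("min_t(unsigned long, ...) = %lu\n",
                   min_t(unsigned long, npages, 512));
            return 0;
    }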
@@ -171,15 +175,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                                if (vma_list &&
                                    !is_vm_hugetlb_page(vma_list[i + off]))
                                        umem->hugetlb = 0;
-                               sg_set_page(&chunk->page_list[i], page_list[i + off]);
-                               chunk->page_list[i].offset = 0;
-                               chunk->page_list[i].length = PAGE_SIZE;
+                               sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
                        }
 
-                       chunk->nmap = ib_dma_map_sg(context->device,
-                                                   &chunk->page_list[0],
-                                                   chunk->nents,
-                                                   DMA_BIDIRECTIONAL);
+                       chunk->nmap = ib_dma_map_sg_attrs(context->device,
+                                                         &chunk->page_list[0],
+                                                         chunk->nents,
+                                                         DMA_BIDIRECTIONAL,
+                                                         &attrs);
                        if (chunk->nmap <= 0) {
                                for (i = 0; i < chunk->nents; ++i)
                                        put_page(sg_page(&chunk->page_list[i]));
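Two API changes from elsewhere in the tree surface in this hunk: sg_set_page() has since grown length and offset parameters, so the three-line open-coded initialization collapses into a single call, and the mapping switches to ib_dma_map_sg_attrs() so that the attrs built at the top of the function actually reach the DMA layer. The same patch adds that wrapper to <rdma/ib_verbs.h>; schematically (paraphrased from memory, and eliding how devices with custom dma_ops are handled):

    /* Paraphrase of the wrapper added in <rdma/ib_verbs.h>; not verbatim. */
    static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
                                          struct scatterlist *sg, int nents,
                                          enum dma_data_direction direction,
                                          struct dma_attrs *attrs)
    {
            return dma_map_sg_attrs(dev->dma_device, sg, nents,
                                    direction, attrs);
    }

With an empty attrs bitmap this is equivalent to the old ib_dma_map_sg() path, so callers that pass dmasync = 0 see no functional change.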