IB/mthca, mlx4_core: Fix typo in comment
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index a70a1c7..e61f3e6 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -1,5 +1,7 @@
 /*
  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
  * $Id$
  */
 
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+
+#include <asm/page.h>
+
 #include "mthca_memfree.h"
 #include "mthca_dev.h"
 #include "mthca_cmd.h"
@@ -45,22 +53,51 @@ enum {
        MTHCA_TABLE_CHUNK_SIZE = 1 << 18
 };
 
-void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm)
+struct mthca_user_db_table {
+       struct mutex mutex;
+       struct {
+               u64                uvirt;
+               struct scatterlist mem;
+               int                refcount;
+       }                page[0];
+};
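
Note: the page[0] member is the zero-length-array idiom; the struct is over-allocated so the trailing array holds one entry per UARC page (see the kmalloc in mthca_init_user_db_tab below). A minimal userspace sketch of the same pattern, with illustrative names:

    #include <stdlib.h>

    struct tail {
            int n;
            int item[0];        /* storage is allocated past the struct */
    };

    static struct tail *tail_alloc(int n)
    {
            struct tail *t = malloc(sizeof *t + n * sizeof *t->item);

            if (t)
                    t->n = n;
            return t;
    }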
+
+static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
+{
+       int i;
+
+       if (chunk->nsg > 0)
+               pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
+                            PCI_DMA_BIDIRECTIONAL);
+
+       for (i = 0; i < chunk->npages; ++i)
+               __free_pages(chunk->mem[i].page,
+                            get_order(chunk->mem[i].length));
+}
+
+static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
 {
-       struct mthca_icm_chunk *chunk, *tmp;
        int i;
 
+       for (i = 0; i < chunk->npages; ++i) {
+               dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
+                                 lowmem_page_address(chunk->mem[i].page),
+                                 sg_dma_address(&chunk->mem[i]));
+       }
+}
+
+void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
+{
+       struct mthca_icm_chunk *chunk, *tmp;
+
        if (!icm)
                return;
 
        list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
-               if (chunk->nsg > 0)
-                       pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
-                                    PCI_DMA_BIDIRECTIONAL);
-
-               for (i = 0; i < chunk->npages; ++i)
-                       __free_pages(chunk->mem[i].page,
-                                    get_order(chunk->mem[i].length));
+               if (coherent)
+                       mthca_free_icm_coherent(dev, chunk);
+               else
+                       mthca_free_icm_pages(dev, chunk);
 
                kfree(chunk);
        }
@@ -68,12 +105,41 @@ void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm)
        kfree(icm);
 }
 
+static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
+{
+       mem->page = alloc_pages(gfp_mask, order);
+       if (!mem->page)
+               return -ENOMEM;
+
+       mem->length = PAGE_SIZE << order;
+       mem->offset = 0;
+       return 0;
+}
+
+static int mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
+                                   int order, gfp_t gfp_mask)
+{
+       void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem),
+                                      gfp_mask);
+       if (!buf)
+               return -ENOMEM;
+
+       sg_set_buf(mem, buf, PAGE_SIZE << order);
+       BUG_ON(mem->offset);
+       sg_dma_len(mem) = PAGE_SIZE << order;
+       return 0;
+}
+
 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
-                                 unsigned int gfp_mask)
+                                 gfp_t gfp_mask, int coherent)
 {
        struct mthca_icm *icm;
        struct mthca_icm_chunk *chunk = NULL;
        int cur_order;
+       int ret;
+
+       /* We use sg_set_buf for coherent allocs, which assumes low memory */
+       BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
 
        icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
        if (!icm)
@@ -99,21 +165,30 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
                while (1 << cur_order > npages)
                        --cur_order;
 
-               chunk->mem[chunk->npages].page = alloc_pages(gfp_mask, cur_order);
-               if (chunk->mem[chunk->npages].page) {
-                       chunk->mem[chunk->npages].length = PAGE_SIZE << cur_order;
-                       chunk->mem[chunk->npages].offset = 0;
+               if (coherent)
+                       ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
+                                                      &chunk->mem[chunk->npages],
+                                                      cur_order, gfp_mask);
+               else
+                       ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
+                                                   cur_order, gfp_mask);
+
+               if (!ret) {
+                       ++chunk->npages;
 
-                       if (++chunk->npages == MTHCA_ICM_CHUNK_LEN) {
+                       if (coherent)
+                               ++chunk->nsg;
+                       else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
                                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                                        chunk->npages,
                                                        PCI_DMA_BIDIRECTIONAL);
 
                                if (chunk->nsg <= 0)
                                        goto fail;
+                       }
 
+                       if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
                                chunk = NULL;
-                       }
 
                        npages -= 1 << cur_order;
                } else {
@@ -123,7 +198,7 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
                }
        }
 
-       if (chunk) {
+       if (!coherent && chunk) {
                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                        chunk->npages,
                                        PCI_DMA_BIDIRECTIONAL);
@@ -135,7 +210,7 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
        return icm;
 
 fail:
-       mthca_free_icm(dev, icm);
+       mthca_free_icm(dev, icm, coherent);
        return NULL;
 }
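
The allocation loop above always requests the largest power-of-two block that still fits (clamping cur_order so 1 << cur_order never exceeds the pages remaining) and, on allocation failure, retries at the next lower order down to order 0. A standalone sketch of that policy, with illustrative names and a print in place of the real allocator:

    #include <stdio.h>

    static void simulate_icm_orders(int npages, int max_order)
    {
            int cur_order = max_order;

            while (npages > 0) {
                    /* clamp the order to the pages still needed */
                    while (1 << cur_order > npages)
                            --cur_order;
                    /* the driver would halve cur_order here if
                     * alloc_pages()/dma_alloc_coherent() failed */
                    printf("alloc order %d (%d pages)\n",
                           cur_order, 1 << cur_order);
                    npages -= 1 << cur_order;
            }
    }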
 
@@ -145,7 +220,7 @@ int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int ob
        int ret = 0;
        u8 status;
 
-       down(&table->mutex);
+       mutex_lock(&table->mutex);
 
        if (table->icm[i]) {
                ++table->icm[i]->refcount;
@@ -154,7 +229,7 @@ int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int ob
 
        table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
                                        (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
-                                       __GFP_NOWARN);
+                                       __GFP_NOWARN, table->coherent);
        if (!table->icm[i]) {
                ret = -ENOMEM;
                goto out;
@@ -162,7 +237,7 @@ int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int ob
 
        if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
                          &status) || status) {
-               mthca_free_icm(dev, table->icm[i]);
+               mthca_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
                ret = -ENOMEM;
                goto out;
@@ -171,25 +246,74 @@ int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int ob
        ++table->icm[i]->refcount;
 
 out:
-       up(&table->mutex);
+       mutex_unlock(&table->mutex);
        return ret;
 }
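
The chunk index in mthca_table_get is pure arithmetic: mask the object number (num_obj is a power of two), scale by the object size, and divide by the chunk size. A hedged illustration, reusing the 1 << 18 chunk size defined above:

    #include <stdio.h>

    #define TABLE_CHUNK_SIZE (1 << 18)      /* MTHCA_TABLE_CHUNK_SIZE */

    static void locate_obj(int obj, int num_obj, int obj_size)
    {
            int idx    = (obj & (num_obj - 1)) * obj_size;
            int chunk  = idx / TABLE_CHUNK_SIZE;
            int offset = idx % TABLE_CHUNK_SIZE;

            printf("obj %d -> ICM chunk %d, byte offset %d\n",
                   obj, chunk, offset);
    }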
 
 void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
 {
-       int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
+       int i;
        u8 status;
 
-       down(&table->mutex);
+       if (!mthca_is_memfree(dev))
+               return;
+
+       i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
+
+       mutex_lock(&table->mutex);
 
        if (--table->icm[i]->refcount == 0) {
                mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
-                               MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
-               mthca_free_icm(dev, table->icm[i]);
+                               MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
+                               &status);
+               mthca_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
        }
 
-       up(&table->mutex);
+       mutex_unlock(&table->mutex);
+}
+
+void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle)
+{
+       int idx, offset, dma_offset, i;
+       struct mthca_icm_chunk *chunk;
+       struct mthca_icm *icm;
+       struct page *page = NULL;
+
+       if (!table->lowmem)
+               return NULL;
+
+       mutex_lock(&table->mutex);
+
+       idx = (obj & (table->num_obj - 1)) * table->obj_size;
+       icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
+       dma_offset = offset = idx % MTHCA_TABLE_CHUNK_SIZE;
+
+       if (!icm)
+               goto out;
+
+       list_for_each_entry(chunk, &icm->chunk_list, list) {
+               for (i = 0; i < chunk->npages; ++i) {
+                       if (dma_handle && dma_offset >= 0) {
+                               if (sg_dma_len(&chunk->mem[i]) > dma_offset)
+                                       *dma_handle = sg_dma_address(&chunk->mem[i]) +
+                                               dma_offset;
+                               dma_offset -= sg_dma_len(&chunk->mem[i]);
+                       }
+                       /* DMA mapping can merge pages but not split them,
+                        * so if we found the page, dma_handle has already
+                        * been assigned to. */
+                       if (chunk->mem[i].length > offset) {
+                               page = chunk->mem[i].page;
+                               goto out;
+                       }
+                       offset -= chunk->mem[i].length;
+               }
+       }
+
+out:
+       mutex_unlock(&table->mutex);
+       return page ? lowmem_page_address(page) + offset : NULL;
 }
 
 int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
@@ -220,6 +344,9 @@ void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
 {
        int i;
 
+       if (!mthca_is_memfree(dev))
+               return;
+
        for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
                mthca_table_put(dev, table, i);
 }
@@ -227,14 +354,15 @@ void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
 struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
                                              u64 virt, int obj_size,
                                              int nobj, int reserved,
-                                             int use_lowmem)
+                                             int use_lowmem, int use_coherent)
 {
        struct mthca_icm_table *table;
        int num_icm;
+       unsigned chunk_size;
        int i;
        u8 status;
 
-       num_icm = obj_size * nobj / MTHCA_TABLE_CHUNK_SIZE;
+       num_icm = (obj_size * nobj + MTHCA_TABLE_CHUNK_SIZE - 1) / MTHCA_TABLE_CHUNK_SIZE;
 
        table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
        if (!table)
@@ -245,20 +373,25 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
        table->num_obj  = nobj;
        table->obj_size = obj_size;
        table->lowmem   = use_lowmem;
-       init_MUTEX(&table->mutex);
+       table->coherent = use_coherent;
+       mutex_init(&table->mutex);
 
        for (i = 0; i < num_icm; ++i)
                table->icm[i] = NULL;
 
        for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
-               table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
+               chunk_size = MTHCA_TABLE_CHUNK_SIZE;
+               if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size)
+                       chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE;
+
+               table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
                                                (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
-                                               __GFP_NOWARN);
+                                               __GFP_NOWARN, use_coherent);
                if (!table->icm[i])
                        goto err;
                if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE,
                                  &status) || status) {
-                       mthca_free_icm(dev, table->icm[i]);
+                       mthca_free_icm(dev, table->icm[i], table->coherent);
                        table->icm[i] = NULL;
                        goto err;
                }
@@ -276,8 +409,9 @@ err:
        for (i = 0; i < num_icm; ++i)
                if (table->icm[i]) {
                        mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
-                                       MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
-                       mthca_free_icm(dev, table->icm[i]);
+                                       MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
+                                       &status);
+                       mthca_free_icm(dev, table->icm[i], table->coherent);
                }
 
        kfree(table);
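
The num_icm computation above now rounds up, so a table whose total size is not a multiple of MTHCA_TABLE_CHUNK_SIZE still gets its final, partial chunk (which the chunk_size clamp in the loop then trims). The idiom is ordinary ceiling division:

    /* chunks needed to cover nobj objects of obj_size bytes each;
     * note that obj_size * nobj must not overflow int */
    static int chunks_needed(int obj_size, int nobj, int chunk_size)
    {
            return (obj_size * nobj + chunk_size - 1) / chunk_size;
    }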
@@ -293,21 +427,145 @@ void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
        for (i = 0; i < table->num_icm; ++i)
                if (table->icm[i]) {
                        mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
-                                       MTHCA_TABLE_CHUNK_SIZE >> 12, &status);
-                       mthca_free_icm(dev, table->icm[i]);
+                                       MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
+                                       &status);
+                       mthca_free_icm(dev, table->icm[i], table->coherent);
                }
 
        kfree(table);
 }
 
-static u64 mthca_uarc_virt(struct mthca_dev *dev, int page)
+static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page)
 {
        return dev->uar_table.uarc_base +
-               dev->driver_uar.index * dev->uar_table.uarc_size +
-               page * 4096;
+               uar->index * dev->uar_table.uarc_size +
+               page * MTHCA_ICM_PAGE_SIZE;
 }
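
Taking the UAR as an explicit parameter lets user contexts address their own UARC slice; the virtual address is just the base plus a per-UAR stride plus a 4 KB page offset. A minimal sketch of the arithmetic, assuming MTHCA_ICM_PAGE_SIZE is 4096:

    #include <stdint.h>

    #define ICM_PAGE_SIZE 4096

    static uint64_t uarc_virt(uint64_t uarc_base, unsigned uar_index,
                              unsigned uarc_size, unsigned page)
    {
            return uarc_base + (uint64_t)uar_index * uarc_size +
                   (uint64_t)page * ICM_PAGE_SIZE;
    }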
 
-int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db)
+int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
+                     struct mthca_user_db_table *db_tab, int index, u64 uaddr)
+{
+       int ret = 0;
+       u8 status;
+       int i;
+
+       if (!mthca_is_memfree(dev))
+               return 0;
+
+       if (index < 0 || index > dev->uar_table.uarc_size / 8)
+               return -EINVAL;
+
+       mutex_lock(&db_tab->mutex);
+
+       i = index / MTHCA_DB_REC_PER_PAGE;
+
+       if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE)       ||
+           (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) ||
+           (uaddr & 4095)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (db_tab->page[i].refcount) {
+               ++db_tab->page[i].refcount;
+               goto out;
+       }
+
+       ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
+                            &db_tab->page[i].mem.page, NULL);
+       if (ret < 0)
+               goto out;
+
+       db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE;
+       db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;
+
+       ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+       if (ret < 0) {
+               put_page(db_tab->page[i].mem.page);
+               goto out;
+       }
+
+       ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
+                                mthca_uarc_virt(dev, uar, i), &status);
+       if (!ret && status)
+               ret = -EINVAL;
+       if (ret) {
+               pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+               put_page(db_tab->page[i].mem.page);
+               goto out;
+       }
+
+       db_tab->page[i].uvirt    = uaddr;
+       db_tab->page[i].refcount = 1;
+
+out:
+       mutex_unlock(&db_tab->mutex);
+       return ret;
+}
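
The validation half of mthca_map_user_db is plain arithmetic: find the UARC page for the doorbell index, then require that the page is not full, that any previous mapping used the same user address, and that the address is 4 KB aligned. A sketch with illustrative names, assuming 8-byte doorbell records (512 per 4 KB page):

    #define DB_REC_PER_PAGE (4096 / 8)      /* MTHCA_DB_REC_PER_PAGE */

    /* Return the UARC page index for doorbell `index`, or -1 if the
     * checks mthca_map_user_db() applies would reject the mapping. */
    static int user_db_page(int index, unsigned long uaddr,
                            unsigned long prev_uvirt, int refcount)
    {
            int i = index / DB_REC_PER_PAGE;

            if (refcount >= DB_REC_PER_PAGE)        /* page full */
                    return -1;
            if (prev_uvirt && prev_uvirt != uaddr)  /* address changed */
                    return -1;
            if (uaddr & 4095)                       /* not page aligned */
                    return -1;
            return i;
    }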
+
+void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
+                        struct mthca_user_db_table *db_tab, int index)
+{
+       if (!mthca_is_memfree(dev))
+               return;
+
+       /*
+        * To make our bookkeeping simpler, we don't unmap DB
+        * pages until we clean up the whole db table.
+        */
+
+       mutex_lock(&db_tab->mutex);
+
+       --db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount;
+
+       mutex_unlock(&db_tab->mutex);
+}
+
+struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
+{
+       struct mthca_user_db_table *db_tab;
+       int npages;
+       int i;
+
+       if (!mthca_is_memfree(dev))
+               return NULL;
+
+       npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
+       db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
+       if (!db_tab)
+               return ERR_PTR(-ENOMEM);
+
+       mutex_init(&db_tab->mutex);
+       for (i = 0; i < npages; ++i) {
+               db_tab->page[i].refcount = 0;
+               db_tab->page[i].uvirt    = 0;
+       }
+
+       return db_tab;
+}
+
+void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
+                              struct mthca_user_db_table *db_tab)
+{
+       int i;
+       u8 status;
+
+       if (!mthca_is_memfree(dev))
+               return;
+
+       for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
+               if (db_tab->page[i].uvirt) {
+                       mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
+                       pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+                       put_page(db_tab->page[i].mem.page);
+               }
+       }
+
+       kfree(db_tab);
+}
+
+int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
+                  u32 qn, __be32 **db)
 {
        int group;
        int start, end, dir;
@@ -316,7 +574,7 @@ int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db)
        int ret = 0;
        u8 status;
 
-       down(&dev->db_tab->mutex);
+       mutex_lock(&dev->db_tab->mutex);
 
        switch (type) {
        case MTHCA_DB_TYPE_CQ_ARM:
@@ -337,7 +595,8 @@ int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db)
                break;
 
        default:
-               return -1;
+               ret = -EINVAL;
+               goto out;
        }
 
        for (i = start; i != end; i += dir)
@@ -348,34 +607,44 @@ int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db)
                        goto found;
                }
 
+       for (i = start; i != end; i += dir)
+               if (!dev->db_tab->page[i].db_rec) {
+                       page = dev->db_tab->page + i;
+                       goto alloc;
+               }
+
        if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
                ret = -ENOMEM;
                goto out;
        }
 
+       if (group == 0)
+               ++dev->db_tab->max_group1;
+       else
+               --dev->db_tab->min_group2;
+
        page = dev->db_tab->page + end;
-       page->db_rec = dma_alloc_coherent(&dev->pdev->dev, 4096,
+
+alloc:
+       page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                                          &page->mapping, GFP_KERNEL);
        if (!page->db_rec) {
                ret = -ENOMEM;
                goto out;
        }
-       memset(page->db_rec, 0, 4096);
+       memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);
 
-       ret = mthca_MAP_ICM_page(dev, page->mapping, mthca_uarc_virt(dev, i), &status);
+       ret = mthca_MAP_ICM_page(dev, page->mapping,
+                                mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
        if (!ret && status)
                ret = -EINVAL;
        if (ret) {
-               dma_free_coherent(&dev->pdev->dev, 4096,
+               dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                                  page->db_rec, page->mapping);
                goto out;
        }
 
        bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);
-       if (group == 0)
-               ++dev->db_tab->max_group1;
-       else
-               --dev->db_tab->min_group2;
 
 found:
        j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
@@ -388,10 +657,10 @@ found:
 
        page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));
 
-       *db = (u32 *) &page->db_rec[j];
+       *db = (__be32 *) &page->db_rec[j];
 
 out:
-       up(&dev->db_tab->mutex);
+       mutex_unlock(&dev->db_tab->mutex);
 
        return ret;
 }
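
Group 1 doorbell pages grow from the bottom of the UARC and group 2 from the top, so the table is full once max_group1 meets min_group2. Each record packs the queue number above bit 8 and the doorbell type in bits 5-7 before being byte-swapped for the HCA, as in the cpu_to_be64() line above. A hedged sketch of just the encoding (byte order left out):

    #include <stdint.h>

    static uint64_t db_record(uint32_t qn, unsigned type)
    {
            /* matches page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5)) */
            return ((uint64_t)qn << 8) | ((uint64_t)type << 5);
    }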
@@ -407,7 +676,7 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
 
        page = dev->db_tab->page + i;
 
-       down(&dev->db_tab->mutex);
+       mutex_lock(&dev->db_tab->mutex);
 
        page->db_rec[j] = 0;
        if (i >= dev->db_tab->min_group2)
@@ -416,9 +685,9 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
 
        if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
            i >= dev->db_tab->max_group1 - 1) {
-               mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, i), 1, &status);
+               mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
 
-               dma_free_coherent(&dev->pdev->dev, 4096,
+               dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                                  page->db_rec, page->mapping);
                page->db_rec = NULL;
 
@@ -430,23 +699,23 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
                        ++dev->db_tab->min_group2;
        }
 
-       up(&dev->db_tab->mutex);
+       mutex_unlock(&dev->db_tab->mutex);
 }
 
 int mthca_init_db_tab(struct mthca_dev *dev)
 {
        int i;
 
-       if (dev->hca_type != ARBEL_NATIVE)
+       if (!mthca_is_memfree(dev))
                return 0;
 
        dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL);
        if (!dev->db_tab)
                return -ENOMEM;
 
-       init_MUTEX(&dev->db_tab->mutex);
+       mutex_init(&dev->db_tab->mutex);
 
-       dev->db_tab->npages     = dev->uar_table.uarc_size / PAGE_SIZE;
+       dev->db_tab->npages     = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
        dev->db_tab->max_group1 = 0;
        dev->db_tab->min_group2 = dev->db_tab->npages - 1;
 
@@ -469,7 +738,7 @@ void mthca_cleanup_db_tab(struct mthca_dev *dev)
        int i;
        u8 status;
 
-       if (dev->hca_type != ARBEL_NATIVE)
+       if (!mthca_is_memfree(dev))
                return;
 
        /*
@@ -485,9 +754,9 @@ void mthca_cleanup_db_tab(struct mthca_dev *dev)
                if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
                        mthca_warn(dev, "Kernel UARC page %d not empty\n", i);
 
-               mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, i), 1, &status);
+               mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
 
-               dma_free_coherent(&dev->pdev->dev, 4096,
+               dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
                                  dev->db_tab->page[i].db_rec,
                                  dev->db_tab->page[i].mapping);
        }