include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 9ebadd6..5eee666 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -30,8 +30,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: mthca_main.c 1396 2004-12-28 04:10:27Z roland $
  */
 
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
+#include <linux/gfp.h>
 
 #include "mthca_dev.h"
 #include "mthca_config_reg.h"
 #include "mthca_cmd.h"
 #include "mthca_profile.h"
 #include "mthca_memfree.h"
+#include "mthca_wqe.h"
 
 MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
@@ -126,6 +126,10 @@ module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
 MODULE_PARM_DESC(fmr_reserved_mtts,
                 "number of memory translation table segments reserved for FMR");
 
+static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
+module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
+
 static char mthca_version[] __devinitdata =
        DRV_NAME ": Mellanox InfiniBand HCA driver v"
        DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -163,6 +167,7 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
        int err;
        u8 status;
 
+       mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;
        err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
        if (err) {
                mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
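The segment size computed here is driven by the new log_mtts_per_seg parameter introduced above. A minimal sketch of the arithmetic, assuming MTHCA_MTT_SEG_SIZE is 64 bytes and each MTT entry is 8 bytes (both assumed values, not taken from this hunk):

	/* illustration only -- mirrors the expressions used in the patch */
	int log_mtts_per_seg = ilog2(64 / 8);                 /* default: ilog2(8) = 3    */
	int mtt_seg_size     = (1 << log_mtts_per_seg) * 8;   /* 8 entries * 8 bytes = 64 */
	/* loading with log_mtts_per_seg=5 would give 32 entries, i.e. 256 bytes per segment */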
@@ -200,7 +205,18 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
        mdev->limits.gid_table_len      = dev_lim->max_gids;
        mdev->limits.pkey_table_len     = dev_lim->max_pkeys;
        mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
-       mdev->limits.max_sg             = dev_lim->max_sg;
+       /*
+        * Need to allow for worst case send WQE overhead and check
+        * whether max_desc_sz imposes a lower limit than max_sg; UD
+        * send has the biggest overhead.
+        */
+       mdev->limits.max_sg             = min_t(int, dev_lim->max_sg,
+                                             (dev_lim->max_desc_sz -
+                                              sizeof (struct mthca_next_seg) -
+                                              (mthca_is_memfree(mdev) ?
+                                               sizeof (struct mthca_arbel_ud_seg) :
+                                               sizeof (struct mthca_tavor_ud_seg))) /
+                                               sizeof (struct mthca_data_seg));
        mdev->limits.max_wqes           = dev_lim->max_qp_sz;
        mdev->limits.max_qp_init_rdma   = dev_lim->max_requester_per_qp;
        mdev->limits.reserved_qps       = dev_lim->reserved_qps;
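The comment in this hunk states the intent; as a worked example of the same min_t() capping, using illustrative values that are assumptions rather than numbers from this diff (max_desc_sz = 1008, a 16-byte next segment, a 48-byte UD address segment, 16-byte data segments):

	/* hypothetical numbers, to show how max_desc_sz can lower max_sg */
	int max_desc_sz = 1008;
	int overhead    = 16 /* next seg */ + 48 /* UD address seg */;
	int cap         = (max_desc_sz - overhead) / 16;             /* (1008 - 64) / 16 = 59 */
	int max_sg      = min_t(int, 64 /* dev_lim->max_sg */, cap); /* -> 59 */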
@@ -450,11 +466,11 @@ static int mthca_init_icm(struct mthca_dev *mdev,
        }
 
        /* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */
-       mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * MTHCA_MTT_SEG_SIZE,
-                                          dma_get_cache_alignment()) / MTHCA_MTT_SEG_SIZE;
+       mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size,
+                                          dma_get_cache_alignment()) / mdev->limits.mtt_seg_size;
 
        mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
-                                                        MTHCA_MTT_SEG_SIZE,
+                                                        mdev->limits.mtt_seg_size,
                                                         mdev->limits.num_mtt_segs,
                                                         mdev->limits.reserved_mtts,
                                                         1, 0);
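The ALIGN() above keeps the CPU-written MTT segments off the cache lines the HCA may still DMA into. A small sketch of the rounding with assumed values (64-byte segments, 128-byte cache lines; neither number comes from this diff):

	/* ALIGN(x, a) rounds x up to the next multiple of a */
	int mtt_seg_size  = 64;        /* assumed mdev->limits.mtt_seg_size  */
	int reserved_mtts = 1;         /* assumed firmware-reserved segments */
	reserved_mtts = ALIGN(reserved_mtts * mtt_seg_size, 128) / mtt_seg_size;
	/* ALIGN(64, 128) = 128, so one reserved segment becomes two */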
@@ -911,58 +927,6 @@ err_uar_table_free:
        return err;
 }
 
-static int mthca_request_regions(struct pci_dev *pdev, int ddr_hidden)
-{
-       int err;
-
-       /*
-        * We can't just use pci_request_regions() because the MSI-X
-        * table is right in the middle of the first BAR.  If we did
-        * pci_request_region and grab all of the first BAR, then
-        * setting up MSI-X would fail, since the PCI core wants to do
-        * request_mem_region on the MSI-X vector table.
-        *
-        * So just request what we need right now, and request any
-        * other regions we need when setting up EQs.
-        */
-       if (!request_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
-                               MTHCA_HCR_SIZE, DRV_NAME))
-               return -EBUSY;
-
-       err = pci_request_region(pdev, 2, DRV_NAME);
-       if (err)
-               goto err_bar2_failed;
-
-       if (!ddr_hidden) {
-               err = pci_request_region(pdev, 4, DRV_NAME);
-               if (err)
-                       goto err_bar4_failed;
-       }
-
-       return 0;
-
-err_bar4_failed:
-       pci_release_region(pdev, 2);
-
-err_bar2_failed:
-       release_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
-                          MTHCA_HCR_SIZE);
-
-       return err;
-}
-
-static void mthca_release_regions(struct pci_dev *pdev,
-                                 int ddr_hidden)
-{
-       if (!ddr_hidden)
-               pci_release_region(pdev, 4);
-
-       pci_release_region(pdev, 2);
-
-       release_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
-                          MTHCA_HCR_SIZE);
-}
-
 static int mthca_enable_msi_x(struct mthca_dev *mdev)
 {
        struct msix_entry entries[3];
@@ -1049,7 +1013,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
        if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM))
                ddr_hidden = 1;
 
-       err = mthca_request_regions(pdev, ddr_hidden);
+       err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(&pdev->dev, "Cannot obtain PCI resources, "
                        "aborting.\n");
@@ -1058,20 +1022,20 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
 
        pci_set_master(pdev);
 
-       err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
-               err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
                        goto err_free_res;
                }
        }
-       err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
                         "consistent PCI DMA mask.\n");
-               err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
                                "aborting.\n");
@@ -1153,6 +1117,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
        pci_set_drvdata(pdev, mdev);
        mdev->hca_type = hca_type;
 
+       mdev->active = true;
+
        return 0;
 
 err_unregister:
@@ -1186,7 +1152,7 @@ err_free_dev:
        ib_dealloc_device(&mdev->ib_dev);
 
 err_free_res:
-       mthca_release_regions(pdev, ddr_hidden);
+       pci_release_regions(pdev);
 
 err_disable_pdev:
        pci_disable_device(pdev);
@@ -1230,8 +1196,7 @@ static void __mthca_remove_one(struct pci_dev *pdev)
                        pci_disable_msix(pdev);
 
                ib_dealloc_device(&mdev->ib_dev);
-               mthca_release_regions(pdev, mdev->mthca_flags &
-                                     MTHCA_FLAG_DDR_HIDDEN);
+               pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
@@ -1253,15 +1218,11 @@ int __mthca_restart_one(struct pci_dev *pdev)
 static int __devinit mthca_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *id)
 {
-       static int mthca_version_printed = 0;
        int ret;
 
        mutex_lock(&mthca_device_mutex);
 
-       if (!mthca_version_printed) {
-               printk(KERN_INFO "%s", mthca_version);
-               ++mthca_version_printed;
-       }
+       printk_once(KERN_INFO "%s", mthca_version);
 
        if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
                printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
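printk_once() replaces the open-coded print-the-banner-once pattern removed above. Conceptually it expands to something like this sketch (simplified; the real macro in the kernel headers differs in detail):

	#define printk_once(fmt, ...)				\
	({							\
		static bool __print_once;			\
		if (!__print_once) {				\
			__print_once = true;			\
			printk(fmt, ##__VA_ARGS__);		\
		}						\
	})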
@@ -1358,6 +1319,12 @@ static void __init mthca_validate_profile(void)
                printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n",
                       hca_profile.fmr_reserved_mtts);
        }
+
+       if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
+               printk(KERN_WARNING PFX "bad log_mtts_per_seg (%d). Using default - %d\n",
+                      log_mtts_per_seg, ilog2(MTHCA_MTT_SEG_SIZE / 8));
+               log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
+       }
 }
 
 static int __init mthca_init(void)
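For reference, the accepted log_mtts_per_seg range checked above maps to segment sizes as follows, using the mtt_seg_size = (1 << log_mtts_per_seg) * 8 expression from earlier in this diff:

	/* log_mtts_per_seg   entries/segment   segment size
	 *        1                  2             16 bytes
	 *        2                  4             32 bytes
	 *        3                  8             64 bytes (the default if MTHCA_MTT_SEG_SIZE is 64)
	 *        4                 16            128 bytes
	 *        5                 32            256 bytes
	 */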