IB/mthca: Fix max_sge value returned by query_device
drivers/infiniband/hw/mthca/mthca_main.c
1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4  * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * $Id: mthca_main.c 1396 2004-12-28 04:10:27Z roland $
35  */
36
37 #include <linux/module.h>
38 #include <linux/init.h>
39 #include <linux/errno.h>
40 #include <linux/pci.h>
41 #include <linux/interrupt.h>
42
43 #include "mthca_dev.h"
44 #include "mthca_config_reg.h"
45 #include "mthca_cmd.h"
46 #include "mthca_profile.h"
47 #include "mthca_memfree.h"
48 #include "mthca_wqe.h"
49
50 MODULE_AUTHOR("Roland Dreier");
51 MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
52 MODULE_LICENSE("Dual BSD/GPL");
53 MODULE_VERSION(DRV_VERSION);
54
55 #ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
56
57 int mthca_debug_level = 0;
58 module_param_named(debug_level, mthca_debug_level, int, 0644);
59 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
60
61 #endif /* CONFIG_INFINIBAND_MTHCA_DEBUG */
62
63 #ifdef CONFIG_PCI_MSI
64
65 static int msi_x = 1;
66 module_param(msi_x, int, 0444);
67 MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
68
69 #else /* CONFIG_PCI_MSI */
70
71 #define msi_x (0)
72
73 #endif /* CONFIG_PCI_MSI */
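
/*
 * Editor's note (not in the original source): without CONFIG_PCI_MSI,
 * msi_x is the constant 0, so the "msi_x && !mthca_enable_msi_x(mdev)"
 * test in __mthca_init_one() can never enable MSI-X and the driver
 * always falls back to the legacy pin interrupt.
 */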
74
75 static int tune_pci = 0;
76 module_param(tune_pci, int, 0444);
77 MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero");
78
79 DEFINE_MUTEX(mthca_device_mutex);
80
81 #define MTHCA_DEFAULT_NUM_QP            (1 << 16)
82 #define MTHCA_DEFAULT_RDB_PER_QP        (1 << 2)
83 #define MTHCA_DEFAULT_NUM_CQ            (1 << 16)
84 #define MTHCA_DEFAULT_NUM_MCG           (1 << 13)
85 #define MTHCA_DEFAULT_NUM_MPT           (1 << 17)
86 #define MTHCA_DEFAULT_NUM_MTT           (1 << 20)
87 #define MTHCA_DEFAULT_NUM_UDAV          (1 << 15)
88 #define MTHCA_DEFAULT_NUM_RESERVED_MTTS (1 << 18)
89 #define MTHCA_DEFAULT_NUM_UARC_SIZE     (1 << 18)
90
91 static struct mthca_profile hca_profile = {
92         .num_qp             = MTHCA_DEFAULT_NUM_QP,
93         .rdb_per_qp         = MTHCA_DEFAULT_RDB_PER_QP,
94         .num_cq             = MTHCA_DEFAULT_NUM_CQ,
95         .num_mcg            = MTHCA_DEFAULT_NUM_MCG,
96         .num_mpt            = MTHCA_DEFAULT_NUM_MPT,
97         .num_mtt            = MTHCA_DEFAULT_NUM_MTT,
98         .num_udav           = MTHCA_DEFAULT_NUM_UDAV,          /* Tavor only */
99         .fmr_reserved_mtts  = MTHCA_DEFAULT_NUM_RESERVED_MTTS, /* Tavor only */
100         .uarc_size          = MTHCA_DEFAULT_NUM_UARC_SIZE,     /* Arbel only */
101 };
102
103 module_param_named(num_qp, hca_profile.num_qp, int, 0444);
104 MODULE_PARM_DESC(num_qp, "maximum number of QPs per HCA");
105
106 module_param_named(rdb_per_qp, hca_profile.rdb_per_qp, int, 0444);
107 MODULE_PARM_DESC(rdb_per_qp, "number of RDB buffers per QP");
108
109 module_param_named(num_cq, hca_profile.num_cq, int, 0444);
110 MODULE_PARM_DESC(num_cq, "maximum number of CQs per HCA");
111
112 module_param_named(num_mcg, hca_profile.num_mcg, int, 0444);
113 MODULE_PARM_DESC(num_mcg, "maximum number of multicast groups per HCA");
114
115 module_param_named(num_mpt, hca_profile.num_mpt, int, 0444);
116 MODULE_PARM_DESC(num_mpt,
117                 "maximum number of memory protection table entries per HCA");
118
119 module_param_named(num_mtt, hca_profile.num_mtt, int, 0444);
120 MODULE_PARM_DESC(num_mtt,
121                  "maximum number of memory translation table segments per HCA");
122
123 module_param_named(num_udav, hca_profile.num_udav, int, 0444);
124 MODULE_PARM_DESC(num_udav, "maximum number of UD address vectors per HCA");
125
126 module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
127 MODULE_PARM_DESC(fmr_reserved_mtts,
128                  "number of memory translation table segments reserved for FMR");
129
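/*
 * Editor's note (not in the original source): the profile knobs above are
 * ordinary module parameters, so they are normally set at load time, e.g.
 * "modprobe ib_mthca num_qp=131072 num_mtt=2097152" (module name assumed
 * from DRV_NAME).  Values that are not positive powers of two are
 * corrected by mthca_validate_profile() below before any HCA is probed.
 */
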
130 static char mthca_version[] __devinitdata =
131         DRV_NAME ": Mellanox InfiniBand HCA driver v"
132         DRV_VERSION " (" DRV_RELDATE ")\n";
133
134 static int mthca_tune_pci(struct mthca_dev *mdev)
135 {
136         if (!tune_pci)
137                 return 0;
138
139         /* First try to max out Read Byte Count */
140         if (pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX)) {
141                 if (pcix_set_mmrbc(mdev->pdev, pcix_get_max_mmrbc(mdev->pdev))) {
142                         mthca_err(mdev, "Couldn't set PCI-X max read count, "
143                                 "aborting.\n");
144                         return -ENODEV;
145                 }
146         } else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE))
147                 mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");
148
149         if (pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP)) {
150                 if (pcie_set_readrq(mdev->pdev, 4096)) {
151                         mthca_err(mdev, "Couldn't write PCI Express read request, "
152                                 "aborting.\n");
153                         return -ENODEV;
154                 }
155         } else if (mdev->mthca_flags & MTHCA_FLAG_PCIE)
156                 mthca_info(mdev, "No PCI Express capability, "
157                            "not setting Max Read Request Size.\n");
158
159         return 0;
160 }
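
/*
 * Editor's note: PCI tuning is off by default; loading with something like
 * "modprobe ib_mthca tune_pci=1" makes mthca_tune_pci() above max out the
 * PCI-X maximum memory read byte count and raise the PCI Express Max Read
 * Request Size to 4096 bytes.
 */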
161
162 static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
163 {
164         int err;
165         u8 status;
166
167         err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
168         if (err) {
169                 mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
170                 return err;
171         }
172         if (status) {
173                 mthca_err(mdev, "QUERY_DEV_LIM returned status 0x%02x, "
174                           "aborting.\n", status);
175                 return -EINVAL;
176         }
177         if (dev_lim->min_page_sz > PAGE_SIZE) {
178                 mthca_err(mdev, "HCA minimum page size of %d bigger than "
179                           "kernel PAGE_SIZE of %ld, aborting.\n",
180                           dev_lim->min_page_sz, PAGE_SIZE);
181                 return -ENODEV;
182         }
183         if (dev_lim->num_ports > MTHCA_MAX_PORTS) {
184                 mthca_err(mdev, "HCA has %d ports, but we only support %d, "
185                           "aborting.\n",
186                           dev_lim->num_ports, MTHCA_MAX_PORTS);
187                 return -ENODEV;
188         }
189
190         if (dev_lim->uar_size > pci_resource_len(mdev->pdev, 2)) {
191                 mthca_err(mdev, "HCA reported UAR size of 0x%x bigger than "
192                           "PCI resource 2 size of 0x%llx, aborting.\n",
193                           dev_lim->uar_size,
194                           (unsigned long long)pci_resource_len(mdev->pdev, 2));
195                 return -ENODEV;
196         }
197
198         mdev->limits.num_ports          = dev_lim->num_ports;
199         mdev->limits.vl_cap             = dev_lim->max_vl;
200         mdev->limits.mtu_cap            = dev_lim->max_mtu;
201         mdev->limits.gid_table_len      = dev_lim->max_gids;
202         mdev->limits.pkey_table_len     = dev_lim->max_pkeys;
203         mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
204         /*
205          * Need to allow for worst case send WQE overhead and check
206          * whether max_desc_sz imposes a lower limit than max_sg; UD
207          * send has the biggest overhead.
208          */
209         mdev->limits.max_sg             = min_t(int, dev_lim->max_sg,
210                                               (dev_lim->max_desc_sz -
211                                                sizeof (struct mthca_next_seg) -
212                                                (mthca_is_memfree(mdev) ?
213                                                 sizeof (struct mthca_arbel_ud_seg) :
214                                                 sizeof (struct mthca_tavor_ud_seg))) /
215                                                 sizeof (struct mthca_data_seg));
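        /*
         * Illustrative arithmetic (editor's sketch, hypothetical sizes):
         * with a 1008-byte max_desc_sz, a 16-byte next segment, a 48-byte
         * Arbel UD address vector segment and 16-byte data segments, a UD
         * send WQE holds at most (1008 - 16 - 48) / 16 = 59 scatter/gather
         * entries, so max_sg is clamped to min(dev_lim->max_sg, 59).
         */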
216         mdev->limits.max_wqes           = dev_lim->max_qp_sz;
217         mdev->limits.max_qp_init_rdma   = dev_lim->max_requester_per_qp;
218         mdev->limits.reserved_qps       = dev_lim->reserved_qps;
219         mdev->limits.max_srq_wqes       = dev_lim->max_srq_sz;
220         mdev->limits.reserved_srqs      = dev_lim->reserved_srqs;
221         mdev->limits.reserved_eecs      = dev_lim->reserved_eecs;
222         mdev->limits.max_desc_sz        = dev_lim->max_desc_sz;
223         mdev->limits.max_srq_sge        = mthca_max_srq_sge(mdev);
224         /*
225          * Subtract 1 from the limit because we need to allocate a
226          * spare CQE so the HCA HW can tell the difference between an
227          * empty CQ and a full CQ.
228          */
229         mdev->limits.max_cqes           = dev_lim->max_cq_sz - 1;
230         mdev->limits.reserved_cqs       = dev_lim->reserved_cqs;
231         mdev->limits.reserved_eqs       = dev_lim->reserved_eqs;
232         mdev->limits.reserved_mtts      = dev_lim->reserved_mtts;
233         mdev->limits.reserved_mrws      = dev_lim->reserved_mrws;
234         mdev->limits.reserved_uars      = dev_lim->reserved_uars;
235         mdev->limits.reserved_pds       = dev_lim->reserved_pds;
236         mdev->limits.port_width_cap     = dev_lim->max_port_width;
237         mdev->limits.page_size_cap      = ~(u32) (dev_lim->min_page_sz - 1);
238         mdev->limits.flags              = dev_lim->flags;
239         /*
240          * For old FW that doesn't return static rate support, use a
241          * value of 0x3 (only static rate values of 0 or 1 are handled),
242          * except on Sinai, where even old FW can handle static rate
243          * values of 2 and 3.
244          */
245         if (dev_lim->stat_rate_support)
246                 mdev->limits.stat_rate_support = dev_lim->stat_rate_support;
247         else if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
248                 mdev->limits.stat_rate_support = 0xf;
249         else
250                 mdev->limits.stat_rate_support = 0x3;
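        /*
         * Editor's note: the mask is read as a bitmap of usable static rate
         * values, so 0x3 covers values 0 and 1 and 0xf covers values 0
         * through 3, matching the comment above.
         */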
251
252         /* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
253            May be doable since hardware supports it for SRQ.
254
255            IB_DEVICE_N_NOTIFY_CQ is supported by hardware but not by driver.
256
257            IB_DEVICE_SRQ_RESIZE is supported by hardware but SRQ is not
258            supported by driver. */
259         mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
260                 IB_DEVICE_PORT_ACTIVE_EVENT |
261                 IB_DEVICE_SYS_IMAGE_GUID |
262                 IB_DEVICE_RC_RNR_NAK_GEN;
263
264         if (dev_lim->flags & DEV_LIM_FLAG_BAD_PKEY_CNTR)
265                 mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
266
267         if (dev_lim->flags & DEV_LIM_FLAG_BAD_QKEY_CNTR)
268                 mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
269
270         if (dev_lim->flags & DEV_LIM_FLAG_RAW_MULTI)
271                 mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI;
272
273         if (dev_lim->flags & DEV_LIM_FLAG_AUTO_PATH_MIG)
274                 mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
275
276         if (dev_lim->flags & DEV_LIM_FLAG_UD_AV_PORT_ENFORCE)
277                 mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
278
279         if (dev_lim->flags & DEV_LIM_FLAG_SRQ)
280                 mdev->mthca_flags |= MTHCA_FLAG_SRQ;
281
282         if (mthca_is_memfree(mdev))
283                 if (dev_lim->flags & DEV_LIM_FLAG_IPOIB_CSUM)
284                         mdev->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
285
286         return 0;
287 }
288
289 static int mthca_init_tavor(struct mthca_dev *mdev)
290 {
291         s64 size;
292         u8 status;
293         int err;
294         struct mthca_dev_lim        dev_lim;
295         struct mthca_profile        profile;
296         struct mthca_init_hca_param init_hca;
297
298         err = mthca_SYS_EN(mdev, &status);
299         if (err) {
300                 mthca_err(mdev, "SYS_EN command failed, aborting.\n");
301                 return err;
302         }
303         if (status) {
304                 mthca_err(mdev, "SYS_EN returned status 0x%02x, "
305                           "aborting.\n", status);
306                 return -EINVAL;
307         }
308
309         err = mthca_QUERY_FW(mdev, &status);
310         if (err) {
311                 mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
312                 goto err_disable;
313         }
314         if (status) {
315                 mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
316                           "aborting.\n", status);
317                 err = -EINVAL;
318                 goto err_disable;
319         }
320         err = mthca_QUERY_DDR(mdev, &status);
321         if (err) {
322                 mthca_err(mdev, "QUERY_DDR command failed, aborting.\n");
323                 goto err_disable;
324         }
325         if (status) {
326                 mthca_err(mdev, "QUERY_DDR returned status 0x%02x, "
327                           "aborting.\n", status);
328                 err = -EINVAL;
329                 goto err_disable;
330         }
331
332         err = mthca_dev_lim(mdev, &dev_lim);
333         if (err) {
334                 mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
335                 goto err_disable;
336         }
337
338         profile = hca_profile;
339         profile.num_uar   = dev_lim.uar_size / PAGE_SIZE;
340         profile.uarc_size = 0;
341         if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
342                 profile.num_srq = dev_lim.max_srqs;
343
344         size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
345         if (size < 0) {
346                 err = size;
347                 goto err_disable;
348         }
349
350         err = mthca_INIT_HCA(mdev, &init_hca, &status);
351         if (err) {
352                 mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
353                 goto err_disable;
354         }
355         if (status) {
356                 mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
357                           "aborting.\n", status);
358                 err = -EINVAL;
359                 goto err_disable;
360         }
361
362         return 0;
363
364 err_disable:
365         mthca_SYS_DIS(mdev, &status);
366
367         return err;
368 }
369
370 static int mthca_load_fw(struct mthca_dev *mdev)
371 {
372         u8 status;
373         int err;
374
375         /* FIXME: use HCA-attached memory for FW if present */
376
377         mdev->fw.arbel.fw_icm =
378                 mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
379                                 GFP_HIGHUSER | __GFP_NOWARN, 0);
380         if (!mdev->fw.arbel.fw_icm) {
381                 mthca_err(mdev, "Couldn't allocate FW area, aborting.\n");
382                 return -ENOMEM;
383         }
384
385         err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm, &status);
386         if (err) {
387                 mthca_err(mdev, "MAP_FA command failed, aborting.\n");
388                 goto err_free;
389         }
390         if (status) {
391                 mthca_err(mdev, "MAP_FA returned status 0x%02x, aborting.\n", status);
392                 err = -EINVAL;
393                 goto err_free;
394         }
395         err = mthca_RUN_FW(mdev, &status);
396         if (err) {
397                 mthca_err(mdev, "RUN_FW command failed, aborting.\n");
398                 goto err_unmap_fa;
399         }
400         if (status) {
401                 mthca_err(mdev, "RUN_FW returned status 0x%02x, aborting.\n", status);
402                 err = -EINVAL;
403                 goto err_unmap_fa;
404         }
405
406         return 0;
407
408 err_unmap_fa:
409         mthca_UNMAP_FA(mdev, &status);
410
411 err_free:
412         mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
413         return err;
414 }
415
416 static int mthca_init_icm(struct mthca_dev *mdev,
417                           struct mthca_dev_lim *dev_lim,
418                           struct mthca_init_hca_param *init_hca,
419                           u64 icm_size)
420 {
421         u64 aux_pages;
422         u8 status;
423         int err;
424
425         err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages, &status);
426         if (err) {
427                 mthca_err(mdev, "SET_ICM_SIZE command failed, aborting.\n");
428                 return err;
429         }
430         if (status) {
431                 mthca_err(mdev, "SET_ICM_SIZE returned status 0x%02x, "
432                           "aborting.\n", status);
433                 return -EINVAL;
434         }
435
436         mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n",
437                   (unsigned long long) icm_size >> 10,
438                   (unsigned long long) aux_pages << 2);
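        /*
         * Editor's note: icm_size is in bytes (hence the >> 10 to print KB)
         * and aux_pages counts 4 KB firmware pages (hence the << 2).
         */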
439
440         mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages,
441                                                  GFP_HIGHUSER | __GFP_NOWARN, 0);
442         if (!mdev->fw.arbel.aux_icm) {
443                 mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n");
444                 return -ENOMEM;
445         }
446
447         err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm, &status);
448         if (err) {
449                 mthca_err(mdev, "MAP_ICM_AUX command failed, aborting.\n");
450                 goto err_free_aux;
451         }
452         if (status) {
453                 mthca_err(mdev, "MAP_ICM_AUX returned status 0x%02x, aborting.\n", status);
454                 err = -EINVAL;
455                 goto err_free_aux;
456         }
457
458         err = mthca_map_eq_icm(mdev, init_hca->eqc_base);
459         if (err) {
460                 mthca_err(mdev, "Failed to map EQ context memory, aborting.\n");
461                 goto err_unmap_aux;
462         }
463
464         /* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */
465         mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * MTHCA_MTT_SEG_SIZE,
466                                            dma_get_cache_alignment()) / MTHCA_MTT_SEG_SIZE;
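        /*
         * Worked example (editor's sketch, hypothetical sizes): with 64-byte
         * MTT segments, 3 reserved segments and a 128-byte cache line,
         * ALIGN(3 * 64, 128) = 256, so reserved_mtts becomes 256 / 64 = 4
         * and the reserved region ends on a cache-line boundary.
         */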
467
468         mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
469                                                          MTHCA_MTT_SEG_SIZE,
470                                                          mdev->limits.num_mtt_segs,
471                                                          mdev->limits.reserved_mtts,
472                                                          1, 0);
473         if (!mdev->mr_table.mtt_table) {
474                 mthca_err(mdev, "Failed to map MTT context memory, aborting.\n");
475                 err = -ENOMEM;
476                 goto err_unmap_eq;
477         }
478
479         mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
480                                                          dev_lim->mpt_entry_sz,
481                                                          mdev->limits.num_mpts,
482                                                          mdev->limits.reserved_mrws,
483                                                          1, 1);
484         if (!mdev->mr_table.mpt_table) {
485                 mthca_err(mdev, "Failed to map MPT context memory, aborting.\n");
486                 err = -ENOMEM;
487                 goto err_unmap_mtt;
488         }
489
490         mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
491                                                         dev_lim->qpc_entry_sz,
492                                                         mdev->limits.num_qps,
493                                                         mdev->limits.reserved_qps,
494                                                         0, 0);
495         if (!mdev->qp_table.qp_table) {
496                 mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
497                 err = -ENOMEM;
498                 goto err_unmap_mpt;
499         }
500
501         mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
502                                                          dev_lim->eqpc_entry_sz,
503                                                          mdev->limits.num_qps,
504                                                          mdev->limits.reserved_qps,
505                                                          0, 0);
506         if (!mdev->qp_table.eqp_table) {
507                 mthca_err(mdev, "Failed to map EQP context memory, aborting.\n");
508                 err = -ENOMEM;
509                 goto err_unmap_qp;
510         }
511
512         mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,
513                                                          MTHCA_RDB_ENTRY_SIZE,
514                                                          mdev->limits.num_qps <<
515                                                          mdev->qp_table.rdb_shift, 0,
516                                                          0, 0);
517         if (!mdev->qp_table.rdb_table) {
518                 mthca_err(mdev, "Failed to map RDB context memory, aborting.\n");
519                 err = -ENOMEM;
520                 goto err_unmap_eqp;
521         }
522
523        mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
524                                                     dev_lim->cqc_entry_sz,
525                                                     mdev->limits.num_cqs,
526                                                     mdev->limits.reserved_cqs,
527                                                     0, 0);
528         if (!mdev->cq_table.table) {
529                 mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
530                 err = -ENOMEM;
531                 goto err_unmap_rdb;
532         }
533
534         if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
535                 mdev->srq_table.table =
536                         mthca_alloc_icm_table(mdev, init_hca->srqc_base,
537                                               dev_lim->srq_entry_sz,
538                                               mdev->limits.num_srqs,
539                                               mdev->limits.reserved_srqs,
540                                               0, 0);
541                 if (!mdev->srq_table.table) {
542                         mthca_err(mdev, "Failed to map SRQ context memory, "
543                                   "aborting.\n");
544                         err = -ENOMEM;
545                         goto err_unmap_cq;
546                 }
547         }
548
549         /*
550          * It's not strictly required, but for simplicity just map the
551          * whole multicast group table now.  The table isn't very big
552          * and it's a lot easier than trying to track ref counts.
553          */
554         mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base,
555                                                       MTHCA_MGM_ENTRY_SIZE,
556                                                       mdev->limits.num_mgms +
557                                                       mdev->limits.num_amgms,
558                                                       mdev->limits.num_mgms +
559                                                       mdev->limits.num_amgms,
560                                                       0, 0);
561         if (!mdev->mcg_table.table) {
562                 mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
563                 err = -ENOMEM;
564                 goto err_unmap_srq;
565         }
566
567         return 0;
568
569 err_unmap_srq:
570         if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
571                 mthca_free_icm_table(mdev, mdev->srq_table.table);
572
573 err_unmap_cq:
574         mthca_free_icm_table(mdev, mdev->cq_table.table);
575
576 err_unmap_rdb:
577         mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
578
579 err_unmap_eqp:
580         mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
581
582 err_unmap_qp:
583         mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
584
585 err_unmap_mpt:
586         mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
587
588 err_unmap_mtt:
589         mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
590
591 err_unmap_eq:
592         mthca_unmap_eq_icm(mdev);
593
594 err_unmap_aux:
595         mthca_UNMAP_ICM_AUX(mdev, &status);
596
597 err_free_aux:
598         mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
599
600         return err;
601 }
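
/*
 * Editor's note: the error labels above unwind strictly in reverse order of
 * allocation, so a failure at any step frees exactly what was already set
 * up; mthca_free_icms() below performs the same teardown for the fully
 * initialized case.
 */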
602
603 static void mthca_free_icms(struct mthca_dev *mdev)
604 {
605         u8 status;
606
607         mthca_free_icm_table(mdev, mdev->mcg_table.table);
608         if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
609                 mthca_free_icm_table(mdev, mdev->srq_table.table);
610         mthca_free_icm_table(mdev, mdev->cq_table.table);
611         mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
612         mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
613         mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
614         mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
615         mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
616         mthca_unmap_eq_icm(mdev);
617
618         mthca_UNMAP_ICM_AUX(mdev, &status);
619         mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
620 }
621
622 static int mthca_init_arbel(struct mthca_dev *mdev)
623 {
624         struct mthca_dev_lim        dev_lim;
625         struct mthca_profile        profile;
626         struct mthca_init_hca_param init_hca;
627         s64 icm_size;
628         u8 status;
629         int err;
630
631         err = mthca_QUERY_FW(mdev, &status);
632         if (err) {
633                 mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
634                 return err;
635         }
636         if (status) {
637                 mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
638                           "aborting.\n", status);
639                 return -EINVAL;
640         }
641
642         err = mthca_ENABLE_LAM(mdev, &status);
643         if (err) {
644                 mthca_err(mdev, "ENABLE_LAM command failed, aborting.\n");
645                 return err;
646         }
647         if (status == MTHCA_CMD_STAT_LAM_NOT_PRE) {
648                 mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n");
649                 mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
650         } else if (status) {
651                 mthca_err(mdev, "ENABLE_LAM returned status 0x%02x, "
652                           "aborting.\n", status);
653                 return -EINVAL;
654         }
655
656         err = mthca_load_fw(mdev);
657         if (err) {
658                 mthca_err(mdev, "Failed to start FW, aborting.\n");
659                 goto err_disable;
660         }
661
662         err = mthca_dev_lim(mdev, &dev_lim);
663         if (err) {
664                 mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
665                 goto err_stop_fw;
666         }
667
668         profile = hca_profile;
669         profile.num_uar  = dev_lim.uar_size / PAGE_SIZE;
670         profile.num_udav = 0;
671         if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
672                 profile.num_srq = dev_lim.max_srqs;
673
674         icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
675         if (icm_size < 0) {
676                 err = icm_size;
677                 goto err_stop_fw;
678         }
679
680         err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size);
681         if (err)
682                 goto err_stop_fw;
683
684         err = mthca_INIT_HCA(mdev, &init_hca, &status);
685         if (err) {
686                 mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
687                 goto err_free_icm;
688         }
689         if (status) {
690                 mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
691                           "aborting.\n", status);
692                 err = -EINVAL;
693                 goto err_free_icm;
694         }
695
696         return 0;
697
698 err_free_icm:
699         mthca_free_icms(mdev);
700
701 err_stop_fw:
702         mthca_UNMAP_FA(mdev, &status);
703         mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
704
705 err_disable:
706         if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
707                 mthca_DISABLE_LAM(mdev, &status);
708
709         return err;
710 }
711
712 static void mthca_close_hca(struct mthca_dev *mdev)
713 {
714         u8 status;
715
716         mthca_CLOSE_HCA(mdev, 0, &status);
717
718         if (mthca_is_memfree(mdev)) {
719                 mthca_free_icms(mdev);
720
721                 mthca_UNMAP_FA(mdev, &status);
722                 mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
723
724                 if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
725                         mthca_DISABLE_LAM(mdev, &status);
726         } else
727                 mthca_SYS_DIS(mdev, &status);
728 }
729
730 static int mthca_init_hca(struct mthca_dev *mdev)
731 {
732         u8 status;
733         int err;
734         struct mthca_adapter adapter;
735
736         if (mthca_is_memfree(mdev))
737                 err = mthca_init_arbel(mdev);
738         else
739                 err = mthca_init_tavor(mdev);
740
741         if (err)
742                 return err;
743
744         err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
745         if (err) {
746                 mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
747                 goto err_close;
748         }
749         if (status) {
750                 mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
751                           "aborting.\n", status);
752                 err = -EINVAL;
753                 goto err_close;
754         }
755
756         mdev->eq_table.inta_pin = adapter.inta_pin;
757         if (!mthca_is_memfree(mdev))
758                 mdev->rev_id = adapter.revision_id;
759         memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);
760
761         return 0;
762
763 err_close:
764         mthca_close_hca(mdev);
765         return err;
766 }
767
768 static int mthca_setup_hca(struct mthca_dev *dev)
769 {
770         int err;
771         u8 status;
772
773         MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock);
774
775         err = mthca_init_uar_table(dev);
776         if (err) {
777                 mthca_err(dev, "Failed to initialize "
778                           "user access region table, aborting.\n");
779                 return err;
780         }
781
782         err = mthca_uar_alloc(dev, &dev->driver_uar);
783         if (err) {
784                 mthca_err(dev, "Failed to allocate driver access region, "
785                           "aborting.\n");
786                 goto err_uar_table_free;
787         }
788
789         dev->kar = ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
790         if (!dev->kar) {
791                 mthca_err(dev, "Couldn't map kernel access region, "
792                           "aborting.\n");
793                 err = -ENOMEM;
794                 goto err_uar_free;
795         }
796
797         err = mthca_init_pd_table(dev);
798         if (err) {
799                 mthca_err(dev, "Failed to initialize "
800                           "protection domain table, aborting.\n");
801                 goto err_kar_unmap;
802         }
803
804         err = mthca_init_mr_table(dev);
805         if (err) {
806                 mthca_err(dev, "Failed to initialize "
807                           "memory region table, aborting.\n");
808                 goto err_pd_table_free;
809         }
810
811         err = mthca_pd_alloc(dev, 1, &dev->driver_pd);
812         if (err) {
813                 mthca_err(dev, "Failed to create driver PD, "
814                           "aborting.\n");
815                 goto err_mr_table_free;
816         }
817
818         err = mthca_init_eq_table(dev);
819         if (err) {
820                 mthca_err(dev, "Failed to initialize "
821                           "event queue table, aborting.\n");
822                 goto err_pd_free;
823         }
824
825         err = mthca_cmd_use_events(dev);
826         if (err) {
827                 mthca_err(dev, "Failed to switch to event-driven "
828                           "firmware commands, aborting.\n");
829                 goto err_eq_table_free;
830         }
831
832         err = mthca_NOP(dev, &status);
833         if (err || status) {
834                 if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
835                         mthca_warn(dev, "NOP command failed to generate interrupt "
836                                    "(IRQ %d).\n",
837                                    dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector);
838                         mthca_warn(dev, "Trying again with MSI-X disabled.\n");
839                 } else {
840                         mthca_err(dev, "NOP command failed to generate interrupt "
841                                   "(IRQ %d), aborting.\n",
842                                   dev->pdev->irq);
843                         mthca_err(dev, "BIOS or ACPI interrupt routing problem?\n");
844                 }
845
846                 goto err_cmd_poll;
847         }
848
849         mthca_dbg(dev, "NOP command IRQ test passed\n");
850
851         err = mthca_init_cq_table(dev);
852         if (err) {
853                 mthca_err(dev, "Failed to initialize "
854                           "completion queue table, aborting.\n");
855                 goto err_cmd_poll;
856         }
857
858         err = mthca_init_srq_table(dev);
859         if (err) {
860                 mthca_err(dev, "Failed to initialize "
861                           "shared receive queue table, aborting.\n");
862                 goto err_cq_table_free;
863         }
864
865         err = mthca_init_qp_table(dev);
866         if (err) {
867                 mthca_err(dev, "Failed to initialize "
868                           "queue pair table, aborting.\n");
869                 goto err_srq_table_free;
870         }
871
872         err = mthca_init_av_table(dev);
873         if (err) {
874                 mthca_err(dev, "Failed to initialize "
875                           "address vector table, aborting.\n");
876                 goto err_qp_table_free;
877         }
878
879         err = mthca_init_mcg_table(dev);
880         if (err) {
881                 mthca_err(dev, "Failed to initialize "
882                           "multicast group table, aborting.\n");
883                 goto err_av_table_free;
884         }
885
886         return 0;
887
888 err_av_table_free:
889         mthca_cleanup_av_table(dev);
890
891 err_qp_table_free:
892         mthca_cleanup_qp_table(dev);
893
894 err_srq_table_free:
895         mthca_cleanup_srq_table(dev);
896
897 err_cq_table_free:
898         mthca_cleanup_cq_table(dev);
899
900 err_cmd_poll:
901         mthca_cmd_use_polling(dev);
902
903 err_eq_table_free:
904         mthca_cleanup_eq_table(dev);
905
906 err_pd_free:
907         mthca_pd_free(dev, &dev->driver_pd);
908
909 err_mr_table_free:
910         mthca_cleanup_mr_table(dev);
911
912 err_pd_table_free:
913         mthca_cleanup_pd_table(dev);
914
915 err_kar_unmap:
916         iounmap(dev->kar);
917
918 err_uar_free:
919         mthca_uar_free(dev, &dev->driver_uar);
920
921 err_uar_table_free:
922         mthca_cleanup_uar_table(dev);
923         return err;
924 }
925
926 static int mthca_request_regions(struct pci_dev *pdev, int ddr_hidden)
927 {
928         int err;
929
930         /*
931          * We can't just use pci_request_regions() because the MSI-X
932          * table is right in the middle of the first BAR.  If we did
933          * pci_request_region and grab all of the first BAR, then
934          * setting up MSI-X would fail, since the PCI core wants to do
935          * request_mem_region on the MSI-X vector table.
936          *
937          * So just request what we need right now, and request any
938          * other regions we need when setting up EQs.
939          */
940         if (!request_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
941                                 MTHCA_HCR_SIZE, DRV_NAME))
942                 return -EBUSY;
943
944         err = pci_request_region(pdev, 2, DRV_NAME);
945         if (err)
946                 goto err_bar2_failed;
947
948         if (!ddr_hidden) {
949                 err = pci_request_region(pdev, 4, DRV_NAME);
950                 if (err)
951                         goto err_bar4_failed;
952         }
953
954         return 0;
955
956 err_bar4_failed:
957         pci_release_region(pdev, 2);
958
959 err_bar2_failed:
960         release_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
961                            MTHCA_HCR_SIZE);
962
963         return err;
964 }
965
966 static void mthca_release_regions(struct pci_dev *pdev,
967                                   int ddr_hidden)
968 {
969         if (!ddr_hidden)
970                 pci_release_region(pdev, 4);
971
972         pci_release_region(pdev, 2);
973
974         release_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
975                            MTHCA_HCR_SIZE);
976 }
977
978 static int mthca_enable_msi_x(struct mthca_dev *mdev)
979 {
980         struct msix_entry entries[3];
981         int err;
982
983         entries[0].entry = 0;
984         entries[1].entry = 1;
985         entries[2].entry = 2;
986
987         err = pci_enable_msix(mdev->pdev, entries, ARRAY_SIZE(entries));
988         if (err) {
989                 if (err > 0)
990                         mthca_info(mdev, "Only %d MSI-X vectors available, "
991                                    "not using MSI-X\n", err);
992                 return err;
993         }
994
995         mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector;
996         mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector;
997         mdev->eq_table.eq[MTHCA_EQ_CMD  ].msi_x_vector = entries[2].vector;
998
999         return 0;
1000 }
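
/*
 * Editor's note: with the old pci_enable_msix() interface a positive return
 * value means the request could not be met in full and reports how many
 * vectors are actually available (hence the "Only %d MSI-X vectors
 * available" message), zero means all three vectors were allocated, and a
 * negative value is an ordinary error code.
 */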
1001
1002 /* Types of supported HCA */
1003 enum {
1004         TAVOR,                  /* MT23108                        */
1005         ARBEL_COMPAT,           /* MT25208 in Tavor compat mode   */
1006         ARBEL_NATIVE,           /* MT25208 with extended features */
1007         SINAI                   /* MT25204 */
1008 };
1009
1010 #define MTHCA_FW_VER(major, minor, subminor) \
1011         (((u64) (major) << 32) | ((u64) (minor) << 16) | (u64) (subminor))
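
/*
 * Editor's example: MTHCA_FW_VER(3, 5, 0) packs to 0x0000000300050000, so a
 * plain 64-bit compare orders firmware versions by major, then minor, then
 * subminor; the shifts in the "HCA FW version ... is old" warning in
 * __mthca_init_one() undo the packing.
 */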
1012
1013 static struct {
1014         u64 latest_fw;
1015         u32 flags;
1016 } mthca_hca_table[] = {
1017         [TAVOR]        = { .latest_fw = MTHCA_FW_VER(3, 5, 0),
1018                            .flags     = 0 },
1019         [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 8, 200),
1020                            .flags     = MTHCA_FLAG_PCIE },
1021         [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 3, 0),
1022                            .flags     = MTHCA_FLAG_MEMFREE |
1023                                         MTHCA_FLAG_PCIE },
1024         [SINAI]        = { .latest_fw = MTHCA_FW_VER(1, 2, 0),
1025                            .flags     = MTHCA_FLAG_MEMFREE |
1026                                         MTHCA_FLAG_PCIE    |
1027                                         MTHCA_FLAG_SINAI_OPT }
1028 };
1029
1030 static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
1031 {
1032         int ddr_hidden = 0;
1033         int err;
1034         struct mthca_dev *mdev;
1035
1036         printk(KERN_INFO PFX "Initializing %s\n",
1037                pci_name(pdev));
1038
1039         err = pci_enable_device(pdev);
1040         if (err) {
1041                 dev_err(&pdev->dev, "Cannot enable PCI device, "
1042                         "aborting.\n");
1043                 return err;
1044         }
1045
1046         /*
1047          * Check for BARs.  We expect 0: 1MB, 2: 8MB, 4: DDR (may not
1048          * be present)
1049          */
1050         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
1051             pci_resource_len(pdev, 0) != 1 << 20) {
1052                 dev_err(&pdev->dev, "Missing DCS, aborting.\n");
1053                 err = -ENODEV;
1054                 goto err_disable_pdev;
1055         }
1056         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
1057                 dev_err(&pdev->dev, "Missing UAR, aborting.\n");
1058                 err = -ENODEV;
1059                 goto err_disable_pdev;
1060         }
1061         if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM))
1062                 ddr_hidden = 1;
1063
1064         err = mthca_request_regions(pdev, ddr_hidden);
1065         if (err) {
1066                 dev_err(&pdev->dev, "Cannot obtain PCI resources, "
1067                         "aborting.\n");
1068                 goto err_disable_pdev;
1069         }
1070
1071         pci_set_master(pdev);
1072
1073         err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
1074         if (err) {
1075                 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
1076                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1077                 if (err) {
1078                         dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
1079                         goto err_free_res;
1080                 }
1081         }
1082         err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
1083         if (err) {
1084                 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
1085                          "consistent PCI DMA mask.\n");
1086                 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1087                 if (err) {
1088                         dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
1089                                 "aborting.\n");
1090                         goto err_free_res;
1091                 }
1092         }
1093
1094         mdev = (struct mthca_dev *) ib_alloc_device(sizeof *mdev);
1095         if (!mdev) {
1096                 dev_err(&pdev->dev, "Device struct alloc failed, "
1097                         "aborting.\n");
1098                 err = -ENOMEM;
1099                 goto err_free_res;
1100         }
1101
1102         mdev->pdev = pdev;
1103
1104         mdev->mthca_flags = mthca_hca_table[hca_type].flags;
1105         if (ddr_hidden)
1106                 mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN;
1107
1108         /*
1109          * Now reset the HCA before we touch the PCI capabilities or
1110          * attempt a firmware command, since a boot ROM may have left
1111          * the HCA in an undefined state.
1112          */
1113         err = mthca_reset(mdev);
1114         if (err) {
1115                 mthca_err(mdev, "Failed to reset HCA, aborting.\n");
1116                 goto err_free_dev;
1117         }
1118
1119         if (mthca_cmd_init(mdev)) {
1120                 mthca_err(mdev, "Failed to init command interface, aborting.\n");
1121                 goto err_free_dev;
1122         }
1123
1124         err = mthca_tune_pci(mdev);
1125         if (err)
1126                 goto err_cmd;
1127
1128         err = mthca_init_hca(mdev);
1129         if (err)
1130                 goto err_cmd;
1131
1132         if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) {
1133                 mthca_warn(mdev, "HCA FW version %d.%d.%03d is old (%d.%d.%03d is current).\n",
1134                            (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
1135                            (int) (mdev->fw_ver & 0xffff),
1136                            (int) (mthca_hca_table[hca_type].latest_fw >> 32),
1137                            (int) (mthca_hca_table[hca_type].latest_fw >> 16) & 0xffff,
1138                            (int) (mthca_hca_table[hca_type].latest_fw & 0xffff));
1139                 mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n");
1140         }
1141
1142         if (msi_x && !mthca_enable_msi_x(mdev))
1143                 mdev->mthca_flags |= MTHCA_FLAG_MSI_X;
1144
1145         err = mthca_setup_hca(mdev);
1146         if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) {
1147                 if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
1148                         pci_disable_msix(pdev);
1149                 mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X;
1150
1151                 err = mthca_setup_hca(mdev);
1152         }
1153
1154         if (err)
1155                 goto err_close;
1156
1157         err = mthca_register_device(mdev);
1158         if (err)
1159                 goto err_cleanup;
1160
1161         err = mthca_create_agents(mdev);
1162         if (err)
1163                 goto err_unregister;
1164
1165         pci_set_drvdata(pdev, mdev);
1166         mdev->hca_type = hca_type;
1167
1168         return 0;
1169
1170 err_unregister:
1171         mthca_unregister_device(mdev);
1172
1173 err_cleanup:
1174         mthca_cleanup_mcg_table(mdev);
1175         mthca_cleanup_av_table(mdev);
1176         mthca_cleanup_qp_table(mdev);
1177         mthca_cleanup_srq_table(mdev);
1178         mthca_cleanup_cq_table(mdev);
1179         mthca_cmd_use_polling(mdev);
1180         mthca_cleanup_eq_table(mdev);
1181
1182         mthca_pd_free(mdev, &mdev->driver_pd);
1183
1184         mthca_cleanup_mr_table(mdev);
1185         mthca_cleanup_pd_table(mdev);
1186         mthca_cleanup_uar_table(mdev);
1187
1188 err_close:
1189         if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
1190                 pci_disable_msix(pdev);
1191
1192         mthca_close_hca(mdev);
1193
1194 err_cmd:
1195         mthca_cmd_cleanup(mdev);
1196
1197 err_free_dev:
1198         ib_dealloc_device(&mdev->ib_dev);
1199
1200 err_free_res:
1201         mthca_release_regions(pdev, ddr_hidden);
1202
1203 err_disable_pdev:
1204         pci_disable_device(pdev);
1205         pci_set_drvdata(pdev, NULL);
1206         return err;
1207 }
1208
1209 static void __mthca_remove_one(struct pci_dev *pdev)
1210 {
1211         struct mthca_dev *mdev = pci_get_drvdata(pdev);
1212         u8 status;
1213         int p;
1214
1215         if (mdev) {
1216                 mthca_free_agents(mdev);
1217                 mthca_unregister_device(mdev);
1218
1219                 for (p = 1; p <= mdev->limits.num_ports; ++p)
1220                         mthca_CLOSE_IB(mdev, p, &status);
1221
1222                 mthca_cleanup_mcg_table(mdev);
1223                 mthca_cleanup_av_table(mdev);
1224                 mthca_cleanup_qp_table(mdev);
1225                 mthca_cleanup_srq_table(mdev);
1226                 mthca_cleanup_cq_table(mdev);
1227                 mthca_cmd_use_polling(mdev);
1228                 mthca_cleanup_eq_table(mdev);
1229
1230                 mthca_pd_free(mdev, &mdev->driver_pd);
1231
1232                 mthca_cleanup_mr_table(mdev);
1233                 mthca_cleanup_pd_table(mdev);
1234
1235                 iounmap(mdev->kar);
1236                 mthca_uar_free(mdev, &mdev->driver_uar);
1237                 mthca_cleanup_uar_table(mdev);
1238                 mthca_close_hca(mdev);
1239                 mthca_cmd_cleanup(mdev);
1240
1241                 if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
1242                         pci_disable_msix(pdev);
1243
1244                 ib_dealloc_device(&mdev->ib_dev);
1245                 mthca_release_regions(pdev, mdev->mthca_flags &
1246                                       MTHCA_FLAG_DDR_HIDDEN);
1247                 pci_disable_device(pdev);
1248                 pci_set_drvdata(pdev, NULL);
1249         }
1250 }
1251
1252 int __mthca_restart_one(struct pci_dev *pdev)
1253 {
1254         struct mthca_dev *mdev;
1255         int hca_type;
1256
1257         mdev = pci_get_drvdata(pdev);
1258         if (!mdev)
1259                 return -ENODEV;
1260         hca_type = mdev->hca_type;
1261         __mthca_remove_one(pdev);
1262         return __mthca_init_one(pdev, hca_type);
1263 }
1264
1265 static int __devinit mthca_init_one(struct pci_dev *pdev,
1266                                     const struct pci_device_id *id)
1267 {
1268         static int mthca_version_printed = 0;
1269         int ret;
1270
1271         mutex_lock(&mthca_device_mutex);
1272
1273         if (!mthca_version_printed) {
1274                 printk(KERN_INFO "%s", mthca_version);
1275                 ++mthca_version_printed;
1276         }
1277
1278         if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
1279                 printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
1280                        pci_name(pdev), id->driver_data);
1281                 mutex_unlock(&mthca_device_mutex);
1282                 return -ENODEV;
1283         }
1284
1285         ret = __mthca_init_one(pdev, id->driver_data);
1286
1287         mutex_unlock(&mthca_device_mutex);
1288
1289         return ret;
1290 }
1291
1292 static void __devexit mthca_remove_one(struct pci_dev *pdev)
1293 {
1294         mutex_lock(&mthca_device_mutex);
1295         __mthca_remove_one(pdev);
1296         mutex_unlock(&mthca_device_mutex);
1297 }
1298
1299 static struct pci_device_id mthca_pci_table[] = {
1300         { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR),
1301           .driver_data = TAVOR },
1302         { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_TAVOR),
1303           .driver_data = TAVOR },
1304         { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT),
1305           .driver_data = ARBEL_COMPAT },
1306         { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT),
1307           .driver_data = ARBEL_COMPAT },
1308         { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL),
1309           .driver_data = ARBEL_NATIVE },
1310         { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL),
1311           .driver_data = ARBEL_NATIVE },
1312         { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI),
1313           .driver_data = SINAI },
1314         { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI),
1315           .driver_data = SINAI },
1316         { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI_OLD),
1317           .driver_data = SINAI },
1318         { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI_OLD),
1319           .driver_data = SINAI },
1320         { 0, }
1321 };
1322
1323 MODULE_DEVICE_TABLE(pci, mthca_pci_table);
1324
1325 static struct pci_driver mthca_driver = {
1326         .name           = DRV_NAME,
1327         .id_table       = mthca_pci_table,
1328         .probe          = mthca_init_one,
1329         .remove         = __devexit_p(mthca_remove_one)
1330 };
1331
1332 static void __init __mthca_check_profile_val(const char *name, int *pval,
1333                                              int pval_default)
1334 {
1335         /* value must be positive and a power of 2 */
1336         int old_pval = *pval;
1337
1338         if (old_pval <= 0)
1339                 *pval = pval_default;
1340         else
1341                 *pval = roundup_pow_of_two(old_pval);
1342
1343         if (old_pval != *pval) {
1344                 printk(KERN_WARNING PFX "Invalid value %d for %s in module parameter.\n",
1345                        old_pval, name);
1346                 printk(KERN_WARNING PFX "Corrected %s to %d.\n", name, *pval);
1347         }
1348 }
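
/*
 * Editor's example: num_qp=100000 is rounded up to
 * roundup_pow_of_two(100000) = 131072, while num_qp=0 or a negative value
 * falls back to the default of 65536 (1 << 16); either correction is
 * reported by the warnings above.
 */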
1349
1350 #define mthca_check_profile_val(name, default)                          \
1351         __mthca_check_profile_val(#name, &hca_profile.name, default)
1352
1353 static void __init mthca_validate_profile(void)
1354 {
1355         mthca_check_profile_val(num_qp,            MTHCA_DEFAULT_NUM_QP);
1356         mthca_check_profile_val(rdb_per_qp,        MTHCA_DEFAULT_RDB_PER_QP);
1357         mthca_check_profile_val(num_cq,            MTHCA_DEFAULT_NUM_CQ);
1358         mthca_check_profile_val(num_mcg,           MTHCA_DEFAULT_NUM_MCG);
1359         mthca_check_profile_val(num_mpt,           MTHCA_DEFAULT_NUM_MPT);
1360         mthca_check_profile_val(num_mtt,           MTHCA_DEFAULT_NUM_MTT);
1361         mthca_check_profile_val(num_udav,          MTHCA_DEFAULT_NUM_UDAV);
1362         mthca_check_profile_val(fmr_reserved_mtts, MTHCA_DEFAULT_NUM_RESERVED_MTTS);
1363
1364         if (hca_profile.fmr_reserved_mtts >= hca_profile.num_mtt) {
1365                 printk(KERN_WARNING PFX "Invalid fmr_reserved_mtts module parameter %d.\n",
1366                        hca_profile.fmr_reserved_mtts);
1367                 printk(KERN_WARNING PFX "(Must be smaller than num_mtt %d)\n",
1368                        hca_profile.num_mtt);
1369                 hca_profile.fmr_reserved_mtts = hca_profile.num_mtt / 2;
1370                 printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n",
1371                        hca_profile.fmr_reserved_mtts);
1372         }
1373 }
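
/*
 * Editor's example: with the default num_mtt of 1 << 20, setting
 * fmr_reserved_mtts=1048576 (equal to num_mtt) trips the check above and
 * is corrected to num_mtt / 2 = 524288.
 */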
1374
1375 static int __init mthca_init(void)
1376 {
1377         int ret;
1378
1379         mthca_validate_profile();
1380
1381         ret = mthca_catas_init();
1382         if (ret)
1383                 return ret;
1384
1385         ret = pci_register_driver(&mthca_driver);
1386         if (ret < 0) {
1387                 mthca_catas_cleanup();
1388                 return ret;
1389         }
1390
1391         return 0;
1392 }
1393
1394 static void __exit mthca_cleanup(void)
1395 {
1396         pci_unregister_driver(&mthca_driver);
1397         mthca_catas_cleanup();
1398 }
1399
1400 module_init(mthca_init);
1401 module_exit(mthca_cleanup);