Merge branches 'cxgb3', 'endian', 'ipath', 'ipoib', 'iser', 'mad', 'misc', 'mlx4...
author    Roland Dreier <rolandd@cisco.com>
          Wed, 25 Mar 2009 03:44:41 +0000 (20:44 -0700)
committer Roland Dreier <rolandd@cisco.com>
          Wed, 25 Mar 2009 03:44:41 +0000 (20:44 -0700)
43 files changed:
drivers/infiniband/core/cm.c
drivers/infiniband/core/cm_msgs.h
drivers/infiniband/core/device.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/mad_rmpp.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/sysfs.c
drivers/infiniband/hw/cxgb3/cxio_hal.c
drivers/infiniband/hw/cxgb3/cxio_hal.h
drivers/infiniband/hw/cxgb3/cxio_wr.h
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/cxgb3/iwch_ev.c
drivers/infiniband/hw/cxgb3/iwch_qp.c
drivers/infiniband/hw/ehca/ehca_sqp.c
drivers/infiniband/hw/ipath/ipath_eeprom.c
drivers/infiniband/hw/ipath/ipath_init_chip.c
drivers/infiniband/hw/ipath/ipath_mad.c
drivers/infiniband/hw/ipath/ipath_rc.c
drivers/infiniband/hw/ipath/ipath_sdma.c
drivers/infiniband/hw/ipath/ipath_uc.c
drivers/infiniband/hw/ipath/ipath_ud.c
drivers/infiniband/hw/ipath/ipath_user_pages.c
drivers/infiniband/hw/ipath/ipath_user_sdma.c
drivers/infiniband/hw/ipath/ipath_verbs.c
drivers/infiniband/hw/ipath/ipath_verbs.h
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mthca/mthca_mad.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/net/mlx4/Makefile
drivers/net/mlx4/catas.c
drivers/net/mlx4/eq.c
drivers/net/mlx4/main.c
drivers/net/mlx4/mlx4.h
drivers/net/mlx4/port.c
drivers/net/mlx4/sense.c [new file with mode: 0644]
include/linux/mlx4/cmd.h
include/linux/mlx4/device.h
include/rdma/ib_cm.h
include/rdma/ib_mad.h
include/rdma/ib_smi.h

diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f1e82a9..5130fc5 100644
@@ -927,8 +927,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
        unsigned long flags;
        int ret = 0;
 
-       service_mask = service_mask ? service_mask :
-                      __constant_cpu_to_be64(~0ULL);
+       service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
        service_id &= service_mask;
        if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
            (service_id != IB_CM_ASSIGN_SERVICE_ID))
@@ -954,7 +953,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
        spin_lock_irqsave(&cm.lock, flags);
        if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
                cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
-               cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+               cm_id->service_mask = ~cpu_to_be64(0);
        } else {
                cm_id->service_id = service_id;
                cm_id->service_mask = service_mask;
@@ -1134,7 +1133,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
                        goto error1;
        }
        cm_id->service_id = param->service_id;
-       cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+       cm_id->service_mask = ~cpu_to_be64(0);
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                    param->primary_path->packet_life_time) * 2 +
                                 cm_convert_to_ms(
@@ -1545,7 +1544,7 @@ static int cm_req_handler(struct cm_work *work)
        cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
        cm_id_priv->id.context = listen_cm_id_priv->id.context;
        cm_id_priv->id.service_id = req_msg->service_id;
-       cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
+       cm_id_priv->id.service_mask = ~cpu_to_be64(0);
 
        cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
        cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
@@ -2898,7 +2897,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
                goto out;
 
        cm_id->service_id = param->service_id;
-       cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
+       cm_id->service_mask = ~cpu_to_be64(0);
        cm_id_priv->timeout_ms = param->timeout_ms;
        cm_id_priv->max_cm_retries = param->max_cm_retries;
        ret = cm_alloc_msg(cm_id_priv, &msg);
@@ -2992,7 +2991,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
        cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
        cm_id_priv->id.context = cur_cm_id_priv->id.context;
        cm_id_priv->id.service_id = sidr_req_msg->service_id;
-       cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
+       cm_id_priv->id.service_mask = ~cpu_to_be64(0);
 
        cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
        cm_process_work(cm_id_priv, work);
@@ -3789,7 +3788,7 @@ static int __init ib_cm_init(void)
        rwlock_init(&cm.device_lock);
        spin_lock_init(&cm.lock);
        cm.listen_service_table = RB_ROOT;
-       cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
+       cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
        cm.remote_id_table = RB_ROOT;
        cm.remote_qp_table = RB_ROOT;
        cm.remote_sidr_table = RB_ROOT;
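
A note on the idiom used throughout this merge's 'endian' branch: ~cpu_to_be64(0) produces the same bit pattern as the old __constant_cpu_to_be64(~0ULL), because an all-ones value looks identical in either byte order, and it remains a compile-time constant without the deprecated __constant_ helper. A minimal userspace sketch of why the two forms agree (cpu_to_be64_demo stands in for the kernel macro on a little-endian host):

    #include <assert.h>
    #include <stdint.h>

    /* stand-in for cpu_to_be64() on a little-endian machine */
    static uint64_t cpu_to_be64_demo(uint64_t x)
    {
            return __builtin_bswap64(x);
    }

    int main(void)
    {
            /* bswap(~0) == ~bswap(0) == ~0: all-ones has no byte order */
            assert(~cpu_to_be64_demo(0) == cpu_to_be64_demo(~0ULL));
            return 0;
    }
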
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index aec9c7a..7e63c08 100644
 
 #define IB_CM_CLASS_VERSION    2 /* IB specification 1.2 */
 
-#define CM_REQ_ATTR_ID     __constant_htons(0x0010)
-#define CM_MRA_ATTR_ID     __constant_htons(0x0011)
-#define CM_REJ_ATTR_ID     __constant_htons(0x0012)
-#define CM_REP_ATTR_ID     __constant_htons(0x0013)
-#define CM_RTU_ATTR_ID     __constant_htons(0x0014)
-#define CM_DREQ_ATTR_ID            __constant_htons(0x0015)
-#define CM_DREP_ATTR_ID            __constant_htons(0x0016)
-#define CM_SIDR_REQ_ATTR_ID __constant_htons(0x0017)
-#define CM_SIDR_REP_ATTR_ID __constant_htons(0x0018)
-#define CM_LAP_ATTR_ID      __constant_htons(0x0019)
-#define CM_APR_ATTR_ID      __constant_htons(0x001A)
+#define CM_REQ_ATTR_ID         cpu_to_be16(0x0010)
+#define CM_MRA_ATTR_ID         cpu_to_be16(0x0011)
+#define CM_REJ_ATTR_ID         cpu_to_be16(0x0012)
+#define CM_REP_ATTR_ID         cpu_to_be16(0x0013)
+#define CM_RTU_ATTR_ID         cpu_to_be16(0x0014)
+#define CM_DREQ_ATTR_ID                cpu_to_be16(0x0015)
+#define CM_DREP_ATTR_ID                cpu_to_be16(0x0016)
+#define CM_SIDR_REQ_ATTR_ID    cpu_to_be16(0x0017)
+#define CM_SIDR_REP_ATTR_ID    cpu_to_be16(0x0018)
+#define CM_LAP_ATTR_ID         cpu_to_be16(0x0019)
+#define CM_APR_ATTR_ID         cpu_to_be16(0x001A)
 
 enum cm_msg_sequence {
        CM_MSG_SEQUENCE_REQ,
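
The __constant_htons() forms date from when the generic byte-order helpers were not guaranteed to be integer constant expressions; cpu_to_be16() now folds at compile time when its argument is constant, so it can be used directly in initializers and comparisons. A hedged sketch of the swap-free comparison style this enables (the helper below is illustrative, not actual cm.c code):

    /* attr_id arrives in network byte order; the constants fold to
     * big-endian values, so no runtime swap is needed */
    static int cm_attr_is_connection_msg(__be16 attr_id)
    {
            return attr_id == CM_REQ_ATTR_ID ||
                   attr_id == CM_REP_ATTR_ID ||
                   attr_id == CM_RTU_ATTR_ID;
    }
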
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 7913b80..d1fba41 100644
@@ -193,7 +193,7 @@ void ib_dealloc_device(struct ib_device *device)
 
        BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);
 
-       ib_device_unregister_sysfs(device);
+       kobject_put(&device->dev.kobj);
 }
 EXPORT_SYMBOL(ib_dealloc_device);
 
@@ -348,6 +348,8 @@ void ib_unregister_device(struct ib_device *device)
 
        mutex_unlock(&device_mutex);
 
+       ib_device_unregister_sysfs(device);
+
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                kfree(context);
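
This hunk pairs with the sysfs.c changes further down: sysfs teardown moves into ib_unregister_device(), and ib_device_unregister_sysfs() takes an extra kobject reference that ib_dealloc_device() later drops. The embedded device kobject therefore stays allocated between unregister and dealloc, which is what allows the racy ibdev_is_alive() checks to be deleted from the attribute handlers. The reference flow, sketched as an outline (not additional kernel code):

    /*
     * Teardown order after this patch:
     *
     *   ib_unregister_device(dev)
     *     -> ib_device_unregister_sysfs(dev)
     *          kobject_get(&dev->dev.kobj);   held until dealloc
     *          ...remove port kobjects and device attributes...
     *
     *   ib_dealloc_device(dev)
     *     -> kobject_put(&dev->dev.kobj);     final put frees the device
     */
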
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 5c54fc2..de922a0 100644
@@ -301,6 +301,16 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
        mad_agent_priv->agent.context = context;
        mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
        mad_agent_priv->agent.port_num = port_num;
+       spin_lock_init(&mad_agent_priv->lock);
+       INIT_LIST_HEAD(&mad_agent_priv->send_list);
+       INIT_LIST_HEAD(&mad_agent_priv->wait_list);
+       INIT_LIST_HEAD(&mad_agent_priv->done_list);
+       INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
+       INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
+       INIT_LIST_HEAD(&mad_agent_priv->local_list);
+       INIT_WORK(&mad_agent_priv->local_work, local_completions);
+       atomic_set(&mad_agent_priv->refcount, 1);
+       init_completion(&mad_agent_priv->comp);
 
        spin_lock_irqsave(&port_priv->reg_lock, flags);
        mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
@@ -350,17 +360,6 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
        list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
        spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 
-       spin_lock_init(&mad_agent_priv->lock);
-       INIT_LIST_HEAD(&mad_agent_priv->send_list);
-       INIT_LIST_HEAD(&mad_agent_priv->wait_list);
-       INIT_LIST_HEAD(&mad_agent_priv->done_list);
-       INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
-       INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
-       INIT_LIST_HEAD(&mad_agent_priv->local_list);
-       INIT_WORK(&mad_agent_priv->local_work, local_completions);
-       atomic_set(&mad_agent_priv->refcount, 1);
-       init_completion(&mad_agent_priv->comp);
-
        return &mad_agent_priv->agent;
 
 error4:
@@ -743,9 +742,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
                break;
        case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
                kmem_cache_free(ib_mad_cache, mad_priv);
-               kfree(local);
-               ret = 1;
-               goto out;
+               break;
        case IB_MAD_RESULT_SUCCESS:
                /* Treat like an incoming receive MAD */
                port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
@@ -756,10 +753,12 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
                                                        &mad_priv->mad.mad);
                }
                if (!port_priv || !recv_mad_agent) {
+                       /*
+                        * No receiving agent so drop packet and
+                        * generate send completion.
+                        */
                        kmem_cache_free(ib_mad_cache, mad_priv);
-                       kfree(local);
-                       ret = 0;
-                       goto out;
+                       break;
                }
                local->mad_priv = mad_priv;
                local->recv_mad_agent = recv_mad_agent;
@@ -2356,7 +2355,7 @@ static void local_completions(struct work_struct *work)
        struct ib_mad_local_private *local;
        struct ib_mad_agent_private *recv_mad_agent;
        unsigned long flags;
-       int recv = 0;
+       int free_mad;
        struct ib_wc wc;
        struct ib_mad_send_wc mad_send_wc;
 
@@ -2370,14 +2369,15 @@ static void local_completions(struct work_struct *work)
                                   completion_list);
                list_del(&local->completion_list);
                spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+               free_mad = 0;
                if (local->mad_priv) {
                        recv_mad_agent = local->recv_mad_agent;
                        if (!recv_mad_agent) {
                                printk(KERN_ERR PFX "No receive MAD agent for local completion\n");
+                               free_mad = 1;
                                goto local_send_completion;
                        }
 
-                       recv = 1;
                        /*
                         * Defined behavior is to complete response
                         * before request
@@ -2422,7 +2422,7 @@ local_send_completion:
 
                spin_lock_irqsave(&mad_agent_priv->lock, flags);
                atomic_dec(&mad_agent_priv->refcount);
-               if (!recv)
+               if (free_mad)
                        kmem_cache_free(ib_mad_cache, local->mad_priv);
                kfree(local);
        }
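
The reordering in ib_register_mad_agent() is an initialize-before-publish fix: once the agent sits on port_priv->agent_list with hi_tid assigned, receive and completion paths can look it up, so its lock, lists, work structs, and refcount must already be live at that point. The related local_completions() change replaces the inverted `recv` flag with an explicit `free_mad`, making it clearer that the MAD buffer is freed exactly when no receiving agent consumed it. The publish pattern in outline (field names follow the hunks above):

    /* 1. make the object fully operational ... */
    spin_lock_init(&mad_agent_priv->lock);
    INIT_LIST_HEAD(&mad_agent_priv->send_list);
    atomic_set(&mad_agent_priv->refcount, 1);
    init_completion(&mad_agent_priv->comp);

    /* 2. ... only then let other contexts find it */
    spin_lock_irqsave(&port_priv->reg_lock, flags);
    list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
    spin_unlock_irqrestore(&port_priv->reg_lock, flags);
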
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 3af2b84..57a3c6f 100644
@@ -735,7 +735,7 @@ process_rmpp_data(struct ib_mad_agent_private *agent,
                goto bad;
        }
 
-       if (rmpp_hdr->seg_num == __constant_htonl(1)) {
+       if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
                if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
                        rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
                        goto bad;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7863a50..1865049 100644
@@ -395,6 +395,8 @@ static void update_sm_ah(struct work_struct *work)
        }
 
        spin_lock_irq(&port->ah_lock);
+       if (port->sm_ah)
+               kref_put(&port->sm_ah->ref, free_sm_ah);
        port->sm_ah = new_ah;
        spin_unlock_irq(&port->ah_lock);
 
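
Before this hunk, update_sm_ah() overwrote port->sm_ah without releasing the previous entry, leaking an address handle every time the SM moved or reregistered; the fix drops the stale kref under ah_lock before installing the new handle. A common variant does the swap under the lock and the put after it, useful when the release function might sleep (not asserted for free_sm_ah here); sketched:

    spin_lock_irq(&port->ah_lock);
    old_ah = port->sm_ah;            /* NULL on the very first update */
    port->sm_ah = new_ah;
    spin_unlock_irq(&port->ah_lock);

    if (old_ah)
            kref_put(&old_ah->ref, free_sm_ah);  /* release outside the lock */
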
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index b43f7d3..5c04cfb 100644
@@ -66,11 +66,6 @@ struct port_table_attribute {
        int                     index;
 };
 
-static inline int ibdev_is_alive(const struct ib_device *dev)
-{
-       return dev->reg_state == IB_DEV_REGISTERED;
-}
-
 static ssize_t port_attr_show(struct kobject *kobj,
                              struct attribute *attr, char *buf)
 {
@@ -80,8 +75,6 @@ static ssize_t port_attr_show(struct kobject *kobj,
 
        if (!port_attr->show)
                return -EIO;
-       if (!ibdev_is_alive(p->ibdev))
-               return -ENODEV;
 
        return port_attr->show(p, port_attr, buf);
 }
@@ -562,9 +555,6 @@ static ssize_t show_node_type(struct device *device,
 {
        struct ib_device *dev = container_of(device, struct ib_device, dev);
 
-       if (!ibdev_is_alive(dev))
-               return -ENODEV;
-
        switch (dev->node_type) {
        case RDMA_NODE_IB_CA:     return sprintf(buf, "%d: CA\n", dev->node_type);
        case RDMA_NODE_RNIC:      return sprintf(buf, "%d: RNIC\n", dev->node_type);
@@ -581,9 +571,6 @@ static ssize_t show_sys_image_guid(struct device *device,
        struct ib_device_attr attr;
        ssize_t ret;
 
-       if (!ibdev_is_alive(dev))
-               return -ENODEV;
-
        ret = ib_query_device(dev, &attr);
        if (ret)
                return ret;
@@ -600,9 +587,6 @@ static ssize_t show_node_guid(struct device *device,
 {
        struct ib_device *dev = container_of(device, struct ib_device, dev);
 
-       if (!ibdev_is_alive(dev))
-               return -ENODEV;
-
        return sprintf(buf, "%04x:%04x:%04x:%04x\n",
                       be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
                       be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
@@ -848,6 +832,9 @@ void ib_device_unregister_sysfs(struct ib_device *device)
        struct kobject *p, *t;
        struct ib_port *port;
 
+       /* Hold kobject until ib_dealloc_device() */
+       kobject_get(&device->dev.kobj);
+
        list_for_each_entry_safe(p, t, &device->port_list, entry) {
                list_del(&p->entry);
                port = container_of(p, struct ib_port, kobj);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 4dcf08b..d4d7204 100644
@@ -450,7 +450,7 @@ static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
        if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
                return 0;
 
-       if ((CQE_OPCODE(*cqe) == T3_SEND) && RQ_TYPE(*cqe) &&
+       if (CQE_SEND_OPCODE(*cqe) && RQ_TYPE(*cqe) &&
            Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
                return 0;
 
@@ -938,6 +938,23 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
        if (!rdev_p->t3cdev_p)
                rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
        rdev_p->t3cdev_p->ulp = (void *) rdev_p;
+
+       err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_EMBEDDED_INFO,
+                                        &(rdev_p->fw_info));
+       if (err) {
+               printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
+                    __func__, rdev_p->t3cdev_p, err);
+               goto err1;
+       }
+       if (G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers) != CXIO_FW_MAJ) {
+               printk(KERN_ERR MOD "fatal firmware version mismatch: "
+                      "need version %u but adapter has version %u\n",
+                      CXIO_FW_MAJ,
+                      G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers));
+               err = -EINVAL;
+               goto err1;
+       }
+
        err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS,
                                         &(rdev_p->rnic_info));
        if (err) {
@@ -1204,11 +1221,12 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
                }
 
                /* incoming SEND with no receive posted failures */
-               if ((CQE_OPCODE(*hw_cqe) == T3_SEND) && RQ_TYPE(*hw_cqe) &&
+               if (CQE_SEND_OPCODE(*hw_cqe) && RQ_TYPE(*hw_cqe) &&
                    Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
                        ret = -1;
                        goto skip_cqe;
                }
+               BUG_ON((*cqe_flushed == 0) && !SW_CQE(*hw_cqe));
                goto proc_cqe;
        }
 
@@ -1223,6 +1241,13 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
                 * then we complete this with TPT_ERR_MSN and mark the wq in
                 * error.
                 */
+
+               if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
+                       wq->error = 1;
+                       ret = -1;
+                       goto skip_cqe;
+               }
+
                if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
                        wq->error = 1;
                        hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
@@ -1277,6 +1302,7 @@ proc_cqe:
                        cxio_hal_pblpool_free(wq->rdev,
                                wq->rq[Q_PTR2IDX(wq->rq_rptr,
                                wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE);
+               BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr));
                wq->rq_rptr++;
        }
 
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 656fe47..e44dc22 100644
@@ -61,6 +61,8 @@
 
 #define T3_MAX_DEV_NAME_LEN 32
 
+#define CXIO_FW_MAJ 7
+
 struct cxio_hal_ctrl_qp {
        u32 wptr;
        u32 rptr;
@@ -108,6 +110,7 @@ struct cxio_rdev {
        struct gen_pool *pbl_pool;
        struct gen_pool *rqt_pool;
        struct list_head entry;
+       struct ch_embedded_info fw_info;
 };
 
 static inline int cxio_num_stags(struct cxio_rdev *rdev_p)
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 04618f7..ff9be1a 100644
@@ -604,6 +604,12 @@ struct t3_cqe {
 #define CQE_STATUS(x)     (G_CQE_STATUS(be32_to_cpu((x).header)))
 #define CQE_OPCODE(x)     (G_CQE_OPCODE(be32_to_cpu((x).header)))
 
+#define CQE_SEND_OPCODE(x)( \
+       (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \
+       (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \
+       (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \
+       (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))
+
 #define CQE_LEN(x)        (be32_to_cpu((x).len))
 
 /* used for RQ completion processing */
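
CQE_SEND_OPCODE() exists because the earlier `CQE_OPCODE(*cqe) == T3_SEND` tests matched only the plain SEND, mishandling completions for the solicited-event and invalidate variants. An equivalent static-inline form, shown purely as a readability sketch (the driver keeps the macro):

    static inline int cqe_is_send(const struct t3_cqe *cqe)
    {
            switch (G_CQE_OPCODE(be32_to_cpu(cqe->header))) {
            case T3_SEND:
            case T3_SEND_WITH_SE:
            case T3_SEND_WITH_INV:
            case T3_SEND_WITH_SE_INV:
                    return 1;
            default:
                    return 0;
            }
    }
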
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 44e936e..8699947 100644
@@ -1678,6 +1678,9 @@ static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 {
        struct iwch_ep *ep = ctx;
 
+       if (state_read(&ep->com) != FPDU_MODE)
+               return CPL_RET_BUF_DONE;
+
        PDBG("%s ep %p\n", __func__, ep);
        skb_pull(skb, sizeof(struct cpl_rdma_terminate));
        PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
index 7b67a67..743c5d8 100644
@@ -179,11 +179,6 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
        case TPT_ERR_BOUND:
        case TPT_ERR_INVALIDATE_SHARED_MR:
        case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
-               printk(KERN_ERR "%s - CQE Err qpid 0x%x opcode %d status 0x%x "
-                      "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
-                      CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
-                      CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
-                      CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
                (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
                post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
                break;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 19661b2..c758fbd 100644
@@ -99,8 +99,8 @@ static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
        if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
                plen = 4;
                wqe->write.sgl[0].stag = wr->ex.imm_data;
-               wqe->write.sgl[0].len = __constant_cpu_to_be32(0);
-               wqe->write.num_sgle = __constant_cpu_to_be32(0);
+               wqe->write.sgl[0].len = cpu_to_be32(0);
+               wqe->write.num_sgle = cpu_to_be32(0);
                *flit_cnt = 6;
        } else {
                plen = 0;
@@ -195,15 +195,12 @@ static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
        return 0;
 }
 
-/*
- * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now.
- */
 static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
                            u32 num_sgle, u32 * pbl_addr, u8 * page_size)
 {
        int i;
        struct iwch_mr *mhp;
-       u32 offset;
+       u64 offset;
        for (i = 0; i < num_sgle; i++) {
 
                mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
@@ -235,8 +232,8 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
                        return -EINVAL;
                }
                offset = sg_list[i].addr - mhp->attr.va_fbo;
-               offset += ((u32) mhp->attr.va_fbo) %
-                         (1UL << (12 + mhp->attr.page_size));
+               offset += mhp->attr.va_fbo &
+                         ((1UL << (12 + mhp->attr.page_size)) - 1);
                pbl_addr[i] = ((mhp->attr.pbl_addr -
                                rhp->rdev.rnic_info.pbl_base) >> 3) +
                              (offset >> (12 + mhp->attr.page_size));
@@ -266,8 +263,8 @@ static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
                wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
 
                /* to in the WQE == the offset into the page */
-               wqe->recv.sgl[i].to = cpu_to_be64(((u32) wr->sg_list[i].addr) %
-                               (1UL << (12 + page_size[i])));
+               wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
+                               ((1UL << (12 + page_size[i])) - 1));
 
                /* pbl_addr is the adapters address in the PBL */
                wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
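
Two related fixes in iwch_sgl2pbl_map(): `offset` widens to u64 so the distance from the region base cannot wrap for memory regions over 4GB, and the in-page offset of va_fbo is taken with a power-of-two mask instead of a modulo applied to a u32-truncated value. A hypothetical case showing where a 32-bit offset breaks (all values invented for illustration):

    u64 va_fbo = 0x2000000000ULL;           /* region base */
    u64 addr   = va_fbo + 0x180000000ULL;   /* SGE 6GB into an 8GB region */

    u32 off32 = addr - va_fbo;              /* wraps to 0x80000000 */
    u64 off64 = addr - va_fbo;              /* 0x180000000, correct */

    /* the PBL index is offset >> (12 + page_size); with off32 the
     * high bits are lost and the wrong page would be mapped */
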
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 44447aa..c568b28 100644
 #include "ehca_iverbs.h"
 #include "hcp_if.h"
 
-#define IB_MAD_STATUS_REDIRECT         __constant_htons(0x0002)
-#define IB_MAD_STATUS_UNSUP_VERSION    __constant_htons(0x0004)
-#define IB_MAD_STATUS_UNSUP_METHOD     __constant_htons(0x0008)
+#define IB_MAD_STATUS_REDIRECT         cpu_to_be16(0x0002)
+#define IB_MAD_STATUS_UNSUP_VERSION    cpu_to_be16(0x0004)
+#define IB_MAD_STATUS_UNSUP_METHOD     cpu_to_be16(0x0008)
 
-#define IB_PMA_CLASS_PORT_INFO         __constant_htons(0x0001)
+#define IB_PMA_CLASS_PORT_INFO         cpu_to_be16(0x0001)
 
 /**
  * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
index dc37277..fc71819 100644
@@ -772,8 +772,8 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
                         "0x%x, not 0x%x\n", csum, ifp->if_csum);
                goto done;
        }
-       if (*(__be64 *) ifp->if_guid == 0ULL ||
-           *(__be64 *) ifp->if_guid == __constant_cpu_to_be64(-1LL)) {
+       if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
+           *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
                ipath_dev_err(dd, "Invalid GUID %llx from flash; "
                              "ignoring\n",
                              *(unsigned long long *) ifp->if_guid);
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 64aeefb..077879c 100644
@@ -455,7 +455,7 @@ static void init_shadow_tids(struct ipath_devdata *dd)
        if (!addrs) {
                ipath_dev_err(dd, "failed to allocate shadow dma handle "
                              "array, no expected sends!\n");
-               vfree(dd->ipath_pageshadow);
+               vfree(pages);
                dd->ipath_pageshadow = NULL;
                return;
        }
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 17a1231..16a702d 100644
 #include "ipath_verbs.h"
 #include "ipath_common.h"
 
-#define IB_SMP_UNSUP_VERSION   __constant_htons(0x0004)
-#define IB_SMP_UNSUP_METHOD    __constant_htons(0x0008)
-#define IB_SMP_UNSUP_METH_ATTR __constant_htons(0x000C)
-#define IB_SMP_INVALID_FIELD   __constant_htons(0x001C)
+#define IB_SMP_UNSUP_VERSION   cpu_to_be16(0x0004)
+#define IB_SMP_UNSUP_METHOD    cpu_to_be16(0x0008)
+#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
+#define IB_SMP_INVALID_FIELD   cpu_to_be16(0x001C)
 
 static int reply(struct ib_smp *smp)
 {
@@ -789,12 +789,12 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
        return recv_subn_get_pkeytable(smp, ibdev);
 }
 
-#define IB_PMA_CLASS_PORT_INFO         __constant_htons(0x0001)
-#define IB_PMA_PORT_SAMPLES_CONTROL    __constant_htons(0x0010)
-#define IB_PMA_PORT_SAMPLES_RESULT     __constant_htons(0x0011)
-#define IB_PMA_PORT_COUNTERS           __constant_htons(0x0012)
-#define IB_PMA_PORT_COUNTERS_EXT       __constant_htons(0x001D)
-#define IB_PMA_PORT_SAMPLES_RESULT_EXT __constant_htons(0x001E)
+#define IB_PMA_CLASS_PORT_INFO         cpu_to_be16(0x0001)
+#define IB_PMA_PORT_SAMPLES_CONTROL    cpu_to_be16(0x0010)
+#define IB_PMA_PORT_SAMPLES_RESULT     cpu_to_be16(0x0011)
+#define IB_PMA_PORT_COUNTERS           cpu_to_be16(0x0012)
+#define IB_PMA_PORT_COUNTERS_EXT       cpu_to_be16(0x001D)
+#define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E)
 
 struct ib_perf {
        u8 base_version;
@@ -884,19 +884,19 @@ struct ib_pma_portcounters {
        __be32 port_rcv_packets;
 } __attribute__ ((packed));
 
-#define IB_PMA_SEL_SYMBOL_ERROR                        __constant_htons(0x0001)
-#define IB_PMA_SEL_LINK_ERROR_RECOVERY         __constant_htons(0x0002)
-#define IB_PMA_SEL_LINK_DOWNED                 __constant_htons(0x0004)
-#define IB_PMA_SEL_PORT_RCV_ERRORS             __constant_htons(0x0008)
-#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS     __constant_htons(0x0010)
-#define IB_PMA_SEL_PORT_XMIT_DISCARDS          __constant_htons(0x0040)
-#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS __constant_htons(0x0200)
-#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS   __constant_htons(0x0400)
-#define IB_PMA_SEL_PORT_VL15_DROPPED           __constant_htons(0x0800)
-#define IB_PMA_SEL_PORT_XMIT_DATA              __constant_htons(0x1000)
-#define IB_PMA_SEL_PORT_RCV_DATA               __constant_htons(0x2000)
-#define IB_PMA_SEL_PORT_XMIT_PACKETS           __constant_htons(0x4000)
-#define IB_PMA_SEL_PORT_RCV_PACKETS            __constant_htons(0x8000)
+#define IB_PMA_SEL_SYMBOL_ERROR                        cpu_to_be16(0x0001)
+#define IB_PMA_SEL_LINK_ERROR_RECOVERY         cpu_to_be16(0x0002)
+#define IB_PMA_SEL_LINK_DOWNED                 cpu_to_be16(0x0004)
+#define IB_PMA_SEL_PORT_RCV_ERRORS             cpu_to_be16(0x0008)
+#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS     cpu_to_be16(0x0010)
+#define IB_PMA_SEL_PORT_XMIT_DISCARDS          cpu_to_be16(0x0040)
+#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200)
+#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS   cpu_to_be16(0x0400)
+#define IB_PMA_SEL_PORT_VL15_DROPPED           cpu_to_be16(0x0800)
+#define IB_PMA_SEL_PORT_XMIT_DATA              cpu_to_be16(0x1000)
+#define IB_PMA_SEL_PORT_RCV_DATA               cpu_to_be16(0x2000)
+#define IB_PMA_SEL_PORT_XMIT_PACKETS           cpu_to_be16(0x4000)
+#define IB_PMA_SEL_PORT_RCV_PACKETS            cpu_to_be16(0x8000)
 
 struct ib_pma_portcounters_ext {
        u8 reserved;
@@ -913,14 +913,14 @@ struct ib_pma_portcounters_ext {
        __be64 port_multicast_rcv_packets;
 } __attribute__ ((packed));
 
-#define IB_PMA_SELX_PORT_XMIT_DATA             __constant_htons(0x0001)
-#define IB_PMA_SELX_PORT_RCV_DATA              __constant_htons(0x0002)
-#define IB_PMA_SELX_PORT_XMIT_PACKETS          __constant_htons(0x0004)
-#define IB_PMA_SELX_PORT_RCV_PACKETS           __constant_htons(0x0008)
-#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS      __constant_htons(0x0010)
-#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS       __constant_htons(0x0020)
-#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS    __constant_htons(0x0040)
-#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS     __constant_htons(0x0080)
+#define IB_PMA_SELX_PORT_XMIT_DATA             cpu_to_be16(0x0001)
+#define IB_PMA_SELX_PORT_RCV_DATA              cpu_to_be16(0x0002)
+#define IB_PMA_SELX_PORT_XMIT_PACKETS          cpu_to_be16(0x0004)
+#define IB_PMA_SELX_PORT_RCV_PACKETS           cpu_to_be16(0x0008)
+#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS      cpu_to_be16(0x0010)
+#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS       cpu_to_be16(0x0020)
+#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS    cpu_to_be16(0x0040)
+#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS     cpu_to_be16(0x0080)
 
 static int recv_pma_get_classportinfo(struct ib_perf *pmp)
 {
@@ -933,7 +933,7 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp)
                pmp->status |= IB_SMP_INVALID_FIELD;
 
        /* Indicate AllPortSelect is valid (only one port anyway) */
-       p->cap_mask = __constant_cpu_to_be16(1 << 8);
+       p->cap_mask = cpu_to_be16(1 << 8);
        p->base_version = 1;
        p->class_version = 1;
        /*
@@ -951,12 +951,11 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp)
  * We support 5 counters which only count the mandatory quantities.
  */
 #define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
-#define COUNTER_MASK0_9 \
-       __constant_cpu_to_be32(COUNTER_MASK(1, 0) | \
-                              COUNTER_MASK(1, 1) | \
-                              COUNTER_MASK(1, 2) | \
-                              COUNTER_MASK(1, 3) | \
-                              COUNTER_MASK(1, 4))
+#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
+                                   COUNTER_MASK(1, 1) | \
+                                   COUNTER_MASK(1, 2) | \
+                                   COUNTER_MASK(1, 3) | \
+                                   COUNTER_MASK(1, 4))
 
 static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
                                           struct ib_device *ibdev, u8 port)
@@ -1137,7 +1136,7 @@ static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
                status = dev->pma_sample_status;
        p->sample_status = cpu_to_be16(status);
        /* 64 bits */
-       p->extended_width = __constant_cpu_to_be32(0x80000000);
+       p->extended_width = cpu_to_be32(0x80000000);
        for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
                p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
                    cpu_to_be64(
@@ -1185,7 +1184,7 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
                pmp->status |= IB_SMP_INVALID_FIELD;
 
        if (cntrs.symbol_error_counter > 0xFFFFUL)
-               p->symbol_error_counter = __constant_cpu_to_be16(0xFFFF);
+               p->symbol_error_counter = cpu_to_be16(0xFFFF);
        else
                p->symbol_error_counter =
                        cpu_to_be16((u16)cntrs.symbol_error_counter);
@@ -1199,17 +1198,17 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
        else
                p->link_downed_counter = (u8)cntrs.link_downed_counter;
        if (cntrs.port_rcv_errors > 0xFFFFUL)
-               p->port_rcv_errors = __constant_cpu_to_be16(0xFFFF);
+               p->port_rcv_errors = cpu_to_be16(0xFFFF);
        else
                p->port_rcv_errors =
                        cpu_to_be16((u16) cntrs.port_rcv_errors);
        if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
-               p->port_rcv_remphys_errors = __constant_cpu_to_be16(0xFFFF);
+               p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
        else
                p->port_rcv_remphys_errors =
                        cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
        if (cntrs.port_xmit_discards > 0xFFFFUL)
-               p->port_xmit_discards = __constant_cpu_to_be16(0xFFFF);
+               p->port_xmit_discards = cpu_to_be16(0xFFFF);
        else
                p->port_xmit_discards =
                        cpu_to_be16((u16)cntrs.port_xmit_discards);
@@ -1220,24 +1219,24 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
        p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
                cntrs.excessive_buffer_overrun_errors;
        if (cntrs.vl15_dropped > 0xFFFFUL)
-               p->vl15_dropped = __constant_cpu_to_be16(0xFFFF);
+               p->vl15_dropped = cpu_to_be16(0xFFFF);
        else
                p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
        if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
-               p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF);
+               p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
        else
                p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
        if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
-               p->port_rcv_data = __constant_cpu_to_be32(0xFFFFFFFF);
+               p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
        else
                p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
        if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
-               p->port_xmit_packets = __constant_cpu_to_be32(0xFFFFFFFF);
+               p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
        else
                p->port_xmit_packets =
                        cpu_to_be32((u32)cntrs.port_xmit_packets);
        if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
-               p->port_rcv_packets = __constant_cpu_to_be32(0xFFFFFFFF);
+               p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
        else
                p->port_rcv_packets =
                        cpu_to_be32((u32) cntrs.port_rcv_packets);
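
The PortCounters path clamps 64-bit internal counters into the 16- and 32-bit fields the PMA MAD defines, saturating at all-ones instead of wrapping. The driver spells out each counter; a helper pair like the following is only a sketch of the shared pattern:

    /* clamp a counter into a 16-bit PMA field, saturating */
    static __be16 saturating_cpu_to_be16(unsigned long val)
    {
            return cpu_to_be16(val > 0xFFFFUL ? 0xFFFF : (u16) val);
    }

    /* likewise for the 32-bit fields */
    static __be32 saturating_cpu_to_be32(unsigned long val)
    {
            return cpu_to_be32(val > 0xFFFFFFFFUL ? 0xFFFFFFFF : (u32) val);
    }
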
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 9170710..79b3dbc 100644
@@ -1744,7 +1744,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                /* Signal completion event if the solicited bit is set. */
                ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                               (ohdr->bth[0] &
-                               __constant_cpu_to_be32(1 << 23)) != 0);
+                               cpu_to_be32(1 << 23)) != 0);
                break;
 
        case OP(RDMA_WRITE_FIRST):
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
index 8e255ad..4b06985 100644
@@ -781,10 +781,10 @@ retry:
                descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
        descqp -= 2;
        /* SDmaLastDesc */
-       descqp[0] |= __constant_cpu_to_le64(1ULL << 11);
+       descqp[0] |= cpu_to_le64(1ULL << 11);
        if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
                /* SDmaIntReq */
-               descqp[0] |= __constant_cpu_to_le64(1ULL << 15);
+               descqp[0] |= cpu_to_le64(1ULL << 15);
        }
 
        /* Commit writes to memory and advance the tail on the chip */
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index 82cc588..22e6099 100644
@@ -419,7 +419,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                /* Signal completion event if the solicited bit is set. */
                ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                               (ohdr->bth[0] &
-                               __constant_cpu_to_be32(1 << 23)) != 0);
+                               cpu_to_be32(1 << 23)) != 0);
                break;
 
        case OP(RDMA_WRITE_FIRST):
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 91c74cc..6076cb6 100644
@@ -370,7 +370,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
         */
        ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
                ah_attr->dlid != IPATH_PERMISSIVE_LID ?
-               __constant_cpu_to_be32(IPATH_MULTICAST_QPN) :
+               cpu_to_be32(IPATH_MULTICAST_QPN) :
                cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
        ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
        /*
@@ -573,7 +573,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
        /* Signal completion event if the solicited bit is set. */
        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                       (ohdr->bth[0] &
-                       __constant_cpu_to_be32(1 << 23)) != 0);
+                       cpu_to_be32(1 << 23)) != 0);
 
 bail:;
 }
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 0190edc..855911e 100644
@@ -209,20 +209,20 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
 
        mm = get_task_mm(current);
        if (!mm)
-               goto bail;
+               return;
 
        work = kmalloc(sizeof(*work), GFP_KERNEL);
        if (!work)
                goto bail_mm;
 
-       goto bail;
-
        INIT_WORK(&work->work, user_pages_account);
        work->mm = mm;
        work->num_pages = num_pages;
 
+       schedule_work(&work->work);
+       return;
+
 bail_mm:
        mmput(mm);
-bail:
        return;
 }
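
The bug here was a stray `goto bail;` left over from development: it jumped out before the work item was initialized or queued, so the deferred unpinned-pages accounting never ran. With the fix, the work is queued and the function returns, leaving mmput() to the worker. The worker itself is not in this hunk; a plausible shape, reconstructed only for orientation (struct and field names assumed from the usage above):

    static void user_pages_account(struct work_struct *_work)
    {
            struct ipath_user_pages_work *work =
                    container_of(_work, struct ipath_user_pages_work, work);

            down_write(&work->mm->mmap_sem);
            work->mm->locked_vm -= work->num_pages;  /* undo the pinned charge */
            up_write(&work->mm->mmap_sem);
            mmput(work->mm);                         /* pairs with get_task_mm() */
            kfree(work);
    }
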
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
index 82d9a0b..7bff4b9 100644
@@ -667,13 +667,13 @@ static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
 
 static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
 {
-       return descq | __constant_cpu_to_le64(1ULL << 12);
+       return descq | cpu_to_le64(1ULL << 12);
 }
 
 static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
 {
                                              /* last */  /* dma head */
-       return descq | __constant_cpu_to_le64(1ULL << 11 | 1ULL << 13);
+       return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
 }
 
 static inline __le64 ipath_sdma_make_desc1(u64 addr)
@@ -763,7 +763,7 @@ static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
                if (ofs >= IPATH_SMALLBUF_DWORDS) {
                        for (i = 0; i < pkt->naddr; i++) {
                                dd->ipath_sdma_descq[dtail].qw[0] |=
-                                       __constant_cpu_to_le64(1ULL << 14);
+                                       cpu_to_le64(1ULL << 14);
                                if (++dtail == dd->ipath_sdma_descq_cnt)
                                        dtail = 0;
                        }
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index cdf0e6a..9289ab4 100644
@@ -1585,7 +1585,7 @@ static int ipath_query_port(struct ib_device *ibdev,
        u64 ibcstat;
 
        memset(props, 0, sizeof(*props));
-       props->lid = lid ? lid : __constant_be16_to_cpu(IB_LID_PERMISSIVE);
+       props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
        props->lmc = dd->ipath_lmc;
        props->sm_lid = dev->sm_lid;
        props->sm_sl = dev->sm_sl;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 11e3f61..ae6cff4 100644
 #define IB_PMA_SAMPLE_STATUS_RUNNING   0x02
 
 /* Mandatory IB performance counter select values. */
-#define IB_PMA_PORT_XMIT_DATA  __constant_htons(0x0001)
-#define IB_PMA_PORT_RCV_DATA   __constant_htons(0x0002)
-#define IB_PMA_PORT_XMIT_PKTS  __constant_htons(0x0003)
-#define IB_PMA_PORT_RCV_PKTS   __constant_htons(0x0004)
-#define IB_PMA_PORT_XMIT_WAIT  __constant_htons(0x0005)
+#define IB_PMA_PORT_XMIT_DATA  cpu_to_be16(0x0001)
+#define IB_PMA_PORT_RCV_DATA   cpu_to_be16(0x0002)
+#define IB_PMA_PORT_XMIT_PKTS  cpu_to_be16(0x0003)
+#define IB_PMA_PORT_RCV_PKTS   cpu_to_be16(0x0004)
+#define IB_PMA_PORT_XMIT_WAIT  cpu_to_be16(0x0005)
 
 struct ib_reth {
        __be64 vaddr;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 606f1e2..19e68ab 100644
@@ -147,7 +147,8 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
  * Snoop SM MADs for port info and P_Key table sets, so we can
  * synthesize LID change and P_Key change events.
  */
-static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
+static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
+                               u16 prev_lid)
 {
        struct ib_event event;
 
@@ -157,6 +158,7 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
                if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
                        struct ib_port_info *pinfo =
                                (struct ib_port_info *) ((struct ib_smp *) mad)->data;
+                       u16 lid = be16_to_cpu(pinfo->lid);
 
                        update_sm_ah(to_mdev(ibdev), port_num,
                                     be16_to_cpu(pinfo->sm_lid),
@@ -165,12 +167,15 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
                        event.device           = ibdev;
                        event.element.port_num = port_num;
 
-                       if (pinfo->clientrereg_resv_subnetto & 0x80)
+                       if (pinfo->clientrereg_resv_subnetto & 0x80) {
                                event.event    = IB_EVENT_CLIENT_REREGISTER;
-                       else
-                               event.event    = IB_EVENT_LID_CHANGE;
+                               ib_dispatch_event(&event);
+                       }
 
-                       ib_dispatch_event(&event);
+                       if (prev_lid != lid) {
+                               event.event    = IB_EVENT_LID_CHANGE;
+                               ib_dispatch_event(&event);
+                       }
                }
 
                if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
@@ -228,8 +233,9 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags,     u8 port_num,
                        struct ib_wc *in_wc, struct ib_grh *in_grh,
                        struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
-       u16 slid;
+       u16 slid, prev_lid = 0;
        int err;
+       struct ib_port_attr pattr;
 
        slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
 
@@ -263,6 +269,13 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags,    u8 port_num,
        } else
                return IB_MAD_RESULT_SUCCESS;
 
+       if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+            in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
+           in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
+           in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+           !ib_query_port(ibdev, port_num, &pattr))
+               prev_lid = pattr.lid;
+
        err = mlx4_MAD_IFC(to_mdev(ibdev),
                           mad_flags & IB_MAD_IGNORE_MKEY,
                           mad_flags & IB_MAD_IGNORE_BKEY,
@@ -271,7 +284,7 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags,     u8 port_num,
                return IB_MAD_RESULT_FAILURE;
 
        if (!out_mad->mad_hdr.status) {
-               smp_snoop(ibdev, port_num, in_mad);
+               smp_snoop(ibdev, port_num, in_mad, prev_lid);
                node_desc_override(ibdev, out_mad);
        }
 
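
This change (mirrored in mthca below) fixes event reporting for PortInfo Set MADs: previously a Set that merely toggled the client-reregister bit was reported as IB_EVENT_LID_CHANGE even when the LID had not moved, causing consumers such as IPoIB to flush state needlessly. The LID is sampled with ib_query_port() before the Set reaches firmware, and the two events are now dispatched independently; condensed (the two event variables are shorthand, not the exact code):

    /* before the MAD goes to firmware: remember the current LID */
    if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
         in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
        in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
        in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
        !ib_query_port(ibdev, port_num, &pattr))
            prev_lid = pattr.lid;

    /* in smp_snoop(), after the Set took effect */
    if (pinfo->clientrereg_resv_subnetto & 0x80)
            ib_dispatch_event(&rereg_event);
    if (be16_to_cpu(pinfo->lid) != prev_lid)
            ib_dispatch_event(&lid_change_event);
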
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 61588bd..2ccb9d3 100644
@@ -699,11 +699,12 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
        struct mlx4_ib_dev *ibdev = ibdev_ptr;
        int p;
 
+       mlx4_ib_mad_cleanup(ibdev);
+       ib_unregister_device(&ibdev->ib_dev);
+
        for (p = 1; p <= ibdev->num_ports; ++p)
                mlx4_CLOSE_PORT(dev, p);
 
-       mlx4_ib_mad_cleanup(ibdev);
-       ib_unregister_device(&ibdev->ib_dev);
        iounmap(ibdev->uar_map);
        mlx4_uar_free(dev, &ibdev->priv_uar);
        mlx4_pd_free(dev, ibdev->priv_pdn);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a91cb4c..f385a24 100644
@@ -71,17 +71,17 @@ enum {
 };
 
 static const __be32 mlx4_ib_opcode[] = {
-       [IB_WR_SEND]                    = __constant_cpu_to_be32(MLX4_OPCODE_SEND),
-       [IB_WR_LSO]                     = __constant_cpu_to_be32(MLX4_OPCODE_LSO),
-       [IB_WR_SEND_WITH_IMM]           = __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
-       [IB_WR_RDMA_WRITE]              = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
-       [IB_WR_RDMA_WRITE_WITH_IMM]     = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
-       [IB_WR_RDMA_READ]               = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_READ),
-       [IB_WR_ATOMIC_CMP_AND_SWP]      = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
-       [IB_WR_ATOMIC_FETCH_AND_ADD]    = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
-       [IB_WR_SEND_WITH_INV]           = __constant_cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
-       [IB_WR_LOCAL_INV]               = __constant_cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
-       [IB_WR_FAST_REG_MR]             = __constant_cpu_to_be32(MLX4_OPCODE_FMR),
+       [IB_WR_SEND]                    = cpu_to_be32(MLX4_OPCODE_SEND),
+       [IB_WR_LSO]                     = cpu_to_be32(MLX4_OPCODE_LSO),
+       [IB_WR_SEND_WITH_IMM]           = cpu_to_be32(MLX4_OPCODE_SEND_IMM),
+       [IB_WR_RDMA_WRITE]              = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
+       [IB_WR_RDMA_WRITE_WITH_IMM]     = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
+       [IB_WR_RDMA_READ]               = cpu_to_be32(MLX4_OPCODE_RDMA_READ),
+       [IB_WR_ATOMIC_CMP_AND_SWP]      = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
+       [IB_WR_ATOMIC_FETCH_AND_ADD]    = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
+       [IB_WR_SEND_WITH_INV]           = cpu_to_be32(MLX4_OPCODE_SEND_INVAL),
+       [IB_WR_LOCAL_INV]               = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL),
+       [IB_WR_FAST_REG_MR]             = cpu_to_be32(MLX4_OPCODE_FMR),
 };
 
 static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 6404495..5648659 100644
@@ -104,7 +104,8 @@ static void update_sm_ah(struct mthca_dev *dev,
  */
 static void smp_snoop(struct ib_device *ibdev,
                      u8 port_num,
-                     struct ib_mad *mad)
+                     struct ib_mad *mad,
+                     u16 prev_lid)
 {
        struct ib_event event;
 
@@ -114,6 +115,7 @@ static void smp_snoop(struct ib_device *ibdev,
                if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
                        struct ib_port_info *pinfo =
                                (struct ib_port_info *) ((struct ib_smp *) mad)->data;
+                       u16 lid = be16_to_cpu(pinfo->lid);
 
                        mthca_update_rate(to_mdev(ibdev), port_num);
                        update_sm_ah(to_mdev(ibdev), port_num,
@@ -123,12 +125,15 @@ static void smp_snoop(struct ib_device *ibdev,
                        event.device           = ibdev;
                        event.element.port_num = port_num;
 
-                       if (pinfo->clientrereg_resv_subnetto & 0x80)
+                       if (pinfo->clientrereg_resv_subnetto & 0x80) {
                                event.event    = IB_EVENT_CLIENT_REREGISTER;
-                       else
-                               event.event    = IB_EVENT_LID_CHANGE;
+                               ib_dispatch_event(&event);
+                       }
 
-                       ib_dispatch_event(&event);
+                       if (prev_lid != lid) {
+                               event.event    = IB_EVENT_LID_CHANGE;
+                               ib_dispatch_event(&event);
+                       }
                }
 
                if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
@@ -196,6 +201,8 @@ int mthca_process_mad(struct ib_device *ibdev,
        int err;
        u8 status;
        u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
+       u16 prev_lid = 0;
+       struct ib_port_attr pattr;
 
        /* Forward locally generated traps to the SM */
        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
@@ -233,6 +240,12 @@ int mthca_process_mad(struct ib_device *ibdev,
                        return IB_MAD_RESULT_SUCCESS;
        } else
                return IB_MAD_RESULT_SUCCESS;
+       if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+            in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
+           in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
+           in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+           !ib_query_port(ibdev, port_num, &pattr))
+               prev_lid = pattr.lid;
 
        err = mthca_MAD_IFC(to_mdev(ibdev),
                            mad_flags & IB_MAD_IGNORE_MKEY,
@@ -252,7 +265,7 @@ int mthca_process_mad(struct ib_device *ibdev,
        }
 
        if (!out_mad->mad_hdr.status) {
-               smp_snoop(ibdev, port_num, in_mad);
+               smp_snoop(ibdev, port_num, in_mad, prev_lid);
                node_desc_override(ibdev, out_mad);
        }
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 0bd2a4f..353c13b 100644
@@ -660,8 +660,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 
        path = __path_find(dev, phdr->hwaddr + 4);
        if (!path || !path->valid) {
-               if (!path)
+               int new_path = 0;
+
+               if (!path) {
                        path = path_rec_create(dev, phdr->hwaddr + 4);
+                       new_path = 1;
+               }
                if (path) {
                        /* put pseudoheader back on for next time */
                        skb_push(skb, sizeof *phdr);
@@ -669,7 +673,8 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
 
                        if (!path->query && path_rec_start(dev, path)) {
                                spin_unlock_irqrestore(&priv->lock, flags);
-                               path_free(dev, path);
+                               if (new_path)
+                                       path_free(dev, path);
                                return;
                        } else
                                __path_add(dev, path);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 319b188..ea9e155 100644
@@ -401,13 +401,6 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
        if (ret)
                goto failure;
 
-       iser_dbg("path.mtu is %d setting it to %d\n",
-                cma_id->route.path_rec->mtu, IB_MTU_1024);
-
-       /* we must set the MTU to 1024 as this is what the target is assuming */
-       if (cma_id->route.path_rec->mtu > IB_MTU_1024)
-               cma_id->route.path_rec->mtu = IB_MTU_1024;
-
        memset(&conn_param, 0, sizeof conn_param);
        conn_param.responder_resources = 4;
        conn_param.initiator_depth     = 1;
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index a7a97bf..21040a0 100644
@@ -1,7 +1,7 @@
 obj-$(CONFIG_MLX4_CORE)                += mlx4_core.o
 
 mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-               mr.o pd.o port.o profile.o qp.o reset.o srq.o
+               mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
 
 obj-$(CONFIG_MLX4_EN)               += mlx4_en.o
 
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index f094ee0..aa9674b 100644
@@ -42,7 +42,6 @@ enum {
 static DEFINE_SPINLOCK(catas_lock);
 
 static LIST_HEAD(catas_list);
-static struct workqueue_struct *catas_wq;
 static struct work_struct catas_work;
 
 static int internal_err_reset = 1;
@@ -77,7 +76,7 @@ static void poll_catas(unsigned long dev_ptr)
                        list_add(&priv->catas_err.list, &catas_list);
                        spin_unlock(&catas_lock);
 
-                       queue_work(catas_wq, &catas_work);
+                       queue_work(mlx4_wq, &catas_work);
                }
        } else
                mod_timer(&priv->catas_err.timer,
@@ -146,18 +145,7 @@ void mlx4_stop_catas_poll(struct mlx4_dev *dev)
        spin_unlock_irq(&catas_lock);
 }
 
-int __init mlx4_catas_init(void)
+void  __init mlx4_catas_init(void)
 {
        INIT_WORK(&catas_work, catas_reset);
-
-       catas_wq = create_singlethread_workqueue("mlx4_err");
-       if (!catas_wq)
-               return -ENOMEM;
-
-       return 0;
-}
-
-void mlx4_catas_cleanup(void)
-{
-       destroy_workqueue(catas_wq);
 }
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 2c19bff..8830dcb 100644
@@ -163,6 +163,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
        int cqn;
        int eqes_found = 0;
        int set_ci = 0;
+       int port;
 
        while ((eqe = next_eqe_sw(eq))) {
                /*
@@ -203,11 +204,16 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                        break;
 
                case MLX4_EVENT_TYPE_PORT_CHANGE:
-                       mlx4_dispatch_event(dev,
-                                           eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ?
-                                           MLX4_DEV_EVENT_PORT_UP :
-                                           MLX4_DEV_EVENT_PORT_DOWN,
-                                           be32_to_cpu(eqe->event.port_change.port) >> 28);
+                       port = be32_to_cpu(eqe->event.port_change.port) >> 28;
+                       if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
+                               mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
+                                                   port);
+                               mlx4_priv(dev)->sense.do_sense_port[port] = 1;
+                       } else {
+                               mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
+                                                   port);
+                               mlx4_priv(dev)->sense.do_sense_port[port] = 0;
+                       }
                        break;
 
                case MLX4_EVENT_TYPE_CQ_ERROR:
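
The port-change branch now also arms the port sensing machinery introduced by this merge (drivers/net/mlx4/sense.c): when a link drops, do_sense_port is set so the sense timer re-probes whether the partner is IB or Ethernet; when it comes back up, sensing stops for that port. The same hunk, annotated:

    port = be32_to_cpu(eqe->event.port_change.port) >> 28;
    if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
            mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN, port);
            mlx4_priv(dev)->sense.do_sense_port[port] = 1;  /* re-probe while down */
    } else {
            mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port);
            mlx4_priv(dev)->sense.do_sense_port[port] = 0;  /* link type known */
    }
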
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 6ef2490..a66f5b2 100644
@@ -51,6 +51,8 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 
+struct workqueue_struct *mlx4_wq;
+
 #ifdef CONFIG_MLX4_DEBUG
 
 int mlx4_debug_level = 0;
@@ -98,24 +100,23 @@ module_param_named(use_prio, use_prio, bool, 0444);
 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
                  "(0/1, default 0)");
 
-static int mlx4_check_port_params(struct mlx4_dev *dev,
-                                 enum mlx4_port_type *port_type)
+int mlx4_check_port_params(struct mlx4_dev *dev,
+                          enum mlx4_port_type *port_type)
 {
        int i;
 
        for (i = 0; i < dev->caps.num_ports - 1; i++) {
-               if (port_type[i] != port_type[i+1] &&
-                   !(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
-                       mlx4_err(dev, "Only same port types supported "
-                                "on this HCA, aborting.\n");
-                       return -EINVAL;
+               if (port_type[i] != port_type[i + 1]) {
+                       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+                               mlx4_err(dev, "Only same port types supported "
+                                        "on this HCA, aborting.\n");
+                               return -EINVAL;
+                       }
+                       if (port_type[i] == MLX4_PORT_TYPE_ETH &&
+                           port_type[i + 1] == MLX4_PORT_TYPE_IB)
+                               return -EINVAL;
                }
        }
-       if ((port_type[0] == MLX4_PORT_TYPE_ETH) &&
-           (port_type[1] == MLX4_PORT_TYPE_IB)) {
-               mlx4_err(dev, "eth-ib configuration is not supported.\n");
-               return -EINVAL;
-       }
 
        for (i = 0; i < dev->caps.num_ports; i++) {
                if (!(port_type[i] & dev->caps.supported_type[i+1])) {
@@ -225,6 +226,9 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                        dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
                else
                        dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+               dev->caps.possible_type[i] = dev->caps.port_type[i];
+               mlx4_priv(dev)->sense.sense_allowed[i] =
+                       dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
 
                if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
                        dev->caps.log_num_macs = dev_cap->log_max_macs[i];
@@ -263,14 +267,16 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
  * Change the port configuration of the device.
  * Every user of this function must hold the port mutex.
  */
-static int mlx4_change_port_types(struct mlx4_dev *dev,
-                                 enum mlx4_port_type *port_types)
+int mlx4_change_port_types(struct mlx4_dev *dev,
+                          enum mlx4_port_type *port_types)
 {
        int err = 0;
        int change = 0;
        int port;
 
        for (port = 0; port <  dev->caps.num_ports; port++) {
+               /* Change the port type only if the new type is different
+                * from the current, and not set to Auto */
                if (port_types[port] != dev->caps.port_type[port + 1]) {
                        change = 1;
                        dev->caps.port_type[port + 1] = port_types[port];
@@ -302,10 +308,17 @@ static ssize_t show_port_type(struct device *dev,
        struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
                                                   port_attr);
        struct mlx4_dev *mdev = info->dev;
+       char type[8];
+
+       sprintf(type, "%s",
+               (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
+               "ib" : "eth");
+       if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
+               sprintf(buf, "auto (%s)\n", type);
+       else
+               sprintf(buf, "%s\n", type);
 
-       return sprintf(buf, "%s\n",
-                      mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB ?
-                      "ib" : "eth");
+       return strlen(buf);
 }
 
 static ssize_t set_port_type(struct device *dev,
@@ -317,6 +330,7 @@ static ssize_t set_port_type(struct device *dev,
        struct mlx4_dev *mdev = info->dev;
        struct mlx4_priv *priv = mlx4_priv(mdev);
        enum mlx4_port_type types[MLX4_MAX_PORTS];
+       enum mlx4_port_type new_types[MLX4_MAX_PORTS];
        int i;
        int err = 0;
 
@@ -324,26 +338,56 @@ static ssize_t set_port_type(struct device *dev,
                info->tmp_type = MLX4_PORT_TYPE_IB;
        else if (!strcmp(buf, "eth\n"))
                info->tmp_type = MLX4_PORT_TYPE_ETH;
+       else if (!strcmp(buf, "auto\n"))
+               info->tmp_type = MLX4_PORT_TYPE_AUTO;
        else {
                mlx4_err(mdev, "%s is not a supported port type\n", buf);
                return -EINVAL;
        }
 
+       mlx4_stop_sense(mdev);
        mutex_lock(&priv->port_mutex);
-       for (i = 0; i < mdev->caps.num_ports; i++)
+       /* The possible type is always the one the user requested */
+       mdev->caps.possible_type[info->port] = info->tmp_type;
+
+       for (i = 0; i < mdev->caps.num_ports; i++) {
                types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
-                                       mdev->caps.port_type[i+1];
+                                       mdev->caps.possible_type[i+1];
+               if (types[i] == MLX4_PORT_TYPE_AUTO)
+                       types[i] = mdev->caps.port_type[i+1];
+       }
 
-       err = mlx4_check_port_params(mdev, types);
+       if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+               for (i = 1; i <= mdev->caps.num_ports; i++) {
+                       if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
+                               mdev->caps.possible_type[i] = mdev->caps.port_type[i];
+                               err = -EINVAL;
+                       }
+               }
+       }
+       if (err) {
+               mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
+                              "Set both ports to either 'eth' or 'ib' "
+                              "(the types must match)\n");
+               goto out;
+       }
+
+       mlx4_do_sense_ports(mdev, new_types, types);
+
+       err = mlx4_check_port_params(mdev, new_types);
        if (err)
                goto out;
 
-       for (i = 1; i <= mdev->caps.num_ports; i++)
-               priv->port[i].tmp_type = 0;
+       /* The configuration has been verified and is about to be
+        * applied, so there is no need to remember the temporary
+        * types any more */
+       for (i = 0; i < mdev->caps.num_ports; i++)
+               priv->port[i + 1].tmp_type = 0;
 
-       err = mlx4_change_port_types(mdev, types);
+       err = mlx4_change_port_types(mdev, new_types);
 
 out:
+       mlx4_start_sense(mdev);
        mutex_unlock(&priv->port_mutex);
        return err ? err : count;
 }
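
The store handler above resolves each port's requested type in three steps:
a pending tmp_type wins, otherwise the last accepted possible_type is used,
and a request of "auto" falls back to the currently active type until
mlx4_do_sense_ports() overrides it. A rough user-space sketch of that
resolution step (hypothetical helper, not driver code):

#include <stdio.h>

enum port_type { TYPE_NONE = 0, TYPE_IB = 1, TYPE_ETH = 2, TYPE_AUTO = 3 };

/* Resolve requested types into concrete defaults for the sensing pass. */
static void resolve_types(const enum port_type *tmp,      /* pending request */
                          const enum port_type *possible, /* last accepted  */
                          const enum port_type *current,  /* active type    */
                          enum port_type *out, int nports)
{
        int i;

        for (i = 0; i < nports; i++) {
                out[i] = tmp[i] ? tmp[i] : possible[i];
                if (out[i] == TYPE_AUTO)
                        out[i] = current[i];    /* sensing decides later */
        }
}

int main(void)
{
        enum port_type tmp[2]      = { TYPE_NONE, TYPE_AUTO };
        enum port_type possible[2] = { TYPE_ETH,  TYPE_AUTO };
        enum port_type current[2]  = { TYPE_ETH,  TYPE_IB   };
        enum port_type out[2];

        resolve_types(tmp, possible, current, out, 2);
        printf("%d %d\n", out[0], out[1]);      /* "2 1": eth, ib */
        return 0;
}
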
@@ -1117,6 +1161,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        if (err)
                goto err_port;
 
+       mlx4_sense_init(dev);
+       mlx4_start_sense(dev);
+
        pci_set_drvdata(pdev, dev);
 
        return 0;
@@ -1182,6 +1229,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
        int p;
 
        if (dev) {
+               mlx4_stop_sense(dev);
                mlx4_unregister_device(dev);
 
                for (p = 1; p <= dev->caps.num_ports; p++) {
@@ -1230,6 +1278,8 @@ static struct pci_device_id mlx4_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
        { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
        { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
+       { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
        { 0, }
 };
 
@@ -1264,9 +1314,11 @@ static int __init mlx4_init(void)
        if (mlx4_verify_params())
                return -EINVAL;
 
-       ret = mlx4_catas_init();
-       if (ret)
-               return ret;
+       mlx4_catas_init();
+
+       mlx4_wq = create_singlethread_workqueue("mlx4");
+       if (!mlx4_wq)
+               return -ENOMEM;
 
        ret = pci_register_driver(&mlx4_driver);
        return ret < 0 ? ret : 0;
@@ -1275,7 +1327,7 @@ static int __init mlx4_init(void)
 static void __exit mlx4_cleanup(void)
 {
        pci_unregister_driver(&mlx4_driver);
-       mlx4_catas_cleanup();
+       destroy_workqueue(mlx4_wq);
 }
 
 module_init(mlx4_init);
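
Module init now creates one single-threaded workqueue, mlx4_wq, shared by
catastrophic-error polling and port sensing, and cleanup tears it down after
the driver is unregistered. A minimal sketch of that create/destroy pairing
(example module; all names here are placeholders):

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;     /* plays the role of mlx4_wq */

static int __init example_init(void)
{
        /* One ordered worker thread shared by all deferred driver work. */
        example_wq = create_singlethread_workqueue("example");
        if (!example_wq)
                return -ENOMEM;
        return 0;
}

static void __exit example_exit(void)
{
        /* Waits for pending work, then releases the thread. */
        destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
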
index e0213ba..5bd79c2 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/mutex.h>
 #include <linux/radix-tree.h>
 #include <linux/timer.h>
+#include <linux/workqueue.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/driver.h>
@@ -276,6 +277,13 @@ struct mlx4_port_info {
        struct mlx4_vlan_table  vlan_table;
 };
 
+struct mlx4_sense {
+       struct mlx4_dev         *dev;
+       u8                      do_sense_port[MLX4_MAX_PORTS + 1];
+       u8                      sense_allowed[MLX4_MAX_PORTS + 1];
+       struct delayed_work     sense_poll;
+};
+
 struct mlx4_priv {
        struct mlx4_dev         dev;
 
@@ -305,6 +313,7 @@ struct mlx4_priv {
        struct mlx4_uar         driver_uar;
        void __iomem           *kar;
        struct mlx4_port_info   port[MLX4_MAX_PORTS + 1];
+       struct mlx4_sense       sense;
        struct mutex            port_mutex;
 };
 
@@ -313,6 +322,10 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
        return container_of(dev, struct mlx4_priv, dev);
 }
 
+#define MLX4_SENSE_RANGE       (HZ * 3)
+
+extern struct workqueue_struct *mlx4_wq;
+
 u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
 void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
 u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
@@ -346,8 +359,7 @@ void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
 
 void mlx4_start_catas_poll(struct mlx4_dev *dev);
 void mlx4_stop_catas_poll(struct mlx4_dev *dev);
-int mlx4_catas_init(void);
-void mlx4_catas_cleanup(void);
+void mlx4_catas_init(void);
 int mlx4_restart_one(struct pci_dev *pdev);
 int mlx4_register_device(struct mlx4_dev *dev);
 void mlx4_unregister_device(struct mlx4_dev *dev);
@@ -379,6 +391,17 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
 
 void mlx4_handle_catas_err(struct mlx4_dev *dev);
 
+void mlx4_do_sense_ports(struct mlx4_dev *dev,
+                        enum mlx4_port_type *stype,
+                        enum mlx4_port_type *defaults);
+void mlx4_start_sense(struct mlx4_dev *dev);
+void mlx4_stop_sense(struct mlx4_dev *dev);
+void mlx4_sense_init(struct mlx4_dev *dev);
+int mlx4_check_port_params(struct mlx4_dev *dev,
+                          enum mlx4_port_type *port_type);
+int mlx4_change_port_types(struct mlx4_dev *dev,
+                          enum mlx4_port_type *port_types);
+
 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
 
index 0a057e5..7cce334 100644 (file)
@@ -298,20 +298,17 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
 {
        struct mlx4_cmd_mailbox *mailbox;
        int err;
-       u8 is_eth = dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
 
        memset(mailbox->buf, 0, 256);
-       if (is_eth) {
-               ((u8 *) mailbox->buf)[3] = 6;
-               ((__be16 *) mailbox->buf)[4] = cpu_to_be16(1 << 15);
-               ((__be16 *) mailbox->buf)[6] = cpu_to_be16(1 << 15);
-       } else
-               ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
-       err = mlx4_cmd(dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
+       if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
+               mlx4_free_cmd_mailbox(dev, mailbox);
+               return 0;
+       }
+
+       ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
+       err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
                       MLX4_CMD_TIME_CLASS_B);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
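
Note the mailbox discipline in mlx4_SET_PORT(): once mlx4_alloc_cmd_mailbox()
succeeds, every exit path must go through mlx4_free_cmd_mailbox(), including
the early return for Ethernet ports above. A condensed sketch of the pattern
(hypothetical wrapper; error handling trimmed and the mlx4 command headers,
"mlx4.h" and <linux/mlx4/cmd.h>, assumed):

static int example_set_port(struct mlx4_dev *dev, u8 port)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);        /* nothing allocated yet */

        memset(mailbox->buf, 0, 256);
        /* ... fill mailbox->buf for the command ... */

        err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
                       MLX4_CMD_TIME_CLASS_B);

        mlx4_free_cmd_mailbox(dev, mailbox);    /* every later path frees */
        return err;
}
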
diff --git a/drivers/net/mlx4/sense.c b/drivers/net/mlx4/sense.c
new file mode 100644 (file)
index 0000000..6d5089e
--- /dev/null
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+
+static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
+                          enum mlx4_port_type *type)
+{
+       u64 out_param;
+       int err = 0;
+
+       err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
+                          MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B);
+       if (err) {
+               mlx4_err(dev, "Sense command failed for port: %d\n", port);
+               return err;
+       }
+
+       if (out_param > 2) {
+               mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param);
+               return -EINVAL;
+       }
+
+       *type = out_param;
+       return 0;
+}
+
+void mlx4_do_sense_ports(struct mlx4_dev *dev,
+                        enum mlx4_port_type *stype,
+                        enum mlx4_port_type *defaults)
+{
+       struct mlx4_sense *sense = &mlx4_priv(dev)->sense;
+       int err;
+       int i;
+
+       for (i = 1; i <= dev->caps.num_ports; i++) {
+               stype[i - 1] = 0;
+               if (sense->do_sense_port[i] && sense->sense_allowed[i] &&
+                   dev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
+                       err = mlx4_SENSE_PORT(dev, i, &stype[i - 1]);
+                       if (err)
+                               stype[i - 1] = defaults[i - 1];
+               } else {
+                       stype[i - 1] = defaults[i - 1];
+               }
+       }
+
+       /*
+        * Adjust port configuration:
+        * If port 1 sensed nothing and port 2 is IB, set both as IB
+        * If port 2 sensed nothing and port 1 is Eth, set both as Eth
+        */
+       if (stype[0] == MLX4_PORT_TYPE_ETH) {
+               for (i = 1; i < dev->caps.num_ports; i++)
+                       stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH;
+       }
+       if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) {
+               for (i = 0; i < dev->caps.num_ports - 1; i++)
+                       stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB;
+       }
+
+       /*
+        * If nothing was sensed, keep the current configuration.
+        */
+       for (i = 0; i < dev->caps.num_ports; i++)
+               stype[i] = stype[i] ? stype[i] : defaults[i];
+}
+
+static void mlx4_sense_port(struct work_struct *work)
+{
+       struct delayed_work *delay = container_of(work, struct delayed_work, work);
+       struct mlx4_sense *sense = container_of(delay, struct mlx4_sense,
+                                               sense_poll);
+       struct mlx4_dev *dev = sense->dev;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       enum mlx4_port_type stype[MLX4_MAX_PORTS];
+
+       mutex_lock(&priv->port_mutex);
+       mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]);
+
+       if (mlx4_check_port_params(dev, stype))
+               goto sense_again;
+
+       if (mlx4_change_port_types(dev, stype))
+               mlx4_err(dev, "Failed to change port_types\n");
+
+sense_again:
+       mutex_unlock(&priv->port_mutex);
+       queue_delayed_work(mlx4_wq, &sense->sense_poll,
+                          round_jiffies_relative(MLX4_SENSE_RANGE));
+}
+
+void mlx4_start_sense(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_sense *sense = &priv->sense;
+
+       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP))
+               return;
+
+       queue_delayed_work(mlx4_wq, &sense->sense_poll,
+                          round_jiffies_relative(MLX4_SENSE_RANGE));
+}
+
+void mlx4_stop_sense(struct mlx4_dev *dev)
+{
+       cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll);
+}
+
+void mlx4_sense_init(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_sense *sense = &priv->sense;
+       int port;
+
+       sense->dev = dev;
+       for (port = 1; port <= dev->caps.num_ports; port++)
+               sense->do_sense_port[port] = 1;
+
+       INIT_DELAYED_WORK_DEFERRABLE(&sense->sense_poll, mlx4_sense_port);
+}
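
The adjustment pass in mlx4_do_sense_ports() can be exercised in isolation;
a toy user-space version (two ports, hypothetical names) shows how a sensed
port pulls an unsensed neighbor along, with Ethernet propagating toward
higher ports and IB toward lower ones, before defaults fill in the rest:

#include <stdio.h>

enum port_type { TYPE_NONE = 0, TYPE_IB = 1, TYPE_ETH = 2 };

static void adjust(enum port_type *stype, const enum port_type *defaults,
                   int nports)
{
        int i;

        /* Port 1 sensed Eth: later unsensed ports become Eth too. */
        if (stype[0] == TYPE_ETH)
                for (i = 1; i < nports; i++)
                        stype[i] = stype[i] ? stype[i] : TYPE_ETH;

        /* Last port sensed IB: earlier unsensed ports become IB too. */
        if (stype[nports - 1] == TYPE_IB)
                for (i = 0; i < nports - 1; i++)
                        stype[i] = stype[i] ? stype[i] : TYPE_IB;

        /* Whatever is still unsensed keeps its default. */
        for (i = 0; i < nports; i++)
                stype[i] = stype[i] ? stype[i] : defaults[i];
}

int main(void)
{
        enum port_type s[2] = { TYPE_ETH, TYPE_NONE };
        enum port_type d[2] = { TYPE_IB, TYPE_IB };

        adjust(s, d, 2);
        printf("%d %d\n", s[0], s[1]);  /* "2 2": both end up Eth */
        return 0;
}
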
index cf9c679..0f82293 100644 (file)
@@ -55,6 +55,7 @@ enum {
        MLX4_CMD_CLOSE_PORT      = 0xa,
        MLX4_CMD_QUERY_HCA       = 0xb,
        MLX4_CMD_QUERY_PORT      = 0x43,
+       MLX4_CMD_SENSE_PORT      = 0x4d,
        MLX4_CMD_SET_PORT        = 0xc,
        MLX4_CMD_ACCESS_DDR      = 0x2e,
        MLX4_CMD_MAP_ICM         = 0xffa,
index 8f659cc..3aff8a6 100644 (file)
@@ -155,8 +155,9 @@ enum mlx4_qp_region {
 };
 
 enum mlx4_port_type {
-       MLX4_PORT_TYPE_IB       = 1 << 0,
-       MLX4_PORT_TYPE_ETH      = 1 << 1,
+       MLX4_PORT_TYPE_IB       = 1,
+       MLX4_PORT_TYPE_ETH      = 2,
+       MLX4_PORT_TYPE_AUTO     = 3
 };
 
 enum mlx4_special_vlan_idx {
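
Although the port types are no longer declared as bit flags, the values are
chosen so the bitwise membership test in mlx4_check_port_params()
(port_type[i] & dev->caps.supported_type[i+1]) keeps working: IB is 0x1,
ETH is 0x2, and AUTO is 0x3, i.e. both bits set. A quick user-space check
of that invariant:

#include <assert.h>

enum example_port_type {
        TYPE_IB   = 1,  /* doubles as bit 0 */
        TYPE_ETH  = 2,  /* doubles as bit 1 */
        TYPE_AUTO = 3   /* == TYPE_IB | TYPE_ETH */
};

int main(void)
{
        /* Firmware reporting support for both types yields 0x3 ... */
        unsigned int supported = TYPE_IB | TYPE_ETH;

        /* ... so the bitwise test holds for either concrete type. */
        assert(TYPE_IB & supported);
        assert(TYPE_ETH & supported);
        assert(TYPE_AUTO == (TYPE_IB | TYPE_ETH));
        return 0;
}
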
@@ -237,6 +238,7 @@ struct mlx4_caps {
        enum mlx4_port_type     port_type[MLX4_MAX_PORTS + 1];
        u8                      supported_type[MLX4_MAX_PORTS + 1];
        u32                     port_mask;
+       enum mlx4_port_type     possible_type[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_buf_list {
index ec7c6d9..9388583 100644 (file)
@@ -314,12 +314,12 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
  */
 void ib_destroy_cm_id(struct ib_cm_id *cm_id);
 
-#define IB_SERVICE_ID_AGN_MASK __constant_cpu_to_be64(0xFF00000000000000ULL)
-#define IB_CM_ASSIGN_SERVICE_ID __constant_cpu_to_be64(0x0200000000000000ULL)
-#define IB_CMA_SERVICE_ID      __constant_cpu_to_be64(0x0000000001000000ULL)
-#define IB_CMA_SERVICE_ID_MASK __constant_cpu_to_be64(0xFFFFFFFFFF000000ULL)
-#define IB_SDP_SERVICE_ID      __constant_cpu_to_be64(0x0000000000010000ULL)
-#define IB_SDP_SERVICE_ID_MASK __constant_cpu_to_be64(0xFFFFFFFFFFFF0000ULL)
+#define IB_SERVICE_ID_AGN_MASK cpu_to_be64(0xFF00000000000000ULL)
+#define IB_CM_ASSIGN_SERVICE_ID        cpu_to_be64(0x0200000000000000ULL)
+#define IB_CMA_SERVICE_ID      cpu_to_be64(0x0000000001000000ULL)
+#define IB_CMA_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFF000000ULL)
+#define IB_SDP_SERVICE_ID      cpu_to_be64(0x0000000000010000ULL)
+#define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL)
 
 struct ib_cm_compare_data {
        u8  data[IB_CM_COMPARE_SIZE];
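
The __constant_ prefix can be dropped because the kernel's cpu_to_be64()
already folds to a compile-time constant when its argument is constant (the
byte-swap helpers dispatch on __builtin_constant_p), so the plain form is
safe even in these macro definitions. A small user-space analogue of masking
service IDs in their big-endian representation, using glibc's htobe64()
(runtime use only here):

#include <endian.h>     /* htobe64(), glibc */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        /* Both mask and value are stored big-endian, so masking works
         * on the raw representation, exactly as the CM code relies on. */
        uint64_t agn_mask = htobe64(0xFF00000000000000ULL);
        uint64_t svc_id   = htobe64(0x0200000000000000ULL);

        printf("agn bits: %#" PRIx64 "\n",
               (uint64_t)(svc_id & agn_mask));
        return 0;
}
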
index 5f6c40f..d3b9401 100644 (file)
 #define        IB_MGMT_RMPP_STATUS_ABORT_MAX           127
 
 #define IB_QP0         0
-#define IB_QP1         __constant_htonl(1)
+#define IB_QP1         cpu_to_be32(1)
 #define IB_QP1_QKEY    0x80010000
 #define IB_QP_SET_QKEY 0x80000000
 
@@ -290,7 +290,7 @@ static inline void ib_set_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr, u8 rtime)
  */
 static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags)
 {
-       rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF1) |
+       rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF8) |
                                     (flags & 0x7);
 }
 
index aaca087..98b9086 100644 (file)
@@ -63,25 +63,25 @@ struct ib_smp {
        u8      return_path[IB_SMP_MAX_PATH_HOPS];
 } __attribute__ ((packed));
 
-#define IB_SMP_DIRECTION                       __constant_htons(0x8000)
+#define IB_SMP_DIRECTION                       cpu_to_be16(0x8000)
 
 /* Subnet management attributes */
-#define IB_SMP_ATTR_NOTICE                     __constant_htons(0x0002)
-#define IB_SMP_ATTR_NODE_DESC                  __constant_htons(0x0010)
-#define IB_SMP_ATTR_NODE_INFO                  __constant_htons(0x0011)
-#define IB_SMP_ATTR_SWITCH_INFO                        __constant_htons(0x0012)
-#define IB_SMP_ATTR_GUID_INFO                  __constant_htons(0x0014)
-#define IB_SMP_ATTR_PORT_INFO                  __constant_htons(0x0015)
-#define IB_SMP_ATTR_PKEY_TABLE                 __constant_htons(0x0016)
-#define IB_SMP_ATTR_SL_TO_VL_TABLE             __constant_htons(0x0017)
-#define IB_SMP_ATTR_VL_ARB_TABLE               __constant_htons(0x0018)
-#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE       __constant_htons(0x0019)
-#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE       __constant_htons(0x001A)
-#define IB_SMP_ATTR_MCAST_FORWARD_TABLE                __constant_htons(0x001B)
-#define IB_SMP_ATTR_SM_INFO                    __constant_htons(0x0020)
-#define IB_SMP_ATTR_VENDOR_DIAG                        __constant_htons(0x0030)
-#define IB_SMP_ATTR_LED_INFO                   __constant_htons(0x0031)
-#define IB_SMP_ATTR_VENDOR_MASK                        __constant_htons(0xFF00)
+#define IB_SMP_ATTR_NOTICE                     cpu_to_be16(0x0002)
+#define IB_SMP_ATTR_NODE_DESC                  cpu_to_be16(0x0010)
+#define IB_SMP_ATTR_NODE_INFO                  cpu_to_be16(0x0011)
+#define IB_SMP_ATTR_SWITCH_INFO                        cpu_to_be16(0x0012)
+#define IB_SMP_ATTR_GUID_INFO                  cpu_to_be16(0x0014)
+#define IB_SMP_ATTR_PORT_INFO                  cpu_to_be16(0x0015)
+#define IB_SMP_ATTR_PKEY_TABLE                 cpu_to_be16(0x0016)
+#define IB_SMP_ATTR_SL_TO_VL_TABLE             cpu_to_be16(0x0017)
+#define IB_SMP_ATTR_VL_ARB_TABLE               cpu_to_be16(0x0018)
+#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE       cpu_to_be16(0x0019)
+#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE       cpu_to_be16(0x001A)
+#define IB_SMP_ATTR_MCAST_FORWARD_TABLE                cpu_to_be16(0x001B)
+#define IB_SMP_ATTR_SM_INFO                    cpu_to_be16(0x0020)
+#define IB_SMP_ATTR_VENDOR_DIAG                        cpu_to_be16(0x0030)
+#define IB_SMP_ATTR_LED_INFO                   cpu_to_be16(0x0031)
+#define IB_SMP_ATTR_VENDOR_MASK                        cpu_to_be16(0xFF00)
 
 struct ib_port_info {
        __be64 mkey;