/*
 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 *
 * This Software is licensed under one of the following licenses:
 *
 * 1) under the terms of the "Common Public License 1.0" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/cpl.php.
 *
 * 2) under the terms of the "The BSD License" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/bsd-license.php.
 *
 * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
 *    copy of which is available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/gpl-license.php.
 *
 * Licensee has the right to choose one of the above licenses.
 *
 * Redistributions of source code must retain the above copyright
 * notice and one of the license notices.
 *
 * Redistributions in binary form must reproduce both the above copyright
 * notice, one of the license notices in the documentation
 * and/or other materials provided with the distribution.
 */
#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <net/tcp.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15

static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);
static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};
static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
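
/*
 * Connection states for the id state machine.  Filled in here for
 * completeness: every constant below is referenced by the code in this
 * file, and only the set of values (not their order) matters.
 */
enum cma_state {
	CMA_IDLE,
	CMA_ADDR_BOUND,
	CMA_ADDR_QUERY,
	CMA_ADDR_RESOLVED,
	CMA_ROUTE_QUERY,
	CMA_ROUTE_RESOLVED,
	CMA_CONNECT,
	CMA_DISCONNECT,
	CMA_LISTEN,
	CMA_DESTROYING,
	CMA_DEVICE_REMOVAL
};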
struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	__be64			node_guid;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
};
struct rdma_bind_list {
	struct idr		*ps;
	struct hlist_head	owners;
	unsigned short		port;
};
/*
 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list;
	struct list_head	listen_list;
	struct cma_device	*cma_dev;

	enum cma_state		state;
	spinlock_t		lock;
	struct completion	comp;
	atomic_t		refcount;
	wait_queue_head_t	wait_remove;
	atomic_t		dev_remove;

	int			backlog;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;

	u32			seq_num;
	u32			qp_num;
	u8			srq;
};
struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum cma_state		old_state;
	enum cma_state		new_state;
	struct rdma_cm_event	event;
};
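
/*
 * On-the-wire address representation shared by the CMA and SDP headers
 * below; an IPv4 address occupies the last four bytes of the 16-byte
 * field, as the uses of ip4.addr and ip6 throughout this file require.
 */
union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__u32 pad[3];
		__u32 addr;
	} ip4;
};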
struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__u16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hh {
	u8 bsdh[16];
	u8 sdp_version; /* Major version: 7:4 */
	u8 ip_version;	/* IP version: 7:4 */
	u8 sdp_specific1[10];
	__u16 port;
	__u16 sdp_specific2;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hah {
	u8 bsdh[16];
	u8 sdp_version;
};
#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2
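
/*
 * State transition helpers.  Every read or update of id_priv->state goes
 * through one of these, serialized by id_priv->lock, so callbacks racing
 * with rdma_destroy_id() always observe a consistent state.
 */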
static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum cma_state comp, enum cma_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum cma_state cma_exch(struct rdma_id_private *id_priv,
			       enum cma_state exch)
{
	unsigned long flags;
	enum cma_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}
static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}
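
/*
 * Both header formats keep the IP version in the high nibble of
 * ip_version: cma_set_ip_ver(hdr, 4) leaves 0x40 in the field and
 * cma_get_ip_ver() then returns 4.  Passing 0xF sets all four version
 * bits, which is how cma_set_compare_data() below builds its masks.
 */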
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
}
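
/*
 * Match the id's resolved GID against the cached GIDs of every
 * registered device.  Callers hold the global 'lock' around this call
 * (see cma_req_handler(), iw_conn_req_handler() and addr_handler()) to
 * keep dev_list stable.
 */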
static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	enum rdma_node_type dev_type = id_priv->id.route.addr.dev_addr.dev_type;
	struct cma_device *cma_dev;
	union ib_gid gid;
	int ret = -ENODEV;

	switch (rdma_node_get_transport(dev_type)) {
	case RDMA_TRANSPORT_IB:
		ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
		break;
	case RDMA_TRANSPORT_IWARP:
		iw_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
		break;
	default:
		return -ENODEV;
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = ib_find_cached_gid(cma_dev->device, &gid,
					 &id_priv->id.port_num, NULL);
		if (!ret) {
			cma_attach_to_dev(id_priv, cma_dev);
			break;
		}
	}
	return ret;
}
static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static void cma_release_remove(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->dev_remove))
		wake_up(&id_priv->wait_remove);
}
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = CMA_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	spin_lock_init(&id_priv->lock);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	init_waitqueue_head(&id_priv->wait_remove);
	atomic_set(&id_priv->dev_remove, 0);
	INIT_LIST_HEAD(&id_priv->listen_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
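
/*
 * Typical active-side usage of this API (sketch only: error handling and
 * the event-callback body are omitted, and my_handler/my_ctx/dst_addr are
 * placeholder names, not symbols defined in this file):
 *
 *	id = rdma_create_id(my_handler, my_ctx, RDMA_PS_TCP);
 *	rdma_resolve_addr(id, NULL, dst_addr, 2000);
 *	... wait for RDMA_CM_EVENT_ADDR_RESOLVED ...
 *	rdma_resolve_route(id, 2000);
 *	... wait for RDMA_CM_EVENT_ROUTE_RESOLVED ...
 *	rdma_create_qp(id, pd, &qp_init_attr);
 *	rdma_connect(id, &conn_param);
 *	... RDMA_CM_EVENT_ESTABLISHED completes the connection ...
 */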
static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	struct rdma_dev_addr *dev_addr;
	int ret;

	dev_addr = &id_priv->id.route.addr.dev_addr;
	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  ib_addr_get_pkey(dev_addr),
				  &qp_attr.pkey_index);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = 0;
	qp_attr.port_num = id_priv->id.port_num;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
					  IB_QP_PKEY_INDEX | IB_QP_PORT);
}
static int cma_init_iw_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;

	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS);
}
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_init_ib_qp(id_priv, qp);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_init_iw_qp(id_priv, qp);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);
void rdma_destroy_qp(struct rdma_cm_id *id)
{
	ib_destroy_qp(id->qp);
}
EXPORT_SYMBOL(rdma_destroy_qp);
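
/*
 * The next three helpers walk a connection's QP through the verbs state
 * machine (INIT -> RTR -> RTS, or to ERR on teardown), using
 * rdma_init_qp_attr() to fill the transport-specific attribute masks.
 * They are deliberately no-ops for ids created without a QP.
 */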
static int cma_modify_qp_rtr(struct rdma_cm_id *id)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	if (!id->qp)
		return 0;

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
}

static int cma_modify_qp_rts(struct rdma_cm_id *id)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	if (!id->qp)
		return 0;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
}

static int cma_modify_qp_err(struct rdma_cm_id *id)
{
	struct ib_qp_attr qp_attr;

	if (!id->qp)
		return 0;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(id->qp, &qp_attr, IB_QP_STATE);
}
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
					 qp_attr_mask);
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
					 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
static inline int cma_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return ZERONET(((struct sockaddr_in *) addr)->sin_addr.s_addr);

	ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
	return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
		ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	return LOOPBACK(((struct sockaddr_in *) addr)->sin_addr.s_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !((struct sockaddr_in *) addr)->sin_port;
}
static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __u16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
{
	switch (ps) {
	case RDMA_PS_SDP:
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
			return -EINVAL;

		*ip_ver	= sdp_get_ip_ver(hdr);
		*port	= ((struct sdp_hh *) hdr)->port;
		*src	= &((struct sdp_hh *) hdr)->src_addr;
		*dst	= &((struct sdp_hh *) hdr)->dst_addr;
		break;
	default:
		if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
			return -EINVAL;

		*ip_ver	= cma_get_ip_ver(hdr);
		*port	= ((struct cma_hdr *) hdr)->port;
		*src	= &((struct cma_hdr *) hdr)->src_addr;
		*dst	= &((struct cma_hdr *) hdr)->dst_addr;
		break;
	}

	if (*ip_ver != 4 && *ip_ver != 6)
		return -EINVAL;
	return 0;
}
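
/*
 * Fill in a child id's source/destination sockaddrs on the passive side:
 * the local address and listening port come from the listening id, the
 * peer's address and port from the received private-data header.
 */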
static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __u16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
{
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	switch (ip_ver) {
	case 4:
		listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
		ip4 = (struct sockaddr_in *) &addr->src_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = dst->ip4.addr;
		ip4->sin_port = listen4->sin_port;

		ip4 = (struct sockaddr_in *) &addr->dst_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = src->ip4.addr;
		ip4->sin_port = port;
		break;
	case 6:
		listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
		ip6 = (struct sockaddr_in6 *) &addr->src_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = dst->ip6;
		ip6->sin6_port = listen6->sin6_port;

		ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = src->ip6;
		ip6->sin6_port = port;
		break;
	default:
		break;
	}
}
static inline int cma_user_data_offset(enum rdma_port_space ps)
{
	switch (ps) {
	case RDMA_PS_SDP:
		return 0;
	default:
		return sizeof(struct cma_hdr);
	}
}
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}

static inline int cma_internal_listen(struct rdma_id_private *id_priv)
{
	return (id_priv->state == CMA_LISTEN) && id_priv->cma_dev &&
	       cma_any_addr(&id_priv->id.route.addr.src_addr);
}
static void cma_destroy_listen(struct rdma_id_private *id_priv)
{
	cma_exch(id_priv, CMA_DESTROYING);

	if (id_priv->cma_dev) {
		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_detach_from_dev(id_priv);
	}
	list_del(&id_priv->listen_list);

	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	kfree(id_priv);
}
static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		cma_destroy_listen(dev_id_priv);
	}
	mutex_unlock(&lock);
}
static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum cma_state state)
{
	switch (state) {
	case CMA_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case CMA_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case CMA_LISTEN:
		if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
		    !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}
static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}
void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum cma_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, CMA_DESTROYING);
	cma_cancel_operation(id_priv, state);

	mutex_lock(&lock);
	if (id_priv->cma_dev) {
		mutex_unlock(&lock);
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
	}
	mutex_unlock(&lock);

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(&id_priv->id);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(&id_priv->id);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(&id_priv->id);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
	if (id_priv->id.ps == RDMA_PS_SDP &&
	    sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
	    SDP_MAJ_VERSION)
		return -EINVAL;

	return 0;
}
static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}
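
/*
 * IB CM callback for connected (non-listening) ids: translate IB CM
 * events into RDMA CM events and forward them to the user.  A non-zero
 * return from the user's handler destroys the id; clearing cm_id.ib
 * first lets the IB CM free its own id when we return non-zero.
 */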
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	atomic_inc(&id_priv->dev_remove);
	if (!cma_comp(id_priv, CMA_CONNECT))
		goto out;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		event.status = cma_verify_rep(id_priv, ib_event->private_data);
		if (event.status)
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(&id_priv->id);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		cma_release_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	cma_release_remove(id_priv);
	return ret;
}
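
/*
 * Build a new child id for an incoming connection request.  The peer's
 * addressing is recovered from the private-data header, the path records
 * from the REQ parameters, and the id is returned already in the
 * CMA_CONNECT state (or NULL on failure).
 */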
static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id,
					  struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	union cma_ip_addr *src, *dst;
	__u16 port;
	u8 ip_ver;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto destroy_id;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
	ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
	ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
	rt->addr.dev_addr.dev_type = RDMA_NODE_IB_CA;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;

destroy_id:
	rdma_destroy_id(id);
err:
	return NULL;
}
static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;

	listen_id = cm_id->context;
	atomic_inc(&listen_id->dev_remove);
	if (!cma_comp(listen_id, CMA_LISTEN)) {
		ret = -ECONNABORTED;
		goto out;
	}

	conn_id = cma_new_id(&listen_id->id, ib_event);
	if (!conn_id) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_inc(&conn_id->dev_remove);
	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret)
		goto release_conn_id;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	offset = cma_user_data_offset(listen_id->id.ps);
	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
			       ib_event->private_data, offset);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (!ret)
		goto out;

	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;

release_conn_id:
	cma_exch(conn_id, CMA_DESTROYING);
	cma_release_remove(conn_id);
	rdma_destroy_id(&conn_id->id);

out:
	cma_release_remove(listen_id);
	return ret;
}
static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64)ps << 16) +
	       be16_to_cpu(((struct sockaddr_in *) addr)->sin_port));
}
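
/*
 * Illustrative example (assumes RDMA_PS_TCP == 0x0106, its value in
 * rdma_cm.h): listening on TCP port 5000 == 0x1388 yields the service ID
 * (0x0106 << 16) + 0x1388 == 0x01061388, stored in big-endian order.
 */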
static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;
	__u32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip4.addr = ip4_addr;
			sdp_mask->dst_addr.ip4.addr = ~0;
		} else {
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = ~0;
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip6 = ip6_addr;
			memset(&sdp_mask->dst_addr.ip6, 0xFF,
			       sizeof sdp_mask->dst_addr.ip6);
		} else {
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	struct sockaddr_in *sin;
	int ret = 0;

	memset(&event, 0, sizeof event);
	atomic_inc(&id_priv->dev_remove);

	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		*sin = iw_event->local_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		*sin = iw_event->remote_addr;
		if (iw_event->status)
			event.event = RDMA_CM_EVENT_REJECTED;
		else
			event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		cma_release_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	cma_release_remove(id_priv);
	return ret;
}
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct sockaddr_in *sin;
	struct net_device *dev = NULL;
	struct rdma_cm_event event;
	int ret;

	listen_id = cm_id->context;
	atomic_inc(&listen_id->dev_remove);
	if (!cma_comp(listen_id, CMA_LISTEN)) {
		ret = -ECONNABORTED;
		goto out;
	}

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	atomic_inc(&conn_id->dev_remove);
	conn_id->state = CMA_CONNECT;

	dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
	if (!dev) {
		ret = -EADDRNOTAVAIL;
		cma_release_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}
	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
	if (ret) {
		cma_release_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret) {
		cma_release_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
	*sin = iw_event->local_addr;
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
	*sin = iw_event->remote_addr;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, CMA_DESTROYING);
		cma_release_remove(conn_id);
		rdma_destroy_id(&conn_id->id);
	}

out:
	if (dev)
		dev_put(dev);
	cma_release_remove(listen_id);
	return ret;
}
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	__be64 svc_id;
	int ret;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib))
		return PTR_ERR(id_priv->cm_id.ib);

	addr = &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr))
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}
static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct sockaddr_in *sin;

	id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
					    iw_conn_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.iw))
		return PTR_ERR(id_priv->cm_id.iw);

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	id_priv->cm_id.iw->local_addr = *sin;

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}
static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}
static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = CMA_ADDR_BOUND;
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size(&id_priv->id.route.addr.src_addr));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		goto err;

	return;
err:
	cma_destroy_listen(dev_id_priv);
}
static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
{
	struct sockaddr_in addr_in;

	memset(&addr_in, 0, sizeof addr_in);
	addr_in.sin_family = af;
	return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
}
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_any(id, AF_INET);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
		return -EINVAL;

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);
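
/*
 * Typical passive-side usage (sketch only; my_handler/my_ctx/local_addr
 * are placeholder names and error handling is omitted):
 *
 *	id = rdma_create_id(my_handler, my_ctx, RDMA_PS_TCP);
 *	rdma_bind_addr(id, (struct sockaddr *) &local_addr);
 *	rdma_listen(id, 10);
 *
 * Each incoming REQ then surfaces as RDMA_CM_EVENT_CONNECT_REQUEST on a
 * new child id; the handler typically calls rdma_create_qp() and
 * rdma_accept() on that child, or rdma_reject() to decline.
 */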
static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = CMA_ROUTE_QUERY;
		work->new_state = CMA_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}
static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_dev_addr *addr = &id_priv->id.route.addr.dev_addr;
	struct ib_sa_path_rec path_rec;

	memset(&path_rec, 0, sizeof path_rec);
	ib_addr_get_sgid(addr, &path_rec.sgid);
	ib_addr_get_dgid(addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
	path_rec.numb_path = 1;

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
				id_priv->id.port_num, &path_rec,
				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
				IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH,
				timeout_ms, GFP_KERNEL,
				cma_query_handler, work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}
static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	atomic_inc(&id_priv->dev_remove);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}
out:
	cma_release_remove(id_priv);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}
int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
	id->route.num_paths = num_paths;
	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);
static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	mutex_lock(&lock);
	if (list_empty(&dev_list)) {
		ret = -ENODEV;
		goto out;
	}
	list_for_each_entry(cma_dev, &dev_list, list)
		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE)
				goto port_found;

	p = 1;
	cma_dev = list_entry(dev_list.next, struct cma_device, list);

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
out:
	mutex_unlock(&lock);
	return ret;
}
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	atomic_inc(&id_priv->dev_remove);

	/*
	 * Grab mutex to block rdma_destroy_id() from removing the device while
	 * we're trying to acquire it.
	 */
	mutex_lock(&lock);
	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
		mutex_unlock(&lock);
		goto out;
	}

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv);
	mutex_unlock(&lock);

	if (status) {
		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else {
		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
		       ip_addr_size(src_addr));
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	}

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		cma_release_remove(id_priv);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	cma_release_remove(id_priv);
	cma_deref_id(id_priv);
}
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	struct sockaddr_in *src_in, *dst_in;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
		src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
		dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
		src_in->sin_family = dst_in->sin_family;
		src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
	}

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = CMA_ADDR_QUERY;
	work->new_state = CMA_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (src_addr && src_addr->sa_family)
		return rdma_bind_addr(id, src_addr);
	else
		return cma_bind_any(id, dst_addr->sa_family);
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
	if (cma_any_addr(dst_addr))
		ret = cma_resolve_loopback(id_priv);
	else
		ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
				      dst_addr, &id->route.addr.dev_addr,
				      timeout_ms, addr_handler, id_priv);
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
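
/*
 * Port-space management: each {port space, port} pair maps through an
 * idr (sdp_ps/tcp_ps) to a rdma_bind_list whose 'owners' hlist holds
 * every id bound to that port.  All binding and release operations run
 * under the global 'lock'.
 */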
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	sin->sin_port = htons(bind_list->port);
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}
static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int port, start, ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	start = snum ? snum : sysctl_local_port_range[0];

	do {
		ret = idr_get_new_above(ps, bind_list, start, &port);
	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));

	if (ret)
		goto err;

	if ((snum && port != snum) ||
	    (!snum && port > sysctl_local_port_range[1])) {
		idr_remove(ps, port);
		ret = -EADDRNOTAVAIL;
		goto err;
	}

	bind_list->ps = ps;
	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
	return 0;
err:
	kfree(bind_list);
	return ret;
}
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_id_private *cur_id;
	struct sockaddr_in *sin, *cur_sin;
	struct rdma_bind_list *bind_list;
	struct hlist_node *node;
	unsigned short snum;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	snum = ntohs(sin->sin_port);
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = idr_find(ps, snum);
	if (!bind_list)
		return cma_alloc_port(ps, id_priv, snum);

	/*
	 * We don't support binding to any address if anyone is bound to
	 * a specific address on the same port.
	 */
	if (cma_any_addr(&id_priv->id.route.addr.src_addr))
		return -EADDRNOTAVAIL;

	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
		if (cma_any_addr(&cur_id->id.route.addr.src_addr))
			return -EADDRNOTAVAIL;

		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
		if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
			return -EADDRINUSE;
	}

	cma_bind_port(bind_list, id_priv);
	return 0;
}
static int cma_get_port(struct rdma_id_private *id_priv)
{
	struct idr *ps;
	int ret;

	switch (id_priv->id.ps) {
	case RDMA_PS_SDP:
		ps = &sdp_ps;
		break;
	case RDMA_PS_TCP:
		ps = &tcp_ps;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	mutex_lock(&lock);
	if (cma_any_port(&id_priv->id.route.addr.src_addr))
		ret = cma_alloc_port(ps, id_priv, 0);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
		return -EINVAL;

	if (!cma_any_addr(addr)) {
		ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		mutex_lock(&lock);
		ret = cma_acquire_dev(id_priv);
		mutex_unlock(&lock);
		if (ret)
			goto err1;
	}

	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (!cma_any_addr(addr)) {
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
		mutex_unlock(&lock);
	}
err1:
	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);
static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
			  struct rdma_route *route)
{
	struct sockaddr_in *src4, *dst4;
	struct cma_hdr *cma_hdr;
	struct sdp_hh *sdp_hdr;

	src4 = (struct sockaddr_in *) &route->addr.src_addr;
	dst4 = (struct sockaddr_in *) &route->addr.dst_addr;

	switch (ps) {
	case RDMA_PS_SDP:
		sdp_hdr = hdr;
		if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
			return -EINVAL;
		sdp_set_ip_ver(sdp_hdr, 4);
		sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		sdp_hdr->port = src4->sin_port;
		break;
	default:
		cma_hdr = hdr;
		cma_hdr->cma_version = CMA_VERSION;
		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
		break;
	}
	return 0;
}
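
/*
 * Note on the wire format: for non-SDP port spaces the connect path
 * reserves cma_user_data_offset() == sizeof(struct cma_hdr) bytes
 * (1 + 1 + 2 + 16 + 16 = 36 with the definitions above) at the front of
 * the REQ private data, so the user's private_data follows this header.
 */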
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv->id.ps);
	req.private_data_len = offset + conn_param->private_data_len;
	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
	if (!private_data)
		return -ENOMEM;

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib)) {
		ret = PTR_ERR(id_priv->cm_id.ib);
		goto out;
	}

	route = &id_priv->id.route;
	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
	if (ret)
		goto out;
	req.private_data = private_data;

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = cma_get_service_id(id_priv->id.ps,
					    &route->addr.dst_addr);
	req.qp_num = id_priv->qp_num;
	req.qp_type = IB_QPT_RC;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = conn_param->retry_count;
	req.rnr_retry_count = conn_param->rnr_retry_count;
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}
static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	struct sockaddr_in* sin;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto out;
	}

	id_priv->cm_id.iw = cm_id;

	sin = (struct sockaddr_in*) &id_priv->id.route.addr.src_addr;
	cm_id->local_addr = *sin;

	sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr;
	cm_id->remote_addr = *sin;

	ret = cma_modify_qp_rtr(&id_priv->id);
	if (ret)
		goto out;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp)
		iw_param.qpn = id_priv->qp_num;
	else
		iw_param.qpn = conn_param->qp_num;
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret && !IS_ERR(cm_id)) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	if (id_priv->id.qp) {
		ret = cma_modify_qp_rtr(&id_priv->id);
		if (ret)
			goto out;

		qp_attr.qp_state = IB_QPS_RTS;
		ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr,
					 &qp_attr_mask);
		if (ret)
			goto out;

		qp_attr.max_rd_atomic = conn_param->initiator_depth;
		ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
		if (ret)
			goto out;
	}

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.target_ack_delay = CMA_CM_RESPONSE_TIMEOUT;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = conn_param->rnr_retry_count;
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}
static int cma_accept_iw(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(&id_priv->id);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp) {
		iw_param.qpn = id_priv->qp_num;
	} else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (conn_param)
			ret = cma_accept_ib(id_priv, conn_param);
		else
			ret = cma_rep_recv(id_priv);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = ib_send_cm_rej(id_priv->cm_id.ib,
				     IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
				     private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_reject);
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp(id_priv, CMA_CONNECT) &&
	    !cma_comp(id_priv, CMA_DISCONNECT))
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_modify_qp_err(id);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
EXPORT_SYMBOL(rdma_disconnect);
static void cma_add_one(struct ib_device *device)
{
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;

	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
	if (!cma_dev)
		return;

	cma_dev->device = device;
	cma_dev->node_guid = device->node_guid;

	init_completion(&cma_dev->comp);
	atomic_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event;
	enum cma_state state;

	/* Record that we want to remove the device */
	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
	if (state == CMA_DESTROYING)
		return 0;

	cma_cancel_operation(id_priv, state);
	wait_event(id_priv->wait_remove, !atomic_read(&id_priv->dev_remove));

	/* Check for destruction from another callback. */
	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
		return 0;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	return id_priv->id.event_handler(&id_priv->id, &event);
}
static void cma_process_remove(struct cma_device *cma_dev)
{
	struct rdma_id_private *id_priv;
	int ret;

	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		id_priv = list_entry(cma_dev->id_list.next,
				     struct rdma_id_private, list);

		if (cma_internal_listen(id_priv)) {
			cma_destroy_listen(id_priv);
			continue;
		}

		list_del_init(&id_priv->list);
		atomic_inc(&id_priv->refcount);
		mutex_unlock(&lock);

		ret = cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);
		if (ret)
			rdma_destroy_id(&id_priv->id);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_deref_dev(cma_dev);
	wait_for_completion(&cma_dev->comp);
}
static void cma_remove_one(struct ib_device *device)
{
	struct cma_device *cma_dev;

	cma_dev = ib_get_client_data(device, &cma_client);
	if (!cma_dev)
		return;

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev);
}
static int cma_init(void)
{
	int ret;

	cma_wq = create_singlethread_workqueue("rdma_cm_wq");
	if (!cma_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);
	rdma_addr_register_client(&addr_client);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;
	return 0;

err:
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	return ret;
}
static void cma_cleanup(void)
{
	ib_unregister_client(&cma_client);
	rdma_addr_unregister_client(&addr_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(cma_wq);
	idr_destroy(&sdp_ps);
	idr_destroy(&tcp_ps);
}
module_init(cma_init);
module_exit(cma_cleanup);