Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index a5a7f96..c179318 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -34,8 +34,6 @@
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- *
- * $Id: ib_verbs.h 1349 2004-12-16 21:09:43Z roland $
  */
 
 #if !defined(IB_VERBS_H)
@@ -93,8 +91,8 @@ enum ib_device_cap_flags {
        IB_DEVICE_RC_RNR_NAK_GEN        = (1<<12),
        IB_DEVICE_SRQ_RESIZE            = (1<<13),
        IB_DEVICE_N_NOTIFY_CQ           = (1<<14),
-       IB_DEVICE_ZERO_STAG             = (1<<15),
-       IB_DEVICE_SEND_W_INV            = (1<<16),
+       IB_DEVICE_LOCAL_DMA_LKEY        = (1<<15),
+       IB_DEVICE_RESERVED              = (1<<16), /* old SEND_W_INV */
        IB_DEVICE_MEM_WINDOW            = (1<<17),
        /*
         * Devices should set IB_DEVICE_UD_IP_SUM if they support
@@ -104,6 +102,9 @@ enum ib_device_cap_flags {
         * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
         */
        IB_DEVICE_UD_IP_CSUM            = (1<<18),
+       IB_DEVICE_UD_TSO                = (1<<19),
+       IB_DEVICE_MEM_MGT_EXTENSIONS    = (1<<21),
+       IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
 };
 
 enum ib_atomic_cap {
@@ -149,6 +150,7 @@ struct ib_device_attr {
        int                     max_srq;
        int                     max_srq_wr;
        int                     max_srq_sge;
+       unsigned int            max_fast_reg_page_list_len;
        u16                     max_pkeys;
        u8                      local_ca_ack_delay;
 };
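
Both additions are discoverable at runtime through ib_query_device(). A minimal sketch of a consumer probing for the memory management extensions before relying on them (the function name is illustrative; only standard verbs calls are used):

    /* Illustrative probe: confirm fast registration support and its limit. */
    static int check_fastreg_support(struct ib_device *device)
    {
            struct ib_device_attr attr;
            int ret;

            ret = ib_query_device(device, &attr);
            if (ret)
                    return ret;
            if (!(attr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
                    return -ENOSYS;

            /* attr.max_fast_reg_page_list_len bounds later fast-reg allocations */
            return 0;
    }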
@@ -225,6 +227,57 @@ static inline int ib_width_enum_to_int(enum ib_port_width width)
        }
 }
 
+struct ib_protocol_stats {
+       /* TBD... */
+};
+
+struct iw_protocol_stats {
+       u64     ipInReceives;
+       u64     ipInHdrErrors;
+       u64     ipInTooBigErrors;
+       u64     ipInNoRoutes;
+       u64     ipInAddrErrors;
+       u64     ipInUnknownProtos;
+       u64     ipInTruncatedPkts;
+       u64     ipInDiscards;
+       u64     ipInDelivers;
+       u64     ipOutForwDatagrams;
+       u64     ipOutRequests;
+       u64     ipOutDiscards;
+       u64     ipOutNoRoutes;
+       u64     ipReasmTimeout;
+       u64     ipReasmReqds;
+       u64     ipReasmOKs;
+       u64     ipReasmFails;
+       u64     ipFragOKs;
+       u64     ipFragFails;
+       u64     ipFragCreates;
+       u64     ipInMcastPkts;
+       u64     ipOutMcastPkts;
+       u64     ipInBcastPkts;
+       u64     ipOutBcastPkts;
+
+       u64     tcpRtoAlgorithm;
+       u64     tcpRtoMin;
+       u64     tcpRtoMax;
+       u64     tcpMaxConn;
+       u64     tcpActiveOpens;
+       u64     tcpPassiveOpens;
+       u64     tcpAttemptFails;
+       u64     tcpEstabResets;
+       u64     tcpCurrEstab;
+       u64     tcpInSegs;
+       u64     tcpOutSegs;
+       u64     tcpRetransSegs;
+       u64     tcpInErrs;
+       u64     tcpOutRsts;
+};
+
+union rdma_protocol_stats {
+       struct ib_protocol_stats        ib;
+       struct iw_protocol_stats        iw;
+};
+
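This union feeds the get_protocol_stats device method added further down in this patch: InfiniBand devices will report through the (still empty) ib member, iWARP devices through the MIB-style iw member. A hedged sketch of how an iWARP provider might wire it up; my_read_hw_counter() and its MY_* constants are hypothetical stand-ins for driver-specific counter reads:

    /* Hypothetical iWARP provider hook filling the iw half of the union. */
    static int my_get_protocol_stats(struct ib_device *ibdev,
                                     union rdma_protocol_stats *stats)
    {
            struct iw_protocol_stats *iw = &stats->iw;

            memset(stats, 0, sizeof *stats);
            iw->tcpInSegs      = my_read_hw_counter(ibdev, MY_TCP_IN_SEGS);
            iw->tcpOutSegs     = my_read_hw_counter(ibdev, MY_TCP_OUT_SEGS);
            iw->tcpRetransSegs = my_read_hw_counter(ibdev, MY_TCP_RETRANS);
            return 0;
    }
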
 struct ib_port_attr {
        enum ib_port_state      state;
        enum ib_mtu             max_mtu;
@@ -335,7 +388,7 @@ enum {
        IB_MULTICAST_QPN = 0xffffff
 };
 
-#define IB_LID_PERMISSIVE      __constant_htons(0xFFFF)
+#define IB_LID_PERMISSIVE      cpu_to_be16(0xFFFF)
 
 enum ib_ah_flags {
        IB_AH_GRH       = 1
@@ -411,6 +464,9 @@ enum ib_wc_opcode {
        IB_WC_COMP_SWAP,
        IB_WC_FETCH_ADD,
        IB_WC_BIND_MW,
+       IB_WC_LSO,
+       IB_WC_LOCAL_INV,
+       IB_WC_FAST_REG_MR,
 /*
  * Set value of IB_WC_RECV so consumers can test if a completion is a
  * receive by testing (opcode & IB_WC_RECV).
@@ -421,7 +477,8 @@ enum ib_wc_opcode {
 
 enum ib_wc_flags {
        IB_WC_GRH               = 1,
-       IB_WC_WITH_IMM          = (1<<1)
+       IB_WC_WITH_IMM          = (1<<1),
+       IB_WC_WITH_INVALIDATE   = (1<<2),
 };
 
 struct ib_wc {
@@ -431,7 +488,10 @@ struct ib_wc {
        u32                     vendor_err;
        u32                     byte_len;
        struct ib_qp           *qp;
-       __be32                  imm_data;
+       union {
+               __be32          imm_data;
+               u32             invalidate_rkey;
+       } ex;
        u32                     src_qp;
        int                     wc_flags;
        u16                     pkey_index;
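
Because imm_data and invalidate_rkey now overlay each other, a consumer must check wc_flags before reading wc->ex. A minimal polling sketch (the two process_*() handlers are hypothetical):

    /* Sketch: tell immediate data apart from a Send with Invalidate. */
    static void drain_cq(struct ib_cq *cq)
    {
            struct ib_wc wc;

            while (ib_poll_cq(cq, 1, &wc) > 0) {
                    if (wc.wc_flags & IB_WC_WITH_IMM)
                            process_imm(be32_to_cpu(wc.ex.imm_data));
                    else if (wc.wc_flags & IB_WC_WITH_INVALIDATE)
                            process_inv_rkey(wc.ex.invalidate_rkey);
            }
    }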
@@ -495,6 +555,11 @@ enum ib_qp_type {
        IB_QPT_RAW_ETY
 };
 
+enum ib_qp_create_flags {
+       IB_QP_CREATE_IPOIB_UD_LSO               = 1 << 0,
+       IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK   = 1 << 1,
+};
+
 struct ib_qp_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
@@ -504,6 +569,7 @@ struct ib_qp_init_attr {
        struct ib_qp_cap        cap;
        enum ib_sig_type        sq_sig_type;
        enum ib_qp_type         qp_type;
+       enum ib_qp_create_flags create_flags;
        u8                      port_num; /* special QP types only */
 };
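
create_flags lets a consumer ask for the new optional behaviors at QP creation time. A sketch of creating a UD QP with multicast loopback blocked, assuming pd, send_cq and recv_cq exist and attr holds the result of ib_query_device():

    /* Sketch: request loopback blocking only if the device advertises it. */
    struct ib_qp_init_attr init_attr = {
            .send_cq     = send_cq,
            .recv_cq     = recv_cq,
            .sq_sig_type = IB_SIGNAL_ALL_WR,
            .qp_type     = IB_QPT_UD,
            .cap         = { .max_send_wr  = 64, .max_recv_wr  = 64,
                             .max_send_sge = 1,  .max_recv_sge = 1 },
    };
    struct ib_qp *qp;

    if (attr.device_cap_flags & IB_DEVICE_BLOCK_MULTICAST_LOOPBACK)
            init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
    qp = ib_create_qp(pd, &init_attr);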
 
@@ -617,7 +683,12 @@ enum ib_wr_opcode {
        IB_WR_SEND_WITH_IMM,
        IB_WR_RDMA_READ,
        IB_WR_ATOMIC_CMP_AND_SWP,
-       IB_WR_ATOMIC_FETCH_AND_ADD
+       IB_WR_ATOMIC_FETCH_AND_ADD,
+       IB_WR_LSO,
+       IB_WR_SEND_WITH_INV,
+       IB_WR_RDMA_READ_WITH_INV,
+       IB_WR_LOCAL_INV,
+       IB_WR_FAST_REG_MR,
 };
 
 enum ib_send_flags {
@@ -634,6 +705,12 @@ struct ib_sge {
        u32     lkey;
 };
 
+struct ib_fast_reg_page_list {
+       struct ib_device       *device;
+       u64                    *page_list;
+       unsigned int            max_page_list_len;
+};
+
 struct ib_send_wr {
        struct ib_send_wr      *next;
        u64                     wr_id;
@@ -641,7 +718,10 @@ struct ib_send_wr {
        int                     num_sge;
        enum ib_wr_opcode       opcode;
        int                     send_flags;
-       __be32                  imm_data;
+       union {
+               __be32          imm_data;
+               u32             invalidate_rkey;
+       } ex;
        union {
                struct {
                        u64     remote_addr;
@@ -655,11 +735,23 @@ struct ib_send_wr {
                } atomic;
                struct {
                        struct ib_ah *ah;
+                       void   *header;
+                       int     hlen;
+                       int     mss;
                        u32     remote_qpn;
                        u32     remote_qkey;
                        u16     pkey_index; /* valid for GSI only */
                        u8      port_num;   /* valid for DR SMPs on switch only */
                } ud;
+               struct {
+                       u64                             iova_start;
+                       struct ib_fast_reg_page_list   *page_list;
+                       unsigned int                    page_shift;
+                       unsigned int                    page_list_len;
+                       u32                             length;
+                       int                             access_flags;
+                       u32                             rkey;
+               } fast_reg;
        } wr;
 };
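
The new ud fields give a device everything it needs for LSO: a header template that is replicated onto every segment, plus the MSS at which to split the gathered payload. A hedged sketch of posting one such send, assuming the QP was created with IB_QP_CREATE_IPOIB_UD_LSO on a device advertising IB_DEVICE_UD_TSO, and that ah, sge, hdr_buf, hdr_len and mss come from the caller:

    /* Sketch: one LSO work request; hardware segments the payload. */
    struct ib_send_wr wr = {
            .opcode     = IB_WR_LSO,
            .sg_list    = &sge,             /* payload to segment */
            .num_sge    = 1,
            .send_flags = IB_SEND_SIGNALED,
    };
    struct ib_send_wr *bad_wr;
    int ret;

    wr.wr.ud.ah          = ah;
    wr.wr.ud.remote_qpn  = remote_qpn;
    wr.wr.ud.remote_qkey = remote_qkey;
    wr.wr.ud.header      = hdr_buf;         /* prepended to each segment */
    wr.wr.ud.hlen        = hdr_len;
    wr.wr.ud.mss         = mss;

    ret = ib_post_send(qp, &wr, &bad_wr);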
 
@@ -730,7 +822,7 @@ struct ib_uobject {
        struct ib_ucontext     *context;        /* associated user context */
        void                   *object;         /* containing object */
        struct list_head        list;           /* link to context's list */
-       u32                     id;             /* index into kernel idr */
+       int                     id;             /* index into kernel idr */
        struct kref             ref;
        struct rw_semaphore     mutex;          /* protects .live */
        int                     live;
@@ -762,7 +854,7 @@ struct ib_cq {
        struct ib_uobject      *uobject;
        ib_comp_handler         comp_handler;
        void                  (*event_handler)(struct ib_event *, void *);
-       void *                  cq_context;
+       void                   *cq_context;
        int                     cqe;
        atomic_t                usecnt; /* count number of work queues */
 };
@@ -868,7 +960,7 @@ struct ib_dma_mapping_ops {
        void            (*sync_single_for_cpu)(struct ib_device *dev,
                                               u64 dma_handle,
                                               size_t size,
-                                              enum dma_data_direction dir);
+                                              enum dma_data_direction dir);
        void            (*sync_single_for_device)(struct ib_device *dev,
                                                  u64 dma_handle,
                                                  size_t size,
@@ -900,12 +992,12 @@ struct ib_device {
        int                          *pkey_tbl_len;
        int                          *gid_tbl_len;
 
-       u32                           flags;
-
        int                           num_comp_vectors;
 
        struct iw_cm_verbs           *iwcm;
 
+       int                        (*get_protocol_stats)(struct ib_device *device,
+                                                        union rdma_protocol_stats *stats);
        int                        (*query_device)(struct ib_device *device,
                                                   struct ib_device_attr *device_attr);
        int                        (*query_port)(struct ib_device *device,
@@ -973,6 +1065,8 @@ struct ib_device {
                                                int comp_vector,
                                                struct ib_ucontext *context,
                                                struct ib_udata *udata);
+       int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
+                                               u16 cq_period);
        int                        (*destroy_cq)(struct ib_cq *cq);
        int                        (*resize_cq)(struct ib_cq *cq, int cqe,
                                                struct ib_udata *udata);
@@ -998,6 +1092,11 @@ struct ib_device {
        int                        (*query_mr)(struct ib_mr *mr,
                                               struct ib_mr_attr *mr_attr);
        int                        (*dereg_mr)(struct ib_mr *mr);
+       struct ib_mr *             (*alloc_fast_reg_mr)(struct ib_pd *pd,
+                                              int max_page_list_len);
+       struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device,
+                                                                  int page_list_len);
+       void                       (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list);
        int                        (*rereg_phys_mr)(struct ib_mr *mr,
                                                    int mr_rereg_mask,
                                                    struct ib_pd *pd,
@@ -1035,7 +1134,7 @@ struct ib_device {
        struct ib_dma_mapping_ops   *dma_ops;
 
        struct module               *owner;
-       struct class_device          class_dev;
+       struct device                dev;
        struct kobject               *ports_parent;
        struct list_head             port_list;
 
@@ -1050,6 +1149,7 @@ struct ib_device {
 
        char                         node_desc[64];
        __be64                       node_guid;
+       u32                          local_dma_lkey;
        u8                           node_type;
        u8                           phys_port_cnt;
 };
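
When IB_DEVICE_LOCAL_DMA_LKEY is set, local_dma_lkey is an L_Key covering all of kernel physical memory, so local buffers need no per-buffer MR. A sketch of building an SGE with it (device, buf and len assumed from context):

    /* Sketch: reference a DMA-mapped kernel buffer via the global lkey. */
    struct ib_sge sge;

    sge.addr   = ib_dma_map_single(device, buf, len, DMA_TO_DEVICE);
    sge.length = len;
    sge.lkey   = device->local_dma_lkey;    /* valid only with the cap flag */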
@@ -1378,6 +1478,15 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
 int ib_resize_cq(struct ib_cq *cq, int cqe);
 
 /**
+ * ib_modify_cq - Modifies moderation params of the CQ
+ * @cq: The CQ to modify.
+ * @cq_count: number of CQEs that will trigger an event
+ * @cq_period: max period of time in usec before triggering an event
+ *
+ */
+int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+
+/**
  * ib_destroy_cq - Destroys the specified CQ.
  * @cq: The CQ to destroy.
  */
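
A typical moderation call for the ib_modify_cq() declared above; the core routes it to the provider's new modify_cq method and returns -ENOSYS when the method is absent, so that case means "no moderation support" rather than a hard failure:

    /* Sketch: one event per 16 completions, or after 128 us at most. */
    err = ib_modify_cq(cq, 16, 128);
    if (err && err != -ENOSYS)
            return err;     /* -ENOSYS: provider lacks CQ moderation */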
@@ -1481,7 +1590,7 @@ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
 {
        if (dev->dma_ops)
                return dev->dma_ops->mapping_error(dev, dma_addr);
-       return dma_mapping_error(dma_addr);
+       return dma_mapping_error(dev->dma_device, dma_addr);
 }
 
 /**
@@ -1517,6 +1626,24 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
                dma_unmap_single(dev->dma_device, addr, size, direction);
 }
 
+static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
+                                         void *cpu_addr, size_t size,
+                                         enum dma_data_direction direction,
+                                         struct dma_attrs *attrs)
+{
+       return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
+                                   direction, attrs);
+}
+
+static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
+                                            u64 addr, size_t size,
+                                            enum dma_data_direction direction,
+                                            struct dma_attrs *attrs)
+{
+       return dma_unmap_single_attrs(dev->dma_device, addr, size,
+                                     direction, attrs);
+}
+
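These wrappers exist so that ULPs can thread DMA attributes (for example DMA_ATTR_WRITE_BARRIER from linux/dma-attrs.h) through to dma_map_single_attrs(); the scatterlist variants later in this patch do the same for dma_map_sg_attrs(). A hedged round-trip sketch, including the mapping check that now goes through the two-argument dma_mapping_error():

    /* Sketch: map with an explicit DMA attribute, then unmap symmetrically. */
    DEFINE_DMA_ATTRS(attrs);
    u64 addr;

    dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
    addr = ib_dma_map_single_attrs(dev, cpu_addr, size, DMA_TO_DEVICE, &attrs);
    if (ib_dma_mapping_error(dev, addr))
            return -ENOMEM;
    /* ... post work requests that reference addr ... */
    ib_dma_unmap_single_attrs(dev, addr, size, DMA_TO_DEVICE, &attrs);
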
 /**
  * ib_dma_map_page - Map a physical page to DMA address
  * @dev: The device for which the dma_addr is to be created
@@ -1586,6 +1713,21 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
                dma_unmap_sg(dev->dma_device, sg, nents, direction);
 }
 
+static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
+                                     struct scatterlist *sg, int nents,
+                                     enum dma_data_direction direction,
+                                     struct dma_attrs *attrs)
+{
+       return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
+}
+
+static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
+                                        struct scatterlist *sg, int nents,
+                                        enum dma_data_direction direction,
+                                        struct dma_attrs *attrs)
+{
+       dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
+}
 /**
  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
  * @dev: The device for which the DMA addresses were created
@@ -1750,6 +1892,54 @@ int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
 int ib_dereg_mr(struct ib_mr *mr);
 
 /**
+ * ib_alloc_fast_reg_mr - Allocates memory region usable with the
+ *   IB_WR_FAST_REG_MR send work request.
+ * @pd: The protection domain associated with the region.
+ * @max_page_list_len: requested max physical buffer list length to be
+ *   used with fast register work requests for this MR.
+ */
+struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);
+
+/**
+ * ib_alloc_fast_reg_page_list - Allocates a page list array
+ * @device: ib device pointer.
+ * @page_list_len: size of the page list array to be allocated.
+ *
+ * This allocates and returns a struct ib_fast_reg_page_list * and a
+ * page_list array that is at least page_list_len in size.  The actual
+ * size is returned in max_page_list_len.  The caller is responsible
+ * for initializing the contents of the page_list array before posting
+ * a send work request with the IB_WR_FAST_REG_MR opcode.
+ *
+ * The page_list array entries must be translated using one of the
+ * ib_dma_*() functions just like the addresses passed to
+ * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
+ * ib_fast_reg_page_list must not be modified by the caller until the
+ * IB_WC_FAST_REG_MR work request completes.
+ */
+struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
+                               struct ib_device *device, int page_list_len);
+
+/**
+ * ib_free_fast_reg_page_list - Deallocates a previously allocated
+ *   page list array.
+ * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
+ */
+void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
+
+/**
+ * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
+ *   R_Key and L_Key.
+ * @mr: struct ib_mr pointer to be updated.
+ * @newkey: new key to be used.
+ */
+static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
+{
+       mr->lkey = (mr->lkey & 0xffffff00) | newkey;
+       mr->rkey = (mr->rkey & 0xffffff00) | newkey;
+}
+
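Putting the pieces together: the MR and page list are allocated once, then each registration cycle fills the page list, advances the key, and posts the work request. A hedged sketch; npages, dma_addrs[], iova, key (a caller-maintained u8), pd, qp and all error handling are assumed from context:

    /* Sketch: one fast registration cycle using the new verbs. */
    struct ib_mr *mr = ib_alloc_fast_reg_mr(pd, npages);
    struct ib_fast_reg_page_list *pl =
            ib_alloc_fast_reg_page_list(pd->device, npages);
    struct ib_send_wr fr_wr, *bad_wr;
    int i, ret;

    for (i = 0; i < npages; i++)
            pl->page_list[i] = dma_addrs[i];        /* ib_dma_*() translated */

    ib_update_fast_reg_key(mr, ++key);              /* fresh key per cycle */

    memset(&fr_wr, 0, sizeof fr_wr);
    fr_wr.opcode                    = IB_WR_FAST_REG_MR;
    fr_wr.wr.fast_reg.iova_start    = iova;
    fr_wr.wr.fast_reg.page_list     = pl;
    fr_wr.wr.fast_reg.page_shift    = PAGE_SHIFT;
    fr_wr.wr.fast_reg.page_list_len = npages;
    fr_wr.wr.fast_reg.length        = npages * PAGE_SIZE;
    fr_wr.wr.fast_reg.access_flags  = IB_ACCESS_LOCAL_WRITE |
                                      IB_ACCESS_REMOTE_READ;
    fr_wr.wr.fast_reg.rkey          = mr->rkey;

    ret = ib_post_send(qp, &fr_wr, &bad_wr);
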
+/**
  * ib_alloc_mw - Allocates a memory window.
  * @pd: The protection domain associated with the memory window.
  */