diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index a1931f0..54af7d6 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -143,15 +143,16 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn,
        int ret = -ENOMEM;
 
        if (recv->r_iwinc == NULL) {
-               if (atomic_read(&rds_iw_allocation) >= rds_iw_sysctl_max_recv_allocation) {
+               if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) {
                        rds_iw_stats_inc(s_iw_rx_alloc_limit);
                        goto out;
                }
                recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab,
                                                 kptr_gfp);
-               if (recv->r_iwinc == NULL)
+               if (recv->r_iwinc == NULL) {
+                       atomic_dec(&rds_iw_allocation);
                        goto out;
-               atomic_inc(&rds_iw_allocation);
+               }
                INIT_LIST_HEAD(&recv->r_iwinc->ii_frags);
                rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr);
        }
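
The first hunk closes a check-then-increment race: with the old atomic_read()/atomic_inc() pair, two CPUs could both pass the limit test and push rds_iw_allocation past rds_iw_sysctl_max_recv_allocation. atomic_add_unless() tests the cap and reserves a slot in one atomic step, and the slot is handed back if the slab allocation then fails. A minimal sketch of the same bounded-counter pattern (the names and cap are illustrative, not from the RDS code):

        #include <linux/atomic.h>
        #include <linux/slab.h>

        static atomic_t nr_objs = ATOMIC_INIT(0);
        #define MAX_OBJS 1024   /* illustrative cap */

        static void *bounded_alloc(gfp_t gfp)
        {
                void *obj;

                /* Reserve a slot and check the cap atomically; there is
                 * no window between the test and the increment. */
                if (!atomic_add_unless(&nr_objs, 1, MAX_OBJS))
                        return NULL;

                obj = kmalloc(128, gfp);
                if (!obj)
                        atomic_dec(&nr_objs);   /* failed: give the slot back */
                return obj;
        }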
@@ -229,8 +230,8 @@ int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
        int ret = 0;
        u32 pos;
 
-       while ((prefill || rds_conn_up(conn))
-                       && rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
+       while ((prefill || rds_conn_up(conn)) &&
+              rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
                if (pos >= ic->i_recv_ring.w_nr) {
                        printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
                                        pos);
@@ -395,10 +396,37 @@ void rds_iw_recv_init_ack(struct rds_iw_connection *ic)
  * room for it beyond the ring size.  Send completion notices its special
  * wr_id and avoids working with the ring in that case.
  */
+#ifndef KERNEL_HAS_ATOMIC64
+static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
+                               int ack_required)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ic->i_ack_lock, flags);
+       ic->i_ack_next = seq;
+       if (ack_required)
+               set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+       spin_unlock_irqrestore(&ic->i_ack_lock, flags);
+}
+
+static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
+{
+       unsigned long flags;
+       u64 seq;
+
+       clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
+
+       spin_lock_irqsave(&ic->i_ack_lock, flags);
+       seq = ic->i_ack_next;
+       spin_unlock_irqrestore(&ic->i_ack_lock, flags);
+
+       return seq;
+}
+#else
 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
                                int ack_required)
 {
-       rds_iw_set_64bit(&ic->i_ack_next, seq);
+       atomic64_set(&ic->i_ack_next, seq);
        if (ack_required) {
                smp_mb__before_clear_bit();
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
@@ -410,8 +438,10 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        smp_mb__after_clear_bit();
 
-       return ic->i_ack_next;
+       return atomic64_read(&ic->i_ack_next);
 }
+#endif
+
 
 static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credits)
 {
@@ -464,6 +494,10 @@ static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credits)
  *  -  i_ack_next, which is the last sequence number we received
  *
  * Potentially, send queue and receive queue handlers can run concurrently.
+ * It would be nice to not have to use a spinlock to synchronize things,
+ * but the one problem that rules this out is that 64bit updates are
+ * not atomic on all platforms. Things would be a lot simpler if
+ * we had atomic64 or maybe cmpxchg64 everywhere.
  *
  * Reconnecting complicates this picture just slightly. When we
  * reconnect, we may be seeing duplicate packets. The peer
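
The comment added here describes a flag-plus-value handshake between the send and receive paths: the producer publishes the sequence number before raising IB_ACK_REQUESTED, and the consumer clears the flag before reading it, so a value posted concurrently is either observed now or re-raises the flag for the next pass. A condensed sketch of that ordering, assuming the atomic64 path (names are illustrative, barriers as in the hunk above):

        #include <linux/atomic.h>
        #include <linux/bitops.h>

        static atomic64_t pending_seq = ATOMIC64_INIT(0);
        static unsigned long ack_flags;
        #define SEQ_PENDING 0   /* illustrative flag bit */

        static void post_ack(u64 seq)
        {
                atomic64_set(&pending_seq, seq);
                smp_mb__before_clear_bit();     /* value visible before the flag */
                set_bit(SEQ_PENDING, &ack_flags);
        }

        static u64 take_ack(void)
        {
                clear_bit(SEQ_PENDING, &ack_flags);
                smp_mb__after_clear_bit();      /* flag cleared before the read */
                return atomic64_read(&pending_seq);
        }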
@@ -491,7 +525,7 @@ void rds_iw_attempt_ack(struct rds_iw_connection *ic)
        }
 
        /* Can we get a send credit? */
-       if (!rds_iw_send_grab_credits(ic, 1, &adv_credits, 0)) {
+       if (!rds_iw_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
                rds_iw_stats_inc(s_iw_tx_throttle);
                clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
                return;
@@ -696,10 +730,10 @@ static void rds_iw_process_recv(struct rds_connection *conn,
                hdr = &iwinc->ii_inc.i_hdr;
                /* We can't just use memcmp here; fragments of a
                 * single message may carry different ACKs */
-               if (hdr->h_sequence != ihdr->h_sequence
-                || hdr->h_len != ihdr->h_len
-                || hdr->h_sport != ihdr->h_sport
-                || hdr->h_dport != ihdr->h_dport) {
+               if (hdr->h_sequence != ihdr->h_sequence ||
+                   hdr->h_len != ihdr->h_len ||
+                   hdr->h_sport != ihdr->h_sport ||
+                   hdr->h_dport != ihdr->h_dport) {
                        rds_iw_conn_error(conn,
                                "fragment header mismatch; forcing reconnect\n");
                        return;
@@ -750,17 +784,22 @@ void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context)
 {
        struct rds_connection *conn = context;
        struct rds_iw_connection *ic = conn->c_transport_data;
-       struct ib_wc wc;
-       struct rds_iw_ack_state state = { 0, };
-       struct rds_iw_recv_work *recv;
 
        rdsdebug("conn %p cq %p\n", conn, cq);
 
        rds_iw_stats_inc(s_iw_rx_cq_call);
 
-       ib_req_notify_cq(cq, IB_CQ_SOLICITED);
+       tasklet_schedule(&ic->i_recv_tasklet);
+}
 
-       while (ib_poll_cq(cq, 1, &wc) > 0) {
+static inline void rds_poll_cq(struct rds_iw_connection *ic,
+                              struct rds_iw_ack_state *state)
+{
+       struct rds_connection *conn = ic->conn;
+       struct ib_wc wc;
+       struct rds_iw_recv_work *recv;
+
+       while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
                rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
                         (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
                         be32_to_cpu(wc.ex.imm_data));
@@ -778,7 +817,7 @@ void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context)
                if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
                        /* We expect errors as the qp is drained during shutdown */
                        if (wc.status == IB_WC_SUCCESS) {
-                               rds_iw_process_recv(conn, recv, wc.byte_len, &state);
+                               rds_iw_process_recv(conn, recv, wc.byte_len, state);
                        } else {
                                rds_iw_conn_error(conn, "recv completion on "
                                       "%pI4 had status %u, disconnecting and "
@@ -789,6 +828,17 @@ void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context)
 
                rds_iw_ring_free(&ic->i_recv_ring, 1);
        }
+}
+
+void rds_iw_recv_tasklet_fn(unsigned long data)
+{
+       struct rds_iw_connection *ic = (struct rds_iw_connection *) data;
+       struct rds_connection *conn = ic->conn;
+       struct rds_iw_ack_state state = { 0, };
+
+       rds_poll_cq(ic, &state);
+       ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
+       rds_poll_cq(ic, &state);
 
        if (state.ack_next_valid)
                rds_iw_set_ack(ic, state.ack_next, state.ack_required);
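
The final hunk moves CQ draining out of the completion interrupt into a tasklet and uses the classic drain/arm/drain sequence: poll until the CQ is empty, re-arm the completion notification, then poll once more. The second drain matters because a completion that lands between the last empty poll and ib_req_notify_cq() would raise no interrupt and sit unprocessed until the next event. A skeleton of the pattern, stripped of the RDS-specific work:

        #include <rdma/ib_verbs.h>

        static void drain_cq(struct ib_cq *cq)
        {
                struct ib_wc wc;

                while (ib_poll_cq(cq, 1, &wc) > 0) {
                        /* handle the work completion here */
                }
        }

        static void recv_tasklet_fn(unsigned long data)
        {
                struct ib_cq *cq = (struct ib_cq *)data;

                drain_cq(cq);                           /* drain what is already queued */
                ib_req_notify_cq(cq, IB_CQ_SOLICITED);  /* re-arm the interrupt */
                drain_cq(cq);                           /* close the arm/poll race */
        }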