sctp: Fix a race between ICMP protocol unreachable and connect()
[safe/jmp/linux-2.6] / net / sctp / transport.c
index f4938f6..4a36803 100644 (file)
@@ -48,6 +48,7 @@
  * be incorporated into the next SCTP release.
  */
 
+#include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/random.h>
 #include <net/sctp/sctp.h>
@@ -74,14 +75,15 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
         * given destination transport address, set RTO to the protocol
         * parameter 'RTO.Initial'.
         */
-       peer->last_rto = peer->rto = msecs_to_jiffies(sctp_rto_initial);
+       peer->rto = msecs_to_jiffies(sctp_rto_initial);
        peer->rtt = 0;
        peer->rttvar = 0;
        peer->srtt = 0;
        peer->rto_pending = 0;
+       peer->hb_sent = 0;
+       peer->fast_recovery = 0;
 
        peer->last_time_heard = jiffies;
-       peer->last_time_used = jiffies;
        peer->last_time_ecne_reduced = jiffies;
 
        peer->init_sent_count = 0;
@@ -99,10 +101,15 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
        INIT_LIST_HEAD(&peer->send_ready);
        INIT_LIST_HEAD(&peer->transports);
 
+       peer->T3_rtx_timer.expires = 0;
+       peer->hb_timer.expires = 0;
+
        setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event,
                        (unsigned long)peer);
        setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
                        (unsigned long)peer);
+       setup_timer(&peer->proto_unreach_timer,
+                   sctp_generate_proto_unreach_event, (unsigned long)peer);
 
        /* Initialize the 64-bit random nonce sent with heartbeat. */
        get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));
@@ -190,7 +197,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
 /* Start T3_rtx timer if it is not already running and update the heartbeat
  * timer.  This routine is called every time a DATA chunk is sent.
  */
-void sctp_transport_reset_timers(struct sctp_transport *transport)
+void sctp_transport_reset_timers(struct sctp_transport *transport, int force)
 {
        /* RFC 2960 6.3.2 Retransmission Timer Rules
         *
@@ -200,7 +207,7 @@ void sctp_transport_reset_timers(struct sctp_transport *transport)
         * address.
         */
 
-       if (!timer_pending(&transport->T3_rtx_timer))
+       if (force || !timer_pending(&transport->T3_rtx_timer))
                if (!mod_timer(&transport->T3_rtx_timer,
                               jiffies + transport->rto))
                        sctp_transport_hold(transport);
@@ -291,7 +298,7 @@ void sctp_transport_route(struct sctp_transport *transport,
        if (saddr)
                memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
        else
-               af->get_saddr(asoc, dst, daddr, &transport->saddr);
+               af->get_saddr(opt, asoc, dst, daddr, &transport->saddr);
 
        transport->dst = dst;
        if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) {
@@ -303,7 +310,8 @@ void sctp_transport_route(struct sctp_transport *transport,
                /* Initialize sk->sk_rcv_saddr, if the transport is the
                 * association's active path for getsockname().
                 */
-               if (asoc && (transport == asoc->peer.active_path))
+               if (asoc && (!asoc->peer.primary_path ||
+                               (transport == asoc->peer.active_path)))
                        opt->pf->af->to_sk_saddr(&transport->saddr,
                                                 asoc->base.sk);
        } else
@@ -380,7 +388,6 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
                tp->rto = tp->asoc->rto_max;
 
        tp->rtt = rtt;
-       tp->last_rto = tp->rto;
 
        /* Reset rto_pending so that a new RTT measurement is started when a
         * new data chunk is sent.
@@ -403,11 +410,16 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
        cwnd = transport->cwnd;
        flight_size = transport->flight_size;
 
+       /* See if we need to exit Fast Recovery first */
+       if (transport->fast_recovery &&
+           TSN_lte(transport->fast_recovery_exit, sack_ctsn))
+               transport->fast_recovery = 0;
+
        /* The appropriate cwnd increase algorithm is performed if, and only
-        * if the cumulative TSN has advanced and the congestion window is
+        * if the cumulative TSN would advance and the congestion window is
         * being fully utilized.
         */
-       if ((transport->asoc->ctsn_ack_point >= sack_ctsn) ||
+       if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) ||
            (flight_size < cwnd))
                return;
 
@@ -416,17 +428,23 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport,
        pmtu = transport->asoc->pathmtu;
 
        if (cwnd <= ssthresh) {
-               /* RFC 2960 7.2.1, sctpimpguide-05 2.14.2 When cwnd is less
-                * than or equal to ssthresh an SCTP endpoint MUST use the
-                * slow start algorithm to increase cwnd only if the current
-                * congestion window is being fully utilized and an incoming
-                * SACK advances the Cumulative TSN Ack Point. Only when these
-                * two conditions are met can the cwnd be increased otherwise
-                * the cwnd MUST not be increased. If these conditions are met
-                * then cwnd MUST be increased by at most the lesser of
-                * 1) the total size of the previously outstanding DATA
-                * chunk(s) acknowledged, and 2) the destination's path MTU.
+               /* RFC 4960 7.2.1
+                * o  When cwnd is less than or equal to ssthresh, an SCTP
+                *    endpoint MUST use the slow-start algorithm to increase
+                *    cwnd only if the current congestion window is being fully
+                *    utilized, an incoming SACK advances the Cumulative TSN
+                *    Ack Point, and the data sender is not in Fast Recovery.
+                *    Only when these three conditions are met can the cwnd be
+                *    increased; otherwise, the cwnd MUST not be increased.
+                *    If these conditions are met, then cwnd MUST be increased
+                *    by, at most, the lesser of 1) the total size of the
+                *    previously outstanding DATA chunk(s) acknowledged, and
+                *    2) the destination's path MTU.  This upper bound protects
+                *    against the ACK-Splitting attack outlined in [SAVAGE99].
                 */
+               if (transport->fast_recovery)
+                       return;
+
                if (bytes_acked > pmtu)
                        cwnd += pmtu;
                else
@@ -487,6 +505,9 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
                transport->ssthresh = max(transport->cwnd/2,
                                          4*transport->asoc->pathmtu);
                transport->cwnd = transport->asoc->pathmtu;
+
+               /* T3-rtx also clears fast recovery on the transport */
+               transport->fast_recovery = 0;
                break;
 
        case SCTP_LOWER_CWND_FAST_RTX:
@@ -502,6 +523,13 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
                 *      cwnd = ssthresh
                 *      partial_bytes_acked = 0
                 */
+               if (transport->fast_recovery)
+                       return;
+
+               /* Mark Fast recovery */
+               transport->fast_recovery = 1;
+               transport->fast_recovery_exit = transport->asoc->next_tsn - 1;
+
                transport->ssthresh = max(transport->cwnd/2,
                                          4*transport->asoc->pathmtu);
                transport->cwnd = transport->ssthresh;
@@ -520,8 +548,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
                 * congestion indications more than once every window of
                 * data (or more loosely more than once every round-trip time).
                 */
-               if ((jiffies - transport->last_time_ecne_reduced) >
-                   transport->rtt) {
+               if (time_after(jiffies, transport->last_time_ecne_reduced +
+                                       transport->rtt)) {
                        transport->ssthresh = max(transport->cwnd/2,
                                                  4*transport->asoc->pathmtu);
                        transport->cwnd = transport->ssthresh;
@@ -538,9 +566,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
                 * to be done every RTO interval, we do it every hearbeat
                 * interval.
                 */
-               if ((jiffies - transport->last_time_used) > transport->rto)
-                       transport->cwnd = max(transport->cwnd/2,
-                                                4*transport->asoc->pathmtu);
+               transport->cwnd = max(transport->cwnd/2,
+                                        4*transport->asoc->pathmtu);
                break;
        }
 
@@ -551,6 +578,43 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
                          transport->cwnd, transport->ssthresh);
 }
 
+/* Apply Max.Burst limit to the congestion window:
+ * sctpimpguide-05 2.14.2
+ * D) When the time comes for the sender to
+ * transmit new DATA chunks, the protocol parameter Max.Burst MUST
+ * first be applied to limit how many new DATA chunks may be sent.
+ * The limit is applied by adjusting cwnd as follows:
+ *     if ((flightsize+ Max.Burst * MTU) < cwnd)
+ *             cwnd = flightsize + Max.Burst * MTU
+ */
+
+void sctp_transport_burst_limited(struct sctp_transport *t)
+{
+       struct sctp_association *asoc = t->asoc;
+       u32 old_cwnd = t->cwnd;
+       u32 max_burst_bytes;
+
+       if (t->burst_limited)
+               return;
+
+       max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
+       if (max_burst_bytes < old_cwnd) {
+               t->cwnd = max_burst_bytes;
+               t->burst_limited = old_cwnd;
+       }
+}
+
+/* Restore the old cwnd congestion window, after the burst had its
+ * desired effect.
+ */
+void sctp_transport_burst_reset(struct sctp_transport *t)
+{
+       if (t->burst_limited) {
+               t->cwnd = t->burst_limited;
+               t->burst_limited = 0;
+       }
+}
+
 /* What is the next timeout value for this transport? */
 unsigned long sctp_transport_timeout(struct sctp_transport *t)
 {
@@ -573,8 +637,9 @@ void sctp_transport_reset(struct sctp_transport *t)
         * (see Section 6.2.1)
         */
        t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
+       t->burst_limited = 0;
        t->ssthresh = asoc->peer.i.a_rwnd;
-       t->last_rto = t->rto = asoc->rto_initial;
+       t->rto = asoc->rto_initial;
        t->rtt = 0;
        t->srtt = 0;
        t->rttvar = 0;
@@ -586,6 +651,8 @@ void sctp_transport_reset(struct sctp_transport *t)
        t->flight_size = 0;
        t->error_count = 0;
        t->rto_pending = 0;
+       t->hb_sent = 0;
+       t->fast_recovery = 0;
 
        /* Initialize the state information for SFR-CACC */
        t->cacc.changeover_active = 0;