+static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
+ u32 start_seq, u32 end_seq)
+{
+ /* Too far in future, or reversed (interpretation is ambiguous) */
+ if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
+ return 0;
+
+ /* Nasty start_seq wrap-around check (see comments above) */
+ if (!before(start_seq, tp->snd_nxt))
+ return 0;
+
+ /* In outstanding window? ...This is valid exit for D-SACKs too.
+ * start_seq == snd_una is nonsensical (see comments above)
+ */
+ if (after(start_seq, tp->snd_una))
+ return 1;
+
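+ /* start_seq is at or below snd_una; from here on only a D-SACK,
+ * roughly bounded below by undo_marker, can still be considered valid.
+ */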
+ if (!is_dsack || !tp->undo_marker)
+ return 0;
+
+ /* ...Then it's D-SACK, and must reside below snd_una completely */
+ if (after(end_seq, tp->snd_una))
+ return 0;
+
+ if (!before(start_seq, tp->undo_marker))
+ return 1;
+
+ /* Too old */
+ if (!after(end_seq, tp->undo_marker))
+ return 0;
+
+ /* Undo_marker boundary crossing (overestimates a lot). Known already:
+ * start_seq < undo_marker and end_seq >= undo_marker.
+ */
+ return !before(start_seq, end_seq - tp->max_window);
+}
+
+/* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
+ * Event "C". Later note: FACK people cheated me again 8), we have to account
+ * for reordering! Ugly, but should help.
+ *
+ * Search retransmitted skbs from write_queue that were sent when snd_nxt was
+ * less than what is now known to be received by the other end (derived from
+ * highest SACK block). Also calculate the lowest snd_nxt among the remaining
+ * retransmitted skbs to avoid some costly processing per ACK.
+ */
+static void tcp_mark_lost_retrans(struct sock *sk)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *skb;
+ int cnt = 0;
+ u32 new_low_seq = tp->snd_nxt;
+ u32 received_upto = tcp_highest_sack_seq(tp);
+
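+ /* Nothing to do unless FACK is in use, retransmissions are still
+ * outstanding, the highest SACKed sequence advanced past
+ * lost_retrans_low and we are in the Recovery state.
+ */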
+ if (!tcp_is_fack(tp) || !tp->retrans_out ||
+ !after(received_upto, tp->lost_retrans_low) ||
+ icsk->icsk_ca_state != TCP_CA_Recovery)
+ return;
+
+ tcp_for_write_queue(skb, sk) {
+ u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;
+
+ if (skb == tcp_send_head(sk))
+ break;
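+ /* All retransmitted data has been inspected already */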
+ if (cnt == tp->retrans_out)
+ break;
+ if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
+ continue;
+
+ if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS))
+ continue;
+
+ /* TODO: We would like to get rid of the tcp_is_fack(tp)-only
+ * constraint here (see above), but figuring out that at
+ * least tp->reordering SACK blocks reside between ack_seq
+ * and received_upto is not an easy task to do cheaply with
+ * the available data structures.
+ *
+ * Whether FACK should check here for tp->reordering segs
+ * in between could be argued either way (it would be
+ * rather simple to implement, as we could count fack_count
+ * during the walk and do tp->fackets_out - fack_count).
+ */
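+ /* ack_seq records snd_nxt at the time this skb was retransmitted
+ * (see the comment above the function); a SACK for data beyond it
+ * means the retransmission itself has most likely been lost.
+ */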
+ if (after(received_upto, ack_seq)) {
+ TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
+ tp->retrans_out -= tcp_skb_pcount(skb);
+
+ tcp_skb_mark_lost_uncond_verify(tp, skb);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
+ } else {
+ if (before(ack_seq, new_low_seq))
+ new_low_seq = ack_seq;
+ cnt += tcp_skb_pcount(skb);
+ }
+ }
+
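+ /* Remember the lowest send-time snd_nxt among the skbs still marked
+ * as retransmitted, so that later ACKs can take the early exit above
+ * until the SACKed data advances past it.
+ */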
+ if (tp->retrans_out)
+ tp->lost_retrans_low = new_low_seq;
+}
+
+static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,