diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 8c125ff..945b4d5 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -40,16 +40,10 @@ DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;
 
 EXPORT_SYMBOL_GPL(dccp_statistics);
 
-atomic_t dccp_orphan_count = ATOMIC_INIT(0);
-
+struct percpu_counter dccp_orphan_count;
 EXPORT_SYMBOL_GPL(dccp_orphan_count);
 
-struct inet_hashinfo __cacheline_aligned dccp_hashinfo = {
-       .lhash_lock     = RW_LOCK_UNLOCKED,
-       .lhash_users    = ATOMIC_INIT(0),
-       .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(dccp_hashinfo.lhash_wait),
-};
-
+struct inet_hashinfo dccp_hashinfo;
 EXPORT_SYMBOL_GPL(dccp_hashinfo);
 
 /* the maximum queue length for tx in packets. 0 is no limit */
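
The atomic_t orphan counter becomes a struct percpu_counter, and dccp_hashinfo loses its static RW_LOCK_UNLOCKED initialiser in favour of runtime setup (see the inet_hashinfo_init() call added to dccp_init() below). Unlike an atomic_t, a percpu_counter allocates per-CPU state, so it needs explicit init and destroy calls. A minimal sketch of that lifecycle, using a hypothetical counter name and the two-argument percpu_counter_init() form this patch relies on:

#include <linux/module.h>
#include <linux/types.h>
#include <linux/percpu_counter.h>

static struct percpu_counter example_count;	/* hypothetical counter */

static int __init example_init(void)
{
	/* allocates the per-CPU deltas; can fail, unlike ATOMIC_INIT() */
	return percpu_counter_init(&example_count, 0);
}

static void __exit example_exit(void)
{
	percpu_counter_destroy(&example_count);
}

/* fast paths normally touch only the local CPU's delta */
static void example_adjust(bool up)
{
	if (up)
		percpu_counter_inc(&example_count);
	else
		percpu_counter_dec(&example_count);
}

module_init(example_init);
module_exit(example_exit);
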
@@ -180,6 +174,8 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
        struct dccp_sock *dp = dccp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
 
+       dccp_minisock_init(&dp->dccps_minisock);
+
        icsk->icsk_rto          = DCCP_TIMEOUT_INIT;
        icsk->icsk_syn_retries  = sysctl_dccp_request_retries;
        sk->sk_state            = DCCP_CLOSED;
@@ -189,6 +185,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
        dp->dccps_rate_last     = jiffies;
        dp->dccps_role          = DCCP_ROLE_UNDEFINED;
        dp->dccps_service       = DCCP_SERVICE_CODE_IS_ABSENT;
+       dp->dccps_l_ack_ratio   = dp->dccps_r_ack_ratio = 1;
 
        dccp_init_xmit_timers(sk);
 
@@ -735,7 +732,7 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                goto out_discard;
 
        skb_queue_tail(&sk->sk_write_queue, skb);
-       dccp_write_xmit(sk);
+       dccp_write_xmit(sk,0);
 out_release:
        release_sock(sk);
        return rc ? : len;
@@ -958,29 +955,15 @@ void dccp_close(struct sock *sk, long timeout)
                /* Check zero linger _after_ checking for unread data. */
                sk->sk_prot->disconnect(sk, 0);
        } else if (sk->sk_state != DCCP_CLOSED) {
-               /*
-                * Normal connection termination. May need to wait if there are
-                * still packets in the TX queue that are delayed by the CCID.
-                */
-               dccp_flush_write_queue(sk, &timeout);
                dccp_terminate_connection(sk);
        }
 
-       /*
-        * Flush write queue. This may be necessary in several cases:
-        * - we have been closed by the peer but still have application data;
-        * - abortive termination (unread data or zero linger time),
-        * - normal termination but queue could not be flushed within time limit
-        */
-       __skb_queue_purge(&sk->sk_write_queue);
-
        sk_stream_wait_close(sk, timeout);
 
 adjudge_to_death:
        state = sk->sk_state;
        sock_hold(sk);
        sock_orphan(sk);
-       atomic_inc(sk->sk_prot->orphan_count);
 
        /*
         * It is the last release_sock in its life. It will remove backlog.
@@ -994,6 +977,8 @@ adjudge_to_death:
        bh_lock_sock(sk);
        WARN_ON(sock_owned_by_user(sk));
 
+       percpu_counter_inc(sk->sk_prot->orphan_count);
+
        /* Have we already been destroyed by a softirq or backlog? */
        if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
                goto out;
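
Note that the orphan-count increment moves from right after sock_orphan() to after bh_lock_sock(), presumably so that the percpu counter, whose internal lock is not BH-safe, is only ever updated with bottom halves disabled, consistent with the softirq-side update sites. Reading such a counter back is a separate concern; a hedged sketch with made-up helper names, contrasting the cheap approximate read with the exact per-CPU sum:

#include <linux/types.h>
#include <linux/percpu_counter.h>

/* Hypothetical readers of an orphan counter: percpu_counter_read_positive()
 * returns the cheap, possibly stale shared value, while
 * percpu_counter_sum_positive() folds in every CPU's pending delta. */
static s64 example_orphans_estimate(struct percpu_counter *orphans)
{
	return percpu_counter_read_positive(orphans);
}

static s64 example_orphans_exact(struct percpu_counter *orphans)
{
	return percpu_counter_sum_positive(orphans);
}
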
@@ -1044,17 +1029,21 @@ static int __init dccp_init(void)
 {
        unsigned long goal;
        int ehash_order, bhash_order, i;
-       int rc = -ENOBUFS;
+       int rc;
 
        BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
                     FIELD_SIZEOF(struct sk_buff, cb));
-
+       rc = percpu_counter_init(&dccp_orphan_count, 0);
+       if (rc)
+               goto out;
+       rc = -ENOBUFS;
+       inet_hashinfo_init(&dccp_hashinfo);
        dccp_hashinfo.bind_bucket_cachep =
                kmem_cache_create("dccp_bind_bucket",
                                  sizeof(struct inet_bind_bucket), 0,
                                  SLAB_HWCACHE_ALIGN, NULL);
        if (!dccp_hashinfo.bind_bucket_cachep)
-               goto out;
+               goto out_free_percpu;
 
        /*
         * Size and allocate the main established and bind bucket
@@ -1088,8 +1077,8 @@ static int __init dccp_init(void)
        }
 
        for (i = 0; i < dccp_hashinfo.ehash_size; i++) {
-               INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
-               INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].twchain);
+               INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);
+               INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i);
        }
 
        if (inet_ehash_locks_alloc(&dccp_hashinfo))
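
The established-hash chains switch from plain hlist heads to hlist_nulls heads whose "nulls" value encodes the chain index i. Lockless RCU lookups use that value to detect that they walked onto a different chain (an entry freed and recycled under RCU) and restart. A sketch of that lookup idiom, modelled on the established-table lookups of this era; the match predicate is an assumed helper, and real lookups additionally take a reference and re-validate the entry:

#include <linux/rculist_nulls.h>
#include <net/sock.h>
#include <net/inet_hashtables.h>

static bool example_match(const struct sock *sk);	/* assumed 4-tuple check, definition omitted */

/* Caller holds rcu_read_lock().  'slot' is the index that was stored as
 * the nulls value via INIT_HLIST_NULLS_HEAD(&head->chain, slot). */
static struct sock *example_lookup(struct inet_ehash_bucket *head,
				   unsigned int slot)
{
	struct sock *sk;
	const struct hlist_nulls_node *node;
begin:
	hlist_nulls_for_each_entry_rcu(sk, node, &head->chain, sk_nulls_node) {
		if (example_match(sk))
			return sk;
	}
	/* A different terminating nulls value means an entry we traversed
	 * was moved to another chain; the walk is inconclusive, so retry. */
	if (get_nulls_value(node) != slot)
		goto begin;
	return NULL;
}
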
@@ -1129,9 +1118,15 @@ static int __init dccp_init(void)
        if (rc)
                goto out_ackvec_exit;
 
+       rc = ccid_initialize_builtins();
+       if (rc)
+               goto out_sysctl_exit;
+
        dccp_timestamping_init();
 out:
        return rc;
+out_sysctl_exit:
+       dccp_sysctl_exit();
 out_ackvec_exit:
        dccp_ackvec_exit();
 out_free_dccp_mib:
@@ -1147,11 +1142,14 @@ out_free_dccp_ehash:
 out_free_bind_bucket_cachep:
        kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
        dccp_hashinfo.bind_bucket_cachep = NULL;
+out_free_percpu:
+       percpu_counter_destroy(&dccp_orphan_count);
        goto out;
 }
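
The new out_sysctl_exit and out_free_percpu labels extend dccp_init()'s existing unwind chain: each failure path jumps to the label that tears down exactly what has been set up so far, in reverse order, and finishes with goto out. A minimal sketch of that pattern with illustrative resource names (the helpers are assumptions, not real APIs):

#include <linux/init.h>
#include <linux/percpu_counter.h>

static struct percpu_counter example_counter;

static int example_setup_hashes(void);		/* assumed helper */
static void example_teardown_hashes(void);	/* assumed helper */
static int example_register_protocol(void);	/* assumed helper */

static int __init example_module_init(void)
{
	int rc;

	rc = percpu_counter_init(&example_counter, 0);
	if (rc)
		goto out;

	rc = example_setup_hashes();
	if (rc)
		goto out_free_percpu;

	rc = example_register_protocol();
	if (rc)
		goto out_teardown_hashes;
out:
	return rc;

out_teardown_hashes:
	example_teardown_hashes();
out_free_percpu:
	percpu_counter_destroy(&example_counter);
	goto out;
}
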
 
 static void __exit dccp_fini(void)
 {
+       ccid_cleanup_builtins();
        dccp_mib_exit();
        free_pages((unsigned long)dccp_hashinfo.bhash,
                   get_order(dccp_hashinfo.bhash_size *