net/ipv4/inet_timewait_sock.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>


/**
 *	inet_twsk_unhash - unhash a timewait socket from established hash
 *	@tw: timewait socket
 *
 *	unhash a timewait socket from established hash, if hashed.
 *	ehash lock must be held by caller.
 *	Returns 1 if caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_unhash(struct inet_timewait_sock *tw)
{
	if (hlist_nulls_unhashed(&tw->tw_node))
		return 0;

	hlist_nulls_del_rcu(&tw->tw_node);
	sk_nulls_node_init(&tw->tw_node);
	/*
	 * We cannot call inet_twsk_put() ourselves under the lock,
	 * the caller must call it for us.
	 */
	return 1;
}

/**
 *	inet_twsk_bind_unhash - unhash a timewait socket from bind hash
 *	@tw: timewait socket
 *	@hashinfo: hashinfo pointer
 *
 *	unhash a timewait socket from bind hash, if hashed.
 *	bind hash lock must be held by caller.
 *	Returns 1 if caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
			  struct inet_hashinfo *hashinfo)
{
	struct inet_bind_bucket *tb = tw->tw_tb;

	if (!tb)
		return 0;

	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	/*
	 * We cannot call inet_twsk_put() ourselves under the lock,
	 * the caller must call it for us.
	 */
	return 1;
}
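
/*
 * Usage sketch (an illustration only): both unhash helpers defer the
 * refcount drop to the caller, so that inet_twsk_put() is never run
 * while a hash lock is held. A caller follows this pattern (it is
 * exactly what __inet_twsk_kill() below does):
 *
 *	spin_lock(lock);
 *	refcnt = inet_twsk_unhash(tw);
 *	spin_unlock(lock);
 *	while (refcnt--)
 *		inet_twsk_put(tw);
 */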

/* Must be called with locally disabled BHs. */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
			     struct inet_hashinfo *hashinfo)
{
	struct inet_bind_hashbucket *bhead;
	int refcnt;
	/* Unlink from established hashes. */
	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

	spin_lock(lock);
	refcnt = inet_twsk_unhash(tw);
	spin_unlock(lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
			hashinfo->bhash_size)];

	spin_lock(&bhead->lock);
	refcnt += inet_twsk_bind_unhash(tw, hashinfo);
	spin_unlock(&bhead->lock);

#ifdef SOCK_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
		       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
	}
#endif
	while (refcnt) {
		inet_twsk_put(tw);
		refcnt--;
	}
}

static noinline void inet_twsk_free(struct inet_timewait_sock *tw)
{
	struct module *owner = tw->tw_prot->owner;
	twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
	pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
	release_net(twsk_net(tw));
	kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
	module_put(owner);
}

void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (atomic_dec_and_test(&tw->tw_refcnt))
		inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);
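
/*
 * Design note: inet_twsk_free() is the slow path and is marked noinline,
 * presumably to keep the common inet_twsk_put() fast path small. The free
 * path releases everything inet_twsk_alloc() acquired: it runs the
 * protocol twsk destructor, drops the netns reference taken via
 * hold_net() and the module reference taken via __module_get().
 */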

/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead;
	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note that any socket with inet->num != 0 MUST be bound in
	   the binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
			hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	WARN_ON(!icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	spin_lock(lock);

	/*
	 * Step 2: Hash TW into the TIMEWAIT chain.
	 * This must be done before removing sk from the established chain,
	 * because readers are lockless and search the established chain first.
	 */
	inet_twsk_add_node_rcu(tw, &ehead->twchain);

	/* Step 3: Remove SK from the established hash. */
	if (__sk_nulls_del_node_init_rcu(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	/*
	 * Notes :
	 * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
	 * - We add one reference for the bhash link
	 * - We add one reference for the ehash link
	 * - We want this refcnt update done before allowing other
	 *   threads to find this tw in ehash chain.
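	 * - The third reference added below appears to be the caller's:
	 *   e.g. tcp_time_wait() drops it with inet_twsk_put() once the
	 *   timewait socket has been scheduled.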
	 */
	atomic_add(1 + 1 + 1, &tw->tw_refcnt);

	spin_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
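
/*
 * Usage sketch (an illustration, modelled on tcp_time_wait(); the real
 * caller differs in its details). A protocol moving a closing socket
 * into TIME_WAIT roughly does:
 *
 *	struct inet_timewait_sock *tw;
 *
 *	tw = inet_twsk_alloc(sk, TCP_TIME_WAIT);
 *	if (tw != NULL) {
 *		... copy protocol-private state into tw ...
 *		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 *		inet_twsk_schedule(tw, &tcp_death_row, timeo,
 *				   TCP_TIMEWAIT_LEN);
 *		inet_twsk_put(tw);	/- drop the caller's reference -/
 *	}
 */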

struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
	struct inet_timewait_sock *tw =
		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
				 GFP_ATOMIC);
	if (tw != NULL) {
		const struct inet_sock *inet = inet_sk(sk);

		kmemcheck_annotate_bitfield(tw, flags);

		/* Give us an identity. */
		tw->tw_daddr        = inet->inet_daddr;
		tw->tw_rcv_saddr    = inet->inet_rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_num          = inet->inet_num;
		tw->tw_state        = TCP_TIME_WAIT;
		tw->tw_substate     = state;
		tw->tw_sport        = inet->inet_sport;
		tw->tw_dport        = inet->inet_dport;
		tw->tw_family       = sk->sk_family;
		tw->tw_reuse        = sk->sk_reuse;
		tw->tw_hash         = sk->sk_hash;
		tw->tw_ipv6only     = 0;
		tw->tw_transparent  = inet->transparent;
		tw->tw_prot         = sk->sk_prot_creator;
		twsk_net_set(tw, hold_net(sock_net(sk)));
		/*
		 * Because we use RCU lookups, we should not set tw_refcnt
		 * to a non-null value before everything is set up for this
		 * timewait socket.
		 */
		atomic_set(&tw->tw_refcnt, 0);
		inet_twsk_dead_node_init(tw);
		__module_get(tw->tw_prot->owner);
	}

	return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);

/* Returns non-zero if quota exceeded.  */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version where the lock
	 * was released after detaching the chain. That was racy,
	 * because tw buckets are scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and with softnet this is common,
	 * because soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
#endif
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
#endif
	return ret;
}

void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left?  */
		if (twdr->tw_count)
			need_timer = 1;
		twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	}
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_hangman);
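
/*
 * Note: when inet_twdr_do_twkill_work() hits its quota, the hangman above
 * marks the slot in thread_slots and defers the remainder of the purge to
 * the workqueue handler below, re-arming the timer so that new buckets
 * keep aging in the meantime.
 */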

void inet_twdr_twkill_work(struct work_struct *work)
{
	struct inet_timewait_death_row *twdr =
		container_of(work, struct inet_timewait_death_row, twkill_work);
	int i;

	BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
			(sizeof(twdr->thread_slots) * 8));

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}
EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}
EXPORT_SYMBOL(inet_twsk_deschedule);

void inet_twsk_schedule(struct inet_timewait_sock *tw,
		       struct inet_timewait_death_row *twdr,
		       const int timeo, const int timewait_len)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost (the probability of such an
	 * event is p^(N+1), where p is the probability to lose a single
	 * packet, and the time to detect the loss is about RTO*(2^N - 1)
	 * with exponential backoff). The normal timewait length is calculated
	 * so that we wait at least for one retransmitted FIN (the maximal
	 * RTO is 120sec).
	 * [ BTW Linux, following BSD, violates this requirement, waiting
	 *   only for 60sec; we should wait at least for 240 secs.
	 *   Well, 240 consumes too much of resources 8)
	 * ]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if the peer understands PAWS, we
	 * kill the tw bucket after 3.5*RTO (it is important that this number
	 * is greater than the TS tick!) and detect old duplicates with the
	 * help of PAWS.
	 */
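	/* Worked example (an illustration only): with p = 1% packet loss,
	 * the chance that the peer's FIN and both of its retransmits are
	 * all lost is p^3 = 10^-6, and with RTO = 200 ms the 3.5*RTO rule
	 * gives a 700 ms TIME-WAIT instead of the 60 s MSL-based default.
	 */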
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;

	spin_lock(&twdr->death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= timewait_len) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = DIV_ROUND_UP(timeo, twdr->period);
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					      (slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twsk_schedule);
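
/*
 * Design note (a sketch; the exact constants depend on the kernel config):
 * inet_twsk_schedule() uses two timer wheels. Short timeouts land on the
 * fine-grained recycle wheel, twcal_row[], whose slots are
 * 2^INET_TWDR_RECYCLE_TICK jiffies wide (with HZ=1000 that is 128 jiffies,
 * about 128 ms). Longer timeouts fall back to the slow wheel, cells[],
 * whose slots are twdr->period wide; e.g. TCP uses
 * TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS, i.e. 60 s / 8 = 7.5 s per slot.
 */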

void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
						       &twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
				NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
#endif
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
#endif
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);

void inet_twsk_purge(struct inet_hashinfo *hashinfo,
		     struct inet_timewait_death_row *twdr, int family)
{
	struct inet_timewait_sock *tw;
	struct sock *sk;
	struct hlist_nulls_node *node;
	unsigned int slot;

	for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
		rcu_read_lock();
restart:
		sk_nulls_for_each_rcu(sk, node, &head->twchain) {
			tw = inet_twsk(sk);
			if ((tw->tw_family != family) ||
				atomic_read(&twsk_net(tw)->count))
				continue;

			if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
				continue;

			if (unlikely((tw->tw_family != family) ||
				     atomic_read(&twsk_net(tw)->count))) {
				inet_twsk_put(tw);
				goto restart;
			}

			rcu_read_unlock();
			inet_twsk_deschedule(tw, twdr);
			inet_twsk_put(tw);
			goto restart_rcu;
		}
		/* If the nulls value we got at the end of this lookup is
		 * not the expected one, we must restart the lookup.
		 * We probably met an item that was moved to another chain.
		 */
		if (get_nulls_value(node) != slot)
			goto restart;
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);