[TIPC] License header update
[safe/jmp/linux-2.6] net/tipc/link.c
1 /*
2  * net/tipc/link.c: TIPC link code
3  * 
4  * Copyright (c) 2003-2005, Ericsson Research Canada
5  * Copyright (c) 2004-2005, Wind River Systems
6  * Copyright (c) 2005-2006, Ericsson AB
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. Neither the names of the copyright holders nor the names of its
18  *    contributors may be used to endorse or promote products derived from
19  *    this software without specific prior written permission.
20  *
21  * Alternatively, this software may be distributed under the terms of the
22  * GNU General Public License ("GPL") version 2 as published by the Free
23  * Software Foundation.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
29  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37
38 #include "core.h"
39 #include "dbg.h"
40 #include "link.h"
41 #include "net.h"
42 #include "node.h"
43 #include "port.h"
44 #include "addr.h"
45 #include "node_subscr.h"
46 #include "name_distr.h"
47 #include "bearer.h"
48 #include "name_table.h"
49 #include "discover.h"
50 #include "config.h"
51 #include "bcast.h"
52
53
54 /* 
55  * Limit for deferred reception queue: 
56  */
57
58 #define DEF_QUEUE_LIMIT 256u
59
60 /* 
61  * Link state events: 
62  */
63
64 #define  STARTING_EVT    856384768      /* link processing trigger */
65 #define  TRAFFIC_MSG_EVT 560815u        /* traffic msg rx'd on link */
66 #define  TIMEOUT_EVT     560817u        /* link timer expired */
67
68 /*   
69  * The following two 'message types' are really just implementation
70  * data conveniently stored in the message header. 
71  * They must not be considered part of the protocol
72  */
73 #define OPEN_MSG   0
74 #define CLOSED_MSG 1
75
76 /* 
77  * State value stored in 'exp_msg_count'
78  */
79
80 #define START_CHANGEOVER 100000u
81
82 /**
83  * struct link_name - deconstructed link name
84  * @addr_local: network address of node at this end
85  * @if_local: name of interface at this end
86  * @addr_peer: network address of node at far end
87  * @if_peer: name of interface at far end
88  */
89
90 struct link_name {
91         u32 addr_local;
92         char if_local[TIPC_MAX_IF_NAME];
93         u32 addr_peer;
94         char if_peer[TIPC_MAX_IF_NAME];
95 };
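/*
 * For illustration only: a link name such as "1.1.10:eth0-1.1.12:eth2"
 * (made-up addresses and interface names) would deconstruct into
 * addr_local = <1.1.10>, if_local = "eth0", addr_peer = <1.1.12>,
 * if_peer = "eth2"; see link_name_validate() below.
 */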
96
97 #if 0
98
99 /* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
100
101 /** 
102  * struct link_event - link up/down event notification
103  */
104
105 struct link_event {
106         u32 addr;
107         int up;
108         void (*fcn)(u32, char *, int);
109         char name[TIPC_MAX_LINK_NAME];
110 };
111
112 #endif
113
114 static void link_handle_out_of_seq_msg(struct link *l_ptr,
115                                        struct sk_buff *buf);
116 static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
117 static int  link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
118 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
119 static int  link_send_sections_long(struct port *sender,
120                                     struct iovec const *msg_sect,
121                                     u32 num_sect, u32 destnode);
122 static void link_check_defragm_bufs(struct link *l_ptr);
123 static void link_state_event(struct link *l_ptr, u32 event);
124 static void link_reset_statistics(struct link *l_ptr);
125 static void link_print(struct link *l_ptr, struct print_buf *buf, 
126                        const char *str);
127
128 /*
129  * Debugging code used by link routines only
130  *
131  * When debugging link problems on a system that has multiple links,
132  * the standard TIPC debugging routines may not be useful since they
133  * allow the output from multiple links to be intermixed.  For this reason
134  * routines of the form "dbg_link_XXX()" have been created that will capture
135  * debug info into a link's personal print buffer, which can then be dumped
136  * into the TIPC system log (LOG) upon request.
137  *
138  * To enable per-link debugging, use LINK_LOG_BUF_SIZE to specify the size
139  * of the print buffer used by each link.  If LINK_LOG_BUF_SIZE is set to 0,
140  * the dbg_link_XXX() routines simply send their output to the standard 
141  * debug print buffer (DBG_OUTPUT), if it has been defined; this can be useful
142  * when there is only a single link in the system being debugged.
143  *
144  * Notes:
145  * - When enabled, LINK_LOG_BUF_SIZE should be set to at least 1000 (bytes)
146  * - "l_ptr" must be valid when using dbg_link_XXX() macros  
147  */
148
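/* 0 routes dbg_link_XXX() output to DBG_OUTPUT; a non-zero size (e.g. 4096) enables per-link logging */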
149 #define LINK_LOG_BUF_SIZE 0
150
151 #define dbg_link(fmt, arg...)  do { if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while (0)
152 #define dbg_link_msg(msg, txt) do { if (LINK_LOG_BUF_SIZE) msg_print(&l_ptr->print_buf, msg, txt); } while (0)
153 #define dbg_link_state(txt) do { if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while (0)
154 #define dbg_link_dump() do { \
155         if (LINK_LOG_BUF_SIZE) { \
156                 tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \
157                 printbuf_move(LOG, &l_ptr->print_buf); \
158         } \
159 } while (0)
160
161 static inline void dbg_print_link(struct link *l_ptr, const char *str)
162 {
163         if (DBG_OUTPUT)
164                 link_print(l_ptr, DBG_OUTPUT, str);
165 }
166
167 static inline void dbg_print_buf_chain(struct sk_buff *root_buf)
168 {
169         if (DBG_OUTPUT) {
170                 struct sk_buff *buf = root_buf;
171
172                 while (buf) {
173                         msg_dbg(buf_msg(buf), "In chain: ");
174                         buf = buf->next;
175                 }
176         }
177 }
178
179 /*
180  *  Simple inlined link routines
181  */
182
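/* round up to a 4-byte boundary, e.g. align(5) == 8, align(8) == 8 */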
183 static inline unsigned int align(unsigned int i)
184 {
185         return (i + 3) & ~3u;
186 }
187
188 static inline int link_working_working(struct link *l_ptr)
189 {
190         return (l_ptr->state == WORKING_WORKING);
191 }
192
193 static inline int link_working_unknown(struct link *l_ptr)
194 {
195         return (l_ptr->state == WORKING_UNKNOWN);
196 }
197
198 static inline int link_reset_unknown(struct link *l_ptr)
199 {
200         return (l_ptr->state == RESET_UNKNOWN);
201 }
202
203 static inline int link_reset_reset(struct link *l_ptr)
204 {
205         return (l_ptr->state == RESET_RESET);
206 }
207
208 static inline int link_blocked(struct link *l_ptr)
209 {
210         return (l_ptr->exp_msg_count || l_ptr->blocked);
211 }
212
213 static inline int link_congested(struct link *l_ptr)
214 {
215         return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]);
216 }
217
218 static inline u32 link_max_pkt(struct link *l_ptr)
219 {
220         return l_ptr->max_pkt;
221 }
222
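/*
 * Set the max packet negotiation target to the bearer MTU (rounded down to
 * a multiple of 4 and capped at MAX_MSG_SIZE); the working max_pkt starts
 * at the lesser of that target and MAX_PKT_DEFAULT.
 */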
223 static inline void link_init_max_pkt(struct link *l_ptr)
224 {
225         u32 max_pkt;
226         
227         max_pkt = (l_ptr->b_ptr->publ.mtu & ~3);
228         if (max_pkt > MAX_MSG_SIZE)
229                 max_pkt = MAX_MSG_SIZE;
230
231         l_ptr->max_pkt_target = max_pkt;
232         if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
233                 l_ptr->max_pkt = l_ptr->max_pkt_target;
234         else 
235                 l_ptr->max_pkt = MAX_PKT_DEFAULT;
236
237         l_ptr->max_pkt_probes = 0;
238 }
239
240 static inline u32 link_next_sent(struct link *l_ptr)
241 {
242         if (l_ptr->next_out)
243                 return msg_seqno(buf_msg(l_ptr->next_out));
244         return mod(l_ptr->next_out_no);
245 }
246
247 static inline u32 link_last_sent(struct link *l_ptr)
248 {
249         return mod(link_next_sent(l_ptr) - 1);
250 }
251
252 /*
253  *  Simple non-inlined link routines (i.e. referenced outside this file)
254  */
255
256 int link_is_up(struct link *l_ptr)
257 {
258         if (!l_ptr)
259                 return 0;
260         return (link_working_working(l_ptr) || link_working_unknown(l_ptr));
261 }
262
263 int link_is_active(struct link *l_ptr)
264 {
265         return ((l_ptr->owner->active_links[0] == l_ptr) ||
266                 (l_ptr->owner->active_links[1] == l_ptr));
267 }
268
269 /**
270  * link_name_validate - validate & (optionally) deconstruct link name
271  * @name: ptr to link name string
272  * @name_parts: ptr to area for link name components (or NULL if not needed)
273  * 
274  * Returns 1 if link name is valid, otherwise 0.
275  */
276
277 static int link_name_validate(const char *name, struct link_name *name_parts)
278 {
279         char name_copy[TIPC_MAX_LINK_NAME];
280         char *addr_local;
281         char *if_local;
282         char *addr_peer;
283         char *if_peer;
284         char dummy;
285         u32 z_local, c_local, n_local;
286         u32 z_peer, c_peer, n_peer;
287         u32 if_local_len;
288         u32 if_peer_len;
289
290         /* copy link name & ensure length is OK */
291
292         name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
293         /* need above in case non-POSIX strncpy() doesn't pad with nulls */
294         strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
295         if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
296                 return 0;
297
298         /* ensure all component parts of link name are present */
299
300         addr_local = name_copy;
301         if ((if_local = strchr(addr_local, ':')) == NULL)
302                 return 0;
303         *(if_local++) = 0;
304         if ((addr_peer = strchr(if_local, '-')) == NULL)
305                 return 0;
306         *(addr_peer++) = 0;
307         if_local_len = addr_peer - if_local;
308         if ((if_peer = strchr(addr_peer, ':')) == NULL)
309                 return 0;
310         *(if_peer++) = 0;
311         if_peer_len = strlen(if_peer) + 1;
312
313         /* validate component parts of link name */
314
315         if ((sscanf(addr_local, "%u.%u.%u%c",
316                     &z_local, &c_local, &n_local, &dummy) != 3) ||
317             (sscanf(addr_peer, "%u.%u.%u%c",
318                     &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
319             (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
320             (z_peer  > 255) || (c_peer  > 4095) || (n_peer  > 4095) ||
321             (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) || 
322             (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME) || 
323             (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
324             (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
325                 return 0;
326
327         /* return link name components, if necessary */
328
329         if (name_parts) {
330                 name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
331                 strcpy(name_parts->if_local, if_local);
332                 name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
333                 strcpy(name_parts->if_peer, if_peer);
334         }
335         return 1;
336 }
337
338 /**
339  * link_timeout - handle expiration of link timer
340  * @l_ptr: pointer to link
341  * 
342  * This routine must not grab "net_lock" to avoid a potential deadlock conflict
343  * with link_delete().  (There is no risk that the node will be deleted by
344  * another thread because link_delete() always cancels the link timer before
345  * node_delete() is called.)
346  */
347
348 static void link_timeout(struct link *l_ptr)
349 {
350         node_lock(l_ptr->owner);
351
352         /* update counters used in statistical profiling of send traffic */
353
354         l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
355         l_ptr->stats.queue_sz_counts++;
356
357         if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
358                 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
359
360         if (l_ptr->first_out) {
361                 struct tipc_msg *msg = buf_msg(l_ptr->first_out);
362                 u32 length = msg_size(msg);
363
364                 if ((msg_user(msg) == MSG_FRAGMENTER)
365                     && (msg_type(msg) == FIRST_FRAGMENT)) {
366                         length = msg_size(msg_get_wrapped(msg));
367                 }
368                 if (length) {
369                         l_ptr->stats.msg_lengths_total += length;
370                         l_ptr->stats.msg_length_counts++;
371                         if (length <= 64)
372                                 l_ptr->stats.msg_length_profile[0]++;
373                         else if (length <= 256)
374                                 l_ptr->stats.msg_length_profile[1]++;
375                         else if (length <= 1024)
376                                 l_ptr->stats.msg_length_profile[2]++;
377                         else if (length <= 4096)
378                                 l_ptr->stats.msg_length_profile[3]++;
379                         else if (length <= 16384)
380                                 l_ptr->stats.msg_length_profile[4]++;
381                         else if (length <= 32768)
382                                 l_ptr->stats.msg_length_profile[5]++;
383                         else
384                                 l_ptr->stats.msg_length_profile[6]++;
385                 }
386         }
387
388         /* do all other link processing performed on a periodic basis */
389
390         link_check_defragm_bufs(l_ptr);
391
392         link_state_event(l_ptr, TIMEOUT_EVT);
393
394         if (l_ptr->next_out)
395                 link_push_queue(l_ptr);
396
397         node_unlock(l_ptr->owner);
398 }
399
400 static inline void link_set_timer(struct link *l_ptr, u32 time)
401 {
402         k_start_timer(&l_ptr->timer, time);
403 }
404
405 /**
406  * link_create - create a new link
407  * @b_ptr: pointer to associated bearer
408  * @peer: network address of node at other end of link
409  * @media_addr: media address to use when sending messages over link
410  * 
411  * Returns pointer to link.
412  */
413
414 struct link *link_create(struct bearer *b_ptr, const u32 peer,
415                          const struct tipc_media_addr *media_addr)
416 {
417         struct link *l_ptr;
418         struct tipc_msg *msg;
419         char *if_name;
420
421         l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC);
422         if (!l_ptr) {
423                 warn("Memory squeeze; Failed to create link\n");
424                 return NULL;
425         }
426         memset(l_ptr, 0, sizeof(*l_ptr));
427
428         l_ptr->addr = peer;
429         if_name = strchr(b_ptr->publ.name, ':') + 1;
430         sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
431                 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
432                 tipc_node(tipc_own_addr), 
433                 if_name,
434                 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
435                 /* note: peer i/f is appended to link name by reset/activate */
436         memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
437         k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
438         list_add_tail(&l_ptr->link_list, &b_ptr->links);
439         l_ptr->checkpoint = 1;
440         l_ptr->b_ptr = b_ptr;
441         link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
442         l_ptr->state = RESET_UNKNOWN;
443
444         l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
445         msg = l_ptr->pmsg;
446         msg_init(msg, LINK_PROTOCOL, RESET_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
447         msg_set_size(msg, sizeof(l_ptr->proto_msg));
448         msg_set_session(msg, tipc_random);
449         msg_set_bearer_id(msg, b_ptr->identity);
450         strcpy((char *)msg_data(msg), if_name);
451
452         l_ptr->priority = b_ptr->priority;
453         link_set_queue_limits(l_ptr, b_ptr->media->window);
454
455         link_init_max_pkt(l_ptr);
456
457         l_ptr->next_out_no = 1;
458         INIT_LIST_HEAD(&l_ptr->waiting_ports);
459
460         link_reset_statistics(l_ptr);
461
462         l_ptr->owner = node_attach_link(l_ptr);
463         if (!l_ptr->owner) {
464                 kfree(l_ptr);
465                 return NULL;
466         }
467
468         if (LINK_LOG_BUF_SIZE) {
469                 char *pb = kmalloc(LINK_LOG_BUF_SIZE, GFP_ATOMIC);
470
471                 if (!pb) {
472                         kfree(l_ptr);
473                         warn("Memory squeeze; Failed to create link\n");
474                         return NULL;
475                 }
476                 printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
477         }
478
479         k_signal((Handler)link_start, (unsigned long)l_ptr);
480
481         dbg("link_create(): tolerance = %u, cont intv = %u, abort_limit = %u\n",
482             l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit);
483         
484         return l_ptr;
485 }
486
487 /** 
488  * link_delete - delete a link
489  * @l_ptr: pointer to link
490  * 
491  * Note: 'net_lock' is write_locked, bearer is locked.
492  * This routine must not grab the node lock until after link timer cancellation
493  * to avoid a potential deadlock situation.  
494  */
495
496 void link_delete(struct link *l_ptr)
497 {
498         if (!l_ptr) {
499                 err("Attempt to delete non-existent link\n");
500                 return;
501         }
502
503         dbg("link_delete()\n");
504
505         k_cancel_timer(&l_ptr->timer);
506         
507         node_lock(l_ptr->owner);
508         link_reset(l_ptr);
509         node_detach_link(l_ptr->owner, l_ptr);
510         link_stop(l_ptr);
511         list_del_init(&l_ptr->link_list);
512         if (LINK_LOG_BUF_SIZE)
513                 kfree(l_ptr->print_buf.buf);
514         node_unlock(l_ptr->owner);
515         k_term_timer(&l_ptr->timer);
516         kfree(l_ptr);
517 }
518
519 void link_start(struct link *l_ptr)
520 {
521         dbg("link_start %p\n", l_ptr);
522         link_state_event(l_ptr, STARTING_EVT);
523 }
524
525 /**
526  * link_schedule_port - schedule port for deferred sending 
527  * @l_ptr: pointer to link
528  * @origport: reference to sending port
529  * @sz: amount of data to be sent
530  * 
531  * Schedules port for renewed sending of messages after link congestion 
532  * has abated.
533  */
534
535 static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
536 {
537         struct port *p_ptr;
538
539         spin_lock_bh(&port_list_lock);
540         p_ptr = port_lock(origport);
541         if (p_ptr) {
542                 if (!p_ptr->wakeup)
543                         goto exit;
544                 if (!list_empty(&p_ptr->wait_list))
545                         goto exit;
546                 p_ptr->congested_link = l_ptr;
547                 p_ptr->publ.congested = 1;
548                 p_ptr->waiting_pkts = 1 + ((sz - 1) / link_max_pkt(l_ptr));
549                 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
550                 l_ptr->stats.link_congs++;
551 exit:
552                 port_unlock(p_ptr);
553         }
554         spin_unlock_bh(&port_list_lock);
555         return -ELINKCONG;
556 }
557
558 void link_wakeup_ports(struct link *l_ptr, int all)
559 {
560         struct port *p_ptr;
561         struct port *temp_p_ptr;
562         int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
563
564         if (all)
565                 win = 100000;
566         if (win <= 0)
567                 return;
568         if (!spin_trylock_bh(&port_list_lock))
569                 return;
570         if (link_congested(l_ptr))
571                 goto exit;
572         list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports, 
573                                  wait_list) {
574                 if (win <= 0)
575                         break;
576                 list_del_init(&p_ptr->wait_list);
577                 p_ptr->congested_link = NULL;
578                 assert(p_ptr->wakeup);
579                 spin_lock_bh(p_ptr->publ.lock);
580                 p_ptr->publ.congested = 0;
581                 p_ptr->wakeup(&p_ptr->publ);
582                 win -= p_ptr->waiting_pkts;
583                 spin_unlock_bh(p_ptr->publ.lock);
584         }
585
586 exit:
587         spin_unlock_bh(&port_list_lock);
588 }
589
590 /** 
591  * link_release_outqueue - purge link's outbound message queue
592  * @l_ptr: pointer to link
593  */
594
595 static void link_release_outqueue(struct link *l_ptr)
596 {
597         struct sk_buff *buf = l_ptr->first_out;
598         struct sk_buff *next;
599
600         while (buf) {
601                 next = buf->next;
602                 buf_discard(buf);
603                 buf = next;
604         }
605         l_ptr->first_out = NULL;
606         l_ptr->out_queue_size = 0;
607 }
608
609 /**
610  * link_reset_fragments - purge link's inbound message fragments queue
611  * @l_ptr: pointer to link
612  */
613
614 void link_reset_fragments(struct link *l_ptr)
615 {
616         struct sk_buff *buf = l_ptr->defragm_buf;
617         struct sk_buff *next;
618
619         while (buf) {
620                 next = buf->next;
621                 buf_discard(buf);
622                 buf = next;
623         }
624         l_ptr->defragm_buf = NULL;
625 }
626
627 /** 
628  * link_stop - purge all inbound and outbound messages associated with link
629  * @l_ptr: pointer to link
630  */
631
632 void link_stop(struct link *l_ptr)
633 {
634         struct sk_buff *buf;
635         struct sk_buff *next;
636
637         buf = l_ptr->oldest_deferred_in;
638         while (buf) {
639                 next = buf->next;
640                 buf_discard(buf);
641                 buf = next;
642         }
643
644         buf = l_ptr->first_out;
645         while (buf) {
646                 next = buf->next;
647                 buf_discard(buf);
648                 buf = next;
649         }
650
651         link_reset_fragments(l_ptr);
652
653         buf_discard(l_ptr->proto_msg_queue);
654         l_ptr->proto_msg_queue = NULL;
655 }
656
657 #if 0
658
659 /* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
660
661 static void link_recv_event(struct link_event *ev)
662 {
663         ev->fcn(ev->addr, ev->name, ev->up);
664         kfree(ev);
665 }
666
667 static void link_send_event(void (*fcn)(u32 a, char *n, int up),
668                             struct link *l_ptr, int up)
669 {
670         struct link_event *ev;
671         
672         ev = kmalloc(sizeof(*ev), GFP_ATOMIC);
673         if (!ev) {
674                 warn("Link event allocation failure\n");
675                 return;
676         }
677         ev->addr = l_ptr->addr;
678         ev->up = up;
679         ev->fcn = fcn;
680         memcpy(ev->name, l_ptr->name, TIPC_MAX_LINK_NAME);
681         k_signal((Handler)link_recv_event, (unsigned long)ev);
682 }
683
684 #else
685
686 #define link_send_event(fcn, l_ptr, up) do { } while (0)
687
688 #endif
689
690 void link_reset(struct link *l_ptr)
691 {
692         struct sk_buff *buf;
693         u32 prev_state = l_ptr->state;
694         u32 checkpoint = l_ptr->next_in_no;
695         
696         msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1);
697
698         /* Link is down, accept any session: */
699         l_ptr->peer_session = 0;
700
701         /* Prepare for max packet size negotiation */
702         link_init_max_pkt(l_ptr);
703         
704         l_ptr->state = RESET_UNKNOWN;
705         dbg_link_state("Resetting Link\n");
706
707         if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
708                 return;
709
710         node_link_down(l_ptr->owner, l_ptr);
711         bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
712 #if 0
713         tipc_printf(CONS, "\nReset link <%s>\n", l_ptr->name);
714         dbg_link_dump();
715 #endif
716         if (node_has_active_links(l_ptr->owner) &&
717             l_ptr->owner->permit_changeover) {
718                 l_ptr->reset_checkpoint = checkpoint;
719                 l_ptr->exp_msg_count = START_CHANGEOVER;
720         }
721
722         /* Clean up all queues: */
723
724         link_release_outqueue(l_ptr);
725         buf_discard(l_ptr->proto_msg_queue);
726         l_ptr->proto_msg_queue = NULL;
727         buf = l_ptr->oldest_deferred_in;
728         while (buf) {
729                 struct sk_buff *next = buf->next;
730                 buf_discard(buf);
731                 buf = next;
732         }
733         if (!list_empty(&l_ptr->waiting_ports))
734                 link_wakeup_ports(l_ptr, 1);
735
736         l_ptr->retransm_queue_head = 0;
737         l_ptr->retransm_queue_size = 0;
738         l_ptr->last_out = NULL;
739         l_ptr->first_out = NULL;
740         l_ptr->next_out = NULL;
741         l_ptr->unacked_window = 0;
742         l_ptr->checkpoint = 1;
743         l_ptr->next_out_no = 1;
744         l_ptr->deferred_inqueue_sz = 0;
745         l_ptr->oldest_deferred_in = NULL;
746         l_ptr->newest_deferred_in = NULL;
747         l_ptr->fsm_msg_cnt = 0;
748         l_ptr->stale_count = 0;
749         link_reset_statistics(l_ptr);
750
751         link_send_event(cfg_link_event, l_ptr, 0);
752         if (!in_own_cluster(l_ptr->addr))
753                 link_send_event(disc_link_event, l_ptr, 0);
754 }
755
756
757 static void link_activate(struct link *l_ptr)
758 {
759         l_ptr->next_in_no = 1;
760         node_link_up(l_ptr->owner, l_ptr);
761         bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
762         link_send_event(cfg_link_event, l_ptr, 1);
763         if (!in_own_cluster(l_ptr->addr))
764                 link_send_event(disc_link_event, l_ptr, 1);
765 }
766
767 /**
768  * link_state_event - link finite state machine
769  * @l_ptr: pointer to link
770  * @event: state machine event to process
771  */
772
773 static void link_state_event(struct link *l_ptr, unsigned event)
774 {
775         struct link *other; 
776         u32 cont_intv = l_ptr->continuity_interval;
777
778         if (!l_ptr->started && (event != STARTING_EVT))
779                 return;         /* Not yet. */
780
781         if (link_blocked(l_ptr)) {
782                 if (event == TIMEOUT_EVT) {
783                         link_set_timer(l_ptr, cont_intv);
784                 }
785                 return;   /* Changeover going on */
786         }
787         dbg_link("STATE_EV: <%s> ", l_ptr->name);
788
789         switch (l_ptr->state) {
790         case WORKING_WORKING:
791                 dbg_link("WW/");
792                 switch (event) {
793                 case TRAFFIC_MSG_EVT:
794                         dbg_link("TRF-");
795                         /* fall through */
796                 case ACTIVATE_MSG:
797                         dbg_link("ACT\n");
798                         break;
799                 case TIMEOUT_EVT:
800                         dbg_link("TIM ");
801                         if (l_ptr->next_in_no != l_ptr->checkpoint) {
802                                 l_ptr->checkpoint = l_ptr->next_in_no;
803                                 if (bclink_acks_missing(l_ptr->owner)) {
804                                         link_send_proto_msg(l_ptr, STATE_MSG, 
805                                                             0, 0, 0, 0, 0);
806                                         l_ptr->fsm_msg_cnt++;
807                                 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
808                                         link_send_proto_msg(l_ptr, STATE_MSG, 
809                                                             1, 0, 0, 0, 0);
810                                         l_ptr->fsm_msg_cnt++;
811                                 }
812                                 link_set_timer(l_ptr, cont_intv);
813                                 break;
814                         }
815                         dbg_link(" -> WU\n");
816                         l_ptr->state = WORKING_UNKNOWN;
817                         l_ptr->fsm_msg_cnt = 0;
818                         link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
819                         l_ptr->fsm_msg_cnt++;
820                         link_set_timer(l_ptr, cont_intv / 4);
821                         break;
822                 case RESET_MSG:
823                         dbg_link("RES -> RR\n");
824                         link_reset(l_ptr);
825                         l_ptr->state = RESET_RESET;
826                         l_ptr->fsm_msg_cnt = 0;
827                         link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
828                         l_ptr->fsm_msg_cnt++;
829                         link_set_timer(l_ptr, cont_intv);
830                         break;
831                 default:
832                         err("Unknown link event %u in WW state\n", event);
833                 }
834                 break;
835         case WORKING_UNKNOWN:
836                 dbg_link("WU/");
837                 switch (event) {
838                 case TRAFFIC_MSG_EVT:
839                         dbg_link("TRF-");       /* fall through */
840                 case ACTIVATE_MSG:
841                         dbg_link("ACT -> WW\n");
842                         l_ptr->state = WORKING_WORKING;
843                         l_ptr->fsm_msg_cnt = 0;
844                         link_set_timer(l_ptr, cont_intv);
845                         break;
846                 case RESET_MSG:
847                         dbg_link("RES -> RR\n");
848                         link_reset(l_ptr);
849                         l_ptr->state = RESET_RESET;
850                         l_ptr->fsm_msg_cnt = 0;
851                         link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
852                         l_ptr->fsm_msg_cnt++;
853                         link_set_timer(l_ptr, cont_intv);
854                         break;
855                 case TIMEOUT_EVT:
856                         dbg_link("TIM ");
857                         if (l_ptr->next_in_no != l_ptr->checkpoint) {
858                                 dbg_link("-> WW \n");
859                                 l_ptr->state = WORKING_WORKING;
860                                 l_ptr->fsm_msg_cnt = 0;
861                                 l_ptr->checkpoint = l_ptr->next_in_no;
862                                 if (bclink_acks_missing(l_ptr->owner)) {
863                                         link_send_proto_msg(l_ptr, STATE_MSG,
864                                                             0, 0, 0, 0, 0);
865                                         l_ptr->fsm_msg_cnt++;
866                                 }
867                                 link_set_timer(l_ptr, cont_intv);
868                         } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
869                                 dbg_link("Probing %u/%u, timer = %u ms\n",
870                                          l_ptr->fsm_msg_cnt, l_ptr->abort_limit,
871                                          cont_intv / 4);
872                                 link_send_proto_msg(l_ptr, STATE_MSG, 
873                                                     1, 0, 0, 0, 0);
874                                 l_ptr->fsm_msg_cnt++;
875                                 link_set_timer(l_ptr, cont_intv / 4);
876                         } else {        /* Link has failed */
877                                 dbg_link("-> RU (%u probes unanswered)\n",
878                                          l_ptr->fsm_msg_cnt);
879                                 link_reset(l_ptr);
880                                 l_ptr->state = RESET_UNKNOWN;
881                                 l_ptr->fsm_msg_cnt = 0;
882                                 link_send_proto_msg(l_ptr, RESET_MSG,
883                                                     0, 0, 0, 0, 0);
884                                 l_ptr->fsm_msg_cnt++;
885                                 link_set_timer(l_ptr, cont_intv);
886                         }
887                         break;
888                 default:
889                         err("Unknown link event %u in WU state\n", event);
890                 }
891                 break;
892         case RESET_UNKNOWN:
893                 dbg_link("RU/");
894                 switch (event) {
895                 case TRAFFIC_MSG_EVT:
896                         dbg_link("TRF-\n");
897                         break;
898                 case ACTIVATE_MSG:
899                         other = l_ptr->owner->active_links[0];
900                         if (other && link_working_unknown(other)) {
901                                 dbg_link("ACT\n");
902                                 break;
903                         }
904                         dbg_link("ACT -> WW\n");
905                         l_ptr->state = WORKING_WORKING;
906                         l_ptr->fsm_msg_cnt = 0;
907                         link_activate(l_ptr);
908                         link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
909                         l_ptr->fsm_msg_cnt++;
910                         link_set_timer(l_ptr, cont_intv);
911                         break;
912                 case RESET_MSG:
913                         dbg_link("RES \n");
914                         dbg_link(" -> RR\n");
915                         l_ptr->state = RESET_RESET;
916                         l_ptr->fsm_msg_cnt = 0;
917                         link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
918                         l_ptr->fsm_msg_cnt++;
919                         link_set_timer(l_ptr, cont_intv);
920                         break;
921                 case STARTING_EVT:
922                         dbg_link("START-");
923                         l_ptr->started = 1;
924                         /* fall through */
925                 case TIMEOUT_EVT:
926                         dbg_link("TIM \n");
927                         link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
928                         l_ptr->fsm_msg_cnt++;
929                         link_set_timer(l_ptr, cont_intv);
930                         break;
931                 default:
932                         err("Unknown link event %u in RU state\n", event);
933                 }
934                 break;
935         case RESET_RESET:
936                 dbg_link("RR/ ");
937                 switch (event) {
938                 case TRAFFIC_MSG_EVT:
939                         dbg_link("TRF-");
940                         /* fall through */
941                 case ACTIVATE_MSG:
942                         other = l_ptr->owner->active_links[0];
943                         if (other && link_working_unknown(other)) {
944                                 dbg_link("ACT\n");
945                                 break;
946                         }
947                         dbg_link("ACT -> WW\n");
948                         l_ptr->state = WORKING_WORKING;
949                         l_ptr->fsm_msg_cnt = 0;
950                         link_activate(l_ptr);
951                         link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
952                         l_ptr->fsm_msg_cnt++;
953                         link_set_timer(l_ptr, cont_intv);
954                         break;
955                 case RESET_MSG:
956                         dbg_link("RES\n");
957                         break;
958                 case TIMEOUT_EVT:
959                         dbg_link("TIM\n");
960                         link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
961                         l_ptr->fsm_msg_cnt++;
962                         link_set_timer(l_ptr, cont_intv);
963                         dbg_link("fsm_msg_cnt %u\n", l_ptr->fsm_msg_cnt);
964                         break;
965                 default:
966                         err("Unknown link event %u in RR state\n", event);
967                 }
968                 break;
969         default:
970                 err("Unknown link state %u/%u\n", l_ptr->state, event);
971         }
972 }
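/*
 * Summary of the transitions handled above (derived from the code, for
 * reference only):
 *
 * WORKING_WORKING: traffic/activate -> stay; timeout with no traffic since
 *   the last checkpoint -> WORKING_UNKNOWN + probe; reset msg -> RESET_RESET.
 * WORKING_UNKNOWN: traffic/activate -> WORKING_WORKING; reset msg ->
 *   RESET_RESET; timeout -> probe again, or RESET_UNKNOWN once abort_limit
 *   probes go unanswered.
 * RESET_UNKNOWN:   activate msg -> WORKING_WORKING (unless another link to
 *   the node is still WORKING_UNKNOWN); reset msg -> RESET_RESET;
 *   starting/timeout -> (re)send reset msg.
 * RESET_RESET:     traffic/activate -> WORKING_WORKING (same caveat);
 *   timeout -> resend activate msg.
 */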
973
974 /*
975  * link_bundle_buf(): Append contents of a buffer to
976  * the tail of an existing one. 
977  */
978
979 static int link_bundle_buf(struct link *l_ptr,
980                            struct sk_buff *bundler, 
981                            struct sk_buff *buf)
982 {
983         struct tipc_msg *bundler_msg = buf_msg(bundler);
984         struct tipc_msg *msg = buf_msg(buf);
985         u32 size = msg_size(msg);
986         u32 to_pos = align(msg_size(bundler_msg));
987         u32 rest = link_max_pkt(l_ptr) - to_pos;
988
989         if (msg_user(bundler_msg) != MSG_BUNDLER)
990                 return 0;
991         if (msg_type(bundler_msg) != OPEN_MSG)
992                 return 0;
993         if (rest < align(size))
994                 return 0;
995
996         skb_put(bundler, (to_pos - msg_size(bundler_msg)) + size);
997         memcpy(bundler->data + to_pos, buf->data, size);
998         msg_set_size(bundler_msg, to_pos + size);
999         msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
1000         dbg("Packed msg # %u(%u octets) into pos %u in buf(#%u)\n",
1001             msg_msgcnt(bundler_msg), size, to_pos, msg_seqno(bundler_msg));
1002         msg_dbg(msg, "PACKD:");
1003         buf_discard(buf);
1004         l_ptr->stats.sent_bundled++;
1005         return 1;
1006 }
1007
1008 static inline void link_add_to_outqueue(struct link *l_ptr, 
1009                                         struct sk_buff *buf, 
1010                                         struct tipc_msg *msg)
1011 {
1012         u32 ack = mod(l_ptr->next_in_no - 1);
1013         u32 seqno = mod(l_ptr->next_out_no++);
1014
1015         msg_set_word(msg, 2, ((ack << 16) | seqno));
1016         msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1017         buf->next = NULL;
1018         if (l_ptr->first_out) {
1019                 l_ptr->last_out->next = buf;
1020                 l_ptr->last_out = buf;
1021         } else
1022                 l_ptr->first_out = l_ptr->last_out = buf;
1023         l_ptr->out_queue_size++;
1024 }
1025
1026 /* 
1027  * link_send_buf() is the 'full path' for messages, called from 
1028  * inside TIPC when the 'fast path' in tipc_send_buf
1029  * has failed, and from link_send()
1030  */
1031
1032 int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1033 {
1034         struct tipc_msg *msg = buf_msg(buf);
1035         u32 size = msg_size(msg);
1036         u32 dsz = msg_data_sz(msg);
1037         u32 queue_size = l_ptr->out_queue_size;
1038         u32 imp = msg_tot_importance(msg);
1039         u32 queue_limit = l_ptr->queue_limit[imp];
1040         u32 max_packet = link_max_pkt(l_ptr);
1041
1042         msg_set_prevnode(msg, tipc_own_addr);   /* If routed message */
1043
1044         /* Match msg importance against queue limits: */
1045
1046         if (unlikely(queue_size >= queue_limit)) {
1047                 if (imp <= TIPC_CRITICAL_IMPORTANCE) {
1048                         return link_schedule_port(l_ptr, msg_origport(msg),
1049                                                   size);
1050                 }
1051                 msg_dbg(msg, "TIPC: Congestion, throwing away\n");
1052                 buf_discard(buf);
1053                 if (imp > CONN_MANAGER) {
1054                         warn("Resetting <%s>, send queue full", l_ptr->name);
1055                         link_reset(l_ptr);
1056                 }
1057                 return dsz;
1058         }
1059
1060         /* Fragmentation needed ? */
1061
1062         if (size > max_packet)
1063                 return link_send_long_buf(l_ptr, buf);
1064
1065         /* Packet can be queued or sent: */
1066
1067         if (queue_size > l_ptr->stats.max_queue_sz)
1068                 l_ptr->stats.max_queue_sz = queue_size;
1069
1070         if (likely(!bearer_congested(l_ptr->b_ptr, l_ptr) && 
1071                    !link_congested(l_ptr))) {
1072                 link_add_to_outqueue(l_ptr, buf, msg);
1073
1074                 if (likely(bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
1075                         l_ptr->unacked_window = 0;
1076                 } else {
1077                         bearer_schedule(l_ptr->b_ptr, l_ptr);
1078                         l_ptr->stats.bearer_congs++;
1079                         l_ptr->next_out = buf;
1080                 }
1081                 return dsz;
1082         }
1083         /* Congestion: can message be bundled ?: */
1084
1085         if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
1086             (msg_user(msg) != MSG_FRAGMENTER)) {
1087
1088                 /* Try adding message to an existing bundle */
1089
1090                 if (l_ptr->next_out && 
1091                     link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
1092                         bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
1093                         return dsz;
1094                 }
1095
1096                 /* Try creating a new bundle */
1097
1098                 if (size <= max_packet * 2 / 3) {
1099                         struct sk_buff *bundler = buf_acquire(max_packet);
1100                         struct tipc_msg bundler_hdr;
1101
1102                         if (bundler) {
1103                                 msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
1104                                          TIPC_OK, INT_H_SIZE, l_ptr->addr);
1105                                 memcpy(bundler->data, (unchar *)&bundler_hdr, 
1106                                        INT_H_SIZE);
1107                                 skb_trim(bundler, INT_H_SIZE);
1108                                 link_bundle_buf(l_ptr, bundler, buf);
1109                                 buf = bundler;
1110                                 msg = buf_msg(buf);
1111                                 l_ptr->stats.sent_bundles++;
1112                         }
1113                 }
1114         }
1115         if (!l_ptr->next_out)
1116                 l_ptr->next_out = buf;
1117         link_add_to_outqueue(l_ptr, buf, msg);
1118         bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
1119         return dsz;
1120 }
1121
1122 /* 
1123  * link_send(): same as link_send_buf(), but the link to use has 
1124  * not been selected yet, and the owner node is not locked.
1125  * Called by TIPC internal users, e.g. the name distributor
1126  */
1127
1128 int link_send(struct sk_buff *buf, u32 dest, u32 selector)
1129 {
1130         struct link *l_ptr;
1131         struct node *n_ptr;
1132         int res = -ELINKCONG;
1133
1134         read_lock_bh(&net_lock);
1135         n_ptr = node_select(dest, selector);
1136         if (n_ptr) {
1137                 node_lock(n_ptr);
1138                 l_ptr = n_ptr->active_links[selector & 1];
1139                 dbg("link_send: found link %p for dest %x\n", l_ptr, dest);
1140                 if (l_ptr) {
1141                         res = link_send_buf(l_ptr, buf);
1142                 }
1143                 node_unlock(n_ptr);
1144         } else {
1145                 dbg("Attempt to send msg to unknown node:\n");
1146                 msg_dbg(buf_msg(buf),">>>");
1147                 buf_discard(buf);
1148         }
1149         read_unlock_bh(&net_lock);
1150         return res;
1151 }
1152
1153 /* 
1154  * link_send_buf_fast: Entry for data messages where the 
1155  * destination link is known and the header is complete,
1156  * including total message length. Very time critical.
1157  * Link is locked. Returns user data length.
1158  */
1159
1160 static inline int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
1161                                      u32 *used_max_pkt)
1162 {
1163         struct tipc_msg *msg = buf_msg(buf);
1164         int res = msg_data_sz(msg);
1165
1166         if (likely(!link_congested(l_ptr))) {
1167                 if (likely(msg_size(msg) <= link_max_pkt(l_ptr))) {
1168                         if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
1169                                 link_add_to_outqueue(l_ptr, buf, msg);
1170                                 if (likely(bearer_send(l_ptr->b_ptr, buf,
1171                                                        &l_ptr->media_addr))) {
1172                                         l_ptr->unacked_window = 0;
1173                                         msg_dbg(msg,"SENT_FAST:");
1174                                         return res;
1175                                 }
1176                                 dbg("failed sent fast...\n");
1177                                 bearer_schedule(l_ptr->b_ptr, l_ptr);
1178                                 l_ptr->stats.bearer_congs++;
1179                                 l_ptr->next_out = buf;
1180                                 return res;
1181                         }
1182                 }
1183                 else
1184                         *used_max_pkt = link_max_pkt(l_ptr);
1185         }
1186         return link_send_buf(l_ptr, buf);  /* All other cases */
1187 }
1188
1189 /* 
1190  * tipc_send_buf_fast: Entry for data messages where the 
1191  * destination node is known and the header is complete,
1192  * including total message length.
1193  * Returns user data length.
1194  */
1195 int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1196 {
1197         struct link *l_ptr;
1198         struct node *n_ptr;
1199         int res;
1200         u32 selector = msg_origport(buf_msg(buf)) & 1;
1201         u32 dummy;
1202
1203         if (destnode == tipc_own_addr)
1204                 return port_recv_msg(buf);
1205
1206         read_lock_bh(&net_lock);
1207         n_ptr = node_select(destnode, selector);
1208         if (likely(n_ptr)) {
1209                 node_lock(n_ptr);
1210                 l_ptr = n_ptr->active_links[selector];
1211                 dbg("send_fast: buf %p selected %p, destnode = %x\n",
1212                     buf, l_ptr, destnode);
1213                 if (likely(l_ptr)) {
1214                         res = link_send_buf_fast(l_ptr, buf, &dummy);
1215                         node_unlock(n_ptr);
1216                         read_unlock_bh(&net_lock);
1217                         return res;
1218                 }
1219                 node_unlock(n_ptr);
1220         }
1221         read_unlock_bh(&net_lock);
1222         res = msg_data_sz(buf_msg(buf));
1223         tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1224         return res;
1225 }
1226
1227
1228 /* 
1229  * link_send_sections_fast: Entry for messages where the 
1230  * destination processor is known and the header is complete,
1231  * except for total message length. 
1232  * Returns user data length or errno.
1233  */
1234 int link_send_sections_fast(struct port *sender, 
1235                             struct iovec const *msg_sect,
1236                             const u32 num_sect, 
1237                             u32 destaddr)
1238 {
1239         struct tipc_msg *hdr = &sender->publ.phdr;
1240         struct link *l_ptr;
1241         struct sk_buff *buf;
1242         struct node *node;
1243         int res;
1244         u32 selector = msg_origport(hdr) & 1;
1245
1246         assert(destaddr != tipc_own_addr);
1247
1248 again:
1249         /*
1250          * Try building message using port's max_pkt hint.
1251          * (Must not hold any locks while building message.)
1252          */
1253
1254         res = msg_build(hdr, msg_sect, num_sect, sender->max_pkt,
1255                         !sender->user_port, &buf);
1256
1257         read_lock_bh(&net_lock);
1258         node = node_select(destaddr, selector);
1259         if (likely(node)) {
1260                 node_lock(node);
1261                 l_ptr = node->active_links[selector];
1262                 if (likely(l_ptr)) {
1263                         if (likely(buf)) {
1264                                 res = link_send_buf_fast(l_ptr, buf,
1265                                                          &sender->max_pkt);
1266                                 if (unlikely(res < 0))
1267                                         buf_discard(buf);
1268 exit:
1269                                 node_unlock(node);
1270                                 read_unlock_bh(&net_lock);
1271                                 return res;
1272                         }
1273
1274                         /* Exit if build request was invalid */
1275
1276                         if (unlikely(res < 0))
1277                                 goto exit;
1278
1279                         /* Exit if link (or bearer) is congested */
1280
1281                         if (link_congested(l_ptr) || 
1282                             !list_empty(&l_ptr->b_ptr->cong_links)) {
1283                                 res = link_schedule_port(l_ptr,
1284                                                          sender->publ.ref, res);
1285                                 goto exit;
1286                         }
1287
1288                         /* 
1289                          * Message size exceeds max_pkt hint; update hint,
1290                          * then re-try fast path or fragment the message
1291                          */
1292
1293                         sender->max_pkt = link_max_pkt(l_ptr);
1294                         node_unlock(node);
1295                         read_unlock_bh(&net_lock);
1296
1297
1298                         if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
1299                                 goto again;
1300
1301                         return link_send_sections_long(sender, msg_sect,
1302                                                        num_sect, destaddr);
1303                 }
1304                 node_unlock(node);
1305         }
1306         read_unlock_bh(&net_lock);
1307
1308         /* Couldn't find a link to the destination node */
1309
1310         if (buf)
1311                 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1312         if (res >= 0)
1313                 return port_reject_sections(sender, hdr, msg_sect, num_sect,
1314                                             TIPC_ERR_NO_NODE);
1315         return res;
1316 }
1317
1318 /* 
1319  * link_send_sections_long(): Entry for long messages where the 
1320  * destination node is known and the header is complete,
1321  * including total message length.
1322  * Link and bearer congestion status have been checked to be ok,
1323  * and are ignored if they change.
1324  *
1325  * Note that fragments do not use the full link MTU so that they won't have
1326  * to undergo refragmentation if link changeover causes them to be sent
1327  * over another link with an additional tunnel header added as prefix.
1328  * (Refragmentation will still occur if the other link has a smaller MTU.)
1329  *
1330  * Returns user data length or errno.
1331  */
1332 static int link_send_sections_long(struct port *sender,
1333                                    struct iovec const *msg_sect,
1334                                    u32 num_sect,
1335                                    u32 destaddr)
1336 {
1337         struct link *l_ptr;
1338         struct node *node;
1339         struct tipc_msg *hdr = &sender->publ.phdr;
1340         u32 dsz = msg_data_sz(hdr);
1341         u32 max_pkt, fragm_sz, rest;
1342         struct tipc_msg fragm_hdr;
1343         struct sk_buff *buf, *buf_chain, *prev;
1344         u32 fragm_crs, fragm_rest, hsz, sect_rest;
1345         const unchar *sect_crs;
1346         int curr_sect;
1347         u32 fragm_no;
1348
1349 again:
1350         fragm_no = 1;
1351         max_pkt = sender->max_pkt - INT_H_SIZE;  
1352                 /* leave room for tunnel header in case of link changeover */
1353         fragm_sz = max_pkt - INT_H_SIZE; 
1354                 /* leave room for fragmentation header in each fragment */
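        /*
         * Illustrative figures only: assuming INT_H_SIZE is 40 bytes and
         * sender->max_pkt is 1500, max_pkt becomes 1460 and fragm_sz 1420,
         * i.e. each fragment carries up to fragm_sz bytes of user data.
         */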
1355         rest = dsz;
1356         fragm_crs = 0;
1357         fragm_rest = 0;
1358         sect_rest = 0;
1359         sect_crs = NULL;
1360         curr_sect = -1;
1361
1362         /* Prepare reusable fragment header: */
1363
1364         msg_dbg(hdr, ">FRAGMENTING>");
1365         msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1366                  TIPC_OK, INT_H_SIZE, msg_destnode(hdr));
1367         msg_set_link_selector(&fragm_hdr, sender->publ.ref);
1368         msg_set_size(&fragm_hdr, max_pkt);
1369         msg_set_fragm_no(&fragm_hdr, 1);
1370
1371         /* Prepare header of first fragment: */
1372
1373         buf_chain = buf = buf_acquire(max_pkt);
1374         if (!buf)
1375                 return -ENOMEM;
1376         buf->next = NULL;
1377         memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
1378         hsz = msg_hdr_sz(hdr);
1379         memcpy(buf->data + INT_H_SIZE, (unchar *)hdr, hsz);
1380         msg_dbg(buf_msg(buf), ">BUILD>");
1381
1382         /* Chop up message: */
1383
1384         fragm_crs = INT_H_SIZE + hsz;
1385         fragm_rest = fragm_sz - hsz;
1386
1387         do {            /* For all sections */
1388                 u32 sz;
1389
1390                 if (!sect_rest) {
1391                         sect_rest = msg_sect[++curr_sect].iov_len;
1392                         sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
1393                 }
1394
1395                 if (sect_rest < fragm_rest)
1396                         sz = sect_rest;
1397                 else
1398                         sz = fragm_rest;
1399
1400                 if (likely(!sender->user_port)) {
1401                         if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
1402 error:
1403                                 for (; buf_chain; buf_chain = buf) {
1404                                         buf = buf_chain->next;
1405                                         buf_discard(buf_chain);
1406                                 }
1407                                 return -EFAULT;
1408                         }
1409                 } else
1410                         memcpy(buf->data + fragm_crs, sect_crs, sz);
1411
1412                 sect_crs += sz;
1413                 sect_rest -= sz;
1414                 fragm_crs += sz;
1415                 fragm_rest -= sz;
1416                 rest -= sz;
1417
1418                 if (!fragm_rest && rest) {
1419
1420                         /* Initiate new fragment: */
1421                         if (rest <= fragm_sz) {
1422                                 fragm_sz = rest;
1423                                 msg_set_type(&fragm_hdr,LAST_FRAGMENT);
1424                         } else {
1425                                 msg_set_type(&fragm_hdr, FRAGMENT);
1426                         }
1427                         msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
1428                         msg_set_fragm_no(&fragm_hdr, ++fragm_no);
1429                         prev = buf;
1430                         buf = buf_acquire(fragm_sz + INT_H_SIZE);
1431                         if (!buf)
1432                                 goto error;
1433
1434                         buf->next = NULL;                                
1435                         prev->next = buf;
1436                         memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
1437                         fragm_crs = INT_H_SIZE;
1438                         fragm_rest = fragm_sz;
1439                         msg_dbg(buf_msg(buf),"  >BUILD>");
1440                 }
1441         }
1442         while (rest > 0);
1443
1444         /* 
1445          * Now we have a buffer chain. Select a link and check
1446          * that packet size is still OK
1447          */
1448         node = node_select(destaddr, sender->publ.ref & 1);
1449         if (likely(node)) {
1450                 node_lock(node);
1451                 l_ptr = node->active_links[sender->publ.ref & 1];
1452                 if (!l_ptr) {
1453                         node_unlock(node);
1454                         goto reject;
1455                 }
1456                 if (link_max_pkt(l_ptr) < max_pkt) {
1457                         sender->max_pkt = link_max_pkt(l_ptr);
1458                         node_unlock(node);
1459                         for (; buf_chain; buf_chain = buf) {
1460                                 buf = buf_chain->next;
1461                                 buf_discard(buf_chain);
1462                         }
1463                         goto again;
1464                 }
1465         } else {
1466 reject:
1467                 for (; buf_chain; buf_chain = buf) {
1468                         buf = buf_chain->next;
1469                         buf_discard(buf_chain);
1470                 }
1471                 return port_reject_sections(sender, hdr, msg_sect, num_sect,
1472                                             TIPC_ERR_NO_NODE);
1473         }
1474
1475         /* Append whole chain to send queue: */
1476
1477         buf = buf_chain;
1478         l_ptr->long_msg_seq_no = mod(l_ptr->long_msg_seq_no + 1);
1479         if (!l_ptr->next_out)
1480                 l_ptr->next_out = buf_chain;
1481         l_ptr->stats.sent_fragmented++;
1482         while (buf) {
1483                 struct sk_buff *next = buf->next;
1484                 struct tipc_msg *msg = buf_msg(buf);
1485
1486                 l_ptr->stats.sent_fragments++;
1487                 msg_set_long_msgno(msg, l_ptr->long_msg_seq_no);
1488                 link_add_to_outqueue(l_ptr, buf, msg);
1489                 msg_dbg(msg, ">ADD>");
1490                 buf = next;
1491         }
1492
1493         /* Send it, if possible: */
1494
1495         link_push_queue(l_ptr);
1496         node_unlock(node);
1497         return dsz;
1498 }
1499
1500 /* 
1501  * link_push_packet: Push one unsent packet to the media
1502  */
1503 u32 link_push_packet(struct link *l_ptr)
1504 {
1505         struct sk_buff *buf = l_ptr->first_out;
1506         u32 r_q_size = l_ptr->retransm_queue_size;
1507         u32 r_q_head = l_ptr->retransm_queue_head;
1508
1509         /* Step to position where retransmission failed, if any,          */
1510         /* considering that buffers may have been released in the meantime */
1511
1512         if (r_q_size && buf) {
1513                 u32 last = lesser(mod(r_q_head + r_q_size), 
1514                                   link_last_sent(l_ptr));
1515                 u32 first = msg_seqno(buf_msg(buf));
1516
1517                 while (buf && less(first, r_q_head)) {
1518                         first = mod(first + 1);
1519                         buf = buf->next;
1520                 }
1521                 l_ptr->retransm_queue_head = r_q_head = first;
1522                 l_ptr->retransm_queue_size = r_q_size = mod(last - first);
1523         }
1524
1525         /* Continue retransmission now, if there is anything: */
1526
1527         if (r_q_size && buf && !skb_cloned(buf)) {
1528                 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1529                 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 
1530                 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1531                         msg_dbg(buf_msg(buf), ">DEF-RETR>");
1532                         l_ptr->retransm_queue_head = mod(++r_q_head);
1533                         l_ptr->retransm_queue_size = --r_q_size;
1534                         l_ptr->stats.retransmitted++;
1535                         return TIPC_OK;
1536                 } else {
1537                         l_ptr->stats.bearer_congs++;
1538                         msg_dbg(buf_msg(buf), "|>DEF-RETR>");
1539                         return PUSH_FAILED;
1540                 }
1541         }
1542
1543         /* Send deferred protocol message, if any: */
1544
1545         buf = l_ptr->proto_msg_queue;
1546         if (buf) {
1547                 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1548                 msg_set_bcast_ack(buf_msg(buf),l_ptr->owner->bclink.last_in); 
1549                 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1550                         msg_dbg(buf_msg(buf), ">DEF-PROT>");
1551                         l_ptr->unacked_window = 0;
1552                         buf_discard(buf);
1553                         l_ptr->proto_msg_queue = 0;
1554                         return TIPC_OK;
1555                 } else {
1556                         msg_dbg(buf_msg(buf), "|>DEF-PROT>");
1557                         l_ptr->stats.bearer_congs++;
1558                         return PUSH_FAILED;
1559                 }
1560         }
1561
1562         /* Send one deferred data message, if send window not full: */
1563
1564         buf = l_ptr->next_out;
1565         if (buf) {
1566                 struct tipc_msg *msg = buf_msg(buf);
1567                 u32 next = msg_seqno(msg);
1568                 u32 first = msg_seqno(buf_msg(l_ptr->first_out));
1569
1570                 if (mod(next - first) < l_ptr->queue_limit[0]) {
1571                         msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1572                         msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 
1573                         if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1574                                 if (msg_user(msg) == MSG_BUNDLER)
1575                                         msg_set_type(msg, CLOSED_MSG);
1576                                 msg_dbg(msg, ">PUSH-DATA>");
1577                                 l_ptr->next_out = buf->next;
1578                                 return TIPC_OK;
1579                         } else {
1580                                 msg_dbg(msg, "|PUSH-DATA|");
1581                                 l_ptr->stats.bearer_congs++;
1582                                 return PUSH_FAILED;
1583                         }
1584                 }
1585         }
1586         return PUSH_FINISHED;
1587 }
1588
1589 /*
1590  * link_push_queue(): push out the unsent messages of a link where
1591  *               congestion has abated. Node is locked
1592  */
1593 void link_push_queue(struct link *l_ptr)
1594 {
1595         u32 res;
1596
1597         if (bearer_congested(l_ptr->b_ptr, l_ptr))
1598                 return;
1599
1600         do {
1601                 res = link_push_packet(l_ptr);
1602         }
1603         while (res == TIPC_OK);
1604         if (res == PUSH_FAILED)
1605                 bearer_schedule(l_ptr->b_ptr, l_ptr);
1606 }
1607
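/*
 * link_retransmit(): Retransmit up to 'retransmits' packets, starting at 'buf'.
 *                    If the bearer is congested the request is recorded in the
 *                    retransmission queue instead; the link is reset if the same
 *                    packet keeps failing to get through.
 */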
1608 void link_retransmit(struct link *l_ptr, struct sk_buff *buf, 
1609                      u32 retransmits)
1610 {
1611         struct tipc_msg *msg;
1612
1613         dbg("Retransmitting %u in link %p\n", retransmits, l_ptr);
1614
1615         if (bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) {
1616                 msg_dbg(buf_msg(buf), ">NO_RETR->BCONG>");
1617                 dbg_print_link(l_ptr, "   ");
1618                 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
1619                 l_ptr->retransm_queue_size = retransmits;
1620                 return;
1621         }
1622         while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) {
1623                 msg = buf_msg(buf);
1624                 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1625                 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 
1626                 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1627                         /* Catch if retransmissions fail repeatedly: */
1628                         if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1629                                 if (++l_ptr->stale_count > 100) {
1630                                         msg_print(CONS, buf_msg(buf), ">RETR>");
1631                                         info("...Retransmitted %u times\n",
1632                                              l_ptr->stale_count);
1633                                         link_print(l_ptr, CONS, "Resetting Link\n");
1634                                         link_reset(l_ptr);
1635                                         break;
1636                                 }
1637                         } else {
1638                                 l_ptr->stale_count = 0;
1639                         }
1640                         l_ptr->last_retransmitted = msg_seqno(msg);
1641
1642                         msg_dbg(buf_msg(buf), ">RETR>");
1643                         buf = buf->next;
1644                         retransmits--;
1645                         l_ptr->stats.retransmitted++;
1646                 } else {
1647                         bearer_schedule(l_ptr->b_ptr, l_ptr);
1648                         l_ptr->stats.bearer_congs++;
1649                         l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
1650                         l_ptr->retransm_queue_size = retransmits;
1651                         return;
1652                 }
1653         }
1654         l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1655 }
1656
1657 /* 
1658  * link_recv_non_seq: Receive packets which are outside
1659  *                    the link sequence flow
1660  */
1661
1662 static void link_recv_non_seq(struct sk_buff *buf)
1663 {
1664         struct tipc_msg *msg = buf_msg(buf);
1665
1666         if (msg_user(msg) ==  LINK_CONFIG)
1667                 disc_recv_msg(buf);
1668         else
1669                 bclink_recv_pkt(buf);
1670 }
1671
1672 /** 
1673  * link_insert_deferred_queue - insert deferred messages back into receive chain
1674  */
1675
1676 static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr, 
1677                                                   struct sk_buff *buf)
1678 {
1679         u32 seq_no;
1680
1681         if (l_ptr->oldest_deferred_in == NULL)
1682                 return buf;
1683
1684         seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
1685         if (seq_no == mod(l_ptr->next_in_no)) {
1686                 l_ptr->newest_deferred_in->next = buf;
1687                 buf = l_ptr->oldest_deferred_in;
1688                 l_ptr->oldest_deferred_in = NULL;
1689                 l_ptr->deferred_inqueue_sz = 0;
1690         }
1691         return buf;
1692 }
1693
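/*
 * tipc_recv_msg(): Entry point for messages arriving from a bearer.
 *                  Walks the incoming buffer chain, releases packets
 *                  acknowledged by the peer, delivers in-sequence messages
 *                  to their users, and defers out-of-sequence ones.
 */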
1694 void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1695 {
1696         read_lock_bh(&net_lock);
1697         while (head) {
1698                 struct bearer *b_ptr;
1699                 struct node *n_ptr;
1700                 struct link *l_ptr;
1701                 struct sk_buff *crs;
1702                 struct sk_buff *buf = head;
1703                 struct tipc_msg *msg = buf_msg(buf);
1704                 u32 seq_no = msg_seqno(msg);
1705                 u32 ackd = msg_ack(msg);
1706                 u32 released = 0;
1707                 int type;
1708
1709                 b_ptr = (struct bearer *)tb_ptr;
1710                 TIPC_SKB_CB(buf)->handle = b_ptr;
1711
1712                 head = head->next;
1713                 if (unlikely(msg_version(msg) != TIPC_VERSION))
1714                         goto cont;
1715 #if 0
1716                 if (msg_user(msg) != LINK_PROTOCOL)
1717 #endif
1718                         msg_dbg(msg,"<REC<");
1719
1720                 if (unlikely(msg_non_seq(msg))) {
1721                         link_recv_non_seq(buf);
1722                         continue;
1723                 }
1724                 n_ptr = node_find(msg_prevnode(msg));
1725                 if (unlikely(!n_ptr))
1726                         goto cont;
1727
1728                 node_lock(n_ptr);
1729                 l_ptr = n_ptr->links[b_ptr->identity];
1730                 if (unlikely(!l_ptr)) {
1731                         node_unlock(n_ptr);
1732                         goto cont;
1733                 }
1734                 /* 
1735                  * Release acked messages 
1736                  */
1737                 if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
1738                         if (node_is_up(n_ptr) && n_ptr->bclink.supported)
1739                                 bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1740                 }
1741
1742                 crs = l_ptr->first_out;
1743                 while ((crs != l_ptr->next_out) && 
1744                        less_eq(msg_seqno(buf_msg(crs)), ackd)) {
1745                         struct sk_buff *next = crs->next;
1746
1747                         buf_discard(crs);
1748                         crs = next;
1749                         released++;
1750                 }
1751                 if (released) {
1752                         l_ptr->first_out = crs;
1753                         l_ptr->out_queue_size -= released;
1754                 }
1755                 if (unlikely(l_ptr->next_out))
1756                         link_push_queue(l_ptr);
1757                 if (unlikely(!list_empty(&l_ptr->waiting_ports)))
1758                         link_wakeup_ports(l_ptr, 0);
1759                 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1760                         l_ptr->stats.sent_acks++;
1761                         link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1762                 }
1763
1764 protocol_check:
1765                 if (likely(link_working_working(l_ptr))) {
1766                         if (likely(seq_no == mod(l_ptr->next_in_no))) {
1767                                 l_ptr->next_in_no++;
1768                                 if (unlikely(l_ptr->oldest_deferred_in))
1769                                         head = link_insert_deferred_queue(l_ptr,
1770                                                                           head);
1771                                 if (likely(msg_is_dest(msg, tipc_own_addr))) {
1772 deliver:
1773                                         if (likely(msg_isdata(msg))) {
1774                                                 node_unlock(n_ptr);
1775                                                 port_recv_msg(buf);
1776                                                 continue;
1777                                         }
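                                        /* Non-data message for this node: dispatch by internal user type */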
1778                                         switch (msg_user(msg)) {
1779                                         case MSG_BUNDLER:
1780                                                 l_ptr->stats.recv_bundles++;
1781                                                 l_ptr->stats.recv_bundled += 
1782                                                         msg_msgcnt(msg);
1783                                                 node_unlock(n_ptr);
1784                                                 link_recv_bundle(buf);
1785                                                 continue;
1786                                         case ROUTE_DISTRIBUTOR:
1787                                                 node_unlock(n_ptr);
1788                                                 cluster_recv_routing_table(buf);
1789                                                 continue;
1790                                         case NAME_DISTRIBUTOR:
1791                                                 node_unlock(n_ptr);
1792                                                 named_recv(buf);
1793                                                 continue;
1794                                         case CONN_MANAGER:
1795                                                 node_unlock(n_ptr);
1796                                                 port_recv_proto_msg(buf);
1797                                                 continue;
1798                                         case MSG_FRAGMENTER:
1799                                                 l_ptr->stats.recv_fragments++;
1800                                                 if (link_recv_fragment(
1801                                                         &l_ptr->defragm_buf, 
1802                                                         &buf, &msg)) {
1803                                                         l_ptr->stats.recv_fragmented++;
1804                                                         goto deliver;
1805                                                 }
1806                                                 break;
1807                                         case CHANGEOVER_PROTOCOL:
1808                                                 type = msg_type(msg);
1809                                                 if (link_recv_changeover_msg(
1810                                                         &l_ptr, &buf)) {
1811                                                         msg = buf_msg(buf);
1812                                                         seq_no = msg_seqno(msg);
1813                                                         TIPC_SKB_CB(buf)->handle 
1814                                                                 = b_ptr;
1815                                                         if (type == ORIGINAL_MSG)
1816                                                                 goto deliver;
1817                                                         goto protocol_check;
1818                                                 }
1819                                                 break;
1820                                         }
1821                                 }
1822                                 node_unlock(n_ptr);
1823                                 net_route_msg(buf);
1824                                 continue;
1825                         }
1826                         link_handle_out_of_seq_msg(l_ptr, buf);
1827                         head = link_insert_deferred_queue(l_ptr, head);
1828                         node_unlock(n_ptr);
1829                         continue;
1830                 }
1831
1832                 if (msg_user(msg) == LINK_PROTOCOL) {
1833                         link_recv_proto_msg(l_ptr, buf);
1834                         head = link_insert_deferred_queue(l_ptr, head);
1835                         node_unlock(n_ptr);
1836                         continue;
1837                 }
1838                 msg_dbg(msg,"NSEQ<REC<");
1839                 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1840
1841                 if (link_working_working(l_ptr)) {
1842                         /* Re-insert in front of queue */
1843                         msg_dbg(msg,"RECV-REINS:");
1844                         buf->next = head;
1845                         head = buf;
1846                         node_unlock(n_ptr);
1847                         continue;
1848                 }
1849                 node_unlock(n_ptr);
1850 cont:
1851                 buf_discard(buf);
1852         }
1853         read_unlock_bh(&net_lock);
1854 }
1855
1856 /* 
1857  * link_defer_pkt(): Sort a received out-of-sequence packet
1858  *                   into the deferred reception queue.
1859  * Returns the increase of the queue length, i.e. 0 or 1
1860  */
1861
1862 u32 link_defer_pkt(struct sk_buff **head,
1863                    struct sk_buff **tail,
1864                    struct sk_buff *buf)
1865 {
1866         struct sk_buff *prev = 0;
1867         struct sk_buff *crs = *head;
1868         u32 seq_no = msg_seqno(buf_msg(buf));
1869
1870         buf->next = NULL;
1871
1872         /* Empty queue ? */
1873         if (*head == NULL) {
1874                 *head = *tail = buf;
1875                 return 1;
1876         }
1877
1878         /* Last ? */
1879         if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
1880                 (*tail)->next = buf;
1881                 *tail = buf;
1882                 return 1;
1883         }
1884
1885         /* Scan through queue and sort it in */
1886         do {
1887                 struct tipc_msg *msg = buf_msg(crs);
1888
1889                 if (less(seq_no, msg_seqno(msg))) {
1890                         buf->next = crs;
1891                         if (prev)
1892                                 prev->next = buf;
1893                         else
1894                                 *head = buf;   
1895                         return 1;
1896                 }
1897                 if (seq_no == msg_seqno(msg)) {
1898                         break;
1899                 }
1900                 prev = crs;
1901                 crs = crs->next;
1902         }
1903         while (crs);
1904
1905         /* Message is a duplicate of an existing message */
1906
1907         buf_discard(buf);
1908         return 0;
1909 }
1910
1911 /** 
1912  * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1913  */
1914
1915 static void link_handle_out_of_seq_msg(struct link *l_ptr, 
1916                                        struct sk_buff *buf)
1917 {
1918         u32 seq_no = msg_seqno(buf_msg(buf));
1919
1920         if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1921                 link_recv_proto_msg(l_ptr, buf);
1922                 return;
1923         }
1924
1925         dbg("rx OOS msg: seq_no %u, expecting %u (%u)\n", 
1926             seq_no, mod(l_ptr->next_in_no), l_ptr->next_in_no);
1927
1928         /* Record OOS packet arrival (force mismatch on next timeout) */
1929
1930         l_ptr->checkpoint--;
1931
1932         /* 
1933          * Discard packet if a duplicate; otherwise add it to deferred queue
1934          * and notify peer of gap as per protocol specification
1935          */
1936
1937         if (less(seq_no, mod(l_ptr->next_in_no))) {
1938                 l_ptr->stats.duplicates++;
1939                 buf_discard(buf);
1940                 return;
1941         }
1942
1943         if (link_defer_pkt(&l_ptr->oldest_deferred_in,
1944                            &l_ptr->newest_deferred_in, buf)) {
1945                 l_ptr->deferred_inqueue_sz++;
1946                 l_ptr->stats.deferred_recv++;
1947                 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1948                         link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1949         } else
1950                 l_ptr->stats.duplicates++;
1951 }
1952
1953 /*
1954  * Send protocol message to the other endpoint.
1955  */
1956 void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
1957                          u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
1958 {
1959         struct sk_buff *buf = 0;
1960         struct tipc_msg *msg = l_ptr->pmsg;
1961         u32 msg_size = sizeof(l_ptr->proto_msg);
1962
1963         if (link_blocked(l_ptr))
1964                 return;
1965         msg_set_type(msg, msg_typ);
1966         msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1967         msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in)); 
1968         msg_set_last_bcast(msg, bclink_get_last_sent());
1969
1970         if (msg_typ == STATE_MSG) {
1971                 u32 next_sent = mod(l_ptr->next_out_no);
1972
1973                 if (!link_is_up(l_ptr))
1974                         return;
1975                 if (l_ptr->next_out)
1976                         next_sent = msg_seqno(buf_msg(l_ptr->next_out));
1977                 msg_set_next_sent(msg, next_sent);
1978                 if (l_ptr->oldest_deferred_in) {
1979                         u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
1980                         gap = mod(rec - mod(l_ptr->next_in_no));
1981                 }
1982                 msg_set_seq_gap(msg, gap);
1983                 if (gap)
1984                         l_ptr->stats.sent_nacks++;
1985                 msg_set_link_tolerance(msg, tolerance);
1986                 msg_set_linkprio(msg, priority);
1987                 msg_set_max_pkt(msg, ack_mtu);
1988                 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1989                 msg_set_probe(msg, probe_msg != 0);
1990                 if (probe_msg) { 
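                        /*
                         * MTU discovery: pad the probe message to roughly halfway
                         * between the current and the target max packet size, and
                         * lower the target after 10 unconfirmed probes.
                         */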
1991                         u32 mtu = l_ptr->max_pkt;
1992
1993                         if ((mtu < l_ptr->max_pkt_target) &&
1994                             link_working_working(l_ptr) &&
1995                             l_ptr->fsm_msg_cnt) {
1996                                 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1997                                 if (l_ptr->max_pkt_probes == 10) {
1998                                         l_ptr->max_pkt_target = (msg_size - 4);
1999                                         l_ptr->max_pkt_probes = 0;
2000                                         msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
2001                                 }
2002                                 l_ptr->max_pkt_probes++;
2003                         }
2004
2005                         l_ptr->stats.sent_probes++;
2006                 }
2007                 l_ptr->stats.sent_states++;
2008         } else {                /* RESET_MSG or ACTIVATE_MSG */
2009                 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
2010                 msg_set_seq_gap(msg, 0);
2011                 msg_set_next_sent(msg, 1);
2012                 msg_set_link_tolerance(msg, l_ptr->tolerance);
2013                 msg_set_linkprio(msg, l_ptr->priority);
2014                 msg_set_max_pkt(msg, l_ptr->max_pkt_target);
2015         }
2016
2017         if (node_has_redundant_links(l_ptr->owner)) {
2018                 msg_set_redundant_link(msg);
2019         } else {
2020                 msg_clear_redundant_link(msg);
2021         }
2022         msg_set_linkprio(msg, l_ptr->priority);
2023
2024         /* Ensure sequence number cannot be mistaken for a valid data packet number: */
2025
2026         msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
2027
2028         /* Congestion? */
2029
2030         if (bearer_congested(l_ptr->b_ptr, l_ptr)) {
2031                 if (!l_ptr->proto_msg_queue) {
2032                         l_ptr->proto_msg_queue =
2033                                 buf_acquire(sizeof(l_ptr->proto_msg));
2034                 }
2035                 buf = l_ptr->proto_msg_queue;
2036                 if (!buf)
2037                         return;
2038                 memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
2039                 return;
2040         }
2041         msg_set_timestamp(msg, jiffies_to_msecs(jiffies));
2042
2043         /* Message can be sent */
2044
2045         msg_dbg(msg, ">>");
2046
2047         buf = buf_acquire(msg_size);
2048         if (!buf)
2049                 return;
2050
2051         memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
2052         msg_set_size(buf_msg(buf), msg_size);
2053
2054         if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
2055                 l_ptr->unacked_window = 0;
2056                 buf_discard(buf);
2057                 return;
2058         }
2059
2060         /* New congestion */
2061         bearer_schedule(l_ptr->b_ptr, l_ptr);
2062         l_ptr->proto_msg_queue = buf;
2063         l_ptr->stats.bearer_congs++;
2064 }
2065
2066 /*
2067  * Receive protocol message:
2068  * Note that the network plane id propagates through the network, and may
2069  * change at any time. The node with the lowest address determines which plane id is used.
2070  */
2071
2072 static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2073 {
2074         u32 rec_gap = 0;
2075         u32 max_pkt_info;
2076         u32 max_pkt_ack;
2077         u32 msg_tol;
2078         struct tipc_msg *msg = buf_msg(buf);
2079
2080         dbg("AT(%u):", jiffies_to_msecs(jiffies));
2081         msg_dbg(msg, "<<");
2082         if (link_blocked(l_ptr))
2083                 goto exit;
2084
2085         /* record unnumbered packet arrival (force mismatch on next timeout) */
2086
2087         l_ptr->checkpoint--;
2088
2089         if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
2090                 if (tipc_own_addr > msg_prevnode(msg))
2091                         l_ptr->b_ptr->net_plane = msg_net_plane(msg);
2092
2093         l_ptr->owner->permit_changeover = msg_redundant_link(msg);
2094
2095         switch (msg_type(msg)) {
2096         
2097         case RESET_MSG:
2098                 if (!link_working_unknown(l_ptr) && l_ptr->peer_session) {
2099                         if (msg_session(msg) == l_ptr->peer_session) {
2100                                 dbg("Duplicate RESET: %u<->%u\n",
2101                                     msg_session(msg), l_ptr->peer_session);                                     
2102                                 break; /* duplicate: ignore */
2103                         }
2104                 }
2105                 /* fall thru' */
2106         case ACTIVATE_MSG:
2107                 /* Update link settings according to the other endpoint's values */
2108
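                /* Complete the link name with the peer's interface name (carried in the message data area) */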
2109                 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
2110
2111                 if ((msg_tol = msg_link_tolerance(msg)) &&
2112                     (msg_tol > l_ptr->tolerance))
2113                         link_set_supervision_props(l_ptr, msg_tol);
2114
2115                 if (msg_linkprio(msg) > l_ptr->priority)
2116                         l_ptr->priority = msg_linkprio(msg);
2117
2118                 max_pkt_info = msg_max_pkt(msg);
2119                 if (max_pkt_info) {
2120                         if (max_pkt_info < l_ptr->max_pkt_target)
2121                                 l_ptr->max_pkt_target = max_pkt_info;
2122                         if (l_ptr->max_pkt > l_ptr->max_pkt_target)
2123                                 l_ptr->max_pkt = l_ptr->max_pkt_target;
2124                 } else {
2125                         l_ptr->max_pkt = l_ptr->max_pkt_target;
2126                 }
2127                 l_ptr->owner->bclink.supported = (max_pkt_info != 0);
2128
2129                 link_state_event(l_ptr, msg_type(msg));
2130
2131                 l_ptr->peer_session = msg_session(msg);
2132                 l_ptr->peer_bearer_id = msg_bearer_id(msg);
2133
2134                 /* Synchronize broadcast sequence numbers */
2135                 if (!node_has_redundant_links(l_ptr->owner)) {
2136                         l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
2137                 }
2138                 break;
2139         case STATE_MSG:
2140
2141                 if ((msg_tol = msg_link_tolerance(msg)))
2142                         link_set_supervision_props(l_ptr, msg_tol);
2143                 
2144                 if (msg_linkprio(msg) && 
2145                     (msg_linkprio(msg) != l_ptr->priority)) {
2146                         warn("Changing prio <%s>: %u->%u\n",
2147                              l_ptr->name, l_ptr->priority, msg_linkprio(msg));
2148                         l_ptr->priority = msg_linkprio(msg);
2149                         link_reset(l_ptr); /* Enforce change to take effect */
2150                         break;
2151                 }
2152                 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
2153                 l_ptr->stats.recv_states++;
2154                 if (link_reset_unknown(l_ptr))
2155                         break;
2156
2157                 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
2158                         rec_gap = mod(msg_next_sent(msg) - 
2159                                       mod(l_ptr->next_in_no));
2160                 }
2161
2162                 max_pkt_ack = msg_max_pkt(msg);
2163                 if (max_pkt_ack > l_ptr->max_pkt) {
2164                         dbg("Link <%s> updated MTU %u -> %u\n",
2165                             l_ptr->name, l_ptr->max_pkt, max_pkt_ack);
2166                         l_ptr->max_pkt = max_pkt_ack;
2167                         l_ptr->max_pkt_probes = 0;
2168                 }
2169
2170                 max_pkt_ack = 0;
2171                 if (msg_probe(msg)) {
2172                         l_ptr->stats.recv_probes++;
2173                         if (msg_size(msg) > sizeof(l_ptr->proto_msg)) {
2174                                 max_pkt_ack = msg_size(msg);
2175                         }
2176                 }
2177
2178                 /* Send protocol message before retransmits, to reduce loss risk */
2179
2180                 bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
2181
2182                 if (rec_gap || (msg_probe(msg))) {
2183                         link_send_proto_msg(l_ptr, STATE_MSG,
2184                                             0, rec_gap, 0, 0, max_pkt_ack);
2185                 }
2186                 if (msg_seq_gap(msg)) {
2187                         msg_dbg(msg, "With Gap:");
2188                         l_ptr->stats.recv_nacks++;
2189                         link_retransmit(l_ptr, l_ptr->first_out,
2190                                         msg_seq_gap(msg));
2191                 }
2192                 break;
2193         default:
2194                 msg_dbg(buf_msg(buf), "<DISCARDING UNKNOWN<");
2195         }
2196 exit:
2197         buf_discard(buf);
2198 }
2199
2200
2201 /*
2202  * link_tunnel(): Send one message via a link belonging to 
2203  * another bearer. Owner node is locked.
2204  */
2205 void link_tunnel(struct link *l_ptr, 
2206             struct tipc_msg *tunnel_hdr, 
2207             struct tipc_msg  *msg,
2208             u32 selector)
2209 {
2210         struct link *tunnel;
2211         struct sk_buff *buf;
2212         u32 length = msg_size(msg);
2213
2214         tunnel = l_ptr->owner->active_links[selector & 1];
2215         if (!link_is_up(tunnel))
2216                 return;
2217         msg_set_size(tunnel_hdr, length + INT_H_SIZE);
2218         buf = buf_acquire(length + INT_H_SIZE);
2219         if (!buf)
2220                 return;
2221         memcpy(buf->data, (unchar *)tunnel_hdr, INT_H_SIZE);
2222         memcpy(buf->data + INT_H_SIZE, (unchar *)msg, length);
2223         dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
2224         msg_dbg(buf_msg(buf), ">SEND>");
2225         assert(tunnel);
2226         link_send_buf(tunnel, buf);
2227 }
2228
2229
2230
2231 /*
2232  * link_changeover(): Send whole message queue via the remaining link
2233  *               Owner node is locked.
2234  */
2235
2236 void link_changeover(struct link *l_ptr)
2237 {
2238         u32 msgcount = l_ptr->out_queue_size;
2239         struct sk_buff *crs = l_ptr->first_out;
2240         struct link *tunnel = l_ptr->owner->active_links[0];
2241         int split_bundles = node_has_redundant_links(l_ptr->owner);
2242         struct tipc_msg tunnel_hdr;
2243
2244         if (!tunnel)
2245                 return;
2246
2247         if (!l_ptr->owner->permit_changeover)
2248                 return;
2249
2250         msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2251                  ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
2252         msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2253         msg_set_msgcnt(&tunnel_hdr, msgcount);
2254         if (!l_ptr->first_out) {
2255                 struct sk_buff *buf;
2256
2257                 assert(!msgcount);
2258                 buf = buf_acquire(INT_H_SIZE);
2259                 if (buf) {
2260                         memcpy(buf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
2261                         msg_set_size(&tunnel_hdr, INT_H_SIZE);
2262                         dbg("%c->%c:", l_ptr->b_ptr->net_plane,
2263                             tunnel->b_ptr->net_plane);
2264                         msg_dbg(&tunnel_hdr, "EMPTY>SEND>");
2265                         link_send_buf(tunnel, buf);
2266                 } else {
2267                         warn("Memory squeeze; link changeover failed\n");
2268                 }
2269                 return;
2270         }
2271         while (crs) {
2272                 struct tipc_msg *msg = buf_msg(crs);
2273
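                /*
                 * If the peer node still has a redundant link, split bundles and
                 * tunnel each bundled message according to its own link selector
                 */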
2274                 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
2275                         u32 msgcount = msg_msgcnt(msg);
2276                         struct tipc_msg *m = msg_get_wrapped(msg);
2277                         unchar* pos = (unchar*)m;
2278
2279                         while (msgcount--) {
2280                                 msg_set_seqno(m,msg_seqno(msg));
2281                                 link_tunnel(l_ptr, &tunnel_hdr, m,
2282                                             msg_link_selector(m));
2283                                 pos += align(msg_size(m));
2284                                 m = (struct tipc_msg *)pos;
2285                         }
2286                 } else {
2287                         link_tunnel(l_ptr, &tunnel_hdr, msg,
2288                                     msg_link_selector(msg));
2289                 }
2290                 crs = crs->next;
2291         }
2292 }
2293
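/*
 * link_send_duplicate(): Tunnel a duplicate of every message in the send
 *                        queue via the given tunnel link, updating the
 *                        acknowledge info of each copy before it is sent.
 */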
2294 void link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2295 {
2296         struct sk_buff *iter;
2297         struct tipc_msg tunnel_hdr;
2298
2299         msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2300                  DUPLICATE_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
2301         msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
2302         msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2303         iter = l_ptr->first_out;
2304         while (iter) {
2305                 struct sk_buff *outbuf;
2306                 struct tipc_msg *msg = buf_msg(iter);
2307                 u32 length = msg_size(msg);
2308
2309                 if (msg_user(msg) == MSG_BUNDLER)
2310                         msg_set_type(msg, CLOSED_MSG);
2311                 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));   /* Update */
2312                 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 
2313                 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
2314                 outbuf = buf_acquire(length + INT_H_SIZE);
2315                 if (outbuf == NULL) {
2316                         warn("Memory squeeze; buffer duplication failed\n");
2317                         return;
2318                 }
2319                 memcpy(outbuf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
2320                 memcpy(outbuf->data + INT_H_SIZE, iter->data, length);
2321                 dbg("%c->%c:", l_ptr->b_ptr->net_plane,
2322                     tunnel->b_ptr->net_plane);
2323                 msg_dbg(buf_msg(outbuf), ">SEND>");
2324                 link_send_buf(tunnel, outbuf);
2325                 if (!link_is_up(l_ptr))
2326                         return;
2327                 iter = iter->next;
2328         }
2329 }
2330
2331
2332
2333 /**
2334  * buf_extract - extracts embedded TIPC message from another message
2335  * @skb: encapsulating message buffer
2336  * @from_pos: offset to extract from
2337  *
2338  * Returns a new message buffer containing an embedded message.  The 
2339  * encapsulating message itself is left unchanged.
2340  */
2341
2342 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2343 {
2344         struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
2345         u32 size = msg_size(msg);
2346         struct sk_buff *eb;
2347
2348         eb = buf_acquire(size);
2349         if (eb)
2350                 memcpy(eb->data, (unchar *)msg, size);
2351         return eb;
2352 }
2353
2354 /* 
2355  *  link_recv_changeover_msg(): Receive tunneled packet sent via other
2356  *  link. Node is locked. Returns 1 with *buf set to the extracted message, else 0.
2357  */
2358
2359 static int link_recv_changeover_msg(struct link **l_ptr,
2360                                     struct sk_buff **buf)
2361 {
2362         struct sk_buff *tunnel_buf = *buf;
2363         struct link *dest_link;
2364         struct tipc_msg *msg;
2365         struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
2366         u32 msg_typ = msg_type(tunnel_msg);
2367         u32 msg_count = msg_msgcnt(tunnel_msg);
2368
2369         dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
2370         assert(dest_link != *l_ptr);
2371         if (!dest_link) {
2372                 msg_dbg(tunnel_msg, "NOLINK/<REC<");
2373                 goto exit;
2374         }
2375         dbg("%c<-%c:", dest_link->b_ptr->net_plane,
2376             (*l_ptr)->b_ptr->net_plane);
2377         *l_ptr = dest_link;
2378         msg = msg_get_wrapped(tunnel_msg);
2379
2380         if (msg_typ == DUPLICATE_MSG) {
2381                 if (less(msg_seqno(msg), mod(dest_link->next_in_no))) {
2382                         msg_dbg(tunnel_msg, "DROP/<REC<");
2383                         goto exit;
2384                 }
2385                 *buf = buf_extract(tunnel_buf,INT_H_SIZE);
2386                 if (*buf == NULL) {
2387                         warn("Memory squeeze; failed to extract msg\n");
2388                         goto exit;
2389                 }
2390                 msg_dbg(tunnel_msg, "TNL<REC<");
2391                 buf_discard(tunnel_buf);
2392                 return 1;
2393         }
2394
2395         /* First original message? */
2396
2397         if (link_is_up(dest_link)) {
2398                 msg_dbg(tunnel_msg, "UP/FIRST/<REC<");
2399                 link_reset(dest_link);
2400                 dest_link->exp_msg_count = msg_count;
2401                 if (!msg_count)
2402                         goto exit;
2403         } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
2404                 msg_dbg(tunnel_msg, "BLK/FIRST/<REC<");
2405                 dest_link->exp_msg_count = msg_count;
2406                 if (!msg_count)
2407                         goto exit;
2408         }
2409
2410         /* Receive original message */
2411
2412         if (dest_link->exp_msg_count == 0) {
2413                 msg_dbg(tunnel_msg, "OVERDUE/DROP/<REC<");
2414                 dbg_print_link(dest_link, "LINK:");
2415                 goto exit;
2416         }
2417         dest_link->exp_msg_count--;
2418         if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
2419                 msg_dbg(tunnel_msg, "DROP/DUPL/<REC<");
2420                 goto exit;
2421         } else {
2422                 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2423                 if (*buf != NULL) {
2424                         msg_dbg(tunnel_msg, "TNL<REC<");
2425                         buf_discard(tunnel_buf);
2426                         return 1;
2427                 } else {
2428                         warn("Memory squeeze; dropped incoming msg\n");
2429                 }
2430         }
2431 exit:
2432         *buf = 0;
2433         buf_discard(tunnel_buf);
2434         return 0;
2435 }
2436
2437 /*
2438  *  Bundler functionality:
2439  */
2440 void link_recv_bundle(struct sk_buff *buf)
2441 {
2442         u32 msgcount = msg_msgcnt(buf_msg(buf));
2443         u32 pos = INT_H_SIZE;
2444         struct sk_buff *obuf;
2445
2446         msg_dbg(buf_msg(buf), "<BNDL<: ");
2447         while (msgcount--) {
2448                 obuf = buf_extract(buf, pos);
2449                 if (obuf == NULL) {
2450                         char addr_string[16];
2451
2452                         warn("Buffer allocation failure;\n");
2453                         warn("  incoming message(s) from %s lost\n",
2454                              addr_string_fill(addr_string, 
2455                                               msg_orignode(buf_msg(buf))));
2456                         return;
2457                 }
2458                 pos += align(msg_size(buf_msg(obuf)));
2459                 msg_dbg(buf_msg(obuf), "     /");
2460                 net_route_msg(obuf);
2461         }
2462         buf_discard(buf);
2463 }
2464
2465 /*
2466  *  Fragmentation/defragmentation:
2467  */
2468
2469
2470 /* 
2471  * link_send_long_buf: Entry for buffers needing fragmentation.
2472  * The buffer is complete, including the total message length.
2473  * Returns user data length.
2474  */
2475 int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2476 {
2477         struct tipc_msg *inmsg = buf_msg(buf);
2478         struct tipc_msg fragm_hdr;
2479         u32 insize = msg_size(inmsg);
2480         u32 dsz = msg_data_sz(inmsg);
2481         unchar *crs = buf->data;
2482         u32 rest = insize;
2483         u32 pack_sz = link_max_pkt(l_ptr);
2484         u32 fragm_sz = pack_sz - INT_H_SIZE;
2485         u32 fragm_no = 1;
2486         u32 destaddr = msg_destnode(inmsg);
2487
2488         if (msg_short(inmsg))
2489                 destaddr = l_ptr->addr;
2490
2491         if (msg_routed(inmsg))
2492                 msg_set_prevnode(inmsg, tipc_own_addr);
2493
2494         /* Prepare reusable fragment header: */
2495
2496         msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
2497                  TIPC_OK, INT_H_SIZE, destaddr);
2498         msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg));
2499         msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++));
2500         msg_set_fragm_no(&fragm_hdr, fragm_no);
2501         l_ptr->stats.sent_fragmented++;
2502
2503         /* Chop up message: */
2504
2505         while (rest > 0) {
2506                 struct sk_buff *fragm;
2507
2508                 if (rest <= fragm_sz) {
2509                         fragm_sz = rest;
2510                         msg_set_type(&fragm_hdr, LAST_FRAGMENT);
2511                 }
2512                 fragm = buf_acquire(fragm_sz + INT_H_SIZE);
2513                 if (fragm == NULL) {
2514                         warn("Memory squeeze; failed to fragment msg\n");
2515                         dsz = -ENOMEM;
2516                         goto exit;
2517                 }
2518                 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
2519                 memcpy(fragm->data, (unchar *)&fragm_hdr, INT_H_SIZE);
2520                 memcpy(fragm->data + INT_H_SIZE, crs, fragm_sz);
2521
2522                 /*  Send queued messages first, if any: */
2523
2524                 l_ptr->stats.sent_fragments++;
2525                 link_send_buf(l_ptr, fragm);
2526                 if (!link_is_up(l_ptr))
2527                         return dsz;
2528                 msg_set_fragm_no(&fragm_hdr, ++fragm_no);
2529                 rest -= fragm_sz;
2530                 crs += fragm_sz;
2531                 msg_set_type(&fragm_hdr, FRAGMENT);
2532         }
2533 exit:
2534         buf_discard(buf);
2535         return dsz;
2536 }
2537
2538 /* 
2539  * A pending message being re-assembled must store certain values 
2540  * to handle subsequent fragments correctly. The following functions 
2541  * help store these values in unused, available fields in the
2542  * pending message. This makes dynamic memory allocation unnecessary.
2543  */
2544
2545 static inline u32 get_long_msg_seqno(struct sk_buff *buf)
2546 {
2547         return msg_seqno(buf_msg(buf));
2548 }
2549
2550 static inline void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
2551 {
2552         msg_set_seqno(buf_msg(buf), seqno);
2553 }
2554
2555 static inline u32 get_fragm_size(struct sk_buff *buf)
2556 {
2557         return msg_ack(buf_msg(buf));
2558 }
2559
2560 static inline void set_fragm_size(struct sk_buff *buf, u32 sz)
2561 {
2562         msg_set_ack(buf_msg(buf), sz);
2563 }
2564
2565 static inline u32 get_expected_frags(struct sk_buff *buf)
2566 {
2567         return msg_bcast_ack(buf_msg(buf));
2568 }
2569
2570 static inline void set_expected_frags(struct sk_buff *buf, u32 exp)
2571 {
2572         msg_set_bcast_ack(buf_msg(buf), exp);
2573 }
2574
2575 static inline u32 get_timer_cnt(struct sk_buff *buf)
2576 {
2577         return msg_reroute_cnt(buf_msg(buf));
2578 }
2579
2580 static inline void incr_timer_cnt(struct sk_buff *buf)
2581 {
2582         msg_incr_reroute_cnt(buf_msg(buf));
2583 }
2584
2585 /* 
2586  * link_recv_fragment(): Called with node lock on. Returns 1 and sets
2587  * *fb and *m to the reassembled message when it is complete, else 0.
2588  */
2589 int link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, 
2590                        struct tipc_msg **m)
2591 {
2592         struct sk_buff *prev = 0;
2593         struct sk_buff *fbuf = *fb;
2594         struct tipc_msg *fragm = buf_msg(fbuf);
2595         struct sk_buff *pbuf = *pending;
2596         u32 long_msg_seq_no = msg_long_msgno(fragm);
2597
2598         *fb = 0;
2599         msg_dbg(fragm,"FRG<REC<");
2600
2601         /* Is there an incomplete message waiting for this fragment? */
2602
2603         while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no)
2604                         || (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
2605                 prev = pbuf;
2606                 pbuf = pbuf->next;
2607         }
2608
2609         if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
2610                 struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
2611                 u32 msg_sz = msg_size(imsg);
2612                 u32 fragm_sz = msg_data_sz(fragm);
2613                 u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
2614                 u32 max =  TIPC_MAX_USER_MSG_SIZE + LONG_H_SIZE;
2615                 if (msg_type(imsg) == TIPC_MCAST_MSG)
2616                         max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
2617                 if (msg_size(imsg) > max) {
2618                         msg_dbg(fragm,"<REC<Oversized: ");
2619                         buf_discard(fbuf);
2620                         return 0;
2621                 }
2622                 pbuf = buf_acquire(msg_size(imsg));
2623                 if (pbuf != NULL) {
2624                         pbuf->next = *pending;
2625                         *pending = pbuf;
2626                         memcpy(pbuf->data, (unchar *)imsg, msg_data_sz(fragm));
2627
2628                         /*  Prepare buffer for subsequent fragments. */
2629
2630                         set_long_msg_seqno(pbuf, long_msg_seq_no); 
2631                         set_fragm_size(pbuf,fragm_sz); 
2632                         set_expected_frags(pbuf,exp_fragm_cnt - 1); 
2633                 } else {
2634                         warn("Memory squeeze; got no defragmenting buffer\n");
2635                 }
2636                 buf_discard(fbuf);
2637                 return 0;
2638         } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
2639                 u32 dsz = msg_data_sz(fragm);
2640                 u32 fsz = get_fragm_size(pbuf);
2641                 u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
2642                 u32 exp_frags = get_expected_frags(pbuf) - 1;
2643                 memcpy(pbuf->data + crs, msg_data(fragm), dsz);
2644                 buf_discard(fbuf);
2645
2646                 /* Is message complete? */
2647
2648                 if (exp_frags == 0) {
2649                         if (prev)
2650                                 prev->next = pbuf->next;
2651                         else
2652                                 *pending = pbuf->next;
2653                         msg_reset_reroute_cnt(buf_msg(pbuf));
2654                         *fb = pbuf;
2655                         *m = buf_msg(pbuf);
2656                         return 1;
2657                 }
2658                 set_expected_frags(pbuf,exp_frags);     
2659                 return 0;
2660         }
2661         dbg(" Discarding orphan fragment %p\n", fbuf);
2662         msg_dbg(fragm,"ORPHAN:");
2663         dbg("Pending long buffers:\n");
2664         dbg_print_buf_chain(*pending);
2665         buf_discard(fbuf);
2666         return 0;
2667 }
2668
2669 /**
2670  * link_check_defragm_bufs - flush stale incoming message fragments
2671  * @l_ptr: pointer to link
2672  */
2673
2674 static void link_check_defragm_bufs(struct link *l_ptr)
2675 {
2676         struct sk_buff *prev = 0;
2677         struct sk_buff *next = 0;
2678         struct sk_buff *buf = l_ptr->defragm_buf;
2679
2680         if (!buf)
2681                 return;
2682         if (!link_working_working(l_ptr))
2683                 return;
2684         while (buf) {
2685                 u32 cnt = get_timer_cnt(buf);
2686
2687                 next = buf->next;
2688                 if (cnt < 4) {
2689                         incr_timer_cnt(buf);
2690                         prev = buf;
2691                 } else {
2692                         dbg(" Discarding incomplete long buffer\n");
2693                         msg_dbg(buf_msg(buf), "LONG:");
2694                         dbg_print_link(l_ptr, "curr:");
2695                         dbg("Pending long buffers:\n");
2696                         dbg_print_buf_chain(l_ptr->defragm_buf);
2697                         if (prev)
2698                                 prev->next = buf->next;
2699                         else
2700                                 l_ptr->defragm_buf = buf->next;
2701                         buf_discard(buf);
2702                 }
2703                 buf = next;
2704         }
2705 }
2706
2707
2708
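/*
 * link_set_supervision_props(): Derive the continuity timer interval and
 *                               abort limit from the configured link tolerance.
 */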
2709 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
2710 {
2711         l_ptr->tolerance = tolerance;
2712         l_ptr->continuity_interval =
2713                 ((tolerance / 4) > 500) ? 500 : tolerance / 4;
2714         l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
2715 }
2716
2717
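/*
 * link_set_queue_limits(): Set per-user send queue limits; the data message
 *                          limits are scaled from the configured link window,
 *                          the remaining limits are fixed.
 */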
2718 void link_set_queue_limits(struct link *l_ptr, u32 window)
2719 {
2720         /* Data messages from this node, inclusive FIRST_FRAGM */
2721         l_ptr->queue_limit[DATA_LOW] = window;
2722         l_ptr->queue_limit[DATA_MEDIUM] = (window / 3) * 4;
2723         l_ptr->queue_limit[DATA_HIGH] = (window / 3) * 5;
2724         l_ptr->queue_limit[DATA_CRITICAL] = (window / 3) * 6;
2725         /* Transiting data messages, inclusive FIRST_FRAGM */
2726         l_ptr->queue_limit[DATA_LOW + 4] = 300;
2727         l_ptr->queue_limit[DATA_MEDIUM + 4] = 600;
2728         l_ptr->queue_limit[DATA_HIGH + 4] = 900;
2729         l_ptr->queue_limit[DATA_CRITICAL + 4] = 1200;
2730         l_ptr->queue_limit[CONN_MANAGER] = 1200;
2731         l_ptr->queue_limit[ROUTE_DISTRIBUTOR] = 1200;
2732         l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
2733         l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
2734         /* FRAGMENT and LAST_FRAGMENT packets */
2735         l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
2736 }
2737
2738 /**
2739  * link_find_link - locate link by name
2740  * @name: ptr to link name string
2741  * @node: ptr to area to be filled with ptr to associated node
2742  * 
2743  * Caller must hold 'net_lock' to ensure node and bearer are not deleted;
2744  * this also prevents link deletion.
2745  * 
2746  * Returns pointer to link (or 0 if invalid link name).
2747  */
2748
2749 static struct link *link_find_link(const char *name, struct node **node)
2750 {
2751         struct link_name link_name_parts;
2752         struct bearer *b_ptr;
2753         struct link *l_ptr; 
2754
2755         if (!link_name_validate(name, &link_name_parts))
2756                 return 0;
2757
2758         b_ptr = bearer_find_interface(link_name_parts.if_local);
2759         if (!b_ptr)
2760                 return 0;
2761
2762         *node = node_find(link_name_parts.addr_peer); 
2763         if (!*node)
2764                 return 0;
2765
2766         l_ptr = (*node)->links[b_ptr->identity];
2767         if (!l_ptr || strcmp(l_ptr->name, name))
2768                 return 0;
2769
2770         return l_ptr;
2771 }
2772
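/*
 * link_cmd_config(): Handle a configuration command setting the tolerance,
 *                    priority or window of a named link (only the window
 *                    can be changed on the broadcast link).
 */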
2773 struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space, 
2774                                 u16 cmd)
2775 {
2776         struct tipc_link_config *args;
2777         u32 new_value;
2778         struct link *l_ptr;
2779         struct node *node;
2780         int res;
2781
2782         if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
2783                 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2784
2785         args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
2786         new_value = ntohl(args->value);
2787
2788         if (!strcmp(args->name, bc_link_name)) {
2789                 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
2790                     (bclink_set_queue_limits(new_value) == 0))
2791                         return cfg_reply_none();
2792                 return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
2793                                               " (cannot change setting on broadcast link)");
2794         }
2795
2796         read_lock_bh(&net_lock);
2797         l_ptr = link_find_link(args->name, &node); 
2798         if (!l_ptr) {
2799                 read_unlock_bh(&net_lock);
2800                 return cfg_reply_error_string("link not found");
2801         }
2802
2803         node_lock(node);
2804         res = -EINVAL;
2805         switch (cmd) {
2806         case TIPC_CMD_SET_LINK_TOL: 
2807                 if ((new_value >= TIPC_MIN_LINK_TOL) && 
2808                     (new_value <= TIPC_MAX_LINK_TOL)) {
2809                         link_set_supervision_props(l_ptr, new_value);
2810                         link_send_proto_msg(l_ptr, STATE_MSG, 
2811                                             0, 0, new_value, 0, 0);
2812                         res = TIPC_OK;
2813                 }
2814                 break;
2815         case TIPC_CMD_SET_LINK_PRI: 
2816                 if (new_value < TIPC_NUM_LINK_PRI) {
2817                         l_ptr->priority = new_value;
2818                         link_send_proto_msg(l_ptr, STATE_MSG, 
2819                                             0, 0, 0, new_value, 0);
2820                         res = TIPC_OK;
2821                 }
2822                 break;
2823         case TIPC_CMD_SET_LINK_WINDOW: 
2824                 if ((new_value >= TIPC_MIN_LINK_WIN) && 
2825                     (new_value <= TIPC_MAX_LINK_WIN)) {
2826                         link_set_queue_limits(l_ptr, new_value);
2827                         res = TIPC_OK;
2828                 }
2829                 break;
2830         }
2831         node_unlock(node);
2832
2833         read_unlock_bh(&net_lock);
2834         if (res)
2835                 return cfg_reply_error_string("cannot change link setting");
2836
2837         return cfg_reply_none();
2838 }
2839
2840 /**
2841  * link_reset_statistics - reset link statistics
2842  * @l_ptr: pointer to link
2843  */
2844
2845 static void link_reset_statistics(struct link *l_ptr)
2846 {
2847         memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
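             /* re-baseline the info counters so RX/TX deltas (next_*_no - *_info) restart at zero */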
2848         l_ptr->stats.sent_info = l_ptr->next_out_no;
2849         l_ptr->stats.recv_info = l_ptr->next_in_no;
2850 }
2851
2852 struct sk_buff *link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2853 {
2854         char *link_name;
2855         struct link *l_ptr; 
2856         struct node *node;
2857
2858         if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2859                 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2860
2861         link_name = (char *)TLV_DATA(req_tlv_area);
2862         if (!strcmp(link_name, bc_link_name)) {
2863                 if (bclink_reset_stats())
2864                         return cfg_reply_error_string("link not found");
2865                 return cfg_reply_none();
2866         }
2867
2868         read_lock_bh(&net_lock);
2869         l_ptr = link_find_link(link_name, &node); 
2870         if (!l_ptr) {
2871                 read_unlock_bh(&net_lock);
2872                 return cfg_reply_error_string("link not found");
2873         }
2874
2875         node_lock(node);
2876         link_reset_statistics(l_ptr);
2877         node_unlock(node);
2878         read_unlock_bh(&net_lock);
2879         return cfg_reply_none();
2880 }
2881
2882 /**
2883  * percent - convert count to a percentage of total (rounded to nearest percent)
2884  */
2885
2886 static u32 percent(u32 count, u32 total)
2887 {
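             /* adding total/2 before dividing rounds the result to the nearest whole percent */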
2888         return (count * 100 + (total / 2)) / total;
2889 }
2890
2891 /**
2892  * link_stats - print link statistics
2893  * @name: link name
2894  * @buf: print buffer area
2895  * @buf_size: size of print buffer area
2896  * 
2897  * Returns length of print buffer data string (or 0 if error)
2898  */
2899
2900 static int link_stats(const char *name, char *buf, const u32 buf_size)
2901 {
2902         struct print_buf pb;
2903         struct link *l_ptr; 
2904         struct node *node;
2905         char *status;
2906         u32 profile_total = 0;
2907
2908         if (!strcmp(name, bc_link_name))
2909                 return bclink_stats(buf, buf_size);
2910
2911         printbuf_init(&pb, buf, buf_size);
2912
2913         read_lock_bh(&net_lock);
2914         l_ptr = link_find_link(name, &node); 
2915         if (!l_ptr) {
2916                 read_unlock_bh(&net_lock);
2917                 return 0;
2918         }
2919         node_lock(node);
2920
2921         if (link_is_active(l_ptr))
2922                 status = "ACTIVE";
2923         else if (link_is_up(l_ptr))
2924                 status = "STANDBY";
2925         else
2926                 status = "DEFUNCT";
2927         tipc_printf(&pb, "Link <%s>\n"
2928                          "  %s  MTU:%u  Priority:%u  Tolerance:%u ms"
2929                          "  Window:%u packets\n", 
2930                     l_ptr->name, status, link_max_pkt(l_ptr), 
2931                     l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
2932         tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n", 
2933                     l_ptr->next_in_no - l_ptr->stats.recv_info,
2934                     l_ptr->stats.recv_fragments,
2935                     l_ptr->stats.recv_fragmented,
2936                     l_ptr->stats.recv_bundles,
2937                     l_ptr->stats.recv_bundled);
2938         tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n", 
2939                     l_ptr->next_out_no - l_ptr->stats.sent_info,
2940                     l_ptr->stats.sent_fragments,
2941                     l_ptr->stats.sent_fragmented, 
2942                     l_ptr->stats.sent_bundles,
2943                     l_ptr->stats.sent_bundled);
2944         profile_total = l_ptr->stats.msg_length_counts;
2945         if (!profile_total)
2946                 profile_total = 1;
2947         tipc_printf(&pb, "  TX profile sample:%u packets  average:%u octets\n"
2948                          "  0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
2949                          "-16384:%u%% -32768:%u%% -66000:%u%%\n",
2950                     l_ptr->stats.msg_length_counts,
2951                     l_ptr->stats.msg_lengths_total / profile_total,
2952                     percent(l_ptr->stats.msg_length_profile[0], profile_total),
2953                     percent(l_ptr->stats.msg_length_profile[1], profile_total),
2954                     percent(l_ptr->stats.msg_length_profile[2], profile_total),
2955                     percent(l_ptr->stats.msg_length_profile[3], profile_total),
2956                     percent(l_ptr->stats.msg_length_profile[4], profile_total),
2957                     percent(l_ptr->stats.msg_length_profile[5], profile_total),
2958                     percent(l_ptr->stats.msg_length_profile[6], profile_total));
2959         tipc_printf(&pb, "  RX states:%u probes:%u naks:%u defs:%u dups:%u\n", 
2960                     l_ptr->stats.recv_states,
2961                     l_ptr->stats.recv_probes,
2962                     l_ptr->stats.recv_nacks,
2963                     l_ptr->stats.deferred_recv, 
2964                     l_ptr->stats.duplicates);
2965         tipc_printf(&pb, "  TX states:%u probes:%u naks:%u acks:%u dups:%u\n", 
2966                     l_ptr->stats.sent_states, 
2967                     l_ptr->stats.sent_probes, 
2968                     l_ptr->stats.sent_nacks, 
2969                     l_ptr->stats.sent_acks, 
2970                     l_ptr->stats.retransmitted);
2971         tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
2972                     l_ptr->stats.bearer_congs,
2973                     l_ptr->stats.link_congs, 
2974                     l_ptr->stats.max_queue_sz,
2975                     l_ptr->stats.queue_sz_counts
2976                     ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
2977                     : 0);
2978
2979         node_unlock(node);
2980         read_unlock_bh(&net_lock);
2981         return printbuf_validate(&pb);
2982 }
2983
2984 #define MAX_LINK_STATS_INFO 2000
2985
2986 struct sk_buff *link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
2987 {
2988         struct sk_buff *buf;
2989         struct tlv_desc *rep_tlv;
2990         int str_len;
2991
2992         if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2993                 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2994
2995         buf = cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
2996         if (!buf)
2997                 return NULL;
2998
2999         rep_tlv = (struct tlv_desc *)buf->data;
3000
3001         str_len = link_stats((char *)TLV_DATA(req_tlv_area),
3002                              (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
3003         if (!str_len) {
3004                 buf_discard(buf);
3005                 return cfg_reply_error_string("link not found");
3006         }
3007
3008         skb_put(buf, TLV_SPACE(str_len));
3009         TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
3010
3011         return buf;
3012 }
3013
3014 #if 0
3015 int link_control(const char *name, u32 op, u32 val)
3016 {
3017         int res = -EINVAL;
3018         struct link *l_ptr;
3019         u32 bearer_id;
3020         struct node * node;
3021         u32 a;
3022
3023         a = link_name2addr(name, &bearer_id);
3024         read_lock_bh(&net_lock);
3025         node = node_find(a);
3026         if (node) {
3027                 node_lock(node);
3028                 l_ptr = node->links[bearer_id];
3029                 if (l_ptr) {
3030                         if (op == TIPC_REMOVE_LINK) {
3031                                 struct bearer *b_ptr = l_ptr->b_ptr;
3032                                 spin_lock_bh(&b_ptr->publ.lock);
3033                                 link_delete(l_ptr);
3034                                 spin_unlock_bh(&b_ptr->publ.lock);
3035                         }
3036                         if (op == TIPC_CMD_BLOCK_LINK) {
3037                                 link_reset(l_ptr);
3038                                 l_ptr->blocked = 1;
3039                         }
3040                         if (op == TIPC_CMD_UNBLOCK_LINK) {
3041                                 l_ptr->blocked = 0;
3042                         }
3043                         res = TIPC_OK;
3044                 }
3045                 node_unlock(node);
3046         }
3047         read_unlock_bh(&net_lock);
3048         return res;
3049 }
3050 #endif
3051
3052 /**
3053  * link_get_max_pkt - get maximum packet size to use when sending to destination
3054  * @dest: network address of destination node
3055  * @selector: used to select from set of active links
3056  * 
3057  * If no active link can be found, uses default maximum packet size.
3058  */
3059
3060 u32 link_get_max_pkt(u32 dest, u32 selector)
3061 {
3062         struct node *n_ptr;
3063         struct link *l_ptr;
3064         u32 res = MAX_PKT_DEFAULT;
3065         
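             /* traffic to own node never traverses a link, so the full message size is usable */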
3066         if (dest == tipc_own_addr)
3067                 return MAX_MSG_SIZE;
3068
3069         read_lock_bh(&net_lock);        
3070         n_ptr = node_select(dest, selector);
3071         if (n_ptr) {
3072                 node_lock(n_ptr);
3073                 l_ptr = n_ptr->active_links[selector & 1];
3074                 if (l_ptr)
3075                         res = link_max_pkt(l_ptr);
3076                 node_unlock(n_ptr);
3077         }
3078         read_unlock_bh(&net_lock);       
3079         return res;
3080 }
3081
3082 #if 0
3083 static void link_dump_rec_queue(struct link *l_ptr)
3084 {
3085         struct sk_buff *crs;
3086
3087         if (!l_ptr->oldest_deferred_in) {
3088                 info("Reception queue empty\n");
3089                 return;
3090         }
3091         info("Contents of Reception queue:\n");
3092         crs = l_ptr->oldest_deferred_in;
3093         while (crs) {
3094                 if (crs->data == (void *)0x0000a3a3) {
3095                         info("buffer %x invalid\n", crs);
3096                         return;
3097                 }
3098                 msg_dbg(buf_msg(crs), "In rec queue: \n");
3099                 crs = crs->next;
3100         }
3101 }
3102 #endif
3103
3104 static void link_dump_send_queue(struct link *l_ptr)
3105 {
3106         if (l_ptr->next_out) {
3107                 info("\nContents of unsent queue:\n");
3108                 dbg_print_buf_chain(l_ptr->next_out);
3109         }
3110         if (l_ptr->first_out) {
3111                 info("\nContents of send queue:\n");
3112                 dbg_print_buf_chain(l_ptr->first_out);
3113         } else
3114                 info("Empty send queue\n");
3115 }
3116
3117 static void link_print(struct link *l_ptr, struct print_buf *buf,
3118                        const char *str)
3119 {
3120         tipc_printf(buf, str);
3121         if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
3122                 return;
3123         tipc_printf(buf, "Link %x<%s>:",
3124                     l_ptr->addr, l_ptr->b_ptr->publ.name);
3125         tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
3126         tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
3127         tipc_printf(buf, "SQUE");
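             /* dump the send queue sequence range: first_out..next_out..last_out */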
3128         if (l_ptr->first_out) {
3129                 tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
3130                 if (l_ptr->next_out)
3131                         tipc_printf(buf, "%u..",
3132                                     msg_seqno(buf_msg(l_ptr->next_out)));
3133                 tipc_printf(buf, "%u]",
3134                             msg_seqno(buf_msg(l_ptr->last_out)));
3136                 if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) - 
3137                          msg_seqno(buf_msg(l_ptr->first_out))) 
3138                      != (l_ptr->out_queue_size - 1))
3139                     || (l_ptr->last_out->next != 0)) {
3140                         tipc_printf(buf, "\nSend queue inconsistency\n");
3141                         tipc_printf(buf, "first_out= %x ", l_ptr->first_out);
3142                         tipc_printf(buf, "next_out= %x ", l_ptr->next_out);
3143                         tipc_printf(buf, "last_out= %x ", l_ptr->last_out);
3144                         link_dump_send_queue(l_ptr);
3145                 }
3146         } else
3147                 tipc_printf(buf, "[]");
3148         tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
3149         if (l_ptr->oldest_deferred_in) {
3150                 u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
3151                 u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
3152                 tipc_printf(buf, ":RQUE[%u..%u]", o, n);
3153                 if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
3154                         tipc_printf(buf, ":RQSIZ(%u)",
3155                                     l_ptr->deferred_inqueue_sz);
3156                 }
3157         }
3158         if (link_working_unknown(l_ptr))
3159                 tipc_printf(buf, ":WU");
3160         if (link_reset_reset(l_ptr))
3161                 tipc_printf(buf, ":RR");
3162         if (link_reset_unknown(l_ptr))
3163                 tipc_printf(buf, ":RU");
3164         if (link_working_working(l_ptr))
3165                 tipc_printf(buf, ":WW");
3166         tipc_printf(buf, "\n");
3167 }
3168