/*
* net/tipc/bcast.c: TIPC broadcast code
- *
- * Copyright (c) 2003-2005, Ericsson Research Canada
+ *
+ * Copyright (c) 2004-2006, Ericsson AB
* Copyright (c) 2004, Intel Corporation.
* Copyright (c) 2005, Wind River Systems
- * Copyright (c) 2005-2006, Ericsson AB
* All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
+ * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- * Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "name_table.h"
#include "bcast.h"
-
#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
#define BCLINK_LOG_BUF_SIZE 0
+/*
+ * Loss rate for incoming broadcast frames; used to test retransmission code.
+ * Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
+ */
+
+#define TIPC_BCAST_LOSS_RATE 0
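+/*
+ * Build-time usage sketch (the value 4 below is illustrative only):
+ * compiling with
+ *
+ *	#define TIPC_BCAST_LOSS_RATE 4
+ *
+ * makes tipc_bclink_recv_pkt() drop every 4th incoming broadcast frame,
+ * which exercises the NACK/retransmission machinery described above.
+ */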
+
/**
* struct bcbearer_pair - a pair of bearers used by broadcast link
* @primary: pointer to primary bearer
* @secondary: pointer to secondary bearer
- *
- * Bearers must have same priority and same set of reachable destinations
+ *
+ * Bearers must have same priority and same set of reachable destinations
* to be paired.
*/
/**
 * struct bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
* @media: (non-standard) broadcast media structure
* @bpairs: array of bearer pairs
- * @bpairs_temp: array of bearer pairs used during creation of "bpairs"
+ * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
+ * @remains: temporary node map used by tipc_bcbearer_send()
+ * @remains_new: temporary node map used by tipc_bcbearer_send()
+ *
+ * Note: The fields labelled "temporary" are incorporated into the bearer
+ * to avoid consuming potentially limited stack space through the use of
+ * large local variables within multicast routines. Concurrent access is
+ * prevented through use of the spinlock "bc_lock".
*/
struct bcbearer {
struct bearer bearer;
struct media media;
struct bcbearer_pair bpairs[MAX_BEARERS];
- struct bcbearer_pair bpairs_temp[TIPC_NUM_LINK_PRI];
+ struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
+ struct tipc_node_map remains;
+ struct tipc_node_map remains_new;
};
/**
* struct bclink - link used for broadcast messages
* @link: (non-standard) broadcast link structure
* @node: (non-standard) node structure representing b'cast link's peer node
- *
+ *
* Handles sequence numbering, fragmentation, bundling, etc.
*/
struct bclink {
struct link link;
- struct node node;
+ struct tipc_node node;
};
static struct bcbearer *bcbearer = NULL;
static struct bclink *bclink = NULL;
static struct link *bcl = NULL;
-static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(bc_lock);
-char bc_link_name[] = "multicast-link";
+const char tipc_bclink_name[] = "broadcast-link";
-static inline u32 buf_seqno(struct sk_buff *buf)
+static u32 buf_seqno(struct sk_buff *buf)
{
return msg_seqno(buf_msg(buf));
-}
+}
-static inline u32 bcbuf_acks(struct sk_buff *buf)
+static u32 bcbuf_acks(struct sk_buff *buf)
{
- return (u32)TIPC_SKB_CB(buf)->handle;
+ return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}
-static inline void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
+static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
- TIPC_SKB_CB(buf)->handle = (void *)acks;
+ TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}
-static inline void bcbuf_decr_acks(struct sk_buff *buf)
+static void bcbuf_decr_acks(struct sk_buff *buf)
{
bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}
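/*
 * A minimal sketch (illustrative values, not part of this file) of the
 * round-trip cast used by bcbuf_acks()/bcbuf_set_acks(): going through
 * "unsigned long" keeps the pointer <-> integer conversion exact on both
 * 32-bit and 64-bit builds, letting the pointer-sized "handle" field of
 * TIPC_SKB_CB hold a 32-bit acknowledgement counter:
 *
 *	void *handle = (void *)(unsigned long)3;	(store acks = 3)
 *	u32 acks = (u32)(unsigned long)handle;		(reads back 3)
 */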
-/**
+/**
* bclink_set_gap - set gap according to contents of current deferred pkt queue
- *
+ *
* Called with 'node' locked, bc_lock unlocked
*/
-static inline void bclink_set_gap(struct node *n_ptr)
+static void bclink_set_gap(struct tipc_node *n_ptr)
{
struct sk_buff *buf = n_ptr->bclink.deferred_head;
n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
}
-/**
+/**
* bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
- *
+ *
 * This mechanism endeavours to prevent all nodes in the network from trying
* to ACK or NACK at the same time.
- *
+ *
* Note: TIPC uses a different trigger to distribute ACKs than it does to
- * distribute NACKs, but tries to use the same spacing (divide by 16).
+ * distribute NACKs, but tries to use the same spacing (divide by 16).
*/
-static inline int bclink_ack_allowed(u32 n)
+static int bclink_ack_allowed(u32 n)
{
return((n % TIPC_MIN_LINK_WIN) == tipc_own_tag);
}
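/*
 * Worked example, assuming TIPC_MIN_LINK_WIN is 16: a node whose
 * tipc_own_tag is 5 answers only for packets whose sequence number is
 * 5, 21, 37, ..., so roughly 1/16th of the nodes respond to any given
 * packet rather than the entire cluster ACK'ing/NACK'ing at once.
 */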
-/**
+/**
* bclink_retransmit_pkt - retransmit broadcast packets
* @after: sequence number of last packet to *not* retransmit
* @to: sequence number of last packet to retransmit
- *
- * Called with 'node' locked, bc_lock unlocked
+ *
+ * Called with bc_lock locked
*/
static void bclink_retransmit_pkt(u32 after, u32 to)
{
struct sk_buff *buf;
- spin_lock_bh(&bc_lock);
buf = bcl->first_out;
while (buf && less_eq(buf_seqno(buf), after)) {
- buf = buf->next;
+ buf = buf->next;
}
- if (buf != NULL)
- link_retransmit(bcl, buf, mod(to - after));
- spin_unlock_bh(&bc_lock);
+ tipc_link_retransmit(bcl, buf, mod(to - after));
}
-/**
- * bclink_acknowledge - handle acknowledgement of broadcast packets
+/**
+ * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
* @n_ptr: node that sent acknowledgement info
* @acked: broadcast sequence # that has been acknowledged
- *
+ *
* Node is locked, bc_lock unlocked.
*/
-void bclink_acknowledge(struct node *n_ptr, u32 acked)
+void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
struct sk_buff *crs;
struct sk_buff *next;
/* Try resolving broadcast link congestion, if necessary */
if (unlikely(bcl->next_out))
- link_push_queue(bcl);
+ tipc_link_push_queue(bcl);
if (unlikely(released && !list_empty(&bcl->waiting_ports)))
- link_wakeup_ports(bcl, 0);
+ tipc_link_wakeup_ports(bcl, 0);
spin_unlock_bh(&bc_lock);
}
-/**
+/**
* bclink_send_ack - unicast an ACK msg
- *
- * net_lock and node lock set
+ *
+ * tipc_net_lock and node lock set
*/
-static void bclink_send_ack(struct node *n_ptr)
+static void bclink_send_ack(struct tipc_node *n_ptr)
{
struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
if (l_ptr != NULL)
- link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
}
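/*
 * Note on the index above: "n_ptr->addr & 1" picks one of the two
 * active_links[] slots from the peer's node address, so ACK traffic is
 * spread across both unicast links when two are active.
 */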
-/**
+/**
* bclink_send_nack- broadcast a NACK msg
- *
- * net_lock and node lock set
+ *
+ * tipc_net_lock and node lock set
*/
-static void bclink_send_nack(struct node *n_ptr)
+static void bclink_send_nack(struct tipc_node *n_ptr)
{
struct sk_buff *buf;
struct tipc_msg *msg;
buf = buf_acquire(INT_H_SIZE);
if (buf) {
msg = buf_msg(buf);
- msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
- TIPC_OK, INT_H_SIZE, n_ptr->addr);
+ tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
+ INT_H_SIZE, n_ptr->addr);
msg_set_mc_netid(msg, tipc_net_id);
- msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
+ msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
msg_set_bcast_tag(msg, tipc_own_tag);
- if (bearer_send(&bcbearer->bearer, buf, 0)) {
+ if (tipc_bearer_send(&bcbearer->bearer, buf, NULL)) {
bcl->stats.sent_nacks++;
buf_discard(buf);
} else {
- bearer_schedule(bcl->b_ptr, bcl);
+ tipc_bearer_schedule(bcl->b_ptr, bcl);
bcl->proto_msg_queue = buf;
bcl->stats.bearer_congs++;
}
- /*
+ /*
	 * Ensure we don't send another NACK msg to the node
* until 16 more deferred messages arrive from it
* (i.e. helps prevent all nodes from NACK'ing at same time)
*/
-
+
n_ptr->bclink.nack_sync = tipc_own_tag;
}
}
-/**
- * bclink_check_gap - send a NACK if a sequence gap exists
+/**
+ * tipc_bclink_check_gap - send a NACK if a sequence gap exists
*
- * net_lock and node lock set
+ * tipc_net_lock and node lock set
*/
-void bclink_check_gap(struct node *n_ptr, u32 last_sent)
+void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent)
{
if (!n_ptr->bclink.supported ||
less_eq(last_sent, mod(n_ptr->bclink.last_in)))
bclink_send_nack(n_ptr);
}
-/**
- * bclink_peek_nack - process a NACK msg meant for another node
- *
- * Only net_lock set.
+/**
+ * tipc_bclink_peek_nack - process a NACK msg meant for another node
+ *
+ * Only tipc_net_lock set.
*/
-void bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
+static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
{
- struct node *n_ptr = node_find(dest);
+ struct tipc_node *n_ptr = tipc_node_find(dest);
u32 my_after, my_to;
- if (unlikely(!n_ptr || !node_is_up(n_ptr)))
+ if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
return;
- node_lock(n_ptr);
+ tipc_node_lock(n_ptr);
/*
* Modify gap to suppress unnecessary NACKs from this node
*/
if (less_eq(my_to, gap_to))
n_ptr->bclink.gap_to = gap_after;
} else {
- /*
+ /*
		 * Expand gap if missing bufs are not in deferred queue:
*/
struct sk_buff *buf = n_ptr->bclink.deferred_head;
for (; buf; buf = buf->next) {
u32 seqno = buf_seqno(buf);
- if (mod(seqno - prev) != 1)
+ if (mod(seqno - prev) != 1) {
buf = NULL;
+ break;
+ }
if (seqno == gap_after)
break;
prev = seqno;
}
/*
* Some nodes may send a complementary NACK now:
- */
+ */
if (bclink_ack_allowed(sender_tag + 1)) {
if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
bclink_send_nack(n_ptr);
bclink_set_gap(n_ptr);
}
}
- node_unlock(n_ptr);
+ tipc_node_unlock(n_ptr);
}
/**
- * bclink_send_msg - broadcast a packet to all nodes in cluster
+ * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
*/
-int bclink_send_msg(struct sk_buff *buf)
+int tipc_bclink_send_msg(struct sk_buff *buf)
{
int res;
spin_lock_bh(&bc_lock);
- res = link_send_buf(bcl, buf);
+ res = tipc_link_send_buf(bcl, buf);
if (unlikely(res == -ELINKCONG))
buf_discard(buf);
else
}
/**
- * bclink_recv_pkt - receive a broadcast packet, and deliver upwards
- *
- * net_lock is read_locked, no other locks set
+ * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
+ *
+ * tipc_net_lock is read_locked, no other locks set
*/
-void bclink_recv_pkt(struct sk_buff *buf)
-{
+void tipc_bclink_recv_pkt(struct sk_buff *buf)
+{
+#if (TIPC_BCAST_LOSS_RATE)
+ static int rx_count = 0;
+#endif
struct tipc_msg *msg = buf_msg(buf);
- struct node* node = node_find(msg_prevnode(msg));
+ struct tipc_node* node = tipc_node_find(msg_prevnode(msg));
u32 next_in;
u32 seqno;
struct sk_buff *deferred;
msg_dbg(msg, "<BC<<<");
- if (unlikely(!node || !node_is_up(node) || !node->bclink.supported ||
+ if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
(msg_mc_netid(msg) != tipc_net_id))) {
buf_discard(buf);
return;
if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
msg_dbg(msg, "<BCNACK<<<");
if (msg_destnode(msg) == tipc_own_addr) {
- node_lock(node);
- bclink_acknowledge(node, msg_bcast_ack(msg));
- node_unlock(node);
+ tipc_node_lock(node);
+ tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
+ tipc_node_unlock(node);
+ spin_lock_bh(&bc_lock);
bcl->stats.recv_nacks++;
+ bcl->owner->next = node; /* remember requestor */
bclink_retransmit_pkt(msg_bcgap_after(msg),
msg_bcgap_to(msg));
+ bcl->owner->next = NULL;
+ spin_unlock_bh(&bc_lock);
} else {
- bclink_peek_nack(msg_destnode(msg),
- msg_bcast_tag(msg),
- msg_bcgap_after(msg),
- msg_bcgap_to(msg));
+ tipc_bclink_peek_nack(msg_destnode(msg),
+ msg_bcast_tag(msg),
+ msg_bcgap_after(msg),
+ msg_bcgap_to(msg));
}
buf_discard(buf);
return;
}
- node_lock(node);
+#if (TIPC_BCAST_LOSS_RATE)
+ if (++rx_count == TIPC_BCAST_LOSS_RATE) {
+ rx_count = 0;
+ buf_discard(buf);
+ return;
+ }
+#endif
+
+ tipc_node_lock(node);
receive:
deferred = node->bclink.deferred_head;
next_in = mod(node->bclink.last_in + 1);
bcl->stats.sent_acks++;
}
if (likely(msg_isdata(msg))) {
- node_unlock(node);
- port_recv_mcast(buf, NULL);
+ tipc_node_unlock(node);
+ tipc_port_recv_mcast(buf, NULL);
} else if (msg_user(msg) == MSG_BUNDLER) {
bcl->stats.recv_bundles++;
bcl->stats.recv_bundled += msg_msgcnt(msg);
- node_unlock(node);
- link_recv_bundle(buf);
+ tipc_node_unlock(node);
+ tipc_link_recv_bundle(buf);
} else if (msg_user(msg) == MSG_FRAGMENTER) {
bcl->stats.recv_fragments++;
- if (link_recv_fragment(&node->bclink.defragm,
- &buf, &msg))
+ if (tipc_link_recv_fragment(&node->bclink.defragm,
+ &buf, &msg))
bcl->stats.recv_fragmented++;
- node_unlock(node);
- net_route_msg(buf);
+ tipc_node_unlock(node);
+ tipc_net_route_msg(buf);
} else {
- node_unlock(node);
- net_route_msg(buf);
+ tipc_node_unlock(node);
+ tipc_net_route_msg(buf);
}
if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
- node_lock(node);
+ tipc_node_lock(node);
buf = deferred;
msg = buf_msg(buf);
node->bclink.deferred_head = deferred->next;
u32 gap_after = node->bclink.gap_after;
u32 gap_to = node->bclink.gap_to;
- if (link_defer_pkt(&node->bclink.deferred_head,
- &node->bclink.deferred_tail,
- buf)) {
+ if (tipc_link_defer_pkt(&node->bclink.deferred_head,
+ &node->bclink.deferred_tail,
+ buf)) {
node->bclink.nack_sync++;
bcl->stats.deferred_recv++;
if (seqno == mod(gap_after + 1))
bcl->stats.duplicates++;
buf_discard(buf);
}
- node_unlock(node);
+ tipc_node_unlock(node);
}
-u32 bclink_get_last_sent(void)
+u32 tipc_bclink_get_last_sent(void)
{
u32 last_sent = mod(bcl->next_out_no - 1);
return last_sent;
}
-u32 bclink_acks_missing(struct node *n_ptr)
+u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
return (n_ptr->bclink.supported &&
- (bclink_get_last_sent() != n_ptr->bclink.acked));
+ (tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
}
/**
- * bcbearer_send - send a packet through the broadcast pseudo-bearer
- *
+ * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
+ *
* Send through as many bearers as necessary to reach all nodes
* that support TIPC multicasting.
- *
+ *
* Returns 0 if packet sent successfully, non-zero if not
*/
-int bcbearer_send(struct sk_buff *buf,
- struct tipc_bearer *unused1,
- struct tipc_media_addr *unused2)
+static int tipc_bcbearer_send(struct sk_buff *buf,
+ struct tipc_bearer *unused1,
+ struct tipc_media_addr *unused2)
{
- static int send_count = 0;
-
- struct node_map remains;
- struct node_map remains_new;
int bp_index;
- int swap_time;
/* Prepare buffer for broadcasting (if first time trying to send it) */
if (likely(!msg_non_seq(buf_msg(buf)))) {
struct tipc_msg *msg;
- assert(cluster_bcast_nodes.count != 0);
- bcbuf_set_acks(buf, cluster_bcast_nodes.count);
+ assert(tipc_cltr_bcast_nodes.count != 0);
+ bcbuf_set_acks(buf, tipc_cltr_bcast_nodes.count);
msg = buf_msg(buf);
- msg_set_non_seq(msg);
+ msg_set_non_seq(msg, 1);
msg_set_mc_netid(msg, tipc_net_id);
}
- /* Determine if bearer pairs should be swapped following this attempt */
-
- if ((swap_time = (++send_count >= 10)))
- send_count = 0;
-
/* Send buffer over bearers until all targets reached */
-
- remains = cluster_bcast_nodes;
+
+ bcbearer->remains = tipc_cltr_bcast_nodes;
for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
struct bearer *p = bcbearer->bpairs[bp_index].primary;
if (!p)
break; /* no more bearers to try */
- nmap_diff(&remains, &p->nodes, &remains_new);
- if (remains_new.count == remains.count)
+ tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
+ if (bcbearer->remains_new.count == bcbearer->remains.count)
continue; /* bearer pair doesn't add anything */
- if (!p->publ.blocked &&
- !p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
- if (swap_time && s && !s->publ.blocked)
- goto swap;
- else
- goto update;
+ if (p->publ.blocked ||
+ p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
+ /* unable to send on primary bearer */
+ if (!s || s->publ.blocked ||
+ s->media->send_msg(buf, &s->publ,
+ &s->media->bcast_addr)) {
+ /* unable to send on either bearer */
+ continue;
+ }
+ }
+
+ if (s) {
+ bcbearer->bpairs[bp_index].primary = s;
+ bcbearer->bpairs[bp_index].secondary = p;
}
- if (!s || s->publ.blocked ||
- s->media->send_msg(buf, &s->publ, &s->media->bcast_addr))
- continue; /* unable to send using bearer pair */
-swap:
- bcbearer->bpairs[bp_index].primary = s;
- bcbearer->bpairs[bp_index].secondary = p;
-update:
- if (remains_new.count == 0)
- return TIPC_OK;
-
- remains = remains_new;
+ if (bcbearer->remains_new.count == 0)
+ return 0;
+
+ bcbearer->remains = bcbearer->remains_new;
}
-
+
/* Unable to reach all targets */
bcbearer->bearer.publ.blocked = 1;
bcl->stats.bearer_congs++;
- return ~TIPC_OK;
+ return 1;
}
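/*
 * Design note on the send loop above: whenever a pair has a secondary
 * bearer and the packet went out, primary and secondary are swapped in
 * place, so consecutive broadcasts alternate between the two bearers and
 * a primary that just failed is demoted. This replaces the old scheme of
 * swapping unconditionally on every 10th send.
 */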
/**
- * bcbearer_sort - create sets of bearer pairs used by broadcast bearer
+ * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
*/
-void bcbearer_sort(void)
+void tipc_bcbearer_sort(void)
{
struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
struct bcbearer_pair *bp_curr;
memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
- struct bearer *b = &bearers[b_index];
+ struct bearer *b = &tipc_bearers[b_index];
if (!b->active || !b->nodes.count)
continue;
bp_curr = bcbearer->bpairs;
memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
- for (pri = (TIPC_NUM_LINK_PRI - 1); pri >= 0; pri--) {
+ for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {
if (!bp_temp[pri].primary)
continue;
bp_curr->primary = bp_temp[pri].primary;
if (bp_temp[pri].secondary) {
- if (nmap_equal(&bp_temp[pri].primary->nodes,
- &bp_temp[pri].secondary->nodes)) {
+ if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
+ &bp_temp[pri].secondary->nodes)) {
bp_curr->secondary = bp_temp[pri].secondary;
} else {
bp_curr++;
}
/**
- * bcbearer_push - resolve bearer congestion
- *
+ * tipc_bcbearer_push - resolve bearer congestion
+ *
* Forces bclink to push out any unsent packets, until all packets are gone
* or congestion reoccurs.
* No locks set when function called
*/
-void bcbearer_push(void)
+void tipc_bcbearer_push(void)
{
struct bearer *b_ptr;
b_ptr = &bcbearer->bearer;
if (b_ptr->publ.blocked) {
b_ptr->publ.blocked = 0;
- bearer_lock_push(b_ptr);
+ tipc_bearer_lock_push(b_ptr);
}
spin_unlock_bh(&bc_lock);
}
-int bclink_stats(char *buf, const u32 buf_size)
+int tipc_bclink_stats(char *buf, const u32 buf_size)
{
struct print_buf pb;
if (!bcl)
return 0;
- printbuf_init(&pb, buf, buf_size);
+ tipc_printbuf_init(&pb, buf, buf_size);
spin_lock_bh(&bc_lock);
tipc_printf(&pb, "Link <%s>\n"
- " Window:%u packets\n",
+ " Window:%u packets\n",
bcl->name, bcl->queue_limit[0]);
- tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
bcl->stats.recv_info,
bcl->stats.recv_fragments,
bcl->stats.recv_fragmented,
bcl->stats.recv_bundles,
bcl->stats.recv_bundled);
- tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
bcl->stats.sent_info,
bcl->stats.sent_fragments,
- bcl->stats.sent_fragmented,
+ bcl->stats.sent_fragmented,
bcl->stats.sent_bundles,
bcl->stats.sent_bundled);
- tipc_printf(&pb, " RX naks:%u defs:%u dups:%u\n",
+ tipc_printf(&pb, " RX naks:%u defs:%u dups:%u\n",
bcl->stats.recv_nacks,
- bcl->stats.deferred_recv,
+ bcl->stats.deferred_recv,
bcl->stats.duplicates);
- tipc_printf(&pb, " TX naks:%u acks:%u dups:%u\n",
- bcl->stats.sent_nacks,
- bcl->stats.sent_acks,
+ tipc_printf(&pb, " TX naks:%u acks:%u dups:%u\n",
+ bcl->stats.sent_nacks,
+ bcl->stats.sent_acks,
bcl->stats.retransmitted);
tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
bcl->stats.bearer_congs,
: 0);
spin_unlock_bh(&bc_lock);
- return printbuf_validate(&pb);
+ return tipc_printbuf_validate(&pb);
}
-int bclink_reset_stats(void)
+int tipc_bclink_reset_stats(void)
{
if (!bcl)
return -ENOPROTOOPT;
spin_lock_bh(&bc_lock);
memset(&bcl->stats, 0, sizeof(bcl->stats));
spin_unlock_bh(&bc_lock);
- return TIPC_OK;
+ return 0;
}
-int bclink_set_queue_limits(u32 limit)
+int tipc_bclink_set_queue_limits(u32 limit)
{
if (!bcl)
return -ENOPROTOOPT;
return -EINVAL;
spin_lock_bh(&bc_lock);
- link_set_queue_limits(bcl, limit);
+ tipc_link_set_queue_limits(bcl, limit);
spin_unlock_bh(&bc_lock);
- return TIPC_OK;
+ return 0;
}
-int bclink_init(void)
+int tipc_bclink_init(void)
{
- bcbearer = kmalloc(sizeof(*bcbearer), GFP_ATOMIC);
- bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC);
+ bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
+ bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
if (!bcbearer || !bclink) {
nomem:
- warn("Memory squeeze; Failed to create multicast link\n");
+ warn("Multicast link creation failed, no memory\n");
kfree(bcbearer);
bcbearer = NULL;
kfree(bclink);
return -ENOMEM;
}
- memset(bcbearer, 0, sizeof(struct bcbearer));
INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
bcbearer->bearer.media = &bcbearer->media;
- bcbearer->media.send_msg = bcbearer_send;
+ bcbearer->media.send_msg = tipc_bcbearer_send;
sprintf(bcbearer->media.name, "tipc-multicast");
bcl = &bclink->link;
- memset(bclink, 0, sizeof(struct bclink));
INIT_LIST_HEAD(&bcl->waiting_ports);
bcl->next_out_no = 1;
- bclink->node.lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&bclink->node.lock);
bcl->owner = &bclink->node;
- bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
- link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
+ bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
+ tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
bcl->b_ptr = &bcbearer->bearer;
bcl->state = WORKING_WORKING;
- sprintf(bcl->name, bc_link_name);
+ strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
if (BCLINK_LOG_BUF_SIZE) {
char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC);
if (!pb)
goto nomem;
- printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
+ tipc_printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
}
- return TIPC_OK;
+ return 0;
}
-void bclink_stop(void)
+void tipc_bclink_stop(void)
{
spin_lock_bh(&bc_lock);
if (bcbearer) {
- link_stop(bcl);
+ tipc_link_stop(bcl);
if (BCLINK_LOG_BUF_SIZE)
kfree(bcl->print_buf.buf);
bcl = NULL;
spin_unlock_bh(&bc_lock);
}
+
+/**
+ * tipc_nmap_add - add a node to a node map
+ */
+
+void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
+{
+ int n = tipc_node(node);
+ int w = n / WSIZE;
+ u32 mask = (1 << (n % WSIZE));
+
+ if ((nm_ptr->map[w] & mask) == 0) {
+ nm_ptr->count++;
+ nm_ptr->map[w] |= mask;
+ }
+}
+
+/**
+ * tipc_nmap_remove - remove a node from a node map
+ */
+
+void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
+{
+ int n = tipc_node(node);
+ int w = n / WSIZE;
+ u32 mask = (1 << (n % WSIZE));
+
+ if ((nm_ptr->map[w] & mask) != 0) {
+ nm_ptr->map[w] &= ~mask;
+ nm_ptr->count--;
+ }
+}
+
+/**
+ * tipc_nmap_diff - find differences between node maps
+ * @nm_a: input node map A
+ * @nm_b: input node map B
+ * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
+ */
+
+void tipc_nmap_diff(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b,
+ struct tipc_node_map *nm_diff)
+{
+ int stop = ARRAY_SIZE(nm_a->map);
+ int w;
+ int b;
+ u32 map;
+
+ memset(nm_diff, 0, sizeof(*nm_diff));
+ for (w = 0; w < stop; w++) {
+ map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
+ nm_diff->map[w] = map;
+ if (map != 0) {
+ for (b = 0 ; b < WSIZE; b++) {
+ if (map & (1 << b))
+ nm_diff->count++;
+ }
+ }
+ }
+}
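+/*
+ * Usage sketch (hypothetical maps, not taken from this file); this is
+ * the pattern tipc_bcbearer_send() uses to track unreached destinations:
+ *
+ *	struct tipc_node_map todo;	(nodes still to be reached)
+ *	struct tipc_node_map done;	(nodes covered by one bearer)
+ *	struct tipc_node_map left;
+ *
+ *	tipc_nmap_diff(&todo, &done, &left);
+ *	if (left.count == 0)
+ *		;			(every node has been reached)
+ */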
+
+/**
+ * tipc_port_list_add - add a port to a port list, ensuring no duplicates
+ */
+
+void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
+{
+ struct port_list *item = pl_ptr;
+ int i;
+ int item_sz = PLSIZE;
+ int cnt = pl_ptr->count;
+
+ for (; ; cnt -= item_sz, item = item->next) {
+ if (cnt < PLSIZE)
+ item_sz = cnt;
+ for (i = 0; i < item_sz; i++)
+ if (item->ports[i] == port)
+ return;
+ if (i < PLSIZE) {
+ item->ports[i] = port;
+ pl_ptr->count++;
+ return;
+ }
+ if (!item->next) {
+ item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
+ if (!item->next) {
+ warn("Incomplete multicast delivery, no memory\n");
+ return;
+ }
+ item->next->next = NULL;
+ }
+ }
+}
+
+/**
+ * tipc_port_list_free - free dynamically created entries in port_list chain
+ */
+
+void tipc_port_list_free(struct port_list *pl_ptr)
+{
+ struct port_list *item;
+ struct port_list *next;
+
+ for (item = pl_ptr->next; item; item = next) {
+ next = item->next;
+ kfree(item);
+ }
+}
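+
+/*
+ * Usage sketch (hypothetical caller): the head of the chain lives in the
+ * caller's own storage and only overflow items beyond the first PLSIZE
+ * ports are kmalloc'd, so tipc_port_list_free() releases the chained
+ * items but never the head itself:
+ *
+ *	struct port_list dports = {0, NULL, };
+ *
+ *	tipc_port_list_add(&dports, port_ref);	(port_ref: a port reference)
+ *	...					(deliver to listed ports)
+ *	tipc_port_list_free(&dports);		(frees overflow items only)
+ */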
+