/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2003-2005, Ericsson Research Canada
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, Wind River Systems
 * Copyright (c) 2005-2006, Ericsson AB
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "msg.h"
#include "dbg.h"
#include "link.h"
#include "net.h"
#include "node.h"
#include "port.h"
#include "addr.h"
#include "node_subscr.h"
#include "name_distr.h"
#include "bearer.h"
#include "name_table.h"
#include "bcast.h"


#define MAX_PKT_DEFAULT_MCAST 1500	/* bcast link max packet size (fixed) */

#define BCLINK_WIN_DEFAULT 20		/* bcast link window size (default) */

#define BCLINK_LOG_BUF_SIZE 0

/**
 * struct bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */

struct bcbearer_pair {
        struct bearer *primary;
        struct bearer *secondary;
};

/**
 * struct bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: array of bearer pairs used during creation of "bpairs"
 */

struct bcbearer {
        struct bearer bearer;
        struct media media;
        struct bcbearer_pair bpairs[MAX_BEARERS];
        struct bcbearer_pair bpairs_temp[TIPC_NUM_LINK_PRI];
};

/**
 * struct bclink - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing broadcast link's peer node
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */

struct bclink {
        struct link link;
        struct node node;
};


static struct bcbearer *bcbearer = NULL;
static struct bclink *bclink = NULL;
static struct link *bcl = NULL;
static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED;

char bc_link_name[] = "multicast-link";


static inline u32 buf_seqno(struct sk_buff *buf)
{
        return msg_seqno(buf_msg(buf));
}

static inline u32 bcbuf_acks(struct sk_buff *buf)
{
        return (u32)TIPC_SKB_CB(buf)->handle;
}

static inline void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
        TIPC_SKB_CB(buf)->handle = (void *)acks;
}

static inline void bcbuf_decr_acks(struct sk_buff *buf)
{
        bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

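/*
 * Note on the buffer helpers above: the skb control block's 'handle' field
 * is reused as a count of nodes that have not yet acknowledged the buffer.
 * Worked example (illustrative only): a packet broadcast to a cluster of
 * five nodes starts out with bcbuf_set_acks(buf, 5); each acknowledging
 * node causes a bcbuf_decr_acks(buf), and once bcbuf_acks(buf) reaches
 * zero bclink_acknowledge() releases the buffer from the transmit queue.
 */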

/**
 * bclink_set_gap - set gap according to contents of current deferred pkt queue
 *
 * Called with 'node' locked, bc_lock unlocked
 */

static inline void bclink_set_gap(struct node *n_ptr)
{
        struct sk_buff *buf = n_ptr->bclink.deferred_head;

        n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
                mod(n_ptr->bclink.last_in);
        if (unlikely(buf != NULL))
                n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
}

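/*
 * Illustrative example for bclink_set_gap(): if the last in-sequence packet
 * received from a peer was #27 and the deferred queue holds packet #31,
 * the gap is recorded as gap_after = 27, gap_to = 30, i.e. packets #28
 * through #30 are missing and are candidates for a NACK.
 */
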
/**
 * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
 *
 * This mechanism endeavours to prevent all nodes in the network from trying
 * to ACK or NACK at the same time.
 *
 * Note: TIPC uses a different trigger to distribute ACKs than it does to
 * distribute NACKs, but tries to use the same spacing (divide by 16).
 */

static inline int bclink_ack_allowed(u32 n)
{
        return ((n % TIPC_MIN_LINK_WIN) == tipc_own_tag);
}

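/*
 * Worked example (using the "divide by 16" spacing noted above): a node
 * with tipc_own_tag == 3 may ACK/NACK only when n % 16 == 3, so for any
 * given sequence number at most one node per group of 16 tags responds,
 * staggering the reply traffic across the cluster.
 */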

/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with 'node' locked, bc_lock unlocked
 */

static void bclink_retransmit_pkt(u32 after, u32 to)
{
        struct sk_buff *buf;

        spin_lock_bh(&bc_lock);
        buf = bcl->first_out;
        while (buf && less_eq(buf_seqno(buf), after)) {
                buf = buf->next;
        }
        if (buf != NULL)
                link_retransmit(bcl, buf, mod(to - after));
        spin_unlock_bh(&bc_lock);
}

/**
 * bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bc_lock unlocked.
 */

void bclink_acknowledge(struct node *n_ptr, u32 acked)
{
        struct sk_buff *crs;
        struct sk_buff *next;
        unsigned int released = 0;

        if (less_eq(acked, n_ptr->bclink.acked))
                return;

        spin_lock_bh(&bc_lock);

        /* Skip over packets that node has previously acknowledged */

        crs = bcl->first_out;
        while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked)) {
                crs = crs->next;
        }

        /* Update packets that node is now acknowledging */

        while (crs && less_eq(buf_seqno(crs), acked)) {
                next = crs->next;
                bcbuf_decr_acks(crs);
                if (bcbuf_acks(crs) == 0) {
                        bcl->first_out = next;
                        bcl->out_queue_size--;
                        buf_discard(crs);
                        released = 1;
                }
                crs = next;
        }
        n_ptr->bclink.acked = acked;

        /* Try resolving broadcast link congestion, if necessary */

        if (unlikely(bcl->next_out))
                link_push_queue(bcl);
        if (unlikely(released && !list_empty(&bcl->waiting_ports)))
                link_wakeup_ports(bcl, 0);
        spin_unlock_bh(&bc_lock);
}

/**
 * bclink_send_ack - unicast an ACK msg
 *
 * net_lock and node lock set
 */

static void bclink_send_ack(struct node *n_ptr)
{
        struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];

        if (l_ptr != NULL)
                link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
}

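/*
 * Note (relies on the unicast link code in link.c): the STATE_MSG built by
 * link_send_proto_msg() carries the node's current bclink 'last_in' value
 * in its bcast_ack field, so no dedicated broadcast ACK packet format is
 * needed.  Selecting active_links[n_ptr->addr & 1] spreads ACK traffic
 * across the two active unicast links, keyed by the peer's address parity.
 */
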
/**
 * bclink_send_nack - broadcast a NACK msg
 *
 * net_lock and node lock set
 */

static void bclink_send_nack(struct node *n_ptr)
{
        struct sk_buff *buf;
        struct tipc_msg *msg;

        if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
                return;

        buf = buf_acquire(INT_H_SIZE);
        if (buf) {
                msg = buf_msg(buf);
                msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
                         TIPC_OK, INT_H_SIZE, n_ptr->addr);
                msg_set_mc_netid(msg, tipc_net_id);
                msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
                msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
                msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
                msg_set_bcast_tag(msg, tipc_own_tag);

                if (bearer_send(&bcbearer->bearer, buf, 0)) {
                        bcl->stats.sent_nacks++;
                        buf_discard(buf);
                } else {
                        bearer_schedule(bcl->b_ptr, bcl);
                        bcl->proto_msg_queue = buf;
                        bcl->stats.bearer_congs++;
                }

                /*
                 * Ensure we don't send another NACK msg to the node
                 * until 16 more deferred messages arrive from it
                 * (i.e. helps prevent all nodes from NACK'ing at same time)
                 */

                n_ptr->bclink.nack_sync = tipc_own_tag;
        }
}

/**
 * bclink_check_gap - send a NACK if a sequence gap exists
 *
 * net_lock and node lock set
 */

void bclink_check_gap(struct node *n_ptr, u32 last_sent)
{
        if (!n_ptr->bclink.supported ||
            less_eq(last_sent, mod(n_ptr->bclink.last_in)))
                return;

        bclink_set_gap(n_ptr);
        if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
                n_ptr->bclink.gap_to = last_sent;
        bclink_send_nack(n_ptr);
}

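/*
 * Illustrative example for bclink_check_gap(): if this node's last_in is
 * 27, its deferred queue is empty, and a peer reports last_sent = 33,
 * bclink_set_gap() yields gap_after == gap_to == 27, so gap_to is widened
 * to 33 and the resulting NACK requests the whole range #28..#33.
 */
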
/**
 * bclink_peek_nack - process a NACK msg meant for another node
 *
 * Only net_lock set.
 */

void bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
{
        struct node *n_ptr = node_find(dest);
        u32 my_after, my_to;

        if (unlikely(!n_ptr || !node_is_up(n_ptr)))
                return;
        node_lock(n_ptr);
        /*
         * Modify gap to suppress unnecessary NACKs from this node
         */
        my_after = n_ptr->bclink.gap_after;
        my_to = n_ptr->bclink.gap_to;

        if (less_eq(gap_after, my_after)) {
                if (less(my_after, gap_to) && less(gap_to, my_to))
                        n_ptr->bclink.gap_after = gap_to;
                else if (less_eq(my_to, gap_to))
                        n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
        } else if (less_eq(gap_after, my_to)) {
                if (less_eq(my_to, gap_to))
                        n_ptr->bclink.gap_to = gap_after;
        } else {
                /*
                 * Expand gap if missing bufs not in deferred queue:
                 */
                struct sk_buff *buf = n_ptr->bclink.deferred_head;
                u32 prev = n_ptr->bclink.gap_to;

                for (; buf; buf = buf->next) {
                        u32 seqno = buf_seqno(buf);

                        if (mod(seqno - prev) != 1) {
                                buf = NULL;   /* queue not contiguous; stop scanning */
                                break;
                        }
                        if (seqno == gap_after)
                                break;
                        prev = seqno;
                }
                if (buf == NULL)
                        n_ptr->bclink.gap_to = gap_after;
        }
        /*
         * Some nodes may send a complementary NACK now:
         */
        if (bclink_ack_allowed(sender_tag + 1)) {
                if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
                        bclink_send_nack(n_ptr);
                        bclink_set_gap(n_ptr);
                }
        }
        node_unlock(n_ptr);
}

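/*
 * Illustrative example for bclink_peek_nack(): if this node was about to
 * NACK #28..#30 (gap_after 27, gap_to 30) and overhears another node NACK
 * #28..#29 (gap_after 27, gap_to 29), the first branch above moves this
 * node's gap_after to 29; a complementary NACK, sent only if
 * bclink_ack_allowed(sender_tag + 1) matches, then covers just #30.
 */
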
/**
 * bclink_send_msg - broadcast a packet to all nodes in cluster
 */

int bclink_send_msg(struct sk_buff *buf)
{
        int res;

        spin_lock_bh(&bc_lock);

        res = link_send_buf(bcl, buf);
        if (unlikely(res == -ELINKCONG))
                buf_discard(buf);
        else
                bcl->stats.sent_info++;

        if (bcl->out_queue_size > bcl->stats.max_queue_sz)
                bcl->stats.max_queue_sz = bcl->out_queue_size;
        bcl->stats.queue_sz_counts++;
        bcl->stats.accu_queue_sz += bcl->out_queue_size;

        spin_unlock_bh(&bc_lock);
        return res;
}

/**
 * bclink_recv_pkt - receive a broadcast packet, and deliver upwards
 *
 * net_lock is read_locked, no other locks set
 */

void bclink_recv_pkt(struct sk_buff *buf)
{
        struct tipc_msg *msg = buf_msg(buf);
        struct node *node = node_find(msg_prevnode(msg));
        u32 next_in;
        u32 seqno;
        struct sk_buff *deferred;

        msg_dbg(msg, "<BC<<<");

        if (unlikely(!node || !node_is_up(node) || !node->bclink.supported ||
                     (msg_mc_netid(msg) != tipc_net_id))) {
                buf_discard(buf);
                return;
        }

        if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
                msg_dbg(msg, "<BCNACK<<<");
                if (msg_destnode(msg) == tipc_own_addr) {
                        node_lock(node);
                        bclink_acknowledge(node, msg_bcast_ack(msg));
                        node_unlock(node);
                        bcl->stats.recv_nacks++;
                        bclink_retransmit_pkt(msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
                } else {
                        bclink_peek_nack(msg_destnode(msg),
                                         msg_bcast_tag(msg),
                                         msg_bcgap_after(msg),
                                         msg_bcgap_to(msg));
                }
                buf_discard(buf);
                return;
        }

        node_lock(node);
receive:
        deferred = node->bclink.deferred_head;
        next_in = mod(node->bclink.last_in + 1);
        seqno = msg_seqno(msg);

        if (likely(seqno == next_in)) {
                bcl->stats.recv_info++;
                node->bclink.last_in++;
                bclink_set_gap(node);
                if (unlikely(bclink_ack_allowed(seqno))) {
                        bclink_send_ack(node);
                        bcl->stats.sent_acks++;
                }
                if (likely(msg_isdata(msg))) {
                        node_unlock(node);
                        port_recv_mcast(buf, NULL);
                } else if (msg_user(msg) == MSG_BUNDLER) {
                        bcl->stats.recv_bundles++;
                        bcl->stats.recv_bundled += msg_msgcnt(msg);
                        node_unlock(node);
                        link_recv_bundle(buf);
                } else if (msg_user(msg) == MSG_FRAGMENTER) {
                        bcl->stats.recv_fragments++;
                        if (link_recv_fragment(&node->bclink.defragm,
                                               &buf, &msg))
                                bcl->stats.recv_fragmented++;
                        node_unlock(node);
                        net_route_msg(buf);
                } else {
                        node_unlock(node);
                        net_route_msg(buf);
                }
                if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
                        node_lock(node);
                        buf = deferred;
                        msg = buf_msg(buf);
                        node->bclink.deferred_head = deferred->next;
                        goto receive;
                }
                return;
        } else if (less(next_in, seqno)) {
                u32 gap_after = node->bclink.gap_after;
                u32 gap_to = node->bclink.gap_to;

                if (link_defer_pkt(&node->bclink.deferred_head,
                                   &node->bclink.deferred_tail,
                                   buf)) {
                        node->bclink.nack_sync++;
                        bcl->stats.deferred_recv++;
                        if (seqno == mod(gap_after + 1))
                                node->bclink.gap_after = seqno;
                        else if (less(gap_after, seqno) && less(seqno, gap_to))
                                node->bclink.gap_to = seqno;
                }
                if (bclink_ack_allowed(node->bclink.nack_sync)) {
                        if (gap_to != gap_after)
                                bclink_send_nack(node);
                        bclink_set_gap(node);
                }
        } else {
                bcl->stats.duplicates++;
                buf_discard(buf);
        }
        node_unlock(node);
}

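/*
 * Illustrative walk-through of the 'receive' loop above: if broadcast
 * packets arrive in the order #5, #7, #6, then #5 is delivered at once,
 * #7 is placed in the deferred queue, and when #6 arrives it is delivered
 * and the code jumps back to 'receive' to deliver the deferred #7 as well.
 */
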
u32 bclink_get_last_sent(void)
{
        u32 last_sent = mod(bcl->next_out_no - 1);

        if (bcl->next_out)
                last_sent = mod(buf_seqno(bcl->next_out) - 1);
        return last_sent;
}

u32 bclink_acks_missing(struct node *n_ptr)
{
        return (n_ptr->bclink.supported &&
                (bclink_get_last_sent() != n_ptr->bclink.acked));
}


/**
 * bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send through as many bearers as necessary to reach all nodes
 * that support TIPC multicasting.
 *
 * Returns 0 if packet sent successfully, non-zero if not
 */

int bcbearer_send(struct sk_buff *buf,
                  struct tipc_bearer *unused1,
                  struct tipc_media_addr *unused2)
{
        static int send_count = 0;

        struct node_map remains;
        struct node_map remains_new;
        int bp_index;
        int swap_time;

        /* Prepare buffer for broadcasting (if first time trying to send it) */

        if (likely(!msg_non_seq(buf_msg(buf)))) {
                struct tipc_msg *msg;

                assert(cluster_bcast_nodes.count != 0);
                bcbuf_set_acks(buf, cluster_bcast_nodes.count);
                msg = buf_msg(buf);
                msg_set_non_seq(msg);
                msg_set_mc_netid(msg, tipc_net_id);
        }

        /* Determine if bearer pairs should be swapped following this attempt */

        if ((swap_time = (++send_count >= 10)))
                send_count = 0;

        /* Send buffer over bearers until all targets reached */

        remains = cluster_bcast_nodes;

        for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
                struct bearer *p = bcbearer->bpairs[bp_index].primary;
                struct bearer *s = bcbearer->bpairs[bp_index].secondary;

                if (!p)
                        break;  /* no more bearers to try */

                nmap_diff(&remains, &p->nodes, &remains_new);
                if (remains_new.count == remains.count)
                        continue;       /* bearer pair doesn't add anything */

                if (!p->publ.blocked &&
                    !p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
                        if (swap_time && s && !s->publ.blocked)
                                goto swap;
                        else
                                goto update;
                }

                if (!s || s->publ.blocked ||
                    s->media->send_msg(buf, &s->publ, &s->media->bcast_addr))
                        continue;       /* unable to send using bearer pair */
swap:
                bcbearer->bpairs[bp_index].primary = s;
                bcbearer->bpairs[bp_index].secondary = p;
update:
                if (remains_new.count == 0)
                        return TIPC_OK;

                remains = remains_new;
        }

        /* Unable to reach all targets */

        bcbearer->bearer.publ.blocked = 1;
        bcl->stats.bearer_congs++;
        return ~TIPC_OK;
}

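/*
 * Note on the send_count/swap_time logic above: roughly every tenth
 * broadcast attempt the primary and secondary bearers of a pair trade
 * places (provided the secondary is usable), so multicast load is spread
 * across both bearers instead of always burdening the primary alone.
 */
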
/**
 * bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */

void bcbearer_sort(void)
{
        struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
        struct bcbearer_pair *bp_curr;
        int b_index;
        int pri;

        spin_lock_bh(&bc_lock);

        /* Group bearers by priority (can assume max of two per priority) */

        memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

        for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
                struct bearer *b = &bearers[b_index];

                if (!b->active || !b->nodes.count)
                        continue;

                if (!bp_temp[b->priority].primary)
                        bp_temp[b->priority].primary = b;
                else
                        bp_temp[b->priority].secondary = b;
        }

        /* Create array of bearer pairs for broadcasting */

        bp_curr = bcbearer->bpairs;
        memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

        for (pri = (TIPC_NUM_LINK_PRI - 1); pri >= 0; pri--) {

                if (!bp_temp[pri].primary)
                        continue;

                bp_curr->primary = bp_temp[pri].primary;

                if (bp_temp[pri].secondary) {
                        if (nmap_equal(&bp_temp[pri].primary->nodes,
                                       &bp_temp[pri].secondary->nodes)) {
                                bp_curr->secondary = bp_temp[pri].secondary;
                        } else {
                                bp_curr++;
                                bp_curr->primary = bp_temp[pri].secondary;
                        }
                }

                bp_curr++;
        }

        spin_unlock_bh(&bc_lock);
}

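/*
 * Illustrative example for bcbearer_sort() (bearer names hypothetical):
 * with eth0 and eth1 both at priority 10 and reaching the same set of
 * nodes, and eth2 at priority 5, the result is bpairs[0] = {eth0, eth1}
 * and bpairs[1] = {eth2, NULL}; pairs are stored in decreasing priority
 * order.
 */
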
/**
 * bcbearer_push - resolve bearer congestion
 *
 * Forces bclink to push out any unsent packets, until all packets are gone
 * or congestion reoccurs.
 * No locks set when function called
 */

void bcbearer_push(void)
{
        struct bearer *b_ptr;

        spin_lock_bh(&bc_lock);
        b_ptr = &bcbearer->bearer;
        if (b_ptr->publ.blocked) {
                b_ptr->publ.blocked = 0;
                bearer_lock_push(b_ptr);
        }
        spin_unlock_bh(&bc_lock);
}


678
679int bclink_stats(char *buf, const u32 buf_size)
680{
681 struct print_buf pb;
682
683 if (!bcl)
684 return 0;
685
686 printbuf_init(&pb, buf, buf_size);
687
688 spin_lock_bh(&bc_lock);
689
690 tipc_printf(&pb, "Link <%s>\n"
691 " Window:%u packets\n",
692 bcl->name, bcl->queue_limit[0]);
693 tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
694 bcl->stats.recv_info,
695 bcl->stats.recv_fragments,
696 bcl->stats.recv_fragmented,
697 bcl->stats.recv_bundles,
698 bcl->stats.recv_bundled);
699 tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
700 bcl->stats.sent_info,
701 bcl->stats.sent_fragments,
702 bcl->stats.sent_fragmented,
703 bcl->stats.sent_bundles,
704 bcl->stats.sent_bundled);
705 tipc_printf(&pb, " RX naks:%u defs:%u dups:%u\n",
706 bcl->stats.recv_nacks,
707 bcl->stats.deferred_recv,
708 bcl->stats.duplicates);
709 tipc_printf(&pb, " TX naks:%u acks:%u dups:%u\n",
710 bcl->stats.sent_nacks,
711 bcl->stats.sent_acks,
712 bcl->stats.retransmitted);
713 tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
714 bcl->stats.bearer_congs,
715 bcl->stats.link_congs,
716 bcl->stats.max_queue_sz,
717 bcl->stats.queue_sz_counts
718 ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
719 : 0);
720
721 spin_unlock_bh(&bc_lock);
722 return printbuf_validate(&pb);
723}
724
int bclink_reset_stats(void)
{
        if (!bcl)
                return -ENOPROTOOPT;

        spin_lock_bh(&bc_lock);
        memset(&bcl->stats, 0, sizeof(bcl->stats));
        spin_unlock_bh(&bc_lock);
        return TIPC_OK;
}

int bclink_set_queue_limits(u32 limit)
{
        if (!bcl)
                return -ENOPROTOOPT;
        if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
                return -EINVAL;

        spin_lock_bh(&bc_lock);
        link_set_queue_limits(bcl, limit);
        spin_unlock_bh(&bc_lock);
        return TIPC_OK;
}

int bclink_init(void)
{
        bcbearer = kmalloc(sizeof(*bcbearer), GFP_ATOMIC);
        bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC);
        if (!bcbearer || !bclink) {
 nomem:
                warn("Memory squeeze; failed to create multicast link\n");
                kfree(bcbearer);
                bcbearer = NULL;
                kfree(bclink);
                bclink = NULL;
                return -ENOMEM;
        }

        memset(bcbearer, 0, sizeof(struct bcbearer));
        INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
        bcbearer->bearer.media = &bcbearer->media;
        bcbearer->media.send_msg = bcbearer_send;
        sprintf(bcbearer->media.name, "tipc-multicast");

        bcl = &bclink->link;
        memset(bclink, 0, sizeof(struct bclink));
        INIT_LIST_HEAD(&bcl->waiting_ports);
        bcl->next_out_no = 1;
        bclink->node.lock = SPIN_LOCK_UNLOCKED;
        bcl->owner = &bclink->node;
        bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
        link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
        bcl->b_ptr = &bcbearer->bearer;
        bcl->state = WORKING_WORKING;
        strcpy(bcl->name, bc_link_name);

        if (BCLINK_LOG_BUF_SIZE) {
                char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC);

                if (!pb)
                        goto nomem;
                printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
        }

        return TIPC_OK;
}

void bclink_stop(void)
{
        spin_lock_bh(&bc_lock);
        if (bcbearer) {
                link_stop(bcl);
                if (BCLINK_LOG_BUF_SIZE)
                        kfree(bcl->print_buf.buf);
                bcl = NULL;
                kfree(bclink);
                bclink = NULL;
                kfree(bcbearer);
                bcbearer = NULL;
        }
        spin_unlock_bh(&bc_lock);
}
807