2 * IPVS An implementation of the IP virtual server support for the
3 * LINUX operating system. IPVS is now implemented as a module
4 * over the NetFilter framework. IPVS can be used to build a
5 * high-performance and highly available server based on a
8 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
10 * ip_vs_sync: sync connection info from master load balancer to backups
14 * Alexandre Cassen : Added master & backup support at a time.
15 * Alexandre Cassen : Added SyncID support for incoming sync
17 * Justin Ossevoort : Fix endian problem on sync message size.
20 #define KMSG_COMPONENT "IPVS"
21 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/inetdevice.h>
26 #include <linux/net.h>
27 #include <linux/completion.h>
28 #include <linux/delay.h>
29 #include <linux/skbuff.h>
31 #include <linux/igmp.h> /* for ip_mc_join_group */
32 #include <linux/udp.h>
33 #include <linux/err.h>
34 #include <linux/kthread.h>
35 #include <linux/wait.h>
36 #include <linux/kernel.h>
41 #include <net/ip_vs.h>
43 #define IP_VS_SYNC_GROUP 0xe0000051 /* multicast addr - 224.0.0.81 */
44 #define IP_VS_SYNC_PORT 8848 /* multicast port */
48 * IPVS sync connection entry
50 struct ip_vs_sync_conn {
53 /* Protocol, addresses and port numbers */
54 __u8 protocol; /* Which protocol (TCP/UDP) */
58 __be32 caddr; /* client address */
59 __be32 vaddr; /* virtual address */
60 __be32 daddr; /* destination address */
62 /* Flags and state transition */
63 __be16 flags; /* status flags */
64 __be16 state; /* state info */
66 /* The sequence options start here */
69 struct ip_vs_sync_conn_options {
70 struct ip_vs_seq in_seq; /* incoming seq. struct */
71 struct ip_vs_seq out_seq; /* outgoing seq. struct */
74 struct ip_vs_sync_thread_data {
79 #define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn))
80 #define FULL_CONN_SIZE \
81 (sizeof(struct ip_vs_sync_conn) + sizeof(struct ip_vs_sync_conn_options))
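/*
 * Illustrative sketch (not used by the module): the on-wire size of one
 * sync entry depends on whether the connection carries sequence-number
 * state, mirroring the IP_VS_CONN_F_SEQ_MASK test in ip_vs_sync_conn()
 * below.  The helper name is made up for illustration.
 */
static inline size_t example_sync_entry_size(unsigned int conn_flags)
{
	return (conn_flags & IP_VS_CONN_F_SEQ_MASK) ?
		FULL_CONN_SIZE : SIMPLE_CONN_SIZE;
}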
85 The master multicasts messages to the backup load balancers in the
89 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
90 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
91 | Count Conns | SyncID | Size |
92 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
94 | IPVS Sync Connection (1) |
95 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
99 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
101 | IPVS Sync Connection (n) |
102 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
105 #define SYNC_MESG_HEADER_LEN 4
106 #define MAX_CONNS_PER_SYNCBUFF 255 /* nr_conns in ip_vs_sync_mesg is 8 bit */
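/*
 * Illustrative sketch (not part of the module): decoding the 4-byte header
 * of a received sync datagram laid out as in the diagram above.  The real
 * receive path uses struct ip_vs_sync_mesg and ip_vs_process_message()
 * below; the helper name here is made up.
 */
static inline int example_parse_sync_header(const unsigned char *buf,
					    size_t len,
					    unsigned int *nr_conns,
					    unsigned int *syncid,
					    unsigned int *size)
{
	if (len < SYNC_MESG_HEADER_LEN)
		return -EINVAL;
	*nr_conns = buf[0];			/* Count Conns */
	*syncid = buf[1];			/* SyncID */
	*size = (buf[2] << 8) | buf[3];		/* Size, sent in network order */
	return 0;
}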
108 struct ip_vs_sync_mesg {
113 /* ip_vs_sync_conn entries start here */
116 /* the maximum length of sync (sending/receiving) message */
117 static int sync_send_mesg_maxlen;
118 static int sync_recv_mesg_maxlen;
120 struct ip_vs_sync_buff {
121 struct list_head list;
122 unsigned long firstuse;
124 /* pointers for the message data */
125 struct ip_vs_sync_mesg *mesg;
131 /* the sync_buff list head and the lock */
132 static LIST_HEAD(ip_vs_sync_queue);
133 static DEFINE_SPINLOCK(ip_vs_sync_lock);
135 /* current sync_buff for accepting new conn entries */
136 static struct ip_vs_sync_buff *curr_sb = NULL;
137 static DEFINE_SPINLOCK(curr_sb_lock);
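/*
 * Overview of the buffer flow implemented below: ip_vs_sync_conn() appends
 * entries to curr_sb and, once another FULL_CONN_SIZE entry would no longer
 * fit, queues the buffer on ip_vs_sync_queue via sb_queue_tail().
 * sync_thread_master() drains the queue with sb_dequeue(), periodically
 * flushes a partially filled curr_sb via get_curr_sync_buff(), and
 * multicasts each buffer with ip_vs_send_sync_msg().
 */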
139 /* ipvs sync daemon state */
140 volatile int ip_vs_sync_state = IP_VS_STATE_NONE;
141 volatile int ip_vs_master_syncid = 0;
142 volatile int ip_vs_backup_syncid = 0;
144 /* multicast interface name */
145 char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
146 char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
148 /* sync daemon tasks */
149 static struct task_struct *sync_master_thread;
150 static struct task_struct *sync_backup_thread;
153 static struct sockaddr_in mcast_addr = {
154 .sin_family = AF_INET,
155 .sin_port = cpu_to_be16(IP_VS_SYNC_PORT),
156 .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP),
160 static inline struct ip_vs_sync_buff *sb_dequeue(void)
162 struct ip_vs_sync_buff *sb;
164 spin_lock_bh(&ip_vs_sync_lock);
165 if (list_empty(&ip_vs_sync_queue)) {
168 sb = list_entry(ip_vs_sync_queue.next,
169 struct ip_vs_sync_buff,
173 spin_unlock_bh(&ip_vs_sync_lock);
178 static inline struct ip_vs_sync_buff * ip_vs_sync_buff_create(void)
180 struct ip_vs_sync_buff *sb;
182 if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
185 if (!(sb->mesg=kmalloc(sync_send_mesg_maxlen, GFP_ATOMIC))) {
189 sb->mesg->nr_conns = 0;
190 sb->mesg->syncid = ip_vs_master_syncid;
192 sb->head = (unsigned char *)sb->mesg + 4; /* past the 4-byte message header */
193 sb->end = (unsigned char *)sb->mesg + sync_send_mesg_maxlen;
194 sb->firstuse = jiffies;
198 static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb)
204 static inline void sb_queue_tail(struct ip_vs_sync_buff *sb)
206 spin_lock(&ip_vs_sync_lock);
207 if (ip_vs_sync_state & IP_VS_STATE_MASTER)
208 list_add_tail(&sb->list, &ip_vs_sync_queue);
210 ip_vs_sync_buff_release(sb);
211 spin_unlock(&ip_vs_sync_lock);
215 * Get the current sync buffer if it has been created for more
216 * than the specified time or the specified time is zero.
218 static inline struct ip_vs_sync_buff *
219 get_curr_sync_buff(unsigned long time)
221 struct ip_vs_sync_buff *sb;
223 spin_lock_bh(&curr_sb_lock);
224 if (curr_sb && (time == 0 ||
225 time_before(jiffies - curr_sb->firstuse, time))) {
230 spin_unlock_bh(&curr_sb_lock);
236 * Add an ip_vs_conn entry into the current sync_buff.
237 * Called by ip_vs_in.
239 void ip_vs_sync_conn(struct ip_vs_conn *cp)
241 struct ip_vs_sync_mesg *m;
242 struct ip_vs_sync_conn *s;
245 spin_lock(&curr_sb_lock);
247 if (!(curr_sb=ip_vs_sync_buff_create())) {
248 spin_unlock(&curr_sb_lock);
249 pr_err("ip_vs_sync_buff_create failed.\n");
254 len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
257 s = (struct ip_vs_sync_conn *)curr_sb->head;
260 s->protocol = cp->protocol;
261 s->cport = cp->cport;
262 s->vport = cp->vport;
263 s->dport = cp->dport;
264 s->caddr = cp->caddr.ip;
265 s->vaddr = cp->vaddr.ip;
266 s->daddr = cp->daddr.ip;
267 s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED);
268 s->state = htons(cp->state);
269 if (cp->flags & IP_VS_CONN_F_SEQ_MASK) {
270 struct ip_vs_sync_conn_options *opt =
271 (struct ip_vs_sync_conn_options *)&s[1];
272 memcpy(opt, &cp->in_seq, sizeof(*opt));
277 curr_sb->head += len;
279 /* check if there is space for the next one */
280 if (curr_sb->head+FULL_CONN_SIZE > curr_sb->end) {
281 sb_queue_tail(curr_sb);
284 spin_unlock(&curr_sb_lock);
286 /* synchronize its controller if it has one */
288 ip_vs_sync_conn(cp->control);
293 * Process received multicast message and create the corresponding
294 * ip_vs_conn entries.
296 static void ip_vs_process_message(const char *buffer, const size_t buflen)
298 struct ip_vs_sync_mesg *m = (struct ip_vs_sync_mesg *)buffer;
299 struct ip_vs_sync_conn *s;
300 struct ip_vs_sync_conn_options *opt;
301 struct ip_vs_conn *cp;
302 struct ip_vs_protocol *pp;
303 struct ip_vs_dest *dest;
307 if (buflen < sizeof(struct ip_vs_sync_mesg)) {
308 IP_VS_ERR_RL("sync message header too short\n");
312 /* Convert size back to host byte order */
313 m->size = ntohs(m->size);
315 if (buflen != m->size) {
316 IP_VS_ERR_RL("bogus sync message size\n");
320 /* SyncID sanity check */
321 if (ip_vs_backup_syncid != 0 && m->syncid != ip_vs_backup_syncid) {
322 IP_VS_DBG(7, "Ignoring incoming msg with syncid = %d\n",
327 p = (char *)buffer + sizeof(struct ip_vs_sync_mesg);
328 for (i=0; i<m->nr_conns; i++) {
329 unsigned flags, state;
331 if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
332 IP_VS_ERR_RL("bogus conn in sync message\n");
335 s = (struct ip_vs_sync_conn *) p;
336 flags = ntohs(s->flags) | IP_VS_CONN_F_SYNC;
337 flags &= ~IP_VS_CONN_F_HASHED;
338 if (flags & IP_VS_CONN_F_SEQ_MASK) {
339 opt = (struct ip_vs_sync_conn_options *)&s[1];
341 if (p > buffer+buflen) {
342 IP_VS_ERR_RL("bogus conn options in sync message\n");
347 p += SIMPLE_CONN_SIZE;
350 state = ntohs(s->state);
351 if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
352 pp = ip_vs_proto_get(s->protocol);
354 IP_VS_ERR_RL("Unsupported protocol %u in sync msg\n",
358 if (state >= pp->num_states) {
359 IP_VS_DBG(2, "Invalid %s state %u in sync msg\n",
364 /* protocol in templates is not used for state/timeout */
367 IP_VS_DBG(2, "Invalid template state %u in sync msg\n",
373 if (!(flags & IP_VS_CONN_F_TEMPLATE))
374 cp = ip_vs_conn_in_get(AF_INET, s->protocol,
375 (union nf_inet_addr *)&s->caddr,
377 (union nf_inet_addr *)&s->vaddr,
380 cp = ip_vs_ct_in_get(AF_INET, s->protocol,
381 (union nf_inet_addr *)&s->caddr,
383 (union nf_inet_addr *)&s->vaddr,
387 * Find the appropriate destination for the connection.
388 * If it is not found, the connection will remain unbound
391 dest = ip_vs_find_dest(AF_INET,
392 (union nf_inet_addr *)&s->daddr,
394 (union nf_inet_addr *)&s->vaddr,
397 /* Set the appropriate activity flag */
398 if (s->protocol == IPPROTO_TCP) {
399 if (state != IP_VS_TCP_S_ESTABLISHED)
400 flags |= IP_VS_CONN_F_INACTIVE;
402 flags &= ~IP_VS_CONN_F_INACTIVE;
404 cp = ip_vs_conn_new(AF_INET, s->protocol,
405 (union nf_inet_addr *)&s->caddr,
407 (union nf_inet_addr *)&s->vaddr,
409 (union nf_inet_addr *)&s->daddr,
413 atomic_dec(&dest->refcnt);
415 pr_err("ip_vs_conn_new failed\n");
418 } else if (!cp->dest) {
419 dest = ip_vs_try_bind_dest(cp);
421 atomic_dec(&dest->refcnt);
422 } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
423 (cp->state != state)) {
424 /* update active/inactive flag for the connection */
426 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
427 (state != IP_VS_TCP_S_ESTABLISHED)) {
428 atomic_dec(&dest->activeconns);
429 atomic_inc(&dest->inactconns);
430 cp->flags |= IP_VS_CONN_F_INACTIVE;
431 } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
432 (state == IP_VS_TCP_S_ESTABLISHED)) {
433 atomic_inc(&dest->activeconns);
434 atomic_dec(&dest->inactconns);
435 cp->flags &= ~IP_VS_CONN_F_INACTIVE;
440 memcpy(&cp->in_seq, opt, sizeof(*opt));
441 atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
443 cp->old_state = cp->state;
445 * We cannot recover the right timeout for templates
446 * in all cases because we cannot find the right fwmark
447 * virtual service. If needed, we can do it for
448 * non-fwmark persistent services.
450 if (!(flags & IP_VS_CONN_F_TEMPLATE) && pp->timeout_table)
451 cp->timeout = pp->timeout_table[state];
453 cp->timeout = (3*60*HZ);
460 * Set up loopback of outgoing multicasts on a sending socket
462 static void set_mcast_loop(struct sock *sk, u_char loop)
464 struct inet_sock *inet = inet_sk(sk);
466 /* setsockopt(sock, SOL_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop)); */
468 inet->mc_loop = loop ? 1 : 0;
473 * Specify TTL for outgoing multicasts on a sending socket
475 static void set_mcast_ttl(struct sock *sk, u_char ttl)
477 struct inet_sock *inet = inet_sk(sk);
479 /* setsockopt(sock, SOL_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl)); */
486 * Specify the default interface for outgoing multicasts
488 static int set_mcast_if(struct sock *sk, char *ifname)
490 struct net_device *dev;
491 struct inet_sock *inet = inet_sk(sk);
493 if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
496 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
500 inet->mc_index = dev->ifindex;
501 /* inet->mc_addr = 0; */
509 * Set the maximum length of sync message according to the
510 * specified interface's MTU.
512 static int set_sync_mesg_maxlen(int sync_state)
514 struct net_device *dev;
517 if (sync_state == IP_VS_STATE_MASTER) {
518 if ((dev = __dev_get_by_name(&init_net, ip_vs_master_mcast_ifn)) == NULL)
521 num = (dev->mtu - sizeof(struct iphdr) -
522 sizeof(struct udphdr) -
523 SYNC_MESG_HEADER_LEN - 20) / SIMPLE_CONN_SIZE;
524 sync_send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
525 SIMPLE_CONN_SIZE * min(num, MAX_CONNS_PER_SYNCBUFF);
526 IP_VS_DBG(7, "setting the maximum length of sync sending "
527 "message %d.\n", sync_send_mesg_maxlen);
528 } else if (sync_state == IP_VS_STATE_BACKUP) {
529 if ((dev = __dev_get_by_name(&init_net, ip_vs_backup_mcast_ifn)) == NULL)
532 sync_recv_mesg_maxlen = dev->mtu -
533 sizeof(struct iphdr) - sizeof(struct udphdr);
534 IP_VS_DBG(7, "setting the maximum length of sync receiving "
535 "message %d.\n", sync_recv_mesg_maxlen);
543 * Join a multicast group.
544 * The group is specified by a class D multicast address 224.0.0.0/4
545 * in the in_addr structure passed in as a parameter.
548 join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
550 struct ip_mreqn mreq;
551 struct net_device *dev;
554 memset(&mreq, 0, sizeof(mreq));
555 memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
557 if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
559 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
562 mreq.imr_ifindex = dev->ifindex;
565 ret = ip_mc_join_group(sk, &mreq);
572 static int bind_mcastif_addr(struct socket *sock, char *ifname)
574 struct net_device *dev;
576 struct sockaddr_in sin;
578 if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
581 addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
583 pr_err("You probably need to specify an IP address on "
584 "the multicast interface.\n");
586 IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
589 /* Now bind the socket with the address of multicast interface */
590 sin.sin_family = AF_INET;
591 sin.sin_addr.s_addr = addr;
594 return sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin));
598 * Set up sending multicast socket over UDP
600 static struct socket * make_send_sock(void)
605 /* First create a socket */
606 result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
608 pr_err("Error during creation of socket; terminating\n");
609 return ERR_PTR(result);
612 result = set_mcast_if(sock->sk, ip_vs_master_mcast_ifn);
614 pr_err("Error setting outbound mcast interface\n");
618 set_mcast_loop(sock->sk, 0);
619 set_mcast_ttl(sock->sk, 1);
621 result = bind_mcastif_addr(sock, ip_vs_master_mcast_ifn);
623 pr_err("Error binding address of the mcast interface\n");
627 result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr,
628 sizeof(struct sockaddr), 0);
630 pr_err("Error connecting to the multicast addr\n");
638 return ERR_PTR(result);
643 * Set up receiving multicast socket over UDP
645 static struct socket * make_receive_sock(void)
650 /* First create a socket */
651 result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
653 pr_err("Error during creation of socket; terminating\n");
654 return ERR_PTR(result);
657 /* it is equivalent to the SO_REUSEADDR option in user-space */
658 sock->sk->sk_reuse = 1;
660 result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr,
661 sizeof(struct sockaddr));
663 pr_err("Error binding to the multicast addr\n");
667 /* join the multicast group */
668 result = join_mcast_group(sock->sk,
669 (struct in_addr *) &mcast_addr.sin_addr,
670 ip_vs_backup_mcast_ifn);
672 pr_err("Error joining the multicast group\n");
680 return ERR_PTR(result);
685 ip_vs_send_async(struct socket *sock, const char *buffer, const size_t length)
687 struct msghdr msg = {.msg_flags = MSG_DONTWAIT|MSG_NOSIGNAL};
692 iov.iov_base = (void *)buffer;
693 iov.iov_len = length;
695 len = kernel_sendmsg(sock, &msg, &iov, 1, (size_t)(length));
702 ip_vs_send_sync_msg(struct socket *sock, struct ip_vs_sync_mesg *msg)
708 /* Put size in network byte order */
709 msg->size = htons(msg->size);
711 if (ip_vs_send_async(sock, (char *)msg, msize) != msize)
712 pr_err("ip_vs_send_async error\n");
716 ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
718 struct msghdr msg = {NULL,};
724 /* Receive a packet */
725 iov.iov_base = buffer;
726 iov.iov_len = (size_t)buflen;
728 len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, 0);
738 static int sync_thread_master(void *data)
740 struct ip_vs_sync_thread_data *tinfo = data;
741 struct ip_vs_sync_buff *sb;
743 pr_info("sync thread started: state = MASTER, mcast_ifn = %s, "
745 ip_vs_master_mcast_ifn, ip_vs_master_syncid);
747 while (!kthread_should_stop()) {
748 while ((sb = sb_dequeue())) {
749 ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
750 ip_vs_sync_buff_release(sb);
753 /* check if entries stay in curr_sb for 2 seconds */
754 sb = get_curr_sync_buff(2 * HZ);
756 ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
757 ip_vs_sync_buff_release(sb);
760 schedule_timeout_interruptible(HZ);
763 /* clean up the sync_buff queue */
764 while ((sb=sb_dequeue())) {
765 ip_vs_sync_buff_release(sb);
768 /* clean up the current sync_buff */
769 if ((sb = get_curr_sync_buff(0))) {
770 ip_vs_sync_buff_release(sb);
773 /* release the sending multicast socket */
774 sock_release(tinfo->sock);
781 static int sync_thread_backup(void *data)
783 struct ip_vs_sync_thread_data *tinfo = data;
786 pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
788 ip_vs_backup_mcast_ifn, ip_vs_backup_syncid);
790 while (!kthread_should_stop()) {
791 wait_event_interruptible(*tinfo->sock->sk->sk_sleep,
792 !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue)
793 || kthread_should_stop());
795 /* do we have data now? */
796 while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) {
797 len = ip_vs_receive(tinfo->sock, tinfo->buf,
798 sync_recv_mesg_maxlen);
800 pr_err("receiving message error\n");
804 /* disable bottom halves, because the data is shared
805 with softirq context while getting/creating conns */
807 ip_vs_process_message(tinfo->buf, len);
812 /* release the receiving multicast socket */
813 sock_release(tinfo->sock);
821 int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
823 struct ip_vs_sync_thread_data *tinfo;
824 struct task_struct **realtask, *task;
826 char *name, *buf = NULL;
827 int (*threadfn)(void *data);
828 int result = -ENOMEM;
830 IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
831 IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
832 sizeof(struct ip_vs_sync_conn));
834 if (state == IP_VS_STATE_MASTER) {
835 if (sync_master_thread)
838 strlcpy(ip_vs_master_mcast_ifn, mcast_ifn,
839 sizeof(ip_vs_master_mcast_ifn));
840 ip_vs_master_syncid = syncid;
841 realtask = &sync_master_thread;
842 name = "ipvs_syncmaster";
843 threadfn = sync_thread_master;
844 sock = make_send_sock();
845 } else if (state == IP_VS_STATE_BACKUP) {
846 if (sync_backup_thread)
849 strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn,
850 sizeof(ip_vs_backup_mcast_ifn));
851 ip_vs_backup_syncid = syncid;
852 realtask = &sync_backup_thread;
853 name = "ipvs_syncbackup";
854 threadfn = sync_thread_backup;
855 sock = make_receive_sock();
861 result = PTR_ERR(sock);
865 set_sync_mesg_maxlen(state);
866 if (state == IP_VS_STATE_BACKUP) {
867 buf = kmalloc(sync_recv_mesg_maxlen, GFP_KERNEL);
872 tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
879 task = kthread_run(threadfn, tinfo, name);
881 result = PTR_ERR(task);
887 ip_vs_sync_state |= state;
889 /* increase the module use count */
890 ip_vs_use_count_inc();
905 int stop_sync_thread(int state)
907 IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
909 if (state == IP_VS_STATE_MASTER) {
910 if (!sync_master_thread)
913 pr_info("stopping master sync thread %d ...\n",
914 task_pid_nr(sync_master_thread));
917 * The lock synchronizes with sb_queue_tail(), so that we don't
918 * add sync buffers to the queue when we are already in the
919 * process of stopping the master sync daemon.
922 spin_lock_bh(&ip_vs_sync_lock);
923 ip_vs_sync_state &= ~IP_VS_STATE_MASTER;
924 spin_unlock_bh(&ip_vs_sync_lock);
925 kthread_stop(sync_master_thread);
926 sync_master_thread = NULL;
927 } else if (state == IP_VS_STATE_BACKUP) {
928 if (!sync_backup_thread)
931 pr_info("stopping backup sync thread %d ...\n",
932 task_pid_nr(sync_backup_thread));
934 ip_vs_sync_state &= ~IP_VS_STATE_BACKUP;
935 kthread_stop(sync_backup_thread);
936 sync_backup_thread = NULL;
941 /* decrease the module use count */
942 ip_vs_use_count_dec();
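/*
 * Usage sketch: these threads are normally driven from user space with
 * ipvsadm, e.g. (assuming the standard ipvsadm daemon options)
 *
 *	ipvsadm --start-daemon master --mcast-interface eth0 --syncid 10
 *	ipvsadm --start-daemon backup --mcast-interface eth0 --syncid 10
 *	ipvsadm --stop-daemon master
 *
 * which reach start_sync_thread()/stop_sync_thread() with the matching
 * IP_VS_STATE_* value, multicast interface name and SyncID.
 */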