/*
 * IPVS:        Locality-Based Least-Connection with Replication scheduler
 *
 * Version:     $Id: ip_vs_lblcr.c,v 1.11 2002/09/15 08:14:08 wensong Exp $
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Julian Anastasov        :    Added the missing (dest->weight>0)
 *                                  condition in the ip_vs_dest_set_max.
 *
 */
/*
 * The lblc/r algorithm is as follows (pseudo code):
 *
 *       if serverSet[dest_ip] is null then
 *               n, serverSet[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- {least-conn (alive) node in serverSet[dest_ip]};
 *               if (n is null) OR
 *                  (n.conns>n.weight AND
 *                   there is a node m with m.conns<m.weight/2) then
 *                   n <- {weighted least-conn node};
 *                   add n to serverSet[dest_ip];
 *               if |serverSet[dest_ip]| > 1 AND
 *                   now - serverSet[dest_ip].lastMod > T then
 *                   m <- {most conn node in serverSet[dest_ip]};
 *                   remove m from serverSet[dest_ip];
 *       if serverSet[dest_ip] changed then
 *               serverSet[dest_ip].lastMod <- now;
 *
 *       return n;
 */
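/*
 * A short worked trace of the pseudo code above (an illustrative
 * sketch added for clarity, not from the original sources): suppose
 * requests for dest_ip 10.0.0.1 arrive and serverSet[10.0.0.1] is
 * empty. The first request creates the set with the weighted
 * least-connection server, say RS1, and traffic for 10.0.0.1 sticks
 * to RS1. If RS1 later has more connections than its weight while
 * some server RS2 runs at less than half of its weight, RS2 is added
 * to the set and shares the load (the "replication" part). Once the
 * set has been stable for longer than T (the lblcr_expiration sysctl
 * below), the busiest member is dropped again, shrinking the set
 * back toward a single server.
 */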
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

#include <net/ip_vs.h>
/*
 *    It is for garbage collection of stale IPVS lblcr entries,
 *    when the table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)
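/*
 * In wall-clock terms (a reading of the constants above, added for
 * clarity): the garbage-collection timer fires every 60*HZ jiffies,
 * i.e. once a minute, and ENTRY_TIMEOUT of 6*60*HZ means an entry
 * untouched for six minutes becomes a candidate for collection.
 */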
/*
 *    It is for full expiration check.
 *    When there is no partial expiration check (garbage collection)
 *    in a half hour, do a full expiration check to collect stale
 *    entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;
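/*
 * Note on units (added for clarity): the default above is one day
 * expressed in jiffies (24*60*60 seconds times HZ). Because the
 * sysctl below is wired to proc_dointvec_jiffies, userspace reads
 * and writes the value in seconds and the kernel converts.
 */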
/*
 *     for IPVS lblcr entry hash table
 */
#ifndef CONFIG_IP_VS_LBLCR_TAB_BITS
#define CONFIG_IP_VS_LBLCR_TAB_BITS      10
#endif
#define IP_VS_LBLCR_TAB_BITS     CONFIG_IP_VS_LBLCR_TAB_BITS
#define IP_VS_LBLCR_TAB_SIZE     (1 << IP_VS_LBLCR_TAB_BITS)
#define IP_VS_LBLCR_TAB_MASK     (IP_VS_LBLCR_TAB_SIZE - 1)
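/*
 * With the default of 10 bits this works out (simple arithmetic,
 * spelled out for clarity) to 1 << 10 = 1024 hash buckets and a mask
 * of 0x3ff; ip_vs_lblcr_init_svc() below then allows up to
 * 1024*16 = 16384 entries before the collector starts trimming.
 */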
/*
 *      IPVS destination set structure and operations
 */
struct ip_vs_dest_list {
	struct ip_vs_dest_list  *next;          /* list link */
	struct ip_vs_dest       *dest;          /* destination server */
};

struct ip_vs_dest_set {
	atomic_t                size;           /* set size */
	unsigned long           lastmod;        /* last modified time */
	struct ip_vs_dest_list  *list;          /* destination list */
	rwlock_t                lock;           /* lock for this list */
};
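/*
 * Reference-counting note (descriptive comment added here): each
 * ip_vs_dest_list node pins its destination by bumping dest->refcnt
 * in ip_vs_dest_set_insert(), and the matching atomic_dec() happens
 * in ip_vs_dest_set_erase()/_eraseall(), so a destination held in a
 * set cannot be freed out from under the scheduler.
 */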
static struct ip_vs_dest_list *
ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
	struct ip_vs_dest_list *e;

	for (e=set->list; e!=NULL; e=e->next) {
		if (e->dest == dest)
			/* already existed */
			return NULL;
	}

	e = kmalloc(sizeof(struct ip_vs_dest_list), GFP_ATOMIC);
	if (e == NULL) {
		IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n");
		return NULL;
	}

	atomic_inc(&dest->refcnt);
	e->dest = dest;

	/* link it to the list */
	write_lock(&set->lock);
	e->next = set->list;
	set->list = e;
	atomic_inc(&set->size);
	write_unlock(&set->lock);

	set->lastmod = jiffies;
	return e;
}
static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
	struct ip_vs_dest_list *e, **ep;

	write_lock(&set->lock);
	for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
		if (e->dest == dest) {
			/* HIT */
			*ep = e->next;
			atomic_dec(&set->size);
			set->lastmod = jiffies;
			atomic_dec(&e->dest->refcnt);
			kfree(e);
			break;
		}
		ep = &e->next;
	}
	write_unlock(&set->lock);
}
static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
{
	struct ip_vs_dest_list *e, **ep;

	write_lock(&set->lock);
	for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
		*ep = e->next;
		/*
		 * We don't kfree dest because it is referred to either
		 * by its service or by the trash dest list.
		 */
		atomic_dec(&e->dest->refcnt);
		kfree(e);
	}
	write_unlock(&set->lock);
}
/* get weighted least-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
{
	register struct ip_vs_dest_list *e;
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	if (set == NULL)
		return NULL;

	read_lock(&set->lock);
	/* select the first destination server, whose weight > 0 */
	for (e=set->list; e!=NULL; e=e->next) {
		least = e->dest;
		if (least->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		if ((atomic_read(&least->weight) > 0)
		    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
			loh = atomic_read(&least->activeconns) * 50
				+ atomic_read(&least->inactconns);
			goto nextstage;
		}
	}
	read_unlock(&set->lock);
	return NULL;

	/* find the destination with the weighted least load */
  nextstage:
	for (e=e->next; e!=NULL; e=e->next) {
		dest = e->dest;
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		doh = atomic_read(&dest->activeconns) * 50
			+ atomic_read(&dest->inactconns);
		if ((loh * atomic_read(&dest->weight) >
		     doh * atomic_read(&least->weight))
		    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
			least = dest;
			loh = doh;
		}
	}
	read_unlock(&set->lock);

	IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d "
		  "activeconns %d refcnt %d weight %d overhead %d\n",
		  NIPQUAD(least->addr), ntohs(least->port),
		  atomic_read(&least->activeconns),
		  atomic_read(&least->refcnt),
		  atomic_read(&least->weight), loh);
	return least;
}
/* get weighted most-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
{
	register struct ip_vs_dest_list *e;
	struct ip_vs_dest *dest, *most;
	int moh, doh;

	if (set == NULL)
		return NULL;

	read_lock(&set->lock);
	/* select the first destination server, whose weight > 0 */
	for (e=set->list; e!=NULL; e=e->next) {
		most = e->dest;
		if (atomic_read(&most->weight) > 0) {
			moh = atomic_read(&most->activeconns) * 50
				+ atomic_read(&most->inactconns);
			goto nextstage;
		}
	}
	read_unlock(&set->lock);
	return NULL;

	/* find the destination with the weighted most load */
  nextstage:
	for (e=e->next; e!=NULL; e=e->next) {
		dest = e->dest;
		doh = atomic_read(&dest->activeconns) * 50
			+ atomic_read(&dest->inactconns);
		/* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
		if ((moh * atomic_read(&dest->weight) <
		     doh * atomic_read(&most->weight))
		    && (atomic_read(&dest->weight) > 0)) {
			most = dest;
			moh = doh;
		}
	}
	read_unlock(&set->lock);

	IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d "
		  "activeconns %d refcnt %d weight %d overhead %d\n",
		  NIPQUAD(most->addr), ntohs(most->port),
		  atomic_read(&most->activeconns),
		  atomic_read(&most->refcnt),
		  atomic_read(&most->weight), moh);
	return most;
}
/*
 *      IPVS lblcr entry represents an association between destination
 *      IP address and its destination server set
 */
struct ip_vs_lblcr_entry {
	struct list_head        list;
	__be32                  addr;           /* destination IP address */
	struct ip_vs_dest_set   set;            /* destination server set */
	unsigned long           lastuse;        /* last used time */
};
/*
 *      IPVS lblcr hash table
 */
struct ip_vs_lblcr_table {
	rwlock_t                lock;           /* lock for this table */
	struct list_head        bucket[IP_VS_LBLCR_TAB_SIZE];  /* hash bucket */
	atomic_t                entries;        /* number of entries */
	int                     max_size;       /* maximum size of entries */
	struct timer_list       periodic_timer; /* collect stale entries */
	int                     rover;          /* rover for expire check */
	int                     counter;        /* counter for no expire */
};
/*
 *      IPVS LBLCR sysctl table
 */
static ctl_table vs_vars_table[] = {
	{
		.procname	= "lblcr_expiration",
		.data		= &sysctl_ip_vs_lblcr_expiration,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
	},
	{ .ctl_name = 0 }
};

static ctl_table vs_table[] = {
	{ .ctl_name = NET_IPV4_VS, .procname = "vs",
	  .mode = 0555, .child = vs_vars_table },
	{ .ctl_name = 0 }
};

static ctl_table ipvs_ipv4_table[] = {
	{ .ctl_name = NET_IPV4, .procname = "ipv4",
	  .mode = 0555, .child = vs_table },
	{ .ctl_name = 0 }
};

static ctl_table lblcr_root_table[] = {
	{ .ctl_name = CTL_NET, .procname = "net",
	  .mode = 0555, .child = ipvs_ipv4_table },
	{ .ctl_name = 0 }
};

static struct ctl_table_header * sysctl_header;
/*
 *      new/free a ip_vs_lblcr_entry, which is a mapping of a destination
 *      IP address to a server.
 */
static inline struct ip_vs_lblcr_entry *ip_vs_lblcr_new(__be32 daddr)
{
	struct ip_vs_lblcr_entry *en;

	en = kmalloc(sizeof(struct ip_vs_lblcr_entry), GFP_ATOMIC);
	if (en == NULL) {
		IP_VS_ERR("ip_vs_lblcr_new(): no memory\n");
		return NULL;
	}

	INIT_LIST_HEAD(&en->list);
	en->addr = daddr;

	/* initialize its dest set */
	atomic_set(&(en->set.size), 0);
	en->set.list = NULL;
	rwlock_init(&en->set.lock);

	return en;
}
static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
	list_del(&en->list);
	ip_vs_dest_set_eraseall(&en->set);
	kfree(en);
}
/*
 *	Returns hash value for IPVS LBLCR entry
 */
static inline unsigned ip_vs_lblcr_hashkey(__be32 addr)
{
	return (ntohl(addr)*2654435761UL) & IP_VS_LBLCR_TAB_MASK;
}
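/*
 * Aside (comment added for clarity): 2654435761 is the classic
 * multiplicative-hashing constant, a prime close to 2^32 divided by
 * the golden ratio, as popularized by Knuth; multiplying by it
 * scrambles the address bits before the mask keeps the low
 * IP_VS_LBLCR_TAB_BITS as the bucket index.
 */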
/*
 *	Hash an entry in the ip_vs_lblcr_table.
 *	returns bool success.
 */
static int
ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
{
	unsigned hash;

	if (!list_empty(&en->list)) {
		IP_VS_ERR("ip_vs_lblcr_hash(): request for already hashed, "
			  "called from %p\n", __builtin_return_address(0));
		return 0;
	}

	/*
	 *	Hash by destination IP address
	 */
	hash = ip_vs_lblcr_hashkey(en->addr);

	write_lock(&tbl->lock);
	list_add(&en->list, &tbl->bucket[hash]);
	atomic_inc(&tbl->entries);
	write_unlock(&tbl->lock);

	return 1;
}
/*
 *  Get ip_vs_lblcr_entry associated with supplied parameters.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr)
{
	unsigned hash;
	struct ip_vs_lblcr_entry *en;

	hash = ip_vs_lblcr_hashkey(addr);

	read_lock(&tbl->lock);

	list_for_each_entry(en, &tbl->bucket[hash], list) {
		if (en->addr == addr) {
			/* HIT */
			read_unlock(&tbl->lock);
			return en;
		}
	}

	read_unlock(&tbl->lock);

	return NULL;
}
/*
 *      Flush all the entries of the specified table.
 */
static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
{
	int i;
	struct ip_vs_lblcr_entry *en, *nxt;

	for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
		write_lock(&tbl->lock);
		list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
		}
		write_unlock(&tbl->lock);
	}
}
static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl)
{
	unsigned long now = jiffies;
	int i, j;
	struct ip_vs_lblcr_entry *en, *nxt;

	for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

		write_lock(&tbl->lock);
		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
			if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
				       now))
				continue;

			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
		}
		write_unlock(&tbl->lock);
	}
	tbl->rover = j;
}
/*
 *      Periodical timer handler for IPVS lblcr table
 *      It is used to collect stale entries when the number of entries
 *      exceeds the maximum size of the table.
 *
 *      Fixme: we probably need a more complicated algorithm to collect
 *             entries that have not been used for a long time even
 *             if the number of entries doesn't exceed the maximum size
 *             of the table.
 *      The full expiration check is for this purpose now.
 */
static void ip_vs_lblcr_check_expire(unsigned long data)
{
	struct ip_vs_lblcr_table *tbl;
	unsigned long now = jiffies;
	int goal;
	int i, j;
	struct ip_vs_lblcr_entry *en, *nxt;

	tbl = (struct ip_vs_lblcr_table *)data;

	if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
		/* do full expiration check */
		ip_vs_lblcr_full_check(tbl);
		tbl->counter = 1;
		goto out;
	}

	if (atomic_read(&tbl->entries) <= tbl->max_size) {
		tbl->counter++;
		goto out;
	}

	goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
	if (goal > tbl->max_size/2)
		goal = tbl->max_size/2;
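	/*
	 * Worked example (comment added for clarity; numbers assume the
	 * default 10-bit table): with max_size = 16384 and, say, 20000
	 * entries, goal = (20000-16384)*4/3 = 4821 entries to reclaim,
	 * comfortably under the cap of max_size/2 = 8192. The cap keeps
	 * a single timer tick from stalling too long under the lock.
	 */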
	for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

		write_lock(&tbl->lock);
		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
			if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
				continue;

			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
			goal--;
		}
		write_unlock(&tbl->lock);
		if (goal <= 0)
			break;
	}
	tbl->rover = j;

  out:
	mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}
static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
{
	int i;
	struct ip_vs_lblcr_table *tbl;

	/* Allocate the ip_vs_lblcr_table for this service */
	tbl = kmalloc(sizeof(struct ip_vs_lblcr_table), GFP_ATOMIC);
	if (tbl == NULL) {
		IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n");
		return -ENOMEM;
	}
	svc->sched_data = tbl;
	IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
		  "current service\n",
		  sizeof(struct ip_vs_lblcr_table));

	/* Initialize the hash buckets */
	for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
		INIT_LIST_HEAD(&tbl->bucket[i]);
	}
	rwlock_init(&tbl->lock);
	tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
	tbl->rover = 0;
	tbl->counter = 1;

	/* Hook periodic timer for garbage collection */
	init_timer(&tbl->periodic_timer);
	tbl->periodic_timer.data = (unsigned long)tbl;
	tbl->periodic_timer.function = ip_vs_lblcr_check_expire;
	tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL;
	add_timer(&tbl->periodic_timer);

	return 0;
}
static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;

	/* remove periodic timer */
	del_timer_sync(&tbl->periodic_timer);

	/* got to clean up table entries here */
	ip_vs_lblcr_flush(tbl);

	/* release the table itself */
	kfree(svc->sched_data);
	IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
		  sizeof(struct ip_vs_lblcr_table));

	return 0;
}
static int ip_vs_lblcr_update_svc(struct ip_vs_service *svc)
{
	return 0;
}
static inline struct ip_vs_dest *
__ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
{
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	/*
	 * We think the overhead of processing active connections is fifty
	 * times higher than that of inactive connections on average. (This
	 * fifty times might not be accurate; we will change it later.) We
	 * use the following formula to estimate the overhead:
	 *                dest->activeconns*50 + dest->inactconns
	 * and the load:
	 *                (dest overhead) / dest->weight
	 *
	 * Remember -- no floats in kernel mode!!!
	 * The comparison of h1*w2 > h2*w1 is equivalent to that of
	 *                h1/w1 > h2/w2
	 * if every weight is larger than zero.
	 *
	 * The server with weight=0 is quiesced and will not receive any
	 * new connection.
	 */
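	/*
	 * Worked example of the cross-multiplication (comment added for
	 * clarity; numbers invented): server A with 3 active and 10
	 * inactive connections has overhead 3*50+10 = 160; server B with
	 * 1 active and 20 inactive has overhead 1*50+20 = 70. With
	 * weights wA=1, wB=2, we compare 160*2 = 320 > 70*1 = 70, so B
	 * carries less weighted load and is preferred, with no division.
	 */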
	list_for_each_entry(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		if (atomic_read(&dest->weight) > 0) {
			least = dest;
			loh = atomic_read(&least->activeconns) * 50
				+ atomic_read(&least->inactconns);
			goto nextstage;
		}
	}
	return NULL;

	/*
	 *    Find the destination with the least load.
	 */
  nextstage:
	list_for_each_entry_continue(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		doh = atomic_read(&dest->activeconns) * 50
			+ atomic_read(&dest->inactconns);
		if (loh * atomic_read(&dest->weight) >
		    doh * atomic_read(&least->weight)) {
			least = dest;
			loh = doh;
		}
	}

	IP_VS_DBG(6, "LBLCR: server %d.%d.%d.%d:%d "
		  "activeconns %d refcnt %d weight %d overhead %d\n",
		  NIPQUAD(least->addr), ntohs(least->port),
		  atomic_read(&least->activeconns),
		  atomic_read(&least->refcnt),
		  atomic_read(&least->weight), loh);

	return least;
}
/*
 *   If this destination server is overloaded and there is a less loaded
 *   server, then return true.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
	if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
		struct ip_vs_dest *d;

		list_for_each_entry(d, &svc->destinations, n_list) {
			if (atomic_read(&d->activeconns)*2
			    < atomic_read(&d->weight)) {
				return 1;
			}
		}
	}
	return 0;
}
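/*
 * Example (comment added for clarity; numbers invented): a server
 * with weight 4 and 5 active connections trips the first test; if
 * any peer has, say, weight 8 and only 3 active connections
 * (3*2 < 8), is_overloaded() returns true, matching the pseudo code
 * condition "n.conns>n.weight AND there is a node m with
 * m.conns<m.weight/2".
 */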
/*
 *    Locality-Based (weighted) Least-Connection scheduling
 */
static struct ip_vs_dest *
ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
	struct ip_vs_dest *dest;
	struct ip_vs_lblcr_table *tbl;
	struct ip_vs_lblcr_entry *en;
	struct iphdr *iph = ip_hdr(skb);

	IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n");

	tbl = (struct ip_vs_lblcr_table *)svc->sched_data;
	en = ip_vs_lblcr_get(tbl, iph->daddr);
	if (en == NULL) {
		dest = __ip_vs_wlc_schedule(svc, iph);
		if (dest == NULL) {
			IP_VS_DBG(1, "no destination available\n");
			return NULL;
		}
		en = ip_vs_lblcr_new(iph->daddr);
		if (en == NULL)
			return NULL;
		ip_vs_dest_set_insert(&en->set, dest);
		ip_vs_lblcr_hash(tbl, en);
	} else {
		dest = ip_vs_dest_set_min(&en->set);
		if (!dest || is_overloaded(dest, svc)) {
			dest = __ip_vs_wlc_schedule(svc, iph);
			if (dest == NULL) {
				IP_VS_DBG(1, "no destination available\n");
				return NULL;
			}
			ip_vs_dest_set_insert(&en->set, dest);
		}
		if (atomic_read(&en->set.size) > 1 &&
		    jiffies-en->set.lastmod > sysctl_ip_vs_lblcr_expiration) {
			struct ip_vs_dest *m;
			m = ip_vs_dest_set_max(&en->set);
			if (m)
				ip_vs_dest_set_erase(&en->set, m);
		}
	}
	en->lastuse = jiffies;

	IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u "
		  "--> server %u.%u.%u.%u:%d\n",
		  NIPQUAD(en->addr),
		  NIPQUAD(dest->addr),
		  ntohs(dest->port));

	return dest;
}
/*
 *      IPVS LBLCR Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
{
	.name =			"lblcr",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
	.init_service =		ip_vs_lblcr_init_svc,
	.done_service =		ip_vs_lblcr_done_svc,
	.update_service =	ip_vs_lblcr_update_svc,
	.schedule =		ip_vs_lblcr_schedule,
};
static int __init ip_vs_lblcr_init(void)
{
	INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list);
	sysctl_header = register_sysctl_table(lblcr_root_table);
	return register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
}
static void __exit ip_vs_lblcr_cleanup(void)
{
	unregister_sysctl_table(sysctl_header);
	unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
}
module_init(ip_vs_lblcr_init);
module_exit(ip_vs_lblcr_cleanup);
MODULE_LICENSE("GPL");
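/*
 * Usage sketch (added comment; the exact ipvsadm invocation is an
 * assumption based on common IPVS documentation, not taken from this
 * file): once the module is loaded, a virtual service selects this
 * scheduler by name, e.g.
 *
 *     ipvsadm -A -t 192.168.0.1:80 -s lblcr
 *     ipvsadm -a -t 192.168.0.1:80 -r 10.0.0.1:80 -m -w 2
 *
 * and the set-expiration T from the pseudo code is tunable in seconds
 * via /proc/sys/net/ipv4/vs/lblcr_expiration.
 */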