/*
 * IPVS:        Locality-Based Least-Connection with Replication scheduler
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Julian Anastasov        :    Added the missing (dest->weight>0)
 *                                  condition in the ip_vs_dest_set_max.
 *
 */
/*
 * The lblc/r algorithm is as follows (pseudo code):
 *
 *       if serverSet[dest_ip] is null then
 *               n, serverSet[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- {least-conn (alive) node in serverSet[dest_ip]};
 *               if (n is null) OR
 *                  (n.conns>n.weight AND
 *                   there is a node m with m.conns<m.weight/2) then
 *                   n <- {weighted least-conn node};
 *                   add n to serverSet[dest_ip];
 *               if |serverSet[dest_ip]| > 1 AND
 *                   now - serverSet[dest_ip].lastMod > T then
 *                   m <- {most conn node in serverSet[dest_ip]};
 *                   remove m from serverSet[dest_ip];
 *       if serverSet[dest_ip] changed then
 *               serverSet[dest_ip].lastMod <- now;
 *
 *       return n;
 */
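/*
 * A worked example of the pseudo code, with invented numbers: suppose
 * serverSet[dest_ip] = {A, B}, A.conns=5, A.weight=4, B.conns=1,
 * B.weight=4.  B is the least-conn alive node, so n = B.  B.conns does
 * not exceed B.weight, so the set is left alone.  Only when the chosen
 * node is overloaded (n.conns > n.weight) while some node elsewhere in
 * the service is mostly idle (m.conns < m.weight/2) is the weighted
 * least-conn node pulled into the set; and once the set has more than
 * one member and has not been modified for longer than T, its busiest
 * member is dropped again.
 */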
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

#include <net/ip_vs.h>
/*
 *    Used for garbage collection of stale IPVS lblcr entries,
 *    when the table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)

/*
 *    Used for the full expiration check.
 *    When there has been no partial expiration check (garbage collection)
 *    for half an hour, do a full expiration check to collect stale
 *    entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;
/*
 *     for IPVS lblcr entry hash table
 */
#ifndef CONFIG_IP_VS_LBLCR_TAB_BITS
#define CONFIG_IP_VS_LBLCR_TAB_BITS      10
#endif
#define IP_VS_LBLCR_TAB_BITS     CONFIG_IP_VS_LBLCR_TAB_BITS
#define IP_VS_LBLCR_TAB_SIZE     (1 << IP_VS_LBLCR_TAB_BITS)
#define IP_VS_LBLCR_TAB_MASK     (IP_VS_LBLCR_TAB_SIZE - 1)
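/*
 * With the default CONFIG_IP_VS_LBLCR_TAB_BITS of 10 this gives
 * 1 << 10 = 1024 hash buckets and a mask of 0x3ff, so a hash value is
 * reduced to a bucket index with a single AND.
 */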
/*
 *      IPVS destination set structure and operations
 */
struct ip_vs_dest_list {
        struct ip_vs_dest_list  *next;          /* list link */
        struct ip_vs_dest       *dest;          /* destination server */
};

struct ip_vs_dest_set {
        atomic_t                size;           /* set size */
        unsigned long           lastmod;        /* last modified time */
        struct ip_vs_dest_list  *list;          /* destination list */
        rwlock_t                lock;           /* lock for this list */
};
static struct ip_vs_dest_list *
ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
        struct ip_vs_dest_list *e;

        for (e = set->list; e != NULL; e = e->next) {
                if (e->dest == dest)
                        /* already existed */
                        return NULL;
        }

        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (e == NULL) {
                IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n");
                return NULL;
        }

        atomic_inc(&dest->refcnt);      /* the set holds a reference */
        e->dest = dest;

        /* link it to the head of the list */
        e->next = set->list;
        set->list = e;
        atomic_inc(&set->size);

        set->lastmod = jiffies;
        return e;
}
static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
        struct ip_vs_dest_list *e, **ep;

        for (ep = &set->list, e = *ep; e != NULL; e = *ep) {
                if (e->dest == dest) {
                        *ep = e->next;  /* unlink, then drop our reference */
                        atomic_dec(&set->size);
                        set->lastmod = jiffies;
                        atomic_dec(&e->dest->refcnt);
                        kfree(e);
                        break;
                }
                ep = &e->next;
        }
}
static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
{
        struct ip_vs_dest_list *e, **ep;

        write_lock(&set->lock);
        for (ep = &set->list, e = *ep; e != NULL; e = *ep) {
                *ep = e->next;
                /*
                 * We don't kfree dest because it is referred to either
                 * by its service or by the trash dest list.
                 */
                atomic_dec(&e->dest->refcnt);
                kfree(e);
        }
        write_unlock(&set->lock);
}
/* get weighted least-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
{
        register struct ip_vs_dest_list *e;
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        if (set == NULL)
                return NULL;

        /* select the first destination server, whose weight > 0 */
        for (e = set->list; e != NULL; e = e->next) {
                least = e->dest;
                if (least->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                if ((atomic_read(&least->weight) > 0)
                    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
                        loh = atomic_read(&least->activeconns) * 50
                                + atomic_read(&least->inactconns);
                        goto nextstage;
                }
        }
        return NULL;

        /* find the destination with the weighted least load */
  nextstage:
        for (e = e->next; e != NULL; e = e->next) {
                dest = e->dest;
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                if ((loh * atomic_read(&dest->weight) >
                     doh * atomic_read(&least->weight))
                    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "ip_vs_dest_set_min: server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
                      atomic_read(&least->refcnt),
                      atomic_read(&least->weight), loh);
        return least;
}
/* get weighted most-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
{
        register struct ip_vs_dest_list *e;
        struct ip_vs_dest *dest, *most;
        int moh, doh;

        if (set == NULL)
                return NULL;

        /* select the first destination server, whose weight > 0 */
        for (e = set->list; e != NULL; e = e->next) {
                most = e->dest;
                if (atomic_read(&most->weight) > 0) {
                        moh = atomic_read(&most->activeconns) * 50
                                + atomic_read(&most->inactconns);
                        goto nextstage;
                }
        }
        return NULL;

        /* find the destination with the weighted most load */
  nextstage:
        for (e = e->next; e != NULL; e = e->next) {
                dest = e->dest;
                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
                if ((moh * atomic_read(&dest->weight) <
                     doh * atomic_read(&most->weight))
                    && (atomic_read(&dest->weight) > 0)) {
                        most = dest;
                        moh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "ip_vs_dest_set_max: server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      IP_VS_DBG_ADDR(most->af, &most->addr), ntohs(most->port),
                      atomic_read(&most->activeconns),
                      atomic_read(&most->refcnt),
                      atomic_read(&most->weight), moh);
        return most;
}
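/*
 * Illustrative arithmetic for the cross-multiplied comparison above
 * (numbers invented): with most = {moh=100, weight=2} and
 * dest = {doh=300, weight=4}, the per-weight loads are 100/2 = 50 and
 * 300/4 = 75.  The code checks 100*4 < 300*2, i.e. 400 < 600, which
 * holds, so dest becomes the new "most" without any division.
 */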
/*
 *      IPVS lblcr entry represents an association between destination
 *      IP address and its destination server set
 */
struct ip_vs_lblcr_entry {
        struct list_head        list;
        int                     af;             /* address family */
        union nf_inet_addr      addr;           /* destination IP address */
        struct ip_vs_dest_set   set;            /* destination server set */
        unsigned long           lastuse;        /* last used time */
};
/*
 *      IPVS lblcr hash table
 */
struct ip_vs_lblcr_table {
        struct list_head        bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */
        atomic_t                entries;        /* number of entries */
        int                     max_size;       /* maximum size of entries */
        struct timer_list       periodic_timer; /* collect stale entries */
        int                     rover;          /* rover for expire check */
        int                     counter;        /* counter for no expire */
};
/*
 *      IPVS LBLCR sysctl table
 */
static ctl_table vs_vars_table[] = {
        {
                .procname       = "lblcr_expiration",
                .data           = &sysctl_ip_vs_lblcr_expiration,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};

static struct ctl_table_header *sysctl_header;
static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
        list_del(&en->list);
        ip_vs_dest_set_eraseall(&en->set);
        kfree(en);
}
/*
 *      Returns hash value for IPVS LBLCR entry
 */
static inline unsigned
ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
{
        __be32 addr_fold = addr->ip;

#ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
                addr_fold = addr->ip6[0]^addr->ip6[1]^
                            addr->ip6[2]^addr->ip6[3];
#endif
        return (ntohl(addr_fold)*2654435761UL) & IP_VS_LBLCR_TAB_MASK;
}
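/*
 * The multiplier 2654435761 (0x9e3779b1) used above is the classic
 * Knuth multiplicative-hash constant, a prime close to 2^32 divided by
 * the golden ratio; the multiplication scatters nearby addresses
 * before the table mask selects the bucket.
 */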
/*
 *      Hash an entry in the ip_vs_lblcr_table.
 */
static void
ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
{
        unsigned hash = ip_vs_lblcr_hashkey(en->af, &en->addr);

        list_add(&en->list, &tbl->bucket[hash]);
        atomic_inc(&tbl->entries);
}
/*
 *  Get ip_vs_lblcr_entry associated with supplied parameters. Called under
 *  read lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
                const union nf_inet_addr *addr)
{
        unsigned hash = ip_vs_lblcr_hashkey(af, addr);
        struct ip_vs_lblcr_entry *en;

        list_for_each_entry(en, &tbl->bucket[hash], list)
                if (ip_vs_addr_equal(af, &en->addr, addr))
                        return en;

        return NULL;
}
/*
 * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
 * IP address to its server set. Called under write lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
                struct ip_vs_dest *dest)
{
        struct ip_vs_lblcr_entry *en;

        en = ip_vs_lblcr_get(dest->af, tbl, daddr);
        if (!en) {
                en = kmalloc(sizeof(*en), GFP_ATOMIC);
                if (!en) {
                        IP_VS_ERR("ip_vs_lblcr_new(): no memory\n");
                        return NULL;
                }

                en->af = dest->af;
                ip_vs_addr_copy(dest->af, &en->addr, daddr);
                en->lastuse = jiffies;

                /* initialize its dest set */
                atomic_set(&(en->set.size), 0);
                en->set.list = NULL;
                rwlock_init(&en->set.lock);

                ip_vs_lblcr_hash(tbl, en);
        }

        write_lock(&en->set.lock);
        ip_vs_dest_set_insert(&en->set, dest);
        write_unlock(&en->set.lock);

        return en;
}
/*
 *      Flush all the entries of the specified table.
 */
static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
{
        int i;
        struct ip_vs_lblcr_entry *en, *nxt;

        /* No locking required, only called during cleanup. */
        for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
                        ip_vs_lblcr_free(en);
                }
        }
}
static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        unsigned long now = jiffies;
        int i, j;
        struct ip_vs_lblcr_entry *en, *nxt;

        for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_after(en->lastuse
                                        + sysctl_ip_vs_lblcr_expiration, now))
                                continue;

                        ip_vs_lblcr_free(en);
                        atomic_dec(&tbl->entries);
                }
                write_unlock(&svc->sched_lock);
        }
        tbl->rover = j;
}
/*
 *      Periodic timer handler for the IPVS lblcr table.
 *      It is used to collect stale entries when the number of entries
 *      exceeds the maximum size of the table.
 *
 *      Fixme: we probably need a more complicated algorithm to collect
 *             entries that have not been used for a long time even
 *             if the number of entries doesn't exceed the maximum size
 *             of the table.
 *      The full expiration check is for this purpose now.
 */
static void ip_vs_lblcr_check_expire(unsigned long data)
{
        struct ip_vs_service *svc = (struct ip_vs_service *) data;
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        unsigned long now = jiffies;
        int goal;
        int i, j;
        struct ip_vs_lblcr_entry *en, *nxt;

        if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
                /* do full expiration check */
                ip_vs_lblcr_full_check(svc);
                tbl->counter = 1;
                goto out;
        }

        if (atomic_read(&tbl->entries) <= tbl->max_size) {
                tbl->counter++;
                goto out;
        }

        goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
        if (goal > tbl->max_size/2)
                goal = tbl->max_size/2;
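        /*
         * Worked example (invented numbers): with 18000 entries and the
         * default max_size of 16384, goal = (18000-16384)*4/3 = 2154
         * entries to reclaim; the max_size/2 cap only applies once the
         * table grows past 11/8 of max_size (22528 entries here).
         */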
        for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
                                continue;

                        ip_vs_lblcr_free(en);
                        atomic_dec(&tbl->entries);
                        goal--;
                }
                write_unlock(&svc->sched_lock);
                if (goal <= 0)
                        break;
        }
        tbl->rover = j;

  out:
        mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}
static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
{
        int i;
        struct ip_vs_lblcr_table *tbl;

        /* Allocate the ip_vs_lblcr_table for this service */
        tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
        if (tbl == NULL) {
                IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n");
                return -ENOMEM;
        }
        svc->sched_data = tbl;
        IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
                  "current service\n", sizeof(*tbl));

        /* Initialize the hash buckets */
        for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++)
                INIT_LIST_HEAD(&tbl->bucket[i]);
        tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
        tbl->rover = 0;
        tbl->counter = 1;

        /* Hook periodic timer for garbage collection */
        setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire,
                    (unsigned long)svc);
        mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);

        return 0;
}
static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;

        /* remove periodic timer */
        del_timer_sync(&tbl->periodic_timer);

        /* got to clean up table entries here */
        ip_vs_lblcr_flush(tbl);

        /* release the table itself */
        kfree(tbl);
        IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
                  sizeof(*tbl));

        return 0;
}
static inline struct ip_vs_dest *
__ip_vs_lblcr_schedule(struct ip_vs_service *svc)
{
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        /*
         * We think the overhead of processing active connections is fifty
         * times higher than that of inactive connections on average. (This
         * fifty times might not be accurate; we will change it later.) We
         * use the following formula to estimate the overhead:
         *                dest->activeconns*50 + dest->inactconns
         * and the load:
         *                (dest overhead) / dest->weight
         *
         * Remember -- no floats in kernel mode!!!
         * The comparison of h1*w2 > h2*w1 is equivalent to that of
         *                h1/w1 > h2/w2
         * if every weight is larger than zero.
         *
         * The server with weight=0 is quiesced and will not receive any
         * new connection.
         */
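        /*
         * Worked example of the integer comparison (numbers invented):
         * d1: 10 active, 20 inactive, weight 3  =>  h1 = 10*50+20 = 520
         * d2:  4 active, 10 inactive, weight 1  =>  h2 =  4*50+10 = 210
         * h1*w2 = 520 and h2*w1 = 630; since 520 < 630 we have
         * h1/w1 < h2/w2, so d1 carries the smaller weighted load and
         * stays selected despite having more connections.
         */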
        list_for_each_entry(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                if (atomic_read(&dest->weight) > 0) {
                        least = dest;
                        loh = atomic_read(&least->activeconns) * 50
                                + atomic_read(&least->inactconns);
                        goto nextstage;
                }
        }
        return NULL;

        /*
         *    Find the destination with the least load.
         */
  nextstage:
        list_for_each_entry_continue(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                if (loh * atomic_read(&dest->weight) >
                    doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "LBLCR: server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
                      atomic_read(&least->refcnt),
                      atomic_read(&least->weight), loh);

        return least;
}
/*
 *   If this destination server is overloaded and there is a less loaded
 *   server, then return true.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
        if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
                struct ip_vs_dest *d;

                list_for_each_entry(d, &svc->destinations, n_list) {
                        if (atomic_read(&d->activeconns)*2
                            < atomic_read(&d->weight)) {
                                return 1;
                        }
                }
        }
        return 0;
}
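/*
 * Example with invented numbers: dest has activeconns=5 and weight=4,
 * so it exceeds its weight; if any other server d has activeconns=1
 * and weight=4 (1*2 < 4), dest counts as overloaded and the caller
 * falls back to a weighted least-connection pass over the whole
 * service.
 */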
/*
 *    Locality-Based (weighted) Least-Connection with Replication scheduling
 */
static struct ip_vs_dest *
ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        struct ip_vs_iphdr iph;
        struct ip_vs_dest *dest = NULL;
        struct ip_vs_lblcr_entry *en;

        ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);

        IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n");

        /* First look in our cache */
        read_lock(&svc->sched_lock);
        en = ip_vs_lblcr_get(svc->af, tbl, &iph.daddr);
        if (en) {
                /* We only hold a read lock, but this is atomic */
                en->lastuse = jiffies;

                /* Get the least loaded destination */
                read_lock(&en->set.lock);
                dest = ip_vs_dest_set_min(&en->set);
                read_unlock(&en->set.lock);

                /* More than one destination + enough time passed by,
                 * cleanup */
                if (atomic_read(&en->set.size) > 1 &&
                    time_after(jiffies, en->set.lastmod +
                                sysctl_ip_vs_lblcr_expiration)) {
                        struct ip_vs_dest *m;

                        write_lock(&en->set.lock);
                        m = ip_vs_dest_set_max(&en->set);
                        if (m)
                                ip_vs_dest_set_erase(&en->set, m);
                        write_unlock(&en->set.lock);
                }

                /* If the destination is not overloaded, use it */
                if (dest && !is_overloaded(dest, svc)) {
                        read_unlock(&svc->sched_lock);
                        goto out;
                }

                /* The cache entry is invalid, time to schedule */
                dest = __ip_vs_lblcr_schedule(svc);
                if (!dest) {
                        IP_VS_ERR_RL("LBLCR: no destination available\n");
                        read_unlock(&svc->sched_lock);
                        return NULL;
                }

                /* Update our cache entry */
                write_lock(&en->set.lock);
                ip_vs_dest_set_insert(&en->set, dest);
                write_unlock(&en->set.lock);
        }
        read_unlock(&svc->sched_lock);

        if (dest)
                goto out;

        /* No cache entry, time to schedule */
        dest = __ip_vs_lblcr_schedule(svc);
        if (!dest) {
                IP_VS_DBG(1, "no destination available\n");
                return NULL;
        }

        /* If we fail to create a cache entry, we'll just use the valid dest */
        write_lock(&svc->sched_lock);
        ip_vs_lblcr_new(tbl, &iph.daddr, dest);
        write_unlock(&svc->sched_lock);

out:
        IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",
                      IP_VS_DBG_ADDR(svc->af, &iph.daddr),
                      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port));

        return dest;
}
/*
 *      IPVS LBLCR Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
{
        .name =                 "lblcr",
        .refcnt =               ATOMIC_INIT(0),
        .module =               THIS_MODULE,
        .n_list =               LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
        .init_service =         ip_vs_lblcr_init_svc,
        .done_service =         ip_vs_lblcr_done_svc,
        .schedule =             ip_vs_lblcr_schedule,
};
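/*
 * Once the module is loaded, a virtual service can be bound to this
 * scheduler by name from user space, for example (illustrative VIP,
 * assuming a standard ipvsadm installation):
 *
 *      ipvsadm -A -t 192.168.0.1:80 -s lblcr
 */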
static int __init ip_vs_lblcr_init(void)
{
        int ret;

        sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
        ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
        if (ret)
                unregister_sysctl_table(sysctl_header);
        return ret;
}
static void __exit ip_vs_lblcr_cleanup(void)
{
        unregister_sysctl_table(sysctl_header);
        unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
}
module_init(ip_vs_lblcr_init);
module_exit(ip_vs_lblcr_cleanup);
MODULE_LICENSE("GPL");