/*
 * IPVS:	Locality-Based Least-Connection with Replication scheduler
 *
 * Authors:	Wensong Zhang <wensong@gnuchina.org>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Changes:
 *	Julian Anastasov	:	Added the missing (dest->weight>0)
 *					condition in the ip_vs_dest_set_max.
 *
 */
/*
 * The lblc/r algorithm is as follows (pseudo code):
 *
 *       if serverSet[dest_ip] is null then
 *               n, serverSet[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- {least-conn (alive) node in serverSet[dest_ip]};
 *               if (n is null) OR
 *                  (n.conns>n.weight AND
 *                   there is a node m with m.conns<m.weight/2) then
 *                   n <- {weighted least-conn node};
 *                   add n to serverSet[dest_ip];
 *               if |serverSet[dest_ip]| > 1 AND
 *                   now - serverSet[dest_ip].lastMod > T then
 *                   m <- {most conn node in serverSet[dest_ip]};
 *                   remove m from serverSet[dest_ip];
 *       if serverSet[dest_ip] changed then
 *               serverSet[dest_ip].lastMod <- now;
 *
 *       return n;
 */
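
/*
 * Illustrative walk-through of the pseudo code above, with made-up
 * numbers: serverSet[dest_ip] = {A, B}, A.conns=10, A.weight=8,
 * B.conns=2, B.weight=8.  The least-conn alive node is B; since
 * B.conns(2) <= B.weight(8), no replication happens and n = B.  If
 * every member were saturated (n.conns > n.weight) while some node m
 * elsewhere had m.conns < m.weight/2, the weighted least-conn node
 * would be added to the set.  Once the set has more than one member
 * and T has elapsed since lastMod, the most loaded member is dropped.
 */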
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

#include <net/ip_vs.h>
/*
 * It is for garbage collection of stale IPVS lblcr entries,
 * when the table is full.
 */
#define CHECK_EXPIRE_INTERVAL	(60*HZ)
#define ENTRY_TIMEOUT		(6*60*HZ)

#define DEFAULT_EXPIRATION	(24*60*60*HZ)
/*
 * It is for full expiration check.
 * When there is no partial expiration check (garbage collection)
 * in a half hour, do a full expiration check to collect stale
 * entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
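
/*
 * (Derived from the values above: the periodic timer fires every
 * CHECK_EXPIRE_INTERVAL = 60 seconds, so COUNT_FOR_FULL_EXPIRATION = 30
 * idle firings correspond to the half hour mentioned above, and
 * DEFAULT_EXPIRATION = 24*60*60 seconds is the one-day threshold.)
 */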
/*
 * for IPVS lblcr entry hash table
 */
#ifndef CONFIG_IP_VS_LBLCR_TAB_BITS
#define CONFIG_IP_VS_LBLCR_TAB_BITS      10
#endif
#define IP_VS_LBLCR_TAB_BITS     CONFIG_IP_VS_LBLCR_TAB_BITS
#define IP_VS_LBLCR_TAB_SIZE     (1 << IP_VS_LBLCR_TAB_BITS)
#define IP_VS_LBLCR_TAB_MASK     (IP_VS_LBLCR_TAB_SIZE - 1)
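
/*
 * (Worked example: with the default 10 bits the table has
 * 1 << 10 = 1024 buckets and a mask of 0x3ff, and
 * ip_vs_lblcr_init_svc() below caps the table at
 * 16 * 1024 = 16384 entries before garbage collection kicks in.)
 */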
/*
 * IPVS destination set structure and operations
 */
struct ip_vs_dest_set_elem {
	struct list_head	list;		/* list link */
	struct ip_vs_dest	*dest;		/* destination server */
	struct rcu_head		rcu_head;
};

struct ip_vs_dest_set {
	atomic_t		size;		/* set size */
	unsigned long		lastmod;	/* last modified time */
	struct list_head	list;		/* destination list */
};
static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
				  struct ip_vs_dest *dest, bool check)
{
	struct ip_vs_dest_set_elem *e;

	if (check) {
		list_for_each_entry(e, &set->list, list) {
			if (e->dest == dest)
				/* already existed */
				return;
		}
	}

	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (e == NULL)
		return;

	ip_vs_dest_hold(dest);
	e->dest = dest;

	list_add_rcu(&e->list, &set->list);
	atomic_inc(&set->size);

	set->lastmod = jiffies;
}
static void ip_vs_lblcr_elem_rcu_free(struct rcu_head *head)
{
	struct ip_vs_dest_set_elem *e;

	e = container_of(head, struct ip_vs_dest_set_elem, rcu_head);
	ip_vs_dest_put(e->dest);
	kfree(e);
}
static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
	struct ip_vs_dest_set_elem *e;

	list_for_each_entry(e, &set->list, list) {
		if (e->dest == dest) {
			/* HIT */
			atomic_dec(&set->size);
			set->lastmod = jiffies;
			list_del_rcu(&e->list);
			call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
			break;
		}
	}
}
static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
{
	struct ip_vs_dest_set_elem *e, *ep;

	list_for_each_entry_safe(e, ep, &set->list, list) {
		list_del_rcu(&e->list);
		call_rcu(&e->rcu_head, ip_vs_lblcr_elem_rcu_free);
	}
}
/* get weighted least-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
{
	register struct ip_vs_dest_set_elem *e;
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	/* select the first destination server, whose weight > 0 */
	list_for_each_entry_rcu(e, &set->list, list) {
		least = e->dest;
		if (least->flags & IP_VS_DEST_F_OVERLOAD)
			continue;
		if ((atomic_read(&least->weight) > 0)
		    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
			loh = ip_vs_dest_conn_overhead(least);
			goto nextstage;
		}
	}
	return NULL;

	/* find the destination with the weighted least load */
  nextstage:
	list_for_each_entry_continue_rcu(e, &set->list, list) {
		dest = e->dest;
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;
		doh = ip_vs_dest_conn_overhead(dest);
		if (((__s64)loh * atomic_read(&dest->weight) >
		     (__s64)doh * atomic_read(&least->weight))
		    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
			least = dest;
			loh = doh;
		}
	}

	IP_VS_DBG_BUF(6, "%s(): server %s:%d "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      __func__,
		      IP_VS_DBG_ADDR(least->af, &least->addr),
		      ntohs(least->port),
		      atomic_read(&least->activeconns),
		      atomic_read(&least->refcnt),
		      atomic_read(&least->weight), loh);
	return least;
}
/* get weighted most-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
{
	register struct ip_vs_dest_set_elem *e;
	struct ip_vs_dest *dest, *most;
	int moh, doh;

	if (set == NULL)
		return NULL;

	/* select the first destination server, whose weight > 0 */
	list_for_each_entry(e, &set->list, list) {
		most = e->dest;
		if (atomic_read(&most->weight) > 0) {
			moh = ip_vs_dest_conn_overhead(most);
			goto nextstage;
		}
	}
	return NULL;

	/* find the destination with the weighted most load */
  nextstage:
	list_for_each_entry_continue(e, &set->list, list) {
		dest = e->dest;
		doh = ip_vs_dest_conn_overhead(dest);
		/* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
		if (((__s64)moh * atomic_read(&dest->weight) <
		     (__s64)doh * atomic_read(&most->weight))
		    && (atomic_read(&dest->weight) > 0)) {
			most = dest;
			moh = doh;
		}
	}

	IP_VS_DBG_BUF(6, "%s(): server %s:%d "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      __func__,
		      IP_VS_DBG_ADDR(most->af, &most->addr), ntohs(most->port),
		      atomic_read(&most->activeconns),
		      atomic_read(&most->refcnt),
		      atomic_read(&most->weight), moh);
	return most;
}
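
/*
 * (Illustrative note on the cross-multiplication above, with made-up
 * numbers: moh=50, mw=2 versus doh=30, dw=3 gives moh*dw = 150 and
 * doh*mw = 60; 150 < 60 is false, matching moh/mw = 25 > doh/dw = 10,
 * so "most" keeps the higher weighted load without any division.
 * The (dest->weight > 0) guard is the fix credited to Julian Anastasov
 * in the header: with dw = 0 the product moh*dw collapses to zero and
 * a quiesced server could otherwise win the comparison.)
 */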
/*
 * IPVS lblcr entry represents an association between destination
 * IP address and its destination server set
 */
struct ip_vs_lblcr_entry {
	struct hlist_node	list;
	int			af;		/* address family */
	union nf_inet_addr	addr;		/* destination IP address */
	struct ip_vs_dest_set	set;		/* destination server set */
	unsigned long		lastuse;	/* last used time */
	struct rcu_head		rcu_head;
};
/*
 * IPVS lblcr hash table
 */
struct ip_vs_lblcr_table {
	struct rcu_head		rcu_head;
	struct hlist_head	bucket[IP_VS_LBLCR_TAB_SIZE];	/* hash bucket */
	atomic_t		entries;	/* number of entries */
	int			max_size;	/* maximum size of entries */
	struct timer_list	periodic_timer;	/* collect stale entries */
	int			rover;		/* rover for expire check */
	int			counter;	/* counter for no expire */
	bool			dead;		/* table flushed, service gone */
};
#ifdef CONFIG_SYSCTL
/*
 * IPVS LBLCR sysctl table
 */
static struct ctl_table vs_vars_table[] = {
	{
		.procname	= "lblcr_expiration",
		.data		= NULL,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
#endif
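
/*
 * (Usage sketch, illustrative: the table is registered below under
 * "net/ipv4/vs", and proc_dointvec_jiffies converts between seconds in
 * userspace and jiffies in the kernel, so e.g.
 *	sysctl -w net.ipv4.vs.lblcr_expiration=86400
 * sets the per-entry expiration back to its one-day default.)
 */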
static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
	hlist_del_rcu(&en->list);
	ip_vs_dest_set_eraseall(&en->set);
	kfree_rcu(en, rcu_head);
}
/*
 * Returns hash value for IPVS LBLCR entry
 */
static inline unsigned int
ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
{
	__be32 addr_fold = addr->ip;

#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		addr_fold = addr->ip6[0]^addr->ip6[1]^
			    addr->ip6[2]^addr->ip6[3];
#endif
	/* 2654435761 is Knuth's golden-ratio multiplicative hash constant */
	return (ntohl(addr_fold)*2654435761UL) & IP_VS_LBLCR_TAB_MASK;
}
/*
 * Hash an entry in the ip_vs_lblcr_table.
 */
static void
ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
{
	unsigned int hash = ip_vs_lblcr_hashkey(en->af, &en->addr);

	hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
	atomic_inc(&tbl->entries);
}
/* Get ip_vs_lblcr_entry associated with supplied parameters. */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
		const union nf_inet_addr *addr)
{
	unsigned int hash = ip_vs_lblcr_hashkey(af, addr);
	struct ip_vs_lblcr_entry *en;

	hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
		if (ip_vs_addr_equal(af, &en->addr, addr))
			return en;

	return NULL;
}
/*
 * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
 * IP address to a server. Called under spin lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
		struct ip_vs_dest *dest)
{
	struct ip_vs_lblcr_entry *en;

	en = ip_vs_lblcr_get(dest->af, tbl, daddr);
	if (!en) {
		en = kmalloc(sizeof(*en), GFP_ATOMIC);
		if (!en)
			return NULL;
		en->af = dest->af;
		ip_vs_addr_copy(dest->af, &en->addr, daddr);
		en->lastuse = jiffies;

		/* initialize its dest set */
		atomic_set(&(en->set.size), 0);
		INIT_LIST_HEAD(&en->set.list);

		ip_vs_dest_set_insert(&en->set, dest, false);
		ip_vs_lblcr_hash(tbl, en);
		return en;
	}

	ip_vs_dest_set_insert(&en->set, dest, true);
	return en;
}
/*
 * Flush all the entries of the specified table.
 */
static void ip_vs_lblcr_flush(struct ip_vs_service *svc)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;
	int i;
	struct ip_vs_lblcr_entry *en;
	struct hlist_node *next;

	spin_lock_bh(&svc->sched_lock);
	tbl->dead = 1;
	for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
		hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
			ip_vs_lblcr_free(en);
		}
	}
	spin_unlock_bh(&svc->sched_lock);
}
static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
{
#ifdef CONFIG_SYSCTL
	struct netns_ipvs *ipvs = net_ipvs(svc->net);

	return ipvs->sysctl_lblcr_expiration;
#else
	return DEFAULT_EXPIRATION;
#endif
}
static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;
	unsigned long now = jiffies;
	int i, j;
	struct ip_vs_lblcr_entry *en;
	struct hlist_node *next;

	for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

		spin_lock(&svc->sched_lock);
		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
			if (time_after(en->lastuse +
				       sysctl_lblcr_expiration(svc), now))
				continue;

			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
		}
		spin_unlock(&svc->sched_lock);
	}
	tbl->rover = j;
}
/*
 * Periodical timer handler for IPVS lblcr table
 * It is used to collect stale entries when the number of entries
 * exceeds the maximum size of the table.
 *
 * Fixme: we probably need more complicated algorithm to collect
 *        entries that have not been used for a long time even
 *        if the number of entries doesn't exceed the maximum size
 *        of the table.
 * The full expiration check is for this purpose now.
 */
static void ip_vs_lblcr_check_expire(unsigned long data)
{
	struct ip_vs_service *svc = (struct ip_vs_service *) data;
	struct ip_vs_lblcr_table *tbl = svc->sched_data;
	unsigned long now = jiffies;
	int goal;
	int i, j;
	struct ip_vs_lblcr_entry *en;
	struct hlist_node *next;

	if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
		/* do full expiration check */
		ip_vs_lblcr_full_check(svc);
		tbl->counter = 1;
		goto out;
	}

	if (atomic_read(&tbl->entries) <= tbl->max_size) {
		tbl->counter++;
		goto out;
	}

	goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
	if (goal > tbl->max_size/2)
		goal = tbl->max_size/2;

	for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

		spin_lock(&svc->sched_lock);
		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
			if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
				continue;

			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
			goal--;
		}
		spin_unlock(&svc->sched_lock);
		if (goal <= 0)
			break;
	}
	tbl->rover = j;

out:
	mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}
static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
{
	int i;
	struct ip_vs_lblcr_table *tbl;

	/* Allocate the ip_vs_lblcr_table for this service */
	tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	svc->sched_data = tbl;
	IP_VS_DBG(6, "LBLCR hash table (memory=%zdbytes) allocated for "
		  "current service\n", sizeof(*tbl));

	/* Initialize the hash buckets */
	for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++)
		INIT_HLIST_HEAD(&tbl->bucket[i]);
	tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
	tbl->rover = 0;
	tbl->counter = 1;
	tbl->dead = 0;

	/* Hook periodic timer for garbage collection */
	setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire,
		    (unsigned long)svc);
	mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);

	return 0;
}
static void ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;

	/* remove periodic timer */
	del_timer_sync(&tbl->periodic_timer);

	/* got to clean up table entries here */
	ip_vs_lblcr_flush(svc);

	/* release the table itself */
	kfree_rcu(tbl, rcu_head);
	IP_VS_DBG(6, "LBLCR hash table (memory=%zdbytes) released\n",
		  sizeof(*tbl));
}
static inline struct ip_vs_dest *
__ip_vs_lblcr_schedule(struct ip_vs_service *svc)
{
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	/*
	 * We use the following formula to estimate the load:
	 *		(dest overhead) / dest->weight
	 *
	 * Remember -- no floats in kernel mode!!!
	 * The comparison of h1*w2 > h2*w1 is equivalent to that of
	 * h1/w1 > h2/w2
	 * if every weight is larger than zero.
	 *
	 * The server with weight=0 is quiesced and will not receive any
	 * new connection.
	 */
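	/*
	 * (Worked example, illustrative numbers only: h1=12, w1=4 and
	 * h2=9, w2=2.  Then h1*w2 = 24 and h2*w1 = 36, so h1*w2 > h2*w1
	 * is false, matching h1/w1 = 3 > h2/w2 = 4.5 being false -- the
	 * first server has the lower weighted load, all in integer math.)
	 */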
	list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;
		if (atomic_read(&dest->weight) > 0) {
			least = dest;
			loh = ip_vs_dest_conn_overhead(least);
			goto nextstage;
		}
	}
	return NULL;

	/* Find the destination with the least load. */
  nextstage:
	list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;
		doh = ip_vs_dest_conn_overhead(dest);
		if ((__s64)loh * atomic_read(&dest->weight) >
		    (__s64)doh * atomic_read(&least->weight)) {
			least = dest;
			loh = doh;
		}
	}

	IP_VS_DBG_BUF(6, "LBLCR: server %s:%d "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      IP_VS_DBG_ADDR(least->af, &least->addr),
		      ntohs(least->port),
		      atomic_read(&least->activeconns),
		      atomic_read(&least->refcnt),
		      atomic_read(&least->weight), loh);
	return least;
}
/*
 * If this destination server is overloaded and there is a less loaded
 * server, then return true.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
	if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
		struct ip_vs_dest *d;

		list_for_each_entry_rcu(d, &svc->destinations, n_list) {
			if (atomic_read(&d->activeconns)*2
			    < atomic_read(&d->weight)) {
				return 1;
			}
		}
	}
	return 0;
}
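
/*
 * (Illustrative example: dest has activeconns=9 > weight=8, and some
 * other server d has activeconns=1 with weight=8, so 1*2 < 8 holds and
 * dest is reported overloaded; activeconns*2 < weight is the
 * "m.conns < m.weight/2" test from the pseudo code at the top.)
 */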
/*
 * Locality-Based (weighted) Least-Connection with Replication scheduling
 */
static struct ip_vs_dest *
ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
		     struct ip_vs_iphdr *iph)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;
	struct ip_vs_dest *dest;
	struct ip_vs_lblcr_entry *en;

	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

	/* First look in our cache */
	en = ip_vs_lblcr_get(svc->af, tbl, &iph->daddr);
	if (en) {
		en->lastuse = jiffies;

		/* Get the least loaded destination */
		dest = ip_vs_dest_set_min(&en->set);

		/* More than one destination + enough time passed by, cleanup */
		if (atomic_read(&en->set.size) > 1 &&
		    time_after(jiffies, en->set.lastmod +
				sysctl_lblcr_expiration(svc))) {
			spin_lock_bh(&svc->sched_lock);
			if (atomic_read(&en->set.size) > 1) {
				struct ip_vs_dest *m;

				/* Drop the most loaded replica */
				m = ip_vs_dest_set_max(&en->set);
				if (m)
					ip_vs_dest_set_erase(&en->set, m);
			}
			spin_unlock_bh(&svc->sched_lock);
		}

		/* If the destination is not overloaded, use it */
		if (dest && !is_overloaded(dest, svc))
			goto out;

		/* The cache entry is invalid, time to schedule */
		dest = __ip_vs_lblcr_schedule(svc);
		if (!dest) {
			ip_vs_scheduler_err(svc, "no destination available");
			return NULL;
		}

		/* Update our cache entry */
		spin_lock_bh(&svc->sched_lock);
		if (!tbl->dead)
			ip_vs_dest_set_insert(&en->set, dest, true);
		spin_unlock_bh(&svc->sched_lock);
		goto out;
	}

	/* No cache entry, time to schedule */
	dest = __ip_vs_lblcr_schedule(svc);
	if (!dest) {
		IP_VS_DBG(1, "no destination available\n");
		return NULL;
	}

	/* If we fail to create a cache entry, we'll just use the valid dest */
	spin_lock_bh(&svc->sched_lock);
	if (!tbl->dead)
		ip_vs_lblcr_new(tbl, &iph->daddr, dest);
	spin_unlock_bh(&svc->sched_lock);

out:
	IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",
		      IP_VS_DBG_ADDR(svc->af, &iph->daddr),
		      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port));

	return dest;
}
/*
 * IPVS LBLCR Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
{
	.name =			"lblcr",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
	.n_list =		LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
	.init_service =		ip_vs_lblcr_init_svc,
	.done_service =		ip_vs_lblcr_done_svc,
	.schedule =		ip_vs_lblcr_schedule,
};
/*
 * per netns init.
 */
#ifdef CONFIG_SYSCTL
static int __net_init __ip_vs_lblcr_init(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	if (!ipvs)
		return -ENOENT;

	if (!net_eq(net, &init_net)) {
		ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
						sizeof(vs_vars_table),
						GFP_KERNEL);
		if (ipvs->lblcr_ctl_table == NULL)
			return -ENOMEM;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			ipvs->lblcr_ctl_table[0].procname = NULL;
	} else
		ipvs->lblcr_ctl_table = vs_vars_table;
	ipvs->sysctl_lblcr_expiration = DEFAULT_EXPIRATION;
	ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;

	ipvs->lblcr_ctl_header =
		register_net_sysctl(net, "net/ipv4/vs", ipvs->lblcr_ctl_table);
	if (!ipvs->lblcr_ctl_header) {
		if (!net_eq(net, &init_net))
			kfree(ipvs->lblcr_ctl_table);
		return -ENOMEM;
	}

	return 0;
}

static void __net_exit __ip_vs_lblcr_exit(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	unregister_net_sysctl_table(ipvs->lblcr_ctl_header);

	if (!net_eq(net, &init_net))
		kfree(ipvs->lblcr_ctl_table);
}
#else

static int __net_init __ip_vs_lblcr_init(struct net *net) { return 0; }
static void __net_exit __ip_vs_lblcr_exit(struct net *net) { }

#endif
static struct pernet_operations ip_vs_lblcr_ops = {
	.init = __ip_vs_lblcr_init,
	.exit = __ip_vs_lblcr_exit,
};
static int __init ip_vs_lblcr_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_vs_lblcr_ops);
	if (ret)
		return ret;

	ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
	if (ret)
		unregister_pernet_subsys(&ip_vs_lblcr_ops);
	return ret;
}
static void __exit ip_vs_lblcr_cleanup(void)
{
	unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
	unregister_pernet_subsys(&ip_vs_lblcr_ops);
	/* wait for outstanding call_rcu() callbacks before unload */
	rcu_barrier();
}
module_init(ip_vs_lblcr_init);
module_exit(ip_vs_lblcr_cleanup);
MODULE_LICENSE("GPL");