Hi Wensong, should I add an Acked-by? On Wed, Feb 23, 2011 at 09:56:54AM +0800, Wensong Zhang wrote: > Sure, I am ok with this patch. Thanks! > > On Tue, Feb 22, 2011 at 1:56 PM, Simon Horman <horms@xxxxxxxxxxxx> wrote: > > On Sat, Feb 19, 2011 at 05:32:28PM +0800, Changli Gao wrote: > >> lc and wlc use the same formula, but lblc and lblcr use another one. There > >> is no reason for using two different formulas for the lc variants. > >> > >> The formula used by lc is used by all the lc variants in this patch. > > > > Wensong, are you ok with this version of the patch? > > > >> > >> Signed-off-by: Changli Gao <xiaosuo@xxxxxxxxx> > >> --- > >> v2: use ip_vs_dest_conn_overhead() instead. > >> include/net/ip_vs.h        |  14 ++++++++++++++ > >> net/netfilter/ipvs/ip_vs_lblc.c  |  13 +++---------- > >> net/netfilter/ipvs/ip_vs_lblcr.c |  25 +++++++------------------ > >> net/netfilter/ipvs/ip_vs_lc.c   |  18 +----------------- > >> net/netfilter/ipvs/ip_vs_wlc.c  |  20 ++------------------ > >> 5 files changed, 27 insertions(+), 63 deletions(-) > >> diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h > >> index 5d75fea..e80ffb7 100644 > >> --- a/include/net/ip_vs.h > >> +++ b/include/net/ip_vs.h > >> @@ -1241,6 +1241,20 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) > >> /* CONFIG_IP_VS_NFCT */ > >> #endif > >> > >> +static inline unsigned int > >> +ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) > >> +{ > >> +   /* > >> +   * We think the overhead of processing active connections is 256 > >> +   * times higher than that of inactive connections in average. 
(This > >> +   * 256 times might not be accurate, we will change it later) We > >> +   * use the following formula to estimate the overhead now: > >> +   *        dest->activeconns*256 + dest->inactconns > >> +   */ > >> +   return (atomic_read(&dest->activeconns) << 8) + > >> +       atomic_read(&dest->inactconns); > >> +} > >> + > >> #endif /* __KERNEL__ */ > >> > >> #endif    /* _NET_IP_VS_H */ > >> diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c > >> index 00b5ffa..58ae403 100644 > >> --- a/net/netfilter/ipvs/ip_vs_lblc.c > >> +++ b/net/netfilter/ipvs/ip_vs_lblc.c > >> @@ -389,12 +389,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc) > >>    int loh, doh; > >> > >>    /* > >> -   * We think the overhead of processing active connections is fifty > >> -   * times higher than that of inactive connections in average. (This > >> -   * fifty times might not be accurate, we will change it later.) We > >> -   * use the following formula to estimate the overhead: > >> -   *        dest->activeconns*50 + dest->inactconns > >> -   * and the load: > >> +   * We use the following formula to estimate the load: > >>    *        (dest overhead) / dest->weight > >>    * > >>    * Remember -- no floats in kernel mode!!! 
> >> @@ -410,8 +405,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc) > >>            continue; > >>        if (atomic_read(&dest->weight) > 0) { > >>            least = dest; > >> -           loh = atomic_read(&least->activeconns) * 50 > >> -               + atomic_read(&least->inactconns); > >> +           loh = ip_vs_dest_conn_overhead(least); > >>            goto nextstage; > >>        } > >>    } > >> @@ -425,8 +419,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc) > >>        if (dest->flags & IP_VS_DEST_F_OVERLOAD) > >>            continue; > >> > >> -       doh = atomic_read(&dest->activeconns) * 50 > >> -           + atomic_read(&dest->inactconns); > >> +       doh = ip_vs_dest_conn_overhead(dest); > >>        if (loh * atomic_read(&dest->weight) > > >>          doh * atomic_read(&least->weight)) { > >>            least = dest; > >> diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c > >> index bfa25f1..2ddefe8 100644 > >> --- a/net/netfilter/ipvs/ip_vs_lblcr.c > >> +++ b/net/netfilter/ipvs/ip_vs_lblcr.c > >> @@ -178,8 +178,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set) > >> > >>        if ((atomic_read(&least->weight) > 0) > >>          && (least->flags & IP_VS_DEST_F_AVAILABLE)) { > >> -           loh = atomic_read(&least->activeconns) * 50 > >> -               + atomic_read(&least->inactconns); > >> +           loh = ip_vs_dest_conn_overhead(least); > >>            goto nextstage; > >>        } > >>    } > >> @@ -192,8 +191,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set) > >>        if (dest->flags & IP_VS_DEST_F_OVERLOAD) > >>            continue; > >> > >> -       doh = atomic_read(&dest->activeconns) * 50 > >> -           + atomic_read(&dest->inactconns); > >> +       doh = ip_vs_dest_conn_overhead(dest); > >>        if ((loh * atomic_read(&dest->weight) > > >>          doh * atomic_read(&least->weight)) > >>          && (dest->flags & 
IP_VS_DEST_F_AVAILABLE)) { > >> @@ -228,8 +226,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set) > >>    list_for_each_entry(e, &set->list, list) { > >>        most = e->dest; > >>        if (atomic_read(&most->weight) > 0) { > >> -           moh = atomic_read(&most->activeconns) * 50 > >> -               + atomic_read(&most->inactconns); > >> +           moh = ip_vs_dest_conn_overhead(most); > >>            goto nextstage; > >>        } > >>    } > >> @@ -239,8 +236,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set) > >>   nextstage: > >>    list_for_each_entry(e, &set->list, list) { > >>        dest = e->dest; > >> -       doh = atomic_read(&dest->activeconns) * 50 > >> -           + atomic_read(&dest->inactconns); > >> +       doh = ip_vs_dest_conn_overhead(dest); > >>        /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */ > >>        if ((moh * atomic_read(&dest->weight) < > >>          doh * atomic_read(&most->weight)) > >> @@ -563,12 +559,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc) > >>    int loh, doh; > >> > >>    /* > >> -   * We think the overhead of processing active connections is fifty > >> -   * times higher than that of inactive connections in average. (This > >> -   * fifty times might not be accurate, we will change it later.) We > >> -   * use the following formula to estimate the overhead: > >> -   *        dest->activeconns*50 + dest->inactconns > >> -   * and the load: > >> +   * We use the following formula to estimate the load: > >>    *        (dest overhead) / dest->weight > >>    * > >>    * Remember -- no floats in kernel mode!!! 
> >> @@ -585,8 +576,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc) > >> > >>        if (atomic_read(&dest->weight) > 0) { > >>            least = dest; > >> -           loh = atomic_read(&least->activeconns) * 50 > >> -               + atomic_read(&least->inactconns); > >> +           loh = ip_vs_dest_conn_overhead(least); > >>            goto nextstage; > >>        } > >>    } > >> @@ -600,8 +590,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc) > >>        if (dest->flags & IP_VS_DEST_F_OVERLOAD) > >>            continue; > >> > >> -       doh = atomic_read(&dest->activeconns) * 50 > >> -           + atomic_read(&dest->inactconns); > >> +       doh = ip_vs_dest_conn_overhead(dest); > >>        if (loh * atomic_read(&dest->weight) > > >>          doh * atomic_read(&least->weight)) { > >>            least = dest; > >> diff --git a/net/netfilter/ipvs/ip_vs_lc.c b/net/netfilter/ipvs/ip_vs_lc.c > >> index 4f69db1..160cb80 100644 > >> --- a/net/netfilter/ipvs/ip_vs_lc.c > >> +++ b/net/netfilter/ipvs/ip_vs_lc.c > >> @@ -22,22 +22,6 @@ > >> > >> #include <net/ip_vs.h> > >> > >> - > >> -static inline unsigned int > >> -ip_vs_lc_dest_overhead(struct ip_vs_dest *dest) > >> -{ > >> -   /* > >> -   * We think the overhead of processing active connections is 256 > >> -   * times higher than that of inactive connections in average. 
(This > >> -   * 256 times might not be accurate, we will change it later) We > >> -   * use the following formula to estimate the overhead now: > >> -   *        dest->activeconns*256 + dest->inactconns > >> -   */ > >> -   return (atomic_read(&dest->activeconns) << 8) + > >> -       atomic_read(&dest->inactconns); > >> -} > >> - > >> - > >> /* > >>  *  Least Connection scheduling > >>  */ > >> @@ -62,7 +46,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) > >>        if ((dest->flags & IP_VS_DEST_F_OVERLOAD) || > >>          atomic_read(&dest->weight) == 0) > >>            continue; > >> -       doh = ip_vs_lc_dest_overhead(dest); > >> +       doh = ip_vs_dest_conn_overhead(dest); > >>        if (!least || doh < loh) { > >>            least = dest; > >>            loh = doh; > >> diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c > >> index bbddfdb..db751f5 100644 > >> --- a/net/netfilter/ipvs/ip_vs_wlc.c > >> +++ b/net/netfilter/ipvs/ip_vs_wlc.c > >> @@ -27,22 +27,6 @@ > >> > >> #include <net/ip_vs.h> > >> > >> - > >> -static inline unsigned int > >> -ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest) > >> -{ > >> -   /* > >> -   * We think the overhead of processing active connections is 256 > >> -   * times higher than that of inactive connections in average. 
(This > >> -   * 256 times might not be accurate, we will change it later) We > >> -   * use the following formula to estimate the overhead now: > >> -   *        dest->activeconns*256 + dest->inactconns > >> -   */ > >> -   return (atomic_read(&dest->activeconns) << 8) + > >> -       atomic_read(&dest->inactconns); > >> -} > >> - > >> - > >> /* > >>  *  Weighted Least Connection scheduling > >>  */ > >> @@ -71,7 +55,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) > >>        if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) && > >>          atomic_read(&dest->weight) > 0) { > >>            least = dest; > >> -           loh = ip_vs_wlc_dest_overhead(least); > >> +           loh = ip_vs_dest_conn_overhead(least); > >>            goto nextstage; > >>        } > >>    } > >> @@ -85,7 +69,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) > >>    list_for_each_entry_continue(dest, &svc->destinations, n_list) { > >>        if (dest->flags & IP_VS_DEST_F_OVERLOAD) > >>            continue; > >> -       doh = ip_vs_wlc_dest_overhead(dest); > >> +       doh = ip_vs_dest_conn_overhead(dest); > >>        if (loh * atomic_read(&dest->weight) > > >>          doh * atomic_read(&least->weight)) { > >>            least = dest; > >> > > > -- To unsubscribe from this list: send the line "unsubscribe lvs-devel" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html