From foo@baz Mon May 16 11:20:33 PDT 2016
From: WANG Cong <xiyou.wangcong@gmail.com>
Date: Thu, 25 Feb 2016 14:55:01 -0800
Subject: net_sched: update hierarchical backlog too

From: WANG Cong <xiyou.wangcong@gmail.com>

[ Upstream commit 2ccccf5fb43ff62b2b96cc58d95fc0b3596516e4 ]

When the bottom qdisc decides to, for example, drop some packet,
it calls qdisc_tree_decrease_qlen() to update the queue length
for all of its ancestors; we need to update the backlog too to
keep the stats on the root qdisc accurate.
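
For example, call sites that flush a child qdisc must now snapshot
both counters before qdisc_reset() clears them, and then propagate
the deltas up the tree. A minimal sketch of that pattern follows
(purge_queue() is an illustrative stand-in for helpers such as
drr_purge_queue() converted below):

    static void purge_queue(struct Qdisc *qdisc)
    {
            /* snapshot before qdisc_reset() zeroes both counters */
            unsigned int qlen = qdisc->q.qlen;
            unsigned int backlog = qdisc->qstats.backlog;

            qdisc_reset(qdisc);
            qdisc_tree_reduce_backlog(qdisc, qlen, backlog);
    }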

Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
include/net/codel.h | 4 ++++
include/net/sch_generic.h | 5 +++--
net/sched/sch_api.c | 8 +++++---
net/sched/sch_cbq.c | 5 +++--
net/sched/sch_choke.c | 6 ++++--
net/sched/sch_codel.c | 10 ++++++----
net/sched/sch_drr.c | 3 ++-
net/sched/sch_fq.c | 4 +++-
net/sched/sch_fq_codel.c | 17 ++++++++++++-----
net/sched/sch_hfsc.c | 3 ++-
net/sched/sch_hhf.c | 10 +++++++---
net/sched/sch_htb.c | 10 ++++++----
net/sched/sch_multiq.c | 8 +++++---
net/sched/sch_netem.c | 3 ++-
net/sched/sch_pie.c | 5 +++--
net/sched/sch_prio.c | 7 ++++---
net/sched/sch_qfq.c | 3 ++-
net/sched/sch_red.c | 3 ++-
net/sched/sch_sfb.c | 3 ++-
net/sched/sch_sfq.c | 16 +++++++++-------
net/sched/sch_tbf.c | 7 +++++--
21 files changed, 91 insertions(+), 49 deletions(-)

--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -162,12 +162,14 @@ struct codel_vars {
 * struct codel_stats - contains codel shared variables and stats
 * @maxpacket: largest packet we've seen so far
 * @drop_count: temp count of dropped packets in dequeue()
+ * @drop_len: bytes of dropped packets in dequeue()
 * ecn_mark: number of packets we ECN marked instead of dropping
 * ce_mark: number of packets CE marked because sojourn time was above ce_threshold
 */
struct codel_stats {
u32 maxpacket;
u32 drop_count;
+ u32 drop_len;
u32 ecn_mark;
u32 ce_mark;
};
@@ -308,6 +310,7 @@ static struct sk_buff *codel_dequeue(str
vars->rec_inv_sqrt);
goto end;
}
+ stats->drop_len += qdisc_pkt_len(skb);
qdisc_drop(skb, sch);
stats->drop_count++;
skb = dequeue_func(vars, sch);
@@ -330,6 +333,7 @@ static struct sk_buff *codel_dequeue(str
if (params->ecn && INET_ECN_set_ce(skb)) {
stats->ecn_mark++;
} else {
+ stats->drop_len += qdisc_pkt_len(skb);
qdisc_drop(skb, sch);
stats->drop_count++;

--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -396,7 +396,8 @@ struct Qdisc *dev_graft_qdisc(struct net
struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
-void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
+void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
+ unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
@@ -716,7 +717,7 @@ static inline struct Qdisc *qdisc_replac
old = *pold;
*pold = new;
if (old != NULL) {
- qdisc_tree_decrease_qlen(old, old->q.qlen);
+ qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
qdisc_reset(old);
}
sch_tree_unlock(sch);
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -744,14 +744,15 @@ static u32 qdisc_alloc_handle(struct net
return 0;
}

-void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
+void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
+ unsigned int len)
{
const struct Qdisc_class_ops *cops;
unsigned long cl;
u32 parentid;
int drops;

- if (n == 0)
+ if (n == 0 && len == 0)
return;
drops = max_t(int, n, 0);
rcu_read_lock();
@@ -774,11 +775,12 @@ void qdisc_tree_decrease_qlen(struct Qdi
cops->put(sch, cl);
}
sch->q.qlen -= n;
+ sch->qstats.backlog -= len;
__qdisc_qstats_drop(sch, drops);
}
rcu_read_unlock();
}
-EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
+EXPORT_SYMBOL(qdisc_tree_reduce_backlog);

static void notify_and_destroy(struct net *net, struct sk_buff *skb,
struct nlmsghdr *n, u32 clid,
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1909,7 +1909,7 @@ static int cbq_delete(struct Qdisc *sch,
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class *)arg;
- unsigned int qlen;
+ unsigned int qlen, backlog;

if (cl->filters || cl->children || cl == &q->link)
return -EBUSY;
@@ -1917,8 +1917,9 @@ static int cbq_delete(struct Qdisc *sch,
sch_tree_lock(sch);

qlen = cl->q->q.qlen;
+ backlog = cl->q->qstats.backlog;
qdisc_reset(cl->q);
- qdisc_tree_decrease_qlen(cl->q, qlen);
+ qdisc_tree_reduce_backlog(cl->q, qlen, backlog);

if (cl->next_alive)
cbq_deactivate_class(cl);
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -128,8 +128,8 @@ static void choke_drop_by_idx(struct Qdi
choke_zap_tail_holes(q);

qdisc_qstats_backlog_dec(sch, skb);
+ qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_drop(skb, sch);
- qdisc_tree_decrease_qlen(sch, 1);
--sch->q.qlen;
}

@@ -456,6 +456,7 @@ static int choke_change(struct Qdisc *sc
old = q->tab;
if (old) {
unsigned int oqlen = sch->q.qlen, tail = 0;
+ unsigned dropped = 0;

while (q->head != q->tail) {
struct sk_buff *skb = q->tab[q->head];
@@ -467,11 +468,12 @@ static int choke_change(struct Qdisc *sc
ntab[tail++] = skb;
continue;
}
+ dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
--sch->q.qlen;
qdisc_drop(skb, sch);
}
- qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
+ qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
q->head = 0;
q->tail = tail;
}
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -79,12 +79,13 @@ static struct sk_buff *codel_qdisc_deque

skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);

- /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+ /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
* or HTB crashes. Defer it for next round.
*/
if (q->stats.drop_count && sch->q.qlen) {
- qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
+ qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
q->stats.drop_count = 0;
+ q->stats.drop_len = 0;
}
if (skb)
qdisc_bstats_update(sch, skb);
@@ -116,7 +117,7 @@ static int codel_change(struct Qdisc *sc
{
struct codel_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_CODEL_MAX + 1];
- unsigned int qlen;
+ unsigned int qlen, dropped = 0;
int err;

if (!opt)
@@ -156,10 +157,11 @@ static int codel_change(struct Qdisc *sc
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = __skb_dequeue(&sch->q);

+ dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
qdisc_drop(skb, sch);
}
- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

sch_tree_unlock(sch);
return 0;
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -53,9 +53,10 @@ static struct drr_class *drr_find_class(
static void drr_purge_queue(struct drr_class *cl)
{
unsigned int len = cl->qdisc->q.qlen;
+ unsigned int backlog = cl->qdisc->qstats.backlog;

qdisc_reset(cl->qdisc);
- qdisc_tree_decrease_qlen(cl->qdisc, len);
+ qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}

static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -662,6 +662,7 @@ static int fq_change(struct Qdisc *sch,
struct fq_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_FQ_MAX + 1];
int err, drop_count = 0;
+ unsigned drop_len = 0;
u32 fq_log;

if (!opt)
@@ -736,10 +737,11 @@ static int fq_change(struct Qdisc *sch,

if (!skb)
break;
+ drop_len += qdisc_pkt_len(skb);
kfree_skb(skb);
drop_count++;
}
- qdisc_tree_decrease_qlen(sch, drop_count);
+ qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

sch_tree_unlock(sch);
return err;
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -175,7 +175,7 @@ static unsigned int fq_codel_qdisc_drop(
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
- unsigned int idx;
+ unsigned int idx, prev_backlog;
struct fq_codel_flow *flow;
int uninitialized_var(ret);

@@ -203,6 +203,7 @@ static int fq_codel_enqueue(struct sk_bu
if (++sch->q.qlen <= sch->limit)
return NET_XMIT_SUCCESS;

+ prev_backlog = sch->qstats.backlog;
q->drop_overlimit++;
/* Return Congestion Notification only if we dropped a packet
* from this flow.
@@ -211,7 +212,7 @@ static int fq_codel_enqueue(struct sk_bu
return NET_XMIT_CN;

/* As we dropped a packet, better let upper stack know this */
- qdisc_tree_decrease_qlen(sch, 1);
+ qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
return NET_XMIT_SUCCESS;
}

@@ -241,6 +242,7 @@ static struct sk_buff *fq_codel_dequeue(
struct fq_codel_flow *flow;
struct list_head *head;
u32 prev_drop_count, prev_ecn_mark;
+ unsigned int prev_backlog;

begin:
head = &q->new_flows;
@@ -259,6 +261,7 @@ begin:

prev_drop_count = q->cstats.drop_count;
prev_ecn_mark = q->cstats.ecn_mark;
+ prev_backlog = sch->qstats.backlog;

skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
dequeue);
@@ -276,12 +279,14 @@ begin:
}
qdisc_bstats_update(sch, skb);
flow->deficit -= qdisc_pkt_len(skb);
- /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
+ /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
* or HTB crashes. Defer it for next round.
*/
if (q->cstats.drop_count && sch->q.qlen) {
- qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
+ q->cstats.drop_len);
q->cstats.drop_count = 0;
+ q->cstats.drop_len = 0;
}
return skb;
}
@@ -372,11 +377,13 @@ static int fq_codel_change(struct Qdisc
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = fq_codel_dequeue(sch);

+ q->cstats.drop_len += qdisc_pkt_len(skb);
kfree_skb(skb);
q->cstats.drop_count++;
}
- qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
+ qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
q->cstats.drop_count = 0;
+ q->cstats.drop_len = 0;

sch_tree_unlock(sch);
return 0;
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -895,9 +895,10 @@ static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
unsigned int len = cl->qdisc->q.qlen;
+ unsigned int backlog = cl->qdisc->qstats.backlog;

qdisc_reset(cl->qdisc);
- qdisc_tree_decrease_qlen(cl->qdisc, len);
+ qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}

static void
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -382,6 +382,7 @@ static int hhf_enqueue(struct sk_buff *s
struct hhf_sched_data *q = qdisc_priv(sch);
enum wdrr_bucket_idx idx;
struct wdrr_bucket *bucket;
+ unsigned int prev_backlog;

idx = hhf_classify(skb, sch);

@@ -409,6 +410,7 @@ static int hhf_enqueue(struct sk_buff *s
if (++sch->q.qlen <= sch->limit)
return NET_XMIT_SUCCESS;

+ prev_backlog = sch->qstats.backlog;
q->drop_overlimit++;
/* Return Congestion Notification only if we dropped a packet from this
* bucket.
@@ -417,7 +419,7 @@ static int hhf_enqueue(struct sk_buff *s
return NET_XMIT_CN;

/* As we dropped a packet, better let upper stack know this. */
- qdisc_tree_decrease_qlen(sch, 1);
+ qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
return NET_XMIT_SUCCESS;
}

@@ -527,7 +529,7 @@ static int hhf_change(struct Qdisc *sch,
{
struct hhf_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_HHF_MAX + 1];
- unsigned int qlen;
+ unsigned int qlen, prev_backlog;
int err;
u64 non_hh_quantum;
u32 new_quantum = q->quantum;
@@ -577,12 +579,14 @@ static int hhf_change(struct Qdisc *sch,
}

qlen = sch->q.qlen;
+ prev_backlog = sch->qstats.backlog;
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = hhf_dequeue(sch);

kfree_skb(skb);
}
- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
+ prev_backlog - sch->qstats.backlog);

sch_tree_unlock(sch);
return 0;
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1265,7 +1265,6 @@ static int htb_delete(struct Qdisc *sch,
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
- unsigned int qlen;
struct Qdisc *new_q = NULL;
int last_child = 0;

@@ -1285,9 +1284,11 @@ static int htb_delete(struct Qdisc *sch,
sch_tree_lock(sch);

if (!cl->level) {
- qlen = cl->un.leaf.q->q.qlen;
+ unsigned int qlen = cl->un.leaf.q->q.qlen;
+ unsigned int backlog = cl->un.leaf.q->qstats.backlog;
+
qdisc_reset(cl->un.leaf.q);
- qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
+ qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
}

/* delete from hash and active; remainder in destroy_class */
@@ -1421,10 +1422,11 @@ static int htb_change_class(struct Qdisc
sch_tree_lock(sch);
if (parent && !parent->level) {
unsigned int qlen = parent->un.leaf.q->q.qlen;
+ unsigned int backlog = parent->un.leaf.q->qstats.backlog;

/* turn parent into inner node */
qdisc_reset(parent->un.leaf.q);
- qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
+ qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
qdisc_destroy(parent->un.leaf.q);
if (parent->prio_activity)
htb_deactivate(q, parent);
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -218,7 +218,8 @@ static int multiq_tune(struct Qdisc *sch
if (q->queues[i] != &noop_qdisc) {
struct Qdisc *child = q->queues[i];
q->queues[i] = &noop_qdisc;
- qdisc_tree_decrease_qlen(child, child->q.qlen);
+ qdisc_tree_reduce_backlog(child, child->q.qlen,
+ child->qstats.backlog);
qdisc_destroy(child);
}
}
@@ -238,8 +239,9 @@ static int multiq_tune(struct Qdisc *sch
q->queues[i] = child;

if (old != &noop_qdisc) {
- qdisc_tree_decrease_qlen(old,
- old->q.qlen);
+ qdisc_tree_reduce_backlog(old,
+ old->q.qlen,
+ old->qstats.backlog);
qdisc_destroy(old);
}
sch_tree_unlock(sch);
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -598,7 +598,8 @@ deliver:
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
qdisc_qstats_drop(sch);
- qdisc_tree_decrease_qlen(sch, 1);
+ qdisc_tree_reduce_backlog(sch, 1,
+ qdisc_pkt_len(skb));
}
}
goto tfifo_dequeue;
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -183,7 +183,7 @@ static int pie_change(struct Qdisc *sch,
{
struct pie_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_PIE_MAX + 1];
- unsigned int qlen;
+ unsigned int qlen, dropped = 0;
int err;

if (!opt)
@@ -232,10 +232,11 @@ static int pie_change(struct Qdisc *sch,
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = __skb_dequeue(&sch->q);

+ dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
qdisc_drop(skb, sch);
}
- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

sch_tree_unlock(sch);
return 0;
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -191,7 +191,7 @@ static int prio_tune(struct Qdisc *sch,
struct Qdisc *child = q->queues[i];
q->queues[i] = &noop_qdisc;
if (child != &noop_qdisc) {
- qdisc_tree_decrease_qlen(child, child->q.qlen);
+ qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
qdisc_destroy(child);
}
}
@@ -210,8 +210,9 @@ static int prio_tune(struct Qdisc *sch,
q->queues[i] = child;

if (old != &noop_qdisc) {
- qdisc_tree_decrease_qlen(old,
- old->q.qlen);
+ qdisc_tree_reduce_backlog(old,
+ old->q.qlen,
+ old->qstats.backlog);
qdisc_destroy(old);
}
sch_tree_unlock(sch);
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -220,9 +220,10 @@ static struct qfq_class *qfq_find_class(
static void qfq_purge_queue(struct qfq_class *cl)
{
unsigned int len = cl->qdisc->q.qlen;
+ unsigned int backlog = cl->qdisc->qstats.backlog;

qdisc_reset(cl->qdisc);
- qdisc_tree_decrease_qlen(cl->qdisc, len);
+ qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}

static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -210,7 +210,8 @@ static int red_change(struct Qdisc *sch,
q->flags = ctl->flags;
q->limit = ctl->limit;
if (child) {
- qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+ q->qdisc->qstats.backlog);
qdisc_destroy(q->qdisc);
q->qdisc = child;
}
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -510,7 +510,8 @@ static int sfb_change(struct Qdisc *sch,

sch_tree_lock(sch);

- qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+ q->qdisc->qstats.backlog);
qdisc_destroy(q->qdisc);
q->qdisc = child;

--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -346,7 +346,7 @@ static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct sfq_sched_data *q = qdisc_priv(sch);
- unsigned int hash;
+ unsigned int hash, dropped;
sfq_index x, qlen;
struct sfq_slot *slot;
int uninitialized_var(ret);
@@ -461,7 +461,7 @@ enqueue:
return NET_XMIT_SUCCESS;

qlen = slot->qlen;
- sfq_drop(sch);
+ dropped = sfq_drop(sch);
/* Return Congestion Notification only if we dropped a packet
* from this flow.
*/
@@ -469,7 +469,7 @@ enqueue:
return NET_XMIT_CN;

/* As we dropped a packet, better let upper stack know this */
- qdisc_tree_decrease_qlen(sch, 1);
+ qdisc_tree_reduce_backlog(sch, 1, dropped);
return NET_XMIT_SUCCESS;
}

@@ -537,6 +537,7 @@ static void sfq_rehash(struct Qdisc *sch
struct sfq_slot *slot;
struct sk_buff_head list;
int dropped = 0;
+ unsigned int drop_len = 0;

__skb_queue_head_init(&list);

@@ -565,6 +566,7 @@ static void sfq_rehash(struct Qdisc *sch
if (x >= SFQ_MAX_FLOWS) {
drop:
qdisc_qstats_backlog_dec(sch, skb);
+ drop_len += qdisc_pkt_len(skb);
kfree_skb(skb);
dropped++;
continue;
@@ -594,7 +596,7 @@ drop:
}
}
sch->q.qlen -= dropped;
- qdisc_tree_decrease_qlen(sch, dropped);
+ qdisc_tree_reduce_backlog(sch, dropped, drop_len);
}

static void sfq_perturbation(unsigned long arg)
@@ -618,7 +620,7 @@ static int sfq_change(struct Qdisc *sch,
struct sfq_sched_data *q = qdisc_priv(sch);
struct tc_sfq_qopt *ctl = nla_data(opt);
struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
- unsigned int qlen;
+ unsigned int qlen, dropped = 0;
struct red_parms *p = NULL;

if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
@@ -667,8 +669,8 @@ static int sfq_change(struct Qdisc *sch,

qlen = sch->q.qlen;
while (sch->q.qlen > q->limit)
- sfq_drop(sch);
- qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
+ dropped += sfq_drop(sch);
+ qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

del_timer(&q->perturb_timer);
if (q->perturb_period) {
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -160,6 +160,7 @@ static int tbf_segment(struct sk_buff *s
struct tbf_sched_data *q = qdisc_priv(sch);
struct sk_buff *segs, *nskb;
netdev_features_t features = netif_skb_features(skb);
+ unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
int ret, nb;

segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
@@ -172,6 +173,7 @@ static int tbf_segment(struct sk_buff *s
nskb = segs->next;
segs->next = NULL;
qdisc_skb_cb(segs)->pkt_len = segs->len;
+ len += segs->len;
ret = qdisc_enqueue(segs, q->qdisc);
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
@@ -183,7 +185,7 @@ static int tbf_segment(struct sk_buff *s
}
sch->q.qlen += nb;
if (nb > 1)
- qdisc_tree_decrease_qlen(sch, 1 - nb);
+ qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
consume_skb(skb);
return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
@@ -399,7 +401,8 @@ static int tbf_change(struct Qdisc *sch,

sch_tree_lock(sch);
if (child) {
- qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
+ qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
+ q->qdisc->qstats.backlog);
qdisc_destroy(q->qdisc);
q->qdisc = child;
}