| From foo@baz Thu 06 Feb 2020 06:56:59 AM GMT |
| From: Cong Wang <xiyou.wangcong@gmail.com> |
| Date: Sun, 2 Feb 2020 21:14:35 -0800 |
| Subject: net_sched: fix an OOB access in cls_tcindex |
| |
| From: Cong Wang <xiyou.wangcong@gmail.com> |
| |
| [ Upstream commit 599be01ee567b61f4471ee8078870847d0a11e8e ] |
| |
| As Eric noticed, tcindex_alloc_perfect_hash() uses cp->hash |
| to compute the size of the memory allocation, but cp->hash is |
| set again after the allocation; this caused an out-of-bounds |
| access. |
| |
| So we have to move all cp->hash initialization and computation |
| before the memory allocation. Move cp->mask and cp->shift together |
| as cp->hash may need them for computation too. |
| |
| Reported-and-tested-by: syzbot+35d4dea36c387813ed31@syzkaller.appspotmail.com |
| Fixes: 331b72922c5f ("net: sched: RCU cls_tcindex") |
| Cc: Eric Dumazet <eric.dumazet@gmail.com> |
| Cc: John Fastabend <john.fastabend@gmail.com> |
| Cc: Jamal Hadi Salim <jhs@mojatatu.com> |
| Cc: Jiri Pirko <jiri@resnulli.us> |
| Cc: Jakub Kicinski <kuba@kernel.org> |
| Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com> |
| Signed-off-by: David S. Miller <davem@davemloft.net> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| --- |
| net/sched/cls_tcindex.c | 40 ++++++++++++++++++++-------------------- |
| 1 file changed, 20 insertions(+), 20 deletions(-) |
| |
| --- a/net/sched/cls_tcindex.c |
| +++ b/net/sched/cls_tcindex.c |
| @@ -333,12 +333,31 @@ tcindex_set_parms(struct net *net, struc |
| cp->fall_through = p->fall_through; |
| cp->tp = tp; |
| |
| + if (tb[TCA_TCINDEX_HASH]) |
| + cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); |
| + |
| + if (tb[TCA_TCINDEX_MASK]) |
| + cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]); |
| + |
| + if (tb[TCA_TCINDEX_SHIFT]) |
| + cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); |
| + |
| + if (!cp->hash) { |
| + /* Hash not specified, use perfect hash if the upper limit |
| + * of the hashing index is below the threshold. |
| + */ |
| + if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD) |
| + cp->hash = (cp->mask >> cp->shift) + 1; |
| + else |
| + cp->hash = DEFAULT_HASH_SIZE; |
| + } |
| + |
| if (p->perfect) { |
| int i; |
| |
| if (tcindex_alloc_perfect_hash(net, cp) < 0) |
| goto errout; |
| - for (i = 0; i < cp->hash; i++) |
| + for (i = 0; i < min(cp->hash, p->hash); i++) |
| cp->perfect[i].res = p->perfect[i].res; |
| balloc = 1; |
| } |
| @@ -350,15 +369,6 @@ tcindex_set_parms(struct net *net, struc |
| if (old_r) |
| cr = r->res; |
| |
| - if (tb[TCA_TCINDEX_HASH]) |
| - cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); |
| - |
| - if (tb[TCA_TCINDEX_MASK]) |
| - cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]); |
| - |
| - if (tb[TCA_TCINDEX_SHIFT]) |
| - cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); |
| - |
| err = -EBUSY; |
| |
| /* Hash already allocated, make sure that we still meet the |
| @@ -376,16 +386,6 @@ tcindex_set_parms(struct net *net, struc |
| if (tb[TCA_TCINDEX_FALL_THROUGH]) |
| cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]); |
| |
| - if (!cp->hash) { |
| - /* Hash not specified, use perfect hash if the upper limit |
| - * of the hashing index is below the threshold. |
| - */ |
| - if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD) |
| - cp->hash = (cp->mask >> cp->shift) + 1; |
| - else |
| - cp->hash = DEFAULT_HASH_SIZE; |
| - } |
| - |
| if (!cp->perfect && !cp->h) |
| cp->alloc_hash = cp->hash; |
| |