| From gregkh@mini.kroah.org Thu Jan 14 15:06:20 2010 |
| Message-Id: <20100114230620.830938718@mini.kroah.org> |
| User-Agent: quilt/0.48-1 |
| Date: Thu, 14 Jan 2010 15:04:49 -0800 |
| From: Greg KH <gregkh@suse.de> |
| To: linux-kernel@vger.kernel.org, |
| stable@kernel.org |
| Cc: stable-review@kernel.org, |
| torvalds@linux-foundation.org, |
| akpm@linux-foundation.org, |
| alan@lxorguk.ukuu.org.uk, |
| Andi Kleen <ak@linux.intel.com>, |
| Ingo Molnar <mingo@elte.hu>, |
| Oleg Nesterov <oleg@redhat.com> |
| Subject: [1/6] kernel/signal.c: fix kernel information leak with print-fatal-signals=1 |
| |
| 2.6.27-stable review patch. If anyone has any objections, please let us know. |
| |
| ------------------ |
| |
| From: Andi Kleen <andi@firstfloor.org> |
| |
| commit b45c6e76bc2c72f6426c14bed64fdcbc9bf37cb0 upstream. |
| |
| When print-fatal-signals is enabled it's possible to dump any memory |
| reachable by the kernel to the log by simply jumping to that address from |
| user space. |
| |
| Or crash the system if there's some hardware with read side effects. |
| |
| The fatal signals handler will dump 16 bytes at the execution address, |
| which is fully controlled by ring 3. |
| |
| In addition when something jumps to an unmapped address there will be up to |
| 16 additional useless page faults, which might be potentially slow (and at |
| least is not very efficient) |
| |
| Fortunately this option is off by default and only there on i386. |
| |
| But fix it by checking for kernel addresses and also stopping when there's |
| a page fault. |
| |
| Signed-off-by: Andi Kleen <ak@linux.intel.com> |
| Cc: Ingo Molnar <mingo@elte.hu> |
| Cc: Oleg Nesterov <oleg@redhat.com> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> |
| |
| --- |
| kernel/signal.c | 3 ++- |
| 1 file changed, 2 insertions(+), 1 deletion(-) |
| |
| --- a/kernel/signal.c |
| +++ b/kernel/signal.c |
| @@ -884,7 +884,8 @@ static void print_fatal_signal(struct pt |
| for (i = 0; i < 16; i++) { |
| unsigned char insn; |
| |
| - __get_user(insn, (unsigned char *)(regs->ip + i)); |
| + if (get_user(insn, (unsigned char *)(regs->ip + i))) |
| + break; |
| printk("%02x ", insn); |
| } |
| } |
| |
| |
| From gregkh@mini.kroah.org Thu Jan 14 15:06:21 2010 |
| Message-Id: <20100114230620.960894210@mini.kroah.org> |
| User-Agent: quilt/0.48-1 |
| Date: Thu, 14 Jan 2010 15:04:50 -0800 |
| From: Greg KH <gregkh@suse.de> |
| To: linux-kernel@vger.kernel.org, |
| stable@kernel.org |
| Cc: stable-review@kernel.org, |
| torvalds@linux-foundation.org, |
| akpm@linux-foundation.org, |
| alan@lxorguk.ukuu.org.uk, |
| Florian Westphal <fwestphal@astaro.com>, |
| Patrick McHardy <kaber@trash.net> |
| Subject: [2/6] netfilter: ebtables: enforce CAP_NET_ADMIN |
| |
| 2.6.27-stable review patch. If anyone has any objections, please let us know. |
| |
| ------------------ |
| |
| From: Florian Westphal <fwestphal@astaro.com> |
| |
| commit dce766af541f6605fa9889892c0280bab31c66ab upstream. |
| |
| normal users are currently allowed to set/modify ebtables rules. |
| Restrict it to processes with CAP_NET_ADMIN. |
| |
| Note that this cannot be reproduced with unmodified ebtables binary |
| because it uses SOCK_RAW. |
| |
| Signed-off-by: Florian Westphal <fwestphal@astaro.com> |
| Signed-off-by: Patrick McHardy <kaber@trash.net> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> |
| |
| --- |
| net/bridge/netfilter/ebtables.c | 6 ++++++ |
| 1 file changed, 6 insertions(+) |
| |
| --- a/net/bridge/netfilter/ebtables.c |
| +++ b/net/bridge/netfilter/ebtables.c |
| @@ -1436,6 +1436,9 @@ static int do_ebt_set_ctl(struct sock *s |
| { |
| int ret; |
| |
| + if (!capable(CAP_NET_ADMIN)) |
| + return -EPERM; |
| + |
| switch(cmd) { |
| case EBT_SO_SET_ENTRIES: |
| ret = do_replace(user, len); |
| @@ -1455,6 +1458,9 @@ static int do_ebt_get_ctl(struct sock *s |
| struct ebt_replace tmp; |
| struct ebt_table *t; |
| |
| + if (!capable(CAP_NET_ADMIN)) |
| + return -EPERM; |
| + |
| if (copy_from_user(&tmp, user, sizeof(tmp))) |
| return -EFAULT; |
| |
| |
| |
| From gregkh@mini.kroah.org Thu Jan 14 15:06:21 2010 |
| Message-Id: <20100114230621.084598004@mini.kroah.org> |
| User-Agent: quilt/0.48-1 |
| Date: Thu, 14 Jan 2010 15:04:51 -0800 |
| From: Greg KH <gregkh@suse.de> |
| To: linux-kernel@vger.kernel.org, |
| stable@kernel.org |
| Cc: stable-review@kernel.org, |
| torvalds@linux-foundation.org, |
| akpm@linux-foundation.org, |
| alan@lxorguk.ukuu.org.uk, |
| Al Viro <viro@zeniv.linux.org.uk> |
| Subject: [3/6] fix braindamage in audit_tree.c untag_chunk() |
| |
| 2.6.27-stable review patch. If anyone has any objections, please let us know. |
| |
| ------------------ |
| |
| From: Al Viro <viro@ZenIV.linux.org.uk> |
| |
| commit 6f5d51148921c242680a7a1d9913384a30ab3cbe upstream. |
| |
| ... aka "Al had badly fscked up when writing that thing and nobody |
| noticed until Eric had fixed leaks that used to mask the breakage". |
| |
| The function essentially creates a copy of old array sans one element |
| and replaces the references to elements of original (they are on cyclic |
| lists) with those to corresponding elements of new one. After that the |
| old one is fair game for freeing. |
| |
| First of all, there's a dumb braino: when we get to list_replace_init we |
| use indices for wrong arrays - position in new one with the old array |
| and vice versa. |
| |
| Another bug is more subtle - termination condition is wrong if the |
| element to be excluded happens to be the last one. We shouldn't go |
| until we fill the new array, we should go until we'd finished the old |
| one. Otherwise the element we are trying to kill will remain on the |
| cyclic lists... |
| |
| That crap used to be masked by several leaks, so it was not quite |
| trivial to hit. Eric had fixed some of those leaks a while ago and the |
| shit had hit the fan... |
| |
| Signed-off-by: Al Viro <viro@zeniv.linux.org.uk> |
| Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> |
| |
| --- |
| kernel/audit_tree.c | 4 ++-- |
| 1 file changed, 2 insertions(+), 2 deletions(-) |
| |
| --- a/kernel/audit_tree.c |
| +++ b/kernel/audit_tree.c |
| @@ -276,7 +276,7 @@ static void untag_chunk(struct node *p) |
| owner->root = NULL; |
| } |
| |
| - for (i = j = 0; i < size; i++, j++) { |
| + for (i = j = 0; j <= size; i++, j++) { |
| struct audit_tree *s; |
| if (&chunk->owners[j] == p) { |
| list_del_init(&p->list); |
| @@ -289,7 +289,7 @@ static void untag_chunk(struct node *p) |
| if (!s) /* result of earlier fallback */ |
| continue; |
| get_tree(s); |
| - list_replace_init(&chunk->owners[i].list, &new->owners[j].list); |
| + list_replace_init(&chunk->owners[j].list, &new->owners[i].list); |
| } |
| |
| list_replace_rcu(&chunk->hash, &new->hash); |
| |
| |
| From gregkh@mini.kroah.org Thu Jan 14 15:06:21 2010 |
| Message-Id: <20100114230621.214106296@mini.kroah.org> |
| User-Agent: quilt/0.48-1 |
| Date: Thu, 14 Jan 2010 15:04:52 -0800 |
| From: Greg KH <gregkh@suse.de> |
| To: linux-kernel@vger.kernel.org, |
| stable@kernel.org |
| Cc: stable-review@kernel.org, |
| torvalds@linux-foundation.org, |
| akpm@linux-foundation.org, |
| alan@lxorguk.ukuu.org.uk, |
| Al Viro <viro@zeniv.linux.org.uk> |
| Subject: [4/6] fix more leaks in audit_tree.c tag_chunk() |
| |
| 2.6.27-stable review patch. If anyone has any objections, please let us know. |
| |
| ------------------ |
| |
| From: Al Viro <viro@ZenIV.linux.org.uk> |
| |
| commit b4c30aad39805902cf5b855aa8a8b22d728ad057 upstream. |
| |
| Several leaks in audit_tree didn't get caught by commit |
| 318b6d3d7ddbcad3d6867e630711b8a705d873d7, including the leak on normal |
| exit in case of multiple rules referring to the same chunk. |
| |
| Signed-off-by: Al Viro <viro@zeniv.linux.org.uk> |
| Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> |
| |
| --- |
| kernel/audit_tree.c | 9 ++++++--- |
| 1 file changed, 6 insertions(+), 3 deletions(-) |
| |
| --- a/kernel/audit_tree.c |
| +++ b/kernel/audit_tree.c |
| @@ -372,15 +372,17 @@ static int tag_chunk(struct inode *inode |
| for (n = 0; n < old->count; n++) { |
| if (old->owners[n].owner == tree) { |
| spin_unlock(&hash_lock); |
| - put_inotify_watch(watch); |
| + put_inotify_watch(&old->watch); |
| return 0; |
| } |
| } |
| spin_unlock(&hash_lock); |
| |
| chunk = alloc_chunk(old->count + 1); |
| - if (!chunk) |
| + if (!chunk) { |
| + put_inotify_watch(&old->watch); |
| return -ENOMEM; |
| + } |
| |
| mutex_lock(&inode->inotify_mutex); |
| if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) { |
| @@ -422,7 +424,8 @@ static int tag_chunk(struct inode *inode |
| spin_unlock(&hash_lock); |
| inotify_evict_watch(&old->watch); |
| mutex_unlock(&inode->inotify_mutex); |
| - put_inotify_watch(&old->watch); |
| + put_inotify_watch(&old->watch); /* pair to inotify_find_watch */ |
| + put_inotify_watch(&old->watch); /* and kill it */ |
| return 0; |
| } |
| |
| |
| |
| From gregkh@mini.kroah.org Thu Jan 14 15:06:21 2010 |
| Message-Id: <20100114230621.341451387@mini.kroah.org> |
| User-Agent: quilt/0.48-1 |
| Date: Thu, 14 Jan 2010 15:04:53 -0800 |
| From: Greg KH <gregkh@suse.de> |
| To: linux-kernel@vger.kernel.org, |
| stable@kernel.org |
| Cc: stable-review@kernel.org, |
| torvalds@linux-foundation.org, |
| akpm@linux-foundation.org, |
| alan@lxorguk.ukuu.org.uk, |
| Michael Neuling <mikey@neuling.org>, |
| Paul Mackerras <paulus@samba.org> |
| Subject: [5/6] powerpc: Disable VSX or current process in giveup_fpu/altivec |
| |
| 2.6.27-stable review patch. If anyone has any objections, please let us know. |
| |
| ------------------ |
| |
| From: Michael Neuling <mikey@neuling.org> |
| |
| commit 7e875e9dc8af70d126fa632446e967327ac3fdda upstream. |
| |
| When we call giveup_fpu, we need to turn off VSX for the |
| current process. If we don't, on return to userspace it may execute a |
| VSX instruction before the next FP instruction, and not have its |
| register state refreshed correctly from the thread_struct. Ditto for |
| altivec. |
| |
| This caused a bug where an unaligned lfs or stfs results in |
| fix_alignment calling giveup_fpu so it can use the FPRs (in order to |
| do a single <-> double conversion), and then returning to userspace |
| with FP off but VSX on. Then if a VSX instruction is executed, before |
| another FP instruction, it will proceed without another exception and |
| hence have the incorrect register state for VSX registers 0-31. |
| |
| lfs unaligned <- alignment exception turns FP off but leaves VSX on |
| |
| VSX instruction <- no exception since VSX on, hence we get the |
| wrong VSX register values for VSX registers 0-31, |
| which overlap the FPRs. |
| |
| Signed-off-by: Michael Neuling <mikey@neuling.org> |
| Signed-off-by: Paul Mackerras <paulus@samba.org> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> |
| |
| --- |
| arch/powerpc/kernel/fpu.S | 5 +++++ |
| arch/powerpc/kernel/misc_64.S | 8 ++++++++ |
| 2 files changed, 13 insertions(+) |
| |
| --- a/arch/powerpc/kernel/fpu.S |
| +++ b/arch/powerpc/kernel/fpu.S |
| @@ -145,6 +145,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) |
| beq 1f |
| PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) |
| li r3,MSR_FP|MSR_FE0|MSR_FE1 |
| +#ifdef CONFIG_VSX |
| +BEGIN_FTR_SECTION |
| + oris r3,r3,MSR_VSX@h |
| +END_FTR_SECTION_IFSET(CPU_FTR_VSX) |
| +#endif |
| andc r4,r4,r3 /* disable FP for previous task */ |
| PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5) |
| 1: |
| --- a/arch/powerpc/kernel/misc_64.S |
| +++ b/arch/powerpc/kernel/misc_64.S |
| @@ -493,7 +493,15 @@ _GLOBAL(giveup_altivec) |
| stvx vr0,r4,r3 |
| beq 1f |
| ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) |
| +#ifdef CONFIG_VSX |
| +BEGIN_FTR_SECTION |
| + lis r3,(MSR_VEC|MSR_VSX)@h |
| +FTR_SECTION_ELSE |
| + lis r3,MSR_VEC@h |
| +ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX) |
| +#else |
| lis r3,MSR_VEC@h |
| +#endif |
| andc r4,r4,r3 /* disable FP for previous task */ |
| std r4,_MSR-STACK_FRAME_OVERHEAD(r5) |
| 1: |
| |
| |
| From gregkh@mini.kroah.org Thu Jan 14 15:06:21 2010 |
| Message-Id: <20100114230621.471041815@mini.kroah.org> |
| User-Agent: quilt/0.48-1 |
| Date: Thu, 14 Jan 2010 15:04:54 -0800 |
| From: Greg KH <gregkh@suse.de> |
| To: linux-kernel@vger.kernel.org, |
| stable@kernel.org |
| Cc: stable-review@kernel.org, |
| torvalds@linux-foundation.org, |
| akpm@linux-foundation.org, |
| alan@lxorguk.ukuu.org.uk, |
| Neil Campbell <neilc@linux.vnet.ibm.com>, |
| Michael Neuling <mikey@neuling.org>, |
| Benjamin Herrenschmidt <benh@kernel.crashing.org> |
| Subject: [6/6] powerpc: Handle VSX alignment faults correctly in little-endian mode |
| |
| 2.6.27-stable review patch. If anyone has any objections, please let us know. |
| |
| ------------------ |
| |
| From: Neil Campbell <neilc@linux.vnet.ibm.com> |
| |
| commit bb7f20b1c639606def3b91f4e4aca6daeee5d80a upstream. |
| |
| This patch fixes the handling of VSX alignment faults in little-endian |
| mode (the current code assumes the processor is in big-endian mode). |
| |
| The patch also makes the handlers clear the top 8 bytes of the register |
| when handling an 8 byte VSX load. |
| |
| This is based on 2.6.32. |
| |
| Signed-off-by: Neil Campbell <neilc@linux.vnet.ibm.com> |
| Acked-by: Michael Neuling <mikey@neuling.org> |
| Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> |
| |
| --- |
| arch/powerpc/kernel/align.c | 63 ++++++++++++++++++++++++++++++++------------ |
| 1 file changed, 46 insertions(+), 17 deletions(-) |
| |
| --- a/arch/powerpc/kernel/align.c |
| +++ b/arch/powerpc/kernel/align.c |
| @@ -641,10 +641,14 @@ static int emulate_spe(struct pt_regs *r |
| */ |
| static int emulate_vsx(unsigned char __user *addr, unsigned int reg, |
| unsigned int areg, struct pt_regs *regs, |
| - unsigned int flags, unsigned int length) |
| + unsigned int flags, unsigned int length, |
| + unsigned int elsize) |
| { |
| char *ptr; |
| + unsigned long *lptr; |
| int ret = 0; |
| + int sw = 0; |
| + int i, j; |
| |
| flush_vsx_to_thread(current); |
| |
| @@ -653,19 +657,35 @@ static int emulate_vsx(unsigned char __u |
| else |
| ptr = (char *) ¤t->thread.vr[reg - 32]; |
| |
| - if (flags & ST) |
| - ret = __copy_to_user(addr, ptr, length); |
| - else { |
| - if (flags & SPLT){ |
| - ret = __copy_from_user(ptr, addr, length); |
| - ptr += length; |
| + lptr = (unsigned long *) ptr; |
| + |
| + if (flags & SW) |
| + sw = elsize-1; |
| + |
| + for (j = 0; j < length; j += elsize) { |
| + for (i = 0; i < elsize; ++i) { |
| + if (flags & ST) |
| + ret |= __put_user(ptr[i^sw], addr + i); |
| + else |
| + ret |= __get_user(ptr[i^sw], addr + i); |
| } |
| - ret |= __copy_from_user(ptr, addr, length); |
| + ptr += elsize; |
| + addr += elsize; |
| } |
| - if (flags & U) |
| - regs->gpr[areg] = regs->dar; |
| - if (ret) |
| + |
| + if (!ret) { |
| + if (flags & U) |
| + regs->gpr[areg] = regs->dar; |
| + |
| + /* Splat load copies the same data to top and bottom 8 bytes */ |
| + if (flags & SPLT) |
| + lptr[1] = lptr[0]; |
| + /* For 8 byte loads, zero the top 8 bytes */ |
| + else if (!(flags & ST) && (8 == length)) |
| + lptr[1] = 0; |
| + } else |
| return -EFAULT; |
| + |
| return 1; |
| } |
| #endif |
| @@ -764,16 +784,25 @@ int fix_alignment(struct pt_regs *regs) |
| |
| #ifdef CONFIG_VSX |
| if ((instruction & 0xfc00003e) == 0x7c000018) { |
| - /* Additional register addressing bit (64 VSX vs 32 FPR/GPR */ |
| + unsigned int elsize; |
| + |
| + /* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */ |
| reg |= (instruction & 0x1) << 5; |
| /* Simple inline decoder instead of a table */ |
| + /* VSX has only 8 and 16 byte memory accesses */ |
| + nb = 8; |
| if (instruction & 0x200) |
| nb = 16; |
| - else if (instruction & 0x080) |
| - nb = 8; |
| - else |
| - nb = 4; |
| + |
| + /* Vector stores in little-endian mode swap individual |
| + elements, so process them separately */ |
| + elsize = 4; |
| + if (instruction & 0x80) |
| + elsize = 8; |
| + |
| flags = 0; |
| + if (regs->msr & MSR_LE) |
| + flags |= SW; |
| if (instruction & 0x100) |
| flags |= ST; |
| if (instruction & 0x040) |
| @@ -783,7 +812,7 @@ int fix_alignment(struct pt_regs *regs) |
| flags |= SPLT; |
| nb = 8; |
| } |
| - return emulate_vsx(addr, reg, areg, regs, flags, nb); |
| + return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize); |
| } |
| #endif |
| /* A size of 0 indicates an instruction we don't support, with |
| |
| |