From aae299cf8b01326162cd07e2b9818d2e9b2daa3b Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 11 Mar 2013 17:08:49 +0100
Subject: [PATCH 1/6] x86/highmem: close race between clear/set ptes

If the task is interrupted between kmap_atomic() and kunmap_atomic()
(or between kmap_atomic_prot_pfn() and its counterpart) it may race
against switch_kmaps() and trigger a false-positive warning.

In kmap_atomic_prot() we first grab a new index via
kmap_atomic_idx_push() and only then check whether the slot is already
in use. If we are interrupted after taking the index, switch_kmaps()
assumes the index is in use and restores the old entry from the task's
kmap_pte member. Since __kunmap_atomic() never invalidates this
member, a stale entry may be written back; the slot then looks as if
it were already in use and the WARN_ON() fires.

This patch sets the shadow pte entry to 0 in __kunmap_atomic(), so
pte_none() is true for slots that are no longer mapped and
switch_kmaps() skips them instead of restoring stale entries.
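
The window is easiest to see in a small userspace model of the
interleaving (illustrative only, not kernel code; all names and values
here are made up). Note that the pte_none() check in switch_kmaps()
only closes the race together with the shadow invalidation on unmap:

  #include <stdbool.h>
  #include <stdio.h>

  #define KM_TYPE_NR 4

  typedef unsigned long pte_t;          /* stand-in for the kernel type */

  static pte_t slots[KM_TYPE_NR];       /* models the real kmap ptes    */
  static pte_t shadow[KM_TYPE_NR];      /* models task->kmap_pte[]      */
  static int   kmap_idx;                /* models task->kmap_idx        */

  static void switch_kmaps(void)        /* runs when the task switches in */
  {
          int i;

          for (i = 0; i < kmap_idx; i++)
                  if (shadow[i])        /* the pte_none() check added here */
                          slots[i] = shadow[i];
  }

  static void scenario(bool clear_shadow_on_unmap)
  {
          /* kmap_atomic(): push an index, install a pte */
          kmap_idx = 1;
          shadow[0] = 0xdead;
          slots[0] = 0xdead;

          /* kunmap_atomic(): clear the slot; with this patch the
           * shadow copy is cleared as well */
          slots[0] = 0;
          if (clear_shadow_on_unmap)
                  shadow[0] = 0;
          kmap_idx = 0;

          /* kmap_atomic() again: the index is pushed first... */
          kmap_idx = 1;

          /* ...and if we are preempted right here, switch_kmaps()
           * replays whatever the shadow copy still holds */
          switch_kmaps();

          /* ...so the in-use check that follows sees a busy slot */
          printf("%s fix: %s\n",
                 clear_shadow_on_unmap ? "with   " : "without",
                 slots[0] ? "WARN_ON(), slot 0 looks in use"
                          : "slot 0 free");

          slots[0] = shadow[0] = 0;     /* reset for the next run */
          kmap_idx = 0;
  }

  int main(void)
  {
          scenario(false);              /* stale shadow -> false positive */
          scenario(true);               /* cleared shadow -> no warning   */
          return 0;
  }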

While at it, add a BUG_ON() to the RT branch of
kmap_atomic_idx_push(), mirroring the overflow check that the non-RT
case already has.
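
And a minimal userspace model of the RT branch after this change
(assert() standing in for BUG_ON(); again only an illustration, not
kernel code):

  #include <assert.h>
  #include <stdio.h>

  #define KM_TYPE_NR 4

  static int kmap_idx;                  /* models current->kmap_idx */

  static int kmap_atomic_idx_push(void)
  {
          kmap_idx++;
          assert(kmap_idx <= KM_TYPE_NR);  /* the added BUG_ON() */
          return kmap_idx - 1;
  }

  int main(void)
  {
          int i;

          for (i = 0; i < KM_TYPE_NR; i++)
                  printf("got slot %d\n", kmap_atomic_idx_push());
          /* one more push would now trip the check instead of
           * silently indexing past the end of kmap_pte[]:
           * kmap_atomic_idx_push();
           */
          return 0;
  }
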
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 arch/x86/kernel/process_32.c |    3 ++-
 arch/x86/mm/highmem_32.c     |    3 +++
 arch/x86/mm/iomap_32.c       |    3 +++
 include/linux/highmem.h      |    4 +++-
 4 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 33e5d14..ebcee60 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -218,7 +218,8 @@ static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
for (i = 0; i < next_p->kmap_idx; i++) {
int idx = i + KM_TYPE_NR * smp_processor_id();
- set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
+ if (!pte_none(next_p->kmap_pte[i]))
+ set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
}
}
#else
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 0935789..6e5ac8b 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -91,6 +91,9 @@ void __kunmap_atomic(void *kvaddr)
* is a bad idea also, in case the page changes cacheability
* attributes or becomes a protected page in a hypervisor.
*/
+#ifdef CONFIG_PREEMPT_RT_FULL
+ current->kmap_pte[type] = __pte(0);
+#endif
kpte_clear_flush(kmap_pte-idx, vaddr);
kmap_atomic_idx_pop();
arch_flush_lazy_mmu_mode();
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 4e1d4d5..0c953e3 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -114,6 +114,9 @@ iounmap_atomic(void __iomem *kvaddr)
* is a bad idea also, in case the page changes cacheability
* attributes or becomes a protected page in a hypervisor.
*/
+#ifdef CONFIG_PREEMPT_RT_FULL
+ current->kmap_pte[type] = __pte(0);
+#endif
kpte_clear_flush(kmap_pte-idx, vaddr);
kmap_atomic_idx_pop();
}
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index acdd321..84223de 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -101,7 +101,9 @@ static inline int kmap_atomic_idx_push(void)
# endif
return idx;
#else
- return current->kmap_idx++;
+ current->kmap_idx++;
+ BUG_ON(current->kmap_idx > KM_TYPE_NR);
+ return current->kmap_idx - 1;
#endif
}
--
1.7.10.4