From 8376efd31d3d7c44bd05be337adde023cc531fa1 Mon Sep 17 00:00:00 2001
From: Ben Hutchings <ben.hutchings@codethink.co.uk>
Date: Tue, 9 May 2017 18:00:43 +0100
Subject: [PATCH] x86, pmem: Fix cache flushing for iovec write < 8 bytes

commit 8376efd31d3d7c44bd05be337adde023cc531fa1 upstream.

Commit 11e63f6d920d added cache flushing for unaligned writes from an
iovec, covering the first and last cache line of a >= 8 byte write and
the first cache line of a < 8 byte write. But an unaligned write of
2-7 bytes can still cover two cache lines, so make sure we flush both
in that case.

Cc: <stable@vger.kernel.org>
Fixes: 11e63f6d920d ("x86, pmem: fix broken __copy_user_nocache ...")
Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index d5a22bac9988..0ff8fe71b255 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -98,7 +98,7 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
 
 		if (bytes < 8) {
 			if (!IS_ALIGNED(dest, 4) || (bytes != 4))
-				arch_wb_cache_pmem(addr, 1);
+				arch_wb_cache_pmem(addr, bytes);
 		} else {
 			if (!IS_ALIGNED(dest, 8)) {
 				dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
--
2.12.0

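The arithmetic behind the fix can be checked with a minimal user-space sketch
(not kernel code). It assumes a 64-byte cache line, whereas the kernel reads
the real size from boot_cpu_data.x86_clflush_size, and the lines_covered()
helper is purely illustrative: a cache-line-granular writeback such as
arch_wb_cache_pmem() has to flush every line in [addr, addr + len), so passing
the real length covers the second line that a short unaligned write can spill
into, while a hard-coded length of 1 only ever covers the first line.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Assumed cache line size; the kernel uses boot_cpu_data.x86_clflush_size. */
#define CLFLUSH_SIZE 64UL

/* How many cache lines a write of 'len' bytes starting at 'addr' touches. */
static unsigned long lines_covered(uintptr_t addr, size_t len)
{
	uintptr_t first = addr & ~(CLFLUSH_SIZE - 1);
	uintptr_t last = (addr + len - 1) & ~(CLFLUSH_SIZE - 1);

	return (last - first) / CLFLUSH_SIZE + 1;
}

int main(void)
{
	/* A 5-byte iovec write landing 2 bytes before a line boundary. */
	uintptr_t dest = 0x1000 + 62;
	size_t bytes = 5;

	/* Old behaviour: flush length 1 reaches only the first line. */
	printf("flush length 1 covers %lu cache line(s)\n",
	       lines_covered(dest, 1));
	/* Fixed behaviour: flush length 'bytes' also reaches the second line. */
	printf("flush length %zu covers %lu cache line(s)\n",
	       bytes, lines_covered(dest, bytes));
	return 0;
}

Run as written, this prints 1 line for a flush length of 1 and 2 lines for a
flush length of 5, which is the case the commit message describes: a 2-7 byte
unaligned write can straddle a cache line boundary even though it is shorter
than 8 bytes.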