x86/asm: Convert inline asm call statements to use "sp" clobbers

Inline asm statements which contain call instructions have so far
forced the compiler to set up the frame pointer before the call by
declaring a fake output operand on the stack pointer:

	register void *__sp asm(_ASM_SP);
	asm volatile("call func" : "+r" (__sp));

Otherwise the stack frame may not get created before the call, which
confuses frame pointer based unwinding and triggers objtool warnings.

Replace that hack with a clobber based scheme.  When
CONFIG_FRAME_POINTER is enabled, compiler-gcc.h defines
ASM_CALL_CLOBBERS as an "sp" clobber, along with two helpers for
merging it into existing clobber lists: ASM_CALL_CLOBBERS_APPEND for
literal lists and ASM_CALL_CLOBBERS_ARGS() for variadic macro
arguments.  compiler-clang.h undefines all three macros, so Clang
falls back to the empty definitions in compiler.h.

Convert all affected call sites and update the objtool documentation
accordingly.
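For illustration, the three forms look like this ("func" and the
CALL_FUNC() wrapper are made-up placeholders; the real conversions are
in the diff below):

	/* Bare clobber list: */
	asm volatile("call func" : : : ASM_CALL_CLOBBERS);

	/* Appended to an existing clobber list: */
	asm volatile("call func" : : : "memory" ASM_CALL_CLOBBERS_APPEND);

	/* Merged with variadic macro arguments: */
	#define CALL_FUNC(clobbers...)					\
		asm volatile("call func"				\
			     : : : ASM_CALL_CLOBBERS_ARGS(clobbers))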
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index ba3a538..8fcfb46 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -201,7 +201,7 @@
: outputs \
: [old] "i" (oldfunc), [new] "i" (newfunc) \
ARGS_APPEND(inputs) \
- CLOBBERS_APPEND(clobbers))
+ : ASM_CALL_CLOBBERS_ARGS(clobbers))
/*
* Like alternative_call, but there are two features and respective functions.
@@ -211,16 +211,13 @@
*/
#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, \
feature2, outputs, inputs, clobbers...) \
-{ \
- register void *__sp asm(_ASM_SP); \
asm volatile (ALTERNATIVE_2("call %P[old]", \
"call %P[new1]", feature1, \
"call %P[new2]", feature2) \
- : "+r" (__sp) ARGS_APPEND(outputs) \
+ : outputs \
: [old] "i" (oldfunc), [new1] "i" (newfunc1), \
[new2] "i" (newfunc2) ARGS_APPEND(inputs) \
- CLOBBERS_APPEND(clobbers)); \
-}
+ : ASM_CALL_CLOBBERS_ARGS(clobbers))
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 5d49ed2..9b36288 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -179,7 +179,6 @@
u64 input_address = input ? virt_to_phys(input) : 0;
u64 output_address = output ? virt_to_phys(output) : 0;
u64 hv_status;
- register void *__sp asm(_ASM_SP);
#ifdef CONFIG_X86_64
if (!hv_hypercall_pg)
@@ -187,11 +186,12 @@
__asm__ __volatile__("mov %[out], %%r8\n"
"call *%[pg]"
- : "=a" (hv_status), "+r" (__sp),
- "+c" (control), "+d" (input_address)
+ : "=a" (hv_status), "+c" (control),
+ "+d" (input_address)
: [out] "r" (output_address),
[pg] "m" (hv_hypercall_pg)
- : "cc", "memory", "r8", "r9", "r10", "r11");
+ : "cc", "memory", "r8", "r9", "r10", "r11"
+ ASM_CALL_CLOBBERS_APPEND);
#else
u32 input_address_hi = upper_32_bits(input_address);
u32 input_address_lo = lower_32_bits(input_address);
@@ -202,13 +202,13 @@
return U64_MAX;
__asm__ __volatile__("call *%[pg]"
- : "=A" (hv_status),
- "+c" (input_address_lo), "+r" (__sp)
+ : "=A" (hv_status), "+c" (input_address_lo)
: "A" (control),
"b" (input_address_hi),
"D"(output_address_hi), "S"(output_address_lo),
[pg] "m" (hv_hypercall_pg)
- : "cc", "memory");
+ : "cc", "memory"
+ ASM_CALL_CLOBBERS_APPEND);
#endif /* !x86_64 */
return hv_status;
}
@@ -225,15 +225,15 @@
static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;
- register void *__sp asm(_ASM_SP);
#ifdef CONFIG_X86_64
{
__asm__ __volatile__("call *%[pg]"
- : "=a" (hv_status), "+r" (__sp),
- "+c" (control), "+d" (input1)
+ : "=a" (hv_status), "+c" (control),
+ "+d" (input1)
: [pg] "m" (hv_hypercall_pg)
- : "cc", "r8", "r9", "r10", "r11");
+ : "cc", "r8", "r9", "r10", "r11"
+ ASM_CALL_CLOBBERS_APPEND);
}
#else
{
@@ -242,12 +242,12 @@
__asm__ __volatile__ ("call *%[pg]"
: "=A"(hv_status),
- "+c"(input1_lo),
- "+r"(__sp)
+ "+c"(input1_lo)
: "A" (control),
"b" (input1_hi),
[pg] "m" (hv_hypercall_pg)
- : "cc", "edi", "esi");
+ : "cc", "edi", "esi"
+ ASM_CALL_CLOBBERS_APPEND);
}
#endif
return hv_status;
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 5dda8bf..b25627f 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -459,8 +459,8 @@
*/
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS \
- unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; \
- register void *__sp asm("esp")
+ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
+
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
@@ -480,8 +480,8 @@
/* [re]ax isn't an arg, but the return val */
#define PVOP_VCALL_ARGS \
unsigned long __edi = __edi, __esi = __esi, \
- __edx = __edx, __ecx = __ecx, __eax = __eax; \
- register void *__sp asm("rsp")
+ __edx = __edx, __ecx = __ecx, __eax = __eax
+
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
@@ -532,21 +532,23 @@
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
- : outputs, "+r" (__sp) \
+ : outputs \
: paravirt_type(op), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
- : "memory", "cc" extra_clbr); \
- __ret = (rettype)((((u64)__edx) << 32) | __eax); \
+ : "memory", "cc" extra_clbr \
+ ASM_CALL_CLOBBERS_APPEND); \
+ __ret = (rettype)((((u64)__edx) << 32) | __eax);\
} else { \
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
- : outputs, "+r" (__sp) \
+ : outputs \
: paravirt_type(op), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
- : "memory", "cc" extra_clbr); \
+ : "memory", "cc" extra_clbr \
+ ASM_CALL_CLOBBERS_APPEND); \
__ret = (rettype)(__eax & PVOP_RETMASK(rettype)); \
} \
__ret; \
@@ -569,11 +571,12 @@
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
- : outputs, "+r" (__sp) \
+ : outputs \
: paravirt_type(op), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
- : "memory", "cc" extra_clbr); \
+ : "memory", "cc" extra_clbr \
+ ASM_CALL_CLOBBERS_APPEND); \
})
#define __PVOP_VCALL(op, pre, post, ...) \
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index ec1f3c6..7405b68 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -100,19 +100,17 @@
#ifdef CONFIG_PREEMPT
extern asmlinkage void ___preempt_schedule(void);
-# define __preempt_schedule() \
-({ \
- register void *__sp asm(_ASM_SP); \
- asm volatile ("call ___preempt_schedule" : "+r"(__sp)); \
-})
+
+# define __preempt_schedule() \
+ asm volatile ("call ___preempt_schedule" : : : ASM_CALL_CLOBBERS)
extern asmlinkage void preempt_schedule(void);
extern asmlinkage void ___preempt_schedule_notrace(void);
-# define __preempt_schedule_notrace() \
-({ \
- register void *__sp asm(_ASM_SP); \
- asm volatile ("call ___preempt_schedule_notrace" : "+r"(__sp)); \
-})
+
+# define __preempt_schedule_notrace() \
+ asm volatile ("call ___preempt_schedule_notrace" \
+ : : : ASM_CALL_CLOBBERS)
+
extern asmlinkage void preempt_schedule_notrace(void);
#endif
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index a93e0d2..a213c1c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -677,8 +677,6 @@
* Like all of Linux's memory ordering operations, this is a
* compiler barrier as well.
*/
- register void *__sp asm(_ASM_SP);
-
#ifdef CONFIG_X86_32
asm volatile (
"pushfl\n\t"
@@ -686,7 +684,7 @@
"pushl $1f\n\t"
"iret\n\t"
"1:"
- : "+r" (__sp) : : "memory");
+ : : : "memory" ASM_CALL_CLOBBERS_APPEND);
#else
unsigned int tmp;
@@ -703,7 +701,8 @@
"iretq\n\t"
UNWIND_HINT_RESTORE
"1:"
- : [tmp] "=&r" (tmp), "+r" (__sp) : : "cc", "memory");
+ : [tmp] "=&r" (tmp)
+ : : "cc", "memory" ASM_CALL_CLOBBERS_APPEND);
#endif
}
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index b715152..8d30f85 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -104,7 +104,6 @@
({ \
long tmp; \
struct rw_semaphore* ret; \
- register void *__sp asm(_ASM_SP); \
\
asm volatile("# beginning down_write\n\t" \
LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t" \
@@ -116,10 +115,9 @@
" call " slow_path "\n\t" \
"1:\n\t" \
"# ending down_write\n\t" \
- : "+m" (sem->count), [tmp] "=d" (tmp), "=a" (ret), \
- "+r" (__sp) \
+ : "+m" (sem->count), [tmp] "=d" (tmp), "=a" (ret) \
: [sem] "a" (sem), "d" (RWSEM_ACTIVE_WRITE_BIAS) \
- : "memory", "cc"); \
+ : "memory", "cc" ASM_CALL_CLOBBERS_APPEND); \
ret; \
})
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 12fb373..b9eefb9 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -166,12 +166,12 @@
({ \
int __ret_gu; \
register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
- register void *__sp asm(_ASM_SP); \
__chk_user_ptr(ptr); \
might_fault(); \
asm volatile("call __get_user_%P[size]" \
- : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp) \
- : "a" (ptr), [size] "i" (sizeof(*(ptr)))); \
+ : "=a" (__ret_gu), "=r" (__val_gu) \
+ : "a" (ptr), [size] "i" (sizeof(*(ptr))) \
+ : ASM_CALL_CLOBBERS); \
(x) = (__force __typeof__(*(ptr))) __val_gu; \
__builtin_expect(__ret_gu, 0); \
})
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 9606688..d51d3f5 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -113,10 +113,9 @@
register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
- register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5; \
- register void *__sp asm(_ASM_SP);
+ register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5
-#define __HYPERCALL_0PARAM "=r" (__res), "+r" (__sp)
+#define __HYPERCALL_0PARAM "=r" (__res)
#define __HYPERCALL_1PARAM __HYPERCALL_0PARAM, "+r" (__arg1)
#define __HYPERCALL_2PARAM __HYPERCALL_1PARAM, "+r" (__arg2)
#define __HYPERCALL_3PARAM __HYPERCALL_2PARAM, "+r" (__arg3)
@@ -135,7 +134,7 @@
#define __HYPERCALL_5ARG(a1,a2,a3,a4,a5) \
__HYPERCALL_4ARG(a1,a2,a3,a4) __arg5 = (unsigned long)(a5);
-#define __HYPERCALL_CLOBBER5 "memory"
+#define __HYPERCALL_CLOBBER5 "memory" ASM_CALL_CLOBBERS_APPEND
#define __HYPERCALL_CLOBBER4 __HYPERCALL_CLOBBER5, __HYPERCALL_ARG5REG
#define __HYPERCALL_CLOBBER3 __HYPERCALL_CLOBBER4, __HYPERCALL_ARG4REG
#define __HYPERCALL_CLOBBER2 __HYPERCALL_CLOBBER3, __HYPERCALL_ARG3REG
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 16bf665..2314c7b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -5296,7 +5296,6 @@
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
- register void *__sp asm(_ASM_SP);
ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
if (!(ctxt->d & ByteOp))
@@ -5304,8 +5303,9 @@
asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
: "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
- [fastop]"+S"(fop), "+r"(__sp)
- : "c"(ctxt->src2.val));
+ [fastop]"+S"(fop)
+ : "c"(ctxt->src2.val)
+ : ASM_CALL_CLOBBERS);
ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
if (!fop) /* exception is returned in fop variable */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 699704d..04d1998f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9036,7 +9036,6 @@
static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
{
u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
- register void *__sp asm(_ASM_SP);
if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
== (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
@@ -9063,13 +9062,14 @@
"call *%[entry]\n\t"
:
#ifdef CONFIG_X86_64
- [sp]"=&r"(tmp),
+ [sp]"=&r"(tmp)
#endif
- "+r"(__sp)
:
[entry]"r"(entry),
[ss]"i"(__KERNEL_DS),
[cs]"i"(__KERNEL_CS)
+ :
+ ASM_CALL_CLOBBERS
);
}
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b836a72..d40c602 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -806,7 +806,6 @@
if (is_vmalloc_addr((void *)address) &&
(((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
- register void *__sp asm("rsp");
unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *);
/*
* We're likely to be running with very little stack space
@@ -821,10 +820,11 @@
asm volatile ("movq %[stack], %%rsp\n\t"
"call handle_stack_overflow\n\t"
"1: jmp 1b"
- : "+r" (__sp)
+ :
: "D" ("kernel stack overflow (page fault)"),
"S" (regs), "d" (address),
- [stack] "rm" (stack));
+ [stack] "rm" (stack)
+ : ASM_CALL_CLOBBERS);
unreachable();
}
#endif
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index de17999..ef1a46b 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -15,3 +15,7 @@
* with any version that can compile the kernel
*/
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#undef ASM_CALL_CLOBBERS
+#undef ASM_CALL_CLOBBERS_APPEND
+#undef ASM_CALL_CLOBBERS_ARGS
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 16d41de..b08fcc9 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -128,6 +128,12 @@
#define __always_unused __attribute__((unused))
#define __mode(x) __attribute__((mode(x)))
+#ifdef CONFIG_FRAME_POINTER
+# define ASM_CALL_CLOBBERS "sp"
+# define ASM_CALL_CLOBBERS_APPEND , ASM_CALL_CLOBBERS
+# define ASM_CALL_CLOBBERS_ARGS(args...) ASM_CALL_CLOBBERS, ## args
+#endif
+
/* gcc version specific checks */
#if GCC_VERSION < 30200
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 5a9ede0..0b4e269 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -653,4 +653,10 @@
#define CLOBBERS_APPEND(...) \
_CLOBBERS_APPEND(HAS_ARGS(__VA_ARGS__), __VA_ARGS__)
+#ifndef ASM_CALL_CLOBBERS
+# define ASM_CALL_CLOBBERS
+# define ASM_CALL_CLOBBERS_APPEND
+# define ASM_CALL_CLOBBERS_ARGS(args...) args
+#endif
+
#endif /* __LINUX_COMPILER_H */
diff --git a/tools/objtool/Documentation/stack-validation.txt b/tools/objtool/Documentation/stack-validation.txt
index 6a1af43..5696517 100644
--- a/tools/objtool/Documentation/stack-validation.txt
+++ b/tools/objtool/Documentation/stack-validation.txt
@@ -193,11 +193,15 @@
If it's a GCC-compiled .c file, the error may be because the function
uses an inline asm() statement which has a "call" instruction. An
- asm() statement with a call instruction must declare the use of the
- stack pointer in its output operand. For example, on x86_64:
+ asm() statement with a call instruction must add the stack pointer to
+ its clobber list. For example, on x86_64:
- register void *__sp asm("rsp");
- asm volatile("call func" : "+r" (__sp));
+ asm volatile("call func" : : : ASM_CALL_CLOBBERS);
+
+ If you need to specify additional clobbers, use the "APPEND" version
+ of the macro:
+
+ asm volatile("call func" : : : "memory" ASM_CALL_CLOBBERS_APPEND);
Otherwise the stack frame may not get created before the call.