arm64: stacktrace: Implement reliable stacktrace

Live patching has a consistency model which requires that the
architecture provide a reliable stack trace interface which specifically
indicates that the stack has been fully walked and that it is reliable
and consistent. This is done by providing arch_stack_walk_reliable(), a
variant of arch_stack_walk() which should verify that the stack has
these properties and return an error if not.

The arm64 unwinder is already reasonably thorough in verifying the stack
as it walks it and reports errors, but we additionally check that we do
not see any kretprobe trampolines on the stack. Since the unwinder is
able to resolve function graph tracer probes transparently, we do not
reject those.

Signed-off-by: Mark Brown <broonie@kernel.org>
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index d1ba52e..026f695 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -174,6 +174,7 @@
 	select HAVE_FUNCTION_ARG_ACCESS_API
 	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select MMU_GATHER_RCU_TABLE_FREE
+	select HAVE_RELIABLE_STACKTRACE
 	select HAVE_RSEQ
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index ad20981..795b2c1 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -14,6 +14,7 @@
 #include <linux/stacktrace.h>
 
 #include <asm/irq.h>
+#include <asm/kprobes.h>
 #include <asm/pointer_auth.h>
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
@@ -212,4 +213,45 @@
 	walk_stackframe(task, &frame, consume_entry, cookie);
 }
 
+/*
+ * Reliable variant of arch_stack_walk(): feed each return address to
+ * consume_entry(cookie, pc) and return 0 only if the whole stack was
+ * walked and every frame looked sound; otherwise a negative error.
+ * If the task is not 'current', the caller *must* ensure the task is inactive.
+ */
+int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+			     void *cookie, struct task_struct *task)
+{
+	struct stackframe frame;
+
+	if (task == current)
+		start_backtrace(&frame,
+				(unsigned long)__builtin_frame_address(0),
+				(unsigned long)arch_stack_walk_reliable);
+	else
+		start_backtrace(&frame, thread_saved_fp(task),
+				thread_saved_pc(task));
+
+	while (1) {
+		int ret;
+
+#ifdef CONFIG_KPROBES
+		/*
+		 * A kretprobe trampoline on the stack means the real
+		 * return address was replaced: mark the trace unreliable.
+		 */
+		if (frame.pc == (unsigned long)kretprobe_trampoline)
+			return -EINVAL;
+#endif
+
+		if (!consume_entry(cookie, frame.pc))
+			return -EINVAL; /* caller rejected an entry */
+		ret = unwind_frame(task, &frame);
+		if (ret == -ENOENT)
+			return 0; /* final frame record: walk completed */
+		if (ret < 0)
+			return ret;
+	}
+}
+
 #endif