x86/paravirt: optimize pte_val/make_pte/etc. using asm goto
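
pte_val(), __pte() and friends are identity functions on native
hardware, but under CONFIG_PARAVIRT they are emitted as patchable
calls: even after the call site has been patched to the inline
identity, the argument and result stay pinned to the fixed calling
registers, which gets in the way of the optimizer.

With asm goto we can do better.  paravirt_is_nop() plants a patchable
jmp in a .parainstructions site.  For ops whose .nop_ident slot is
paravirt_nop (the identity ops on native), apply_paravirt() nops the
jmp out, so PVOP_LIKELY_IDENT() falls through and uses the value
directly as plain C, with no call and no register constraints at all.
When a hypervisor installs a real handler, the slot holds the new
_paravirt_ignore sentinel instead; the call site is then left
unpatched (paravirt_patch_ignore), the jmp is taken and we fall back
to an ordinary PVOP_CALL of op.func.  Since that path uses the normal
call clobbers, the callee-save register thunks are no longer needed.

paravirt_likely_ident grows a second member to carry the nop/ignore
marker next to the real function; compilers without asm goto keep the
existing PVOP_CALLEE path unchanged.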

Signed-off-by: Anders Kaseorg <anders.kaseorg@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
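---
Not part of the commit: a minimal user-space sketch of the asm goto
patching technique used above, assuming x86-64 Linux and a gcc/clang
with asm goto support.  op_is_identity(), patch_nop() and the
site_start/site_end labels are made up for the illustration and have
no kernel counterpart; the patch site is recorded with global asm
labels instead of a .parainstructions entry, and "patched" with
mprotect() at run time.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Bounds of the patchable jmp, defined by the asm labels below. */
extern uint8_t site_start[], site_end[];

/*
 * Rough analogue of paravirt_is_nop(): emit a jmp to the slow label.
 * While the jmp is in place this returns false; once the site has been
 * overwritten with NOPs it falls through and returns true.
 */
static __attribute__((noinline)) bool op_is_identity(void)
{
	asm goto("site_start:\n\t"
		 "jmp %l[slow]\n"
		 "site_end:"
		 : : : : slow);
	return true;
slow:
	return false;
}

/* Rough analogue of paravirt_patch_nop(): overwrite the jmp with NOPs. */
static void patch_nop(uint8_t *start, uint8_t *end)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	uint8_t *page = (uint8_t *)((uintptr_t)start & ~(uintptr_t)(pagesize - 1));

	/* The text mapping is read-only; make it writable for the poke. */
	if (mprotect(page, end - page, PROT_READ | PROT_WRITE | PROT_EXEC)) {
		perror("mprotect");
		return;
	}
	memset(start, 0x90, end - start);	/* 0x90 = one-byte NOP */
	mprotect(page, end - page, PROT_READ | PROT_EXEC);
}

int main(void)
{
	printf("before patching: %d\n", op_is_identity());	/* prints 0 */
	patch_nop(site_start, site_end);
	printf("after patching:  %d\n", op_is_identity());	/* prints 1 */
	return 0;
}

In the kernel the same decision is driven by the .parainstructions
table and apply_paravirt()/paravirt_patch_nop() rather than mprotect().
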
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 6a80c92..5724a76 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -13,6 +13,21 @@
 #include <linux/types.h>
 #include <linux/cpumask.h>
 
+/*
+ * True if the paravirt op of the given type has been patched to a nop:
+ * the "jmp l_false" below lives in a .parainstructions site of that type,
+ * so when the op is nopped out, the jmp goes with it and we fall through.
+ */
+static __always_inline bool paravirt_is_nop(u8 type)
+{
+	asm goto(paravirt_alt("jmp %l[l_false]")
+		 : : [paravirt_typenum] "i" (type), paravirt_clobber(CLBR_NONE)
+		 : : l_false);
+	return true;
+l_false:
+	return false;
+}
+
 static inline int paravirt_enabled(void)
 {
 	return pv_info.paravirt_enabled;
@@ -792,10 +807,20 @@
 #define __PV_IS_CALLEE_SAVE(func)			\
 	((struct paravirt_callee_save) { func })
 
+#ifdef CC_HAVE_ASM_GOTO
+#define PV_NOT_IDENT_REGS_THUNK(func)
+#define PV_NOT_IDENT(func)						\
+	((paravirt_likely_ident) { func, _paravirt_ignore })
+#define PV_IDENT_32							\
+	((paravirt_likely_ident) { _paravirt_ident_32, paravirt_nop })
+#define PV_IDENT_64							\
+	((paravirt_likely_ident) { _paravirt_ident_64, paravirt_nop })
+#else
 #define PV_NOT_IDENT_REGS_THUNK(func) PV_CALLEE_SAVE_REGS_THUNK(func)
 #define PV_NOT_IDENT(func) PV_CALLEE_SAVE(func)
 #define PV_IDENT_32 __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
 #define PV_IDENT_64 __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
+#endif
 
 static inline notrace unsigned long arch_local_save_flags(void)
 {
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 0b90b10..0b21866 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -60,7 +60,19 @@
 	void *func;
 };
 
+#ifdef CC_HAVE_ASM_GOTO
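+/*
+ * An op that is likely the identity: @func is the real handler, and
+ * @nop_ident is paravirt_nop when @func is the identity (so the asm goto
+ * call site can be nopped out) or _paravirt_ignore when it must be called.
+ */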
+typedef struct {
+	void *func;
+	void *nop_ident;
+} paravirt_likely_ident;
+#else
 typedef struct paravirt_callee_save paravirt_likely_ident;
+#endif
 
 /* general info */
 struct pv_info {
@@ -669,10 +681,19 @@
 		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
 #endif
 
+#ifdef CC_HAVE_ASM_GOTO
+#define PVOP_LIKELY_IDENT(rettype, op, arg)				\
+	(paravirt_is_nop(PARAVIRT_PATCH(op.nop_ident)) ?		\
+	 (rettype)(arg) :						\
+	 sizeof(rettype) > sizeof(long) ?				\
+	 PVOP_CALL2(rettype, op.func, arg, (u64)(arg) >> 32) :		\
+	 PVOP_CALL1(rettype, op.func, arg))
+#else
 #define PVOP_LIKELY_IDENT(rettype, op, arg)				\
 	(sizeof(rettype) > sizeof(long) ?				\
 	 PVOP_CALLEE2(rettype, op, arg, (u64)(arg) >> 32) :		\
 	 PVOP_CALLEE1(rettype, op, arg))
+#endif
 
 
 /* Lazy mode for batching updates / context switch */
@@ -690,6 +711,7 @@
 void paravirt_leave_lazy_mmu(void);
 
 void _paravirt_nop(void);
+void _paravirt_ignore(void);
 u32 _paravirt_ident_32(u32);
 u64 _paravirt_ident_64(u64);
 
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 712688a..55fb7e2 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -43,6 +43,15 @@
 {
 }
 
+/*
+ * Sentinel for asm goto sites that must be left unpatched; it is only
+ * compared against by the patcher and should never actually be called.
+ */
+void _paravirt_ignore(void)
+{
+	BUG();
+}
+
 /* identity function, which can be inlined */
 u32 _paravirt_ident_32(u32 x)
 {
@@ -148,6 +157,8 @@
 	else if (opfunc == _paravirt_nop)
 		/* If the operation is a nop, then nop the callsite */
 		ret = paravirt_patch_nop();
+	else if (opfunc == _paravirt_ignore)
+		ret = paravirt_patch_ignore(len);
 
 	/* identity functions just return their single argument */
 	else if (opfunc == _paravirt_ident_32)