[WIP] KVM: arm64: Support out-of-band injection of the Apple M1 guest virtual timer

The Apple M1 has no architected way of presenting the guest virtual
timer interrupt through a GICv3 List Register. Instead, it exposes an
IMPDEF "virtual timer List Register" (SYS_APL_VTIMER_LR_EL2), gated by
an IMPDEF HACR_EL2 bit, and delivers the timer to the host as a FIQ.

Teach the vgic about interrupts that are injected "out of band" (OOB),
without ever occupying a List Register:

- add optional flush_oob_state()/sync_oob_state() callbacks to struct
  irq_ops, invoked on guest entry and exit for such interrupts

- add a per-vcpu OOB list holding these interrupts while they are
  neither Active nor Pending, and migrate them to/from the AP list as
  their state changes

- split vgic_v3_populate_lr() so that the LR value can be computed
  without targeting an actual LR (vgic_v3_compute_lr())

On top of this, wire up the M1 virtual timer: program the IMPDEF
register on guest entry and recover the active state on exit, take the
guest's FIQs at EL2 (the el1_fiq vectors are now valid and handled
like IRQs), save/restore CNTV_CTL/CNTV_CVAL around the world switch as
the timer cannot stay loaded while running the host, and give the AIC
FIQ irqchip a dummy irq_set_irqchip_state() callback so that the
vgic's attempts at updating the host interrupt state don't fail.

Guest access to the EL1 physical timer is not enabled yet, as there is
no way to deactivate it properly.

Still very much a work in progress.

Signed-off-by: Marc Zyngier <maz@kernel.org>
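---

Not part of the patch, just an illustration for reviewers: a minimal
sketch of how an in-kernel device is expected to hook into the new
out-of-band injection path. All my_dev_* names are hypothetical
placeholders; the Apple M1 virtual timer code in arch_timer.c is the
only real user introduced here.

static bool my_dev_get_input_level(int vintid)
{
	/* Sample the device: is the level interrupt line asserted? */
	return false;
}

static void my_dev_flush_oob(struct vgic_irq *irq)
{
	/*
	 * Guest entry: push the would-be LR content (see
	 * vgic_v3_compute_lr()) into whatever IMPDEF injection
	 * mechanism the device provides, instead of using a real
	 * GICv3 List Register.
	 */
}

static void my_dev_sync_oob(struct vgic_irq *irq)
{
	/*
	 * Guest exit: read back whatever state the hardware still
	 * exposes (the M1 vtimer only preserves the active bit) and
	 * update irq->active / irq->pending_latch accordingly.
	 */
}

static struct irq_ops my_dev_irq_ops = {
	.get_input_level = my_dev_get_input_level,
	.flush_oob_state = my_dev_flush_oob,
	.sync_oob_state	 = my_dev_sync_oob,
};

Registration is unchanged; passing these ops to kvm_vgic_map_phys_irq()
is what parks the interrupt on the new per-vcpu OOB list (when
sync_oob_state() is provided) and keeps it away from the List Registers.
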
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index d134595..cf4305b 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -475,6 +475,7 @@
 #define SYS_PMCCFILTR_EL0		sys_reg(3, 3, 14, 15, 7)
 
 #define SYS_SCTLR_EL2			sys_reg(3, 4, 1, 0, 0)
+#define SYS_HACR_EL2			sys_reg(3, 4, 1, 1, 7)
 #define SYS_ZCR_EL2			sys_reg(3, 4, 1, 2, 0)
 #define SYS_TRFCR_EL2			sys_reg(3, 4, 1, 2, 1)
 #define SYS_DACR32_EL2			sys_reg(3, 4, 3, 0, 0)
diff --git a/arch/arm64/include/asm/sysreg_apple.h b/arch/arm64/include/asm/sysreg_apple.h
index 2068a34..7de1473 100644
--- a/arch/arm64/include/asm/sysreg_apple.h
+++ b/arch/arm64/include/asm/sysreg_apple.h
@@ -48,6 +48,9 @@
 #define SYS_APL_IPI_SR			sys_reg(3, 5, 15, 1, 1)
 #define IPI_SR_PENDING			BIT(0)
 
+/* Guest virtual timer List Register */
+#define SYS_APL_VTIMER_LR_EL2		sys_reg(3, 5, 15, 1, 2)
+
 /* Guest timer FIQ mask register */
 #define SYS_APL_VM_TMR_MASK		sys_reg(3, 5, 15, 1, 3)
 #define VM_TMR_MASK_V			BIT(0)
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index e2288b6b..a91184a 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -15,10 +15,13 @@
 #include <asm/arch_timer.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
+#include <asm/sysreg_apple.h>
 
 #include <kvm/arm_vgic.h>
 #include <kvm/arm_arch_timer.h>
 
+#include "vgic/vgic.h"
+
 #include "trace.h"
 
 static struct timecounter *timecounter;
@@ -145,7 +148,7 @@ u64 kvm_phys_timer_read(void)
 
 static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
 {
-	if (has_vhe()) {
+	if (has_vhe() && !kvm_vgic_is_apple_m1()) {
 		map->direct_vtimer = vcpu_vtimer(vcpu);
 		map->direct_ptimer = vcpu_ptimer(vcpu);
 		map->emul_ptimer = NULL;
@@ -511,7 +514,7 @@ static void timer_restore_state(struct arch_timer_context *ctx)
 
 	local_irq_save(flags);
 
-	if (ctx->loaded)
+	if (ctx->loaded || kvm_vgic_is_apple_m1())
 		goto out;
 
 	switch (index) {
@@ -1116,10 +1119,50 @@ bool kvm_arch_timer_get_input_level(int vintid)
 	return kvm_timer_should_fire(timer);
 }
 
+static void apl_vtimer_flush_state(struct vgic_irq *irq)
+{
+	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
+	u64 val = vgic_v3_compute_lr(vcpu, irq);
+
+	/*
+	 * If the physical INTID is different from the virtual one,
+	 * odd things happen (screaming maintenance interrupt...).
+	 */
+	val &= ~ICH_LR_PHYS_ID_MASK;
+	val |= FIELD_PREP(ICH_LR_PHYS_ID_MASK, (val & 0x3ff));
+	write_sysreg_s(val, SYS_APL_VTIMER_LR_EL2);
+	if (!irq->enabled)
+		disable_percpu_irq(irq->host_irq);
+}
+
+static void apl_vtimer_sync_state(struct vgic_irq *irq)
+{
+	u64 val;
+
+	/*
+	 * By the time we read the funky LR, the timer has been
+	 * disabled, so the only piece of state we can save is the
+	 * active state. We can't restore the pending bit anyway, so
+	 * we aren't missing much...
+	 */
+	val = read_sysreg_s(SYS_APL_VTIMER_LR_EL2);
+	irq->active = !!(val & ICH_LR_ACTIVE_BIT);
+
+	/* Reenable the interrupt that we disabled on entry */
+	if (!irq->enabled)
+		enable_percpu_irq(irq->host_irq, host_vtimer_irq_flags);
+}
+
 static struct irq_ops arch_timer_irq_ops = {
 	.get_input_level = kvm_arch_timer_get_input_level,
 };
 
+static struct irq_ops apl_vtimer_irq_ops = {
+	.get_input_level = kvm_arch_timer_get_input_level,
+	.sync_oob_state  = apl_vtimer_sync_state,
+	.flush_oob_state = apl_vtimer_flush_state,
+};
+
 int kvm_timer_enable(struct kvm_vcpu *vcpu)
 {
 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
@@ -1147,7 +1190,8 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 	ret = kvm_vgic_map_phys_irq(vcpu,
 				    map.direct_vtimer->host_timer_irq,
 				    map.direct_vtimer->irq.irq,
-				    &arch_timer_irq_ops);
+				    (kvm_vgic_is_apple_m1() ?
+				     &apl_vtimer_irq_ops : &arch_timer_irq_ops));
 	if (ret)
 		return ret;
 
@@ -1178,6 +1222,19 @@ void kvm_timer_init_vhe(void)
 	u32 cnthctl_shift = 10;
 	u64 val;
 
+	if (kvm_vgic_is_apple_m1()) {
+		/*
+		 * Enable direct injection of the virtual timer, but
+		 * don't allow guest access to the physical timer, as
+		 * we have no way to deactivate it properly yet.
+		 */
+		u64 hacr = read_sysreg_s(SYS_HACR_EL2);
+		hacr |= BIT_ULL(20);
+		write_sysreg_s(hacr, SYS_HACR_EL2);
+		isb();
+		return;
+	}
+
 	/*
 	 * VHE systems allow the guest direct access to the EL1 physical
 	 * timer/counter.
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 5f49df4..9aa9b734 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -76,6 +76,7 @@
 	b	__guest_exit
 
 el1_irq:
+el1_fiq:
 	get_vcpu_ptr	x1, x0
 	mov	x0, #ARM_EXCEPTION_IRQ
 	b	__guest_exit
@@ -131,7 +132,6 @@
 	invalid_vector	el2t_error_invalid
 	invalid_vector	el2h_irq_invalid
 	invalid_vector	el2h_fiq_invalid
-	invalid_vector	el1_fiq_invalid
 
 	.ltorg
 
@@ -179,12 +179,12 @@
 
 	valid_vect	el1_sync		// Synchronous 64-bit EL1
 	valid_vect	el1_irq			// IRQ 64-bit EL1
-	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
+	valid_vect	el1_fiq			// FIQ 64-bit EL1
 	valid_vect	el1_error		// Error 64-bit EL1
 
 	valid_vect	el1_sync		// Synchronous 32-bit EL1
 	valid_vect	el1_irq			// IRQ 32-bit EL1
-	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
+	valid_vect	el1_fiq			// FIQ 32-bit EL1
 	valid_vect	el1_error		// Error 32-bit EL1
 SYM_CODE_END(__kvm_hyp_vector)
 
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index ee3682b..32eca95 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -300,6 +300,7 @@ void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
 	 * injected,
 	 */
 	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
+	    kvm_vgic_is_apple_m1() ||
 	    cpu_if->its_vpe.its_vm)
 		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
 }
@@ -326,6 +327,7 @@ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
 	 * no interrupts were being injected, and we disable it again here.
 	 */
 	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
+	    kvm_vgic_is_apple_m1() ||
 	    cpu_if->its_vpe.its_vm)
 		write_gicreg(0, ICH_HCR_EL2);
 }
diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
index 2a0b8c8..a8c7d6e 100644
--- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -32,6 +32,14 @@ NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
 
 void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
 {
+	if (kvm_vgic_is_apple_m1()) {
+		ctxt_sys_reg(ctxt, CNTV_CVAL_EL0) = read_sysreg_el0(SYS_CNTV_CVAL);
+		ctxt_sys_reg(ctxt, CNTV_CTL_EL0) = read_sysreg_el0(SYS_CNTV_CTL);
+		write_sysreg_el0(0, SYS_CNTV_CTL);
+
+		/* Make sure the timer is disabled before we mess with TGE */
+		isb();
+	}
 	__sysreg_save_common_state(ctxt);
 	__sysreg_save_el2_return_state(ctxt);
 }
@@ -45,6 +53,13 @@ NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
 
 void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
 {
+	if (kvm_vgic_is_apple_m1()) {
+		/* Make sure we're in Guest mode before the timer can fire */
+		isb();
+
+		write_sysreg_el0(ctxt_sys_reg(ctxt, CNTV_CVAL_EL0), SYS_CNTV_CVAL);
+		write_sysreg_el0(ctxt_sys_reg(ctxt, CNTV_CTL_EL0), SYS_CNTV_CTL);
+	}
 	__sysreg_restore_common_state(ctxt);
 	__sysreg_restore_el2_return_state(ctxt);
 }
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index e129ce0..d5c969a 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -192,6 +192,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
 
 	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
+	INIT_LIST_HEAD(&vgic_cpu->oob_list_head);
 	raw_spin_lock_init(&vgic_cpu->ap_list_lock);
 	atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);
 
@@ -362,6 +363,7 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 	vgic_flush_pending_lpis(vcpu);
 
 	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
+	INIT_LIST_HEAD(&vgic_cpu->oob_list_head);
 }
 
 /* To be called with kvm->lock held */
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index dd49b75..7b4fe0d 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -115,7 +115,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 }
 
 /* Requires the irq to be locked already */
-void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
+u64 vgic_v3_compute_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
 {
 	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	u64 val = irq->intid;
@@ -169,7 +169,7 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 
 			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
 					   irq->intid))
-				return;
+				return ~0ULL;
 
 			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
 			irq->source &= ~(1 << (src - 1));
@@ -194,6 +194,16 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 
 	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;
 
+	return val;
+}
+
+/* Requires the irq to be locked already */
+void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
+{
+	u64 val = vgic_v3_compute_lr(vcpu, irq);
+
+	if (val == ~0ULL)
+		return;
 	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
 }
 
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 1d22365..b7eab22 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -310,6 +310,9 @@ static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
  */
 static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
 {
+	if (unlikely(irq->ops && irq->ops->flush_oob_state))
+		return false;
+
 	if (irq->owner != owner)
 		return false;
 
@@ -515,14 +518,24 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
 			  u32 vintid, struct irq_ops *ops)
 {
 	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	unsigned long flags;
 	int ret;
 
 	BUG_ON(!irq);
 
-	raw_spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
+
+	raw_spin_lock(&irq->irq_lock);
 	ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops);
-	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock(&irq->irq_lock);
+
+	if (irq->ops && irq->ops->sync_oob_state) {
+		INIT_LIST_HEAD(&irq->ap_list);
+		list_add(&irq->ap_list, &vgic_cpu->oob_list_head);
+	}
+
+	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
 	vgic_put_irq(vcpu->kvm, irq);
 
 	return ret;
@@ -633,6 +646,9 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 
 		BUG_ON(vcpu != irq->vcpu);
 
+		if (irq->ops && irq->ops->sync_oob_state)
+			irq->ops->sync_oob_state(irq);
+
 		target_vcpu = vgic_target_oracle(irq);
 
 		if (!target_vcpu) {
@@ -642,6 +658,9 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 			 */
 			list_del(&irq->ap_list);
 			irq->vcpu = NULL;
+			if (irq->ops && irq->ops->sync_oob_state)
+				list_add_tail(&irq->ap_list,
+					      &vgic_cpu->oob_list_head);
 			raw_spin_unlock(&irq->irq_lock);
 
 			/*
@@ -812,7 +831,14 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 		}
 
 		if (likely(vgic_target_oracle(irq) == vcpu)) {
-			vgic_populate_lr(vcpu, irq, count++);
+			/*
+			 * IRQs that use an IMPDEF injection method
+			 * don't occupy a List Register.
+			 */
+			if (irq->ops && irq->ops->flush_oob_state)
+				irq->ops->flush_oob_state(irq);
+			else
+				vgic_populate_lr(vcpu, irq, count++);
 
 			if (irq->source)
 				prio = irq->priority;
@@ -856,13 +882,71 @@ static inline void vgic_save_state(struct kvm_vcpu *vcpu)
 		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
 }
 
+static void vgic_sync_oob_hwstate(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct vgic_irq *irq;
+
+	if (list_empty(&vgic_cpu->oob_list_head))
+		return;
+
+	raw_spin_lock(&vgic_cpu->ap_list_lock);
+
+	list_for_each_entry(irq, &vgic_cpu->oob_list_head, ap_list) {
+		raw_spin_lock(&irq->irq_lock);
+
+		/* If the interrupt is on the OOB list, it must be invalid */
+		WARN_ON(irq->active || irq_is_pending(irq));
+
+		if (irq->ops && irq->ops->sync_oob_state)
+			irq->ops->sync_oob_state(irq);
+
+		/* The state may now be valid; vgic_merge_oob_irqs() will deal with it */
+		raw_spin_unlock(&irq->irq_lock);
+	}
+
+	raw_spin_unlock(&vgic_cpu->ap_list_lock);
+}
+
+static void vgic_merge_oob_irqs(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct vgic_irq *irq, *tmp;
+
+	if (list_empty(&vgic_cpu->oob_list_head))
+		return;
+
+	raw_spin_lock(&vgic_cpu->ap_list_lock);
+
+	list_for_each_entry_safe(irq, tmp, &vgic_cpu->oob_list_head, ap_list) {
+		raw_spin_lock(&irq->irq_lock);
+
+		/*
+		 * If we've acquired an A|P state, move it to the
+		 * temporary list while mimicking what the normal
+		 * injection process would have done.
+		 */
+		if (irq->active || irq_is_pending(irq)) {
+			list_del(&irq->ap_list);
+			irq->vcpu = vcpu;
+			vgic_get_irq_kref(irq);
+			list_add_tail(&irq->ap_list, &vgic_cpu->ap_list_head);
+		}
+
+		raw_spin_unlock(&irq->irq_lock);
+	}
+
+	raw_spin_unlock(&vgic_cpu->ap_list_lock);
+}
+
 /* Sync back the hardware VGIC state into our emulation after a guest's run. */
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
 	int used_lrs;
 
 	/* An empty ap_list_head implies used_lrs == 0 */
-	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
+	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
+	    list_empty(&vcpu->arch.vgic_cpu.oob_list_head))
 		return;
 
 	if (can_access_vgic_from_kernel())
@@ -875,7 +959,10 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 
 	if (used_lrs)
 		vgic_fold_lr_state(vcpu);
+
+	vgic_sync_oob_hwstate(vcpu);
 	vgic_prune_ap_list(vcpu);
+	vgic_merge_oob_irqs(vcpu);
 }
 
 static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
@@ -886,9 +973,36 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
 		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
 }
 
+static void vgic_flush_oob_hwstate(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	struct vgic_irq *irq;
+
+	if (list_empty(&vgic_cpu->oob_list_head))
+		return;
+
+	raw_spin_lock(&vgic_cpu->ap_list_lock);
+
+	list_for_each_entry(irq, &vgic_cpu->oob_list_head, ap_list) {
+		raw_spin_lock(&irq->irq_lock);
+
+		/* If the interrupt is on the OOB list, it must be invalid */
+		WARN_ON(irq->active || irq_is_pending(irq));
+
+		if (irq->ops && irq->ops->flush_oob_state)
+			irq->ops->flush_oob_state(irq);
+
+		raw_spin_unlock(&irq->irq_lock);
+	}
+
+	raw_spin_unlock(&vgic_cpu->ap_list_lock);
+}
+
 /* Flush our emulation state into the GIC hardware before entering the guest. */
 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 {
+	vgic_flush_oob_hwstate(vcpu);
+
 	/*
 	 * If there are no virtual interrupts active or pending for this
 	 * VCPU, then there is no work to do and we can bail out without
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 64fcd75..6f27c34 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -207,6 +207,7 @@ static inline void vgic_get_irq_kref(struct vgic_irq *irq)
 	kref_get(&irq->refcount);
 }
 
+u64 vgic_v3_compute_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq);
 void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
@@ -318,4 +319,6 @@ int vgic_v4_init(struct kvm *kvm);
 void vgic_v4_teardown(struct kvm *kvm);
 void vgic_v4_configure_vsgis(struct kvm *kvm);
 
+void vgic_v3_flush_impdef_hwstate(struct kvm_vcpu *vcpu);
+
 #endif
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index c48a350..7a611b9 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -343,6 +343,12 @@ static int aic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
 	return 0;
 }
 
+static int aic_irq_set_irqchip_state(struct irq_data *data,
+				     enum irqchip_irq_state which, bool state)
+{
+	return 0;
+}
+
 static struct irq_chip fiq_chip = {
 	.name = "AIC-FIQ",
 	.irq_mask = aic_fiq_mask,
@@ -351,6 +357,7 @@ static struct irq_chip fiq_chip = {
 	.irq_eoi = aic_fiq_eoi,
 	.irq_set_type = aic_irq_set_type,
 	.irq_set_vcpu_affinity = aic_irq_set_vcpu_affinity,
+	.irq_set_irqchip_state = aic_irq_set_irqchip_state,
 };
 
 /*
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 084a112..95bd091 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -95,6 +95,8 @@ enum vgic_irq_config {
 	VGIC_CONFIG_LEVEL
 };
 
+struct vgic_irq;
+
 /*
  * Per-irq ops overriding some common behaviours.
  *
@@ -108,6 +110,13 @@ struct irq_ops {
 	 * peeking into the physical GIC.
 	 */
 	bool (*get_input_level)(int vintid);
+
+	/*
+	 * Callbacks for in-kernel devices that inject this IRQ using an
+	 * IMPDEF, out-of-band method instead of a List Register.
+	 */
+	void (*flush_oob_state)(struct vgic_irq *irq);
+	void (*sync_oob_state)(struct vgic_irq *irq);
 };
 
 struct vgic_irq {
@@ -327,6 +336,35 @@ struct vgic_cpu {
 	 * VCPU.
 	 */
 	struct list_head ap_list_head;
+	/*
+	 * List of IRQs that need special treatment because they do not
+	 * strictly follow the GIC state machine (most likely because they use
+	 * Out-Of-Band signalling, hence the name of the list), and can
+	 * enter the AP list from a state other than just Pending.
+	 *
+	 * As they enter a standard state, they must be moved to the AP
+	 * list. When they are neither Active nor Pending, they must be
+	 * moved back to the OOB list. From the vgic_irq perspective, this
+	 * is done using the ap_list list_head, and both lists are protected by
+	 * the same lock.
+	 *
+	 * The rules to move between the two lists are:
+	 *
+	 * * On guest entry:
+	 *   - Each interrupt on the OOB list has its flush_oob_state()
+	 *     helper called.
+	 *   - Each interrupt on the AP list that provides a flush_oob_state()
+	 *     helper has it called as well.
+	 *
+	 * * On guest exit:
+	 *    - Each interrupt on the AP list that provides a sync_oob_state()
+	 *	helper has it called. If the new state is invalid,
+	 *	it moves back to the OOB list.
+	 *    - Each interrupt on the OOB list has its sync_oob_state()
+	 *	helper called. If any of the A/P state is valid, it moves
+	 *	to the AP list.
+	 */
+	struct list_head oob_list_head;
 
 	/*
 	 * Members below are used with GICv3 emulation only and represent
@@ -397,6 +435,15 @@ static inline int kvm_vgic_get_max_vcpus(void)
 	return kvm_vgic_global_state.max_gic_vcpus;
 }
 
+/*
+ * Canard du Jour (Shut Up and Play Yer Guitar)
+ */
+static inline bool kvm_vgic_is_apple_m1(void)
+{
+	return (static_branch_unlikely(&kvm_vgic_global_state.gicv3_impdef) &&
+		kvm_vgic_global_state.gic_type == APL_VGIC_V3);
+}
+
 /**
  * kvm_vgic_setup_default_irq_routing:
  * Setup a default flat gsi routing table mapping all SPIs