| From ea23acc752f131163b7b23c06453f478e5991981 Mon Sep 17 00:00:00 2001 |
| From: Ben Widawsky <ben@bwidawsk.net> |
| Date: Tue, 28 May 2013 19:22:27 -0700 |
| Subject: drm/i915: make PM interrupt writes non-destructive |
| |
| PM interrupts have an expanded role on HSW. They help route the EBOX |
| interrupts. This patch is necessary to make the existing code, which |
| touches the mask and enable registers, more friendly to other code paths |
| that will also need these registers. |
| |
| To be more explicit: |
| At preinstall all interrupts are masked and disabled. This implies that |
| preinstall should always happen before any enabling/disabling of RPS or |
| other interrupts. |
| |
| The PMIMR is touched by the workqueue, so the enable/disable paths touch |
| IER and IIR. Similarly, the code currently expects that IMR has no use |
| outside of the RPS related interrupts, so it unconditionally sets 0, or |
| ~0. We could use IER in the workqueue, and IMR elsewhere, but since the |
| workqueue use-case is more transient the existing usage makes sense. |
| |
| Disable RPS events: |
| IER := IER & ~GEN6_PM_RPS_EVENTS // Disable RPS related interrupts |
| IIR := GEN6_PM_RPS_EVENTS // Clear any outstanding interrupts |
| |
| Enable RPS events: |
| IER := IER | GEN6_PM_RPS_EVENTS // Enable the RPS related interrupts |
| IIR := GEN6_PM_RPS_EVENTS // Make sure there were no leftover events |
| (really shouldn't happen) |
| |
| v2: Shouldn't destroy PMIIR or PMIMR VEBOX interrupt state in |
| enable/disable rps functions (Haihao) |
| |
| v3: Bug found by Chris where we were clearing the wrong bits at rps |
| disable. |
| expanded commit message |
| |
| v4: v3 was based off the wrong branch |
| |
| v5: Added the setting of PMIMR because of previous patch update |
| |
| CC: Chris Wilson <chris@chris-wilson.co.uk> |
| Signed-off-by: Ben Widawsky <ben@bwidawsk.net> |
| Reviewed-by: Damien Lespiau <damien.lespiau@intel.com> |
| Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch> |
| (cherry picked from commit 4848405cced3b46f4ec7d404b8ed5873171ae10a) |
| Signed-off-by: Darren Hart <dvhart@linux.intel.com> |
| --- |
| drivers/gpu/drm/i915/i915_irq.c | 21 +++++++++++---------- |
| drivers/gpu/drm/i915/i915_reg.h | 2 +- |
| drivers/gpu/drm/i915/intel_pm.c | 13 +++++++------ |
| 3 files changed, 19 insertions(+), 17 deletions(-) |
| |
| --- a/drivers/gpu/drm/i915/i915_irq.c |
| +++ b/drivers/gpu/drm/i915/i915_irq.c |
| @@ -691,10 +691,11 @@ static void gen6_pm_rps_work(struct work |
| pm_iir = dev_priv->rps.pm_iir; |
| dev_priv->rps.pm_iir = 0; |
| pm_imr = I915_READ(GEN6_PMIMR); |
| - I915_WRITE(GEN6_PMIMR, 0); |
| + /* Make sure not to corrupt PMIMR state used by ringbuffer code */ |
| + I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS); |
| spin_unlock_irq(&dev_priv->rps.lock); |
| |
| - if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) |
| + if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0) |
| return; |
| |
| mutex_lock(&dev_priv->rps.hw_lock); |
| @@ -924,17 +925,17 @@ static void hsw_pm_irq_handler(struct dr |
| unsigned long flags; |
| |
| spin_lock_irqsave(&dev_priv->rps.lock, flags); |
| - dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_DEFERRED_EVENTS; |
| + dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; |
| if (dev_priv->rps.pm_iir) { |
| I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); |
| /* never want to mask useful interrupts. (also posting read) */ |
| - WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_DEFERRED_EVENTS); |
| + WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); |
| /* TODO: if queue_work is slow, move it out of the spinlock */ |
| queue_work(dev_priv->wq, &dev_priv->rps.work); |
| } |
| spin_unlock_irqrestore(&dev_priv->rps.lock, flags); |
| |
| - if (pm_iir & ~GEN6_PM_DEFERRED_EVENTS) |
| + if (pm_iir & ~GEN6_PM_RPS_EVENTS) |
| DRM_ERROR("Unexpected PM interrupted\n"); |
| } |
| |
| @@ -1009,7 +1010,7 @@ static irqreturn_t valleyview_irq_handle |
| if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) |
| gmbus_irq_handler(dev); |
| |
| - if (pm_iir & GEN6_PM_DEFERRED_EVENTS) |
| + if (pm_iir & GEN6_PM_RPS_EVENTS) |
| gen6_queue_rps_work(dev_priv, pm_iir); |
| |
| I915_WRITE(GTIIR, gt_iir); |
| @@ -1250,7 +1251,7 @@ static irqreturn_t ivybridge_irq_handler |
| if (pm_iir) { |
| if (IS_HASWELL(dev)) |
| hsw_pm_irq_handler(dev_priv, pm_iir); |
| - else if (pm_iir & GEN6_PM_DEFERRED_EVENTS) |
| + else if (pm_iir & GEN6_PM_RPS_EVENTS) |
| gen6_queue_rps_work(dev_priv, pm_iir); |
| I915_WRITE(GEN6_PMIIR, pm_iir); |
| ret = IRQ_HANDLED; |
| @@ -1365,7 +1366,7 @@ static irqreturn_t ironlake_irq_handler( |
| if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) |
| ironlake_handle_rps_change(dev); |
| |
| - if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) |
| + if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS) |
| gen6_queue_rps_work(dev_priv, pm_iir); |
| |
| I915_WRITE(GTIIR, gt_iir); |
| @@ -2753,8 +2754,8 @@ static int ivybridge_irq_postinstall(str |
| POSTING_READ(GTIER); |
| |
| /* Power management */ |
| - I915_WRITE(GEN6_PMIMR, ~GEN6_PM_DEFERRED_EVENTS); |
| - I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS); |
| + I915_WRITE(GEN6_PMIMR, ~GEN6_PM_RPS_EVENTS); |
| + I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS); |
| POSTING_READ(GEN6_PMIMR); |
| |
| ibx_irq_postinstall(dev); |
| --- a/drivers/gpu/drm/i915/i915_reg.h |
| +++ b/drivers/gpu/drm/i915/i915_reg.h |
| @@ -4578,7 +4578,7 @@ |
| #define GEN6_PM_RP_DOWN_THRESHOLD (1<<4) |
| #define GEN6_PM_RP_UP_EI_EXPIRED (1<<2) |
| #define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1) |
| -#define GEN6_PM_DEFERRED_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \ |
| +#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \ |
| GEN6_PM_RP_DOWN_THRESHOLD | \ |
| GEN6_PM_RP_DOWN_TIMEOUT) |
| |
| --- a/drivers/gpu/drm/i915/intel_pm.c |
| +++ b/drivers/gpu/drm/i915/intel_pm.c |
| @@ -3116,7 +3116,7 @@ static void gen6_disable_rps(struct drm_ |
| I915_WRITE(GEN6_RC_CONTROL, 0); |
| I915_WRITE(GEN6_RPNSWREQ, 1 << 31); |
| I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); |
| - I915_WRITE(GEN6_PMIER, 0); |
| + I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS); |
| /* Complete PM interrupt masking here doesn't race with the rps work |
| * item again unmasking PM interrupts because that is using a different |
| * register (PMIMR) to mask PM interrupts. The only risk is in leaving |
| @@ -3126,7 +3126,7 @@ static void gen6_disable_rps(struct drm_ |
| dev_priv->rps.pm_iir = 0; |
| spin_unlock_irq(&dev_priv->rps.lock); |
| |
| - I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); |
| + I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); |
| } |
| |
| static void valleyview_disable_rps(struct drm_device *dev) |
| @@ -3307,14 +3307,15 @@ static void gen6_enable_rps(struct drm_d |
| gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); |
| |
| /* requires MSI enabled */ |
| - I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS); |
| + I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS); |
| spin_lock_irq(&dev_priv->rps.lock); |
| /* FIXME: Our interrupt enabling sequence is bonghits. |
| * dev_priv->rps.pm_iir really should be 0 here. */ |
| dev_priv->rps.pm_iir = 0; |
| - I915_WRITE(GEN6_PMIMR, 0); |
| + I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); |
| + I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); |
| spin_unlock_irq(&dev_priv->rps.lock); |
| - /* enable all PM interrupts */ |
| + /* unmask all PM interrupts */ |
| I915_WRITE(GEN6_PMINTRMSK, 0); |
| |
| rc6vids = 0; |
| @@ -3577,7 +3578,7 @@ static void valleyview_enable_rps(struct |
| valleyview_set_rps(dev_priv->dev, rpe); |
| |
| /* requires MSI enabled */ |
| - I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS); |
| + I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS); |
| spin_lock_irq(&dev_priv->rps.lock); |
| WARN_ON(dev_priv->rps.pm_iir != 0); |
| I915_WRITE(GEN6_PMIMR, 0); |