| From 08bb633ed0667bcf1c2479eb7811e720a65f3589 Mon Sep 17 00:00:00 2001 |
| From: Ben Widawsky <ben@bwidawsk.net> |
| Date: Fri, 5 Jul 2013 14:41:04 -0700 |
| Subject: drm/i915: Getter/setter for object attributes |
| |
| Soon we want to gut a lot of our existing assumptions about how many address |
| spaces an object can live in, and in doing so, embed the drm_mm_node in |
| the object (and later the VMA). |
| |
| It's possible in the future we'll want to add more getter/setter |
| methods, but for now this is enough to enable the VMAs. |
| |
| v2: Reworked commit message (Ben) |
| Added comments to the main functions (Ben) |
| sed -i "s/i915_gem_obj_set_color/i915_gem_obj_ggtt_set_color/" drivers/gpu/drm/i915/*.[ch] |
| sed -i "s/i915_gem_obj_bound/i915_gem_obj_ggtt_bound/" drivers/gpu/drm/i915/*.[ch] |
| sed -i "s/i915_gem_obj_size/i915_gem_obj_ggtt_size/" drivers/gpu/drm/i915/*.[ch] |
| sed -i "s/i915_gem_obj_offset/i915_gem_obj_ggtt_offset/" drivers/gpu/drm/i915/*.[ch] |
| (Daniel) |
| |
| v3: Rebased on new reserve_node patch |
| Changed DRM_DEBUG_KMS to actually work (will need fixing later) |
| |
| Signed-off-by: Ben Widawsky <ben@bwidawsk.net> |
| Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch> |
| (cherry picked from commit f343c5f6477354967ee1e331a68a56b9fece2f36) |
| (cherry picked from drm-intel-next-queued) |
| Signed-off-by: James Ausmus <james.ausmus@intel.com> |
| |
| Conflicts: |
| drivers/gpu/drm/i915/i915_gem.c |
| (used airlied's rerere from |
| e13af9a8340685cfe25d0c9f708da7121e0f51dd) |
| Signed-off-by: Darren Hart <dvhart@linux.intel.com> |
| --- |
| drivers/gpu/drm/i915/i915_debugfs.c | 26 ++++---- |
| drivers/gpu/drm/i915/i915_drv.h | 31 ++++++++++ |
| drivers/gpu/drm/i915/i915_gem.c | 89 ++++++++++++++--------------- |
| drivers/gpu/drm/i915/i915_gem_context.c | 2 +- |
| drivers/gpu/drm/i915/i915_gem_execbuffer.c | 19 +++--- |
| drivers/gpu/drm/i915/i915_gem_gtt.c | 8 +- |
| drivers/gpu/drm/i915/i915_gem_tiling.c | 14 ++-- |
| drivers/gpu/drm/i915/i915_irq.c | 15 ++-- |
| drivers/gpu/drm/i915/i915_trace.h | 8 +- |
| drivers/gpu/drm/i915/intel_display.c | 28 ++++----- |
| drivers/gpu/drm/i915/intel_fb.c | 8 +- |
| drivers/gpu/drm/i915/intel_overlay.c | 14 ++-- |
| drivers/gpu/drm/i915/intel_pm.c | 8 +- |
| drivers/gpu/drm/i915/intel_ringbuffer.c | 12 +-- |
| drivers/gpu/drm/i915/intel_sprite.c | 8 +- |
| 15 files changed, 164 insertions(+), 126 deletions(-) |
| |
| --- a/drivers/gpu/drm/i915/i915_debugfs.c |
| +++ b/drivers/gpu/drm/i915/i915_debugfs.c |
| @@ -122,9 +122,9 @@ describe_obj(struct seq_file *m, struct |
| seq_printf(m, " (pinned x %d)", obj->pin_count); |
| if (obj->fence_reg != I915_FENCE_REG_NONE) |
| seq_printf(m, " (fence: %d)", obj->fence_reg); |
| - if (obj->gtt_space != NULL) |
| - seq_printf(m, " (gtt offset: %08x, size: %08x)", |
| - obj->gtt_offset, (unsigned int)obj->gtt_space->size); |
| + if (i915_gem_obj_ggtt_bound(obj)) |
| + seq_printf(m, " (gtt offset: %08lx, size: %08x)", |
| + i915_gem_obj_ggtt_offset(obj), (unsigned int)i915_gem_obj_ggtt_size(obj)); |
| if (obj->stolen) |
| seq_printf(m, " (stolen: %08lx)", obj->stolen->start); |
| if (obj->pin_mappable || obj->fault_mappable) { |
| @@ -175,7 +175,7 @@ static int i915_gem_object_list_info(str |
| describe_obj(m, obj); |
| seq_putc(m, '\n'); |
| total_obj_size += obj->base.size; |
| - total_gtt_size += obj->gtt_space->size; |
| + total_gtt_size += i915_gem_obj_ggtt_size(obj); |
| count++; |
| } |
| mutex_unlock(&dev->struct_mutex); |
| @@ -187,10 +187,10 @@ static int i915_gem_object_list_info(str |
| |
| #define count_objects(list, member) do { \ |
| list_for_each_entry(obj, list, member) { \ |
| - size += obj->gtt_space->size; \ |
| + size += i915_gem_obj_ggtt_size(obj); \ |
| ++count; \ |
| if (obj->map_and_fenceable) { \ |
| - mappable_size += obj->gtt_space->size; \ |
| + mappable_size += i915_gem_obj_ggtt_size(obj); \ |
| ++mappable_count; \ |
| } \ |
| } \ |
| @@ -209,7 +209,7 @@ static int per_file_stats(int id, void * |
| stats->count++; |
| stats->total += obj->base.size; |
| |
| - if (obj->gtt_space) { |
| + if (i915_gem_obj_ggtt_bound(obj)) { |
| if (!list_empty(&obj->ring_list)) |
| stats->active += obj->base.size; |
| else |
| @@ -267,11 +267,11 @@ static int i915_gem_object_info(struct s |
| size = count = mappable_size = mappable_count = 0; |
| list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { |
| if (obj->fault_mappable) { |
| - size += obj->gtt_space->size; |
| + size += i915_gem_obj_ggtt_size(obj); |
| ++count; |
| } |
| if (obj->pin_mappable) { |
| - mappable_size += obj->gtt_space->size; |
| + mappable_size += i915_gem_obj_ggtt_size(obj); |
| ++mappable_count; |
| } |
| if (obj->madv == I915_MADV_DONTNEED) { |
| @@ -333,7 +333,7 @@ static int i915_gem_gtt_info(struct seq_ |
| describe_obj(m, obj); |
| seq_putc(m, '\n'); |
| total_obj_size += obj->base.size; |
| - total_gtt_size += obj->gtt_space->size; |
| + total_gtt_size += i915_gem_obj_ggtt_size(obj); |
| count++; |
| } |
| |
| @@ -379,12 +379,14 @@ static int i915_gem_pageflip_info(struct |
| if (work->old_fb_obj) { |
| struct drm_i915_gem_object *obj = work->old_fb_obj; |
| if (obj) |
| - seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); |
| + seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n", |
| + i915_gem_obj_ggtt_offset(obj)); |
| } |
| if (work->pending_flip_obj) { |
| struct drm_i915_gem_object *obj = work->pending_flip_obj; |
| if (obj) |
| - seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); |
| + seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n", |
| + i915_gem_obj_ggtt_offset(obj)); |
| } |
| } |
| spin_unlock_irqrestore(&dev->event_lock, flags); |
| --- a/drivers/gpu/drm/i915/i915_drv.h |
| +++ b/drivers/gpu/drm/i915/i915_drv.h |
| @@ -1362,6 +1362,37 @@ struct drm_i915_gem_object { |
| |
| #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) |
| |
| +/* Offset of the first PTE pointing to this object */ |
| +static inline unsigned long |
| +i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) |
| +{ |
| + return o->gtt_space->start; |
| +} |
| + |
| +/* Whether or not this object is currently mapped by the translation tables */ |
| +static inline bool |
| +i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o) |
| +{ |
| + return o->gtt_space != NULL; |
| +} |
| + |
| +/* The size used in the translation tables may be larger than the actual size of |
| + * the object on GEN2/GEN3 because of the way tiling is handled. See |
| + * i915_gem_get_gtt_size() for more details. |
| + */ |
| +static inline unsigned long |
| +i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o) |
| +{ |
| + return o->gtt_space->size; |
| +} |
| + |
| +static inline void |
| +i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o, |
| + enum i915_cache_level color) |
| +{ |
| + o->gtt_space->color = color; |
| +} |
| + |
| /** |
| * Request queue structure. |
| * |
| --- a/drivers/gpu/drm/i915/i915_gem.c |
| +++ b/drivers/gpu/drm/i915/i915_gem.c |
| @@ -135,7 +135,7 @@ int i915_mutex_lock_interruptible(struct |
| static inline bool |
| i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) |
| { |
| - return obj->gtt_space && !obj->active; |
| + return i915_gem_obj_ggtt_bound(obj) && !obj->active; |
| } |
| |
| int |
| @@ -178,7 +178,7 @@ i915_gem_get_aperture_ioctl(struct drm_d |
| mutex_lock(&dev->struct_mutex); |
| list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) |
| if (obj->pin_count) |
| - pinned += obj->gtt_space->size; |
| + pinned += i915_gem_obj_ggtt_size(obj); |
| mutex_unlock(&dev->struct_mutex); |
| |
| args->aper_size = dev_priv->gtt.total; |
| @@ -422,7 +422,7 @@ i915_gem_shmem_pread(struct drm_device * |
| * anyway again before the next pread happens. */ |
| if (obj->cache_level == I915_CACHE_NONE) |
| needs_clflush = 1; |
| - if (obj->gtt_space) { |
| + if (i915_gem_obj_ggtt_bound(obj)) { |
| ret = i915_gem_object_set_to_gtt_domain(obj, false); |
| if (ret) |
| return ret; |
| @@ -609,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_devi |
| user_data = to_user_ptr(args->data_ptr); |
| remain = args->size; |
| |
| - offset = obj->gtt_offset + args->offset; |
| + offset = i915_gem_obj_ggtt_offset(obj) + args->offset; |
| |
| while (remain > 0) { |
| /* Operation in this page |
| @@ -739,7 +739,7 @@ i915_gem_shmem_pwrite(struct drm_device |
| * right away and we therefore have to clflush anyway. */ |
| if (obj->cache_level == I915_CACHE_NONE) |
| needs_clflush_after = 1; |
| - if (obj->gtt_space) { |
| + if (i915_gem_obj_ggtt_bound(obj)) { |
| ret = i915_gem_object_set_to_gtt_domain(obj, true); |
| if (ret) |
| return ret; |
| @@ -1360,8 +1360,9 @@ int i915_gem_fault(struct vm_area_struct |
| |
| obj->fault_mappable = true; |
| |
| - pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) + |
| - page_offset; |
| + pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj); |
| + pfn >>= PAGE_SHIFT; |
| + pfn += page_offset; |
| |
| /* Finally, remap it using the new GTT offset */ |
| ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); |
| @@ -1667,7 +1668,7 @@ i915_gem_object_put_pages(struct drm_i91 |
| if (obj->pages == NULL) |
| return 0; |
| |
| - BUG_ON(obj->gtt_space); |
| + BUG_ON(i915_gem_obj_ggtt_bound(obj)); |
| |
| if (obj->pages_pin_count) |
| return -EBUSY; |
| @@ -2121,8 +2122,8 @@ i915_gem_request_remove_from_client(stru |
| |
| static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj) |
| { |
| - if (acthd >= obj->gtt_offset && |
| - acthd < obj->gtt_offset + obj->base.size) |
| + if (acthd >= i915_gem_obj_ggtt_offset(obj) && |
| + acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) |
| return true; |
| |
| return false; |
| @@ -2180,11 +2181,11 @@ static void i915_set_reset_status(struct |
| |
| if (ring->hangcheck.action != wait && |
| i915_request_guilty(request, acthd, &inside)) { |
| - DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n", |
| + DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n", |
| ring->name, |
| inside ? "inside" : "flushing", |
| request->batch_obj ? |
| - request->batch_obj->gtt_offset : 0, |
| + i915_gem_obj_ggtt_offset(request->batch_obj) : 0, |
| request->ctx ? request->ctx->id : 0, |
| acthd); |
| |
| @@ -2595,7 +2596,7 @@ i915_gem_object_unbind(struct drm_i915_g |
| drm_i915_private_t *dev_priv = obj->base.dev->dev_private; |
| int ret; |
| |
| - if (obj->gtt_space == NULL) |
| + if (!i915_gem_obj_ggtt_bound(obj)) |
| return 0; |
| |
| if (obj->pin_count) |
| @@ -2691,12 +2692,12 @@ static void i965_write_fence_reg(struct |
| POSTING_READ(fence_reg); |
| |
| if (obj) { |
| - u32 size = obj->gtt_space->size; |
| + u32 size = i915_gem_obj_ggtt_size(obj); |
| uint64_t val; |
| |
| - val = (uint64_t)((obj->gtt_offset + size - 4096) & |
| + val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & |
| 0xfffff000) << 32; |
| - val |= obj->gtt_offset & 0xfffff000; |
| + val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; |
| val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift; |
| if (obj->tiling_mode == I915_TILING_Y) |
| val |= 1 << I965_FENCE_TILING_Y_SHIFT; |
| @@ -2720,15 +2721,15 @@ static void i915_write_fence_reg(struct |
| u32 val; |
| |
| if (obj) { |
| - u32 size = obj->gtt_space->size; |
| + u32 size = i915_gem_obj_ggtt_size(obj); |
| int pitch_val; |
| int tile_width; |
| |
| - WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) || |
| + WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) || |
| (size & -size) != size || |
| - (obj->gtt_offset & (size - 1)), |
| - "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", |
| - obj->gtt_offset, obj->map_and_fenceable, size); |
| + (i915_gem_obj_ggtt_offset(obj) & (size - 1)), |
| + "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", |
| + i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size); |
| |
| if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) |
| tile_width = 128; |
| @@ -2739,7 +2740,7 @@ static void i915_write_fence_reg(struct |
| pitch_val = obj->stride / tile_width; |
| pitch_val = ffs(pitch_val) - 1; |
| |
| - val = obj->gtt_offset; |
| + val = i915_gem_obj_ggtt_offset(obj); |
| if (obj->tiling_mode == I915_TILING_Y) |
| val |= 1 << I830_FENCE_TILING_Y_SHIFT; |
| val |= I915_FENCE_SIZE_BITS(size); |
| @@ -2764,19 +2765,19 @@ static void i830_write_fence_reg(struct |
| uint32_t val; |
| |
| if (obj) { |
| - u32 size = obj->gtt_space->size; |
| + u32 size = i915_gem_obj_ggtt_size(obj); |
| uint32_t pitch_val; |
| |
| - WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) || |
| + WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) || |
| (size & -size) != size || |
| - (obj->gtt_offset & (size - 1)), |
| - "object 0x%08x not 512K or pot-size 0x%08x aligned\n", |
| - obj->gtt_offset, size); |
| + (i915_gem_obj_ggtt_offset(obj) & (size - 1)), |
| + "object 0x%08lx not 512K or pot-size 0x%08x aligned\n", |
| + i915_gem_obj_ggtt_offset(obj), size); |
| |
| pitch_val = obj->stride / 128; |
| pitch_val = ffs(pitch_val) - 1; |
| |
| - val = obj->gtt_offset; |
| + val = i915_gem_obj_ggtt_offset(obj); |
| if (obj->tiling_mode == I915_TILING_Y) |
| val |= 1 << I830_FENCE_TILING_Y_SHIFT; |
| val |= I830_FENCE_SIZE_BITS(size); |
| @@ -3030,8 +3031,8 @@ static void i915_gem_verify_gtt(struct d |
| |
| if (obj->cache_level != obj->gtt_space->color) { |
| printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n", |
| - obj->gtt_space->start, |
| - obj->gtt_space->start + obj->gtt_space->size, |
| + i915_gem_obj_ggtt_offset(obj), |
| + i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj), |
| obj->cache_level, |
| obj->gtt_space->color); |
| err++; |
| @@ -3042,8 +3043,8 @@ static void i915_gem_verify_gtt(struct d |
| obj->gtt_space, |
| obj->cache_level)) { |
| printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n", |
| - obj->gtt_space->start, |
| - obj->gtt_space->start + obj->gtt_space->size, |
| + i915_gem_obj_ggtt_offset(obj), |
| + i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj), |
| obj->cache_level); |
| err++; |
| continue; |
| @@ -3155,8 +3156,8 @@ search_free: |
| node->size == fence_size && |
| (node->start & (fence_alignment - 1)) == 0; |
| |
| - mappable = |
| - obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end; |
| + mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <= |
| + dev_priv->gtt.mappable_end; |
| |
| obj->map_and_fenceable = mappable && fenceable; |
| |
| @@ -3258,7 +3259,7 @@ i915_gem_object_set_to_gtt_domain(struct |
| int ret; |
| |
| /* Not valid to be called on unbound objects. */ |
| - if (obj->gtt_space == NULL) |
| + if (!i915_gem_obj_ggtt_bound(obj)) |
| return -EINVAL; |
| |
| if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) |
| @@ -3323,7 +3324,7 @@ int i915_gem_object_set_cache_level(stru |
| return ret; |
| } |
| |
| - if (obj->gtt_space) { |
| + if (i915_gem_obj_ggtt_bound(obj)) { |
| ret = i915_gem_object_finish_gpu(obj); |
| if (ret) |
| return ret; |
| @@ -3346,7 +3347,7 @@ int i915_gem_object_set_cache_level(stru |
| i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, |
| obj, cache_level); |
| |
| - obj->gtt_space->color = cache_level; |
| + i915_gem_obj_ggtt_set_color(obj, cache_level); |
| } |
| |
| if (cache_level == I915_CACHE_NONE) { |
| @@ -3627,14 +3628,14 @@ i915_gem_object_pin(struct drm_i915_gem_ |
| if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) |
| return -EBUSY; |
| |
| - if (obj->gtt_space != NULL) { |
| - if ((alignment && obj->gtt_offset & (alignment - 1)) || |
| + if (i915_gem_obj_ggtt_bound(obj)) { |
| + if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) || |
| (map_and_fenceable && !obj->map_and_fenceable)) { |
| WARN(obj->pin_count, |
| "bo is already pinned with incorrect alignment:" |
| - " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," |
| + " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," |
| " obj->map_and_fenceable=%d\n", |
| - obj->gtt_offset, alignment, |
| + i915_gem_obj_ggtt_offset(obj), alignment, |
| map_and_fenceable, |
| obj->map_and_fenceable); |
| ret = i915_gem_object_unbind(obj); |
| @@ -3643,7 +3644,7 @@ i915_gem_object_pin(struct drm_i915_gem_ |
| } |
| } |
| |
| - if (obj->gtt_space == NULL) { |
| + if (!i915_gem_obj_ggtt_bound(obj)) { |
| struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
| |
| ret = i915_gem_object_bind_to_gtt(obj, alignment, |
| @@ -3669,7 +3670,7 @@ void |
| i915_gem_object_unpin(struct drm_i915_gem_object *obj) |
| { |
| BUG_ON(obj->pin_count == 0); |
| - BUG_ON(obj->gtt_space == NULL); |
| + BUG_ON(!i915_gem_obj_ggtt_bound(obj)); |
| |
| if (--obj->pin_count == 0) |
| obj->pin_mappable = false; |
| @@ -3719,7 +3720,7 @@ i915_gem_pin_ioctl(struct drm_device *de |
| * as the X server doesn't manage domains yet |
| */ |
| i915_gem_object_flush_cpu_write_domain(obj); |
| - args->offset = obj->gtt_offset; |
| + args->offset = i915_gem_obj_ggtt_offset(obj); |
| out: |
| drm_gem_object_unreference(&obj->base); |
| unlock: |
| --- a/drivers/gpu/drm/i915/i915_gem_context.c |
| +++ b/drivers/gpu/drm/i915/i915_gem_context.c |
| @@ -375,7 +375,7 @@ mi_set_context(struct intel_ring_buffer |
| |
| intel_ring_emit(ring, MI_NOOP); |
| intel_ring_emit(ring, MI_SET_CONTEXT); |
| - intel_ring_emit(ring, new_context->obj->gtt_offset | |
| + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) | |
| MI_MM_SPACE_GTT | |
| MI_SAVE_EXT_STATE_EN | |
| MI_RESTORE_EXT_STATE_EN | |
| --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c |
| +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c |
| @@ -188,7 +188,7 @@ i915_gem_execbuffer_relocate_entry(struc |
| return -ENOENT; |
| |
| target_i915_obj = to_intel_bo(target_obj); |
| - target_offset = target_i915_obj->gtt_offset; |
| + target_offset = i915_gem_obj_ggtt_offset(target_i915_obj); |
| |
| /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and |
| * pipe_control writes because the gpu doesn't properly redirect them |
| @@ -280,7 +280,7 @@ i915_gem_execbuffer_relocate_entry(struc |
| return ret; |
| |
| /* Map the page containing the relocation we're going to perform. */ |
| - reloc->offset += obj->gtt_offset; |
| + reloc->offset += i915_gem_obj_ggtt_offset(obj); |
| reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, |
| reloc->offset & PAGE_MASK); |
| reloc_entry = (uint32_t __iomem *) |
| @@ -436,8 +436,8 @@ i915_gem_execbuffer_reserve_object(struc |
| obj->has_aliasing_ppgtt_mapping = 1; |
| } |
| |
| - if (entry->offset != obj->gtt_offset) { |
| - entry->offset = obj->gtt_offset; |
| + if (entry->offset != i915_gem_obj_ggtt_offset(obj)) { |
| + entry->offset = i915_gem_obj_ggtt_offset(obj); |
| *need_reloc = true; |
| } |
| |
| @@ -458,7 +458,7 @@ i915_gem_execbuffer_unreserve_object(str |
| { |
| struct drm_i915_gem_exec_object2 *entry; |
| |
| - if (!obj->gtt_space) |
| + if (!i915_gem_obj_ggtt_bound(obj)) |
| return; |
| |
| entry = obj->exec_entry; |
| @@ -530,7 +530,7 @@ i915_gem_execbuffer_reserve(struct intel |
| struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; |
| bool need_fence, need_mappable; |
| |
| - if (!obj->gtt_space) |
| + if (!i915_gem_obj_ggtt_bound(obj)) |
| continue; |
| |
| need_fence = |
| @@ -539,7 +539,8 @@ i915_gem_execbuffer_reserve(struct intel |
| obj->tiling_mode != I915_TILING_NONE; |
| need_mappable = need_fence || need_reloc_mappable(obj); |
| |
| - if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || |
| + if ((entry->alignment && |
| + i915_gem_obj_ggtt_offset(obj) & (entry->alignment - 1)) || |
| (need_mappable && !obj->map_and_fenceable)) |
| ret = i915_gem_object_unbind(obj); |
| else |
| @@ -550,7 +551,7 @@ i915_gem_execbuffer_reserve(struct intel |
| |
| /* Bind fresh objects */ |
| list_for_each_entry(obj, objects, exec_list) { |
| - if (obj->gtt_space) |
| + if (i915_gem_obj_ggtt_bound(obj)) |
| continue; |
| |
| ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); |
| @@ -1058,7 +1059,7 @@ i915_gem_do_execbuffer(struct drm_device |
| goto err; |
| } |
| |
| - exec_start = batch_obj->gtt_offset + args->batch_start_offset; |
| + exec_start = i915_gem_obj_ggtt_offset(batch_obj) + args->batch_start_offset; |
| exec_len = args->batch_len; |
| if (cliprects) { |
| for (i = 0; i < args->num_cliprects; i++) { |
| --- a/drivers/gpu/drm/i915/i915_gem_gtt.c |
| +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c |
| @@ -378,7 +378,7 @@ void i915_ppgtt_bind_object(struct i915_ |
| enum i915_cache_level cache_level) |
| { |
| ppgtt->insert_entries(ppgtt, obj->pages, |
| - obj->gtt_space->start >> PAGE_SHIFT, |
| + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, |
| cache_level); |
| } |
| |
| @@ -386,7 +386,7 @@ void i915_ppgtt_unbind_object(struct i91 |
| struct drm_i915_gem_object *obj) |
| { |
| ppgtt->clear_range(ppgtt, |
| - obj->gtt_space->start >> PAGE_SHIFT, |
| + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, |
| obj->base.size >> PAGE_SHIFT); |
| } |
| |
| @@ -551,7 +551,7 @@ void i915_gem_gtt_bind_object(struct drm |
| struct drm_i915_private *dev_priv = dev->dev_private; |
| |
| dev_priv->gtt.gtt_insert_entries(dev, obj->pages, |
| - obj->gtt_space->start >> PAGE_SHIFT, |
| + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, |
| cache_level); |
| |
| obj->has_global_gtt_mapping = 1; |
| @@ -563,7 +563,7 @@ void i915_gem_gtt_unbind_object(struct d |
| struct drm_i915_private *dev_priv = dev->dev_private; |
| |
| dev_priv->gtt.gtt_clear_range(obj->base.dev, |
| - obj->gtt_space->start >> PAGE_SHIFT, |
| + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, |
| obj->base.size >> PAGE_SHIFT); |
| |
| obj->has_global_gtt_mapping = 0; |
| --- a/drivers/gpu/drm/i915/i915_gem_tiling.c |
| +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c |
| @@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915 |
| return true; |
| |
| if (INTEL_INFO(obj->base.dev)->gen == 3) { |
| - if (obj->gtt_offset & ~I915_FENCE_START_MASK) |
| + if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) |
| return false; |
| } else { |
| - if (obj->gtt_offset & ~I830_FENCE_START_MASK) |
| + if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) |
| return false; |
| } |
| |
| size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode); |
| - if (obj->gtt_space->size != size) |
| + if (i915_gem_obj_ggtt_size(obj) != size) |
| return false; |
| |
| - if (obj->gtt_offset & (size - 1)) |
| + if (i915_gem_obj_ggtt_offset(obj) & (size - 1)) |
| return false; |
| |
| return true; |
| @@ -359,8 +359,8 @@ i915_gem_set_tiling(struct drm_device *d |
| */ |
| |
| obj->map_and_fenceable = |
| - obj->gtt_space == NULL || |
| - (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end && |
| + !i915_gem_obj_ggtt_bound(obj) || |
| + (i915_gem_obj_ggtt_offset(obj) + obj->base.size <= dev_priv->gtt.mappable_end && |
| i915_gem_object_fence_ok(obj, args->tiling_mode)); |
| |
| /* Rebind if we need a change of alignment */ |
| @@ -369,7 +369,7 @@ i915_gem_set_tiling(struct drm_device *d |
| i915_gem_get_gtt_alignment(dev, obj->base.size, |
| args->tiling_mode, |
| false); |
| - if (obj->gtt_offset & (unfenced_alignment - 1)) |
| + if (i915_gem_obj_ggtt_offset(obj) & (unfenced_alignment - 1)) |
| ret = i915_gem_object_unbind(obj); |
| } |
| |
| --- a/drivers/gpu/drm/i915/i915_irq.c |
| +++ b/drivers/gpu/drm/i915/i915_irq.c |
| @@ -1554,7 +1554,7 @@ i915_error_object_create_sized(struct dr |
| if (dst == NULL) |
| return NULL; |
| |
| - reloc_offset = src->gtt_offset; |
| + reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src); |
| for (i = 0; i < num_pages; i++) { |
| unsigned long flags; |
| void *d; |
| @@ -1606,7 +1606,6 @@ i915_error_object_create_sized(struct dr |
| reloc_offset += PAGE_SIZE; |
| } |
| dst->page_count = num_pages; |
| - dst->gtt_offset = src->gtt_offset; |
| |
| return dst; |
| |
| @@ -1660,7 +1659,7 @@ static void capture_bo(struct drm_i915_e |
| err->name = obj->base.name; |
| err->rseqno = obj->last_read_seqno; |
| err->wseqno = obj->last_write_seqno; |
| - err->gtt_offset = obj->gtt_offset; |
| + err->gtt_offset = i915_gem_obj_ggtt_offset(obj); |
| err->read_domains = obj->base.read_domains; |
| err->write_domain = obj->base.write_domain; |
| err->fence_reg = obj->fence_reg; |
| @@ -1758,8 +1757,8 @@ i915_error_first_batchbuffer(struct drm_ |
| return NULL; |
| |
| obj = ring->private; |
| - if (acthd >= obj->gtt_offset && |
| - acthd < obj->gtt_offset + obj->base.size) |
| + if (acthd >= i915_gem_obj_ggtt_offset(obj) && |
| + acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) |
| return i915_error_object_create(dev_priv, obj); |
| } |
| |
| @@ -1840,7 +1839,7 @@ static void i915_gem_record_active_conte |
| return; |
| |
| list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { |
| - if ((error->ccid & PAGE_MASK) == obj->gtt_offset) { |
| + if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) { |
| ering->ctx = i915_error_object_create_sized(dev_priv, |
| obj, 1); |
| break; |
| @@ -2206,10 +2205,10 @@ static void __always_unused i915_pagefli |
| if (INTEL_INFO(dev)->gen >= 4) { |
| int dspsurf = DSPSURF(intel_crtc->plane); |
| stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == |
| - obj->gtt_offset; |
| + i915_gem_obj_ggtt_offset(obj); |
| } else { |
| int dspaddr = DSPADDR(intel_crtc->plane); |
| - stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + |
| + stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + |
| crtc->y * crtc->fb->pitches[0] + |
| crtc->x * crtc->fb->bits_per_pixel/8); |
| } |
| --- a/drivers/gpu/drm/i915/i915_trace.h |
| +++ b/drivers/gpu/drm/i915/i915_trace.h |
| @@ -46,8 +46,8 @@ TRACE_EVENT(i915_gem_object_bind, |
| |
| TP_fast_assign( |
| __entry->obj = obj; |
| - __entry->offset = obj->gtt_space->start; |
| - __entry->size = obj->gtt_space->size; |
| + __entry->offset = i915_gem_obj_ggtt_offset(obj); |
| + __entry->size = i915_gem_obj_ggtt_size(obj); |
| __entry->mappable = mappable; |
| ), |
| |
| @@ -68,8 +68,8 @@ TRACE_EVENT(i915_gem_object_unbind, |
| |
| TP_fast_assign( |
| __entry->obj = obj; |
| - __entry->offset = obj->gtt_space->start; |
| - __entry->size = obj->gtt_space->size; |
| + __entry->offset = i915_gem_obj_ggtt_offset(obj); |
| + __entry->size = i915_gem_obj_ggtt_size(obj); |
| ), |
| |
| TP_printk("obj=%p, offset=%08x size=%x", |
| --- a/drivers/gpu/drm/i915/intel_display.c |
| +++ b/drivers/gpu/drm/i915/intel_display.c |
| @@ -1980,16 +1980,17 @@ static int i9xx_update_plane(struct drm_ |
| intel_crtc->dspaddr_offset = linear_offset; |
| } |
| |
| - DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", |
| - obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); |
| + DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", |
| + i915_gem_obj_ggtt_offset(obj), linear_offset, x, y, |
| + fb->pitches[0]); |
| I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); |
| if (INTEL_INFO(dev)->gen >= 4) { |
| I915_MODIFY_DISPBASE(DSPSURF(plane), |
| - obj->gtt_offset + intel_crtc->dspaddr_offset); |
| + i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); |
| I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); |
| I915_WRITE(DSPLINOFF(plane), linear_offset); |
| } else |
| - I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset); |
| + I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset); |
| POSTING_READ(reg); |
| |
| return 0; |
| @@ -2069,11 +2070,12 @@ static int ironlake_update_plane(struct |
| fb->pitches[0]); |
| linear_offset -= intel_crtc->dspaddr_offset; |
| |
| - DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", |
| - obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); |
| + DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", |
| + i915_gem_obj_ggtt_offset(obj), linear_offset, x, y, |
| + fb->pitches[0]); |
| I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); |
| I915_MODIFY_DISPBASE(DSPSURF(plane), |
| - obj->gtt_offset + intel_crtc->dspaddr_offset); |
| + i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); |
| if (IS_HASWELL(dev)) { |
| I915_WRITE(DSPOFFSET(plane), (y << 16) | x); |
| } else { |
| @@ -6568,7 +6570,7 @@ static int intel_crtc_cursor_set(struct |
| goto fail_unpin; |
| } |
| |
| - addr = obj->gtt_offset; |
| + addr = i915_gem_obj_ggtt_offset(obj); |
| } else { |
| int align = IS_I830(dev) ? 16 * 1024 : 256; |
| ret = i915_gem_attach_phys_object(dev, obj, |
| @@ -7340,7 +7342,7 @@ static int intel_gen2_queue_flip(struct |
| intel_ring_emit(ring, MI_DISPLAY_FLIP | |
| MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
| intel_ring_emit(ring, fb->pitches[0]); |
| - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); |
| + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); |
| intel_ring_emit(ring, 0); /* aux display base address, unused */ |
| |
| intel_mark_page_flip_active(intel_crtc); |
| @@ -7381,7 +7383,7 @@ static int intel_gen3_queue_flip(struct |
| intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | |
| MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
| intel_ring_emit(ring, fb->pitches[0]); |
| - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); |
| + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); |
| intel_ring_emit(ring, MI_NOOP); |
| |
| intel_mark_page_flip_active(intel_crtc); |
| @@ -7421,7 +7423,7 @@ static int intel_gen4_queue_flip(struct |
| MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
| intel_ring_emit(ring, fb->pitches[0]); |
| intel_ring_emit(ring, |
| - (obj->gtt_offset + intel_crtc->dspaddr_offset) | |
| + (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) | |
| obj->tiling_mode); |
| |
| /* XXX Enabling the panel-fitter across page-flip is so far |
| @@ -7464,7 +7466,7 @@ static int intel_gen6_queue_flip(struct |
| intel_ring_emit(ring, MI_DISPLAY_FLIP | |
| MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
| intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); |
| - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); |
| + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); |
| |
| /* Contrary to the suggestions in the documentation, |
| * "Enable Panel Fitter" does not seem to be required when page |
| @@ -7529,7 +7531,7 @@ static int intel_gen7_queue_flip(struct |
| |
| intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); |
| intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); |
| - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); |
| + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); |
| intel_ring_emit(ring, (MI_NOOP)); |
| |
| intel_mark_page_flip_active(intel_crtc); |
| --- a/drivers/gpu/drm/i915/intel_fb.c |
| +++ b/drivers/gpu/drm/i915/intel_fb.c |
| @@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_ |
| info->apertures->ranges[0].base = dev->mode_config.fb_base; |
| info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; |
| |
| - info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; |
| + info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj); |
| info->fix.smem_len = size; |
| |
| info->screen_base = |
| - ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, |
| + ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), |
| size); |
| if (!info->screen_base) { |
| ret = -ENOSPC; |
| @@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_ |
| |
| /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ |
| |
| - DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", |
| + DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n", |
| fb->width, fb->height, |
| - obj->gtt_offset, obj); |
| + i915_gem_obj_ggtt_offset(obj), obj); |
| |
| |
| mutex_unlock(&dev->struct_mutex); |
| --- a/drivers/gpu/drm/i915/intel_overlay.c |
| +++ b/drivers/gpu/drm/i915/intel_overlay.c |
| @@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_over |
| regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; |
| else |
| regs = io_mapping_map_wc(dev_priv->gtt.mappable, |
| - overlay->reg_bo->gtt_offset); |
| + i915_gem_obj_ggtt_offset(overlay->reg_bo)); |
| |
| return regs; |
| } |
| @@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(st |
| swidth = params->src_w; |
| swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); |
| sheight = params->src_h; |
| - iowrite32(new_bo->gtt_offset + params->offset_Y, ®s->OBUF_0Y); |
| + iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, ®s->OBUF_0Y); |
| ostride = params->stride_Y; |
| |
| if (params->format & I915_OVERLAY_YUV_PLANAR) { |
| @@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(st |
| params->src_w/uv_hscale); |
| swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; |
| sheight |= (params->src_h/uv_vscale) << 16; |
| - iowrite32(new_bo->gtt_offset + params->offset_U, ®s->OBUF_0U); |
| - iowrite32(new_bo->gtt_offset + params->offset_V, ®s->OBUF_0V); |
| + iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, ®s->OBUF_0U); |
| + iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, ®s->OBUF_0V); |
| ostride |= params->stride_UV << 16; |
| } |
| |
| @@ -1355,7 +1355,7 @@ void intel_setup_overlay(struct drm_devi |
| DRM_ERROR("failed to pin overlay register bo\n"); |
| goto out_free_bo; |
| } |
| - overlay->flip_addr = reg_bo->gtt_offset; |
| + overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo); |
| |
| ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); |
| if (ret) { |
| @@ -1435,7 +1435,7 @@ intel_overlay_map_regs_atomic(struct int |
| overlay->reg_bo->phys_obj->handle->vaddr; |
| else |
| regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, |
| - overlay->reg_bo->gtt_offset); |
| + i915_gem_obj_ggtt_offset(overlay->reg_bo)); |
| |
| return regs; |
| } |
| @@ -1468,7 +1468,7 @@ intel_overlay_capture_error_state(struct |
| if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) |
| error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr; |
| else |
| - error->base = overlay->reg_bo->gtt_offset; |
| + error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo); |
| |
| regs = intel_overlay_map_regs_atomic(overlay); |
| if (!regs) |
| --- a/drivers/gpu/drm/i915/intel_pm.c |
| +++ b/drivers/gpu/drm/i915/intel_pm.c |
| @@ -218,7 +218,7 @@ static void ironlake_enable_fbc(struct d |
| (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | |
| (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); |
| I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); |
| - I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); |
| + I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID); |
| /* enable it... */ |
| I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); |
| |
| @@ -275,7 +275,7 @@ static void gen7_enable_fbc(struct drm_c |
| struct drm_i915_gem_object *obj = intel_fb->obj; |
| struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| |
| - I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset); |
| + I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj)); |
| |
| I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X | |
| IVB_DPFC_CTL_FENCE_EN | |
| @@ -3700,7 +3700,7 @@ static void ironlake_enable_rc6(struct d |
| |
| intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); |
| intel_ring_emit(ring, MI_SET_CONTEXT); |
| - intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset | |
| + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) | |
| MI_MM_SPACE_GTT | |
| MI_SAVE_EXT_STATE_EN | |
| MI_RESTORE_EXT_STATE_EN | |
| @@ -3723,7 +3723,7 @@ static void ironlake_enable_rc6(struct d |
| return; |
| } |
| |
| - I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN); |
| + I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN); |
| I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); |
| } |
| |
| --- a/drivers/gpu/drm/i915/intel_ringbuffer.c |
| +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c |
| @@ -446,14 +446,14 @@ static int init_ring_common(struct intel |
| * registers with the above sequence (the readback of the HEAD registers |
| * also enforces ordering), otherwise the hw might lose the new ring |
| * register values. */ |
| - I915_WRITE_START(ring, obj->gtt_offset); |
| + I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj)); |
| I915_WRITE_CTL(ring, |
| ((ring->size - PAGE_SIZE) & RING_NR_PAGES) |
| | RING_VALID); |
| |
| /* If the head is still not zero, the ring is dead */ |
| if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && |
| - I915_READ_START(ring) == obj->gtt_offset && |
| + I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) && |
| (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { |
| DRM_ERROR("%s initialization failed " |
| "ctl %08x head %08x tail %08x start %08x\n", |
| @@ -511,7 +511,7 @@ init_pipe_control(struct intel_ring_buff |
| if (ret) |
| goto err_unref; |
| |
| - pc->gtt_offset = obj->gtt_offset; |
| + pc->gtt_offset = i915_gem_obj_ggtt_offset(obj); |
| pc->cpu_page = kmap(sg_page(obj->pages->sgl)); |
| if (pc->cpu_page == NULL) { |
| ret = -ENOMEM; |
| @@ -1162,7 +1162,7 @@ i830_dispatch_execbuffer(struct intel_ri |
| intel_ring_advance(ring); |
| } else { |
| struct drm_i915_gem_object *obj = ring->private; |
| - u32 cs_offset = obj->gtt_offset; |
| + u32 cs_offset = i915_gem_obj_ggtt_offset(obj); |
| |
| if (len > I830_BATCH_LIMIT) |
| return -ENOSPC; |
| @@ -1247,7 +1247,7 @@ static int init_status_page(struct intel |
| goto err_unref; |
| } |
| |
| - ring->status_page.gfx_addr = obj->gtt_offset; |
| + ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); |
| ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); |
| if (ring->status_page.page_addr == NULL) { |
| ret = -ENOMEM; |
| @@ -1334,7 +1334,7 @@ static int intel_init_ring_buffer(struct |
| goto err_unpin; |
| |
| ring->virtual_start = |
| - ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, |
| + ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), |
| ring->size); |
| if (ring->virtual_start == NULL) { |
| DRM_ERROR("Failed to map ringbuffer.\n"); |
| --- a/drivers/gpu/drm/i915/intel_sprite.c |
| +++ b/drivers/gpu/drm/i915/intel_sprite.c |
| @@ -133,7 +133,7 @@ vlv_update_plane(struct drm_plane *dplan |
| |
| I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); |
| I915_WRITE(SPCNTR(pipe, plane), sprctl); |
| - I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset + |
| + I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) + |
| sprsurf_offset); |
| POSTING_READ(SPSURF(pipe, plane)); |
| } |
| @@ -308,7 +308,8 @@ ivb_update_plane(struct drm_plane *plane |
| if (intel_plane->can_scale) |
| I915_WRITE(SPRSCALE(pipe), sprscale); |
| I915_WRITE(SPRCTL(pipe), sprctl); |
| - I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset); |
| + I915_MODIFY_DISPBASE(SPRSURF(pipe), |
| + i915_gem_obj_ggtt_offset(obj) + sprsurf_offset); |
| POSTING_READ(SPRSURF(pipe)); |
| |
| /* potentially re-enable LP watermarks */ |
| @@ -478,7 +479,8 @@ ilk_update_plane(struct drm_plane *plane |
| I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); |
| I915_WRITE(DVSSCALE(pipe), dvsscale); |
| I915_WRITE(DVSCNTR(pipe), dvscntr); |
| - I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset); |
| + I915_MODIFY_DISPBASE(DVSSURF(pipe), |
| + i915_gem_obj_ggtt_offset(obj) + dvssurf_offset); |
| POSTING_READ(DVSSURF(pipe)); |
| } |
| |