| From 95829069a5f93790175d1cd4ef53bcd62dd145aa Mon Sep 17 00:00:00 2001 |
| From: Ben Widawsky <ben@bwidawsk.net> |
| Date: Tue, 16 Jul 2013 16:50:08 -0700 |
| Subject: drm/i915: Move active/inactive lists to new mm |
| |
| Shamelessly manipulated out of Daniel :-) |
| "When moving the lists around explain that the active/inactive stuff is |
| used by eviction when we run out of address space, so needs to be |
| per-vma and per-address space. Bound/unbound otoh is used by the |
| shrinker which only cares about the amount of memory used and not one |
| bit about in which address space this memory is all used in. Of course |
| to actually kick out an object we need to unbind it from every address |
| space, but for that we have the per-object list of vmas." |
| |
| v2: Leave the bound list as a global one. (Chris, indirectly) |
| |
| v3: Rebased with no i915_gtt_vm. In most places I added a new *vm local, |
| since it will eventually be replaced by a vm argument. |
| Put comment back inline, since it no longer makes sense to do otherwise. |
| |
| v4: Rebased on hangcheck/error state movement |
| |
| Signed-off-by: Ben Widawsky <ben@bwidawsk.net> |
| Reviewed-by: Imre Deak <imre.deak@intel.com> |
| Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch> |
| (cherry picked from commit 5cef07e1628300aeda9ac9dae95a2b406175b3ff) |
| Signed-off-by: James Ausmus <james.ausmus@intel.com> |
| |
| Conflicts: |
| drivers/gpu/drm/i915/i915_gem.c |
| (context changes) |
| Signed-off-by: Darren Hart <dvhart@linux.intel.com> |
| --- |
| drivers/gpu/drm/i915/i915_debugfs.c | 16 +++++++----- |
| drivers/gpu/drm/i915/i915_drv.h | 46 +++++++++++++++++----------------- |
| drivers/gpu/drm/i915/i915_gem.c | 33 ++++++++++++------------ |
| drivers/gpu/drm/i915/i915_gem_debug.c | 2 +- |
| drivers/gpu/drm/i915/i915_gem_evict.c | 18 ++++++------- |
| drivers/gpu/drm/i915/i915_gem_stolen.c | 3 ++- |
| drivers/gpu/drm/i915/i915_gpu_error.c | 8 +++--- |
| 7 files changed, 67 insertions(+), 59 deletions(-) |
| |
| diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c |
| index 1c697c0ab7e5..a9246e9c5f9d 100644 |
| --- a/drivers/gpu/drm/i915/i915_debugfs.c |
| +++ b/drivers/gpu/drm/i915/i915_debugfs.c |
| @@ -135,7 +135,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) |
| uintptr_t list = (uintptr_t) node->info_ent->data; |
| struct list_head *head; |
| struct drm_device *dev = node->minor->dev; |
| - drm_i915_private_t *dev_priv = dev->dev_private; |
| + struct drm_i915_private *dev_priv = dev->dev_private; |
| + struct i915_address_space *vm = &dev_priv->gtt.base; |
| struct drm_i915_gem_object *obj; |
| size_t total_obj_size, total_gtt_size; |
| int count, ret; |
| @@ -147,11 +148,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) |
| switch (list) { |
| case ACTIVE_LIST: |
| seq_puts(m, "Active:\n"); |
| - head = &dev_priv->mm.active_list; |
| + head = &vm->active_list; |
| break; |
| case INACTIVE_LIST: |
| seq_puts(m, "Inactive:\n"); |
| - head = &dev_priv->mm.inactive_list; |
| + head = &vm->inactive_list; |
| break; |
| default: |
| mutex_unlock(&dev->struct_mutex); |
| @@ -219,6 +220,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) |
| u32 count, mappable_count, purgeable_count; |
| size_t size, mappable_size, purgeable_size; |
| struct drm_i915_gem_object *obj; |
| + struct i915_address_space *vm = &dev_priv->gtt.base; |
| struct drm_file *file; |
| int ret; |
| |
| @@ -236,12 +238,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data) |
| count, mappable_count, size, mappable_size); |
| |
| size = count = mappable_size = mappable_count = 0; |
| - count_objects(&dev_priv->mm.active_list, mm_list); |
| + count_objects(&vm->active_list, mm_list); |
| seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", |
| count, mappable_count, size, mappable_size); |
| |
| size = count = mappable_size = mappable_count = 0; |
| - count_objects(&dev_priv->mm.inactive_list, mm_list); |
| + count_objects(&vm->inactive_list, mm_list); |
| seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", |
| count, mappable_count, size, mappable_size); |
| |
| @@ -1625,6 +1627,7 @@ i915_drop_caches_set(void *data, u64 val) |
| struct drm_device *dev = data; |
| struct drm_i915_private *dev_priv = dev->dev_private; |
| struct drm_i915_gem_object *obj, *next; |
| + struct i915_address_space *vm = &dev_priv->gtt.base; |
| int ret; |
| |
| DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val); |
| @@ -1645,7 +1648,8 @@ i915_drop_caches_set(void *data, u64 val) |
| i915_gem_retire_requests(dev); |
| |
| if (val & DROP_BOUND) { |
| - list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list) |
| + list_for_each_entry_safe(obj, next, &vm->inactive_list, |
| + mm_list) |
| if (obj->pin_count == 0) { |
| ret = i915_gem_object_unbind(obj); |
| if (ret) |
| diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h |
| index bcfcc1086d5b..0bd41e4b9f08 100644 |
| --- a/drivers/gpu/drm/i915/i915_drv.h |
| +++ b/drivers/gpu/drm/i915/i915_drv.h |
| @@ -458,6 +458,29 @@ struct i915_address_space { |
| struct page *page; |
| } scratch; |
| |
| + /** |
| + * List of objects currently involved in rendering. |
| + * |
| + * Includes buffers having the contents of their GPU caches |
| + * flushed, not necessarily primitives. last_rendering_seqno |
| + * represents when the rendering involved will be completed. |
| + * |
| + * A reference is held on the buffer while on this list. |
| + */ |
| + struct list_head active_list; |
| + |
| + /** |
| + * LRU list of objects which are not in the ringbuffer and |
| + * are ready to unbind, but are still in the GTT. |
| + * |
| + * last_rendering_seqno is 0 while an object is in this list. |
| + * |
| + * A reference is not held on the buffer while on this list, |
| + * as merely being GTT-bound shouldn't prevent its being |
| + * freed, and we'll pull it off the list in the free path. |
| + */ |
| + struct list_head inactive_list; |
| + |
| /* FIXME: Need a more generic return type */ |
| gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, |
| enum i915_cache_level level); |
| @@ -853,29 +876,6 @@ struct i915_gem_mm { |
| struct shrinker inactive_shrinker; |
| bool shrinker_no_lock_stealing; |
| |
| - /** |
| - * List of objects currently involved in rendering. |
| - * |
| - * Includes buffers having the contents of their GPU caches |
| - * flushed, not necessarily primitives. last_rendering_seqno |
| - * represents when the rendering involved will be completed. |
| - * |
| - * A reference is held on the buffer while on this list. |
| - */ |
| - struct list_head active_list; |
| - |
| - /** |
| - * LRU list of objects which are not in the ringbuffer and |
| - * are ready to unbind, but are still in the GTT. |
| - * |
| - * last_rendering_seqno is 0 while an object is in this list. |
| - * |
| - * A reference is not held on the buffer while on this list, |
| - * as merely being GTT-bound shouldn't prevent its being |
| - * freed, and we'll pull it off the list in the free path. |
| - */ |
| - struct list_head inactive_list; |
| - |
| /** LRU list of objects with fence regs on them. */ |
| struct list_head fence_list; |
| |
| diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c |
| index b99c73b82ce4..0d749cb9d01a 100644 |
| --- a/drivers/gpu/drm/i915/i915_gem.c |
| +++ b/drivers/gpu/drm/i915/i915_gem.c |
| @@ -1692,6 +1692,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, |
| bool purgeable_only) |
| { |
| struct drm_i915_gem_object *obj, *next; |
| + struct i915_address_space *vm = &dev_priv->gtt.base; |
| long count = 0; |
| |
| list_for_each_entry_safe(obj, next, |
| @@ -1705,9 +1706,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, |
| } |
| } |
| |
| - list_for_each_entry_safe(obj, next, |
| - &dev_priv->mm.inactive_list, |
| - mm_list) { |
| + list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) { |
| if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && |
| i915_gem_object_unbind(obj) == 0 && |
| i915_gem_object_put_pages(obj) == 0) { |
| @@ -1878,6 +1877,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, |
| { |
| struct drm_device *dev = obj->base.dev; |
| struct drm_i915_private *dev_priv = dev->dev_private; |
| + struct i915_address_space *vm = &dev_priv->gtt.base; |
| u32 seqno = intel_ring_get_seqno(ring); |
| |
| BUG_ON(ring == NULL); |
| @@ -1894,7 +1894,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, |
| } |
| |
| /* Move from whatever list we were on to the tail of execution. */ |
| - list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); |
| + list_move_tail(&obj->mm_list, &vm->active_list); |
| list_move_tail(&obj->ring_list, &ring->active_list); |
| |
| obj->last_read_seqno = seqno; |
| @@ -1918,11 +1918,12 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) |
| { |
| struct drm_device *dev = obj->base.dev; |
| struct drm_i915_private *dev_priv = dev->dev_private; |
| + struct i915_address_space *vm = &dev_priv->gtt.base; |
| |
| BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); |
| BUG_ON(!obj->active); |
| |
| - list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
| + list_move_tail(&obj->mm_list, &vm->inactive_list); |
| |
| list_del_init(&obj->ring_list); |
| obj->ring = NULL; |
| @@ -2274,6 +2275,7 @@ void i915_gem_restore_fences(struct drm_device *dev) |
| void i915_gem_reset(struct drm_device *dev) |
| { |
| struct drm_i915_private *dev_priv = dev->dev_private; |
| + struct i915_address_space *vm = &dev_priv->gtt.base; |
| struct drm_i915_gem_object *obj; |
| struct intel_ring_buffer *ring; |
| int i; |
| @@ -2284,12 +2286,8 @@ void i915_gem_reset(struct drm_device *dev) |
| /* Move everything out of the GPU domains to ensure we do any |
| * necessary invalidation upon reuse. |
| */ |
| - list_for_each_entry(obj, |
| - &dev_priv->mm.inactive_list, |
| - mm_list) |
| - { |
| + list_for_each_entry(obj, &vm->inactive_list, mm_list) |
| obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; |
| - } |
| |
| i915_gem_restore_fences(dev); |
| } |
| @@ -3062,6 +3060,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, |
| { |
| struct drm_device *dev = obj->base.dev; |
| drm_i915_private_t *dev_priv = dev->dev_private; |
| + struct i915_address_space *vm = &dev_priv->gtt.base; |
| u32 size, fence_size, fence_alignment, unfenced_alignment; |
| bool mappable, fenceable; |
| size_t gtt_max = map_and_fenceable ? |
| @@ -3137,7 +3136,7 @@ search_free: |
| } |
| |
| list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); |
| - list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
| + list_add_tail(&obj->mm_list, &vm->inactive_list); |
| |
| fenceable = |
| i915_gem_obj_ggtt_size(obj) == fence_size && |
| @@ -3285,7 +3284,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) |
| |
| /* And bump the LRU for this access */ |
| if (i915_gem_object_is_inactive(obj)) |
| - list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
| + list_move_tail(&obj->mm_list, |
| + &dev_priv->gtt.base.inactive_list); |
| |
| return 0; |
| } |
| @@ -4226,7 +4226,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, |
| return ret; |
| } |
| |
| - BUG_ON(!list_empty(&dev_priv->mm.active_list)); |
| + BUG_ON(!list_empty(&dev_priv->gtt.base.active_list)); |
| mutex_unlock(&dev->struct_mutex); |
| |
| ret = drm_irq_install(dev); |
| @@ -4304,8 +4304,8 @@ i915_gem_load(struct drm_device *dev) |
| SLAB_HWCACHE_ALIGN, |
| NULL); |
| |
| - INIT_LIST_HEAD(&dev_priv->mm.active_list); |
| - INIT_LIST_HEAD(&dev_priv->mm.inactive_list); |
| + INIT_LIST_HEAD(&dev_priv->gtt.base.active_list); |
| + INIT_LIST_HEAD(&dev_priv->gtt.base.inactive_list); |
| INIT_LIST_HEAD(&dev_priv->mm.unbound_list); |
| INIT_LIST_HEAD(&dev_priv->mm.bound_list); |
| INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
| @@ -4576,6 +4576,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) |
| struct drm_i915_private, |
| mm.inactive_shrinker); |
| struct drm_device *dev = dev_priv->dev; |
| + struct i915_address_space *vm = &dev_priv->gtt.base; |
| struct drm_i915_gem_object *obj; |
| int nr_to_scan = sc->nr_to_scan; |
| bool unlock = true; |
| @@ -4604,7 +4605,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) |
| list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) |
| if (obj->pages_pin_count == 0) |
| cnt += obj->base.size >> PAGE_SHIFT; |
| - list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) |
| +	list_for_each_entry(obj, &vm->inactive_list, mm_list) |
| if (obj->pin_count == 0 && obj->pages_pin_count == 0) |
| cnt += obj->base.size >> PAGE_SHIFT; |
| |
| diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c |
| index 582e6a5f3dac..bf945a39fbb1 100644 |
| --- a/drivers/gpu/drm/i915/i915_gem_debug.c |
| +++ b/drivers/gpu/drm/i915/i915_gem_debug.c |
| @@ -97,7 +97,7 @@ i915_verify_lists(struct drm_device *dev) |
| } |
| } |
| |
| - list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) { |
| + list_for_each_entry(obj, &i915_gtt_vm->inactive_list, list) { |
| if (obj->base.dev != dev || |
| !atomic_read(&obj->base.refcount.refcount)) { |
| DRM_ERROR("freed inactive %p\n", obj); |
| diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c |
| index f1c9ab096b00..43b82350d8dc 100644 |
| --- a/drivers/gpu/drm/i915/i915_gem_evict.c |
| +++ b/drivers/gpu/drm/i915/i915_gem_evict.c |
| @@ -47,6 +47,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, |
| bool mappable, bool nonblocking) |
| { |
| drm_i915_private_t *dev_priv = dev->dev_private; |
| + struct i915_address_space *vm = &dev_priv->gtt.base; |
| struct list_head eviction_list, unwind_list; |
| struct drm_i915_gem_object *obj; |
| int ret = 0; |
| @@ -78,15 +79,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, |
| |
| INIT_LIST_HEAD(&unwind_list); |
| if (mappable) |
| - drm_mm_init_scan_with_range(&dev_priv->gtt.base.mm, min_size, |
| + drm_mm_init_scan_with_range(&vm->mm, min_size, |
| alignment, cache_level, 0, |
| dev_priv->gtt.mappable_end); |
| else |
| - drm_mm_init_scan(&dev_priv->gtt.base.mm, min_size, alignment, |
| - cache_level); |
| + drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); |
| |
| /* First see if there is a large enough contiguous idle region... */ |
| - list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { |
| + list_for_each_entry(obj, &vm->inactive_list, mm_list) { |
| if (mark_free(obj, &unwind_list)) |
| goto found; |
| } |
| @@ -95,7 +95,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, |
| goto none; |
| |
| /* Now merge in the soon-to-be-expired objects... */ |
| - list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
| + list_for_each_entry(obj, &vm->active_list, mm_list) { |
| if (mark_free(obj, &unwind_list)) |
| goto found; |
| } |
| @@ -154,12 +154,13 @@ int |
| i915_gem_evict_everything(struct drm_device *dev) |
| { |
| drm_i915_private_t *dev_priv = dev->dev_private; |
| + struct i915_address_space *vm = &dev_priv->gtt.base; |
| struct drm_i915_gem_object *obj, *next; |
| bool lists_empty; |
| int ret; |
| |
| - lists_empty = (list_empty(&dev_priv->mm.inactive_list) && |
| - list_empty(&dev_priv->mm.active_list)); |
| + lists_empty = (list_empty(&vm->inactive_list) && |
| + list_empty(&vm->active_list)); |
| if (lists_empty) |
| return -ENOSPC; |
| |
| @@ -176,8 +177,7 @@ i915_gem_evict_everything(struct drm_device *dev) |
| i915_gem_retire_requests(dev); |
| |
| /* Having flushed everything, unbind() should never raise an error */ |
| - list_for_each_entry_safe(obj, next, |
| - &dev_priv->mm.inactive_list, mm_list) |
| + list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) |
| if (obj->pin_count == 0) |
| WARN_ON(i915_gem_object_unbind(obj)); |
| |
| diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c |
| index ede8c41399d9..46a971560b01 100644 |
| --- a/drivers/gpu/drm/i915/i915_gem_stolen.c |
| +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c |
| @@ -351,6 +351,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, |
| u32 size) |
| { |
| struct drm_i915_private *dev_priv = dev->dev_private; |
| + struct i915_address_space *vm = &dev_priv->gtt.base; |
| struct drm_i915_gem_object *obj; |
| struct drm_mm_node *stolen; |
| int ret; |
| @@ -411,7 +412,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, |
| obj->has_global_gtt_mapping = 1; |
| |
| list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); |
| - list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
| + list_add_tail(&obj->mm_list, &vm->inactive_list); |
| |
| return obj; |
| |
| diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c |
| index 58386cebb865..d970d84da65f 100644 |
| --- a/drivers/gpu/drm/i915/i915_gpu_error.c |
| +++ b/drivers/gpu/drm/i915/i915_gpu_error.c |
| @@ -622,6 +622,7 @@ static struct drm_i915_error_object * |
| i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, |
| struct intel_ring_buffer *ring) |
| { |
| + struct i915_address_space *vm = &dev_priv->gtt.base; |
| struct drm_i915_gem_object *obj; |
| u32 seqno; |
| |
| @@ -641,7 +642,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, |
| } |
| |
| seqno = ring->get_seqno(ring, false); |
| - list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
| + list_for_each_entry(obj, &vm->active_list, mm_list) { |
| if (obj->ring != ring) |
| continue; |
| |
| @@ -773,11 +774,12 @@ static void i915_gem_record_rings(struct drm_device *dev, |
| static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, |
| struct drm_i915_error_state *error) |
| { |
| + struct i915_address_space *vm = &dev_priv->gtt.base; |
| struct drm_i915_gem_object *obj; |
| int i; |
| |
| i = 0; |
| - list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) |
| + list_for_each_entry(obj, &vm->active_list, mm_list) |
| i++; |
| error->active_bo_count = i; |
| list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) |
| @@ -797,7 +799,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, |
| error->active_bo_count = |
| capture_active_bo(error->active_bo, |
| error->active_bo_count, |
| - &dev_priv->mm.active_list); |
| + &vm->active_list); |
| |
| if (error->pinned_bo) |
| error->pinned_bo_count = |
| -- |
| 1.8.5.rc3 |
| |