From 8db22e0b6d06a7666fcdc9434e82cb3b6a58c774 Mon Sep 17 00:00:00 2001
From: David Herrmann <dh.herrmann@gmail.com>
Date: Wed, 24 Jul 2013 21:08:53 +0200
Subject: drm/ttm: convert to unified vma offset manager

Use the new vma-manager infrastructure. This doesn't change any
implementation details, as the vma-offset-manager was copied nearly
1-to-1 from TTM.
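
As a sketch of the conversion (condensed from the ttm_bo.c hunks below),
the old drm_mm based setup/teardown and per-object node handling map onto
the vma-manager like this:

	/* device init/teardown */
	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
				    0x10000000);
	drm_vma_offset_manager_destroy(&bdev->vma_manager);

	/* per-object offset allocation/release */
	ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
				 bo->mem.num_pages);
	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);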

The vm_lock is moved into the offset manager so we can drop it from TTM.
During lookup, we use the vma locking helpers to take a reference to the
found object.
In all other scenarios, locking stays the same as before. We always
guarantee that drm_vma_offset_remove() is called only during destruction.
Hence, helpers like drm_vma_node_offset_addr() are always safe as long as
the node has a valid offset.
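
For reference, the lookup now follows this pattern (condensed from the
new ttm_bo_vm_lookup() in ttm_bo_vm.c below; error handling trimmed):

	drm_vma_offset_lock_lookup(&bdev->vma_manager);
	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (node) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}
	drm_vma_offset_unlock_lookup(&bdev->vma_manager);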

This also drops the addr_space_offset member, as it is a copy of vm_start
in vma_node objects. Use the accessor functions instead.
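
The driver-side changes are mechanical; a typical mmap-offset helper
becomes (ast shown here, the other drivers follow the same pattern):

	static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
	{
		return drm_vma_node_offset_addr(&bo->bo.vma_node);
	}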

v4:
- remove vm_lock
- use drm_vma_offset_lock_lookup() to protect lookup (instead of vm_lock)

Cc: Dave Airlie <airlied@redhat.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Cc: Martin Peres <martin.peres@labri.fr>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@gmail.com>
(cherry picked from commit 72525b3f333de54fa0c42ef87f27861e41478f1e)
Signed-off-by: Darren Hart <dvhart@linux.intel.com>
---
 drivers/gpu/drm/ast/ast_main.c            |  2
 drivers/gpu/drm/cirrus/cirrus_main.c      |  2
 drivers/gpu/drm/mgag200/mgag200_main.c    |  2
 drivers/gpu/drm/nouveau/nouveau_display.c |  2
 drivers/gpu/drm/nouveau/nouveau_gem.c     |  2
 drivers/gpu/drm/qxl/qxl_object.h          |  2
 drivers/gpu/drm/qxl/qxl_release.c         |  2
 drivers/gpu/drm/radeon/radeon_object.h    |  5 -
 drivers/gpu/drm/ttm/ttm_bo.c              | 89 +++++-------------------------
 drivers/gpu/drm/ttm/ttm_bo_util.c         |  3 -
 drivers/gpu/drm/ttm/ttm_bo_vm.c           | 81 ++++++++++----------------
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  |  4 -
 include/drm/ttm/ttm_bo_api.h              | 15 +----
 include/drm/ttm/ttm_bo_driver.h           | 10 +--
 14 files changed, 66 insertions(+), 155 deletions(-)

--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -491,7 +491,7 @@ void ast_gem_free_object(struct drm_gem_
 
 static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
 {
-	return bo->bo.addr_space_offset;
+	return drm_vma_node_offset_addr(&bo->bo.vma_node);
 }
 int
 ast_dumb_mmap_offset(struct drm_file *file,
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -294,7 +294,7 @@ void cirrus_gem_free_object(struct drm_g
 
 static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
 {
-	return bo->bo.addr_space_offset;
+	return drm_vma_node_offset_addr(&bo->bo.vma_node);
 }
 
 int
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -330,7 +330,7 @@ void mgag200_gem_free_object(struct drm_
 
 static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
 {
-	return bo->bo.addr_space_offset;
+	return drm_vma_node_offset_addr(&bo->bo.vma_node);
 }
 
 int
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -705,7 +705,7 @@ nouveau_display_dumb_map_offset(struct d
 	gem = drm_gem_object_lookup(dev, file_priv, handle);
 	if (gem) {
 		struct nouveau_bo *bo = gem->driver_private;
-		*poffset = bo->bo.addr_space_offset;
+		*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
 		drm_gem_object_unreference_unlocked(gem);
 		return 0;
 	}
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -193,7 +193,7 @@ nouveau_gem_info(struct drm_file *file_p
 	}
 
 	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
-	rep->map_handle = nvbo->bo.addr_space_offset;
+	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
 	rep->tile_mode = nvbo->tile_mode;
 	rep->tile_flags = nvbo->tile_flags;
 	return 0;
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -64,7 +64,7 @@ static inline bool qxl_bo_is_reserved(st
 
 static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
 {
-	return bo->tbo.addr_space_offset;
+	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
 }
 
 static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -87,7 +87,7 @@ qxl_release_free(struct qxl_device *qdev
 
 	for (i = 0 ; i < release->bo_count; ++i) {
 		QXL_INFO(qdev, "release %llx\n",
-			release->bos[i]->tbo.addr_space_offset
+			drm_vma_node_offset_addr(&release->bos[i]->tbo.vma_node)
 			- DRM_FILE_OFFSET);
 		qxl_fence_remove_release(&release->bos[i]->fence, release->id);
 		qxl_bo_unref(&release->bos[i]);
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -98,13 +98,10 @@ static inline unsigned radeon_bo_gpu_pag
  * @bo: radeon object for which we query the offset
  *
  * Returns mmap offset of the object.
- *
- * Note: addr_space_offset is constant after ttm bo init thus isn't protected
- * by any lock.
  */
 static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
 {
-	return bo->tbo.addr_space_offset;
+	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
 }
 
 extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -777,13 +777,7 @@ static void ttm_bo_release(struct kref *
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
-	write_lock(&bdev->vm_lock);
-	if (likely(bo->vm_node != NULL)) {
-		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
-		drm_mm_put_block(bo->vm_node);
-		bo->vm_node = NULL;
-	}
-	write_unlock(&bdev->vm_lock);
+	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
 	ttm_mem_io_lock(man, false);
 	ttm_mem_io_free_vm(bo);
 	ttm_mem_io_unlock(man);
@@ -1299,6 +1293,7 @@ int ttm_bo_init(struct ttm_bo_device *bd
 	bo->acc_size = acc_size;
 	bo->sg = sg;
 	atomic_inc(&bo->glob->bo_count);
+	drm_vma_node_reset(&bo->vma_node);
 
 	ret = ttm_bo_check_placement(bo, placement);
 	if (unlikely(ret != 0))
@@ -1598,10 +1593,7 @@ int ttm_bo_device_release(struct ttm_bo_
 		TTM_DEBUG("Swap list was clean\n");
 	spin_unlock(&glob->lru_lock);
 
-	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
-	write_lock(&bdev->vm_lock);
-	drm_mm_takedown(&bdev->addr_space_mm);
-	write_unlock(&bdev->vm_lock);
+	drm_vma_offset_manager_destroy(&bdev->vma_manager);
 
 	return ret;
 }
@@ -1615,7 +1607,6 @@ int ttm_bo_device_init(struct ttm_bo_dev
 {
 	int ret = -EINVAL;
 
-	rwlock_init(&bdev->vm_lock);
 	bdev->driver = driver;
 
 	memset(bdev->man, 0, sizeof(bdev->man));
@@ -1628,9 +1619,8 @@
 	if (unlikely(ret != 0))
 		goto out_no_sys;
 
-	bdev->addr_space_rb = RB_ROOT;
-	drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
-
+	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
+				    0x10000000);
 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
 	INIT_LIST_HEAD(&bdev->ddestroy);
 	bdev->dev_mapping = NULL;
@@ -1672,12 +1662,17 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_de
 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	loff_t offset = (loff_t) bo->addr_space_offset;
-	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
+	loff_t offset, holelen;
 
 	if (!bdev->dev_mapping)
 		return;
-	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+
+	if (drm_vma_node_has_offset(&bo->vma_node)) {
+		offset = (loff_t) drm_vma_node_offset_addr(&bo->vma_node);
+		holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
+
+		unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+	}
 	ttm_mem_io_free_vm(bo);
 }
 
@@ -1694,31 +1689,6 @@ void ttm_bo_unmap_virtual(struct ttm_buf
 
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
-static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
-	struct rb_node *parent = NULL;
-	struct ttm_buffer_object *cur_bo;
-	unsigned long offset = bo->vm_node->start;
-	unsigned long cur_offset;
-
-	while (*cur) {
-		parent = *cur;
-		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
-		cur_offset = cur_bo->vm_node->start;
-		if (offset < cur_offset)
-			cur = &parent->rb_left;
-		else if (offset > cur_offset)
-			cur = &parent->rb_right;
-		else
-			BUG();
-	}
-
-	rb_link_node(&bo->vm_rb, parent, cur);
-	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
-}
-
 /**
  * ttm_bo_setup_vm:
  *
@@ -1733,38 +1703,9 @@ static void ttm_bo_vm_insert_rb(struct t
 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	int ret;
-
-retry_pre_get:
-	ret = drm_mm_pre_get(&bdev->addr_space_mm);
-	if (unlikely(ret != 0))
-		return ret;
-
-	write_lock(&bdev->vm_lock);
-	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
-					 bo->mem.num_pages, 0, 0);
-
-	if (unlikely(bo->vm_node == NULL)) {
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
 
-	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
-					      bo->mem.num_pages, 0);
-
-	if (unlikely(bo->vm_node == NULL)) {
-		write_unlock(&bdev->vm_lock);
-		goto retry_pre_get;
-	}
-
-	ttm_bo_vm_insert_rb(bo);
-	write_unlock(&bdev->vm_lock);
-	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
-
-	return 0;
-out_unlock:
-	write_unlock(&bdev->vm_lock);
-	return ret;
+	return drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+				  bo->mem.num_pages);
 }
 
 int ttm_bo_wait(struct ttm_buffer_object *bo,
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -30,6 +30,7 @@
 
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/io.h>
 #include <linux/highmem.h>
 #include <linux/wait.h>
@@ -458,7 +459,7 @@ static int ttm_buffer_object_transfer(st
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
 	INIT_LIST_HEAD(&fbo->io_reserve_lru);
-	fbo->vm_node = NULL;
+	drm_vma_node_reset(&fbo->vma_node);
 	atomic_set(&fbo->cpu_writers, 0);
 
 	spin_lock(&bdev->fence_lock);
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -33,6 +33,7 @@
 #include <ttm/ttm_module.h>
 #include <ttm/ttm_bo_driver.h>
 #include <ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/mm.h>
 #include <linux/rbtree.h>
 #include <linux/module.h>
@@ -40,37 +41,6 @@
 
 #define TTM_BO_VM_NUM_PREFAULT 16
 
-static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
-						     unsigned long page_start,
-						     unsigned long num_pages)
-{
-	struct rb_node *cur = bdev->addr_space_rb.rb_node;
-	unsigned long cur_offset;
-	struct ttm_buffer_object *bo;
-	struct ttm_buffer_object *best_bo = NULL;
-
-	while (likely(cur != NULL)) {
-		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
-		cur_offset = bo->vm_node->start;
-		if (page_start >= cur_offset) {
-			cur = cur->rb_right;
-			best_bo = bo;
-			if (page_start == cur_offset)
-				break;
-		} else
-			cur = cur->rb_left;
-	}
-
-	if (unlikely(best_bo == NULL))
-		return NULL;
-
-	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
-		     (page_start + num_pages)))
-		return NULL;
-
-	return best_bo;
-}
-
 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -146,9 +116,9 @@ static int ttm_bo_vm_fault(struct vm_are
 	}
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
-		bo->vm_node->start - vma->vm_pgoff;
+		drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
 	page_last = vma_pages(vma) +
-		bo->vm_node->start - vma->vm_pgoff;
+		drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
 
 	if (unlikely(page_offset >= bo->num_pages)) {
 		retval = VM_FAULT_SIGBUS;
@@ -249,6 +219,30 @@ static const struct vm_operations_struct
 	.close = ttm_bo_vm_close
 };
 
+static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
+						  unsigned long offset,
+						  unsigned long pages)
+{
+	struct drm_vma_offset_node *node;
+	struct ttm_buffer_object *bo = NULL;
+
+	drm_vma_offset_lock_lookup(&bdev->vma_manager);
+
+	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
+	if (likely(node)) {
+		bo = container_of(node, struct ttm_buffer_object, vma_node);
+		if (!kref_get_unless_zero(&bo->kref))
+			bo = NULL;
+	}
+
+	drm_vma_offset_unlock_lookup(&bdev->vma_manager);
+
+	if (!bo)
+		pr_err("Could not find buffer object to map\n");
+
+	return bo;
+}
+
 int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 		struct ttm_bo_device *bdev)
 {
@@ -256,17 +250,9 @@ int ttm_bo_mmap(struct file *filp, struc
 	struct ttm_buffer_object *bo;
 	int ret;
 
-	read_lock(&bdev->vm_lock);
-	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
-				 vma_pages(vma));
-	if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
-		bo = NULL;
-	read_unlock(&bdev->vm_lock);
-
-	if (unlikely(bo == NULL)) {
-		pr_err("Could not find buffer object to map\n");
+	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
+	if (unlikely(!bo))
 		return -EINVAL;
-	}
 
 	driver = bo->bdev->driver;
 	if (unlikely(!driver->verify_access)) {
@@ -324,12 +310,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *
 	bool no_wait = false;
 	bool dummy;
 
-	read_lock(&bdev->vm_lock);
-	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
-	if (likely(bo != NULL))
-		ttm_bo_reference(bo);
-	read_unlock(&bdev->vm_lock);
-
+	bo = ttm_bo_vm_lookup(bdev, dev_offset, 1);
 	if (unlikely(bo == NULL))
 		return -EFAULT;
 
@@ -343,7 +324,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *
 	if (unlikely(ret != 0))
 		goto out_unref;
 
-	kmap_offset = dev_offset - bo->vm_node->start;
+	kmap_offset = dev_offset - drm_vma_node_start(&bo->vma_node);
 	if (unlikely(kmap_offset >= bo->num_pages)) {
 		ret = -EFBIG;
 		goto out_unref;
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -500,7 +500,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_de
 		goto out_no_dmabuf;
 
 	rep->handle = handle;
-	rep->map_handle = dma_buf->base.addr_space_offset;
+	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
 	rep->cur_gmr_id = handle;
 	rep->cur_gmr_offset = 0;
 
@@ -834,7 +834,7 @@ int vmw_dumb_map_offset(struct drm_file
 	if (ret != 0)
 		return -EINVAL;
 
-	*offset = out_buf->base.addr_space_offset;
+	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
 	vmw_dmabuf_unreference(&out_buf);
 	return 0;
 }
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -32,12 +32,12 @@
 #define _TTM_BO_API_H_
 
 #include <drm/drm_hashtab.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/wait.h>
 #include <linux/mutex.h>
 #include <linux/mm.h>
-#include <linux/rbtree.h>
 #include <linux/bitmap.h>
 
 struct ttm_bo_device;
@@ -144,7 +144,6 @@ struct ttm_tt;
  * @type: The bo type.
 * @destroy: Destruction function. If NULL, kfree is used.
 * @num_pages: Actual number of pages.
-* @addr_space_offset: Address space offset.
 * @acc_size: Accounted size for this object.
 * @kref: Reference count of this buffer object. When this refcount reaches
 * zero, the object is put on the delayed delete list.
@@ -172,8 +171,7 @@ struct ttm_tt;
 * @reserved: Deadlock-free lock used for synchronization state transitions.
 * @sync_obj: Pointer to a synchronization object.
 * @priv_flags: Flags describing buffer object internal state.
-* @vm_rb: Rb node for the vm rb tree.
-* @vm_node: Address space manager node.
+* @vma_node: Address space manager node.
 * @offset: The current GPU offset, which can have different meanings
 * depending on the memory type. For SYSTEM type memory, it should be 0.
 * @cur_placement: Hint of current placement.
@@ -200,7 +198,6 @@ struct ttm_buffer_object {
 	enum ttm_bo_type type;
 	void (*destroy) (struct ttm_buffer_object *);
 	unsigned long num_pages;
-	uint64_t addr_space_offset;
 	size_t acc_size;
 
 	/**
@@ -254,13 +251,7 @@ struct ttm_buffer_object {
 	void *sync_obj;
 	unsigned long priv_flags;
 
-	/**
-	 * Members protected by the bdev::vm_lock
-	 */
-
-	struct rb_node vm_rb;
-	struct drm_mm_node *vm_node;
-
+	struct drm_vma_offset_node vma_node;
 
 	/**
 	 * Special members that are protected by the reserve lock
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -35,6 +35,7 @@
 #include <ttm/ttm_module.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_global.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/workqueue.h>
 #include <linux/fs.h>
 #include <linux/spinlock.h>
@@ -517,7 +518,7 @@ struct ttm_bo_global {
 * @man: An array of mem_type_managers.
 * @fence_lock: Protects the synchronizing members on *all* bos belonging
 * to this device.
-* @addr_space_mm: Range manager for the device address space.
+* @vma_manager: Address space manager
 * lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @val_seq: Current validation sequence.
@@ -535,14 +536,13 @@ struct ttm_bo_device {
 	struct list_head device_list;
 	struct ttm_bo_global *glob;
 	struct ttm_bo_driver *driver;
-	rwlock_t vm_lock;
 	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
 	spinlock_t fence_lock;
+
 	/*
-	 * Protected by the vm lock.
+	 * Protected by internal locks.
 	 */
-	struct rb_root addr_space_rb;
-	struct drm_mm addr_space_mm;
+	struct drm_vma_offset_manager vma_manager;
 
 	/*
 	 * Protected by the global:lru lock.