From 7d2dc16ea91a04db0abd8d2e57f3f09e02e00658 Mon Sep 17 00:00:00 2001
From: Omer Shpigelman <oshpigelman@habana.ai>
Date: Thu, 14 Nov 2019 18:23:57 +0000
Subject: [PATCH] habanalabs: skip VA block list update in reset flow

commit 71c5e55e7c077fa17c42fbda91a8d14322825c44 upstream.

Reduce context close time by skipping the VA block free list update, in
order to avoid a hard reset with open contexts. A reset with open
contexts can potentially lead to a kernel crash, as the generic pool of
the MMU hops is destroyed while it is not yet empty because some unmap
operations have not been performed. The effect of this commit is mainly
noticeable when running on the simulator.
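
For illustration only (not part of the upstream patch), below is a
minimal user-space sketch of the pattern this change introduces; the
types and names are simplified stand-ins for the driver's hl_ctx /
hl_va_range bookkeeping, not the real driver API:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-in for the per-context free VA block list. */
    struct va_block {
            unsigned long long start, end;
            struct va_block *next;
    };

    struct toy_ctx {
            struct va_block *free_list;
    };

    static void add_va_block(struct toy_ctx *ctx, unsigned long long start,
                             unsigned long long end)
    {
            struct va_block *b = malloc(sizeof(*b));

            if (!b) {
                    fprintf(stderr,
                            "add va block failed for vaddr: 0x%llx\n", start);
                    return;
            }
            b->start = start;
            b->end = end;
            b->next = ctx->free_list;
            ctx->free_list = b;
    }

    static void unmap_va(struct toy_ctx *ctx, unsigned long long vaddr,
                         unsigned long long size, bool ctx_free)
    {
            /* (the real driver unmaps the MMU mappings here) */

            /*
             * No point in maintaining the free VA block list if the
             * context is closing: the whole list is freed right after.
             */
            if (!ctx_free)
                    add_va_block(ctx, vaddr, vaddr + size - 1);
    }

    static void ctx_fini(struct toy_ctx *ctx)
    {
            struct va_block *b;

            /* leftover mappings are unmapped with ctx_free == true ... */
            unmap_va(ctx, 0x1000, 0x1000, true);

            /* ... because the list is then freed wholesale anyway */
            while ((b = ctx->free_list)) {
                    ctx->free_list = b->next;
                    free(b);
            }
    }

    int main(void)
    {
            struct toy_ctx ctx = { .free_list = NULL };

            unmap_va(&ctx, 0x2000, 0x1000, false); /* normal unmap: keep block */
            ctx_fini(&ctx);                        /* teardown: upkeep skipped */
            return 0;
    }

The trade-off is the same as in the patch: per-unmap list maintenance
is pure overhead once the context teardown path is about to free the
list wholesale.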

Signed-off-by: Omer Shpigelman <oshpigelman@habana.ai>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>

diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 924a438ba973..77ad0bf15c15 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -965,17 +965,19 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
  *
  * @ctx : current context
  * @vaddr : device virtual address to unmap
+ * @ctx_free : true if in context free flow, false otherwise.
  *
  * This function does the following:
  * - Unmap the physical pages related to the given virtual address
  * - return the device virtual block to the virtual block list
  */
-static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
+static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
 {
 	struct hl_device *hdev = ctx->hdev;
 	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
 	struct hl_vm_hash_node *hnode = NULL;
 	struct hl_userptr *userptr = NULL;
+	struct hl_va_range *va_range;
 	enum vm_type_t *vm_type;
 	u64 next_vaddr, i;
 	u32 page_size;
@@ -1003,6 +1005,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
 
 	if (*vm_type == VM_TYPE_USERPTR) {
 		is_userptr = true;
+		va_range = &ctx->host_va_range;
 		userptr = hnode->ptr;
 		rc = init_phys_pg_pack_from_userptr(ctx, userptr,
 				&phys_pg_pack);
@@ -1014,6 +1017,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
 		}
 	} else if (*vm_type == VM_TYPE_PHYS_PACK) {
 		is_userptr = false;
+		va_range = &ctx->dram_va_range;
 		phys_pg_pack = hnode->ptr;
 	} else {
 		dev_warn(hdev->dev,
@@ -1052,12 +1056,18 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
 
 	mutex_unlock(&ctx->mmu_lock);
 
-	if (add_va_block(hdev,
-			is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
-			vaddr,
-			vaddr + phys_pg_pack->total_size - 1))
-		dev_warn(hdev->dev, "add va block failed for vaddr: 0x%llx\n",
-				vaddr);
+	/*
+	 * No point in maintaining the free VA block list if the context is
+	 * closing as the list will be freed anyway
+	 */
+	if (!ctx_free) {
+		rc = add_va_block(hdev, va_range, vaddr,
+				vaddr + phys_pg_pack->total_size - 1);
+		if (rc)
+			dev_warn(hdev->dev,
+				"add va block failed for vaddr: 0x%llx\n",
+				vaddr);
+	}
 
 	atomic_dec(&phys_pg_pack->mapping_cnt);
 	kfree(hnode);
@@ -1189,8 +1199,8 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
 		break;
 
 	case HL_MEM_OP_UNMAP:
-		rc = unmap_device_va(ctx,
-				args->in.unmap.device_virt_addr);
+		rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
+				false);
 		break;
 
 	default:
@@ -1620,7 +1630,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
 		dev_dbg(hdev->dev,
 			"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
 			hnode->vaddr, ctx->asid);
-		unmap_device_va(ctx, hnode->vaddr);
+		unmap_device_va(ctx, hnode->vaddr, true);
 	}
 
 	spin_lock(&vm->idr_lock);
-- 
2.7.4
