| From: Minchan Kim <minchan@kernel.org> |
| Subject: zsmalloc: introduce obj_allocated |
| |
| The usage pattern for obj_to_head() is to test whether the object in
| the zspage is allocated and, if so, mask off OBJ_ALLOCATED_TAG to
| recover the handle.  Thus, introduce obj_allocated(), which folds
| both steps into a single helper.
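| 
| For illustration only, a minimal compilable userspace sketch of the
| caller-side change (not kernel code: the tag value, the object
| layout, and the dropped PageHugeObject() branch are simplified
| stand-ins for the real zsmalloc definitions):
| 
|     #include <stdbool.h>
|     #include <stdio.h>
| 
|     #define OBJ_ALLOCATED_TAG 1UL
| 
|     /* Mock: the first word of an allocated object is (handle | tag). */
|     static unsigned long obj_to_head(void *obj)
|     {
|             return *(unsigned long *)obj;
|     }
| 
|     /* New helper: test the tag, hand back the untagged handle. */
|     static bool obj_allocated(void *obj, unsigned long *phandle)
|     {
|             unsigned long handle = obj_to_head(obj);
| 
|             if (!(handle & OBJ_ALLOCATED_TAG))
|                     return false;
| 
|             *phandle = handle & ~OBJ_ALLOCATED_TAG;
|             return true;
|     }
| 
|     int main(void)
|     {
|             unsigned long slot = 0xabc0UL | OBJ_ALLOCATED_TAG;
|             unsigned long handle;
| 
|             /* Old pattern: callers open-coded the test and the mask. */
|             unsigned long head = obj_to_head(&slot);
|             if (head & OBJ_ALLOCATED_TAG)
|                     printf("old: handle %#lx\n", head & ~OBJ_ALLOCATED_TAG);
| 
|             /* New pattern: one call does both. */
|             if (obj_allocated(&slot, &handle))
|                     printf("new: handle %#lx\n", handle);
| 
|             return 0;
|     }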
| |
| Link: https://lkml.kernel.org/r/20211115185909.3949505-5-minchan@kernel.org |
| Signed-off-by: Minchan Kim <minchan@kernel.org> |
| Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> |
| Cc: Mike Galbraith <umgwanakikbuti@gmail.com> |
| Cc: Peter Zijlstra (Intel) <peterz@infradead.org> |
| Cc: Sergey Senozhatsky <senozhatsky@chromium.org> |
| Cc: Thomas Gleixner <tglx@linutronix.de> |
| Signed-off-by: Andrew Morton <akpm@linux-foundation.org> |
| --- |
| |
| mm/zsmalloc.c | 33 ++++++++++++++++----------------- |
| 1 file changed, 16 insertions(+), 17 deletions(-) |
| |
| --- a/mm/zsmalloc.c~zsmalloc-introduce-obj_allocated |
| +++ a/mm/zsmalloc.c |
| @@ -877,13 +877,21 @@ static unsigned long handle_to_obj(unsig |
| return *(unsigned long *)handle; |
| } |
| |
| -static unsigned long obj_to_head(struct page *page, void *obj) |
| +static bool obj_allocated(struct page *page, void *obj, unsigned long *phandle) |
| { |
| + unsigned long handle; |
| + |
| if (unlikely(PageHugeObject(page))) { |
| VM_BUG_ON_PAGE(!is_first_page(page), page); |
| - return page->index; |
| + handle = page->index; |
| } else |
| - return *(unsigned long *)obj; |
| + handle = *(unsigned long *)obj; |
| + |
| + if (!(handle & OBJ_ALLOCATED_TAG)) |
| + return false; |
| + |
| + *phandle = handle & ~OBJ_ALLOCATED_TAG; |
| + return true; |
| } |
| |
| static inline int testpin_tag(unsigned long handle) |
| @@ -1606,7 +1614,6 @@ static void zs_object_copy(struct size_c |
| static unsigned long find_alloced_obj(struct size_class *class, |
| struct page *page, int *obj_idx) |
| { |
| - unsigned long head; |
| int offset = 0; |
| int index = *obj_idx; |
| unsigned long handle = 0; |
| @@ -1616,9 +1623,7 @@ static unsigned long find_alloced_obj(st |
| offset += class->size * index; |
| |
| while (offset < PAGE_SIZE) { |
| - head = obj_to_head(page, addr + offset); |
| - if (head & OBJ_ALLOCATED_TAG) { |
| - handle = head & ~OBJ_ALLOCATED_TAG; |
| + if (obj_allocated(page, addr + offset, &handle)) { |
| if (trypin_tag(handle)) |
| break; |
| handle = 0; |
| @@ -1928,7 +1933,7 @@ static int zs_page_migrate(struct addres |
| struct page *dummy; |
| void *s_addr, *d_addr, *addr; |
| int offset, pos; |
| - unsigned long handle, head; |
| + unsigned long handle; |
| unsigned long old_obj, new_obj; |
| unsigned int obj_idx; |
| int ret = -EAGAIN; |
| @@ -1964,9 +1969,7 @@ static int zs_page_migrate(struct addres |
| pos = offset; |
| s_addr = kmap_atomic(page); |
| while (pos < PAGE_SIZE) { |
| - head = obj_to_head(page, s_addr + pos); |
| - if (head & OBJ_ALLOCATED_TAG) { |
| - handle = head & ~OBJ_ALLOCATED_TAG; |
| + if (obj_allocated(page, s_addr + pos, &handle)) { |
| if (!trypin_tag(handle)) |
| goto unpin_objects; |
| } |
| @@ -1982,9 +1985,7 @@ static int zs_page_migrate(struct addres |
| |
| for (addr = s_addr + offset; addr < s_addr + pos; |
| addr += class->size) { |
| - head = obj_to_head(page, addr); |
| - if (head & OBJ_ALLOCATED_TAG) { |
| - handle = head & ~OBJ_ALLOCATED_TAG; |
| + if (obj_allocated(page, addr, &handle)) { |
| BUG_ON(!testpin_tag(handle)); |
| |
| old_obj = handle_to_obj(handle); |
| @@ -2029,9 +2030,7 @@ static int zs_page_migrate(struct addres |
| unpin_objects: |
| for (addr = s_addr + offset; addr < s_addr + pos; |
| addr += class->size) { |
| - head = obj_to_head(page, addr); |
| - if (head & OBJ_ALLOCATED_TAG) { |
| - handle = head & ~OBJ_ALLOCATED_TAG; |
| + if (obj_allocated(page, addr, &handle)) { |
| BUG_ON(!testpin_tag(handle)); |
| unpin_tag(handle); |
| } |
| _ |