// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

int nr_allocated;
int preempt_count;
int test_verbose;

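/*
 * Test-harness knobs: a cache's non_kernel count is a budget of
 * allocations allowed to succeed without __GFP_DIRECT_RECLAIM, and the
 * callback (passed the private pointer) runs on the next cache operation
 * after a failed non-sleeping allocation arms exec_callback.
 */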
void kmem_cache_set_callback(struct kmem_cache *cachep, void (*callback)(void *))
{
	cachep->callback = callback;
}

void kmem_cache_set_private(struct kmem_cache *cachep, void *private)
{
	cachep->private = private;
}

void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
{
	cachep->non_kernel = val;
}

unsigned long kmem_cache_get_alloc(struct kmem_cache *cachep)
{
	return cachep->size * cachep->nr_allocated;
}

unsigned long kmem_cache_nr_allocated(struct kmem_cache *cachep)
{
	return cachep->nr_allocated;
}

unsigned long kmem_cache_nr_tallocated(struct kmem_cache *cachep)
{
	return cachep->nr_tallocated;
}

void kmem_cache_zero_nr_tallocated(struct kmem_cache *cachep)
{
	cachep->nr_tallocated = 0;
}

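/*
 * Allocate one object, preferring a node cached on the free list.  A
 * non-sleeping allocation (no __GFP_DIRECT_RECLAIM) consumes the
 * non_kernel budget and fails once it is exhausted, arming the deferred
 * callback for the next call.
 */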
void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
		int gfp)
{
	void *p;

	if (cachep->exec_callback) {
		if (cachep->callback)
			cachep->callback(cachep->private);
		cachep->exec_callback = false;
	}

	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (!cachep->non_kernel) {
			if (cachep->callback)
				cachep->exec_callback = true;
			return NULL;
		}

		cachep->non_kernel--;
	}

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		struct radix_tree_node *node = cachep->objs;
		cachep->nr_objs--;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
		p = node;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->align) {
			/* posix_memalign() returns 0 on success, an errno value on failure */
			if (posix_memalign(&p, cachep->align, cachep->size))
				return NULL;
		} else {
			p = malloc(cachep->size);
			if (!p)
				return NULL;
		}

		if (cachep->ctor)
			cachep->ctor(p);
		else if (gfp & __GFP_ZERO)
			memset(p, 0, cachep->size);
	}

	uatomic_inc(&cachep->nr_allocated);
	uatomic_inc(&nr_allocated);
	uatomic_inc(&cachep->nr_tallocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", p);
	return p;
}

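/*
 * Stash an unaligned object on the cache's free list for reuse.  Once
 * more than ten are already cached, or for aligned caches, the object is
 * poisoned and freed immediately instead.
 */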
void __kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	if (cachep->nr_objs > 10 || cachep->align) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
}

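/* Accounting wrapper: update the allocation counters, then stash or free. */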
void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	uatomic_dec(&nr_allocated);
	uatomic_dec(&cachep->nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	__kmem_cache_free_locked(cachep, objp);
}

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	pthread_mutex_lock(&cachep->lock);
	kmem_cache_free_locked(cachep, objp);
	pthread_mutex_unlock(&cachep->lock);
}

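/*
 * Free a whole list of objects under a single lock acquisition, running
 * any deferred callback first.
 */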
void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
{
	size_t i;

	if (kmalloc_verbose)
		pr_debug("Bulk free %p[0-%zu]\n", list, size - 1);

	if (cachep->exec_callback) {
		if (cachep->callback)
			cachep->callback(cachep->private);
		cachep->exec_callback = false;
	}

	pthread_mutex_lock(&cachep->lock);
	for (i = 0; i < size; i++)
		kmem_cache_free_locked(cachep, list[i]);
	pthread_mutex_unlock(&cachep->lock);
}

void kmem_cache_shrink(struct kmem_cache *cachep)
{
}

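/*
 * Bulk allocation is all-or-nothing: if any object cannot be allocated
 * (out of memory, or the non_kernel budget runs dry), everything
 * allocated so far is handed back, the deferred callback is armed, and
 * 0 is returned.  On success the number of objects allocated is returned.
 */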
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
		void **p)
{
	size_t i;

	if (kmalloc_verbose)
		pr_debug("Bulk alloc %zu\n", size);

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs >= size) {
		struct radix_tree_node *node;

		for (i = 0; i < size; i++) {
			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
				if (!cachep->non_kernel)
					break;
				cachep->non_kernel--;
			}

			node = cachep->objs;
			cachep->nr_objs--;
			cachep->objs = node->parent;
			p[i] = node;
			node->parent = NULL;
		}
		pthread_mutex_unlock(&cachep->lock);
	} else {
		pthread_mutex_unlock(&cachep->lock);
		for (i = 0; i < size; i++) {
			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
				if (!cachep->non_kernel)
					break;
				cachep->non_kernel--;
			}

			if (cachep->align) {
				/* posix_memalign() returns 0 on success, an errno value on failure */
				if (posix_memalign(&p[i], cachep->align,
						   cachep->size))
					break;
			} else {
				p[i] = malloc(cachep->size);
				if (!p[i])
					break;
			}
			if (cachep->ctor)
				cachep->ctor(p[i]);
			else if (gfp & __GFP_ZERO)
				memset(p[i], 0, cachep->size);
		}
	}

	if (i < size) {
		/* Partial failure: roll back and arm the deferred callback. */
		size = i;
		pthread_mutex_lock(&cachep->lock);
		for (i = 0; i < size; i++)
			__kmem_cache_free_locked(cachep, p[i]);
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->callback)
			cachep->exec_callback = true;
		return 0;
	}

	for (i = 0; i < size; i++) {
		uatomic_inc(&nr_allocated);
		uatomic_inc(&cachep->nr_allocated);
		uatomic_inc(&cachep->nr_tallocated);
		if (kmalloc_verbose)
			printf("Allocating %p from slab\n", p[i]);
	}

	return size;
}

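/*
 * Minimal stand-in for the kernel's cache constructor; only the fields
 * the tests use are carried over from args.
 */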
struct kmem_cache *
__kmem_cache_create_args(const char *name, unsigned int size,
		struct kmem_cache_args *args,
		unsigned int flags)
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->align = args->align;
	ret->sheaf_capacity = args->sheaf_capacity;
	ret->nr_objs = 0;
	ret->nr_allocated = 0;
	ret->nr_tallocated = 0;
	ret->objs = NULL;
	ret->ctor = args->ctor;
	ret->non_kernel = 0;
	ret->exec_callback = false;
	ret->callback = NULL;
	ret->private = NULL;

	return ret;
}

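/*
 * Allocate a sheaf prefilled with at least size objects; its capacity is
 * the larger of size and the cache's configured sheaf_capacity.
 */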
struct slab_sheaf *
kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size)
{
	struct slab_sheaf *sheaf;
	unsigned int capacity;

	if (s->exec_callback) {
		if (s->callback)
			s->callback(s->private);
		s->exec_callback = false;
	}

	capacity = max(size, s->sheaf_capacity);

	sheaf = calloc(1, sizeof(*sheaf) + sizeof(void *) * capacity);
	if (!sheaf)
		return NULL;

	sheaf->cache = s;
	sheaf->capacity = capacity;
	sheaf->size = kmem_cache_alloc_bulk(s, gfp, size, sheaf->objects);
	if (!sheaf->size) {
		free(sheaf);
		return NULL;
	}

	return sheaf;
}

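/*
 * Top a sheaf back up to size objects, replacing it with a freshly
 * prefilled, larger one when size exceeds its capacity.
 */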
int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
		struct slab_sheaf **sheafp, unsigned int size)
{
	struct slab_sheaf *sheaf = *sheafp;
	int refill;

	if (sheaf->size >= size)
		return 0;

	if (size > sheaf->capacity) {
		sheaf = kmem_cache_prefill_sheaf(s, gfp, size);
		if (!sheaf)
			return -ENOMEM;

		kmem_cache_return_sheaf(s, gfp, *sheafp);
		*sheafp = sheaf;
		return 0;
	}

	refill = kmem_cache_alloc_bulk(s, gfp, size - sheaf->size,
				       &sheaf->objects[sheaf->size]);
	if (!refill)
		return -ENOMEM;

	sheaf->size += refill;
	return 0;
}

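/* Give any objects still in the sheaf back to the cache, then destroy it. */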
void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
		struct slab_sheaf *sheaf)
{
	if (sheaf->size)
		kmem_cache_free_bulk(s, sheaf->size, &sheaf->objects[0]);

	free(sheaf);
}

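/*
 * Pop an object off the top of the sheaf.  The gfp flags are unused here
 * since the objects were already allocated at prefill/refill time.
 */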
void *
kmem_cache_alloc_from_sheaf(struct kmem_cache *s, gfp_t gfp,
		struct slab_sheaf *sheaf)
{
	void *obj;

	if (sheaf->size == 0) {
		printf("Nothing left in sheaf!\n");
		return NULL;
	}

	obj = sheaf->objects[--sheaf->size];
	sheaf->objects[sheaf->size] = NULL;

	return obj;
}

/*
 * Test the test infrastructure for kmem_cache_alloc/free and bulk
 * counterparts.
 */
void test_kmem_cache_bulk(void)
{
	int i;
	void *list[12];
	static struct kmem_cache *test_cache, *test_cache2;

	/*
	 * Test the bulk allocators on an unaligned kmem_cache to force the
	 * bulk alloc/free paths to reuse objects kept on the internal free
	 * list.
	 */
	test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);

	for (i = 0; i < 5; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 5; i++)
		kmem_cache_free(test_cache, list[i]);
	assert(test_cache->nr_objs == 5);

	kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
	kmem_cache_free_bulk(test_cache, 5, list);

	for (i = 0; i < 12; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 12; i++)
		kmem_cache_free(test_cache, list[i]);

	/* The last free will not be kept around */
	assert(test_cache->nr_objs == 11);

	/* Aligned caches will immediately free */
	test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);

	kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
	kmem_cache_free_bulk(test_cache2, 10, list);
	assert(!test_cache2->nr_objs);
}
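
/*
 * A minimal sketch (not part of the original suite) exercising the sheaf
 * API above: prefill, allocate from, refill, and return a sheaf.  The
 * function name and cache parameters here are illustrative only.
 */
void test_kmem_cache_sheaf_sketch(void)
{
	struct slab_sheaf *sheaf;
	static struct kmem_cache *cache;

	cache = kmem_cache_create("sheaf_cache", 256, 0, SLAB_PANIC, NULL);

	sheaf = kmem_cache_prefill_sheaf(cache, __GFP_DIRECT_RECLAIM, 4);
	assert(sheaf && sheaf->size == 4);

	/* Pop two objects and hand them straight back to the cache. */
	kmem_cache_free(cache, kmem_cache_alloc_from_sheaf(cache, 0, sheaf));
	kmem_cache_free(cache, kmem_cache_alloc_from_sheaf(cache, 0, sheaf));
	assert(sheaf->size == 2);

	/* Top the sheaf back up to four objects. */
	assert(kmem_cache_refill_sheaf(cache, __GFP_DIRECT_RECLAIM, &sheaf, 4) == 0);
	assert(sheaf->size == 4);

	/* Return the remaining objects and destroy the sheaf. */
	kmem_cache_return_sheaf(cache, __GFP_DIRECT_RECLAIM, sheaf);
}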