Merge tag 'slab-for-6.0-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab fixes from Vlastimil Babka:

 - Fix a possible use-after-free in SLUB's kmem_cache removal,
   introduced in this cycle, by Feng Tang.

 - WQ_MEM_RECLAIM dependency fix for the workqueue-based cpu slab
   flushing introduced in 5.15, by Maurizio Lombardi.

 - Add missing KASAN hooks in two kmalloc entry paths, by Peter
   Collingbourne.

 - A BUG_ON() removal in SLUB's kmem_cache creation when allocation
   fails (the allocation is too small to plausibly fail in practice,
   but syzbot forced the failure with fault injection), by Chao Yu.

* tag 'slab-for-6.0-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm: slub: fix flush_cpu_slab()/__free_slab() invocations in task context.
  mm/slab_common: fix possible double free of kmem_cache
  kasan: call kasan_malloc() from __kmalloc_*track_caller()
  mm/slub: fix to return errno if kmalloc() fails

diff --git a/mm/slab_common.c b/mm/slab_common.c
index 07b9482..ccc0257 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -475,6 +475,7 @@
 void kmem_cache_destroy(struct kmem_cache *s)
 {
 	int refcnt;
+	bool rcu_set;
 
 	if (unlikely(!s) || !kasan_check_byte(s))
 		return;
@@ -482,6 +483,8 @@
 	cpus_read_lock();
 	mutex_lock(&slab_mutex);
 
+	rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
+
 	refcnt = --s->refcount;
 	if (refcnt)
 		goto out_unlock;
@@ -492,7 +495,7 @@
 out_unlock:
 	mutex_unlock(&slab_mutex);
 	cpus_read_unlock();
-	if (!refcnt && !(s->flags & SLAB_TYPESAFE_BY_RCU))
+	if (!refcnt && !rcu_set)
 		kmem_cache_release(s);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
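
Note on the fix above: for a SLAB_TYPESAFE_BY_RCU cache, dropping the
last reference schedules the actual release through RCU, so by the time
the old code re-read s->flags after releasing slab_mutex, the deferred
work could already have freed the kmem_cache. The flag is therefore now
snapshotted into rcu_set while the cache is still known to be alive. A
minimal sketch of the general "snapshot before the final put" pattern
(all names here are invented, this is not kernel code):

#include <stdbool.h>

#define FLAG_DEFERRED_FREE 0x1	/* invented stand-in for SLAB_TYPESAFE_BY_RCU */

struct object {
	int refcount;
	unsigned int flags;
};

extern void release_object(struct object *obj);	/* invented release path */

void put_object(struct object *obj)
{
	/* Snapshot the flag while 'obj' is still guaranteed alive... */
	bool deferred = obj->flags & FLAG_DEFERRED_FREE;

	if (--obj->refcount)
		return;

	/*
	 * ...because once the count hits zero, a deferred releaser may
	 * free 'obj' at any time; re-reading obj->flags here would be
	 * the same use-after-free the patch above removes.
	 */
	if (!deferred)
		release_object(obj);
}
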
diff --git a/mm/slub.c b/mm/slub.c
index 862dbd9..4b98dff 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -310,6 +310,11 @@
  */
 static nodemask_t slab_nodes;
 
+/*
+ * Workqueue used for flush_cpu_slab().
+ */
+static struct workqueue_struct *flushwq;
+
 /********************************************************************
  *			Core slab cache functions
  *******************************************************************/
@@ -2730,7 +2735,7 @@
 		INIT_WORK(&sfw->work, flush_cpu_slab);
 		sfw->skip = false;
 		sfw->s = s;
-		schedule_work_on(cpu, &sfw->work);
+		queue_work_on(cpu, flushwq, &sfw->work);
 	}
 
 	for_each_online_cpu(cpu) {
@@ -4858,6 +4863,8 @@
 
 void __init kmem_cache_init_late(void)
 {
+	flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
+	WARN_ON(!flushwq);
 }
 
 struct kmem_cache *
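
Note on flushwq: the cpu slab flush can be reached from work items that
themselves run on WQ_MEM_RECLAIM workqueues, and the workqueue core
(check_flush_dependency()) warns when reclaim-dependent work flushes
work queued on a queue without a rescuer, since a !WQ_MEM_RECLAIM queue
cannot guarantee forward progress under memory pressure.
schedule_work_on() targets the system workqueue, which has no rescuer;
hence the dedicated WQ_MEM_RECLAIM queue above. A sketch of the API
involved (the queue and work names are invented; the workqueue calls
themselves are real):

#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* invented name */
static struct work_struct my_work;	/* invented work item */

static void my_work_fn(struct work_struct *work)
{
	/*
	 * A WQ_MEM_RECLAIM queue has a pre-allocated rescuer thread,
	 * so this runs even when no new worker can be allocated.
	 */
}

static int __init my_setup(void)
{
	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
	if (!my_wq)
		return -ENOMEM;

	INIT_WORK(&my_work, my_work_fn);
	queue_work(my_wq, &my_work);	/* not schedule_work(): that uses system_wq */
	return 0;
}
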
@@ -4926,6 +4933,8 @@
 	/* Honor the call site pointer we received. */
 	trace_kmalloc(caller, ret, s, size, s->size, gfpflags);
 
+	ret = kasan_kmalloc(s, ret, size, gfpflags);
+
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
@@ -4957,6 +4966,8 @@
 	/* Honor the call site pointer we received. */
 	trace_kmalloc_node(caller, ret, s, size, s->size, gfpflags, node);
 
+	ret = kasan_kmalloc(s, ret, size, gfpflags);
+
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
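
Note on the kasan_kmalloc() calls: both *_track_caller entry points
above handed out objects without running the common KASAN hook, so the
redzone beyond the requested size was never poisoned for them, and with
the tag-based KASAN modes the pointer was never retagged. The invariant
is that the hook's return value replaces the raw pointer. A hedged
sketch of that contract (my_alloc_wrapper is invented; kasan_kmalloc()
is the real hook):

#include <linux/kasan.h>
#include <linux/slab.h>

static void *my_alloc_wrapper(struct kmem_cache *s, size_t size, gfp_t flags)
{
	void *ret = kmem_cache_alloc(s, flags);

	/*
	 * Unpoisons 'size' bytes, poisons the rest of the object as a
	 * redzone, and may return a re-tagged pointer under SW_TAGS or
	 * HW_TAGS, which is why the result must overwrite 'ret'.
	 */
	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}
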
@@ -5890,7 +5901,8 @@
 	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
 	char *p = name;
 
-	BUG_ON(!name);
+	if (!name)
+		return ERR_PTR(-ENOMEM);
 
 	*p++ = ':';
 	/*
@@ -5948,6 +5960,8 @@
 		 * for the symlinks.
 		 */
 		name = create_unique_id(s);
+		if (IS_ERR(name))
+			return PTR_ERR(name);
 	}
 
 	s->kobj.kset = kset;
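
Note on the create_unique_id() change: an allocation failure now
propagates as an errno instead of a BUG(). The ERR_PTR()/IS_ERR()/
PTR_ERR() trio encodes a small negative errno in the pointer value
itself, so pointer-returning functions can still report a precise
error. A self-contained sketch of the convention (the helpers make_id
and use_id are invented):

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static char *make_id(void)	/* invented helper */
{
	char *name = kmalloc(64, GFP_KERNEL);

	if (!name)
		return ERR_PTR(-ENOMEM);	/* encode the errno, don't BUG() */
	snprintf(name, 64, ":t-%07d", 42);
	return name;
}

static int use_id(void)	/* invented caller */
{
	char *name = make_id();

	if (IS_ERR(name))
		return PTR_ERR(name);	/* decode and propagate the errno */

	/* ... use 'name' ... */
	kfree(name);
	return 0;
}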