Subject: x86: perf: Deal with kfree from atomic contexts
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 04 Oct 2012 13:32:46 +0100

The x86 perf code allocates memory upfront because it might need it
later. The detection that the memory is not needed happens in atomic
context, and kfree() is called from there. RT can't do that, as
kfree() may take sleeping locks. Use kfree_rcu() instead, which
defers the actual free until after a grace period.

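As an illustration only (the struct and function names below are made
up and not part of this patch): kfree_rcu() needs a struct rcu_head
embedded in the object and merely queues the object from the atomic
caller; the real kfree() runs later from RCU callback context.

	struct foo {
		int			data;
		struct rcu_head		rcu;	/* used by kfree_rcu() */
	};

	static void drop_foo(struct foo *f)
	{
		/* Only queues f here; the kfree() happens after a grace period */
		kfree_rcu(f, rcu);
	}
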
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/kernel/cpu/perf_event.h              |    1 +
 arch/x86/kernel/cpu/perf_event_intel.c        |    2 +-
 arch/x86/kernel/cpu/perf_event_intel_uncore.c |    5 +++--
 arch/x86/kernel/cpu/perf_event_intel_uncore.h |    1 +
 4 files changed, 6 insertions(+), 3 deletions(-)

Index: linux-stable/arch/x86/kernel/cpu/perf_event.h
===================================================================
--- linux-stable.orig/arch/x86/kernel/cpu/perf_event.h
+++ linux-stable/arch/x86/kernel/cpu/perf_event.h
@@ -108,6 +108,7 @@ struct intel_shared_regs {
 	struct er_account       regs[EXTRA_REG_MAX];
 	int                     refcnt;		/* per-core: #HT threads */
 	unsigned                core_id;	/* per-core: core id */
+	struct rcu_head		rcu;
 };
 
 #define MAX_LBR_ENTRIES		16
Index: linux-stable/arch/x86/kernel/cpu/perf_event_intel.c
===================================================================
--- linux-stable.orig/arch/x86/kernel/cpu/perf_event_intel.c
+++ linux-stable/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1707,7 +1707,7 @@ static void intel_pmu_cpu_dying(int cpu)
 	pc = cpuc->shared_regs;
 	if (pc) {
 		if (pc->core_id == -1 || --pc->refcnt == 0)
-			kfree(pc);
+			kfree_rcu(pc, rcu);
 		cpuc->shared_regs = NULL;
 	}
 
Index: linux-stable/arch/x86/kernel/cpu/perf_event_intel_uncore.c
===================================================================
--- linux-stable.orig/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ linux-stable/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2603,7 +2603,7 @@ static void __cpuinit uncore_cpu_dying(i
 			box = *per_cpu_ptr(pmu->box, cpu);
 			*per_cpu_ptr(pmu->box, cpu) = NULL;
 			if (box && atomic_dec_and_test(&box->refcnt))
-				kfree(box);
+				kfree_rcu(box, rcu);
 		}
 	}
 }
@@ -2633,7 +2633,8 @@ static int __cpuinit uncore_cpu_starting
 				if (exist && exist->phys_id == phys_id) {
 					atomic_inc(&exist->refcnt);
 					*per_cpu_ptr(pmu->box, cpu) = exist;
-					kfree(box);
+					if (box)
+						kfree_rcu(box, rcu);
 					box = NULL;
 					break;
 				}
Index: linux-stable/arch/x86/kernel/cpu/perf_event_intel_uncore.h
===================================================================
--- linux-stable.orig/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ linux-stable/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -419,6 +419,7 @@ struct intel_uncore_box {
 	struct hrtimer hrtimer;
 	struct list_head list;
+	struct rcu_head rcu;
 	struct intel_uncore_extra_reg shared_regs[0];
 };
 
 #define UNCORE_BOX_FLAG_INITIATED	0