From 69f55751d5afdb69ef213c843fa96d1933462ae1 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 3 Jul 2009 08:44:51 -0500
Subject: [PATCH] powerpc: Annotate atomic_locks
commit 32ff9169c2f6c94c900f08217359972e2f3eb17f in tip.

Convert locks that are taken in low-level, non-preemptible contexts
(interrupt controller handling, TLB invalidation, MMU context
management, the die() path and EEH error reporting) from spinlock_t
to raw_spinlock_t. On PREEMPT_RT, spinlock_t becomes a sleeping lock;
these paths run with hard interrupts disabled and must keep the
traditional busy-wait semantics, so the locks are annotated as raw.
The patch also wraps the UP _tlbil_pid(0) call in
flush_tlb_kernel_range() in a preempt_disable()/preempt_enable()
pair, and on RT re-enables interrupts in _exception() before the
signal is delivered.
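For illustration only (hypothetical lock and register names, not part
of the files changed below), the usage pattern after conversion is:

    static DEFINE_RAW_SPINLOCK(hw_lock);

    static void hw_update(u32 __iomem *reg, u32 set, u32 clear)
    {
            unsigned long flags;

            /* raw lock never sleeps, so this is valid even in
             * hardirq context on PREEMPT_RT */
            raw_spin_lock_irqsave(&hw_lock, flags);
            writel((readl(reg) & ~clear) | set, reg);
            raw_spin_unlock_irqrestore(&hw_lock, flags);
    }
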
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index a002682..61913d9 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -289,7 +289,7 @@ struct mpic
#ifdef CONFIG_MPIC_U3_HT_IRQS
/* The fixup table */
struct mpic_irq_fixup *fixups;
- spinlock_t fixup_lock;
+ raw_spinlock_t fixup_lock;
#endif
/* Register access method */
diff --git a/arch/powerpc/include/asm/pmac_feature.h b/arch/powerpc/include/asm/pmac_feature.h
index 877c35a..00eedc5 100644
--- a/arch/powerpc/include/asm/pmac_feature.h
+++ b/arch/powerpc/include/asm/pmac_feature.h
@@ -378,7 +378,7 @@ extern struct macio_chip* macio_find(struct device_node* child, int type);
* Those are exported by pmac feature for internal use by arch code
* only like the platform function callbacks, do not use directly in drivers
*/
-extern spinlock_t feature_lock;
+extern raw_spinlock_t feature_lock;
extern struct device_node *uninorth_node;
extern u32 __iomem *uninorth_base;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index dee83b8..28483d1 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -468,7 +468,7 @@ void do_softirq(void)
*/
static LIST_HEAD(irq_hosts);
-static DEFINE_SPINLOCK(irq_big_lock);
+static DEFINE_RAW_SPINLOCK(irq_big_lock);
static unsigned int revmap_trees_allocated;
static DEFINE_MUTEX(revmap_trees_mutex);
struct irq_map_entry irq_map[NR_IRQS];
@@ -514,14 +514,14 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
if (host->ops->match == NULL)
host->ops->match = default_irq_host_match;
- spin_lock_irqsave(&irq_big_lock, flags);
+ raw_spin_lock_irqsave(&irq_big_lock, flags);
/* If it's a legacy controller, check for duplicates and
* mark it as allocated (we use irq 0 host pointer for that
*/
if (revmap_type == IRQ_HOST_MAP_LEGACY) {
if (irq_map[0].host != NULL) {
- spin_unlock_irqrestore(&irq_big_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_big_lock, flags);
/* If we are early boot, we can't free the structure,
* too bad...
* this will be fixed once slab is made available early
@@ -535,7 +535,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
}
list_add(&host->link, &irq_hosts);
- spin_unlock_irqrestore(&irq_big_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_big_lock, flags);
/* Additional setups per revmap type */
switch(revmap_type) {
@@ -586,13 +586,13 @@ struct irq_host *irq_find_host(struct device_node *node)
* the absence of a device node. This isn't a problem so far
* yet though...
*/
- spin_lock_irqsave(&irq_big_lock, flags);
+ raw_spin_lock_irqsave(&irq_big_lock, flags);
list_for_each_entry(h, &irq_hosts, link)
if (h->ops->match(h, node)) {
found = h;
break;
}
- spin_unlock_irqrestore(&irq_big_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_big_lock, flags);
return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);
@@ -961,7 +961,7 @@ unsigned int irq_alloc_virt(struct irq_host *host,
if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
return NO_IRQ;
- spin_lock_irqsave(&irq_big_lock, flags);
+ raw_spin_lock_irqsave(&irq_big_lock, flags);
/* Use hint for 1 interrupt if any */
if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
@@ -985,7 +985,7 @@ unsigned int irq_alloc_virt(struct irq_host *host,
}
}
if (found == NO_IRQ) {
- spin_unlock_irqrestore(&irq_big_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_big_lock, flags);
return NO_IRQ;
}
hint_found:
@@ -994,7 +994,7 @@ unsigned int irq_alloc_virt(struct irq_host *host,
smp_wmb();
irq_map[i].host = host;
}
- spin_unlock_irqrestore(&irq_big_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_big_lock, flags);
return found;
}
@@ -1006,7 +1006,7 @@ void irq_free_virt(unsigned int virq, unsigned int count)
WARN_ON (virq < NUM_ISA_INTERRUPTS);
WARN_ON (count == 0 || (virq + count) > irq_virq_count);
- spin_lock_irqsave(&irq_big_lock, flags);
+ raw_spin_lock_irqsave(&irq_big_lock, flags);
for (i = virq; i < (virq + count); i++) {
struct irq_host *host;
@@ -1019,7 +1019,7 @@ void irq_free_virt(unsigned int virq, unsigned int count)
smp_wmb();
irq_map[i].host = NULL;
}
- spin_unlock_irqrestore(&irq_big_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_big_lock, flags);
}
int arch_early_irq_init(void)
diff --git a/arch/powerpc/kernel/pmc.c b/arch/powerpc/kernel/pmc.c
index 0516e2d..461499b 100644
--- a/arch/powerpc/kernel/pmc.c
+++ b/arch/powerpc/kernel/pmc.c
@@ -37,7 +37,7 @@ static void dummy_perf(struct pt_regs *regs)
}
-static DEFINE_SPINLOCK(pmc_owner_lock);
+static DEFINE_RAW_SPINLOCK(pmc_owner_lock);
static void *pmc_owner_caller; /* mostly for debugging */
perf_irq_t perf_irq = dummy_perf;
@@ -45,7 +45,7 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq)
{
int err = 0;
- spin_lock(&pmc_owner_lock);
+ raw_spin_lock(&pmc_owner_lock);
if (pmc_owner_caller) {
printk(KERN_WARNING "reserve_pmc_hardware: "
@@ -59,21 +59,21 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq)
perf_irq = new_perf_irq ? new_perf_irq : dummy_perf;
out:
- spin_unlock(&pmc_owner_lock);
+ raw_spin_unlock(&pmc_owner_lock);
return err;
}
EXPORT_SYMBOL_GPL(reserve_pmc_hardware);
void release_pmc_hardware(void)
{
- spin_lock(&pmc_owner_lock);
+ raw_spin_lock(&pmc_owner_lock);
WARN_ON(! pmc_owner_caller);
pmc_owner_caller = NULL;
perf_irq = dummy_perf;
- spin_unlock(&pmc_owner_lock);
+ raw_spin_unlock(&pmc_owner_lock);
}
EXPORT_SYMBOL_GPL(release_pmc_hardware);
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index d069ff8..4f99a3a 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -102,11 +102,11 @@ static inline void pmac_backlight_unblank(void) { }
int die(const char *str, struct pt_regs *regs, long err)
{
static struct {
- spinlock_t lock;
+ raw_spinlock_t lock;
u32 lock_owner;
int lock_owner_depth;
} die = {
- .lock = __SPIN_LOCK_UNLOCKED(die.lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(die.lock),
.lock_owner = -1,
.lock_owner_depth = 0
};
@@ -120,7 +120,7 @@ int die(const char *str, struct pt_regs *regs, long err)
if (die.lock_owner != raw_smp_processor_id()) {
console_verbose();
- spin_lock_irqsave(&die.lock, flags);
+ raw_spin_lock_irqsave(&die.lock, flags);
die.lock_owner = smp_processor_id();
die.lock_owner_depth = 0;
bust_spinlocks(1);
@@ -155,7 +155,7 @@ int die(const char *str, struct pt_regs *regs, long err)
bust_spinlocks(0);
die.lock_owner = -1;
add_taint(TAINT_DIE);
- spin_unlock_irqrestore(&die.lock, flags);
+ raw_spin_unlock_irqrestore(&die.lock, flags);
if (kexec_should_crash(current) ||
kexec_sr_activated(smp_processor_id()))
@@ -202,6 +202,11 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
addr, regs->nip, regs->link, code);
}
+#ifdef CONFIG_PREEMPT_RT
+ local_irq_enable();
+ preempt_check_resched();
+#endif
+
memset(&info, 0, sizeof(info));
info.si_signo = signr;
info.si_code = code;
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 056d23a..2cecbf5 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -37,7 +37,7 @@
#define HPTE_LOCK_BIT 3
-static DEFINE_SPINLOCK(native_tlbie_lock);
+static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
static inline void __tlbie(unsigned long va, int psize, int ssize)
{
@@ -104,7 +104,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
if (use_local)
use_local = mmu_psize_defs[psize].tlbiel;
if (lock_tlbie && !use_local)
- spin_lock(&native_tlbie_lock);
+ raw_spin_lock(&native_tlbie_lock);
asm volatile("ptesync": : :"memory");
if (use_local) {
__tlbiel(va, psize, ssize);
@@ -114,7 +114,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
if (lock_tlbie && !use_local)
- spin_unlock(&native_tlbie_lock);
+ raw_spin_unlock(&native_tlbie_lock);
}
static inline void native_lock_hpte(struct hash_pte *hptep)
@@ -434,7 +434,7 @@ static void native_hpte_clear(void)
/* we take the tlbie lock and hold it. Some hardware will
* deadlock if we try to tlbie from two processors at once.
*/
- spin_lock(&native_tlbie_lock);
+ raw_spin_lock(&native_tlbie_lock);
slots = pteg_count * HPTES_PER_GROUP;
@@ -458,7 +458,7 @@ static void native_hpte_clear(void)
}
asm volatile("eieio; tlbsync; ptesync":::"memory");
- spin_unlock(&native_tlbie_lock);
+ raw_spin_unlock(&native_tlbie_lock);
local_irq_restore(flags);
}
@@ -521,7 +521,7 @@ static void native_flush_hash_range(unsigned long number, int local)
int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
- spin_lock(&native_tlbie_lock);
+ raw_spin_lock(&native_tlbie_lock);
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
@@ -536,7 +536,7 @@ static void native_flush_hash_range(unsigned long number, int local)
asm volatile("eieio; tlbsync; ptesync":::"memory");
if (lock_tlbie)
- spin_unlock(&native_tlbie_lock);
+ raw_spin_unlock(&native_tlbie_lock);
}
local_irq_restore(flags);
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 1044a63..dbc6921 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -56,7 +56,7 @@ static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
-static DEFINE_SPINLOCK(context_lock);
+static DEFINE_RAW_SPINLOCK(context_lock);
#define CTX_MAP_SIZE \
(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
@@ -121,9 +121,9 @@ static unsigned int steal_context_smp(unsigned int id)
/* This will happen if you have more CPUs than available contexts,
* all we can do here is wait a bit and try again
*/
- spin_unlock(&context_lock);
+ raw_spin_unlock(&context_lock);
cpu_relax();
- spin_lock(&context_lock);
+ raw_spin_lock(&context_lock);
/* This will cause the caller to try again */
return MMU_NO_CONTEXT;
@@ -194,7 +194,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
unsigned long *map;
/* No lockless fast path .. yet */
- spin_lock(&context_lock);
+ raw_spin_lock(&context_lock);
pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
cpu, next, next->context.active, next->context.id);
@@ -278,7 +278,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
/* Flick the MMU and release lock */
pr_hardcont(" -> %d\n", id);
set_context(id, next->pgd);
- spin_unlock(&context_lock);
+ raw_spin_unlock(&context_lock);
}
/*
@@ -307,7 +307,7 @@ void destroy_context(struct mm_struct *mm)
WARN_ON(mm->context.active != 0);
- spin_lock_irqsave(&context_lock, flags);
+ raw_spin_lock_irqsave(&context_lock, flags);
id = mm->context.id;
if (id != MMU_NO_CONTEXT) {
__clear_bit(id, context_map);
@@ -318,7 +318,7 @@ void destroy_context(struct mm_struct *mm)
context_mm[id] = NULL;
nr_free_contexts++;
}
- spin_unlock_irqrestore(&context_lock, flags);
+ raw_spin_unlock_irqrestore(&context_lock, flags);
}
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 2fbc680..a65f1d0 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -150,7 +150,7 @@ EXPORT_SYMBOL(local_flush_tlb_page);
*/
#ifdef CONFIG_SMP
-static DEFINE_SPINLOCK(tlbivax_lock);
+static DEFINE_RAW_SPINLOCK(tlbivax_lock);
static int mm_is_core_local(struct mm_struct *mm)
{
@@ -232,10 +232,10 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
if (lock)
- spin_lock(&tlbivax_lock);
+ raw_spin_lock(&tlbivax_lock);
_tlbivax_bcast(vmaddr, pid, tsize, ind);
if (lock)
- spin_unlock(&tlbivax_lock);
+ raw_spin_unlock(&tlbivax_lock);
goto bail;
} else {
struct tlb_flush_param p = {
@@ -274,7 +274,9 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
_tlbil_pid(0);
preempt_enable();
#else
+ preempt_disable();
_tlbil_pid(0);
+ preempt_enable();
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index 35b1ec4..2516c1c 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -40,7 +40,7 @@
#define DBG_LOW(fmt...) do { } while (0)
#endif
-static DEFINE_SPINLOCK(beat_htab_lock);
+static DEFINE_RAW_SPINLOCK(beat_htab_lock);
static inline unsigned int beat_read_mask(unsigned hpte_group)
{
@@ -114,18 +114,18 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
if (rflags & _PAGE_NO_CACHE)
hpte_r &= ~_PAGE_COHERENT;
- spin_lock(&beat_htab_lock);
+ raw_spin_lock(&beat_htab_lock);
lpar_rc = beat_read_mask(hpte_group);
if (lpar_rc == 0) {
if (!(vflags & HPTE_V_BOLTED))
DBG_LOW(" full\n");
- spin_unlock(&beat_htab_lock);
+ raw_spin_unlock(&beat_htab_lock);
return -1;
}
lpar_rc = beat_insert_htab_entry(0, hpte_group, lpar_rc << 48,
hpte_v, hpte_r, &slot);
- spin_unlock(&beat_htab_lock);
+ raw_spin_unlock(&beat_htab_lock);
/*
* Since we try and ioremap PHBs we don't own, the pte insert
@@ -198,17 +198,17 @@ static long beat_lpar_hpte_updatepp(unsigned long slot,
"avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
want_v & HPTE_V_AVPN, slot, psize, newpp);
- spin_lock(&beat_htab_lock);
+ raw_spin_lock(&beat_htab_lock);
dummy0 = beat_lpar_hpte_getword0(slot);
if ((dummy0 & ~0x7FUL) != (want_v & ~0x7FUL)) {
DBG_LOW("not found !\n");
- spin_unlock(&beat_htab_lock);
+ raw_spin_unlock(&beat_htab_lock);
return -1;
}
lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7, &dummy0,
&dummy1);
- spin_unlock(&beat_htab_lock);
+ raw_spin_unlock(&beat_htab_lock);
if (lpar_rc != 0 || dummy0 == 0) {
DBG_LOW("not found !\n");
return -1;
@@ -262,13 +262,13 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
va = (vsid << 28) | (ea & 0x0fffffff);
- spin_lock(&beat_htab_lock);
+ raw_spin_lock(&beat_htab_lock);
slot = beat_lpar_hpte_find(va, psize);
BUG_ON(slot == -1);
lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7,
&dummy0, &dummy1);
- spin_unlock(&beat_htab_lock);
+ raw_spin_unlock(&beat_htab_lock);
BUG_ON(lpar_rc != 0);
}
@@ -285,18 +285,18 @@ static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
slot, va, psize, local);
want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
- spin_lock_irqsave(&beat_htab_lock, flags);
+ raw_spin_lock_irqsave(&beat_htab_lock, flags);
dummy1 = beat_lpar_hpte_getword0(slot);
if ((dummy1 & ~0x7FUL) != (want_v & ~0x7FUL)) {
DBG_LOW("not found !\n");
- spin_unlock_irqrestore(&beat_htab_lock, flags);
+ raw_spin_unlock_irqrestore(&beat_htab_lock, flags);
return;
}
lpar_rc = beat_write_htab_entry(0, slot, 0, 0, HPTE_V_VALID, 0,
&dummy1, &dummy2);
- spin_unlock_irqrestore(&beat_htab_lock, flags);
+ raw_spin_unlock_irqrestore(&beat_htab_lock, flags);
BUG_ON(lpar_rc != 0);
}
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index 36052a9..cfe3b96 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -30,7 +30,7 @@
#include "beat_wrapper.h"
#define MAX_IRQS NR_IRQS
-static DEFINE_SPINLOCK(beatic_irq_mask_lock);
+static DEFINE_RAW_SPINLOCK(beatic_irq_mask_lock);
static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64];
static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64];
@@ -65,30 +65,30 @@ static void beatic_mask_irq(unsigned int irq_plug)
{
unsigned long flags;
- spin_lock_irqsave(&beatic_irq_mask_lock, flags);
+ raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
beatic_irq_mask_enable[irq_plug/64] &= ~(1UL << (63 - (irq_plug%64)));
beatic_update_irq_mask(irq_plug);
- spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
+ raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
}
static void beatic_unmask_irq(unsigned int irq_plug)
{
unsigned long flags;
- spin_lock_irqsave(&beatic_irq_mask_lock, flags);
+ raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
beatic_irq_mask_enable[irq_plug/64] |= 1UL << (63 - (irq_plug%64));
beatic_update_irq_mask(irq_plug);
- spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
+ raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
}
static void beatic_ack_irq(unsigned int irq_plug)
{
unsigned long flags;
- spin_lock_irqsave(&beatic_irq_mask_lock, flags);
+ raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
beatic_irq_mask_ack[irq_plug/64] &= ~(1UL << (63 - (irq_plug%64)));
beatic_update_irq_mask(irq_plug);
- spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
+ raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
}
static void beatic_end_irq(unsigned int irq_plug)
@@ -103,10 +103,10 @@ static void beatic_end_irq(unsigned int irq_plug)
printk(KERN_ERR "IRQ over-downcounted, plug %d\n", irq_plug);
}
- spin_lock_irqsave(&beatic_irq_mask_lock, flags);
+ raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
beatic_irq_mask_ack[irq_plug/64] |= 1UL << (63 - (irq_plug%64));
beatic_update_irq_mask(irq_plug);
- spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
+ raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
}
static struct irq_chip beatic_pic = {
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index fbc9bbd..424b633 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -59,10 +59,10 @@ extern struct device_node *k2_skiplist[2];
* We use a single global lock to protect accesses. Each driver has
* to take care of its own locking
*/
-DEFINE_SPINLOCK(feature_lock);
+DEFINE_RAW_SPINLOCK(feature_lock);
-#define LOCK(flags) spin_lock_irqsave(&feature_lock, flags);
-#define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags);
+#define LOCK(flags) raw_spin_lock_irqsave(&feature_lock, flags);
+#define UNLOCK(flags) raw_spin_unlock_irqrestore(&feature_lock, flags);
/*
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index c6f0f9e..80a5258 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -80,7 +80,7 @@ static int is_core_99;
static int core99_bank = 0;
static int nvram_partitions[3];
// XXX Turn that into a sem
-static DEFINE_SPINLOCK(nv_lock);
+static DEFINE_RAW_SPINLOCK(nv_lock);
static int (*core99_write_bank)(int bank, u8* datas);
static int (*core99_erase_bank)(int bank);
@@ -165,10 +165,10 @@ static unsigned char indirect_nvram_read_byte(int addr)
unsigned char val;
unsigned long flags;
- spin_lock_irqsave(&nv_lock, flags);
+ raw_spin_lock_irqsave(&nv_lock, flags);
out_8(nvram_addr, addr >> 5);
val = in_8(&nvram_data[(addr & 0x1f) << 4]);
- spin_unlock_irqrestore(&nv_lock, flags);
+ raw_spin_unlock_irqrestore(&nv_lock, flags);
return val;
}
@@ -177,10 +177,10 @@ static void indirect_nvram_write_byte(int addr, unsigned char val)
{
unsigned long flags;
- spin_lock_irqsave(&nv_lock, flags);
+ raw_spin_lock_irqsave(&nv_lock, flags);
out_8(nvram_addr, addr >> 5);
out_8(&nvram_data[(addr & 0x1f) << 4], val);
- spin_unlock_irqrestore(&nv_lock, flags);
+ raw_spin_unlock_irqrestore(&nv_lock, flags);
}
@@ -481,7 +481,7 @@ static void core99_nvram_sync(void)
if (!is_core_99 || !nvram_data || !nvram_image)
return;
- spin_lock_irqsave(&nv_lock, flags);
+ raw_spin_lock_irqsave(&nv_lock, flags);
if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE,
NVRAM_SIZE))
goto bail;
@@ -503,7 +503,7 @@ static void core99_nvram_sync(void)
if (core99_write_bank(core99_bank, nvram_image))
printk("nvram: Error writing bank %d\n", core99_bank);
bail:
- spin_unlock_irqrestore(&nv_lock, flags);
+ raw_spin_unlock_irqrestore(&nv_lock, flags);
#ifdef DEBUG
mdelay(2000);
diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c
index db20de5..f5e3cda 100644
--- a/arch/powerpc/platforms/powermac/pfunc_base.c
+++ b/arch/powerpc/platforms/powermac/pfunc_base.c
@@ -50,13 +50,13 @@ static int macio_do_gpio_write(PMF_STD_ARGS, u8 value, u8 mask)
value = ~value;
/* Toggle the GPIO */
- spin_lock_irqsave(&feature_lock, flags);
+ raw_spin_lock_irqsave(&feature_lock, flags);
tmp = readb(addr);
tmp = (tmp & ~mask) | (value & mask);
DBG("Do write 0x%02x to GPIO %s (%p)\n",
tmp, func->node->full_name, addr);
writeb(tmp, addr);
- spin_unlock_irqrestore(&feature_lock, flags);
+ raw_spin_unlock_irqrestore(&feature_lock, flags);
return 0;
}
@@ -145,9 +145,9 @@ static int macio_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask)
struct macio_chip *macio = func->driver_data;
unsigned long flags;
- spin_lock_irqsave(&feature_lock, flags);
+ raw_spin_lock_irqsave(&feature_lock, flags);
MACIO_OUT32(offset, (MACIO_IN32(offset) & ~mask) | (value & mask));
- spin_unlock_irqrestore(&feature_lock, flags);
+ raw_spin_unlock_irqrestore(&feature_lock, flags);
return 0;
}
@@ -168,9 +168,9 @@ static int macio_do_write_reg8(PMF_STD_ARGS, u32 offset, u8 value, u8 mask)
struct macio_chip *macio = func->driver_data;
unsigned long flags;
- spin_lock_irqsave(&feature_lock, flags);
+ raw_spin_lock_irqsave(&feature_lock, flags);
MACIO_OUT8(offset, (MACIO_IN8(offset) & ~mask) | (value & mask));
- spin_unlock_irqrestore(&feature_lock, flags);
+ raw_spin_unlock_irqrestore(&feature_lock, flags);
return 0;
}
@@ -223,12 +223,12 @@ static int macio_do_write_reg32_slm(PMF_STD_ARGS, u32 offset, u32 shift,
if (args == NULL || args->count == 0)
return -EINVAL;
- spin_lock_irqsave(&feature_lock, flags);
+ raw_spin_lock_irqsave(&feature_lock, flags);
tmp = MACIO_IN32(offset);
val = args->u[0].v << shift;
tmp = (tmp & ~mask) | (val & mask);
MACIO_OUT32(offset, tmp);
- spin_unlock_irqrestore(&feature_lock, flags);
+ raw_spin_unlock_irqrestore(&feature_lock, flags);
return 0;
}
@@ -243,12 +243,12 @@ static int macio_do_write_reg8_slm(PMF_STD_ARGS, u32 offset, u32 shift,
if (args == NULL || args->count == 0)
return -EINVAL;
- spin_lock_irqsave(&feature_lock, flags);
+ raw_spin_lock_irqsave(&feature_lock, flags);
tmp = MACIO_IN8(offset);
val = args->u[0].v << shift;
tmp = (tmp & ~mask) | (val & mask);
MACIO_OUT8(offset, tmp);
- spin_unlock_irqrestore(&feature_lock, flags);
+ raw_spin_unlock_irqrestore(&feature_lock, flags);
return 0;
}
@@ -278,12 +278,12 @@ static int unin_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask)
{
unsigned long flags;
- spin_lock_irqsave(&feature_lock, flags);
+ raw_spin_lock_irqsave(&feature_lock, flags);
/* This is fairly bogus in darwin, but it should work for our needs
* implemeted that way:
*/
UN_OUT(offset, (UN_IN(offset) & ~mask) | (value & mask));
- spin_unlock_irqrestore(&feature_lock, flags);
+ raw_spin_unlock_irqrestore(&feature_lock, flags);
return 0;
}
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 09e8272..e2364cf 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -57,7 +57,7 @@ static int max_irqs;
static int max_real_irqs;
static u32 level_mask[4];
-static DEFINE_SPINLOCK(pmac_pic_lock);
+static DEFINE_RAW_SPINLOCK(pmac_pic_lock);
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
@@ -85,7 +85,7 @@ static void pmac_mask_and_ack_irq(unsigned int virq)
int i = src >> 5;
unsigned long flags;
- spin_lock_irqsave(&pmac_pic_lock, flags);
+ raw_spin_lock_irqsave(&pmac_pic_lock, flags);
__clear_bit(src, ppc_cached_irq_mask);
if (__test_and_clear_bit(src, ppc_lost_interrupts))
atomic_dec(&ppc_n_lost_interrupts);
@@ -97,7 +97,7 @@ static void pmac_mask_and_ack_irq(unsigned int virq)
mb();
} while((in_le32(&pmac_irq_hw[i]->enable) & bit)
!= (ppc_cached_irq_mask[i] & bit));
- spin_unlock_irqrestore(&pmac_pic_lock, flags);
+ raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
static void pmac_ack_irq(unsigned int virq)
@@ -107,12 +107,12 @@ static void pmac_ack_irq(unsigned int virq)
int i = src >> 5;
unsigned long flags;
- spin_lock_irqsave(&pmac_pic_lock, flags);
+ raw_spin_lock_irqsave(&pmac_pic_lock, flags);
if (__test_and_clear_bit(src, ppc_lost_interrupts))
atomic_dec(&ppc_n_lost_interrupts);
out_le32(&pmac_irq_hw[i]->ack, bit);
(void)in_le32(&pmac_irq_hw[i]->ack);
- spin_unlock_irqrestore(&pmac_pic_lock, flags);
+ raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
@@ -152,12 +152,12 @@ static unsigned int pmac_startup_irq(unsigned int virq)
unsigned long bit = 1UL << (src & 0x1f);
int i = src >> 5;
- spin_lock_irqsave(&pmac_pic_lock, flags);
+ raw_spin_lock_irqsave(&pmac_pic_lock, flags);
if ((irq_to_desc(virq)->status & IRQ_LEVEL) == 0)
out_le32(&pmac_irq_hw[i]->ack, bit);
__set_bit(src, ppc_cached_irq_mask);
__pmac_set_irq_mask(src, 0);
- spin_unlock_irqrestore(&pmac_pic_lock, flags);
+ raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
return 0;
}
@@ -167,10 +167,10 @@ static void pmac_mask_irq(unsigned int virq)
unsigned long flags;
unsigned int src = irq_map[virq].hwirq;
- spin_lock_irqsave(&pmac_pic_lock, flags);
+ raw_spin_lock_irqsave(&pmac_pic_lock, flags);
__clear_bit(src, ppc_cached_irq_mask);
__pmac_set_irq_mask(src, 1);
- spin_unlock_irqrestore(&pmac_pic_lock, flags);
+ raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
static void pmac_unmask_irq(unsigned int virq)
@@ -178,19 +178,19 @@ static void pmac_unmask_irq(unsigned int virq)
unsigned long flags;
unsigned int src = irq_map[virq].hwirq;
- spin_lock_irqsave(&pmac_pic_lock, flags);
+ raw_spin_lock_irqsave(&pmac_pic_lock, flags);
__set_bit(src, ppc_cached_irq_mask);
__pmac_set_irq_mask(src, 0);
- spin_unlock_irqrestore(&pmac_pic_lock, flags);
+ raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
static int pmac_retrigger(unsigned int virq)
{
unsigned long flags;
- spin_lock_irqsave(&pmac_pic_lock, flags);
+ raw_spin_lock_irqsave(&pmac_pic_lock, flags);
__pmac_retrigger(irq_map[virq].hwirq);
- spin_unlock_irqrestore(&pmac_pic_lock, flags);
+ raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
return 1;
}
@@ -210,7 +210,7 @@ static irqreturn_t gatwick_action(int cpl, void *dev_id)
int irq, bits;
int rc = IRQ_NONE;
- spin_lock_irqsave(&pmac_pic_lock, flags);
+ raw_spin_lock_irqsave(&pmac_pic_lock, flags);
for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
int i = irq >> 5;
bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
@@ -220,12 +220,12 @@ static irqreturn_t gatwick_action(int cpl, void *dev_id)
if (bits == 0)
continue;
irq += __ilog2(bits);
- spin_unlock_irqrestore(&pmac_pic_lock, flags);
+ raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
generic_handle_irq(irq);
- spin_lock_irqsave(&pmac_pic_lock, flags);
+ raw_spin_lock_irqsave(&pmac_pic_lock, flags);
rc = IRQ_HANDLED;
}
- spin_unlock_irqrestore(&pmac_pic_lock, flags);
+ raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
return rc;
}
@@ -244,7 +244,7 @@ static unsigned int pmac_pic_get_irq(void)
return NO_IRQ_IGNORE; /* ignore, already handled */
}
#endif /* CONFIG_SMP */
- spin_lock_irqsave(&pmac_pic_lock, flags);
+ raw_spin_lock_irqsave(&pmac_pic_lock, flags);
for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
int i = irq >> 5;
bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
@@ -256,7 +256,7 @@ static unsigned int pmac_pic_get_irq(void)
irq += __ilog2(bits);
break;
}
- spin_unlock_irqrestore(&pmac_pic_lock, flags);
+ raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
if (unlikely(irq < 0))
return NO_IRQ;
return irq_linear_revmap(pmac_pic_host, irq);
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index ccd8dd0..c35b484 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -100,7 +100,7 @@ int eeh_subsystem_enabled;
EXPORT_SYMBOL(eeh_subsystem_enabled);
/* Lock to avoid races due to multiple reports of an error */
-static DEFINE_SPINLOCK(confirm_error_lock);
+static DEFINE_RAW_SPINLOCK(confirm_error_lock);
/* Buffer for reporting slot-error-detail rtas calls. Its here
* in BSS, and not dynamically alloced, so that it ends up in
@@ -436,7 +436,7 @@ static void __eeh_clear_slot(struct device_node *parent, int mode_flag)
void eeh_clear_slot (struct device_node *dn, int mode_flag)
{
unsigned long flags;
- spin_lock_irqsave(&confirm_error_lock, flags);
+ raw_spin_lock_irqsave(&confirm_error_lock, flags);
dn = find_device_pe (dn);
@@ -447,7 +447,7 @@ void eeh_clear_slot (struct device_node *dn, int mode_flag)
PCI_DN(dn)->eeh_mode &= ~mode_flag;
PCI_DN(dn)->eeh_check_count = 0;
__eeh_clear_slot(dn, mode_flag);
- spin_unlock_irqrestore(&confirm_error_lock, flags);
+ raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
}
/**
@@ -506,7 +506,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
* in one slot might report errors simultaneously, and we
* only want one error recovery routine running.
*/
- spin_lock_irqsave(&confirm_error_lock, flags);
+ raw_spin_lock_irqsave(&confirm_error_lock, flags);
rc = 1;
if (pdn->eeh_mode & EEH_MODE_ISOLATED) {
pdn->eeh_check_count ++;
@@ -575,7 +575,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
* with other functions on this device, and functions under
* bridges. */
eeh_mark_slot (dn, EEH_MODE_ISOLATED);
- spin_unlock_irqrestore(&confirm_error_lock, flags);
+ raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
eeh_send_failure_event (dn, dev);
@@ -586,7 +586,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
return 1;
dn_unlock:
- spin_unlock_irqrestore(&confirm_error_lock, flags);
+ raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
return rc;
}
@@ -1064,7 +1064,7 @@ void __init eeh_init(void)
struct device_node *phb, *np;
struct eeh_early_enable_info info;
- spin_lock_init(&confirm_error_lock);
+ raw_spin_lock_init(&confirm_error_lock);
spin_lock_init(&slot_errbuf_lock);
np = of_find_node_by_path("/rtas");
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index 0a55db8..69ddff0 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -23,7 +23,7 @@ static unsigned char cached_8259[2] = { 0xff, 0xff };
#define cached_A1 (cached_8259[0])
#define cached_21 (cached_8259[1])
-static DEFINE_SPINLOCK(i8259_lock);
+static DEFINE_RAW_SPINLOCK(i8259_lock);
static struct irq_host *i8259_host;
@@ -42,7 +42,7 @@ unsigned int i8259_irq(void)
if (pci_intack)
irq = readb(pci_intack);
else {
- spin_lock(&i8259_lock);
+ raw_spin_lock(&i8259_lock);
lock = 1;
/* Perform an interrupt acknowledge cycle on controller 1. */
@@ -74,7 +74,7 @@ unsigned int i8259_irq(void)
irq = NO_IRQ;
if (lock)
- spin_unlock(&i8259_lock);
+ raw_spin_unlock(&i8259_lock);
return irq;
}
@@ -82,7 +82,7 @@ static void i8259_mask_and_ack_irq(unsigned int irq_nr)
{
unsigned long flags;
- spin_lock_irqsave(&i8259_lock, flags);
+ raw_spin_lock_irqsave(&i8259_lock, flags);
if (irq_nr > 7) {
cached_A1 |= 1 << (irq_nr-8);
inb(0xA1); /* DUMMY */
@@ -95,7 +95,7 @@ static void i8259_mask_and_ack_irq(unsigned int irq_nr)
outb(cached_21, 0x21);
outb(0x20, 0x20); /* Non-specific EOI */
}
- spin_unlock_irqrestore(&i8259_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259_lock, flags);
}
static void i8259_set_irq_mask(int irq_nr)
@@ -110,13 +110,13 @@ static void i8259_mask_irq(unsigned int irq_nr)
pr_debug("i8259_mask_irq(%d)\n", irq_nr);
- spin_lock_irqsave(&i8259_lock, flags);
+ raw_spin_lock_irqsave(&i8259_lock, flags);
if (irq_nr < 8)
cached_21 |= 1 << irq_nr;
else
cached_A1 |= 1 << (irq_nr-8);
i8259_set_irq_mask(irq_nr);
- spin_unlock_irqrestore(&i8259_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259_lock, flags);
}
static void i8259_unmask_irq(unsigned int irq_nr)
@@ -125,13 +125,13 @@ static void i8259_unmask_irq(unsigned int irq_nr)
pr_debug("i8259_unmask_irq(%d)\n", irq_nr);
- spin_lock_irqsave(&i8259_lock, flags);
+ raw_spin_lock_irqsave(&i8259_lock, flags);
if (irq_nr < 8)
cached_21 &= ~(1 << irq_nr);
else
cached_A1 &= ~(1 << (irq_nr-8));
i8259_set_irq_mask(irq_nr);
- spin_unlock_irqrestore(&i8259_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259_lock, flags);
}
static struct irq_chip i8259_pic = {
@@ -241,7 +241,7 @@ void i8259_init(struct device_node *node, unsigned long intack_addr)
unsigned long flags;
/* initialize the controller */
- spin_lock_irqsave(&i8259_lock, flags);
+ raw_spin_lock_irqsave(&i8259_lock, flags);
/* Mask all first */
outb(0xff, 0xA1);
@@ -273,7 +273,7 @@ void i8259_init(struct device_node *node, unsigned long intack_addr)
outb(cached_A1, 0xA1);
outb(cached_21, 0x21);
- spin_unlock_irqrestore(&i8259_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259_lock, flags);
/* create a legacy host */
i8259_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY,
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 28cdddd..970f3ba 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -32,7 +32,7 @@
static struct ipic * primary_ipic;
static struct irq_chip ipic_level_irq_chip, ipic_edge_irq_chip;
-static DEFINE_SPINLOCK(ipic_lock);
+static DEFINE_RAW_SPINLOCK(ipic_lock);
static struct ipic_info ipic_info[] = {
[1] = {
@@ -530,13 +530,13 @@ static void ipic_unmask_irq(unsigned int virq)
unsigned long flags;
u32 temp;
- spin_lock_irqsave(&ipic_lock, flags);
+ raw_spin_lock_irqsave(&ipic_lock, flags);
temp = ipic_read(ipic->regs, ipic_info[src].mask);
temp |= (1 << (31 - ipic_info[src].bit));
ipic_write(ipic->regs, ipic_info[src].mask, temp);
- spin_unlock_irqrestore(&ipic_lock, flags);
+ raw_spin_unlock_irqrestore(&ipic_lock, flags);
}
static void ipic_mask_irq(unsigned int virq)
@@ -546,7 +546,7 @@ static void ipic_mask_irq(unsigned int virq)
unsigned long flags;
u32 temp;
- spin_lock_irqsave(&ipic_lock, flags);
+ raw_spin_lock_irqsave(&ipic_lock, flags);
temp = ipic_read(ipic->regs, ipic_info[src].mask);
temp &= ~(1 << (31 - ipic_info[src].bit));
@@ -556,7 +556,7 @@ static void ipic_mask_irq(unsigned int virq)
* for nearly all cases. */
mb();
- spin_unlock_irqrestore(&ipic_lock, flags);
+ raw_spin_unlock_irqrestore(&ipic_lock, flags);
}
static void ipic_ack_irq(unsigned int virq)
@@ -566,7 +566,7 @@ static void ipic_ack_irq(unsigned int virq)
unsigned long flags;
u32 temp;
- spin_lock_irqsave(&ipic_lock, flags);
+ raw_spin_lock_irqsave(&ipic_lock, flags);
temp = 1 << (31 - ipic_info[src].bit);
ipic_write(ipic->regs, ipic_info[src].ack, temp);
@@ -575,7 +575,7 @@ static void ipic_ack_irq(unsigned int virq)
* for nearly all cases. */
mb();
- spin_unlock_irqrestore(&ipic_lock, flags);
+ raw_spin_unlock_irqrestore(&ipic_lock, flags);
}
static void ipic_mask_irq_and_ack(unsigned int virq)
@@ -585,7 +585,7 @@ static void ipic_mask_irq_and_ack(unsigned int virq)
unsigned long flags;
u32 temp;
- spin_lock_irqsave(&ipic_lock, flags);
+ raw_spin_lock_irqsave(&ipic_lock, flags);
temp = ipic_read(ipic->regs, ipic_info[src].mask);
temp &= ~(1 << (31 - ipic_info[src].bit));
@@ -598,7 +598,7 @@ static void ipic_mask_irq_and_ack(unsigned int virq)
* for nearly all cases. */
mb();
- spin_unlock_irqrestore(&ipic_lock, flags);
+ raw_spin_unlock_irqrestore(&ipic_lock, flags);
}
static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type)
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 470dc6c..cb5fe3f 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -46,7 +46,7 @@
static struct mpic *mpics;
static struct mpic *mpic_primary;
-static DEFINE_SPINLOCK(mpic_lock);
+static DEFINE_RAW_SPINLOCK(mpic_lock);
#ifdef CONFIG_PPC32 /* XXX for now */
#ifdef CONFIG_IRQ_ALL_CPUS
@@ -347,10 +347,10 @@ static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source)
unsigned int mask = 1U << (fixup->index & 0x1f);
writel(mask, fixup->applebase + soff);
} else {
- spin_lock(&mpic->fixup_lock);
+ raw_spin_lock(&mpic->fixup_lock);
writeb(0x11 + 2 * fixup->index, fixup->base + 2);
writel(fixup->data, fixup->base + 4);
- spin_unlock(&mpic->fixup_lock);
+ raw_spin_unlock(&mpic->fixup_lock);
}
}
@@ -366,7 +366,7 @@ static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
DBG("startup_ht_interrupt(0x%x, 0x%x) index: %d\n",
source, irqflags, fixup->index);
- spin_lock_irqsave(&mpic->fixup_lock, flags);
+ raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
/* Enable and configure */
writeb(0x10 + 2 * fixup->index, fixup->base + 2);
tmp = readl(fixup->base + 4);
@@ -374,7 +374,7 @@ static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
if (irqflags & IRQ_LEVEL)
tmp |= 0x22;
writel(tmp, fixup->base + 4);
- spin_unlock_irqrestore(&mpic->fixup_lock, flags);
+ raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags);
#ifdef CONFIG_PM
/* use the lowest bit inverted to the actual HW,
@@ -396,12 +396,12 @@ static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source,
DBG("shutdown_ht_interrupt(0x%x, 0x%x)\n", source, irqflags);
/* Disable */
- spin_lock_irqsave(&mpic->fixup_lock, flags);
+ raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
writeb(0x10 + 2 * fixup->index, fixup->base + 2);
tmp = readl(fixup->base + 4);
tmp |= 1;
writel(tmp, fixup->base + 4);
- spin_unlock_irqrestore(&mpic->fixup_lock, flags);
+ raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags);
#ifdef CONFIG_PM
/* use the lowest bit inverted to the actual HW,
@@ -515,7 +515,7 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
BUG_ON(mpic->fixups == NULL);
/* Init spinlock */
- spin_lock_init(&mpic->fixup_lock);
+ raw_spin_lock_init(&mpic->fixup_lock);
/* Map U3 config space. We assume all IO-APICs are on the primary bus
* so we only need to map 64kB.
@@ -573,12 +573,12 @@ static int irq_choose_cpu(const cpumask_t *mask)
if (cpumask_equal(mask, cpu_all_mask)) {
static int irq_rover;
- static DEFINE_SPINLOCK(irq_rover_lock);
+ static DEFINE_RAW_SPINLOCK(irq_rover_lock);
unsigned long flags;
/* Round-robin distribution... */
do_round_robin:
- spin_lock_irqsave(&irq_rover_lock, flags);
+ raw_spin_lock_irqsave(&irq_rover_lock, flags);
while (!cpu_online(irq_rover)) {
if (++irq_rover >= NR_CPUS)
@@ -590,7 +590,7 @@ static int irq_choose_cpu(const cpumask_t *mask)
irq_rover = 0;
} while (!cpu_online(irq_rover));
- spin_unlock_irqrestore(&irq_rover_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
} else {
cpuid = cpumask_first_and(mask, cpu_online_mask);
if (cpuid >= nr_cpu_ids)
@@ -1368,14 +1368,14 @@ void __init mpic_set_serial_int(struct mpic *mpic, int enable)
unsigned long flags;
u32 v;
- spin_lock_irqsave(&mpic_lock, flags);
+ raw_spin_lock_irqsave(&mpic_lock, flags);
v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
if (enable)
v |= MPIC_GREG_GLOBAL_CONF_1_SIE;
else
v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE;
mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
- spin_unlock_irqrestore(&mpic_lock, flags);
+ raw_spin_unlock_irqrestore(&mpic_lock, flags);
}
void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
@@ -1388,7 +1388,7 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
if (!mpic)
return;
- spin_lock_irqsave(&mpic_lock, flags);
+ raw_spin_lock_irqsave(&mpic_lock, flags);
if (mpic_is_ipi(mpic, irq)) {
reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) &
~MPIC_VECPRI_PRIORITY_MASK;
@@ -1400,7 +1400,7 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
}
- spin_unlock_irqrestore(&mpic_lock, flags);
+ raw_spin_unlock_irqrestore(&mpic_lock, flags);
}
void mpic_setup_this_cpu(void)
@@ -1415,7 +1415,7 @@ void mpic_setup_this_cpu(void)
DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
- spin_lock_irqsave(&mpic_lock, flags);
+ raw_spin_lock_irqsave(&mpic_lock, flags);
/* let the mpic know we want intrs. default affinity is 0xffffffff
* until changed via /proc. That's how it's done on x86. If we want
@@ -1431,7 +1431,7 @@ void mpic_setup_this_cpu(void)
/* Set current processor priority to 0 */
mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);
- spin_unlock_irqrestore(&mpic_lock, flags);
+ raw_spin_unlock_irqrestore(&mpic_lock, flags);
#endif /* CONFIG_SMP */
}
@@ -1460,7 +1460,7 @@ void mpic_teardown_this_cpu(int secondary)
BUG_ON(mpic == NULL);
DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
- spin_lock_irqsave(&mpic_lock, flags);
+ raw_spin_lock_irqsave(&mpic_lock, flags);
/* let the mpic know we don't want intrs. */
for (i = 0; i < mpic->num_sources ; i++)
@@ -1474,7 +1474,7 @@ void mpic_teardown_this_cpu(int secondary)
*/
mpic_eoi(mpic);
- spin_unlock_irqrestore(&mpic_lock, flags);
+ raw_spin_unlock_irqrestore(&mpic_lock, flags);
}
--
1.7.1.1