Subject: seqlock: Use seqcount
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 16 Jul 2011 18:40:26 +0200

No point in having different implementations for the same thing.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/seqlock.h |  176 +++++++++++++++++++++++++-----------------------
 1 file changed, 93 insertions(+), 83 deletions(-)
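
Note (illustration only, not part of the patch): the conversion keeps the
seqlock_t API unchanged, so existing users are unaffected. A minimal usage
sketch, with made-up names, of the reader/writer pattern the wrappers below
preserve:

	static DEFINE_SEQLOCK(example_lock);
	static u64 example_value;

	/* Writer side: serialized by the spinlock embedded in seqlock_t. */
	static void example_update(u64 v)
	{
		write_seqlock(&example_lock);
		example_value = v;
		write_sequnlock(&example_lock);
	}

	/* Reader side: lockless, retries if a writer was active meanwhile. */
	static u64 example_read(void)
	{
		unsigned seq;
		u64 v;

		do {
			seq = read_seqbegin(&example_lock);
			v = example_value;
		} while (read_seqretry(&example_lock, seq));

		return v;
	}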

Index: linux-stable/include/linux/seqlock.h
===================================================================
--- linux-stable.orig/include/linux/seqlock.h
+++ linux-stable/include/linux/seqlock.h
@@ -30,81 +30,12 @@
 #include <linux/preempt.h>
 #include <asm/processor.h>
 
-typedef struct {
-	unsigned sequence;
-	spinlock_t lock;
-} seqlock_t;
-
-/*
- * These macros triggered gcc-3.x compile-time problems.  We think these are
- * OK now.  Be cautious.
- */
-#define __SEQLOCK_UNLOCKED(lockname) \
-		 { 0, __SPIN_LOCK_UNLOCKED(lockname) }
-
-#define seqlock_init(x)					\
-	do {						\
-		(x)->sequence = 0;			\
-		spin_lock_init(&(x)->lock);		\
-	} while (0)
-
-#define DEFINE_SEQLOCK(x) \
-		seqlock_t x = __SEQLOCK_UNLOCKED(x)
-
-/* Lock out other writers and update the count.
- * Acts like a normal spin_lock/unlock.
- * Don't need preempt_disable() because that is in the spin_lock already.
- */
-static inline void write_seqlock(seqlock_t *sl)
-{
-	spin_lock(&sl->lock);
-	++sl->sequence;
-	smp_wmb();
-}
-
-static inline void write_sequnlock(seqlock_t *sl)
-{
-	smp_wmb();
-	sl->sequence++;
-	spin_unlock(&sl->lock);
-}
-
-/* Start of read calculation -- fetch last complete writer token */
-static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
-{
-	unsigned ret;
-
-repeat:
-	ret = ACCESS_ONCE(sl->sequence);
-	if (unlikely(ret & 1)) {
-		cpu_relax();
-		goto repeat;
-	}
-	smp_rmb();
-
-	return ret;
-}
-
-/*
- * Test if reader processed invalid data.
- *
- * If sequence value changed then writer changed data while in section.
- */
-static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
-{
-	smp_rmb();
-
-	return unlikely(sl->sequence != start);
-}
-
-
 /*
  * Version using sequence counter only.
  * This can be used when code has its own mutex protecting the
  * updating starting before the write_seqcount_begin() and ending
  * after the write_seqcount_end().
  */
-
 typedef struct seqcount {
 	unsigned sequence;
 } seqcount_t;
@@ -207,7 +138,6 @@ static inline int __read_seqcount_retry(
 static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
 	smp_rmb();
-
 	return __read_seqcount_retry(s, start);
 }
 
@@ -241,21 +171,101 @@ static inline void write_seqcount_barrie
 	s->sequence+=2;
 }
 
+typedef struct {
+	struct seqcount seqcount;
+	spinlock_t lock;
+} seqlock_t;
+
 /*
- * Possible sw/hw IRQ protected versions of the interfaces.
+ * These macros triggered gcc-3.x compile-time problems.  We think these are
+ * OK now.  Be cautious.
  */
+#define __SEQLOCK_UNLOCKED(lockname)			\
+	{						\
+		.seqcount = SEQCNT_ZERO,		\
+		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
+	}
+
+#define seqlock_init(x)					\
+	do {						\
+		seqcount_init(&(x)->seqcount);		\
+		spin_lock_init(&(x)->lock);		\
+	} while (0)
+
+#define DEFINE_SEQLOCK(x) \
+		seqlock_t x = __SEQLOCK_UNLOCKED(x)
+
+/*
+ * Read side functions for starting and finalizing a read side section.
+ */
+static inline unsigned read_seqbegin(const seqlock_t *sl)
+{
+	return read_seqcount_begin(&sl->seqcount);
+}
+
+static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+{
+	return read_seqcount_retry(&sl->seqcount, start);
+}
+
+/*
+ * Lock out other writers and update the count.
+ * Acts like a normal spin_lock/unlock.
+ * Don't need preempt_disable() because that is in the spin_lock already.
+ */
+static inline void write_seqlock(seqlock_t *sl)
+{
+	spin_lock(&sl->lock);
+	write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock(seqlock_t *sl)
+{
+	write_seqcount_end(&sl->seqcount);
+	spin_unlock(&sl->lock);
+}
+
+static inline void write_seqlock_bh(seqlock_t *sl)
+{
+	spin_lock_bh(&sl->lock);
+	write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock_bh(seqlock_t *sl)
+{
+	write_seqcount_end(&sl->seqcount);
+	spin_unlock_bh(&sl->lock);
+}
+
+static inline void write_seqlock_irq(seqlock_t *sl)
+{
+	spin_lock_irq(&sl->lock);
+	write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock_irq(seqlock_t *sl)
+{
+	write_seqcount_end(&sl->seqcount);
+	spin_unlock_irq(&sl->lock);
+}
+
+static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sl->lock, flags);
+	write_seqcount_begin(&sl->seqcount);
+	return flags;
+}
+
 #define write_seqlock_irqsave(lock, flags)				\
-	do { local_irq_save(flags); write_seqlock(lock); } while (0)
-#define write_seqlock_irq(lock)						\
-	do { local_irq_disable();   write_seqlock(lock); } while (0)
-#define write_seqlock_bh(lock)						\
-        do { local_bh_disable();    write_seqlock(lock); } while (0)
-
-#define write_sequnlock_irqrestore(lock, flags)				\
-	do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
-#define write_sequnlock_irq(lock)					\
-	do { write_sequnlock(lock); local_irq_enable(); } while(0)
-#define write_sequnlock_bh(lock)					\
-	do { write_sequnlock(lock); local_bh_enable(); } while(0)
+	do { flags = __write_seqlock_irqsave(lock); } while (0)
+
+static inline void
+write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+{
+	write_seqcount_end(&sl->seqcount);
+	spin_unlock_irqrestore(&sl->lock, flags);
+}
 
 #endif /* __LINUX_SEQLOCK_H */
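
Illustration only, not part of the patch: the bare seqcount_t pattern the
header comment refers to, where the caller supplies its own lock for writer
serialization (names below are made up). After this change, seqlock_t is
simply this pattern packaged together with a spinlock:

	static DEFINE_MUTEX(example_mutex);
	static seqcount_t example_seq = SEQCNT_ZERO;
	static u64 example_data;

	/* Writer: the caller's mutex serializes updaters around the seqcount. */
	static void example_seqcount_update(u64 v)
	{
		mutex_lock(&example_mutex);
		write_seqcount_begin(&example_seq);
		example_data = v;
		write_seqcount_end(&example_seq);
		mutex_unlock(&example_mutex);
	}

	/* Reader: same retry loop as with seqlock_t, no lock taken. */
	static u64 example_seqcount_read(void)
	{
		unsigned seq;
		u64 v;

		do {
			seq = read_seqcount_begin(&example_seq);
			v = example_data;
		} while (read_seqcount_retry(&example_seq, seq));

		return v;
	}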