| author | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2012-02-19 06:14:24 +0000 |
|---|---|---|
| committer | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2012-02-19 06:14:24 +0000 |
| commit | 6fafeb146e9efd59140ea58cebd7dd38ae9a6379 | |
| tree | 7db89ba6a28932514b105d620bba4884ec332ec3 /libdde_linux26/contrib/include/linux/bit_spinlock.h | |
| parent | 38c2c2458e3f4ecb329ff35621806252aac209b9 | |
| parent | 8df772b3c665e663f6f9d2a70f9c691590bd3f91 | |
Merge branch 'dde' into upstream-merged
Diffstat (limited to 'libdde_linux26/contrib/include/linux/bit_spinlock.h')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | libdde_linux26/contrib/include/linux/bit_spinlock.h | 95 |
1 file changed, 95 insertions, 0 deletions
```diff
diff --git a/libdde_linux26/contrib/include/linux/bit_spinlock.h b/libdde_linux26/contrib/include/linux/bit_spinlock.h
new file mode 100644
index 00000000..7113a32a
--- /dev/null
+++ b/libdde_linux26/contrib/include/linux/bit_spinlock.h
@@ -0,0 +1,95 @@
+#ifndef __LINUX_BIT_SPINLOCK_H
+#define __LINUX_BIT_SPINLOCK_H
+
+/*
+ * bit-based spin_lock()
+ *
+ * Don't use this unless you really need to: spin_lock() and spin_unlock()
+ * are significantly faster.
+ */
+static inline void bit_spin_lock(int bitnum, unsigned long *addr)
+{
+        /*
+         * Assuming the lock is uncontended, this never enters
+         * the body of the outer loop. If it is contended, then
+         * within the inner loop a non-atomic test is used to
+         * busywait with less bus contention for a good time to
+         * attempt to acquire the lock bit.
+         */
+        preempt_disable();
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+        while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
+                while (test_bit(bitnum, addr)) {
+                        preempt_enable();
+                        cpu_relax();
+                        preempt_disable();
+                }
+        }
+#endif
+        __acquire(bitlock);
+}
+
+/*
+ * Return true if it was acquired
+ */
+static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+{
+        preempt_disable();
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+        if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
+                preempt_enable();
+                return 0;
+        }
+#endif
+        __acquire(bitlock);
+        return 1;
+}
+
+/*
+ * bit-based spin_unlock()
+ */
+static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+        BUG_ON(!test_bit(bitnum, addr));
+#endif
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+        clear_bit_unlock(bitnum, addr);
+#endif
+        preempt_enable();
+        __release(bitlock);
+}
+
+/*
+ * bit-based spin_unlock()
+ * non-atomic version, which can be used eg. if the bit lock itself is
+ * protecting the rest of the flags in the word.
+ */
+static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+        BUG_ON(!test_bit(bitnum, addr));
+#endif
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+        __clear_bit_unlock(bitnum, addr);
+#endif
+        preempt_enable();
+        __release(bitlock);
+}
+
+/*
+ * Return true if the lock is held.
+ */
+static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
+{
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+        return test_bit(bitnum, addr);
+#elif defined CONFIG_PREEMPT
+        return preempt_count();
+#else
+        return 1;
+#endif
+}
+
+#endif /* __LINUX_BIT_SPINLOCK_H */
+
```
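For orientation (not part of the commit), here is a minimal usage sketch of the helpers added above, assuming a hypothetical `struct tagged_flags` whose bit 0 serves as the lock bit protecting the other bits in the same word; the structure, macros, and bit layout are illustrative only:

```c
#include <linux/bitops.h>
#include <linux/bit_spinlock.h>

/* Hypothetical layout: bit 0 of @flags is the lock bit, the remaining
 * bits are state that may only be changed while the lock bit is held. */
#define TF_LOCK_BIT     0
#define TF_DIRTY_BIT    1

struct tagged_flags {
        unsigned long flags;
};

static void mark_dirty(struct tagged_flags *tf)
{
        /* Spins (with preemption disabled) until the lock bit is taken. */
        bit_spin_lock(TF_LOCK_BIT, &tf->flags);

        /* Non-atomic bit op is safe here: the lock excludes other writers. */
        __set_bit(TF_DIRTY_BIT, &tf->flags);

        /* Non-atomic unlock is valid because the lock bit protects every
         * other bit in the word, as the header comment above describes. */
        __bit_spin_unlock(TF_LOCK_BIT, &tf->flags);
}

static int try_mark_dirty(struct tagged_flags *tf)
{
        /* Returns 0 without spinning if someone else holds the lock bit. */
        if (!bit_spin_trylock(TF_LOCK_BIT, &tf->flags))
                return 0;

        __set_bit(TF_DIRTY_BIT, &tf->flags);
        bit_spin_unlock(TF_LOCK_BIT, &tf->flags);
        return 1;
}
```

If other bits in the word could be modified outside the lock, the atomic bit_spin_unlock() would be required instead of __bit_spin_unlock().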
