/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001   David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>
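
/*
 * Note: the __-prefixed functions below are not called directly by users of
 * rw_semaphores; they are reached through the generic down_read()/up_read()/
 * down_write()/up_write()/downgrade_write() wrappers.  A minimal usage
 * sketch, assuming a hypothetical semaphore "my_sem" declared with
 * DECLARE_RWSEM():
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 *	down_read(&my_sem);	-- shared: many readers may hold it at once
 *	... read the protected data ...
 *	up_read(&my_sem);
 *
 *	down_write(&my_sem);	-- exclusive: excludes readers and writers
 *	... modify the protected data ...
 *	up_write(&my_sem);
 */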

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	unsigned int flags;
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};
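
/*
 * Note on sem->activity (kept in the rw_semaphore itself): it is 0 when the
 * semaphore is free, N > 0 while N readers hold it, and -1 while a single
 * writer holds it.  Every transition is made under sem->wait_lock.
 */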

/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->activity = 0;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

/*
 * handle the lock being released when there are processes blocked on it
 * that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	if (!wakewrite) {
		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
			goto out;
		goto dont_wake_writers;
	}

	/* if we are allowed to wake writers try to grant a single write lock
	 * if there's a writer at the front of the queue
	 * - we leave the 'waiting count' incremented to signify potential
	 *   contention
	 */
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
		sem->activity = -1;
		list_del(&waiter->list);
		tsk = waiter->task;
		/* Don't touch waiter after ->task has been NULLed */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		goto out;
	}

	/* grant an infinite number of read locks to the front of the queue */
 dont_wake_writers:
	woken = 0;
	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
		struct list_head *next = waiter->list.next;

		list_del(&waiter->list);
		tsk = waiter->task;
		/* as above, don't touch waiter after ->task has been NULLed */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		woken++;
		if (list_empty(&sem->wait_list))
			break;
		waiter = list_entry(next, struct rwsem_waiter, list);
	}

	sem->activity += woken;

 out:
	return sem;
}

/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;

	sem->activity = -1;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	list_del(&waiter->list);

	tsk = waiter->task;
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);
	return sem;
}

/*
 * get a read lock on the semaphore
 */
void fastcall __sched __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock
	 * - the waker clears waiter.task (after an smp_mb()) only once the
	 *   lock has been granted, so seeing NULL here means we hold it
	 */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int fastcall __down_read_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}

/*
 * get a write lock on the semaphore
 * - we increment the waiting count anyway to indicate an exclusive lock
 */
void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}

void fastcall __sched __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}
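
/*
 * Purely illustrative: when lockdep (CONFIG_DEBUG_LOCK_ALLOC) is enabled, a
 * caller that legitimately needs to hold two rw_semaphores of the same lock
 * class can use the _nested variant with a distinct subclass to avoid a
 * false positive, e.g. (hypothetical "outer"/"inner" semaphores):
 *
 *	down_write(&outer->sem);
 *	down_write_nested(&inner->sem, SINGLE_DEPTH_NESTING);
 */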

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int fastcall __down_write_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}

/*
 * release a read lock on the semaphore
 */
void fastcall __up_read(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
		sem = __rwsem_wake_one_writer(sem);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * release a write lock on the semaphore
 */
void fastcall __up_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	sem->activity = 0;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void fastcall __downgrade_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	sem->activity = 1;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}
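
/*
 * A common downgrade pattern (sketch, hypothetical data): take the lock
 * exclusively to publish an update, then downgrade so that other readers can
 * proceed while this task continues to read:
 *
 *	down_write(&my_sem);
 *	... install the new data ...
 *	downgrade_write(&my_sem);	-- now held for read; readers may run
 *	... keep using the data read-only ...
 *	up_read(&my_sem);
 */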

EXPORT_SYMBOL(__init_rwsem);
EXPORT_SYMBOL(__down_read);
EXPORT_SYMBOL(__down_read_trylock);
EXPORT_SYMBOL(__down_write_nested);
EXPORT_SYMBOL(__down_write);
EXPORT_SYMBOL(__down_write_trylock);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
EXPORT_SYMBOL(__downgrade_write);