/*
 * arch/v850/kernel/semaphore.c -- Semaphore support
 *
 *  Copyright (C) 1998-2000  IBM Corporation
 *  Copyright (C) 1999  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file COPYING in the main directory of this
 * archive for more details.
 *
 * This file is a copy of the s390 version, arch/s390/kernel/semaphore.c
 *    Author(s): Martin Schwidefsky
 * which was derived from the i386 version, linux/arch/i386/kernel/semaphore.c
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/semaphore.h>

/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleeping"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleeping" and the contention routine ordering is
 * protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
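
/*
 * For reference, a rough sketch of the contention-free fast path that
 * lives in <asm/semaphore.h> (the exact inline code varies, so treat
 * this as an illustration rather than the real header):
 *
 *	static inline void down(struct semaphore *sem)
 *	{
 *		might_sleep();
 *		if (atomic_add_negative(-1, &sem->count))
 *			__down(sem);		// contended: take the slow path
 *	}
 *
 *	static inline void up(struct semaphore *sem)
 *	{
 *		if (atomic_inc_return(&sem->count) <= 0)
 *			__up(sem);		// someone is (or may be) waiting
 *	}
 *
 * i.e. the routines below are reached only when "count" has gone
 * negative, which is exactly the contention case described above.
 */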

/*
 * Logic:
 *  - only on a boundary condition do we need to care: when we go
 *    from a negative count to a non-negative one, we wake people up.
 *  - when we go from a non-negative count to a negative one, we
 *    (a) synchronize with the "sleeper" count and (b) make sure
 *    that we're on the wakeup list before we synchronize, so that
 *    we cannot lose wakeup events.
 */

void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}

static DEFINE_SPINLOCK(semaphore_lock);

void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock.
		 */
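		/*
		 * Worked example: with a single contender the fast-path
		 * down() has already taken -1 out of "count" and sleepers
		 * is 1, so we add sleepers - 1 == 0 and simply re-test
		 * "count".  If an up() has made it non-negative we own the
		 * semaphore and clear sleepers; otherwise we leave exactly
		 * one -1 charged (sleepers = 1) and go to sleep.  With
		 * several contenders the add folds the other waiters'
		 * decrements back into "count", so while we sleep only a
		 * single -1 stays outstanding regardless of how many tasks
		 * are queued.
		 */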
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	wake_up(&sem->wait);
}

int __sched __down_interruptible(struct semaphore *sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock. The
		 * "-1" is because we're still hoping to get
		 * the lock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	wake_up(&sem->wait);
	return retval;
}
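
/*
 * Typical caller pattern (illustrative only; "my_sem" is a made-up
 * example, not something defined here): the -EINTR returned above is
 * propagated by down_interruptible() in <asm/semaphore.h>, and callers
 * usually turn it into a restartable error:
 *
 *	if (down_interruptible(&my_sem))
 *		return -ERESTARTSYS;
 *	...critical section...
 *	up(&my_sem);
 */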

/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 */
int __down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int sleepers;

	spin_lock_irqsave(&semaphore_lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock.
	 */
	if (!atomic_add_negative(sleepers, &sem->count))
		wake_up(&sem->wait);

	spin_unlock_irqrestore(&semaphore_lock, flags);
	return 1;
}
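
/*
 * Note that returning 1 here means "not acquired".  A sketch of how a
 * typical inline down_trylock() in <asm/semaphore.h> might use it (the
 * real header may differ):
 *
 *	static inline int down_trylock(struct semaphore *sem)
 *	{
 *		int ret = 0;
 *		if (atomic_add_negative(-1, &sem->count))
 *			ret = __down_trylock(sem);	// undo the -1, report failure
 *		return ret;
 *	}
 *
 * which is why the "sleepers + 1" above puts the caller's speculative
 * decrement back into "count" before deciding whether to wake anyone.
 */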