/* $Id: semaphore.c,v 1.7 2001/04/18 21:06:05 davem Exp $ */

/* sparc32 semaphore implementation, based on i386 version */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/init.h>

#include <asm/semaphore.h>

/*
 * Semaphores are implemented using a two-way counter:
 * the "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleepers"
 * variable counts how many of those processes are blocked
 * in the contended slow path.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test whether they need to do any extra work:
 * up() needs to do something only if count was negative
 * before the increment operation.
 *
 * "sleepers" and the ordering of the contention routines
 * are protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls (see
 * the sketch below).
 */
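
/*
 * For illustration only, not part of this file: the fast paths live
 * in <asm/semaphore.h> as hand-tuned inline code, but they behave
 * conceptually like the sketch below.  Using __atomic24_add() as a
 * primitive that returns the new value is an assumption made here
 * for readability; the real header open-codes the atomic sequences.
 *
 *	static inline void down(struct semaphore *sem)
 *	{
 *		// One more acquirer; a negative result means the
 *		// semaphore is contended: take the slow path.
 *		if (atomic24_add_negative(-1, &sem->count))
 *			__down(sem);
 *	}
 *
 *	static inline void up(struct semaphore *sem)
 *	{
 *		// Release; a result <= 0 means the count was
 *		// negative before the increment, i.e. somebody
 *		// is waiting: hand the wakeup over to __up().
 *		if (__atomic24_add(1, &sem->count) <= 0)
 *			__up(sem);
 *	}
 */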

/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative one, we wake people up.
 *  - when we go from a non-negative count to a negative one, we
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize, so that
 *    we cannot lose wakeup events.
 */
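
/*
 * A worked example of the boundary logic above (illustrative; let
 * the count start at 1, and let A, B and C call down() in turn):
 *
 *	A: count 1 -> 0		fast path, A acquires
 *	B: count 0 -> -1	negative: B enters __down(), sets
 *				sleepers = 1, adds sleepers - 1 = 0,
 *				still negative, sleeps
 *	C: count -1 -> -2	C enters __down(), sleepers = 2,
 *				adds sleepers - 1 = 1 -> count = -1,
 *				sets sleepers = 1, sleeps
 *
 * However many tasks pile up, the settled state is count == -1 and
 * sleepers == 1; the wait queue carries the rest of the bookkeeping.
 * When A releases, up() takes the count from -1 to 0, __up() wakes
 * one exclusive sleeper, and that task re-runs the loop to either
 * claim the semaphore or account for itself again and go back to
 * sleep.
 */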

void __up(struct semaphore *sem)
{
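	/*
	 * The inline up() has already done the increment; we are
	 * called only when the count was negative beforehand, i.e.
	 * when at least one task is queued on sem->wait.
	 */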
	wake_up(&sem->wait);
}

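/*
 * One global lock protects the sleeper bookkeeping of every
 * semaphore in the system.  Only the contended slow paths take it,
 * so it stays off the critical path described above.
 */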
static DEFINE_SPINLOCK(semaphore_lock);

void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock.
		 */
		if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
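	/*
	 * The wakeup we consumed was exclusive, so pass any surplus
	 * on: if the count lets another waiter through, it has to
	 * be told to retry.
	 */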
	wake_up(&sem->wait);
}

int __sched __down_interruptible(struct semaphore *sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
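			/*
			 * Give back our own decrement too: "sleepers"
			 * already includes us (see the increment above),
			 * so adding it, rather than sleepers - 1,
			 * abandons our claim on the lock entirely.
			 */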
			atomic24_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock. The
		 * "-1" is because we're still hoping to get
		 * the lock.
		 */
		if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	wake_up(&sem->wait);
	return retval;
}

/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 */
int __down_trylock(struct semaphore *sem)
{
	int sleepers;
	unsigned long flags;

	spin_lock_irqsave(&semaphore_lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock. If the restored
	 * count comes out non-negative, the semaphore is in fact
	 * free, so wake a sleeper up to claim it.
	 */
	if (!atomic24_add_negative(sleepers, &sem->count))
		wake_up(&sem->wait);

	spin_unlock_irqrestore(&semaphore_lock, flags);
	return 1;
}
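
/*
 * For reference, a sketch that is not part of this file: the inline
 * down_trylock() in <asm/semaphore.h> has conceptually already done
 * the speculative decrement before calling in here, along the lines
 * of
 *
 *	static inline int down_trylock(struct semaphore *sem)
 *	{
 *		// Fast path: if the count stays non-negative we own
 *		// the semaphore and report success (0).
 *		if (atomic24_add_negative(-1, &sem->count))
 *			return __down_trylock(sem);
 *		return 0;
 *	}
 *
 * which is why __down_trylock() above restores sleepers + 1: the
 * other sleepers' decrements plus our own speculative one.
 */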