/*
 * Just taken from the alpha implementation; it may not work
 * well here.
 */
/*
 * Generic semaphore code.  Buyer beware: make your
 * architecture-specific changes in <asm/semaphore-helper.h>.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/init.h>
#include <asm/semaphore.h>
#include <asm/semaphore-helper.h>

spinlock_t semaphore_wake_lock;

/*
 * Semaphores are implemented using a two-way counter:
 * the "count" variable is decremented for each process
 * that tries to sleep, while the "waking" variable is
 * incremented when the "up()" code goes to wake up waiting
 * processes.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test whether they need to do any extra work
 * ("up" needs to do something only if count was negative
 * before the increment operation).
 *
 * waking_non_zero() (from asm/semaphore.h) must execute
 * atomically.
 *
 * When __up() is called, the count was negative before
 * incrementing it, and we need to wake up somebody.
 *
 * This routine adds one to the count of processes that need to
 * wake up and exit.  ALL waiting processes actually wake up, but
 * only the one that reaches the "waking" field first will gate
 * through and acquire the semaphore.  The others will go back
 * to sleep.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business.  The
 * critical part is the inline stuff in <asm/semaphore.h>,
 * where we want to avoid any extra jumps and calls.
 */
void __up(struct semaphore *sem)
{
	wake_one_more(sem);
	wake_up(&sem->wait);
}
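
/*
 * Illustrative sketch (not part of the original file): what the
 * counting scheme above looks like from the caller's side.  A plain
 * down()/up() pair around a critical section is the common case.
 * The function and the "example_sem" semaphore are hypothetical;
 * DECLARE_MUTEX() defines a semaphore with a count of one.
 */
#if 0
static DECLARE_MUTEX(example_sem);

static void example_critical_section(void)
{
	down(&example_sem);	/* may drop count below zero and sleep */
	/* ... exclusive work ... */
	up(&example_sem);	/* raises count; wakes a sleeper if any */
}
#endif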

/*
 * Perform the "down" function.  Return zero if the semaphore was
 * acquired, or a negative value if the task was signalled out of
 * the function.
 *
 * If called from __down, the return value is ignored and the wait
 * loop is not interruptible.  This means that a task waiting on a
 * semaphore using "down()" cannot be killed until someone does an
 * "up()" on the semaphore.
 *
 * If called from __down_interruptible, the return value gets checked
 * upon return.  If the return value is negative then the task continues
 * with the negative value in the return register (it can be tested by
 * the caller).
 *
 * Either form may be used in conjunction with "up()".
 */
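
/*
 * Illustrative sketch (not part of the original file): the usual
 * calling pattern for the interruptible form described above.  The
 * caller must check the return value; a non-zero result means a
 * signal broke the wait before the semaphore was acquired.  The
 * function and "example_sem" names are hypothetical.
 */
#if 0
static DECLARE_MUTEX(example_sem);

static int example_interruptible(void)
{
	if (down_interruptible(&example_sem))
		return -EINTR;	/* signalled out of the wait loop */
	/* ... exclusive work ... */
	up(&example_sem);
	return 0;
}
#endif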

#define DOWN_VAR				\
	struct task_struct *tsk = current;	\
	wait_queue_t wait;			\
	init_waitqueue_entry(&wait, tsk);

#define DOWN_HEAD(task_state)						\
	tsk->state = (task_state);					\
	add_wait_queue(&sem->wait, &wait);				\
									\
	/*								\
	 * Ok, we're set up.  sem->count is known to be less than	\
	 * zero, so we must wait.					\
	 *								\
	 * We can let go of the lock for purposes of waiting.		\
	 * We re-acquire it after awaking so as to protect		\
	 * all semaphore operations.					\
	 *								\
	 * If "up()" is called before we call waking_non_zero() then	\
	 * we will catch it right away.  If it is called later then	\
	 * we will have to go through a wakeup cycle to catch it.	\
	 *								\
	 * Multiple waiters contend for the semaphore lock to see	\
	 * who gets to gate through and who has to wait some more.	\
	 */								\
	for (;;) {

#define DOWN_TAIL(task_state)			\
		tsk->state = (task_state);	\
	}					\
	tsk->state = TASK_RUNNING;		\
	remove_wait_queue(&sem->wait, &wait);
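
/*
 * For reference (not part of the original file): with the three
 * macros above substituted, __down() below is roughly equivalent
 * to this open-coded wait loop:
 *
 *	struct task_struct *tsk = current;
 *	wait_queue_t wait;
 *	init_waitqueue_entry(&wait, tsk);
 *
 *	tsk->state = TASK_UNINTERRUPTIBLE;
 *	add_wait_queue(&sem->wait, &wait);
 *	for (;;) {
 *		if (waking_non_zero(sem))
 *			break;
 *		schedule();
 *		tsk->state = TASK_UNINTERRUPTIBLE;
 *	}
 *	tsk->state = TASK_RUNNING;
 *	remove_wait_queue(&sem->wait, &wait);
 */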

void __sched __down(struct semaphore *sem)
{
	DOWN_VAR
	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
	if (waking_non_zero(sem))
		break;
	schedule();
	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}

int __sched __down_interruptible(struct semaphore *sem)
{
	int ret = 0;
	DOWN_VAR
	DOWN_HEAD(TASK_INTERRUPTIBLE)

	ret = waking_non_zero_interruptible(sem, tsk);
	if (ret) {
		if (ret == 1)
			/* ret != 0 only if we get interrupted -arca */
			ret = 0;
		break;
	}
	schedule();
	DOWN_TAIL(TASK_INTERRUPTIBLE)
	return ret;
}

int __down_trylock(struct semaphore *sem)
{
	return waking_non_zero_trylock(sem);
}
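
/*
 * Illustrative sketch (not part of the original file): the
 * non-blocking form.  down_trylock() never sleeps; it returns 0
 * when the semaphore was acquired and non-zero when it was not,
 * so it is usable where blocking is forbidden.  The function and
 * "example_sem" names are hypothetical.
 */
#if 0
static DECLARE_MUTEX(example_sem);

static int example_try(void)
{
	if (down_trylock(&example_sem))
		return -EBUSY;	/* contended; try again later */
	/* ... exclusive work ... */
	up(&example_sem);
	return 0;
}
#endif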