/*
 *  Generic semaphore code. Buyer beware. Do your own
 *  specific changes in <asm/semaphore-helper.h>
 */

#include <linux/sched.h>
#include <linux/init.h>
#include <asm/semaphore-helper.h>

#ifndef CONFIG_RMW_INSNS
spinlock_t semaphore_wake_lock;
#endif

/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to sleep, while the "waking" variable is
 * incremented when the "up()" code goes to wake up waiting
 * processes.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * waking_non_zero() (from <asm/semaphore-helper.h>) must execute
 * atomically.
 *
 * When __up() is called, the count was negative before
 * incrementing it, and we need to wake up somebody.
 *
 * This routine adds one to the count of processes that need to
 * wake up and exit.  ALL waiting processes actually wake up but
 * only the one that gets to the "waking" field first will gate
 * through and acquire the semaphore.  The others will go back
 * to sleep.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
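/*
 * For orientation, the struct this file operates on is assumed to look
 * roughly like the sketch below (the canonical definition is the
 * per-architecture one in <asm/semaphore.h>, not this):
 *
 *	struct semaphore {
 *		atomic_t count;		-- > 0: free, <= 0: contended
 *		atomic_t waking;	-- pending wakeups granted by up()
 *		wait_queue_head_t wait;	-- tasks sleeping in __down()
 *	};
 */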
void __up(struct semaphore *sem)
{
	wake_one_more(sem);
	wake_up(&sem->wait);
}

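/*
 * A minimal sketch of the wake_one_more() helper used above. The real
 * definition lives in <asm/semaphore-helper.h> and differs per
 * configuration; this is an assumed !CONFIG_RMW_INSNS shape, where the
 * global semaphore_wake_lock stands in for read-modify-write insns:
 *
 *	static inline void wake_one_more(struct semaphore *sem)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&semaphore_wake_lock, flags);
 *		atomic_inc(&sem->waking);	-- record one pending wakeup
 *		spin_unlock_irqrestore(&semaphore_wake_lock, flags);
 *	}
 */
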
/*
 * Perform the "down" function.  Return zero for semaphore acquired,
 * return negative if the task was signalled out of the function.
 *
 * If called from __down, the return is ignored and the wait loop is
 * not interruptible.  This means that a task waiting on a semaphore
 * using "down()" cannot be killed until someone does an "up()" on
 * the semaphore.
 *
 * If called from __down_interruptible, the return value gets checked
 * upon return.  If the return value is negative then the task continues
 * with the negative value in the return register (it can be tested by
 * the caller).
 *
 * Either form may be used in conjunction with "up()".
 */

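/*
 * Sketch of the assumed waking_non_zero() helper (again per-arch, shown
 * for the spinlock-protected variant): atomically consume one pending
 * wakeup if there is one, returning 1 if we gated through and 0 if not.
 *
 *	static inline int waking_non_zero(struct semaphore *sem)
 *	{
 *		unsigned long flags;
 *		int ret = 0;
 *
 *		spin_lock_irqsave(&semaphore_wake_lock, flags);
 *		if (atomic_read(&sem->waking) > 0) {
 *			atomic_dec(&sem->waking);
 *			ret = 1;	-- we won the race, gate through
 *		}
 *		spin_unlock_irqrestore(&semaphore_wake_lock, flags);
 *		return ret;
 *	}
 */
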
#define DOWN_HEAD(task_state)						\
	current->state = (task_state);					\
	add_wait_queue(&sem->wait, &wait);				\
									\
	/*								\
	 * Ok, we're set up.  sem->count is known to be less than zero	\
	 * so we must wait.						\
	 *								\
	 * We can let go the lock for purposes of waiting.		\
	 * We re-acquire it after awaking so as to protect		\
	 * all semaphore operations.					\
	 *								\
	 * If "up()" is called before we call waking_non_zero() then	\
	 * we will catch it right away.  If it is called later then	\
	 * we will have to go through a wakeup cycle to catch it.	\
	 *								\
	 * Multiple waiters contend for the semaphore lock to see	\
	 * who gets to gate through and who has to wait some more.	\
	 */								\
	for (;;) {

#define DOWN_TAIL(task_state)			\
		current->state = (task_state);	\
	}					\
	current->state = TASK_RUNNING;		\
	remove_wait_queue(&sem->wait, &wait);

void __sched __down(struct semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);

	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
	if (waking_non_zero(sem))
		break;
	schedule();
	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}

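/*
 * With DOWN_HEAD/DOWN_TAIL expanded, __down() is equivalent to the
 * following (what the preprocessor produces, modulo the comment inside
 * DOWN_HEAD):
 *
 *	current->state = TASK_UNINTERRUPTIBLE;
 *	add_wait_queue(&sem->wait, &wait);
 *	for (;;) {
 *		if (waking_non_zero(sem))
 *			break;
 *		schedule();
 *		current->state = TASK_UNINTERRUPTIBLE;
 *	}
 *	current->state = TASK_RUNNING;
 *	remove_wait_queue(&sem->wait, &wait);
 */
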
int __sched __down_interruptible(struct semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	DOWN_HEAD(TASK_INTERRUPTIBLE)

	ret = waking_non_zero_interruptible(sem, current);
	if (ret) {
		if (ret == 1)
			/* past this point ret != 0 only if we were interrupted -arca */
			ret = 0;
		break;
	}
	schedule();
	DOWN_TAIL(TASK_INTERRUPTIBLE)
	return ret;
}

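/*
 * waking_non_zero_interruptible() is assumed to extend the plain check
 * with signal handling: 1 means a wakeup was consumed, negative means a
 * signal arrived first. A sketch of one plausible variant (the undo of
 * the count decrement on the signal path is an assumption):
 *
 *	static inline int waking_non_zero_interruptible(struct semaphore *sem,
 *							struct task_struct *tsk)
 *	{
 *		unsigned long flags;
 *		int ret = 0;
 *
 *		spin_lock_irqsave(&semaphore_wake_lock, flags);
 *		if (atomic_read(&sem->waking) > 0) {
 *			atomic_dec(&sem->waking);
 *			ret = 1;
 *		} else if (signal_pending(tsk)) {
 *			atomic_inc(&sem->count);	-- undo our down()
 *			ret = -EINTR;
 *		}
 *		spin_unlock_irqrestore(&semaphore_wake_lock, flags);
 *		return ret;
 *	}
 */
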
/*
 * Non-blocking variant: succeed only if a wakeup is already pending.
 * Returns 0 if the semaphore was acquired, non-zero otherwise.
 */
int __down_trylock(struct semaphore *sem)
{
	return waking_non_zero_trylock(sem);
}
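
/*
 * Typical caller-side usage, for reference. The entry points down(),
 * down_interruptible() and down_trylock() are the inline fast paths in
 * <asm/semaphore.h> that fall back to the __-prefixed slow paths above;
 * "mylock" is a hypothetical example:
 *
 *	static DECLARE_MUTEX(mylock);		-- semaphore with count 1
 *
 *	down(&mylock);				-- may sleep, not killable
 *	... critical section ...
 *	up(&mylock);
 *
 *	if (down_interruptible(&mylock))	-- may sleep, -EINTR on signal
 *		return -EINTR;
 *	... critical section ...
 *	up(&mylock);
 */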