/*
 * Copyright (C) 2015 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

#include "private/bionic_futex.h"
int pthread_barrierattr_init(pthread_barrierattr_t* attr) {
  *attr = 0;
  return 0;
}

int pthread_barrierattr_destroy(pthread_barrierattr_t* attr) {
  *attr = 0;
  return 0;
}

int pthread_barrierattr_getpshared(const pthread_barrierattr_t* attr, int* pshared) {
  *pshared = (*attr & 1) ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
  return 0;
}

int pthread_barrierattr_setpshared(pthread_barrierattr_t* attr, int pshared) {
  if (pshared == PTHREAD_PROCESS_SHARED) {
    *attr |= 1;
  } else {
    *attr &= ~1;
  }
  return 0;
}
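
// Illustrative sketch (not part of the implementation above): since the attr is just a
// bit flag, a process-shared barrier is requested like this before pthread_barrier_init().
// Placing the barrier in MAP_SHARED memory is an assumption for the example; any memory
// visible to both processes works.
//
//   pthread_barrierattr_t attr;
//   pthread_barrierattr_init(&attr);
//   pthread_barrierattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
//
//   int pshared;
//   pthread_barrierattr_getpshared(&attr, &pshared);  // pshared == PTHREAD_PROCESS_SHARED
//
//   // ... pass &attr to pthread_barrier_init() for a barrier in shared memory ...
//   pthread_barrierattr_destroy(&attr);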

enum BarrierState {
  WAIT,
  RELEASE,
};

struct pthread_barrier_internal_t {
  // One barrier can be used for an unlimited number of cycles. In each cycle, [init_count]
  // threads must call pthread_barrier_wait() before any of them successfully returns from
  // the call. It is undefined behavior if more than [init_count] threads call
  // pthread_barrier_wait() in one cycle.
  uint32_t init_count;
  // Barrier state. It is WAIT while waiting for more threads to enter the barrier in this
  // cycle, otherwise threads are leaving the barrier.
  _Atomic(BarrierState) state;
  // Number of threads that have entered but not yet left the barrier in this cycle.
  atomic_uint wait_count;
  // Whether the barrier is shared across processes.
  bool pshared;
  uint32_t __reserved[4];
};

static_assert(sizeof(pthread_barrier_t) == sizeof(pthread_barrier_internal_t),
              "pthread_barrier_t should actually be pthread_barrier_internal_t in implementation."
              );

static_assert(alignof(pthread_barrier_t) >= 4,
              "pthread_barrier_t should fulfill the alignment of pthread_barrier_internal_t.");

static inline pthread_barrier_internal_t* __get_internal_barrier(pthread_barrier_t* barrier) {
  return reinterpret_cast<pthread_barrier_internal_t*>(barrier);
}

int pthread_barrier_init(pthread_barrier_t* barrier_interface, const pthread_barrierattr_t* attr,
                         unsigned count) {
  pthread_barrier_internal_t* barrier = __get_internal_barrier(barrier_interface);
  if (count == 0) {
    return EINVAL;
  }
  barrier->init_count = count;
  atomic_init(&barrier->state, WAIT);
  atomic_init(&barrier->wait_count, 0);
  barrier->pshared = false;
  if (attr != nullptr && (*attr & 1)) {
    barrier->pshared = true;
  }
  return 0;
}
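
// Illustrative sketch (not part of this file): a null attr gives a process-private
// barrier, and a count of zero is rejected, matching the check above.
//
//   pthread_barrier_t good, bad;
//   int rc1 = pthread_barrier_init(&good, nullptr, 4);  // rc1 == 0
//   int rc2 = pthread_barrier_init(&bad, nullptr, 0);   // rc2 == EINVAL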

// According to the POSIX standard, pthread_barrier_wait() synchronizes memory between
// participating threads: all memory operations made by participating threads before calling
// pthread_barrier_wait() can be seen by all participating threads after the function call.
// We establish this with a happens-before relation between all threads entering the barrier
// and the last thread entering the barrier, and a happens-before relation between the last
// thread entering the barrier and all threads leaving the barrier.
int pthread_barrier_wait(pthread_barrier_t* barrier_interface) {
  pthread_barrier_internal_t* barrier = __get_internal_barrier(barrier_interface);

  // Wait until all threads from the previous cycle have left the barrier. This is needed
  // because a participating thread can call pthread_barrier_wait() again before other
  // threads have left the barrier. Use an acquire operation here to synchronize with
  // the last thread leaving the previous cycle, so we read the correct wait_count below.
  while (atomic_load_explicit(&barrier->state, memory_order_acquire) == RELEASE) {
    __futex_wait_ex(&barrier->state, barrier->pshared, RELEASE, false, nullptr);
  }

  uint32_t prev_wait_count = atomic_load_explicit(&barrier->wait_count, memory_order_relaxed);
  while (true) {
    // This happens when more than [init_count] threads try to enter the barrier
    // in one cycle. We read the POSIX standard as disallowing this, since additional arriving
    // threads are not synchronized with respect to the barrier reset. We also don't know of
    // any reasonable cases in which this would be intentional.
    if (prev_wait_count >= barrier->init_count) {
      return EINVAL;
    }
    // Use a memory_order_acq_rel operation here to synchronize between all threads entering
    // the barrier and the last thread entering the barrier.
    if (atomic_compare_exchange_weak_explicit(&barrier->wait_count, &prev_wait_count,
                                              prev_wait_count + 1u, memory_order_acq_rel,
                                              memory_order_relaxed)) {
      break;
    }
  }

  int result = 0;
  if (prev_wait_count + 1 == barrier->init_count) {
    result = PTHREAD_BARRIER_SERIAL_THREAD;
    if (prev_wait_count != 0) {
      // Use a release operation here to synchronize between the last thread entering the
      // barrier and all threads leaving the barrier.
      atomic_store_explicit(&barrier->state, RELEASE, memory_order_release);
      __futex_wake_ex(&barrier->state, barrier->pshared, prev_wait_count);
    }
  } else {
    // Use an acquire operation here to synchronize between the last thread entering the
    // barrier and all threads leaving the barrier.
    while (atomic_load_explicit(&barrier->state, memory_order_acquire) == WAIT) {
      __futex_wait_ex(&barrier->state, barrier->pshared, WAIT, false, nullptr);
    }
  }
  // Use a release operation here so it is not reordered with previous operations.
  if (atomic_fetch_sub_explicit(&barrier->wait_count, 1, memory_order_release) == 1) {
    // Use a release operation here to synchronize with threads entering the barrier for
    // the next cycle, or with the thread calling pthread_barrier_destroy().
    atomic_store_explicit(&barrier->state, WAIT, memory_order_release);
    __futex_wake_ex(&barrier->state, barrier->pshared, barrier->init_count);
  }
  return result;
}
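
// Illustrative usage sketch (not part of this file): kThreads workers meet at the barrier
// and exactly one of them is told it is the "serial" thread for the cycle. The names
// kThreads and worker() are assumptions for the example only; the barrier is assumed to
// have been initialized elsewhere with count == kThreads.
//
//   constexpr unsigned kThreads = 4;
//   pthread_barrier_t barrier;  // pthread_barrier_init(&barrier, nullptr, kThreads)
//
//   void* worker(void*) {
//     int rc = pthread_barrier_wait(&barrier);
//     if (rc == PTHREAD_BARRIER_SERIAL_THREAD) {
//       // Exactly one of the kThreads callers takes this branch per cycle.
//     } else if (rc != 0) {
//       // EINVAL: more than kThreads threads entered the barrier in one cycle.
//     }
//     return nullptr;
//   }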

int pthread_barrier_destroy(pthread_barrier_t* barrier_interface) {
  pthread_barrier_internal_t* barrier = __get_internal_barrier(barrier_interface);
  if (barrier->init_count == 0) {
    return EINVAL;
  }
  // Use an acquire operation here to synchronize with the last thread leaving the barrier,
  // so we can read the correct wait_count below.
  while (atomic_load_explicit(&barrier->state, memory_order_acquire) == RELEASE) {
    __futex_wait_ex(&barrier->state, barrier->pshared, RELEASE, false, nullptr);
  }
  if (atomic_load_explicit(&barrier->wait_count, memory_order_relaxed) != 0) {
    return EBUSY;
  }
  barrier->init_count = 0;
  return 0;
}
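
// Illustrative sketch (not part of this file): destroy only succeeds once every waiter
// from the final cycle has left, and destroying an already-destroyed barrier fails
// because init_count has been reset to 0 above.
//
//   pthread_barrier_t b;
//   pthread_barrier_init(&b, nullptr, 1);
//   pthread_barrier_wait(&b);     // a one-thread barrier releases immediately
//   pthread_barrier_destroy(&b);  // 0
//   pthread_barrier_destroy(&b);  // EINVAL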