/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
| 16 | |
| 17 | #define LOG_TAG "FastThread" |
| 18 | //#define LOG_NDEBUG 0 |
| 19 | |
| 20 | #define ATRACE_TAG ATRACE_TAG_AUDIO |
| 21 | |
| 22 | #include "Configuration.h" |
Elliott Hughes | e348c5b | 2014-05-21 18:47:50 -0700 | [diff] [blame] | 23 | #include <linux/futex.h> |
| 24 | #include <sys/syscall.h> |
Eric Tan | fefe316 | 2018-09-07 10:09:11 -0700 | [diff] [blame] | 25 | #include <audio_utils/clock.h> |
Mathias Agopian | 05d19b0 | 2017-02-28 16:28:19 -0800 | [diff] [blame] | 26 | #include <cutils/atomic.h> |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 27 | #include <utils/Log.h> |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 28 | #include <utils/Trace.h> |
| 29 | #include "FastThread.h" |
Glenn Kasten | 045ee7e | 2015-02-17 16:22:04 -0800 | [diff] [blame] | 30 | #include "FastThreadDumpState.h" |
Glenn Kasten | 3ab8d66 | 2017-04-03 14:35:09 -0700 | [diff] [blame] | 31 | #include "TypedLogger.h" |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 32 | |
| 33 | #define FAST_DEFAULT_NS 999999999L // ~1 sec: default time to sleep |
| 34 | #define FAST_HOT_IDLE_NS 1000000L // 1 ms: time to sleep while hot idling |
Glenn Kasten | d2123e6 | 2015-01-29 10:02:44 -0800 | [diff] [blame] | 35 | #define MIN_WARMUP_CYCLES 2 // minimum number of consecutive in-range loop cycles |
| 36 | // to wait for warmup |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 37 | #define MAX_WARMUP_CYCLES 10 // maximum number of loop cycles to wait for warmup |
| 38 | |
| 39 | namespace android { |
| 40 | |
// Constructor.
// cycleMs/loadUs are the systrace counter names used by ATRACE_INT in threadLoop();
// they are copied into fixed-size member buffers (see strlcpy below) because ATRACE_INT
// needs stable storage for the lifetime of the thread.
// NOTE(review): initializer order below must match member declaration order in
// FastThread.h (not visible here) — do not reorder.
FastThread::FastThread(const char *cycleMs, const char *loadUs) : Thread(false /*canCallJava*/),
    // re-initialized to &sInitial by subclass constructor
    mPrevious(NULL), mCurrent(NULL),
    /* mOldTs({0, 0}), */
    mOldTsValid(false),
    mSleepNs(-1),
    mPeriodNs(0),
    mUnderrunNs(0),
    mOverrunNs(0),
    mForceNs(0),
    mWarmupNsMin(0),
    mWarmupNsMax(LONG_MAX),
    // re-initialized to &mDummySubclassDumpState by subclass constructor
    mDummyDumpState(NULL),
    mDumpState(NULL),
    mIgnoreNextOverrun(true),
#ifdef FAST_THREAD_STATISTICS
    // mOldLoad
    mOldLoadValid(false),
    mBounds(0),
    mFull(false),
    // mTcu
#endif
    mColdGen(0),
    mIsWarm(false),
    /* mMeasuredWarmupTs({0, 0}), */
    mWarmupCycles(0),
    mWarmupConsecutiveInRangeCycles(0),
    mTimestampStatus(INVALID_OPERATION),

    mCommand(FastThreadState::INITIAL),
#if 0
    frameCount(0),
#endif
    mAttemptedWrite(false)
    // mCycleMs(cycleMs)
    // mLoadUs(loadUs)
{
    // struct members can't be brace-initialized in the initializer list above
    // (see the commented-out entries), so zero the timespecs here instead.
    mOldTs.tv_sec = 0;
    mOldTs.tv_nsec = 0;
    mMeasuredWarmupTs.tv_sec = 0;
    mMeasuredWarmupTs.tv_nsec = 0;
    // strlcpy guarantees NUL termination even if the source is longer than the buffer
    strlcpy(mCycleMs, cycleMs, sizeof(mCycleMs));
    strlcpy(mLoadUs, loadUs, sizeof(mLoadUs));
}
| 86 | |
| 87 | FastThread::~FastThread() |
| 88 | { |
| 89 | } |
| 90 | |
| 91 | bool FastThread::threadLoop() |
| 92 | { |
Glenn Kasten | 388d571 | 2017-04-07 14:38:41 -0700 | [diff] [blame] | 93 | // LOGT now works even if tlNBLogWriter is nullptr, but we're considering changing that, |
Glenn Kasten | eef598c | 2017-04-03 14:41:13 -0700 | [diff] [blame] | 94 | // so this initialization permits a future change to remove the check for nullptr. |
Mikhail Naganov | 01d09d9 | 2018-09-18 12:38:57 -0700 | [diff] [blame] | 95 | tlNBLogWriter = mDummyNBLogWriter.get(); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 96 | for (;;) { |
| 97 | |
| 98 | // either nanosleep, sched_yield, or busy wait |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 99 | if (mSleepNs >= 0) { |
| 100 | if (mSleepNs > 0) { |
| 101 | ALOG_ASSERT(mSleepNs < 1000000000); |
| 102 | const struct timespec req = {0, mSleepNs}; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 103 | nanosleep(&req, NULL); |
| 104 | } else { |
| 105 | sched_yield(); |
| 106 | } |
| 107 | } |
| 108 | // default to long sleep for next cycle |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 109 | mSleepNs = FAST_DEFAULT_NS; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 110 | |
| 111 | // poll for state change |
| 112 | const FastThreadState *next = poll(); |
| 113 | if (next == NULL) { |
| 114 | // continue to use the default initial state until a real state is available |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 115 | // FIXME &sInitial not available, should save address earlier |
| 116 | //ALOG_ASSERT(mCurrent == &sInitial && previous == &sInitial); |
| 117 | next = mCurrent; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 118 | } |
| 119 | |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 120 | mCommand = next->mCommand; |
| 121 | if (next != mCurrent) { |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 122 | |
| 123 | // As soon as possible of learning of a new dump area, start using it |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 124 | mDumpState = next->mDumpState != NULL ? next->mDumpState : mDummyDumpState; |
Andy Hung | a37aaa2 | 2018-09-18 18:15:37 -0700 | [diff] [blame] | 125 | tlNBLogWriter = next->mNBLogWriter != NULL ? |
Mikhail Naganov | 01d09d9 | 2018-09-18 12:38:57 -0700 | [diff] [blame] | 126 | next->mNBLogWriter : mDummyNBLogWriter.get(); |
Mikhail Naganov | 9b6599e | 2019-07-29 15:23:21 -0700 | [diff] [blame] | 127 | setNBLogWriter(tlNBLogWriter); // This is used for debugging only |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 128 | |
| 129 | // We want to always have a valid reference to the previous (non-idle) state. |
| 130 | // However, the state queue only guarantees access to current and previous states. |
| 131 | // So when there is a transition from a non-idle state into an idle state, we make a |
| 132 | // copy of the last known non-idle state so it is still available on return from idle. |
| 133 | // The possible transitions are: |
| 134 | // non-idle -> non-idle update previous from current in-place |
| 135 | // non-idle -> idle update previous from copy of current |
| 136 | // idle -> idle don't update previous |
| 137 | // idle -> non-idle don't update previous |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 138 | if (!(mCurrent->mCommand & FastThreadState::IDLE)) { |
| 139 | if (mCommand & FastThreadState::IDLE) { |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 140 | onIdle(); |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 141 | mOldTsValid = false; |
Glenn Kasten | 214b406 | 2015-03-02 14:15:47 -0800 | [diff] [blame] | 142 | #ifdef FAST_THREAD_STATISTICS |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 143 | mOldLoadValid = false; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 144 | #endif |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 145 | mIgnoreNextOverrun = true; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 146 | } |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 147 | mPrevious = mCurrent; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 148 | } |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 149 | mCurrent = next; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 150 | } |
| 151 | #if !LOG_NDEBUG |
| 152 | next = NULL; // not referenced again |
| 153 | #endif |
| 154 | |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 155 | mDumpState->mCommand = mCommand; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 156 | |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 157 | // FIXME what does this comment mean? |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 158 | // << current, previous, command, dumpState >> |
| 159 | |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 160 | switch (mCommand) { |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 161 | case FastThreadState::INITIAL: |
| 162 | case FastThreadState::HOT_IDLE: |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 163 | mSleepNs = FAST_HOT_IDLE_NS; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 164 | continue; |
| 165 | case FastThreadState::COLD_IDLE: |
| 166 | // only perform a cold idle command once |
| 167 | // FIXME consider checking previous state and only perform if previous != COLD_IDLE |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 168 | if (mCurrent->mColdGen != mColdGen) { |
| 169 | int32_t *coldFutexAddr = mCurrent->mColdFutexAddr; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 170 | ALOG_ASSERT(coldFutexAddr != NULL); |
| 171 | int32_t old = android_atomic_dec(coldFutexAddr); |
| 172 | if (old <= 0) { |
Elliott Hughes | e348c5b | 2014-05-21 18:47:50 -0700 | [diff] [blame] | 173 | syscall(__NR_futex, coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, NULL); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 174 | } |
Glenn Kasten | 8255ba7 | 2016-08-23 13:54:23 -0700 | [diff] [blame] | 175 | int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 176 | if (!(policy == SCHED_FIFO || policy == SCHED_RR)) { |
Glenn Kasten | 1bfe09a | 2017-02-21 13:05:56 -0800 | [diff] [blame] | 177 | ALOGE("did not receive expected priority boost on time"); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 178 | } |
| 179 | // This may be overly conservative; there could be times that the normal mixer |
| 180 | // requests such a brief cold idle that it doesn't require resetting this flag. |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 181 | mIsWarm = false; |
| 182 | mMeasuredWarmupTs.tv_sec = 0; |
| 183 | mMeasuredWarmupTs.tv_nsec = 0; |
| 184 | mWarmupCycles = 0; |
| 185 | mWarmupConsecutiveInRangeCycles = 0; |
| 186 | mSleepNs = -1; |
| 187 | mColdGen = mCurrent->mColdGen; |
Glenn Kasten | 214b406 | 2015-03-02 14:15:47 -0800 | [diff] [blame] | 188 | #ifdef FAST_THREAD_STATISTICS |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 189 | mBounds = 0; |
| 190 | mFull = false; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 191 | #endif |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 192 | mOldTsValid = !clock_gettime(CLOCK_MONOTONIC, &mOldTs); |
| 193 | mTimestampStatus = INVALID_OPERATION; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 194 | } else { |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 195 | mSleepNs = FAST_HOT_IDLE_NS; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 196 | } |
| 197 | continue; |
| 198 | case FastThreadState::EXIT: |
| 199 | onExit(); |
| 200 | return false; |
| 201 | default: |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 202 | LOG_ALWAYS_FATAL_IF(!isSubClassCommand(mCommand)); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 203 | break; |
| 204 | } |
| 205 | |
| 206 | // there is a non-idle state available to us; did the state change? |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 207 | if (mCurrent != mPrevious) { |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 208 | onStateChange(); |
| 209 | #if 1 // FIXME shouldn't need this |
| 210 | // only process state change once |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 211 | mPrevious = mCurrent; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 212 | #endif |
| 213 | } |
| 214 | |
| 215 | // do work using current state here |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 216 | mAttemptedWrite = false; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 217 | onWork(); |
| 218 | |
| 219 | // To be exactly periodic, compute the next sleep time based on current time. |
| 220 | // This code doesn't have long-term stability when the sink is non-blocking. |
| 221 | // FIXME To avoid drift, use the local audio clock or watch the sink's fill status. |
| 222 | struct timespec newTs; |
| 223 | int rc = clock_gettime(CLOCK_MONOTONIC, &newTs); |
| 224 | if (rc == 0) { |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 225 | if (mOldTsValid) { |
| 226 | time_t sec = newTs.tv_sec - mOldTs.tv_sec; |
| 227 | long nsec = newTs.tv_nsec - mOldTs.tv_nsec; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 228 | ALOGE_IF(sec < 0 || (sec == 0 && nsec < 0), |
| 229 | "clock_gettime(CLOCK_MONOTONIC) failed: was %ld.%09ld but now %ld.%09ld", |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 230 | mOldTs.tv_sec, mOldTs.tv_nsec, newTs.tv_sec, newTs.tv_nsec); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 231 | if (nsec < 0) { |
| 232 | --sec; |
| 233 | nsec += 1000000000; |
| 234 | } |
| 235 | // To avoid an initial underrun on fast tracks after exiting standby, |
| 236 | // do not start pulling data from tracks and mixing until warmup is complete. |
| 237 | // Warmup is considered complete after the earlier of: |
Glenn Kasten | d2123e6 | 2015-01-29 10:02:44 -0800 | [diff] [blame] | 238 | // MIN_WARMUP_CYCLES consecutive in-range write() attempts, |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 239 | // where "in-range" means mWarmupNsMin <= cycle time <= mWarmupNsMax |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 240 | // MAX_WARMUP_CYCLES write() attempts. |
| 241 | // This is overly conservative, but to get better accuracy requires a new HAL API. |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 242 | if (!mIsWarm && mAttemptedWrite) { |
| 243 | mMeasuredWarmupTs.tv_sec += sec; |
| 244 | mMeasuredWarmupTs.tv_nsec += nsec; |
| 245 | if (mMeasuredWarmupTs.tv_nsec >= 1000000000) { |
| 246 | mMeasuredWarmupTs.tv_sec++; |
| 247 | mMeasuredWarmupTs.tv_nsec -= 1000000000; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 248 | } |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 249 | ++mWarmupCycles; |
| 250 | if (mWarmupNsMin <= nsec && nsec <= mWarmupNsMax) { |
| 251 | ALOGV("warmup cycle %d in range: %.03f ms", mWarmupCycles, nsec * 1e-9); |
| 252 | ++mWarmupConsecutiveInRangeCycles; |
Glenn Kasten | d2123e6 | 2015-01-29 10:02:44 -0800 | [diff] [blame] | 253 | } else { |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 254 | ALOGV("warmup cycle %d out of range: %.03f ms", mWarmupCycles, nsec * 1e-9); |
| 255 | mWarmupConsecutiveInRangeCycles = 0; |
Glenn Kasten | d2123e6 | 2015-01-29 10:02:44 -0800 | [diff] [blame] | 256 | } |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 257 | if ((mWarmupConsecutiveInRangeCycles >= MIN_WARMUP_CYCLES) || |
| 258 | (mWarmupCycles >= MAX_WARMUP_CYCLES)) { |
| 259 | mIsWarm = true; |
| 260 | mDumpState->mMeasuredWarmupTs = mMeasuredWarmupTs; |
| 261 | mDumpState->mWarmupCycles = mWarmupCycles; |
Eric Tan | fefe316 | 2018-09-07 10:09:11 -0700 | [diff] [blame] | 262 | const double measuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1e3) + |
| 263 | (mMeasuredWarmupTs.tv_nsec * 1e-6); |
| 264 | LOG_WARMUP_TIME(measuredWarmupMs); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 265 | } |
| 266 | } |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 267 | mSleepNs = -1; |
| 268 | if (mIsWarm) { |
| 269 | if (sec > 0 || nsec > mUnderrunNs) { |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 270 | ATRACE_NAME("underrun"); |
| 271 | // FIXME only log occasionally |
| 272 | ALOGV("underrun: time since last cycle %d.%03ld sec", |
| 273 | (int) sec, nsec / 1000000L); |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 274 | mDumpState->mUnderruns++; |
Eric Tan | fefe316 | 2018-09-07 10:09:11 -0700 | [diff] [blame] | 275 | LOG_UNDERRUN(audio_utils_ns_from_timespec(&newTs)); |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 276 | mIgnoreNextOverrun = true; |
| 277 | } else if (nsec < mOverrunNs) { |
| 278 | if (mIgnoreNextOverrun) { |
| 279 | mIgnoreNextOverrun = false; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 280 | } else { |
| 281 | // FIXME only log occasionally |
| 282 | ALOGV("overrun: time since last cycle %d.%03ld sec", |
| 283 | (int) sec, nsec / 1000000L); |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 284 | mDumpState->mOverruns++; |
Eric Tan | fefe316 | 2018-09-07 10:09:11 -0700 | [diff] [blame] | 285 | LOG_OVERRUN(audio_utils_ns_from_timespec(&newTs)); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 286 | } |
| 287 | // This forces a minimum cycle time. It: |
| 288 | // - compensates for an audio HAL with jitter due to sample rate conversion |
| 289 | // - works with a variable buffer depth audio HAL that never pulls at a |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 290 | // rate < than mOverrunNs per buffer. |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 291 | // - recovers from overrun immediately after underrun |
| 292 | // It doesn't work with a non-blocking audio HAL. |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 293 | mSleepNs = mForceNs - nsec; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 294 | } else { |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 295 | mIgnoreNextOverrun = false; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 296 | } |
| 297 | } |
Glenn Kasten | 214b406 | 2015-03-02 14:15:47 -0800 | [diff] [blame] | 298 | #ifdef FAST_THREAD_STATISTICS |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 299 | if (mIsWarm) { |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 300 | // advance the FIFO queue bounds |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 301 | size_t i = mBounds & (mDumpState->mSamplingN - 1); |
| 302 | mBounds = (mBounds & 0xFFFF0000) | ((mBounds + 1) & 0xFFFF); |
| 303 | if (mFull) { |
Ivan Lozano | 7765714 | 2018-01-02 21:01:16 +0000 | [diff] [blame] | 304 | //mBounds += 0x10000; |
| 305 | __builtin_add_overflow(mBounds, 0x10000, &mBounds); |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 306 | } else if (!(mBounds & (mDumpState->mSamplingN - 1))) { |
| 307 | mFull = true; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 308 | } |
| 309 | // compute the delta value of clock_gettime(CLOCK_MONOTONIC) |
| 310 | uint32_t monotonicNs = nsec; |
| 311 | if (sec > 0 && sec < 4) { |
wendy lin | cd4c724 | 2019-06-12 10:37:11 +0800 | [diff] [blame] | 312 | monotonicNs += sec * 1000000000U; // unsigned to prevent signed overflow. |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 313 | } |
| 314 | // compute raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID) |
| 315 | uint32_t loadNs = 0; |
| 316 | struct timespec newLoad; |
| 317 | rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad); |
| 318 | if (rc == 0) { |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 319 | if (mOldLoadValid) { |
| 320 | sec = newLoad.tv_sec - mOldLoad.tv_sec; |
| 321 | nsec = newLoad.tv_nsec - mOldLoad.tv_nsec; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 322 | if (nsec < 0) { |
| 323 | --sec; |
| 324 | nsec += 1000000000; |
| 325 | } |
| 326 | loadNs = nsec; |
| 327 | if (sec > 0 && sec < 4) { |
wendy lin | cd4c724 | 2019-06-12 10:37:11 +0800 | [diff] [blame] | 328 | loadNs += sec * 1000000000U; // unsigned to prevent signed overflow. |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 329 | } |
| 330 | } else { |
| 331 | // first time through the loop |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 332 | mOldLoadValid = true; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 333 | } |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 334 | mOldLoad = newLoad; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 335 | } |
| 336 | #ifdef CPU_FREQUENCY_STATISTICS |
| 337 | // get the absolute value of CPU clock frequency in kHz |
| 338 | int cpuNum = sched_getcpu(); |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 339 | uint32_t kHz = mTcu.getCpukHz(cpuNum); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 340 | kHz = (kHz << 4) | (cpuNum & 0xF); |
| 341 | #endif |
| 342 | // save values in FIFO queues for dumpsys |
| 343 | // these stores #1, #2, #3 are not atomic with respect to each other, |
| 344 | // or with respect to store #4 below |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 345 | mDumpState->mMonotonicNs[i] = monotonicNs; |
Eric Tan | cf3d82c | 2018-09-04 15:44:45 -0700 | [diff] [blame] | 346 | LOG_WORK_TIME(monotonicNs); |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 347 | mDumpState->mLoadNs[i] = loadNs; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 348 | #ifdef CPU_FREQUENCY_STATISTICS |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 349 | mDumpState->mCpukHz[i] = kHz; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 350 | #endif |
| 351 | // this store #4 is not atomic with respect to stores #1, #2, #3 above, but |
| 352 | // the newest open & oldest closed halves are atomic with respect to each other |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 353 | mDumpState->mBounds = mBounds; |
Glenn Kasten | f9715e4 | 2016-07-13 14:02:03 -0700 | [diff] [blame] | 354 | ATRACE_INT(mCycleMs, monotonicNs / 1000000); |
| 355 | ATRACE_INT(mLoadUs, loadNs / 1000); |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 356 | } |
| 357 | #endif |
| 358 | } else { |
| 359 | // first time through the loop |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 360 | mOldTsValid = true; |
| 361 | mSleepNs = mPeriodNs; |
| 362 | mIgnoreNextOverrun = true; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 363 | } |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 364 | mOldTs = newTs; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 365 | } else { |
| 366 | // monotonic clock is broken |
Glenn Kasten | e4a7ce2 | 2015-03-03 11:23:17 -0800 | [diff] [blame] | 367 | mOldTsValid = false; |
| 368 | mSleepNs = mPeriodNs; |
Glenn Kasten | 2234002 | 2014-04-07 12:04:41 -0700 | [diff] [blame] | 369 | } |
| 370 | |
| 371 | } // for (;;) |
| 372 | |
| 373 | // never return 'true'; Thread::_threadLoop() locks mutex which can result in priority inversion |
| 374 | } |
| 375 | |
| 376 | } // namespace android |