/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "FastMixer"
//#define LOG_NDEBUG 0

//#define ATRACE_TAG ATRACE_TAG_AUDIO

#include <sys/atomics.h>
#include <time.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include <system/audio.h>
#ifdef FAST_MIXER_STATISTICS
#include <cpustats/CentralTendencyStatistics.h>
#include <cpustats/ThreadCpuUsage.h>
#endif
#include "AudioMixer.h"
#include "FastMixer.h"

#define FAST_HOT_IDLE_NS    1000000L    // 1 ms: time to sleep while hot idling
#define FAST_DEFAULT_NS   999999999L    // ~1 sec: default time to sleep
#define MAX_WARMUP_CYCLES         10    // maximum number of loop cycles to wait for warmup

namespace android {

// Fast mixer thread
bool FastMixer::threadLoop()
{
    static const FastMixerState initial;
    const FastMixerState *previous = &initial, *current = &initial;
    FastMixerState preIdle; // copy of state before we went into idle
    struct timespec oldTs = {0, 0};
    bool oldTsValid = false;
    long slopNs = 0;    // accumulated time we've woken up too early (> 0) or too late (< 0)
    long sleepNs = -1;  // -1: busy wait, 0: sched_yield, > 0: nanosleep
    int fastTrackNames[FastMixerState::kMaxFastTracks]; // handles used by mixer to identify tracks
    int generations[FastMixerState::kMaxFastTracks];    // last observed mFastTracks[i].mGeneration
    unsigned i;
    for (i = 0; i < FastMixerState::kMaxFastTracks; ++i) {
        fastTrackNames[i] = -1;
        generations[i] = 0;
    }
    NBAIO_Sink *outputSink = NULL;
    int outputSinkGen = 0;
    AudioMixer* mixer = NULL;
    short *mixBuffer = NULL;
    enum {UNDEFINED, MIXED, ZEROED} mixBufferState = UNDEFINED;
    NBAIO_Format format = Format_Invalid;
    unsigned sampleRate = 0;
    int fastTracksGen = 0;
    long periodNs = 0;      // expected period; the time required to render one mix buffer
    long underrunNs = 0;    // underrun likely when write cycle is greater than this value
    long overrunNs = 0;     // overrun likely when write cycle is less than this value
    long warmupNs = 0;      // warmup complete when write cycle is greater than this value
    FastMixerDumpState dummyDumpState, *dumpState = &dummyDumpState;
    bool ignoreNextOverrun = true;  // used to ignore initial overrun and first after an underrun
#ifdef FAST_MIXER_STATISTICS
    struct timespec oldLoad = {0, 0};   // previous value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
    bool oldLoadValid = false;          // whether oldLoad is valid
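    // bounds encodes the valid window of the statistics FIFOs below:
    // the low 16 bits are the index of the next sample to be written ("newest open"),
    // the high 16 bits are the index of the oldest valid sample ("oldest closed").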
    uint32_t bounds = 0;
    bool full = false;      // whether we have collected at least kSamplingN samples
    ThreadCpuUsage tcu;     // for reading the current CPU clock frequency in kHz
#endif
    unsigned coldGen = 0;   // last observed mColdGen
    bool isWarm = false;    // true means ready to mix, false means wait for warmup before mixing
    struct timespec measuredWarmupTs = {0, 0};  // how long did it take for warmup to complete
    uint32_t warmupCycles = 0;  // counter of number of loop cycles required to warmup

    for (;;) {

        // either nanosleep, sched_yield, or busy wait
        if (sleepNs >= 0) {
            if (sleepNs > 0) {
                ALOG_ASSERT(sleepNs < 1000000000);
                const struct timespec req = {0, sleepNs};
                nanosleep(&req, NULL);
            } else {
                sched_yield();
            }
        }
        // default to long sleep for next cycle
        sleepNs = FAST_DEFAULT_NS;

        // poll for state change
        const FastMixerState *next = mSQ.poll();
        if (next == NULL) {
            // continue to use the default initial state until a real state is available
            ALOG_ASSERT(current == &initial && previous == &initial);
            next = current;
        }

        FastMixerState::Command command = next->mCommand;
        if (next != current) {

            // Start using a new dump area as soon as we learn of it.
            dumpState = next->mDumpState != NULL ? next->mDumpState : &dummyDumpState;

            // We want to always have a valid reference to the previous (non-idle) state.
            // However, the state queue only guarantees access to current and previous states.
            // So when there is a transition from a non-idle state into an idle state, we make a
            // copy of the last known non-idle state so it is still available on return from idle.
            // The possible transitions are:
            //  non-idle -> non-idle    update previous from current in-place
            //  non-idle -> idle        update previous from copy of current
            //  idle     -> idle        don't update previous
            //  idle     -> non-idle    don't update previous
            if (!(current->mCommand & FastMixerState::IDLE)) {
                if (command & FastMixerState::IDLE) {
                    preIdle = *current;
                    current = &preIdle;
                    oldTsValid = false;
                    oldLoadValid = false;
                    ignoreNextOverrun = true;
                }
                previous = current;
            }
            current = next;
        }
#if !LOG_NDEBUG
        next = NULL;    // not referenced again
#endif

        dumpState->mCommand = command;

        switch (command) {
        case FastMixerState::INITIAL:
        case FastMixerState::HOT_IDLE:
            sleepNs = FAST_HOT_IDLE_NS;
            continue;
        case FastMixerState::COLD_IDLE:
            // only perform a cold idle command once
            // FIXME consider checking previous state and only perform if previous != COLD_IDLE
            if (current->mColdGen != coldGen) {
                int32_t *coldFutexAddr = current->mColdFutexAddr;
                ALOG_ASSERT(coldFutexAddr != NULL);
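                // Decrement the futex value; if it was already <= 0 the wake-up has not
                // arrived yet, so block until another thread (normally the regular mixer
                // thread that pushed this state) increments the futex and wakes us.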
                int32_t old = android_atomic_dec(coldFutexAddr);
                if (old <= 0) {
                    __futex_syscall4(coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, NULL);
                }
                // This may be overly conservative; there could be times when the normal mixer
                // requests such a brief cold idle that it doesn't require resetting this flag.
                isWarm = false;
                measuredWarmupTs.tv_sec = 0;
                measuredWarmupTs.tv_nsec = 0;
                warmupCycles = 0;
                sleepNs = -1;
                coldGen = current->mColdGen;
                bounds = 0;
                full = false;
            } else {
                sleepNs = FAST_HOT_IDLE_NS;
            }
            continue;
        case FastMixerState::EXIT:
            delete mixer;
            delete[] mixBuffer;
            return false;
        case FastMixerState::MIX:
        case FastMixerState::WRITE:
        case FastMixerState::MIX_WRITE:
            break;
        default:
            LOG_FATAL("bad command %d", command);
        }

        // there is a non-idle state available to us; did the state change?
        size_t frameCount = current->mFrameCount;
        if (current != previous) {

            // handle state change here, but since we want to diff the state,
            // we're prepared for previous == &initial the first time through
            unsigned previousTrackMask;

            // check for change in output HAL configuration
            NBAIO_Format previousFormat = format;
            if (current->mOutputSinkGen != outputSinkGen) {
                outputSink = current->mOutputSink;
                outputSinkGen = current->mOutputSinkGen;
                if (outputSink == NULL) {
                    format = Format_Invalid;
                    sampleRate = 0;
                } else {
                    format = outputSink->format();
                    sampleRate = Format_sampleRate(format);
                    ALOG_ASSERT(Format_channelCount(format) == 2);
                }
                dumpState->mSampleRate = sampleRate;
            }

            if ((format != previousFormat) || (frameCount != previous->mFrameCount)) {
                // FIXME to avoid priority inversion, don't delete here
                delete mixer;
                mixer = NULL;
                delete[] mixBuffer;
                mixBuffer = NULL;
                if (frameCount > 0 && sampleRate > 0) {
                    // FIXME new may block for an unbounded time at an internal mutex of the heap
                    // implementation; it would be better to have the normal mixer allocate for us
                    // to avoid blocking here and to prevent possible priority inversion
                    mixer = new AudioMixer(frameCount, sampleRate, FastMixerState::kMaxFastTracks);
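                    // frameCount stereo frames of 16-bit PCM (the sink's channel count is
                    // asserted to be 2 above)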
                    mixBuffer = new short[frameCount * 2];
                    periodNs = (frameCount * 1000000000LL) / sampleRate;    // 1.00
                    underrunNs = (frameCount * 1750000000LL) / sampleRate;  // 1.75
                    overrunNs = (frameCount * 250000000LL) / sampleRate;    // 0.25
                    warmupNs = (frameCount * 500000000LL) / sampleRate;     // 0.50
                } else {
                    periodNs = 0;
                    underrunNs = 0;
                    overrunNs = 0;
                }
                mixBufferState = UNDEFINED;
#if !LOG_NDEBUG
                for (i = 0; i < FastMixerState::kMaxFastTracks; ++i) {
                    fastTrackNames[i] = -1;
                }
#endif
                // we need to reconfigure all active tracks
                previousTrackMask = 0;
                fastTracksGen = current->mFastTracksGen - 1;
                dumpState->mFrameCount = frameCount;
            } else {
                previousTrackMask = previous->mTrackMask;
            }

            // check for change in active track set
            unsigned currentTrackMask = current->mTrackMask;
            if (current->mFastTracksGen != fastTracksGen) {
                ALOG_ASSERT(mixBuffer != NULL);
                int name;

                // process removed tracks first to avoid running out of track names
                unsigned removedTracks = previousTrackMask & ~currentTrackMask;
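                // Iterate over the set bits of the mask: __builtin_ctz returns the index of
                // the lowest set bit, which is then cleared before the next iteration.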
                while (removedTracks != 0) {
                    i = __builtin_ctz(removedTracks);
                    removedTracks &= ~(1 << i);
                    const FastTrack* fastTrack = &current->mFastTracks[i];
                    ALOG_ASSERT(fastTrack->mBufferProvider == NULL);
                    if (mixer != NULL) {
                        name = fastTrackNames[i];
                        ALOG_ASSERT(name >= 0);
                        mixer->deleteTrackName(name);
                    }
#if !LOG_NDEBUG
                    fastTrackNames[i] = -1;
#endif
                    // don't reset track dump state, since other side is ignoring it
                    generations[i] = fastTrack->mGeneration;
                }

                // now process added tracks
                unsigned addedTracks = currentTrackMask & ~previousTrackMask;
                while (addedTracks != 0) {
                    i = __builtin_ctz(addedTracks);
                    addedTracks &= ~(1 << i);
                    const FastTrack* fastTrack = &current->mFastTracks[i];
                    AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
                    ALOG_ASSERT(bufferProvider != NULL && fastTrackNames[i] == -1);
                    if (mixer != NULL) {
                        // calling getTrackName with default channel mask
                        name = mixer->getTrackName(AUDIO_CHANNEL_OUT_STEREO);
                        ALOG_ASSERT(name >= 0);
                        fastTrackNames[i] = name;
                        mixer->setBufferProvider(name, bufferProvider);
                        mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
                                (void *) mixBuffer);
                        // newly allocated track names default to full scale volume
                        if (fastTrack->mSampleRate != 0 && fastTrack->mSampleRate != sampleRate) {
                            mixer->setParameter(name, AudioMixer::RESAMPLE,
                                    AudioMixer::SAMPLE_RATE, (void*) fastTrack->mSampleRate);
                        }
                        mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
                                (void *) fastTrack->mChannelMask);
                        mixer->enable(name);
                    }
                    generations[i] = fastTrack->mGeneration;
                }

                // finally process modified tracks; these use the same slot
                // but may have a different buffer provider or volume provider
                unsigned modifiedTracks = currentTrackMask & previousTrackMask;
                while (modifiedTracks != 0) {
                    i = __builtin_ctz(modifiedTracks);
                    modifiedTracks &= ~(1 << i);
                    const FastTrack* fastTrack = &current->mFastTracks[i];
                    if (fastTrack->mGeneration != generations[i]) {
                        AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
                        ALOG_ASSERT(bufferProvider != NULL);
                        if (mixer != NULL) {
                            name = fastTrackNames[i];
                            ALOG_ASSERT(name >= 0);
                            mixer->setBufferProvider(name, bufferProvider);
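                            // no per-track volume provider, so reset both channels to
                            // full-scale (unity) volume, 0x1000 in the mixer's fixed-point format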
                            if (fastTrack->mVolumeProvider == NULL) {
                                mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0,
                                        (void *)0x1000);
                                mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1,
                                        (void *)0x1000);
                            }
                            if (fastTrack->mSampleRate != 0 &&
                                    fastTrack->mSampleRate != sampleRate) {
                                mixer->setParameter(name, AudioMixer::RESAMPLE,
                                        AudioMixer::SAMPLE_RATE, (void*) fastTrack->mSampleRate);
                            } else {
                                mixer->setParameter(name, AudioMixer::RESAMPLE,
                                        AudioMixer::REMOVE, NULL);
                            }
                            mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
                                    (void *) fastTrack->mChannelMask);
                            // already enabled
                        }
                        generations[i] = fastTrack->mGeneration;
                    }
                }

                fastTracksGen = current->mFastTracksGen;

                dumpState->mNumTracks = popcount(currentTrackMask);
            }

#if 1   // FIXME shouldn't need this
            // only process state change once
            previous = current;
#endif
        }

        // do work using current state here
        if ((command & FastMixerState::MIX) && (mixer != NULL) && isWarm) {
            ALOG_ASSERT(mixBuffer != NULL);
            // for each track, update volume and check for underrun
            unsigned currentTrackMask = current->mTrackMask;
            while (currentTrackMask != 0) {
                i = __builtin_ctz(currentTrackMask);
                currentTrackMask &= ~(1 << i);
                const FastTrack* fastTrack = &current->mFastTracks[i];
                int name = fastTrackNames[i];
                ALOG_ASSERT(name >= 0);
                if (fastTrack->mVolumeProvider != NULL) {
                    uint32_t vlr = fastTrack->mVolumeProvider->getVolumeLR();
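                    // getVolumeLR() packs the left gain in the low 16 bits and the right gain
                    // in the high 16 bits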
                    mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0,
                            (void *)(vlr & 0xFFFF));
                    mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1,
                            (void *)(vlr >> 16));
                }
                // FIXME The current implementation of framesReady() for fast tracks
                // takes a tryLock, which can block up to 1 ms.  If enough active tracks
                // all blocked in sequence, this would result in the overall fast mix cycle
                // being delayed.  Should use a non-blocking FIFO.
                size_t framesReady = fastTrack->mBufferProvider->framesReady();
                FastTrackDump *ftDump = &dumpState->mTracks[i];
                FastTrackUnderruns underruns = ftDump->mUnderruns;
                if (framesReady < frameCount) {
                    ATRACE_INT("underrun", i);
                    if (framesReady == 0) {
                        underruns.mBitFields.mEmpty++;
                        underruns.mBitFields.mMostRecent = UNDERRUN_EMPTY;
                        mixer->disable(name);
                    } else {
                        // allow mixing partial buffer
                        underruns.mBitFields.mPartial++;
                        underruns.mBitFields.mMostRecent = UNDERRUN_PARTIAL;
                        mixer->enable(name);
                    }
                } else {
                    underruns.mBitFields.mFull++;
                    underruns.mBitFields.mMostRecent = UNDERRUN_FULL;
                    mixer->enable(name);
                }
                ftDump->mUnderruns = underruns;
            }
            // process() is CPU-bound
            mixer->process(AudioBufferProvider::kInvalidPTS);
            mixBufferState = MIXED;
        } else if (mixBufferState == MIXED) {
            mixBufferState = UNDEFINED;
        }
        bool attemptedWrite = false;
        //bool didFullWrite = false;    // dumpsys could display a count of partial writes
        if ((command & FastMixerState::WRITE) && (outputSink != NULL) && (mixBuffer != NULL)) {
            if (mixBufferState == UNDEFINED) {
                memset(mixBuffer, 0, frameCount * 2 * sizeof(short));
                mixBufferState = ZEROED;
            }
            // FIXME write() is non-blocking and lock-free for a properly implemented NBAIO sink,
            //       but this code should be modified to handle both non-blocking and blocking sinks
            dumpState->mWriteSequence++;
            Tracer::traceBegin(ATRACE_TAG, "write");
            ssize_t framesWritten = outputSink->write(mixBuffer, frameCount);
            Tracer::traceEnd(ATRACE_TAG);
            dumpState->mWriteSequence++;
            if (framesWritten >= 0) {
                ALOG_ASSERT(framesWritten <= frameCount);
                dumpState->mFramesWritten += framesWritten;
                //if ((size_t) framesWritten == frameCount) {
                //    didFullWrite = true;
                //}
            } else {
                dumpState->mWriteErrors++;
            }
            attemptedWrite = true;
            // FIXME count # of writes blocked excessively, CPU usage, etc. for dump
        }

        // To be exactly periodic, compute the next sleep time based on current time.
        // This code doesn't have long-term stability when the sink is non-blocking.
        // FIXME To avoid drift, use the local audio clock or watch the sink's fill status.
        struct timespec newTs;
        int rc = clock_gettime(CLOCK_MONOTONIC, &newTs);
        if (rc == 0) {
            if (oldTsValid) {
                time_t sec = newTs.tv_sec - oldTs.tv_sec;
                long nsec = newTs.tv_nsec - oldTs.tv_nsec;
                if (nsec < 0) {
                    --sec;
                    nsec += 1000000000;
                }
                // To avoid an initial underrun on fast tracks after exiting standby,
                // do not start pulling data from tracks and mixing until warmup is complete.
                // Warmup is considered complete after the earlier of:
                //      first successful single write() that blocks for more than warmupNs
                //      MAX_WARMUP_CYCLES write() attempts.
                // This is overly conservative, but to get better accuracy requires a new HAL API.
                if (!isWarm && attemptedWrite) {
                    measuredWarmupTs.tv_sec += sec;
                    measuredWarmupTs.tv_nsec += nsec;
                    if (measuredWarmupTs.tv_nsec >= 1000000000) {
                        measuredWarmupTs.tv_sec++;
                        measuredWarmupTs.tv_nsec -= 1000000000;
                    }
                    ++warmupCycles;
                    if ((attemptedWrite && nsec > warmupNs) ||
                            (warmupCycles >= MAX_WARMUP_CYCLES)) {
                        isWarm = true;
                        dumpState->mMeasuredWarmupTs = measuredWarmupTs;
                        dumpState->mWarmupCycles = warmupCycles;
                    }
                }
                if (sec > 0 || nsec > underrunNs) {
                    ScopedTrace st(ATRACE_TAG, "underrun");
                    // FIXME only log occasionally
                    ALOGV("underrun: time since last cycle %d.%03ld sec",
                            (int) sec, nsec / 1000000L);
                    dumpState->mUnderruns++;
                    sleepNs = -1;
                    ignoreNextOverrun = true;
                } else if (nsec < overrunNs) {
                    if (ignoreNextOverrun) {
                        ignoreNextOverrun = false;
                    } else {
                        // FIXME only log occasionally
                        ALOGV("overrun: time since last cycle %d.%03ld sec",
                                (int) sec, nsec / 1000000L);
                        dumpState->mOverruns++;
                    }
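                    // We woke up too early; sleep for roughly the remainder of the nominal
                    // period (period minus the overrun margin) to get back near schedule.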
                    sleepNs = periodNs - overrunNs;
                } else {
                    sleepNs = -1;
                    ignoreNextOverrun = false;
                }
#ifdef FAST_MIXER_STATISTICS
                // advance the FIFO queue bounds
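                // The low 16 bits of bounds index the next sample to write; once the buffer
                // has filled, the high 16 bits (oldest valid sample) advance in lockstep so
                // the window stays kSamplingN samples wide.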
                size_t i = bounds & (FastMixerDumpState::kSamplingN - 1);
                bounds = (bounds & 0xFFFF0000) | ((bounds + 1) & 0xFFFF);
                if (full) {
                    bounds += 0x10000;
                } else if (!(bounds & (FastMixerDumpState::kSamplingN - 1))) {
                    full = true;
                }
                // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
                uint32_t monotonicNs = nsec;
                if (sec > 0 && sec < 4) {
                    monotonicNs += sec * 1000000000;
                }
                // compute the raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
                uint32_t loadNs = 0;
                struct timespec newLoad;
                rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
                if (rc == 0) {
                    if (oldLoadValid) {
                        sec = newLoad.tv_sec - oldLoad.tv_sec;
                        nsec = newLoad.tv_nsec - oldLoad.tv_nsec;
                        if (nsec < 0) {
                            --sec;
                            nsec += 1000000000;
                        }
                        loadNs = nsec;
                        if (sec > 0 && sec < 4) {
                            loadNs += sec * 1000000000;
                        }
                    } else {
                        // first time through the loop
                        oldLoadValid = true;
                    }
                    oldLoad = newLoad;
                }
                // get the absolute value of CPU clock frequency in kHz
                int cpuNum = sched_getcpu();
                uint32_t kHz = tcu.getCpukHz(cpuNum);
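                // Pack the CPU number into the low 4 bits so the dump code can discard load
                // samples taken across a CPU migration or clock frequency change.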
                kHz = (kHz << 4) | (cpuNum & 0xF);
                // save values in FIFO queues for dumpsys
                // these stores #1, #2, #3 are not atomic with respect to each other,
                // or with respect to store #4 below
                dumpState->mMonotonicNs[i] = monotonicNs;
                dumpState->mLoadNs[i] = loadNs;
                dumpState->mCpukHz[i] = kHz;
                // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
                // the newest open and oldest closed halves are atomic with respect to each other
                dumpState->mBounds = bounds;
#endif
            } else {
                // first time through the loop
                oldTsValid = true;
                sleepNs = periodNs;
                ignoreNextOverrun = true;
            }
            oldTs = newTs;
        } else {
            // monotonic clock is broken
            oldTsValid = false;
            sleepNs = periodNs;
        }

    }   // for (;;)

    // never return 'true'; Thread::_threadLoop() locks mutex which can result in priority inversion
}

FastMixerDumpState::FastMixerDumpState() :
    mCommand(FastMixerState::INITIAL), mWriteSequence(0), mFramesWritten(0),
    mNumTracks(0), mWriteErrors(0), mUnderruns(0), mOverruns(0),
    mSampleRate(0), mFrameCount(0), /* mMeasuredWarmupTs({0, 0}), */ mWarmupCycles(0)
#ifdef FAST_MIXER_STATISTICS
    , mBounds(0)
#endif
{
    mMeasuredWarmupTs.tv_sec = 0;
    mMeasuredWarmupTs.tv_nsec = 0;
    // sample arrays aren't accessed atomically with respect to the bounds,
    // so clearing them reduces the chance that dumpsys reads random uninitialized samples
    memset(&mMonotonicNs, 0, sizeof(mMonotonicNs));
    memset(&mLoadNs, 0, sizeof(mLoadNs));
    memset(&mCpukHz, 0, sizeof(mCpukHz));
}

FastMixerDumpState::~FastMixerDumpState()
{
}

void FastMixerDumpState::dump(int fd)
{
#define COMMAND_MAX 32
    char string[COMMAND_MAX];
    switch (mCommand) {
    case FastMixerState::INITIAL:
        strcpy(string, "INITIAL");
        break;
    case FastMixerState::HOT_IDLE:
        strcpy(string, "HOT_IDLE");
        break;
    case FastMixerState::COLD_IDLE:
        strcpy(string, "COLD_IDLE");
        break;
    case FastMixerState::EXIT:
        strcpy(string, "EXIT");
        break;
    case FastMixerState::MIX:
        strcpy(string, "MIX");
        break;
    case FastMixerState::WRITE:
        strcpy(string, "WRITE");
        break;
    case FastMixerState::MIX_WRITE:
        strcpy(string, "MIX_WRITE");
        break;
    default:
        snprintf(string, COMMAND_MAX, "%d", mCommand);
        break;
    }
    double measuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1000.0) +
            (mMeasuredWarmupTs.tv_nsec / 1000000.0);
    double mixPeriodSec = (double) mFrameCount / (double) mSampleRate;
    fdprintf(fd, "FastMixer command=%s writeSequence=%u framesWritten=%u\n"
                 "          numTracks=%u writeErrors=%u underruns=%u overruns=%u\n"
                 "          sampleRate=%u frameCount=%u measuredWarmup=%.3g ms, warmupCycles=%u\n"
                 "          mixPeriod=%.2f ms\n",
                 string, mWriteSequence, mFramesWritten,
                 mNumTracks, mWriteErrors, mUnderruns, mOverruns,
                 mSampleRate, mFrameCount, measuredWarmupMs, mWarmupCycles,
                 mixPeriodSec * 1e3);
#ifdef FAST_MIXER_STATISTICS
    // find the interval of valid samples
    uint32_t bounds = mBounds;
    uint32_t newestOpen = bounds & 0xFFFF;
    uint32_t oldestClosed = bounds >> 16;
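    // the number of valid samples is the 16-bit modular difference between the
    // newest-open and oldest-closed indices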
    uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
    if (n > kSamplingN) {
        ALOGE("too many samples %u", n);
        n = kSamplingN;
    }
    // statistics for monotonic (wall clock) time, thread raw CPU load in time, CPU clock frequency,
    // and adjusted CPU load in MHz normalized for CPU clock frequency
    CentralTendencyStatistics wall, loadNs, kHz, loadMHz;
    // only compute adjusted CPU load in MHz if the current CPU number and clock frequency are stable
    bool valid = false;
    uint32_t previousCpukHz = 0;
    // loop over all the samples
    for (; n > 0; --n) {
        size_t i = oldestClosed++ & (kSamplingN - 1);
        uint32_t wallNs = mMonotonicNs[i];
        wall.sample(wallNs);
        uint32_t sampleLoadNs = mLoadNs[i];
        uint32_t sampleCpukHz = mCpukHz[i];
        loadNs.sample(sampleLoadNs);
        // skip bad kHz samples
        if ((sampleCpukHz & ~0xF) != 0) {
            kHz.sample(sampleCpukHz >> 4);
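            // only derive an adjusted load when neither the CPU nor its clock frequency
            // changed between consecutive samples; ns * kHz * 1e-12 gives megacycles, and
            // dividing by the mix period normalizes that to MHz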
            if (sampleCpukHz == previousCpukHz) {
                double megacycles = (double) sampleLoadNs * (double) (sampleCpukHz >> 4) * 1e-12;
                double adjMHz = megacycles / mixPeriodSec;  // _not_ wallNs * 1e9
                loadMHz.sample(adjMHz);
            }
        }
        previousCpukHz = sampleCpukHz;
    }
    fdprintf(fd, "Simple moving statistics over last %.1f seconds:\n", wall.n() * mixPeriodSec);
    fdprintf(fd, "  wall clock time in ms per mix cycle:\n"
                 "    mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
                 wall.mean()*1e-6, wall.minimum()*1e-6, wall.maximum()*1e-6, wall.stddev()*1e-6);
    fdprintf(fd, "  raw CPU load in us per mix cycle:\n"
                 "    mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
                 loadNs.mean()*1e-3, loadNs.minimum()*1e-3, loadNs.maximum()*1e-3,
                 loadNs.stddev()*1e-3);
    fdprintf(fd, "  CPU clock frequency in MHz:\n"
                 "    mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
                 kHz.mean()*1e-3, kHz.minimum()*1e-3, kHz.maximum()*1e-3, kHz.stddev()*1e-3);
    fdprintf(fd, "  adjusted CPU load in MHz (i.e. normalized for CPU clock frequency):\n"
                 "    mean=%.1f min=%.1f max=%.1f stddev=%.1f\n",
                 loadMHz.mean(), loadMHz.minimum(), loadMHz.maximum(), loadMHz.stddev());
#endif
}

}   // namespace android