blob: ffdc11772651edf74ea44dff1e8f335708c7a55e [file] [log] [blame]
Glenn Kasten04333cd2015-02-17 16:23:03 -08001/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "FastMixerDumpState"
18//#define LOG_NDEBUG 0
19
#include "Configuration.h"
#ifdef FAST_THREAD_STATISTICS
#include <audio_utils/Statistics.h>
#ifdef CPU_FREQUENCY_STATISTICS
#include <cpustats/ThreadCpuUsage.h>
#endif
#endif
#include <string>
#include <vector>
#include <utils/Debug.h>
#include <utils/Log.h>
#include "FastMixerDumpState.h"
31
32namespace android {
33
Glenn Kastenfbdb2ac2015-03-02 14:47:19 -080034FastMixerDumpState::FastMixerDumpState() : FastThreadDumpState(),
Glenn Kasten04333cd2015-02-17 16:23:03 -080035 mWriteSequence(0), mFramesWritten(0),
36 mNumTracks(0), mWriteErrors(0),
37 mSampleRate(0), mFrameCount(0),
38 mTrackMask(0)
39{
Glenn Kasten04333cd2015-02-17 16:23:03 -080040}
41
Glenn Kasten04333cd2015-02-17 16:23:03 -080042FastMixerDumpState::~FastMixerDumpState()
43{
44}
45
// Three-way comparison callback for qsort(): orders uint32_t values ascending.
static int compare_uint32_t(const void *pa, const void *pb)
{
    const uint32_t a = *(const uint32_t *)pa;
    const uint32_t b = *(const uint32_t *)pb;
    if (a == b) {
        return 0;
    }
    return a < b ? -1 : 1;
}
59
60void FastMixerDumpState::dump(int fd) const
61{
62 if (mCommand == FastMixerState::INITIAL) {
63 dprintf(fd, " FastMixer not initialized\n");
64 return;
65 }
Glenn Kasten04333cd2015-02-17 16:23:03 -080066 double measuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1000.0) +
67 (mMeasuredWarmupTs.tv_nsec / 1000000.0);
Glenn Kasten17c9c992015-03-02 15:53:01 -080068 double mixPeriodSec = (double) mFrameCount / mSampleRate;
Glenn Kasten04333cd2015-02-17 16:23:03 -080069 dprintf(fd, " FastMixer command=%s writeSequence=%u framesWritten=%u\n"
70 " numTracks=%u writeErrors=%u underruns=%u overruns=%u\n"
71 " sampleRate=%u frameCount=%zu measuredWarmup=%.3g ms, warmupCycles=%u\n"
Andy Hungf6ab58d2018-05-25 12:50:39 -070072 " mixPeriod=%.2f ms latency=%.2f ms\n",
Glenn Kastenbcb14862015-03-05 17:11:21 -080073 FastMixerState::commandToString(mCommand), mWriteSequence, mFramesWritten,
74 mNumTracks, mWriteErrors, mUnderruns, mOverruns,
75 mSampleRate, mFrameCount, measuredWarmupMs, mWarmupCycles,
Andy Hungf6ab58d2018-05-25 12:50:39 -070076 mixPeriodSec * 1e3, mLatencyMs);
Andy Hung2e2c0bb2018-06-11 19:13:11 -070077 dprintf(fd, " FastMixer Timestamp stats: %s\n", mTimestampVerifier.toString().c_str());
Glenn Kasten214b4062015-03-02 14:15:47 -080078#ifdef FAST_THREAD_STATISTICS
Glenn Kasten04333cd2015-02-17 16:23:03 -080079 // find the interval of valid samples
Eric Tan7b651152018-07-13 10:17:19 -070080 const uint32_t bounds = mBounds;
81 const uint32_t newestOpen = bounds & 0xFFFF;
Glenn Kasten04333cd2015-02-17 16:23:03 -080082 uint32_t oldestClosed = bounds >> 16;
Ivan Lozanoe71027e2017-12-04 16:09:10 -080083
84 //uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
85 uint32_t n;
86 __builtin_sub_overflow(newestOpen, oldestClosed, &n);
Eric Tan7b651152018-07-13 10:17:19 -070087 n &= 0xFFFF;
Ivan Lozanoe71027e2017-12-04 16:09:10 -080088
Glenn Kasten04333cd2015-02-17 16:23:03 -080089 if (n > mSamplingN) {
90 ALOGE("too many samples %u", n);
91 n = mSamplingN;
92 }
93 // statistics for monotonic (wall clock) time, thread raw CPU load in time, CPU clock frequency,
94 // and adjusted CPU load in MHz normalized for CPU clock frequency
Eric Tan5b13ff82018-07-27 11:20:17 -070095 Statistics<double> wall, loadNs;
Glenn Kasten04333cd2015-02-17 16:23:03 -080096#ifdef CPU_FREQUENCY_STATISTICS
Eric Tan5b13ff82018-07-27 11:20:17 -070097 Statistics<double> kHz, loadMHz;
Glenn Kasten04333cd2015-02-17 16:23:03 -080098 uint32_t previousCpukHz = 0;
99#endif
100 // Assuming a normal distribution for cycle times, three standard deviations on either side of
101 // the mean account for 99.73% of the population. So if we take each tail to be 1/1000 of the
102 // sample set, we get 99.8% combined, or close to three standard deviations.
103 static const uint32_t kTailDenominator = 1000;
104 uint32_t *tail = n >= kTailDenominator ? new uint32_t[n] : NULL;
105 // loop over all the samples
106 for (uint32_t j = 0; j < n; ++j) {
107 size_t i = oldestClosed++ & (mSamplingN - 1);
108 uint32_t wallNs = mMonotonicNs[i];
109 if (tail != NULL) {
110 tail[j] = wallNs;
111 }
Eric Tan5b13ff82018-07-27 11:20:17 -0700112 wall.add(wallNs);
Glenn Kasten04333cd2015-02-17 16:23:03 -0800113 uint32_t sampleLoadNs = mLoadNs[i];
Eric Tan5b13ff82018-07-27 11:20:17 -0700114 loadNs.add(sampleLoadNs);
Glenn Kasten04333cd2015-02-17 16:23:03 -0800115#ifdef CPU_FREQUENCY_STATISTICS
116 uint32_t sampleCpukHz = mCpukHz[i];
117 // skip bad kHz samples
118 if ((sampleCpukHz & ~0xF) != 0) {
Eric Tan5b13ff82018-07-27 11:20:17 -0700119 kHz.add(sampleCpukHz >> 4);
Glenn Kasten04333cd2015-02-17 16:23:03 -0800120 if (sampleCpukHz == previousCpukHz) {
121 double megacycles = (double) sampleLoadNs * (double) (sampleCpukHz >> 4) * 1e-12;
122 double adjMHz = megacycles / mixPeriodSec; // _not_ wallNs * 1e9
Eric Tan5b13ff82018-07-27 11:20:17 -0700123 loadMHz.add(adjMHz);
Glenn Kasten04333cd2015-02-17 16:23:03 -0800124 }
125 }
126 previousCpukHz = sampleCpukHz;
127#endif
128 }
129 if (n) {
130 dprintf(fd, " Simple moving statistics over last %.1f seconds:\n",
Eric Tan5b13ff82018-07-27 11:20:17 -0700131 wall.getN() * mixPeriodSec);
Glenn Kasten04333cd2015-02-17 16:23:03 -0800132 dprintf(fd, " wall clock time in ms per mix cycle:\n"
133 " mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
Eric Tan5b13ff82018-07-27 11:20:17 -0700134 wall.getMean()*1e-6, wall.getMin()*1e-6, wall.getMax()*1e-6,
135 wall.getStdDev()*1e-6);
Glenn Kasten04333cd2015-02-17 16:23:03 -0800136 dprintf(fd, " raw CPU load in us per mix cycle:\n"
137 " mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
Eric Tan5b13ff82018-07-27 11:20:17 -0700138 loadNs.getMean()*1e-3, loadNs.getMin()*1e-3, loadNs.getMax()*1e-3,
139 loadNs.getStdDev()*1e-3);
Glenn Kasten04333cd2015-02-17 16:23:03 -0800140 } else {
141 dprintf(fd, " No FastMixer statistics available currently\n");
142 }
143#ifdef CPU_FREQUENCY_STATISTICS
144 dprintf(fd, " CPU clock frequency in MHz:\n"
145 " mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
Eric Tan5b13ff82018-07-27 11:20:17 -0700146 kHz.getMean()*1e-3, kHz.getMin()*1e-3, kHz.getMax()*1e-3, kHz.getStdDev()*1e-3);
Glenn Kasten04333cd2015-02-17 16:23:03 -0800147 dprintf(fd, " adjusted CPU load in MHz (i.e. normalized for CPU clock frequency):\n"
148 " mean=%.1f min=%.1f max=%.1f stddev=%.1f\n",
Eric Tan5b13ff82018-07-27 11:20:17 -0700149 loadMHz.getMean(), loadMHz.getMin(), loadMHz.getMax(), loadMHz.getStdDev());
Glenn Kasten04333cd2015-02-17 16:23:03 -0800150#endif
151 if (tail != NULL) {
152 qsort(tail, n, sizeof(uint32_t), compare_uint32_t);
153 // assume same number of tail samples on each side, left and right
154 uint32_t count = n / kTailDenominator;
Eric Tan5b13ff82018-07-27 11:20:17 -0700155 Statistics<double> left, right;
Glenn Kasten04333cd2015-02-17 16:23:03 -0800156 for (uint32_t i = 0; i < count; ++i) {
Eric Tan5b13ff82018-07-27 11:20:17 -0700157 left.add(tail[i]);
158 right.add(tail[n - (i + 1)]);
Glenn Kasten04333cd2015-02-17 16:23:03 -0800159 }
160 dprintf(fd, " Distribution of mix cycle times in ms for the tails "
161 "(> ~3 stddev outliers):\n"
162 " left tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n"
163 " right tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
Eric Tan5b13ff82018-07-27 11:20:17 -0700164 left.getMean()*1e-6, left.getMin()*1e-6, left.getMax()*1e-6, left.getStdDev()*1e-6,
165 right.getMean()*1e-6, right.getMin()*1e-6, right.getMax()*1e-6,
166 right.getStdDev()*1e-6);
Glenn Kasten04333cd2015-02-17 16:23:03 -0800167 delete[] tail;
168 }
169#endif
170 // The active track mask and track states are updated non-atomically.
171 // So if we relied on isActive to decide whether to display,
172 // then we might display an obsolete track or omit an active track.
173 // Instead we always display all tracks, with an indication
174 // of whether we think the track is active.
175 uint32_t trackMask = mTrackMask;
Glenn Kastendc2c50b2016-04-21 08:13:14 -0700176 dprintf(fd, " Fast tracks: sMaxFastTracks=%u activeMask=%#x\n",
177 FastMixerState::sMaxFastTracks, trackMask);
Andy Hungb54c8542016-09-21 12:55:15 -0700178 dprintf(fd, " Index Active Full Partial Empty Recent Ready Written\n");
Glenn Kastendc2c50b2016-04-21 08:13:14 -0700179 for (uint32_t i = 0; i < FastMixerState::sMaxFastTracks; ++i, trackMask >>= 1) {
Glenn Kasten04333cd2015-02-17 16:23:03 -0800180 bool isActive = trackMask & 1;
181 const FastTrackDump *ftDump = &mTracks[i];
182 const FastTrackUnderruns& underruns = ftDump->mUnderruns;
183 const char *mostRecent;
184 switch (underruns.mBitFields.mMostRecent) {
185 case UNDERRUN_FULL:
186 mostRecent = "full";
187 break;
188 case UNDERRUN_PARTIAL:
189 mostRecent = "partial";
190 break;
191 case UNDERRUN_EMPTY:
192 mostRecent = "empty";
193 break;
194 default:
195 mostRecent = "?";
196 break;
197 }
Andy Hungb54c8542016-09-21 12:55:15 -0700198 dprintf(fd, " %5u %6s %4u %7u %5u %7s %5zu %10lld\n",
199 i, isActive ? "yes" : "no",
Glenn Kasten04333cd2015-02-17 16:23:03 -0800200 (underruns.mBitFields.mFull) & UNDERRUN_MASK,
201 (underruns.mBitFields.mPartial) & UNDERRUN_MASK,
202 (underruns.mBitFields.mEmpty) & UNDERRUN_MASK,
Andy Hungb54c8542016-09-21 12:55:15 -0700203 mostRecent, ftDump->mFramesReady,
204 (long long)ftDump->mFramesWritten);
Glenn Kasten04333cd2015-02-17 16:23:03 -0800205 }
206}
207
Eric Tan7b651152018-07-13 10:17:19 -0700208// TODO get rid of extraneous lines and use better key names.
209// TODO may go back to using a library to do the json formatting.
210std::string FastMixerDumpState::getJsonString() const
211{
212 if (mCommand == FastMixerState::INITIAL) {
213 return " {\n \"status\": \"uninitialized\"\n }";
214 }
215 std::string jsonStr = " {\n";
216#ifdef FAST_THREAD_STATISTICS
217 // find the interval of valid samples
218 const uint32_t bounds = mBounds;
219 const uint32_t newestOpen = bounds & 0xFFFF;
220 uint32_t oldestClosed = bounds >> 16;
221
222 //uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
223 uint32_t n;
224 __builtin_sub_overflow(newestOpen, oldestClosed, &n);
225 n &= 0xFFFF;
226
227 if (n > mSamplingN) {
228 ALOGE("too many samples %u", n);
229 n = mSamplingN;
230 }
231 // statistics for monotonic (wall clock) time, thread raw CPU load in time, CPU clock frequency,
232 // and adjusted CPU load in MHz normalized for CPU clock frequency
233 std::string jsonWallStr = " \"wall_clock_time\":[";
234 std::string jsonLoadNsStr = " \"raw_cpu_load\":[";
235 // loop over all the samples
236 for (uint32_t j = 0; j < n; ++j) {
237 size_t i = oldestClosed++ & (mSamplingN - 1);
238 uint32_t wallNs = mMonotonicNs[i];
239 if (j != 0) {
240 jsonWallStr += ',';
241 jsonLoadNsStr += ',';
242 }
243 /* jsonObject["wall"].append(wallNs); */
244 jsonWallStr += std::to_string(wallNs);
245 uint32_t sampleLoadNs = mLoadNs[i];
246 jsonLoadNsStr += std::to_string(sampleLoadNs);
247 }
248 jsonWallStr += ']';
249 jsonLoadNsStr += ']';
250 if (n) {
251 jsonStr += jsonWallStr + ",\n" + jsonLoadNsStr + "\n";
252 } else {
253 //dprintf(fd, " No FastMixer statistics available currently\n");
254 }
255#endif
256 jsonStr += " }";
257 return jsonStr;
258}
259
Glenn Kasten04333cd2015-02-17 16:23:03 -0800260} // android