Prints one histogram of all data in recent history
A call to dumpsys media.log causes the program to read
timestamp data from the circular buffer in local memory
into a local data structure that stores this data in a
compressed format.
The current code is a temporary step towards the ultimate
goal (described below). The timestamp data is turned into
histograms of a small, fixed size, and added to a vector
of histograms. When the vector reaches a given number of
histograms, the oldest one is popped. When dump() is called,
all the short histograms are combined into one large one and
printed. The advantage of this approach is to not delete
data from memory until it is overwritten. Ultimately, this
data structure should be a circular buffer. A second data
structure should be added which keeps a more long-term analysis
of the data.
Test: dumpsys media.log
Change-Id: I39c4818e7d1221d5023a4355b8b57fadd9451de5
diff --git a/media/libnbaio/NBLog.cpp b/media/libnbaio/NBLog.cpp
index aa5d1e1..d590283 100644
--- a/media/libnbaio/NBLog.cpp
+++ b/media/libnbaio/NBLog.cpp
@@ -55,7 +55,7 @@
* LOG_HIST_FLUSH()
* calls logHistFlush
* NBLog::Writer::logHistFlush
-* records current timestamp to write it to the console
+* records current timestamp
* calls log(EVENT_HISTOGRAM_FLUSH)
* From here, everything is the same as in 1), resulting in call to fifo write
*
@@ -78,9 +78,8 @@
*
* 4) reading the data from private buffer
* MediaLogService::dump
-* TODO: when was MediaLogService::dump called?
-* For each NBLog::Reader in vector NamedReaders (subclass mergeReader):
-* calls NBLog::Reader::dump(int)
+* calls NBLog::Reader::dump(CONSOLE)
+* The private buffer contains all logs for all readers in shared memory
* NBLog::Reader::dump(int)
* calls getSnapshot on the current reader
* calls dump(int, size_t, Snapshot)
@@ -959,30 +958,19 @@
ofs.close();
}
+// converts a time series into a map. key: buffer period length. value: count
+static std::map<int, int> buildBuckets(const std::vector<int64_t> &samples) {
+ // TODO allow buckets of variable resolution
+ std::map<int, int> buckets;
+ for (size_t i = 1; i < samples.size(); ++i) {
+ ++buckets[deltaMs(samples[i - 1], samples[i])];
+ }
+ return buckets;
+}
+
void NBLog::Reader::dump(int fd, size_t indent, NBLog::Reader::Snapshot &snapshot)
{
- // CallStack cs(LOG_TAG);
-#if 0
- struct timespec ts;
- time_t maxSec = -1;
- while (entry - start >= (int) Entry::kOverhead) {
- if (prevEntry - start < 0 || !prevEntry.hasConsistentLength()) {
- break;
- }
- if (prevEntry->type == EVENT_TIMESTAMP) {
- if (prevEntry->length != sizeof(struct timespec)) {
- // corrupt
- break;
- }
- prevEntry.copyData((uint8_t*) &ts);
- if (ts.tv_sec > maxSec) {
- maxSec = ts.tv_sec;
- }
- }
- --entry;
- --prevEntry;
- }
-#endif
+ // CallStack cs(LOG_TAG);
mFd = fd;
mIndent = indent;
String8 timestamp, body;
@@ -993,85 +981,9 @@
// log to push it out. Consider keeping the timestamp/body between calls to copyEntryDataAt().
dumpLine(timestamp, body);
}
-#if 0
- size_t width = 1;
- while (maxSec >= 10) {
- ++width;
- maxSec /= 10;
- }
- if (maxSec >= 0) {
- timestamp.appendFormat("[%*s]", (int) width + 4, "");
- }
- bool deferredTimestamp = false;
-#endif
for (auto entry = snapshot.begin(); entry != snapshot.end();) {
switch (entry->type) {
-#if 0
- case EVENT_STRING:
- body.appendFormat("%.*s", (int) entry.length(), entry.data());
- break;
- case EVENT_TIMESTAMP: {
- // already checked that length == sizeof(struct timespec);
- entry.copyData((const uint8_t*) &ts);
- long prevNsec = ts.tv_nsec;
- long deltaMin = LONG_MAX;
- long deltaMax = -1;
- long deltaTotal = 0;
- auto aux(entry);
- for (;;) {
- ++aux;
- if (end - aux >= 0 || aux.type() != EVENT_TIMESTAMP) {
- break;
- }
- struct timespec tsNext;
- aux.copyData((const uint8_t*) &tsNext);
- if (tsNext.tv_sec != ts.tv_sec) {
- break;
- }
- long delta = tsNext.tv_nsec - prevNsec;
- if (delta < 0) {
- break;
- }
- if (delta < deltaMin) {
- deltaMin = delta;
- }
- if (delta > deltaMax) {
- deltaMax = delta;
- }
- deltaTotal += delta;
- prevNsec = tsNext.tv_nsec;
- }
- size_t n = (aux - entry) / (sizeof(struct timespec) + 3 /*Entry::kOverhead?*/);
- if (deferredTimestamp) {
- dumpLine(timestamp, body);
- deferredTimestamp = false;
- }
- timestamp.clear();
- if (n >= kSquashTimestamp) {
- timestamp.appendFormat("[%d.%03d to .%.03d by .%.03d to .%.03d]",
- (int) ts.tv_sec, (int) (ts.tv_nsec / 1000000),
- (int) ((ts.tv_nsec + deltaTotal) / 1000000),
- (int) (deltaMin / 1000000), (int) (deltaMax / 1000000));
- entry = aux;
- // advance = 0;
- break;
- }
- timestamp.appendFormat("[%d.%03d]", (int) ts.tv_sec,
- (int) (ts.tv_nsec / 1000000));
- deferredTimestamp = true;
- }
- break;
- case EVENT_INTEGER:
- appendInt(&body, entry.data());
- break;
- case EVENT_FLOAT:
- appendFloat(&body, entry.data());
- break;
- case EVENT_PID:
- appendPID(&body, entry.data(), entry.length());
- break;
-#endif
case EVENT_START_FMT:
entry = handleFormat(FormatEntry(entry), ×tamp, &body);
break;
@@ -1087,36 +999,27 @@
// TODO might want to filter excessively high outliers, which are usually caused
// by the thread being inactive.
mHists[key].push_back(ts);
+ // store time series data for each reader in order to bucket it once there
+ // is enough data. Then, it is written to recentHists as a histogram.
+ mTimeStampSeries[data->author].push_back(ts);
+ // if length of the time series has reached kShortHistSize samples,
+ // compute its histogram, append this to mRecentHists and erase the time series
+ if (mTimeStampSeries[data->author].size() >= kShortHistSize) {
+ mRecentHists.emplace_front(data->author,
+ buildBuckets(mTimeStampSeries[data->author]));
+ // do not let mRecentHists exceed capacity
+ // TODO: turn the FIFO queue into a circular buffer
+ if (mRecentHists.size() >= kRecentHistsCapacity) {
+ mRecentHists.pop_back();
+ }
+ mTimeStampSeries.erase(data->author);
+ }
+ // if an element in mHists has not grown for a long time, delete
+ // TODO copy histogram data only to mRecentHistsBuffer and pop oldest
++entry;
break;
}
- // draws histograms stored in global Reader::mHists and erases them
case EVENT_HISTOGRAM_FLUSH: {
- HistogramEntry histEntry(entry);
- // Log timestamp
- // Timestamp of call to drawHistogram, not when audio was generated
- const int64_t ts = histEntry.timestamp();
- timestamp.clear();
- timestamp.appendFormat("[%d.%03d]", (int) (ts / (1000 * 1000 * 1000)),
- (int) ((ts / (1000 * 1000)) % 1000));
- // Log histograms
- setFindGlitch(true);
- body.appendFormat("Histogram flush - ");
- handleAuthor(histEntry, &body);
- for (auto hist = mHists.begin(); hist != mHists.end();) {
- if (hist->first.second == histEntry.author()) {
- body.appendFormat("%X", (int)hist->first.first);
- if (findGlitch) {
- alertIfGlitch(hist->second);
- }
- // set file to empty and write data for all histograms in this set
- writeHistToFile(hist->second, hist != mHists.begin());
- drawHistogram(&body, hist->second, true, indent);
- hist = mHists.erase(hist);
- } else {
- ++hist;
- }
- }
++entry;
break;
}
@@ -1130,10 +1033,13 @@
++entry;
break;
}
-
- if (!body.isEmpty()) {
- dumpLine(timestamp, body);
- }
+ // if (!body.isEmpty()) {
+ // dumpLine(timestamp, body);
+ // }
+ }
+ reportPerformance(&body, mRecentHists);
+ if (!body.isEmpty()) {
+ dumpLine(timestamp, body);
}
}
@@ -1312,36 +1218,88 @@
return width;
}
-static std::map<int, int> buildBuckets(const std::vector<int64_t> &samples) {
- // TODO allow buckets of variable resolution
- std::map<int, int> buckets;
- for (size_t i = 1; i < samples.size(); ++i) {
- ++buckets[deltaMs(samples[i - 1], samples[i])];
- }
- return buckets;
-}
-
static inline uint32_t log2(uint32_t x) {
// This works for x > 0
return 31 - __builtin_clz(x);
}
+// TODO create a subclass of Reader for this and related work
+void NBLog::Reader::reportPerformance(String8 *body,
+ const std::deque<std::pair
+ <int, short_histogram>> &shortHists,
+ int maxHeight) {
+ if (shortHists.size() < 1) {
+ return;
+ }
+ // this is temporary code, which only prints out one histogram
+ // of all data stored in buffer. The data is not erased, only overwritten.
+ // TODO: more elaborate data analysis
+ std::map<int, int> buckets;
+ for (const auto &shortHist: shortHists) {
+ for (const auto &countPair : shortHist.second) {
+ buckets[countPair.first] += countPair.second;
+ }
+ }
+
+ // underscores and spaces length corresponds to maximum width of histogram
+ static const int kLen = 40;
+ std::string underscores(kLen, '_');
+ std::string spaces(kLen, ' ');
+
+ auto it = buckets.begin();
+ int maxDelta = it->first;
+ int maxCount = it->second;
+ // Compute maximum values
+ while (++it != buckets.end()) {
+ if (it->first > maxDelta) {
+ maxDelta = it->first;
+ }
+ if (it->second > maxCount) {
+ maxCount = it->second;
+ }
+ }
+ int height = log2(maxCount) + 1; // maxCount > 0, safe to call log2
+ const int leftPadding = widthOf(1 << height);
+ const int colWidth = std::max(std::max(widthOf(maxDelta) + 1, 3), leftPadding + 2);
+ int scalingFactor = 1;
+ // scale data if it exceeds maximum height
+ if (height > maxHeight) {
+ scalingFactor = (height + maxHeight) / maxHeight;
+ height /= scalingFactor;
+ }
+ body->appendFormat("\n%*s", leftPadding + 11, "Occurrences");
+ // write histogram label line with bucket values
+ body->appendFormat("\n%s", " ");
+ body->appendFormat("%*s", leftPadding, " ");
+ for (auto const &x : buckets) {
+ body->appendFormat("%*d", colWidth, x.second);
+ }
+ // write histogram ascii art
+ body->appendFormat("\n%s", " ");
+ for (int row = height * scalingFactor; row >= 0; row -= scalingFactor) {
+ const int value = 1 << row;
+ body->appendFormat("%.*s", leftPadding, spaces.c_str());
+ for (auto const &x : buckets) {
+ body->appendFormat("%.*s%s", colWidth - 1, spaces.c_str(), x.second < value ? " " : "|");
+ }
+ body->appendFormat("\n%s", " ");
+ }
+ // print x-axis
+ const int columns = static_cast<int>(buckets.size());
+ body->appendFormat("%*c", leftPadding, ' ');
+ body->appendFormat("%.*s", (columns + 1) * colWidth, underscores.c_str());
+ body->appendFormat("\n%s", " ");
+
+ // write footer with bucket labels
+ body->appendFormat("%*s", leftPadding, " ");
+ for (auto const &x : buckets) {
+ body->appendFormat("%*d", colWidth, x.first);
+ }
+ body->appendFormat("%.*s%s", colWidth, spaces.c_str(), "ms\n");
+
+}
+
// TODO put this function in separate file. Make it return a std::string instead of modifying body
-/*
-Example output:
-[54.234] Histogram flush - AudioOut_D:
-Histogram 33640BF1
- [ 1][ 1][ 1][ 3][54][69][ 1][ 2][ 1]
- 64| []
- 32| [] []
- 16| [] []
- 8| [] []
- 4| [] []
- 2|______________[]__[]__[]______[]____
- 4 5 6 8 9 10 11 13 15
-Notice that all values that fall in the same row have the same height (65 and 127 are displayed
-identically). That's why exact counts are added at the top.
-*/
void NBLog::Reader::drawHistogram(String8 *body,
const std::vector<int64_t> &samples,
bool logScale,
@@ -1368,7 +1326,7 @@
// underscores and spaces length corresponds to maximum width of histogram
static const int kLen = 40;
- std::string underscores(kLen, '-');
+ std::string underscores(kLen, '_');
std::string spaces(kLen, ' ');
auto it = buckets.begin();
@@ -1384,7 +1342,7 @@
}
}
int height = logScale ? log2(maxCount) + 1 : maxCount; // maxCount > 0, safe to call log2
- const int leftPadding = widthOf(logScale ? pow(2, height) : maxCount);
+ const int leftPadding = widthOf(logScale ? 1 << height : maxCount);
const int colWidth = std::max(std::max(widthOf(maxDelta) + 1, 3), leftPadding + 2);
int scalingFactor = 1;
// scale data if it exceeds maximum height
diff --git a/media/libnbaio/include/NBLog.h b/media/libnbaio/include/NBLog.h
index 785b9c2..f3e6ca4 100644
--- a/media/libnbaio/include/NBLog.h
+++ b/media/libnbaio/include/NBLog.h
@@ -25,6 +25,7 @@
#include <utils/threads.h>
#include <map>
+#include <deque>
#include <set>
#include <vector>
@@ -457,6 +458,9 @@
bool isFindGlitch() const;
private:
+
+ static const int kShortHistSize = 50; // number of samples in a short-term histogram
+ static const int kRecentHistsCapacity = 100; // number of short-term histograms stored in memory
static const std::set<Event> startingTypes;
static const std::set<Event> endingTypes;
/*const*/ Shared* const mShared; // raw pointer to shared memory, actually const but not
@@ -469,10 +473,23 @@
audio_utils_fifo_reader * const mFifoReader; // used to read from FIFO,
// non-NULL unless constructor fails
+ // stores a short-term histogram of size determined by kShortHistSize
+ // TODO: unsigned, unsigned
+ using short_histogram = std::map<int, int>;
+
// each pair contains a sequence of timestamps (one histogram's worth)
// pair's log_hash_t is the hash of the source code location where the timestamp was taken
// pair's int points to the Reader that originated the entry
std::map<std::pair<log_hash_t, int>, std::vector<int64_t>> mHists;
+
+ // mTimeStampSeries stores timestamp vectors whose key is the reader thread index.
+ // TODO remove old mHists after changing the code
+ std::map<int, std::vector<int64_t>> mTimeStampSeries;
+
+ // stores fixed-size short buffer period histograms with hash and thread data
+ // TODO: Turn it into a circular buffer for better data flow
+ std::deque<std::pair<int, short_histogram>> mRecentHists;
+
// TODO: it might be clearer, instead of a direct map from source location to vector of
// timestamps, if we instead first mapped from source location to an object that
// represented that location. And one_of its fields would be a vector of timestamps.
@@ -488,6 +505,11 @@
static void drawHistogram(String8 *body, const std::vector<int64_t> &samples,
bool logScale, int indent = 0, int maxHeight = 10);
+ static void reportPerformance(String8 *body,
+ const std::deque<std::pair
+ <int, short_histogram>> &shortHists,
+ int maxHeight = 10);
+
// Searches for the last entry of type <type> in the range [front, back)
// back has to be entry-aligned. Returns nullptr if none enconuntered.
static const uint8_t *findLastEntryOfTypes(const uint8_t *front, const uint8_t *back,