Split nblog off from nbaio
nbaio is an acronym for "non-blocking audio I/O", and nblog means
"non-blocking logger", so nblog does not belong with nbaio.
Many improvements are planned for nblog, and doing this restructuring
now will keep things clearer as more files are added.
Test: builds OK
Change-Id: Ib28bada2566c1d64bdbe9f5d7a5ce40e080178ef
diff --git a/media/libnblog/Android.bp b/media/libnblog/Android.bp
new file mode 100644
index 0000000..74aaf77
--- /dev/null
+++ b/media/libnblog/Android.bp
@@ -0,0 +1,28 @@
+cc_library_shared {
+
+ name: "libnblog",
+
+ srcs: [
+ "NBLog.cpp",
+ "PerformanceAnalysis.cpp",
+ "ReportPerformance.cpp",
+ ],
+
+ shared_libs: [
+ "libaudioutils",
+ "libbinder",
+ "libcutils",
+ "liblog",
+ "libutils",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+
+ include_dirs: ["system/media/audio_utils/include"],
+
+ export_include_dirs: ["include"],
+
+}
diff --git a/media/libnblog/NBLog.cpp b/media/libnblog/NBLog.cpp
new file mode 100644
index 0000000..c8c7195
--- /dev/null
+++ b/media/libnblog/NBLog.cpp
@@ -0,0 +1,1156 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+#define LOG_TAG "NBLog"
+
+#include <algorithm>
+#include <climits>
+#include <deque>
+#include <fstream>
+#include <iostream>
+#include <math.h>
+#include <numeric>
+#include <vector>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <time.h>
+#include <new>
+#include <audio_utils/roundup.h>
+#include <media/nblog/NBLog.h>
+#include <media/nblog/PerformanceAnalysis.h>
+#include <media/nblog/ReportPerformance.h>
+#include <utils/CallStack.h>
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+#include <queue>
+#include <utility>
+
+namespace android {
+
+int NBLog::Entry::copyEntryDataAt(size_t offset) const
+{
+ // FIXME This is too slow
+ if (offset == 0)
+ return mEvent;
+ else if (offset == 1)
+ return mLength;
+ else if (offset < (size_t) (mLength + 2))
+ return ((char *) mData)[offset - 2];
+ else if (offset == (size_t) (mLength + 2))
+ return mLength;
+ else
+ return 0;
+}
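+// Illustrative sketch, kept as a comment and not part of this change: copyEntryDataAt()
+// serializes an entry as [type][length][data ...][length]. An EVENT_INTEGER entry holding
+// the value 7 on a little-endian build is therefore the 7-byte sequence
+//     03 04 07 00 00 00 04
+// i.e. type EVENT_INTEGER (3), length 4, the int payload, and a duplicate length byte that
+// allows reverse iteration. A standalone encoder for this layout (name invented for
+// explanation; requires <vector> and <cstdint>) could look like:
+//     std::vector<uint8_t> encodeEntry(uint8_t type, const uint8_t *data, uint8_t length) {
+//         std::vector<uint8_t> out{type, length};
+//         out.insert(out.end(), data, data + length);  // payload
+//         out.push_back(length);                       // duplicate length trailer
+//         return out;
+//     }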
+
+// ---------------------------------------------------------------------------
+
+/*static*/
+std::unique_ptr<NBLog::AbstractEntry> NBLog::AbstractEntry::buildEntry(const uint8_t *ptr) {
+ const uint8_t type = EntryIterator(ptr)->type;
+ switch (type) {
+ case EVENT_START_FMT:
+ return std::make_unique<FormatEntry>(FormatEntry(ptr));
+ case EVENT_AUDIO_STATE:
+ case EVENT_HISTOGRAM_ENTRY_TS:
+ return std::make_unique<HistogramEntry>(HistogramEntry(ptr));
+ default:
+ ALOGW("Tried to create AbstractEntry of type %d", type);
+ return nullptr;
+ }
+}
+
+NBLog::AbstractEntry::AbstractEntry(const uint8_t *entry) : mEntry(entry) {
+}
+
+// ---------------------------------------------------------------------------
+
+NBLog::EntryIterator NBLog::FormatEntry::begin() const {
+ return EntryIterator(mEntry);
+}
+
+const char *NBLog::FormatEntry::formatString() const {
+ return (const char*) mEntry + offsetof(entry, data);
+}
+
+size_t NBLog::FormatEntry::formatStringLength() const {
+ return mEntry[offsetof(entry, length)];
+}
+
+NBLog::EntryIterator NBLog::FormatEntry::args() const {
+ auto it = begin();
+ // skip start fmt
+ ++it;
+ // skip timestamp
+ ++it;
+ // skip hash
+ ++it;
+ // Skip author if present
+ if (it->type == EVENT_AUTHOR) {
+ ++it;
+ }
+ return it;
+}
+
+int64_t NBLog::FormatEntry::timestamp() const {
+ auto it = begin();
+ // skip start fmt
+ ++it;
+ return it.payload<int64_t>();
+}
+
+NBLog::log_hash_t NBLog::FormatEntry::hash() const {
+ auto it = begin();
+ // skip start fmt
+ ++it;
+ // skip timestamp
+ ++it;
+ // unaligned 64-bit read not supported
+ log_hash_t hash;
+ memcpy(&hash, it->data, sizeof(hash));
+ return hash;
+}
+
+int NBLog::FormatEntry::author() const {
+ auto it = begin();
+ // skip start fmt
+ ++it;
+ // skip timestamp
+ ++it;
+ // skip hash
+ ++it;
+ // if there is an author entry, return its value; otherwise return -1
+ if (it->type == EVENT_AUTHOR) {
+ return it.payload<int>();
+ }
+ return -1;
+}
+
+NBLog::EntryIterator NBLog::FormatEntry::copyWithAuthor(
+ std::unique_ptr<audio_utils_fifo_writer> &dst, int author) const {
+ auto it = begin();
+ // copy fmt start entry
+ it.copyTo(dst);
+ // copy timestamp
+ (++it).copyTo(dst);
+ // copy hash
+ (++it).copyTo(dst);
+ // insert author entry
+ size_t authorEntrySize = NBLog::Entry::kOverhead + sizeof(author);
+ uint8_t authorEntry[authorEntrySize];
+ authorEntry[offsetof(entry, type)] = EVENT_AUTHOR;
+ authorEntry[offsetof(entry, length)] =
+ authorEntry[authorEntrySize + NBLog::Entry::kPreviousLengthOffset] =
+ sizeof(author);
+ *(int*) (&authorEntry[offsetof(entry, data)]) = author;
+ dst->write(authorEntry, authorEntrySize);
+ // copy rest of entries
+ while ((++it)->type != EVENT_END_FMT) {
+ it.copyTo(dst);
+ }
+ it.copyTo(dst);
+ ++it;
+ return it;
+}
+
+void NBLog::EntryIterator::copyTo(std::unique_ptr<audio_utils_fifo_writer> &dst) const {
+ size_t length = ptr[offsetof(entry, length)] + NBLog::Entry::kOverhead;
+ dst->write(ptr, length);
+}
+
+void NBLog::EntryIterator::copyData(uint8_t *dst) const {
+ memcpy((void*) dst, ptr + offsetof(entry, data), ptr[offsetof(entry, length)]);
+}
+
+NBLog::EntryIterator::EntryIterator()
+ : ptr(nullptr) {}
+
+NBLog::EntryIterator::EntryIterator(const uint8_t *entry)
+ : ptr(entry) {}
+
+NBLog::EntryIterator::EntryIterator(const NBLog::EntryIterator &other)
+ : ptr(other.ptr) {}
+
+const NBLog::entry& NBLog::EntryIterator::operator*() const {
+ return *(entry*) ptr;
+}
+
+const NBLog::entry* NBLog::EntryIterator::operator->() const {
+ return (entry*) ptr;
+}
+
+NBLog::EntryIterator& NBLog::EntryIterator::operator++() {
+ ptr += ptr[offsetof(entry, length)] + NBLog::Entry::kOverhead;
+ return *this;
+}
+
+NBLog::EntryIterator& NBLog::EntryIterator::operator--() {
+ ptr -= ptr[NBLog::Entry::kPreviousLengthOffset] + NBLog::Entry::kOverhead;
+ return *this;
+}
+
+NBLog::EntryIterator NBLog::EntryIterator::next() const {
+ EntryIterator aux(*this);
+ return ++aux;
+}
+
+NBLog::EntryIterator NBLog::EntryIterator::prev() const {
+ EntryIterator aux(*this);
+ return --aux;
+}
+
+int NBLog::EntryIterator::operator-(const NBLog::EntryIterator &other) const {
+ return ptr - other.ptr;
+}
+
+bool NBLog::EntryIterator::operator!=(const EntryIterator &other) const {
+ return ptr != other.ptr;
+}
+
+bool NBLog::EntryIterator::hasConsistentLength() const {
+ return ptr[offsetof(entry, length)] == ptr[ptr[offsetof(entry, length)] +
+ NBLog::Entry::kOverhead + NBLog::Entry::kPreviousLengthOffset];
+}
+
+// ---------------------------------------------------------------------------
+
+int64_t NBLog::HistogramEntry::timestamp() const {
+ return EntryIterator(mEntry).payload<HistTsEntry>().ts;
+}
+
+NBLog::log_hash_t NBLog::HistogramEntry::hash() const {
+ return EntryIterator(mEntry).payload<HistTsEntry>().hash;
+}
+
+int NBLog::HistogramEntry::author() const {
+ EntryIterator it(mEntry);
+ if (it->length == sizeof(HistTsEntryWithAuthor)) {
+ return it.payload<HistTsEntryWithAuthor>().author;
+ } else {
+ return -1;
+ }
+}
+
+NBLog::EntryIterator NBLog::HistogramEntry::copyWithAuthor(
+ std::unique_ptr<audio_utils_fifo_writer> &dst, int author) const {
+ // Current histogram entry has {type, length, struct HistTsEntry, length}.
+ // We now want {type, length, struct HistTsEntryWithAuthor, length}
+ uint8_t buffer[Entry::kOverhead + sizeof(HistTsEntryWithAuthor)];
+ // Copy content until the point we want to add the author
+ memcpy(buffer, mEntry, sizeof(entry) + sizeof(HistTsEntry));
+ // Copy the author
+ *(int*) (buffer + sizeof(entry) + sizeof(HistTsEntry)) = author;
+ // Update lengths
+ buffer[offsetof(entry, length)] = sizeof(HistTsEntryWithAuthor);
+ buffer[sizeof(buffer) + Entry::kPreviousLengthOffset] = sizeof(HistTsEntryWithAuthor);
+ // Write new buffer into FIFO
+ dst->write(buffer, sizeof(buffer));
+ return EntryIterator(mEntry).next();
+}
+
+// ---------------------------------------------------------------------------
+
+#if 0 // FIXME see note in NBLog.h
+NBLog::Timeline::Timeline(size_t size, void *shared)
+ : mSize(roundup(size)), mOwn(shared == NULL),
+ mShared((Shared *) (mOwn ? new char[sharedSize(size)] : shared))
+{
+ new (mShared) Shared;
+}
+
+NBLog::Timeline::~Timeline()
+{
+ mShared->~Shared();
+ if (mOwn) {
+ delete[] (char *) mShared;
+ }
+}
+#endif
+
+/*static*/
+size_t NBLog::Timeline::sharedSize(size_t size)
+{
+ // TODO fifo now supports non-power-of-2 buffer sizes, so could remove the roundup
+ return sizeof(Shared) + roundup(size);
+}
+
+// ---------------------------------------------------------------------------
+
+NBLog::Writer::Writer()
+ : mShared(NULL), mFifo(NULL), mFifoWriter(NULL), mEnabled(false), mPidTag(NULL), mPidTagSize(0)
+{
+}
+
+NBLog::Writer::Writer(void *shared, size_t size)
+ : mShared((Shared *) shared),
+ mFifo(mShared != NULL ?
+ new audio_utils_fifo(size, sizeof(uint8_t),
+ mShared->mBuffer, mShared->mRear, NULL /*throttlesFront*/) : NULL),
+ mFifoWriter(mFifo != NULL ? new audio_utils_fifo_writer(*mFifo) : NULL),
+ mEnabled(mFifoWriter != NULL)
+{
+ // caching pid and process name
+ pid_t id = ::getpid();
+ char procName[16];
+ int status = prctl(PR_GET_NAME, procName);
+ if (status) { // error getting process name
+ procName[0] = '\0';
+ }
+ size_t length = strlen(procName);
+ mPidTagSize = length + sizeof(pid_t);
+ mPidTag = new char[mPidTagSize];
+ memcpy(mPidTag, &id, sizeof(pid_t));
+ memcpy(mPidTag + sizeof(pid_t), procName, length);
+}
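+// Sketch, kept as a comment: mPidTag is laid out as a raw pid_t immediately followed by
+// the process name with no NUL terminator, which is how appendPID() reads it back.
+// A standalone decoder (names invented; needs <cstring>, <string> and <utility>) could be:
+//     std::pair<pid_t, std::string> decodePidTag(const char *tag, size_t size) {
+//         pid_t pid;
+//         memcpy(&pid, tag, sizeof(pid_t));    // leading binary pid
+//         return {pid, std::string(tag + sizeof(pid_t), size - sizeof(pid_t))};
+//     }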
+
+NBLog::Writer::Writer(const sp<IMemory>& iMemory, size_t size)
+ : Writer(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL, size)
+{
+ mIMemory = iMemory;
+}
+
+NBLog::Writer::~Writer()
+{
+ delete mFifoWriter;
+ delete mFifo;
+ delete[] mPidTag;
+}
+
+void NBLog::Writer::log(const char *string)
+{
+ if (!mEnabled) {
+ return;
+ }
+ LOG_ALWAYS_FATAL_IF(string == NULL, "Attempted to log NULL string");
+ size_t length = strlen(string);
+ if (length > Entry::kMaxLength) {
+ length = Entry::kMaxLength;
+ }
+ log(EVENT_STRING, string, length);
+}
+
+void NBLog::Writer::logf(const char *fmt, ...)
+{
+ if (!mEnabled) {
+ return;
+ }
+ va_list ap;
+ va_start(ap, fmt);
+ Writer::logvf(fmt, ap); // the Writer:: is needed to avoid virtual dispatch for LockedWriter
+ va_end(ap);
+}
+
+void NBLog::Writer::logvf(const char *fmt, va_list ap)
+{
+ if (!mEnabled) {
+ return;
+ }
+ char buffer[Entry::kMaxLength + 1 /*NUL*/];
+ int length = vsnprintf(buffer, sizeof(buffer), fmt, ap);
+ if (length >= (int) sizeof(buffer)) {
+ length = sizeof(buffer) - 1;
+ // NUL termination is not required
+ // buffer[length] = '\0';
+ }
+ if (length >= 0) {
+ log(EVENT_STRING, buffer, length);
+ }
+}
+
+void NBLog::Writer::logTimestamp()
+{
+ if (!mEnabled) {
+ return;
+ }
+ int64_t ts = get_monotonic_ns();
+ if (ts > 0) {
+ log(EVENT_TIMESTAMP, &ts, sizeof(ts));
+ } else {
+ ALOGE("Failed to get timestamp");
+ }
+}
+
+void NBLog::Writer::logTimestamp(const int64_t ts)
+{
+ if (!mEnabled) {
+ return;
+ }
+ log(EVENT_TIMESTAMP, &ts, sizeof(ts));
+}
+
+void NBLog::Writer::logInteger(const int x)
+{
+ if (!mEnabled) {
+ return;
+ }
+ log(EVENT_INTEGER, &x, sizeof(x));
+}
+
+void NBLog::Writer::logFloat(const float x)
+{
+ if (!mEnabled) {
+ return;
+ }
+ log(EVENT_FLOAT, &x, sizeof(x));
+}
+
+void NBLog::Writer::logPID()
+{
+ if (!mEnabled) {
+ return;
+ }
+ log(EVENT_PID, mPidTag, mPidTagSize);
+}
+
+void NBLog::Writer::logStart(const char *fmt)
+{
+ if (!mEnabled) {
+ return;
+ }
+ size_t length = strlen(fmt);
+ if (length > Entry::kMaxLength) {
+ length = Entry::kMaxLength;
+ }
+ log(EVENT_START_FMT, fmt, length);
+}
+
+void NBLog::Writer::logEnd()
+{
+ if (!mEnabled) {
+ return;
+ }
+ Entry entry = Entry(EVENT_END_FMT, NULL, 0);
+ log(&entry, true);
+}
+
+void NBLog::Writer::logHash(log_hash_t hash)
+{
+ if (!mEnabled) {
+ return;
+ }
+ log(EVENT_HASH, &hash, sizeof(hash));
+}
+
+void NBLog::Writer::logEventHistTs(Event event, log_hash_t hash)
+{
+ if (!mEnabled) {
+ return;
+ }
+ HistTsEntry data;
+ data.hash = hash;
+ data.ts = get_monotonic_ns();
+ if (data.ts > 0) {
+ log(event, &data, sizeof(data));
+ } else {
+ ALOGE("Failed to get timestamp");
+ }
+}
+
+void NBLog::Writer::logFormat(const char *fmt, log_hash_t hash, ...)
+{
+ if (!mEnabled) {
+ return;
+ }
+
+ va_list ap;
+ va_start(ap, hash);
+ Writer::logVFormat(fmt, hash, ap);
+ va_end(ap);
+}
+
+void NBLog::Writer::logVFormat(const char *fmt, log_hash_t hash, va_list argp)
+{
+ if (!mEnabled) {
+ return;
+ }
+ Writer::logStart(fmt);
+ int i;
+ double f;
+ char* s;
+ int64_t t;
+ Writer::logTimestamp();
+ Writer::logHash(hash);
+ for (const char *p = fmt; *p != '\0'; p++) {
+ // TODO: implement more complex formatting such as %.3f
+ if (*p != '%') {
+ continue;
+ }
+ switch(*++p) {
+ case 's': // string
+ s = va_arg(argp, char *);
+ Writer::log(s);
+ break;
+
+ case 't': // timestamp
+ t = va_arg(argp, int64_t);
+ Writer::logTimestamp(t);
+ break;
+
+ case 'd': // integer
+ i = va_arg(argp, int);
+ Writer::logInteger(i);
+ break;
+
+ case 'f': // float
+ f = va_arg(argp, double); // float arguments are promoted to double in vararg lists
+ Writer::logFloat((float)f);
+ break;
+
+ case 'p': // pid
+ Writer::logPID();
+ break;
+
+ // the "%\0" case finishes parsing
+ case '\0':
+ --p;
+ break;
+
+ case '%':
+ break;
+
+ default:
+ ALOGW("NBLog Writer parsed invalid format specifier: %c", *p);
+ break;
+ }
+ }
+ Writer::logEnd();
+}
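+// Illustrative call site, kept as a comment; it is not part of this change and the hash
+// constant is invented (it normally derives from a file name and line number). %d, %s, %f,
+// %t and %p are encoded as EVENT_INTEGER, EVENT_STRING, EVENT_FLOAT, EVENT_TIMESTAMP and
+// EVENT_PID entries, bracketed by EVENT_START_FMT / EVENT_END_FMT:
+//     void example(NBLog::Writer &writer) {
+//         const NBLog::log_hash_t hash = 0xABCD0011ULL;
+//         writer.logFormat("wrote %d frames for %s in %f ms from %p", hash,
+//                 256, "FastMixer", 1.25f);
+//     }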
+
+void NBLog::Writer::log(Event event, const void *data, size_t length)
+{
+ if (!mEnabled) {
+ return;
+ }
+ if (data == NULL || length > Entry::kMaxLength) {
+ // TODO Perhaps it makes sense to display truncated data or at least a
+ // message that the data is too long? The current behavior can confuse
+ // a programmer debugging their code.
+ return;
+ }
+ // Ignore if invalid event
+ if (event == EVENT_RESERVED || event >= EVENT_UPPER_BOUND) {
+ return;
+ }
+ Entry etr(event, data, length);
+ log(&etr, true /*trusted*/);
+}
+
+void NBLog::Writer::log(const NBLog::Entry *etr, bool trusted)
+{
+ if (!mEnabled) {
+ return;
+ }
+ if (!trusted) {
+ log(etr->mEvent, etr->mData, etr->mLength);
+ return;
+ }
+ size_t need = etr->mLength + Entry::kOverhead; // mEvent, mLength, data[mLength], mLength
+ // need = number of bytes written to FIFO
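+ // e.g. an EVENT_FLOAT entry needs sizeof(float) + kOverhead = 4 + 3 = 7 bytes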
+
+ // FIXME optimize this using memcpy for the data part of the Entry.
+ // The Entry could have a method copyTo(ptr, offset, size) to optimize the copy.
+ // temp buffer is large enough for the largest single log Entry: type, length, data, ending
+ uint8_t temp[Entry::kMaxLength + Entry::kOverhead];
+ // write this data to temp array
+ for (size_t i = 0; i < need; i++) {
+ temp[i] = etr->copyEntryDataAt(i);
+ }
+ // write to circular buffer
+ mFifoWriter->write(temp, need);
+}
+
+bool NBLog::Writer::isEnabled() const
+{
+ return mEnabled;
+}
+
+bool NBLog::Writer::setEnabled(bool enabled)
+{
+ bool old = mEnabled;
+ mEnabled = enabled && mShared != NULL;
+ return old;
+}
+
+// ---------------------------------------------------------------------------
+
+NBLog::LockedWriter::LockedWriter()
+ : Writer()
+{
+}
+
+NBLog::LockedWriter::LockedWriter(void *shared, size_t size)
+ : Writer(shared, size)
+{
+}
+
+void NBLog::LockedWriter::log(const char *string)
+{
+ Mutex::Autolock _l(mLock);
+ Writer::log(string);
+}
+
+void NBLog::LockedWriter::logf(const char *fmt, ...)
+{
+ // FIXME should not take the lock until after formatting is done
+ Mutex::Autolock _l(mLock);
+ va_list ap;
+ va_start(ap, fmt);
+ Writer::logvf(fmt, ap);
+ va_end(ap);
+}
+
+void NBLog::LockedWriter::logvf(const char *fmt, va_list ap)
+{
+ // FIXME should not take the lock until after formatting is done
+ Mutex::Autolock _l(mLock);
+ Writer::logvf(fmt, ap);
+}
+
+void NBLog::LockedWriter::logTimestamp()
+{
+ // FIXME should not take the lock until after the clock_gettime() syscall
+ Mutex::Autolock _l(mLock);
+ Writer::logTimestamp();
+}
+
+void NBLog::LockedWriter::logTimestamp(const int64_t ts)
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logTimestamp(ts);
+}
+
+void NBLog::LockedWriter::logInteger(const int x)
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logInteger(x);
+}
+
+void NBLog::LockedWriter::logFloat(const float x)
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logFloat(x);
+}
+
+void NBLog::LockedWriter::logPID()
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logPID();
+}
+
+void NBLog::LockedWriter::logStart(const char *fmt)
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logStart(fmt);
+}
+
+
+void NBLog::LockedWriter::logEnd()
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logEnd();
+}
+
+void NBLog::LockedWriter::logHash(log_hash_t hash)
+{
+ Mutex::Autolock _l(mLock);
+ Writer::logHash(hash);
+}
+
+bool NBLog::LockedWriter::isEnabled() const
+{
+ Mutex::Autolock _l(mLock);
+ return Writer::isEnabled();
+}
+
+bool NBLog::LockedWriter::setEnabled(bool enabled)
+{
+ Mutex::Autolock _l(mLock);
+ return Writer::setEnabled(enabled);
+}
+
+// ---------------------------------------------------------------------------
+
+const std::set<NBLog::Event> NBLog::Reader::startingTypes {NBLog::Event::EVENT_START_FMT,
+ NBLog::Event::EVENT_HISTOGRAM_ENTRY_TS,
+ NBLog::Event::EVENT_AUDIO_STATE};
+const std::set<NBLog::Event> NBLog::Reader::endingTypes {NBLog::Event::EVENT_END_FMT,
+ NBLog::Event::EVENT_HISTOGRAM_ENTRY_TS,
+ NBLog::Event::EVENT_AUDIO_STATE};
+
+NBLog::Reader::Reader(const void *shared, size_t size)
+ : mFd(-1), mIndent(0), mLost(0),
+ mShared((/*const*/ Shared *) shared), /*mIMemory*/
+ mFifo(mShared != NULL ?
+ new audio_utils_fifo(size, sizeof(uint8_t),
+ mShared->mBuffer, mShared->mRear, NULL /*throttlesFront*/) : NULL),
+ mFifoReader(mFifo != NULL ? new audio_utils_fifo_reader(*mFifo) : NULL)
+{
+}
+
+NBLog::Reader::Reader(const sp<IMemory>& iMemory, size_t size)
+ : Reader(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL, size)
+{
+ mIMemory = iMemory;
+}
+
+NBLog::Reader::~Reader()
+{
+ delete mFifoReader;
+ delete mFifo;
+}
+
+const uint8_t *NBLog::Reader::findLastEntryOfTypes(const uint8_t *front, const uint8_t *back,
+ const std::set<Event> &types) {
+ while (back + Entry::kPreviousLengthOffset >= front) {
+ const uint8_t *prev = back - back[Entry::kPreviousLengthOffset] - Entry::kOverhead;
+ if (prev < front || prev + prev[offsetof(entry, length)] +
+ Entry::kOverhead != back) {
+
+ // prev points to an out of limits or inconsistent entry
+ return nullptr;
+ }
+ if (types.find((const Event) prev[offsetof(entry, type)]) != types.end()) {
+ return prev;
+ }
+ back = prev;
+ }
+ return nullptr; // no entry found
+}
+
+// Copies content of a Reader FIFO into its Snapshot
+// The Snapshot has the same raw data, but represented as a sequence of entries
+// and an EntryIterator making it possible to process the data.
+std::unique_ptr<NBLog::Reader::Snapshot> NBLog::Reader::getSnapshot()
+{
+ if (mFifoReader == NULL) {
+ return std::unique_ptr<NBLog::Reader::Snapshot>(new Snapshot());
+ }
+ // make a copy to avoid race condition with writer
+ size_t capacity = mFifo->capacity();
+
+ // This emulates the behaviour of audio_utils_fifo_reader::read, but without incrementing the
+ // reader index. The index is incremented only after handling corruption, so that it points
+ // just after the last complete entry of the buffer.
+ size_t lost;
+ audio_utils_iovec iovec[2];
+ ssize_t availToRead = mFifoReader->obtain(iovec, capacity, NULL /*timeout*/, &lost);
+ if (availToRead <= 0) {
+ return std::unique_ptr<NBLog::Reader::Snapshot>(new Snapshot());
+ }
+
+ std::unique_ptr<Snapshot> snapshot(new Snapshot(availToRead));
+ memcpy(snapshot->mData, (const char *) mFifo->buffer() + iovec[0].mOffset, iovec[0].mLength);
+ if (iovec[1].mLength > 0) {
+ memcpy(snapshot->mData + (iovec[0].mLength),
+ (const char *) mFifo->buffer() + iovec[1].mOffset, iovec[1].mLength);
+ }
+
+ // Handle corrupted buffer
+ // Potentially, a buffer has corrupted data at both the beginning (due to overflow) and the
+ // end (due to an incomplete format entry). But even if the last format entry is incomplete,
+ // the buffer still ends in a complete entry (which is just not an END_FMT), so it is safe
+ // to traverse backwards.
+ // TODO: handle client corruption (in the middle of a buffer)
+
+ const uint8_t *back = snapshot->mData + availToRead;
+ const uint8_t *front = snapshot->mData;
+
+ // Find last END_FMT. <back> is sitting on an entry which might be the middle of a FormatEntry.
+ // We go backwards until we find an EVENT_END_FMT.
+ const uint8_t *lastEnd = findLastEntryOfTypes(front, back, endingTypes);
+ if (lastEnd == nullptr) {
+ snapshot->mEnd = snapshot->mBegin = EntryIterator(front);
+ } else {
+ // end of snapshot points to after last END_FMT entry
+ snapshot->mEnd = EntryIterator(lastEnd).next();
+ // find first START_FMT
+ const uint8_t *firstStart = nullptr;
+ const uint8_t *firstStartTmp = snapshot->mEnd;
+ while ((firstStartTmp = findLastEntryOfTypes(front, firstStartTmp, startingTypes))
+ != nullptr) {
+ firstStart = firstStartTmp;
+ }
+ // firstStart is null if no START_FMT entry was found before lastEnd
+ if (firstStart == nullptr) {
+ snapshot->mBegin = snapshot->mEnd;
+ } else {
+ snapshot->mBegin = EntryIterator(firstStart);
+ }
+ }
+
+ // advance fifo reader index to after last entry read.
+ mFifoReader->release(snapshot->mEnd - front);
+
+ snapshot->mLost = lost;
+ return snapshot;
+
+}
+
+// Takes the raw content of the local merger FIFO, processes the log entries, and
+// writes the data to a map of PerformanceAnalysis objects, keyed by thread ID.
+void NBLog::MergeReader::getAndProcessSnapshot(NBLog::Reader::Snapshot &snapshot)
+{
+ String8 timestamp, body;
+
+ for (auto entry = snapshot.begin(); entry != snapshot.end();) {
+ switch (entry->type) {
+ case EVENT_START_FMT:
+ entry = handleFormat(FormatEntry(entry), ×tamp, &body);
+ break;
+ case EVENT_HISTOGRAM_ENTRY_TS: {
+ HistTsEntryWithAuthor *data = (HistTsEntryWithAuthor *) (entry->data);
+ // TODO These memcpy calls are here to avoid an unaligned memory access crash.
+ // There's probably a more efficient way to do it.
+ log_hash_t hash;
+ memcpy(&hash, &(data->hash), sizeof(hash));
+ int64_t ts;
+ memcpy(&ts, &data->ts, sizeof(ts));
+ // TODO: hash for histogram ts and audio state need to match
+ // and correspond to audio production source file location
+ mThreadPerformanceAnalysis[data->author][0 /*hash*/].logTsEntry(ts);
+ ++entry;
+ break;
+ }
+ case EVENT_AUDIO_STATE: {
+ HistTsEntryWithAuthor *data = (HistTsEntryWithAuthor *) (entry->data);
+ // TODO These memcpy calls are here to avoid an unaligned memory access crash.
+ // There's probably a more efficient way to do it.
+ log_hash_t hash;
+ memcpy(&hash, &(data->hash), sizeof(hash));
+ // TODO: remove ts if unused
+ int64_t ts;
+ memcpy(&ts, &data->ts, sizeof(ts));
+ mThreadPerformanceAnalysis[data->author][0 /*hash*/].handleStateChange();
+ ++entry;
+ break;
+ }
+ case EVENT_END_FMT:
+ body.appendFormat("warning: got to end format event");
+ ++entry;
+ break;
+ case EVENT_RESERVED:
+ default:
+ body.appendFormat("warning: unexpected event %d", entry->type);
+ ++entry;
+ break;
+ }
+ }
+ // FIXME: decide whether to print the warnings here or elsewhere
+ if (!body.isEmpty()) {
+ dumpLine(timestamp, body);
+ }
+}
+
+void NBLog::MergeReader::getAndProcessSnapshot()
+{
+ // get a snapshot, process it
+ std::unique_ptr<Snapshot> snap = getSnapshot();
+ getAndProcessSnapshot(*snap);
+}
+
+void NBLog::MergeReader::dump(int fd, int indent) {
+ // TODO: add a mutex around media.log dump
+ ReportPerformance::dump(fd, indent, mThreadPerformanceAnalysis);
+}
+
+// Writes a timestamp and body to the file descriptor if one is set, otherwise to the system log
+void NBLog::Reader::dumpLine(const String8 ×tamp, String8 &body)
+{
+ if (mFd >= 0) {
+ dprintf(mFd, "%.*s%s %s\n", mIndent, "", timestamp.string(), body.string());
+ } else {
+ ALOGI("%.*s%s %s", mIndent, "", timestamp.string(), body.string());
+ }
+ body.clear();
+}
+
+bool NBLog::Reader::isIMemory(const sp<IMemory>& iMemory) const
+{
+ return iMemory != 0 && mIMemory != 0 && iMemory->pointer() == mIMemory->pointer();
+}
+
+// ---------------------------------------------------------------------------
+
+void NBLog::appendTimestamp(String8 *body, const void *data) {
+ int64_t ts;
+ memcpy(&ts, data, sizeof(ts));
+ body->appendFormat("[%d.%03d]", (int) (ts / (1000 * 1000 * 1000)),
+ (int) ((ts / (1000 * 1000)) % 1000));
+}
+
+void NBLog::appendInt(String8 *body, const void *data) {
+ int x = *((int*) data);
+ body->appendFormat("<%d>", x);
+}
+
+void NBLog::appendFloat(String8 *body, const void *data) {
+ float f;
+ memcpy(&f, data, sizeof(float));
+ body->appendFormat("<%f>", f);
+}
+
+void NBLog::appendPID(String8 *body, const void* data, size_t length) {
+ pid_t id = *((pid_t*) data);
+ char * name = &((char*) data)[sizeof(pid_t)];
+ body->appendFormat("<PID: %d, name: %.*s>", id, (int) (length - sizeof(pid_t)), name);
+}
+
+String8 NBLog::bufferDump(const uint8_t *buffer, size_t size)
+{
+ String8 str;
+ str.append("[ ");
+ for(size_t i = 0; i < size; i++)
+ {
+ str.appendFormat("%d ", buffer[i]);
+ }
+ str.append("]");
+ return str;
+}
+
+String8 NBLog::bufferDump(const EntryIterator &it)
+{
+ return bufferDump(it, it->length + Entry::kOverhead);
+}
+
+NBLog::EntryIterator NBLog::Reader::handleFormat(const FormatEntry &fmtEntry,
+ String8 *timestamp,
+ String8 *body) {
+ // log timestamp
+ int64_t ts = fmtEntry.timestamp();
+ timestamp->clear();
+ timestamp->appendFormat("[%d.%03d]", (int) (ts / (1000 * 1000 * 1000)),
+ (int) ((ts / (1000 * 1000)) % 1000));
+
+ // log unique hash
+ log_hash_t hash = fmtEntry.hash();
+ // print only the lower 16 bits of the hash as hex and the line number as int to reduce log spam
+ body->appendFormat("%.4X-%d ", (int)(hash >> 16) & 0xFFFF, (int) hash & 0xFFFF);
+
+ // log author (if present)
+ handleAuthor(fmtEntry, body);
+
+ // log string
+ NBLog::EntryIterator arg = fmtEntry.args();
+
+ const char* fmt = fmtEntry.formatString();
+ size_t fmt_length = fmtEntry.formatStringLength();
+
+ for (size_t fmt_offset = 0; fmt_offset < fmt_length; ++fmt_offset) {
+ if (fmt[fmt_offset] != '%') {
+ body->append(&fmt[fmt_offset], 1); // TODO optimize to write consecutive strings at once
+ continue;
+ }
+ // case "%%""
+ if (fmt[++fmt_offset] == '%') {
+ body->append("%");
+ continue;
+ }
+ // case "%\0"
+ if (fmt_offset == fmt_length) {
+ continue;
+ }
+
+ NBLog::Event event = (NBLog::Event) arg->type;
+ size_t length = arg->length;
+
+ // TODO check length for event type is correct
+
+ if (event == EVENT_END_FMT) {
+ break;
+ }
+
+ // TODO: implement more complex formatting such as %.3f
+ const uint8_t *datum = arg->data; // pointer to the current event args
+ switch(fmt[fmt_offset])
+ {
+ case 's': // string
+ ALOGW_IF(event != EVENT_STRING,
+ "NBLog Reader incompatible event for string specifier: %d", event);
+ body->append((const char*) datum, length);
+ break;
+
+ case 't': // timestamp
+ ALOGW_IF(event != EVENT_TIMESTAMP,
+ "NBLog Reader incompatible event for timestamp specifier: %d", event);
+ appendTimestamp(body, datum);
+ break;
+
+ case 'd': // integer
+ ALOGW_IF(event != EVENT_INTEGER,
+ "NBLog Reader incompatible event for integer specifier: %d", event);
+ appendInt(body, datum);
+ break;
+
+ case 'f': // float
+ ALOGW_IF(event != EVENT_FLOAT,
+ "NBLog Reader incompatible event for float specifier: %d", event);
+ appendFloat(body, datum);
+ break;
+
+ case 'p': // pid
+ ALOGW_IF(event != EVENT_PID,
+ "NBLog Reader incompatible event for pid specifier: %d", event);
+ appendPID(body, datum, length);
+ break;
+
+ default:
+ ALOGW("NBLog Reader encountered unknown character %c", fmt[fmt_offset]);
+ }
+ ++arg;
+ }
+ ALOGW_IF(arg->type != EVENT_END_FMT, "Expected end of format, got %d", arg->type);
+ ++arg;
+ return arg;
+}
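+// Illustrative example with invented values: a writer-side call such as
+//     writer.logFormat("wrote %d frames in %f ms", hash, 256, 1.25f);
+// is rendered here with the hash printed as hex/int, the author name prepended by
+// handleAuthor(), and each argument bracketed by appendInt()/appendFloat(), e.g.
+//     ABCD-17 FastMixer: wrote <256> frames in <1.250000> ms
+// while the timestamp is formatted separately into *timestamp.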
+
+NBLog::Merger::Merger(const void *shared, size_t size):
+ mShared((Shared *) shared),
+ mFifo(mShared != NULL ?
+ new audio_utils_fifo(size, sizeof(uint8_t),
+ mShared->mBuffer, mShared->mRear, NULL /*throttlesFront*/) : NULL),
+ mFifoWriter(mFifo != NULL ? new audio_utils_fifo_writer(*mFifo) : NULL)
+ {}
+
+void NBLog::Merger::addReader(const NBLog::NamedReader &reader) {
+
+ // FIXME This is called by binder thread in MediaLogService::registerWriter
+ // but the access to shared variable mNamedReaders is not yet protected by a lock.
+ mNamedReaders.push_back(reader);
+}
+
+// items placed in the priority queue during merge,
+// composed of a timestamp and the index of the snapshot the timestamp came from
+struct MergeItem
+{
+ int64_t ts;
+ int index;
+ MergeItem(int64_t ts, int index): ts(ts), index(index) {}
+};
+
+// operators needed for priority queue in merge
+// bool operator>(const int64_t &t1, const int64_t &t2) {
+// return t1.tv_sec > t2.tv_sec || (t1.tv_sec == t2.tv_sec && t1.tv_nsec > t2.tv_nsec);
+// }
+
+bool operator>(const struct MergeItem &i1, const struct MergeItem &i2) {
+ return i1.ts > i2.ts || (i1.ts == i2.ts && i1.index > i2.index);
+}
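+// Note: combined with std::greater<MergeItem>, the priority_queue in merge() below acts as
+// a min-heap ordered by (ts, index), so top() always yields the earliest pending entry
+// across all reader snapshots.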
+
+// Merge registered readers, sorted by timestamp, and write data to a single FIFO in local memory
+void NBLog::Merger::merge() {
+ // FIXME This is called by merge thread
+ // but the access to shared variable mNamedReaders is not yet protected by a lock.
+ int nLogs = mNamedReaders.size();
+ std::vector<std::unique_ptr<NBLog::Reader::Snapshot>> snapshots(nLogs);
+ std::vector<NBLog::EntryIterator> offsets(nLogs);
+ for (int i = 0; i < nLogs; ++i) {
+ snapshots[i] = mNamedReaders[i].reader()->getSnapshot();
+ offsets[i] = snapshots[i]->begin();
+ }
+ // initialize the priority queue with the first entry of each snapshot
+ // TODO a custom heap implementation could allow updating the top element in place,
+ // improving performance for bursty buffers
+ std::priority_queue<MergeItem, std::vector<MergeItem>, std::greater<MergeItem>> timestamps;
+ for (int i = 0; i < nLogs; ++i)
+ {
+ if (offsets[i] != snapshots[i]->end()) {
+ int64_t ts = AbstractEntry::buildEntry(offsets[i])->timestamp();
+ timestamps.emplace(ts, i);
+ }
+ }
+
+ while (!timestamps.empty()) {
+ // find minimum timestamp
+ int index = timestamps.top().index;
+ // copy it to the log, increasing offset
+ offsets[index] = AbstractEntry::buildEntry(offsets[index])->copyWithAuthor(mFifoWriter,
+ index);
+ // update data structures
+ timestamps.pop();
+ if (offsets[index] != snapshots[index]->end()) {
+ int64_t ts = AbstractEntry::buildEntry(offsets[index])->timestamp();
+ timestamps.emplace(ts, index);
+ }
+ }
+}
+
+const std::vector<NBLog::NamedReader>& NBLog::Merger::getNamedReaders() const {
+ // FIXME This is returning a reference to a shared variable that needs a lock
+ return mNamedReaders;
+}
+
+// ---------------------------------------------------------------------------
+
+NBLog::MergeReader::MergeReader(const void *shared, size_t size, Merger &merger)
+ : Reader(shared, size), mNamedReaders(merger.getNamedReaders()) {}
+
+void NBLog::MergeReader::handleAuthor(const NBLog::AbstractEntry &entry, String8 *body) {
+ int author = entry.author();
+ // FIXME Needs a lock
+ const char* name = mNamedReaders[author].name();
+ body->appendFormat("%s: ", name);
+}
+
+// ---------------------------------------------------------------------------
+
+NBLog::MergeThread::MergeThread(NBLog::Merger &merger, NBLog::MergeReader &mergeReader)
+ : mMerger(merger),
+ mMergeReader(mergeReader),
+ mTimeoutUs(0) {}
+
+NBLog::MergeThread::~MergeThread() {
+ // set exit flag, set timeout to 0 to force threadLoop to exit and wait for the thread to join
+ requestExit();
+ setTimeoutUs(0);
+ join();
+}
+
+bool NBLog::MergeThread::threadLoop() {
+ bool doMerge;
+ {
+ AutoMutex _l(mMutex);
+ // If mTimeoutUs is not positive, wait on the condition variable until it becomes positive.
+ // If it is positive, wait kThreadSleepPeriodUs and then merge.
+ nsecs_t waitTime = mTimeoutUs > 0 ? kThreadSleepPeriodUs * 1000 : LLONG_MAX;
+ mCond.waitRelative(mMutex, waitTime);
+ doMerge = mTimeoutUs > 0;
+ mTimeoutUs -= kThreadSleepPeriodUs;
+ }
+ if (doMerge) {
+ // Merge data from all the readers
+ mMerger.merge();
+ // Process the data collected by mMerger and write it to PerformanceAnalysis
+ // FIXME: decide whether to call getAndProcessSnapshot every time
+ // or whether to have a separate thread that calls it with a lower frequency
+ mMergeReader.getAndProcessSnapshot();
+ }
+ return true;
+}
+
+void NBLog::MergeThread::wakeup() {
+ setTimeoutUs(kThreadWakeupPeriodUs);
+}
+
+void NBLog::MergeThread::setTimeoutUs(int time) {
+ AutoMutex _l(mMutex);
+ mTimeoutUs = time;
+ mCond.signal();
+}
+
+} // namespace android
diff --git a/media/libnblog/PerformanceAnalysis.cpp b/media/libnblog/PerformanceAnalysis.cpp
new file mode 100644
index 0000000..478c460
--- /dev/null
+++ b/media/libnblog/PerformanceAnalysis.cpp
@@ -0,0 +1,377 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#define LOG_TAG "PerformanceAnalysis"
+// #define LOG_NDEBUG 0
+
+#include <algorithm>
+#include <climits>
+#include <deque>
+#include <iostream>
+#include <math.h>
+#include <numeric>
+#include <vector>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <time.h>
+#include <new>
+#include <audio_utils/roundup.h>
+#include <media/nblog/NBLog.h>
+#include <media/nblog/PerformanceAnalysis.h>
+#include <media/nblog/ReportPerformance.h>
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+#include <queue>
+#include <utility>
+
+namespace android {
+
+namespace ReportPerformance {
+
+// Given an audio processing wakeup timestamp, buckets the time interval
+// since the previous timestamp into a histogram, searches for
+// outliers, analyzes the outlier series for unexpectedly
+// small or large values and stores these as peaks
+void PerformanceAnalysis::logTsEntry(timestamp ts) {
+ // after a state change, start a new series and do not
+ // record time intervals in-between
+ if (mBufferPeriod.mPrevTs == 0) {
+ mBufferPeriod.mPrevTs = ts;
+ return;
+ }
+
+ // calculate time interval between current and previous timestamp
+ const msInterval diffMs = static_cast<msInterval>(
+ deltaMs(mBufferPeriod.mPrevTs, ts));
+
+ const int diffJiffy = deltaJiffy(mBufferPeriod.mPrevTs, ts);
+
+ // old versus new weight ratio when updating the buffer period mean
+ static constexpr double exponentialWeight = 0.999;
+ // update buffer period mean with exponential weighting
+ mBufferPeriod.mMean = (mBufferPeriod.mMean < 0) ? diffMs :
+ exponentialWeight * mBufferPeriod.mMean + (1.0 - exponentialWeight) * diffMs;
+ // set mOutlierFactor to a smaller value for the fastmixer thread
+ const int kFastMixerMax = 10;
+ // NormalMixer times vary much more than FastMixer times.
+ // TODO: mOutlierFactor values are set empirically based on what appears to be
+ // an outlier. Learn these values from the data.
+ mBufferPeriod.mOutlierFactor = mBufferPeriod.mMean < kFastMixerMax ? 1.8 : 2.0;
+ // set outlier threshold
+ mBufferPeriod.mOutlier = mBufferPeriod.mMean * mBufferPeriod.mOutlierFactor;
+
+ // Check whether the time interval between the current timestamp
+ // and the previous one is long enough to count as an outlier
+ const bool isOutlier = detectAndStoreOutlier(diffMs);
+ // If an outlier was found, check whether it was a peak
+ if (isOutlier) {
+ /*bool isPeak =*/ detectAndStorePeak(
+ mOutlierData[0].first, mOutlierData[0].second);
+ // TODO: decide whether to insert a new empty histogram if a peak
+ // occurred at the current timestamp
+ // TODO: remove isPeak if unused to avoid "unused variable" error
+ }
+
+ // Insert a histogram into mHists if mHists is empty, or
+ // close the current histogram and insert a new empty one
+ // if the current histogram has spanned its maximum time interval.
+ if (mHists.empty() ||
+ deltaMs(mHists[0].first, ts) >= kMaxLength.HistTimespanMs) {
+ mHists.emplace_front(ts, std::map<int, int>());
+ // When memory is full, delete oldest histogram
+ // TODO: use a circular buffer
+ if (mHists.size() >= kMaxLength.Hists) {
+ mHists.resize(kMaxLength.Hists);
+ }
+ }
+ // add current time intervals to histogram
+ ++mHists[0].second[diffJiffy];
+ // update previous timestamp
+ mBufferPeriod.mPrevTs = ts;
+}
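+// Standalone sketch, kept as a comment, of the exponentially weighted mean update used
+// above, with the same 0.999 weight; the helper name and numbers are illustrative:
+//     double updateMean(double mean, double diffMs) {
+//         constexpr double kWeight = 0.999;
+//         return mean < 0 ? diffMs    // first sample seeds the mean
+//                         : kWeight * mean + (1.0 - kWeight) * diffMs;
+//     }
+// e.g. mean = 4.0 ms and diffMs = 20 ms give a new mean of ~4.016 ms, so a single
+// outlier barely moves the estimate.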
+
+
+// resets the previous timestamp so that an idle audio time interval is not
+// added to the buffer period data
+void PerformanceAnalysis::handleStateChange() {
+ mBufferPeriod.mPrevTs = 0;
+ return;
+}
+
+
+// Checks whether the time interval between two outliers is far enough from
+// a typical delta to be considered a peak.
+// It looks for changes in the distribution (peaks), which can be either positive or negative.
+// The function sets the mean to the starting value and sigma to 0, and updates
+// them as long as no peak is detected. When a value is more than 'threshold'
+// standard deviations from the mean, a peak is detected and the mean and sigma
+// are set to the peak value and 0.
+bool PerformanceAnalysis::detectAndStorePeak(msInterval diff, timestamp ts) {
+ bool isPeak = false;
+ if (mOutlierData.empty()) {
+ return false;
+ }
+ // Update mean of the distribution
+ // TypicalDiff is used to check whether a value is unusually large
+ // when we cannot use standard deviations from the mean because the sd is set to 0.
+ mOutlierDistribution.mTypicalDiff = (mOutlierDistribution.mTypicalDiff *
+ (mOutlierData.size() - 1) + diff) / mOutlierData.size();
+
+ // Initialize short-term mean at start of program
+ if (mOutlierDistribution.mMean == 0) {
+ mOutlierDistribution.mMean = diff;
+ }
+ // Update length of current sequence of outliers
+ mOutlierDistribution.mN++;
+
+ // Check whether a large deviation from the mean occurred.
+ // If the standard deviation has been reset to zero, the comparison is
+ // instead to the mean of the full mOutlierInterval sequence.
+ if ((fabs(diff - mOutlierDistribution.mMean) <
+ mOutlierDistribution.kMaxDeviation * mOutlierDistribution.mSd) ||
+ (mOutlierDistribution.mSd == 0 &&
+ fabs(diff - mOutlierDistribution.mMean) <
+ mOutlierDistribution.mTypicalDiff)) {
+ // update the mean and sd using online algorithm
+ // https://en.wikipedia.org/wiki/
+ // Algorithms_for_calculating_variance#Online_algorithm
+ mOutlierDistribution.mN++;
+ const double kDelta = diff - mOutlierDistribution.mMean;
+ mOutlierDistribution.mMean += kDelta / mOutlierDistribution.mN;
+ const double kDelta2 = diff - mOutlierDistribution.mMean;
+ mOutlierDistribution.mM2 += kDelta * kDelta2;
+ mOutlierDistribution.mSd = (mOutlierDistribution.mN < 2) ? 0 :
+ sqrt(mOutlierDistribution.mM2 / (mOutlierDistribution.mN - 1));
+ } else {
+ // new value is far from the mean:
+ // store peak timestamp and reset mean, sd, and short-term sequence
+ isPeak = true;
+ mPeakTimestamps.emplace_front(ts);
+ // if mPeaks has reached capacity, delete oldest data
+ // Note: this means that mOutlierDistribution values do not exactly
+ // match the data we have in mPeakTimestamps, but this is not an issue
+ // in practice for estimating future peaks.
+ // TODO: turn this into a circular buffer
+ if (mPeakTimestamps.size() >= kMaxLength.Peaks) {
+ mPeakTimestamps.resize(kMaxLength.Peaks);
+ }
+ mOutlierDistribution.mMean = 0;
+ mOutlierDistribution.mSd = 0;
+ mOutlierDistribution.mN = 0;
+ mOutlierDistribution.mM2 = 0;
+ }
+ return isPeak;
+}
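+// Minimal standalone version, kept as a comment, of the online (Welford) mean/variance
+// update referenced above; the struct name is invented and <math.h> provides sqrt():
+//     struct OnlineStats {
+//         double mean = 0, m2 = 0;
+//         long n = 0;
+//         void add(double x) {
+//             ++n;
+//             const double delta = x - mean;
+//             mean += delta / n;
+//             m2 += delta * (x - mean);    // uses the updated mean
+//         }
+//         double sd() const { return n < 2 ? 0 : sqrt(m2 / (n - 1)); }
+//     };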
+
+
+// Determines whether the difference between a timestamp and the previous
+// one is beyond a threshold. If yes, stores the timestamp as an outlier
+// and writes to mOutlierData in the following format:
+// Time elapsed since previous outlier: Timestamp of start of outlier
+// e.g. timestamps (ms) 1, 4, 5, 16, 18, 28 will produce pairs (4, 5), (13, 18).
+// TODO: learn what timestamp sequences correlate with glitches instead of
+// manually designing a heuristic.
+bool PerformanceAnalysis::detectAndStoreOutlier(const msInterval diffMs) {
+ bool isOutlier = false;
+ if (diffMs >= mBufferPeriod.mOutlier) {
+ isOutlier = true;
+ mOutlierData.emplace_front(
+ mOutlierDistribution.mElapsed, mBufferPeriod.mPrevTs);
+ // Remove oldest value if the vector is full
+ // TODO: turn this into a circular buffer
+ // TODO: make sure kShortHistSize is large enough that the data will never be lost
+ // before being written to file or to a FIFO
+ if (mOutlierData.size() >= kMaxLength.Outliers) {
+ mOutlierData.resize(kMaxLength.Outliers);
+ }
+ mOutlierDistribution.mElapsed = 0;
+ }
+ mOutlierDistribution.mElapsed += diffMs;
+ return isOutlier;
+}
+
+static int widthOf(int x) {
+ int width = 0;
+ if (x < 0) {
+ width++;
+ x = x == INT_MIN ? INT_MAX : -x;
+ }
+ // assert (x >= 0)
+ do {
+ ++width;
+ x /= 10;
+ } while (x > 0);
+ return width;
+}
+
+// computes the column width required for a specific histogram value
+inline int numberWidth(double number, int leftPadding) {
+ // Added values account for whitespaces needed around numbers, and for the
+ // dot and decimal digit not accounted for by widthOf
+ return std::max(std::max(widthOf(static_cast<int>(number)) + 3, 2), leftPadding + 1);
+}
+
+// rounds value to precision based on log-distance from mean
+inline double logRound(double x, double mean) {
+ // Larger values decrease range of high resolution and prevent overflow
+ // of a histogram on the console.
+ // The following formula adjusts kBase based on the buffer period length.
+ // Different threads have buffer periods ranging from 2 to 40. The
+ // formula below maps buffer period 2 to kBase = ~1, 4 to ~2, 20 to ~3, 40 to ~4.
+ // TODO: tighten this for higher means, the data still overflows
+ const double kBase = log(mean) / log(2.2);
+ const double power = floor(
+ log(abs(x - mean) / mean) / log(kBase)) + 2;
+ // do not round values close to the mean
+ if (power < 1) {
+ return x;
+ }
+ const int factor = static_cast<int>(pow(10, power));
+ return (static_cast<int>(x) * factor) / factor;
+}
+
+// TODO Make it return a std::string instead of modifying body
+// TODO: move this to ReportPerformance, probably make it a friend function
+// of PerformanceAnalysis
+void PerformanceAnalysis::reportPerformance(String8 *body, int author, log_hash_t hash,
+ int maxHeight) {
+ if (mHists.empty()) {
+ return;
+ }
+
+ // ms of active audio in displayed histogram
+ double elapsedMs = 0;
+ // starting timestamp of histogram
+ timestamp startingTs = mHists[0].first;
+
+ // histogram which stores counts keyed by ms values (0.1 ms precision) instead of jiffy multiples
+ std::map<double, int> buckets;
+ for (const auto &shortHist: mHists) {
+ for (const auto &countPair : shortHist.second) {
+ const double ms = static_cast<double>(countPair.first) / kJiffyPerMs;
+ buckets[logRound(ms, mBufferPeriod.mMean)] += countPair.second;
+ elapsedMs += ms * countPair.second;
+ }
+ }
+
+ // underscores and spaces length corresponds to maximum width of histogram
+ static const int kLen = 200;
+ std::string underscores(kLen, '_');
+ std::string spaces(kLen, ' ');
+
+ auto it = buckets.begin();
+ double maxDelta = it->first;
+ int maxCount = it->second;
+ // Compute maximum values
+ while (++it != buckets.end()) {
+ if (it->first > maxDelta) {
+ maxDelta = it->first;
+ }
+ if (it->second > maxCount) {
+ maxCount = it->second;
+ }
+ }
+ int height = log2(maxCount) + 1; // maxCount > 0, safe to call log2
+ const int leftPadding = widthOf(1 << height);
+ const int bucketWidth = numberWidth(maxDelta, leftPadding);
+ int scalingFactor = 1;
+ // scale data if it exceeds maximum height
+ if (height > maxHeight) {
+ scalingFactor = (height + maxHeight) / maxHeight;
+ height /= scalingFactor;
+ }
+ body->appendFormat("\n%*s %3.2f %s", leftPadding + 11,
+ "Occurrences in", (elapsedMs / kMsPerSec), "seconds of audio:");
+ body->appendFormat("\n%*s%d, %lld, %lld\n", leftPadding + 11,
+ "Thread, hash, starting timestamp: ", author,
+ static_cast<long long int>(hash), static_cast<long long int>(startingTs));
+ // write histogram label line with bucket values
+ body->appendFormat("\n%s", " ");
+ body->appendFormat("%*s", leftPadding, " ");
+ for (auto const &x : buckets) {
+ const int colWidth = numberWidth(x.first, leftPadding);
+ body->appendFormat("%*d", colWidth, x.second);
+ }
+ // write histogram ascii art
+ body->appendFormat("\n%s", " ");
+ for (int row = height * scalingFactor; row >= 0; row -= scalingFactor) {
+ const int value = 1 << row;
+ body->appendFormat("%.*s", leftPadding, spaces.c_str());
+ for (auto const &x : buckets) {
+ const int colWidth = numberWidth(x.first, leftPadding);
+ body->appendFormat("%.*s%s", colWidth - 1,
+ spaces.c_str(), x.second < value ? " " : "|");
+ }
+ body->appendFormat("\n%s", " ");
+ }
+ // print x-axis
+ const int columns = static_cast<int>(buckets.size());
+ body->appendFormat("%*c", leftPadding, ' ');
+ body->appendFormat("%.*s", (columns + 1) * bucketWidth, underscores.c_str());
+ body->appendFormat("\n%s", " ");
+
+ // write footer with bucket labels
+ body->appendFormat("%*s", leftPadding, " ");
+ for (auto const &x : buckets) {
+ const int colWidth = numberWidth(x.first, leftPadding);
+ body->appendFormat("%*.*f", colWidth, 1, x.first);
+ }
+ body->appendFormat("%.*s%s", bucketWidth, spaces.c_str(), "ms\n");
+
+ // Now report glitches
+ body->appendFormat("\ntime elapsed between glitches and glitch timestamps:\n");
+ for (const auto &outlier: mOutlierData) {
+ body->appendFormat("%lld: %lld\n", static_cast<long long>(outlier.first),
+ static_cast<long long>(outlier.second));
+ }
+}
+
+//------------------------------------------------------------------------------
+
+// writes summary of performance into specified file descriptor
+void dump(int fd, int indent, PerformanceAnalysisMap &threadPerformanceAnalysis) {
+ String8 body;
+ const char* const kDirectory = "/data/misc/audioserver/";
+ for (auto & thread : threadPerformanceAnalysis) {
+ for (auto & hash: thread.second) {
+ PerformanceAnalysis& curr = hash.second;
+ // write performance data to console
+ curr.reportPerformance(&body, thread.first, hash.first);
+ if (!body.isEmpty()) {
+ dumpLine(fd, indent, body);
+ body.clear();
+ }
+ // write to file
+ writeToFile(curr.mHists, curr.mOutlierData, curr.mPeakTimestamps,
+ kDirectory, false, thread.first, hash.first);
+ }
+ }
+}
+
+
+// Writes a string into specified file descriptor
+void dumpLine(int fd, int indent, const String8 &body) {
+ dprintf(fd, "%.*s%s \n", indent, "", body.string());
+}
+
+} // namespace ReportPerformance
+
+} // namespace android
diff --git a/media/libnblog/ReportPerformance.cpp b/media/libnblog/ReportPerformance.cpp
new file mode 100644
index 0000000..827e731
--- /dev/null
+++ b/media/libnblog/ReportPerformance.cpp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "ReportPerformance"
+
+#include <fstream>
+#include <iostream>
+#include <queue>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sstream>
+#include <sys/prctl.h>
+#include <sys/time.h>
+#include <utility>
+#include <media/nblog/NBLog.h>
+#include <media/nblog/PerformanceAnalysis.h>
+#include <media/nblog/ReportPerformance.h>
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+namespace android {
+
+namespace ReportPerformance {
+
+
+// TODO: use a function like this to extract logic from writeToFile
+// https://stackoverflow.com/a/9279620
+
+// Writes outlier intervals, timestamps, and histograms spanning long time intervals to file.
+// TODO: write data in binary format
+void writeToFile(const std::deque<std::pair<timestamp, Histogram>> &hists,
+ const std::deque<std::pair<msInterval, timestamp>> &outlierData,
+ const std::deque<timestamp> &peakTimestamps,
+ const char * directory, bool append, int author, log_hash_t hash) {
+
+ // TODO: remove old files, implement rotating files as in AudioFlinger.cpp
+
+ if (outlierData.empty() && hists.empty() && peakTimestamps.empty()) {
+ ALOGW("No data, returning.");
+ return;
+ }
+
+ std::stringstream outlierName;
+ std::stringstream histogramName;
+ std::stringstream peakName;
+
+ // get current time
+ char currTime[16]; //YYYYMMDDHHMMSS + '\0' + one unused
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ struct tm tm;
+ localtime_r(&tv.tv_sec, &tm);
+ strftime(currTime, sizeof(currTime), "%Y%m%d%H%M%S", &tm);
+
+ // generate file names
+ std::stringstream common;
+ common << author << "_" << hash << "_" << currTime << ".csv";
+
+ histogramName << directory << "histograms_" << common.str();
+ outlierName << directory << "outliers_" << common.str();
+ peakName << directory << "peaks_" << common.str();
+
+ std::ofstream hfs;
+ hfs.open(histogramName.str(), append ? std::ios::app : std::ios::trunc);
+ if (!hfs.is_open()) {
+ ALOGW("couldn't open file %s", histogramName.str().c_str());
+ return;
+ }
+ // each histogram is written as a line where the first value is the timestamp and
+ // subsequent values are pairs of buckets and counts. Each value is separated
+ // by a comma, and each histogram is separated by a newline.
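+ // e.g. (values illustrative) a histogram started at monotonic time 123456789 ns with
+ // 40 periods in the 2.25 ms bucket and 7 periods in the 2.5 ms bucket is written as:
+ //     123456789, 2.25, 40, 2.5, 7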
+ for (auto hist = hists.begin(); hist != hists.end(); ++hist) {
+ hfs << hist->first << ", ";
+ for (auto bucket = hist->second.begin(); bucket != hist->second.end(); ++bucket) {
+ hfs << bucket->first / static_cast<double>(kJiffyPerMs)
+ << ", " << bucket->second;
+ if (std::next(bucket) != end(hist->second)) {
+ hfs << ", ";
+ }
+ }
+ if (std::next(hist) != end(hists)) {
+ hfs << "\n";
+ }
+ }
+ hfs.close();
+
+ std::ofstream ofs;
+ ofs.open(outlierName.str(), append ? std::ios::app : std::ios::trunc);
+ if (!ofs.is_open()) {
+ ALOGW("couldn't open file %s", outlierName.str().c_str());
+ return;
+ }
+ // outliers are written as pairs separated by newlines, where each
+ // pair's values are separated by a comma
+ for (const auto &outlier : outlierData) {
+ ofs << outlier.first << ", " << outlier.second << "\n";
+ }
+ ofs.close();
+
+ std::ofstream pfs;
+ pfs.open(peakName.str(), append ? std::ios::app : std::ios::trunc);
+ if (!pfs.is_open()) {
+ ALOGW("couldn't open file %s", peakName.str().c_str());
+ return;
+ }
+ // peaks are simply timestamps separated by commas
+ for (auto peak = peakTimestamps.begin(); peak != peakTimestamps.end(); ++peak) {
+ pfs << *peak;
+ if (std::next(peak) != end(peakTimestamps)) {
+ pfs << ", ";
+ }
+ }
+ pfs.close();
+}
+
+} // namespace ReportPerformance
+
+} // namespace android
diff --git a/media/libnblog/include/media/nblog/NBLog.h b/media/libnblog/include/media/nblog/NBLog.h
new file mode 100644
index 0000000..ebb88f0
--- /dev/null
+++ b/media/libnblog/include/media/nblog/NBLog.h
@@ -0,0 +1,613 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Non-blocking event logger intended for safe communication between processes via shared memory
+
+#ifndef ANDROID_MEDIA_NBLOG_H
+#define ANDROID_MEDIA_NBLOG_H
+
+#include <deque>
+#include <map>
+#include <set>
+#include <vector>
+
+#include <audio_utils/fifo.h>
+#include <binder/IMemory.h>
+#include <media/nblog/PerformanceAnalysis.h>
+#include <media/nblog/ReportPerformance.h>
+#include <utils/Mutex.h>
+#include <utils/threads.h>
+
+namespace android {
+
+class String8;
+
+class NBLog {
+
+public:
+
+ using log_hash_t = ReportPerformance::log_hash_t;
+
+ // FIXME Everything needed for client (writer API and registration) should be isolated
+ // from the rest of the implementation.
+ class Writer;
+ class Reader;
+
+ enum Event : uint8_t {
+ EVENT_RESERVED,
+ EVENT_STRING, // ASCII string, not NUL-terminated
+ // TODO: make timestamp optional
+ EVENT_TIMESTAMP, // clock_gettime(CLOCK_MONOTONIC)
+ EVENT_INTEGER, // integer value entry
+ EVENT_FLOAT, // floating point value entry
+ EVENT_PID, // process ID and process name
+ EVENT_AUTHOR, // author index (present in merged logs) tracks entry's
+ // original log
+ EVENT_START_FMT, // logFormat start event: entry includes format string,
+ // following entries contain format arguments
+ EVENT_HASH, // unique HASH of log origin, originates from hash of file name
+ // and line number
+ EVENT_HISTOGRAM_ENTRY_TS, // single datum for timestamp histogram
+ EVENT_AUDIO_STATE, // audio on/off event: logged on FastMixer::onStateChange call
+ EVENT_END_FMT, // end of logFormat argument list
+
+ EVENT_UPPER_BOUND, // to check for invalid events
+ };
+
+private:
+
+ // ---------------------------------------------------------------------------
+ // API for handling format entry operations
+
+ // a formatted entry has the following structure:
+ // * START_FMT entry, containing the format string
+ // * TIMESTAMP entry
+ // * HASH entry
+ // * author entry of the thread that generated it (optional, present in merged log)
+ // * format arg1
+ // * format arg2
+ // * ...
+ // * END_FMT entry
+
+ // entry representation in memory
+ struct entry {
+ const uint8_t type;
+ const uint8_t length;
+ const uint8_t data[0];
+ };
+
+ // entry tail representation (after data)
+ struct ending {
+ uint8_t length;
+ uint8_t next[0];
+ };
+
+ // entry iterator
+ class EntryIterator {
+ public:
+ EntryIterator();
+ explicit EntryIterator(const uint8_t *entry);
+ EntryIterator(const EntryIterator &other);
+
+ // dereference underlying entry
+ const entry& operator*() const;
+ const entry* operator->() const;
+ // advance to next entry
+ EntryIterator& operator++(); // ++i
+ // back to previous entry
+ EntryIterator& operator--(); // --i
+ EntryIterator next() const;
+ EntryIterator prev() const;
+ bool operator!=(const EntryIterator &other) const;
+ int operator-(const EntryIterator &other) const;
+
+ bool hasConsistentLength() const;
+ void copyTo(std::unique_ptr<audio_utils_fifo_writer> &dst) const;
+ void copyData(uint8_t *dst) const;
+
+ template<typename T>
+ inline const T& payload() {
+ return *reinterpret_cast<const T *>(ptr + offsetof(entry, data));
+ }
+
+ inline operator const uint8_t*() const {
+ return ptr;
+ }
+
+ private:
+ const uint8_t *ptr;
+ };
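+ // Illustrative usage, kept as a comment: entries are walked via their length fields,
+ // for example over a Snapshot's range (process() is an invented placeholder):
+ //     for (auto it = snapshot.begin(); it != snapshot.end(); ++it) {
+ //         process(it->type, it->length, it->data);
+ //     }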
+
+ class AbstractEntry {
+ public:
+
+ // Entry starting in the given pointer
+ explicit AbstractEntry(const uint8_t *entry);
+ virtual ~AbstractEntry() {}
+
+ // build concrete entry of appropriate class from pointer
+ static std::unique_ptr<AbstractEntry> buildEntry(const uint8_t *ptr);
+
+ // get format entry timestamp
+ virtual int64_t timestamp() const = 0;
+
+ // get format entry's unique id
+ virtual log_hash_t hash() const = 0;
+
+ // entry's author index (-1 if none present)
+ // a Merger has a vector of Readers, author simply points to the index of the
+ // Reader that originated the entry
+ // TODO consider changing to uint32_t
+ virtual int author() const = 0;
+
+ // copy entry, adding the author after the hash; returns an iterator to the end of the entry
+ virtual EntryIterator copyWithAuthor(std::unique_ptr<audio_utils_fifo_writer> &dst,
+ int author) const = 0;
+
+ protected:
+ // copies ordinary entry from src to dst, and returns length of entry
+ // size_t copyEntry(audio_utils_fifo_writer *dst, const iterator &it);
+ const uint8_t *mEntry;
+ };
+
+ class FormatEntry : public AbstractEntry {
+ public:
+ // explicit FormatEntry(const EntryIterator &it);
+ explicit FormatEntry(const uint8_t *ptr) : AbstractEntry(ptr) {}
+ virtual ~FormatEntry() {}
+
+ EntryIterator begin() const;
+
+ // Entry's format string
+ const char* formatString() const;
+
+ // Entry's format string length
+ size_t formatStringLength() const;
+
+ // Format arguments (excluding format string, timestamp and author)
+ EntryIterator args() const;
+
+ // get format entry timestamp
+ virtual int64_t timestamp() const override;
+
+ // get format entry's unique id
+ virtual log_hash_t hash() const override;
+
+ // entry's author index (-1 if none present)
+ // a Merger has a vector of Readers; author is the index of the Reader
+ // that originated the entry
+ virtual int author() const override;
+
+ // copy entry, adding author before timestamp; returns iterator to end of original entry
+ virtual EntryIterator copyWithAuthor(std::unique_ptr<audio_utils_fifo_writer> &dst,
+ int author) const override;
+
+ };
+
+ class HistogramEntry : public AbstractEntry {
+ public:
+ explicit HistogramEntry(const uint8_t *ptr) : AbstractEntry(ptr) {
+ }
+ virtual ~HistogramEntry() {}
+
+ virtual int64_t timestamp() const override;
+
+ virtual log_hash_t hash() const override;
+
+ virtual int author() const override;
+
+ virtual EntryIterator copyWithAuthor(std::unique_ptr<audio_utils_fifo_writer> &dst,
+ int author) const override;
+
+ };
+
+ // ---------------------------------------------------------------------------
+
+ // representation of a single log entry in private memory
+ struct Entry {
+ Entry(Event event, const void *data, size_t length)
+ : mEvent(event), mLength(length), mData(data) { }
+ /*virtual*/ ~Entry() { }
+
+ // used during writing to format Entry information as follows:
+ // [type][length][data ... ][length]
+ int copyEntryDataAt(size_t offset) const;
+
+ private:
+ friend class Writer;
+ Event mEvent; // event type
+ uint8_t mLength; // length of additional data, 0 <= mLength <= kMaxLength
+ const void *mData; // event type-specific data
+ static const size_t kMaxLength = 255;
+ public:
+ // mEvent, mLength, mData[...], duplicate mLength
+ static const size_t kOverhead = sizeof(entry) + sizeof(ending);
+ // offset (relative to this entry) of the previous entry's ending length field
+ static const size_t kPreviousLengthOffset = - sizeof(ending) +
+ offsetof(ending, length);
+ };
+
+ struct HistTsEntry {
+ log_hash_t hash;
+ int64_t ts;
+ }; //TODO __attribute__((packed));
+
+ struct HistTsEntryWithAuthor {
+ log_hash_t hash;
+ int64_t ts;
+ int author;
+ }; //TODO __attribute__((packed));
+
+ struct HistIntEntry {
+ log_hash_t hash;
+ int value;
+ }; //TODO __attribute__((packed));
+
+ // representation of a single log entry in shared memory
+ // byte[0] mEvent
+ // byte[1] mLength
+ // byte[2] mData[0]
+ // ...
+ // byte[2+i] mData[i]
+ // ...
+ // byte[2+mLength-1] mData[mLength-1]
+ // byte[2+mLength] duplicate copy of mLength to permit reverse scan
+ // byte[3+mLength] start of next log entry
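+ //
+ // For instance (illustrative only), an EVENT_INTEGER entry carrying the
+ // 4-byte value 7 would occupy kOverhead + 4 == 7 bytes:
+ //   [EVENT_INTEGER][4][7,0,0,0][4]
+ // assuming sizeof(int) == 4 and a little-endian layout.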
+
+ static void appendInt(String8 *body, const void *data);
+ static void appendFloat(String8 *body, const void *data);
+ static void appendPID(String8 *body, const void *data, size_t length);
+ static void appendTimestamp(String8 *body, const void *data);
+ static size_t fmtEntryLength(const uint8_t *data);
+ static String8 bufferDump(const uint8_t *buffer, size_t size);
+ static String8 bufferDump(const EntryIterator &it);
+public:
+
+ // Located in shared memory, must be POD.
+ // Exactly one process must explicitly call the constructor or use placement new.
+ // Since this is a POD, the destructor is empty and need not be called explicitly.
+ struct Shared {
+ Shared() /* mRear initialized via default constructor */ { }
+ /*virtual*/ ~Shared() { }
+
+ audio_utils_fifo_index mRear; // index one byte past the end of most recent Entry
+ char mBuffer[0]; // circular buffer for entries
+ };
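+
+ // A minimal sketch (not part of this change) of setting up Shared, assuming
+ // `mem` points to at least Timeline::sharedSize(kSize) bytes of shared memory:
+ //   NBLog::Shared *shared = new (mem) NBLog::Shared(); // placement new, once
+ //   sp<NBLog::Writer> writer = new NBLog::Writer(shared, kSize);
+ // `mem` and `kSize` are hypothetical.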
+
+public:
+
+ // ---------------------------------------------------------------------------
+
+ // FIXME Timeline was intended to wrap Writer and Reader, but isn't actually used yet.
+ // For now it is just a namespace for sharedSize().
+ class Timeline : public RefBase {
+ public:
+#if 0
+ Timeline(size_t size, void *shared = NULL);
+ virtual ~Timeline();
+#endif
+
+ // Input parameter 'size' is the desired size of the timeline in byte units.
+ // Returns the size rounded up to a power-of-2, plus the constant size overhead for indices.
+ static size_t sharedSize(size_t size);
+
+#if 0
+ private:
+ friend class Writer;
+ friend class Reader;
+
+ const size_t mSize; // circular buffer size in bytes, must be a power of 2
+ bool mOwn; // whether I own the memory at mShared
+ Shared* const mShared; // pointer to shared memory
+#endif
+ };
+
+ // ---------------------------------------------------------------------------
+
+ // Writer is thread-safe with respect to Reader, but not with respect to multiple threads
+ // calling Writer methods. If you need multi-thread safety for writing, use LockedWriter.
+ class Writer : public RefBase {
+ public:
+ Writer(); // dummy nop implementation without shared memory
+
+ // Input parameter 'size' is the desired size of the timeline in byte units.
+ // The size of the shared memory must be at least Timeline::sharedSize(size).
+ Writer(void *shared, size_t size);
+ Writer(const sp<IMemory>& iMemory, size_t size);
+
+ virtual ~Writer();
+
+ // FIXME needs comments, and some should be private
+ virtual void log(const char *string);
+ virtual void logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
+ virtual void logvf(const char *fmt, va_list ap);
+ virtual void logTimestamp();
+ virtual void logTimestamp(const int64_t ts);
+ virtual void logInteger(const int x);
+ virtual void logFloat(const float x);
+ virtual void logPID();
+ virtual void logFormat(const char *fmt, log_hash_t hash, ...);
+ virtual void logVFormat(const char *fmt, log_hash_t hash, va_list ap);
+ virtual void logStart(const char *fmt);
+ virtual void logEnd();
+ virtual void logHash(log_hash_t hash);
+ virtual void logEventHistTs(Event event, log_hash_t hash);
+
+ virtual bool isEnabled() const;
+
+ // return value for all of these is the previous isEnabled()
+ virtual bool setEnabled(bool enabled); // but won't enable if no shared memory
+ bool enable() { return setEnabled(true); }
+ bool disable() { return setEnabled(false); }
+
+ sp<IMemory> getIMemory() const { return mIMemory; }
+
+ private:
+ // 0 <= length <= kMaxLength
+ // writes a single Entry to the FIFO
+ void log(Event event, const void *data, size_t length);
+ // checks validity of an event before calling the private log() overload above
+ void log(const Entry *entry, bool trusted = false);
+
+ Shared* const mShared; // raw pointer to shared memory
+ sp<IMemory> mIMemory; // ref-counted version, initialized in constructor
+ // and then const
+ audio_utils_fifo * const mFifo; // FIFO itself, non-NULL
+ // unless constructor fails
+ audio_utils_fifo_writer * const mFifoWriter; // used to write to FIFO, non-NULL
+ // unless dummy constructor used
+ bool mEnabled; // whether to actually log
+
+ // cached pid and process name to use in %p format specifier
+ // total tag length is mPidTagSize and process name is not zero terminated
+ char *mPidTag;
+ size_t mPidTagSize;
+ };
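+
+ // Typical use of Writer (illustrative sketch, not part of this change):
+ //   writer->logTimestamp();                  // EVENT_TIMESTAMP
+ //   writer->logf("underruns=%d", n);         // formatted ASCII string
+ //   writer->logFormat("cycle %d", hash, n);  // START_FMT ... END_FMT sequence
+ // `writer`, `hash`, and `n` are hypothetical; `hash` is a log_hash_t
+ // identifying the call site.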
+
+ // ---------------------------------------------------------------------------
+
+ // Similar to Writer, but safe for multiple threads to call concurrently
+ class LockedWriter : public Writer {
+ public:
+ LockedWriter();
+ LockedWriter(void *shared, size_t size);
+
+ virtual void log(const char *string);
+ virtual void logf(const char *fmt, ...) __attribute__ ((format (printf, 2, 3)));
+ virtual void logvf(const char *fmt, va_list ap);
+ virtual void logTimestamp();
+ virtual void logTimestamp(const int64_t ts);
+ virtual void logInteger(const int x);
+ virtual void logFloat(const float x);
+ virtual void logPID();
+ virtual void logStart(const char *fmt);
+ virtual void logEnd();
+ virtual void logHash(log_hash_t hash);
+
+ virtual bool isEnabled() const;
+ virtual bool setEnabled(bool enabled);
+
+ private:
+ mutable Mutex mLock;
+ };
+
+ // ---------------------------------------------------------------------------
+
+ class Reader : public RefBase {
+ public:
+ // A snapshot of a reader's buffer
+ // This is raw data; no analysis has been done on it
+ class Snapshot {
+ public:
+ Snapshot() : mData(NULL), mLost(0) {}
+
+ Snapshot(size_t bufferSize) : mData(new uint8_t[bufferSize]), mLost(0) {}
+
+ ~Snapshot() { delete[] mData; }
+
+ // copy of the buffer
+ uint8_t *data() const { return mData; }
+
+ // amount of data lost (given by audio_utils_fifo_reader)
+ size_t lost() const { return mLost; }
+
+ // iterator to beginning of readable segment of snapshot
+ // data between begin and end has valid entries
+ EntryIterator begin() { return mBegin; }
+
+ // iterator to end of readable segment of snapshot
+ EntryIterator end() { return mEnd; }
+
+ private:
+ friend class MergeReader;
+ friend class Reader;
+ uint8_t *mData;
+ size_t mLost;
+ EntryIterator mBegin;
+ EntryIterator mEnd;
+ };
+
+ // Input parameter 'size' is the desired size of the timeline in byte units.
+ // The size of the shared memory must be at least Timeline::sharedSize(size).
+ Reader(const void *shared, size_t size);
+ Reader(const sp<IMemory>& iMemory, size_t size);
+
+ virtual ~Reader();
+
+ // get snapshot of the reader's FIFO buffer, effectively consuming the buffer
+ std::unique_ptr<Snapshot> getSnapshot();
+
+ bool isIMemory(const sp<IMemory>& iMemory) const;
+
+ protected:
+ // print a summary of the performance to the console
+ void dumpLine(const String8& timestamp, String8& body);
+ EntryIterator handleFormat(const FormatEntry &fmtEntry,
+ String8 *timestamp,
+ String8 *body);
+ int mFd; // file descriptor
+ int mIndent; // indentation level
+ int mLost; // bytes of data lost before buffer was read
+
+ private:
+ static const std::set<Event> startingTypes;
+ static const std::set<Event> endingTypes;
+
+ sp<IMemory> mIMemory; // ref-counted version, assigned only in constructor
+
+ /*const*/ Shared* const mShared; // raw pointer to shared memory; actually const, but not
+ // declared as const because of the audio_utils_fifo() constructor
+ audio_utils_fifo * const mFifo; // FIFO itself,
+ // non-NULL unless constructor fails
+ audio_utils_fifo_reader * const mFifoReader; // used to read from FIFO,
+ // non-NULL unless constructor fails
+
+ // Searches for the last entry of one of the given types in the range [front, back).
+ // back has to be entry-aligned. Returns nullptr if none is encountered.
+ static const uint8_t *findLastEntryOfTypes(const uint8_t *front, const uint8_t *back,
+ const std::set<Event> &types);
+
+ // dummy method for handling absent author entry
+ virtual void handleAuthor(const AbstractEntry& /*fmtEntry*/, String8* /*body*/) {}
+ };
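+
+ // Illustrative sketch (not part of this change) of consuming a reader's buffer:
+ //   std::unique_ptr<NBLog::Reader::Snapshot> snap = reader->getSnapshot();
+ //   for (auto it = snap->begin(); it != snap->end(); ++it) {
+ //       // each *it is one entry; snap->lost() reports how much data was lost
+ //   }
+ // `reader` is a hypothetical sp<NBLog::Reader>.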
+
+ // Wrapper for a reader with a name. Contains a pointer to the reader and a copy of the name
+ class NamedReader {
+ public:
+ NamedReader() { mName[0] = '\0'; } // for Vector
+ NamedReader(const sp<NBLog::Reader>& reader, const char *name) :
+ mReader(reader)
+ { strlcpy(mName, name, sizeof(mName)); }
+ ~NamedReader() { }
+ const sp<NBLog::Reader>& reader() const { return mReader; }
+ const char* name() const { return mName; }
+
+ private:
+ sp<NBLog::Reader> mReader;
+ static const size_t kMaxName = 32;
+ char mName[kMaxName];
+ };
+
+ // ---------------------------------------------------------------------------
+
+ // This class is used to read data from each thread's individual FIFO in shared memory
+ // and write it to a single FIFO in local memory.
+ class Merger : public RefBase {
+ public:
+ Merger(const void *shared, size_t size);
+
+ virtual ~Merger() {}
+
+ void addReader(const NamedReader &reader);
+ // TODO add removeReader
+ void merge();
+
+ // FIXME This is returning a reference to a shared variable that needs a lock
+ const std::vector<NamedReader>& getNamedReaders() const;
+
+ private:
+ // vector of the readers the merger merges from;
+ // every reader reads from one writer's buffer
+ // FIXME Needs to be protected by a lock
+ std::vector<NamedReader> mNamedReaders;
+
+ Shared * const mShared; // raw pointer to shared memory
+ std::unique_ptr<audio_utils_fifo> mFifo; // FIFO itself
+ std::unique_ptr<audio_utils_fifo_writer> mFifoWriter; // used to write to FIFO
+ };
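+
+ // Typical use of Merger (illustrative sketch, not part of this change):
+ //   merger.addReader(NBLog::NamedReader(reader, "FastMixer")); // register once
+ //   ...
+ //   merger.merge(); // called periodically; copies new entries from each
+ //                   // reader's shared-memory FIFO into the local FIFO
+ // `merger` and `reader` are hypothetical; "FastMixer" is just an example name.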
+
+ // This class has a pointer to the FIFO in local memory which stores the merged
+ // data collected by NBLog::Merger from all NamedReaders. It is used to process
+ // this data and write the result to PerformanceAnalysis.
+ class MergeReader : public Reader {
+ public:
+ MergeReader(const void *shared, size_t size, Merger &merger);
+
+ void dump(int fd, int indent = 0);
+ // process a particular snapshot of the reader
+ void getAndProcessSnapshot(Snapshot & snap);
+ // call getSnapshot of the content of the reader's buffer and process the data
+ void getAndProcessSnapshot();
+
+ private:
+ // FIXME Needs to be protected by a lock,
+ // because even though our use of it is read-only there may be asynchronous updates
+ const std::vector<NamedReader>& mNamedReaders;
+
+ // analyzes, compresses and stores the merged data
+ // contains a separate instance for every author (thread), and for every source file
+ // location within each author
+ ReportPerformance::PerformanceAnalysisMap mThreadPerformanceAnalysis;
+
+ // handle author entry by looking up the author's name and appending it to the body
+ void handleAuthor(const AbstractEntry &fmtEntry, String8 *body);
+ };
+
+ // MergeThread is a thread that contains a Merger. It works as a retriggerable one-shot:
+ // when triggered, it stays awake for a period of time, during which it merges periodically;
+ // if retriggered, the timeout is reset.
+ // The thread is triggered on AudioFlinger binder activity.
+ class MergeThread : public Thread {
+ public:
+ MergeThread(Merger &merger, MergeReader &mergeReader);
+ virtual ~MergeThread() override;
+
+ // Reset timeout and activate thread to merge periodically if it's idle
+ void wakeup();
+
+ // Set timeout period until the merging thread goes idle again
+ void setTimeoutUs(int time);
+
+ private:
+ virtual bool threadLoop() override;
+
+ // the Merger that actually does the work of merging the logs
+ Merger& mMerger;
+
+ // the MergeReader used to process data merged by mMerger
+ MergeReader& mMergeReader;
+
+ // mutex for the condition variable
+ Mutex mMutex;
+
+ // condition variable to activate merging on timeout >= 0
+ Condition mCond;
+
+ // time left until the thread blocks again (in microseconds)
+ int mTimeoutUs;
+
+ // merging period when the thread is awake
+ static const int kThreadSleepPeriodUs = 1000000 /*1s*/;
+
+ // initial timeout value when triggered
+ static const int kThreadWakeupPeriodUs = 3000000 /*3s*/;
+ };
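+
+ // A rough sketch of the intended timing (the actual threadLoop() lives in
+ // NBLog.cpp and may differ):
+ //   wakeup(): set mTimeoutUs = kThreadWakeupPeriodUs (retrigger);
+ //   while mTimeoutUs > 0: merge every kThreadSleepPeriodUs and decrement
+ //     mTimeoutUs by the same amount;
+ //   otherwise: block on mCond until the next wakeup().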
+
+}; // class NBLog
+
+// TODO put somewhere else
+static inline int64_t get_monotonic_ns() {
+ timespec ts;
+ if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
+ return (int64_t) ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec;
+ }
+ return 0; // should not happen.
+}
+
+} // namespace android
+
+#endif // ANDROID_MEDIA_NBLOG_H
diff --git a/media/libnblog/include/media/nblog/PerformanceAnalysis.h b/media/libnblog/include/media/nblog/PerformanceAnalysis.h
new file mode 100644
index 0000000..ddfe9d6
--- /dev/null
+++ b/media/libnblog/include/media/nblog/PerformanceAnalysis.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_PERFORMANCEANALYSIS_H
+#define ANDROID_MEDIA_PERFORMANCEANALYSIS_H
+
+#include <deque>
+#include <map>
+#include <vector>
+
+#include <media/nblog/ReportPerformance.h>
+
+namespace android {
+
+namespace ReportPerformance {
+
+class PerformanceAnalysis;
+
+// a map of PerformanceAnalysis instances
+// The outer key is for the thread, the inner key for the source file location.
+using PerformanceAnalysisMap = std::map<int, std::map<log_hash_t, PerformanceAnalysis>>;
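+// For example (illustrative only), the wakeup-timestamp analysis for thread
+// (author) `a` at source location `h` would be reached as
+//   threadPerformanceAnalysis[a][h].logTsEntry(ts);
+// where `threadPerformanceAnalysis`, `a`, `h`, and `ts` are hypothetical names.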
+
+class PerformanceAnalysis {
+ // This class stores and analyzes audio processing wakeup timestamps from NBLog
+ // FIXME: currently, all performance data is stored in deques. Turn these into circular
+ // buffers.
+ // TODO: add a mutex.
+public:
+
+ PerformanceAnalysis() {}
+
+ friend void dump(int fd, int indent,
+ PerformanceAnalysisMap &threadPerformanceAnalysis);
+
+ // Called in the case of an audio on/off event, e.g., EVENT_AUDIO_STATE.
+ // Used to discard idle time intervals
+ void handleStateChange();
+
+ // Writes wakeup timestamp entry to log and runs analysis
+ void logTsEntry(timestamp ts);
+
+ // FIXME: make detectAndStorePeak and detectAndStoreOutlier a single function
+ // Input: mOutlierData. Looks at the time elapsed between outliers,
+ // finds significant changes in the distribution,
+ // and writes the timestamps of significant changes to mPeakTimestamps
+ bool detectAndStorePeak(msInterval delta, timestamp ts);
+
+ // stores timestamps of intervals above a threshold; these are treated as outliers.
+ // writes <time elapsed since previous outlier, outlier timestamp> to mOutlierData
+ bool detectAndStoreOutlier(const msInterval diffMs);
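+ // A rough sketch of the intended check (the implementation lives in
+ // PerformanceAnalysis.cpp and may differ): an interval diffMs counts as an
+ // outlier roughly when
+ //   diffMs > mBufferPeriod.mMean * mBufferPeriod.mOutlierFactor // i.e. > mOutlier
+ // in which case <time elapsed since the previous outlier, its timestamp>
+ // is recorded in mOutlierData.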
+
+ // Generates a string analyzing the buffer periods and appends it to body
+ // FIXME: move this data visualization to a separate class (model/view/controller)
+ void reportPerformance(String8 *body, int author, log_hash_t hash,
+ int maxHeight = 10);
+
+private:
+
+ // TODO use a circular buffer for the deques and vectors below
+
+ // stores outlier analysis:
+ // <elapsed time between outliers in ms, outlier beginning timestamp>
+ std::deque<std::pair<msInterval, timestamp>> mOutlierData;
+
+ // stores each timestamp at which a peak was detected
+ // a peak is a moment at which the average outlier interval changed significantly
+ std::deque<timestamp> mPeakTimestamps;
+
+ // stores buffer period histograms with timestamp of first sample
+ std::deque<std::pair<timestamp, Histogram>> mHists;
+
+ // Parameters used when detecting outliers
+ struct BufferPeriod {
+ double mMean = -1; // average time between audio processing wakeups
+ double mOutlierFactor = -1; // values > mMean * mOutlierFactor are outliers
+ double mOutlier = -1; // this is set to mMean * mOutlierFactor
+ timestamp mPrevTs = -1; // previous timestamp
+ } mBufferPeriod;
+
+ // capacity allocated to data structures
+ struct MaxLength {
+ size_t Hists; // number of histograms stored in memory
+ size_t Outliers; // number of values stored in outlier array
+ size_t Peaks; // number of values stored in peak array
+ int HistTimespanMs; // maximum histogram timespan
+ };
+ // These values allow for 10 hours of data, assuming a glitch and a peak
+ // as often as every 3 seconds (12000 entries * 3 s = 36000 s = 10 h)
+ static constexpr MaxLength kMaxLength = {.Hists = 60, .Outliers = 12000,
+ .Peaks = 12000, .HistTimespanMs = 10 * kSecPerMin * kMsPerSec };
+
+ // these variables ensure continuity while analyzing the timestamp
+ // series one sample at a time.
+ // TODO: change this to a running variance/mean class
+ struct OutlierDistribution {
+ msInterval mMean = 0; // sample mean since previous peak
+ msInterval mSd = 0; // sample sd since previous peak
+ msInterval mElapsed = 0; // time since previous detected outlier
+ const int kMaxDeviation = 5; // standard deviations from the mean threshold
+ msInterval mTypicalDiff = 0; // global mean of outliers
+ double mN = 0; // length of sequence since the last peak
+ double mM2 = 0; // used to calculate sd
+ } mOutlierDistribution;
+};
+
+void dump(int fd, int indent, PerformanceAnalysisMap &threadPerformanceAnalysis);
+void dumpLine(int fd, int indent, const String8 &body);
+
+} // namespace ReportPerformance
+
+} // namespace android
+
+#endif // ANDROID_MEDIA_PERFORMANCEANALYSIS_H
diff --git a/media/libnblog/include/media/nblog/ReportPerformance.h b/media/libnblog/include/media/nblog/ReportPerformance.h
new file mode 100644
index 0000000..ec0842f
--- /dev/null
+++ b/media/libnblog/include/media/nblog/ReportPerformance.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_REPORTPERFORMANCE_H
+#define ANDROID_MEDIA_REPORTPERFORMANCE_H
+
+#include <deque>
+#include <map>
+#include <vector>
+
+namespace android {
+
+// The String8 class is used by the reportPerformance function
+class String8;
+
+namespace ReportPerformance {
+
+constexpr int kMsPerSec = 1000;
+constexpr int kSecPerMin = 60;
+
+constexpr int kJiffyPerMs = 10; // number of histogram time units (jiffies) per millisecond
+
+// stores a histogram: key = observed buffer period (in jiffies), value = count
+using Histogram = std::map<int, int>;
+
+using msInterval = double;
+using jiffyInterval = double;
+
+using timestamp = int64_t;
+
+using log_hash_t = uint64_t;
+
+static inline int deltaMs(int64_t ns1, int64_t ns2) {
+ return (ns2 - ns1) / (1000 * 1000);
+}
+
+static inline int deltaJiffy(int64_t ns1, int64_t ns2) {
+ return (kJiffyPerMs * (ns2 - ns1)) / (1000 * 1000);
+}
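+
+// For instance (illustrative only), one way to accumulate a wakeup interval
+// into a Histogram under these definitions would be:
+//   Histogram hist;
+//   hist[deltaJiffy(prevNs, currNs)]++;   // bucket by interval, in jiffies
+// `hist`, `prevNs`, and `currNs` are hypothetical names.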
+
+static inline uint32_t log2(uint32_t x) {
+ // This works only for x > 0; __builtin_clz(0) is undefined
+ return 31 - __builtin_clz(x);
+}
+
+// Writes outlier intervals, timestamps, peak timestamps, and histograms to a file.
+void writeToFile(const std::deque<std::pair<timestamp, Histogram>> &hists,
+ const std::deque<std::pair<msInterval, timestamp>> &outlierData,
+ const std::deque<timestamp> &peakTimestamps,
+ const char * kDirectory, bool append, int author, log_hash_t hash);
+
+} // namespace ReportPerformance
+
+} // namespace android
+
+#endif // ANDROID_MEDIA_REPORTPERFORMANCE_H