Merge "stagefright: set PTS for SoftwareRenderer"
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 2c48bbf..79db323 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -154,8 +154,9 @@
* streamType: Select the type of audio stream this track is attached to
* (e.g. AUDIO_STREAM_MUSIC).
* sampleRate: Data source sampling rate in Hz.
- * format: Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
- * 16 bits per sample).
+ * format: Audio format. For mixed tracks, any PCM format supported by the server
+ * is OK, or AUDIO_FORMAT_PCM_8_BIT, which is handled on the client side. For direct
+ * and offloaded tracks, the possible format(s) depend on the output sink.
* channelMask: Channel mask, such that audio_is_output_channel(channelMask) is true.
* frameCount: Minimum size of track PCM buffer in frames. This defines the
* application's contribution to the
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index 276543b..39e57de 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -100,6 +100,7 @@
uint32_t *flags,
int64_t timeoutUs = 0ll);
+ status_t renderOutputBufferAndRelease(size_t index, int64_t timestampNs);
status_t renderOutputBufferAndRelease(size_t index);
status_t releaseOutputBuffer(size_t index);
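
The new overload lets a client pass an explicit presentation timestamp when a
frame is rendered. A minimal sketch of the intended call pattern, assuming a
configured sp<MediaCodec> named codec (the timeout value is illustrative):

    size_t index, offset, size;
    int64_t timeUs;
    uint32_t flags;
    status_t err = codec->dequeueOutputBuffer(
            &index, &offset, &size, &timeUs, &flags, 10000ll /* timeoutUs */);
    if (err == OK) {
        // Convert the microsecond PTS to nanoseconds and render/release in
        // one call so the surface can schedule the frame.
        err = codec->renderOutputBufferAndRelease(index, timeUs * 1000ll);
    }
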
diff --git a/include/ndk/NdkMediaCodec.h b/include/ndk/NdkMediaCodec.h
index 73ece1b..2af88d0b 100644
--- a/include/ndk/NdkMediaCodec.h
+++ b/include/ndk/NdkMediaCodec.h
@@ -49,6 +49,7 @@
enum {
AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM = 4,
+ AMEDIACODEC_CONFIGURE_FLAG_ENCODE = 1,
AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED = -3,
AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED = -2,
AMEDIACODEC_INFO_TRY_AGAIN_LATER = -1
@@ -56,20 +57,22 @@
/**
- * Create decoder by name. Use this if you know the exact codec you want to use.
+ * Create codec by name. Use this if you know the exact codec you want to use.
+ * When configuring, you will need to specify whether to use the codec as an
+ * encoder or decoder.
*/
-AMediaCodec* AMediaCodec_createByCodecName(const char *name);
+AMediaCodec* AMediaCodec_createCodecByName(const char *name);
/**
* Create codec by mime type. Most applications will use this, specifying a
* mime type obtained from media extractor.
*/
-AMediaCodec* AMediaCodec_createByCodecType(const char *mime_type);
+AMediaCodec* AMediaCodec_createDecoderByType(const char *mime_type);
/**
- * Create encoder by name.
+ * Create encoder by mime type.
*/
-AMediaCodec* AMediaCodec_createEncoderByName(const char *name);
+AMediaCodec* AMediaCodec_createEncoderByType(const char *mime_type);
/**
* delete the codec and free its resources
@@ -79,7 +82,8 @@
/**
* Configure the codec. For decoding you would typically get the format from an extractor.
*/
-int AMediaCodec_configure(AMediaCodec*, const AMediaFormat* format, ANativeWindow* surface); // TODO: other args
+int AMediaCodec_configure(AMediaCodec*, const AMediaFormat* format,
+ ANativeWindow* surface, uint32_t flags); // TODO: other args
/**
* Start the codec. A codec must be configured before it can be started, and must be started
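
With the renames and the extra flags argument in place, a decoder is now set up
roughly as follows (a sketch, not the committed code; format and window are
assumed to come from AMediaExtractor and the app's surface, and the mime string
is illustrative):

    AMediaCodec *codec = AMediaCodec_createDecoderByType("video/avc");
    AMediaCodec_configure(codec, format, window, 0 /* flags: decode */);
    AMediaCodec_start(codec);

    // An encoder passes AMEDIACODEC_CONFIGURE_FLAG_ENCODE and no surface.
    AMediaCodec *enc = AMediaCodec_createEncoderByType("video/avc");
    AMediaCodec_configure(enc, format, NULL, AMEDIACODEC_CONFIGURE_FLAG_ENCODE);
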
diff --git a/media/libeffects/downmix/Android.mk b/media/libeffects/downmix/Android.mk
index 2bb6dbe..e0ca8af 100644
--- a/media/libeffects/downmix/Android.mk
+++ b/media/libeffects/downmix/Android.mk
@@ -15,16 +15,10 @@
LOCAL_MODULE_RELATIVE_PATH := soundfx
-ifeq ($(TARGET_OS)-$(TARGET_SIMULATOR),linux-true)
-LOCAL_LDLIBS += -ldl
-endif
-
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-effects) \
$(call include-path-for, audio-utils)
-LOCAL_PRELINK_MODULE := false
-
LOCAL_CFLAGS += -fvisibility=hidden
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libeffects/preprocessing/Android.mk b/media/libeffects/preprocessing/Android.mk
index 9e8cb83..ea3c59d 100644
--- a/media/libeffects/preprocessing/Android.mk
+++ b/media/libeffects/preprocessing/Android.mk
@@ -24,12 +24,7 @@
libutils \
liblog
-ifeq ($(TARGET_SIMULATOR),true)
-LOCAL_LDLIBS += -ldl
-else
LOCAL_SHARED_LIBRARIES += libdl
-endif
-
LOCAL_CFLAGS += -fvisibility=hidden
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index dc4f90e..aaaa3f1 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -315,12 +315,20 @@
flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
}
- if (audio_is_linear_pcm(format)) {
- mFrameSize = channelCount * audio_bytes_per_sample(format);
- mFrameSizeAF = channelCount * sizeof(int16_t);
+ if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
+ if (audio_is_linear_pcm(format)) {
+ mFrameSize = channelCount * audio_bytes_per_sample(format);
+ } else {
+ mFrameSize = sizeof(uint8_t);
+ }
+ mFrameSizeAF = mFrameSize;
} else {
- mFrameSize = sizeof(uint8_t);
- mFrameSizeAF = sizeof(uint8_t);
+ ALOG_ASSERT(audio_is_linear_pcm(format));
+ mFrameSize = channelCount * audio_bytes_per_sample(format);
+ mFrameSizeAF = channelCount * audio_bytes_per_sample(
+ format == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : format);
+ // createTrack will return an error if PCM format is not supported by server,
+ // so no need to check for specific PCM formats here
}
// Make copy of input parameter offloadInfo so that in the future:
@@ -931,7 +939,11 @@
// Ensure that buffer alignment matches channel count
// 8-bit data in shared memory is not currently supported by AudioFlinger
- size_t alignment = /* mFormat == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
+ size_t alignment = audio_bytes_per_sample(
+ mFormat == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : mFormat);
+ if (alignment & 1) {
+ alignment = 1;
+ }
if (mChannelCount > 1) {
// More than 2 channels does not require stronger alignment than stereo
alignment <<= 1;
@@ -947,7 +959,7 @@
// there's no frameCount parameter.
// But when initializing a shared buffer AudioTrack via set(),
// there _is_ a frameCount parameter. We silently ignore it.
- frameCount = mSharedBuffer->size()/mChannelCount/sizeof(int16_t);
+ frameCount = mSharedBuffer->size() / mFrameSizeAF;
} else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
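
To make the new frame-size rules concrete, a worked example for a mixed
(non-direct) stereo 8-bit PCM track, following the code above (numbers only,
not library code):

    // mFrameSize   = 2 channels * 1 byte  = 2  (client buffers stay 8-bit)
    // mFrameSizeAF = 2 channels * 2 bytes = 4  (the server mixes in 16-bit)
    // alignment    = 2 (bytes per 16-bit sample), doubled to 4 for stereo,
    //                so a shared buffer must start on a 4-byte boundary and
    //                frameCount = mSharedBuffer->size() / mFrameSizeAF.
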
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 537d9de..0a3a3b6 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -3670,7 +3670,28 @@
ATRACE_NAME("render");
// The client wants this buffer to be rendered.
+ int64_t timestampNs = 0;
+ if (!msg->findInt64("timestampNs", ×tampNs)) {
+ // TODO: it seems like we should use the timestamp
+ // in the (media)buffer as it potentially came from
+ // an input surface, but we did not propagate it prior to
+ // API 20. Perhaps check for target SDK version.
+#if 0
+ if (info->mData->meta()->findInt64("timeUs", ×tampNs)) {
+ ALOGI("using buffer PTS of %" PRId64, timestampNs);
+ timestampNs *= 1000;
+ }
+#endif
+ }
+
status_t err;
+ err = native_window_set_buffers_timestamp(mCodec->mNativeWindow.get(), timestampNs);
+ if (err != OK) {
+ ALOGW("failed to set buffer timestamp: %d", err);
+ } else {
+ ALOGV("set PTS to %" PRId64, timestampNs);
+ }
+
if ((err = mCodec->mNativeWindow->queueBuffer(
mCodec->mNativeWindow.get(),
info->mGraphicBuffer.get(), -1)) == OK) {
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 601dccf..5b525f2 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -17,6 +17,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "MediaCodec"
#include <utils/Log.h>
+#include <inttypes.h>
#include <media/stagefright/MediaCodec.h>
@@ -323,6 +324,16 @@
return PostAndAwaitResponse(msg, &response);
}
+status_t MediaCodec::renderOutputBufferAndRelease(size_t index, int64_t timestampNs) {
+ sp<AMessage> msg = new AMessage(kWhatReleaseOutputBuffer, id());
+ msg->setSize("index", index);
+ msg->setInt32("render", true);
+ msg->setInt64("timestampNs", timestampNs);
+
+ sp<AMessage> response;
+ return PostAndAwaitResponse(msg, &response);
+}
+
status_t MediaCodec::releaseOutputBuffer(size_t index) {
sp<AMessage> msg = new AMessage(kWhatReleaseOutputBuffer, id());
msg->setSize("index", index);
@@ -1707,9 +1718,25 @@
if (render && info->mData != NULL && info->mData->size() != 0) {
info->mNotify->setInt32("render", true);
+ int64_t timestampNs = 0;
+ if (msg->findInt64("timestampNs", ×tampNs)) {
+ info->mNotify->setInt64("timestampNs", timestampNs);
+ } else {
+ // TODO: it seems like we should use the timestamp
+ // in the (media)buffer as it potentially came from
+ // an input surface, but we did not propagate it prior to
+ // API 20. Perhaps check for target SDK version.
+#if 0
+ if (info->mData->meta()->findInt64("timeUs", ×tampNs)) {
+ ALOGI("using buffer PTS of %" PRId64, timestampNs);
+ timestampNs *= 1000;
+ }
+#endif
+ }
+
if (mSoftRenderer != NULL) {
mSoftRenderer->render(
- info->mData->data(), info->mData->size(), NULL);
+ info->mData->data(), info->mData->size(), timestampNs, NULL);
}
}
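
On the consumer side, SoftwareRenderer::render() now receives the nanosecond
PTS and is expected to stamp its ANativeWindow before queueing, mirroring the
ACodec path above. A sketch of the shape of that change (the signature follows
the call site here; the body is illustrative):

    void SoftwareRenderer::render(
            const void *data, size_t size, int64_t timestampNs,
            void * /* platformPrivate */) {
        // ... color-convert `data` into a dequeued window buffer ...
        native_window_set_buffers_timestamp(mNativeWindow.get(), timestampNs);
        // ... then queue the buffer for display ...
    }
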
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index c34f3cb..6160009 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -1183,9 +1183,35 @@
return OK;
}
+/* static */
+bool PlaylistFetcher::bufferStartsWithWebVTTMagicSequence(
+ const sp<ABuffer> &buffer) {
+ size_t pos = 0;
+
+ // skip possible BOM
+ if (buffer->size() >= pos + 3 &&
+ !memcmp("\xef\xbb\xbf", buffer->data() + pos, 3)) {
+ pos += 3;
+ }
+
+ // accept WEBVTT followed by SPACE, TAB or (CR) LF
+ if (buffer->size() < pos + 6 ||
+ memcmp("WEBVTT", buffer->data() + pos, 6)) {
+ return false;
+ }
+ pos += 6;
+
+ if (buffer->size() == pos) {
+ return true;
+ }
+
+ uint8_t sep = buffer->data()[pos];
+ return sep == ' ' || sep == '\t' || sep == '\n' || sep == '\r';
+}
+
status_t PlaylistFetcher::extractAndQueueAccessUnits(
const sp<ABuffer> &buffer, const sp<AMessage> &itemMeta) {
- if (buffer->size() >= 7 && !memcmp("WEBVTT\n", buffer->data(), 7)) {
+ if (bufferStartsWithWebVTTMagicSequence(buffer)) {
if (mStreamTypeMask != LiveSession::STREAMTYPE_SUBTITLES) {
ALOGE("This stream only contains subtitles.");
return ERROR_MALFORMED;
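
The relaxed detector accepts an optional UTF-8 BOM and any of SPACE, TAB, CR
or LF after the magic word, where the old check required a bare "WEBVTT\n".
Some illustrative inputs and the results the code above produces:

    // "WEBVTT\n..."            -> true  (the only form the old check accepted)
    // "\xEF\xBB\xBFWEBVTT\r\n" -> true  (BOM is skipped first)
    // "WEBVTT - title\n"       -> true  (SPACE separator)
    // "WEBVTT"                 -> true  (magic word ends the buffer)
    // "WEBVTTx"                -> false (no valid separator)
    // "webvtt\n"               -> false (the comparison is case-sensitive)
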
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index 7e21523..6af82c4 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -91,6 +91,7 @@
static const int32_t kNumSkipFrames;
static bool bufferStartsWithTsSyncByte(const sp<ABuffer>& buffer);
+ static bool bufferStartsWithWebVTTMagicSequence(const sp<ABuffer>& buffer);
// notifications to mSession
sp<AMessage> mNotify;
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index e7f009e..9592af8 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -88,16 +88,16 @@
}
-AMediaCodec* AMediaCodec_createByCodecName(const char *name) {
+AMediaCodec* AMediaCodec_createCodecByName(const char *name) {
return createAMediaCodec(name, false, false);
}
-AMediaCodec* AMediaCodec_createByCodecType(const char *mime_type) {
+AMediaCodec* AMediaCodec_createDecoderByType(const char *mime_type) {
return createAMediaCodec(mime_type, true, false);
}
-AMediaCodec* AMediaCodec_createEncoderByName(const char *name) {
- return createAMediaCodec(name, false, true);
+AMediaCodec* AMediaCodec_createEncoderByType(const char *mime_type) {
+ return createAMediaCodec(mime_type, true, true);
}
int AMediaCodec_delete(AMediaCodec *mData) {
@@ -115,7 +115,8 @@
return OK;
}
-int AMediaCodec_configure(AMediaCodec *mData, const AMediaFormat* format, ANativeWindow* window) {
+int AMediaCodec_configure(
+ AMediaCodec *mData, const AMediaFormat* format, ANativeWindow* window, uint32_t flags) {
sp<AMessage> nativeFormat;
AMediaFormat_getFormat(format, &nativeFormat);
ALOGV("configure with format: %s", nativeFormat->debugString(0).c_str());
@@ -124,7 +125,7 @@
surface = (Surface*) window;
}
- return translate_error(mData->mCodec->configure(nativeFormat, surface, NULL, 0));
+ return translate_error(mData->mCodec->configure(nativeFormat, surface, NULL, flags));
}
int AMediaCodec_start(AMediaCodec *mData) {
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index a916b32..2c5a0eb 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -143,6 +143,12 @@
// See the client's minBufCount and mNotificationFramesAct calculations for details.
static const int kFastTrackMultiplier = 2;
+// See Thread::readOnlyHeap().
+// Initially this heap is used to allocate client buffers for "fast" AudioRecord.
+// Eventually it will be the single buffer that FastCapture writes into via HAL read(),
+// and that all "fast" AudioRecord clients read from. In either case, the size can be small.
+static const size_t kRecordThreadReadOnlyHeapSize = 0x1000;
+
// ----------------------------------------------------------------------------
#ifdef ADD_BATTERY_DATA
@@ -4635,6 +4641,8 @@
#ifdef TEE_SINK
, mTeeSink(teeSink)
#endif
+ , mReadOnlyHeap(new MemoryDealer(kRecordThreadReadOnlyHeapSize,
+ "RecordThreadRO", MemoryHeapBase::READ_ONLY))
{
snprintf(mName, kNameLength, "AudioIn_%X", id);
mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mName);
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 5617c0c..8ea8683 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -226,6 +226,13 @@
virtual status_t setSyncEvent(const sp<SyncEvent>& event) = 0;
virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const = 0;
+ // Return a reference to a per-thread heap which can be used to allocate IMemory
+ // objects that will be read-only to client processes, read/write to mediaserver,
+ // and shared by all client processes of the thread.
+ // The heap is per-thread rather than common across all threads, because
+ // clients can't be trusted not to modify the offset of the IMemory they receive.
+ // If a thread does not have such a heap, this method returns 0.
+ virtual sp<MemoryDealer> readOnlyHeap() const { return 0; }
mutable Mutex mLock;
@@ -947,6 +954,8 @@
virtual status_t initCheck() const { return (mInput == NULL) ? NO_INIT : NO_ERROR; }
+ virtual sp<MemoryDealer> readOnlyHeap() const { return mReadOnlyHeap; }
+
sp<AudioFlinger::RecordThread::RecordTrack> createRecordTrack_l(
const sp<AudioFlinger::Client>& client,
uint32_t sampleRate,
@@ -1021,4 +1030,6 @@
// For dumpsys
const sp<NBAIO_Sink> mTeeSink;
+
+ const sp<MemoryDealer> mReadOnlyHeap;
};
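
A sketch of how the per-thread heap is meant to be consumed
(MemoryDealer::allocate() is the standard libbinder API; the surrounding
names are illustrative):

    // Carve a client buffer out of the record thread's read-only heap.
    // allocate() returns 0 once the small (4 KB) heap is exhausted.
    sp<MemoryDealer> heap = thread->readOnlyHeap();
    sp<IMemory> clientBuffer;
    if (heap != 0) {
        clientBuffer = heap->allocate(bufferSize);
    }
    if (clientBuffer == 0) {
        // no per-thread heap, or heap full: fall back to a normal allocation
    }
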
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index fc9b81a..62a44ee 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -2920,9 +2920,6 @@
uint32_t muteWaitMs = 0;
audio_devices_t device = outputDesc->device();
bool shouldMute = outputDesc->isActive() && (popcount(device) >= 2);
- // temporary mute output if device selection changes to avoid volume bursts due to
- // different per device volumes
- bool tempMute = outputDesc->isActive() && (device != prevDevice);
for (size_t i = 0; i < NUM_STRATEGIES; i++) {
audio_devices_t curDevice = getDeviceForStrategy((routing_strategy)i, false /*fromCache*/);
@@ -2936,7 +2933,7 @@
doMute = true;
outputDesc->mStrategyMutedByDevice[i] = false;
}
- if (doMute || tempMute) {
+ if (doMute) {
for (size_t j = 0; j < mOutputs.size(); j++) {
AudioOutputDescriptor *desc = mOutputs.valueAt(j);
// skip output if it does not share any device with current output
@@ -2949,15 +2946,14 @@
mute ? "muting" : "unmuting", i, curDevice, curOutput);
setStrategyMute((routing_strategy)i, mute, curOutput, mute ? 0 : delayMs);
if (desc->isStrategyActive((routing_strategy)i)) {
- // do tempMute only for current output
- if (tempMute && (desc == outputDesc)) {
- setStrategyMute((routing_strategy)i, true, curOutput);
- setStrategyMute((routing_strategy)i, false, curOutput,
- desc->latency() * 2, device);
- }
- if ((tempMute && (desc == outputDesc)) || mute) {
- if (muteWaitMs < desc->latency()) {
- muteWaitMs = desc->latency();
+ if (mute) {
+ // FIXME: should not need to double latency if volume could be applied
+ // immediately by the audioflinger mixer. We must account for the delay
+ // between now and the next time the audioflinger thread for this output
+ // will process a buffer (which corresponds to one buffer size,
+ // usually 1/2 or 1/4 of the latency).
+ if (muteWaitMs < desc->latency() * 2) {
+ muteWaitMs = desc->latency() * 2;
}
}
}
@@ -2965,11 +2961,22 @@
}
}
- // FIXME: should not need to double latency if volume could be applied immediately by the
- // audioflinger mixer. We must account for the delay between now and the next time
- // the audioflinger thread for this output will process a buffer (which corresponds to
- // one buffer size, usually 1/2 or 1/4 of the latency).
- muteWaitMs *= 2;
+ // temporary mute output if device selection changes to avoid volume bursts due to
+ // different per device volumes
+ if (outputDesc->isActive() && (device != prevDevice)) {
+ if (muteWaitMs < outputDesc->latency() * 2) {
+ muteWaitMs = outputDesc->latency() * 2;
+ }
+ for (size_t i = 0; i < NUM_STRATEGIES; i++) {
+ if (outputDesc->isStrategyActive((routing_strategy)i)) {
+ setStrategyMute((routing_strategy)i, true, outputDesc->mId);
+ // schedule the temporary-mute unmute after twice the mute wait time
+ setStrategyMute((routing_strategy)i, false, outputDesc->mId,
+ muteWaitMs * 2, device);
+ }
+ }
+ }
+
// wait for the PCM output buffers to empty before proceeding with the rest of the command
if (muteWaitMs > delayMs) {
muteWaitMs -= delayMs;
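
A worked example of the reordered wait logic, with assumed numbers (output
latency 40 ms, delayMs 10 ms, device change on an active output):

    // muteWaitMs = max(muteWaitMs, 40 * 2)       = 80 ms
    // each active strategy is muted immediately and scheduled to unmute
    // after muteWaitMs * 2                       = 160 ms, on the new device
    // the caller then waits muteWaitMs - delayMs = 70 ms before routing
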