Merge "NuPlayerRenderer: stop feeding AudioSink when paused." into lmp-dev
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index eb31c77..da4c20c 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -234,7 +234,7 @@
status_t setComponentRole(bool isEncoder, const char *mime);
status_t configureCodec(const char *mime, const sp<AMessage> &msg);
- status_t configureTunneledVideoPlayback(int64_t audioHwSync,
+ status_t configureTunneledVideoPlayback(int32_t audioHwSync,
const sp<ANativeWindow> &nativeWindow);
status_t setVideoPortFormatType(
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 3be0651..37bc418 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -70,7 +70,7 @@
LOCAL_STATIC_LIBRARIES += libinstantssq
-LOCAL_WHOLE_STATIC_LIBRARY := libmedia_helper
+LOCAL_WHOLE_STATIC_LIBRARIES := libmedia_helper
LOCAL_MODULE:= libmedia
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 0c7e590c..adc066d 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -28,6 +28,7 @@
libcamera_client \
libcrypto \
libcutils \
+ libdrmframework \
liblog \
libdl \
libgui \
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index f0f4e45..f257ef3 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -32,6 +32,7 @@
#include <media/stagefright/MediaExtractor.h>
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>
+#include "../../libstagefright/include/DRMExtractor.h"
#include "../../libstagefright/include/NuCachedSource2.h"
#include "../../libstagefright/include/WVMExtractor.h"
@@ -49,6 +50,7 @@
mIsWidevine(false),
mUIDValid(uidValid),
mUID(uid),
+ mDrmManagerClient(NULL),
mMetaDataSize(-1ll),
mBitrate(-1ll),
mPollBufferingGeneration(0) {
@@ -57,12 +59,18 @@
}
void NuPlayer::GenericSource::resetDataSource() {
+ mAudioTimeUs = 0;
+ mVideoTimeUs = 0;
mHTTPService.clear();
mUri.clear();
mUriHeaders.clear();
mFd = -1;
mOffset = 0;
mLength = 0;
+ setDrmPlaybackStatusIfNeeded(Playback::STOP, 0);
+ mDecryptHandle = NULL;
+ mDrmManagerClient = NULL;
+ mStarted = false;
}
status_t NuPlayer::GenericSource::setDataSource(
@@ -130,6 +138,10 @@
return UNKNOWN_ERROR;
}
+ if (extractor->getDrmFlag()) {
+ checkDrmStatus(mDataSource);
+ }
+
sp<MetaData> fileMeta = extractor->getMetaData();
if (fileMeta != NULL) {
int64_t duration;
@@ -203,6 +215,28 @@
return OK;
}
+void NuPlayer::GenericSource::checkDrmStatus(const sp<DataSource>& dataSource) {
+ dataSource->getDrmInfo(mDecryptHandle, &mDrmManagerClient);
+ if (mDecryptHandle != NULL) {
+ CHECK(mDrmManagerClient);
+ if (RightsStatus::RIGHTS_VALID != mDecryptHandle->status) {
+ sp<AMessage> msg = dupNotify();
+ msg->setInt32("what", kWhatDrmNoLicense);
+ msg->post();
+ }
+ }
+}
+
+int64_t NuPlayer::GenericSource::getLastReadPosition() {
+ if (mAudioTrack.mSource != NULL) {
+ return mAudioTimeUs;
+ } else if (mVideoTrack.mSource != NULL) {
+ return mVideoTimeUs;
+ } else {
+ return 0;
+ }
+}
+
status_t NuPlayer::GenericSource::setBuffers(
bool audio, Vector<MediaBuffer *> &buffers) {
if (mIsWidevine && !audio) {
@@ -398,6 +432,33 @@
readBuffer(MEDIA_TRACK_TYPE_VIDEO);
}
+
+ setDrmPlaybackStatusIfNeeded(Playback::START, getLastReadPosition() / 1000);
+ mStarted = true;
+}
+
+void NuPlayer::GenericSource::stop() {
+ // nothing to do, just account for DRM playback status
+ setDrmPlaybackStatusIfNeeded(Playback::STOP, 0);
+ mStarted = false;
+}
+
+void NuPlayer::GenericSource::pause() {
+ // nothing to do, just account for DRM playback status
+ setDrmPlaybackStatusIfNeeded(Playback::PAUSE, 0);
+ mStarted = false;
+}
+
+void NuPlayer::GenericSource::resume() {
+ // nothing to do, just account for DRM playback status
+ setDrmPlaybackStatusIfNeeded(Playback::START, getLastReadPosition() / 1000);
+ mStarted = true;
+}
+
+void NuPlayer::GenericSource::setDrmPlaybackStatusIfNeeded(int playbackStatus, int64_t position) {
+ if (mDecryptHandle != NULL) {
+ mDrmManagerClient->setPlaybackStatus(mDecryptHandle, playbackStatus, position);
+ }
}
status_t NuPlayer::GenericSource::feedMoreTSData() {
@@ -872,6 +933,10 @@
readBuffer(MEDIA_TRACK_TYPE_AUDIO, seekTimeUs);
}
+ setDrmPlaybackStatusIfNeeded(Playback::START, seekTimeUs / 1000);
+ if (!mStarted) {
+ setDrmPlaybackStatusIfNeeded(Playback::PAUSE, 0);
+ }
return OK;
}
@@ -989,6 +1054,14 @@
options.clearSeekTo();
if (err == OK) {
+ int64_t timeUs;
+ CHECK(mbuf->meta_data()->findInt64(kKeyTime, &timeUs));
+ if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
+ mAudioTimeUs = timeUs;
+ } else if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
+ mVideoTimeUs = timeUs;
+ }
+
// formatChange && seeking: track whose source is changed during selection
// formatChange && !seeking: track whose source is not changed during selection
// !formatChange: normal seek
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 663bfae..1f13120 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -27,6 +27,8 @@
namespace android {
+class DecryptHandle;
+class DrmManagerClient;
struct AnotherPacketSource;
struct ARTSPController;
struct DataSource;
@@ -49,6 +51,9 @@
virtual void prepareAsync();
virtual void start();
+ virtual void stop();
+ virtual void pause();
+ virtual void resume();
virtual status_t feedMoreTSData();
@@ -90,7 +95,9 @@
};
Track mAudioTrack;
+ int64_t mAudioTimeUs;
Track mVideoTrack;
+ int64_t mVideoTimeUs;
Track mSubtitleTrack;
Track mTimedTextTrack;
@@ -111,6 +118,9 @@
sp<DataSource> mDataSource;
sp<NuCachedSource2> mCachedSource;
sp<WVMExtractor> mWVMExtractor;
+ DrmManagerClient *mDrmManagerClient;
+ sp<DecryptHandle> mDecryptHandle;
+ bool mStarted;
String8 mContentType;
AString mSniffedMIME;
off64_t mMetaDataSize;
@@ -122,6 +132,9 @@
void resetDataSource();
status_t initFromDataSource();
+ void checkDrmStatus(const sp<DataSource>& dataSource);
+ int64_t getLastReadPosition();
+ void setDrmPlaybackStatusIfNeeded(int playbackStatus, int64_t position);
status_t prefillCacheIfNecessary();
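
The GenericSource changes above route every playback transition (start, stop, pause, resume, seek) through setDrmPlaybackStatusIfNeeded() and remember the last-read audio/video timestamps so the reported position is meaningful. A minimal standalone C++ sketch of that pattern follows; DrmClient, SourceSketch, and the member names are illustrative stand-ins, not the AOSP types.

    #include <cstdint>
    #include <cstdio>

    namespace Playback { enum { STOP = 0, START = 1, PAUSE = 2 }; }

    struct DrmClient {                      // stand-in for DrmManagerClient
        void setPlaybackStatus(int status, int64_t positionMs) {
            std::printf("DRM status=%d pos=%lld ms\n", status, (long long)positionMs);
        }
    };

    class SourceSketch {
    public:
        void start()  { reportDrm(Playback::START, lastReadPositionUs() / 1000); started_ = true; }
        void pause()  { reportDrm(Playback::PAUSE, 0); started_ = false; }
        void stop()   { reportDrm(Playback::STOP, 0);  started_ = false; }
        void seekTo(int64_t timeUs) {
            reportDrm(Playback::START, timeUs / 1000);
            if (!started_) reportDrm(Playback::PAUSE, 0);   // stay paused after the seek
        }
        void onBufferRead(bool audio, int64_t timeUs) {
            (audio ? audioTimeUs_ : videoTimeUs_) = timeUs; // remember last-read position
        }
    private:
        int64_t lastReadPositionUs() const {
            return hasAudio_ ? audioTimeUs_ : videoTimeUs_; // prefer the audio position
        }
        void reportDrm(int status, int64_t posMs) {
            if (drm_) drm_->setPlaybackStatus(status, posMs); // only for DRM-protected content
        }
        DrmClient *drm_ = nullptr;
        bool started_ = false, hasAudio_ = true;
        int64_t audioTimeUs_ = 0, videoTimeUs_ = 0;
    };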
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 2b7457b..76d25de 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -1915,6 +1915,12 @@
break;
}
+ case Source::kWhatDrmNoLicense:
+ {
+ notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, ERROR_DRM_NO_LICENSE);
+ break;
+ }
+
default:
TRESPASS();
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index d1aac50..5aaf48c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -220,6 +220,8 @@
void NuPlayer::Decoder::handleError(int32_t err)
{
+ mCodec->release();
+
sp<AMessage> notify = mNotify->dup();
notify->setInt32("what", kWhatError);
notify->setInt32("err", err);
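
The one-line change above makes Decoder::handleError() release the underlying MediaCodec before notifying the player. A hedged sketch of that ordering, with simplified stand-in types (Codec, Listener) rather than the real NuPlayer classes:

    #include <memory>

    struct Codec    { void release() {} };            // stand-in for MediaCodec
    struct Listener { void onError(int /*err*/) {} }; // stand-in for the player notify path

    void handleError(std::shared_ptr<Codec> &codec, Listener &listener, int err) {
        if (codec) {
            codec->release();   // free codec resources first...
        }
        listener.onError(err);  // ...then propagate the error upstream
    }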
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 45657c2..7ccf3b1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -51,6 +51,7 @@
kWhatSubtitleData,
kWhatTimedTextData,
kWhatQueueDecoderShutdown,
+ kWhatDrmNoLicense,
};
// The provides message is used to notify the player about various
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index e4e463a..19a5908 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -1245,13 +1245,13 @@
tunneled != 0) {
ALOGI("Configuring TUNNELED video playback.");
- int64_t audioHwSync = 0;
- if (!msg->findInt64("audio-hw-sync", &audioHwSync)) {
+ int32_t audioHwSync = 0;
+ if (!msg->findInt32("audio-hw-sync", &audioHwSync)) {
ALOGW("No Audio HW Sync provided for video tunnel");
}
err = configureTunneledVideoPlayback(audioHwSync, nativeWindow);
if (err != OK) {
- ALOGE("configureTunneledVideoPlayback(%" PRId64 ",%p) failed!",
+ ALOGE("configureTunneledVideoPlayback(%d,%p) failed!",
audioHwSync, nativeWindow.get());
return err;
}
@@ -1898,7 +1898,7 @@
}
status_t ACodec::configureTunneledVideoPlayback(
- int64_t audioHwSync, const sp<ANativeWindow> &nativeWindow) {
+ int32_t audioHwSync, const sp<ANativeWindow> &nativeWindow) {
native_handle_t* sidebandHandle;
status_t err = mOMX->configureVideoTunnelMode(
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 76f730f..fc2dd30 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -733,13 +733,15 @@
case CONFIGURING:
{
- setState(INITIALIZED);
+ setState(actionCode == ACTION_CODE_FATAL ?
+ UNINITIALIZED : INITIALIZED);
break;
}
case STARTING:
{
- setState(CONFIGURED);
+ setState(actionCode == ACTION_CODE_FATAL ?
+ UNINITIALIZED : CONFIGURED);
break;
}
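
The MediaCodec hunk above distinguishes fatal from non-fatal errors when rolling back from CONFIGURING or STARTING. A small illustrative sketch of that state selection, where fatalError stands for actionCode == ACTION_CODE_FATAL; this is not the MediaCodec implementation, just the decision it encodes:

    enum State { UNINITIALIZED, INITIALIZED, CONFIGURED, CONFIGURING, STARTING };

    State stateAfterError(State current, bool fatalError) {
        if (fatalError) {
            return UNINITIALIZED;                  // fatal: the codec must be re-created
        }
        switch (current) {
            case CONFIGURING: return INITIALIZED;  // undo the in-flight configure()
            case STARTING:    return CONFIGURED;   // undo the in-flight start()
            default:          return current;
        }
    }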
diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml
index 9b930bc..c97be28 100644
--- a/media/libstagefright/data/media_codecs_google_video.xml
+++ b/media/libstagefright/data/media_codecs_google_video.xml
@@ -16,18 +16,89 @@
<Included>
<Decoders>
- <MediaCodec name="OMX.google.mpeg4.decoder" type="video/mp4v-es" />
- <MediaCodec name="OMX.google.h263.decoder" type="video/3gpp" />
- <MediaCodec name="OMX.google.h264.decoder" type="video/avc" />
- <MediaCodec name="OMX.google.hevc.decoder" type="video/hevc" />
- <MediaCodec name="OMX.google.vp8.decoder" type="video/x-vnd.on2.vp8" />
- <MediaCodec name="OMX.google.vp9.decoder" type="video/x-vnd.on2.vp9" />
+ <MediaCodec name="OMX.google.mpeg4.decoder" type="video/mp4v-es">
+ <!-- profiles and levels: ProfileSimple : Level3 -->
+ <Limit name="size" min="2x2" max="352x288" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="12-11880" />
+ <Limit name="bitrate" range="1-384000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.h263.decoder" type="video/3gpp">
+ <!-- profiles and levels: ProfileBaseline : Level30, ProfileBaseline : Level45
+ ProfileISWV2 : Level30, ProfileISWV2 : Level45 -->
+ <Limit name="size" min="2x2" max="352x288" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="bitrate" range="1-384000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.h264.decoder" type="video/avc">
+ <!-- profiles and levels: ProfileBaseline : Level51 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-983040" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.hevc.decoder" type="video/hevc">
+ <!-- profiles and levels: ProfileMain : MainTierLevel51 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="8x8" />
+ <Limit name="block-count" range="1-139264" />
+ <Limit name="blocks-per-second" range="1-2000000" />
+ <Limit name="bitrate" range="1-10000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.vp8.decoder" type="video/x-vnd.on2.vp8">
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-1000000" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.vp9.decoder" type="video/x-vnd.on2.vp9">
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-500000" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
</Decoders>
<Encoders>
- <MediaCodec name="OMX.google.h263.encoder" type="video/3gpp" />
- <MediaCodec name="OMX.google.h264.encoder" type="video/avc" />
- <MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es" />
- <MediaCodec name="OMX.google.vp8.encoder" type="video/x-vnd.on2.vp8" />
+ <MediaCodec name="OMX.google.h263.encoder" type="video/3gpp">
+ <!-- profiles and levels: ProfileBaseline : Level45 -->
+ <Limit name="size" min="2x2" max="176x144" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="bitrate" range="1-128000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.h264.encoder" type="video/avc">
+ <!-- profiles and levels: ProfileBaseline : Level2 -->
+ <Limit name="size" min="2x2" max="896x896" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-11880" />
+ <Limit name="bitrate" range="1-2000000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es">
+ <!-- profiles and levels: ProfileCore : Level2 -->
+ <Limit name="size" min="2x2" max="176x144" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="12-1485" />
+ <Limit name="bitrate" range="1-64000" />
+ </MediaCodec>
+ <MediaCodec name="OMX.google.vp8.encoder" type="video/x-vnd.on2.vp8">
+ <!-- profiles and levels: ProfileMain : Level_Version0-3 -->
+ <Limit name="size" min="2x2" max="2048x2048" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="bitrate" range="1-40000000" />
+ <Feature name="bitrate-modes" value="VBR,CBR" />
+ </MediaCodec>
</Encoders>
</Included>
diff --git a/services/audiopolicy/AudioPolicyEffects.cpp b/services/audiopolicy/AudioPolicyEffects.cpp
index cc0e965..c45acd0 100644
--- a/services/audiopolicy/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/AudioPolicyEffects.cpp
@@ -98,8 +98,12 @@
inputDesc = new EffectVector(audioSession);
mInputs.add(input, inputDesc);
} else {
+ // EffectVector already exists; just increase its ref count
inputDesc = mInputs.valueAt(idx);
}
+ inputDesc->mRefCount++;
+
+ ALOGV("addInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
for (size_t i = 0; i < effects.size(); i++) {
@@ -133,10 +137,14 @@
return status;
}
EffectVector *inputDesc = mInputs.valueAt(index);
- setProcessorEnabled(inputDesc, false);
- delete inputDesc;
- mInputs.removeItemsAt(index);
- ALOGV("releaseInputEffects(): all effects released");
+ inputDesc->mRefCount--;
+ ALOGV("releaseInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
+ if (inputDesc->mRefCount == 0) {
+ setProcessorEnabled(inputDesc, false);
+ delete inputDesc;
+ mInputs.removeItemsAt(index);
+ ALOGV("releaseInputEffects(): all effects released");
+ }
return status;
}
@@ -223,8 +231,12 @@
procDesc = new EffectVector(audioSession);
mOutputSessions.add(audioSession, procDesc);
} else {
+ // EffectVector already exists; just increase its ref count
procDesc = mOutputSessions.valueAt(idx);
}
+ procDesc->mRefCount++;
+
+ ALOGV("addOutputSessionEffects(): session: %d, refCount: %d", audioSession, procDesc->mRefCount);
Vector <EffectDesc *> effects = mOutputStreams.valueAt(index)->mEffects;
for (size_t i = 0; i < effects.size(); i++) {
@@ -262,12 +274,16 @@
}
EffectVector *procDesc = mOutputSessions.valueAt(index);
- setProcessorEnabled(procDesc, false);
- procDesc->mEffects.clear();
- delete procDesc;
- mOutputSessions.removeItemsAt(index);
- ALOGV("releaseOutputSessionEffects(): output processing released from session: %d",
- audioSession);
+ procDesc->mRefCount--;
+ ALOGV("releaseOutputSessionEffects(): session: %d, refCount: %d", audioSession, procDesc->mRefCount);
+ if (procDesc->mRefCount == 0) {
+ setProcessorEnabled(procDesc, false);
+ procDesc->mEffects.clear();
+ delete procDesc;
+ mOutputSessions.removeItemsAt(index);
+ ALOGV("releaseOutputSessionEffects(): output processing released from session: %d",
+ audioSession);
+ }
return status;
}
diff --git a/services/audiopolicy/AudioPolicyEffects.h b/services/audiopolicy/AudioPolicyEffects.h
index 351cb1a..dbe0d0e 100644
--- a/services/audiopolicy/AudioPolicyEffects.h
+++ b/services/audiopolicy/AudioPolicyEffects.h
@@ -131,9 +131,11 @@
// class to store a vector of AudioEffects
class EffectVector {
public:
- EffectVector(int session) : mSessionId(session) {}
+ EffectVector(int session) : mSessionId(session), mRefCount(0) {}
/*virtual*/ ~EffectVector() {}
const int mSessionId;
+ // AudioPolicyManager holds mLock, so no separate lock is needed for the reference count here
+ int mRefCount;
Vector< sp<AudioEffect> >mEffects;
};
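
The AudioPolicyEffects changes replace unconditional teardown with a per-session reference count, so input and output session effects are only destroyed when the last user releases them. A minimal sketch of that idea, assuming (as the new comment notes) that the caller already holds a single external lock; the types here are illustrative, not the AOSP ones:

    #include <map>

    struct SessionEffects {
        int refCount = 0;
        // ... the Vector< sp<AudioEffect> > would live here ...
    };

    class EffectRegistry {
    public:
        void acquire(int session) {
            ++sessions_[session].refCount;       // creates the entry on first use
        }
        void release(int session) {
            auto it = sessions_.find(session);
            if (it == sessions_.end()) return;
            if (--it->second.refCount == 0) {
                sessions_.erase(it);             // last user gone: tear the effects down
            }
        }
    private:
        std::map<int, SessionEffects> sessions_;
    };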
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index 06dd22c..a805923 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -1297,21 +1297,23 @@
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
bool isSoundTrigger = false;
+ audio_source_t halInputSource = inputSource;
if (inputSource == AUDIO_SOURCE_HOTWORD) {
ssize_t index = mSoundTriggerSessions.indexOfKey(session);
if (index >= 0) {
input = mSoundTriggerSessions.valueFor(session);
isSoundTrigger = true;
ALOGV("SoundTrigger capture on session %d input %d", session, input);
+ } else {
+ halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
}
}
-
status_t status = mpClientInterface->openInput(profile->mModule->mHandle,
&input,
&config,
&device,
String8(""),
- inputSource,
+ halInputSource,
flags);
// only accept input with the exact requested set of parameters
@@ -4317,6 +4319,20 @@
mpClientInterface->onAudioPatchListUpdate();
}
}
+
+ // inform all input as well
+ for (size_t i = 0; i < mInputs.size(); i++) {
+ const sp<AudioInputDescriptor> inputDescriptor = mInputs.valueAt(i);
+ if (!isVirtualInputDevice(inputDescriptor->mDevice)) {
+ AudioParameter inputCmd = AudioParameter();
+ ALOGV("%s: inform input %d of device:%d", __func__,
+ inputDescriptor->mIoHandle, device);
+ inputCmd.addInt(String8(AudioParameter::keyRouting),device);
+ mpClientInterface->setParameters(inputDescriptor->mIoHandle,
+ inputCmd.toString(),
+ delayMs);
+ }
+ }
}
// update stream volumes according to new device
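
Two behaviors are added above: HOTWORD captures without a matching sound-trigger session fall back to VOICE_RECOGNITION when opening the HAL input, and routing updates are now also pushed to non-virtual inputs. A small hedged sketch of the first of these; the enum and container are simplified stand-ins for audio_source_t and mSoundTriggerSessions:

    #include <set>

    enum Source { VOICE_RECOGNITION, HOTWORD };

    Source halSourceFor(Source requested, const std::set<int> &triggerSessions, int session) {
        if (requested == HOTWORD && triggerSessions.count(session) == 0) {
            return VOICE_RECOGNITION;   // no trigger session: fall back for the HAL
        }
        return requested;               // otherwise pass the requested source through
    }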
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 2f485b9..9d6ab23 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -47,6 +47,7 @@
device3/Camera3InputStream.cpp \
device3/Camera3OutputStream.cpp \
device3/Camera3ZslStream.cpp \
+ device3/Camera3DummyStream.cpp \
device3/StatusTracker.cpp \
gui/RingBufferConsumer.cpp \
utils/CameraTraces.cpp \
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 9b51b99..6f78db5 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -48,6 +48,7 @@
#include "device3/Camera3OutputStream.h"
#include "device3/Camera3InputStream.h"
#include "device3/Camera3ZslStream.h"
+#include "device3/Camera3DummyStream.h"
#include "CameraService.h"
using namespace android::camera3;
@@ -181,6 +182,7 @@
mHal3Device = device;
mStatus = STATUS_UNCONFIGURED;
mNextStreamId = 0;
+ mDummyStreamId = NO_STREAM;
mNeedConfig = true;
mPauseStateNotify = false;
@@ -1418,6 +1420,15 @@
return OK;
}
+ // Workaround for a spec bug in device HAL v3.2 or older: configuring zero
+ // streams is not allowed, so add a dummy stream instead.
+ // TODO: Bug 17321404: fix the HAL spec and remove this workaround.

+ if (mOutputStreams.size() == 0) {
+ addDummyStreamLocked();
+ } else {
+ tryRemoveDummyStreamLocked();
+ }
+
// Start configuring the streams
ALOGV("%s: Camera %d: Starting stream configuration", __FUNCTION__, mId);
@@ -1540,7 +1551,7 @@
mNeedConfig = false;
- if (config.num_streams > 0) {
+ if (mDummyStreamId == NO_STREAM) {
mStatus = STATUS_CONFIGURED;
} else {
mStatus = STATUS_UNCONFIGURED;
@@ -1554,6 +1565,69 @@
return OK;
}
+status_t Camera3Device::addDummyStreamLocked() {
+ ATRACE_CALL();
+ status_t res;
+
+ if (mDummyStreamId != NO_STREAM) {
+ // Should never be adding a second dummy stream when one is already
+ // active
+ SET_ERR_L("%s: Camera %d: A dummy stream already exists!",
+ __FUNCTION__, mId);
+ return INVALID_OPERATION;
+ }
+
+ ALOGV("%s: Camera %d: Adding a dummy stream", __FUNCTION__, mId);
+
+ sp<Camera3OutputStreamInterface> dummyStream =
+ new Camera3DummyStream(mNextStreamId);
+
+ res = mOutputStreams.add(mNextStreamId, dummyStream);
+ if (res < 0) {
+ SET_ERR_L("Can't add dummy stream to set: %s (%d)", strerror(-res), res);
+ return res;
+ }
+
+ mDummyStreamId = mNextStreamId;
+ mNextStreamId++;
+
+ return OK;
+}
+
+status_t Camera3Device::tryRemoveDummyStreamLocked() {
+ ATRACE_CALL();
+ status_t res;
+
+ if (mDummyStreamId == NO_STREAM) return OK;
+ if (mOutputStreams.size() == 1) return OK;
+
+ ALOGV("%s: Camera %d: Removing the dummy stream", __FUNCTION__, mId);
+
+ // Ok, have a dummy stream and there's at least one other output stream,
+ // so remove the dummy
+
+ sp<Camera3StreamInterface> deletedStream;
+ ssize_t outputStreamIdx = mOutputStreams.indexOfKey(mDummyStreamId);
+ if (outputStreamIdx == NAME_NOT_FOUND) {
+ SET_ERR_L("Dummy stream %d does not appear to exist", mDummyStreamId);
+ return INVALID_OPERATION;
+ }
+
+ deletedStream = mOutputStreams.editValueAt(outputStreamIdx);
+ mOutputStreams.removeItemsAt(outputStreamIdx);
+
+ // Free up the stream endpoint so that it can be used by some other stream
+ res = deletedStream->disconnect();
+ if (res != OK) {
+ SET_ERR_L("Can't disconnect deleted dummy stream %d", mDummyStreamId);
+ // fall through, since we still want to list the stream as deleted.
+ }
+ mDeletedStreams.add(deletedStream);
+ mDummyStreamId = NO_STREAM;
+
+ return res;
+}
+
void Camera3Device::setErrorState(const char *fmt, ...) {
Mutex::Autolock l(mLock);
va_list args;
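
The Camera3Device changes work around HAL v3.2 or older rejecting a zero-stream configuration by parking a dummy output stream whenever no real streams exist and removing it once one appears. A compact sketch of that bookkeeping with hypothetical names (DeviceSketch; kNoStream mirrors NO_STREAM); the real logic lives in configureStreamsLocked(), addDummyStreamLocked(), and tryRemoveDummyStreamLocked():

    #include <map>
    #include <memory>

    struct Stream { bool isDummy = false; };

    class DeviceSketch {
    public:
        void configureStreams() {
            if (streams_.empty()) {
                // HAL <= v3.2 rejects a zero-stream configuration, so park a
                // placeholder stream until the client adds a real one.
                auto dummy = std::make_shared<Stream>();
                dummy->isDummy = true;
                dummyId_ = nextId_++;
                streams_[dummyId_] = dummy;
            } else if (dummyId_ != kNoStream && streams_.size() > 1) {
                streams_.erase(dummyId_);   // a real stream exists; drop the placeholder
                dummyId_ = kNoStream;
            }
            // ... hand streams_ to the HAL here ...
        }
    private:
        static const int kNoStream = -1;
        int nextId_ = 0;
        int dummyId_ = kNoStream;
        std::map<int, std::shared_ptr<Stream>> streams_;
    };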
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index e3c98ef..b99ed7e 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -151,6 +151,8 @@
struct RequestTrigger;
// minimal jpeg buffer size: 256KB + blob header
static const ssize_t kMinJpegBufferSize = 256 * 1024 + sizeof(camera3_jpeg_blob);
+ // Constant to use for stream ID when one doesn't exist
+ static const int NO_STREAM = -1;
// A lock to enforce serialization on the input/configure side
// of the public interface.
@@ -196,6 +198,8 @@
int mNextStreamId;
bool mNeedConfig;
+ int mDummyStreamId;
+
// Whether to send state updates upstream
// Pause when doing transparent reconfiguration
bool mPauseStateNotify;
@@ -291,6 +295,17 @@
status_t configureStreamsLocked();
/**
+ * Add a dummy stream to the current stream set as a workaround for
+ * not allowing 0 streams in the camera HAL spec.
+ */
+ status_t addDummyStreamLocked();
+
+ /**
+ * Remove a dummy stream if the current config includes real streams.
+ */
+ status_t tryRemoveDummyStreamLocked();
+
+ /**
* Set device into an error state due to some fatal failure, and set an
* error message to indicate why. Only the first call's message will be
* used. The message is also sent to the log.
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
new file mode 100644
index 0000000..6656b09
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-DummyStream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include "Camera3DummyStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+Camera3DummyStream::Camera3DummyStream(int id) :
+ Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, DUMMY_WIDTH, DUMMY_HEIGHT,
+ /*maxSize*/0, DUMMY_FORMAT) {
+
+}
+
+Camera3DummyStream::~Camera3DummyStream() {
+
+}
+
+status_t Camera3DummyStream::getBufferLocked(camera3_stream_buffer *buffer) {
+ ATRACE_CALL();
+ ALOGE("%s: Stream %d: Dummy stream cannot produce buffers!", mId);
+ return INVALID_OPERATION;
+}
+
+status_t Camera3DummyStream::returnBufferLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp) {
+ ATRACE_CALL();
+ ALOGE("%s: Stream %d: Dummy stream cannot return buffers!", mId);
+ return INVALID_OPERATION;
+}
+
+status_t Camera3DummyStream::returnBufferCheckedLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp,
+ bool output,
+ /*out*/
+ sp<Fence> *releaseFenceOut) {
+ ATRACE_CALL();
+ ALOGE("%s: Stream %d: Dummy stream cannot return buffers!", mId);
+ return INVALID_OPERATION;
+}
+
+void Camera3DummyStream::dump(int fd, const Vector<String16> &args) const {
+ (void) args;
+ String8 lines;
+ lines.appendFormat(" Stream[%d]: Dummy\n", mId);
+ write(fd, lines.string(), lines.size());
+
+ Camera3IOStreamBase::dump(fd, args);
+}
+
+status_t Camera3DummyStream::setTransform(int transform) {
+ ATRACE_CALL();
+ // Do nothing
+ return OK;
+}
+
+status_t Camera3DummyStream::configureQueueLocked() {
+ // Do nothing
+ return OK;
+}
+
+status_t Camera3DummyStream::disconnectLocked() {
+ mState = (mState == STATE_IN_RECONFIG) ? STATE_IN_CONFIG
+ : STATE_CONSTRUCTED;
+ return OK;
+}
+
+status_t Camera3DummyStream::getEndpointUsage(uint32_t *usage) {
+ *usage = DUMMY_USAGE;
+ return OK;
+}
+
+}; // namespace camera3
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
new file mode 100644
index 0000000..3e42623
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_DUMMY_STREAM_H
+#define ANDROID_SERVERS_CAMERA3_DUMMY_STREAM_H
+
+#include <utils/RefBase.h>
+#include <gui/Surface.h>
+
+#include "Camera3Stream.h"
+#include "Camera3IOStreamBase.h"
+#include "Camera3OutputStreamInterface.h"
+
+namespace android {
+namespace camera3 {
+
+/**
+ * A dummy output stream class, to be used as a placeholder when no valid
+ * streams are configured by the client.
+ * This is necessary because camera HAL v3.2 or older disallows configuring
+ * 0 output streams, while the public camera2 API allows for it.
+ */
+class Camera3DummyStream :
+ public Camera3IOStreamBase,
+ public Camera3OutputStreamInterface {
+
+ public:
+ /**
+ * Set up a dummy stream; doesn't actually connect to anything, and uses
+ * a default dummy format and size.
+ */
+ Camera3DummyStream(int id);
+
+ virtual ~Camera3DummyStream();
+
+ /**
+ * Camera3Stream interface
+ */
+
+ virtual void dump(int fd, const Vector<String16> &args) const;
+
+ status_t setTransform(int transform);
+
+ protected:
+
+ /**
+ * Note that we release the lock briefly in this function
+ */
+ virtual status_t returnBufferCheckedLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp,
+ bool output,
+ /*out*/
+ sp<Fence> *releaseFenceOut);
+
+ virtual status_t disconnectLocked();
+
+ private:
+
+ // Default dummy stream parameters; 320x240 is a size all devices are required
+ // to support, and the stream otherwise acts like a SurfaceView would.
+ static const int DUMMY_WIDTH = 320;
+ static const int DUMMY_HEIGHT = 240;
+ static const int DUMMY_FORMAT = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ static const uint32_t DUMMY_USAGE = GRALLOC_USAGE_HW_COMPOSER;
+
+ /**
+ * Internal Camera3Stream interface
+ */
+ virtual status_t getBufferLocked(camera3_stream_buffer *buffer);
+ virtual status_t returnBufferLocked(
+ const camera3_stream_buffer &buffer,
+ nsecs_t timestamp);
+
+ virtual status_t configureQueueLocked();
+
+ virtual status_t getEndpointUsage(uint32_t *usage);
+
+}; // class Camera3DummyStream
+
+} // namespace camera3
+
+} // namespace android
+
+#endif
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index 2502e0d..b5aaee3 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -249,7 +249,7 @@
event->data_offset = sizeof(struct sound_trigger_recognition_event);
break;
default:
- return eventMemory;
+ return eventMemory;
}
size_t size = event->data_offset + event->data_size;
@@ -653,7 +653,6 @@
{
ALOGV("onCallbackEvent type %d", event->mType);
- AutoMutex lock(mLock);
sp<IMemory> eventMemory = event->mMemory;
if (eventMemory == 0 || eventMemory->pointer() == NULL) {
@@ -668,34 +667,53 @@
case CallbackEvent::TYPE_RECOGNITION: {
struct sound_trigger_recognition_event *recognitionEvent =
(struct sound_trigger_recognition_event *)eventMemory->pointer();
+ sp<ISoundTriggerClient> client;
+ {
+ AutoMutex lock(mLock);
+ sp<Model> model = getModel(recognitionEvent->model);
+ if (model == 0) {
+ ALOGW("%s model == 0", __func__);
+ return;
+ }
+ if (model->mState != Model::STATE_ACTIVE) {
+ ALOGV("onCallbackEvent model->mState %d != Model::STATE_ACTIVE", model->mState);
+ return;
+ }
- sp<Model> model = getModel(recognitionEvent->model);
- if (model == 0) {
- ALOGW("%s model == 0", __func__);
- return;
+ recognitionEvent->capture_session = model->mCaptureSession;
+ model->mState = Model::STATE_IDLE;
+ client = mClient;
}
- if (model->mState != Model::STATE_ACTIVE) {
- ALOGV("onCallbackEvent model->mState %d != Model::STATE_ACTIVE", model->mState);
- return;
+ if (client != 0) {
+ client->onRecognitionEvent(eventMemory);
}
-
- recognitionEvent->capture_session = model->mCaptureSession;
- mClient->onRecognitionEvent(eventMemory);
- model->mState = Model::STATE_IDLE;
} break;
case CallbackEvent::TYPE_SOUNDMODEL: {
struct sound_trigger_model_event *soundmodelEvent =
(struct sound_trigger_model_event *)eventMemory->pointer();
-
- sp<Model> model = getModel(soundmodelEvent->model);
- if (model == 0) {
- ALOGW("%s model == 0", __func__);
- return;
+ sp<ISoundTriggerClient> client;
+ {
+ AutoMutex lock(mLock);
+ sp<Model> model = getModel(soundmodelEvent->model);
+ if (model == 0) {
+ ALOGW("%s model == 0", __func__);
+ return;
+ }
+ client = mClient;
}
- mClient->onSoundModelEvent(eventMemory);
+ if (client != 0) {
+ client->onSoundModelEvent(eventMemory);
+ }
} break;
case CallbackEvent::TYPE_SERVICE_STATE: {
- mClient->onServiceStateChange(eventMemory);
+ sp<ISoundTriggerClient> client;
+ {
+ AutoMutex lock(mLock);
+ client = mClient;
+ }
+ if (client != 0) {
+ client->onServiceStateChange(eventMemory);
+ }
} break;
default:
LOG_ALWAYS_FATAL("onCallbackEvent unknown event type %d", event->mType);
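
The SoundTriggerHwService rework narrows the scope of mLock: per-model state is validated and updated, and a strong reference to the client is taken, while the lock is held, but the binder callback itself runs with the lock released. A simplified sketch of that pattern using std::mutex and std::shared_ptr in place of the Android primitives:

    #include <memory>
    #include <mutex>

    struct Client { void onEvent() {} };   // stand-in for ISoundTriggerClient

    class ServiceSketch {
    public:
        void onCallbackEvent() {
            std::shared_ptr<Client> client;
            {
                std::lock_guard<std::mutex> lock(mLock);
                // ... validate and update per-model state here ...
                client = mClient;          // grab a reference while protected
            }
            if (client) {
                client->onEvent();         // call out without holding mLock
            }
        }
    private:
        std::mutex mLock;
        std::shared_ptr<Client> mClient;
    };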