Merge "AudioFlinger: send audio source to audio effects" into jb-mr1-dev
diff --git a/include/media/IMediaPlayerService.h b/include/media/IMediaPlayerService.h
index 172975c..c4c37b6 100644
--- a/include/media/IMediaPlayerService.h
+++ b/include/media/IMediaPlayerService.h
@@ -34,6 +34,8 @@
struct ICrypto;
class IMediaRecorder;
class IOMX;
+class IRemoteDisplay;
+class IRemoteDisplayClient;
struct IStreamSource;
class IMediaPlayerService: public IInterface
@@ -50,6 +52,16 @@
virtual sp<IOMX> getOMX() = 0;
virtual sp<ICrypto> makeCrypto() = 0;
+ // Connects to a remote display.
+ // 'iface' specifies the address of the local interface on which to listen for
+ // a connection from the remote display, as an IP address and port number
+ // of the form "x.x.x.x:y". The media server should call back into the provided
+ // remote display client when the display connects, disconnects, or an error occurs.
+ // The assumption is that at most one remote display will be connected to the
+ // provided interface at a time.
+ virtual sp<IRemoteDisplay> listenForRemoteDisplay(const sp<IRemoteDisplayClient>& client,
+ const String8& iface) = 0;
+
// If iface == NULL, disable remote display; otherwise
// iface should be of the form "x.x.x.x:y", i.e. the IP address
// of the local interface to bind to and the port number
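For context, a hypothetical caller of the new listenForRemoteDisplay() entry point might look like the following sketch; the service-manager lookup and the port (7236, the usual Wi-Fi Display RTSP port) are assumptions, not part of this change.

```cpp
#include <binder/IServiceManager.h>
#include <media/IMediaPlayerService.h>
#include <media/IRemoteDisplay.h>
#include <media/IRemoteDisplayClient.h>
#include <utils/String8.h>

using namespace android;

// Hypothetical caller sketch; error handling and the iface value are
// illustrative only.
sp<IRemoteDisplay> startListening(const sp<IRemoteDisplayClient> &client) {
    sp<IBinder> binder =
            defaultServiceManager()->getService(String16("media.player"));
    sp<IMediaPlayerService> service =
            interface_cast<IMediaPlayerService>(binder);
    if (service == NULL) {
        return NULL;
    }
    // Listen on the given local interface; at most one remote display
    // is expected to connect at a time.
    return service->listenForRemoteDisplay(client, String8("127.0.0.1:7236"));
}
```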
diff --git a/include/media/IRemoteDisplay.h b/include/media/IRemoteDisplay.h
new file mode 100644
index 0000000..a61704e
--- /dev/null
+++ b/include/media/IRemoteDisplay.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IREMOTEDISPLAY_H
+#define ANDROID_IREMOTEDISPLAY_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+/*
+ * Represents a remote display, such as a Wi-Fi display.
+ *
+ * When the remote display is created, it may not yet be connected. The
+ * remote display asynchronously reports events such as successful
+ * connection, disconnection and errors to an IRemoteDisplayClient
+ * interface provided by the client.
+ */
+class IRemoteDisplay : public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(RemoteDisplay);
+
+ // Disconnects the remote display and stops listening for new connections.
+ virtual status_t dispose() = 0;
+};
+
+
+// ----------------------------------------------------------------------------
+
+class BnRemoteDisplay : public BnInterface<IRemoteDisplay>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif // ANDROID_IREMOTEDISPLAY_H
diff --git a/include/media/IRemoteDisplayClient.h b/include/media/IRemoteDisplayClient.h
new file mode 100644
index 0000000..553ad36
--- /dev/null
+++ b/include/media/IRemoteDisplayClient.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IREMOTEDISPLAYCLIENT_H
+#define ANDROID_IREMOTEDISPLAYCLIENT_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+class ISurfaceTexture;
+
+class IRemoteDisplayClient : public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(RemoteDisplayClient);
+
+ enum {
+ // Flag: The remote display is using a secure transport protocol such as HDCP.
+ kDisplayFlagSecure = 1 << 0,
+ };
+
+ enum {
+ // Error: An unknown / generic error occurred.
+ kDisplayErrorUnknown = 1,
+ // Error: The connection was dropped unexpectedly.
+ kDisplayErrorConnectionDropped = 2,
+ };
+
+ // Indicates that the remote display has been connected successfully.
+ // Provides a surface texture that the client should use to stream buffers to
+ // the remote display.
+ virtual void onDisplayConnected(const sp<ISurfaceTexture>& surfaceTexture,
+ uint32_t width, uint32_t height, uint32_t flags) = 0; // one-way
+
+ // Indicates that the remote display has been disconnected normally.
+ // This method should only be called once the client has called 'dispose()'
+ // on the IRemoteDisplay.
+ // It is currently an error for the display to disconnect for any other reason.
+ virtual void onDisplayDisconnected() = 0; // one-way
+
+ // Indicates that a connection could not be established to the remote display
+ // or an unrecoverable error occurred and the connection was severed.
+ // The media server should continue listening for connection attempts from the
+ // remote display.
+ virtual void onDisplayError(int32_t error) = 0; // one-way
+};
+
+
+// ----------------------------------------------------------------------------
+
+class BnRemoteDisplayClient : public BnInterface<IRemoteDisplayClient>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif // ANDROID_IREMOTEDISPLAYCLIENT_H
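A minimal client implementation of this interface could look like the sketch below; the class name and the logging are illustrative only.

```cpp
#include <media/IRemoteDisplayClient.h>
#include <gui/ISurfaceTexture.h>
#include <utils/Log.h>

namespace android {

// Illustrative client sketch; a real client would wire the surface
// texture into its display pipeline instead of just logging.
class MyRemoteDisplayClient : public BnRemoteDisplayClient {
public:
    virtual void onDisplayConnected(const sp<ISurfaceTexture> &surfaceTexture,
            uint32_t width, uint32_t height, uint32_t flags) {
        // Queue width x height output buffers into surfaceTexture.
        ALOGI("connected %ux%u, secure=%d",
              width, height, (flags & kDisplayFlagSecure) != 0);
    }

    virtual void onDisplayDisconnected() {
        // Per the header, this only arrives after we called dispose().
        ALOGI("disconnected");
    }

    virtual void onDisplayError(int32_t error) {
        // The media server keeps listening for new connection attempts
        // after reporting an error, so no re-arm is needed here.
        ALOGE("display error %d", error);
    }
};

}  // namespace android
```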
diff --git a/include/media/IStreamSource.h b/include/media/IStreamSource.h
index 19646b0..61b9d5a 100644
--- a/include/media/IStreamSource.h
+++ b/include/media/IStreamSource.h
@@ -33,6 +33,12 @@
virtual void setBuffers(const Vector<sp<IMemory> > &buffers) = 0;
virtual void onBufferAvailable(size_t index) = 0;
+
+ enum {
+ // Video PES packets contain exactly one (aligned) access unit.
+ kFlagAlignedVideoData = 1,
+ };
+ virtual uint32_t flags() const { return 0; }
};
struct IStreamListener : public IInterface {
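For reference, a hypothetical stream source opting into the new flag might look like this sketch; everything except the flags() override is illustrative.

```cpp
#include <binder/IMemory.h>
#include <media/IStreamSource.h>

namespace android {

// Sketch of a push source advertising aligned video data; the buffer
// handling is elided and illustrative.
struct AlignedStreamSource : public BnStreamSource {
    virtual void setListener(const sp<IStreamListener> &listener) {
        mListener = listener;
    }
    virtual void setBuffers(const Vector<sp<IMemory> > &buffers) {
        mBuffers = buffers;
    }
    virtual void onBufferAvailable(size_t index) {
        // Fill mBuffers[index] with TS data, then hand it back through
        // mListener->queueBuffer(index, size).
    }
    // Every video PES packet we queue carries exactly one access unit,
    // so the player's TS parser may treat video data as aligned.
    virtual uint32_t flags() const {
        return kFlagAlignedVideoData;
    }

private:
    sp<IStreamListener> mListener;
    Vector<sp<IMemory> > mBuffers;
};

}  // namespace android
```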
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 518948c..b7bee3f 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -150,16 +150,16 @@
virtual status_t setParameter(int key, const Parcel &request) = 0;
virtual status_t getParameter(int key, Parcel *reply) = 0;
- // Right now, only the AAX TX player supports this functionality. For now,
- // provide default implementations which indicate a lack of support for this
- // functionality to make life easier for all of the other media player
- // maintainers out there.
+ // default no-op implementation of optional extensions
virtual status_t setRetransmitEndpoint(const struct sockaddr_in* endpoint) {
return INVALID_OPERATION;
}
virtual status_t getRetransmitEndpoint(struct sockaddr_in* endpoint) {
return INVALID_OPERATION;
}
+ virtual status_t setNextPlayer(const sp<MediaPlayerBase>& next) {
+ return OK;
+ }
// Invoke a generic method on the player by using opaque parcels
// for the request and reply.
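Because the default returns OK, callers cannot tell whether a player actually supports gapless handoff. A player that does would follow roughly this pattern; this is not a real MediaPlayerBase subclass, and the end-of-stream hook name is an assumption.

```cpp
// Illustrative sketch of the contract setNextPlayer() implies: remember
// the handoff target and start it when the current source ends.
struct GaplessPlayerSketch {
    sp<MediaPlayerBase> mNextPlayer;

    status_t setNextPlayer(const sp<MediaPlayerBase> &next) {
        mNextPlayer = next;  // player queued for gapless handoff
        return OK;
    }

    void onEndOfStream() {  // hypothetical end-of-stream notification
        if (mNextPlayer != NULL) {
            mNextPlayer->start();  // hand off without an audible gap
        }
    }
};
```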
diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h
index 724c68d..840b4aa 100644
--- a/include/media/stagefright/SurfaceMediaSource.h
+++ b/include/media/stagefright/SurfaceMediaSource.h
@@ -111,6 +111,9 @@
sp<BufferQueue> getBufferQueue() const { return mBufferQueue; }
+ // To be called before start()
+ status_t setMaxAcquiredBufferCount(size_t count);
+
protected:
// Implementation of the BufferQueue::ConsumerListener interface. These
diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h
index d87902e..8213af9 100644
--- a/include/media/stagefright/Utils.h
+++ b/include/media/stagefright/Utils.h
@@ -42,6 +42,8 @@
struct AMessage;
status_t convertMetaDataToMessage(
const sp<MetaData> &meta, sp<AMessage> *format);
+void convertMessageToMetaData(
+ const sp<AMessage> &format, sp<MetaData> &meta);
} // namespace android
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index bcce063..76308e8 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -27,6 +27,8 @@
IMediaRecorderClient.cpp \
IMediaPlayer.cpp \
IMediaRecorder.cpp \
+ IRemoteDisplay.cpp \
+ IRemoteDisplayClient.cpp \
IStreamSource.cpp \
Metadata.cpp \
mediarecorder.cpp \
diff --git a/media/libmedia/IMediaPlayerService.cpp b/media/libmedia/IMediaPlayerService.cpp
index d3e2e19..c2ec439 100644
--- a/media/libmedia/IMediaPlayerService.cpp
+++ b/media/libmedia/IMediaPlayerService.cpp
@@ -24,9 +24,12 @@
#include <media/IMediaPlayerService.h>
#include <media/IMediaRecorder.h>
#include <media/IOMX.h>
+#include <media/IRemoteDisplay.h>
+#include <media/IRemoteDisplayClient.h>
#include <media/IStreamSource.h>
#include <utils/Errors.h> // for status_t
+#include <utils/String8.h>
namespace android {
@@ -40,7 +43,8 @@
MAKE_CRYPTO,
ENABLE_REMOTE_DISPLAY,
ADD_BATTERY_DATA,
- PULL_BATTERY_DATA
+ PULL_BATTERY_DATA,
+ LISTEN_FOR_REMOTE_DISPLAY,
};
class BpMediaPlayerService: public BpInterface<IMediaPlayerService>
@@ -148,6 +152,17 @@
data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
return remote()->transact(PULL_BATTERY_DATA, data, reply);
}
+
+ virtual sp<IRemoteDisplay> listenForRemoteDisplay(const sp<IRemoteDisplayClient>& client,
+ const String8& iface)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
+ data.writeStrongBinder(client->asBinder());
+ data.writeString8(iface);
+ remote()->transact(LISTEN_FOR_REMOTE_DISPLAY, data, &reply);
+ return interface_cast<IRemoteDisplay>(reply.readStrongBinder());
+ }
};
IMPLEMENT_META_INTERFACE(MediaPlayerService, "android.media.IMediaPlayerService");
@@ -242,6 +257,15 @@
pullBatteryData(reply);
return NO_ERROR;
} break;
+ case LISTEN_FOR_REMOTE_DISPLAY: {
+ CHECK_INTERFACE(IMediaPlayerService, data, reply);
+ sp<IRemoteDisplayClient> client(
+ interface_cast<IRemoteDisplayClient>(data.readStrongBinder()));
+ String8 iface(data.readString8());
+ sp<IRemoteDisplay> display(listenForRemoteDisplay(client, iface));
+ reply->writeStrongBinder(display->asBinder());
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmedia/IRemoteDisplay.cpp b/media/libmedia/IRemoteDisplay.cpp
new file mode 100644
index 0000000..da25a15
--- /dev/null
+++ b/media/libmedia/IRemoteDisplay.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <media/IRemoteDisplay.h>
+
+namespace android {
+
+enum {
+ DISPOSE = IBinder::FIRST_CALL_TRANSACTION,
+};
+
+class BpRemoteDisplay: public BpInterface<IRemoteDisplay>
+{
+public:
+ BpRemoteDisplay(const sp<IBinder>& impl)
+ : BpInterface<IRemoteDisplay>(impl)
+ {
+ }
+
+ status_t dispose()
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IRemoteDisplay::getInterfaceDescriptor());
+ remote()->transact(DISPOSE, data, &reply);
+ return reply.readInt32();
+ }
+};
+
+IMPLEMENT_META_INTERFACE(RemoteDisplay, "android.media.IRemoteDisplay");
+
+// ----------------------------------------------------------------------
+
+status_t BnRemoteDisplay::onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+ switch (code) {
+ case DISPOSE: {
+ CHECK_INTERFACE(IRemoteDisplay, data, reply);
+ reply->writeInt32(dispose());
+ return NO_ERROR;
+ }
+ default:
+ return BBinder::onTransact(code, data, reply, flags);
+ }
+}
+
+}; // namespace android
diff --git a/media/libmedia/IRemoteDisplayClient.cpp b/media/libmedia/IRemoteDisplayClient.cpp
new file mode 100644
index 0000000..4a1b570
--- /dev/null
+++ b/media/libmedia/IRemoteDisplayClient.cpp
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <media/IRemoteDisplayClient.h>
+#include <gui/ISurfaceTexture.h>
+#include <utils/String8.h>
+
+namespace android {
+
+enum {
+ ON_DISPLAY_CONNECTED = IBinder::FIRST_CALL_TRANSACTION,
+ ON_DISPLAY_DISCONNECTED,
+ ON_DISPLAY_ERROR,
+};
+
+class BpRemoteDisplayClient: public BpInterface<IRemoteDisplayClient>
+{
+public:
+ BpRemoteDisplayClient(const sp<IBinder>& impl)
+ : BpInterface<IRemoteDisplayClient>(impl)
+ {
+ }
+
+ void onDisplayConnected(const sp<ISurfaceTexture>& surfaceTexture,
+ uint32_t width, uint32_t height, uint32_t flags)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IRemoteDisplayClient::getInterfaceDescriptor());
+ data.writeStrongBinder(surfaceTexture->asBinder());
+ data.writeInt32(width);
+ data.writeInt32(height);
+ data.writeInt32(flags);
+ remote()->transact(ON_DISPLAY_CONNECTED, data, &reply, IBinder::FLAG_ONEWAY);
+ }
+
+ void onDisplayDisconnected()
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IRemoteDisplayClient::getInterfaceDescriptor());
+ remote()->transact(ON_DISPLAY_DISCONNECTED, data, &reply, IBinder::FLAG_ONEWAY);
+ }
+
+ void onDisplayError(int32_t error)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IRemoteDisplayClient::getInterfaceDescriptor());
+ data.writeInt32(error);
+ remote()->transact(ON_DISPLAY_ERROR, data, &reply, IBinder::FLAG_ONEWAY);
+ }
+};
+
+IMPLEMENT_META_INTERFACE(RemoteDisplayClient, "android.media.IRemoteDisplayClient");
+
+// ----------------------------------------------------------------------
+
+status_t BnRemoteDisplayClient::onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+ switch (code) {
+ case ON_DISPLAY_CONNECTED: {
+ CHECK_INTERFACE(IRemoteDisplayClient, data, reply);
+ sp<ISurfaceTexture> surfaceTexture(
+ interface_cast<ISurfaceTexture>(data.readStrongBinder()));
+ uint32_t width = data.readInt32();
+ uint32_t height = data.readInt32();
+ uint32_t flags = data.readInt32();
+ onDisplayConnected(surfaceTexture, width, height, flags);
+ return NO_ERROR;
+ }
+ case ON_DISPLAY_DISCONNECTED: {
+ CHECK_INTERFACE(IRemoteDisplayClient, data, reply);
+ onDisplayDisconnected();
+ return NO_ERROR;
+ }
+ case ON_DISPLAY_ERROR: {
+ CHECK_INTERFACE(IRemoteDisplayClient, data, reply);
+ int32_t error = data.readInt32();
+ onDisplayError(error);
+ return NO_ERROR;
+ }
+ default:
+ return BBinder::onTransact(code, data, reply, flags);
+ }
+}
+
+}; // namespace android
diff --git a/media/libmedia/IStreamSource.cpp b/media/libmedia/IStreamSource.cpp
index 078be94..78d810d 100644
--- a/media/libmedia/IStreamSource.cpp
+++ b/media/libmedia/IStreamSource.cpp
@@ -37,6 +37,7 @@
SET_LISTENER = IBinder::FIRST_CALL_TRANSACTION,
SET_BUFFERS,
ON_BUFFER_AVAILABLE,
+ FLAGS,
// IStreamListener
QUEUE_BUFFER,
@@ -72,6 +73,14 @@
remote()->transact(
ON_BUFFER_AVAILABLE, data, &reply, IBinder::FLAG_ONEWAY);
}
+
+ virtual uint32_t flags() const {
+ Parcel data, reply;
+ data.writeInterfaceToken(IStreamSource::getInterfaceDescriptor());
+ remote()->transact(FLAGS, data, &reply);
+
+ return reply.readInt32();
+ }
};
IMPLEMENT_META_INTERFACE(StreamSource, "android.hardware.IStreamSource");
@@ -109,6 +118,13 @@
break;
}
+ case FLAGS:
+ {
+ CHECK_INTERFACE(IStreamSource, data, reply);
+ reply->writeInt32(this->flags());
+ break;
+ }
+
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index f821cc3..3f69c11 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -246,6 +246,7 @@
".midi",
".smf",
".xmf",
+ ".mxmf",
".imy",
".rtttl",
".rtx",
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 166bae9..6b57c48 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -44,6 +44,8 @@
#include <utils/SystemClock.h>
#include <utils/Vector.h>
+#include <media/IRemoteDisplay.h>
+#include <media/IRemoteDisplayClient.h>
#include <media/MediaPlayerInterface.h>
#include <media/mediarecorder.h>
#include <media/MediaMetadataRetrieverInterface.h>
@@ -279,6 +281,11 @@
return new Crypto;
}
+sp<IRemoteDisplay> MediaPlayerService::listenForRemoteDisplay(
+ const sp<IRemoteDisplayClient>& client, const String8& iface) {
+ return new RemoteDisplay(client, iface.string());
+}
+
status_t MediaPlayerService::enableRemoteDisplay(const char *iface) {
Mutex::Autolock autoLock(mLock);
@@ -287,20 +294,12 @@
return INVALID_OPERATION;
}
- mRemoteDisplay = new RemoteDisplay;
-
- status_t err = mRemoteDisplay->start(iface);
-
- if (err != OK) {
- mRemoteDisplay.clear();
- return err;
- }
-
+ mRemoteDisplay = new RemoteDisplay(NULL /* client */, iface);
return OK;
}
if (mRemoteDisplay != NULL) {
- mRemoteDisplay->stop();
+ mRemoteDisplay->dispose();
mRemoteDisplay.clear();
}
@@ -923,15 +922,22 @@
Mutex::Autolock l(mLock);
sp<Client> c = static_cast<Client*>(player.get());
mNextClient = c;
- if (mAudioOutput != NULL && c != NULL) {
- mAudioOutput->setNextOutput(c->mAudioOutput);
- } else {
- ALOGE("no current audio output");
+
+ if (c != NULL) {
+ if (mAudioOutput != NULL) {
+ mAudioOutput->setNextOutput(c->mAudioOutput);
+ } else if ((mPlayer != NULL) && !mPlayer->hardwareOutput()) {
+ ALOGE("no current audio output");
+ }
+
+ if ((mPlayer != NULL) && (mNextClient->getPlayer() != NULL)) {
+ mPlayer->setNextPlayer(mNextClient->getPlayer());
+ }
}
+
return OK;
}
-
status_t MediaPlayerService::Client::seekTo(int msec)
{
ALOGV("[%d] seekTo(%d)", mConnId, msec);
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 2577c58..ca8a96f 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -41,6 +41,8 @@
class IMediaRecorder;
class IMediaMetadataRetriever;
class IOMX;
+class IRemoteDisplay;
+class IRemoteDisplayClient;
class MediaRecorderClient;
struct RemoteDisplay;
@@ -248,6 +250,9 @@
virtual sp<IMemory> decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat);
virtual sp<IOMX> getOMX();
virtual sp<ICrypto> makeCrypto();
+
+ virtual sp<IRemoteDisplay> listenForRemoteDisplay(const sp<IRemoteDisplayClient>& client,
+ const String8& iface);
virtual status_t enableRemoteDisplay(const char *iface);
virtual status_t dump(int fd, const Vector<String16>& args);
diff --git a/media/libmediaplayerservice/RemoteDisplay.cpp b/media/libmediaplayerservice/RemoteDisplay.cpp
index 49f7278..5542bb5 100644
--- a/media/libmediaplayerservice/RemoteDisplay.cpp
+++ b/media/libmediaplayerservice/RemoteDisplay.cpp
@@ -19,29 +19,27 @@
#include "ANetworkSession.h"
#include "source/WifiDisplaySource.h"
+#include <media/IRemoteDisplayClient.h>
+
namespace android {
-RemoteDisplay::RemoteDisplay()
- : mInitCheck(NO_INIT),
- mLooper(new ALooper),
+RemoteDisplay::RemoteDisplay(
+ const sp<IRemoteDisplayClient> &client, const char *iface)
+ : mLooper(new ALooper),
mNetSession(new ANetworkSession),
- mSource(new WifiDisplaySource(mNetSession)) {
+ mSource(new WifiDisplaySource(mNetSession, client)) {
mLooper->registerHandler(mSource);
+
+ mNetSession->start();
+ mLooper->start();
+
+ mSource->start(iface);
}
RemoteDisplay::~RemoteDisplay() {
}
-status_t RemoteDisplay::start(const char *iface) {
- mNetSession->start();
- mLooper->start();
-
- mSource->start(iface);
-
- return OK;
-}
-
-status_t RemoteDisplay::stop() {
+status_t RemoteDisplay::dispose() {
mSource->stop();
mLooper->stop();
@@ -51,4 +49,3 @@
}
} // namespace android
-
diff --git a/media/libmediaplayerservice/RemoteDisplay.h b/media/libmediaplayerservice/RemoteDisplay.h
index 3607d06..0d87250 100644
--- a/media/libmediaplayerservice/RemoteDisplay.h
+++ b/media/libmediaplayerservice/RemoteDisplay.h
@@ -18,6 +18,7 @@
#define REMOTE_DISPLAY_H_
+#include <media/IRemoteDisplay.h>
#include <media/stagefright/foundation/ABase.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
@@ -26,20 +27,18 @@
struct ALooper;
struct ANetworkSession;
+struct IRemoteDisplayClient;
struct WifiDisplaySource;
-struct RemoteDisplay : public RefBase {
- RemoteDisplay();
+struct RemoteDisplay : public BnRemoteDisplay {
+ RemoteDisplay(const sp<IRemoteDisplayClient> &client, const char *iface);
- status_t start(const char *iface);
- status_t stop();
+ virtual status_t dispose();
protected:
virtual ~RemoteDisplay();
private:
- status_t mInitCheck;
-
sp<ALooper> mNetLooper;
sp<ALooper> mLooper;
sp<ANetworkSession> mNetSession;
diff --git a/media/libmediaplayerservice/nuplayer/Android.mk b/media/libmediaplayerservice/nuplayer/Android.mk
index f469054..f946c1c 100644
--- a/media/libmediaplayerservice/nuplayer/Android.mk
+++ b/media/libmediaplayerservice/nuplayer/Android.mk
@@ -12,8 +12,6 @@
RTSPSource.cpp \
StreamingSource.cpp \
mp4/MP4Source.cpp \
- mp4/Parser.cpp \
- mp4/TrackFragment.cpp \
LOCAL_C_INCLUDES := \
$(TOP)/frameworks/av/media/libstagefright/httplive \
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index a02732b..dc1e351 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -68,7 +68,8 @@
mSkipRenderingVideoUntilMediaTimeUs(-1ll),
mVideoLateByUs(0ll),
mNumFramesTotal(0ll),
- mNumFramesDropped(0ll) {
+ mNumFramesDropped(0ll),
+ mVideoScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW) {
}
NuPlayer::~NuPlayer() {
@@ -217,6 +218,9 @@
CHECK(msg->findObject("native-window", &obj));
mNativeWindow = static_cast<NativeWindowWrapper *>(obj.get());
+
+ // XXX - ignore error from setVideoScalingMode for now
+ setVideoScalingMode(mVideoScalingMode);
break;
}
@@ -293,8 +297,8 @@
break;
}
- if (mAudioDecoder == NULL && mAudioSink != NULL ||
- mVideoDecoder == NULL && mNativeWindow != NULL) {
+ if ((mAudioDecoder == NULL && mAudioSink != NULL)
+ || (mVideoDecoder == NULL && mNativeWindow != NULL)) {
msg->post(100000ll);
mScanSourcesPending = true;
}
@@ -957,4 +961,18 @@
return NULL;
}
+status_t NuPlayer::setVideoScalingMode(int32_t mode) {
+ mVideoScalingMode = mode;
+ if (mNativeWindow != NULL) {
+ status_t ret = native_window_set_scaling_mode(
+ mNativeWindow->getNativeWindow().get(), mVideoScalingMode);
+ if (ret != OK) {
+ ALOGE("Failed to set scaling mode (%d): %s",
+ -ret, strerror(-ret));
+ return ret;
+ }
+ }
+ return OK;
+}
+
} // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 996806e..36d3a9c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -55,6 +55,8 @@
// Will notify the driver through "notifySeekComplete" once finished.
void seekToAsync(int64_t seekTimeUs);
+ status_t setVideoScalingMode(int32_t mode);
+
protected:
virtual ~NuPlayer();
@@ -130,6 +132,8 @@
int64_t mVideoLateByUs;
int64_t mNumFramesTotal, mNumFramesDropped;
+ int32_t mVideoScalingMode;
+
status_t instantiateDecoder(bool audio, sp<Decoder> *decoder);
status_t feedDecoderInputData(bool audio, const sp<AMessage> &msg);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 441cbf3..d03601f 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -259,7 +259,29 @@
}
status_t NuPlayerDriver::invoke(const Parcel &request, Parcel *reply) {
- return INVALID_OPERATION;
+ if (reply == NULL) {
+ ALOGE("reply is a NULL pointer");
+ return BAD_VALUE;
+ }
+
+ int32_t methodId;
+ status_t ret = request.readInt32(&methodId);
+ if (ret != OK) {
+ ALOGE("Failed to retrieve the requested method to invoke");
+ return ret;
+ }
+
+ switch (methodId) {
+ case INVOKE_ID_SET_VIDEO_SCALING_MODE:
+ {
+ int mode = request.readInt32();
+ return mPlayer->setVideoScalingMode(mode);
+ }
+ default:
+ {
+ return INVALID_OPERATION;
+ }
+ }
}
void NuPlayerDriver::setAudioSink(const sp<AudioSink> &audioSink) {
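On the client side, this invoke branch would be reached roughly as follows; a hedged sketch assuming INVOKE_ID_SET_VIDEO_SCALING_MODE (mediaplayer.h) is the method id being switched on above.

```cpp
#include <binder/Parcel.h>
#include <media/IMediaPlayer.h>
#include <media/mediaplayer.h>

using namespace android;

// Hedged client-side sketch; assumes a connected android::MediaPlayer.
status_t setScalingMode(const sp<MediaPlayer> &player, int32_t mode) {
    Parcel request, reply;
    // BpMediaPlayer::invoke() forwards the Parcel as-is, so the caller
    // writes the interface token itself.
    request.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
    request.writeInt32(INVOKE_ID_SET_VIDEO_SCALING_MODE);
    request.writeInt32(mode);  // e.g. NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW
    return player->invoke(request, &reply);
}
```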
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 4a704e3..5a7a785 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -23,6 +23,7 @@
#include "AnotherPacketSource.h"
#include "MyHandler.h"
+#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
namespace android {
@@ -159,6 +160,13 @@
}
sp<AnotherPacketSource> NuPlayer::RTSPSource::getSource(bool audio) {
+ if (mTSParser != NULL) {
+ sp<MediaSource> source = mTSParser->getSource(
+ audio ? ATSParser::AUDIO : ATSParser::VIDEO);
+
+ return static_cast<AnotherPacketSource *>(source.get());
+ }
+
return audio ? mAudioTrack : mVideoTrack;
}
@@ -255,7 +263,12 @@
{
size_t trackIndex;
CHECK(msg->findSize("trackIndex", &trackIndex));
- CHECK_LT(trackIndex, mTracks.size());
+
+ if (mTSParser == NULL) {
+ CHECK_LT(trackIndex, mTracks.size());
+ } else {
+ CHECK_EQ(trackIndex, 0u);
+ }
sp<ABuffer> accessUnit;
CHECK(msg->findBuffer("accessUnit", &accessUnit));
@@ -267,6 +280,37 @@
break;
}
+ if (mTSParser != NULL) {
+ size_t offset = 0;
+ status_t err = OK;
+ while (offset + 188 <= accessUnit->size()) {
+ err = mTSParser->feedTSPacket(
+ accessUnit->data() + offset, 188);
+ if (err != OK) {
+ break;
+ }
+
+ offset += 188;
+ }
+
+ if (offset < accessUnit->size()) {
+ err = ERROR_MALFORMED;
+ }
+
+ if (err != OK) {
+ sp<AnotherPacketSource> source = getSource(false /* audio */);
+ if (source != NULL) {
+ source->signalEOS(err);
+ }
+
+ source = getSource(true /* audio */);
+ if (source != NULL) {
+ source->signalEOS(err);
+ }
+ }
+ break;
+ }
+
TrackInfo *info = &mTracks.editItemAt(trackIndex);
sp<AnotherPacketSource> source = info->mSource;
@@ -296,14 +340,28 @@
case MyHandler::kWhatEOS:
{
- size_t trackIndex;
- CHECK(msg->findSize("trackIndex", &trackIndex));
- CHECK_LT(trackIndex, mTracks.size());
-
int32_t finalResult;
CHECK(msg->findInt32("finalResult", &finalResult));
CHECK_NE(finalResult, (status_t)OK);
+ if (mTSParser != NULL) {
+ sp<AnotherPacketSource> source = getSource(false /* audio */);
+ if (source != NULL) {
+ source->signalEOS(finalResult);
+ }
+
+ source = getSource(true /* audio */);
+ if (source != NULL) {
+ source->signalEOS(finalResult);
+ }
+
+ return;
+ }
+
+ size_t trackIndex;
+ CHECK(msg->findSize("trackIndex", &trackIndex));
+ CHECK_LT(trackIndex, mTracks.size());
+
TrackInfo *info = &mTracks.editItemAt(trackIndex);
sp<AnotherPacketSource> source = info->mSource;
if (source != NULL) {
@@ -364,6 +422,14 @@
const char *mime;
CHECK(format->findCString(kKeyMIMEType, &mime));
+ if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2TS)) {
+ // Very special case for MPEG2 Transport Streams.
+ CHECK_EQ(numTracks, 1u);
+
+ mTSParser = new ATSParser;
+ return;
+ }
+
bool isAudio = !strncasecmp(mime, "audio/", 6);
bool isVideo = !strncasecmp(mime, "video/", 6);
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.h b/media/libmediaplayerservice/nuplayer/RTSPSource.h
index c8409e5..f07c724 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.h
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.h
@@ -20,6 +20,8 @@
#include "NuPlayerSource.h"
+#include "ATSParser.h"
+
#include <media/stagefright/foundation/AHandlerReflector.h>
namespace android {
@@ -99,6 +101,8 @@
sp<AnotherPacketSource> mAudioTrack;
sp<AnotherPacketSource> mVideoTrack;
+ sp<ATSParser> mTSParser;
+
int32_t mSeekGeneration;
sp<AnotherPacketSource> getSource(bool audio);
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
index b696aa4..a1fd2ed 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
@@ -42,7 +42,15 @@
void NuPlayer::StreamingSource::start() {
mStreamListener = new NuPlayerStreamListener(mSource, 0);
- mTSParser = new ATSParser(ATSParser::TS_TIMESTAMPS_ARE_ABSOLUTE);
+
+ uint32_t sourceFlags = mSource->flags();
+
+ uint32_t parserFlags = ATSParser::TS_TIMESTAMPS_ARE_ABSOLUTE;
+ if (sourceFlags & IStreamSource::kFlagAlignedVideoData) {
+ parserFlags |= ATSParser::ALIGNED_VIDEO_DATA;
+ }
+
+ mTSParser = new ATSParser(parserFlags);
mStreamListener->start();
}
@@ -138,7 +146,17 @@
return finalResult == OK ? -EWOULDBLOCK : finalResult;
}
- return source->dequeueAccessUnit(accessUnit);
+ status_t err = source->dequeueAccessUnit(accessUnit);
+
+#if !defined(LOG_NDEBUG) || LOG_NDEBUG == 0
+ if (err == OK) {
+ int64_t timeUs;
+ CHECK((*accessUnit)->meta()->findInt64("timeUs", &timeUs));
+ ALOGV("dequeueAccessUnit timeUs=%lld us", timeUs);
+ }
+#endif
+
+ return err;
}
} // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp b/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp
index 25c91e9..ffb3a65 100644
--- a/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp
+++ b/media/libmediaplayerservice/nuplayer/mp4/MP4Source.cpp
@@ -16,7 +16,7 @@
#include "MP4Source.h"
-#include "Parser.h"
+#include "FragmentedMP4Parser.h"
#include "../NuPlayerStreamListener.h"
#include <media/IStreamSource.h>
@@ -26,7 +26,7 @@
namespace android {
-struct StreamSource : public Parser::Source {
+struct StreamSource : public FragmentedMP4Parser::Source {
StreamSource(const sp<IStreamSource> &source)
: mListener(new NuPlayer::NuPlayerStreamListener(source, 0)),
mPosition(0) {
@@ -93,6 +93,10 @@
return total;
}
+ bool isSeekable() {
+ return false;
+ }
+
private:
sp<NuPlayer::NuPlayerStreamListener> mListener;
off64_t mPosition;
@@ -103,7 +107,7 @@
MP4Source::MP4Source(const sp<IStreamSource> &source)
: mSource(source),
mLooper(new ALooper),
- mParser(new Parser),
+ mParser(new FragmentedMP4Parser),
mEOS(false) {
mLooper->registerHandler(mParser);
}
diff --git a/media/libmediaplayerservice/nuplayer/mp4/MP4Source.h b/media/libmediaplayerservice/nuplayer/mp4/MP4Source.h
index 57430aa..4e927af 100644
--- a/media/libmediaplayerservice/nuplayer/mp4/MP4Source.h
+++ b/media/libmediaplayerservice/nuplayer/mp4/MP4Source.h
@@ -21,7 +21,7 @@
namespace android {
-struct Parser;
+struct FragmentedMP4Parser;
struct MP4Source : public NuPlayer::Source {
MP4Source(const sp<IStreamSource> &source);
@@ -41,7 +41,7 @@
private:
sp<IStreamSource> mSource;
sp<ALooper> mLooper;
- sp<Parser> mParser;
+ sp<FragmentedMP4Parser> mParser;
bool mEOS;
DISALLOW_EVIL_CONSTRUCTORS(MP4Source);
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 3fd0f85..f40982e 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -19,6 +19,7 @@
ESDS.cpp \
FileSource.cpp \
FLACExtractor.cpp \
+ FragmentedMP4Extractor.cpp \
HTTPBase.cpp \
JPEGSource.cpp \
MP3Extractor.cpp \
@@ -53,6 +54,8 @@
WVMExtractor.cpp \
XINGSeeker.cpp \
avc_utils.cpp \
+ mp4/FragmentedMP4Parser.cpp \
+ mp4/TrackFragment.cpp \
LOCAL_C_INCLUDES:= \
$(TOP)/frameworks/av/include/media/stagefright/timedtext \
diff --git a/media/libstagefright/DRMExtractor.cpp b/media/libstagefright/DRMExtractor.cpp
index 524c3aa..63cb430 100644
--- a/media/libstagefright/DRMExtractor.cpp
+++ b/media/libstagefright/DRMExtractor.cpp
@@ -15,11 +15,6 @@
*/
#include "include/DRMExtractor.h"
-#include "include/AMRExtractor.h"
-#include "include/MP3Extractor.h"
-#include "include/MPEG4Extractor.h"
-#include "include/WAVExtractor.h"
-#include "include/OggExtractor.h"
#include <arpa/inet.h>
#include <utils/String8.h>
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 1de808e..9d0eea2 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -20,17 +20,18 @@
#include "include/chromium_http_stub.h"
#endif
-#include "include/MP3Extractor.h"
-#include "include/MPEG4Extractor.h"
-#include "include/WAVExtractor.h"
-#include "include/OggExtractor.h"
-#include "include/MPEG2PSExtractor.h"
-#include "include/MPEG2TSExtractor.h"
-#include "include/NuCachedSource2.h"
-#include "include/HTTPBase.h"
+#include "include/AACExtractor.h"
#include "include/DRMExtractor.h"
#include "include/FLACExtractor.h"
-#include "include/AACExtractor.h"
+#include "include/FragmentedMP4Extractor.h"
+#include "include/HTTPBase.h"
+#include "include/MP3Extractor.h"
+#include "include/MPEG2PSExtractor.h"
+#include "include/MPEG2TSExtractor.h"
+#include "include/MPEG4Extractor.h"
+#include "include/NuCachedSource2.h"
+#include "include/OggExtractor.h"
+#include "include/WAVExtractor.h"
#include "include/WVMExtractor.h"
#include "matroska/MatroskaExtractor.h"
@@ -110,6 +111,7 @@
// static
void DataSource::RegisterDefaultSniffers() {
RegisterSniffer(SniffMPEG4);
+ RegisterSniffer(SniffFragmentedMP4);
RegisterSniffer(SniffMatroska);
RegisterSniffer(SniffOgg);
RegisterSniffer(SniffWAV);
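The new sniffer follows the standard contract; for reference, a hypothetical sniffer registered the same way might look like this (the magic bytes, mime string and confidence value are illustrative only).

```cpp
#include <string.h>

#include <media/stagefright/DataSource.h>
#include <media/stagefright/foundation/AMessage.h>
#include <utils/String8.h>

using namespace android;

// Illustrative sniffer sketch following the SnifferFunc contract.
static bool SniffMyFormat(
        const sp<DataSource> &source, String8 *mimeType, float *confidence,
        sp<AMessage> *) {
    uint8_t header[4];
    if (source->readAt(0, header, 4) < 4 || memcmp(header, "MYFM", 4)) {
        return false;  // not ours; other registered sniffers get a chance
    }
    *mimeType = String8("application/x-myformat");
    *confidence = 0.3f;  // DataSource::sniff() keeps the best confidence
    return true;
}

// Registered the same way as the defaults above:
//     DataSource::RegisterSniffer(SniffMyFormat);
```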
diff --git a/media/libstagefright/FragmentedMP4Extractor.cpp b/media/libstagefright/FragmentedMP4Extractor.cpp
new file mode 100644
index 0000000..82712ef
--- /dev/null
+++ b/media/libstagefright/FragmentedMP4Extractor.cpp
@@ -0,0 +1,460 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FragmentedMP4Extractor"
+#include <utils/Log.h>
+
+#include "include/FragmentedMP4Extractor.h"
+#include "include/SampleTable.h"
+#include "include/ESDS.h"
+
+#include <arpa/inet.h>
+
+#include <ctype.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <cutils/properties.h> // for property_get
+
+#include <media/stagefright/foundation/ABitReader.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+#include <utils/String8.h>
+
+namespace android {
+
+class FragmentedMPEG4Source : public MediaSource {
+public:
+ // Caller retains ownership of the Parser
+ FragmentedMPEG4Source(bool audio,
+ const sp<MetaData> &format,
+ const sp<FragmentedMP4Parser> &parser,
+ const sp<FragmentedMP4Extractor> &extractor);
+
+ virtual status_t start(MetaData *params = NULL);
+ virtual status_t stop();
+
+ virtual sp<MetaData> getFormat();
+
+ virtual status_t read(
+ MediaBuffer **buffer, const ReadOptions *options = NULL);
+
+protected:
+ virtual ~FragmentedMPEG4Source();
+
+private:
+ Mutex mLock;
+
+ sp<MetaData> mFormat;
+ sp<FragmentedMP4Parser> mParser;
+ sp<FragmentedMP4Extractor> mExtractor;
+ bool mIsAudioTrack;
+ uint32_t mCurrentSampleIndex;
+
+ bool mIsAVC;
+ size_t mNALLengthSize;
+
+ bool mStarted;
+
+ MediaBufferGroup *mGroup;
+
+ bool mWantsNALFragments;
+
+ uint8_t *mSrcBuffer;
+
+ FragmentedMPEG4Source(const FragmentedMPEG4Source &);
+ FragmentedMPEG4Source &operator=(const FragmentedMPEG4Source &);
+};
+
+
+FragmentedMP4Extractor::FragmentedMP4Extractor(const sp<DataSource> &source)
+ : mLooper(new ALooper),
+ mParser(new FragmentedMP4Parser()),
+ mDataSource(source),
+ mInitCheck(NO_INIT),
+ mFileMetaData(new MetaData) {
+ ALOGV("FragmentedMP4Extractor");
+ mLooper->registerHandler(mParser);
+ mLooper->start(false /* runOnCallingThread */);
+ mParser->start(mDataSource);
+
+ bool hasVideo = mParser->getFormat(false /* audio */, true /* synchronous */) != NULL;
+ bool hasAudio = mParser->getFormat(true /* audio */, true /* synchronous */) != NULL;
+
+ ALOGV("number of tracks: %d", countTracks());
+
+ if (hasVideo) {
+ mFileMetaData->setCString(
+ kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG4);
+ } else if (hasAudio) {
+ mFileMetaData->setCString(kKeyMIMEType, "audio/mp4");
+ } else {
+ ALOGE("no audio and no video, no idea what file type this is");
+ }
+ // tracks are numbered such that video track is first, audio track is second
+ if (hasAudio && hasVideo) {
+ mTrackCount = 2;
+ mAudioTrackIndex = 1;
+ } else if (hasAudio) {
+ mTrackCount = 1;
+ mAudioTrackIndex = 0;
+ } else if (hasVideo) {
+ mTrackCount = 1;
+ mAudioTrackIndex = -1;
+ } else {
+ mTrackCount = 0;
+ mAudioTrackIndex = -1;
+ }
+}
+
+FragmentedMP4Extractor::~FragmentedMP4Extractor() {
+ ALOGV("~FragmentedMP4Extractor");
+ mLooper->stop();
+}
+
+uint32_t FragmentedMP4Extractor::flags() const {
+ return CAN_PAUSE |
+ (mParser->isSeekable() ? (CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK) : 0);
+}
+
+sp<MetaData> FragmentedMP4Extractor::getMetaData() {
+ return mFileMetaData;
+}
+
+size_t FragmentedMP4Extractor::countTracks() {
+ return mTrackCount;
+}
+
+
+sp<MetaData> FragmentedMP4Extractor::getTrackMetaData(
+ size_t index, uint32_t flags) {
+ if (index >= countTracks()) {
+ return NULL;
+ }
+
+ sp<AMessage> msg = mParser->getFormat(index == mAudioTrackIndex, true /* synchronous */);
+
+ if (msg == NULL) {
+ ALOGV("got null format for track %d", index);
+ return NULL;
+ }
+
+ sp<MetaData> meta = new MetaData();
+ convertMessageToMetaData(msg, meta);
+ return meta;
+}
+
+static void MakeFourCCString(uint32_t x, char *s) {
+ s[0] = x >> 24;
+ s[1] = (x >> 16) & 0xff;
+ s[2] = (x >> 8) & 0xff;
+ s[3] = x & 0xff;
+ s[4] = '\0';
+}
+
+sp<MediaSource> FragmentedMP4Extractor::getTrack(size_t index) {
+ if (index >= countTracks()) {
+ return NULL;
+ }
+ return new FragmentedMPEG4Source(index == mAudioTrackIndex, getTrackMetaData(index, 0), mParser, this);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+
+FragmentedMPEG4Source::FragmentedMPEG4Source(
+ bool audio,
+ const sp<MetaData> &format,
+ const sp<FragmentedMP4Parser> &parser,
+ const sp<FragmentedMP4Extractor> &extractor)
+ : mFormat(format),
+ mParser(parser),
+ mExtractor(extractor),
+ mIsAudioTrack(audio),
+ mStarted(false),
+ mGroup(NULL),
+ mWantsNALFragments(false),
+ mSrcBuffer(NULL) {
+}
+
+FragmentedMPEG4Source::~FragmentedMPEG4Source() {
+ if (mStarted) {
+ stop();
+ }
+}
+
+status_t FragmentedMPEG4Source::start(MetaData *params) {
+ Mutex::Autolock autoLock(mLock);
+
+ CHECK(!mStarted);
+
+ int32_t val;
+ if (params && params->findInt32(kKeyWantsNALFragments, &val)
+ && val != 0) {
+ mWantsNALFragments = true;
+ } else {
+ mWantsNALFragments = false;
+ }
+ ALOGV("caller wants NAL fragments: %s", mWantsNALFragments ? "yes" : "no");
+
+ mGroup = new MediaBufferGroup;
+
+ int32_t max_size = 65536;
+ // XXX CHECK(mFormat->findInt32(kKeyMaxInputSize, &max_size));
+
+ mGroup->add_buffer(new MediaBuffer(max_size));
+
+ mSrcBuffer = new uint8_t[max_size];
+
+ mStarted = true;
+
+ return OK;
+}
+
+status_t FragmentedMPEG4Source::stop() {
+ Mutex::Autolock autoLock(mLock);
+
+ CHECK(mStarted);
+
+ delete[] mSrcBuffer;
+ mSrcBuffer = NULL;
+
+ delete mGroup;
+ mGroup = NULL;
+
+ mStarted = false;
+ mCurrentSampleIndex = 0;
+
+ return OK;
+}
+
+sp<MetaData> FragmentedMPEG4Source::getFormat() {
+ Mutex::Autolock autoLock(mLock);
+
+ return mFormat;
+}
+
+
+status_t FragmentedMPEG4Source::read(
+ MediaBuffer **out, const ReadOptions *options) {
+ int64_t seekTimeUs;
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+ mParser->seekTo(mIsAudioTrack, seekTimeUs);
+ }
+ MediaBuffer *buffer = NULL;
+ mGroup->acquire_buffer(&buffer);
+ sp<ABuffer> parseBuffer;
+
+ status_t ret = mParser->dequeueAccessUnit(mIsAudioTrack, &parseBuffer, true /* synchronous */);
+ if (ret != OK) {
+ buffer->release();
+ ALOGV("returning %d", ret);
+ return ret;
+ }
+ sp<AMessage> meta = parseBuffer->meta();
+ int64_t timeUs;
+ CHECK(meta->findInt64("timeUs", &timeUs));
+ buffer->meta_data()->setInt64(kKeyTime, timeUs);
+ buffer->set_range(0, parseBuffer->size());
+ memcpy(buffer->data(), parseBuffer->data(), parseBuffer->size());
+ *out = buffer;
+ return OK;
+}
+
+
+static bool isCompatibleBrand(uint32_t fourcc) {
+ static const uint32_t kCompatibleBrands[] = {
+ FOURCC('i', 's', 'o', 'm'),
+ FOURCC('i', 's', 'o', '2'),
+ FOURCC('a', 'v', 'c', '1'),
+ FOURCC('3', 'g', 'p', '4'),
+ FOURCC('m', 'p', '4', '1'),
+ FOURCC('m', 'p', '4', '2'),
+
+ // Won't promise that the following file types can be played.
+ // Just give these file types a chance.
+ FOURCC('q', 't', ' ', ' '), // Apple's QuickTime
+ FOURCC('M', 'S', 'N', 'V'), // Sony's PSP
+
+ FOURCC('3', 'g', '2', 'a'), // 3GPP2
+ FOURCC('3', 'g', '2', 'b'),
+ };
+
+ for (size_t i = 0;
+ i < sizeof(kCompatibleBrands) / sizeof(kCompatibleBrands[0]);
+ ++i) {
+ if (kCompatibleBrands[i] == fourcc) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// Attempt to actually parse the 'ftyp' atom and determine if a suitable
+// compatible brand is present.
+// Also try to identify where this file's metadata ends
+// (end of the 'moov' atom) and report it to the caller as part of
+// the metadata.
+static bool Sniff(
+ const sp<DataSource> &source, String8 *mimeType, float *confidence,
+ sp<AMessage> *meta) {
+ // We scan up to 128k bytes to identify this file as an MP4.
+ static const off64_t kMaxScanOffset = 128ll * 1024ll;
+
+ off64_t offset = 0ll;
+ bool foundGoodFileType = false;
+ bool isFragmented = false;
+ off64_t moovAtomEndOffset = -1ll;
+ bool done = false;
+
+ while (!done && offset < kMaxScanOffset) {
+ uint32_t hdr[2];
+ if (source->readAt(offset, hdr, 8) < 8) {
+ return false;
+ }
+
+ uint64_t chunkSize = ntohl(hdr[0]);
+ uint32_t chunkType = ntohl(hdr[1]);
+ off64_t chunkDataOffset = offset + 8;
+
+ if (chunkSize == 1) {
+ if (source->readAt(offset + 8, &chunkSize, 8) < 8) {
+ return false;
+ }
+
+ chunkSize = ntoh64(chunkSize);
+ chunkDataOffset += 8;
+
+ if (chunkSize < 16) {
+ // The smallest valid chunk is 16 bytes long in this case.
+ return false;
+ }
+ } else if (chunkSize < 8) {
+ // The smallest valid chunk is 8 bytes long.
+ return false;
+ }
+
+ off64_t chunkDataSize = offset + chunkSize - chunkDataOffset;
+
+ char chunkstring[5];
+ MakeFourCCString(chunkType, chunkstring);
+ ALOGV("saw chunk type %s, size %lld @ %lld", chunkstring, chunkSize, offset);
+ switch (chunkType) {
+ case FOURCC('f', 't', 'y', 'p'):
+ {
+ if (chunkDataSize < 8) {
+ return false;
+ }
+
+ uint32_t numCompatibleBrands = (chunkDataSize - 8) / 4;
+ for (size_t i = 0; i < numCompatibleBrands + 2; ++i) {
+ if (i == 1) {
+ // Skip this index, it refers to the minorVersion,
+ // not a brand.
+ continue;
+ }
+
+ uint32_t brand;
+ if (source->readAt(
+ chunkDataOffset + 4 * i, &brand, 4) < 4) {
+ return false;
+ }
+
+ brand = ntohl(brand);
+ char brandstring[5];
+ MakeFourCCString(brand, brandstring);
+ ALOGV("Brand: %s", brandstring);
+
+ if (isCompatibleBrand(brand)) {
+ foundGoodFileType = true;
+ break;
+ }
+ }
+
+ if (!foundGoodFileType) {
+ return false;
+ }
+
+ break;
+ }
+
+ case FOURCC('m', 'o', 'o', 'v'):
+ {
+ moovAtomEndOffset = offset + chunkSize;
+ break;
+ }
+
+ case FOURCC('m', 'o', 'o', 'f'):
+ {
+ // this is kind of broken, since we might not actually find a
+ // moof box in the first 128k.
+ isFragmented = true;
+ done = true;
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ offset += chunkSize;
+ }
+
+ if (!foundGoodFileType || !isFragmented) {
+ return false;
+ }
+
+ *mimeType = MEDIA_MIMETYPE_CONTAINER_MPEG4;
+ *confidence = 0.5f; // slightly more than MPEG4Extractor
+
+ if (moovAtomEndOffset >= 0) {
+ *meta = new AMessage;
+ (*meta)->setInt64("meta-data-size", moovAtomEndOffset);
+ (*meta)->setInt32("fragmented", 1); // tell MediaExtractor what to instantiate
+
+ ALOGV("found metadata size: %lld", moovAtomEndOffset);
+ }
+
+ return true;
+}
+
+// used by DataSource::RegisterDefaultSniffers
+bool SniffFragmentedMP4(
+ const sp<DataSource> &source, String8 *mimeType, float *confidence,
+ sp<AMessage> *meta) {
+ ALOGV("SniffFragmentedMP4");
+ char prop[PROPERTY_VALUE_MAX];
+ if (property_get("media.stagefright.use-fragmp4", prop, NULL)
+ && (!strcmp(prop, "1") || !strcasecmp(prop, "true"))) {
+ return Sniff(source, mimeType, confidence, meta);
+ }
+
+ return false;
+}
+
+} // namespace android
diff --git a/media/libstagefright/MP3Extractor.cpp b/media/libstagefright/MP3Extractor.cpp
index 6abaf23..d94054b 100644
--- a/media/libstagefright/MP3Extractor.cpp
+++ b/media/libstagefright/MP3Extractor.cpp
@@ -228,6 +228,7 @@
virtual ~MP3Source();
private:
+ static const size_t kMaxFrameSize;
sp<MetaData> mMeta;
sp<DataSource> mDataSource;
off64_t mFirstFramePos;
@@ -405,6 +406,13 @@
////////////////////////////////////////////////////////////////////////////////
+// The theoretical maximum frame size for an MPEG audio stream should occur
+// while playing a Layer 2, MPEGv2.5 audio stream at 160kbps (with padding).
+// The size of this frame should be...
+// ((1152 samples/frame * 160000 bits/sec) /
+// (8000 samples/sec * 8 bits/byte)) + 1 padding byte/frame = 2881 bytes/frame.
+// Set our max frame size to the nearest power of 2 above this size (aka, 4kB)
+const size_t MP3Source::kMaxFrameSize = (1 << 12); /* 4096 bytes */
MP3Source::MP3Source(
const sp<MetaData> &meta, const sp<DataSource> &source,
off64_t first_frame_pos, uint32_t fixed_header,
@@ -433,7 +441,6 @@
mGroup = new MediaBufferGroup;
- const size_t kMaxFrameSize = 32768;
mGroup->add_buffer(new MediaBuffer(kMaxFrameSize));
mCurrentPos = mFirstFramePos;
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index a572541..7d49ef0 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+//#define LOG_NDEBUG 0
#define LOG_TAG "MPEG4Extractor"
#include <utils/Log.h>
@@ -408,7 +409,7 @@
}
// Reads an encoded integer 7 bits at a time until it encounters the high bit clear.
-int32_t readSize(off64_t offset,
+static int32_t readSize(off64_t offset,
const sp<DataSource> DataSource, uint8_t *numOfBytes) {
uint32_t size = 0;
uint8_t data;
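The 7-bits-per-byte length coding that readSize() parses is the same expandable-size scheme reassembleESDS() in Utils.cpp writes; a standalone decoding sketch:

```cpp
#include <stddef.h>
#include <stdint.h>

// Each byte contributes 7 bits, high group first; the top bit marks
// "more bytes follow". Sketch only; readSize() above reads from a
// DataSource instead of a flat buffer.
static uint32_t decodeExpandableSize(const uint8_t *data, size_t *numBytes) {
    uint32_t size = 0;
    size_t n = 0;
    uint8_t b;
    do {
        b = data[n++];
        size = (size << 7) | (b & 0x7f);
    } while (b & 0x80);
    *numBytes = n;
    return size;  // e.g. {0x87, 0x68} decodes to (7 << 7) | 0x68 == 1000
}
```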
diff --git a/media/libstagefright/MediaDefs.cpp b/media/libstagefright/MediaDefs.cpp
index 2740d6b..e7b5903 100644
--- a/media/libstagefright/MediaDefs.cpp
+++ b/media/libstagefright/MediaDefs.cpp
@@ -42,7 +42,7 @@
const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS = "audio/aac-adts";
const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
-const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/wav";
+const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/x-wav";
const char *MEDIA_MIMETYPE_CONTAINER_OGG = "application/ogg";
const char *MEDIA_MIMETYPE_CONTAINER_MATROSKA = "video/x-matroska";
const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS = "video/mp2ts";
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index 9ab6611..b18c916 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -21,6 +21,7 @@
#include "include/AMRExtractor.h"
#include "include/MP3Extractor.h"
#include "include/MPEG4Extractor.h"
+#include "include/FragmentedMP4Extractor.h"
#include "include/WAVExtractor.h"
#include "include/OggExtractor.h"
#include "include/MPEG2PSExtractor.h"
@@ -93,7 +94,12 @@
MediaExtractor *ret = NULL;
if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG4)
|| !strcasecmp(mime, "audio/mp4")) {
- ret = new MPEG4Extractor(source);
+ int fragmented = 0;
+ if (meta != NULL && meta->findInt32("fragmented", &fragmented) && fragmented) {
+ ret = new FragmentedMP4Extractor(source);
+ } else {
+ ret = new MPEG4Extractor(source);
+ }
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) {
ret = new MP3Extractor(source, meta);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)
diff --git a/media/libstagefright/MetaData.cpp b/media/libstagefright/MetaData.cpp
index 755594a..a01ec97 100644
--- a/media/libstagefright/MetaData.cpp
+++ b/media/libstagefright/MetaData.cpp
@@ -22,6 +22,8 @@
#include <string.h>
#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/MetaData.h>
namespace android {
@@ -318,6 +320,12 @@
default:
out = String8::format("(unknown type %d, size %d)", mType, mSize);
+ if (mSize <= 48) { // if it's less than three lines of hex data, dump it
+ AString foo;
+ hexdump(data, mSize, 0, &foo);
+ out.append("\n");
+ out.append(foo.c_str());
+ }
break;
}
return out;
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index c478b28..867f76d 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -167,6 +167,10 @@
return OK;
}
+status_t SurfaceMediaSource::setMaxAcquiredBufferCount(size_t count) {
+ return mBufferQueue->setMaxAcquiredBufferCount(count);
+}
+
status_t SurfaceMediaSource::stop()
{
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 2a16f66..74e9222 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -241,5 +241,196 @@
return OK;
}
+static size_t reassembleAVCC(const sp<ABuffer> &csd0, const sp<ABuffer> &csd1, char *avcc) {
+
+ avcc[0] = 1; // configurationVersion
+ avcc[1] = 0x64; // AVCProfileIndication (hardcoded: 100 = High profile)
+ avcc[2] = 0; // profile_compatibility
+ avcc[3] = 0xd; // AVCLevelIndication (hardcoded: 13); ideally profile,
+                // compatibility and level would be copied from the SPS
+ avcc[4] = 0xff; // 6 reserved bits + lengthSizeMinusOne = 3 (4-byte NALs)
+
+ size_t i = 0;
+ int numparams = 0;
+ int lastparamoffset = 0;
+ int avccidx = 6;
+ do {
+ if (i >= csd0->size() - 4 ||
+ memcmp(csd0->data() + i, "\x00\x00\x00\x01", 4) == 0) {
+ if (i >= csd0->size() - 4) {
+ // there can't be another param here, so use all the rest
+ i = csd0->size();
+ }
+ ALOGV("block at %d, last was %d", i, lastparamoffset);
+ if (lastparamoffset > 0) {
+ int size = i - lastparamoffset;
+ avcc[avccidx++] = size >> 8;
+ avcc[avccidx++] = size & 0xff;
+ memcpy(avcc+avccidx, csd0->data() + lastparamoffset, size);
+ avccidx += size;
+ numparams++;
+ }
+ i += 4;
+ lastparamoffset = i;
+ } else {
+ i++;
+ }
+ } while(i < csd0->size());
+ ALOGV("csd0 contains %d params", numparams);
+
+ avcc[5] = 0xe0 | numparams;
+ //and now csd-1
+ i = 0;
+ numparams = 0;
+ lastparamoffset = 0;
+ int numpicparamsoffset = avccidx;
+ avccidx++;
+ do {
+ if (i >= csd1->size() - 4 ||
+ memcmp(csd1->data() + i, "\x00\x00\x00\x01", 4) == 0) {
+ if (i >= csd1->size() - 4) {
+ // there can't be another param here, so use all the rest
+ i = csd1->size();
+ }
+ ALOGV("block at %d, last was %d", i, lastparamoffset);
+ if (lastparamoffset > 0) {
+ int size = i - lastparamoffset;
+ avcc[avccidx++] = size >> 8;
+ avcc[avccidx++] = size & 0xff;
+ memcpy(avcc+avccidx, csd1->data() + lastparamoffset, size);
+ avccidx += size;
+ numparams++;
+ }
+ i += 4;
+ lastparamoffset = i;
+ } else {
+ i++;
+ }
+ } while(i < csd1->size());
+ avcc[numpicparamsoffset] = numparams;
+ return avccidx;
+}
+
+static void reassembleESDS(const sp<ABuffer> &csd0, char *esds) {
+ int csd0size = csd0->size();
+ esds[0] = 3; // kTag_ESDescriptor;
+ int esdescriptorsize = 26 + csd0size;
+ CHECK(esdescriptorsize < 268435456); // 7 bits per byte, so max is 2^28-1
+ esds[1] = 0x80 | (esdescriptorsize >> 21);
+ esds[2] = 0x80 | ((esdescriptorsize >> 14) & 0x7f);
+ esds[3] = 0x80 | ((esdescriptorsize >> 7) & 0x7f);
+ esds[4] = (esdescriptorsize & 0x7f);
+ esds[5] = esds[6] = 0; // es id
+ esds[7] = 0; // flags
+ esds[8] = 4; // kTag_DecoderConfigDescriptor
+ int configdescriptorsize = 18 + csd0size;
+ esds[9] = 0x80 | (configdescriptorsize >> 21);
+ esds[10] = 0x80 | ((configdescriptorsize >> 14) & 0x7f);
+ esds[11] = 0x80 | ((configdescriptorsize >> 7) & 0x7f);
+ esds[12] = (configdescriptorsize & 0x7f);
+ esds[13] = 0x40; // objectTypeIndication: MPEG-4 audio (ISO/IEC 14496-3)
+ esds[14] = 0x15; // streamType = 5 (AudioStream); bytes 14-25 are ignored
+                  // by ESDS.cpp, values taken from a real file
+ esds[15] = 0x00; // bufferSizeDB (3 bytes): 0x001800 = 6144
+ esds[16] = 0x18;
+ esds[17] = 0x00;
+ esds[18] = 0x00; // maxBitrate (4 bytes): 0x0000fa00 = 64000
+ esds[19] = 0x00;
+ esds[20] = 0xfa;
+ esds[21] = 0x00;
+ esds[22] = 0x00; // avgBitrate (4 bytes): 0x0000fa00 = 64000
+ esds[23] = 0x00;
+ esds[24] = 0xfa;
+ esds[25] = 0x00;
+ esds[26] = 5; // kTag_DecoderSpecificInfo;
+ esds[27] = 0x80 | (csd0size >> 21);
+ esds[28] = 0x80 | ((csd0size >> 14) & 0x7f);
+ esds[29] = 0x80 | ((csd0size >> 7) & 0x7f);
+ esds[30] = (csd0size & 0x7f);
+ memcpy((void*)&esds[31], csd0->data(), csd0size);
+ // data following this is ignored, so don't bother appending it
+
+}
+
+void convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) {
+ AString mime;
+ if (msg->findString("mime", &mime)) {
+ meta->setCString(kKeyMIMEType, mime.c_str());
+ } else {
+ ALOGW("did not find mime type");
+ }
+
+ int64_t durationUs;
+ if (msg->findInt64("durationUs", &durationUs)) {
+ meta->setInt64(kKeyDuration, durationUs);
+ }
+
+ if (mime.startsWith("video/")) {
+ int32_t width;
+ int32_t height;
+ if (msg->findInt32("width", &width) && msg->findInt32("height", &height)) {
+ meta->setInt32(kKeyWidth, width);
+ meta->setInt32(kKeyHeight, height);
+ } else {
+ ALOGW("did not find width and/or height");
+ }
+ } else if (mime.startsWith("audio/")) {
+ int32_t numChannels;
+ if (msg->findInt32("channel-count", &numChannels)) {
+ meta->setInt32(kKeyChannelCount, numChannels);
+ }
+ int32_t sampleRate;
+ if (msg->findInt32("sample-rate", &sampleRate)) {
+ meta->setInt32(kKeySampleRate, sampleRate);
+ }
+ int32_t channelMask;
+ if (msg->findInt32("channel-mask", &channelMask)) {
+ meta->setInt32(kKeyChannelMask, channelMask);
+ }
+ int32_t delay = 0;
+ if (msg->findInt32("encoder-delay", &delay)) {
+ meta->setInt32(kKeyEncoderDelay, delay);
+ }
+ int32_t padding = 0;
+ if (msg->findInt32("encoder-padding", &padding)) {
+ meta->setInt32(kKeyEncoderPadding, padding);
+ }
+
+ int32_t isADTS;
+ if (msg->findInt32("is-adts", &isADTS)) {
+ meta->setInt32(kKeyIsADTS, isADTS);
+ }
+ }
+
+ int32_t maxInputSize;
+ if (msg->findInt32("max-input-size", &maxInputSize)) {
+ meta->setInt32(kKeyMaxInputSize, maxInputSize);
+ }
+
+ // reassemble the csd data into its original form
+ sp<ABuffer> csd0;
+ if (msg->findBuffer("csd-0", &csd0)) {
+ if (mime.startsWith("video/")) { // do we need to be stricter than this?
+ sp<ABuffer> csd1;
+ if (msg->findBuffer("csd-1", &csd1)) {
+ char avcc[1024]; // that oughta be enough, right?
+ size_t outsize = reassembleAVCC(csd0, csd1, avcc);
+ meta->setData(kKeyAVCC, kKeyAVCC, avcc, outsize);
+ }
+ } else if (mime.startsWith("audio/")) {
+ int csd0size = csd0->size();
+ char esds[csd0size + 31];
+ reassembleESDS(csd0, esds);
+ meta->setData(kKeyESDS, kKeyESDS, esds, sizeof(esds));
+ }
+ }
+
+ // XXX TODO add whatever other keys there are
+
+#if 0
+ ALOGI("converted %s to:", msg->debugString(0).c_str());
+ meta->dumpToLog();
+#endif
+}
+
+
} // namespace android
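A minimal round trip through the new helper, with illustrative field values rather than data from a real stream:

```cpp
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>

using namespace android;

// Sketch only; the mime type and audio parameters are made up.
sp<MetaData> makeAudioMeta() {
    sp<AMessage> format = new AMessage;
    format->setString("mime", "audio/mp4a-latm");
    format->setInt32("channel-count", 2);
    format->setInt32("sample-rate", 44100);

    sp<MetaData> meta = new MetaData;
    convertMessageToMetaData(format, meta);
    // meta now carries kKeyMIMEType, kKeyChannelCount and kKeySampleRate,
    // ready for consumers such as FragmentedMP4Extractor::getTrackMetaData().
    return meta;
}
```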
diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp
index 851321d..a38400b 100644
--- a/media/libstagefright/WAVExtractor.cpp
+++ b/media/libstagefright/WAVExtractor.cpp
@@ -106,7 +106,7 @@
return meta;
}
- meta->setCString(kKeyMIMEType, "audio/x-wav");
+ meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_WAV);
return meta;
}
@@ -509,4 +509,3 @@
}
} // namespace android
-
diff --git a/media/libstagefright/avc_utils.cpp b/media/libstagefright/avc_utils.cpp
index 65c1848..a141752 100644
--- a/media/libstagefright/avc_utils.cpp
+++ b/media/libstagefright/avc_utils.cpp
@@ -600,7 +600,7 @@
bitrate = kBitrateV2[bitrate_index - 1];
if (out_num_samples) {
- *out_num_samples = 576;
+ *out_num_samples = (layer == 1 /* L3 */) ? 576 : 1152;
}
}
@@ -612,7 +612,8 @@
*frame_size = 144000 * bitrate / sampling_rate + padding;
} else {
// V2 or V2.5
- *frame_size = 72000 * bitrate / sampling_rate + padding;
+ size_t tmp = (layer == 1 /* L3 */) ? 72000 : 144000;
+ *frame_size = tmp * bitrate / sampling_rate + padding;
}
}
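Both avc_utils.cpp hunks above fix the same underlying fact about MPEG audio: for MPEG-2/2.5, only Layer III halves the frame to 576 samples, so the samples-per-frame value and the frame-size coefficient each depend on the layer bits. A sketch of the resulting rule (hypothetical helper; bitrate in bits/sec, and layer == 1 encodes Layer III as in the patch comment):

    #include <stddef.h>

    static size_t mp3FrameSize(
            bool isV1, unsigned layer, unsigned bitrate,
            unsigned samplingRate, unsigned padding) {
        // Layers II/III only -- Layer I frames use 384 samples and 4-byte slots.
        // coefficient = samples per frame / 8
        unsigned coef;
        if (isV1) {
            coef = 144;                      // 1152 samples
        } else {
            coef = (layer == 1) ? 72 : 144;  // 576 vs. 1152 samples
        }
        return coef * bitrate / samplingRate + padding;
    }

    // Example: MPEG-2 Layer III, 64 kbps, 22050 Hz, no padding:
    // 72 * 64000 / 22050 = 208 bytes per frame.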
diff --git a/media/libstagefright/include/FragmentedMP4Extractor.h b/media/libstagefright/include/FragmentedMP4Extractor.h
new file mode 100644
index 0000000..763cd3a
--- /dev/null
+++ b/media/libstagefright/include/FragmentedMP4Extractor.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FRAGMENTED_MP4_EXTRACTOR_H_
+
+#define FRAGMENTED_MP4_EXTRACTOR_H_
+
+#include "include/FragmentedMP4Parser.h"
+
+#include <media/stagefright/MediaExtractor.h>
+#include <utils/Vector.h>
+#include <utils/String8.h>
+
+namespace android {
+
+struct AMessage;
+class DataSource;
+class SampleTable;
+class String8;
+
+class FragmentedMP4Extractor : public MediaExtractor {
+public:
+ // Extractor assumes ownership of "source".
+ FragmentedMP4Extractor(const sp<DataSource> &source);
+
+ virtual size_t countTracks();
+ virtual sp<MediaSource> getTrack(size_t index);
+ virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
+ virtual sp<MetaData> getMetaData();
+ virtual uint32_t flags() const;
+
+protected:
+ virtual ~FragmentedMP4Extractor();
+
+private:
+ sp<ALooper> mLooper;
+ sp<FragmentedMP4Parser> mParser;
+ sp<DataSource> mDataSource;
+ status_t mInitCheck;
+ size_t mAudioTrackIndex;
+ size_t mTrackCount;
+
+ sp<MetaData> mFileMetaData;
+
+ Vector<uint32_t> mPath;
+
+ FragmentedMP4Extractor(const FragmentedMP4Extractor &);
+ FragmentedMP4Extractor &operator=(const FragmentedMP4Extractor &);
+};
+
+bool SniffFragmentedMP4(
+ const sp<DataSource> &source, String8 *mimeType, float *confidence,
+ sp<AMessage> *);
+
+} // namespace android
+
+#endif // FRAGMENTED_MP4_EXTRACTOR_H_
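A usage sketch based only on the interface above, with the data source assumed to be opened elsewhere (e.g. via DataSource::CreateFromURI); the LOG_TAG and function name are illustrative:

    #define LOG_TAG "FragmentedMP4Dump"
    #include <utils/Log.h>

    #include "include/FragmentedMP4Extractor.h"
    #include <media/stagefright/MetaData.h>

    using namespace android;

    // Enumerates the tracks of a fragmented MP4 via the new extractor.
    static void dumpTracks(const sp<DataSource> &source) {
        sp<MediaExtractor> extractor = new FragmentedMP4Extractor(source);
        for (size_t i = 0; i < extractor->countTracks(); ++i) {
            sp<MetaData> meta = extractor->getTrackMetaData(i, 0 /* flags */);
            const char *mime;
            if (meta != NULL && meta->findCString(kKeyMIMEType, &mime)) {
                ALOGI("track %d: %s", (int)i, mime);
            }
        }
    }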
diff --git a/media/libmediaplayerservice/nuplayer/mp4/Parser.h b/media/libstagefright/include/FragmentedMP4Parser.h
similarity index 85%
rename from media/libmediaplayerservice/nuplayer/mp4/Parser.h
rename to media/libstagefright/include/FragmentedMP4Parser.h
index 0d8d0f5..0edafb9 100644
--- a/media/libmediaplayerservice/nuplayer/mp4/Parser.h
+++ b/media/libstagefright/include/FragmentedMP4Parser.h
@@ -19,17 +19,19 @@
#define PARSER_H_
#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/DataSource.h>
#include <utils/Vector.h>
namespace android {
struct ABuffer;
-struct Parser : public AHandler {
+struct FragmentedMP4Parser : public AHandler {
struct Source : public RefBase {
Source() {}
virtual ssize_t readAt(off64_t offset, void *data, size_t size) = 0;
+ virtual bool isSeekable() = 0;
protected:
virtual ~Source() {}
@@ -38,18 +40,21 @@
DISALLOW_EVIL_CONSTRUCTORS(Source);
};
- Parser();
+ FragmentedMP4Parser();
void start(const char *filename);
void start(const sp<Source> &source);
+ void start(sp<DataSource> &source);
- sp<AMessage> getFormat(bool audio);
- status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
+ sp<AMessage> getFormat(bool audio, bool synchronous = false);
+ status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit, bool synchronous = false);
+ status_t seekTo(bool audio, int64_t timeUs);
+ bool isSeekable() const;
virtual void onMessageReceived(const sp<AMessage> &msg);
protected:
- virtual ~Parser();
+ virtual ~FragmentedMP4Parser();
private:
enum {
@@ -58,6 +63,7 @@
kWhatReadMore,
kWhatGetFormat,
kWhatDequeueAccessUnit,
+ kWhatSeekTo,
};
struct TrackFragment;
@@ -67,7 +73,7 @@
struct DispatchEntry {
uint32_t mType;
uint32_t mParentType;
- status_t (Parser::*mHandler)(uint32_t, size_t, uint64_t);
+ status_t (FragmentedMP4Parser::*mHandler)(uint32_t, size_t, uint64_t);
};
struct Container {
@@ -97,6 +103,11 @@
off64_t mOffset;
};
+ struct SidxEntry {
+ size_t mSize;
+ uint32_t mDurationUs;
+ };
+
struct TrackInfo {
enum Flags {
kTrackEnabled = 0x01,
@@ -107,6 +118,7 @@
uint32_t mTrackID;
uint32_t mFlags;
uint32_t mDuration; // This is the duration in terms of movie timescale!
+ uint64_t mSidxDuration; // usec, from sidx box, which can use a different timescale
uint32_t mMediaTimeScale;
@@ -121,6 +133,7 @@
uint32_t mDecodingTime;
+ Vector<SidxEntry> mSidx;
sp<StaticTrackFragment> mStaticFragment;
List<sp<TrackFragment> > mFragments;
};
@@ -151,6 +164,8 @@
sp<Source> mSource;
off_t mBufferPos;
bool mSuspended;
+ bool mDoneWithMoov;
+ off_t mFirstMoofOffset; // used as the starting point for offsets calculated from the sidx box
sp<ABuffer> mBuffer;
Vector<Container> mStack;
KeyedVector<uint32_t, TrackInfo> mTracks; // TrackInfo by trackID
@@ -164,6 +179,7 @@
status_t onProceed();
status_t onDequeueAccessUnit(size_t trackIndex, sp<ABuffer> *accessUnit);
+ status_t onSeekTo(bool wantAudio, int64_t position);
void enter(off64_t offset, uint32_t type, uint64_t size);
@@ -222,6 +238,9 @@
status_t parseMediaData(
uint32_t type, size_t offset, uint64_t size);
+ status_t parseSegmentIndex(
+ uint32_t type, size_t offset, uint64_t size);
+
TrackInfo *editTrack(uint32_t trackID, bool createIfNecessary = false);
ssize_t findTrack(bool wantAudio) const;
@@ -246,7 +265,7 @@
sp<ABuffer> *dst,
size_t offset, uint64_t size, size_t extra = 0) const;
- DISALLOW_EVIL_CONSTRUCTORS(Parser);
+ DISALLOW_EVIL_CONSTRUCTORS(FragmentedMP4Parser);
};
} // namespace android
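Because the renamed parser is still an AHandler, a client must register it with a looper before the new synchronous entry points work. A minimal sketch, assuming dataSource is an already-opened sp<DataSource>:

    #include "include/FragmentedMP4Parser.h"

    #include <media/stagefright/foundation/ALooper.h>
    #include <media/stagefright/MediaErrors.h>

    using namespace android;

    static status_t readFirstAudioUnit(
            sp<DataSource> &dataSource, sp<ABuffer> *accessUnit) {
        sp<FragmentedMP4Parser> parser = new FragmentedMP4Parser;
        sp<ALooper> looper = new ALooper;
        looper->registerHandler(parser);
        looper->start();

        parser->start(dataSource);

        // synchronous == true retries internally (resuming the parser and
        // sleeping 10 ms) until the format / access unit is available.
        sp<AMessage> format = parser->getFormat(true /* audio */, true /* synchronous */);
        if (format == NULL) {
            return ERROR_UNSUPPORTED;
        }
        return parser->dequeueAccessUnit(
                true /* audio */, accessUnit, true /* synchronous */);
    }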
diff --git a/media/libmediaplayerservice/nuplayer/mp4/Parser.cpp b/media/libstagefright/mp4/FragmentedMP4Parser.cpp
similarity index 74%
rename from media/libmediaplayerservice/nuplayer/mp4/Parser.cpp
rename to media/libstagefright/mp4/FragmentedMP4Parser.cpp
index f664e92..7fe4e63 100644
--- a/media/libmediaplayerservice/nuplayer/mp4/Parser.cpp
+++ b/media/libstagefright/mp4/FragmentedMP4Parser.cpp
@@ -15,13 +15,13 @@
*/
//#define LOG_NDEBUG 0
-#define LOG_TAG "Parser"
+#define LOG_TAG "FragmentedMP4Parser"
#include <utils/Log.h>
-#include "Parser.h"
+#include "include/ESDS.h"
+#include "include/FragmentedMP4Parser.h"
#include "TrackFragment.h"
-#include "ESDS.h"
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -31,7 +31,6 @@
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>
-#include "../NuPlayerStreamListener.h"
namespace android {
@@ -52,7 +51,7 @@
}
// static
-const Parser::DispatchEntry Parser::kDispatchTable[] = {
+const FragmentedMP4Parser::DispatchEntry FragmentedMP4Parser::kDispatchTable[] = {
{ FOURCC('m', 'o', 'o', 'v'), 0, NULL },
{ FOURCC('t', 'r', 'a', 'k'), FOURCC('m', 'o', 'o', 'v'), NULL },
{ FOURCC('u', 'd', 't', 'a'), FOURCC('t', 'r', 'a', 'k'), NULL },
@@ -61,24 +60,24 @@
{ FOURCC('i', 'l', 's', 't'), FOURCC('m', 'e', 't', 'a'), NULL },
{ FOURCC('t', 'k', 'h', 'd'), FOURCC('t', 'r', 'a', 'k'),
- &Parser::parseTrackHeader
+ &FragmentedMP4Parser::parseTrackHeader
},
{ FOURCC('m', 'v', 'e', 'x'), FOURCC('m', 'o', 'o', 'v'), NULL },
{ FOURCC('t', 'r', 'e', 'x'), FOURCC('m', 'v', 'e', 'x'),
- &Parser::parseTrackExtends
+ &FragmentedMP4Parser::parseTrackExtends
},
{ FOURCC('e', 'd', 't', 's'), FOURCC('t', 'r', 'a', 'k'), NULL },
{ FOURCC('m', 'd', 'i', 'a'), FOURCC('t', 'r', 'a', 'k'), NULL },
{ FOURCC('m', 'd', 'h', 'd'), FOURCC('m', 'd', 'i', 'a'),
- &Parser::parseMediaHeader
+ &FragmentedMP4Parser::parseMediaHeader
},
{ FOURCC('h', 'd', 'l', 'r'), FOURCC('m', 'd', 'i', 'a'),
- &Parser::parseMediaHandler
+ &FragmentedMP4Parser::parseMediaHandler
},
{ FOURCC('m', 'i', 'n', 'f'), FOURCC('m', 'd', 'i', 'a'), NULL },
@@ -87,45 +86,47 @@
{ FOURCC('s', 't', 's', 'd'), FOURCC('s', 't', 'b', 'l'), NULL },
{ FOURCC('s', 't', 's', 'z'), FOURCC('s', 't', 'b', 'l'),
- &Parser::parseSampleSizes },
+ &FragmentedMP4Parser::parseSampleSizes },
{ FOURCC('s', 't', 'z', '2'), FOURCC('s', 't', 'b', 'l'),
- &Parser::parseCompactSampleSizes },
+ &FragmentedMP4Parser::parseCompactSampleSizes },
{ FOURCC('s', 't', 's', 'c'), FOURCC('s', 't', 'b', 'l'),
- &Parser::parseSampleToChunk },
+ &FragmentedMP4Parser::parseSampleToChunk },
{ FOURCC('s', 't', 'c', 'o'), FOURCC('s', 't', 'b', 'l'),
- &Parser::parseChunkOffsets },
+ &FragmentedMP4Parser::parseChunkOffsets },
{ FOURCC('c', 'o', '6', '4'), FOURCC('s', 't', 'b', 'l'),
- &Parser::parseChunkOffsets64 },
+ &FragmentedMP4Parser::parseChunkOffsets64 },
{ FOURCC('a', 'v', 'c', 'C'), FOURCC('a', 'v', 'c', '1'),
- &Parser::parseAVCCodecSpecificData },
+ &FragmentedMP4Parser::parseAVCCodecSpecificData },
{ FOURCC('e', 's', 'd', 's'), FOURCC('m', 'p', '4', 'a'),
- &Parser::parseESDSCodecSpecificData },
+ &FragmentedMP4Parser::parseESDSCodecSpecificData },
{ FOURCC('e', 's', 'd', 's'), FOURCC('m', 'p', '4', 'v'),
- &Parser::parseESDSCodecSpecificData },
+ &FragmentedMP4Parser::parseESDSCodecSpecificData },
- { FOURCC('m', 'd', 'a', 't'), 0, &Parser::parseMediaData },
+ { FOURCC('m', 'd', 'a', 't'), 0, &FragmentedMP4Parser::parseMediaData },
{ FOURCC('m', 'o', 'o', 'f'), 0, NULL },
{ FOURCC('t', 'r', 'a', 'f'), FOURCC('m', 'o', 'o', 'f'), NULL },
{ FOURCC('t', 'f', 'h', 'd'), FOURCC('t', 'r', 'a', 'f'),
- &Parser::parseTrackFragmentHeader
+ &FragmentedMP4Parser::parseTrackFragmentHeader
},
{ FOURCC('t', 'r', 'u', 'n'), FOURCC('t', 'r', 'a', 'f'),
- &Parser::parseTrackFragmentRun
+ &FragmentedMP4Parser::parseTrackFragmentRun
},
{ FOURCC('m', 'f', 'r', 'a'), 0, NULL },
+
+ { FOURCC('s', 'i', 'd', 'x'), 0, &FragmentedMP4Parser::parseSegmentIndex },
};
-struct FileSource : public Parser::Source {
+struct FileSource : public FragmentedMP4Parser::Source {
FileSource(const char *filename)
: mFile(fopen(filename, "rb")) {
CHECK(mFile != NULL);
@@ -136,76 +137,241 @@
return fread(data, 1, size, mFile);
}
+ virtual bool isSeekable() {
+ return true;
+ }
+
private:
FILE *mFile;
DISALLOW_EVIL_CONSTRUCTORS(FileSource);
};
-Parser::Parser()
+struct ReadTracker : public RefBase {
+ ReadTracker(off64_t size) {
+ allocSize = 1 + size / 8192; // 1 bit per kilobyte
+ bitmap = (char*) calloc(1, allocSize);
+ }
+ virtual ~ReadTracker() {
+ dumpToLog();
+ free(bitmap);
+ }
+ void mark(off64_t offset, size_t size) {
+ int firstbit = offset / 1024;
+ int lastbit = (offset + size - 1) / 1024;
+ for (int i = firstbit; i <= lastbit; i++) {
+ bitmap[i/8] |= (0x80 >> (i & 7));
+ }
+ }
+
+ private:
+ void dumpToLog() {
+ // 96 chars per line; each char is one bit of the bitmap, i.e. one kilobyte of file
+ int numlines = allocSize / 12;
+ char buf[97];
+ char *cur = bitmap;
+ for (int i = 0; i < numlines; i++) {
+ for (int j = 0; j < 12; j++) {
+ for (int k = 0; k < 8; k++) {
+ buf[(j * 8) + k] = (*cur & (0x80 >> k)) ? 'X' : '.';
+ }
+ cur++;
+ }
+ buf[96] = '\0';
+ ALOGI("%5dk: %s", i * 96, buf);
+ }
+ }
+
+ size_t allocSize;
+ char *bitmap;
+};
+
+struct DataSourceSource : public FragmentedMP4Parser::Source {
+ DataSourceSource(sp<DataSource> &source)
+ : mDataSource(source) {
+ CHECK(mDataSource != NULL);
+#if 0
+ off64_t size;
+ if (source->getSize(&size) == OK) {
+ mReadTracker = new ReadTracker(size);
+ } else {
+ ALOGE("couldn't get data source size");
+ }
+#endif
+ }
+
+ virtual ssize_t readAt(off64_t offset, void *data, size_t size) {
+ if (mReadTracker != NULL) {
+ mReadTracker->mark(offset, size);
+ }
+ return mDataSource->readAt(offset, data, size);
+ }
+
+ virtual bool isSeekable() {
+ return true;
+ }
+
+ private:
+ sp<DataSource> mDataSource;
+ sp<ReadTracker> mReadTracker;
+
+ DISALLOW_EVIL_CONSTRUCTORS(DataSourceSource);
+};
+
+FragmentedMP4Parser::FragmentedMP4Parser()
: mBufferPos(0),
mSuspended(false),
+ mDoneWithMoov(false),
+ mFirstMoofOffset(0),
mFinalResult(OK) {
}
-Parser::~Parser() {
+FragmentedMP4Parser::~FragmentedMP4Parser() {
}
-void Parser::start(const char *filename) {
+void FragmentedMP4Parser::start(const char *filename) {
sp<AMessage> msg = new AMessage(kWhatStart, id());
msg->setObject("source", new FileSource(filename));
msg->post();
+ ALOGV("Parser::start(%s)", filename);
}
-void Parser::start(const sp<Source> &source) {
+void FragmentedMP4Parser::start(const sp<Source> &source) {
sp<AMessage> msg = new AMessage(kWhatStart, id());
msg->setObject("source", source);
msg->post();
+ ALOGV("Parser::start(Source)");
}
-sp<AMessage> Parser::getFormat(bool audio) {
- sp<AMessage> msg = new AMessage(kWhatGetFormat, id());
- msg->setInt32("audio", audio);
+void FragmentedMP4Parser::start(sp<DataSource> &source) {
+ sp<AMessage> msg = new AMessage(kWhatStart, id());
+ msg->setObject("source", new DataSourceSource(source));
+ msg->post();
+ ALOGV("Parser::start(DataSource)");
+}
+
+sp<AMessage> FragmentedMP4Parser::getFormat(bool audio, bool synchronous) {
+
+ while (true) {
+ bool moovDone = mDoneWithMoov;
+ sp<AMessage> msg = new AMessage(kWhatGetFormat, id());
+ msg->setInt32("audio", audio);
+
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+
+ if (err != OK) {
+ ALOGV("getFormat post failed: %d", err);
+ return NULL;
+ }
+
+ if (response->findInt32("err", &err) && err != OK) {
+ if (synchronous && err == -EWOULDBLOCK && !moovDone) {
+ resumeIfNecessary();
+ ALOGV("@getFormat parser not ready yet, retrying");
+ usleep(10000);
+ continue;
+ }
+ ALOGV("getFormat failed: %d", err);
+ return NULL;
+ }
+
+ sp<AMessage> format;
+ CHECK(response->findMessage("format", &format));
+
+ ALOGV("returning format %s", format->debugString().c_str());
+ return format;
+ }
+}
+
+status_t FragmentedMP4Parser::seekTo(bool wantAudio, int64_t timeUs) {
+ sp<AMessage> msg = new AMessage(kWhatSeekTo, id());
+ msg->setInt32("audio", wantAudio);
+ msg->setInt64("position", timeUs);
sp<AMessage> response;
status_t err = msg->postAndAwaitResponse(&response);
-
- if (err != OK) {
- return NULL;
- }
-
- if (response->findInt32("err", &err) && err != OK) {
- return NULL;
- }
-
- sp<AMessage> format;
- CHECK(response->findMessage("format", &format));
-
- ALOGV("returning format %s", format->debugString().c_str());
- return format;
+ return err;
}
-status_t Parser::dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit) {
- sp<AMessage> msg = new AMessage(kWhatDequeueAccessUnit, id());
- msg->setInt32("audio", audio);
-
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
-
- if (err != OK) {
- return err;
+bool FragmentedMP4Parser::isSeekable() const {
+ while (mFirstMoofOffset == 0 && mFinalResult == OK) {
+ usleep(10000);
}
-
- if (response->findInt32("err", &err) && err != OK) {
- return err;
+ bool seekable = mSource->isSeekable();
+ for (size_t i = 0; seekable && i < mTracks.size(); i++) {
+ const TrackInfo *info = &mTracks.valueAt(i);
+ seekable &= !info->mSidx.empty();
}
-
- CHECK(response->findBuffer("accessUnit", accessUnit));
-
- return OK;
+ return seekable;
}
-ssize_t Parser::findTrack(bool wantAudio) const {
+status_t FragmentedMP4Parser::onSeekTo(bool wantAudio, int64_t position) {
+ status_t err = -EINVAL;
+ ssize_t trackIndex = findTrack(wantAudio);
+ if (trackIndex < 0) {
+ err = trackIndex;
+ } else {
+ TrackInfo *info = &mTracks.editValueAt(trackIndex);
+
+ int numSidxEntries = info->mSidx.size();
+ int64_t totalTime = 0;
+ off_t totalOffset = mFirstMoofOffset;
+ for (int i = 0; i < numSidxEntries; i++) {
+ const SidxEntry *se = &info->mSidx[i];
+ totalTime += se->mDurationUs;
+ if (totalTime > position) {
+ mBuffer->setRange(0,0);
+ mBufferPos = totalOffset;
+ if (mFinalResult == ERROR_END_OF_STREAM) {
+ mFinalResult = OK;
+ mSuspended = true; // force resume
+ resumeIfNecessary();
+ }
+ info->mFragments.clear();
+ info->mDecodingTime = position * info->mMediaTimeScale / 1000000ll;
+ return OK;
+ }
+ totalOffset += se->mSize;
+ }
+ }
+ ALOGV("seekTo out of range");
+ return err;
+}
+
+status_t FragmentedMP4Parser::dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit,
+ bool synchronous) {
+
+ while (true) {
+ sp<AMessage> msg = new AMessage(kWhatDequeueAccessUnit, id());
+ msg->setInt32("audio", audio);
+
+ sp<AMessage> response;
+ status_t err = msg->postAndAwaitResponse(&response);
+
+ if (err != OK) {
+ ALOGV("dequeue fail 1: %d", err);
+ return err;
+ }
+
+ if (response->findInt32("err", &err) && err != OK) {
+ if (synchronous && err == -EWOULDBLOCK) {
+ resumeIfNecessary();
+ ALOGV("Parser not ready yet, retrying");
+ usleep(10000);
+ continue;
+ }
+ ALOGV("dequeue fail 2: %d, %d", err, synchronous);
+ return err;
+ }
+
+ CHECK(response->findBuffer("accessUnit", accessUnit));
+
+ return OK;
+ }
+}
+
+ssize_t FragmentedMP4Parser::findTrack(bool wantAudio) const {
for (size_t i = 0; i < mTracks.size(); ++i) {
const TrackInfo *info = &mTracks.valueAt(i);
@@ -227,7 +393,7 @@
return -EWOULDBLOCK;
}
-void Parser::onMessageReceived(const sp<AMessage> &msg) {
+void FragmentedMP4Parser::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatStart:
{
@@ -274,7 +440,7 @@
size_t maxBytesToRead = mBuffer->capacity() - mBuffer->size();
if (maxBytesToRead < needed) {
- ALOGI("resizing buffer.");
+ ALOGV("resizing buffer.");
sp<ABuffer> newBuffer =
new ABuffer((mBuffer->size() + needed + 1023) & ~1023);
@@ -292,7 +458,7 @@
mBuffer->data() + mBuffer->size(), needed);
if (n < (ssize_t)needed) {
- ALOGI("%s", "Reached EOF");
+ ALOGV("Reached EOF when reading %d @ %d + %d", needed, mBufferPos, mBuffer->size());
if (n < 0) {
mFinalResult = n;
} else if (n == 0) {
@@ -323,8 +489,16 @@
} else {
TrackInfo *info = &mTracks.editValueAt(trackIndex);
+ sp<AMessage> format = info->mSampleDescs.itemAt(0).mFormat;
+ if (info->mSidxDuration) {
+ format->setInt64("durationUs", info->mSidxDuration);
+ } else {
+ // this is probably going to be zero. Oh well...
+ format->setInt64("durationUs",
+ 1000000ll * info->mDuration / info->mMediaTimeScale);
+ }
response->setMessage(
- "format", info->mSampleDescs.itemAt(0).mFormat);
+ "format", format);
err = OK;
}
@@ -368,12 +542,36 @@
break;
}
+ case kWhatSeekTo:
+ {
+ ALOGV("kWhatSeekTo");
+ int32_t wantAudio;
+ CHECK(msg->findInt32("audio", &wantAudio));
+ int64_t position;
+ CHECK(msg->findInt64("position", &position));
+
+ status_t err = -EWOULDBLOCK;
+ sp<AMessage> response = new AMessage;
+
+ ssize_t trackIndex = findTrack(wantAudio);
+
+ if (trackIndex < 0) {
+ err = trackIndex;
+ } else {
+ err = onSeekTo(wantAudio, position);
+ }
+ response->setInt32("err", err);
+ uint32_t replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+ response->postReply(replyID);
+ break;
+ }
default:
TRESPASS();
}
}
-status_t Parser::onProceed() {
+status_t FragmentedMP4Parser::onProceed() {
status_t err;
if ((err = need(8)) != OK) {
@@ -431,6 +629,12 @@
if ((i < kNumDispatchers && kDispatchTable[i].mHandler == 0)
|| isSampleEntryBox || ptype == FOURCC('i', 'l', 's', 't')) {
// This is a container box.
+ if (type == FOURCC('m', 'o', 'o', 'f')) {
+ if (mFirstMoofOffset == 0) {
+ ALOGV("first moof @ %08x", mBufferPos + offset);
+ mFirstMoofOffset = mBufferPos + offset - 8; // point at the size
+ }
+ }
if (type == FOURCC('m', 'e', 't', 'a')) {
if ((err = need(offset + 4)) < OK) {
return err;
@@ -565,7 +769,7 @@
}
// static
-int Parser::CompareSampleLocation(
+int FragmentedMP4Parser::CompareSampleLocation(
const SampleInfo &sample, const MediaDataInfo &mdatInfo) {
if (sample.mOffset + sample.mSize < mdatInfo.mOffset) {
return -1;
@@ -586,18 +790,18 @@
return 0;
}
-void Parser::resumeIfNecessary() {
+void FragmentedMP4Parser::resumeIfNecessary() {
if (!mSuspended) {
return;
}
- ALOGI("resuming.");
+ ALOGV("resuming.");
mSuspended = false;
(new AMessage(kWhatProceed, id()))->post();
}
-status_t Parser::getSample(
+status_t FragmentedMP4Parser::getSample(
TrackInfo *info, sp<TrackFragment> *fragment, SampleInfo *sampleInfo) {
for (;;) {
if (info->mFragments.empty()) {
@@ -625,7 +829,7 @@
}
}
-status_t Parser::onDequeueAccessUnit(
+status_t FragmentedMP4Parser::onDequeueAccessUnit(
size_t trackIndex, sp<ABuffer> *accessUnit) {
TrackInfo *info = &mTracks.editValueAt(trackIndex);
@@ -649,7 +853,7 @@
int cmp = CompareSampleLocation(sampleInfo, mdatInfo);
- if (cmp < 0) {
+ if (cmp < 0 && !mSource->isSeekable()) {
return -EPIPE;
} else if (cmp == 0) {
if (i > 0) {
@@ -671,6 +875,8 @@
size_t numDroppable = 0;
bool done = false;
+ // XXX FIXME: if one of the tracks is not advanced (e.g. if you play an audio+video
+ // file with sf2), then mMediaData will not be pruned and keeps growing
for (size_t i = 0; !done && i < mMediaData.size(); ++i) {
const MediaDataInfo &mdatInfo = mMediaData.itemAt(i);
@@ -730,7 +936,7 @@
return 0;
}
-status_t Parser::makeAccessUnit(
+status_t FragmentedMP4Parser::makeAccessUnit(
TrackInfo *info,
const SampleInfo &sample,
const MediaDataInfo &mdatInfo,
@@ -801,7 +1007,7 @@
return OK;
}
-status_t Parser::need(size_t size) {
+status_t FragmentedMP4Parser::need(size_t size) {
if (!fitsContainer(size)) {
return -EINVAL;
}
@@ -819,7 +1025,7 @@
return -EAGAIN;
}
-void Parser::enter(off64_t offset, uint32_t type, uint64_t size) {
+void FragmentedMP4Parser::enter(off64_t offset, uint32_t type, uint64_t size) {
Container container;
container.mOffset = offset;
container.mType = type;
@@ -829,32 +1035,32 @@
mStack.push(container);
}
-bool Parser::fitsContainer(uint64_t size) const {
+bool FragmentedMP4Parser::fitsContainer(uint64_t size) const {
CHECK(!mStack.isEmpty());
const Container &container = mStack.itemAt(mStack.size() - 1);
return container.mExtendsToEOF || size <= container.mBytesRemaining;
}
-uint16_t Parser::readU16(size_t offset) {
+uint16_t FragmentedMP4Parser::readU16(size_t offset) {
CHECK_LE(offset + 2, mBuffer->size());
const uint8_t *ptr = mBuffer->data() + offset;
return (ptr[0] << 8) | ptr[1];
}
-uint32_t Parser::readU32(size_t offset) {
+uint32_t FragmentedMP4Parser::readU32(size_t offset) {
CHECK_LE(offset + 4, mBuffer->size());
const uint8_t *ptr = mBuffer->data() + offset;
return (ptr[0] << 24) | (ptr[1] << 16) | (ptr[2] << 8) | ptr[3];
}
-uint64_t Parser::readU64(size_t offset) {
+uint64_t FragmentedMP4Parser::readU64(size_t offset) {
return (((uint64_t)readU32(offset)) << 32) | readU32(offset + 4);
}
-void Parser::skip(off_t distance) {
+void FragmentedMP4Parser::skip(off_t distance) {
CHECK(!mStack.isEmpty());
for (size_t i = mStack.size(); i-- > 0;) {
Container *container = &mStack.editItemAt(i);
@@ -898,6 +1104,8 @@
static_cast<DynamicTrackFragment *>(
fragment.get())->signalCompletion();
+ } else if (container->mType == FOURCC('m', 'o', 'o', 'v')) {
+ mDoneWithMoov = true;
}
container = NULL;
@@ -916,7 +1124,7 @@
mBufferPos += distance;
}
-status_t Parser::parseTrackHeader(
+status_t FragmentedMP4Parser::parseTrackHeader(
uint32_t type, size_t offset, uint64_t size) {
if (offset + 4 > size) {
return -EINVAL;
@@ -955,6 +1163,10 @@
TrackInfo *info = editTrack(trackID, true /* createIfNecessary */);
info->mFlags = flags;
info->mDuration = duration;
+ if (info->mDuration == 0xffffffff) {
+ // ffmpeg sets this to -1, which is incorrect.
+ info->mDuration = 0;
+ }
info->mStaticFragment = new StaticTrackFragment;
@@ -963,7 +1175,7 @@
return OK;
}
-status_t Parser::parseMediaHeader(
+status_t FragmentedMP4Parser::parseMediaHeader(
uint32_t type, size_t offset, uint64_t size) {
if (offset + 4 > size) {
return -EINVAL;
@@ -996,7 +1208,7 @@
return OK;
}
-status_t Parser::parseMediaHandler(
+status_t FragmentedMP4Parser::parseMediaHandler(
uint32_t type, size_t offset, uint64_t size) {
if (offset + 12 > size) {
return -EINVAL;
@@ -1024,7 +1236,7 @@
return OK;
}
-status_t Parser::parseVisualSampleEntry(
+status_t FragmentedMP4Parser::parseVisualSampleEntry(
uint32_t type, size_t offset, uint64_t size) {
if (offset + 78 > size) {
return -EINVAL;
@@ -1067,7 +1279,7 @@
return OK;
}
-status_t Parser::parseAudioSampleEntry(
+status_t FragmentedMP4Parser::parseAudioSampleEntry(
uint32_t type, size_t offset, uint64_t size) {
if (offset + 28 > size) {
return -EINVAL;
@@ -1133,37 +1345,37 @@
format->setBuffer(StringPrintf("csd-%d", index).c_str(), csd);
}
-status_t Parser::parseSampleSizes(
+status_t FragmentedMP4Parser::parseSampleSizes(
uint32_t type, size_t offset, uint64_t size) {
return editTrack(mCurrentTrackID)->mStaticFragment->parseSampleSizes(
this, type, offset, size);
}
-status_t Parser::parseCompactSampleSizes(
+status_t FragmentedMP4Parser::parseCompactSampleSizes(
uint32_t type, size_t offset, uint64_t size) {
return editTrack(mCurrentTrackID)->mStaticFragment->parseCompactSampleSizes(
this, type, offset, size);
}
-status_t Parser::parseSampleToChunk(
+status_t FragmentedMP4Parser::parseSampleToChunk(
uint32_t type, size_t offset, uint64_t size) {
return editTrack(mCurrentTrackID)->mStaticFragment->parseSampleToChunk(
this, type, offset, size);
}
-status_t Parser::parseChunkOffsets(
+status_t FragmentedMP4Parser::parseChunkOffsets(
uint32_t type, size_t offset, uint64_t size) {
return editTrack(mCurrentTrackID)->mStaticFragment->parseChunkOffsets(
this, type, offset, size);
}
-status_t Parser::parseChunkOffsets64(
+status_t FragmentedMP4Parser::parseChunkOffsets64(
uint32_t type, size_t offset, uint64_t size) {
return editTrack(mCurrentTrackID)->mStaticFragment->parseChunkOffsets64(
this, type, offset, size);
}
-status_t Parser::parseAVCCodecSpecificData(
+status_t FragmentedMP4Parser::parseAVCCodecSpecificData(
uint32_t type, size_t offset, uint64_t size) {
TrackInfo *trackInfo = editTrack(mCurrentTrackID);
@@ -1246,7 +1458,7 @@
return OK;
}
-status_t Parser::parseESDSCodecSpecificData(
+status_t FragmentedMP4Parser::parseESDSCodecSpecificData(
uint32_t type, size_t offset, uint64_t size) {
TrackInfo *trackInfo = editTrack(mCurrentTrackID);
@@ -1351,7 +1563,7 @@
return OK;
}
-status_t Parser::parseMediaData(
+status_t FragmentedMP4Parser::parseMediaData(
uint32_t type, size_t offset, uint64_t size) {
ALOGV("skipping 'mdat' chunk at offsets 0x%08lx-0x%08llx.",
mBufferPos + offset, mBufferPos + size);
@@ -1365,14 +1577,101 @@
info->mOffset = mBufferPos + offset;
if (mMediaData.size() > 10) {
- ALOGI("suspending for now.");
+ ALOGV("suspending for now.");
mSuspended = true;
}
return OK;
}
-status_t Parser::parseTrackExtends(
+status_t FragmentedMP4Parser::parseSegmentIndex(
+ uint32_t type, size_t offset, uint64_t size) {
+ ALOGV("sidx box type %d, offset %d, size %d", type, int(offset), int(size));
+// AString sidxstr;
+// hexdump(mBuffer->data() + offset, size, 0 /* indent */, &sidxstr);
+// ALOGV("raw sidx:");
+// ALOGV("%s", sidxstr.c_str());
+ if (offset + 12 > size) {
+ return -EINVAL;
+ }
+
+ uint32_t flags = readU32(offset);
+
+ uint32_t version = flags >> 24;
+ flags &= 0xffffff;
+
+ ALOGV("sidx version %d", version);
+
+ uint32_t referenceId = readU32(offset + 4);
+ uint32_t timeScale = readU32(offset + 8);
+ ALOGV("sidx refid/timescale: %d/%d", referenceId, timeScale);
+
+ uint64_t earliestPresentationTime;
+ uint64_t firstOffset;
+
+ offset += 12;
+
+ if (version == 0) {
+ if (offset + 8 > size) {
+ return -EINVAL;
+ }
+ earliestPresentationTime = readU32(offset);
+ firstOffset = readU32(offset + 4);
+ offset += 8;
+ } else {
+ if (offset + 16 > size) {
+ return -EINVAL;
+ }
+ earliestPresentationTime = readU64(offset);
+ firstOffset = readU64(offset + 8);
+ offset += 16;
+ }
+ ALOGV("sidx pres/off: %Ld/%Ld", earliestPresentationTime, firstOffset);
+
+ if (offset + 4 > size) {
+ return -EINVAL;
+ }
+ if (readU16(offset) != 0) { // reserved
+ return -EINVAL;
+ }
+ int32_t referenceCount = readU16(offset + 2);
+ offset += 4;
+ ALOGV("refcount: %d", referenceCount);
+
+ if (offset + referenceCount * 12 > size) {
+ return -EINVAL;
+ }
+
+ TrackInfo *info = editTrack(mCurrentTrackID);
+ uint64_t total_duration = 0;
+ for (int i = 0; i < referenceCount; i++) {
+ uint32_t d1 = readU32(offset); // reference_type (1 bit) | referenced_size (31 bits)
+ uint32_t d2 = readU32(offset + 4); // subsegment_duration, in timeScale units
+ uint32_t d3 = readU32(offset + 8); // starts_with_SAP (1) | SAP_type (3) | SAP_delta_time (28)
+
+ if (d1 & 0x80000000) {
+ ALOGW("sub-sidx boxes not supported yet");
+ }
+ bool sap = d3 & 0x80000000;
+ unsigned saptype = (d3 >> 28) & 7; // SAP_type, excluding the starts-with-SAP bit
+ if (!sap || saptype > 2) {
+ ALOGW("not a stream access point, or unsupported type");
+ }
+ total_duration += d2;
+ offset += 12;
+ ALOGV(" item %d, %08x %08x %08x", i, d1, d2, d3);
+ SidxEntry se;
+ se.mSize = d1 & 0x7fffffff;
+ se.mDurationUs = 1000000LL * d2 / timeScale;
+ info->mSidx.add(se);
+ }
+
+ info->mSidxDuration = total_duration * 1000000 / timeScale;
+ ALOGV("duration: %lld", info->mSidxDuration);
+ return OK;
+}
+
+status_t FragmentedMP4Parser::parseTrackExtends(
uint32_t type, size_t offset, uint64_t size) {
if (offset + 24 > size) {
return -EINVAL;
@@ -1393,7 +1692,7 @@
return OK;
}
-Parser::TrackInfo *Parser::editTrack(
+FragmentedMP4Parser::TrackInfo *FragmentedMP4Parser::editTrack(
uint32_t trackID, bool createIfNecessary) {
ssize_t i = mTracks.indexOfKey(trackID);
@@ -1409,6 +1708,7 @@
info.mTrackID = trackID;
info.mFlags = 0;
info.mDuration = 0xffffffff;
+ info.mSidxDuration = 0;
info.mMediaTimeScale = 0;
info.mMediaHandlerType = 0;
info.mDefaultSampleDescriptionIndex = 0;
@@ -1422,7 +1722,7 @@
return &mTracks.editValueAt(mTracks.indexOfKey(trackID));
}
-status_t Parser::parseTrackFragmentHeader(
+status_t FragmentedMP4Parser::parseTrackFragmentHeader(
uint32_t type, size_t offset, uint64_t size) {
if (offset + 8 > size) {
return -EINVAL;
@@ -1512,7 +1812,7 @@
return OK;
}
-status_t Parser::parseTrackFragmentRun(
+status_t FragmentedMP4Parser::parseTrackFragmentRun(
uint32_t type, size_t offset, uint64_t size) {
if (offset + 8 > size) {
return -EINVAL;
@@ -1670,7 +1970,7 @@
return OK;
}
-void Parser::copyBuffer(
+void FragmentedMP4Parser::copyBuffer(
sp<ABuffer> *dst, size_t offset, uint64_t size, size_t extra) const {
sp<ABuffer> buf = new ABuffer(size + extra);
memcpy(buf->data(), mBuffer->data() + offset, size);
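The new seek path reduces to a linear scan of the sidx table: each entry pairs a referenced fragment's byte size with its duration, so a target time maps to the offset of the fragment containing it, counted from the first moof. Restated as a standalone sketch (names mirror the patch; the helper itself is hypothetical):

    #include <utils/Vector.h>

    struct SidxEntry {
        size_t mSize;           // bytes covered by this reference
        uint32_t mDurationUs;   // subsegment duration in microseconds
    };

    static off_t offsetForTime(
            const android::Vector<SidxEntry> &sidx,
            off_t firstMoofOffset, int64_t positionUs) {
        int64_t totalTimeUs = 0;
        off_t offset = firstMoofOffset;
        for (size_t i = 0; i < sidx.size(); ++i) {
            totalTimeUs += sidx[i].mDurationUs;
            if (totalTimeUs > positionUs) {
                return offset;  // fragment containing positionUs
            }
            offset += sidx[i].mSize;
        }
        return -1;              // out of range, as in onSeekTo()
    }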
diff --git a/media/libmediaplayerservice/nuplayer/mp4/TrackFragment.cpp b/media/libstagefright/mp4/TrackFragment.cpp
similarity index 82%
rename from media/libmediaplayerservice/nuplayer/mp4/TrackFragment.cpp
rename to media/libstagefright/mp4/TrackFragment.cpp
index a4c31ea..3699038 100644
--- a/media/libmediaplayerservice/nuplayer/mp4/TrackFragment.cpp
+++ b/media/libstagefright/mp4/TrackFragment.cpp
@@ -28,15 +28,15 @@
namespace android {
-Parser::DynamicTrackFragment::DynamicTrackFragment()
+FragmentedMP4Parser::DynamicTrackFragment::DynamicTrackFragment()
: mComplete(false),
mSampleIndex(0) {
}
-Parser::DynamicTrackFragment::~DynamicTrackFragment() {
+FragmentedMP4Parser::DynamicTrackFragment::~DynamicTrackFragment() {
}
-status_t Parser::DynamicTrackFragment::getSample(SampleInfo *info) {
+status_t FragmentedMP4Parser::DynamicTrackFragment::getSample(SampleInfo *info) {
if (mSampleIndex >= mSamples.size()) {
return mComplete ? ERROR_END_OF_STREAM : -EWOULDBLOCK;
}
@@ -46,11 +46,11 @@
return OK;
}
-void Parser::DynamicTrackFragment::advance() {
+void FragmentedMP4Parser::DynamicTrackFragment::advance() {
++mSampleIndex;
}
-void Parser::DynamicTrackFragment::addSample(
+void FragmentedMP4Parser::DynamicTrackFragment::addSample(
off64_t dataOffset, size_t sampleSize,
uint32_t presentationTime,
size_t sampleDescIndex,
@@ -65,19 +65,19 @@
sampleInfo->mFlags = flags;
}
-status_t Parser::DynamicTrackFragment::signalCompletion() {
+status_t FragmentedMP4Parser::DynamicTrackFragment::signalCompletion() {
mComplete = true;
return OK;
}
-bool Parser::DynamicTrackFragment::complete() const {
+bool FragmentedMP4Parser::DynamicTrackFragment::complete() const {
return mComplete;
}
////////////////////////////////////////////////////////////////////////////////
-Parser::StaticTrackFragment::StaticTrackFragment()
+FragmentedMP4Parser::StaticTrackFragment::StaticTrackFragment()
: mSampleIndex(0),
mSampleCount(0),
mChunkIndex(0),
@@ -87,10 +87,10 @@
mNextSampleOffset(0) {
}
-Parser::StaticTrackFragment::~StaticTrackFragment() {
+FragmentedMP4Parser::StaticTrackFragment::~StaticTrackFragment() {
}
-status_t Parser::StaticTrackFragment::getSample(SampleInfo *info) {
+status_t FragmentedMP4Parser::StaticTrackFragment::getSample(SampleInfo *info) {
if (mSampleIndex >= mSampleCount) {
return ERROR_END_OF_STREAM;
}
@@ -104,7 +104,7 @@
return OK;
}
-void Parser::StaticTrackFragment::updateSampleInfo() {
+void FragmentedMP4Parser::StaticTrackFragment::updateSampleInfo() {
if (mSampleIndex >= mSampleCount) {
return;
}
@@ -185,7 +185,7 @@
mSampleInfo.mFlags = 0;
}
-void Parser::StaticTrackFragment::advance() {
+void FragmentedMP4Parser::StaticTrackFragment::advance() {
mNextSampleOffset += mSampleInfo.mSize;
++mSampleIndex;
@@ -223,7 +223,7 @@
ptr[3] = x & 0xff;
}
-status_t Parser::StaticTrackFragment::signalCompletion() {
+status_t FragmentedMP4Parser::StaticTrackFragment::signalCompletion() {
mSampleToChunkIndex = 0;
mSampleToChunkRemaining =
@@ -236,12 +236,12 @@
return OK;
}
-bool Parser::StaticTrackFragment::complete() const {
+bool FragmentedMP4Parser::StaticTrackFragment::complete() const {
return true;
}
-status_t Parser::StaticTrackFragment::parseSampleSizes(
- Parser *parser, uint32_t type, size_t offset, uint64_t size) {
+status_t FragmentedMP4Parser::StaticTrackFragment::parseSampleSizes(
+ FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size) {
if (offset + 12 > size) {
return ERROR_MALFORMED;
}
@@ -264,8 +264,8 @@
return OK;
}
-status_t Parser::StaticTrackFragment::parseCompactSampleSizes(
- Parser *parser, uint32_t type, size_t offset, uint64_t size) {
+status_t FragmentedMP4Parser::StaticTrackFragment::parseCompactSampleSizes(
+ FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size) {
if (offset + 12 > size) {
return ERROR_MALFORMED;
}
@@ -293,8 +293,8 @@
return OK;
}
-status_t Parser::StaticTrackFragment::parseSampleToChunk(
- Parser *parser, uint32_t type, size_t offset, uint64_t size) {
+status_t FragmentedMP4Parser::StaticTrackFragment::parseSampleToChunk(
+ FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size) {
if (offset + 8 > size) {
return ERROR_MALFORMED;
}
@@ -318,8 +318,8 @@
return OK;
}
-status_t Parser::StaticTrackFragment::parseChunkOffsets(
- Parser *parser, uint32_t type, size_t offset, uint64_t size) {
+status_t FragmentedMP4Parser::StaticTrackFragment::parseChunkOffsets(
+ FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size) {
if (offset + 8 > size) {
return ERROR_MALFORMED;
}
@@ -339,8 +339,8 @@
return OK;
}
-status_t Parser::StaticTrackFragment::parseChunkOffsets64(
- Parser *parser, uint32_t type, size_t offset, uint64_t size) {
+status_t FragmentedMP4Parser::StaticTrackFragment::parseChunkOffsets64(
+ FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size) {
if (offset + 8 > size) {
return ERROR_MALFORMED;
}
diff --git a/media/libmediaplayerservice/nuplayer/mp4/TrackFragment.h b/media/libstagefright/mp4/TrackFragment.h
similarity index 77%
rename from media/libmediaplayerservice/nuplayer/mp4/TrackFragment.h
rename to media/libstagefright/mp4/TrackFragment.h
index 1498aad..e1ad46e 100644
--- a/media/libmediaplayerservice/nuplayer/mp4/TrackFragment.h
+++ b/media/libstagefright/mp4/TrackFragment.h
@@ -18,11 +18,11 @@
#define TRACK_FRAGMENT_H_
-#include "Parser.h"
+#include "include/FragmentedMP4Parser.h"
namespace android {
-struct Parser::TrackFragment : public RefBase {
+struct FragmentedMP4Parser::TrackFragment : public RefBase {
TrackFragment() {}
virtual status_t getSample(SampleInfo *info) = 0;
@@ -38,7 +38,7 @@
DISALLOW_EVIL_CONSTRUCTORS(TrackFragment);
};
-struct Parser::DynamicTrackFragment : public Parser::TrackFragment {
+struct FragmentedMP4Parser::DynamicTrackFragment : public FragmentedMP4Parser::TrackFragment {
DynamicTrackFragment();
virtual status_t getSample(SampleInfo *info);
@@ -66,7 +66,7 @@
DISALLOW_EVIL_CONSTRUCTORS(DynamicTrackFragment);
};
-struct Parser::StaticTrackFragment : public Parser::TrackFragment {
+struct FragmentedMP4Parser::StaticTrackFragment : public FragmentedMP4Parser::TrackFragment {
StaticTrackFragment();
virtual status_t getSample(SampleInfo *info);
@@ -76,19 +76,19 @@
virtual bool complete() const;
status_t parseSampleSizes(
- Parser *parser, uint32_t type, size_t offset, uint64_t size);
+ FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size);
status_t parseCompactSampleSizes(
- Parser *parser, uint32_t type, size_t offset, uint64_t size);
+ FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size);
status_t parseSampleToChunk(
- Parser *parser, uint32_t type, size_t offset, uint64_t size);
+ FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size);
status_t parseChunkOffsets(
- Parser *parser, uint32_t type, size_t offset, uint64_t size);
+ FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size);
status_t parseChunkOffsets64(
- Parser *parser, uint32_t type, size_t offset, uint64_t size);
+ FragmentedMP4Parser *parser, uint32_t type, size_t offset, uint64_t size);
protected:
virtual ~StaticTrackFragment();
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 5f3e300..d988356 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -51,7 +51,8 @@
unsigned pid, ABitReader *br, status_t *err);
bool parsePID(
- unsigned pid, unsigned payload_unit_start_indicator,
+ unsigned pid, unsigned continuity_counter,
+ unsigned payload_unit_start_indicator,
ABitReader *br, status_t *err);
void signalDiscontinuity(
@@ -77,6 +78,10 @@
return mProgramMapPID;
}
+ uint32_t parserFlags() const {
+ return mParser->mFlags;
+ }
+
private:
ATSParser *mParser;
unsigned mProgramNumber;
@@ -91,13 +96,17 @@
};
struct ATSParser::Stream : public RefBase {
- Stream(Program *program, unsigned elementaryPID, unsigned streamType);
+ Stream(Program *program,
+ unsigned elementaryPID,
+ unsigned streamType,
+ unsigned PCR_PID);
unsigned type() const { return mStreamType; }
unsigned pid() const { return mElementaryPID; }
void setPID(unsigned pid) { mElementaryPID = pid; }
status_t parse(
+ unsigned continuity_counter,
unsigned payload_unit_start_indicator,
ABitReader *br);
@@ -115,6 +124,8 @@
Program *mProgram;
unsigned mElementaryPID;
unsigned mStreamType;
+ unsigned mPCR_PID;
+ int32_t mExpectedContinuityCounter;
sp<ABuffer> mBuffer;
sp<AnotherPacketSource> mSource;
@@ -184,7 +195,8 @@
}
bool ATSParser::Program::parsePID(
- unsigned pid, unsigned payload_unit_start_indicator,
+ unsigned pid, unsigned continuity_counter,
+ unsigned payload_unit_start_indicator,
ABitReader *br, status_t *err) {
*err = OK;
@@ -194,7 +206,7 @@
}
*err = mStreams.editValueAt(index)->parse(
- payload_unit_start_indicator, br);
+ continuity_counter, payload_unit_start_indicator, br);
return true;
}
@@ -241,7 +253,10 @@
MY_LOGV(" section_number = %u", br->getBits(8));
MY_LOGV(" last_section_number = %u", br->getBits(8));
MY_LOGV(" reserved = %u", br->getBits(3));
- MY_LOGV(" PCR_PID = 0x%04x", br->getBits(13));
+
+ unsigned PCR_PID = br->getBits(13);
+ ALOGV(" PCR_PID = 0x%04x", PCR_PID);
+
MY_LOGV(" reserved = %u", br->getBits(4));
unsigned program_info_length = br->getBits(12);
@@ -382,7 +397,9 @@
ssize_t index = mStreams.indexOfKey(info.mPID);
if (index < 0) {
- sp<Stream> stream = new Stream(this, info.mPID, info.mType);
+ sp<Stream> stream = new Stream(
+ this, info.mPID, info.mType, PCR_PID);
+
mStreams.add(info.mPID, stream);
}
}
@@ -419,21 +436,35 @@
}
}
- return (PTS * 100) / 9;
+ int64_t timeUs = (PTS * 100) / 9;
+
+ if (mParser->mAbsoluteTimeAnchorUs >= 0ll) {
+ timeUs += mParser->mAbsoluteTimeAnchorUs;
+ }
+
+ return timeUs;
}
////////////////////////////////////////////////////////////////////////////////
ATSParser::Stream::Stream(
- Program *program, unsigned elementaryPID, unsigned streamType)
+ Program *program,
+ unsigned elementaryPID,
+ unsigned streamType,
+ unsigned PCR_PID)
: mProgram(program),
mElementaryPID(elementaryPID),
mStreamType(streamType),
+ mPCR_PID(PCR_PID),
+ mExpectedContinuityCounter(-1),
mPayloadStarted(false),
mQueue(NULL) {
switch (mStreamType) {
case STREAMTYPE_H264:
- mQueue = new ElementaryStreamQueue(ElementaryStreamQueue::H264);
+ mQueue = new ElementaryStreamQueue(
+ ElementaryStreamQueue::H264,
+ (mProgram->parserFlags() & ALIGNED_VIDEO_DATA)
+ ? ElementaryStreamQueue::kFlag_AlignedData : 0);
break;
case STREAMTYPE_MPEG2_AUDIO_ADTS:
mQueue = new ElementaryStreamQueue(ElementaryStreamQueue::AAC);
@@ -473,11 +504,25 @@
}
status_t ATSParser::Stream::parse(
+ unsigned continuity_counter,
unsigned payload_unit_start_indicator, ABitReader *br) {
if (mQueue == NULL) {
return OK;
}
+ if (mExpectedContinuityCounter >= 0
+ && (unsigned)mExpectedContinuityCounter != continuity_counter) {
+ ALOGI("discontinuity on stream pid 0x%04x", mElementaryPID);
+
+ mPayloadStarted = false;
+ mBuffer->setRange(0, 0);
+ mExpectedContinuityCounter = -1;
+
+ return OK;
+ }
+
+ mExpectedContinuityCounter = (continuity_counter + 1) & 0x0f;
+
if (payload_unit_start_indicator) {
if (mPayloadStarted) {
// Otherwise we run the danger of receiving the trailing bytes
@@ -664,8 +709,7 @@
PTS |= br->getBits(15);
CHECK_EQ(br->getBits(1), 1u);
- ALOGV("PTS = %llu", PTS);
- // ALOGI("PTS = %.2f secs", PTS / 90000.0f);
+ ALOGV("PTS = 0x%016llx (%.2f)", PTS, PTS / 90000.0);
optional_bytes_remaining -= 5;
@@ -847,7 +891,10 @@
////////////////////////////////////////////////////////////////////////////////
ATSParser::ATSParser(uint32_t flags)
- : mFlags(flags) {
+ : mFlags(flags),
+ mAbsoluteTimeAnchorUs(-1ll),
+ mNumTSPacketsParsed(0),
+ mNumPCRs(0) {
mPSISections.add(0 /* PID */, new PSISection);
}
@@ -863,6 +910,15 @@
void ATSParser::signalDiscontinuity(
DiscontinuityType type, const sp<AMessage> &extra) {
+ if (type == DISCONTINUITY_ABSOLUTE_TIME) {
+ int64_t timeUs;
+ CHECK(extra->findInt64("timeUs", &timeUs));
+
+ CHECK(mPrograms.empty());
+ mAbsoluteTimeAnchorUs = timeUs;
+ return;
+ }
+
for (size_t i = 0; i < mPrograms.size(); ++i) {
mPrograms.editItemAt(i)->signalDiscontinuity(type, extra);
}
@@ -942,6 +998,7 @@
status_t ATSParser::parsePID(
ABitReader *br, unsigned PID,
+ unsigned continuity_counter,
unsigned payload_unit_start_indicator) {
ssize_t sectionIndex = mPSISections.indexOfKey(PID);
@@ -1002,7 +1059,8 @@
for (size_t i = 0; i < mPrograms.size(); ++i) {
status_t err;
if (mPrograms.editItemAt(i)->parsePID(
- PID, payload_unit_start_indicator, br, &err)) {
+ PID, continuity_counter, payload_unit_start_indicator,
+ br, &err)) {
if (err != OK) {
return err;
}
@@ -1019,10 +1077,55 @@
return OK;
}
-void ATSParser::parseAdaptationField(ABitReader *br) {
+void ATSParser::parseAdaptationField(ABitReader *br, unsigned PID) {
unsigned adaptation_field_length = br->getBits(8);
+
if (adaptation_field_length > 0) {
- br->skipBits(adaptation_field_length * 8); // XXX
+ unsigned discontinuity_indicator = br->getBits(1);
+
+ if (discontinuity_indicator) {
+ ALOGV("PID 0x%04x: discontinuity_indicator = 1 (!!!)", PID);
+ }
+
+ br->skipBits(2);
+ unsigned PCR_flag = br->getBits(1);
+
+ size_t numBitsRead = 4;
+
+ if (PCR_flag) {
+ br->skipBits(4);
+ uint64_t PCR_base = br->getBits(32);
+ PCR_base = (PCR_base << 1) | br->getBits(1);
+
+ br->skipBits(6);
+ unsigned PCR_ext = br->getBits(9);
+
+ // The number of bytes from the start of the current
+ // MPEG2 transport stream packet up and including
+ // the final byte of this PCR_ext field.
+ size_t byteOffsetFromStartOfTSPacket =
+ (188 - br->numBitsLeft() / 8);
+
+ uint64_t PCR = PCR_base * 300 + PCR_ext;
+
+ ALOGV("PID 0x%04x: PCR = 0x%016llx (%.2f)",
+ PID, PCR, PCR / 27E6);
+
+ // The number of bytes received by this parser up to and
+ // including the final byte of this PCR_ext field.
+ size_t byteOffsetFromStart =
+ mNumTSPacketsParsed * 188 + byteOffsetFromStartOfTSPacket;
+
+ updatePCR(PID, PCR, byteOffsetFromStart);
+
+ numBitsRead += 52;
+ }
+
+ CHECK_GE(adaptation_field_length * 8, numBitsRead);
+
+ br->skipBits(adaptation_field_length * 8 - numBitsRead);
}
}
@@ -1048,19 +1151,24 @@
ALOGV("adaptation_field_control = %u", adaptation_field_control);
unsigned continuity_counter = br->getBits(4);
- ALOGV("continuity_counter = %u", continuity_counter);
+ ALOGV("PID = 0x%04x, continuity_counter = %u", PID, continuity_counter);
// ALOGI("PID = 0x%04x, continuity_counter = %u", PID, continuity_counter);
if (adaptation_field_control == 2 || adaptation_field_control == 3) {
- parseAdaptationField(br);
+ parseAdaptationField(br, PID);
}
+ status_t err = OK;
+
if (adaptation_field_control == 1 || adaptation_field_control == 3) {
- return parsePID(br, PID, payload_unit_start_indicator);
+ err = parsePID(
+ br, PID, continuity_counter, payload_unit_start_indicator);
}
- return OK;
+ ++mNumTSPacketsParsed;
+
+ return err;
}
sp<MediaSource> ATSParser::getSource(SourceType type) {
@@ -1091,6 +1199,31 @@
return mPrograms.editItemAt(0)->PTSTimeDeltaEstablished();
}
+void ATSParser::updatePCR(
+ unsigned PID, uint64_t PCR, size_t byteOffsetFromStart) {
+ ALOGV("PCR 0x%016llx @ %d", PCR, byteOffsetFromStart);
+
+ if (mNumPCRs == 2) {
+ mPCR[0] = mPCR[1];
+ mPCRBytes[0] = mPCRBytes[1];
+ mSystemTimeUs[0] = mSystemTimeUs[1];
+ mNumPCRs = 1;
+ }
+
+ mPCR[mNumPCRs] = PCR;
+ mPCRBytes[mNumPCRs] = byteOffsetFromStart;
+ mSystemTimeUs[mNumPCRs] = ALooper::GetNowUs();
+
+ ++mNumPCRs;
+
+ if (mNumPCRs == 2) {
+ double transportRate =
+ (mPCRBytes[1] - mPCRBytes[0]) * 27E6 / (mPCR[1] - mPCR[0]);
+
+ ALOGV("transportRate = %.2f bytes/sec", transportRate);
+ }
+}
+
////////////////////////////////////////////////////////////////////////////////
ATSParser::PSISection::PSISection() {
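The adaptation-field code above rebuilds the 27 MHz program clock reference from a 33-bit base in 90 kHz units plus a 9-bit extension, and updatePCR() turns two successive PCRs into a byte rate for the multiplex. Condensed into two hypothetical helpers:

    #include <stdint.h>
    #include <stddef.h>

    static inline uint64_t makePCR(uint64_t pcrBase33, unsigned pcrExt9) {
        return pcrBase33 * 300 + pcrExt9;    // 90 kHz * 300 = 27 MHz ticks
    }

    // bytes/sec of the transport stream between two PCR-bearing packets
    static inline double transportRate(
            uint64_t pcr0, size_t bytes0, uint64_t pcr1, size_t bytes1) {
        return (bytes1 - bytes0) * 27E6 / (double)(pcr1 - pcr0);
    }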
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 9ef2939..5ccbab7 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -38,6 +38,7 @@
DISCONTINUITY_TIME = 1,
DISCONTINUITY_AUDIO_FORMAT = 2,
DISCONTINUITY_VIDEO_FORMAT = 4,
+ DISCONTINUITY_ABSOLUTE_TIME = 8,
DISCONTINUITY_SEEK = DISCONTINUITY_TIME,
@@ -54,7 +55,9 @@
// If this flag is _not_ specified, the first PTS encountered in a
// program of this stream will be assumed to correspond to media time 0
// instead.
- TS_TIMESTAMPS_ARE_ABSOLUTE = 1
+ TS_TIMESTAMPS_ARE_ABSOLUTE = 1,
+ // Video PES packets contain exactly one (aligned) access unit.
+ ALIGNED_VIDEO_DATA = 2,
};
ATSParser(uint32_t flags = 0);
@@ -100,17 +103,29 @@
// Keyed by PID
KeyedVector<unsigned, sp<PSISection> > mPSISections;
+ int64_t mAbsoluteTimeAnchorUs;
+
+ size_t mNumTSPacketsParsed;
+
void parseProgramAssociationTable(ABitReader *br);
void parseProgramMap(ABitReader *br);
void parsePES(ABitReader *br);
status_t parsePID(
ABitReader *br, unsigned PID,
+ unsigned continuity_counter,
unsigned payload_unit_start_indicator);
- void parseAdaptationField(ABitReader *br);
+ void parseAdaptationField(ABitReader *br, unsigned PID);
status_t parseTS(ABitReader *br);
+ void updatePCR(unsigned PID, uint64_t PCR, size_t byteOffsetFromStart);
+
+ uint64_t mPCR[2];
+ size_t mPCRBytes[2];
+ int64_t mSystemTimeUs[2];
+ size_t mNumPCRs;
+
DISALLOW_EVIL_CONSTRUCTORS(ATSParser);
};
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index 1cab077..e58e9bf 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -33,8 +33,9 @@
namespace android {
-ElementaryStreamQueue::ElementaryStreamQueue(Mode mode)
- : mMode(mode) {
+ElementaryStreamQueue::ElementaryStreamQueue(Mode mode, uint32_t flags)
+ : mMode(mode),
+ mFlags(flags) {
}
sp<MetaData> ElementaryStreamQueue::getFormat() {
@@ -289,6 +290,31 @@
}
sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnit() {
+ if ((mFlags & kFlag_AlignedData) && mMode == H264) {
+ if (mRangeInfos.empty()) {
+ return NULL;
+ }
+
+ RangeInfo info = *mRangeInfos.begin();
+ mRangeInfos.erase(mRangeInfos.begin());
+
+ sp<ABuffer> accessUnit = new ABuffer(info.mLength);
+ memcpy(accessUnit->data(), mBuffer->data(), info.mLength);
+ accessUnit->meta()->setInt64("timeUs", info.mTimestampUs);
+
+ memmove(mBuffer->data(),
+ mBuffer->data() + info.mLength,
+ mBuffer->size() - info.mLength);
+
+ mBuffer->setRange(0, mBuffer->size() - info.mLength);
+
+ if (mFormat == NULL) {
+ mFormat = MakeAVCCodecSpecificData(accessUnit);
+ }
+
+ return accessUnit;
+ }
+
switch (mMode) {
case H264:
return dequeueAccessUnitH264();
@@ -436,8 +462,8 @@
sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitH264() {
const uint8_t *data = mBuffer->data();
- size_t size = mBuffer->size();
+ size_t size = mBuffer->size();
Vector<NALPosition> nals;
size_t totalSize = 0;
diff --git a/media/libstagefright/mpeg2ts/ESQueue.h b/media/libstagefright/mpeg2ts/ESQueue.h
index 4035ed3..72aa2e7 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.h
+++ b/media/libstagefright/mpeg2ts/ESQueue.h
@@ -36,7 +36,12 @@
MPEG_VIDEO,
MPEG4_VIDEO,
};
- ElementaryStreamQueue(Mode mode);
+
+ enum Flags {
+ // Data appended to the queue is always at access unit boundaries.
+ kFlag_AlignedData = 1,
+ };
+ ElementaryStreamQueue(Mode mode, uint32_t flags = 0);
status_t appendData(const void *data, size_t size, int64_t timeUs);
void clear(bool clearFormat);
@@ -52,6 +57,7 @@
};
Mode mMode;
+ uint32_t mFlags;
sp<ABuffer> mBuffer;
List<RangeInfo> mRangeInfos;
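Wiring the new flag up is a one-liner; the H.264 stream construction in ATSParser.cpp above does effectively this (parserFlags comes from the owning program):

    uint32_t flags = (parserFlags & ATSParser::ALIGNED_VIDEO_DATA)
            ? ElementaryStreamQueue::kFlag_AlignedData : 0;
    ElementaryStreamQueue *queue =
            new ElementaryStreamQueue(ElementaryStreamQueue::H264, flags);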
diff --git a/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp b/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp
new file mode 100644
index 0000000..4c9bf5b
--- /dev/null
+++ b/media/libstagefright/rtsp/AMPEG2TSAssembler.cpp
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AMPEG2TSAssembler"
+#include <utils/Log.h>
+
+#include "AMPEG2TSAssembler.h"
+
+#include "ARTPSource.h"
+#include "ASessionDescription.h"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+
+namespace android {
+
+AMPEG2TSAssembler::AMPEG2TSAssembler(
+ const sp<AMessage> &notify, const char *desc, const AString &params)
+ : mNotifyMsg(notify),
+ mNextExpectedSeqNoValid(false),
+ mNextExpectedSeqNo(0) {
+}
+
+AMPEG2TSAssembler::~AMPEG2TSAssembler() {
+}
+
+ARTPAssembler::AssemblyStatus AMPEG2TSAssembler::assembleMore(
+ const sp<ARTPSource> &source) {
+ return addPacket(source);
+}
+
+ARTPAssembler::AssemblyStatus AMPEG2TSAssembler::addPacket(
+ const sp<ARTPSource> &source) {
+ List<sp<ABuffer> > *queue = source->queue();
+
+ if (queue->empty()) {
+ return NOT_ENOUGH_DATA;
+ }
+
+ if (mNextExpectedSeqNoValid) {
+ List<sp<ABuffer> >::iterator it = queue->begin();
+ while (it != queue->end()) {
+ if ((uint32_t)(*it)->int32Data() >= mNextExpectedSeqNo) {
+ break;
+ }
+
+ it = queue->erase(it);
+ }
+
+ if (queue->empty()) {
+ return NOT_ENOUGH_DATA;
+ }
+ }
+
+ sp<ABuffer> buffer = *queue->begin();
+
+ if (!mNextExpectedSeqNoValid) {
+ mNextExpectedSeqNoValid = true;
+ mNextExpectedSeqNo = (uint32_t)buffer->int32Data();
+ } else if ((uint32_t)buffer->int32Data() != mNextExpectedSeqNo) {
+ ALOGV("Not the sequence number I expected");
+
+ return WRONG_SEQUENCE_NUMBER;
+ }
+
+ // hexdump(buffer->data(), buffer->size());
+
+ if ((buffer->size() % 188) > 0) {
+ queue->erase(queue->begin());
+ ++mNextExpectedSeqNo;
+
+ ALOGV("Not a multiple of transport packet size.");
+
+ return MALFORMED_PACKET;
+ }
+
+ sp<AMessage> msg = mNotifyMsg->dup();
+ msg->setBuffer("access-unit", buffer);
+ msg->post();
+
+ queue->erase(queue->begin());
+ ++mNextExpectedSeqNo;
+
+ return OK;
+}
+
+void AMPEG2TSAssembler::packetLost() {
+ CHECK(mNextExpectedSeqNoValid);
+ ++mNextExpectedSeqNo;
+}
+
+void AMPEG2TSAssembler::onByeReceived() {
+ sp<AMessage> msg = mNotifyMsg->dup();
+ msg->setInt32("eos", true);
+ msg->post();
+}
+
+} // namespace android
+
+
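The assembler's only structural check is that the RTP payload is a whole number of 188-byte transport packets. A slightly stricter validation sketch (the 0x47 sync-byte test is an extra sanity check, not in the patch):

    #include <stddef.h>
    #include <stdint.h>

    static bool looksLikeMP2TPayload(const uint8_t *data, size_t size) {
        if (size == 0 || (size % 188) != 0) {
            return false;
        }
        for (size_t offset = 0; offset < size; offset += 188) {
            if (data[offset] != 0x47) {   // TS sync byte
                return false;
            }
        }
        return true;
    }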
diff --git a/media/libstagefright/rtsp/AMPEG2TSAssembler.h b/media/libstagefright/rtsp/AMPEG2TSAssembler.h
new file mode 100644
index 0000000..712e18e
--- /dev/null
+++ b/media/libstagefright/rtsp/AMPEG2TSAssembler.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef A_MPEG2_TS_ASSEMBLER_H_
+
+#define A_MPEG2_TS_ASSEMBLER_H_
+
+#include "ARTPAssembler.h"
+
+namespace android {
+
+struct AMessage;
+struct AString;
+struct MetaData;
+
+struct AMPEG2TSAssembler : public ARTPAssembler {
+ AMPEG2TSAssembler(
+ const sp<AMessage> &notify,
+ const char *desc, const AString &params);
+
+protected:
+ virtual ~AMPEG2TSAssembler();
+
+ virtual AssemblyStatus assembleMore(const sp<ARTPSource> &source);
+ virtual void onByeReceived();
+ virtual void packetLost();
+
+private:
+ sp<AMessage> mNotifyMsg;
+ bool mNextExpectedSeqNoValid;
+ uint32_t mNextExpectedSeqNo;
+
+ AssemblyStatus addPacket(const sp<ARTPSource> &source);
+
+ DISALLOW_EVIL_CONSTRUCTORS(AMPEG2TSAssembler);
+};
+
+} // namespace android
+
+#endif // A_MPEG2_TS_ASSEMBLER_H_
+
diff --git a/media/libstagefright/rtsp/APacketSource.cpp b/media/libstagefright/rtsp/APacketSource.cpp
index ddd2f06..462c384 100644
--- a/media/libstagefright/rtsp/APacketSource.cpp
+++ b/media/libstagefright/rtsp/APacketSource.cpp
@@ -566,6 +566,8 @@
codecSpecificData->data(), codecSpecificData->size());
} else if (ARawAudioAssembler::Supports(desc.c_str())) {
ARawAudioAssembler::MakeFormat(desc.c_str(), mFormat);
+ } else if (!strncasecmp("MP2T/", desc.c_str(), 5)) {
+ mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG2TS);
} else {
mInitCheck = ERROR_UNSUPPORTED;
}
diff --git a/media/libstagefright/rtsp/ARTPSource.cpp b/media/libstagefright/rtsp/ARTPSource.cpp
index ed68790..d7c3bd6 100644
--- a/media/libstagefright/rtsp/ARTPSource.cpp
+++ b/media/libstagefright/rtsp/ARTPSource.cpp
@@ -23,6 +23,7 @@
#include "AAMRAssembler.h"
#include "AAVCAssembler.h"
#include "AH263Assembler.h"
+#include "AMPEG2TSAssembler.h"
#include "AMPEG4AudioAssembler.h"
#include "AMPEG4ElementaryAssembler.h"
#include "ARawAudioAssembler.h"
@@ -73,6 +74,8 @@
mIssueFIRRequests = true;
} else if (ARawAudioAssembler::Supports(desc.c_str())) {
mAssembler = new ARawAudioAssembler(notify, desc.c_str(), params);
+ } else if (!strncasecmp(desc.c_str(), "MP2T/", 5)) {
+ mAssembler = new AMPEG2TSAssembler(notify, desc.c_str(), params);
} else {
TRESPASS();
}
diff --git a/media/libstagefright/rtsp/Android.mk b/media/libstagefright/rtsp/Android.mk
index d0f5259..49e2daf 100644
--- a/media/libstagefright/rtsp/Android.mk
+++ b/media/libstagefright/rtsp/Android.mk
@@ -6,6 +6,7 @@
AAMRAssembler.cpp \
AAVCAssembler.cpp \
AH263Assembler.cpp \
+ AMPEG2TSAssembler.cpp \
AMPEG4AudioAssembler.cpp \
AMPEG4ElementaryAssembler.cpp \
APacketSource.cpp \
diff --git a/media/libstagefright/wifi-display/Android.mk b/media/libstagefright/wifi-display/Android.mk
index b035a51..0e59b9e 100644
--- a/media/libstagefright/wifi-display/Android.mk
+++ b/media/libstagefright/wifi-display/Android.mk
@@ -5,6 +5,10 @@
LOCAL_SRC_FILES:= \
ANetworkSession.cpp \
ParsedMessage.cpp \
+ sink/LinearRegression.cpp \
+ sink/RTPSink.cpp \
+ sink/TunnelRenderer.cpp \
+ sink/WifiDisplaySink.cpp \
source/Converter.cpp \
source/PlaybackSession.cpp \
source/RepeaterSource.cpp \
diff --git a/media/libstagefright/wifi-display/sink/LinearRegression.cpp b/media/libstagefright/wifi-display/sink/LinearRegression.cpp
new file mode 100644
index 0000000..8cfce37
--- /dev/null
+++ b/media/libstagefright/wifi-display/sink/LinearRegression.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "LinearRegression"
+#include <utils/Log.h>
+
+#include "LinearRegression.h"
+
+#include <math.h>
+#include <string.h>
+
+namespace android {
+
+LinearRegression::LinearRegression(size_t historySize)
+ : mHistorySize(historySize),
+ mCount(0),
+ mHistory(new Point[mHistorySize]),
+ mSumX(0.0),
+ mSumY(0.0) {
+}
+
+LinearRegression::~LinearRegression() {
+ delete[] mHistory;
+ mHistory = NULL;
+}
+
+void LinearRegression::addPoint(float x, float y) {
+ if (mCount == mHistorySize) {
+ const Point &oldest = mHistory[0];
+
+ mSumX -= oldest.mX;
+ mSumY -= oldest.mY;
+
+ memmove(&mHistory[0], &mHistory[1], (mHistorySize - 1) * sizeof(Point));
+ --mCount;
+ }
+
+ Point *newest = &mHistory[mCount++];
+ newest->mX = x;
+ newest->mY = y;
+
+ mSumX += x;
+ mSumY += y;
+}
+
+bool LinearRegression::approxLine(float *n1, float *n2, float *b) const {
+ static const float kEpsilon = 1.0E-4;
+
+ if (mCount < 2) {
+ return false;
+ }
+
+ float sumX2 = 0.0f;
+ float sumY2 = 0.0f;
+ float sumXY = 0.0f;
+
+ float meanX = mSumX / (float)mCount;
+ float meanY = mSumY / (float)mCount;
+
+ for (size_t i = 0; i < mCount; ++i) {
+ const Point &p = mHistory[i];
+
+ float x = p.mX - meanX;
+ float y = p.mY - meanY;
+
+ sumX2 += x * x;
+ sumY2 += y * y;
+ sumXY += x * y;
+ }
+
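+ // The best-fit line minimizes n^T S n over unit normals n, where S is the
+ // 2x2 scatter matrix [[sumX2, sumXY], [sumXY, sumY2]] of the centered
+ // points. The minimizer is the eigenvector of S for its smaller eigenvalue
+ // L1 = T/2 - sqrt(T^2/4 - D), with T and D the trace and determinant of S.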
+ float T = sumX2 + sumY2;
+ float D = sumX2 * sumY2 - sumXY * sumXY;
+ float root = sqrt(T * T * 0.25 - D);
+
+ float L1 = T * 0.5 - root;
+
+ if (fabs(sumXY) > kEpsilon) {
+ *n1 = 1.0;
+ *n2 = (L1 - sumX2) / sumXY; // from (sumX2 - L1) * n1 + sumXY * n2 == 0
+
+ float mag = sqrt((*n1) * (*n1) + (*n2) * (*n2));
+
+ *n1 /= mag;
+ *n2 /= mag;
+ } else {
+ *n1 = 0.0;
+ *n2 = 1.0;
+ }
+
+ *b = (*n1) * meanX + (*n2) * meanY;
+
+ return true;
+}
+
+} // namespace android
+
diff --git a/media/libstagefright/wifi-display/sink/LinearRegression.h b/media/libstagefright/wifi-display/sink/LinearRegression.h
new file mode 100644
index 0000000..ca6f5a1
--- /dev/null
+++ b/media/libstagefright/wifi-display/sink/LinearRegression.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LINEAR_REGRESSION_H_
+
+#define LINEAR_REGRESSION_H_
+
+#include <sys/types.h>
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+// Helper class to fit a line to a set of points minimizing the sum of
+// squared (orthogonal) distances from line to individual points.
+struct LinearRegression {
+ LinearRegression(size_t historySize);
+ ~LinearRegression();
+
+ void addPoint(float x, float y);
+
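+ // On success returns the fitted line in implicit form n1*x + n2*y = b,
+ // where (n1, n2) is a unit normal; fails (returns false) if fewer than
+ // two points have been added.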
+ bool approxLine(float *n1, float *n2, float *b) const;
+
+private:
+ struct Point {
+ float mX, mY;
+ };
+
+ size_t mHistorySize;
+ size_t mCount;
+ Point *mHistory;
+
+ float mSumX, mSumY;
+
+ DISALLOW_EVIL_CONSTRUCTORS(LinearRegression);
+};
+
+} // namespace android
+
+#endif // LINEAR_REGRESSION_H_
diff --git a/media/libstagefright/wifi-display/sink/RTPSink.cpp b/media/libstagefright/wifi-display/sink/RTPSink.cpp
new file mode 100644
index 0000000..0918034
--- /dev/null
+++ b/media/libstagefright/wifi-display/sink/RTPSink.cpp
@@ -0,0 +1,806 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "RTPSink"
+#include <utils/Log.h>
+
+#include "RTPSink.h"
+
+#include "ANetworkSession.h"
+#include "TunnelRenderer.h"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/Utils.h>
+
+namespace android {
+
+struct RTPSink::Source : public RefBase {
+ Source(uint16_t seq, const sp<ABuffer> &buffer,
+ const sp<AMessage> queueBufferMsg);
+
+ bool updateSeq(uint16_t seq, const sp<ABuffer> &buffer);
+
+ void addReportBlock(uint32_t ssrc, const sp<ABuffer> &buf);
+
+protected:
+ virtual ~Source();
+
+private:
+ static const uint32_t kMinSequential = 2;
+ static const uint32_t kMaxDropout = 3000;
+ static const uint32_t kMaxMisorder = 100;
+ static const uint32_t kRTPSeqMod = 1u << 16;
+
+ sp<AMessage> mQueueBufferMsg;
+
+ uint16_t mMaxSeq;
+ uint32_t mCycles;
+ uint32_t mBaseSeq;
+ uint32_t mBadSeq;
+ uint32_t mProbation;
+ uint32_t mReceived;
+ uint32_t mExpectedPrior;
+ uint32_t mReceivedPrior;
+
+ void initSeq(uint16_t seq);
+ void queuePacket(const sp<ABuffer> &buffer);
+
+ DISALLOW_EVIL_CONSTRUCTORS(Source);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+RTPSink::Source::Source(
+ uint16_t seq, const sp<ABuffer> &buffer,
+ const sp<AMessage> queueBufferMsg)
+ : mQueueBufferMsg(queueBufferMsg),
+ mProbation(kMinSequential) {
+ initSeq(seq);
+ mMaxSeq = seq - 1;
+
+ buffer->setInt32Data(mCycles | seq);
+ queuePacket(buffer);
+}
+
+RTPSink::Source::~Source() {
+}
+
+void RTPSink::Source::initSeq(uint16_t seq) {
+ mMaxSeq = seq;
+ mCycles = 0;
+ mBaseSeq = seq;
+ mBadSeq = kRTPSeqMod + 1;
+ mReceived = 0;
+ mExpectedPrior = 0;
+ mReceivedPrior = 0;
+}
+
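+ // Sequence number bookkeeping modeled on the update_seq algorithm of
+ // RFC 3550, Appendix A.1: new sources stay on probation until
+ // kMinSequential packets arrive in order, moderate reordering and gaps
+ // are tolerated, and two sequential packets after a very large jump
+ // trigger a re-sync.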
+bool RTPSink::Source::updateSeq(uint16_t seq, const sp<ABuffer> &buffer) {
+ uint16_t udelta = seq - mMaxSeq;
+
+ if (mProbation) {
+ // Startup phase
+
+ if (seq == mMaxSeq + 1) {
+ buffer->setInt32Data(mCycles | seq);
+ queuePacket(buffer);
+
+ --mProbation;
+ mMaxSeq = seq;
+ if (mProbation == 0) {
+ initSeq(seq);
+ ++mReceived;
+
+ return true;
+ }
+ } else {
+ // Packet out of sequence, restart startup phase
+
+ mProbation = kMinSequential - 1;
+ mMaxSeq = seq;
+
+#if 0
+ mPackets.clear();
+ mTotalBytesQueued = 0;
+ ALOGI("XXX cleared packets");
+#endif
+
+ buffer->setInt32Data(mCycles | seq);
+ queuePacket(buffer);
+ }
+
+ return false;
+ }
+
+ if (udelta < kMaxDropout) {
+ // In order, with permissible gap.
+
+ if (seq < mMaxSeq) {
+ // Sequence number wrapped - count another 64K cycle
+ mCycles += kRTPSeqMod;
+ }
+
+ mMaxSeq = seq;
+ } else if (udelta <= kRTPSeqMod - kMaxMisorder) {
+ // The sequence number made a very large jump
+
+ if (seq == mBadSeq) {
+ // Two sequential packets -- assume the other side
+ // restarted without telling us, so just re-sync
+ // (i.e. pretend this was the first packet).
+
+ initSeq(seq);
+ } else {
+ mBadSeq = (seq + 1) & (kRTPSeqMod - 1);
+
+ return false;
+ }
+ } else {
+ // Duplicate or reordered packet.
+ }
+
+ ++mReceived;
+
+ buffer->setInt32Data(mCycles | seq);
+ queuePacket(buffer);
+
+ return true;
+}
+
+void RTPSink::Source::queuePacket(const sp<ABuffer> &buffer) {
+ sp<AMessage> msg = mQueueBufferMsg->dup();
+ msg->setBuffer("buffer", buffer);
+ msg->post();
+}
+
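+ // Appends a 24-byte RTCP reception report block (RFC 3550 section
+ // 6.4.1) for this source; the fraction-lost and cumulative-loss math
+ // follows Appendix A.3. Jitter, LSR and DLSR are left at zero for now.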
+void RTPSink::Source::addReportBlock(
+ uint32_t ssrc, const sp<ABuffer> &buf) {
+ uint32_t extMaxSeq = mMaxSeq | mCycles;
+ uint32_t expected = extMaxSeq - mBaseSeq + 1;
+
+ int64_t lost = (int64_t)expected - (int64_t)mReceived;
+ if (lost > 0x7fffff) {
+ lost = 0x7fffff;
+ } else if (lost < -0x800000) {
+ lost = -0x800000;
+ }
+
+ uint32_t expectedInterval = expected - mExpectedPrior;
+ mExpectedPrior = expected;
+
+ uint32_t receivedInterval = mReceived - mReceivedPrior;
+ mReceivedPrior = mReceived;
+
+ int64_t lostInterval = expectedInterval - receivedInterval;
+
+ uint8_t fractionLost;
+ if (expectedInterval == 0 || lostInterval <= 0) {
+ fractionLost = 0;
+ } else {
+ fractionLost = (lostInterval << 8) / expectedInterval;
+ }
+
+ uint8_t *ptr = buf->data() + buf->size();
+
+ ptr[0] = ssrc >> 24;
+ ptr[1] = (ssrc >> 16) & 0xff;
+ ptr[2] = (ssrc >> 8) & 0xff;
+ ptr[3] = ssrc & 0xff;
+
+ ptr[4] = fractionLost;
+
+ ptr[5] = (lost >> 16) & 0xff;
+ ptr[6] = (lost >> 8) & 0xff;
+ ptr[7] = lost & 0xff;
+
+ ptr[8] = extMaxSeq >> 24;
+ ptr[9] = (extMaxSeq >> 16) & 0xff;
+ ptr[10] = (extMaxSeq >> 8) & 0xff;
+ ptr[11] = extMaxSeq & 0xff;
+
+ // XXX TODO:
+
+ ptr[12] = 0x00; // interarrival jitter
+ ptr[13] = 0x00;
+ ptr[14] = 0x00;
+ ptr[15] = 0x00;
+
+ ptr[16] = 0x00; // last SR
+ ptr[17] = 0x00;
+ ptr[18] = 0x00;
+ ptr[19] = 0x00;
+
+ ptr[20] = 0x00; // delay since last SR
+ ptr[21] = 0x00;
+ ptr[22] = 0x00;
+ ptr[23] = 0x00;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+RTPSink::RTPSink(
+ const sp<ANetworkSession> &netSession,
+ const sp<ISurfaceTexture> &surfaceTex)
+ : mNetSession(netSession),
+ mSurfaceTex(surfaceTex),
+ mRTPPort(0),
+ mRTPSessionID(0),
+ mRTCPSessionID(0),
+ mFirstArrivalTimeUs(-1ll),
+ mNumPacketsReceived(0ll),
+ mRegression(1000),
+ mMaxDelayMs(-1ll) {
+}
+
+RTPSink::~RTPSink() {
+ if (mRTCPSessionID != 0) {
+ mNetSession->destroySession(mRTCPSessionID);
+ }
+
+ if (mRTPSessionID != 0) {
+ mNetSession->destroySession(mRTPSessionID);
+ }
+}
+
+status_t RTPSink::init(bool useTCPInterleaving) {
+ if (useTCPInterleaving) {
+ return OK;
+ }
+
+ int clientRtp;
+
+ sp<AMessage> rtpNotify = new AMessage(kWhatRTPNotify, id());
+ sp<AMessage> rtcpNotify = new AMessage(kWhatRTCPNotify, id());
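+ // Scan upward from an arbitrary base port, binding RTP to the even
+ // port and RTCP to the odd port right above it, per RTP convention.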
+ for (clientRtp = 15550;; clientRtp += 2) {
+ int32_t rtpSession;
+ status_t err = mNetSession->createUDPSession(
+ clientRtp, rtpNotify, &rtpSession);
+
+ if (err != OK) {
+ ALOGI("failed to create RTP socket on port %d", clientRtp);
+ continue;
+ }
+
+ int32_t rtcpSession;
+ err = mNetSession->createUDPSession(
+ clientRtp + 1, rtcpNotify, &rtcpSession);
+
+ if (err == OK) {
+ mRTPPort = clientRtp;
+ mRTPSessionID = rtpSession;
+ mRTCPSessionID = rtcpSession;
+ break;
+ }
+
+ ALOGI("failed to create RTCP socket on port %d", clientRtp + 1);
+ mNetSession->destroySession(rtpSession);
+ }
+
+ if (mRTPPort == 0) {
+ return UNKNOWN_ERROR;
+ }
+
+ return OK;
+}
+
+int32_t RTPSink::getRTPPort() const {
+ return mRTPPort;
+}
+
+void RTPSink::onMessageReceived(const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatRTPNotify:
+ case kWhatRTCPNotify:
+ {
+ int32_t reason;
+ CHECK(msg->findInt32("reason", &reason));
+
+ switch (reason) {
+ case ANetworkSession::kWhatError:
+ {
+ int32_t sessionID;
+ CHECK(msg->findInt32("sessionID", &sessionID));
+
+ int32_t err;
+ CHECK(msg->findInt32("err", &err));
+
+ AString detail;
+ CHECK(msg->findString("detail", &detail));
+
+ ALOGE("An error occurred in session %d (%d, '%s/%s').",
+ sessionID,
+ err,
+ detail.c_str(),
+ strerror(-err));
+
+ mNetSession->destroySession(sessionID);
+
+ if (sessionID == mRTPSessionID) {
+ mRTPSessionID = 0;
+ } else if (sessionID == mRTCPSessionID) {
+ mRTCPSessionID = 0;
+ }
+ break;
+ }
+
+ case ANetworkSession::kWhatDatagram:
+ {
+ int32_t sessionID;
+ CHECK(msg->findInt32("sessionID", &sessionID));
+
+ sp<ABuffer> data;
+ CHECK(msg->findBuffer("data", &data));
+
+ status_t err;
+ if (msg->what() == kWhatRTPNotify) {
+ err = parseRTP(data);
+ } else {
+ err = parseRTCP(data);
+ }
+ break;
+ }
+
+ default:
+ TRESPASS();
+ }
+ break;
+ }
+
+ case kWhatSendRR:
+ {
+ onSendRR();
+ break;
+ }
+
+ case kWhatPacketLost:
+ {
+ onPacketLost(msg);
+ break;
+ }
+
+ case kWhatInject:
+ {
+ int32_t isRTP;
+ CHECK(msg->findInt32("isRTP", &isRTP));
+
+ sp<ABuffer> buffer;
+ CHECK(msg->findBuffer("buffer", &buffer));
+
+ status_t err;
+ if (isRTP) {
+ err = parseRTP(buffer);
+ } else {
+ err = parseRTCP(buffer);
+ }
+ break;
+ }
+
+ default:
+ TRESPASS();
+ }
+}
+
+status_t RTPSink::injectPacket(bool isRTP, const sp<ABuffer> &buffer) {
+ sp<AMessage> msg = new AMessage(kWhatInject, id());
+ msg->setInt32("isRTP", isRTP);
+ msg->setBuffer("buffer", buffer);
+ msg->post();
+
+ return OK;
+}
+
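+ // Validates and strips the fixed RTP header (RFC 3550 section 5.1),
+ // including padding, the CSRC list and any header extension, leaving
+ // the buffer positioned at the payload.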
+status_t RTPSink::parseRTP(const sp<ABuffer> &buffer) {
+ size_t size = buffer->size();
+ if (size < 12) {
+ // Too short to be a valid RTP header.
+ return ERROR_MALFORMED;
+ }
+
+ const uint8_t *data = buffer->data();
+
+ if ((data[0] >> 6) != 2) {
+ // Unsupported version.
+ return ERROR_UNSUPPORTED;
+ }
+
+ if (data[0] & 0x20) {
+ // Padding present.
+
+ size_t paddingLength = data[size - 1];
+
+ if (paddingLength + 12 > size) {
+ // If we removed this much padding we'd end up with something
+ // that's too short to be a valid RTP header.
+ return ERROR_MALFORMED;
+ }
+
+ size -= paddingLength;
+ }
+
+ int numCSRCs = data[0] & 0x0f;
+
+ size_t payloadOffset = 12 + 4 * numCSRCs;
+
+ if (size < payloadOffset) {
+ // Not enough data to fit the basic header and all the CSRC entries.
+ return ERROR_MALFORMED;
+ }
+
+ if (data[0] & 0x10) {
+ // Header eXtension present.
+
+ if (size < payloadOffset + 4) {
+ // Not enough data to fit the basic header, all CSRC entries
+ // and the first 4 bytes of the extension header.
+
+ return ERROR_MALFORMED;
+ }
+
+ const uint8_t *extensionData = &data[payloadOffset];
+
+ size_t extensionLength =
+ 4 * (extensionData[2] << 8 | extensionData[3]);
+
+ if (size < payloadOffset + 4 + extensionLength) {
+ return ERROR_MALFORMED;
+ }
+
+ payloadOffset += 4 + extensionLength;
+ }
+
+ uint32_t srcId = U32_AT(&data[8]);
+ uint32_t rtpTime = U32_AT(&data[4]);
+ uint16_t seqNo = U16_AT(&data[2]);
+
+ int64_t arrivalTimeUs;
+ CHECK(buffer->meta()->findInt64("arrivalTimeUs", &arrivalTimeUs));
+
+ if (mFirstArrivalTimeUs < 0ll) {
+ mFirstArrivalTimeUs = arrivalTimeUs;
+ }
+ arrivalTimeUs -= mFirstArrivalTimeUs;
+
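+ // Express the arrival time in the 90kHz media clock used by the RTP
+ // timestamps: us * 90000 / 1e6 == us * 9 / 100.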
+ int64_t arrivalTimeMedia = (arrivalTimeUs * 9ll) / 100ll;
+
+ ALOGV("seqNo: %d, SSRC 0x%08x, diff %lld",
+ seqNo, srcId, rtpTime - arrivalTimeMedia);
+
+ mRegression.addPoint((float)rtpTime, (float)arrivalTimeMedia);
+
+ ++mNumPacketsReceived;
+
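+ // Fit rtpTime vs. arrival time with a line; a packet's lateness is its
+ // deviation from the fit, converted from 90kHz ticks to milliseconds.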
+ float n1, n2, b;
+ if (mRegression.approxLine(&n1, &n2, &b)) {
+ ALOGV("Line %lld: %.2f %.2f %.2f, slope %.2f",
+ mNumPacketsReceived, n1, n2, b, -n1 / n2);
+
+ float expectedArrivalTimeMedia = (b - n1 * (float)rtpTime) / n2;
+ float latenessMs = (arrivalTimeMedia - expectedArrivalTimeMedia) / 90.0;
+
+ if (mMaxDelayMs < 0ll || latenessMs > mMaxDelayMs) {
+ mMaxDelayMs = latenessMs;
+ ALOGI("packet was %.2f ms late", latenessMs);
+ }
+ }
+
+ sp<AMessage> meta = buffer->meta();
+ meta->setInt32("ssrc", srcId);
+ meta->setInt32("rtp-time", rtpTime);
+ meta->setInt32("PT", data[1] & 0x7f);
+ meta->setInt32("M", data[1] >> 7);
+
+ buffer->setRange(payloadOffset, size - payloadOffset);
+
+ ssize_t index = mSources.indexOfKey(srcId);
+ if (index < 0) {
+ if (mRenderer == NULL) {
+ sp<AMessage> notifyLost = new AMessage(kWhatPacketLost, id());
+ notifyLost->setInt32("ssrc", srcId);
+
+ mRenderer = new TunnelRenderer(notifyLost, mSurfaceTex);
+ looper()->registerHandler(mRenderer);
+ }
+
+ sp<AMessage> queueBufferMsg =
+ new AMessage(TunnelRenderer::kWhatQueueBuffer, mRenderer->id());
+
+ sp<Source> source = new Source(seqNo, buffer, queueBufferMsg);
+ mSources.add(srcId, source);
+ } else {
+ mSources.valueAt(index)->updateSeq(seqNo, buffer);
+ }
+
+ return OK;
+}
+
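+ // Walks a compound RTCP packet (RFC 3550 section 6.1), dispatching on
+ // the packet type byte; the length field counts 32-bit words minus one.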
+status_t RTPSink::parseRTCP(const sp<ABuffer> &buffer) {
+ const uint8_t *data = buffer->data();
+ size_t size = buffer->size();
+
+ while (size > 0) {
+ if (size < 8) {
+ // Too short to be a valid RTCP header
+ return ERROR_MALFORMED;
+ }
+
+ if ((data[0] >> 6) != 2) {
+ // Unsupported version.
+ return ERROR_UNSUPPORTED;
+ }
+
+ if (data[0] & 0x20) {
+ // Padding present.
+
+ size_t paddingLength = data[size - 1];
+
+ if (paddingLength + 12 > size) {
+ // If we removed this much padding we'd end up with something
+ // that's too short to be a valid RTCP header.
+ return ERROR_MALFORMED;
+ }
+
+ size -= paddingLength;
+ }
+
+ size_t headerLength = 4 * (data[2] << 8 | data[3]) + 4;
+
+ if (size < headerLength) {
+ // Only received a partial packet?
+ return ERROR_MALFORMED;
+ }
+
+ switch (data[1]) {
+ case 200:
+ {
+ parseSR(data, headerLength);
+ break;
+ }
+
+ case 201: // RR
+ case 202: // SDES
+ case 204: // APP
+ break;
+
+ case 205: // TSFB (transport layer specific feedback)
+ case 206: // PSFB (payload specific feedback)
+ // hexdump(data, headerLength);
+ break;
+
+ case 203:
+ {
+ parseBYE(data, headerLength);
+ break;
+ }
+
+ default:
+ {
+ ALOGW("Unknown RTCP packet type %u of size %d",
+ (unsigned)data[1], headerLength);
+ break;
+ }
+ }
+
+ data += headerLength;
+ size -= headerLength;
+ }
+
+ return OK;
+}
+
+status_t RTPSink::parseBYE(const uint8_t *data, size_t size) {
+ size_t SC = data[0] & 0x3f;
+
+ if (SC == 0 || size < (4 + SC * 4)) {
+ // Packet too short for the minimal BYE header.
+ return ERROR_MALFORMED;
+ }
+
+ uint32_t id = U32_AT(&data[4]);
+
+ return OK;
+}
+
+status_t RTPSink::parseSR(const uint8_t *data, size_t size) {
+ size_t RC = data[0] & 0x1f;
+
+ if (size < (7 + RC * 6) * 4) {
+ // Packet too short for the minimal SR header.
+ return ERROR_MALFORMED;
+ }
+
+ uint32_t id = U32_AT(&data[4]);
+ uint64_t ntpTime = U64_AT(&data[8]);
+ uint32_t rtpTime = U32_AT(&data[16]);
+
+ ALOGV("SR: ssrc 0x%08x, ntpTime 0x%016llx, rtpTime 0x%08x",
+ id, ntpTime, rtpTime);
+
+ return OK;
+}
+
+status_t RTPSink::connect(
+ const char *host, int32_t remoteRtpPort, int32_t remoteRtcpPort) {
+ ALOGI("connecting RTP/RTCP sockets to %s:{%d,%d}",
+ host, remoteRtpPort, remoteRtcpPort);
+
+ status_t err =
+ mNetSession->connectUDPSession(mRTPSessionID, host, remoteRtpPort);
+
+ if (err != OK) {
+ return err;
+ }
+
+ err = mNetSession->connectUDPSession(mRTCPSessionID, host, remoteRtcpPort);
+
+ if (err != OK) {
+ return err;
+ }
+
+#if 0
+ sp<ABuffer> buf = new ABuffer(1500);
+ memset(buf->data(), 0, buf->size());
+
+ mNetSession->sendRequest(
+ mRTPSessionID, buf->data(), buf->size());
+
+ mNetSession->sendRequest(
+ mRTCPSessionID, buf->data(), buf->size());
+#endif
+
+ scheduleSendRR();
+
+ return OK;
+}
+
+void RTPSink::scheduleSendRR() {
+ (new AMessage(kWhatSendRR, id()))->post(2000000ll);
+}
+
+void RTPSink::addSDES(const sp<ABuffer> &buffer) {
+ uint8_t *data = buffer->data() + buffer->size();
+ data[0] = 0x80 | 1;
+ data[1] = 202; // SDES
+ data[4] = 0xde; // SSRC
+ data[5] = 0xad;
+ data[6] = 0xbe;
+ data[7] = 0xef;
+
+ size_t offset = 8;
+
+ data[offset++] = 1; // CNAME
+
+ AString cname = "stagefright@somewhere";
+ data[offset++] = cname.size();
+
+ memcpy(&data[offset], cname.c_str(), cname.size());
+ offset += cname.size();
+
+ data[offset++] = 6; // TOOL
+
+ AString tool = "stagefright/1.0";
+ data[offset++] = tool.size();
+
+ memcpy(&data[offset], tool.c_str(), tool.size());
+ offset += tool.size();
+
+ data[offset++] = 0;
+
+ if ((offset % 4) > 0) {
+ size_t count = 4 - (offset % 4);
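+ // Pad the SDES chunk to a 32-bit boundary; the cases below
+ // intentionally fall through.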
+ switch (count) {
+ case 3:
+ data[offset++] = 0;
+ case 2:
+ data[offset++] = 0;
+ case 1:
+ data[offset++] = 0;
+ }
+ }
+
+ size_t numWords = (offset / 4) - 1;
+ data[2] = numWords >> 8;
+ data[3] = numWords & 0xff;
+
+ buffer->setRange(buffer->offset(), buffer->size() + offset);
+}
+
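+ // Emits a compound RTCP packet -- a receiver report with up to 31
+ // report blocks, followed by an SDES chunk -- and reschedules itself
+ // to run every two seconds.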
+void RTPSink::onSendRR() {
+ sp<ABuffer> buf = new ABuffer(1500);
+ buf->setRange(0, 0);
+
+ uint8_t *ptr = buf->data();
+ ptr[0] = 0x80 | 0;
+ ptr[1] = 201; // RR
+ ptr[2] = 0;
+ ptr[3] = 1;
+ ptr[4] = 0xde; // SSRC
+ ptr[5] = 0xad;
+ ptr[6] = 0xbe;
+ ptr[7] = 0xef;
+
+ buf->setRange(0, 8);
+
+ size_t numReportBlocks = 0;
+ for (size_t i = 0; i < mSources.size(); ++i) {
+ uint32_t ssrc = mSources.keyAt(i);
+ sp<Source> source = mSources.valueAt(i);
+
+ if (numReportBlocks >= 31 || buf->size() + 24 > buf->capacity()) {
+ // The 5-bit RC field caps a report at 31 blocks; another block
+ // must also fit in the buffer.
+ break;
+ }
+
+ source->addReportBlock(ssrc, buf);
+ ++numReportBlocks;
+ }
+
+ ptr[0] |= numReportBlocks; // 5 bit
+
+ size_t sizeInWordsMinus1 = 1 + 6 * numReportBlocks;
+ ptr[2] = sizeInWordsMinus1 >> 8;
+ ptr[3] = sizeInWordsMinus1 & 0xff;
+
+ buf->setRange(0, (sizeInWordsMinus1 + 1) * 4);
+
+ addSDES(buf);
+
+ mNetSession->sendRequest(mRTCPSessionID, buf->data(), buf->size());
+
+ scheduleSendRR();
+}
+
+void RTPSink::onPacketLost(const sp<AMessage> &msg) {
+ uint32_t srcId;
+ CHECK(msg->findInt32("ssrc", (int32_t *)&srcId));
+
+ int32_t seqNo;
+ CHECK(msg->findInt32("seqNo", &seqNo));
+
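+ // Build a generic NACK (RFC 4585 section 6.2.1): the FCI carries the
+ // missing packet ID plus a bitmask (BLP) of further lost packets; we
+ // only ever report a single packet, hence a zero bitmask.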
+ int32_t blp = 0;
+
+ sp<ABuffer> buf = new ABuffer(1500);
+ buf->setRange(0, 0);
+
+ uint8_t *ptr = buf->data();
+ ptr[0] = 0x80 | 1; // generic NACK
+ ptr[1] = 205; // RTPFB
+ ptr[2] = 0;
+ ptr[3] = 3;
+ ptr[4] = 0xde; // sender SSRC
+ ptr[5] = 0xad;
+ ptr[6] = 0xbe;
+ ptr[7] = 0xef;
+ ptr[8] = (srcId >> 24) & 0xff;
+ ptr[9] = (srcId >> 16) & 0xff;
+ ptr[10] = (srcId >> 8) & 0xff;
+ ptr[11] = (srcId & 0xff);
+ ptr[12] = (seqNo >> 8) & 0xff;
+ ptr[13] = (seqNo & 0xff);
+ ptr[14] = (blp >> 8) & 0xff;
+ ptr[15] = (blp & 0xff);
+
+ buf->setRange(0, 16);
+
+ mNetSession->sendRequest(mRTCPSessionID, buf->data(), buf->size());
+}
+
+} // namespace android
+
diff --git a/media/libstagefright/wifi-display/sink/RTPSink.h b/media/libstagefright/wifi-display/sink/RTPSink.h
new file mode 100644
index 0000000..a1d127d
--- /dev/null
+++ b/media/libstagefright/wifi-display/sink/RTPSink.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RTP_SINK_H_
+
+#define RTP_SINK_H_
+
+#include <media/stagefright/foundation/AHandler.h>
+
+#include "LinearRegression.h"
+
+#include <gui/Surface.h>
+
+namespace android {
+
+struct ABuffer;
+struct ANetworkSession;
+struct TunnelRenderer;
+
+// Creates a pair of sockets for RTP/RTCP traffic, instantiates a renderer
+// for incoming transport stream data and occasionally sends statistics over
+// the RTCP channel.
+struct RTPSink : public AHandler {
+ RTPSink(const sp<ANetworkSession> &netSession,
+ const sp<ISurfaceTexture> &surfaceTex);
+
+ // If TCP interleaving is used, no UDP sockets are created, instead
+ // incoming RTP/RTCP packets (arriving on the RTSP control connection)
+ // are manually injected by WifiDisplaySink.
+ status_t init(bool useTCPInterleaving);
+
+ status_t connect(
+ const char *host, int32_t remoteRtpPort, int32_t remoteRtcpPort);
+
+ int32_t getRTPPort() const;
+
+ status_t injectPacket(bool isRTP, const sp<ABuffer> &buffer);
+
+protected:
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+ virtual ~RTPSink();
+
+private:
+ enum {
+ kWhatRTPNotify,
+ kWhatRTCPNotify,
+ kWhatSendRR,
+ kWhatPacketLost,
+ kWhatInject,
+ };
+
+ struct Source;
+ struct StreamSource;
+
+ sp<ANetworkSession> mNetSession;
+ sp<ISurfaceTexture> mSurfaceTex;
+ KeyedVector<uint32_t, sp<Source> > mSources;
+
+ int32_t mRTPPort;
+ int32_t mRTPSessionID;
+ int32_t mRTCPSessionID;
+
+ int64_t mFirstArrivalTimeUs;
+ int64_t mNumPacketsReceived;
+ LinearRegression mRegression;
+ int64_t mMaxDelayMs;
+
+ sp<TunnelRenderer> mRenderer;
+
+ status_t parseRTP(const sp<ABuffer> &buffer);
+ status_t parseRTCP(const sp<ABuffer> &buffer);
+ status_t parseBYE(const uint8_t *data, size_t size);
+ status_t parseSR(const uint8_t *data, size_t size);
+
+ void addSDES(const sp<ABuffer> &buffer);
+ void onSendRR();
+ void onPacketLost(const sp<AMessage> &msg);
+ void scheduleSendRR();
+
+ DISALLOW_EVIL_CONSTRUCTORS(RTPSink);
+};
+
+} // namespace android
+
+#endif // RTP_SINK_H_
diff --git a/media/libstagefright/wifi-display/sink/TunnelRenderer.cpp b/media/libstagefright/wifi-display/sink/TunnelRenderer.cpp
new file mode 100644
index 0000000..bc35aef
--- /dev/null
+++ b/media/libstagefright/wifi-display/sink/TunnelRenderer.cpp
@@ -0,0 +1,396 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TunnelRenderer"
+#include <utils/Log.h>
+
+#include "TunnelRenderer.h"
+
+#include "ATSParser.h"
+
+#include <binder/IMemory.h>
+#include <binder/IServiceManager.h>
+#include <gui/SurfaceComposerClient.h>
+#include <media/IMediaPlayerService.h>
+#include <media/IStreamSource.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <ui/DisplayInfo.h>
+
+namespace android {
+
+struct TunnelRenderer::PlayerClient : public BnMediaPlayerClient {
+ PlayerClient() {}
+
+ virtual void notify(int msg, int ext1, int ext2, const Parcel *obj) {
+ ALOGI("notify %d, %d, %d", msg, ext1, ext2);
+ }
+
+protected:
+ virtual ~PlayerClient() {}
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(PlayerClient);
+};
+
+struct TunnelRenderer::StreamSource : public BnStreamSource {
+ StreamSource(TunnelRenderer *owner);
+
+ virtual void setListener(const sp<IStreamListener> &listener);
+ virtual void setBuffers(const Vector<sp<IMemory> > &buffers);
+
+ virtual void onBufferAvailable(size_t index);
+
+ virtual uint32_t flags() const;
+
+ void doSomeWork();
+
+protected:
+ virtual ~StreamSource();
+
+private:
+ mutable Mutex mLock;
+
+ TunnelRenderer *mOwner;
+
+ sp<IStreamListener> mListener;
+
+ Vector<sp<IMemory> > mBuffers;
+ List<size_t> mIndicesAvailable;
+
+ size_t mNumDequeued;
+
+ DISALLOW_EVIL_CONSTRUCTORS(StreamSource);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+TunnelRenderer::StreamSource::StreamSource(TunnelRenderer *owner)
+ : mOwner(owner),
+ mNumDequeued(0) {
+}
+
+TunnelRenderer::StreamSource::~StreamSource() {
+}
+
+void TunnelRenderer::StreamSource::setListener(
+ const sp<IStreamListener> &listener) {
+ mListener = listener;
+}
+
+void TunnelRenderer::StreamSource::setBuffers(
+ const Vector<sp<IMemory> > &buffers) {
+ mBuffers = buffers;
+}
+
+void TunnelRenderer::StreamSource::onBufferAvailable(size_t index) {
+ CHECK_LT(index, mBuffers.size());
+
+ {
+ Mutex::Autolock autoLock(mLock);
+ mIndicesAvailable.push_back(index);
+ }
+
+ doSomeWork();
+}
+
+uint32_t TunnelRenderer::StreamSource::flags() const {
+ return kFlagAlignedVideoData;
+}
+
+void TunnelRenderer::StreamSource::doSomeWork() {
+ Mutex::Autolock autoLock(mLock);
+
+ while (!mIndicesAvailable.empty()) {
+ sp<ABuffer> srcBuffer = mOwner->dequeueBuffer();
+ if (srcBuffer == NULL) {
+ break;
+ }
+
+ ++mNumDequeued;
+
+ if (mNumDequeued == 1) {
+ ALOGI("fixing real time now.");
+
+ sp<AMessage> extra = new AMessage;
+
+ extra->setInt32(
+ IStreamListener::kKeyDiscontinuityMask,
+ ATSParser::DISCONTINUITY_ABSOLUTE_TIME);
+
+ extra->setInt64("timeUs", ALooper::GetNowUs());
+
+ mListener->issueCommand(
+ IStreamListener::DISCONTINUITY,
+ false /* synchronous */,
+ extra);
+ }
+
+ ALOGV("dequeue TS packet of size %d", srcBuffer->size());
+
+ size_t index = *mIndicesAvailable.begin();
+ mIndicesAvailable.erase(mIndicesAvailable.begin());
+
+ sp<IMemory> mem = mBuffers.itemAt(index);
+ CHECK_LE(srcBuffer->size(), mem->size());
+ CHECK_EQ((srcBuffer->size() % 188), 0u);
+
+ memcpy(mem->pointer(), srcBuffer->data(), srcBuffer->size());
+ mListener->queueBuffer(index, srcBuffer->size());
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+TunnelRenderer::TunnelRenderer(
+ const sp<AMessage> &notifyLost,
+ const sp<ISurfaceTexture> &surfaceTex)
+ : mNotifyLost(notifyLost),
+ mSurfaceTex(surfaceTex),
+ mTotalBytesQueued(0ll),
+ mLastDequeuedExtSeqNo(-1),
+ mFirstFailedAttemptUs(-1ll),
+ mRequestedRetransmission(false) {
+}
+
+TunnelRenderer::~TunnelRenderer() {
+ destroyPlayer();
+}
+
+void TunnelRenderer::queueBuffer(const sp<ABuffer> &buffer) {
+ Mutex::Autolock autoLock(mLock);
+
+ mTotalBytesQueued += buffer->size();
+
+ if (mPackets.empty()) {
+ mPackets.push_back(buffer);
+ return;
+ }
+
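+ // Keep the queue sorted by extended sequence number, scanning from the
+ // tail since in-order arrival is the common case.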
+ int32_t newExtendedSeqNo = buffer->int32Data();
+
+ List<sp<ABuffer> >::iterator firstIt = mPackets.begin();
+ List<sp<ABuffer> >::iterator it = --mPackets.end();
+ for (;;) {
+ int32_t extendedSeqNo = (*it)->int32Data();
+
+ if (extendedSeqNo == newExtendedSeqNo) {
+ // Duplicate packet.
+ return;
+ }
+
+ if (extendedSeqNo < newExtendedSeqNo) {
+ // Insert new packet after the one at "it".
+ mPackets.insert(++it, buffer);
+ return;
+ }
+
+ if (it == firstIt) {
+ // Insert new packet before the first existing one.
+ mPackets.insert(it, buffer);
+ return;
+ }
+
+ --it;
+ }
+}
+
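+ // Returns the next in-order packet, if any. On a gap we wait up to
+ // 50ms for the missing packet to arrive, requesting a retransmission
+ // once via the notify-lost message, before giving up and skipping
+ // ahead.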
+sp<ABuffer> TunnelRenderer::dequeueBuffer() {
+ Mutex::Autolock autoLock(mLock);
+
+ sp<ABuffer> buffer;
+ int32_t extSeqNo;
+ while (!mPackets.empty()) {
+ buffer = *mPackets.begin();
+ extSeqNo = buffer->int32Data();
+
+ if (mLastDequeuedExtSeqNo < 0 || extSeqNo > mLastDequeuedExtSeqNo) {
+ break;
+ }
+
+ // This is a retransmission of a packet we've already returned.
+
+ mTotalBytesQueued -= buffer->size();
+ buffer.clear();
+ extSeqNo = -1;
+
+ mPackets.erase(mPackets.begin());
+ }
+
+ if (mPackets.empty()) {
+ if (mFirstFailedAttemptUs < 0ll) {
+ mFirstFailedAttemptUs = ALooper::GetNowUs();
+ mRequestedRetransmission = false;
+ } else {
+ ALOGV("no packets available for %.2f secs",
+ (ALooper::GetNowUs() - mFirstFailedAttemptUs) / 1E6);
+ }
+
+ return NULL;
+ }
+
+ if (mLastDequeuedExtSeqNo < 0 || extSeqNo == mLastDequeuedExtSeqNo + 1) {
+ if (mRequestedRetransmission) {
+ ALOGI("Recovered after requesting retransmission of %d",
+ extSeqNo);
+ }
+
+ mLastDequeuedExtSeqNo = extSeqNo;
+ mFirstFailedAttemptUs = -1ll;
+ mRequestedRetransmission = false;
+
+ mPackets.erase(mPackets.begin());
+
+ mTotalBytesQueued -= buffer->size();
+
+ return buffer;
+ }
+
+ if (mFirstFailedAttemptUs < 0ll) {
+ mFirstFailedAttemptUs = ALooper::GetNowUs();
+
+ ALOGI("failed to get the correct packet the first time.");
+ return NULL;
+ }
+
+ if (mFirstFailedAttemptUs + 50000ll > ALooper::GetNowUs()) {
+ // We're willing to wait a little while to get the right packet.
+
+ if (!mRequestedRetransmission) {
+ ALOGI("requesting retransmission of seqNo %d",
+ (mLastDequeuedExtSeqNo + 1) & 0xffff);
+
+ sp<AMessage> notify = mNotifyLost->dup();
+ notify->setInt32("seqNo", (mLastDequeuedExtSeqNo + 1) & 0xffff);
+ notify->post();
+
+ mRequestedRetransmission = true;
+ } else {
+ ALOGI("still waiting for the correct packet to arrive.");
+ }
+
+ return NULL;
+ }
+
+ ALOGI("dropping packet. extSeqNo %d didn't arrive in time",
+ mLastDequeuedExtSeqNo + 1);
+
+ // Permanent failure, we never received the packet.
+ mLastDequeuedExtSeqNo = extSeqNo;
+ mFirstFailedAttemptUs = -1ll;
+ mRequestedRetransmission = false;
+
+ mTotalBytesQueued -= buffer->size();
+
+ mPackets.erase(mPackets.begin());
+
+ return buffer;
+}
+
+void TunnelRenderer::onMessageReceived(const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatQueueBuffer:
+ {
+ sp<ABuffer> buffer;
+ CHECK(msg->findBuffer("buffer", &buffer));
+
+ queueBuffer(buffer);
+
+ if (mStreamSource == NULL) {
+ if (mTotalBytesQueued > 0ll) {
+ initPlayer();
+ } else {
+ ALOGI("Have %lld bytes queued...", mTotalBytesQueued);
+ }
+ } else {
+ mStreamSource->doSomeWork();
+ }
+ break;
+ }
+
+ default:
+ TRESPASS();
+ }
+}
+
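+ // Instantiates a MediaPlayer through the "media.player" service and
+ // feeds it the reassembled transport stream via an IStreamSource. If
+ // no explicit surface texture was supplied, a full-screen surface of
+ // our own is created first.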
+void TunnelRenderer::initPlayer() {
+ if (mSurfaceTex == NULL) {
+ mComposerClient = new SurfaceComposerClient;
+ CHECK_EQ(mComposerClient->initCheck(), (status_t)OK);
+
+ DisplayInfo info;
+ SurfaceComposerClient::getDisplayInfo(0, &info);
+ ssize_t displayWidth = info.w;
+ ssize_t displayHeight = info.h;
+
+ mSurfaceControl =
+ mComposerClient->createSurface(
+ String8("A Surface"),
+ displayWidth,
+ displayHeight,
+ PIXEL_FORMAT_RGB_565,
+ 0);
+
+ CHECK(mSurfaceControl != NULL);
+ CHECK(mSurfaceControl->isValid());
+
+ SurfaceComposerClient::openGlobalTransaction();
+ CHECK_EQ(mSurfaceControl->setLayer(INT_MAX), (status_t)OK);
+ CHECK_EQ(mSurfaceControl->show(), (status_t)OK);
+ SurfaceComposerClient::closeGlobalTransaction();
+
+ mSurface = mSurfaceControl->getSurface();
+ CHECK(mSurface != NULL);
+ }
+
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder = sm->getService(String16("media.player"));
+ sp<IMediaPlayerService> service = interface_cast<IMediaPlayerService>(binder);
+ CHECK(service.get() != NULL);
+
+ mStreamSource = new StreamSource(this);
+
+ mPlayerClient = new PlayerClient;
+
+ mPlayer = service->create(getpid(), mPlayerClient, 0);
+ CHECK(mPlayer != NULL);
+ CHECK_EQ(mPlayer->setDataSource(mStreamSource), (status_t)OK);
+
+ mPlayer->setVideoSurfaceTexture(
+ mSurfaceTex != NULL ? mSurfaceTex : mSurface->getSurfaceTexture());
+
+ mPlayer->start();
+}
+
+void TunnelRenderer::destroyPlayer() {
+ mStreamSource.clear();
+
+ // The player and surface only exist if initPlayer() ever ran, i.e. if
+ // at least one packet was queued.
+ if (mPlayer != NULL) {
+ mPlayer->stop();
+ mPlayer.clear();
+ }
+
+ if (mSurfaceTex == NULL && mComposerClient != NULL) {
+ mSurface.clear();
+ mSurfaceControl.clear();
+
+ mComposerClient->dispose();
+ mComposerClient.clear();
+ }
+}
+
+} // namespace android
+
diff --git a/media/libstagefright/wifi-display/sink/TunnelRenderer.h b/media/libstagefright/wifi-display/sink/TunnelRenderer.h
new file mode 100644
index 0000000..c9597e0
--- /dev/null
+++ b/media/libstagefright/wifi-display/sink/TunnelRenderer.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TUNNEL_RENDERER_H_
+
+#define TUNNEL_RENDERER_H_
+
+#include <gui/Surface.h>
+#include <media/stagefright/foundation/AHandler.h>
+
+namespace android {
+
+struct ABuffer;
+struct SurfaceComposerClient;
+struct SurfaceControl;
+struct Surface;
+struct IMediaPlayer;
+struct IStreamListener;
+
+// This class reassembles incoming RTP packets into the correct order
+// and sends the resulting transport stream to a mediaplayer instance
+// for playback.
+struct TunnelRenderer : public AHandler {
+ TunnelRenderer(
+ const sp<AMessage> &notifyLost,
+ const sp<ISurfaceTexture> &surfaceTex);
+
+ sp<ABuffer> dequeueBuffer();
+
+ enum {
+ kWhatQueueBuffer,
+ };
+
+protected:
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+ virtual ~TunnelRenderer();
+
+private:
+ struct PlayerClient;
+ struct StreamSource;
+
+ mutable Mutex mLock;
+
+ sp<AMessage> mNotifyLost;
+ sp<ISurfaceTexture> mSurfaceTex;
+
+ List<sp<ABuffer> > mPackets;
+ int64_t mTotalBytesQueued;
+
+ sp<SurfaceComposerClient> mComposerClient;
+ sp<SurfaceControl> mSurfaceControl;
+ sp<Surface> mSurface;
+ sp<PlayerClient> mPlayerClient;
+ sp<IMediaPlayer> mPlayer;
+ sp<StreamSource> mStreamSource;
+
+ int32_t mLastDequeuedExtSeqNo;
+ int64_t mFirstFailedAttemptUs;
+ bool mRequestedRetransmission;
+
+ void initPlayer();
+ void destroyPlayer();
+
+ void queueBuffer(const sp<ABuffer> &buffer);
+
+ DISALLOW_EVIL_CONSTRUCTORS(TunnelRenderer);
+};
+
+} // namespace android
+
+#endif // TUNNEL_RENDERER_H_
diff --git a/media/libstagefright/wifi-display/source/Converter.cpp b/media/libstagefright/wifi-display/source/Converter.cpp
index ee05e45..b8b8688 100644
--- a/media/libstagefright/wifi-display/source/Converter.cpp
+++ b/media/libstagefright/wifi-display/source/Converter.cpp
@@ -54,6 +54,10 @@
return mInitCheck;
}
+size_t Converter::getInputBufferCount() const {
+ return mEncoderInputBuffers.size();
+}
+
sp<AMessage> Converter::getOutputFormat() const {
return mOutputFormat;
}
diff --git a/media/libstagefright/wifi-display/source/Converter.h b/media/libstagefright/wifi-display/source/Converter.h
index 6700a32..67471c7 100644
--- a/media/libstagefright/wifi-display/source/Converter.h
+++ b/media/libstagefright/wifi-display/source/Converter.h
@@ -36,6 +36,8 @@
status_t initCheck() const;
+ size_t getInputBufferCount() const;
+
sp<AMessage> getOutputFormat() const;
void feedAccessUnit(const sp<ABuffer> &accessUnit);
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index f9223d6..6c01c7b 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -113,9 +113,11 @@
WifiDisplaySource::PlaybackSession::PlaybackSession(
const sp<ANetworkSession> &netSession,
- const sp<AMessage> &notify)
+ const sp<AMessage> &notify,
+ bool legacyMode)
: mNetSession(netSession),
mNotify(notify),
+ mLegacyMode(legacyMode),
mLastLifesignUs(),
mTSQueue(new ABuffer(12 + kMaxNumTSPacketsPerRTPPacket * 188)),
mPrevTimeUs(-1ll),
@@ -240,11 +242,6 @@
mPacketizer.clear();
- sp<IServiceManager> sm = defaultServiceManager();
- sp<IBinder> binder = sm->getService(String16("SurfaceFlinger"));
- sp<ISurfaceComposer> service = interface_cast<ISurfaceComposer>(binder);
- CHECK(service != NULL);
-
if (mSerializer != NULL) {
mSerializer->stop();
@@ -257,7 +254,14 @@
mSerializerLooper.clear();
}
- service->connectDisplay(NULL);
+ if (mLegacyMode) {
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder = sm->getService(String16("SurfaceFlinger"));
+ sp<ISurfaceComposer> service = interface_cast<ISurfaceComposer>(binder);
+ CHECK(service != NULL);
+
+ service->connectDisplay(NULL);
+ }
if (mRTCPSessionID != 0) {
mNetSession->destroySession(mRTCPSessionID);
@@ -598,28 +602,7 @@
SurfaceComposerClient::getDisplayInfo(0, &info);
// sp<SurfaceMediaSource> source = new SurfaceMediaSource(info.w, info.h);
- sp<SurfaceMediaSource> source = new SurfaceMediaSource(720, 1280);
-
- sp<IServiceManager> sm = defaultServiceManager();
- sp<IBinder> binder = sm->getService(String16("SurfaceFlinger"));
- sp<ISurfaceComposer> service = interface_cast<ISurfaceComposer>(binder);
- CHECK(service != NULL);
-
- service->connectDisplay(source->getBufferQueue());
-
-#if 0
- {
- ALOGI("reading buffer");
-
- CHECK_EQ((status_t)OK, source->start());
- MediaBuffer *mbuf;
- CHECK_EQ((status_t)OK, source->read(&mbuf));
- mbuf->release();
- mbuf = NULL;
-
- ALOGI("got buffer");
- }
-#endif
+ sp<SurfaceMediaSource> source = new SurfaceMediaSource(width(), height());
#if 0
ssize_t index = mSerializer->addSource(source);
@@ -644,10 +627,29 @@
sp<Converter> converter =
new Converter(notify, mCodecLooper, format);
+ CHECK_EQ(converter->initCheck(), (status_t)OK);
+
+ size_t numInputBuffers = converter->getInputBufferCount();
+ ALOGI("numInputBuffers to the encoder is %d", numInputBuffers);
looper()->registerHandler(converter);
mTracks.add(index, new Track(converter));
+
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder = sm->getService(String16("SurfaceFlinger"));
+ sp<ISurfaceComposer> service = interface_cast<ISurfaceComposer>(binder);
+ CHECK(service != NULL);
+
+ // Add one reference to account for the serializer.
+ err = source->setMaxAcquiredBufferCount(numInputBuffers + 1);
+ CHECK_EQ(err, (status_t)OK);
+
+ mBufferQueue = source->getBufferQueue();
+
+ if (mLegacyMode) {
+ service->connectDisplay(mBufferQueue);
+ }
#endif
#if 0
@@ -679,6 +681,18 @@
return OK;
}
+sp<ISurfaceTexture> WifiDisplaySource::PlaybackSession::getSurfaceTexture() {
+ return mBufferQueue;
+}
+
+int32_t WifiDisplaySource::PlaybackSession::width() const {
+ return 720;
+}
+
+int32_t WifiDisplaySource::PlaybackSession::height() const {
+ return 1280;
+}
+
void WifiDisplaySource::PlaybackSession::scheduleSendSR() {
if (mSendSRPending) {
return;
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.h b/media/libstagefright/wifi-display/source/PlaybackSession.h
index a6c9f27..5c228f6 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.h
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.h
@@ -23,6 +23,8 @@
namespace android {
struct ABuffer;
+struct BufferQueue;
+struct ISurfaceTexture;
struct Serializer;
struct TSPacketizer;
@@ -32,7 +34,9 @@
// display.
struct WifiDisplaySource::PlaybackSession : public AHandler {
PlaybackSession(
- const sp<ANetworkSession> &netSession, const sp<AMessage> &notify);
+ const sp<ANetworkSession> &netSession,
+ const sp<AMessage> &notify,
+ bool legacyMode);
status_t init(
const char *clientIP, int32_t clientRtp, int32_t clientRtcp,
@@ -46,6 +50,10 @@
status_t play();
status_t pause();
+ sp<ISurfaceTexture> getSurfaceTexture();
+ int32_t width() const;
+ int32_t height() const;
+
enum {
kWhatSessionDead,
kWhatBinaryData,
@@ -73,6 +81,7 @@
sp<ANetworkSession> mNetSession;
sp<AMessage> mNotify;
+ bool mLegacyMode;
int64_t mLastLifesignUs;
@@ -80,6 +89,7 @@
sp<Serializer> mSerializer;
sp<TSPacketizer> mPacketizer;
sp<ALooper> mCodecLooper;
+ sp<BufferQueue> mBufferQueue;
KeyedVector<size_t, sp<Track> > mTracks;
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
index a998dcd..0786f2b 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
@@ -22,6 +22,9 @@
#include "PlaybackSession.h"
#include "ParsedMessage.h"
+#include <gui/ISurfaceTexture.h>
+
+#include <media/IRemoteDisplayClient.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -32,8 +35,11 @@
namespace android {
-WifiDisplaySource::WifiDisplaySource(const sp<ANetworkSession> &netSession)
+WifiDisplaySource::WifiDisplaySource(
+ const sp<ANetworkSession> &netSession,
+ const sp<IRemoteDisplayClient> &client)
: mNetSession(netSession),
+ mClient(client),
mSessionID(0),
mReaperPending(false),
mNextCSeq(1) {
@@ -201,6 +207,10 @@
mPlaybackSessions.removeItemsAt(i);
}
+ if (mClient != NULL) {
+ mClient->onDisplayDisconnected();
+ }
+
status_t err = OK;
sp<AMessage> response = new AMessage;
@@ -768,7 +778,8 @@
notify->setInt32("sessionID", sessionID);
sp<PlaybackSession> playbackSession =
- new PlaybackSession(mNetSession, notify);
+ new PlaybackSession(
+ mNetSession, notify, mClient == NULL /* legacyMode */);
looper()->registerHandler(playbackSession);
@@ -869,6 +880,14 @@
err = mNetSession->sendRequest(sessionID, response.c_str());
CHECK_EQ(err, (status_t)OK);
+
+ if (mClient != NULL) {
+ mClient->onDisplayConnected(
+ playbackSession->getSurfaceTexture(),
+ playbackSession->width(),
+ playbackSession->height(),
+ 0 /* flags */);
+ }
}
void WifiDisplaySource::onPauseRequest(
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.h b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
index f56347d..99eb4f5 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.h
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
@@ -24,6 +24,7 @@
namespace android {
+struct IRemoteDisplayClient;
struct ParsedMessage;
// Represents the RTSP server acting as a wifi display source.
@@ -31,7 +32,9 @@
struct WifiDisplaySource : public AHandler {
static const unsigned kWifiDisplayDefaultPort = 7236;
- WifiDisplaySource(const sp<ANetworkSession> &netSession);
+ WifiDisplaySource(
+ const sp<ANetworkSession> &netSession,
+ const sp<IRemoteDisplayClient> &client);
status_t start(const char *iface);
status_t stop();
@@ -74,6 +77,7 @@
kPlaybackSessionTimeoutSecs * 1000000ll;
sp<ANetworkSession> mNetSession;
+ sp<IRemoteDisplayClient> mClient;
int32_t mSessionID;
struct ClientInfo {
diff --git a/media/libstagefright/wifi-display/wfd.cpp b/media/libstagefright/wifi-display/wfd.cpp
index 5e7d9fd..d886f14 100644
--- a/media/libstagefright/wifi-display/wfd.cpp
+++ b/media/libstagefright/wifi-display/wfd.cpp
@@ -18,11 +18,8 @@
#define LOG_TAG "wfd"
#include <utils/Log.h>
-#define SUPPORT_SINK 0
-
-#if SUPPORT_SINK
#include "sink/WifiDisplaySink.h"
-#endif
+#include "source/WifiDisplaySource.h"
#include <binder/ProcessState.h>
#include <binder/IServiceManager.h>
@@ -49,10 +46,8 @@
static void usage(const char *me) {
fprintf(stderr,
"usage:\n"
-#if SUPPORT_SINK
" %s -c host[:port]\tconnect to wifi source\n"
" -u uri \tconnect to an rtsp uri\n"
-#endif
" -e ip[:port] \tenable remote display\n"
" -d \tdisable remote display\n",
me);
@@ -72,7 +67,6 @@
int res;
while ((res = getopt(argc, argv, "hc:l:u:e:d")) >= 0) {
switch (res) {
-#if SUPPORT_SINK
case 'c':
{
const char *colonPos = strrchr(optarg, ':');
@@ -100,7 +94,6 @@
uri = optarg;
break;
}
-#endif
case 'e':
{
@@ -124,7 +117,6 @@
}
}
-#if SUPPORT_SINK
if (connectToPort < 0 && uri.empty()) {
fprintf(stderr,
"You need to select either source host or uri.\n");
@@ -154,7 +146,6 @@
}
looper->start(true /* runOnCallingThread */);
-#endif
return 0;
}
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 1370c62..e27a065 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -14,8 +14,10 @@
camera2/CameraMetadata.cpp \
camera2/Parameters.cpp \
camera2/FrameProcessor.cpp \
- camera2/CaptureProcessor.cpp \
- camera2/CallbackProcessor.cpp
+ camera2/JpegProcessor.cpp \
+ camera2/CallbackProcessor.cpp \
+ camera2/ZslProcessor.cpp \
+ camera2/CaptureSequencer.cpp
LOCAL_SHARED_LIBRARIES:= \
libui \
diff --git a/services/camera/libcameraservice/Camera2Client.cpp b/services/camera/libcameraservice/Camera2Client.cpp
index acd290d..5400604 100644
--- a/services/camera/libcameraservice/Camera2Client.cpp
+++ b/services/camera/libcameraservice/Camera2Client.cpp
@@ -59,12 +59,21 @@
mRecordingHeapCount(kDefaultRecordingHeapCount)
{
ATRACE_CALL();
- ALOGV("%s: Created client for camera %d", __FUNCTION__, cameraId);
+ ALOGI("Camera %d: Opened", cameraId);
mDevice = new Camera2Device(cameraId);
SharedParameters::Lock l(mParameters);
l.mParameters.state = Parameters::DISCONNECTED;
+
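+ // Zero-shutter-lag mode keeps a full-resolution buffer stream running
+ // so a still capture can be served from an already-captured frame;
+ // gated on the camera.zsl_mode system property.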
+ char value[PROPERTY_VALUE_MAX];
+ property_get("camera.zsl_mode", value, "0");
+ if (!strcmp(value, "1")) {
+ ALOGI("Camera %d: Enabling ZSL mode", cameraId);
+ l.mParameters.zslMode = true;
+ } else {
+ l.mParameters.zslMode = false;
+ }
}
status_t Camera2Client::checkPid(const char* checkLocation) const {
@@ -100,20 +109,32 @@
return NO_INIT;
}
- mFrameProcessor = new FrameProcessor(this);
- String8 frameThreadName = String8::format("Camera2Client[%d]::FrameProcessor",
- mCameraId);
- mFrameProcessor->run(frameThreadName.string());
+ String8 threadName;
- mCaptureProcessor = new CaptureProcessor(this);
- String8 captureThreadName =
- String8::format("Camera2Client[%d]::CaptureProcessor", mCameraId);
- mCaptureProcessor->run(captureThreadName.string());
+ mFrameProcessor = new FrameProcessor(this);
+ threadName = String8::format("Camera2Client[%d]::FrameProcessor",
+ mCameraId);
+ mFrameProcessor->run(threadName.string());
+
+ mCaptureSequencer = new CaptureSequencer(this);
+ threadName = String8::format("Camera2Client[%d]::CaptureSequencer",
+ mCameraId);
+ mCaptureSequencer->run(threadName.string());
+
+ mJpegProcessor = new JpegProcessor(this, mCaptureSequencer);
+ threadName = String8::format("Camera2Client[%d]::JpegProcessor",
+ mCameraId);
+ mJpegProcessor->run(threadName.string());
+
+ mZslProcessor = new ZslProcessor(this, mCaptureSequencer);
+ threadName = String8::format("Camera2Client[%d]::ZslProcessor",
+ mCameraId);
+ mZslProcessor->run(threadName.string());
mCallbackProcessor = new CallbackProcessor(this);
- String8 callbackThreadName =
- String8::format("Camera2Client[%d]::CallbackProcessor", mCameraId);
- mCallbackProcessor->run(callbackThreadName.string());
+ threadName = String8::format("Camera2Client[%d]::CallbackProcessor",
+ mCameraId);
+ mCallbackProcessor->run(threadName.string());
if (gLogLevel >= 1) {
ALOGD("%s: Default parameters converted from camera %d:", __FUNCTION__,
@@ -126,7 +147,7 @@
Camera2Client::~Camera2Client() {
ATRACE_CALL();
- ALOGV("%s: Camera %d: Shutting down client.", __FUNCTION__, mCameraId);
+ ALOGV("Camera %d: Shutting down", mCameraId);
mDestructionStarted = true;
@@ -135,7 +156,7 @@
disconnect();
mFrameProcessor->requestExit();
- ALOGV("%s: Camera %d: Shutdown complete", __FUNCTION__, mCameraId);
+ ALOGI("Camera %d: Closed", mCameraId);
}
status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
@@ -299,10 +320,12 @@
p.videoStabilization ? "enabled" : "disabled");
result.append(" Current streams:\n");
- result.appendFormat(" Preview stream ID: %d\n", mPreviewStreamId);
+ result.appendFormat(" Preview stream ID: %d\n",
+ getPreviewStreamId());
result.appendFormat(" Capture stream ID: %d\n",
- mCaptureProcessor->getStreamId());
- result.appendFormat(" Recording stream ID: %d\n", mRecordingStreamId);
+ getCaptureStreamId());
+ result.appendFormat(" Recording stream ID: %d\n",
+ getRecordingStreamId());
result.append(" Current requests:\n");
if (mPreviewRequest.entryCount() != 0) {
@@ -314,15 +337,6 @@
write(fd, result.string(), result.size());
}
- if (mCaptureRequest.entryCount() != 0) {
- result = " Capture request:\n";
- write(fd, result.string(), result.size());
- mCaptureRequest.dump(fd, 2, 6);
- } else {
- result = " Capture request: undefined\n";
- write(fd, result.string(), result.size());
- }
-
if (mRecordingRequest.entryCount() != 0) {
result = " Recording request:\n";
write(fd, result.string(), result.size());
@@ -332,6 +346,8 @@
write(fd, result.string(), result.size());
}
+ mCaptureSequencer->dump(fd, args);
+
mFrameProcessor->dump(fd, args);
result = " Device dump:\n";
@@ -366,7 +382,7 @@
mPreviewStreamId = NO_STREAM;
}
- mCaptureProcessor->deleteStream();
+ mJpegProcessor->deleteStream();
if (mRecordingStreamId != NO_STREAM) {
mDevice->deleteStream(mRecordingStreamId);
@@ -623,6 +639,14 @@
return res;
}
}
+ if (params.zslMode) {
+ res = mZslProcessor->updateStream(params);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to update ZSL stream: %s (%d)",
+ __FUNCTION__, mCameraId, strerror(-res), res);
+ return res;
+ }
+ }
if (mPreviewRequest.entryCount() == 0) {
res = updatePreviewRequest(params);
@@ -633,18 +657,20 @@
}
}
+ Vector<uint8_t> outputStreams;
+ outputStreams.push(getPreviewStreamId());
+
if (callbacksEnabled) {
- uint8_t outputStreams[2] =
- { mPreviewStreamId, mCallbackProcessor->getStreamId() };
- res = mPreviewRequest.update(
- ANDROID_REQUEST_OUTPUT_STREAMS,
- outputStreams, 2);
- } else {
- uint8_t outputStreams[1] = { mPreviewStreamId };
- res = mPreviewRequest.update(
- ANDROID_REQUEST_OUTPUT_STREAMS,
- outputStreams, 1);
+ outputStreams.push(getCallbackStreamId());
}
+ if (params.zslMode) {
+ outputStreams.push(getZslStreamId());
+ }
+
+ res = mPreviewRequest.update(
+ ANDROID_REQUEST_OUTPUT_STREAMS,
+ outputStreams);
+
if (res != OK) {
ALOGE("%s: Camera %d: Unable to set up preview request: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
@@ -817,14 +843,19 @@
}
if (callbacksEnabled) {
- uint8_t outputStreams[3] =
- { mPreviewStreamId, mRecordingStreamId,
- mCallbackProcessor->getStreamId() };
+ uint8_t outputStreams[3] ={
+ getPreviewStreamId(),
+ getRecordingStreamId(),
+ getCallbackStreamId()
+ };
res = mRecordingRequest.update(
ANDROID_REQUEST_OUTPUT_STREAMS,
outputStreams, 3);
} else {
- uint8_t outputStreams[2] = { mPreviewStreamId, mRecordingStreamId };
+ uint8_t outputStreams[2] = {
+ getPreviewStreamId(),
+ getRecordingStreamId()
+ };
res = mRecordingRequest.update(
ANDROID_REQUEST_OUTPUT_STREAMS,
outputStreams, 2);
@@ -1020,8 +1051,18 @@
__FUNCTION__, mCameraId);
return INVALID_OPERATION;
case Parameters::PREVIEW:
- case Parameters::RECORD:
// Good to go for takePicture
+ res = commandStopFaceDetectionL(l.mParameters);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to stop face detection for still capture",
+ __FUNCTION__, mCameraId);
+ return res;
+ }
+ l.mParameters.state = Parameters::STILL_CAPTURE;
+ break;
+ case Parameters::RECORD:
+ // Good to go for video snapshot
+ l.mParameters.state = Parameters::VIDEO_SNAPSHOT;
break;
case Parameters::STILL_CAPTURE:
case Parameters::VIDEO_SNAPSHOT:
@@ -1032,130 +1073,20 @@
ALOGV("%s: Camera %d: Starting picture capture", __FUNCTION__, mCameraId);
- res = mCaptureProcessor->updateStream(l.mParameters);
+ res = mJpegProcessor->updateStream(l.mParameters);
if (res != OK) {
ALOGE("%s: Camera %d: Can't set up still image stream: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
return res;
}
- if (mCaptureRequest.entryCount() == 0) {
- res = updateCaptureRequest(l.mParameters);
- if (res != OK) {
- ALOGE("%s: Camera %d: Can't create still image capture request: "
- "%s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
- }
-
- bool callbacksEnabled = l.mParameters.previewCallbackFlags &
- CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK;
- bool recordingEnabled = (l.mParameters.state == Parameters::RECORD);
-
- int captureStreamId = mCaptureProcessor->getStreamId();
-
- int streamSwitch = (callbacksEnabled ? 0x2 : 0x0) +
- (recordingEnabled ? 0x1 : 0x0);
- switch ( streamSwitch ) {
- case 0: { // No recording, callbacks
- uint8_t streamIds[2] = {
- mPreviewStreamId,
- captureStreamId
- };
- res = mCaptureRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
- streamIds, 2);
- break;
- }
- case 1: { // Recording
- uint8_t streamIds[3] = {
- mPreviewStreamId,
- mRecordingStreamId,
- captureStreamId
- };
- res = mCaptureRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
- streamIds, 3);
- break;
- }
- case 2: { // Callbacks
- uint8_t streamIds[3] = {
- mPreviewStreamId,
- mCallbackProcessor->getStreamId(),
- captureStreamId
- };
- res = mCaptureRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
- streamIds, 3);
- break;
- }
- case 3: { // Both
- uint8_t streamIds[4] = {
- mPreviewStreamId,
- mCallbackProcessor->getStreamId(),
- mRecordingStreamId,
- captureStreamId
- };
- res = mCaptureRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
- streamIds, 4);
- break;
- }
- };
+ res = mCaptureSequencer->startCapture();
if (res != OK) {
- ALOGE("%s: Camera %d: Unable to set up still image capture request: "
- "%s (%d)",
+ ALOGE("%s: Camera %d: Unable to start capture: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
- res = mCaptureRequest.sort();
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to sort capture request: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
}
- CameraMetadata captureCopy = mCaptureRequest;
- if (captureCopy.entryCount() == 0) {
- ALOGE("%s: Camera %d: Unable to copy capture request for HAL device",
- __FUNCTION__, mCameraId);
- return NO_MEMORY;
- }
-
- if (l.mParameters.state == Parameters::PREVIEW) {
- res = mDevice->clearStreamingRequest();
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to stop preview for still capture: "
- "%s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
- }
- // TODO: Capture should be atomic with setStreamingRequest here
- res = mDevice->capture(captureCopy);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to submit still image capture request: "
- "%s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
-
- switch (l.mParameters.state) {
- case Parameters::PREVIEW:
- l.mParameters.state = Parameters::STILL_CAPTURE;
- res = commandStopFaceDetectionL(l.mParameters);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to stop face detection for still capture",
- __FUNCTION__, mCameraId);
- return res;
- }
- break;
- case Parameters::RECORD:
- l.mParameters.state = Parameters::VIDEO_SNAPSHOT;
- break;
- default:
- ALOGE("%s: Camera %d: Unknown state for still capture!",
- __FUNCTION__, mCameraId);
- return INVALID_OPERATION;
- }
-
- return OK;
+ return res;
}
status_t Camera2Client::setParameters(const String8& params) {
@@ -1501,6 +1432,7 @@
void Camera2Client::notifyAutoExposure(uint8_t newState, int triggerId) {
ALOGV("%s: Autoexposure state now %d, last trigger %d",
__FUNCTION__, newState, triggerId);
+ mCaptureSequencer->notifyAutoExposure(newState, triggerId);
}
void Camera2Client::notifyAutoWhitebalance(uint8_t newState, int triggerId) {
@@ -1508,7 +1440,7 @@
__FUNCTION__, newState, triggerId);
}
-int Camera2Client::getCameraId() {
+int Camera2Client::getCameraId() const {
return mCameraId;
}
@@ -1520,6 +1452,35 @@
return mParameters;
}
+int Camera2Client::getPreviewStreamId() const {
+ return mPreviewStreamId;
+}
+
+int Camera2Client::getCaptureStreamId() const {
+ return mJpegProcessor->getStreamId();
+}
+
+int Camera2Client::getCallbackStreamId() const {
+ return mCallbackProcessor->getStreamId();
+}
+
+int Camera2Client::getRecordingStreamId() const {
+ return mRecordingStreamId;
+}
+
+int Camera2Client::getZslStreamId() const {
+ return mZslProcessor->getStreamId();
+}
+
+status_t Camera2Client::registerFrameListener(int32_t id,
+ wp<camera2::FrameProcessor::FilteredListener> listener) {
+ return mFrameProcessor->registerListener(id, listener);
+}
+
+status_t Camera2Client::removeFrameListener(int32_t id) {
+ return mFrameProcessor->removeListener(id);
+}
+
Camera2Client::SharedCameraClient::Lock::Lock(SharedCameraClient &client):
mCameraClient(client.mCameraClient),
mSharedClient(client) {
@@ -1546,6 +1507,10 @@
mCameraClient.clear();
}
+const int32_t Camera2Client::kPreviewRequestId;
+const int32_t Camera2Client::kRecordRequestId;
+const int32_t Camera2Client::kFirstCaptureRequestId;
+
void Camera2Client::onRecordingFrameAvailable() {
ATRACE_CALL();
status_t res;
@@ -1656,13 +1621,6 @@
__FUNCTION__, mCameraId, strerror(-res), res);
return res;
}
- res = updateCaptureRequest(params);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to update capture request: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
-
res = updateRecordingRequest(params);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update recording request: %s (%d)",
@@ -1761,7 +1719,7 @@
}
}
- res = updateRequestCommon(&mPreviewRequest, params);
+ res = params.updateRequest(&mPreviewRequest);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update common entries of preview "
"request: %s (%d)", __FUNCTION__, mCameraId,
@@ -1769,65 +1727,8 @@
return res;
}
- return OK;
-}
-
-status_t Camera2Client::updateCaptureRequest(const Parameters &params) {
- ATRACE_CALL();
- status_t res;
- if (mCaptureRequest.entryCount() == 0) {
- res = mDevice->createDefaultRequest(CAMERA2_TEMPLATE_STILL_CAPTURE,
- &mCaptureRequest);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to create default still image request:"
- " %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
- return res;
- }
- }
-
- res = updateRequestCommon(&mCaptureRequest, params);
- if (res != OK) {
- ALOGE("%s: Camera %d: Unable to update common entries of capture "
- "request: %s (%d)", __FUNCTION__, mCameraId,
- strerror(-res), res);
- return res;
- }
-
- res = mCaptureRequest.update(ANDROID_JPEG_THUMBNAIL_SIZE,
- params.jpegThumbSize, 2);
- if (res != OK) return res;
- res = mCaptureRequest.update(ANDROID_JPEG_THUMBNAIL_QUALITY,
- &params.jpegThumbQuality, 1);
- if (res != OK) return res;
- res = mCaptureRequest.update(ANDROID_JPEG_QUALITY,
- &params.jpegQuality, 1);
- if (res != OK) return res;
- res = mCaptureRequest.update(
- ANDROID_JPEG_ORIENTATION,
- &params.jpegRotation, 1);
- if (res != OK) return res;
-
- if (params.gpsEnabled) {
- res = mCaptureRequest.update(
- ANDROID_JPEG_GPS_COORDINATES,
- params.gpsCoordinates, 3);
- if (res != OK) return res;
- res = mCaptureRequest.update(
- ANDROID_JPEG_GPS_TIMESTAMP,
- &params.gpsTimestamp, 1);
- if (res != OK) return res;
- res = mCaptureRequest.update(
- ANDROID_JPEG_GPS_PROCESSING_METHOD,
- params.gpsProcessingMethod);
- if (res != OK) return res;
- } else {
- res = mCaptureRequest.erase(ANDROID_JPEG_GPS_COORDINATES);
- if (res != OK) return res;
- res = mCaptureRequest.erase(ANDROID_JPEG_GPS_TIMESTAMP);
- if (res != OK) return res;
- res = mCaptureRequest.erase(ANDROID_JPEG_GPS_PROCESSING_METHOD);
- if (res != OK) return res;
- }
+ res = mPreviewRequest.update(ANDROID_REQUEST_ID,
+ &kPreviewRequestId, 1);
+ if (res != OK) return res;
return OK;
}
@@ -1845,7 +1746,7 @@
}
}
- res = updateRequestCommon(&mRecordingRequest, params);
+ res = params.updateRequest(&mRecordingRequest);
if (res != OK) {
ALOGE("%s: Camera %d: Unable to update common entries of recording "
"request: %s (%d)", __FUNCTION__, mCameraId,
@@ -1913,197 +1814,6 @@
return OK;
}
-status_t Camera2Client::updateRequestCommon(CameraMetadata *request,
- const Parameters &params) const {
- ATRACE_CALL();
- status_t res;
- res = request->update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
- params.previewFpsRange, 2);
- if (res != OK) return res;
-
- uint8_t wbMode = params.autoWhiteBalanceLock ?
- (uint8_t)ANDROID_CONTROL_AWB_LOCKED : params.wbMode;
- res = request->update(ANDROID_CONTROL_AWB_MODE,
- &wbMode, 1);
- if (res != OK) return res;
- res = request->update(ANDROID_CONTROL_EFFECT_MODE,
- &params.effectMode, 1);
- if (res != OK) return res;
- res = request->update(ANDROID_CONTROL_AE_ANTIBANDING_MODE,
- &params.antibandingMode, 1);
- if (res != OK) return res;
-
- uint8_t controlMode =
- (params.sceneMode == ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED) ?
- ANDROID_CONTROL_AUTO : ANDROID_CONTROL_USE_SCENE_MODE;
- res = request->update(ANDROID_CONTROL_MODE,
- &controlMode, 1);
- if (res != OK) return res;
- if (controlMode == ANDROID_CONTROL_USE_SCENE_MODE) {
- res = request->update(ANDROID_CONTROL_SCENE_MODE,
- &params.sceneMode, 1);
- if (res != OK) return res;
- }
-
- uint8_t flashMode = ANDROID_FLASH_OFF;
- uint8_t aeMode;
- switch (params.flashMode) {
- case Parameters::FLASH_MODE_OFF:
- aeMode = ANDROID_CONTROL_AE_ON; break;
- case Parameters::FLASH_MODE_AUTO:
- aeMode = ANDROID_CONTROL_AE_ON_AUTO_FLASH; break;
- case Parameters::FLASH_MODE_ON:
- aeMode = ANDROID_CONTROL_AE_ON_ALWAYS_FLASH; break;
- case Parameters::FLASH_MODE_TORCH:
- aeMode = ANDROID_CONTROL_AE_ON;
- flashMode = ANDROID_FLASH_TORCH;
- break;
- case Parameters::FLASH_MODE_RED_EYE:
- aeMode = ANDROID_CONTROL_AE_ON_AUTO_FLASH_REDEYE; break;
- default:
- ALOGE("%s: Camera %d: Unknown flash mode %d", __FUNCTION__,
- mCameraId, params.flashMode);
- return BAD_VALUE;
- }
- if (params.autoExposureLock) aeMode = ANDROID_CONTROL_AE_LOCKED;
-
- res = request->update(ANDROID_FLASH_MODE,
- &flashMode, 1);
- if (res != OK) return res;
- res = request->update(ANDROID_CONTROL_AE_MODE,
- &aeMode, 1);
- if (res != OK) return res;
-
- float focusDistance = 0; // infinity focus in diopters
- uint8_t focusMode;
- switch (params.focusMode) {
- case Parameters::FOCUS_MODE_AUTO:
- case Parameters::FOCUS_MODE_MACRO:
- case Parameters::FOCUS_MODE_CONTINUOUS_VIDEO:
- case Parameters::FOCUS_MODE_CONTINUOUS_PICTURE:
- case Parameters::FOCUS_MODE_EDOF:
- focusMode = params.focusMode;
- break;
- case Parameters::FOCUS_MODE_INFINITY:
- case Parameters::FOCUS_MODE_FIXED:
- focusMode = ANDROID_CONTROL_AF_OFF;
- break;
- default:
- ALOGE("%s: Camera %d: Unknown focus mode %d", __FUNCTION__,
- mCameraId, params.focusMode);
- return BAD_VALUE;
- }
- res = request->update(ANDROID_LENS_FOCUS_DISTANCE,
- &focusDistance, 1);
- if (res != OK) return res;
- res = request->update(ANDROID_CONTROL_AF_MODE,
- &focusMode, 1);
- if (res != OK) return res;
-
- size_t focusingAreasSize = params.focusingAreas.size() * 5;
- int32_t *focusingAreas = new int32_t[focusingAreasSize];
- for (size_t i = 0; i < focusingAreasSize; i += 5) {
- if (params.focusingAreas[i].weight != 0) {
- focusingAreas[i + 0] =
- params.normalizedXToArray(params.focusingAreas[i].left);
- focusingAreas[i + 1] =
- params.normalizedYToArray(params.focusingAreas[i].top);
- focusingAreas[i + 2] =
- params.normalizedXToArray(params.focusingAreas[i].right);
- focusingAreas[i + 3] =
- params.normalizedYToArray(params.focusingAreas[i].bottom);
- } else {
- focusingAreas[i + 0] = 0;
- focusingAreas[i + 1] = 0;
- focusingAreas[i + 2] = 0;
- focusingAreas[i + 3] = 0;
- }
- focusingAreas[i + 4] = params.focusingAreas[i].weight;
- }
- res = request->update(ANDROID_CONTROL_AF_REGIONS,
- focusingAreas,focusingAreasSize);
- if (res != OK) return res;
- delete[] focusingAreas;
-
- res = request->update(ANDROID_CONTROL_AE_EXP_COMPENSATION,
- &params.exposureCompensation, 1);
- if (res != OK) return res;
-
- size_t meteringAreasSize = params.meteringAreas.size() * 5;
- int32_t *meteringAreas = new int32_t[meteringAreasSize];
- for (size_t i = 0; i < meteringAreasSize; i += 5) {
- if (params.meteringAreas[i].weight != 0) {
- meteringAreas[i + 0] =
- params.normalizedXToArray(params.meteringAreas[i].left);
- meteringAreas[i + 1] =
- params.normalizedYToArray(params.meteringAreas[i].top);
- meteringAreas[i + 2] =
- params.normalizedXToArray(params.meteringAreas[i].right);
- meteringAreas[i + 3] =
- params.normalizedYToArray(params.meteringAreas[i].bottom);
- } else {
- meteringAreas[i + 0] = 0;
- meteringAreas[i + 1] = 0;
- meteringAreas[i + 2] = 0;
- meteringAreas[i + 3] = 0;
- }
- meteringAreas[i + 4] = params.meteringAreas[i].weight;
- }
- res = request->update(ANDROID_CONTROL_AE_REGIONS,
- meteringAreas, meteringAreasSize);
- if (res != OK) return res;
-
- res = request->update(ANDROID_CONTROL_AWB_REGIONS,
- meteringAreas, meteringAreasSize);
- if (res != OK) return res;
- delete[] meteringAreas;
-
- // Need to convert zoom index into a crop rectangle. The rectangle is
- // chosen to maximize its area on the sensor
-
- camera_metadata_ro_entry_t maxDigitalZoom =
- mParameters.staticInfo(ANDROID_SCALER_AVAILABLE_MAX_ZOOM);
- float zoomIncrement = (maxDigitalZoom.data.f[0] - 1) /
- (params.NUM_ZOOM_STEPS-1);
- float zoomRatio = 1 + zoomIncrement * params.zoom;
-
- float zoomLeft, zoomTop, zoomWidth, zoomHeight;
- if (params.previewWidth >= params.previewHeight) {
- zoomWidth = params.fastInfo.arrayWidth / zoomRatio;
- zoomHeight = zoomWidth *
- params.previewHeight / params.previewWidth;
- } else {
- zoomHeight = params.fastInfo.arrayHeight / zoomRatio;
- zoomWidth = zoomHeight *
- params.previewWidth / params.previewHeight;
- }
- zoomLeft = (params.fastInfo.arrayWidth - zoomWidth) / 2;
- zoomTop = (params.fastInfo.arrayHeight - zoomHeight) / 2;
-
- int32_t cropRegion[3] = { zoomLeft, zoomTop, zoomWidth };
- res = request->update(ANDROID_SCALER_CROP_REGION,
- cropRegion, 3);
- if (res != OK) return res;
-
- // TODO: Decide how to map recordingHint, or whether just to ignore it
-
- uint8_t vstabMode = params.videoStabilization ?
- ANDROID_CONTROL_VIDEO_STABILIZATION_ON :
- ANDROID_CONTROL_VIDEO_STABILIZATION_OFF;
- res = request->update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE,
- &vstabMode, 1);
- if (res != OK) return res;
-
- uint8_t faceDetectMode = params.enableFaceDetect ?
- params.fastInfo.bestFaceDetectMode :
- (uint8_t)ANDROID_STATS_FACE_DETECTION_OFF;
- res = request->update(ANDROID_STATS_FACE_DETECT_MODE,
- &faceDetectMode, 1);
- if (res != OK) return res;
-
- return OK;
-}
-
size_t Camera2Client::calculateBufferSize(int width, int height,
int format, int stride) {
switch (format) {
diff --git a/services/camera/libcameraservice/Camera2Client.h b/services/camera/libcameraservice/Camera2Client.h
index b2fd636..df5dbf4 100644
--- a/services/camera/libcameraservice/Camera2Client.h
+++ b/services/camera/libcameraservice/Camera2Client.h
@@ -21,7 +21,9 @@
#include "CameraService.h"
#include "camera2/Parameters.h"
#include "camera2/FrameProcessor.h"
-#include "camera2/CaptureProcessor.h"
+#include "camera2/JpegProcessor.h"
+#include "camera2/ZslProcessor.h"
+#include "camera2/CaptureSequencer.h"
#include "camera2/CallbackProcessor.h"
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
@@ -95,10 +97,20 @@
* Interface used by independent components of Camera2Client.
*/
- int getCameraId();
+ int getCameraId() const;
const sp<Camera2Device>& getCameraDevice();
camera2::SharedParameters& getParameters();
+ int getPreviewStreamId() const;
+ int getCaptureStreamId() const;
+ int getCallbackStreamId() const;
+ int getRecordingStreamId() const;
+ int getZslStreamId() const;
+
+ status_t registerFrameListener(int32_t id,
+ wp<camera2::FrameProcessor::FilteredListener> listener);
+ status_t removeFrameListener(int32_t id);
+
// Simple class to ensure that access to ICameraClient is serialized by
// requiring mCameraClientLock to be locked before access to mCameraClient
// is possible.
@@ -123,6 +135,10 @@
static size_t calculateBufferSize(int width, int height,
int format, int stride);
+ static const int32_t kPreviewRequestId = 1000;
+ static const int32_t kRecordRequestId = 2000;
+ static const int32_t kFirstCaptureRequestId = 3000;
+
private:
/** ICamera interface-related private members */
@@ -183,9 +199,9 @@
/* Still image capture related members */
- sp<camera2::CaptureProcessor> mCaptureProcessor;
- CameraMetadata mCaptureRequest;
- status_t updateCaptureRequest(const Parameters &params);
+ sp<camera2::CaptureSequencer> mCaptureSequencer;
+ sp<camera2::JpegProcessor> mJpegProcessor;
+ sp<camera2::ZslProcessor> mZslProcessor;
/* Recording related members */
@@ -228,18 +244,6 @@
// Verify that caller is the owner of the camera
status_t checkPid(const char *checkLocation) const;
-
- // Update parameters all requests use, based on mParameters
- status_t updateRequestCommon(CameraMetadata *request, const Parameters ¶ms) const;
-
- // Map from sensor active array pixel coordinates to normalized camera
- // parameter coordinates. The former are (0,0)-(array width - 1, array height
- // - 1), the latter from (-1000,-1000)-(1000,1000)
- int normalizedXToArray(int x) const;
- int normalizedYToArray(int y) const;
- int arrayXToNormalized(int width) const;
- int arrayYToNormalized(int height) const;
-
};
}; // namespace android
diff --git a/services/camera/libcameraservice/Camera2Device.cpp b/services/camera/libcameraservice/Camera2Device.cpp
index f62c0a0..a171c46 100644
--- a/services/camera/libcameraservice/Camera2Device.cpp
+++ b/services/camera/libcameraservice/Camera2Device.cpp
@@ -206,6 +206,42 @@
return OK;
}
+status_t Camera2Device::createReprocessStreamFromStream(int outputId, int *id) {
+ status_t res;
+ ALOGV("%s: E", __FUNCTION__);
+
+ bool found = false;
+ StreamList::iterator streamI;
+ for (streamI = mStreams.begin();
+ streamI != mStreams.end(); streamI++) {
+ if ((*streamI)->getId() == outputId) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ ALOGE("%s: Camera %d: Output stream %d doesn't exist; can't create "
+ "reprocess stream from it!", __FUNCTION__, mId, outputId);
+ return BAD_VALUE;
+ }
+
+ sp<ReprocessStreamAdapter> stream = new ReprocessStreamAdapter(mDevice);
+
+ res = stream->connectToDevice((*streamI));
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to create reprocessing stream from "\
+ "stream %d: %s (%d)", __FUNCTION__, mId, outputId,
+ strerror(-res), res);
+ return res;
+ }
+
+ *id = stream->getId();
+
+ mReprocessStreams.push_back(stream);
+ return OK;
+}
+
+
status_t Camera2Device::getStreamInfo(int id,
uint32_t *width, uint32_t *height, uint32_t *format) {
ALOGV("%s: E", __FUNCTION__);
@@ -277,6 +313,33 @@
return OK;
}
+status_t Camera2Device::deleteReprocessStream(int id) {
+ ALOGV("%s: E", __FUNCTION__);
+ bool found = false;
+ for (ReprocessStreamList::iterator streamI = mReprocessStreams.begin();
+ streamI != mReprocessStreams.end(); streamI++) {
+ if ((*streamI)->getId() == id) {
+ status_t res = (*streamI)->release();
+ if (res != OK) {
+ ALOGE("%s: Unable to release reprocess stream %d from "
+ "HAL device: %s (%d)", __FUNCTION__, id,
+ strerror(-res), res);
+ return res;
+ }
+ mReprocessStreams.erase(streamI);
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ ALOGE("%s: Camera %d: Unable to find stream %d to delete",
+ __FUNCTION__, mId, id);
+ return BAD_VALUE;
+ }
+ return OK;
+}
+
+
status_t Camera2Device::createDefaultRequest(int templateId,
CameraMetadata *request) {
status_t err;
@@ -405,6 +468,32 @@
return res;
}
+status_t Camera2Device::pushReprocessBuffer(int reprocessStreamId,
+ buffer_handle_t *buffer, wp<BufferReleasedListener> listener) {
+ ALOGV("%s: E", __FUNCTION__);
+ bool found = false;
+ status_t res = OK;
+ for (ReprocessStreamList::iterator streamI = mReprocessStreams.begin();
+ streamI != mReprocessStreams.end(); streamI++) {
+ if ((*streamI)->getId() == reprocessStreamId) {
+ res = (*streamI)->pushIntoStream(buffer, listener);
+ if (res != OK) {
+ ALOGE("%s: Unable to push buffer to reprocess stream %d: %s (%d)",
+ __FUNCTION__, reprocessStreamId, strerror(-res), res);
+ return res;
+ }
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ ALOGE("%s: Camera %d: Unable to find reprocess stream %d",
+ __FUNCTION__, mId, reprocessStreamId);
+ res = BAD_VALUE;
+ }
+ return res;
+}
+
/**
* Camera2Device::NotificationListener
*/
@@ -903,7 +992,7 @@
}
buffers[bufferIdx] = anwBuffers[bufferIdx]->handle;
- ALOGV("%s: Buffer %p allocated", __FUNCTION__, (void*)(buffers[bufferIdx]));
+ ALOGV("%s: Buffer %p allocated", __FUNCTION__, (void*)buffers[bufferIdx]);
}
ALOGV("%s: Registering %d buffers with camera HAL", __FUNCTION__, mTotalBuffers);
@@ -1030,7 +1119,7 @@
const_cast<StreamAdapter*>(static_cast<const StreamAdapter*>(w));
stream->mFrameCount++;
ALOGVV("Stream %d enqueue: Frame %d (%p) captured at %lld ns",
- stream->mId, mFrameCount, (void*)(*buffer), timestamp);
+ stream->mId, stream->mFrameCount, (void*)(*buffer), timestamp);
int state = stream->mState;
if (state != ACTIVE) {
ALOGE("%s: Called when in bad state: %d", __FUNCTION__, state);
@@ -1094,5 +1183,198 @@
return native_window_set_crop(a, &crop);
}
+/**
+ * Camera2Device::ReprocessStreamAdapter
+ */
+
+#ifndef container_of
+#define container_of(ptr, type, member) \
+ (type *)((char*)(ptr) - offsetof(type, member))
+#endif
+
+Camera2Device::ReprocessStreamAdapter::ReprocessStreamAdapter(camera2_device_t *d):
+ mState(RELEASED),
+ mDevice(d),
+ mId(-1),
+ mWidth(0), mHeight(0), mFormat(0),
+ mActiveBuffers(0),
+ mFrameCount(0)
+{
+ camera2_stream_in_ops::acquire_buffer = acquire_buffer;
+ camera2_stream_in_ops::release_buffer = release_buffer;
+}
+
+Camera2Device::ReprocessStreamAdapter::~ReprocessStreamAdapter() {
+ if (mState != RELEASED) {
+ release();
+ }
+}
+
+status_t Camera2Device::ReprocessStreamAdapter::connectToDevice(
+ const sp<StreamAdapter> &outputStream) {
+ status_t res;
+ ALOGV("%s: E", __FUNCTION__);
+
+ if (mState != RELEASED) return INVALID_OPERATION;
+ if (outputStream == NULL) {
+ ALOGE("%s: Null base stream passed to reprocess stream adapter",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ mBaseStream = outputStream;
+ mWidth = outputStream->getWidth();
+ mHeight = outputStream->getHeight();
+ mFormat = outputStream->getFormat();
+
+ ALOGV("%s: New reprocess stream parameters %d x %d, format 0x%x",
+ __FUNCTION__, mWidth, mHeight, mFormat);
+
+ // Allocate device-side stream interface
+
+ uint32_t id;
+ res = mDevice->ops->allocate_reprocess_stream_from_stream(mDevice,
+ outputStream->getId(), getStreamOps(),
+ &id);
+ if (res != OK) {
+ ALOGE("%s: Device reprocess stream allocation failed: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+
+ ALOGV("%s: Allocated reprocess stream id %d based on stream %d",
+ __FUNCTION__, id, outputStream->getId());
+
+ mId = id;
+
+ mState = ACTIVE;
+
+ return OK;
+}
+
+status_t Camera2Device::ReprocessStreamAdapter::release() {
+ status_t res;
+ ALOGV("%s: Releasing stream %d", __FUNCTION__, mId);
+ if (mState >= ACTIVE) {
+ res = mDevice->ops->release_reprocess_stream(mDevice, mId);
+ if (res != OK) {
+ ALOGE("%s: Unable to release stream %d",
+ __FUNCTION__, mId);
+ return res;
+ }
+ }
+
+ List<QueueEntry>::iterator s;
+ for (s = mQueue.begin(); s != mQueue.end(); s++) {
+ sp<BufferReleasedListener> listener = s->releaseListener.promote();
+ if (listener != 0) listener->onBufferReleased(s->handle);
+ }
+ for (s = mInFlightQueue.begin(); s != mInFlightQueue.end(); s++) {
+ sp<BufferReleasedListener> listener = s->releaseListener.promote();
+ if (listener != 0) listener->onBufferReleased(s->handle);
+ }
+ mQueue.clear();
+ mInFlightQueue.clear();
+
+ mState = RELEASED;
+ return OK;
+}
+
+status_t Camera2Device::ReprocessStreamAdapter::pushIntoStream(
+ buffer_handle_t *handle, const wp<BufferReleasedListener> &releaseListener) {
+ // TODO: Some error checking here would be nice
+ ALOGV("%s: Pushing buffer %p to stream", __FUNCTION__, (void*)(*handle));
+
+ QueueEntry entry;
+ entry.handle = handle;
+ entry.releaseListener = releaseListener;
+ mQueue.push_back(entry);
+ return OK;
+}
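pushIntoStream() only queues work; the actual handoff to the HAL happens through the pending/in-flight pair of queues managed by acquire_buffer() and release_buffer() below. A minimal standalone sketch of that two-queue lifecycle, using std::list and plain ints in place of buffer handles (all names here are illustrative):

    #include <list>

    struct Entry { int handle; };

    struct TwoQueue {
        std::list<Entry> pending, inFlight;

        void push(int h) { pending.push_back(Entry{h}); }

        bool acquire(int *h) {                       // HAL pulls next buffer
            if (pending.empty()) return false;
            inFlight.push_back(pending.front());     // move entry to in-flight
            pending.pop_front();
            *h = inFlight.back().handle;
            return true;
        }

        bool release(int h) {                        // HAL hands the buffer back
            for (auto it = inFlight.begin(); it != inFlight.end(); ++it) {
                if (it->handle == h) { inFlight.erase(it); return true; }
            }
            return false;                            // not an in-flight buffer
        }
    };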
+
+status_t Camera2Device::ReprocessStreamAdapter::dump(int fd,
+ const Vector<String16>& args) {
+ String8 result =
+ String8::format(" Reprocess stream %d: %d x %d, fmt 0x%x\n",
+ mId, mWidth, mHeight, mFormat);
+ result.appendFormat(" acquired buffers: %d\n",
+ mActiveBuffers);
+ result.appendFormat(" frame count: %d\n",
+ mFrameCount);
+ write(fd, result.string(), result.size());
+ return OK;
+}
+
+const camera2_stream_in_ops *Camera2Device::ReprocessStreamAdapter::getStreamOps() {
+ return static_cast<camera2_stream_in_ops *>(this);
+}
+
+int Camera2Device::ReprocessStreamAdapter::acquire_buffer(
+ const camera2_stream_in_ops_t *w,
+ buffer_handle_t** buffer) {
+ int res;
+ ReprocessStreamAdapter* stream =
+ const_cast<ReprocessStreamAdapter*>(
+ static_cast<const ReprocessStreamAdapter*>(w));
+ if (stream->mState != ACTIVE) {
+ ALOGE("%s: Called when in bad state: %d", __FUNCTION__, stream->mState);
+ return INVALID_OPERATION;
+ }
+
+ if (stream->mQueue.empty()) {
+ *buffer = NULL;
+ return OK;
+ }
+
+ QueueEntry &entry = *(stream->mQueue.begin());
+
+ *buffer = entry.handle;
+
+ stream->mInFlightQueue.push_back(entry);
+ stream->mQueue.erase(stream->mQueue.begin());
+
+ stream->mActiveBuffers++;
+
+ ALOGV("Stream %d acquire: Buffer %p acquired", stream->mId,
+ (void*)(**buffer));
+ return OK;
+}
+
+int Camera2Device::ReprocessStreamAdapter::release_buffer(
+ const camera2_stream_in_ops_t* w,
+ buffer_handle_t* buffer) {
+ ReprocessStreamAdapter *stream =
+ const_cast<ReprocessStreamAdapter*>(
+ static_cast<const ReprocessStreamAdapter*>(w) );
+ stream->mFrameCount++;
+ ALOGV("Reprocess stream %d release: Frame %d (%p)",
+ stream->mId, stream->mFrameCount, (void*)*buffer);
+ int state = stream->mState;
+ if (state != ACTIVE) {
+ ALOGE("%s: Called when in bad state: %d", __FUNCTION__, state);
+ return INVALID_OPERATION;
+ }
+ stream->mActiveBuffers--;
+
+ List<QueueEntry>::iterator s;
+ for (s = stream->mInFlightQueue.begin(); s != stream->mInFlightQueue.end(); s++) {
+ if ( s->handle == buffer ) break;
+ }
+ if (s == stream->mInFlightQueue.end()) {
+ ALOGE("%s: Can't find buffer %p in in-flight list!", __FUNCTION__,
+ buffer);
+ return INVALID_OPERATION;
+ }
+
+ sp<BufferReleasedListener> listener = s->releaseListener.promote();
+ if (listener != 0) {
+ listener->onBufferReleased(s->handle);
+ } else {
+ ALOGE("%s: Can't free buffer - missing listener", __FUNCTION__);
+ }
+ stream->mInFlightQueue.erase(s);
+
+ return OK;
+}
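The acquire_buffer()/release_buffer() pair above relies on a C-to-C++ trampoline: the adapter inherits the HAL's C ops struct, points the struct's function pointers at static members, and each static recovers the object by down-casting the ops pointer. A self-contained sketch of the pattern with hypothetical names (the real ops table is camera2_stream_in_ops):

    #include <cstdio>

    struct stream_in_ops {                       // C-style ops table (HAL side)
        int (*acquire)(const stream_in_ops *w);
    };

    class StreamAdapter : public stream_in_ops { // C++ object embeds the ops table
    public:
        StreamAdapter() { stream_in_ops::acquire = acquire_thunk; }
    private:
        int mCount = 0;
        // Static thunk: the ops pointer the HAL hands back is also a pointer
        // to the enclosing C++ object, thanks to the inheritance layout.
        static int acquire_thunk(const stream_in_ops *w) {
            StreamAdapter *self =
                const_cast<StreamAdapter*>(static_cast<const StreamAdapter*>(w));
            return ++self->mCount;
        }
    };

    int main() {
        StreamAdapter a;
        stream_in_ops *ops = &a;                 // what the HAL would hold
        std::printf("%d\n", ops->acquire(ops));  // prints 1
        return 0;
    }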
}; // namespace android
diff --git a/services/camera/libcameraservice/Camera2Device.h b/services/camera/libcameraservice/Camera2Device.h
index 64f4608..a327d8d 100644
--- a/services/camera/libcameraservice/Camera2Device.h
+++ b/services/camera/libcameraservice/Camera2Device.h
@@ -80,6 +80,12 @@
int *id);
/**
+ * Create an input reprocess stream that uses buffers from an existing
+ * output stream.
+ */
+ status_t createReprocessStreamFromStream(int outputId, int *id);
+
+ /**
* Get information about a given stream.
*/
status_t getStreamInfo(int id,
@@ -97,6 +103,12 @@
status_t deleteStream(int id);
/**
+ * Delete reprocess stream. Must not be called if there are requests in
+ * flight which reference that stream.
+ */
+ status_t deleteReprocessStream(int id);
+
+ /**
* Create a metadata buffer with fields that the HAL device believes are
* best for the given use case
*/
@@ -163,6 +175,21 @@
*/
status_t triggerPrecaptureMetering(uint32_t id);
+ /**
+ * Abstract interface for clients that want to listen to reprocess buffer
+ * release events
+ */
+ struct BufferReleasedListener: public virtual RefBase {
+ virtual void onBufferReleased(buffer_handle_t *handle) = 0;
+ };
+
+ /**
+ * Push a buffer to be reprocessed into a reprocessing stream, and
+ * provide a listener to call once the buffer is returned by the HAL
+ */
+ status_t pushReprocessBuffer(int reprocessStreamId,
+ buffer_handle_t *buffer, wp<BufferReleasedListener> listener);
+
private:
const int mId;
@@ -343,6 +370,86 @@
typedef List<sp<StreamAdapter> > StreamList;
StreamList mStreams;
+ /**
+ * Adapter from an existing output stream to camera2 device input stream ops.
+ * Also takes care of allocating/deallocating the stream in the device interface
+ */
+ class ReprocessStreamAdapter: public camera2_stream_in_ops, public virtual RefBase {
+ public:
+ ReprocessStreamAdapter(camera2_device_t *d);
+
+ ~ReprocessStreamAdapter();
+
+ /**
+ * Create a HAL device reprocess stream based on an existing output stream.
+ */
+ status_t connectToDevice(const sp<StreamAdapter> &outputStream);
+
+ status_t release();
+
+ /**
+ * Push buffer into stream for reprocessing. Takes ownership until it notifies
+ * that the buffer has been released
+ */
+ status_t pushIntoStream(buffer_handle_t *handle,
+ const wp<BufferReleasedListener> &releaseListener);
+
+ /**
+ * Get stream parameters.
+ * Only valid after a successful connectToDevice call.
+ */
+ int getId() const { return mId; }
+ uint32_t getWidth() const { return mWidth; }
+ uint32_t getHeight() const { return mHeight; }
+ uint32_t getFormat() const { return mFormat; }
+
+ // Dump stream information
+ status_t dump(int fd, const Vector<String16>& args);
+
+ private:
+ enum {
+ ERROR = -1,
+ RELEASED = 0,
+ ACTIVE
+ } mState;
+
+ sp<ANativeWindow> mConsumerInterface;
+ wp<StreamAdapter> mBaseStream;
+
+ struct QueueEntry {
+ buffer_handle_t *handle;
+ wp<BufferReleasedListener> releaseListener;
+ };
+
+ List<QueueEntry> mQueue;
+
+ List<QueueEntry> mInFlightQueue;
+
+ camera2_device_t *mDevice;
+
+ uint32_t mId;
+ uint32_t mWidth;
+ uint32_t mHeight;
+ uint32_t mFormat;
+
+ /** Debugging information */
+ uint32_t mActiveBuffers;
+ uint32_t mFrameCount;
+ int64_t mLastTimestamp;
+
+ const camera2_stream_in_ops *getStreamOps();
+
+ static int acquire_buffer(const camera2_stream_in_ops_t *w,
+ buffer_handle_t** buffer);
+
+ static int release_buffer(const camera2_stream_in_ops_t* w,
+ buffer_handle_t* buffer);
+
+ }; // class ReprocessStreamAdapter
+
+ typedef List<sp<ReprocessStreamAdapter> > ReprocessStreamList;
+ ReprocessStreamList mReprocessStreams;
+
// Receives HAL notifications and routes them to the NotificationListener
static void notificationCallback(int32_t msg_type,
int32_t ext1,
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
index 854b890..bccb18e 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
@@ -136,7 +136,7 @@
return mCallbackStreamId;
}
-void CallbackProcessor::dump(int fd, const Vector<String16>& args) {
+void CallbackProcessor::dump(int fd, const Vector<String16>& args) const {
}
bool CallbackProcessor::threadLoop() {
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.h b/services/camera/libcameraservice/camera2/CallbackProcessor.h
index 36c51a3..c2a1372 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.h
+++ b/services/camera/libcameraservice/camera2/CallbackProcessor.h
@@ -48,7 +48,7 @@
status_t deleteStream();
int getStreamId() const;
- void dump(int fd, const Vector<String16>& args);
+ void dump(int fd, const Vector<String16>& args) const;
private:
static const nsecs_t kWaitDuration = 10000000; // 10 ms
wp<Camera2Client> mClient;
diff --git a/services/camera/libcameraservice/camera2/CameraMetadata.cpp b/services/camera/libcameraservice/camera2/CameraMetadata.cpp
index 95377b2..8399e20 100644
--- a/services/camera/libcameraservice/camera2/CameraMetadata.cpp
+++ b/services/camera/libcameraservice/camera2/CameraMetadata.cpp
@@ -84,6 +84,10 @@
get_camera_metadata_entry_count(mBuffer);
}
+bool CameraMetadata::isEmpty() const {
+ return entryCount() == 0;
+}
+
status_t CameraMetadata::sort() {
return sort_camera_metadata(mBuffer);
}
diff --git a/services/camera/libcameraservice/camera2/CameraMetadata.h b/services/camera/libcameraservice/camera2/CameraMetadata.h
index 340414e..aee6cd7 100644
--- a/services/camera/libcameraservice/camera2/CameraMetadata.h
+++ b/services/camera/libcameraservice/camera2/CameraMetadata.h
@@ -87,6 +87,11 @@
size_t entryCount() const;
/**
+ * Is the buffer empty (no entries)
+ */
+ bool isEmpty() const;
+
+ /**
* Sort metadata buffer for faster find
*/
status_t sort();
diff --git a/services/camera/libcameraservice/camera2/CaptureSequencer.cpp b/services/camera/libcameraservice/camera2/CaptureSequencer.cpp
new file mode 100644
index 0000000..532d2aa
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/CaptureSequencer.cpp
@@ -0,0 +1,506 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2Client::CaptureSequencer"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <utils/Vector.h>
+
+#include "CaptureSequencer.h"
+#include "../Camera2Device.h"
+#include "../Camera2Client.h"
+#include "Parameters.h"
+
+namespace android {
+namespace camera2 {
+
+/** Public members */
+
+CaptureSequencer::CaptureSequencer(wp<Camera2Client> client):
+ Thread(false),
+ mStartCapture(false),
+ mBusy(false),
+ mNewAEState(false),
+ mNewFrameReceived(false),
+ mNewCaptureReceived(false),
+ mClient(client),
+ mCaptureState(IDLE),
+ mTriggerId(0),
+ mTimeoutCount(0),
+ mCaptureId(Camera2Client::kFirstCaptureRequestId) {
+}
+
+CaptureSequencer::~CaptureSequencer() {
+ ALOGV("%s: Exit", __FUNCTION__);
+}
+
+void CaptureSequencer::setZslProcessor(wp<ZslProcessor> processor) {
+ Mutex::Autolock l(mInputMutex);
+ mZslProcessor = processor;
+}
+
+status_t CaptureSequencer::startCapture() {
+ ATRACE_CALL();
+ Mutex::Autolock l(mInputMutex);
+ if (mBusy) {
+ ALOGE("%s: Already busy capturing!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+ if (!mStartCapture) {
+ mStartCapture = true;
+ mStartCaptureSignal.signal();
+ }
+ return OK;
+}
+
+void CaptureSequencer::notifyAutoExposure(uint8_t newState, int triggerId) {
+ ATRACE_CALL();
+ Mutex::Autolock l(mInputMutex);
+ mAEState = newState;
+ mAETriggerId = triggerId;
+ if (!mNewAEState) {
+ mNewAEState = true;
+ mNewNotifySignal.signal();
+ }
+}
+
+void CaptureSequencer::onFrameAvailable(int32_t frameId,
+ CameraMetadata &frame) {
+ ATRACE_CALL();
+ Mutex::Autolock l(mInputMutex);
+ mNewFrameId = frameId;
+ mNewFrame.acquire(frame);
+ if (!mNewFrameReceived) {
+ mNewFrameReceived = true;
+ mNewFrameSignal.signal();
+ }
+}
+
+void CaptureSequencer::onCaptureAvailable(nsecs_t timestamp) {
+ ATRACE_CALL();
+ Mutex::Autolock l(mInputMutex);
+ mCaptureTimestamp = timestamp;
+ if (!mNewCaptureReceived) {
+ mNewCaptureReceived = true;
+ mNewCaptureSignal.signal();
+ }
+}
+
+
+void CaptureSequencer::dump(int fd, const Vector<String16>& args) {
+ String8 result;
+ if (mCaptureRequest.entryCount() != 0) {
+ result = " Capture request:\n";
+ write(fd, result.string(), result.size());
+ mCaptureRequest.dump(fd, 2, 6);
+ } else {
+ result = " Capture request: undefined\n";
+ write(fd, result.string(), result.size());
+ }
+ result = String8::format(" Current capture state: %s\n",
+ kStateNames[mCaptureState]);
+ result.append(" Latest captured frame:\n");
+ write(fd, result.string(), result.size());
+ mNewFrame.dump(fd, 2, 6);
+}
+
+/** Private members */
+
+const char* CaptureSequencer::kStateNames[CaptureSequencer::NUM_CAPTURE_STATES+1] =
+{
+ "IDLE",
+ "START",
+ "ZSL_START",
+ "ZSL_WAITING",
+ "ZSL_REPROCESSING",
+ "STANDARD_START",
+ "STANDARD_PRECAPTURE_WAIT",
+ "STANDARD_CAPTURE",
+ "STANDARD_CAPTURE_WAIT",
+ "DONE",
+ "ERROR",
+ "UNKNOWN"
+};
+
+const CaptureSequencer::StateManager
+ CaptureSequencer::kStateManagers[CaptureSequencer::NUM_CAPTURE_STATES-1] = {
+ &CaptureSequencer::manageIdle,
+ &CaptureSequencer::manageStart,
+ &CaptureSequencer::manageZslStart,
+ &CaptureSequencer::manageZslWaiting,
+ &CaptureSequencer::manageZslReprocessing,
+ &CaptureSequencer::manageStandardStart,
+ &CaptureSequencer::manageStandardPrecaptureWait,
+ &CaptureSequencer::manageStandardCapture,
+ &CaptureSequencer::manageStandardCaptureWait,
+ &CaptureSequencer::manageDone,
+};
+
+bool CaptureSequencer::threadLoop() {
+ status_t res;
+
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return false;
+
+ if (mCaptureState < ERROR) {
+ mCaptureState = (this->*kStateManagers[mCaptureState])(client);
+ } else {
+ ALOGE("%s: Bad capture state: %s",
+ __FUNCTION__, kStateNames[mCaptureState]);
+ return false;
+ }
+
+ return true;
+}
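threadLoop() drives the whole sequencer as a table of pointer-to-member state handlers indexed by the current state. A compact standalone illustration of the same dispatch pattern (names invented for the sketch):

    #include <cstdio>

    class Sequencer {
    public:
        // One step: run the handler for the current state, adopt its result.
        void step() { mState = (this->*kManagers[mState])(); }
        int state() const { return mState; }
    private:
        enum State { IDLE, START, DONE, NUM_STATES };
        typedef State (Sequencer::*Manager)();
        static const Manager kManagers[NUM_STATES];
        State mState = IDLE;

        State manageIdle()  { return START; }
        State manageStart() { return DONE; }
        State manageDone()  { return IDLE; }
    };

    // One handler per state, indexed by the enum value.
    const Sequencer::Manager Sequencer::kManagers[Sequencer::NUM_STATES] = {
        &Sequencer::manageIdle,
        &Sequencer::manageStart,
        &Sequencer::manageDone,
    };

    int main() {
        Sequencer s;
        for (int i = 0; i < 3; i++) s.step();     // IDLE -> START -> DONE -> IDLE
        std::printf("back to state %d\n", s.state());
        return 0;
    }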
+
+CaptureSequencer::CaptureState CaptureSequencer::manageIdle(sp<Camera2Client> &client) {
+ status_t res;
+ ATRACE_CALL();
+ Mutex::Autolock l(mInputMutex);
+ while (!mStartCapture) {
+ res = mStartCaptureSignal.waitRelative(mInputMutex,
+ kWaitDuration);
+ if (res == TIMED_OUT) break;
+ }
+ if (mStartCapture) {
+ mStartCapture = false;
+ mBusy = true;
+ return START;
+ }
+ return IDLE;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageDone(sp<Camera2Client> &client) {
+ status_t res;
+ ATRACE_CALL();
+ mCaptureId++;
+
+ {
+ Mutex::Autolock l(mInputMutex);
+ mBusy = false;
+ }
+
+ SharedParameters::Lock l(client->getParameters());
+ switch (l.mParameters.state) {
+ case Parameters::STILL_CAPTURE:
+ l.mParameters.state = Parameters::STOPPED;
+ break;
+ case Parameters::VIDEO_SNAPSHOT:
+ l.mParameters.state = Parameters::RECORD;
+ break;
+ default:
+ ALOGE("%s: Camera %d: Still image produced unexpectedly "
+ "in state %s!",
+ __FUNCTION__, client->getCameraId(),
+ Parameters::getStateName(l.mParameters.state));
+ }
+
+ return IDLE;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageStart(
+ sp<Camera2Client> &client) {
+ status_t res;
+ ATRACE_CALL();
+ SharedParameters::Lock l(client->getParameters());
+ CaptureState nextState = DONE;
+
+ res = updateCaptureRequest(l.mParameters, client);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't update still image capture request: %s (%d)",
+ __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return DONE;
+ }
+
+ if (l.mParameters.zslMode &&
+ l.mParameters.state == Parameters::STILL_CAPTURE) {
+ nextState = ZSL_START;
+ } else {
+ nextState = STANDARD_START;
+ }
+
+ return nextState;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageZslStart(
+ sp<Camera2Client> &client) {
+ status_t res;
+ sp<ZslProcessor> processor = mZslProcessor.promote();
+ if (processor == 0) {
+ ALOGE("%s: No ZSL queue to use!", __FUNCTION__);
+ return DONE;
+ }
+
+ client->registerFrameListener(mCaptureId,
+ this);
+
+ res = client->getCameraDevice()->clearStreamingRequest();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to stop preview for ZSL capture: "
+ "%s (%d)",
+ __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return DONE;
+ }
+ // TODO: Actually select the right thing here.
+ processor->pushToReprocess(mCaptureId);
+
+ mTimeoutCount = kMaxTimeoutsForCaptureEnd;
+ return STANDARD_CAPTURE_WAIT;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageZslWaiting(
+ sp<Camera2Client> &client) {
+ return DONE;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageZslReprocessing(
+ sp<Camera2Client> &client) {
+ return START;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageStandardStart(
+ sp<Camera2Client> &client) {
+ ATRACE_CALL();
+ client->registerFrameListener(mCaptureId,
+ this);
+ {
+ SharedParameters::Lock l(client->getParameters());
+ mTriggerId = l.mParameters.precaptureTriggerCounter++;
+ }
+ client->getCameraDevice()->triggerPrecaptureMetering(mTriggerId);
+
+ mAeInPrecapture = false;
+ mTimeoutCount = kMaxTimeoutsForPrecaptureStart;
+ return STANDARD_PRECAPTURE_WAIT;
+}
+
+CaptureSequencer::CaptureState CaptureSequencer::manageStandardPrecaptureWait(
+ sp<Camera2Client> &client) {
+ status_t res;
+ ATRACE_CALL();
+ Mutex::Autolock l(mInputMutex);
+ while (!mNewAEState) {
+ res = mNewNotifySignal.waitRelative(mInputMutex, kWaitDuration);
+ if (res == TIMED_OUT) {
+ mTimeoutCount--;
+ break;
+ }
+ }
+ if (mTimeoutCount <= 0) {
+ ALOGW("Timed out waiting for precapture %s",
+ mAeInPrecapture ? "end" : "start");
+ return STANDARD_CAPTURE;
+ }
+ if (mNewAEState) {
+ if (!mAeInPrecapture) {
+ // Waiting to see PRECAPTURE state
+ if (mAETriggerId == mTriggerId &&
+ mAEState == ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
+ ALOGV("%s: Got precapture start", __FUNCTION__);
+ mAeInPrecapture = true;
+ mTimeoutCount = kMaxTimeoutsForPrecaptureEnd;
+ }
+ } else {
+ // Waiting to see PRECAPTURE state end
+ if (mAETriggerId == mTriggerId &&
+ mAEState != ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
+ ALOGV("%s: Got precapture end", __FUNCTION__);
+ return STANDARD_CAPTURE;
+ }
+ }
+ mNewAEState = false;
+ }
+ return STANDARD_PRECAPTURE_WAIT;
+}
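The *Wait states all share one idiom: wait on a condition variable in kWaitDuration slices, spending one unit of mTimeoutCount per timeout, so a stuck pipeline degrades into a bounded wait rather than a hang. A rough equivalent with standard C++ primitives instead of android::Mutex/Condition (assumed 100 ms slices and a budget of 10):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    struct SlicedWaiter {
        std::mutex m;
        std::condition_variable cv;
        bool eventArrived = false;
        int timeoutBudget = 10;                     // ~10 x 100 ms = 1 s total

        // One wait slice; the state handler calls this each pass.
        // Returns true if the event arrived, false if this slice timed out.
        bool waitSlice() {
            std::unique_lock<std::mutex> lock(m);
            bool got = cv.wait_for(lock, std::chrono::milliseconds(100),
                                   [this] { return eventArrived; });
            if (!got) timeoutBudget--;              // burn one unit of budget
            return got;
        }

        bool exhausted() const { return timeoutBudget <= 0; }

        void signalEvent() {                        // producer side
            std::lock_guard<std::mutex> lock(m);
            eventArrived = true;
            cv.notify_one();
        }
    };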
+
+CaptureSequencer::CaptureState CaptureSequencer::manageStandardCapture(
+ sp<Camera2Client> &client) {
+ status_t res;
+ ATRACE_CALL();
+ SharedParameters::Lock l(client->getParameters());
+ Vector<uint8_t> outputStreams;
+
+ outputStreams.push(client->getPreviewStreamId());
+ outputStreams.push(client->getCaptureStreamId());
+
+ if (l.mParameters.previewCallbackFlags &
+ CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) {
+ outputStreams.push(client->getCallbackStreamId());
+ }
+
+ if (l.mParameters.state == Parameters::VIDEO_SNAPSHOT) {
+ outputStreams.push(client->getRecordingStreamId());
+ }
+
+ res = mCaptureRequest.update(ANDROID_REQUEST_OUTPUT_STREAMS,
+ outputStreams);
+ if (res == OK) {
+ res = mCaptureRequest.update(ANDROID_REQUEST_ID,
+ &mCaptureId, 1);
+ }
+ if (res == OK) {
+ res = mCaptureRequest.sort();
+ }
+
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to set up still capture request: %s (%d)",
+ __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return DONE;
+ }
+
+ CameraMetadata captureCopy = mCaptureRequest;
+ if (captureCopy.entryCount() == 0) {
+ ALOGE("%s: Camera %d: Unable to copy capture request for HAL device",
+ __FUNCTION__, client->getCameraId());
+ return DONE;
+ }
+
+ if (l.mParameters.state == Parameters::STILL_CAPTURE) {
+ res = client->getCameraDevice()->clearStreamingRequest();
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to stop preview for still capture: "
+ "%s (%d)",
+ __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return DONE;
+ }
+ }
+ // TODO: Capture should be atomic with setStreamingRequest here
+ res = client->getCameraDevice()->capture(captureCopy);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to submit still image capture request: "
+ "%s (%d)",
+ __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ return DONE;
+ }
+
+ mTimeoutCount = kMaxTimeoutsForCaptureEnd;
+ return STANDARD_CAPTURE_WAIT;
+}
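Compared with the removed four-way switch in Camera2Client::takePicture, manageStandardCapture() assembles the output-stream list incrementally. The same idea in a trivial standalone form (std::vector standing in for android::Vector; the stream IDs are hypothetical placeholders for the client getters):

    #include <cstdint>
    #include <vector>

    std::vector<uint8_t> buildOutputStreams(bool callbacksEnabled,
                                            bool videoSnapshot) {
        std::vector<uint8_t> streams;
        streams.push_back(0);                        // preview stream
        streams.push_back(1);                        // JPEG capture stream
        if (callbacksEnabled) streams.push_back(2);  // preview callback stream
        if (videoSnapshot)    streams.push_back(3);  // recording stream
        return streams;  // 2-4 entries; no per-combination switch cases
    }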
+
+CaptureSequencer::CaptureState CaptureSequencer::manageStandardCaptureWait(
+ sp<Camera2Client> &client) {
+ status_t res;
+ ATRACE_CALL();
+ Mutex::Autolock l(mInputMutex);
+ while (!mNewFrameReceived) {
+ res = mNewFrameSignal.waitRelative(mInputMutex, kWaitDuration);
+ if (res == TIMED_OUT) {
+ mTimeoutCount--;
+ break;
+ }
+ }
+ while (!mNewCaptureReceived) {
+ res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration);
+ if (res == TIMED_OUT) {
+ mTimeoutCount--;
+ break;
+ }
+ }
+ if (mTimeoutCount <= 0) {
+ ALOGW("Timed out waiting for capture to complete");
+ return DONE;
+ }
+ if (mNewFrameReceived && mNewCaptureReceived) {
+ if (mNewFrameId != mCaptureId) {
+ ALOGW("Mismatched capture frame IDs: Expected %d, got %d",
+ mCaptureId, mNewFrameId);
+ }
+ camera_metadata_entry_t entry;
+ entry = mNewFrame.find(ANDROID_SENSOR_TIMESTAMP);
+ if (entry.count == 0) {
+ ALOGE("No timestamp field in capture frame!");
+ } else if (entry.data.i64[0] != mCaptureTimestamp) {
+ ALOGW("Mismatched capture timestamps: Metadata frame %lld,"
+ " captured buffer %lld", entry.data.i64[0], mCaptureTimestamp);
+ }
+ client->removeFrameListener(mCaptureId);
+
+ mNewFrameReceived = false;
+ mNewCaptureReceived = false;
+ return DONE;
+ }
+ return STANDARD_CAPTURE_WAIT;
+}
+
+status_t CaptureSequencer::updateCaptureRequest(const Parameters &params,
+ sp<Camera2Client> &client) {
+ ATRACE_CALL();
+ status_t res;
+ if (mCaptureRequest.entryCount() == 0) {
+ res = client->getCameraDevice()->createDefaultRequest(
+ CAMERA2_TEMPLATE_STILL_CAPTURE,
+ &mCaptureRequest);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to create default still image request:"
+ " %s (%d)", __FUNCTION__, client->getCameraId(),
+ strerror(-res), res);
+ return res;
+ }
+ }
+
+ res = params.updateRequest(&mCaptureRequest);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to update common entries of capture "
+ "request: %s (%d)", __FUNCTION__, client->getCameraId(),
+ strerror(-res), res);
+ return res;
+ }
+
+ res = mCaptureRequest.update(ANDROID_JPEG_THUMBNAIL_SIZE,
+ params.jpegThumbSize, 2);
+ if (res != OK) return res;
+ res = mCaptureRequest.update(ANDROID_JPEG_THUMBNAIL_QUALITY,
+ &params.jpegThumbQuality, 1);
+ if (res != OK) return res;
+ res = mCaptureRequest.update(ANDROID_JPEG_QUALITY,
+ &params.jpegQuality, 1);
+ if (res != OK) return res;
+ res = mCaptureRequest.update(
+ ANDROID_JPEG_ORIENTATION,
+ &params.jpegRotation, 1);
+ if (res != OK) return res;
+
+ if (params.gpsEnabled) {
+ res = mCaptureRequest.update(
+ ANDROID_JPEG_GPS_COORDINATES,
+ params.gpsCoordinates, 3);
+ if (res != OK) return res;
+ res = mCaptureRequest.update(
+ ANDROID_JPEG_GPS_TIMESTAMP,
+ &params.gpsTimestamp, 1);
+ if (res != OK) return res;
+ res = mCaptureRequest.update(
+ ANDROID_JPEG_GPS_PROCESSING_METHOD,
+ params.gpsProcessingMethod);
+ if (res != OK) return res;
+ } else {
+ res = mCaptureRequest.erase(ANDROID_JPEG_GPS_COORDINATES);
+ if (res != OK) return res;
+ res = mCaptureRequest.erase(ANDROID_JPEG_GPS_TIMESTAMP);
+ if (res != OK) return res;
+ res = mCaptureRequest.erase(ANDROID_JPEG_GPS_PROCESSING_METHOD);
+ if (res != OK) return res;
+ }
+
+ return OK;
+}
+
+
+}; // namespace camera2
+}; // namespace android
diff --git a/services/camera/libcameraservice/camera2/CaptureSequencer.h b/services/camera/libcameraservice/camera2/CaptureSequencer.h
new file mode 100644
index 0000000..0492a43
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/CaptureSequencer.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_CAPTURESEQUENCER_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_CAPTURESEQUENCER_H
+
+#include <utils/Thread.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+#include "CameraMetadata.h"
+#include "Parameters.h"
+#include "FrameProcessor.h"
+
+namespace android {
+
+class Camera2Client;
+
+namespace camera2 {
+
+class ZslProcessor;
+
+/**
+ * Manages the still image capture process for
+ * zero-shutter-lag, regular, and video snapshots.
+ */
+class CaptureSequencer:
+ virtual public Thread,
+ virtual public FrameProcessor::FilteredListener {
+ public:
+ CaptureSequencer(wp<Camera2Client> client);
+ ~CaptureSequencer();
+
+ // Get reference to the ZslProcessor, which holds the ZSL buffers and frames
+ void setZslProcessor(wp<ZslProcessor> processor);
+
+ // Begin still image capture
+ status_t startCapture();
+
+ // Notifications about AE state changes
+ void notifyAutoExposure(uint8_t newState, int triggerId);
+
+ // Notifications from the frame processor
+ virtual void onFrameAvailable(int32_t frameId, CameraMetadata &frame);
+
+ // Notifications from the capture processor
+ void onCaptureAvailable(nsecs_t timestamp);
+
+ void dump(int fd, const Vector<String16>& args);
+
+ private:
+ /**
+ * Accessed by other threads
+ */
+ Mutex mInputMutex;
+
+ bool mStartCapture;
+ bool mBusy;
+ Condition mStartCaptureSignal;
+
+ bool mNewAEState;
+ uint8_t mAEState;
+ int mAETriggerId;
+ Condition mNewNotifySignal;
+
+ bool mNewFrameReceived;
+ int32_t mNewFrameId;
+ CameraMetadata mNewFrame;
+ Condition mNewFrameSignal;
+
+ bool mNewCaptureReceived;
+ nsecs_t mCaptureTimestamp;
+ Condition mNewCaptureSignal;
+
+ /**
+ * Internal to CaptureSequencer
+ */
+ static const nsecs_t kWaitDuration = 100000000; // 100 ms
+ static const int kMaxTimeoutsForPrecaptureStart = 2; // 200 ms
+ static const int kMaxTimeoutsForPrecaptureEnd = 10; // 1 sec
+ static const int kMaxTimeoutsForCaptureEnd = 20; // 2 sec
+
+ wp<Camera2Client> mClient;
+ wp<ZslProcessor> mZslProcessor;
+
+ enum CaptureState {
+ IDLE,
+ START,
+ ZSL_START,
+ ZSL_WAITING,
+ ZSL_REPROCESSING,
+ STANDARD_START,
+ STANDARD_PRECAPTURE_WAIT,
+ STANDARD_CAPTURE,
+ STANDARD_CAPTURE_WAIT,
+ DONE,
+ ERROR,
+ NUM_CAPTURE_STATES
+ } mCaptureState;
+ static const char* kStateNames[];
+
+ typedef CaptureState (CaptureSequencer::*StateManager)(sp<Camera2Client> &client);
+ static const StateManager kStateManagers[];
+
+ CameraMetadata mCaptureRequest;
+
+ int mTriggerId;
+ int mTimeoutCount;
+ bool mAeInPrecapture;
+
+ int32_t mCaptureId;
+
+ // Main internal methods
+
+ virtual bool threadLoop();
+
+ CaptureState manageIdle(sp<Camera2Client> &client);
+ CaptureState manageStart(sp<Camera2Client> &client);
+
+ CaptureState manageZslStart(sp<Camera2Client> &client);
+ CaptureState manageZslWaiting(sp<Camera2Client> &client);
+ CaptureState manageZslReprocessing(sp<Camera2Client> &client);
+
+ CaptureState manageStandardStart(sp<Camera2Client> &client);
+ CaptureState manageStandardPrecaptureWait(sp<Camera2Client> &client);
+ CaptureState manageStandardCapture(sp<Camera2Client> &client);
+ CaptureState manageStandardCaptureWait(sp<Camera2Client> &client);
+
+ CaptureState manageDone(sp<Camera2Client> &client);
+
+ // Utility methods
+
+ status_t updateCaptureRequest(const Parameters &params,
+ sp<Camera2Client> &client);
+};
+
+}; // namespace camera2
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/camera2/FrameProcessor.cpp b/services/camera/libcameraservice/camera2/FrameProcessor.cpp
index 5059754..e24db0b 100644
--- a/services/camera/libcameraservice/camera2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/FrameProcessor.cpp
@@ -36,6 +36,19 @@
ALOGV("%s: Exit", __FUNCTION__);
}
+status_t FrameProcessor::registerListener(int32_t id,
+ wp<FilteredListener> listener) {
+ Mutex::Autolock l(mInputMutex);
+ ALOGV("%s: Registering listener for frame id %d",
+ __FUNCTION__, id);
+ return mListeners.replaceValueFor(id, listener);
+}
+
+status_t FrameProcessor::removeListener(int32_t id) {
+ Mutex::Autolock l(mInputMutex);
+ return mListeners.removeItem(id);
+}
+
void FrameProcessor::dump(int fd, const Vector<String16>& args) {
String8 result(" Latest received frame:\n");
write(fd, result.string(), result.size());
@@ -50,6 +63,7 @@
sp<Camera2Client> client = mClient.promote();
if (client == 0) return false;
device = client->getCameraDevice();
+ if (device == 0) return false;
}
res = device->waitForNextFrame(kWaitDuration);
@@ -67,20 +81,28 @@
void FrameProcessor::processNewFrames(sp<Camera2Client> &client) {
status_t res;
+ ATRACE_CALL();
CameraMetadata frame;
while ( (res = client->getCameraDevice()->getNextFrame(&frame)) == OK) {
camera_metadata_entry_t entry;
+
entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
if (entry.count == 0) {
- ALOGE("%s: Camera %d: Error reading frame number: %s (%d)",
- __FUNCTION__, client->getCameraId(), strerror(-res), res);
+ ALOGE("%s: Camera %d: Error reading frame number",
+ __FUNCTION__, client->getCameraId());
break;
}
res = processFaceDetect(frame, client);
if (res != OK) break;
- mLastFrame.acquire(frame);
+ // Must be last - listener can take ownership of frame
+ res = processListener(frame, client);
+ if (res != OK) break;
+
+ if (!frame.isEmpty()) {
+ mLastFrame.acquire(frame);
+ }
}
if (res != NOT_ENOUGH_DATA) {
ALOGE("%s: Camera %d: Error getting next frame: %s (%d)",
@@ -91,9 +113,43 @@
return;
}
-status_t FrameProcessor::processFaceDetect(
- const CameraMetadata &frame, sp<Camera2Client> &client) {
+status_t FrameProcessor::processListener(CameraMetadata &frame,
+ sp<Camera2Client> &client) {
status_t res;
+ ATRACE_CALL();
+ camera_metadata_entry_t entry;
+
+ entry = frame.find(ANDROID_REQUEST_ID);
+ if (entry.count == 0) {
+ ALOGE("%s: Camera %d: Error reading frame id",
+ __FUNCTION__, client->getCameraId());
+ return BAD_VALUE;
+ }
+ int32_t frameId = entry.data.i32[0];
+ ALOGV("%s: Got frame with ID %d", __FUNCTION__, frameId);
+
+ sp<FilteredListener> listener;
+ {
+ Mutex::Autolock l(mInputMutex);
+ ssize_t listenerIndex = mListeners.indexOfKey(frameId);
+ if (listenerIndex != NAME_NOT_FOUND) {
+ listener = mListeners[listenerIndex].promote();
+ if (listener == 0) {
+ mListeners.removeItemsAt(listenerIndex, 1);
+ }
+ }
+ }
+
+ if (listener != 0) {
+ listener->onFrameAvailable(frameId, frame);
+ }
+ return OK;
+}
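processListener() looks up a weak listener reference by the frame's request ID and promotes it before calling out, so dead listeners are dropped lazily. A sketch of that lookup with std::map and std::weak_ptr standing in for KeyedVector and wp<>:

    #include <map>
    #include <memory>

    struct Listener { virtual void onFrame(int id) = 0; virtual ~Listener() {} };

    class Dispatcher {
    public:
        void registerListener(int id, std::weak_ptr<Listener> l) {
            mListeners[id] = l;                        // replaces any existing entry
        }
        void dispatch(int frameId) {
            std::shared_ptr<Listener> target;
            auto it = mListeners.find(frameId);
            if (it != mListeners.end()) {
                target = it->second.lock();            // promote weak reference
                if (!target) mListeners.erase(it);     // listener died; drop entry
            }
            if (target) target->onFrame(frameId);      // call outside the map walk
        }
    private:
        std::map<int, std::weak_ptr<Listener>> mListeners;
    };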
+
+status_t FrameProcessor::processFaceDetect(const CameraMetadata &frame,
+ sp<Camera2Client> &client) {
+ status_t res;
+ ATRACE_CALL();
camera_metadata_ro_entry_t entry;
bool enableFaceDetect;
int maxFaces;
@@ -209,6 +265,5 @@
return OK;
}
-
}; // namespace camera2
}; // namespace android
diff --git a/services/camera/libcameraservice/camera2/FrameProcessor.h b/services/camera/libcameraservice/camera2/FrameProcessor.h
index 2cdf7f0..25d489a 100644
--- a/services/camera/libcameraservice/camera2/FrameProcessor.h
+++ b/services/camera/libcameraservice/camera2/FrameProcessor.h
@@ -20,6 +20,7 @@
#include <utils/Thread.h>
#include <utils/String16.h>
#include <utils/Vector.h>
+#include <utils/KeyedVector.h>
#include "CameraMetadata.h"
namespace android {
@@ -36,6 +37,17 @@
FrameProcessor(wp<Camera2Client> client);
~FrameProcessor();
+ struct FilteredListener: virtual public RefBase {
+ // Listener may take ownership of frame
+ virtual void onFrameAvailable(int32_t frameId, CameraMetadata &frame) = 0;
+ };
+
+ // Register a listener for a specific frame ID (android.request.id).
+ // De-registers any existing listeners for that ID
+ status_t registerListener(int32_t id, wp<FilteredListener> listener);
+
+ status_t removeListener(int32_t id);
+
void dump(int fd, const Vector<String16>& args);
private:
static const nsecs_t kWaitDuration = 10000000; // 10 ms
@@ -43,10 +55,17 @@
virtual bool threadLoop();
+ Mutex mInputMutex;
+ KeyedVector<int32_t, wp<FilteredListener> > mListeners;
+
void processNewFrames(sp<Camera2Client> &client);
+
status_t processFaceDetect(const CameraMetadata &frame,
sp<Camera2Client> &client);
+ status_t processListener(CameraMetadata &frame,
+ sp<Camera2Client> &client);
+
CameraMetadata mLastFrame;
};
diff --git a/services/camera/libcameraservice/camera2/CaptureProcessor.cpp b/services/camera/libcameraservice/camera2/JpegProcessor.cpp
similarity index 90%
rename from services/camera/libcameraservice/camera2/CaptureProcessor.cpp
rename to services/camera/libcameraservice/camera2/JpegProcessor.cpp
index b17f9d2..92148ca 100644
--- a/services/camera/libcameraservice/camera2/CaptureProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/JpegProcessor.cpp
@@ -14,14 +14,14 @@
* limitations under the License.
*/
-#define LOG_TAG "Camera2Client::CaptureProcessor"
+#define LOG_TAG "Camera2Client::JpegProcessor"
#define ATRACE_TAG ATRACE_TAG_CAMERA
//#define LOG_NDEBUG 0
#include <utils/Log.h>
#include <utils/Trace.h>
-#include "CaptureProcessor.h"
+#include "JpegProcessor.h"
#include <gui/SurfaceTextureClient.h>
#include "../Camera2Device.h"
#include "../Camera2Client.h"
@@ -30,18 +30,21 @@
namespace android {
namespace camera2 {
-CaptureProcessor::CaptureProcessor(wp<Camera2Client> client):
+JpegProcessor::JpegProcessor(
+ wp<Camera2Client> client,
+ wp<CaptureSequencer> sequencer):
Thread(false),
mClient(client),
+ mSequencer(sequencer),
mCaptureAvailable(false),
mCaptureStreamId(NO_STREAM) {
}
-CaptureProcessor::~CaptureProcessor() {
+JpegProcessor::~JpegProcessor() {
ALOGV("%s: Exit", __FUNCTION__);
}
-void CaptureProcessor::onFrameAvailable() {
+void JpegProcessor::onFrameAvailable() {
Mutex::Autolock l(mInputMutex);
if (!mCaptureAvailable) {
mCaptureAvailable = true;
@@ -49,7 +52,7 @@
}
}
-status_t CaptureProcessor::updateStream(const Parameters &params) {
+status_t JpegProcessor::updateStream(const Parameters &params) {
ATRACE_CALL();
ALOGV("%s", __FUNCTION__);
status_t res;
@@ -127,7 +130,7 @@
return OK;
}
-status_t CaptureProcessor::deleteStream() {
+status_t JpegProcessor::deleteStream() {
ATRACE_CALL();
status_t res;
@@ -144,15 +147,15 @@
return OK;
}
-int CaptureProcessor::getStreamId() const {
+int JpegProcessor::getStreamId() const {
Mutex::Autolock l(mInputMutex);
return mCaptureStreamId;
}
-void CaptureProcessor::dump(int fd, const Vector<String16>& args) {
+void JpegProcessor::dump(int fd, const Vector<String16>& args) const {
}
-bool CaptureProcessor::threadLoop() {
+bool JpegProcessor::threadLoop() {
status_t res;
{
@@ -174,7 +177,7 @@
return true;
}
-status_t CaptureProcessor::processNewCapture(sp<Camera2Client> &client) {
+status_t JpegProcessor::processNewCapture(sp<Camera2Client> &client) {
ATRACE_CALL();
status_t res;
sp<Camera2Heap> captureHeap;
@@ -200,10 +203,7 @@
switch (l.mParameters.state) {
case Parameters::STILL_CAPTURE:
- l.mParameters.state = Parameters::STOPPED;
- break;
case Parameters::VIDEO_SNAPSHOT:
- l.mParameters.state = Parameters::RECORD;
break;
default:
ALOGE("%s: Camera %d: Still image produced unexpectedly "
@@ -224,6 +224,11 @@
return OK;
}
+ sp<CaptureSequencer> sequencer = mSequencer.promote();
+ if (sequencer != 0) {
+ sequencer->onCaptureAvailable(imgBuffer.timestamp);
+ }
+
// TODO: Optimize this to avoid memcopy
void* captureMemory = mCaptureHeap->mHeap->getBase();
size_t size = mCaptureHeap->mHeap->getSize();
diff --git a/services/camera/libcameraservice/camera2/CaptureProcessor.h b/services/camera/libcameraservice/camera2/JpegProcessor.h
similarity index 83%
rename from services/camera/libcameraservice/camera2/CaptureProcessor.h
rename to services/camera/libcameraservice/camera2/JpegProcessor.h
index 8e35739..6e7a860 100644
--- a/services/camera/libcameraservice/camera2/CaptureProcessor.h
+++ b/services/camera/libcameraservice/camera2/JpegProcessor.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_CAPTUREPROCESSOR_H
-#define ANDROID_SERVERS_CAMERA_CAMERA2_CAPTUREPROCESSOR_H
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_JPEGPROCESSOR_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_JPEGPROCESSOR_H
#include <utils/Thread.h>
#include <utils/String16.h>
@@ -33,14 +33,16 @@
namespace camera2 {
+class CaptureSequencer;
+
/***
* Still image capture output image processing
*/
-class CaptureProcessor:
+class JpegProcessor:
public Thread, public CpuConsumer::FrameAvailableListener {
public:
- CaptureProcessor(wp<Camera2Client> client);
- ~CaptureProcessor();
+ JpegProcessor(wp<Camera2Client> client, wp<CaptureSequencer> sequencer);
+ ~JpegProcessor();
void onFrameAvailable();
@@ -48,10 +50,11 @@
status_t deleteStream();
int getStreamId() const;
- void dump(int fd, const Vector<String16>& args);
+ void dump(int fd, const Vector<String16>& args) const;
private:
static const nsecs_t kWaitDuration = 10000000; // 10 ms
wp<Camera2Client> mClient;
+ wp<CaptureSequencer> mSequencer;
mutable Mutex mInputMutex;
bool mCaptureAvailable;
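Both processors share the threading idiom visible in this header: the consumer callback sets an "available" flag under a mutex and signals a condition variable, while the worker thread waits with the 10 ms kWaitDuration timeout and simply loops again on timeout. A standalone sketch of that idiom with std::condition_variable (class and method names invented for illustration):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    class FrameWaiter {
    public:
        // Called from the consumer's callback thread.
        void onFrameAvailable() {
            std::lock_guard<std::mutex> l(mMutex);
            if (!mAvailable) {
                mAvailable = true;
                mSignal.notify_one();
            }
        }

        // Called from the worker thread; false on timeout, so the
        // caller can spin again (as threadLoop does by returning true).
        bool waitForFrame() {
            std::unique_lock<std::mutex> l(mMutex);
            if (!mSignal.wait_for(l, std::chrono::milliseconds(10),
                    [this] { return mAvailable; })) {
                return false;  // timed out, no frame yet
            }
            mAvailable = false;  // consume the flag
            return true;
        }

    private:
        std::mutex mMutex;
        std::condition_variable mSignal;
        bool mAvailable = false;
    };

    int main() {
        FrameWaiter w;
        std::thread producer([&w] { w.onFrameAvailable(); });
        while (!w.waitForFrame()) { /* poll again, as threadLoop does */ }
        producer.join();
        return 0;
    }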
diff --git a/services/camera/libcameraservice/camera2/Parameters.cpp b/services/camera/libcameraservice/camera2/Parameters.cpp
index 2f7d023..f89d1e3 100644
--- a/services/camera/libcameraservice/camera2/Parameters.cpp
+++ b/services/camera/libcameraservice/camera2/Parameters.cpp
@@ -18,6 +18,9 @@
#define ATRACE_TAG ATRACE_TAG_CAMERA
//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
#include <math.h>
#include <stdlib.h>
@@ -738,9 +741,11 @@
enableFaceDetect = false;
enableFocusMoveMessages = false;
- afTriggerCounter = 0;
+ afTriggerCounter = 1;
currentAfTriggerId = -1;
+ precaptureTriggerCounter = 1;
+
previewCallbackFlags = 0;
state = STOPPED;
@@ -1318,6 +1323,209 @@
return OK;
}
+status_t Parameters::updateRequest(CameraMetadata *request) const {
+ ATRACE_CALL();
+ status_t res;
+
+ uint8_t metadataMode = ANDROID_REQUEST_METADATA_FULL;
+ res = request->update(ANDROID_REQUEST_METADATA_MODE,
+ &metadataMode, 1);
+ if (res != OK) return res;
+
+ res = request->update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+ previewFpsRange, 2);
+ if (res != OK) return res;
+
+ res = request->update(ANDROID_CONTROL_AWB_MODE,
+ &wbMode, 1);
+ if (res != OK) return res;
+
+ uint8_t reqWbLock = autoWhiteBalanceLock ?
+ ANDROID_CONTROL_AWB_LOCK_ON : ANDROID_CONTROL_AWB_LOCK_OFF;
+ res = request->update(ANDROID_CONTROL_AWB_LOCK,
+ &reqWbLock, 1);
+ if (res != OK) return res;
+
+ res = request->update(ANDROID_CONTROL_EFFECT_MODE,
+ &effectMode, 1);
+ if (res != OK) return res;
+ res = request->update(ANDROID_CONTROL_AE_ANTIBANDING_MODE,
+ &antibandingMode, 1);
+ if (res != OK) return res;
+
+ uint8_t reqControlMode =
+ (sceneMode == ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED) ?
+ ANDROID_CONTROL_AUTO : ANDROID_CONTROL_USE_SCENE_MODE;
+ res = request->update(ANDROID_CONTROL_MODE,
+ &reqControlMode, 1);
+ if (res != OK) return res;
+ if (reqControlMode == ANDROID_CONTROL_USE_SCENE_MODE) {
+ res = request->update(ANDROID_CONTROL_SCENE_MODE,
+ &sceneMode, 1);
+ if (res != OK) return res;
+ }
+
+ uint8_t reqFlashMode = ANDROID_FLASH_OFF;
+ uint8_t reqAeMode;
+ switch (flashMode) {
+ case Parameters::FLASH_MODE_OFF:
+ reqAeMode = ANDROID_CONTROL_AE_ON; break;
+ case Parameters::FLASH_MODE_AUTO:
+ reqAeMode = ANDROID_CONTROL_AE_ON_AUTO_FLASH; break;
+ case Parameters::FLASH_MODE_ON:
+ reqAeMode = ANDROID_CONTROL_AE_ON_ALWAYS_FLASH; break;
+ case Parameters::FLASH_MODE_TORCH:
+ reqAeMode = ANDROID_CONTROL_AE_ON;
+ reqFlashMode = ANDROID_FLASH_TORCH;
+ break;
+ case Parameters::FLASH_MODE_RED_EYE:
+ reqAeMode = ANDROID_CONTROL_AE_ON_AUTO_FLASH_REDEYE; break;
+ default:
+ ALOGE("%s: Camera %d: Unknown flash mode %d", __FUNCTION__,
+ cameraId, flashMode);
+ return BAD_VALUE;
+ }
+ res = request->update(ANDROID_FLASH_MODE,
+ &reqFlashMode, 1);
+ if (res != OK) return res;
+ res = request->update(ANDROID_CONTROL_AE_MODE,
+ &reqAeMode, 1);
+ if (res != OK) return res;
+
+ uint8_t reqAeLock = autoExposureLock ?
+ ANDROID_CONTROL_AE_LOCK_ON : ANDROID_CONTROL_AE_LOCK_OFF;
+ res = request->update(ANDROID_CONTROL_AE_LOCK,
+ &reqAeLock, 1);
+ if (res != OK) return res;
+
+ float reqFocusDistance = 0; // infinity focus in diopters
+ uint8_t reqFocusMode;
+ switch (focusMode) {
+ case Parameters::FOCUS_MODE_AUTO:
+ case Parameters::FOCUS_MODE_MACRO:
+ case Parameters::FOCUS_MODE_CONTINUOUS_VIDEO:
+ case Parameters::FOCUS_MODE_CONTINUOUS_PICTURE:
+ case Parameters::FOCUS_MODE_EDOF:
+ reqFocusMode = focusMode;
+ break;
+ case Parameters::FOCUS_MODE_INFINITY:
+ case Parameters::FOCUS_MODE_FIXED:
+ reqFocusMode = ANDROID_CONTROL_AF_OFF;
+ break;
+ default:
+ ALOGE("%s: Camera %d: Unknown focus mode %d", __FUNCTION__,
+ cameraId, focusMode);
+ return BAD_VALUE;
+ }
+ res = request->update(ANDROID_LENS_FOCUS_DISTANCE,
+ &reqFocusDistance, 1);
+ if (res != OK) return res;
+ res = request->update(ANDROID_CONTROL_AF_MODE,
+ &reqFocusMode, 1);
+ if (res != OK) return res;
+
+ size_t reqFocusingAreasSize = focusingAreas.size() * 5;
+ int32_t *reqFocusingAreas = new int32_t[reqFocusingAreasSize];
+ for (size_t i = 0, j = 0; i < reqFocusingAreasSize; i += 5, j++) {
+ if (focusingAreas[j].weight != 0) {
+ reqFocusingAreas[i + 0] =
+ normalizedXToArray(focusingAreas[j].left);
+ reqFocusingAreas[i + 1] =
+ normalizedYToArray(focusingAreas[j].top);
+ reqFocusingAreas[i + 2] =
+ normalizedXToArray(focusingAreas[j].right);
+ reqFocusingAreas[i + 3] =
+ normalizedYToArray(focusingAreas[j].bottom);
+ } else {
+ reqFocusingAreas[i + 0] = 0;
+ reqFocusingAreas[i + 1] = 0;
+ reqFocusingAreas[i + 2] = 0;
+ reqFocusingAreas[i + 3] = 0;
+ }
+ reqFocusingAreas[i + 4] = focusingAreas[j].weight;
+ }
+ res = request->update(ANDROID_CONTROL_AF_REGIONS,
+ reqFocusingAreas, reqFocusingAreasSize);
+ delete[] reqFocusingAreas;
+ if (res != OK) return res;
+
+ res = request->update(ANDROID_CONTROL_AE_EXP_COMPENSATION,
+ &exposureCompensation, 1);
+ if (res != OK) return res;
+
+ size_t reqMeteringAreasSize = meteringAreas.size() * 5;
+ int32_t *reqMeteringAreas = new int32_t[reqMeteringAreasSize];
+ for (size_t i = 0, j = 0; i < reqMeteringAreasSize; i += 5, j++) {
+ if (meteringAreas[j].weight != 0) {
+ reqMeteringAreas[i + 0] =
+ normalizedXToArray(meteringAreas[j].left);
+ reqMeteringAreas[i + 1] =
+ normalizedYToArray(meteringAreas[j].top);
+ reqMeteringAreas[i + 2] =
+ normalizedXToArray(meteringAreas[j].right);
+ reqMeteringAreas[i + 3] =
+ normalizedYToArray(meteringAreas[j].bottom);
+ } else {
+ reqMeteringAreas[i + 0] = 0;
+ reqMeteringAreas[i + 1] = 0;
+ reqMeteringAreas[i + 2] = 0;
+ reqMeteringAreas[i + 3] = 0;
+ }
+ reqMeteringAreas[i + 4] = meteringAreas[j].weight;
+ }
+ res = request->update(ANDROID_CONTROL_AE_REGIONS,
+ reqMeteringAreas, reqMeteringAreasSize);
+ if (res != OK) {
+ delete[] reqMeteringAreas;
+ return res;
+ }
+
+ res = request->update(ANDROID_CONTROL_AWB_REGIONS,
+ reqMeteringAreas, reqMeteringAreasSize);
+ delete[] reqMeteringAreas;
+ if (res != OK) return res;
+
+ // Need to convert zoom index into a crop rectangle. The rectangle is
+ // chosen to maximize its area on the sensor
+
+ camera_metadata_ro_entry_t maxDigitalZoom =
+ staticInfo(ANDROID_SCALER_AVAILABLE_MAX_ZOOM);
+ float zoomIncrement = (maxDigitalZoom.data.f[0] - 1) /
+ (NUM_ZOOM_STEPS-1);
+ float zoomRatio = 1 + zoomIncrement * zoom;
+
+ float zoomLeft, zoomTop, zoomWidth, zoomHeight;
+ if (previewWidth >= previewHeight) {
+ zoomWidth = fastInfo.arrayWidth / zoomRatio;
+ zoomHeight = zoomWidth *
+ previewHeight / previewWidth;
+ } else {
+ zoomHeight = fastInfo.arrayHeight / zoomRatio;
+ zoomWidth = zoomHeight *
+ previewWidth / previewHeight;
+ }
+ zoomLeft = (fastInfo.arrayWidth - zoomWidth) / 2;
+ zoomTop = (fastInfo.arrayHeight - zoomHeight) / 2;
+
+ int32_t reqCropRegion[3] = { (int32_t)zoomLeft, (int32_t)zoomTop,
+ (int32_t)zoomWidth };
+ res = request->update(ANDROID_SCALER_CROP_REGION,
+ reqCropRegion, 3);
+ if (res != OK) return res;
+
+ // TODO: Decide how to map recordingHint, or whether just to ignore it
+
+ uint8_t reqVstabMode = videoStabilization ?
+ ANDROID_CONTROL_VIDEO_STABILIZATION_ON :
+ ANDROID_CONTROL_VIDEO_STABILIZATION_OFF;
+ res = request->update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE,
+ &reqVstabMode, 1);
+ if (res != OK) return res;
+
+ uint8_t reqFaceDetectMode = enableFaceDetect ?
+ fastInfo.bestFaceDetectMode :
+ (uint8_t)ANDROID_STATS_FACE_DETECTION_OFF;
+ res = request->update(ANDROID_STATS_FACE_DETECT_MODE,
+ &reqFaceDetectMode, 1);
+ if (res != OK) return res;
+
+ return OK;
+}
+
const char* Parameters::getStateName(State state) {
#define CASE_ENUM_TO_CHAR(x) case x: return(#x); break;
switch(state) {
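The crop-region block in updateRequest above maps the app's discrete zoom index onto a centered crop of the active array: the zoom ratio is interpolated linearly between 1x and the HAL's reported maximum, and the crop keeps the preview aspect ratio so it covers as much of the sensor as possible. A standalone sketch of the arithmetic (the struct, parameters, and example values are illustrative):

    #include <cstdio>

    struct CropRegion { float left, top, width, height; };

    // Map a zoom index in [0, steps-1] onto a centered crop of the
    // active pixel array, preserving the preview aspect ratio.
    CropRegion cropForZoom(int zoomIndex, int steps, float maxZoom,
            int arrayWidth, int arrayHeight,
            int previewWidth, int previewHeight) {
        float increment = (maxZoom - 1) / (steps - 1);
        float ratio = 1 + increment * zoomIndex;

        CropRegion c;
        if (previewWidth >= previewHeight) {
            // Wide preview: the width limits the crop.
            c.width = arrayWidth / ratio;
            c.height = c.width * previewHeight / previewWidth;
        } else {
            c.height = arrayHeight / ratio;
            c.width = c.height * previewWidth / previewHeight;
        }
        c.left = (arrayWidth - c.width) / 2;
        c.top = (arrayHeight - c.height) / 2;
        return c;
    }

    int main() {
        // E.g. 30 zoom steps up to 4x on a 4000x3000 array, 16:9 preview.
        CropRegion c = cropForZoom(15, 30, 4.0f, 4000, 3000, 1920, 1080);
        std::printf("crop: (%.0f, %.0f) %.0f x %.0f\n",
                c.left, c.top, c.width, c.height);
        return 0;
    }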
diff --git a/services/camera/libcameraservice/camera2/Parameters.h b/services/camera/libcameraservice/camera2/Parameters.h
index 817d001..e71d086 100644
--- a/services/camera/libcameraservice/camera2/Parameters.h
+++ b/services/camera/libcameraservice/camera2/Parameters.h
@@ -29,12 +29,17 @@
namespace android {
namespace camera2 {
-// Current camera state; this is the full state of the Camera under the old
-// camera API (contents of the CameraParameters object in a more-efficient
-// format, plus other state). The enum values are mostly based off the
-// corresponding camera2 enums, not the camera1 strings. A few are defined here
-// if they don't cleanly map to camera2 values.
+/**
+ * Current camera state; this is the full state of the Camera under the old
+ * camera API (contents of the CameraParameters object in a more-efficient
+ * format, plus other state). The enum values are mostly based off the
+ * corresponding camera2 enums, not the camera1 strings. A few are defined here
+ * if they don't cleanly map to camera2 values.
+ */
struct Parameters {
+ /**
+ * Parameters and other state
+ */
int cameraId;
int cameraFacing;
@@ -117,9 +122,13 @@
int currentAfTriggerId;
bool afInMotion;
+ int precaptureTriggerCounter;
+
uint32_t previewCallbackFlags;
bool previewCallbackOneShot;
+ bool zslMode;
+
// Overall camera state
enum State {
DISCONNECTED,
@@ -149,7 +158,9 @@
int32_t maxFaces;
} fastInfo;
- // Parameter manipulation and setup methods
+ /**
+ * Parameter manipulation and setup methods
+ */
Parameters(int cameraId, int cameraFacing);
~Parameters();
@@ -170,6 +181,9 @@
// Validate and update camera parameters based on new settings
status_t set(const String8 &params);
+ // Update passed-in request for common parameters
+ status_t updateRequest(CameraMetadata *request) const;
+
// Static methods for debugging and converting between camera1 and camera2
// parameters
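The new updateRequest declaration makes Parameters the single place where settings shared by preview, recording, and still-capture requests are applied: each use case builds its own request template, then overlays the common fields through this one method. A toy standalone analogue of that design, with a plain std::map standing in for CameraMetadata (all names invented):

    #include <cstdint>
    #include <map>
    #include <string>

    using Request = std::map<std::string, int32_t>;

    struct Params {
        int32_t fpsMin = 15, fpsMax = 30;
        bool aeLock = false;

        // Overlay the shared settings; per-request fields stay untouched.
        void updateRequest(Request* r) const {
            (*r)["ae.targetFpsRange.min"] = fpsMin;
            (*r)["ae.targetFpsRange.max"] = fpsMax;
            (*r)["ae.lock"] = aeLock ? 1 : 0;
        }
    };

    int main() {
        Params p;
        Request preview = {{"request.id", 1}};
        Request capture = {{"request.id", 2}};
        p.updateRequest(&preview);  // same knobs applied to both templates
        p.updateRequest(&capture);
        return 0;
    }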
diff --git a/services/camera/libcameraservice/camera2/ZslProcessor.cpp b/services/camera/libcameraservice/camera2/ZslProcessor.cpp
new file mode 100644
index 0000000..a39585e
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/ZslProcessor.cpp
@@ -0,0 +1,378 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera2Client::ZslProcessor"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+//#define LOG_NNDEBUG 0
+
+#ifdef LOG_NNDEBUG
+#define ALOGVV(...) ALOGV(__VA_ARGS__)
+#else
+#define ALOGVV(...) ((void)0)
+#endif
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "ZslProcessor.h"
+#include <gui/SurfaceTextureClient.h>
+#include "../Camera2Device.h"
+#include "../Camera2Client.h"
+
+
+namespace android {
+namespace camera2 {
+
+ZslProcessor::ZslProcessor(
+ wp<Camera2Client> client,
+ wp<CaptureSequencer> sequencer):
+ Thread(false),
+ mState(RUNNING),
+ mClient(client),
+ mSequencer(sequencer),
+ mZslBufferAvailable(false),
+ mZslStreamId(NO_STREAM),
+ mZslReprocessStreamId(NO_STREAM),
+ mFrameListHead(0),
+ mZslQueueHead(0),
+ mZslQueueTail(0) {
+ mZslQueue.insertAt(0, kZslBufferDepth);
+ mFrameList.insertAt(0, kFrameListDepth);
+ sp<CaptureSequencer> captureSequencer = mSequencer.promote();
+ if (captureSequencer != 0) captureSequencer->setZslProcessor(this);
+}
+
+ZslProcessor::~ZslProcessor() {
+ ALOGV("%s: Exit", __FUNCTION__);
+}
+
+void ZslProcessor::onFrameAvailable() {
+ Mutex::Autolock l(mInputMutex);
+ if (!mZslBufferAvailable) {
+ mZslBufferAvailable = true;
+ mZslBufferAvailableSignal.signal();
+ }
+}
+
+void ZslProcessor::onFrameAvailable(int32_t frameId, CameraMetadata &frame) {
+ Mutex::Autolock l(mInputMutex);
+ camera_metadata_entry_t entry;
+ entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
+ if (entry.count == 0) {
+ ALOGE("%s: Preview frame has no timestamp!", __FUNCTION__);
+ return;
+ }
+ nsecs_t timestamp = entry.data.i64[0];
+ ALOGVV("Got preview frame for timestamp %lld", timestamp);
+
+ if (mState != RUNNING) return;
+
+ mFrameList.editItemAt(mFrameListHead).acquire(frame);
+ mFrameListHead = (mFrameListHead + 1) % kFrameListDepth;
+
+ findMatchesLocked();
+}
+
+void ZslProcessor::onBufferReleased(buffer_handle_t *handle) {
+ Mutex::Autolock l(mInputMutex);
+
+ buffer_handle_t *expectedHandle =
+ &(mZslQueue[mZslQueueTail].buffer.mGraphicBuffer->handle);
+
+ if (handle != expectedHandle) {
+ ALOGE("%s: Expected buffer %p, got buffer %p",
+ __FUNCTION__, expectedHandle, handle);
+ }
+
+ mState = RUNNING;
+}
+
+status_t ZslProcessor::updateStream(const Parameters &params) {
+ ATRACE_CALL();
+ ALOGV("%s: Configuring ZSL streams", __FUNCTION__);
+ status_t res;
+
+ Mutex::Autolock l(mInputMutex);
+
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return OK;
+ sp<Camera2Device> device = client->getCameraDevice();
+
+ if (mZslConsumer == 0) {
+ // Create CPU buffer queue endpoint
+ mZslConsumer = new BufferItemConsumer(
+ GRALLOC_USAGE_HW_CAMERA_ZSL,
+ kZslBufferDepth,
+ true);
+ mZslConsumer->setFrameAvailableListener(this);
+ mZslConsumer->setName(String8("Camera2Client::ZslConsumer"));
+ mZslWindow = new SurfaceTextureClient(
+ mZslConsumer->getProducerInterface());
+ }
+
+ if (mZslStreamId != NO_STREAM) {
+ // Check if stream parameters have to change
+ uint32_t currentWidth, currentHeight;
+ res = device->getStreamInfo(mZslStreamId,
+ &currentWidth, &currentHeight, 0);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Error querying capture output stream info: "
+ "%s (%d)", __FUNCTION__,
+ client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+ if (currentWidth != (uint32_t)params.pictureWidth ||
+ currentHeight != (uint32_t)params.pictureHeight) {
+ res = device->deleteStream(mZslReprocessStreamId);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to delete old reprocess stream "
+ "for ZSL: %s (%d)", __FUNCTION__,
+ client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+ res = device->deleteStream(mZslStreamId);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Unable to delete old output stream "
+ "for ZSL: %s (%d)", __FUNCTION__,
+ client->getCameraId(), strerror(-res), res);
+ return res;
+ }
+ mZslStreamId = NO_STREAM;
+ }
+ }
+
+ if (mZslStreamId == NO_STREAM) {
+ // Create stream for HAL production
+ res = device->createStream(mZslWindow,
+ params.pictureWidth, params.pictureHeight,
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 0,
+ &mZslStreamId);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't create output stream for ZSL: "
+ "%s (%d)", __FUNCTION__, client->getCameraId(),
+ strerror(-res), res);
+ return res;
+ }
+ res = device->createReprocessStreamFromStream(mZslStreamId,
+ &mZslReprocessStreamId);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't create reprocess stream for ZSL: "
+ "%s (%d)", __FUNCTION__, client->getCameraId(),
+ strerror(-res), res);
+ return res;
+ }
+ }
+ client->registerFrameListener(Camera2Client::kPreviewRequestId, this);
+
+ return OK;
+}
+
+status_t ZslProcessor::deleteStream() {
+ ATRACE_CALL();
+ status_t res;
+
+ Mutex::Autolock l(mInputMutex);
+
+ if (mZslStreamId != NO_STREAM) {
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return OK;
+ sp<Camera2Device> device = client->getCameraDevice();
+
+ device->deleteStream(mZslReprocessStreamId);
+ mZslReprocessStreamId = NO_STREAM;
+ device->deleteStream(mZslStreamId);
+ mZslStreamId = NO_STREAM;
+ }
+ return OK;
+}
+
+int ZslProcessor::getStreamId() const {
+ Mutex::Autolock l(mInputMutex);
+ return mZslStreamId;
+}
+
+int ZslProcessor::getReprocessStreamId() const {
+ Mutex::Autolock l(mInputMutex);
+ return mZslReprocessStreamId;
+}
+
+status_t ZslProcessor::pushToReprocess(int32_t requestId) {
+ ALOGV("%s: Send in reprocess request with id %d",
+ __FUNCTION__, requestId);
+ Mutex::Autolock l(mInputMutex);
+ status_t res;
+ sp<Camera2Client> client = mClient.promote();
+
+ if (client == 0) return INVALID_OPERATION;
+
+ if (mZslQueueTail != mZslQueueHead) {
+ buffer_handle_t *handle =
+ &(mZslQueue[mZslQueueTail].buffer.mGraphicBuffer->handle);
+ CameraMetadata request = mZslQueue[mZslQueueTail].frame;
+ uint8_t requestType = ANDROID_REQUEST_TYPE_REPROCESS;
+ res = request.update(ANDROID_REQUEST_TYPE,
+ &requestType, 1);
+ int32_t inputStreams[1] = { mZslReprocessStreamId };
+ if (res == OK) res = request.update(ANDROID_REQUEST_INPUT_STREAMS,
+ inputStreams, 1);
+ int32_t outputStreams[1] = { client->getCaptureStreamId() };
+ if (res == OK) res = request.update(ANDROID_REQUEST_OUTPUT_STREAMS,
+ outputStreams, 1);
+ if (res == OK) res = request.update(ANDROID_REQUEST_ID,
+ &requestId, 1);
+
+ if (res != OK) {
+ ALOGE("%s: Unable to update frame to a reprocess request", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ res = client->getCameraDevice()->pushReprocessBuffer(mZslReprocessStreamId,
+ handle, this);
+ if (res != OK) {
+ ALOGE("%s: Unable to push buffer for reprocessing: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+
+ res = client->getCameraDevice()->capture(request);
+ if (res != OK) {
+ ALOGE("%s: Unable to send ZSL reprocess request to capture: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+
+ mState = LOCKED;
+ } else {
+ ALOGE("%s: Nothing to push", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ return OK;
+}
+
+void ZslProcessor::dump(int fd, const Vector<String16>& args) const {
+}
+
+bool ZslProcessor::threadLoop() {
+ status_t res;
+
+ {
+ Mutex::Autolock l(mInputMutex);
+ while (!mZslBufferAvailable) {
+ res = mZslBufferAvailableSignal.waitRelative(mInputMutex,
+ kWaitDuration);
+ if (res == TIMED_OUT) return true;
+ }
+ mZslBufferAvailable = false;
+ }
+
+ do {
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return false;
+ res = processNewZslBuffer(client);
+ } while (res == OK);
+
+ return true;
+}
+
+status_t ZslProcessor::processNewZslBuffer(sp<Camera2Client> &client) {
+ ATRACE_CALL();
+ status_t res;
+ Mutex::Autolock l(mInputMutex);
+
+ if (mState == LOCKED) {
+ BufferItemConsumer::BufferItem item;
+ res = mZslConsumer->acquireBuffer(&item);
+ if (res != OK) {
+ if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
+ ALOGE("%s: Camera %d: Error receiving ZSL image buffer: "
+ "%s (%d)", __FUNCTION__,
+ client->getCameraId(), strerror(-res), res);
+ }
+ return res;
+ }
+ mZslConsumer->releaseBuffer(item);
+ return OK;
+ }
+
+ ALOGVV("Got ZSL buffer: head: %d, tail: %d", mZslQueueHead, mZslQueueTail);
+
+ if ((mZslQueueHead + 1) % kZslBufferDepth == mZslQueueTail) {
+ mZslConsumer->releaseBuffer(mZslQueue[mZslQueueTail].buffer);
+ mZslQueue.replaceAt(mZslQueueTail);
+ mZslQueueTail = (mZslQueueTail + 1) % kZslBufferDepth;
+ }
+
+ ZslPair &queueHead = mZslQueue.editItemAt(mZslQueueHead);
+
+ res = mZslConsumer->acquireBuffer(&(queueHead.buffer));
+ if (res != OK) {
+ if (res != BufferItemConsumer::NO_BUFFER_AVAILABLE) {
+ ALOGE("%s: Camera %d: Error receiving ZSL image buffer: "
+ "%s (%d)", __FUNCTION__,
+ client->getCameraId(), strerror(-res), res);
+ }
+ return res;
+ }
+ queueHead.frame.release();
+
+ mZslQueueHead = (mZslQueueHead + 1) % kZslBufferDepth;
+
+ ALOGVV(" Added buffer, timestamp %lld", queueHead.buffer.mTimestamp);
+
+ findMatchesLocked();
+
+ return OK;
+}
+
+void ZslProcessor::findMatchesLocked() {
+ for (size_t i = 0; i < mZslQueue.size(); i++) {
+ ZslPair &queueEntry = mZslQueue.editItemAt(i);
+ nsecs_t bufferTimestamp = queueEntry.buffer.mTimestamp;
+ if (queueEntry.frame.isEmpty() && bufferTimestamp != 0) {
+ // Have buffer, no matching frame. Look for one
+ for (size_t j = 0; j < mFrameList.size(); j++) {
+ bool match = false;
+ CameraMetadata &frame = mFrameList.editItemAt(j);
+ if (!frame.isEmpty()) {
+ camera_metadata_entry_t entry;
+ entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
+ if (entry.count == 0) {
+ ALOGE("%s: Can't find timestamp in frame!",
+ __FUNCTION__);
+ continue;
+ }
+ nsecs_t frameTimestamp = entry.data.i64[0];
+ if (bufferTimestamp == frameTimestamp) {
+ ALOGVV("%s: Found match %lld", __FUNCTION__,
+ frameTimestamp);
+ match = true;
+ } else {
+ int64_t delta = bufferTimestamp - frameTimestamp;
+ if (delta < 0) delta = -delta;
+ if (delta < 1000000) { // tolerate up to 1 ms of skew
+ ALOGVV("%s: Found close match %lld (delta %lld)",
+ __FUNCTION__, bufferTimestamp, delta);
+ match = true;
+ }
+ }
+ }
+ if (match) {
+ queueEntry.frame.acquire(frame);
+ break;
+ }
+ }
+ }
+ }
+}
+
+}; // namespace camera2
+}; // namespace android
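findMatchesLocked above pairs each ZSL buffer with the metadata frame from the same sensor exposure by comparing timestamps, accepting an exact match or anything within 1 ms to absorb jitter between the buffer and metadata paths. A standalone sketch of just that matching rule (simplified containers, illustrative names):

    #include <cstdint>
    #include <vector>

    // One captured frame's metadata, reduced to its sensor timestamp.
    struct FrameMeta { int64_t timestamp; };

    // Find the metadata entry matching a buffer timestamp, tolerating
    // up to 1 ms (1,000,000 ns) of skew.
    const FrameMeta* findMatch(const std::vector<FrameMeta>& frames,
            int64_t bufferTimestamp) {
        static const int64_t kMaxSkewNs = 1000000;
        for (const FrameMeta& f : frames) {
            int64_t delta = bufferTimestamp - f.timestamp;
            if (delta < 0) delta = -delta;  // manual abs, no int truncation
            if (delta < kMaxSkewNs) return &f;
        }
        return nullptr;
    }

    int main() {
        std::vector<FrameMeta> frames = {{1000000000}, {1033333333}};
        // 567 ns away from the second frame: a "close match".
        return findMatch(frames, 1033333900) != nullptr ? 0 : 1;
    }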
diff --git a/services/camera/libcameraservice/camera2/ZslProcessor.h b/services/camera/libcameraservice/camera2/ZslProcessor.h
new file mode 100644
index 0000000..74921a3
--- /dev/null
+++ b/services/camera/libcameraservice/camera2/ZslProcessor.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR_H
+#define ANDROID_SERVERS_CAMERA_CAMERA2_ZSLPROCESSOR_H
+
+#include <utils/Thread.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+#include <gui/BufferItemConsumer.h>
+#include "Parameters.h"
+#include "FrameProcessor.h"
+#include "CameraMetadata.h"
+#include "Camera2Heap.h"
+#include "../Camera2Device.h"
+
+namespace android {
+
+class Camera2Client;
+
+namespace camera2 {
+
+class CaptureSequencer;
+
+/***
+ * ZSL queue processing
+ */
+class ZslProcessor:
+ virtual public Thread,
+ virtual public BufferItemConsumer::FrameAvailableListener,
+ virtual public FrameProcessor::FilteredListener,
+ virtual public Camera2Device::BufferReleasedListener {
+ public:
+ ZslProcessor(wp<Camera2Client> client, wp<CaptureSequencer> sequencer);
+ ~ZslProcessor();
+
+ // From mZslConsumer
+ virtual void onFrameAvailable();
+ // From FrameProcessor
+ virtual void onFrameAvailable(int32_t frameId, CameraMetadata &frame);
+
+ virtual void onBufferReleased(buffer_handle_t *handle);
+
+ status_t updateStream(const Parameters &params);
+ status_t deleteStream();
+ int getStreamId() const;
+ int getReprocessStreamId() const;
+
+ status_t pushToReprocess(int32_t requestId);
+
+ void dump(int fd, const Vector<String16>& args) const;
+ private:
+ static const nsecs_t kWaitDuration = 10000000; // 10 ms
+
+ enum {
+ RUNNING,
+ LOCKED
+ } mState;
+
+ wp<Camera2Client> mClient;
+ wp<CaptureSequencer> mSequencer;
+
+ mutable Mutex mInputMutex;
+ bool mZslBufferAvailable;
+ Condition mZslBufferAvailableSignal;
+
+ enum {
+ NO_STREAM = -1
+ };
+
+ int mZslStreamId;
+ int mZslReprocessStreamId;
+ sp<BufferItemConsumer> mZslConsumer;
+ sp<ANativeWindow> mZslWindow;
+
+ struct ZslPair {
+ BufferItemConsumer::BufferItem buffer;
+ CameraMetadata frame;
+ };
+
+ static const size_t kZslBufferDepth = 3;
+ static const size_t kFrameListDepth = kZslBufferDepth * 2;
+ Vector<CameraMetadata> mFrameList;
+ size_t mFrameListHead;
+
+ ZslPair mNextPair;
+
+ Vector<ZslPair> mZslQueue;
+ size_t mZslQueueHead;
+ size_t mZslQueueTail;
+
+ virtual bool threadLoop();
+
+ status_t processNewZslBuffer(sp<Camera2Client> &client);
+
+ // Match up entries from frame list to buffers in ZSL queue
+ void findMatchesLocked();
+};
+
+
+}; // namespace camera2
+}; // namespace android
+
+#endif
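The queue declared in this header is a fixed-depth ring buffer: mZslQueueHead is the next write slot, mZslQueueTail the oldest entry, and processNewZslBuffer evicts the tail when (head + 1) % depth == tail, so one slot always stays free to tell "full" from "empty". A standalone sketch of that index arithmetic (kDepth stands in for kZslBufferDepth; names invented):

    #include <cstddef>
    #include <cstdio>

    // Fixed-depth ring buffer indices, mirroring mZslQueueHead/Tail.
    template <typename T, size_t kDepth>
    class RingQueue {
    public:
        // Push a new entry, evicting the oldest one when full
        // (the real code releases the evicted buffer at this point).
        void push(const T& v) {
            if ((mHead + 1) % kDepth == mTail) {
                mTail = (mTail + 1) % kDepth;
            }
            mSlots[mHead] = v;
            mHead = (mHead + 1) % kDepth;
        }

        bool empty() const { return mHead == mTail; }

        // Pop the oldest entry; caller must check empty() first.
        T pop() {
            T v = mSlots[mTail];
            mTail = (mTail + 1) % kDepth;
            return v;
        }

    private:
        T mSlots[kDepth];
        size_t mHead = 0, mTail = 0;
    };

    int main() {
        RingQueue<int, 3> q;  // one slot stays unused, so capacity is 2
        q.push(1); q.push(2); q.push(3);  // third push evicts 1
        while (!q.empty()) std::printf("%d\n", q.pop());  // prints 2, 3
        return 0;
    }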