Merge "audioflinger: Update throttling logic in MixerThread" into nyc-mr1-dev
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index 24d5282..26d6679 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -471,8 +471,38 @@
if (!serviceRet.isOk()) {
ALOGE("%s: connect camera device failed: %s", __FUNCTION__, serviceRet.toString8().string());
+ // Convert serviceRet to camera_status_t
+ switch(serviceRet.serviceSpecificErrorCode()) {
+ case hardware::ICameraService::ERROR_DISCONNECTED:
+ ret = ACAMERA_ERROR_CAMERA_DISCONNECTED;
+ break;
+ case hardware::ICameraService::ERROR_CAMERA_IN_USE:
+ ret = ACAMERA_ERROR_CAMERA_IN_USE;
+ break;
+ case hardware::ICameraService::ERROR_MAX_CAMERAS_IN_USE:
+ ret = ACAMERA_ERROR_MAX_CAMERA_IN_USE;
+ break;
+ case hardware::ICameraService::ERROR_ILLEGAL_ARGUMENT:
+ ret = ACAMERA_ERROR_INVALID_PARAMETER;
+ break;
+ case hardware::ICameraService::ERROR_DEPRECATED_HAL:
+ // Should not reach here since we filtered legacy HALs earlier
+ ret = ACAMERA_ERROR_INVALID_PARAMETER;
+ break;
+ case hardware::ICameraService::ERROR_DISABLED:
+ ret = ACAMERA_ERROR_CAMERA_DISABLED;
+ break;
+ case hardware::ICameraService::ERROR_PERMISSION_DENIED:
+ ret = ACAMERA_ERROR_PERMISSION_DENIED;
+ break;
+ case hardware::ICameraService::ERROR_INVALID_OPERATION:
+ default:
+ ret = ACAMERA_ERROR_UNKNOWN;
+ break;
+ }
+
delete device;
- return ACAMERA_ERROR_CAMERA_DISCONNECTED;
+ return ret;
}
if (deviceRemote == nullptr) {
ALOGE("%s: connect camera device failed! remote device is null", __FUNCTION__);
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index e5fbba0..9e15a81 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -39,7 +39,8 @@
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
- $(TOP)/frameworks/native/include/media/openmax
+ $(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/frameworks/native/include/media/hardware
LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
LOCAL_CLANG := true
@@ -63,7 +64,8 @@
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
- $(TOP)/frameworks/native/include/media/openmax
+ $(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/frameworks/native/include/media/hardware
LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
LOCAL_CLANG := true
diff --git a/include/camera/ndk/NdkCameraCaptureSession.h b/include/camera/ndk/NdkCameraCaptureSession.h
index 085b614..68eff7a 100644
--- a/include/camera/ndk/NdkCameraCaptureSession.h
+++ b/include/camera/ndk/NdkCameraCaptureSession.h
@@ -14,6 +14,15 @@
* limitations under the License.
*/
+/**
+ * @addtogroup Camera
+ * @{
+ */
+
+/**
+ * @file NdkCameraCaptureSession.h
+ */
+
/*
* This file defines an NDK API.
* Do not remove methods.
@@ -34,63 +43,337 @@
extern "C" {
#endif
+/**
+ * ACameraCaptureSession is an opaque type that manages frame captures of a camera device.
+ *
+ * A pointer can be obtained using the {@link ACameraDevice_createCaptureSession} method.
+ */
typedef struct ACameraCaptureSession ACameraCaptureSession;
+/**
+ * The definition of camera capture session state callback.
+ *
+ * @param context The optional application context provided by user in
+ * {@link ACameraCaptureSession_stateCallbacks}.
+ * @param session The camera capture session whose state is changing.
+ */
typedef void (*ACameraCaptureSession_stateCallback)(void* context, ACameraCaptureSession *session);
typedef struct ACameraCaptureSession_stateCallbacks {
+ /// optional application context.
void* context;
- ACameraCaptureSession_stateCallback onClosed; // session is unusable after this callback
+
+ /**
+ * This callback is called when the session is closed and deleted from memory.
+ *
+ * <p>A session is closed when {@link ACameraCaptureSession_close} is called, a new session
+ * is created by the parent camera device,
+ * or when the parent camera device is closed (either by the user closing the device,
+ * or due to a camera device disconnection or fatal error).</p>
+ *
+ * <p>Once this callback is called, all access to this ACameraCaptureSession object will cause
+ * a crash.</p>
+ */
+ ACameraCaptureSession_stateCallback onClosed;
+
+ /**
+ * This callback is called every time the session has no more capture requests to process.
+ *
+ * <p>This callback will be invoked any time the session finishes processing
+ * all of its active capture requests, and no repeating request or burst is set up.</p>
+ */
ACameraCaptureSession_stateCallback onReady;
+
+ /**
+ * This callback is called when the session starts actively processing capture requests.
+ *
+ * <p>If the session runs out of capture requests to process and calls {@link onReady},
+ * then this callback will be invoked again once new requests are submitted for capture.</p>
+ */
ACameraCaptureSession_stateCallback onActive;
} ACameraCaptureSession_stateCallbacks;
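
A minimal sketch of wiring these state callbacks (handler names are illustrative; context may
be any application-owned pointer, or NULL):

    static void onSessionClosed(void* context, ACameraCaptureSession* session) {
        // release any application state tied to this session
    }
    static void onSessionReady(void* context, ACameraCaptureSession* session) {
        // no pending capture requests; safe to reconfigure
    }
    static void onSessionActive(void* context, ACameraCaptureSession* session) {
        // capture requests are being processed
    }

    ACameraCaptureSession_stateCallbacks sessionCallbacks = {
        .context  = NULL,
        .onClosed = onSessionClosed,
        .onReady  = onSessionReady,
        .onActive = onSessionActive,
    };
    // Passed later to ACameraDevice_createCaptureSession(...).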
+/// Enum for describing error reason in {@link ACameraCaptureFailure}
enum {
+ /**
+ * The capture session has dropped this frame due to an
+ * {@link ACameraCaptureSession_abortCaptures} call.
+ */
CAPTURE_FAILURE_REASON_FLUSHED = 0,
+
+ /**
+ * The capture session has dropped this frame due to an error in the framework.
+ */
CAPTURE_FAILURE_REASON_ERROR
};
+/// Struct to describe a capture failure
typedef struct ACameraCaptureFailure {
+ /**
+ * The frame number associated with this failed capture.
+ *
+ * <p>Whenever a request has been processed, regardless of failure or success,
+ * it gets a unique frame number assigned to its future result or failed capture.</p>
+ *
+ * <p>This value monotonically increments, starting with 0,
+ * for every new result or failure; and the scope is the lifetime of the
+ * {@link ACameraDevice}.</p>
+ */
int64_t frameNumber;
+
+ /**
+ * Determine why the request was dropped, whether due to an error or to a user
+ * action.
+ *
+ * @see CAPTURE_FAILURE_REASON_ERROR
+ * @see CAPTURE_FAILURE_REASON_FLUSHED
+ */
int reason;
+
+ /**
+ * The sequence ID for this failed capture that was returned by the
+ * {@link ACameraCaptureSession_capture} or {@link ACameraCaptureSession_setRepeatingRequest}.
+ *
+ * <p>The sequence ID is a unique monotonically increasing value starting from 0,
+ * incremented every time a new group of requests is submitted to the ACameraDevice.</p>
+ */
int sequenceId;
+
+ /**
+ * Determine if the image was captured from the camera.
+ *
+ * <p>If the image was not captured, no image buffers will be available.
+ * If the image was captured, then image buffers may be available.</p>
+ *
+ */
bool wasImageCaptured;
} ACameraCaptureFailure;
-/* Note that the ACaptureRequest* in the callback will be different to what app has submitted,
- but the contents will still be the same as what app submitted */
+/**
+ * The definition of camera capture start callback.
+ *
+ * @param context The optional application context provided by user in
+ * {@link ACameraCaptureSession_captureCallbacks}.
+ * @param session The camera capture session of interest.
+ * @param request The capture request that is starting. Note that this pointer points to a copy of
+ *                the capture request sent by the application, so the address is different from what
+ *                the application sent, but the content will match. This request will be freed by
+ *                the framework immediately after this callback returns.
+ * @param timestamp The timestamp when the capture is started. This timestamp will match
+ *                  {@link ACAMERA_SENSOR_TIMESTAMP} of the {@link ACameraMetadata} in the
+ *                  {@link ACameraCaptureSession_captureCallbacks#onCaptureCompleted} callback.
+ */
typedef void (*ACameraCaptureSession_captureCallback_start)(
void* context, ACameraCaptureSession* session,
const ACaptureRequest* request, int64_t timestamp);
+/**
+ * The definition of camera capture progress/result callback.
+ *
+ * @param context The optional application context provided by user in
+ * {@link ACameraCaptureSession_captureCallbacks}.
+ * @param session The camera capture session of interest.
+ * @param request The capture request of interest. Note that this pointer points to a copy of
+ *                the capture request sent by the application, so the address is different from what
+ *                the application sent, but the content will match. This request will be freed by
+ *                the framework immediately after this callback returns.
+ * @param result The capture result metadata reported by camera device
+ */
typedef void (*ACameraCaptureSession_captureCallback_result)(
void* context, ACameraCaptureSession* session,
ACaptureRequest* request, const ACameraMetadata* result);
+/**
+ * The definition of camera capture failure callback.
+ *
+ * @param context The optional application context provided by user in
+ * {@link ACameraCaptureSession_captureCallbacks}.
+ * @param session The camera capture session of interest.
+ * @param request The capture request of interest. Note that this pointer points to a copy of
+ *                the capture request sent by the application, so the address is different from what
+ *                the application sent, but the content will match. This request will be freed by
+ *                the framework immediately after this callback returns.
+ * @param failure The {@link ACameraCaptureFailure} that describes the capture failure.
+ */
typedef void (*ACameraCaptureSession_captureCallback_failed)(
void* context, ACameraCaptureSession* session,
ACaptureRequest* request, ACameraCaptureFailure* failure);
+/**
+ * The definition of camera sequence end callback.
+ *
+ * @param context The optional application context provided by user in
+ * {@link ACameraCaptureSession_captureCallbacks}.
+ * @param session The camera capture session of interest.
+ * @param sequenceId The capture sequence ID of the finished sequence.
+ * @param frameNumber The frame number of the last frame of this sequence.
+ */
typedef void (*ACameraCaptureSession_captureCallback_sequenceEnd)(
void* context, ACameraCaptureSession* session,
int sequenceId, int64_t frameNumber);
+/**
+ * The definition of camera sequence aborted callback.
+ *
+ * @param context The optional application context provided by user in
+ * {@link ACameraCaptureSession_captureCallbacks}.
+ * @param session The camera capture session of interest.
+ * @param sequenceId The capture sequence ID of the aborted sequence.
+ */
typedef void (*ACameraCaptureSession_captureCallback_sequenceAbort)(
void* context, ACameraCaptureSession* session,
int sequenceId);
+/**
+ * The definition of camera buffer lost callback.
+ *
+ * @param context The optional application context provided by user in
+ * {@link ACameraCaptureSession_captureCallbacks}.
+ * @param session The camera capture session of interest.
+ * @param request The capture request of interest. Note that this pointer points to a copy of
+ *                the capture request sent by the application, so the address is different from what
+ *                the application sent, but the content will match. This request will be freed by
+ *                the framework immediately after this callback returns.
+ * @param window The {@link ANativeWindow} that the lost buffer would have been sent to.
+ * @param frameNumber The frame number of the lost buffer.
+ */
typedef void (*ACameraCaptureSession_captureCallback_bufferLost)(
void* context, ACameraCaptureSession* session,
ACaptureRequest* request, ANativeWindow* window, int64_t frameNumber);
typedef struct ACameraCaptureSession_captureCallbacks {
- void* context;
+ /// optional application context.
+ void* context;
+
+ /**
+ * This callback is called when the camera device has started capturing
+ * the output image for the request, at the beginning of image exposure.
+ *
+ * <p>This callback is invoked right as
+ * the capture of a frame begins, so it is the most appropriate time
+ * for playing a shutter sound, or triggering UI indicators of capture.</p>
+ *
+ * <p>The request that is being used for this capture is provided, along
+ * with the actual timestamp for the start of exposure.
+ * This timestamp matches the timestamps that will be
+ * included in {@link ACAMERA_SENSOR_TIMESTAMP} of the {@link ACameraMetadata} in
+ * {@link onCaptureCompleted} callback,
+ * and in the buffers sent to each output ANativeWindow. These buffer
+ * timestamps are accessible through, for example,
+ * {@link AImage_getTimestamp} or
+ * <a href="http://developer.android.com/reference/android/graphics/SurfaceTexture.html#getTimestamp()">
+ * android.graphics.SurfaceTexture#getTimestamp()</a>.</p>
+ *
+ * <p>Note that the ACaptureRequest pointer in the callback will not match what the application has
+ * submitted, but the contents of the ACaptureRequest will match what the application submitted.</p>
+ *
+ */
ACameraCaptureSession_captureCallback_start onCaptureStarted;
+
+ /**
+ * This callback is called when an image capture makes partial forward progress; some
+ * (but not all) results from an image capture are available.
+ *
+ * <p>The result provided here will contain some subset of the fields of
+ * a full result. Multiple {@link onCaptureProgressed} calls may happen per
+ * capture; a given result field will only be present in one partial
+ * capture at most. The final {@link onCaptureCompleted} call will always
+ * contain all the fields (in particular, the union of all the fields of all
+ * the partial results composing the total result).</p>
+ *
+ * <p>For each request, some result data might be available earlier than others. The typical
+ * delay between each partial result (per request) is a single frame interval.
+ * For performance-oriented use-cases, applications should query the metadata they need
+ * to make forward progress from the partial results and avoid waiting for the completed
+ * result.</p>
+ *
+ * <p>For a particular request, {@link onCaptureProgressed} may happen before or after
+ * {@link onCaptureStarted}.</p>
+ *
+ * <p>Each request will generate at least one partial result, and at most
+ * {@link ACAMERA_REQUEST_PARTIAL_RESULT_COUNT} partial results.</p>
+ *
+ * <p>Depending on the request settings, the number of partial results per request
+ * will vary, although the partial count typically stays the same as long as the set of
+ * enabled camera device subsystems stays the same.</p>
+ *
+ * <p>Note that the ACaptureRequest pointer in the callback will not match what the application has
+ * submitted, but the contents of the ACaptureRequest will match what the application submitted.</p>
+ */
ACameraCaptureSession_captureCallback_result onCaptureProgressed;
+
+ /**
+ * This callback is called when an image capture has fully completed and all the
+ * result metadata is available.
+ *
+ * <p>This callback will always fire after the last {@link onCaptureProgressed};
+ * in other words, no more partial results will be delivered once the completed result
+ * is available.</p>
+ *
+ * <p>For performance-intensive use-cases where latency is a factor, consider
+ * using {@link onCaptureProgressed} instead.</p>
+ *
+ * <p>Note that the ACaptureRequest pointer in the callback will not match what the application has
+ * submitted, but the contents of the ACaptureRequest will match what the application submitted.</p>
+ */
ACameraCaptureSession_captureCallback_result onCaptureCompleted;
+
+ /**
+ * This callback is called instead of {@link onCaptureCompleted} when the
+ * camera device failed to produce a capture result for the
+ * request.
+ *
+ * <p>Other requests are unaffected, and some or all image buffers from
+ * the capture may have been pushed to their respective output
+ * streams.</p>
+ *
+ * <p>Note that the ACaptureRequest pointer in the callback will not match what the application has
+ * submitted, but the contents of the ACaptureRequest will match what the application submitted.</p>
+ *
+ * @see ACameraCaptureFailure
+ */
ACameraCaptureSession_captureCallback_failed onCaptureFailed;
+
+ /**
+ * This callback is called independently of the others in {@link ACameraCaptureSession_captureCallbacks},
+ * when a capture sequence finishes and all capture results
+ * or capture failures for it have been returned via this {@link ACameraCaptureSession_captureCallbacks}.
+ *
+ * <p>In total, there will be at least one result/failure returned by this listener
+ * before this callback is invoked. If the capture sequence is aborted before any
+ * requests have been processed, {@link onCaptureSequenceAborted} is invoked instead.</p>
+ */
ACameraCaptureSession_captureCallback_sequenceEnd onCaptureSequenceCompleted;
+
+ /**
+ * This callback is called independently of the others in {@link ACameraCaptureSession_captureCallbacks},
+ * when a capture sequence aborts before any capture result
+ * or capture failure for it has been returned via this {@link ACameraCaptureSession_captureCallbacks}.
+ *
+ * <p>Due to the asynchronous nature of the camera device, not all submitted captures
+ * are immediately processed. It is possible to clear out the pending requests
+ * by a variety of operations such as {@link ACameraCaptureSession_stopRepeating} or
+ * {@link ACameraCaptureSession_abortCaptures}. When such an event happens,
+ * {@link onCaptureSequenceCompleted} will not be called.</p>
+ */
ACameraCaptureSession_captureCallback_sequenceAbort onCaptureSequenceAborted;
+
+ /**
+ * This callback is called if a single buffer for a capture could not be sent to its
+ * destination ANativeWindow.
+ *
+ * <p>If the whole capture failed, then {@link onCaptureFailed} will be called instead. If
+ * some but not all buffers were captured but the result metadata will not be available,
+ * then onCaptureFailed will be invoked with {@link ACameraCaptureFailure#wasImageCaptured}
+ * returning true, along with one or more calls to {@link onCaptureBufferLost} for the
+ * failed outputs.</p>
+ *
+ * <p>Note that the ACaptureRequest pointer in the callback will not match what the application has
+ * submitted, but the contents of the ACaptureRequest will match what the application submitted.
+ * The ANativeWindow pointer will always match what the application submitted in
+ * {@link ACameraDevice_createCaptureSession}.</p>
+ *
+ */
ACameraCaptureSession_captureCallback_bufferLost onCaptureBufferLost;
} ACameraCaptureSession_captureCallbacks;
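
A sketch of a capture-callback set that only tracks failures, assuming entries the application
does not need may be left NULL (the handler name is illustrative):

    static void onFailed(void* context, ACameraCaptureSession* session,
                         ACaptureRequest* request, ACameraCaptureFailure* failure) {
        // failure->reason is CAPTURE_FAILURE_REASON_FLUSHED or CAPTURE_FAILURE_REASON_ERROR;
        // failure->frameNumber and failure->sequenceId identify the dropped capture
    }

    ACameraCaptureSession_captureCallbacks captureCallbacks = {
        .context         = NULL,
        .onCaptureFailed = onFailed,
        // remaining members are zero-initialized (NULL) by the designated initializer
    };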
@@ -98,45 +381,208 @@
CAPTURE_SEQUENCE_ID_NONE = -1
};
-/*
- * Close capture session
+/**
+ * Close this capture session.
+ *
+ * <p>Closing a session frees up the target output Surfaces of the session for reuse with either
+ * a new session, or to other APIs that can draw to Surfaces.</p>
+ *
+ * <p>Note that creating a new capture session with {@link ACameraDevice_createCaptureSession}
+ * will close any existing capture session automatically, and call the older session listener's
+ * {@link ACameraCaptureSession_stateCallbacks#onClosed} callback. Using
+ * {@link ACameraDevice_createCaptureSession} directly without closing is the recommended approach
+ * for quickly switching to a new session, since unchanged target outputs can be reused more
+ * efficiently.</p>
+ *
+ * <p>After a session is closed and before {@link ACameraCaptureSession_stateCallbacks#onClosed}
+ * is called, all methods invoked on the session will return {@link ACAMERA_ERROR_SESSION_CLOSED},
+ * and any repeating requests are stopped (as if {@link ACameraCaptureSession_stopRepeating} was
+ * called). However, any in-progress capture requests submitted to the session will be completed as
+ * normal; once all captures have completed and the session has been torn down,
+ * {@link ACameraCaptureSession_stateCallbacks#onClosed} callback will be called and the session
+ * will be removed from memory.</p>
+ *
+ * <p>Closing a session is idempotent; closing more than once has no effect.</p>
+ *
+ * @param session the capture session of interest
*/
-void ACameraCaptureSession_close(ACameraCaptureSession*);
+void ACameraCaptureSession_close(ACameraCaptureSession* session);
struct ACameraDevice;
typedef struct ACameraDevice ACameraDevice;
/**
- * Get the camera device associated with this capture session
+ * Get the ACameraDevice pointer associated with this capture session. On success, the device
+ * pointer is stored in the device argument.
+ *
+ * @param session the capture session of interest
+ * @param device the {@link ACameraDevice} associated with the session. Will be set to NULL
+ * if the session is closed or this method fails.
+ * @return <ul><li>
+ * {@link ACAMERA_OK} if the method call succeeds. The {@link ACameraDevice}
+ * will be stored in device argument</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if session or device is NULL</li>
+ * <li>{@link ACAMERA_ERROR_SESSION_CLOSED} if the capture session has been closed</li>
+ * <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons</li></ul>
+ *
*/
camera_status_t ACameraCaptureSession_getDevice(
- ACameraCaptureSession*, ACameraDevice** device);
+ ACameraCaptureSession* session, /*out*/ACameraDevice** device);
/**
- * Send capture request(s)
+ * Submit an array of requests to be captured in sequence as a burst.
+ *
+ * <p>The burst will be captured in the minimum amount of time possible, and will not be
+ * interleaved with requests submitted by other capture or repeat calls.</p>
+ *
+ * <p>Each capture produces one {@link ACameraMetadata} as a capture result and image buffers for
+ * one or more target {@link ANativeWindow}s. The target ANativeWindows (set with
+ * {@link ACaptureRequest_addTarget}) must be a subset of the ANativeWindows provided when
+ * this capture session was created.</p>
+ *
+ * @param session the capture session of interest
+ * @param callbacks the {@link ACameraCaptureSession_captureCallbacks} to be associated with this capture
+ * sequence. No capture callback will be fired if this is set to NULL.
+ * @param numRequests number of requests in requests argument. Must be at least 1.
+ * @param requests an array of {@link ACaptureRequest} to be captured. Length must be at least
+ * numRequests.
+ * @param captureSequenceId the capture sequence ID associated with this capture method invocation
+ * will be stored here if this argument is not NULL and the method call succeeds.
+ * When this argument is set to NULL, the capture sequence ID will not be returned.
+ *
+ * @return <ul><li>
+ * {@link ACAMERA_OK} if the method succeeds. captureSequenceId will be filled
+ * if it is not NULL.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if session or requests is NULL, or
+ * if numRequests < 1</li>
+ * <li>{@link ACAMERA_ERROR_SESSION_CLOSED} if the capture session has been closed</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if the camera device is closed</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DEVICE} if the camera device encounters fatal error</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_SERVICE} if the camera service encounters fatal error</li>
+ * <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons</li></ul>
*/
camera_status_t ACameraCaptureSession_capture(
- ACameraCaptureSession*, /*optional*/ACameraCaptureSession_captureCallbacks*,
+ ACameraCaptureSession* session,
+ /*optional*/ACameraCaptureSession_captureCallbacks* callbacks,
int numRequests, ACaptureRequest** requests,
/*optional*/int* captureSequenceId);
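
A usage sketch, assuming a valid `session`, a prepared `stillRequest`, and the
`captureCallbacks` struct from the earlier sketch:

    ACaptureRequest* requests[1] = { stillRequest };
    int seqId = CAPTURE_SEQUENCE_ID_NONE;
    camera_status_t status = ACameraCaptureSession_capture(
            session, &captureCallbacks, /*numRequests*/ 1, requests, &seqId);
    // On ACAMERA_OK, seqId identifies this burst in the sequence-end callbacks.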
/**
- * Send repeating capture request(s)
+ * Request endlessly repeating capture of a sequence of images by this capture session.
+ *
+ * <p>With this method, the camera device will continually capture images,
+ * cycling through the settings in the provided list of
+ * {@link ACaptureRequest}, at the maximum rate possible.</p>
+ *
+ * <p>If a request is submitted through {@link ACameraCaptureSession_capture},
+ * the current repetition of the request list will be
+ * completed before the higher-priority request is handled. This guarantees
+ * that the application always receives a complete repeat burst captured in
+ * minimal time, instead of bursts interleaved with higher-priority
+ * captures, or incomplete captures.</p>
+ *
+ * <p>Repeating burst requests are a simple way for an application to
+ * maintain a preview or other continuous stream of frames where each
+ * request is different in a predictable way, without having to continually
+ * submit requests through {@link ACameraCaptureSession_capture}.</p>
+ *
+ * <p>To stop the repeating capture, call {@link ACameraCaptureSession_stopRepeating}. Any
+ * ongoing burst will still be completed, however. Calling
+ * {@link ACameraCaptureSession_abortCaptures} will also clear the request.</p>
+ *
+ * <p>Calling this method will replace any previously-set repeating request list
+ * set up by this method, although any in-progress burst will be completed before the new repeat
+ * burst is used.</p>
+ *
+ * @param session the capture session of interest
+ * @param callbacks the {@link ACameraCaptureSession_captureCallbacks} to be associated with this
+ * capture sequence. No capture callback will be fired if callbacks is set to NULL.
+ * @param numRequests number of requests in requests array. Must be at least 1.
+ * @param requests an array of {@link ACaptureRequest} to be captured. Length must be at least
+ * numRequests.
+ * @param captureSequenceId the capture sequence ID associated with this capture method invocation
+ * will be stored here if this argument is not NULL and the method call succeeds.
+ * When this argument is set to NULL, the capture sequence ID will not be returned.
+ *
+ * @return <ul><li>
+ * {@link ACAMERA_OK} if the method succeeds. captureSequenceId will be filled
+ * if it is not NULL.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if session or requests is NULL, or
+ * if numRequests < 1</li>
+ * <li>{@link ACAMERA_ERROR_SESSION_CLOSED} if the capture session has been closed</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if the camera device is closed</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DEVICE} if the camera device encounters fatal error</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_SERVICE} if the camera service encounters fatal error</li>
+ * <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons</li></ul>
*/
camera_status_t ACameraCaptureSession_setRepeatingRequest(
- ACameraCaptureSession*, /*optional*/ACameraCaptureSession_captureCallbacks*,
+ ACameraCaptureSession* session,
+ /*optional*/ACameraCaptureSession_captureCallbacks* callbacks,
int numRequests, ACaptureRequest** requests,
/*optional*/int* captureSequenceId);
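
A usage sketch for a simple preview loop, assuming a valid `session` and a `previewRequest`
built from TEMPLATE_PREVIEW; callbacks are left NULL here since no per-frame notification
is needed:

    ACaptureRequest* repeating[1] = { previewRequest };
    camera_status_t status = ACameraCaptureSession_setRepeatingRequest(
            session, /*callbacks*/ NULL, 1, repeating, /*captureSequenceId*/ NULL);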
/**
- * Stop repeating capture request(s)
+ * Cancel any ongoing repeating capture set by {@link ACameraCaptureSession_setRepeatingRequest}.
+ * Has no effect on requests submitted through {@link ACameraCaptureSession_capture}.
+ *
+ * <p>Any currently in-flight captures will still complete, as will any burst that is
+ * mid-capture. To ensure that the device has finished processing all of its capture requests
+ * and is in ready state, wait for the {@link ACameraCaptureSession_stateCallbacks#onReady} callback
+ * after calling this method.</p>
+ *
+ * @param session the capture session of interest
+ *
+ * @return <ul><li>
+ *         {@link ACAMERA_OK} if the method succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if session is NULL.</li>
+ * <li>{@link ACAMERA_ERROR_SESSION_CLOSED} if the capture session has been closed</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if the camera device is closed</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DEVICE} if the camera device encounters fatal error</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_SERVICE} if the camera service encounters fatal error</li>
+ * <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons</li></ul>
*/
-camera_status_t ACameraCaptureSession_stopRepeating(ACameraCaptureSession*);
+camera_status_t ACameraCaptureSession_stopRepeating(ACameraCaptureSession* session);
/**
- * Stop all capture requests as soon as possible
+ * Discard all captures currently pending and in-progress as fast as possible.
+ *
+ * <p>The camera device will discard all of its current work as fast as possible. Some in-flight
+ * captures may complete successfully and call
+ * {@link ACameraCaptureSession_captureCallbacks#onCaptureCompleted},
+ * while others will trigger their {@link ACameraCaptureSession_captureCallbacks#onCaptureFailed}
+ * callbacks. If a repeating request list is set, it will be cleared.</p>
+ *
+ * <p>This method is the fastest way to switch the camera device to a new session with
+ * {@link ACameraDevice_createCaptureSession}, at the cost of discarding in-progress
+ * work. It must be called before the new session is created. Once all pending requests are
+ * either completed or thrown away, the {@link ACameraCaptureSession_stateCallbacks#onReady}
+ * callback will be called, if the session has not been closed. Otherwise, the
+ * {@link ACameraCaptureSession_stateCallbacks#onClosed}
+ * callback will be fired when a new session is created by the camera device and the previous
+ * session is being removed from memory.</p>
+ *
+ * <p>Cancelling will introduce at least a brief pause in the stream of data from the camera
+ * device, since once the camera device is emptied, the first new request has to make it through
+ * the entire camera pipeline before new output buffers are produced.</p>
+ *
+ * <p>This means that using ACameraCaptureSession_abortCaptures to simply remove pending requests is
+ * not recommended; it's best used for quickly switching output configurations, or for cancelling
+ * long in-progress requests (such as a multi-second capture).</p>
+ *
+ * @param session the capture session of interest
+ *
+ * @return <ul><li>
+ *         {@link ACAMERA_OK} if the method succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if session is NULL.</li>
+ * <li>{@link ACAMERA_ERROR_SESSION_CLOSED} if the capture session has been closed</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if the camera device is closed</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DEVICE} if the camera device encounters fatal error</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_SERVICE} if the camera service encounters fatal error</li>
+ * <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons</li></ul>
*/
-camera_status_t ACameraCaptureSession_abortCaptures(ACameraCaptureSession*);
+camera_status_t ACameraCaptureSession_abortCaptures(ACameraCaptureSession* session);
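
A sketch contrasting the two ways to wind down a session before reconfiguring, per the
semantics described above:

    // Gentle: stop the repeating request and let in-flight captures finish;
    // wait for the state callbacks' onReady before reconfiguring.
    ACameraCaptureSession_stopRepeating(session);

    // Fast: discard pending and in-progress work (clearing any repeating
    // request list) at the cost of a brief pipeline stall.
    ACameraCaptureSession_abortCaptures(session);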
#ifdef __cplusplus
@@ -144,3 +590,5 @@
#endif
#endif // _NDK_CAMERA_CAPTURE_SESSION_H
+
+/** @} */
diff --git a/include/camera/ndk/NdkCameraDevice.h b/include/camera/ndk/NdkCameraDevice.h
index 2008a68..fb124a0 100644
--- a/include/camera/ndk/NdkCameraDevice.h
+++ b/include/camera/ndk/NdkCameraDevice.h
@@ -14,6 +14,15 @@
* limitations under the License.
*/
+/**
+ * @addtogroup Camera
+ * @{
+ */
+
+/**
+ * @file NdkCameraDevice.h
+ */
+
/*
* This file defines an NDK API.
* Do not remove methods.
@@ -36,71 +45,614 @@
extern "C" {
#endif
+/**
+ * ACameraDevice is an opaque type that provides access to a camera device.
+ *
+ * A pointer can be obtained using the {@link ACameraManager_openCamera} method.
+ */
typedef struct ACameraDevice ACameraDevice;
-// Struct to hold camera state callbacks
+/// Enum for ACameraDevice_ErrorStateCallback error code
+enum {
+ /**
+ * The camera device is in use already.
+ */
+ ERROR_CAMERA_IN_USE = 1,
+
+ /**
+ * The system-wide limit for number of open cameras or camera resources has
+ * been reached, and more camera devices cannot be opened until previous
+ * instances are closed.
+ */
+ ERROR_MAX_CAMERAS_IN_USE = 2,
+
+ /**
+ * The camera is disabled due to a device policy, and cannot be opened.
+ */
+ ERROR_CAMERA_DISABLED = 3,
+
+ /**
+ * The camera device has encountered a fatal error.
+ * <p>The camera device needs to be re-opened to be used again.</p>
+ */
+ ERROR_CAMERA_DEVICE = 4,
+
+ /**
+ * The camera service has encountered a fatal error.
+ * <p>The Android device may need to be shut down and restarted to restore
+ * camera function, or there may be a persistent hardware problem.
+ * An attempt at recovery may be possible by closing the
+ * CameraDevice and the CameraManager, and trying to acquire all resources
+ * again from scratch.</p>
+ */
+ ERROR_CAMERA_SERVICE = 5
+};
+
+/**
+ * Camera device state callbacks to be used in {@link ACameraDevice_stateCallbacks}.
+ *
+ * @param context The optional context in {@link ACameraDevice_stateCallbacks} will be
+ * passed to this callback.
+ * @param device The {@link ACameraDevice} that is being disconnected.
+ */
typedef void (*ACameraDevice_StateCallback)(void* context, ACameraDevice* device);
+
+/**
+ * Camera device error state callbacks to be used in {@link ACameraDevice_stateCallbacks}.
+ *
+ * @param context The optional context in {@link ACameraDevice_stateCallbacks} will be
+ * passed to this callback.
+ * @param device The {@link ACameraDevice} that has encountered the error.
+ * @param error The error code that describes the cause of this error callback. See the following
+ *              links for more detail.
+ *
+ * @see ERROR_CAMERA_IN_USE
+ * @see ERROR_MAX_CAMERAS_IN_USE
+ * @see ERROR_CAMERA_DISABLED
+ * @see ERROR_CAMERA_DEVICE
+ * @see ERROR_CAMERA_SERVICE
+ */
typedef void (*ACameraDevice_ErrorStateCallback)(void* context, ACameraDevice* device, int error);
typedef struct ACameraDevice_StateCallbacks {
+ /// optional application context.
void* context;
- ACameraDevice_StateCallback onDisconnected; // Device is unusable after this callback
- ACameraDevice_ErrorStateCallback onError; // Device is unusable after this callback
+
+ /**
+ * The function is called when a camera device is no longer available for use.
+ *
+ * <p>Any attempt to call API methods on this ACameraDevice will return
+ * {@link ACAMERA_ERROR_CAMERA_DISCONNECTED}. The disconnection could be due to a
+ * change in security policy or permissions; the physical disconnection
+ * of a removable camera device; or the camera being needed for a
+ * higher-priority camera API client.</p>
+ *
+ * <p>The application should clean up the camera with {@link ACameraDevice_close} after
+ * this happens, as it is not recoverable until the camera can be opened
+ * again.</p>
+ *
+ */
+ ACameraDevice_StateCallback onDisconnected;
+
+ /**
+ * The function is called when a camera device has encountered a serious error.
+ *
+ * <p>This indicates a failure of the camera device or camera service in some way.
+ * Any attempt to call API methods on this ACameraDevice in the future will return
+ * {@link ACAMERA_ERROR_CAMERA_DISCONNECTED}.</p>
+ *
+ * <p>There may still be capture completion or camera stream callbacks that will be called
+ * after this error is received.</p>
+ *
+ * <p>The application should clean up the camera with {@link ACameraDevice_close} after this
+ * happens. Further attempts at recovery are error-code specific.</p>
+ *
+ */
+ ACameraDevice_ErrorStateCallback onError;
} ACameraDevice_stateCallbacks;
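
A minimal sketch of the device-level callbacks an application passes to
ACameraManager_openCamera (handler names are illustrative):

    static void onDeviceDisconnected(void* context, ACameraDevice* device) {
        ACameraDevice_close(device);  // not recoverable until the camera can be reopened
    }
    static void onDeviceError(void* context, ACameraDevice* device, int error) {
        // error is one of the ERROR_CAMERA_* codes above; recovery is code-specific
        ACameraDevice_close(device);
    }

    ACameraDevice_stateCallbacks deviceCallbacks = {
        .context        = NULL,
        .onDisconnected = onDeviceDisconnected,
        .onError        = onDeviceError,
    };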
/**
- * Close the camera device synchronously. Open is done in ACameraManager_openCamera
+ * Close the connection and free this ACameraDevice synchronously. Access to the ACameraDevice
+ * after calling this method will cause a crash.
+ *
+ * <p>After this call, all calls to the active ACameraCaptureSession associated to this
+ * ACameraDevice will return {@link ACAMERA_ERROR_SESSION_CLOSED} except for calls to
+ * {@link ACameraCaptureSession_close}.</p>
+ *
+ * <p>This method will stop all repeating captures sent via
+ * {@link ACameraCaptureSession_setRepeatingRequest} and block until all capture requests sent via
+ * {@link ACameraCaptureSession_capture} are complete. Once the method returns, the camera device
+ * will be removed from memory and access to the closed camera device pointer will cause a crash.</p>
+ *
+ * @param device the camera device to be closed
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if device is NULL.</li></ul>
*/
-camera_status_t ACameraDevice_close(ACameraDevice*);
+camera_status_t ACameraDevice_close(ACameraDevice* device);
/**
- * Return the camera id associated with this camera device
- * The returned pointer is still owned by framework and should not be delete/free by app
- * The returned pointer should not be used after the device has been closed
+ * Return the camera id associated with this camera device.
+ *
+ * @param device the camera device of interest
+ *
+ * @return camera ID string. The returned string is managed by the framework and should not be
+ * deleted/freed by the application. The returned string also must not be used after the device
+ * has been closed.
*/
-const char* ACameraDevice_getId(const ACameraDevice*);
+const char* ACameraDevice_getId(const ACameraDevice* device);
typedef enum {
+ /**
+ * Create a request suitable for a camera preview window. Specifically, this
+ * means that high frame rate is given priority over the highest-quality
+ * post-processing. These requests would normally be used with the
+ * {@link ACameraCaptureSession_setRepeatingRequest} method.
+ * This template is guaranteed to be supported on all camera devices.
+ *
+ * @see ACameraDevice_createCaptureRequest
+ */
TEMPLATE_PREVIEW = 1,
- TEMPLATE_STILL_CAPTURE,
- TEMPLATE_RECORD,
- TEMPLATE_VIDEO_SNAPSHOT,
- TEMPLATE_ZERO_SHUTTER_LAG,
- TEMPLATE_MANUAL,
+
+ /**
+ * Create a request suitable for still image capture. Specifically, this
+ * means prioritizing image quality over frame rate. These requests would
+ * commonly be used with the {@link ACameraCaptureSession_capture} method.
+ * This template is guaranteed to be supported on all camera devices.
+ *
+ * @see ACameraDevice_createCaptureRequest
+ */
+ TEMPLATE_STILL_CAPTURE = 2,
+
+ /**
+ * Create a request suitable for video recording. Specifically, this means
+ * that a stable frame rate is used, and post-processing is set for
+ * recording quality. These requests would commonly be used with the
+ * {@link ACameraCaptureSession_setRepeatingRequest} method.
+ * This template is guaranteed to be supported on all camera devices.
+ *
+ * @see ACameraDevice_createCaptureRequest
+ */
+ TEMPLATE_RECORD = 3,
+
+ /**
+ * Create a request suitable for still image capture while recording
+ * video. Specifically, this means maximizing image quality without
+ * disrupting the ongoing recording. These requests would commonly be used
+ * with the {@link ACameraCaptureSession_capture} method while a request based on
+ * {@link TEMPLATE_RECORD} is in use with {@link ACameraCaptureSession_setRepeatingRequest}.
+ * This template is guaranteed to be supported on all camera devices.
+ *
+ * @see ACameraDevice_createCaptureRequest
+ */
+ TEMPLATE_VIDEO_SNAPSHOT = 4,
+
+ /**
+ * Create a request suitable for zero shutter lag still capture. This means
+ * maximizing image quality without compromising preview frame rate.
+ * AE/AWB/AF should be on auto mode.
+ *
+ * @see ACameraDevice_createCaptureRequest
+ */
+ TEMPLATE_ZERO_SHUTTER_LAG = 5,
+
+ /**
+ * A basic template for direct application control of capture
+ * parameters. All automatic control is disabled (auto-exposure, auto-white
+ * balance, auto-focus), and post-processing parameters are set to preview
+ * quality. The manual capture parameters (exposure, sensitivity, and so on)
+ * are set to reasonable defaults, but should be overridden by the
+ * application depending on the intended use case.
+ * This template is guaranteed to be supported on camera devices that support the
+ * {@link ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR} capability.
+ *
+ * @see ACameraDevice_createCaptureRequest
+ */
+ TEMPLATE_MANUAL = 6,
} ACameraDevice_request_template;
/**
- * Create/free a default capture request for input template
+ * Create a ACaptureRequest for capturing images, initialized with template
+ * for a target use case.
+ *
+ * <p>The settings are chosen to be the best options for this camera device,
+ * so it is not recommended to reuse the same request for a different camera device.</p>
+ *
+ * @param device the camera device of interest
+ * @param templateId the type of capture request to be created.
+ * See {@link ACameraDevice_request_template}.
+ * @param request the output request will be stored here if the method call succeeds.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds. The created capture request will be
+ * filled in request argument.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if device or request is NULL, or templateId
+ * is undefined.</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if the camera device is closed.</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DEVICE} if the camera device encounters fatal error.</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_SERVICE} if the camera service encounters fatal error.</li>
+ * <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons.</li></ul>
+ *
+ * @see TEMPLATE_PREVIEW
+ * @see TEMPLATE_RECORD
+ * @see TEMPLATE_STILL_CAPTURE
+ * @see TEMPLATE_VIDEO_SNAPSHOT
+ * @see TEMPLATE_MANUAL
*/
camera_status_t ACameraDevice_createCaptureRequest(
- const ACameraDevice*, ACameraDevice_request_template, /*out*/ACaptureRequest** request);
+ const ACameraDevice* device, ACameraDevice_request_template templateId,
+ /*out*/ACaptureRequest** request);
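
A sketch that creates a preview-template request and points it at an output window, assuming
a valid `device` and an `ANativeWindow* window` obtained elsewhere; ACameraOutputTarget_create
and ACaptureRequest_addTarget are declared in NdkCaptureRequest.h:

    ACaptureRequest* previewRequest = NULL;
    ACameraOutputTarget* target = NULL;
    if (ACameraDevice_createCaptureRequest(device, TEMPLATE_PREVIEW, &previewRequest) == ACAMERA_OK &&
            ACameraOutputTarget_create(window, &target) == ACAMERA_OK) {
        ACaptureRequest_addTarget(previewRequest, target);
    }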
-/**
- * APIs for createing capture session
- */
+
typedef struct ACaptureSessionOutputContainer ACaptureSessionOutputContainer;
typedef struct ACaptureSessionOutput ACaptureSessionOutput;
-camera_status_t ACaptureSessionOutputContainer_create(/*out*/ACaptureSessionOutputContainer**);
-void ACaptureSessionOutputContainer_free(ACaptureSessionOutputContainer*);
+/**
+ * Create a capture session output container.
+ *
+ * <p>The container is used in the {@link ACameraDevice_createCaptureSession} method to create a
+ * capture session. Use {@link ACaptureSessionOutputContainer_free} to free the container and its
+ * memory after the application no longer needs the ACaptureSessionOutputContainer.</p>
+ *
+ * @param container the output {@link ACaptureSessionOutputContainer} will be stored here if the
+ * method call succeeds.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds. The created container will be
+ * filled in container argument.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if container is NULL.</li></ul>
+ */
+camera_status_t ACaptureSessionOutputContainer_create(
+ /*out*/ACaptureSessionOutputContainer** container);
-camera_status_t ACaptureSessionOutput_create(ANativeWindow*, /*out*/ACaptureSessionOutput**);
-void ACaptureSessionOutput_free(ACaptureSessionOutput*);
+/**
+ * Free a capture session output container.
+ *
+ * @param container the {@link ACaptureSessionOutputContainer} to be freed.
+ *
+ * @see ACaptureSessionOutputContainer_create
+ */
+void ACaptureSessionOutputContainer_free(ACaptureSessionOutputContainer* container);
+/**
+ * Create an ACaptureSessionOutput object.
+ *
+ * <p>The ACaptureSessionOutput is used in the {@link ACaptureSessionOutputContainer_add} method to
+ * add an output {@link ANativeWindow} to the ACaptureSessionOutputContainer. Use
+ * {@link ACaptureSessionOutput_free} to free the object and its memory after the application no
+ * longer needs the {@link ACaptureSessionOutput}.</p>
+ *
+ * @param anw the {@link ANativeWindow} to be associated with the {@link ACaptureSessionOutput}
+ * @param output the output {@link ACaptureSessionOutput} will be stored here if the
+ * method call succeeds.
+ *
+ * @return <ul>
+ *         <li>{@link ACAMERA_OK} if the method call succeeds. The created
+ *         {@link ACaptureSessionOutput} will be filled in the output argument.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if anw or output is NULL.</li></ul>
+ *
+ * @see ACaptureSessionOutputContainer_add
+ */
+camera_status_t ACaptureSessionOutput_create(
+ ANativeWindow* anw, /*out*/ACaptureSessionOutput** output);
+
+/**
+ * Free an ACaptureSessionOutput object.
+ *
+ * @param output the {@link ACaptureSessionOutput} to be freed.
+ *
+ * @see ACaptureSessionOutput_create
+ */
+void ACaptureSessionOutput_free(ACaptureSessionOutput* output);
+
+/**
+ * Add an {@link ACaptureSessionOutput} object to {@link ACaptureSessionOutputContainer}.
+ *
+ * @param container the {@link ACaptureSessionOutputContainer} of interest.
+ * @param output the output {@link ACaptureSessionOutput} to be added to container.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if container or output is NULL.</li></ul>
+ */
camera_status_t ACaptureSessionOutputContainer_add(
- ACaptureSessionOutputContainer*, const ACaptureSessionOutput*);
-camera_status_t ACaptureSessionOutputContainer_remove(
- ACaptureSessionOutputContainer*, const ACaptureSessionOutput*);
+ ACaptureSessionOutputContainer* container, const ACaptureSessionOutput* output);
-/*
- * Create a new capture session.
- * If there is a preexisting session, the previous session will be closed automatically.
- * However, app still needs to call ACameraCaptureSession_close on previous session.
- * Otherwise the resources hold by previous session won't be freed
+/**
+ * Remove an {@link ACaptureSessionOutput} object from {@link ACaptureSessionOutputContainer}.
+ *
+ * <p>This method has no effect if the ACaptureSessionOutput does not exist in
+ * ACaptureSessionOutputContainer.</p>
+ *
+ * @param container the {@link ACaptureSessionOutputContainer} of interest.
+ * @param output the output {@link ACaptureSessionOutput} to be removed from container.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if container or output is NULL.</li></ul>
+ */
+camera_status_t ACaptureSessionOutputContainer_remove(
+ ACaptureSessionOutputContainer* container, const ACaptureSessionOutput* output);
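
Putting the container APIs together, a sketch that assembles the output set and creates a
session, assuming a valid `device`, an `ANativeWindow* window`, and the `sessionCallbacks`
struct from the earlier sketch (error checking omitted for brevity):

    ACaptureSessionOutputContainer* outputs = NULL;
    ACaptureSessionOutput* output = NULL;
    ACameraCaptureSession* session = NULL;
    ACaptureSessionOutputContainer_create(&outputs);
    ACaptureSessionOutput_create(window, &output);
    ACaptureSessionOutputContainer_add(outputs, output);
    ACameraDevice_createCaptureSession(device, outputs, &sessionCallbacks, &session);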
+
+/**
+ * Create a new camera capture session by providing the target output set of {@link ANativeWindow}
+ * to the camera device.
+ *
+ * <p>If there is a preexisting session, the previous session will be closed
+ * automatically. However, the app still needs to call {@link ACameraCaptureSession_close} on the
+ * previous session. Otherwise the resources held by the previous session will NOT be freed.</p>
+ *
+ * <p>The active capture session determines the set of potential output {@link ANativeWindow}s for
+ * the camera device for each capture request. A given request may use all
+ * or only some of the outputs. Once the ACameraCaptureSession is created, requests can be
+ * submitted with {@link ACameraCaptureSession_capture} or
+ * {@link ACameraCaptureSession_setRepeatingRequest}.</p>
+ *
+ * <p>Often the {@link ANativeWindow} used with this method can be obtained from a <a href=
+ * "http://developer.android.com/reference/android/view/Surface.html">Surface</a> java object by
+ * the {@link ANativeWindow_fromSurface} NDK method. Surfaces or ANativeWindows suitable for inclusion as a camera
+ * output can be created for various use cases and targets:</p>
+ *
+ * <ul>
+ *
+ * <li>For drawing to a
+ * <a href="http://developer.android.com/reference/android/view/SurfaceView.html">SurfaceView</a>:
+ * Once the SurfaceView's Surface is created, set the size
+ * of the Surface with
+ * <a href="http://developer.android.com/reference/android/view/SurfaceHolder.html#setFixedSize(int, int)">
+ * android.view.SurfaceHolder\#setFixedSize</a> to be one of the PRIVATE output sizes
+ * returned by {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS}
+ * and then obtain the Surface by calling <a href=
+ * "http://developer.android.com/reference/android/view/SurfaceHolder.html#getSurface()">
+ * android.view.SurfaceHolder\#getSurface</a>. If the size is not set by the application, it will
+ * be rounded to the nearest supported size less than 1080p, by the camera device.</li>
+ *
+ * <li>For accessing through an OpenGL texture via a <a href=
+ * "http://developer.android.com/reference/android/graphics/SurfaceTexture.html">SurfaceTexture</a>:
+ * Set the size of the SurfaceTexture with <a href=
+ * "http://developer.android.com/reference/android/graphics/SurfaceTexture.html#setDefaultBufferSize(int, int)">
+ * setDefaultBufferSize</a> to be one of the PRIVATE output sizes
+ * returned by {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS}
+ * before creating a Surface from the SurfaceTexture with <a href=
+ * "http://developer.android.com/reference/android/view/Surface.html#Surface(android.graphics.SurfaceTexture)">
+ * Surface\#Surface(SurfaceTexture)</a>. If the size is not set by the application, it will be set to be the
+ * smallest supported size less than 1080p, by the camera device.</li>
+ *
+ * <li>For recording with <a href=
+ * "http://developer.android.com/reference/android/media/MediaCodec.html">
+ * MediaCodec</a>: Call
+ * <a href=
+ * "http://developer.android.com/reference/android/media/MediaCodec.html#createInputSurface()">
+ * android.media.MediaCodec\#createInputSurface</a> after configuring
+ * the media codec to use one of the PRIVATE output sizes
+ * returned by {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS}.
+ * </li>
+ *
+ * <li>For recording with <a href=
+ * "http://developer.android.com/reference/android/media/MediaRecorder.html">
+ * MediaRecorder</a>: Call
+ * <a href="http://developer.android.com/reference/android/media/MediaRecorder.html#getSurface()">
+ * android.media.MediaRecorder\#getSurface</a> after configuring the media recorder to use
+ * one of the PRIVATE output sizes returned by
+ * {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS}, or configuring it to use one of the supported
+ * <a href="http://developer.android.com/reference/android/media/CamcorderProfile.html">
+ * CamcorderProfiles</a>.</li>
+ *
+ * <li>For efficient YUV processing with <a href=
+ * "http://developer.android.com/reference/android/renderscript/package-summary.html">
+ * RenderScript</a>:
+ * Create a RenderScript
+ * <a href="http://developer.android.com/reference/android/renderscript/Allocation.html">
+ * Allocation</a> with a supported YUV
+ * type, the IO_INPUT flag, and one of the YUV output sizes returned by
+ * {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS}.
+ * Then obtain the Surface with
+ * <a href="http://developer.android.com/reference/android/renderscript/Allocation.html#getSurface()">
+ * Allocation\#getSurface</a>.</li>
+ *
+ * <li>For access to RAW, uncompressed YUV, or compressed JPEG data in the application: Create an
+ * {@link AImageReader} object using the {@link AImageReader_new} method with one of the supported
+ * output formats given by {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS}. Then obtain an
+ * ANativeWindow from it with {@link AImageReader_getWindow}.
+ * If the AImageReader size is not set to a supported size, it will be rounded to a supported
+ * size less than 1080p by the camera device.
+ * </li>
+ *
+ * </ul>
+ *
+ * <p>The camera device will query each ANativeWindow's size and formats upon this
+ * call, so they must be set to a valid setting at this time.</p>
+ *
+ * <p>It can take several hundred milliseconds for the session's configuration to complete,
+ * since camera hardware may need to be powered on or reconfigured.</p>
+ *
+ * <p>If a prior ACameraCaptureSession already exists when this method is called, the previous
+ * session will no longer be able to accept new capture requests and will be closed. Any
+ * in-progress capture requests made on the prior session will be completed before it's closed.
+ * To minimize the transition time,
+ * the ACameraCaptureSession_abortCaptures method can be used to discard the remaining
+ * requests for the prior capture session before a new one is created. Note that once the new
+ * session is created, the old one can no longer have its captures aborted.</p>
+ *
+ * <p>Using larger resolution outputs, or more outputs, can result in slower
+ * output rate from the device.</p>
+ *
+ * <p>Configuring a session with an empty list will close the current session, if
+ * any. This can be used to release the current session's target surfaces for another use.</p>
+ *
+ * <p>While any of the sizes from {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS} can be used when
+ * a single output stream is configured, a given camera device may not be able to support all
+ * combination of sizes, formats, and targets when multiple outputs are configured at once. The
+ * tables below list the maximum guaranteed resolutions for combinations of streams and targets,
+ * given the capabilities of the camera device.</p>
+ *
+ * <p>If an application tries to create a session using a set of targets that exceed the limits
+ * described in the below tables, one of three possibilities may occur. First, the session may
+ * be successfully created and work normally. Second, the session may be successfully created,
+ * but the camera device won't meet the frame rate guarantees as described in
+ * {@link ACAMERA_SCALER_AVAILABLE_MIN_FRAME_DURATIONS}. Or third, if the output set
+ * cannot be used at all, session creation will fail entirely, with
+ * {@link ACAMERA_ERROR_STREAM_CONFIGURE_FAIL} being returned.</p>
+ *
+ * <p>For the type column, `PRIV` refers to output format {@link AIMAGE_FORMAT_PRIVATE},
+ * `YUV` refers to output format {@link AIMAGE_FORMAT_YUV_420_888},
+ * `JPEG` refers to output format {@link AIMAGE_FORMAT_JPEG},
+ * and `RAW` refers to output format {@link AIMAGE_FORMAT_RAW16}.</p>
+ *
+ * <p>For the maximum size column, `PREVIEW` refers to the best size match to the
+ * device's screen resolution, or to 1080p `(1920x1080)`, whichever is
+ * smaller. `RECORD` refers to the camera device's maximum supported recording resolution,
+ * as determined by <a href="http://developer.android.com/reference/android/media/CamcorderProfile.html">
+ * android.media.CamcorderProfiles</a>. And `MAXIMUM` refers to the
+ * camera device's maximum output resolution for that format or target from
+ * {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS}.</p>
+ *
+ * <p>To use these tables, determine the number and the formats/targets of outputs needed, and
+ * find the row(s) of the table with those targets. The sizes indicate the maximum set of sizes
+ * that can be used; it is guaranteed that for those targets, the listed sizes and anything
+ * smaller from the list given by {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS} can be
+ * successfully used to create a session. For example, if a row indicates that an 8 megapixel
+ * (MP) YUV_420_888 output can be used together with a 2 MP `PRIV` output, then a session
+ * can be created with targets `[8 MP YUV, 2 MP PRIV]` or targets `[2 MP YUV, 2 MP PRIV]`;
+ * but a session with targets `[8 MP YUV, 4 MP PRIV]`, targets `[4 MP YUV, 4 MP PRIV]`,
+ * or targets `[8 MP PRIV, 2 MP YUV]` would not be guaranteed to work, unless
+ * some other row of the table lists such a combination.</p>
+ *
+ * <p>Legacy devices ({@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL}
+ * `== `{@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY LEGACY}) support at
+ * least the following stream combinations:
+ *
+ * <table>
+ * <tr><th colspan="7">LEGACY-level guaranteed configurations</th></tr>
+ * <tr> <th colspan="2" id="rb">Target 1</th> <th colspan="2" id="rb">Target 2</th> <th colspan="2" id="rb">Target 3</th> <th rowspan="2">Sample use case(s)</th> </tr>
+ * <tr> <th>Type</th><th id="rb">Max size</th> <th>Type</th><th id="rb">Max size</th> <th>Type</th><th id="rb">Max size</th></tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td colspan="2" id="rb"></td> <td>Simple preview, GPU video processing, or no-preview video recording.</td> </tr>
+ * <tr> <td>`JPEG`</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td colspan="2" id="rb"></td> <td>No-viewfinder still image capture.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td colspan="2" id="rb"></td> <td>In-application video/image processing.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`JPEG`</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td>Standard still imaging.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`JPEG`</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td>In-app processing plus still capture.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td colspan="2" id="rb"></td> <td>Standard recording.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td colspan="2" id="rb"></td> <td>Preview plus in-app processing.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`JPEG`</td><td id="rb">`MAXIMUM`</td> <td>Still capture plus in-app processing.</td> </tr>
+ * </table><br>
+ * </p>
+ *
+ * <p>Limited-level ({@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL}
+ * `== `{@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED LIMITED}) devices
+ * support at least the following stream combinations in addition to those for
+ * {@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY LEGACY} devices:
+ *
+ * <table>
+ * <tr><th colspan="7">LIMITED-level additional guaranteed configurations</th></tr>
+ * <tr><th colspan="2" id="rb">Target 1</th><th colspan="2" id="rb">Target 2</th><th colspan="2" id="rb">Target 3</th> <th rowspan="2">Sample use case(s)</th> </tr>
+ * <tr><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th></tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`PRIV`</td><td id="rb">`RECORD `</td> <td colspan="2" id="rb"></td> <td>High-resolution video recording with preview.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`RECORD `</td> <td colspan="2" id="rb"></td> <td>High-resolution in-app video processing with preview.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`RECORD `</td> <td colspan="2" id="rb"></td> <td>Two-input in-app video processing.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`PRIV`</td><td id="rb">`RECORD `</td> <td>`JPEG`</td><td id="rb">`RECORD `</td> <td>High-resolution recording with video snapshot.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`RECORD `</td> <td>`JPEG`</td><td id="rb">`RECORD `</td> <td>High-resolution in-app processing with video snapshot.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`JPEG`</td><td id="rb">`MAXIMUM`</td> <td>Two-input in-app processing with still capture.</td> </tr>
+ * </table><br>
+ * </p>
+ *
+ * <p>FULL-level ({@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL}
+ * `== `{@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_FULL FULL}) devices
+ * support at least the following stream combinations in addition to those for
+ * {@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED LIMITED} devices:
+ *
+ * <table>
+ * <tr><th colspan="7">FULL-level additional guaranteed configurations</th></tr>
+ * <tr><th colspan="2" id="rb">Target 1</th><th colspan="2" id="rb">Target 2</th><th colspan="2" id="rb">Target 3</th> <th rowspan="2">Sample use case(s)</th> </tr>
+ * <tr><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`PRIV`</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td>Maximum-resolution GPU processing with preview.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td>Maximum-resolution in-app processing with preview.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`640x480`</td> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`MAXIMUM`</td> <td>Standard video recording plus maximum-resolution in-app processing.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`640x480`</td> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`MAXIMUM`</td> <td>Preview plus two-input maximum-resolution in-app processing.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`640x480`</td> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`MAXIMUM`</td> <td>Standard video recording plus maximum-resolution in-app processing.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`640x480`</td> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`MAXIMUM`</td> <td>Preview plus two-input maximum-resolution in-app processing.</td> </tr>
+ * </table><br>
+ * </p>
+ *
+ * <p>RAW-capability ({@link ACAMERA_REQUEST_AVAILABLE_CAPABILITIES} includes
+ * {@link ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_RAW RAW}) devices additionally support
+ * at least the following stream combinations on both
+ * {@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_FULL FULL} and
+ * {@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED LIMITED} devices:
+ *
+ * <table>
+ * <tr><th colspan="7">RAW-capability additional guaranteed configurations</th></tr>
+ * <tr><th colspan="2" id="rb">Target 1</th><th colspan="2" id="rb">Target 2</th><th colspan="2" id="rb">Target 3</th> <th rowspan="2">Sample use case(s)</th> </tr>
+ * <tr><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th> </tr>
+ * <tr> <td>`RAW `</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td colspan="2" id="rb"></td> <td>No-preview DNG capture.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`RAW `</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td>Standard DNG capture.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`RAW `</td><td id="rb">`MAXIMUM`</td> <td colspan="2" id="rb"></td> <td>In-app processing plus DNG capture.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`RAW `</td><td id="rb">`MAXIMUM`</td> <td>Video recording with DNG capture.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`RAW `</td><td id="rb">`MAXIMUM`</td> <td>Preview with in-app processing and DNG capture.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`RAW `</td><td id="rb">`MAXIMUM`</td> <td>Two-input in-app processing plus DNG capture.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`JPEG`</td><td id="rb">`MAXIMUM`</td> <td>`RAW `</td><td id="rb">`MAXIMUM`</td> <td>Still capture with simultaneous JPEG and DNG.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`JPEG`</td><td id="rb">`MAXIMUM`</td> <td>`RAW `</td><td id="rb">`MAXIMUM`</td> <td>In-app processing with simultaneous JPEG and DNG.</td> </tr>
+ * </table><br>
+ * </p>
+ *
+ * <p>BURST-capability ({@link ACAMERA_REQUEST_AVAILABLE_CAPABILITIES} includes
+ * {@link ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE BURST_CAPTURE}) devices
+ * support at least the following stream combinations in addition to those for
+ * {@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED LIMITED} devices. Note that all
+ * FULL-level devices support the BURST capability, and the list below is a strict subset of the
+ * list for FULL-level devices, so this table is only relevant for LIMITED-level devices that
+ * support the BURST_CAPTURE capability.
+ *
+ * <table>
+ * <tr><th colspan="5">BURST-capability additional guaranteed configurations</th></tr>
+ * <tr><th colspan="2" id="rb">Target 1</th><th colspan="2" id="rb">Target 2</th><th rowspan="2">Sample use case(s)</th> </tr>
+ * <tr><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`PRIV`</td><td id="rb">`MAXIMUM`</td> <td>Maximum-resolution GPU processing with preview.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`MAXIMUM`</td> <td>Maximum-resolution in-app processing with preview.</td> </tr>
+ * <tr> <td>`YUV `</td><td id="rb">`PREVIEW`</td> <td>`YUV `</td><td id="rb">`MAXIMUM`</td> <td>Maximum-resolution two-input in-app processing.</td> </tr>
+ * </table><br>
+ * </p>
+ *
+ * <p>LEVEL-3 ({@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL}
+ * `== `{@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_3 LEVEL_3}) devices
+ * support at least the following stream combinations in addition to the combinations for
+ * {@link ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL_FULL FULL} and for
+ * RAW capability ({@link ACAMERA_REQUEST_AVAILABLE_CAPABILITIES} includes
+ * {@link ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_RAW RAW}):
+ *
+ * <table>
+ * <tr><th colspan="11">LEVEL-3 additional guaranteed configurations</th></tr>
+ * <tr><th colspan="2" id="rb">Target 1</th><th colspan="2" id="rb">Target 2</th><th colspan="2" id="rb">Target 3</th><th colspan="2" id="rb">Target 4</th><th rowspan="2">Sample use case(s)</th> </tr>
+ * <tr><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th><th>Type</th><th id="rb">Max size</th> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`PRIV`</td><td id="rb">`640x480`</td> <td>`YUV`</td><td id="rb">`MAXIMUM`</td> <td>`RAW`</td><td id="rb">`MAXIMUM`</td> <td>In-app viewfinder analysis with dynamic selection of output format.</td> </tr>
+ * <tr> <td>`PRIV`</td><td id="rb">`PREVIEW`</td> <td>`PRIV`</td><td id="rb">`640x480`</td> <td>`JPEG`</td><td id="rb">`MAXIMUM`</td> <td>`RAW`</td><td id="rb">`MAXIMUM`</td> <td>In-app viewfinder analysis with dynamic selection of output format.</td> </tr>
+ * </table><br>
+ * </p>
+ *
+ * <p>Since the capabilities of camera devices vary greatly, a given camera device may support
+ * target combinations with sizes outside of these guarantees, but this can only be tested for
+ * by attempting to create a session with such targets.</p>
+ *
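+ * <p>A minimal usage sketch (hypothetical application code, not part of this header;
+ * it assumes an opened `device` and a populated `outputs` container, and elides
+ * error handling):</p>
+ * <pre>
+ *   ACameraCaptureSession_stateCallbacks sessionCb = {0}; // fill in desired callbacks
+ *   ACameraCaptureSession* session = NULL;
+ *   camera_status_t status = ACameraDevice_createCaptureSession(
+ *       device, outputs, &sessionCb, &session);
+ *   if (status != ACAMERA_OK) {
+ *       // inspect status; see the return values documented below
+ *   }
+ * </pre>
+ *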
+ * @param device the camera device of interest.
+ * @param outputs the {@link ACaptureSessionOutputContainer} that describes all output streams.
+ * @param callbacks the {@link ACameraCaptureSession_stateCallbacks capture session state callbacks}.
+ * @param session the created {@link ACameraCaptureSession} will be filled here if the method call
+ * succeeds.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds. The created capture session will be
+ * stored in the session argument.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if any of device, outputs, callbacks or
+ * session is NULL.</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if the camera device is closed.</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DEVICE} if the camera device encounters a fatal error.</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_SERVICE} if the camera service encounters a fatal error.</li>
+ * <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reason.</li></ul>
*/
camera_status_t ACameraDevice_createCaptureSession(
- ACameraDevice*,
+ ACameraDevice* device,
const ACaptureSessionOutputContainer* outputs,
const ACameraCaptureSession_stateCallbacks* callbacks,
/*out*/ACameraCaptureSession** session);
@@ -110,3 +662,6 @@
#endif
#endif // _NDK_CAMERA_DEVICE_H
+
+/** @} */
+
diff --git a/include/camera/ndk/NdkCameraError.h b/include/camera/ndk/NdkCameraError.h
index 6d671de..e817eef 100644
--- a/include/camera/ndk/NdkCameraError.h
+++ b/include/camera/ndk/NdkCameraError.h
@@ -14,6 +14,14 @@
* limitations under the License.
*/
+/**
+ * @addtogroup Camera
+ * @{
+ */
+
+/**
+ * @file NdkCameraError.h
+ */
/*
* This file defines an NDK API.
@@ -52,6 +60,10 @@
ACAMERA_ERROR_INVALID_OPERATION = ACAMERA_ERROR_BASE - 13,
ACAMERA_ERROR_TIMEOUT = ACAMERA_ERROR_BASE - 14,
ACAMERA_ERROR_STREAM_CONFIGURE_FAIL = ACAMERA_ERROR_BASE - 15,
+ ACAMERA_ERROR_CAMERA_IN_USE = ACAMERA_ERROR_BASE - 16,
+ ACAMERA_ERROR_MAX_CAMERA_IN_USE = ACAMERA_ERROR_BASE - 17,
+ ACAMERA_ERROR_CAMERA_DISABLED = ACAMERA_ERROR_BASE - 18,
+ ACAMERA_ERROR_PERMISSION_DENIED = ACAMERA_ERROR_BASE - 19,
} camera_status_t;
@@ -60,3 +72,5 @@
#endif
#endif // _NDK_CAMERA_ERROR_H
+
+/** @} */
diff --git a/include/camera/ndk/NdkCameraManager.h b/include/camera/ndk/NdkCameraManager.h
index adef6ed..9188e94 100644
--- a/include/camera/ndk/NdkCameraManager.h
+++ b/include/camera/ndk/NdkCameraManager.h
@@ -14,6 +14,15 @@
* limitations under the License.
*/
+/**
+ * @addtogroup Camera
+ * @{
+ */
+
+/**
+ * @file NdkCameraManager.h
+ */
+
/*
* This file defines an NDK API.
* Do not remove methods.
@@ -35,66 +44,230 @@
extern "C" {
#endif
+/**
+ * ACameraManager is an opaque type that provides access to the camera service.
+ *
+ * A pointer can be obtained using the {@link ACameraManager_create} method.
+ */
typedef struct ACameraManager ACameraManager;
/**
- * Create CameraManager instance.
- * The caller must call ACameraManager_delete to free the resources
+ * Create an ACameraManager instance.
+ *
+ * <p>The ACameraManager is responsible for
+ * detecting, characterizing, and connecting to {@link ACameraDevice}s.</p>
+ *
+ * <p>The caller must call {@link ACameraManager_delete} to free the resources once it is done
+ * using the ACameraManager instance.</p>
+ *
+ * @return an {@link ACameraManager} instance.
+ *
*/
ACameraManager* ACameraManager_create();
/**
- * delete the ACameraManager and free its resources
+ * <p>Delete the {@link ACameraManager} instance and free its resources.</p>
+ *
+ * @param manager the {@link ACameraManager} instance to be deleted.
*/
-void ACameraManager_delete(ACameraManager*);
+void ACameraManager_delete(ACameraManager* manager);
-// Struct to hold list of camera devices
+/// Struct to hold list of camera devices
typedef struct ACameraIdList {
- int numCameras;
- const char** cameraIds;
+ int numCameras; ///< Number of connected camera devices
+ const char** cameraIds; ///< list of identifiers of connected camera devices
} ACameraIdList;
/**
- * Create/delete a list of camera devices.
- * ACameraManager_getCameraIdList will allocate and return an ACameraIdList.
- * The caller must call ACameraManager_deleteCameraIdList to free the memory
+ * Create a list of currently connected camera devices, including
+ * cameras that may be in use by other camera API clients.
+ *
+ * <p>Non-removable cameras use integers starting at 0 for their
+ * identifiers, while removable cameras have a unique identifier for each
+ * individual device, even if they are the same model.</p>
+ *
+ * <p>ACameraManager_getCameraIdList will allocate and return an {@link ACameraIdList}.
+ * The caller must call {@link ACameraManager_deleteCameraIdList} to free the memory.</p>
+ *
+ * @param manager the {@link ACameraManager} of interest.
+ * @param cameraIdList the output {@link ACameraIdList} will be filled in here if the method call
+ * succeeds.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if manager or cameraIdList is NULL.</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if connection to camera service fails.</li>
+ * <li>{@link ACAMERA_ERROR_NOT_ENOUGH_MEMORY} if allocating memory fails.</li></ul>
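+ *
+ * <p>A minimal enumeration sketch (hypothetical application code; error handling
+ * elided):</p>
+ * <pre>
+ *   ACameraManager* manager = ACameraManager_create();
+ *   ACameraIdList* idList = NULL;
+ *   if (ACameraManager_getCameraIdList(manager, &idList) == ACAMERA_OK) {
+ *       for (int i = 0; i < idList->numCameras; i++) {
+ *           // idList->cameraIds[i] is the identifier of one connected camera
+ *       }
+ *       ACameraManager_deleteCameraIdList(idList);
+ *   }
+ *   ACameraManager_delete(manager);
+ * </pre>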
*/
-camera_status_t ACameraManager_getCameraIdList(ACameraManager*,
+camera_status_t ACameraManager_getCameraIdList(ACameraManager* manager,
/*out*/ACameraIdList** cameraIdList);
+
+/**
+ * Delete a list of camera devices allocated via {@link ACameraManager_getCameraIdList}.
+ *
+ * @param cameraIdList the {@link ACameraIdList} to be deleted.
+ */
void ACameraManager_deleteCameraIdList(ACameraIdList* cameraIdList);
-
-// Struct to hold camera availability callbacks
+/**
+ * Definition of camera availability callbacks.
+ *
+ * @param context The optional application context provided by the user in
+ * {@link ACameraManager_AvailabilityCallbacks}.
+ * @param cameraId The ID of the camera device whose availability is changing. The memory of this
+ * argument is owned by the camera framework and will become invalid immediately after
+ * this callback returns.
+ */
typedef void (*ACameraManager_AvailabilityCallback)(void* context, const char* cameraId);
+/**
+ * A listener for camera devices becoming available or unavailable to open.
+ *
+ * <p>Cameras become available when they are no longer in use, or when a new
+ * removable camera is connected. They become unavailable when some
+ * application or service starts using a camera, or when a removable camera
+ * is disconnected.</p>
+ *
+ * @see ACameraManager_registerAvailabilityCallback
+ */
typedef struct ACameraManager_AvailabilityListener {
- void* context; // optional application context.
+ /// Optional application context.
+ void* context;
+ /// Called when a camera becomes available
ACameraManager_AvailabilityCallback onCameraAvailable;
+ /// Called when a camera becomes unavailable
ACameraManager_AvailabilityCallback onCameraUnavailable;
} ACameraManager_AvailabilityCallbacks;
/**
- * register/unregister camera availability callbacks
+ * Register camera availability callbacks.
+ *
+ * <p>onCameraUnavailable will be called whenever a camera device is opened by any camera API client.
+ * Other camera API clients may still be able to open such a camera device, evicting the existing
+ * client if they have higher priority.
+ * See {@link ACameraManager_openCamera} for more details.</p>
+ *
+ * <p>The callbacks will be called on a dedicated thread shared among all ACameraManager
+ * instances.</p>
+ *
+ * <p>Since this callback will be registered with the camera service, remember to unregister it
+ * once it is no longer needed; otherwise the callback will continue to receive events
+ * indefinitely and it may prevent other resources from being released. Specifically, the
+ * callbacks will be invoked independently of the general activity lifecycle and independently
+ * of the state of individual ACameraManager instances.</p>
+ *
+ * @param manager the {@link ACameraManager} of interest.
+ * @param callback the {@link ACameraManager_AvailabilityCallbacks} to be registered.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if manager or callback is NULL, or
+ * {@link ACameraManager_AvailabilityCallbacks#onCameraAvailable} or
+ * {@link ACameraManager_AvailabilityCallbacks#onCameraUnavailable} is NULL.</li></ul>
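+ *
+ * <p>A minimal registration sketch (hypothetical application code; the callback
+ * bodies are placeholders and `manager` is assumed valid):</p>
+ * <pre>
+ *   static void onAvailable(void* context, const char* cameraId) { }
+ *   static void onUnavailable(void* context, const char* cameraId) { }
+ *
+ *   ACameraManager_AvailabilityCallbacks cb = { NULL, onAvailable, onUnavailable };
+ *   ACameraManager_registerAvailabilityCallback(manager, &cb);
+ *   // ... later, once availability updates are no longer needed:
+ *   ACameraManager_unregisterAvailabilityCallback(manager, &cb);
+ * </pre>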
*/
camera_status_t ACameraManager_registerAvailabilityCallback(
- ACameraManager*, const ACameraManager_AvailabilityCallbacks *callback);
-camera_status_t ACameraManager_unregisterAvailabilityCallback(
- ACameraManager*, const ACameraManager_AvailabilityCallbacks *callback);
+ ACameraManager* manager, const ACameraManager_AvailabilityCallbacks* callback);
/**
- * Query the characteristics of a camera.
- * The caller must call ACameraMetadata_free to free the memory of the output characteristics.
+ * Unregister camera availability callbacks.
+ *
+ * <p>Removing a callback that isn't registered has no effect.</p>
+ *
+ * @param manager the {@link ACameraManager} of interest.
+ * @param callback the {@link ACameraManager_AvailabilityCallbacks} to be unregistered.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if callback,
+ * {@link ACameraManager_AvailabilityCallbacks#onCameraAvailable} or
+ * {@link ACameraManager_AvailabilityCallbacks#onCameraUnavailable} is NULL.</li></ul>
+ */
+camera_status_t ACameraManager_unregisterAvailabilityCallback(
+ ACameraManager* manager, const ACameraManager_AvailabilityCallbacks* callback);
+
+/**
+ * Query the capabilities of a camera device. These capabilities are
+ * immutable for a given camera.
+ *
+ * <p>See {@link ACameraMetadata} document and {@link NdkCameraMetadataTags.h} for more details.</p>
+ *
+ * <p>The caller must call {@link ACameraMetadata_free} to free the memory of the output
+ * characteristics.</p>
+ *
+ * @param manager the {@link ACameraManager} of interest.
+ * @param cameraId the ID string of the camera device of interest.
+ * @param characteristics the output {@link ACameraMetadata} will be filled here if the method call
+ * succeeds.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if manager, cameraId, or characteristics
+ * is NULL, or cameraId does not match any camera devices connected.</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if connection to camera service fails.</li>
+ * <li>{@link ACAMERA_ERROR_NOT_ENOUGH_MEMORY} if allocating memory fails.</li>
+ * <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reason.</li></ul>
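+ *
+ * <p>A minimal query sketch (hypothetical application code; `manager` and `cameraId`
+ * are assumed valid):</p>
+ * <pre>
+ *   ACameraMetadata* chars = NULL;
+ *   if (ACameraManager_getCameraCharacteristics(manager, cameraId, &chars) == ACAMERA_OK) {
+ *       // read static tags from chars; see NdkCameraMetadataTags.h
+ *       ACameraMetadata_free(chars);
+ *   }
+ * </pre>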
*/
camera_status_t ACameraManager_getCameraCharacteristics(
- ACameraManager*, const char *cameraId,
- /*out*/ACameraMetadata **characteristics);
+ ACameraManager* manager, const char* cameraId,
+ /*out*/ACameraMetadata** characteristics);
/**
- * Open a camera device synchronously.
- * The opened camera device will be returned in
+ * Open a connection to a camera with the given ID. The opened camera device will be
+ * returned in the `device` parameter.
+ *
+ * <p>Use {@link ACameraManager_getCameraIdList} to get the list of available camera
+ * devices. Note that even if an id is listed, open may fail if the device
+ * is disconnected between the calls to {@link ACameraManager_getCameraIdList} and
+ * {@link ACameraManager_openCamera}, or if a higher-priority camera API client begins using the
+ * camera device.</p>
+ *
+ * <p>Devices for which the
+ * {@link ACameraManager_AvailabilityCallbacks#onCameraUnavailable} callback has been called due to
+ * the device being in use by a lower-priority, background camera API client can still potentially
+ * be opened by calling this method when the calling camera API client has a higher priority
+ * than the current camera API client using this device. In general, if the top, foreground
+ * activity is running within your application process, your process will be given the highest
+ * priority when accessing the camera, and this method will succeed even if the camera device is
+ * in use by another camera API client. Any lower-priority application that loses control of the
+ * camera in this way will receive an
+ * {@link ACameraDevice_stateCallbacks#onDisconnected} callback.</p>
+ *
+ * <p>Once the camera is successfully opened, the ACameraDevice can then be set up
+ * for operation by calling {@link ACameraDevice_createCaptureSession} and
+ * {@link ACameraDevice_createCaptureRequest}.</p>
+ *
+ * <p>If the camera becomes disconnected after this function call returns,
+ * {@link ACameraDevice_stateCallbacks#onDisconnected} with an
+ * ACameraDevice in the disconnected state will be called.</p>
+ *
+ * <p>If the camera runs into error after this function call returns,
+ * {@link ACameraDevice_stateCallbacks#onError} with an
+ * ACameraDevice in the error state will be called.</p>
+ *
+ * @param manager the {@link ACameraManager} of interest.
+ * @param cameraId the ID string of the camera device to be opened.
+ * @param callback the {@link ACameraDevice_StateCallbacks} associated with the opened camera device.
+ * @param device the opened {@link ACameraDevice} will be filled here if the method call succeeds.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if manager, cameraId, callback, or device
+ * is NULL, or cameraId does not match any camera devices connected.</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if connection to camera service fails.</li>
+ * <li>{@link ACAMERA_ERROR_NOT_ENOUGH_MEMORY} if allocating memory fails.</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_IN_USE} if the camera device is being used by a
+ * higher-priority camera API client.</li>
+ * <li>{@link ACAMERA_ERROR_MAX_CAMERA_IN_USE} if the system-wide limit for the number of
+ * open cameras or camera resources has been reached, and more camera devices cannot be
+ * opened until previous instances are closed.</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DISABLED} if the camera is disabled due to a device
+ * policy, and cannot be opened.</li>
+ * <li>{@link ACAMERA_ERROR_PERMISSION_DENIED} if the application does not have permission
+ * to open the camera.</li>
+ * <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reason.</li></ul>
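+ *
+ * <p>A minimal open sketch (hypothetical application code; `manager` and `cameraId`
+ * are assumed valid, and `stateCb` must outlive the opened device):</p>
+ * <pre>
+ *   ACameraDevice_StateCallbacks stateCb = {0}; // fill in onDisconnected/onError
+ *   ACameraDevice* device = NULL;
+ *   camera_status_t status = ACameraManager_openCamera(
+ *       manager, cameraId, &stateCb, &device);
+ *   if (status == ACAMERA_OK) {
+ *       // device is ready for ACameraDevice_createCaptureSession and
+ *       // ACameraDevice_createCaptureRequest
+ *   }
+ * </pre>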
*/
camera_status_t ACameraManager_openCamera(
- ACameraManager*, const char* cameraId,
+ ACameraManager* manager, const char* cameraId,
ACameraDevice_StateCallbacks* callback,
/*out*/ACameraDevice** device);
@@ -103,3 +276,5 @@
#endif
#endif //_NDK_CAMERA_MANAGER_H
+
+/** @} */
diff --git a/include/camera/ndk/NdkCameraMetadata.h b/include/camera/ndk/NdkCameraMetadata.h
index 9b56a9d..8a8865d 100644
--- a/include/camera/ndk/NdkCameraMetadata.h
+++ b/include/camera/ndk/NdkCameraMetadata.h
@@ -14,6 +14,15 @@
* limitations under the License.
*/
+/**
+ * @addtogroup Camera
+ * @{
+ */
+
+/**
+ * @file NdkCameraMetadata.h
+ */
+
/*
* This file defines an NDK API.
* Do not remove methods.
@@ -119,3 +128,5 @@
#endif
#endif //_NDK_CAMERA_METADATA_H
+
+/** @} */
diff --git a/include/camera/ndk/NdkCaptureRequest.h b/include/camera/ndk/NdkCaptureRequest.h
index d9fb164..e278196 100644
--- a/include/camera/ndk/NdkCaptureRequest.h
+++ b/include/camera/ndk/NdkCaptureRequest.h
@@ -14,6 +14,15 @@
* limitations under the License.
*/
+/**
+ * @addtogroup Camera
+ * @{
+ */
+
+/**
+ * @file NdkCaptureRequest.h
+ */
+
/*
* This file defines an NDK API.
* Do not remove methods.
@@ -95,3 +104,5 @@
#endif
#endif // _NDK_CAPTURE_REQUEST_H
+
+/** @} */
diff --git a/include/media/AudioTimestamp.h b/include/media/AudioTimestamp.h
index 4f504a4..44d6c0b 100644
--- a/include/media/AudioTimestamp.h
+++ b/include/media/AudioTimestamp.h
@@ -37,9 +37,16 @@
struct ExtendedTimestamp {
enum Location {
LOCATION_INVALID = -1,
- LOCATION_CLIENT, // timestamp of last read frame from client-server track buffer
- LOCATION_SERVER, // timestamp of newest frame from client-server track buffer
+ // Locations in the audio playback / record pipeline.
+ LOCATION_CLIENT, // timestamp of last read frame from client-server track buffer.
+ LOCATION_SERVER, // timestamp of newest frame from client-server track buffer.
LOCATION_KERNEL, // timestamp of newest frame in the kernel (alsa) buffer.
+
+ // Historical data: info when the kernel timestamp was OK (prior to the newest frame).
+ // This may be useful when the newest frame kernel timestamp is unavailable.
+ // Available for playback timestamps.
+ LOCATION_SERVER_LASTKERNELOK, // server timestamp at the last time the kernel timestamp was OK.
+ LOCATION_KERNEL_LASTKERNELOK, // kernel timestamp at the last time the kernel timestamp was OK.
LOCATION_MAX // for sizing arrays only
};
@@ -101,7 +108,7 @@
// look for the closest-to-hw stage in the pipeline with a valid timestamp.
// We omit LOCATION_CLIENT as we prefer at least LOCATION_SERVER based accuracy
// when getting the best timestamp.
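+ // Stop at LOCATION_KERNEL: the LASTKERNELOK entries that now follow it in
+ // the enum hold historical data, not current pipeline positions.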
- for (int i = LOCATION_MAX - 1; i >= LOCATION_SERVER; --i) {
+ for (int i = LOCATION_KERNEL; i >= LOCATION_SERVER; --i) {
if (mTimeNs[i] > 0) {
*position = mPosition[i];
*time = mTimeNs[i] + mTimebaseOffset[timebase];
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 46948e4..88c4e61 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -1047,7 +1047,6 @@
bool mRetrogradeMotionReported; // reduce log spam
AudioTimestamp mPreviousTimestamp; // used to detect retrograde motion
ExtendedTimestamp::Location mPreviousLocation; // location used for previous timestamp
- double mComputedLatencyMs; // latency between server and kernel
uint32_t mUnderrunCountOffset; // updated when restoring tracks
diff --git a/include/media/IMediaExtractor.h b/include/media/IMediaExtractor.h
index d9fcd89..34b15e9 100644
--- a/include/media/IMediaExtractor.h
+++ b/include/media/IMediaExtractor.h
@@ -19,7 +19,7 @@
#define IMEDIA_EXTRACTOR_BASE_H_
#include <media/IMediaSource.h>
-#include <media/IDataSource.h>
+#include <media/stagefright/DataSource.h>
namespace android {
@@ -72,7 +72,7 @@
void registerMediaExtractor(
const sp<IMediaExtractor> &extractor,
- const sp<IDataSource> &source,
+ const sp<DataSource> &source,
const char *mime);
void registerMediaSource(
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index 19c7955..15d691f 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -37,6 +37,7 @@
class IMemory;
class IOMXObserver;
class IOMXRenderer;
+class NativeHandle;
class Surface;
class IOMX : public IInterface {
@@ -118,6 +119,10 @@
node_id node, OMX_U32 port_index,
const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) = 0;
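+ // Update the native handle stored in a metadata-mode buffer; the counterpart
+ // of updateGraphicBufferInMeta for kMetadataBufferTypeNativeHandleSource
+ // buffers.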
+ virtual status_t updateNativeHandleInMeta(
+ node_id node, OMX_U32 port_index,
+ const sp<NativeHandle> &nativeHandle, buffer_id buffer) = 0;
+
// This will set *type to resulting metadata buffer type on OMX error (not on binder error) as
// well as on success.
virtual status_t createInputSurface(
@@ -145,7 +150,7 @@
// pointer is just that, a pointer into local address space.
virtual status_t allocateSecureBuffer(
node_id node, OMX_U32 port_index, size_t size,
- buffer_id *buffer, void **buffer_data, native_handle_t **native_handle) = 0;
+ buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle) = 0;
// Allocate an OMX buffer of size |allotedSize|. Use |params| as the backup buffer, which
// may be larger.
@@ -272,17 +277,18 @@
OMX_U32 mLevel;
};
-} // namespace android
-
-inline static const char *asString(android::MetadataBufferType i, const char *def = "??") {
+inline static const char *asString(MetadataBufferType i, const char *def = "??") {
using namespace android;
switch (i) {
case kMetadataBufferTypeCameraSource: return "CameraSource";
case kMetadataBufferTypeGrallocSource: return "GrallocSource";
case kMetadataBufferTypeANWBuffer: return "ANWBuffer";
+ case kMetadataBufferTypeNativeHandleSource: return "NativeHandleSource";
case kMetadataBufferTypeInvalid: return "Invalid";
default: return def;
}
}
+} // namespace android
+
#endif // ANDROID_IOMX_H_
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index f6d5f12..d14bb7b 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -271,6 +271,7 @@
bool mFatalError;
bool mShutdownInProgress;
bool mExplicitShutdown;
+ bool mIsLegacyVP9Decoder;
// If "mKeepComponentAllocated" we only transition back to Loaded state
// and do not release the component instance.
diff --git a/include/media/stagefright/CameraSource.h b/include/media/stagefright/CameraSource.h
index 399f363..c2e75a6 100644
--- a/include/media/stagefright/CameraSource.h
+++ b/include/media/stagefright/CameraSource.h
@@ -28,6 +28,7 @@
#include <utils/List.h>
#include <utils/RefBase.h>
#include <utils/String16.h>
+#include <MetadataBufferType.h>
namespace android {
@@ -118,11 +119,11 @@
* Tell whether this camera source stores meta data or real YUV
* frame data in video buffers.
*
- * @return true if meta data is stored in the video
- * buffers; false if real YUV data is stored in
+ * @return a valid type if meta data is stored in the video
+ * buffers; kMetadataBufferTypeInvalid if real YUV data is stored in
* the video buffers.
*/
- bool isMetaDataStoredInVideoBuffers() const;
+ MetadataBufferType metaDataStoredInVideoBuffers() const;
virtual void signalBufferReturned(MediaBuffer* buffer);
diff --git a/include/media/stagefright/MediaCodecSource.h b/include/media/stagefright/MediaCodecSource.h
index 035e8ae..cc62786 100644
--- a/include/media/stagefright/MediaCodecSource.h
+++ b/include/media/stagefright/MediaCodecSource.h
@@ -37,7 +37,6 @@
public MediaBufferObserver {
enum FlagBits {
FLAG_USE_SURFACE_INPUT = 1,
- FLAG_USE_METADATA_INPUT = 2,
FLAG_PREFER_SOFTWARE_CODEC = 4, // used for testing only
};
diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h
index 4f7426d..ba375a2 100644
--- a/include/media/stagefright/MetaData.h
+++ b/include/media/stagefright/MetaData.h
@@ -62,6 +62,7 @@
kKeyOpusHeader = 'ohdr', // raw data
kKeyOpusCodecDelay = 'ocod', // uint64_t (codec delay in ns)
kKeyOpusSeekPreRoll = 'ospr', // uint64_t (seek preroll in ns)
+ kKeyVp9CodecPrivate = 'vp9p', // raw data (vp9 csd information)
kKeyWantsNALFragments = 'NALf',
kKeyIsSyncFrame = 'sync', // int32_t (bool)
kKeyIsCodecConfig = 'conf', // int32_t (bool)
diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h
index 2177c00..ca3a3bf 100644
--- a/include/media/stagefright/SurfaceMediaSource.h
+++ b/include/media/stagefright/SurfaceMediaSource.h
@@ -25,6 +25,8 @@
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MediaBuffer.h>
+#include <MetadataBufferType.h>
+
#include "foundation/ABase.h"
namespace android {
@@ -109,9 +111,9 @@
void dump(String8& result, const char* prefix, char* buffer,
size_t SIZE) const;
- // isMetaDataStoredInVideoBuffers tells the encoder whether we will
- // pass metadata through the buffers. Currently, it is force set to true
- bool isMetaDataStoredInVideoBuffers() const;
+ // metaDataStoredInVideoBuffers tells the encoder what kind of metadata
+ // is passed through the buffers. Currently, it is set to ANWBuffer
+ MetadataBufferType metaDataStoredInVideoBuffers() const;
sp<IGraphicBufferProducer> getProducer() const { return mProducer; }
@@ -234,6 +236,9 @@
Condition mMediaBuffersAvailableCondition;
+ // Allocate and return a new MediaBuffer and pass the ANW buffer as metadata into it.
+ void passMetadataBuffer_l(MediaBuffer **buffer, ANativeWindowBuffer *bufferHandle) const;
+
// Avoid copying and equating and default constructor
DISALLOW_EVIL_CONSTRUCTORS(SurfaceMediaSource);
};
diff --git a/include/media/stagefright/foundation/AMessage.h b/include/media/stagefright/foundation/AMessage.h
index 09d2ad8..87c32a6 100644
--- a/include/media/stagefright/foundation/AMessage.h
+++ b/include/media/stagefright/foundation/AMessage.h
@@ -62,7 +62,29 @@
AMessage();
AMessage(uint32_t what, const sp<const AHandler> &handler);
- static sp<AMessage> FromParcel(const Parcel &parcel);
+ // Construct an AMessage from a parcel.
+ // maxNestingLevel determines how many levels an AMessage can be nested
+ // inside another AMessage. The default value here is arbitrarily set to 255.
+ // FromParcel() returns NULL on error, which occurs when the input parcel
+ // contains
+ // - an AMessage nested deeper than maxNestingLevel; or
+ // - an item whose type is not recognized by this function.
+ // Types currently recognized by this function are:
+ // Item types set/find function suffixes
+ // ==========================================
+ // int32_t Int32
+ // int64_t Int64
+ // size_t Size
+ // float Float
+ // double Double
+ // AString String
+ // AMessage Message
+ static sp<AMessage> FromParcel(const Parcel &parcel,
+ size_t maxNestingLevel = 255);
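+ //
+ // A minimal round-trip sketch (hypothetical caller code):
+ //   sp<AMessage> msg = new AMessage();
+ //   msg->setInt32("value", 42);
+ //   Parcel parcel;
+ //   msg->writeToParcel(&parcel);
+ //   parcel.setDataPosition(0);
+ //   sp<AMessage> copy = AMessage::FromParcel(parcel); // NULL on malformed input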
+
+ // Write this AMessage to a parcel.
+ // All items in the AMessage must have types that are recognized by
+ // FromParcel(); otherwise, a TRESPASS error will occur.
void writeToParcel(Parcel *parcel) const;
void setWhat(uint32_t what);
diff --git a/include/ndk/NdkImage.h b/include/ndk/NdkImage.h
index 5c92294..eab7ead 100644
--- a/include/ndk/NdkImage.h
+++ b/include/ndk/NdkImage.h
@@ -14,6 +14,15 @@
* limitations under the License.
*/
+/**
+ * @addtogroup Media Camera
+ * @{
+ */
+
+/**
+ * @file NdkImage.h
+ */
+
/*
* This file defines an NDK API.
* Do not remove methods.
@@ -44,7 +53,8 @@
AIMAGE_FORMAT_RAW10 = 0x25,
AIMAGE_FORMAT_RAW12 = 0x26,
AIMAGE_FORMAT_DEPTH16 = 0x44363159,
- AIMAGE_FORMAT_DEPTH_POINT_CLOUD = 0x101
+ AIMAGE_FORMAT_DEPTH_POINT_CLOUD = 0x101,
+ AIMAGE_FORMAT_PRIVATE = 0x22 ///< Not supported by AImageReader yet
};
typedef struct AImageCropRect {
@@ -97,3 +107,5 @@
#endif
#endif //_NDK_IMAGE_H
+
+/** @} */
diff --git a/include/ndk/NdkImageReader.h b/include/ndk/NdkImageReader.h
index 041c378..9e7483d 100644
--- a/include/ndk/NdkImageReader.h
+++ b/include/ndk/NdkImageReader.h
@@ -14,6 +14,15 @@
* limitations under the License.
*/
+/**
+ * @addtogroup Media Camera
+ * @{
+ */
+
+/**
+ * @file NdkImageReader.h
+ */
+
/*
* This file defines an NDK API.
* Do not remove methods.
@@ -75,3 +84,5 @@
#endif
#endif //_NDK_IMAGE_READER_H
+
+/** @} */
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index babe4ed..22a5acd 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -536,7 +536,6 @@
mTimestampStartupGlitchReported = false;
mRetrogradeMotionReported = false;
mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
- mComputedLatencyMs = 0.;
mUnderrunCountOffset = 0;
mFramesWritten = 0;
mFramesWrittenServerOffset = 0;
@@ -570,7 +569,6 @@
mTimestampStartupGlitchReported = false;
mRetrogradeMotionReported = false;
mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
- mComputedLatencyMs = 0.;
// read last server side position change via timestamp.
ExtendedTimestamp ets;
@@ -2375,25 +2373,22 @@
if (location == ExtendedTimestamp::LOCATION_SERVER) {
ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL,
"getTimestamp() location moved from kernel to server");
- const double latencyMs = mComputedLatencyMs > 0.
- ? mComputedLatencyMs : mAfLatency;
const int64_t frames =
- int64_t(latencyMs * mSampleRate * mPlaybackRate.mSpeed / 1000);
- ALOGV("mComputedLatencyMs:%lf mAfLatency:%u frame adjustment:%lld",
- mComputedLatencyMs, mAfLatency, (long long)frames);
+ (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
+ ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0)
+ ?
+ int64_t((double)mAfLatency * mSampleRate * mPlaybackRate.mSpeed
+ / 1000)
+ :
+ (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
+ - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]);
+ ALOGV("frame adjustment:%lld timestamp:%s",
+ (long long)frames, ets.toString().c_str());
if (frames >= ets.mPosition[location]) {
timestamp.mPosition = 0;
} else {
timestamp.mPosition = (uint32_t)(ets.mPosition[location] - frames);
}
- } else if (location == ExtendedTimestamp::LOCATION_KERNEL) {
- const double bufferDiffMs =
- (double)(ets.mPosition[ExtendedTimestamp::LOCATION_SERVER]
- - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL])
- * 1000 / ((double)mSampleRate * mPlaybackRate.mSpeed);
- mComputedLatencyMs = bufferDiffMs > 0. ? bufferDiffMs : 0.;
- ALOGV("mComputedLatencyMs:%lf mAfLatency:%d",
- mComputedLatencyMs, mAfLatency);
}
mPreviousLocation = location;
} else {
diff --git a/media/libmedia/IMediaExtractor.cpp b/media/libmedia/IMediaExtractor.cpp
index f142ccc..e8ad75b 100644
--- a/media/libmedia/IMediaExtractor.cpp
+++ b/media/libmedia/IMediaExtractor.cpp
@@ -239,7 +239,7 @@
void registerMediaExtractor(
const sp<IMediaExtractor> &extractor,
- const sp<IDataSource> &source,
+ const sp<DataSource> &source,
const char *mime) {
ExtractorInstance ex;
ex.mime = mime == NULL ? "NULL" : mime;
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index 61fba35..8ebb355 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -25,6 +25,7 @@
#include <media/IOMX.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/openmax/OMX_IndexExt.h>
+#include <utils/NativeHandle.h>
namespace android {
@@ -60,6 +61,7 @@
SET_INTERNAL_OPTION,
UPDATE_GRAPHIC_BUFFER_IN_META,
CONFIGURE_VIDEO_TUNNEL_MODE,
+ UPDATE_NATIVE_HANDLE_IN_META,
};
class BpOMX : public BpInterface<IOMX> {
@@ -313,6 +315,24 @@
return err;
}
+ virtual status_t updateNativeHandleInMeta(
+ node_id node, OMX_U32 port_index,
+ const sp<NativeHandle> &nativeHandle, buffer_id buffer) {
+ Parcel data, reply;
+ data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
+ data.writeInt32((int32_t)node);
+ data.writeInt32(port_index);
+ data.writeInt32(nativeHandle != NULL);
+ if (nativeHandle != NULL) {
+ data.writeNativeHandle(nativeHandle->handle());
+ }
+ data.writeInt32((int32_t)buffer);
+ remote()->transact(UPDATE_NATIVE_HANDLE_IN_META, data, &reply);
+
+ status_t err = reply.readInt32();
+ return err;
+ }
+
virtual status_t createInputSurface(
node_id node, OMX_U32 port_index, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
@@ -416,7 +436,9 @@
data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
data.writeInt32((int32_t)node);
data.writeInt32(port_index);
- data.writeInt32((uint32_t)enable);
+ data.writeInt32((int32_t)enable);
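+ // Also send the requested metadata buffer type; the service side passes it
+ // into storeMetaDataInBuffers() as the in/out type argument.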
+ data.writeInt32(type == NULL ? kMetadataBufferTypeANWBuffer : *type);
+
remote()->transact(STORE_META_DATA_IN_BUFFERS, data, &reply);
// read type even storeMetaDataInBuffers failed
@@ -465,7 +487,7 @@
virtual status_t allocateSecureBuffer(
node_id node, OMX_U32 port_index, size_t size,
- buffer_id *buffer, void **buffer_data, native_handle_t **native_handle) {
+ buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle) {
Parcel data, reply;
data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
data.writeInt32((int32_t)node);
@@ -484,7 +506,8 @@
*buffer = (buffer_id)reply.readInt32();
*buffer_data = (void *)reply.readInt64();
if (*buffer_data == NULL) {
- *native_handle = reply.readNativeHandle();
+ *native_handle = NativeHandle::create(
+ reply.readNativeHandle(), true /* ownsHandle */);
} else {
*native_handle = NULL;
}
@@ -908,6 +931,25 @@
return NO_ERROR;
}
+ case UPDATE_NATIVE_HANDLE_IN_META:
+ {
+ CHECK_OMX_INTERFACE(IOMX, data, reply);
+
+ node_id node = (node_id)data.readInt32();
+ OMX_U32 port_index = data.readInt32();
+ native_handle *handle = NULL;
+ if (data.readInt32()) {
+ handle = data.readNativeHandle();
+ }
+ buffer_id buffer = (buffer_id)data.readInt32();
+
+ status_t err = updateNativeHandleInMeta(
+ node, port_index, NativeHandle::create(handle, true /* ownsHandle */), buffer);
+ reply->writeInt32(err);
+
+ return NO_ERROR;
+ }
+
case CREATE_INPUT_SURFACE:
{
CHECK_OMX_INTERFACE(IOMX, data, reply);
@@ -1001,7 +1043,7 @@
OMX_U32 port_index = data.readInt32();
OMX_BOOL enable = (OMX_BOOL)data.readInt32();
- MetadataBufferType type = kMetadataBufferTypeInvalid;
+ MetadataBufferType type = (MetadataBufferType)data.readInt32();
status_t err = storeMetaDataInBuffers(node, port_index, enable, &type);
reply->writeInt32(type);
@@ -1063,7 +1105,7 @@
buffer_id buffer;
void *buffer_data = NULL;
- native_handle_t *native_handle = NULL;
+ sp<NativeHandle> native_handle;
status_t err = allocateSecureBuffer(
node, port_index, size, &buffer, &buffer_data, &native_handle);
reply->writeInt32(err);
@@ -1072,7 +1114,7 @@
reply->writeInt32((int32_t)buffer);
reply->writeInt64((uintptr_t)buffer_data);
if (buffer_data == NULL) {
- reply->writeNativeHandle(native_handle);
+ reply->writeNativeHandle(native_handle == NULL ? NULL : native_handle->handle());
}
}
diff --git a/media/libmedia/MediaCodecInfo.cpp b/media/libmedia/MediaCodecInfo.cpp
index 06abd8d..1b3b3eb 100644
--- a/media/libmedia/MediaCodecInfo.cpp
+++ b/media/libmedia/MediaCodecInfo.cpp
@@ -75,6 +75,8 @@
}
uint32_t flags = static_cast<uint32_t>(parcel.readInt32());
sp<AMessage> details = AMessage::FromParcel(parcel);
+ if (details == NULL)
+ return NULL;
if (caps != NULL) {
caps->mFlags = flags;
caps->mDetails = details;
@@ -163,6 +165,8 @@
for (size_t i = 0; i < size; i++) {
AString mime = AString::FromParcel(parcel);
sp<Capabilities> caps = Capabilities::FromParcel(parcel);
+ if (caps == NULL)
+ return NULL;
if (info != NULL) {
info->mCaps.add(mime, caps);
}
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 68860d2..8d86366 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -51,6 +51,7 @@
$(TOP)/frameworks/av/include/media \
$(TOP)/frameworks/av/include/camera \
$(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/frameworks/native/include/media/hardware \
$(TOP)/external/tremolo/Tremolo \
libcore/include \
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 7d3c671..cd91e72 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -640,6 +640,28 @@
return p;
}
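+
+// ServiceDeathNotifier watches a remote media service binder and, if that
+// service dies, promotes the weak listener reference and sends
+// MEDIA_ERROR / MEDIA_ERROR_SERVER_DIED with a code identifying which
+// service went away.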
+MediaPlayerService::Client::ServiceDeathNotifier::ServiceDeathNotifier(
+ const sp<IBinder>& service,
+ const sp<MediaPlayerBase>& listener,
+ int which) {
+ mService = service;
+ mListener = listener;
+ mWhich = which;
+}
+
+MediaPlayerService::Client::ServiceDeathNotifier::~ServiceDeathNotifier() {
+ mService->unlinkToDeath(this);
+}
+
+void MediaPlayerService::Client::ServiceDeathNotifier::binderDied(const wp<IBinder>& /*who*/) {
+ sp<MediaPlayerBase> listener = mListener.promote();
+ if (listener != NULL) {
+ listener->sendEvent(MEDIA_ERROR, MEDIA_ERROR_SERVER_DIED, mWhich);
+ } else {
+ ALOGW("listener for process %d death is gone", mWhich);
+ }
+}
+
sp<MediaPlayerBase> MediaPlayerService::Client::setDataSource_pre(
player_type playerType)
{
@@ -651,6 +673,19 @@
return p;
}
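+
+ // Watch the extractor, codec, and audio_flinger services so the client is
+ // notified with MEDIA_ERROR_SERVER_DIED if any of them dies.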
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder = sm->getService(String16("media.extractor"));
+ mExtractorDeathListener = new ServiceDeathNotifier(binder, p, MEDIAEXTRACTOR_PROCESS_DEATH);
+ binder->linkToDeath(mExtractorDeathListener);
+
+ binder = sm->getService(String16("media.codec"));
+ mCodecDeathListener = new ServiceDeathNotifier(binder, p, MEDIACODEC_PROCESS_DEATH);
+ binder->linkToDeath(mCodecDeathListener);
+
+ binder = sm->getService(String16("media.audio_flinger"));
+ mAudioDeathListener = new ServiceDeathNotifier(binder, p, AUDIO_PROCESS_DEATH);
+ binder->linkToDeath(mAudioDeathListener);
+
if (!p->hardwareOutput()) {
Mutex::Autolock l(mLock);
mAudioOutput = new AudioOutput(mAudioSessionId, IPCThreadState::self()->getCallingUid(),
@@ -2115,7 +2150,7 @@
//
// The underrun event is sent once per track underrun; the condition is reset
// when more data is sent to the AudioTrack.
- ALOGI("callbackwrapper: EVENT_UNDERRUN (discarded)");
+ ALOGD("callbackwrapper: EVENT_UNDERRUN (discarded)");
break;
default:
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 1cf648e..0ecfdbc 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -227,6 +227,14 @@
void removeClient(wp<Client> client);
+ enum {
+ MEDIASERVER_PROCESS_DEATH = 0,
+ MEDIAEXTRACTOR_PROCESS_DEATH = 1,
+ MEDIACODEC_PROCESS_DEATH = 2,
+ AUDIO_PROCESS_DEATH = 3,
+ CAMERA_PROCESS_DEATH = 4
+ };
+
// For battery usage tracking purpose
struct BatteryUsageInfo {
// how many streams are being played by one UID
@@ -334,6 +342,22 @@
audio_session_t getAudioSessionId() { return mAudioSessionId; }
private:
+ class ServiceDeathNotifier: public IBinder::DeathRecipient
+ {
+ public:
+ ServiceDeathNotifier(
+ const sp<IBinder>& service,
+ const sp<MediaPlayerBase>& listener,
+ int which);
+ virtual ~ServiceDeathNotifier();
+ virtual void binderDied(const wp<IBinder>& who);
+
+ private:
+ int mWhich;
+ sp<IBinder> mService;
+ wp<MediaPlayerBase> mListener;
+ };
+
friend class MediaPlayerService;
Client( const sp<MediaPlayerService>& service,
pid_t pid,
@@ -393,6 +417,9 @@
// getMetadata clears this set.
media::Metadata::Filter mMetadataUpdated; // protected by mLock
+ sp<IBinder::DeathRecipient> mExtractorDeathListener;
+ sp<IBinder::DeathRecipient> mCodecDeathListener;
+ sp<IBinder::DeathRecipient> mAudioDeathListener;
#if CALLBACK_ANTAGONIZER
Antagonizer* mAntagonizer;
#endif
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 3152e04..2832166 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -335,6 +335,28 @@
release();
}
+MediaRecorderClient::ServiceDeathNotifier::ServiceDeathNotifier(
+ const sp<IBinder>& service,
+ const sp<IMediaRecorderClient>& listener,
+ int which) {
+ mService = service;
+ mListener = listener;
+ mWhich = which;
+}
+
+MediaRecorderClient::ServiceDeathNotifier::~ServiceDeathNotifier() {
+ mService->unlinkToDeath(this);
+}
+
+void MediaRecorderClient::ServiceDeathNotifier::binderDied(const wp<IBinder>& /*who*/) {
+ sp<IMediaRecorderClient> listener = mListener.promote();
+ if (listener != NULL) {
+ listener->notify(MEDIA_ERROR, MEDIA_ERROR_SERVER_DIED, mWhich);
+ } else {
+ ALOGW("listener for process %d death is gone", mWhich);
+ }
+}
+
status_t MediaRecorderClient::setListener(const sp<IMediaRecorderClient>& listener)
{
ALOGV("setListener");
@@ -343,7 +365,25 @@
ALOGE("recorder is not initialized");
return NO_INIT;
}
- return mRecorder->setListener(listener);
+ mRecorder->setListener(listener);
+
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder = sm->getService(String16("media.camera"));
+ mCameraDeathListener = new ServiceDeathNotifier(binder, listener,
+ MediaPlayerService::CAMERA_PROCESS_DEATH);
+ binder->linkToDeath(mCameraDeathListener);
+
+ binder = sm->getService(String16("media.codec"));
+ mCodecDeathListener = new ServiceDeathNotifier(binder, listener,
+ MediaPlayerService::MEDIACODEC_PROCESS_DEATH);
+ binder->linkToDeath(mCodecDeathListener);
+
+ binder = sm->getService(String16("media.audio_flinger"));
+ mAudioDeathListener = new ServiceDeathNotifier(binder, listener,
+ MediaPlayerService::AUDIO_PROCESS_DEATH);
+ binder->linkToDeath(mAudioDeathListener);
+
+ return OK;
}
status_t MediaRecorderClient::setClientName(const String16& clientName) {
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index 5a080df..6e70194 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -29,6 +29,22 @@
class MediaRecorderClient : public BnMediaRecorder
{
+ class ServiceDeathNotifier: public IBinder::DeathRecipient
+ {
+ public:
+ ServiceDeathNotifier(
+ const sp<IBinder>& service,
+ const sp<IMediaRecorderClient>& listener,
+ int which);
+ virtual ~ServiceDeathNotifier();
+ virtual void binderDied(const wp<IBinder>& who);
+
+ private:
+ int mWhich;
+ sp<IBinder> mService;
+ wp<IMediaRecorderClient> mListener;
+ };
+
public:
virtual status_t setCamera(const sp<hardware::ICamera>& camera,
const sp<ICameraRecordingProxy>& proxy);
@@ -69,6 +85,10 @@
const String16& opPackageName);
virtual ~MediaRecorderClient();
+ sp<IBinder::DeathRecipient> mCameraDeathListener;
+ sp<IBinder::DeathRecipient> mCodecDeathListener;
+ sp<IBinder::DeathRecipient> mAudioDeathListener;
+
pid_t mPid;
Mutex mLock;
MediaRecorderBase *mRecorder;
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 3f7367f..6114af8 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -1474,8 +1474,8 @@
CHECK(mFrameRate != -1);
- mIsMetaDataStoredInVideoBuffers =
- (*cameraSource)->isMetaDataStoredInVideoBuffers();
+ mMetaDataStoredInVideoBuffers =
+ (*cameraSource)->metaDataStoredInVideoBuffers();
return OK;
}
@@ -1565,11 +1565,11 @@
format->setFloat("operating-rate", mCaptureFps);
}
- uint32_t flags = 0;
- if (mIsMetaDataStoredInVideoBuffers) {
- flags |= MediaCodecSource::FLAG_USE_METADATA_INPUT;
+ if (mMetaDataStoredInVideoBuffers != kMetadataBufferTypeInvalid) {
+ format->setInt32("android._input-metadata-buffer-type", mMetaDataStoredInVideoBuffers);
}
+ uint32_t flags = 0;
if (cameraSource == NULL) {
flags |= MediaCodecSource::FLAG_USE_SURFACE_INPUT;
} else {
@@ -1866,7 +1866,7 @@
mCaptureFps = 0.0f;
mTimeBetweenCaptureUs = -1;
mCameraSourceTimeLapse = NULL;
- mIsMetaDataStoredInVideoBuffers = false;
+ mMetaDataStoredInVideoBuffers = kMetadataBufferTypeInvalid;
mEncoderProfiles = MediaProfiles::getInstance();
mRotationDegrees = 0;
mLatitudex10000 = -3600000;
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index a73197f..d7f43bc 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -24,6 +24,8 @@
#include <system/audio.h>
+#include <MetadataBufferType.h>
+
namespace android {
class Camera;
@@ -121,7 +123,7 @@
String8 mParams;
- bool mIsMetaDataStoredInVideoBuffers;
+ MetadataBufferType mMetaDataStoredInVideoBuffers;
MediaProfiles *mEncoderProfiles;
int64_t mPauseStartTimeUs;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index f159882..99996ed 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -503,6 +503,7 @@
mFatalError(false),
mShutdownInProgress(false),
mExplicitShutdown(false),
+ mIsLegacyVP9Decoder(false),
mEncoderDelay(0),
mEncoderPadding(0),
mRotationDegrees(0),
@@ -792,10 +793,10 @@
MetadataBufferType type =
portIndex == kPortIndexOutput ? mOutputMetadataType : mInputMetadataType;
size_t bufSize = def.nBufferSize;
- if (type == kMetadataBufferTypeGrallocSource) {
- bufSize = sizeof(VideoGrallocMetadata);
- } else if (type == kMetadataBufferTypeANWBuffer) {
+ if (type == kMetadataBufferTypeANWBuffer) {
bufSize = sizeof(VideoNativeMetadata);
+ } else if (type == kMetadataBufferTypeNativeHandleSource) {
+ bufSize = sizeof(VideoNativeHandleMetadata);
}
// If using gralloc or native source input metadata buffers, allocate largest
@@ -803,7 +804,7 @@
// may require gralloc source. For camera source, allocate at least enough
// size for native metadata buffers.
size_t allottedSize = bufSize;
- if (portIndex == kPortIndexInput && type >= kMetadataBufferTypeGrallocSource) {
+ if (portIndex == kPortIndexInput && type == kMetadataBufferTypeANWBuffer) {
bufSize = max(sizeof(VideoGrallocMetadata), sizeof(VideoNativeMetadata));
} else if (portIndex == kPortIndexInput && type == kMetadataBufferTypeCameraSource) {
bufSize = max(bufSize, sizeof(VideoNativeMetadata));
@@ -866,7 +867,7 @@
mem.clear();
void *ptr = NULL;
- native_handle_t *native_handle = NULL;
+ sp<NativeHandle> native_handle;
err = mOMX->allocateSecureBuffer(
mNode, portIndex, bufSize, &info.mBufferID,
&ptr, &native_handle);
@@ -879,8 +880,11 @@
// TRICKY2: use native handle as the base of the ABuffer if received one,
// because Widevine source only receives these base addresses.
- info.mData = new ABuffer(ptr != NULL ? ptr : (void *)native_handle, bufSize);
- info.mNativeHandle = NativeHandle::create(native_handle, true /* ownsHandle */);
+ const native_handle_t *native_handle_ptr =
+ native_handle == NULL ? NULL : native_handle->handle();
+ info.mData = new ABuffer(
+ ptr != NULL ? ptr : (void *)native_handle_ptr, bufSize);
+ info.mNativeHandle = native_handle;
info.mCodecData = info.mData;
} else if (mQuirks & requiresAllocateBufferBit) {
err = mOMX->allocateBufferWithBackup(
@@ -1724,19 +1728,20 @@
int32_t storeMeta;
if (encoder
- && msg->findInt32("store-metadata-in-buffers", &storeMeta)
- && storeMeta != 0) {
- err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexInput, OMX_TRUE, &mInputMetadataType);
+ && msg->findInt32("android._input-metadata-buffer-type", &storeMeta)
+ && storeMeta != kMetadataBufferTypeInvalid) {
+ mInputMetadataType = (MetadataBufferType)storeMeta;
+ err = mOMX->storeMetaDataInBuffers(
+ mNode, kPortIndexInput, OMX_TRUE, &mInputMetadataType);
if (err != OK) {
ALOGE("[%s] storeMetaDataInBuffers (input) failed w/ err %d",
mComponentName.c_str(), err);
return err;
- }
- // For this specific case we could be using camera source even if storeMetaDataInBuffers
- // returns Gralloc source. Pretend that we are; this will force us to use nBufferSize.
- if (mInputMetadataType == kMetadataBufferTypeGrallocSource) {
- mInputMetadataType = kMetadataBufferTypeCameraSource;
+ } else if (storeMeta == kMetadataBufferTypeANWBuffer
+ && mInputMetadataType == kMetadataBufferTypeGrallocSource) {
+ // IOMX translates ANWBuffers to gralloc source already.
+ mInputMetadataType = (MetadataBufferType)storeMeta;
}
uint32_t usageBits;
@@ -1782,9 +1787,10 @@
mIsVideo = video;
if (encoder && video) {
OMX_BOOL enable = (OMX_BOOL) (prependSPSPPS
- && msg->findInt32("store-metadata-in-buffers-output", &storeMeta)
+ && msg->findInt32("android._store-metadata-in-buffers-output", &storeMeta)
&& storeMeta != 0);
+ mOutputMetadataType = kMetadataBufferTypeNativeHandleSource;
err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexOutput, enable, &mOutputMetadataType);
if (err != OK) {
ALOGE("[%s] storeMetaDataInBuffers (output) failed w/ err %d",
@@ -1913,6 +1919,7 @@
}
// Always try to enable dynamic output buffers on native surface
+ mOutputMetadataType = kMetadataBufferTypeANWBuffer;
err = mOMX->storeMetaDataInBuffers(
mNode, kPortIndexOutput, OMX_TRUE, &mOutputMetadataType);
if (err != OK) {
@@ -3133,6 +3140,20 @@
return err;
}
+ if (compressionFormat == OMX_VIDEO_CodingVP9) {
+ OMX_VIDEO_PARAM_PROFILELEVELTYPE params;
+ InitOMXParams(&params);
+ params.nPortIndex = kPortIndexInput;
+ // Check if VP9 decoder advertises supported profiles.
+ params.nProfileIndex = 0;
+ status_t err = mOMX->getParameter(
+ mNode,
+ OMX_IndexParamVideoProfileLevelQuerySupported,
+ &params,
+ sizeof(params));
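+ // Decoders that cannot report supported profiles are treated as legacy
+ // VP9 decoders; codec specific data is not submitted to them (see
+ // onInputBufferFilled).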
+ mIsLegacyVP9Decoder = err != OK;
+ }
+
err = setVideoPortFormatType(
kPortIndexInput, compressionFormat, OMX_COLOR_FormatUnused);
@@ -5683,9 +5704,17 @@
OMX_U32 flags = OMX_BUFFERFLAG_ENDOFFRAME;
- int32_t isCSD;
+ MetadataBufferType metaType = mCodec->mInputMetadataType;
+ int32_t isCSD = 0;
if (buffer->meta()->findInt32("csd", &isCSD) && isCSD != 0) {
+ if (mCodec->mIsLegacyVP9Decoder) {
+ ALOGV("[%s] is legacy VP9 decoder. Ignore %u codec specific data",
+ mCodec->mComponentName.c_str(), bufferID);
+ postFillThisBuffer(info);
+ break;
+ }
flags |= OMX_BUFFERFLAG_CODECCONFIG;
+ metaType = kMetadataBufferTypeInvalid;
}
if (eos) {
@@ -5699,7 +5728,7 @@
buffer.get(), info->mCodecData.get());
sp<DataConverter> converter = mCodec->mConverter[kPortIndexInput];
- if (converter == NULL) {
+ if (converter == NULL || isCSD) {
converter = getCopyConverter();
}
status_t err = converter->convert(buffer, info->mCodecData);
@@ -5745,14 +5774,50 @@
}
}
info->checkReadFence("onInputBufferFilled");
- status_t err2 = mCodec->mOMX->emptyBuffer(
- mCodec->mNode,
- bufferID,
- 0,
- info->mCodecData->size(),
- flags,
- timeUs,
- info->mFenceFd);
+
+ status_t err2 = OK;
+ switch (metaType) {
+ case kMetadataBufferTypeInvalid:
+ break;
+#ifndef OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
+ case kMetadataBufferTypeNativeHandleSource:
+ if (info->mCodecData->size() >= sizeof(VideoNativeHandleMetadata)) {
+ VideoNativeHandleMetadata *vnhmd =
+ (VideoNativeHandleMetadata*)info->mCodecData->base();
+ err2 = mCodec->mOMX->updateNativeHandleInMeta(
+ mCodec->mNode, kPortIndexInput,
+ NativeHandle::create(vnhmd->pHandle, false /* ownsHandle */),
+ bufferID);
+ }
+ break;
+ case kMetadataBufferTypeANWBuffer:
+ if (info->mCodecData->size() >= sizeof(VideoNativeMetadata)) {
+ VideoNativeMetadata *vnmd = (VideoNativeMetadata*)info->mCodecData->base();
+ err2 = mCodec->mOMX->updateGraphicBufferInMeta(
+ mCodec->mNode, kPortIndexInput,
+ new GraphicBuffer(vnmd->pBuffer, false /* keepOwnership */),
+ bufferID);
+ }
+ break;
+#endif
+ default:
+ ALOGW("Can't marshall %s data in %zu sized buffers in %zu-bit mode",
+ asString(metaType), info->mCodecData->size(),
+ sizeof(buffer_handle_t) * 8);
+ err2 = ERROR_UNSUPPORTED;
+ break;
+ }
+
+ if (err2 == OK) {
+ err2 = mCodec->mOMX->emptyBuffer(
+ mCodec->mNode,
+ bufferID,
+ 0,
+ info->mCodecData->size(),
+ flags,
+ timeUs,
+ info->mFenceFd);
+ }
info->mFenceFd = -1;
if (err2 != OK) {
mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err2));
@@ -5938,18 +6003,15 @@
if (mCodec->usingMetadataOnEncoderOutput()) {
native_handle_t *handle = NULL;
- VideoGrallocMetadata &grallocMeta = *(VideoGrallocMetadata *)info->mData->data();
- VideoNativeMetadata &nativeMeta = *(VideoNativeMetadata *)info->mData->data();
- if (info->mData->size() >= sizeof(grallocMeta)
- && grallocMeta.eType == kMetadataBufferTypeGrallocSource) {
- handle = (native_handle_t *)(uintptr_t)grallocMeta.pHandle;
- } else if (info->mData->size() >= sizeof(nativeMeta)
- && nativeMeta.eType == kMetadataBufferTypeANWBuffer) {
+ VideoNativeHandleMetadata &nativeMeta =
+ *(VideoNativeHandleMetadata *)info->mData->data();
+ if (info->mData->size() >= sizeof(nativeMeta)
+ && nativeMeta.eType == kMetadataBufferTypeNativeHandleSource) {
#ifdef OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
- // ANativeWindowBuffer is only valid on 32-bit/mediaserver process
+ // handle is only valid on 32-bit/mediaserver process
handle = NULL;
#else
- handle = (native_handle_t *)nativeMeta.pBuffer->handle;
+ handle = (native_handle_t *)nativeMeta.pHandle;
#endif
}
info->mData->meta()->setPointer("handle", handle);
@@ -6636,8 +6698,14 @@
sp<IGraphicBufferProducer> bufferProducer;
if (err == OK) {
+ mCodec->mInputMetadataType = kMetadataBufferTypeANWBuffer;
err = mCodec->mOMX->createInputSurface(
- mCodec->mNode, kPortIndexInput, dataSpace, &bufferProducer, &mCodec->mInputMetadataType);
+ mCodec->mNode, kPortIndexInput, dataSpace, &bufferProducer,
+ &mCodec->mInputMetadataType);
+ // framework uses ANW buffers internally instead of gralloc handles
+ if (mCodec->mInputMetadataType == kMetadataBufferTypeGrallocSource) {
+ mCodec->mInputMetadataType = kMetadataBufferTypeANWBuffer;
+ }
}
if (err == OK) {
@@ -6676,9 +6744,14 @@
notify->setMessage("output-format", mCodec->mOutputFormat);
if (err == OK) {
+ mCodec->mInputMetadataType = kMetadataBufferTypeANWBuffer;
err = mCodec->mOMX->setInputSurface(
mCodec->mNode, kPortIndexInput, surface->getBufferConsumer(),
&mCodec->mInputMetadataType);
+ // framework uses ANW buffers internally instead of gralloc handles
+ if (mCodec->mInputMetadataType == kMetadataBufferTypeGrallocSource) {
+ mCodec->mInputMetadataType = kMetadataBufferTypeANWBuffer;
+ }
}
if (err == OK) {
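The ACodec hunks above replace the loosely-typed "store-metadata-in-buffers" keys with
framework-private keys that carry an explicit MetadataBufferType. A minimal sketch of how a
caller would request ANW-buffer metadata input under the new scheme (values illustrative; the
key and constant are the ones introduced here, mirroring the PlaybackSession.cpp hunk later
in this change):

    // Hypothetical encoder format setup using the new typed key.
    sp<AMessage> format = new AMessage;
    format->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
    format->setInt32(
            "android._input-metadata-buffer-type", kMetadataBufferTypeANWBuffer);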
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 8e9db93..e087249 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -1244,13 +1244,19 @@
mFrameAvailableCondition.signal();
}
-bool CameraSource::isMetaDataStoredInVideoBuffers() const {
- ALOGV("isMetaDataStoredInVideoBuffers");
+MetadataBufferType CameraSource::metaDataStoredInVideoBuffers() const {
+ ALOGV("metaDataStoredInVideoBuffers");
// Output buffers will contain metadata if the camera sends us buffers in metadata mode or
// via the buffer queue.
- return (mVideoBufferMode == hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA ||
- mVideoBufferMode == hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE);
+ switch (mVideoBufferMode) {
+ case hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA:
+ return kMetadataBufferTypeNativeHandleSource;
+ case hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE:
+ return kMetadataBufferTypeANWBuffer;
+ default:
+ return kMetadataBufferTypeInvalid;
+ }
}
CameraSource::ProxyListener::ProxyListener(const sp<CameraSource>& source) {
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index f5549e4..f296d9a 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -1560,12 +1560,29 @@
// Calculate average frame rate.
if (!strncasecmp("video/", mime, 6)) {
size_t nSamples = mLastTrack->sampleTable->countSamples();
- int64_t durationUs;
- if (mLastTrack->meta->findInt64(kKeyDuration, &durationUs)) {
- if (durationUs > 0) {
- int32_t frameRate = (nSamples * 1000000LL +
- (durationUs >> 1)) / durationUs;
- mLastTrack->meta->setInt32(kKeyFrameRate, frameRate);
+ if (nSamples == 0) {
+ int32_t trackId;
+ if (mLastTrack->meta->findInt32(kKeyTrackID, &trackId)) {
+ for (size_t i = 0; i < mTrex.size(); i++) {
+ Trex *t = &mTrex.editItemAt(i);
+ if (t->track_ID == (uint32_t) trackId) {
+ if (t->default_sample_duration > 0) {
+ int32_t frameRate =
+ mLastTrack->timescale / t->default_sample_duration;
+ mLastTrack->meta->setInt32(kKeyFrameRate, frameRate);
+ }
+ break;
+ }
+ }
+ }
+ } else {
+ int64_t durationUs;
+ if (mLastTrack->meta->findInt64(kKeyDuration, &durationUs)) {
+ if (durationUs > 0) {
+ int32_t frameRate = (nSamples * 1000000LL +
+ (durationUs >> 1)) / durationUs;
+ mLastTrack->meta->setInt32(kKeyFrameRate, frameRate);
+ }
}
}
}
@@ -2929,7 +2946,7 @@
int32_t trackId;
if (track->meta->findInt32(kKeyTrackID, &trackId)) {
for (size_t i = 0; i < mTrex.size(); i++) {
- Trex *t = &mTrex.editItemAt(index);
+ Trex *t = &mTrex.editItemAt(i);
if (t->track_ID == (uint32_t) trackId) {
trex = t;
break;
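For fragmented MP4 files the sample table is empty, so the hunk above derives the frame rate
from the matching trex box instead; note the fragmented path truncates while the sample-count
path rounds. A self-contained sketch with assumed inputs:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // fragmented path: timescale / default_sample_duration, truncating
        uint32_t timescale = 30000, defaultSampleDuration = 1001;
        int32_t fragmentedFps = timescale / defaultSampleDuration;  // 29 (29.97 truncated)

        // non-fragmented path: rounded samples over duration
        int64_t nSamples = 300, durationUs = 10000000;  // 300 frames in 10 s
        int32_t fps = (nSamples * 1000000LL + (durationUs >> 1)) / durationUs;  // 30

        printf("%d %d\n", fragmentedFps, fps);
        return 0;
    }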
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 90c56f4..0aafa6b 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -449,10 +449,6 @@
mCodecLooper->setName("codec_looper");
mCodecLooper->start();
- if (mFlags & FLAG_USE_METADATA_INPUT) {
- mOutputFormat->setInt32("store-metadata-in-buffers", 1);
- }
-
if (mFlags & FLAG_USE_SURFACE_INPUT) {
mOutputFormat->setInt32("create-input-buffers-suspended", 1);
}
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index a523d0e..4dde5f6 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -111,6 +111,10 @@
node_id node, OMX_U32 port_index,
const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer);
+ virtual status_t updateNativeHandleInMeta(
+ node_id node, OMX_U32 port_index,
+ const sp<NativeHandle> &nativeHandle, buffer_id buffer);
+
virtual status_t createInputSurface(
node_id node, OMX_U32 port_index, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type);
@@ -127,7 +131,7 @@
virtual status_t allocateSecureBuffer(
node_id node, OMX_U32 port_index, size_t size,
- buffer_id *buffer, void **buffer_data, native_handle_t **native_handle);
+ buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle);
virtual status_t allocateBufferWithBackup(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
@@ -387,6 +391,13 @@
node, port_index, graphicBuffer, buffer);
}
+status_t MuxOMX::updateNativeHandleInMeta(
+ node_id node, OMX_U32 port_index,
+ const sp<NativeHandle> &nativeHandle, buffer_id buffer) {
+ return getOMX(node)->updateNativeHandleInMeta(
+ node, port_index, nativeHandle, buffer);
+}
+
status_t MuxOMX::createInputSurface(
node_id node, OMX_U32 port_index, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
@@ -415,7 +426,7 @@
status_t MuxOMX::allocateSecureBuffer(
node_id node, OMX_U32 port_index, size_t size,
- buffer_id *buffer, void **buffer_data, native_handle_t **native_handle) {
+ buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle) {
return getOMX(node)->allocateSecureBuffer(
node, port_index, size, buffer, buffer_data, native_handle);
}
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index e4bf67a..15ff569 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -23,6 +23,7 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
#include <OMX_IVCommon.h>
+#include <media/hardware/HardwareAPI.h>
#include <media/hardware/MetadataBufferType.h>
#include <ui/GraphicBuffer.h>
@@ -126,9 +127,9 @@
return OK;
}
-bool SurfaceMediaSource::isMetaDataStoredInVideoBuffers() const {
+MetadataBufferType SurfaceMediaSource::metaDataStoredInVideoBuffers() const {
ALOGV("isMetaDataStoredInVideoBuffers");
- return true;
+ return kMetadataBufferTypeANWBuffer;
}
int32_t SurfaceMediaSource::getFrameRate( ) const {
@@ -250,29 +251,19 @@
}
// Pass the data to the MediaBuffer. Pass in only the metadata
-// The metadata passed consists of two parts:
-// 1. First, there is an integer indicating that it is a GRAlloc
-// source (kMetadataBufferTypeGrallocSource)
-// 2. This is followed by the buffer_handle_t that is a handle to the
-// GRalloc buffer. The encoder needs to interpret this GRalloc handle
-// and encode the frames.
-// --------------------------------------------------------------
-// | kMetadataBufferTypeGrallocSource | sizeof(buffer_handle_t) |
-// --------------------------------------------------------------
// Note: Call only when you have the lock
-static void passMetadataBuffer(MediaBuffer **buffer,
- buffer_handle_t bufferHandle) {
- *buffer = new MediaBuffer(4 + sizeof(buffer_handle_t));
- char *data = (char *)(*buffer)->data();
+void SurfaceMediaSource::passMetadataBuffer_l(MediaBuffer **buffer,
+ ANativeWindowBuffer *bufferHandle) const {
+ *buffer = new MediaBuffer(sizeof(VideoNativeMetadata));
+ VideoNativeMetadata *data = (VideoNativeMetadata *)(*buffer)->data();
if (data == NULL) {
ALOGE("Cannot allocate memory for metadata buffer!");
return;
}
- OMX_U32 type = kMetadataBufferTypeGrallocSource;
- memcpy(data, &type, 4);
- memcpy(data + 4, &bufferHandle, sizeof(buffer_handle_t));
-
- ALOGV("handle = %p, , offset = %zu, length = %zu",
+ data->eType = metaDataStoredInVideoBuffers();
+ data->pBuffer = bufferHandle;
+ data->nFenceFd = -1;
+ ALOGV("handle = %p, offset = %zu, length = %zu",
bufferHandle, (*buffer)->range_length(), (*buffer)->range_offset());
}
@@ -361,7 +352,7 @@
mNumFramesEncoded++;
// Pass the data to the MediaBuffer. Pass in only the metadata
- passMetadataBuffer(buffer, mSlots[mCurrentSlot].mGraphicBuffer->handle);
+ passMetadataBuffer_l(buffer, mSlots[mCurrentSlot].mGraphicBuffer->getNativeBuffer());
(*buffer)->setObserver(this);
(*buffer)->add_ref();
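passMetadataBuffer_l now emits a VideoNativeMetadata record in place of the hand-packed
4-byte type tag plus buffer_handle_t. A hedged sketch of the consumer side, assuming the
struct from <media/hardware/HardwareAPI.h> and a hypothetical helper name:

    // Hypothetical reader for the metadata written above; the field names match
    // VideoNativeMetadata as used in this patch, everything else is assumed.
    static ANativeWindowBuffer *readAnwBuffer(MediaBuffer *mbuf) {
        if (mbuf->range_length() < sizeof(VideoNativeMetadata)) {
            return NULL;
        }
        VideoNativeMetadata *meta = (VideoNativeMetadata *)mbuf->data();
        if (meta->eType != kMetadataBufferTypeANWBuffer) {
            return NULL;
        }
        // nFenceFd is -1 here because the producer sets no fence
        return meta->pBuffer;
    }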
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 448f8aa..3e1badf 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -527,6 +527,16 @@
buffer->meta()->setInt32("csd", true);
buffer->meta()->setInt64("timeUs", 0);
msg->setBuffer("csd-2", buffer);
+ } else if (meta->findData(kKeyVp9CodecPrivate, &type, &data, &size)) {
+ sp<ABuffer> buffer = new (std::nothrow) ABuffer(size);
+ if (buffer.get() == NULL || buffer->base() == NULL) {
+ return NO_MEMORY;
+ }
+ memcpy(buffer->data(), data, size);
+
+ buffer->meta()->setInt32("csd", true);
+ buffer->meta()->setInt64("timeUs", 0);
+ msg->setBuffer("csd-0", buffer);
}
// TODO expose "crypto-key"/kKeyCryptoKey through public api
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
index 1e64b49..973c528 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
@@ -638,6 +638,8 @@
mChangingResolution = false;
resetDecoder();
resetPlugin();
+ mStride = outputBufferWidth();
+ setParams(mStride);
continue;
}
diff --git a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
index 8283ab5..27f860e 100644
--- a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
+++ b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
@@ -74,7 +74,8 @@
mIvColorFormat(IV_YUV_420P),
mNewWidth(mWidth),
mNewHeight(mHeight),
- mChangingResolution(false) {
+ mChangingResolution(false),
+ mStride(mWidth) {
initPorts(kNumBuffers, INPUT_BUF_SIZE, kNumBuffers, CODEC_MIME_TYPE);
// If input dump is enabled, then create an empty file
@@ -202,6 +203,8 @@
/* Set number of cores/threads to be used by the codec */
setNumCores();
+ mStride = 0;
+
return OK;
}
@@ -384,7 +387,8 @@
resetPlugin();
/* Set the run time (dynamic) parameters */
- setParams(displayStride);
+ mStride = outputBufferWidth();
+ setParams(mStride);
/* Set number of cores/threads to be used by the codec */
setNumCores();
@@ -548,6 +552,12 @@
List<BufferInfo *> &inQueue = getPortQueue(kInputPortIndex);
List<BufferInfo *> &outQueue = getPortQueue(kOutputPortIndex);
+ if (outputBufferWidth() != mStride) {
+ /* Set the run-time (dynamic) parameters */
+ mStride = outputBufferWidth();
+ setParams(mStride);
+ }
+
/* If input EOS is seen and decoder is not in flush mode,
* set the decoder in flush mode.
* There can be a case where EOS is sent along with last picture data
@@ -686,6 +696,8 @@
mChangingResolution = false;
resetDecoder();
resetPlugin();
+ mStride = outputBufferWidth();
+ setParams(mStride);
continue;
}
diff --git a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h
index f48b70b..025e9a0 100644
--- a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h
+++ b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.h
@@ -106,6 +106,7 @@
bool mChangingResolution;
bool mFlushNeeded;
bool mWaitForI;
+ size_t mStride;
status_t initDecoder();
status_t deInitDecoder();
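The SoftAVCDec and SoftMPEG2 hunks above share one pattern: cache the stride last sent to
the codec and re-issue setParams() whenever the output buffer width diverges (after init,
after a reset, or on a port reconfigure). A standalone sketch with hypothetical names:

    #include <cstddef>

    // Tracks the stride last communicated to a decoder; resyncNeeded() reports
    // when a fresh setParams(stride) call is due.
    class StrideTracker {
    public:
        StrideTracker() : mStride(0), mWidth(0) {}
        void onPortReconfigured(size_t newWidth) { mWidth = newWidth; }
        bool resyncNeeded() {
            if (mWidth != mStride) {
                mStride = mWidth;  // remember what the codec now believes
                return true;       // caller performs setParams(mStride)
            }
            return false;
        }
    private:
        size_t mStride;  // stride last pushed to the codec (0 = never)
        size_t mWidth;   // current output buffer width
    };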
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index 2a56ed5..ba1f263 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -228,6 +228,17 @@
BufferInfo *inInfo = *inQueue.begin();
OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+
+ // Software VP9 Decoder does not need the Codec Specific Data (CSD)
+ // (specified in http://www.webmproject.org/vp9/profiles/). Ignore it if
+ // it was passed.
+ if (inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) {
+ inQueue.erase(inQueue.begin());
+ inInfo->mOwnedByUs = false;
+ notifyEmptyBufferDone(inHeader);
+ continue;
+ }
+
mTimeStamps[mTimeStampIdx] = inHeader->nTimeStamp;
if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index 855ac95..37fb33f 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -595,7 +595,7 @@
}
// static
-sp<AMessage> AMessage::FromParcel(const Parcel &parcel) {
+sp<AMessage> AMessage::FromParcel(const Parcel &parcel, size_t maxNestingLevel) {
int32_t what = parcel.readInt32();
sp<AMessage> msg = new AMessage();
msg->setWhat(what);
@@ -667,7 +667,19 @@
case kTypeMessage:
{
- sp<AMessage> subMsg = AMessage::FromParcel(parcel);
+ if (maxNestingLevel == 0) {
+ ALOGE("Too many levels of AMessage nesting.");
+ return NULL;
+ }
+ sp<AMessage> subMsg = AMessage::FromParcel(
+ parcel,
+ maxNestingLevel - 1);
+ if (subMsg == NULL) {
+ // This condition will be triggered when there exists an
+ // object that cannot cross process boundaries or when the
+ // level of nested AMessage is too deep.
+ return NULL;
+ }
subMsg->incStrong(msg.get());
item->u.refValue = subMsg.get();
@@ -677,7 +689,7 @@
default:
{
ALOGE("This type of object cannot cross process boundaries.");
- TRESPASS();
+ return NULL;
}
}
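The FromParcel change bounds recursion depth and turns the TRESPASS() assert into a NULL
return, so a hostile parcel can no longer overflow the stack or abort the media process.
A self-contained sketch of the same bounded-recursion pattern (generic types, not the
AMessage code):

    #include <memory>
    #include <vector>

    struct Node {
        std::vector<std::unique_ptr<Node>> children;
    };

    // Returns nullptr on truncated input or nesting deeper than maxDepth.
    static std::unique_ptr<Node> decode(
            const unsigned char *&p, const unsigned char *end, size_t maxDepth) {
        if (maxDepth == 0 || p == end) {
            return nullptr;  // refuse to recurse further; treat as malformed
        }
        auto node = std::make_unique<Node>();
        size_t childCount = *p++;
        for (size_t i = 0; i < childCount; ++i) {
            auto child = decode(p, end, maxDepth - 1);
            if (child == nullptr) {
                return nullptr;  // propagate failure instead of asserting
            }
            node->children.push_back(std::move(child));
        }
        return node;
    }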
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 20d124c..7ad7fee 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -1092,6 +1092,13 @@
// fall through
} else {
if (mPlaylist != NULL) {
+ if (mSeqNumber >= firstSeqNumberInPlaylist + (int32_t)mPlaylist->size()
+ && !mPlaylist->isComplete()) {
+ // Live playlists
+ ALOGW("sequence number %d not yet available", mSeqNumber);
+ postMonitorQueue(delayUsToRefreshPlaylist());
+ return false;
+ }
ALOGE("Cannot find sequence number %d in playlist "
"(contains %d - %d)",
mSeqNumber, firstSeqNumberInPlaylist,
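The PlaylistFetcher hunk separates "sequence number past the end of a live playlist"
(transient; refresh and retry) from "missing from a complete playlist" (fatal error).
Reduced to a predicate, with assumed names:

    // Retry after a playlist refresh only when the playlist is still live.
    static bool shouldRetryAfterRefresh(
            int32_t seq, int32_t firstSeq, int32_t playlistSize, bool playlistComplete) {
        return seq >= firstSeq + playlistSize && !playlistComplete;
    }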
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index 9726741..6c073f0 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -93,6 +93,10 @@
node_id node, OMX_U32 port_index,
const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer);
+ virtual status_t updateNativeHandleInMeta(
+ node_id node, OMX_U32 port_index,
+ const sp<NativeHandle> &nativeHandle, buffer_id buffer);
+
virtual status_t createInputSurface(
node_id node, OMX_U32 port_index, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer,
@@ -111,7 +115,7 @@
virtual status_t allocateSecureBuffer(
node_id node, OMX_U32 port_index, size_t size,
- buffer_id *buffer, void **buffer_data, native_handle_t **native_handle);
+ buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle);
virtual status_t allocateBufferWithBackup(
node_id node, OMX_U32 port_index, const sp<IMemory> &params,
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index 25c3773..060b6be 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -79,6 +79,10 @@
OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
OMX::buffer_id buffer);
+ status_t updateNativeHandleInMeta(
+ OMX_U32 portIndex, const sp<NativeHandle> &nativeHandle,
+ OMX::buffer_id buffer);
+
status_t createInputSurface(
OMX_U32 portIndex, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer,
@@ -98,7 +102,7 @@
status_t allocateSecureBuffer(
OMX_U32 portIndex, size_t size, OMX::buffer_id *buffer,
- void **buffer_data, native_handle_t **native_handle);
+ void **buffer_data, sp<NativeHandle> *native_handle);
status_t allocateBufferWithBackup(
OMX_U32 portIndex, const sp<IMemory> &params,
@@ -237,9 +241,14 @@
OMX_BUFFERHEADERTYPE *header,
OMX_U32 flags, OMX_TICKS timestamp, intptr_t debugAddr, int fenceFd);
+ // Updates the graphic buffer handle in the metadata buffer for |buffer| and |header| to
+ // |graphicBuffer|'s handle. If |updateCodecBuffer| is true, the update is written into the
+ // actual codec buffer; use this when emptyBuffer (the variant without _l) will not be
+ // called later to pass the buffer to the codec, since only emptyBuffer copies the backup
+ // buffer into the codec buffer.
status_t updateGraphicBufferInMeta_l(
OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
- OMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header);
+ OMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header, bool updateCodecBuffer);
status_t createGraphicBufferSource(
OMX_U32 portIndex, sp<IGraphicBufferConsumer> consumer /* nullable */,
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index 861bdc5..434be86 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -1113,6 +1113,13 @@
meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_VP8);
} else if (!strcmp("V_VP9", codecID)) {
meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_VP9);
+ if (codecPrivateSize > 0) {
+ // 'csd-0' for VP9 is the Blob of Codec Private data as
+ // specified in http://www.webmproject.org/vp9/profiles/.
+ meta->setData(
+ kKeyVp9CodecPrivate, 0, codecPrivate,
+ codecPrivateSize);
+ }
} else {
ALOGW("%s is not supported.", codecID);
continue;
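Together with the Utils.cpp and SoftVPX hunks earlier in this change, this wires VP9
CodecPrivate end to end: the extractor stores the WebM blob under kKeyVp9CodecPrivate,
Utils.cpp republishes it as csd-0, and decoders that configure themselves from the
bitstream drop the OMX_BUFFERFLAG_CODECCONFIG buffer. ACodec's probe for such legacy
decoders, reduced to a hedged standalone helper (signature assumed):

    // A decoder that cannot answer a profile/level query is assumed to predate
    // VP9 profiles and must not be fed codec-specific data.
    static bool isLegacyVp9Decoder(const sp<IOMX> &omx, IOMX::node_id node) {
        OMX_VIDEO_PARAM_PROFILELEVELTYPE params;
        InitOMXParams(&params);
        params.nPortIndex = 0;  // input port
        params.nProfileIndex = 0;
        return omx->getParameter(
                node, OMX_IndexParamVideoProfileLevelQuerySupported,
                &params, sizeof(params)) != OK;
    }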
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 4d89ba1..2e989b5 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -488,6 +488,19 @@
port_index, graphicBuffer, buffer);
}
+status_t OMX::updateNativeHandleInMeta(
+ node_id node, OMX_U32 port_index,
+ const sp<NativeHandle> &nativeHandle, buffer_id buffer) {
+ OMXNodeInstance *instance = findInstance(node);
+
+ if (instance == NULL) {
+ return NAME_NOT_FOUND;
+ }
+
+ return instance->updateNativeHandleInMeta(
+ port_index, nativeHandle, buffer);
+}
+
status_t OMX::createInputSurface(
node_id node, OMX_U32 port_index, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
@@ -533,7 +546,7 @@
status_t OMX::allocateSecureBuffer(
node_id node, OMX_U32 port_index, size_t size,
- buffer_id *buffer, void **buffer_data, native_handle_t **native_handle) {
+ buffer_id *buffer, void **buffer_data, sp<NativeHandle> *native_handle) {
OMXNodeInstance *instance = findInstance(node);
if (instance == NULL) {
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 6b7a871..5445944 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -36,8 +36,8 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/MediaErrors.h>
-
#include <utils/misc.h>
+#include <utils/NativeHandle.h>
static const OMX_U32 kPortIndexInput = 0;
static const OMX_U32 kPortIndexOutput = 1;
@@ -152,8 +152,13 @@
mGraphicBuffer = graphicBuffer;
}
+ void setNativeHandle(const sp<NativeHandle> &nativeHandle) {
+ mNativeHandle = nativeHandle;
+ }
+
private:
sp<GraphicBuffer> mGraphicBuffer;
+ sp<NativeHandle> mNativeHandle;
sp<IMemory> mMem;
size_t mSize;
bool mIsBackup;
@@ -523,6 +528,9 @@
OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type) {
if (portIndex != kPortIndexInput && portIndex != kPortIndexOutput) {
android_errorWriteLog(0x534e4554, "26324358");
+ if (type != NULL) {
+ *type = kMetadataBufferTypeInvalid;
+ }
return BAD_VALUE;
}
@@ -533,26 +541,32 @@
OMX_STRING nativeBufferName = const_cast<OMX_STRING>(
"OMX.google.android.index.storeANWBufferInMetadata");
MetadataBufferType negotiatedType;
+ MetadataBufferType requestedType = type != NULL ? *type : kMetadataBufferTypeANWBuffer;
StoreMetaDataInBuffersParams params;
InitOMXParams(&params);
params.nPortIndex = portIndex;
params.bStoreMetaData = enable;
- OMX_ERRORTYPE err = OMX_GetExtensionIndex(mHandle, nativeBufferName, &index);
+ OMX_ERRORTYPE err =
+ requestedType == kMetadataBufferTypeANWBuffer
+ ? OMX_GetExtensionIndex(mHandle, nativeBufferName, &index)
+ : OMX_ErrorUnsupportedIndex;
OMX_ERRORTYPE xerr = err;
if (err == OMX_ErrorNone) {
err = OMX_SetParameter(mHandle, index, &params);
if (err == OMX_ErrorNone) {
name = nativeBufferName; // set name for debugging
- negotiatedType = kMetadataBufferTypeANWBuffer;
+ negotiatedType = requestedType;
}
}
if (err != OMX_ErrorNone) {
err = OMX_GetExtensionIndex(mHandle, name, &index);
xerr = err;
if (err == OMX_ErrorNone) {
- negotiatedType = kMetadataBufferTypeGrallocSource;
+ negotiatedType =
+ requestedType == kMetadataBufferTypeANWBuffer
+ ? kMetadataBufferTypeGrallocSource : requestedType;
err = OMX_SetParameter(mHandle, index, &params);
}
}
@@ -574,8 +588,9 @@
}
mMetadataType[portIndex] = negotiatedType;
}
- CLOG_CONFIG(storeMetaDataInBuffers, "%s:%u negotiated %s:%d",
- portString(portIndex), portIndex, asString(negotiatedType), negotiatedType);
+ CLOG_CONFIG(storeMetaDataInBuffers, "%s:%u %srequested %s:%d negotiated %s:%d",
+ portString(portIndex), portIndex, enable ? "" : "UN",
+ asString(requestedType), requestedType, asString(negotiatedType), negotiatedType);
if (type != NULL) {
*type = negotiatedType;
@@ -827,7 +842,7 @@
status_t OMXNodeInstance::updateGraphicBufferInMeta_l(
OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
- OMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header) {
+ OMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header, bool updateCodecBuffer) {
// No need to check |graphicBuffer| since NULL is valid for it as below.
if (header == NULL) {
ALOGE("b/25884056");
@@ -839,20 +854,27 @@
}
BufferMeta *bufferMeta = (BufferMeta *)(header->pAppPrivate);
+ sp<ABuffer> data = bufferMeta->getBuffer(
+ header, !updateCodecBuffer /* backup */, false /* limit */);
bufferMeta->setGraphicBuffer(graphicBuffer);
- if (mMetadataType[portIndex] == kMetadataBufferTypeGrallocSource
- && header->nAllocLen >= sizeof(VideoGrallocMetadata)) {
- VideoGrallocMetadata &metadata = *(VideoGrallocMetadata *)(header->pBuffer);
+ MetadataBufferType metaType = mMetadataType[portIndex];
+ // we use gralloc source only in the codec buffers
+ if (metaType == kMetadataBufferTypeGrallocSource && !updateCodecBuffer) {
+ metaType = kMetadataBufferTypeANWBuffer;
+ }
+ if (metaType == kMetadataBufferTypeGrallocSource
+ && data->capacity() >= sizeof(VideoGrallocMetadata)) {
+ VideoGrallocMetadata &metadata = *(VideoGrallocMetadata *)(data->data());
metadata.eType = kMetadataBufferTypeGrallocSource;
metadata.pHandle = graphicBuffer == NULL ? NULL : graphicBuffer->handle;
- } else if (mMetadataType[portIndex] == kMetadataBufferTypeANWBuffer
- && header->nAllocLen >= sizeof(VideoNativeMetadata)) {
- VideoNativeMetadata &metadata = *(VideoNativeMetadata *)(header->pBuffer);
+ } else if (metaType == kMetadataBufferTypeANWBuffer
+ && data->capacity() >= sizeof(VideoNativeMetadata)) {
+ VideoNativeMetadata &metadata = *(VideoNativeMetadata *)(data->data());
metadata.eType = kMetadataBufferTypeANWBuffer;
metadata.pBuffer = graphicBuffer == NULL ? NULL : graphicBuffer->getNativeBuffer();
metadata.nFenceFd = -1;
} else {
- CLOG_BUFFER(updateGraphicBufferInMeta, "%s:%u, %#x bad type (%d) or size (%u)",
+ CLOG_ERROR(updateGraphicBufferInMeta, BAD_VALUE, "%s:%u, %#x bad type (%d) or size (%u)",
portString(portIndex), portIndex, buffer, mMetadataType[portIndex], header->nAllocLen);
return BAD_VALUE;
}
@@ -868,7 +890,47 @@
OMX::buffer_id buffer) {
Mutex::Autolock autoLock(mLock);
OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer);
- return updateGraphicBufferInMeta_l(portIndex, graphicBuffer, buffer, header);
+ // update backup buffer for input, codec buffer for output
+ return updateGraphicBufferInMeta_l(
+ portIndex, graphicBuffer, buffer, header,
+ portIndex == kPortIndexOutput /* updateCodecBuffer */);
+}
+
+status_t OMXNodeInstance::updateNativeHandleInMeta(
+ OMX_U32 portIndex, const sp<NativeHandle>& nativeHandle, OMX::buffer_id buffer) {
+ Mutex::Autolock autoLock(mLock);
+ OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer);
+ // No need to check |nativeHandle| since NULL is valid for it as below.
+ if (header == NULL) {
+ ALOGE("b/25884056");
+ return BAD_VALUE;
+ }
+
+ if (portIndex != kPortIndexInput && portIndex != kPortIndexOutput) {
+ return BAD_VALUE;
+ }
+
+ BufferMeta *bufferMeta = (BufferMeta *)(header->pAppPrivate);
+ // update backup buffer for input, codec buffer for output
+ sp<ABuffer> data = bufferMeta->getBuffer(
+ header, portIndex == kPortIndexInput /* backup */, false /* limit */);
+ bufferMeta->setNativeHandle(nativeHandle);
+ if (mMetadataType[portIndex] == kMetadataBufferTypeNativeHandleSource
+ && data->capacity() >= sizeof(VideoNativeHandleMetadata)) {
+ VideoNativeHandleMetadata &metadata = *(VideoNativeHandleMetadata *)(data->data());
+ metadata.eType = mMetadataType[portIndex];
+ metadata.pHandle =
+ nativeHandle == NULL ? NULL : const_cast<native_handle*>(nativeHandle->handle());
+ } else {
+ CLOG_ERROR(updateNativeHandleInMeta, BAD_VALUE, "%s:%u, %#x bad type (%d) or size (%zu)",
+ portString(portIndex), portIndex, buffer, mMetadataType[portIndex], data->capacity());
+ return BAD_VALUE;
+ }
+
+ CLOG_BUFFER(updateNativeHandleInMeta, "%s:%u, %#x := %p",
+ portString(portIndex), portIndex, buffer,
+ nativeHandle == NULL ? NULL : nativeHandle->handle());
+ return OK;
}
status_t OMXNodeInstance::createGraphicBufferSource(
@@ -884,6 +946,9 @@
}
// Input buffers will hold meta-data (ANativeWindowBuffer references).
+ if (type != NULL) {
+ *type = kMetadataBufferTypeANWBuffer;
+ }
err = storeMetaDataInBuffers_l(portIndex, OMX_TRUE, type);
if (err != OK) {
return err;
@@ -1009,7 +1074,7 @@
status_t OMXNodeInstance::allocateSecureBuffer(
OMX_U32 portIndex, size_t size, OMX::buffer_id *buffer,
- void **buffer_data, native_handle_t **native_handle) {
+ void **buffer_data, sp<NativeHandle> *native_handle) {
if (buffer == NULL || buffer_data == NULL || native_handle == NULL) {
ALOGE("b/25884056");
return BAD_VALUE;
@@ -1039,7 +1104,8 @@
*buffer = makeBufferID(header);
if (mSecureBufferType[portIndex] == kSecureBufferTypeNativeHandle) {
*buffer_data = NULL;
- *native_handle = (native_handle_t *)header->pBuffer;
+ *native_handle = NativeHandle::create(
+ (native_handle_t *)header->pBuffer, false /* ownsHandle */);
} else {
*buffer_data = header->pBuffer;
*native_handle = NULL;
@@ -1052,7 +1118,8 @@
bufferSource->addCodecBuffer(header);
}
CLOG_BUFFER(allocateSecureBuffer, NEW_BUFFER_FMT(
- *buffer, portIndex, "%zu@%p:%p", size, *buffer_data, *native_handle));
+ *buffer, portIndex, "%zu@%p:%p", size, *buffer_data,
+ *native_handle == NULL ? NULL : (*native_handle)->handle()));
return OK;
}
@@ -1331,7 +1398,9 @@
Mutex::Autolock autoLock(mLock);
OMX::buffer_id buffer = findBufferID(header);
- status_t err = updateGraphicBufferInMeta_l(kPortIndexInput, graphicBuffer, buffer, header);
+ status_t err = updateGraphicBufferInMeta_l(
+ kPortIndexInput, graphicBuffer, buffer, header,
+ true /* updateCodecBuffer */);
if (err != OK) {
CLOG_ERROR(emptyGraphicBuffer, err, FULL_BUFFER(
(intptr_t)header->pBuffer, header, fenceFd));
@@ -1339,7 +1408,13 @@
}
header->nOffset = 0;
- header->nFilledLen = graphicBuffer == NULL ? 0 : header->nAllocLen;
+ if (graphicBuffer == NULL) {
+ header->nFilledLen = 0;
+ } else if (mMetadataType[kPortIndexInput] == kMetadataBufferTypeGrallocSource) {
+ header->nFilledLen = sizeof(VideoGrallocMetadata);
+ } else {
+ header->nFilledLen = sizeof(VideoNativeMetadata);
+ }
return emptyBuffer_l(header, flags, timestamp, (intptr_t)header->pBuffer, fenceFd);
}
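The storeMetaDataInBuffers_l rework above negotiates between the type the caller requests
and what the component supports: the ANW-buffer extension is only probed when ANW buffers
were requested, and the legacy extension yields gralloc source only for that request. The
decision table, as a standalone sketch with simplified names:

    enum MetaType { kInvalid, kGralloc, kANWBuffer, kNativeHandle };

    // hasANWExtension / hasLegacyExtension stand in for the two
    // OMX_GetExtensionIndex probes in the hunk above.
    static MetaType negotiate(
            MetaType requested, bool hasANWExtension, bool hasLegacyExtension) {
        if (requested == kANWBuffer && hasANWExtension) {
            return kANWBuffer;  // preferred: ANW buffers with fence support
        }
        if (hasLegacyExtension) {
            // the legacy extension stores gralloc handles; other requested
            // types keep their meaning
            return requested == kANWBuffer ? kGralloc : requested;
        }
        return kInvalid;  // component cannot store metadata at all
    }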
diff --git a/media/libstagefright/tests/Android.mk b/media/libstagefright/tests/Android.mk
index 111e6c5..d1c9d36 100644
--- a/media/libstagefright/tests/Android.mk
+++ b/media/libstagefright/tests/Android.mk
@@ -30,6 +30,7 @@
frameworks/av/media/libstagefright \
frameworks/av/media/libstagefright/include \
$(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/frameworks/native/include/media/hardware \
LOCAL_CFLAGS += -Werror -Wall
LOCAL_CLANG := true
diff --git a/media/libstagefright/wifi-display/Android.mk b/media/libstagefright/wifi-display/Android.mk
index 5bd6e5c..ae4ac90 100644
--- a/media/libstagefright/wifi-display/Android.mk
+++ b/media/libstagefright/wifi-display/Android.mk
@@ -17,6 +17,7 @@
LOCAL_C_INCLUDES:= \
$(TOP)/frameworks/av/media/libstagefright \
$(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/frameworks/native/include/media/hardware \
$(TOP)/frameworks/av/media/libstagefright/mpeg2ts \
LOCAL_SHARED_LIBRARIES:= \
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index 3ecb52b..3587cb9 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -948,8 +948,9 @@
if (isVideo) {
format->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
- format->setInt32("store-metadata-in-buffers", true);
- format->setInt32("store-metadata-in-buffers-output", (mHDCP != NULL)
+ format->setInt32(
+ "android._input-metadata-buffer-type", kMetadataBufferTypeANWBuffer);
+ format->setInt32("android._store-metadata-in-buffers-output", (mHDCP != NULL)
&& (mHDCP->getCaps() & HDCPModule::HDCP_CAPS_ENCRYPT_NATIVE));
format->setInt32(
"color-format", OMX_COLOR_FormatAndroidOpaque);
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index ca1a0b7..2b0d4c8 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1843,8 +1843,7 @@
PlaybackThread *thread;
if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
- thread = new OffloadThread(this, outputStream, *output, devices, mSystemReady,
- config->offload_info.bit_rate);
+ thread = new OffloadThread(this, outputStream, *output, devices, mSystemReady);
ALOGV("openOutput_l() created offload output: ID %d thread %p", *output, thread);
} else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
|| !isValidPcmSinkFormat(config->format)
@@ -2108,8 +2107,8 @@
audio_is_linear_pcm(config->format) &&
audio_is_linear_pcm(halconfig.format) &&
(halconfig.sample_rate <= AUDIO_RESAMPLER_DOWN_RATIO_MAX * config->sample_rate) &&
- (audio_channel_count_from_in_mask(halconfig.channel_mask) <= FCC_2) &&
- (audio_channel_count_from_in_mask(config->channel_mask) <= FCC_2)) {
+ (audio_channel_count_from_in_mask(halconfig.channel_mask) <= FCC_8) &&
+ (audio_channel_count_from_in_mask(config->channel_mask) <= FCC_8)) {
// FIXME describe the change proposed by HAL (save old values so we can log them here)
ALOGV("openInput_l() reopening with proposed sampling rate and channel mask");
inStream = NULL;
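The capture-reopen guard above is relaxed from stereo (FCC_2) to eight channels (FCC_8),
allowing the HAL to propose multichannel input configurations. As a reduced predicate,
assuming FCC_8 == 8:

    // Accept a HAL-proposed reopen only within the supported channel budget.
    static bool canReopenWithProposal(uint32_t halChannels, uint32_t requestedChannels) {
        const uint32_t kMaxCaptureChannels = 8;  // FCC_8
        return halChannels <= kMaxCaptureChannels
                && requestedChannels <= kMaxCaptureChannels;
    }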
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index e721a80..033e5d2 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -110,12 +110,7 @@
// direct outputs can be a scarce resource in audio hardware and should
// be released as quickly as possible.
static const int8_t kMaxTrackRetriesDirect = 2;
-// retry count before removing active track in case of underrun on offloaded thread:
-// we need to make sure that AudioTrack client has enough time to send large buffers
-//FIXME may be more appropriate if expressed in time units. Need to revise how underrun is handled
-// for offloaded tracks
-static const int8_t kMaxTrackRetriesOffload = 10;
-static const int8_t kMaxTrackStartupRetriesOffload = 100;
+
// don't warn about blocked writes or record buffer overflows more often than this
@@ -148,10 +143,6 @@
// Direct output thread minimum sleep time in idle or active(underrun) state
static const nsecs_t kDirectMinSleepTimeUs = 10000;
-// Offloaded output bit rate in bits per second when unknown.
-// Used for sleep time calculation, so use a high default bitrate to be conservative on sleep time.
-static const uint32_t kOffloadDefaultBitRateBps = 1500000;
-
// Whether to use fast mixer
static const enum {
@@ -1567,8 +1558,7 @@
audio_io_handle_t id,
audio_devices_t device,
type_t type,
- bool systemReady,
- uint32_t bitRate)
+ bool systemReady)
: ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type, systemReady),
mNormalFrameCount(0), mSinkBuffer(NULL),
mMixerBufferEnabled(AudioFlinger::kEnableExtendedPrecision),
@@ -1631,13 +1621,6 @@
mStreamTypes[stream].volume = mAudioFlinger->streamVolume_l(stream);
mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream);
}
-
- if (audio_has_proportional_frames(mFormat)) {
- mBufferDurationUs = (uint32_t)((mNormalFrameCount * 1000000LL) / mSampleRate);
- } else {
- bitRate = bitRate != 0 ? bitRate : kOffloadDefaultBitRateBps;
- mBufferDurationUs = (uint32_t)((mBufferSize * 8 * 1000000LL) / bitRate);
- }
}
AudioFlinger::PlaybackThread::~PlaybackThread()
@@ -2049,12 +2032,18 @@
// set retry count for buffer fill
if (track->isOffloaded()) {
- track->mRetryCount = kMaxTrackStartupRetriesOffload;
+ if (track->isStopping_1()) {
+ track->mRetryCount = kMaxTrackStopRetriesOffload;
+ } else {
+ track->mRetryCount = kMaxTrackStartupRetriesOffload;
+ }
+ track->mFillingUpStatus = mStandby ? Track::FS_FILLING : Track::FS_FILLED;
} else {
track->mRetryCount = kMaxTrackStartupRetries;
+ track->mFillingUpStatus =
+ track->sharedBuffer() != 0 ? Track::FS_FILLED : Track::FS_FILLING;
}
- track->mFillingUpStatus = track->sharedBuffer() != 0 ? Track::FS_FILLED : Track::FS_FILLING;
track->mResetDone = false;
track->mPresentationCompleteFrames = 0;
mActiveTracks.add(track);
@@ -2900,6 +2889,24 @@
// sink will block while writing.
ExtendedTimestamp timestamp; // use private copy to fetch
(void) mNormalSink->getTimestamp(timestamp);
+
+ // We keep track of the last valid kernel position in case we are in underrun
+ // and the normal mixer period is the same as the fast mixer period, or there
+ // is some error from the HAL.
+ if (mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] >= 0) {
+ mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] =
+ mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] =
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+
+ mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] =
+ mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER];
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] =
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER];
+ } else {
+ ALOGV("getTimestamp error - no valid kernel position");
+ }
+
// copy over kernel info
mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
@@ -3159,7 +3166,9 @@
uint32_t diff = mThreadThrottleTimeMs - mThreadThrottleEndMs;
if (diff > 0) {
// notify of throttle end on debug log
- ALOGD("mixer(%p) throttle end: throttle time(%u)", this, diff);
+ // but prevent spamming for bluetooth
+ ALOGD_IF(!audio_is_a2dp_out_device(outDevice()),
+ "mixer(%p) throttle end: throttle time(%u)", this, diff);
mThreadThrottleEndMs = mThreadThrottleTimeMs;
}
}
@@ -3168,32 +3177,9 @@
} else {
ATRACE_BEGIN("sleep");
- if ((mType == OFFLOAD) && !audio_has_proportional_frames(mFormat)) {
- Mutex::Autolock _l(mLock);
- if (!mSignalPending && !exitPending()) {
- // If more than one buffer has been written to the audio HAL since exiting
- // standby or last flush, do not sleep more than one buffer duration
- // since last write and not less than kDirectMinSleepTimeUs.
- // Wake up if a command is received
- uint32_t timeoutUs = mSleepTimeUs;
- if (mBytesWritten >= (int64_t) mBufferSize) {
- nsecs_t now = systemTime();
- uint32_t deltaUs = (uint32_t)((now - mLastWriteTime) / 1000);
- if (timeoutUs + deltaUs > mBufferDurationUs) {
- if (mBufferDurationUs > deltaUs) {
- timeoutUs = mBufferDurationUs - deltaUs;
- if (timeoutUs < kDirectMinSleepTimeUs) {
- timeoutUs = kDirectMinSleepTimeUs;
- }
- } else {
- timeoutUs = kDirectMinSleepTimeUs;
- }
- }
- }
- mWaitWorkCV.waitRelative(mLock, microseconds((nsecs_t)timeoutUs));
- }
- } else {
- usleep(mSleepTimeUs);
+ Mutex::Autolock _l(mLock);
+ if (!mSignalPending && mConfigEvents.isEmpty() && !exitPending()) {
+ mWaitWorkCV.waitRelative(mLock, microseconds((nsecs_t)mSleepTimeUs));
}
ATRACE_END();
}
@@ -4579,17 +4565,16 @@
// ----------------------------------------------------------------------------
AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger,
- AudioStreamOut* output, audio_io_handle_t id, audio_devices_t device, bool systemReady,
- uint32_t bitRate)
- : PlaybackThread(audioFlinger, output, id, device, DIRECT, systemReady, bitRate)
+ AudioStreamOut* output, audio_io_handle_t id, audio_devices_t device, bool systemReady)
+ : PlaybackThread(audioFlinger, output, id, device, DIRECT, systemReady)
// mLeftVolFloat, mRightVolFloat
{
}
AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger,
AudioStreamOut* output, audio_io_handle_t id, uint32_t device,
- ThreadBase::type_t type, bool systemReady, uint32_t bitRate)
- : PlaybackThread(audioFlinger, output, id, device, type, systemReady, bitRate)
+ ThreadBase::type_t type, bool systemReady)
+ : PlaybackThread(audioFlinger, output, id, device, type, systemReady)
// mLeftVolFloat, mRightVolFloat
{
}
@@ -4895,14 +4880,7 @@
}
if (mSleepTimeUs == 0) {
if (mMixerStatus == MIXER_TRACKS_ENABLED) {
- // For compressed offload, use faster sleep time when underruning until more than an
- // entire buffer was written to the audio HAL
- if (!audio_has_proportional_frames(mFormat) &&
- (mType == OFFLOAD) && (mBytesWritten < (int64_t) mBufferSize)) {
- mSleepTimeUs = kDirectMinSleepTimeUs;
- } else {
- mSleepTimeUs = mActiveSleepTimeUs;
- }
+ mSleepTimeUs = mActiveSleepTimeUs;
} else {
mSleepTimeUs = mIdleSleepTimeUs;
}
@@ -5174,9 +5152,8 @@
// ----------------------------------------------------------------------------
AudioFlinger::OffloadThread::OffloadThread(const sp<AudioFlinger>& audioFlinger,
- AudioStreamOut* output, audio_io_handle_t id, uint32_t device, bool systemReady,
- uint32_t bitRate)
- : DirectOutputThread(audioFlinger, output, id, device, OFFLOAD, systemReady, bitRate),
+ AudioStreamOut* output, audio_io_handle_t id, uint32_t device, bool systemReady)
+ : DirectOutputThread(audioFlinger, output, id, device, OFFLOAD, systemReady),
mPausedWriteLength(0), mPausedBytesRemaining(0), mKeepWakeLock(true)
{
//FIXME: mStandby should be set to true by ThreadBase constructor
@@ -5260,7 +5237,11 @@
}
tracksToRemove->add(track);
} else if (track->isFlushPending()) {
- track->mRetryCount = kMaxTrackRetriesOffload;
+ if (track->isStopping_1()) {
+ track->mRetryCount = kMaxTrackStopRetriesOffload;
+ } else {
+ track->mRetryCount = kMaxTrackRetriesOffload;
+ }
track->flushAck();
if (last) {
mFlushPending = true;
@@ -5321,38 +5302,47 @@
}
mPreviousTrack = track;
// reset retry count
- track->mRetryCount = kMaxTrackRetriesOffload;
+ if (track->isStopping_1()) {
+ track->mRetryCount = kMaxTrackStopRetriesOffload;
+ } else {
+ track->mRetryCount = kMaxTrackRetriesOffload;
+ }
mActiveTrack = t;
mixerStatus = MIXER_TRACKS_READY;
}
} else {
ALOGVV("OffloadThread: track %d s=%08x [NOT READY]", track->name(), cblk->mServer);
if (track->isStopping_1()) {
- // Hardware buffer can hold a large amount of audio so we must
- // wait for all current track's data to drain before we say
- // that the track is stopped.
- if (mBytesRemaining == 0) {
- // Only start draining when all data in mixbuffer
- // has been written
- ALOGV("OffloadThread: underrun and STOPPING_1 -> draining, STOPPING_2");
- track->mState = TrackBase::STOPPING_2; // so presentation completes after drain
- // do not drain if no data was ever sent to HAL (mStandby == true)
- if (last && !mStandby) {
- // do not modify drain sequence if we are already draining. This happens
- // when resuming from pause after drain.
- if ((mDrainSequence & 1) == 0) {
- mSleepTimeUs = 0;
- mStandbyTimeNs = systemTime() + mStandbyDelayNs;
- mixerStatus = MIXER_DRAIN_TRACK;
- mDrainSequence += 2;
- }
- if (mHwPaused) {
- // It is possible to move from PAUSED to STOPPING_1 without
- // a resume so we must ensure hardware is running
- doHwResume = true;
- mHwPaused = false;
+ if (--(track->mRetryCount) <= 0) {
+ // Hardware buffer can hold a large amount of audio so we must
+ // wait for all current track's data to drain before we say
+ // that the track is stopped.
+ if (mBytesRemaining == 0) {
+ // Only start draining when all data in mixbuffer
+ // has been written
+ ALOGV("OffloadThread: underrun and STOPPING_1 -> draining, STOPPING_2");
+ track->mState = TrackBase::STOPPING_2; // so presentation completes after drain
+ // do not drain if no data was ever sent to HAL (mStandby == true)
+ if (last && !mStandby) {
+ // do not modify drain sequence if we are already draining. This happens
+ // when resuming from pause after drain.
+ if ((mDrainSequence & 1) == 0) {
+ mSleepTimeUs = 0;
+ mStandbyTimeNs = systemTime() + mStandbyDelayNs;
+ mixerStatus = MIXER_DRAIN_TRACK;
+ mDrainSequence += 2;
+ }
+ if (mHwPaused) {
+ // It is possible to move from PAUSED to STOPPING_1 without
+ // a resume so we must ensure hardware is running
+ doHwResume = true;
+ mHwPaused = false;
+ }
}
}
+ } else if (last) {
+ ALOGV("stopping1 underrun retries left %d", track->mRetryCount);
+ mixerStatus = MIXER_TRACKS_ENABLED;
}
} else if (track->isStopping_2()) {
// Drain has completed or we are in standby, signal presentation complete
@@ -5443,20 +5433,6 @@
}
}
-uint32_t AudioFlinger::OffloadThread::activeSleepTimeUs() const
-{
- uint32_t time;
- if (audio_has_proportional_frames(mFormat)) {
- time = PlaybackThread::activeSleepTimeUs();
- } else {
- // sleep time is half the duration of an audio HAL buffer.
- // Note: This can be problematic in case of underrun with variable bit rate and
- // current rate is much less than initial rate.
- time = (uint32_t)max(kDirectMinSleepTimeUs, mBufferDurationUs / 2);
- }
- return time;
-}
-
void AudioFlinger::OffloadThread::invalidateTracks(audio_stream_type_t streamType)
{
Mutex::Autolock _l(mLock);
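With the per-bitrate offload sleep estimate removed, the playback loop above now waits on
its condition variable for every non-mixing iteration, so a pending command or config event
cuts the sleep short instead of riding out a fixed usleep(). The pattern, as a standalone
sketch in portable C++:

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // Sleep up to sleepUs, but wake immediately when work is signalled.
    static void interruptibleSleep(
            std::mutex &m, std::condition_variable &cv,
            const bool &signalPending, int64_t sleepUs) {
        std::unique_lock<std::mutex> lock(m);
        if (!signalPending) {
            cv.wait_for(lock, std::chrono::microseconds(sleepUs));
        }
    }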
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 0ddd279..1cceb6d 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -486,9 +486,16 @@
// suspend by audio policy manager is orthogonal to mixer state
};
+ // retry count before removing active track in case of underrun on offloaded thread:
+ // we need to make sure that AudioTrack client has enough time to send large buffers
+ //FIXME may be more appropriate if expressed in time units. Need to revise how underrun is
+ // handled for offloaded tracks
+ static const int8_t kMaxTrackRetriesOffload = 20;
+ static const int8_t kMaxTrackStartupRetriesOffload = 100;
+ static const int8_t kMaxTrackStopRetriesOffload = 2;
+
PlaybackThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, audio_devices_t device, type_t type, bool systemReady,
- uint32_t bitRate = 0);
+ audio_io_handle_t id, audio_devices_t device, type_t type, bool systemReady);
virtual ~PlaybackThread();
void dump(int fd, const Vector<String16>& args);
@@ -843,8 +850,6 @@
bool mHwSupportsPause;
bool mHwPaused;
bool mFlushPending;
- uint32_t mBufferDurationUs; // estimated duration of an audio HAL buffer
- // based on initial bit rate (offload only)
};
class MixerThread : public PlaybackThread {
@@ -935,8 +940,7 @@
public:
DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, audio_devices_t device, bool systemReady,
- uint32_t bitRate = 0);
+ audio_io_handle_t id, audio_devices_t device, bool systemReady);
virtual ~DirectOutputThread();
// Thread virtuals
@@ -969,7 +973,7 @@
DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
audio_io_handle_t id, uint32_t device, ThreadBase::type_t type,
- bool systemReady, uint32_t bitRate = 0);
+ bool systemReady);
void processVolume_l(Track *track, bool lastTrack);
// prepareTracks_l() tells threadLoop_mix() the name of the single active track
@@ -985,8 +989,7 @@
public:
OffloadThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, uint32_t device,
- bool systemReady, uint32_t bitRate);
+ audio_io_handle_t id, uint32_t device, bool systemReady);
virtual ~OffloadThread() {};
virtual void flushHw_l();
@@ -995,8 +998,6 @@
virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
virtual void threadLoop_exit();
- virtual uint32_t activeSleepTimeUs() const;
-
virtual bool waitingAsyncCallback();
virtual bool waitingAsyncCallback_l();
virtual void invalidateTracks(audio_stream_type_t streamType);
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 9d430aa..364e339 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -373,7 +373,7 @@
// client == 0 implies sharedBuffer == 0
ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
- ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
+ ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
sharedBuffer->size());
if (mCblk == NULL) {
@@ -728,6 +728,9 @@
// For an offloaded track this starts a drain and state will
// move to STOPPING_2 when drain completes and then STOPPED
mState = STOPPING_1;
+ if (isOffloaded()) {
+ mRetryCount = PlaybackThread::kMaxTrackStopRetriesOffload;
+ }
}
playbackThread->broadcast_l();
ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
@@ -1103,7 +1106,7 @@
if (local.mTimeNs[i] > 0) {
local.mPosition[i] = mFrameMap.findX(local.mPosition[i]);
// check drain state from the latest stage in the pipeline.
- if (!checked) {
+ if (!checked && i <= ExtendedTimestamp::LOCATION_KERNEL) {
mAudioTrackServerProxy->setDrained(
local.mPosition[i] >= mAudioTrackServerProxy->framesReleased());
checked = true;
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index 211bdae..a2c9712 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -334,6 +334,7 @@
currentFormat != (uint32_t)params.videoFormat ||
currentDataSpace != params.videoDataSpace) {
*needsUpdate = true;
+ return res;
}
*needsUpdate = false;
return res;
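The StreamingProcessor fix is a one-line early return: previously *needsUpdate was set to
true and then unconditionally overwritten to false two lines later, so callers never saw
the update flag. In reduced form (hypothetical helper):

    // Without the early return, the final assignment clobbered the result.
    static void computeNeedsUpdate(bool streamChanged, bool *needsUpdate) {
        if (streamChanged) {
            *needsUpdate = true;
            return;  // the fix: stop before the unconditional overwrite
        }
        *needsUpdate = false;
    }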
diff --git a/services/mediaextractor/MediaExtractorService.cpp b/services/mediaextractor/MediaExtractorService.cpp
index 0c93af1..4a80166 100644
--- a/services/mediaextractor/MediaExtractorService.cpp
+++ b/services/mediaextractor/MediaExtractorService.cpp
@@ -39,7 +39,7 @@
ret == NULL ? "" : ret->name());
if (ret != NULL) {
- registerMediaExtractor(ret, remoteSource, mime);
+ registerMediaExtractor(ret, localSource, mime);
}
return ret;