Merge "frameworks/av/media portion of removing YouTube specific error codes"
diff --git a/MODULE_LICENSE_APACHE2 b/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MODULE_LICENSE_APACHE2
diff --git a/NOTICE b/NOTICE
new file mode 100644
index 0000000..152be20
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1,324 @@
+ =========================================================================
+ == NOTICE file corresponding to the section 4 d of ==
+ == the Apache License, Version 2.0, ==
+ == in this case for the Android-specific code. ==
+ =========================================================================
+
+Android Code
+Copyright 2005-2008 The Android Open Source Project
+
+This product includes software developed as part of
+The Android Open Source Project (http://source.android.com).
+
+ =========================================================================
+ == NOTICE file corresponding to the section 4 d of ==
+ == the Apache License, Version 2.0, ==
+ == in this case for Apache Commons code. ==
+ =========================================================================
+
+Apache Commons
+Copyright 1999-2006 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+ =========================================================================
+ == NOTICE file corresponding to the section 4 d of ==
+ == the Apache License, Version 2.0, ==
+ == in this case for Jakarta Commons Logging. ==
+ =========================================================================
+
+Jakarta Commons Logging (JCL)
+Copyright 2005,2006 The Apache Software Foundation.
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+ =========================================================================
+ == NOTICE file corresponding to the section 4 d of ==
+ == the Apache License, Version 2.0, ==
+ == in this case for the Nuance code. ==
+ =========================================================================
+
+These files are Copyright 2007 Nuance Communications, but released under
+the Apache2 License.
+
+ =========================================================================
+ == NOTICE file corresponding to the section 4 d of ==
+ == the Apache License, Version 2.0, ==
+ == in this case for the Media Codecs code. ==
+ =========================================================================
+
+Media Codecs
+These files are Copyright 1998 - 2009 PacketVideo, but released under
+the Apache2 License.
+
+ =========================================================================
+ == NOTICE file corresponding to the section 4 d of ==
+ == the Apache License, Version 2.0, ==
+ == in this case for the TagSoup code. ==
+ =========================================================================
+
+This file is part of TagSoup and is Copyright 2002-2008 by John Cowan.
+
+TagSoup is licensed under the Apache License,
+Version 2.0. You may obtain a copy of this license at
+http://www.apache.org/licenses/LICENSE-2.0 . You may also have
+additional legal rights not granted by this license.
+
+TagSoup is distributed in the hope that it will be useful, but
+unless required by applicable law or agreed to in writing, TagSoup
+is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
+OF ANY KIND, either express or implied; not even the implied warranty
+of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+ =========================================================================
+ == NOTICE file corresponding to the section 4 d of ==
+ == the Apache License, Version 2.0, ==
+ == in this case for Additional Codecs code. ==
+ =========================================================================
+
+Additional Codecs
+These files are Copyright 2003-2010 VisualOn, but released under
+the Apache2 License.
+
+ =========================================================================
+ == NOTICE file corresponding to the section 4 d of ==
+ == the Apache License, Version 2.0, ==
+ == in this case for the Audio Effects code. ==
+ =========================================================================
+
+Audio Effects
+These files are Copyright (C) 2004-2010 NXP Software and
+Copyright (C) 2010 The Android Open Source Project, but released under
+the Apache2 License.
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+
+
+UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE
+
+Unicode Data Files include all data files under the directories
+http://www.unicode.org/Public/, http://www.unicode.org/reports/,
+and http://www.unicode.org/cldr/data/ . Unicode Software includes any
+source code published in the Unicode Standard or under the directories
+http://www.unicode.org/Public/, http://www.unicode.org/reports/, and
+http://www.unicode.org/cldr/data/.
+
+NOTICE TO USER: Carefully read the following legal agreement. BY
+DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S DATA
+FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), YOU UNEQUIVOCALLY
+ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE TERMS AND CONDITIONS OF
+THIS AGREEMENT. IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY,
+DISTRIBUTE OR USE THE DATA FILES OR SOFTWARE.
+
+COPYRIGHT AND PERMISSION NOTICE
+
+Copyright © 1991-2008 Unicode, Inc. All rights reserved. Distributed
+under the Terms of Use in http://www.unicode.org/copyright.html.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Unicode data files and any associated documentation (the
+"Data Files") or Unicode software and any associated documentation (the
+"Software") to deal in the Data Files or Software without restriction,
+including without limitation the rights to use, copy, modify, merge,
+publish, distribute, and/or sell copies of the Data Files or Software,
+and to permit persons to whom the Data Files or Software are furnished to
+do so, provided that (a) the above copyright notice(s) and this permission
+notice appear with all copies of the Data Files or Software, (b) both the
+above copyright notice(s) and this permission notice appear in associated
+documentation, and (c) there is clear notice in each modified Data File
+or in the Software as well as in the documentation associated with the
+Data File(s) or Software that the data or software has been modified.
+
+THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
+INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
+OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
+OR PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+
+Except as contained in this notice, the name of a copyright holder
+shall not be used in advertising or otherwise to promote the sale, use
+or other dealings in these Data Files or Software without prior written
+authorization of the copyright holder.
diff --git a/cmds/stagefright/SimplePlayer.cpp b/cmds/stagefright/SimplePlayer.cpp
index 0cfeb3e..7636906 100644
--- a/cmds/stagefright/SimplePlayer.cpp
+++ b/cmds/stagefright/SimplePlayer.cpp
@@ -22,6 +22,7 @@
#include <gui/SurfaceTextureClient.h>
#include <media/AudioTrack.h>
+#include <media/ICrypto.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -318,7 +319,9 @@
CHECK(state->mCodec != NULL);
err = state->mCodec->configure(
- format, mNativeWindow->getSurfaceTextureClient(),
+ format,
+ mNativeWindow->getSurfaceTextureClient(),
+ NULL /* crypto */,
0 /* flags */);
CHECK_EQ(err, (status_t)OK);
diff --git a/cmds/stagefright/codec.cpp b/cmds/stagefright/codec.cpp
index cf2909e..5cbfbfe 100644
--- a/cmds/stagefright/codec.cpp
+++ b/cmds/stagefright/codec.cpp
@@ -20,8 +20,10 @@
#include "SimplePlayer.h"
+#include <binder/IServiceManager.h>
#include <binder/ProcessState.h>
-
+#include <media/ICrypto.h>
+#include <media/IMediaPlayerService.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
@@ -59,6 +61,33 @@
bool mIsAudio;
};
+static sp<ICrypto> makeCrypto(
+ const uint8_t uuid[16], const void *data, size_t size) {
+ sp<IServiceManager> sm = defaultServiceManager();
+
+ sp<IBinder> binder =
+ sm->getService(String16("media.player"));
+
+ sp<IMediaPlayerService> service =
+ interface_cast<IMediaPlayerService>(binder);
+
+ CHECK(service != NULL);
+
+ sp<ICrypto> crypto = service->makeCrypto();
+
+ if (crypto == NULL || crypto->initCheck() != OK) {
+ return NULL;
+ }
+
+ status_t err = crypto->createPlugin(uuid, data, size);
+
+ if (err != OK) {
+ return NULL;
+ }
+
+ return crypto;
+}
+
} // namespace android
static int decode(
@@ -78,6 +107,8 @@
return 1;
}
+ sp<ICrypto> crypto;
+
KeyedVector<size_t, CodecState> stateByTrack;
bool haveAudio = false;
@@ -113,7 +144,38 @@
state->mNumBuffersDecoded = 0;
state->mIsAudio = isAudio;
- if (decryptInputBuffers && !isAudio) {
+ if (decryptInputBuffers && crypto == NULL) {
+ sp<ABuffer> emm;
+ CHECK(format->findBuffer("emm", &emm));
+
+ sp<ABuffer> ecm;
+ CHECK(format->findBuffer("ecm", &ecm));
+
+ struct WVOpaqueInitData {
+ uint8_t mEMM[16];
+ uint8_t mECM[32];
+
+ } opaque;
+
+ CHECK_EQ(emm->size(), sizeof(opaque.mEMM));
+ memcpy(opaque.mEMM, emm->data(), emm->size());
+
+ CHECK_EQ(ecm->size(), 80u);
+ // bytes 16..47 of the original ecm stream data.
+ memcpy(opaque.mECM, ecm->data() + 16, 32);
+
+ static const uint8_t kUUIDWidevine[16] = {
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
+ };
+
+ crypto = makeCrypto(kUUIDWidevine, &opaque, sizeof(opaque));
+ CHECK(crypto != NULL);
+ CHECK_EQ(crypto->initCheck(), (status_t)OK);
+ }
+
+ if (decryptInputBuffers
+ && crypto->requiresSecureDecoderComponent(mime.c_str())) {
static const MediaCodecList *list = MediaCodecList::getInstance();
ssize_t index =
@@ -137,7 +199,8 @@
err = state->mCodec->configure(
format, isVideo ? surface : NULL,
- decryptInputBuffers ? MediaCodec::CONFIGURE_FLAG_SECURE : 0);
+ crypto,
+ 0 /* flags */);
CHECK_EQ(err, (status_t)OK);
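
A minimal usage sketch of the reworked secure-configure path shown above, assuming only the interfaces visible in this patch (IMediaPlayerService::makeCrypto(), ICrypto::initCheck()/createPlugin(), and the four-argument MediaCodec::configure()); the helper name and error handling are illustrative, not part of the change.

    // Sketch: obtain an ICrypto from the media.player service, create the
    // scheme-specific plugin, and pass the crypto object to configure() in
    // place of the removed MediaCodec::CONFIGURE_FLAG_SECURE flag.
    #include <binder/IServiceManager.h>
    #include <gui/SurfaceTextureClient.h>
    #include <media/ICrypto.h>
    #include <media/IMediaPlayerService.h>
    #include <media/stagefright/MediaCodec.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    static status_t configureWithCrypto(
            const sp<MediaCodec> &codec,
            const sp<AMessage> &format,
            const sp<SurfaceTextureClient> &surface,
            const uint8_t uuid[16],
            const void *opaqueData, size_t opaqueSize) {
        sp<IBinder> binder =
            defaultServiceManager()->getService(String16("media.player"));
        sp<IMediaPlayerService> service =
            interface_cast<IMediaPlayerService>(binder);
        if (service == NULL) {
            return NO_INIT;
        }

        sp<ICrypto> crypto = service->makeCrypto();
        if (crypto == NULL || crypto->initCheck() != OK) {
            return NO_INIT;
        }

        status_t err = crypto->createPlugin(uuid, opaqueData, opaqueSize);
        if (err != OK) {
            return err;
        }

        // A NULL crypto (as in SimplePlayer.cpp above) configures a
        // clear-content codec instead.
        return codec->configure(format, surface, crypto, 0 /* flags */);
    }

For protected content the decode path above additionally consults crypto->requiresSecureDecoderComponent(mime) to pick the ".secure" component variant before configuring.
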
diff --git a/include/media/AudioParameter.h b/include/media/AudioParameter.h
index 79d5d82..8cb2fa7 100644
--- a/include/media/AudioParameter.h
+++ b/include/media/AudioParameter.h
@@ -40,12 +40,12 @@
// keyFrameCount: to change audio output frame count, value is an int
// keyInputSource: to change audio input source, value is an int in audio_source_t
// (defined in media/mediarecorder.h)
- static const char *keyRouting;
- static const char *keySamplingRate;
- static const char *keyFormat;
- static const char *keyChannels;
- static const char *keyFrameCount;
- static const char *keyInputSource;
+ static const char * const keyRouting;
+ static const char * const keySamplingRate;
+ static const char * const keyFormat;
+ static const char * const keyChannels;
+ static const char * const keyFrameCount;
+ static const char * const keyInputSource;
String8 toString();
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 5bfb65b..80d2d72 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -30,6 +30,7 @@
#include <utils/threads.h>
#include <system/audio.h>
+#include <media/AudioSystem.h>
namespace android {
@@ -215,8 +216,11 @@
/* After it's created the track is not active. Call start() to
* make it active. If set, the callback will start being called.
+ * If event is not AudioSystem::SYNC_EVENT_NONE, the capture start will be delayed until
+ * the specified event occurs on the specified trigger session.
*/
- status_t start();
+ status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
+ int triggerSession = 0);
/* Stop a track. If set, the callback will cease being called and
* obtainBuffer returns STOPPED. Note that obtainBuffer() still works
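
A short illustrative sketch of the extended start() signature documented above; it assumes an already-initialized AudioRecord and a valid playback session id, and the sync_event_t values it references are the ones added to AudioSystem.h in the next hunk.

    #include <media/AudioRecord.h>
    #include <media/AudioSystem.h>

    using namespace android;

    // Sketch only: delay capture until playback on 'playbackSessionId' has
    // completed presentation, instead of starting immediately.
    static status_t startCaptureAfterPlayback(AudioRecord &record,
                                              int playbackSessionId) {
        return record.start(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE,
                            playbackSessionId);
    }

    // Existing call sites keep working through the default arguments:
    //     record.start();    // equivalent to start(SYNC_EVENT_NONE, 0)
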
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index cc0a594..471f462 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -88,6 +88,19 @@
static status_t getOutputSamplingRate(int* samplingRate, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
static status_t getOutputFrameCount(int* frameCount, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
static status_t getOutputLatency(uint32_t* latency, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+ static status_t getSamplingRate(audio_io_handle_t output,
+ audio_stream_type_t streamType,
+ int* samplingRate);
+ // returns the number of frames per audio HAL write buffer. Corresponds to
+ // audio_stream->get_buffer_size()/audio_stream_frame_size()
+ static status_t getFrameCount(audio_io_handle_t output,
+ audio_stream_type_t stream,
+ int* frameCount);
+ // returns the audio output stream latency in ms. Corresponds to
+ // audio_stream_out->get_latency()
+ static status_t getLatency(audio_io_handle_t output,
+ audio_stream_type_t stream,
+ uint32_t* latency);
// DEPRECATED
static status_t getOutputSamplingRate(int* samplingRate, int stream = AUDIO_STREAM_DEFAULT);
@@ -145,6 +158,21 @@
uint32_t latency;
};
+ // Events used to synchronize actions between audio sessions.
+ // For instance SYNC_EVENT_PRESENTATION_COMPLETE can be used to delay recording start until playback
+ // is complete on another audio session.
+ // See definitions in MediaSyncEvent.java
+ enum sync_event_t {
+ SYNC_EVENT_SAME = -1, // used internally to indicate restart with same event
+ SYNC_EVENT_NONE = 0,
+ SYNC_EVENT_PRESENTATION_COMPLETE,
+
+ //
+ // Define new events here: SYNC_EVENT_START, SYNC_EVENT_STOP, SYNC_EVENT_TIME ...
+ //
+ SYNC_EVENT_CNT,
+ };
+
//
// IAudioPolicyService interface (see AudioPolicyInterface for method descriptions)
//
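
A brief sketch of the per-output queries added above, assuming a valid audio_io_handle_t (for example from AudioSystem::getOutput()); this is the same pattern AudioTrack::createTrack_l() switches to later in this patch.

    #include <media/AudioSystem.h>

    using namespace android;

    // Sketch only: read the HAL parameters of one specific output rather than
    // going through the stream-wide getOutput*() variants.
    static status_t queryOutputParams(audio_io_handle_t output,
                                      audio_stream_type_t stream,
                                      int *sampleRate,
                                      int *frameCount,
                                      uint32_t *latencyMs) {
        status_t err = AudioSystem::getSamplingRate(output, stream, sampleRate);
        if (err != NO_ERROR) {
            return err;
        }
        err = AudioSystem::getFrameCount(output, stream, frameCount);
        if (err != NO_ERROR) {
            return err;
        }
        return AudioSystem::getLatency(output, stream, latencyMs);
    }
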
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 7d5d772..6de6486 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -139,13 +139,15 @@
* latency of the track. The actual size selected by the AudioTrack could be
* larger if the requested size is not compatible with current audio HAL
* latency.
- * flags: Reserved for future use.
+ * flags: See comments on audio_policy_output_flags_t in <system/audio_policy.h>.
* cbf: Callback function. If not null, this function is called periodically
* to request new PCM data.
* user: Context for use by the callback receiver.
* notificationFrames: The callback function is called each time notificationFrames PCM
* frames have been consumed from track input buffer.
* sessionId: Specific session ID, or zero to use default.
+ * threadCanCallJava: Whether callbacks are made from an attached thread and thus can call JNI.
+ * If not present in parameter list, then fixed at false.
*/
AudioTrack( audio_stream_type_t streamType,
@@ -157,7 +159,7 @@
callback_t cbf = NULL,
void* user = NULL,
int notificationFrames = 0,
- int sessionId = 0);
+ int sessionId = 0);
// DEPRECATED
explicit AudioTrack( int streamType,
@@ -189,7 +191,7 @@
callback_t cbf = NULL,
void* user = NULL,
int notificationFrames = 0,
- int sessionId = 0);
+ int sessionId = 0);
/* Terminates the AudioTrack and unregisters it from AudioFlinger.
* Also destroys all resources associated with the AudioTrack.
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 8239b0e..9e938d1 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -29,6 +29,7 @@
#include <media/IAudioFlingerClient.h>
#include <system/audio.h>
#include <system/audio_policy.h>
+#include <hardware/audio_policy.h>
#include <hardware/audio_effect.h>
#include <media/IEffect.h>
#include <media/IEffectClient.h>
@@ -126,23 +127,24 @@
// retrieve the audio recording buffer size
virtual size_t getInputBufferSize(uint32_t sampleRate, audio_format_t format, int channelCount) const = 0;
- virtual audio_io_handle_t openOutput(uint32_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- uint32_t *pChannels,
- uint32_t *pLatencyMs,
- audio_policy_output_flags_t flags) = 0;
+ virtual audio_io_handle_t openOutput(audio_module_handle_t module,
+ audio_devices_t *pDevices,
+ uint32_t *pSamplingRate,
+ audio_format_t *pFormat,
+ audio_channel_mask_t *pChannelMask,
+ uint32_t *pLatencyMs,
+ audio_policy_output_flags_t flags) = 0;
virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
audio_io_handle_t output2) = 0;
virtual status_t closeOutput(audio_io_handle_t output) = 0;
virtual status_t suspendOutput(audio_io_handle_t output) = 0;
virtual status_t restoreOutput(audio_io_handle_t output) = 0;
- virtual audio_io_handle_t openInput(uint32_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- uint32_t *pChannels,
- audio_in_acoustics_t acoustics) = 0;
+ virtual audio_io_handle_t openInput(audio_module_handle_t module,
+ audio_devices_t *pDevices,
+ uint32_t *pSamplingRate,
+ audio_format_t *pFormat,
+ audio_channel_mask_t *pChannelMask) = 0;
virtual status_t closeInput(audio_io_handle_t input) = 0;
virtual status_t setStreamOutput(audio_stream_type_t stream, audio_io_handle_t output) = 0;
@@ -178,6 +180,8 @@
virtual status_t moveEffects(int session, audio_io_handle_t srcOutput,
audio_io_handle_t dstOutput) = 0;
+
+ virtual audio_module_handle_t loadHwModule(const char *name) = 0;
};
diff --git a/include/media/IAudioRecord.h b/include/media/IAudioRecord.h
index 089be3b..c486c6b 100644
--- a/include/media/IAudioRecord.h
+++ b/include/media/IAudioRecord.h
@@ -25,7 +25,6 @@
#include <binder/IInterface.h>
#include <binder/IMemory.h>
-
namespace android {
// ----------------------------------------------------------------------------
@@ -39,7 +38,7 @@
* make it active. If set, the callback will start being called.
* tid identifies the client callback thread, or 0 if not needed.
*/
- virtual status_t start(pid_t tid) = 0;
+ virtual status_t start(pid_t tid, int event, int triggerSession) = 0;
/* Stop a track. If set, the callback will cease being called and
* obtainBuffer will return an error. Buffers that are already released
diff --git a/include/media/ICrypto.h b/include/media/ICrypto.h
index 916abe0..376c326 100644
--- a/include/media/ICrypto.h
+++ b/include/media/ICrypto.h
@@ -16,6 +16,7 @@
#include <binder/IInterface.h>
#include <media/stagefright/foundation/ABase.h>
+#include <media/hardware/CryptoAPI.h>
#ifndef ANDROID_ICRYPTO_H_
@@ -26,26 +27,26 @@
struct ICrypto : public IInterface {
DECLARE_META_INTERFACE(Crypto);
- virtual status_t initialize() = 0;
- virtual status_t terminate() = 0;
+ virtual status_t initCheck() const = 0;
- virtual status_t setEntitlementKey(
- const void *key, size_t keyLength) = 0;
+ virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]) const = 0;
- virtual status_t setEntitlementControlMessage(
- const void *msg, size_t msgLength) = 0;
+ virtual status_t createPlugin(
+ const uint8_t uuid[16], const void *data, size_t size) = 0;
- // "dstData" is in media_server's address space (but inaccessible).
- virtual ssize_t decryptVideo(
- const void *iv, size_t ivLength,
- const void *srcData, size_t srcDataSize,
- void *dstData, size_t dstDataOffset) = 0;
+ virtual status_t destroyPlugin() = 0;
- // "dstData" is in the calling process' address space.
- virtual ssize_t decryptAudio(
- const void *iv, size_t ivLength,
- const void *srcData, size_t srcDataSize,
- void *dstData, size_t dstDataSize) = 0;
+ virtual bool requiresSecureDecoderComponent(
+ const char *mime) const = 0;
+
+ virtual status_t decrypt(
+ bool secure,
+ const uint8_t key[16],
+ const uint8_t iv[16],
+ CryptoPlugin::Mode mode,
+ const void *srcPtr,
+ const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
+ void *dstPtr) = 0;
private:
DISALLOW_EVIL_CONSTRUCTORS(ICrypto);
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index d4aa233..9a8f4b0 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -87,8 +87,9 @@
virtual ssize_t frameSize() const = 0;
virtual uint32_t latency() const = 0;
virtual float msecsPerFrame() const = 0;
- virtual status_t getPosition(uint32_t *position) = 0;
- virtual int getSessionId() = 0;
+ virtual status_t getPosition(uint32_t *position) const = 0;
+ virtual status_t getFramesWritten(uint32_t *frameswritten) const = 0;
+ virtual int getSessionId() const = 0;
// If no callback is specified, use the "write" API below to submit
// audio data.
diff --git a/include/media/ToneGenerator.h b/include/media/ToneGenerator.h
index df0c97e..29c8fd9 100644
--- a/include/media/ToneGenerator.h
+++ b/include/media/ToneGenerator.h
@@ -159,6 +159,9 @@
bool isInited() { return (mState == TONE_IDLE)?false:true;}
+ // returns the audio session this ToneGenerator belongs to or 0 if an error occurred.
+ int getSessionId() { return (mpAudioTrack == NULL) ? 0 : mpAudioTrack->getSessionId(); }
+
private:
enum tone_state {
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index 0fc88e1..0b0d511 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -34,7 +34,6 @@
struct MediaCodec : public AHandler {
enum ConfigureFlags {
CONFIGURE_FLAG_ENCODE = 1,
- CONFIGURE_FLAG_SECURE = 2,
};
enum BufferFlags {
@@ -53,6 +52,7 @@
status_t configure(
const sp<AMessage> &format,
const sp<SurfaceTextureClient> &nativeWindow,
+ const sp<ICrypto> &crypto,
uint32_t flags);
status_t start();
diff --git a/include/media/stagefright/MediaCodecList.h b/include/media/stagefright/MediaCodecList.h
index 14dc1b8..dfb845b 100644
--- a/include/media/stagefright/MediaCodecList.h
+++ b/include/media/stagefright/MediaCodecList.h
@@ -36,9 +36,22 @@
ssize_t findCodecByName(const char *name) const;
+ size_t countCodecs() const;
const char *getCodecName(size_t index) const;
+ bool isEncoder(size_t index) const;
bool codecHasQuirk(size_t index, const char *quirkName) const;
+ status_t getSupportedTypes(size_t index, Vector<AString> *types) const;
+
+ struct ProfileLevel {
+ uint32_t mProfile;
+ uint32_t mLevel;
+ };
+ status_t getCodecCapabilities(
+ size_t index, const char *type,
+ Vector<ProfileLevel> *profileLevels,
+ Vector<uint32_t> *colorFormats) const;
+
private:
enum Section {
SECTION_TOPLEVEL,
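
A small sketch of enumerating codecs with the accessors declared above; only the methods visible in this hunk (plus MediaCodecList::getInstance(), already used in codec.cpp) are assumed.

    #include <media/stagefright/MediaCodecList.h>
    #include <media/stagefright/foundation/AString.h>
    #include <utils/Vector.h>

    using namespace android;

    // Sketch only: walk every codec, list its supported MIME types and query
    // per-type capabilities through the new accessors.
    static void dumpCodecList() {
        const MediaCodecList *list = MediaCodecList::getInstance();

        for (size_t i = 0; i < list->countCodecs(); ++i) {
            const char *name = list->getCodecName(i);
            bool isEncoder = list->isEncoder(i);

            Vector<AString> types;
            if (list->getSupportedTypes(i, &types) != OK) {
                continue;
            }

            for (size_t j = 0; j < types.size(); ++j) {
                Vector<MediaCodecList::ProfileLevel> profileLevels;
                Vector<uint32_t> colorFormats;
                list->getCodecCapabilities(
                        i, types[j].c_str(), &profileLevels, &colorFormats);
                // 'name', 'isEncoder', 'profileLevels' and 'colorFormats'
                // would typically feed codec selection or a dumpsys report.
            }
        }
    }
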
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index 7d51dee..bf054ac 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -384,6 +384,11 @@
const char *mimeType, bool queryDecoders,
Vector<CodecCapabilities> *results);
+status_t QueryCodec(
+ const sp<IOMX> &omx,
+ const char *componentName, const char *mime,
+ bool isEncoder,
+ CodecCapabilities *caps);
} // namespace android
diff --git a/include/media/stagefright/SkipCutBuffer.h b/include/media/stagefright/SkipCutBuffer.h
index 5c7cd47..27851ca 100644
--- a/include/media/stagefright/SkipCutBuffer.h
+++ b/include/media/stagefright/SkipCutBuffer.h
@@ -19,6 +19,7 @@
#define SKIP_CUT_BUFFER_H_
#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/foundation/ABuffer.h>
namespace android {
@@ -30,14 +31,14 @@
public:
// 'skip' is the number of bytes to skip from the beginning
// 'cut' is the number of bytes to cut from the end
- // 'output_size' is the size in bytes of the MediaBuffers that will be used
- SkipCutBuffer(int32_t skip, int32_t cut, int32_t output_size);
+ SkipCutBuffer(int32_t skip, int32_t cut);
virtual ~SkipCutBuffer();
// Submit one MediaBuffer for skipping and cutting. This may consume all or
// some of the data in the buffer, or it may add data to it.
// After this, the caller should continue processing the buffer as usual.
void submit(MediaBuffer *buffer);
+ void submit(const sp<ABuffer>& buffer); // same as above, but with an ABuffer
void clear();
size_t size(); // how many bytes are currently stored in the buffer
diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h
index 936b057..c0dc074 100644
--- a/include/media/stagefright/SurfaceMediaSource.h
+++ b/include/media/stagefright/SurfaceMediaSource.h
@@ -101,6 +101,7 @@
// (zero point, etc) are client-dependent and should be documented by the
// client.
virtual status_t queueBuffer(int buf, int64_t timestamp,
+ const Rect& crop, int scalingMode, uint32_t transform,
uint32_t* outWidth, uint32_t* outHeight, uint32_t* outTransform);
virtual void cancelBuffer(int buf);
@@ -111,13 +112,8 @@
// Make sure this is called when the mutex is locked
virtual status_t onFrameReceivedLocked();
- virtual status_t setScalingMode(int mode) { return OK; } // no op for encoding
virtual int query(int what, int* value);
- // Just confirming to the ISurfaceTexture interface as of now
- virtual status_t setCrop(const Rect& reg) { return OK; }
- virtual status_t setTransform(uint32_t transform) {return OK;}
-
// setSynchronousMode set whether dequeueBuffer is synchronous or
// asynchronous. In synchronous mode, dequeueBuffer blocks until
// a buffer is available, the currently bound buffer can be dequeued and
diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.cpp b/libvideoeditor/lvpp/VideoEditorPlayer.cpp
index 57cab08..1ba1f44 100755
--- a/libvideoeditor/lvpp/VideoEditorPlayer.cpp
+++ b/libvideoeditor/lvpp/VideoEditorPlayer.cpp
@@ -376,12 +376,19 @@
return mMsecsPerFrame;
}
-status_t VideoEditorPlayer::VeAudioOutput::getPosition(uint32_t *position) {
+status_t VideoEditorPlayer::VeAudioOutput::getPosition(uint32_t *position) const {
if (mTrack == 0) return NO_INIT;
return mTrack->getPosition(position);
}
+status_t VideoEditorPlayer::VeAudioOutput::getFramesWritten(uint32_t *written) const {
+
+ if (mTrack == 0) return NO_INIT;
+ *written = mNumFramesWritten;
+ return OK;
+}
+
status_t VideoEditorPlayer::VeAudioOutput::open(
uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
audio_format_t format, int bufferCount,
@@ -569,7 +576,7 @@
return NO_ERROR;
}
-int VideoEditorPlayer::VeAudioOutput::getSessionId() {
+int VideoEditorPlayer::VeAudioOutput::getSessionId() const {
return mSessionId;
}
diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.h b/libvideoeditor/lvpp/VideoEditorPlayer.h
index 6962501..350b384 100755
--- a/libvideoeditor/lvpp/VideoEditorPlayer.h
+++ b/libvideoeditor/lvpp/VideoEditorPlayer.h
@@ -45,8 +45,9 @@
virtual ssize_t frameSize() const;
virtual uint32_t latency() const;
virtual float msecsPerFrame() const;
- virtual status_t getPosition(uint32_t *position);
- virtual int getSessionId();
+ virtual status_t getPosition(uint32_t *position) const;
+ virtual status_t getFramesWritten(uint32_t*) const;
+ virtual int getSessionId() const;
virtual status_t open(
uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
diff --git a/media/libmedia/AudioParameter.cpp b/media/libmedia/AudioParameter.cpp
index abc7b3f..9766ee6 100644
--- a/media/libmedia/AudioParameter.cpp
+++ b/media/libmedia/AudioParameter.cpp
@@ -19,16 +19,18 @@
#include <utils/Log.h>
+#include <hardware/audio.h>
#include <media/AudioParameter.h>
namespace android {
-const char *AudioParameter::keyRouting = "routing";
-const char *AudioParameter::keySamplingRate = "sampling_rate";
-const char *AudioParameter::keyFormat = "format";
-const char *AudioParameter::keyChannels = "channels";
-const char *AudioParameter::keyFrameCount = "frame_count";
-const char *AudioParameter::keyInputSource = "input_source";
+// static
+const char * const AudioParameter::keyRouting = AUDIO_PARAMETER_STREAM_ROUTING;
+const char * const AudioParameter::keySamplingRate = AUDIO_PARAMETER_STREAM_SAMPLING_RATE;
+const char * const AudioParameter::keyFormat = AUDIO_PARAMETER_STREAM_FORMAT;
+const char * const AudioParameter::keyChannels = AUDIO_PARAMETER_STREAM_CHANNELS;
+const char * const AudioParameter::keyFrameCount = AUDIO_PARAMETER_STREAM_FRAME_COUNT;
+const char * const AudioParameter::keyInputSource = AUDIO_PARAMETER_STREAM_INPUT_SOURCE;
AudioParameter::AudioParameter(const String8& keyValuePairs)
{
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 70ec593..1fdc536 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -279,12 +279,12 @@
// -------------------------------------------------------------------------
-status_t AudioRecord::start()
+status_t AudioRecord::start(AudioSystem::sync_event_t event, int triggerSession)
{
status_t ret = NO_ERROR;
sp<ClientRecordThread> t = mClientRecordThread;
- ALOGV("start");
+ ALOGV("start, sync event %d trigger session %d", event, triggerSession);
if (t != 0) {
if (t->exitPending()) {
@@ -322,7 +322,7 @@
if (!(cblk->flags & CBLK_INVALID_MSK)) {
cblk->lock.unlock();
ALOGV("mAudioRecord->start(tid=%d)", tid);
- ret = mAudioRecord->start(tid);
+ ret = mAudioRecord->start(tid, event, triggerSession);
cblk->lock.lock();
if (ret == DEAD_OBJECT) {
android_atomic_or(CBLK_INVALID_ON, &cblk->flags);
@@ -541,7 +541,8 @@
ALOGW( "obtainBuffer timed out (is the CPU pegged?) "
"user=%08x, server=%08x", cblk->user, cblk->server);
cblk->lock.unlock();
- result = mAudioRecord->start(0); // callback thread hasn't changed
+ // callback thread or sync event hasn't changed
+ result = mAudioRecord->start(0, AudioSystem::SYNC_EVENT_SAME, 0);
cblk->lock.lock();
if (result == DEAD_OBJECT) {
android_atomic_or(CBLK_INVALID_ON, &cblk->flags);
@@ -576,7 +577,7 @@
uint32_t u = cblk->user;
uint32_t bufferEnd = cblk->userBase + cblk->frameCount;
- if (u + framesReq > bufferEnd) {
+ if (framesReq > bufferEnd - u) {
framesReq = bufferEnd - u;
}
@@ -779,7 +780,8 @@
result = openRecord_l(cblk->sampleRate, mFormat, mChannelMask,
mFrameCount, getInput_l());
if (result == NO_ERROR) {
- result = mAudioRecord->start(0); // callback thread hasn't changed
+ // callback thread or sync event hasn't changed
+ result = mAudioRecord->start(0, AudioSystem::SYNC_EVENT_SAME, 0);
}
if (result != NO_ERROR) {
mActive = false;
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 33c7d03..2596f07 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -36,7 +36,6 @@
audio_error_callback AudioSystem::gAudioErrorCallback = NULL;
// Cached values
-DefaultKeyedVector<audio_stream_type_t, audio_io_handle_t> AudioSystem::gStreamOutputMap(0);
DefaultKeyedVector<audio_io_handle_t, AudioSystem::OutputDescriptor *> AudioSystem::gOutputs(0);
// Cached values for recording queries, all protected by gLock
@@ -213,7 +212,6 @@
status_t AudioSystem::getOutputSamplingRate(int* samplingRate, audio_stream_type_t streamType)
{
- OutputDescriptor *outputDesc;
audio_io_handle_t output;
if (streamType == AUDIO_STREAM_DEFAULT) {
@@ -225,6 +223,15 @@
return PERMISSION_DENIED;
}
+ return getSamplingRate(output, streamType, samplingRate);
+}
+
+status_t AudioSystem::getSamplingRate(audio_io_handle_t output,
+ audio_stream_type_t streamType,
+ int* samplingRate)
+{
+ OutputDescriptor *outputDesc;
+
gLock.lock();
outputDesc = AudioSystem::gOutputs.valueFor(output);
if (outputDesc == NULL) {
@@ -239,7 +246,7 @@
gLock.unlock();
}
- ALOGV("getOutputSamplingRate() streamType %d, output %d, sampling rate %d", streamType, output, *samplingRate);
+ ALOGV("getSamplingRate() streamType %d, output %d, sampling rate %d", streamType, output, *samplingRate);
return NO_ERROR;
}
@@ -251,7 +258,6 @@
status_t AudioSystem::getOutputFrameCount(int* frameCount, audio_stream_type_t streamType)
{
- OutputDescriptor *outputDesc;
audio_io_handle_t output;
if (streamType == AUDIO_STREAM_DEFAULT) {
@@ -263,6 +269,15 @@
return PERMISSION_DENIED;
}
+ return getFrameCount(output, streamType, frameCount);
+}
+
+status_t AudioSystem::getFrameCount(audio_io_handle_t output,
+ audio_stream_type_t streamType,
+ int* frameCount)
+{
+ OutputDescriptor *outputDesc;
+
gLock.lock();
outputDesc = AudioSystem::gOutputs.valueFor(output);
if (outputDesc == NULL) {
@@ -275,14 +290,13 @@
gLock.unlock();
}
- ALOGV("getOutputFrameCount() streamType %d, output %d, frameCount %d", streamType, output, *frameCount);
+ ALOGV("getFrameCount() streamType %d, output %d, frameCount %d", streamType, output, *frameCount);
return NO_ERROR;
}
status_t AudioSystem::getOutputLatency(uint32_t* latency, audio_stream_type_t streamType)
{
- OutputDescriptor *outputDesc;
audio_io_handle_t output;
if (streamType == AUDIO_STREAM_DEFAULT) {
@@ -294,6 +308,15 @@
return PERMISSION_DENIED;
}
+ return getLatency(output, streamType, latency);
+}
+
+status_t AudioSystem::getLatency(audio_io_handle_t output,
+ audio_stream_type_t streamType,
+ uint32_t* latency)
+{
+ OutputDescriptor *outputDesc;
+
gLock.lock();
outputDesc = AudioSystem::gOutputs.valueFor(output);
if (outputDesc == NULL) {
@@ -306,7 +329,7 @@
gLock.unlock();
}
- ALOGV("getOutputLatency() streamType %d, output %d, latency %d", streamType, output, *latency);
+ ALOGV("getLatency() streamType %d, output %d, latency %d", streamType, output, *latency);
return NO_ERROR;
}
@@ -395,7 +418,6 @@
AudioSystem::gAudioFlinger.clear();
// clear output handles and stream to output map caches
- AudioSystem::gStreamOutputMap.clear();
AudioSystem::gOutputs.clear();
if (gAudioErrorCallback) {
@@ -416,12 +438,6 @@
switch (event) {
case STREAM_CONFIG_CHANGED:
- if (param2 == NULL) break;
- stream = *(const audio_stream_type_t *)param2;
- ALOGV("ioConfigChanged() STREAM_CONFIG_CHANGED stream %d, output %d", stream, ioHandle);
- if (gStreamOutputMap.indexOfKey(stream) >= 0) {
- gStreamOutputMap.replaceValueFor(stream, ioHandle);
- }
break;
case OUTPUT_OPENED: {
if (gOutputs.indexOfKey(ioHandle) >= 0) {
@@ -444,11 +460,6 @@
ALOGV("ioConfigChanged() output %d closed", ioHandle);
gOutputs.removeItem(ioHandle);
- for (int i = gStreamOutputMap.size() - 1; i >= 0 ; i--) {
- if (gStreamOutputMap.valueAt(i) == ioHandle) {
- gStreamOutputMap.removeItemsAt(i);
- }
- }
} break;
case OUTPUT_CONFIG_CHANGED: {
@@ -580,33 +591,9 @@
uint32_t channels,
audio_policy_output_flags_t flags)
{
- audio_io_handle_t output = 0;
- // Do not use stream to output map cache if the direct output
- // flag is set or if we are likely to use a direct output
- // (e.g voice call stream @ 8kHz could use BT SCO device and be routed to
- // a direct output on some platforms).
- // TODO: the output cache and stream to output mapping implementation needs to
- // be reworked for proper operation with direct outputs. This code is too specific
- // to the first use case we want to cover (Voice Recognition and Voice Dialer over
- // Bluetooth SCO
- if ((flags & AUDIO_POLICY_OUTPUT_FLAG_DIRECT) == 0 &&
- ((stream != AUDIO_STREAM_VOICE_CALL && stream != AUDIO_STREAM_BLUETOOTH_SCO) ||
- channels != AUDIO_CHANNEL_OUT_MONO ||
- (samplingRate != 8000 && samplingRate != 16000))) {
- Mutex::Autolock _l(gLock);
- output = AudioSystem::gStreamOutputMap.valueFor(stream);
- ALOGV_IF((output != 0), "getOutput() read %d from cache for stream %d", output, stream);
- }
- if (output == 0) {
- const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
- if (aps == 0) return 0;
- output = aps->getOutput(stream, samplingRate, format, channels, flags);
- if ((flags & AUDIO_POLICY_OUTPUT_FLAG_DIRECT) == 0) {
- Mutex::Autolock _l(gLock);
- AudioSystem::gStreamOutputMap.add(stream, output);
- }
- }
- return output;
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return 0;
+ return aps->getOutput(stream, samplingRate, format, channels, flags);
}
status_t AudioSystem::startOutput(audio_io_handle_t output,
@@ -754,7 +741,6 @@
{
Mutex::Autolock _l(gLock);
ALOGV("clearAudioConfigCache()");
- gStreamOutputMap.clear();
gOutputs.clear();
}
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index bafde3a..6dc6c41 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -185,27 +185,23 @@
ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), sharedBuffer->size());
+ ALOGV("set() streamType %d frameCount %d flags %04x", streamType, frameCount, flags);
+
AutoMutex lock(mLock);
if (mAudioTrack != 0) {
ALOGE("Track already in use");
return INVALID_OPERATION;
}
- int afSampleRate;
- if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
- return NO_INIT;
- }
-
- uint32_t afLatency;
- if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
- return NO_INIT;
- }
-
// handle default values first.
if (streamType == AUDIO_STREAM_DEFAULT) {
streamType = AUDIO_STREAM_MUSIC;
}
+ int afSampleRate;
+ if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
+ return NO_INIT;
+ }
if (sampleRate == 0) {
sampleRate = afSampleRate;
}
@@ -226,7 +222,8 @@
// force direct flag if format is not linear PCM
if (!audio_is_linear_pcm(format)) {
- flags = (audio_policy_output_flags_t) (flags | AUDIO_POLICY_OUTPUT_FLAG_DIRECT);
+ flags = (audio_policy_output_flags_t)
+ ((flags | AUDIO_POLICY_OUTPUT_FLAG_DIRECT) & ~AUDIO_POLICY_OUTPUT_FLAG_FAST);
}
if (!audio_is_output_channel(channelMask)) {
@@ -252,6 +249,7 @@
mNotificationFramesReq = notificationFrames;
mSessionId = sessionId;
mAuxEffectId = 0;
+ mCbf = cbf;
// create the IAudioTrack
status_t status = createTrack_l(streamType,
@@ -280,7 +278,6 @@
mSharedBuffer = sharedBuffer;
mMuted = false;
mActive = false;
- mCbf = cbf;
mUserData = user;
mLoopCount = 0;
mMarkerPosition = 0;
@@ -750,18 +747,31 @@
}
int afSampleRate;
- if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
+ if (AudioSystem::getSamplingRate(output, streamType, &afSampleRate) != NO_ERROR) {
return NO_INIT;
}
int afFrameCount;
- if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
+ if (AudioSystem::getFrameCount(output, streamType, &afFrameCount) != NO_ERROR) {
return NO_INIT;
}
uint32_t afLatency;
- if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
+ if (AudioSystem::getLatency(output, streamType, &afLatency) != NO_ERROR) {
return NO_INIT;
}
+ // Client decides whether the track is TIMED (see below), but can only express a preference
+ // for FAST. Server will perform additional tests.
+ if ((flags & AUDIO_POLICY_OUTPUT_FLAG_FAST) && !(
+ // either of these use cases:
+ // use case 1: shared buffer
+ (sharedBuffer != 0) ||
+ // use case 2: callback handler
+ (mCbf != NULL))) {
+ ALOGW("AUDIO_POLICY_OUTPUT_FLAG_FAST denied");
+ flags = (audio_policy_output_flags_t) (flags & ~AUDIO_POLICY_OUTPUT_FLAG_FAST);
+ }
+ ALOGV("createTrack_l() output %d afFrameCount %d afLatency %d", output, afFrameCount, afLatency);
+
mNotificationFramesAct = mNotificationFramesReq;
if (!audio_is_linear_pcm(format)) {
if (sharedBuffer != 0) {
@@ -786,7 +796,7 @@
if (mNotificationFramesAct > (uint32_t)frameCount/2) {
mNotificationFramesAct = frameCount/2;
}
- if (frameCount < minFrameCount) {
+ if (frameCount < minFrameCount && !(flags & AUDIO_POLICY_OUTPUT_FLAG_FAST)) {
// not ALOGW because it happens all the time when playing key clicks over A2DP
ALOGV("Minimum buffer size corrected from %d to %d",
frameCount, minFrameCount);
@@ -807,6 +817,10 @@
if (mIsTimed) {
trackFlags |= IAudioFlinger::TRACK_TIMED;
}
+ if (flags & AUDIO_POLICY_OUTPUT_FLAG_FAST) {
+ trackFlags |= IAudioFlinger::TRACK_FAST;
+ }
+
sp<IAudioTrack> track = audioFlinger->createTrack(getpid(),
streamType,
sampleRate,
@@ -952,7 +966,7 @@
uint32_t u = cblk->user;
uint32_t bufferEnd = cblk->userBase + cblk->frameCount;
- if (u + framesReq > bufferEnd) {
+ if (framesReq > bufferEnd - u) {
framesReq = bufferEnd - u;
}
@@ -1382,8 +1396,9 @@
uint32_t audio_track_cblk_t::stepUser(uint32_t frameCount)
{
- uint32_t u = user;
+ ALOGV("stepuser %08x %08x %d", user, server, frameCount);
+ uint32_t u = user;
u += frameCount;
// Ensure that user is never ahead of server for AudioRecord
if (flags & CBLK_DIRECTION_MSK) {
@@ -1392,12 +1407,19 @@
bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
}
} else if (u > server) {
- ALOGW("stepServer occurred after track reset");
+ ALOGW("stepUser occurred after track reset");
u = server;
}
- if (u >= userBase + this->frameCount) {
- userBase += this->frameCount;
+ uint32_t fc = this->frameCount;
+ if (u >= fc) {
+ // common case, user didn't just wrap
+ if (u - fc >= userBase ) {
+ userBase += fc;
+ }
+ } else if (u >= userBase + fc) {
+ // user just wrapped
+ userBase += fc;
}
user = u;
@@ -1412,12 +1434,15 @@
bool audio_track_cblk_t::stepServer(uint32_t frameCount)
{
+ ALOGV("stepserver %08x %08x %d", user, server, frameCount);
+
if (!tryLock()) {
ALOGW("stepServer() could not lock cblk");
return false;
}
uint32_t s = server;
+ bool flushed = (s == user);
s += frameCount;
if (flags & CBLK_DIRECTION_MSK) {
@@ -1430,7 +1455,7 @@
// while the mixer is processing a block: in this case,
// stepServer() is called After the flush() has reset u & s and
// we have s > u
- if (s > user) {
+ if (flushed) {
ALOGW("stepServer occurred after track reset");
s = user;
}
@@ -1444,8 +1469,16 @@
loopStart = UINT_MAX;
}
}
- if (s >= serverBase + this->frameCount) {
- serverBase += this->frameCount;
+
+ uint32_t fc = this->frameCount;
+ if (s >= fc) {
+ // common case, server didn't just wrap
+ if (s - fc >= serverBase ) {
+ serverBase += fc;
+ }
+ } else if (s >= serverBase + fc) {
+ // server just wrapped
+ serverBase += fc;
}
server = s;
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index ce10c8e..81e259a 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -69,7 +69,8 @@
QUERY_EFFECT,
GET_EFFECT_DESCRIPTOR,
CREATE_EFFECT,
- MOVE_EFFECTS
+ MOVE_EFFECTS,
+ LOAD_HW_MODULE
};
class BpAudioFlinger : public BpInterface<IAudioFlinger>
@@ -355,38 +356,40 @@
return reply.readInt32();
}
- virtual audio_io_handle_t openOutput(uint32_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- uint32_t *pChannels,
- uint32_t *pLatencyMs,
- audio_policy_output_flags_t flags)
+ virtual audio_io_handle_t openOutput(audio_module_handle_t module,
+ audio_devices_t *pDevices,
+ uint32_t *pSamplingRate,
+ audio_format_t *pFormat,
+ audio_channel_mask_t *pChannelMask,
+ uint32_t *pLatencyMs,
+ audio_policy_output_flags_t flags)
{
Parcel data, reply;
- uint32_t devices = pDevices ? *pDevices : 0;
+ audio_devices_t devices = pDevices ? *pDevices : (audio_devices_t)0;
uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
audio_format_t format = pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT;
- uint32_t channels = pChannels ? *pChannels : 0;
+ audio_channel_mask_t channelMask = pChannelMask ? *pChannelMask : (audio_channel_mask_t)0;
uint32_t latency = pLatencyMs ? *pLatencyMs : 0;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ data.writeInt32(module);
data.writeInt32(devices);
data.writeInt32(samplingRate);
data.writeInt32(format);
- data.writeInt32(channels);
+ data.writeInt32(channelMask);
data.writeInt32(latency);
data.writeInt32((int32_t) flags);
remote()->transact(OPEN_OUTPUT, data, &reply);
audio_io_handle_t output = (audio_io_handle_t) reply.readInt32();
ALOGV("openOutput() returned output, %d", output);
- devices = reply.readInt32();
+ devices = (audio_devices_t)reply.readInt32();
if (pDevices) *pDevices = devices;
samplingRate = reply.readInt32();
if (pSamplingRate) *pSamplingRate = samplingRate;
format = (audio_format_t) reply.readInt32();
if (pFormat) *pFormat = format;
- channels = reply.readInt32();
- if (pChannels) *pChannels = channels;
+ channelMask = (audio_channel_mask_t)reply.readInt32();
+ if (pChannelMask) *pChannelMask = channelMask;
latency = reply.readInt32();
if (pLatencyMs) *pLatencyMs = latency;
return output;
@@ -430,34 +433,34 @@
return reply.readInt32();
}
- virtual audio_io_handle_t openInput(uint32_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- uint32_t *pChannels,
- audio_in_acoustics_t acoustics)
+ virtual audio_io_handle_t openInput(audio_module_handle_t module,
+ audio_devices_t *pDevices,
+ uint32_t *pSamplingRate,
+ audio_format_t *pFormat,
+ audio_channel_mask_t *pChannelMask)
{
Parcel data, reply;
- uint32_t devices = pDevices ? *pDevices : 0;
+ audio_devices_t devices = pDevices ? *pDevices : (audio_devices_t)0;
uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
audio_format_t format = pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT;
- uint32_t channels = pChannels ? *pChannels : 0;
+ audio_channel_mask_t channelMask = pChannelMask ? *pChannelMask : (audio_channel_mask_t)0;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ data.writeInt32(module);
data.writeInt32(devices);
data.writeInt32(samplingRate);
data.writeInt32(format);
- data.writeInt32(channels);
- data.writeInt32((int32_t) acoustics);
+ data.writeInt32(channelMask);
remote()->transact(OPEN_INPUT, data, &reply);
audio_io_handle_t input = (audio_io_handle_t) reply.readInt32();
- devices = reply.readInt32();
+ devices = (audio_devices_t)reply.readInt32();
if (pDevices) *pDevices = devices;
samplingRate = reply.readInt32();
if (pSamplingRate) *pSamplingRate = samplingRate;
format = (audio_format_t) reply.readInt32();
if (pFormat) *pFormat = format;
- channels = reply.readInt32();
- if (pChannels) *pChannels = channels;
+ channelMask = (audio_channel_mask_t)reply.readInt32();
+ if (pChannelMask) *pChannelMask = channelMask;
return input;
}
@@ -668,6 +671,15 @@
remote()->transact(MOVE_EFFECTS, data, &reply);
return reply.readInt32();
}
+
+ virtual audio_module_handle_t loadHwModule(const char *name)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ data.writeCString(name);
+ remote()->transact(LOAD_HW_MODULE, data, &reply);
+ return (audio_module_handle_t) reply.readInt32();
+ }
};
IMPLEMENT_META_INTERFACE(AudioFlinger, "android.media.IAudioFlinger");
@@ -837,24 +849,26 @@
} break;
case OPEN_OUTPUT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- uint32_t devices = data.readInt32();
+ audio_module_handle_t module = (audio_module_handle_t)data.readInt32();
+ audio_devices_t devices = (audio_devices_t)data.readInt32();
uint32_t samplingRate = data.readInt32();
audio_format_t format = (audio_format_t) data.readInt32();
- uint32_t channels = data.readInt32();
+ audio_channel_mask_t channelMask = (audio_channel_mask_t)data.readInt32();
uint32_t latency = data.readInt32();
audio_policy_output_flags_t flags = (audio_policy_output_flags_t) data.readInt32();
- audio_io_handle_t output = openOutput(&devices,
- &samplingRate,
- &format,
- &channels,
- &latency,
- flags);
+ audio_io_handle_t output = openOutput(module,
+ &devices,
+ &samplingRate,
+ &format,
+ &channelMask,
+ &latency,
+ flags);
ALOGV("OPEN_OUTPUT output, %p", output);
reply->writeInt32((int32_t) output);
reply->writeInt32(devices);
reply->writeInt32(samplingRate);
reply->writeInt32(format);
- reply->writeInt32(channels);
+ reply->writeInt32(channelMask);
reply->writeInt32(latency);
return NO_ERROR;
} break;
@@ -882,22 +896,22 @@
} break;
case OPEN_INPUT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- uint32_t devices = data.readInt32();
+ audio_module_handle_t module = (audio_module_handle_t)data.readInt32();
+ audio_devices_t devices = (audio_devices_t)data.readInt32();
uint32_t samplingRate = data.readInt32();
audio_format_t format = (audio_format_t) data.readInt32();
- uint32_t channels = data.readInt32();
- audio_in_acoustics_t acoustics = (audio_in_acoustics_t) data.readInt32();
+ audio_channel_mask_t channelMask = (audio_channel_mask_t)data.readInt32();
- audio_io_handle_t input = openInput(&devices,
- &samplingRate,
- &format,
- &channels,
- acoustics);
+ audio_io_handle_t input = openInput(module,
+ &devices,
+ &samplingRate,
+ &format,
+ &channelMask);
reply->writeInt32((int32_t) input);
reply->writeInt32(devices);
reply->writeInt32(samplingRate);
reply->writeInt32(format);
- reply->writeInt32(channels);
+ reply->writeInt32(channelMask);
return NO_ERROR;
} break;
case CLOSE_INPUT: {
@@ -1015,6 +1029,11 @@
reply->writeInt32(moveEffects(session, srcOutput, dstOutput));
return NO_ERROR;
} break;
+ case LOAD_HW_MODULE: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ reply->writeInt32(loadHwModule(data.readCString()));
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmedia/IAudioRecord.cpp b/media/libmedia/IAudioRecord.cpp
index 377b9a8..cb5c7f3 100644
--- a/media/libmedia/IAudioRecord.cpp
+++ b/media/libmedia/IAudioRecord.cpp
@@ -42,11 +42,13 @@
{
}
- virtual status_t start(pid_t tid)
+ virtual status_t start(pid_t tid, int event, int triggerSession)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioRecord::getInterfaceDescriptor());
data.writeInt32(tid);
+ data.writeInt32(event);
+ data.writeInt32(triggerSession);
status_t status = remote()->transact(START, data, &reply);
if (status == NO_ERROR) {
status = reply.readInt32();
@@ -91,7 +93,7 @@
} break;
case START: {
CHECK_INTERFACE(IAudioRecord, data, reply);
- reply->writeInt32(start(data.readInt32()));
+ reply->writeInt32(start(data.readInt32(), data.readInt32(), data.readInt32()));
return NO_ERROR;
} break;
case STOP: {
diff --git a/media/libmedia/ICrypto.cpp b/media/libmedia/ICrypto.cpp
index 827d7af..1fe6bed 100644
--- a/media/libmedia/ICrypto.cpp
+++ b/media/libmedia/ICrypto.cpp
@@ -25,12 +25,12 @@
namespace android {
enum {
- INITIALIZE = IBinder::FIRST_CALL_TRANSACTION,
- TERMINATE,
- SET_ENTITLEMENT_KEY,
- SET_ECM,
- DECRYPT_VIDEO,
- DECRYPT_AUDIO,
+ INIT_CHECK = IBinder::FIRST_CALL_TRANSACTION,
+ IS_CRYPTO_SUPPORTED,
+ CREATE_PLUGIN,
+ DESTROY_PLUGIN,
+ REQUIRES_SECURE_COMPONENT,
+ DECRYPT,
};
struct BpCrypto : public BpInterface<ICrypto> {
@@ -38,104 +38,97 @@
: BpInterface<ICrypto>(impl) {
}
- virtual status_t initialize() {
+ virtual status_t initCheck() const {
Parcel data, reply;
data.writeInterfaceToken(ICrypto::getInterfaceDescriptor());
- remote()->transact(INITIALIZE, data, &reply);
+ remote()->transact(INIT_CHECK, data, &reply);
return reply.readInt32();
}
- virtual status_t terminate() {
+ virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]) const {
Parcel data, reply;
data.writeInterfaceToken(ICrypto::getInterfaceDescriptor());
- remote()->transact(TERMINATE, data, &reply);
+ data.write(uuid, 16);
+ remote()->transact(IS_CRYPTO_SUPPORTED, data, &reply);
+
+ return reply.readInt32() != 0;
+ }
+
+ virtual status_t createPlugin(
+ const uint8_t uuid[16], const void *opaqueData, size_t opaqueSize) {
+ Parcel data, reply;
+ data.writeInterfaceToken(ICrypto::getInterfaceDescriptor());
+ data.write(uuid, 16);
+ data.writeInt32(opaqueSize);
+ data.write(opaqueData, opaqueSize);
+ remote()->transact(CREATE_PLUGIN, data, &reply);
return reply.readInt32();
}
- virtual status_t setEntitlementKey(
- const void *key, size_t keyLength) {
+ virtual status_t destroyPlugin() {
Parcel data, reply;
data.writeInterfaceToken(ICrypto::getInterfaceDescriptor());
- data.writeInt32(keyLength);
- data.write(key, keyLength);
- remote()->transact(SET_ENTITLEMENT_KEY, data, &reply);
+ remote()->transact(DESTROY_PLUGIN, data, &reply);
return reply.readInt32();
}
- virtual status_t setEntitlementControlMessage(
- const void *msg, size_t msgLength) {
+ virtual bool requiresSecureDecoderComponent(
+ const char *mime) const {
Parcel data, reply;
data.writeInterfaceToken(ICrypto::getInterfaceDescriptor());
- data.writeInt32(msgLength);
- data.write(msg, msgLength);
- remote()->transact(SET_ECM, data, &reply);
+ data.writeCString(mime);
+ remote()->transact(REQUIRES_SECURE_COMPONENT, data, &reply);
- return reply.readInt32();
+ return reply.readInt32() != 0;
}
- virtual ssize_t decryptVideo(
- const void *iv, size_t ivLength,
- const void *srcData, size_t srcDataSize,
- void *dstData, size_t dstDataOffset) {
+ virtual status_t decrypt(
+ bool secure,
+ const uint8_t key[16],
+ const uint8_t iv[16],
+ CryptoPlugin::Mode mode,
+ const void *srcPtr,
+ const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
+ void *dstPtr) {
Parcel data, reply;
data.writeInterfaceToken(ICrypto::getInterfaceDescriptor());
- if (iv == NULL) {
- if (ivLength > 0) {
- return -EINVAL;
- }
+ data.writeInt32(secure);
+ data.writeInt32(mode);
+ data.write(key, 16);
+ data.write(iv, 16);
- data.writeInt32(-1);
- } else {
- data.writeInt32(ivLength);
- data.write(iv, ivLength);
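+        // Marshal the source payload as one contiguous block whose size is the
+        // sum of the clear and encrypted bytes over all subsamples.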
+ size_t totalSize = 0;
+ for (size_t i = 0; i < numSubSamples; ++i) {
+ totalSize += subSamples[i].mNumBytesOfEncryptedData;
+ totalSize += subSamples[i].mNumBytesOfClearData;
}
- data.writeInt32(srcDataSize);
- data.write(srcData, srcDataSize);
+ data.writeInt32(totalSize);
+ data.write(srcPtr, totalSize);
- data.writeIntPtr((intptr_t)dstData);
- data.writeInt32(dstDataOffset);
+ data.writeInt32(numSubSamples);
+ data.write(subSamples, sizeof(CryptoPlugin::SubSample) * numSubSamples);
- remote()->transact(DECRYPT_VIDEO, data, &reply);
-
- return reply.readInt32();
- }
-
- virtual ssize_t decryptAudio(
- const void *iv, size_t ivLength,
- const void *srcData, size_t srcDataSize,
- void *dstData, size_t dstDataSize) {
- Parcel data, reply;
- data.writeInterfaceToken(ICrypto::getInterfaceDescriptor());
- if (iv == NULL) {
- if (ivLength > 0) {
- return -EINVAL;
- }
-
- data.writeInt32(-1);
- } else {
- data.writeInt32(ivLength);
- data.write(iv, ivLength);
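+        // Only a secure decrypt passes the destination pointer across binder;
+        // for non-secure decrypt the plaintext is read back from the reply below.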
+ if (secure) {
+ data.writeIntPtr((intptr_t)dstPtr);
}
- data.writeInt32(srcDataSize);
- data.write(srcData, srcDataSize);
- data.writeInt32(dstDataSize);
+ remote()->transact(DECRYPT, data, &reply);
- remote()->transact(DECRYPT_AUDIO, data, &reply);
+ status_t result = reply.readInt32();
- ssize_t res = reply.readInt32();
-
- if (res <= 0) {
- return res;
+ if (result != OK) {
+ return result;
}
- reply.read(dstData, res);
+ if (!secure) {
+ reply.read(dstPtr, totalSize);
+ }
- return res;
+ return OK;
}
private:
@@ -149,138 +142,120 @@
status_t BnCrypto::onTransact(
uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags) {
switch (code) {
- case INITIALIZE:
+ case INIT_CHECK:
{
CHECK_INTERFACE(ICrypto, data, reply);
- reply->writeInt32(initialize());
+ reply->writeInt32(initCheck());
return OK;
}
- case TERMINATE:
+ case IS_CRYPTO_SUPPORTED:
{
CHECK_INTERFACE(ICrypto, data, reply);
- reply->writeInt32(terminate());
+ uint8_t uuid[16];
+ data.read(uuid, sizeof(uuid));
+ reply->writeInt32(isCryptoSchemeSupported(uuid));
return OK;
}
- case SET_ENTITLEMENT_KEY:
+ case CREATE_PLUGIN:
{
CHECK_INTERFACE(ICrypto, data, reply);
- size_t keyLength = data.readInt32();
- void *key = malloc(keyLength);
- data.read(key, keyLength);
+ uint8_t uuid[16];
+ data.read(uuid, sizeof(uuid));
- reply->writeInt32(setEntitlementKey(key, keyLength));
+ size_t opaqueSize = data.readInt32();
+ void *opaqueData = malloc(opaqueSize);
+ data.read(opaqueData, opaqueSize);
- free(key);
- key = NULL;
+ reply->writeInt32(createPlugin(uuid, opaqueData, opaqueSize));
+
+ free(opaqueData);
+ opaqueData = NULL;
return OK;
}
- case SET_ECM:
+ case DESTROY_PLUGIN:
{
CHECK_INTERFACE(ICrypto, data, reply);
-
- size_t msgLength = data.readInt32();
- void *msg = malloc(msgLength);
- data.read(msg, msgLength);
-
- reply->writeInt32(setEntitlementControlMessage(msg, msgLength));
-
- free(msg);
- msg = NULL;
+ reply->writeInt32(destroyPlugin());
return OK;
}
- case DECRYPT_VIDEO:
+ case REQUIRES_SECURE_COMPONENT:
{
CHECK_INTERFACE(ICrypto, data, reply);
- void *iv = NULL;
+ const char *mime = data.readCString();
+ reply->writeInt32(requiresSecureDecoderComponent(mime));
- int32_t ivLength = data.readInt32();
- if (ivLength >= 0) {
- iv = malloc(ivLength);
- data.read(iv, ivLength);
+ return OK;
+ }
+
+ case DECRYPT:
+ {
+ CHECK_INTERFACE(ICrypto, data, reply);
+
+ bool secure = data.readInt32() != 0;
+ CryptoPlugin::Mode mode = (CryptoPlugin::Mode)data.readInt32();
+
+ uint8_t key[16];
+ data.read(key, sizeof(key));
+
+ uint8_t iv[16];
+ data.read(iv, sizeof(iv));
+
+ size_t totalSize = data.readInt32();
+ void *srcData = malloc(totalSize);
+ data.read(srcData, totalSize);
+
+ int32_t numSubSamples = data.readInt32();
+
+ CryptoPlugin::SubSample *subSamples =
+ new CryptoPlugin::SubSample[numSubSamples];
+
+ data.read(
+ subSamples,
+ sizeof(CryptoPlugin::SubSample) * numSubSamples);
+
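+            // A secure destination is an opaque pointer supplied by the caller;
+            // otherwise decrypt into a local buffer and marshal the result back
+            // in the reply once the call succeeds.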
+ void *dstPtr;
+ if (secure) {
+ dstPtr = (void *)data.readIntPtr();
+ } else {
+ dstPtr = malloc(totalSize);
}
- size_t srcDataSize = data.readInt32();
- void *srcData = malloc(srcDataSize);
- data.read(srcData, srcDataSize);
+ status_t err = decrypt(
+ secure,
+ key,
+ iv,
+ mode,
+ srcData,
+ subSamples, numSubSamples,
+ dstPtr);
- void *dstData = (void *)data.readIntPtr();
- size_t dstDataOffset = data.readInt32();
+ reply->writeInt32(err);
- reply->writeInt32(
- decryptVideo(
- iv,
- ivLength < 0 ? 0 : ivLength,
- srcData,
- srcDataSize,
- dstData,
- dstDataOffset));
+ if (!secure) {
+ if (err == OK) {
+ reply->write(dstPtr, totalSize);
+ }
+
+ free(dstPtr);
+ dstPtr = NULL;
+ }
+
+ delete[] subSamples;
+ subSamples = NULL;
free(srcData);
srcData = NULL;
- if (iv != NULL) {
- free(iv);
- iv = NULL;
- }
-
- return OK;
- }
-
- case DECRYPT_AUDIO:
- {
- CHECK_INTERFACE(ICrypto, data, reply);
-
- void *iv = NULL;
-
- int32_t ivLength = data.readInt32();
- if (ivLength >= 0) {
- iv = malloc(ivLength);
- data.read(iv, ivLength);
- }
-
- size_t srcDataSize = data.readInt32();
- void *srcData = malloc(srcDataSize);
- data.read(srcData, srcDataSize);
-
- size_t dstDataSize = data.readInt32();
- void *dstData = malloc(dstDataSize);
-
- ssize_t res =
- decryptAudio(
- iv,
- ivLength < 0 ? 0 : ivLength,
- srcData,
- srcDataSize,
- dstData,
- dstDataSize);
-
- reply->writeInt32(res);
-
- if (res > 0) {
- reply->write(dstData, res);
- }
-
- free(dstData);
- dstData = NULL;
-
- free(srcData);
- srcData = NULL;
-
- if (iv != NULL) {
- free(iv);
- iv = NULL;
- }
-
return OK;
}
diff --git a/media/libmediaplayerservice/Crypto.cpp b/media/libmediaplayerservice/Crypto.cpp
index e02035f..4491f2b 100644
--- a/media/libmediaplayerservice/Crypto.cpp
+++ b/media/libmediaplayerservice/Crypto.cpp
@@ -20,46 +20,137 @@
#include "Crypto.h"
+#include <media/hardware/CryptoAPI.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/MediaErrors.h>
+#include <dlfcn.h>
+
namespace android {
-Crypto::Crypto() {
+Crypto::Crypto()
+ : mInitCheck(NO_INIT),
+      mLibHandle(NULL),
+      mFactory(NULL),
+      mPlugin(NULL) {
+ mInitCheck = init();
}
Crypto::~Crypto() {
+ delete mPlugin;
+ mPlugin = NULL;
+
+ delete mFactory;
+ mFactory = NULL;
+
+ if (mLibHandle != NULL) {
+ dlclose(mLibHandle);
+ mLibHandle = NULL;
+ }
}
-status_t Crypto::initialize() {
- return ERROR_UNSUPPORTED;
+status_t Crypto::initCheck() const {
+ return mInitCheck;
}
-status_t Crypto::terminate() {
- return ERROR_UNSUPPORTED;
+status_t Crypto::init() {
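+    // The vendor decrypt plugin is resolved at runtime; if the library or its
+    // factory entry point is missing, crypto support is reported as unsupported.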
+ mLibHandle = dlopen("libdrmdecrypt.so", RTLD_NOW);
+
+ if (mLibHandle == NULL) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ typedef CryptoFactory *(*CreateCryptoFactoryFunc)();
+ CreateCryptoFactoryFunc createCryptoFactory =
+ (CreateCryptoFactoryFunc)dlsym(mLibHandle, "createCryptoFactory");
+
+ if (createCryptoFactory == NULL
+ || ((mFactory = createCryptoFactory()) == NULL)) {
+ dlclose(mLibHandle);
+ mLibHandle = NULL;
+
+ return ERROR_UNSUPPORTED;
+ }
+
+ return OK;
}
-status_t Crypto::setEntitlementKey(
- const void *key, size_t keyLength) {
- return ERROR_UNSUPPORTED;
+bool Crypto::isCryptoSchemeSupported(const uint8_t uuid[16]) const {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return false;
+ }
+
+ return mFactory->isCryptoSchemeSupported(uuid);
}
-status_t Crypto::setEntitlementControlMessage(
- const void *msg, size_t msgLength) {
- return ERROR_UNSUPPORTED;
+status_t Crypto::createPlugin(
+ const uint8_t uuid[16], const void *data, size_t size) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ if (mPlugin != NULL) {
+ return -EINVAL;
+ }
+
+ return mFactory->createPlugin(uuid, data, size, &mPlugin);
}
-ssize_t Crypto::decryptVideo(
- const void *iv, size_t ivLength,
- const void *srcData, size_t srcDataSize,
- void *dstData, size_t dstDataOffset) {
- return ERROR_UNSUPPORTED;
+status_t Crypto::destroyPlugin() {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ if (mPlugin == NULL) {
+ return -EINVAL;
+ }
+
+ delete mPlugin;
+ mPlugin = NULL;
+
+ return OK;
}
-ssize_t Crypto::decryptAudio(
- const void *iv, size_t ivLength,
- const void *srcData, size_t srcDataSize,
- void *dstData, size_t dstDataSize) {
- return ERROR_UNSUPPORTED;
+bool Crypto::requiresSecureDecoderComponent(const char *mime) const {
+ Mutex::Autolock autoLock(mLock);
+
+    if (mInitCheck != OK) {
+        return false;
+    }
+
+    if (mPlugin == NULL) {
+        return false;
+    }
+
+ return mPlugin->requiresSecureDecoderComponent(mime);
+}
+
+status_t Crypto::decrypt(
+ bool secure,
+ const uint8_t key[16],
+ const uint8_t iv[16],
+ CryptoPlugin::Mode mode,
+ const void *srcPtr,
+ const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
+ void *dstPtr) {
+ Mutex::Autolock autoLock(mLock);
+
+ if (mInitCheck != OK) {
+ return mInitCheck;
+ }
+
+ if (mPlugin == NULL) {
+ return -EINVAL;
+ }
+
+ return mPlugin->decrypt(
+ secure, key, iv, mode, srcPtr, subSamples, numSubSamples, dstPtr);
}
} // namespace android
diff --git a/media/libmediaplayerservice/Crypto.h b/media/libmediaplayerservice/Crypto.h
index 9855496..74de2b5 100644
--- a/media/libmediaplayerservice/Crypto.h
+++ b/media/libmediaplayerservice/Crypto.h
@@ -23,32 +23,44 @@
namespace android {
+struct CryptoFactory;
+struct CryptoPlugin;
+
struct Crypto : public BnCrypto {
Crypto();
-
- virtual status_t initialize();
- virtual status_t terminate();
-
- virtual status_t setEntitlementKey(
- const void *key, size_t keyLength);
-
- virtual status_t setEntitlementControlMessage(
- const void *msg, size_t msgLength);
-
- virtual ssize_t decryptVideo(
- const void *iv, size_t ivLength,
- const void *srcData, size_t srcDataSize,
- void *dstData, size_t dstDataOffset);
-
- virtual ssize_t decryptAudio(
- const void *iv, size_t ivLength,
- const void *srcData, size_t srcDataSize,
- void *dstData, size_t dstDataSize);
-
-protected:
virtual ~Crypto();
+ virtual status_t initCheck() const;
+
+ virtual bool isCryptoSchemeSupported(const uint8_t uuid[16]) const;
+
+ virtual status_t createPlugin(
+ const uint8_t uuid[16], const void *data, size_t size);
+
+ virtual status_t destroyPlugin();
+
+ virtual bool requiresSecureDecoderComponent(
+ const char *mime) const;
+
+ virtual status_t decrypt(
+ bool secure,
+ const uint8_t key[16],
+ const uint8_t iv[16],
+ CryptoPlugin::Mode mode,
+ const void *srcPtr,
+ const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
+ void *dstPtr);
+
private:
+ mutable Mutex mLock;
+
+ status_t mInitCheck;
+ void *mLibHandle;
+ CryptoFactory *mFactory;
+ CryptoPlugin *mPlugin;
+
+ status_t init();
+
DISALLOW_EVIL_CONSTRUCTORS(Crypto);
};
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index cc3138d..7c3fb0d 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -294,13 +294,7 @@
}
sp<ICrypto> MediaPlayerService::makeCrypto() {
- Mutex::Autolock autoLock(mLock);
-
- if (mCrypto == NULL) {
- mCrypto = new Crypto;
- }
-
- return mCrypto;
+ return new Crypto;
}
status_t MediaPlayerService::AudioCache::dump(int fd, const Vector<String16>& args) const
@@ -1417,6 +1411,7 @@
: mCallback(NULL),
mCallbackCookie(NULL),
mCallbackData(NULL),
+ mBytesWritten(0),
mSessionId(sessionId) {
ALOGV("AudioOutput(%d)", sessionId);
mTrack = 0;
@@ -1495,12 +1490,19 @@
return mMsecsPerFrame;
}
-status_t MediaPlayerService::AudioOutput::getPosition(uint32_t *position)
+status_t MediaPlayerService::AudioOutput::getPosition(uint32_t *position) const
{
if (mTrack == 0) return NO_INIT;
return mTrack->getPosition(position);
}
+status_t MediaPlayerService::AudioOutput::getFramesWritten(uint32_t *frameswritten) const
+{
+ if (mTrack == 0) return NO_INIT;
+ *frameswritten = mBytesWritten / frameSize();
+ return OK;
+}
+
status_t MediaPlayerService::AudioOutput::open(
uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
audio_format_t format, int bufferCount,
@@ -1656,6 +1658,7 @@
mTrack = NULL;
mNextOutput->mSampleRateHz = mSampleRateHz;
mNextOutput->mMsecsPerFrame = mMsecsPerFrame;
+ mNextOutput->mBytesWritten = mBytesWritten;
}
}
@@ -1666,6 +1669,7 @@
//ALOGV("write(%p, %u)", buffer, size);
if (mTrack) {
ssize_t ret = mTrack->write(buffer, size);
+        if (ret >= 0) mBytesWritten += ret;
return ret;
}
return NO_INIT;
@@ -1777,7 +1781,7 @@
data->unlock();
}
-int MediaPlayerService::AudioOutput::getSessionId()
+int MediaPlayerService::AudioOutput::getSessionId() const
{
return mSessionId;
}
@@ -1802,13 +1806,20 @@
return mMsecsPerFrame;
}
-status_t MediaPlayerService::AudioCache::getPosition(uint32_t *position)
+status_t MediaPlayerService::AudioCache::getPosition(uint32_t *position) const
{
if (position == 0) return BAD_VALUE;
*position = mSize;
return NO_ERROR;
}
+status_t MediaPlayerService::AudioCache::getFramesWritten(uint32_t *written) const
+{
+ if (written == 0) return BAD_VALUE;
+ *written = mSize;
+ return NO_ERROR;
+}
+
////////////////////////////////////////////////////////////////////////////////
struct CallbackThread : public Thread {
@@ -1971,7 +1982,7 @@
p->mSignal.signal();
}
-int MediaPlayerService::AudioCache::getSessionId()
+int MediaPlayerService::AudioCache::getSessionId() const
{
return 0;
}
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index b08dd6c..2a8cfd2 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -84,8 +84,9 @@
virtual ssize_t frameSize() const;
virtual uint32_t latency() const;
virtual float msecsPerFrame() const;
- virtual status_t getPosition(uint32_t *position);
- virtual int getSessionId();
+ virtual status_t getPosition(uint32_t *position) const;
+ virtual status_t getFramesWritten(uint32_t *frameswritten) const;
+ virtual int getSessionId() const;
virtual status_t open(
uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
@@ -122,6 +123,7 @@
AudioCallback mCallback;
void * mCallbackCookie;
CallbackData * mCallbackData;
+ uint64_t mBytesWritten;
audio_stream_type_t mStreamType;
float mLeftVolume;
float mRightVolume;
@@ -181,8 +183,9 @@
virtual ssize_t frameSize() const { return ssize_t(mChannelCount * ((mFormat == AUDIO_FORMAT_PCM_16_BIT)?sizeof(int16_t):sizeof(u_int8_t))); }
virtual uint32_t latency() const;
virtual float msecsPerFrame() const;
- virtual status_t getPosition(uint32_t *position);
- virtual int getSessionId();
+ virtual status_t getPosition(uint32_t *position) const;
+ virtual status_t getFramesWritten(uint32_t *frameswritten) const;
+ virtual int getSessionId() const;
virtual status_t open(
uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 544d501..11cea3b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -39,6 +39,7 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
+#include <media/stagefright/SkipCutBuffer.h>
#include <gui/ISurfaceTexture.h>
#include "avc_utils.h"
@@ -63,10 +64,13 @@
mSkipRenderingVideoUntilMediaTimeUs(-1ll),
mVideoLateByUs(0ll),
mNumFramesTotal(0ll),
- mNumFramesDropped(0ll) {
+ mNumFramesDropped(0ll),
+ mSkipCutBuffer(NULL) {
}
NuPlayer::~NuPlayer() {
+ delete mSkipCutBuffer;
+ mSkipCutBuffer = NULL;
}
void NuPlayer::setUID(uid_t uid) {
@@ -234,6 +238,32 @@
mSource->start();
+ sp<MetaData> meta = mSource->getFormat(true /* audio */);
+ if (meta != NULL) {
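+        // Encoder delay and padding are expressed in frames; SkipCutBuffer trims
+        // them from the decoded stream, with sizes converted to bytes assuming
+        // 16-bit PCM samples.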
+ int32_t delay = 0;
+ if (!meta->findInt32(kKeyEncoderDelay, &delay)) {
+ delay = 0;
+ }
+ int32_t padding = 0;
+ if (!meta->findInt32(kKeyEncoderPadding, &padding)) {
+ padding = 0;
+ }
+ int32_t numchannels = 0;
+ if (delay + padding) {
+ if (meta->findInt32(kKeyChannelCount, &numchannels)) {
+ size_t frameSize = numchannels * sizeof(int16_t);
+ if (mSkipCutBuffer) {
+ size_t prevbuffersize = mSkipCutBuffer->size();
+ if (prevbuffersize != 0) {
+ ALOGW("Replacing SkipCutBuffer holding %d bytes", prevbuffersize);
+ }
+ delete mSkipCutBuffer;
+ }
+ mSkipCutBuffer = new SkipCutBuffer(delay * frameSize, padding * frameSize);
+ }
+ }
+ }
+
mRenderer = new Renderer(
mAudioSink,
new AMessage(kWhatRendererNotify, id()));
@@ -844,6 +874,10 @@
skipUntilMediaTimeUs = -1;
}
+ if (audio && mSkipCutBuffer) {
+ mSkipCutBuffer->submit(buffer);
+ }
+
mRenderer->queueBuffer(audio, buffer, reply);
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 25766e0..f917f64 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -27,6 +27,7 @@
struct ACodec;
struct MetaData;
struct NuPlayerDriver;
+class SkipCutBuffer;
struct NuPlayer : public AHandler {
NuPlayer();
@@ -128,6 +129,8 @@
int64_t mVideoLateByUs;
int64_t mNumFramesTotal, mNumFramesDropped;
+ SkipCutBuffer *mSkipCutBuffer;
+
status_t instantiateDecoder(bool audio, sp<Decoder> *decoder);
status_t feedDecoderInputData(bool audio, const sp<AMessage> &msg);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index ecbc428..1f13955 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -591,6 +591,10 @@
void NuPlayer::Renderer::onAudioSinkChanged() {
CHECK(!mDrainAudioQueuePending);
mNumFramesWritten = 0;
+ uint32_t written;
+ if (mAudioSink->getFramesWritten(&written) == OK) {
+ mNumFramesWritten = written;
+ }
}
void NuPlayer::Renderer::notifyPosition() {
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index db2beda..e6e0413 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -768,6 +768,8 @@
"video_decoder.h263", "video_encoder.h263" },
{ MEDIA_MIMETYPE_VIDEO_VPX,
"video_decoder.vpx", "video_encoder.vpx" },
+ { MEDIA_MIMETYPE_AUDIO_RAW,
+ "audio_decoder.raw", "audio_encoder.raw" },
};
static const size_t kNumMimeToRole =
@@ -873,6 +875,15 @@
} else {
err = setupG711Codec(encoder, numChannels);
}
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
+ int32_t numChannels, sampleRate;
+ if (encoder
+ || !msg->findInt32("channel-count", &numChannels)
+ || !msg->findInt32("sample-rate", &sampleRate)) {
+ err = INVALID_OPERATION;
+ } else {
+ err = setupRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
+ }
}
int32_t maxInputSize;
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 42b5c7e..ced8368 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -22,10 +22,8 @@
#include "include/SoftwareRenderer.h"
-#include <binder/IServiceManager.h>
#include <gui/SurfaceTextureClient.h>
#include <media/ICrypto.h>
-#include <media/IMediaPlayerService.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -134,6 +132,7 @@
status_t MediaCodec::configure(
const sp<AMessage> &format,
const sp<SurfaceTextureClient> &nativeWindow,
+ const sp<ICrypto> &crypto,
uint32_t flags) {
sp<AMessage> msg = new AMessage(kWhatConfigure, id());
@@ -141,13 +140,13 @@
msg->setInt32("flags", flags);
if (nativeWindow != NULL) {
- if (!(mFlags & kFlagIsSoftwareCodec)) {
- msg->setObject(
- "native-window",
- new NativeWindowWrapper(nativeWindow));
- } else {
- mNativeWindow = nativeWindow;
- }
+ msg->setObject(
+ "native-window",
+ new NativeWindowWrapper(nativeWindow));
+ }
+
+ if (crypto != NULL) {
+ msg->setPointer("crypto", crypto.get());
}
sp<AMessage> response;
@@ -490,6 +489,12 @@
mFlags &= ~kFlagIsSoftwareCodec;
}
+ if (componentName.endsWith(".secure")) {
+ mFlags |= kFlagIsSecure;
+ } else {
+ mFlags &= ~kFlagIsSecure;
+ }
+
(new AMessage)->postReply(mReplyID);
break;
}
@@ -532,8 +537,7 @@
info.mOwnedByClient = false;
CHECK(msg->findBuffer(name.c_str(), &info.mData));
- if (portIndex == kPortIndexInput
- && (mFlags & kFlagIsSecure)) {
+ if (portIndex == kPortIndexInput && mCrypto != NULL) {
info.mEncryptedData =
new ABuffer(info.mData->capacity());
}
@@ -743,8 +747,23 @@
if (obj != NULL) {
format->setObject("native-window", obj);
+
+ if (mFlags & kFlagIsSoftwareCodec) {
+ mNativeWindow =
+ static_cast<NativeWindowWrapper *>(obj.get())
+ ->getSurfaceTextureClient();
+ }
+ } else {
+ mNativeWindow.clear();
}
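+            // Adopt the ICrypto instance passed in via configure(), if any; a
+            // missing pointer leaves mCrypto cleared.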
+ void *crypto;
+ if (!msg->findPointer("crypto", &crypto)) {
+ crypto = NULL;
+ }
+
+ mCrypto = static_cast<ICrypto *>(crypto);
+
uint32_t flags;
CHECK(msg->findInt32("flags", (int32_t *)&flags));
@@ -752,59 +771,6 @@
format->setInt32("encoder", true);
}
- if (flags & CONFIGURE_FLAG_SECURE) {
- mFlags |= kFlagIsSecure;
-
- sp<IServiceManager> sm = defaultServiceManager();
-
- sp<IBinder> binder =
- sm->getService(String16("media.player"));
-
- sp<IMediaPlayerService> service =
- interface_cast<IMediaPlayerService>(binder);
-
- CHECK(service != NULL);
-
- mCrypto = service->makeCrypto();
-
- status_t err = mCrypto->initialize();
-
- if (err == OK) {
- sp<ABuffer> emm;
- if (format->findBuffer("emm", &emm)) {
- err = mCrypto->setEntitlementKey(
- emm->data(), emm->size());
- }
- }
-
- if (err == OK) {
- sp<ABuffer> ecm;
- if (format->findBuffer("ecm", &ecm)) {
- CHECK_EQ(ecm->size(), 80u);
-
- // bytes 16..47 of the original ecm stream data.
- err = mCrypto->setEntitlementControlMessage(
- ecm->data() + 16, 32);
- }
- }
-
- if (err != OK) {
- ALOGE("failed to instantiate crypto service.");
-
- mCrypto.clear();
-
- setState(INITIALIZED);
-
- sp<AMessage> response = new AMessage;
- response->setInt32("err", UNKNOWN_ERROR);
-
- response->postReply(mReplyID);
- break;
- }
- } else {
- mFlags &= ~kFlagIsSecure;
- }
-
mCodec->initiateConfigureComponent(format);
break;
}
@@ -1047,8 +1013,7 @@
const BufferInfo &info = srcBuffers.itemAt(i);
dstBuffers->push_back(
- (portIndex == kPortIndexInput
- && (mFlags & kFlagIsSecure))
+ (portIndex == kPortIndexInput && mCrypto != NULL)
? info.mEncryptedData : info.mData);
}
@@ -1107,11 +1072,7 @@
delete mSoftRenderer;
mSoftRenderer = NULL;
- if (mCrypto != NULL) {
- mCrypto->terminate();
- mCrypto.clear();
- }
-
+ mCrypto.clear();
mNativeWindow.clear();
mOutputFormat.clear();
@@ -1221,39 +1182,41 @@
info->mData->meta()->setInt32("csd", true);
}
- if (mFlags & kFlagIsSecure) {
- uint8_t iv[16];
- memset(iv, 0, sizeof(iv));
-
- ssize_t outLength;
-
- if (mFlags & kFlagIsSoftwareCodec) {
- outLength = mCrypto->decryptAudio(
- (flags & BUFFER_FLAG_ENCRYPTED) ? iv : NULL,
- (flags & BUFFER_FLAG_ENCRYPTED) ? sizeof(iv) : 0,
- info->mEncryptedData->base() + offset,
- size,
- info->mData->base(),
- info->mData->capacity());
- } else {
- outLength = mCrypto->decryptVideo(
- (flags & BUFFER_FLAG_ENCRYPTED) ? iv : NULL,
- (flags & BUFFER_FLAG_ENCRYPTED) ? sizeof(iv) : 0,
- info->mEncryptedData->base() + offset,
- size,
- info->mData->base(),
- 0 /* offset */);
- }
-
- if (outLength < 0) {
- return outLength;
- }
-
- if ((size_t)outLength > info->mEncryptedData->capacity()) {
+ if (mCrypto != NULL) {
+ if (size > info->mEncryptedData->capacity()) {
return -ERANGE;
}
- info->mData->setRange(0, outLength);
+ uint8_t key[16];
+ uint8_t iv[16];
+
+ CryptoPlugin::Mode mode;
+ CryptoPlugin::SubSample ss;
+ if (flags & BUFFER_FLAG_ENCRYPTED) {
+ mode = CryptoPlugin::kMode_AES_WV;
+ ss.mNumBytesOfClearData = 0;
+ ss.mNumBytesOfEncryptedData = size;
+ } else {
+ mode = CryptoPlugin::kMode_Unencrypted;
+ ss.mNumBytesOfClearData = size;
+ ss.mNumBytesOfEncryptedData = 0;
+ }
+
+ status_t err = mCrypto->decrypt(
+ (mFlags & kFlagIsSecure) != 0,
+ key,
+ iv,
+ mode,
+ info->mEncryptedData->base() + offset,
+ &ss,
+ 1 /* numSubSamples */,
+ info->mData->base());
+
+ if (err != OK) {
+ return err;
+ }
+
+ info->mData->setRange(0, size);
} else if (flags & BUFFER_FLAG_ENCRYPTED) {
return -EINVAL;
}
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 6b64e21..c39aa77 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -22,6 +22,8 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/OMXClient.h>
+#include <media/stagefright/OMXCodec.h>
#include <utils/threads.h>
#include <expat.h>
@@ -64,6 +66,9 @@
addMediaCodec(true /* encoder */, "M4vH263Encoder");
addType("video/3gpp");
addType("video/mp4v-es");
+
+ addMediaCodec(
+ false /* encoder */, "OMX.google.raw.decoder", "audio/raw");
}
#if 0
@@ -445,6 +450,10 @@
return -ENOENT;
}
+size_t MediaCodecList::countCodecs() const {
+ return mCodecInfos.size();
+}
+
const char *MediaCodecList::getCodecName(size_t index) const {
if (index >= mCodecInfos.size()) {
return NULL;
@@ -454,6 +463,15 @@
return info.mName.c_str();
}
+bool MediaCodecList::isEncoder(size_t index) const {
+ if (index >= mCodecInfos.size()) {
+ return NULL;
+        return false;
+
+ const CodecInfo &info = mCodecInfos.itemAt(index);
+ return info.mIsEncoder;
+}
+
bool MediaCodecList::codecHasQuirk(
size_t index, const char *quirkName) const {
if (index >= mCodecInfos.size()) {
@@ -472,4 +490,69 @@
return false;
}
+status_t MediaCodecList::getSupportedTypes(
+ size_t index, Vector<AString> *types) const {
+ types->clear();
+
+ if (index >= mCodecInfos.size()) {
+ return -ERANGE;
+ }
+
+ const CodecInfo &info = mCodecInfos.itemAt(index);
+
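+    // info.mTypes is a bitmask indexed by the bit positions stored in mTypes;
+    // collect every MIME type whose bit is set for this codec.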
+ for (size_t i = 0; i < mTypes.size(); ++i) {
+ uint32_t typeMask = 1ul << mTypes.valueAt(i);
+
+ if (info.mTypes & typeMask) {
+ types->push(mTypes.keyAt(i));
+ }
+ }
+
+ return OK;
+}
+
+status_t MediaCodecList::getCodecCapabilities(
+ size_t index, const char *type,
+ Vector<ProfileLevel> *profileLevels,
+ Vector<uint32_t> *colorFormats) const {
+ profileLevels->clear();
+ colorFormats->clear();
+
+ if (index >= mCodecInfos.size()) {
+ return -ERANGE;
+ }
+
+ const CodecInfo &info = mCodecInfos.itemAt(index);
+
+ OMXClient client;
+ status_t err = client.connect();
+ if (err != OK) {
+ return err;
+ }
+
+ CodecCapabilities caps;
+ err = QueryCodec(
+ client.interface(),
+ info.mName.c_str(), type, info.mIsEncoder, &caps);
+
+ if (err != OK) {
+ return err;
+ }
+
+ for (size_t i = 0; i < caps.mProfileLevels.size(); ++i) {
+ const CodecProfileLevel &src = caps.mProfileLevels.itemAt(i);
+
+ ProfileLevel profileLevel;
+ profileLevel.mProfile = src.mProfile;
+ profileLevel.mLevel = src.mLevel;
+ profileLevels->push(profileLevel);
+ }
+
+ for (size_t i = 0; i < caps.mColorFormats.size(); ++i) {
+ colorFormats->push(caps.mColorFormats.itemAt(i));
+ }
+
+ return OK;
+}
+
} // namespace android
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 9769f21..1c4b47e 100755
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -534,6 +534,14 @@
CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
setG711Format(numChannels);
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RAW, mMIME)) {
+ CHECK(!mIsEncoder);
+
+ int32_t numChannels, sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+
+ setRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
}
if (!strncasecmp(mMIME, "video/", 6)) {
@@ -1359,6 +1367,8 @@
"video_decoder.h263", "video_encoder.h263" },
{ MEDIA_MIMETYPE_VIDEO_VPX,
"video_decoder.vpx", "video_encoder.vpx" },
+ { MEDIA_MIMETYPE_AUDIO_RAW,
+ "audio_decoder.raw", "audio_encoder.raw" },
};
static const size_t kNumMimeToRole =
@@ -1605,8 +1615,7 @@
}
delete mSkipCutBuffer;
}
- mSkipCutBuffer = new SkipCutBuffer(delay * frameSize, padding * frameSize,
- def.nBufferSize);
+ mSkipCutBuffer = new SkipCutBuffer(delay * frameSize, padding * frameSize);
}
}
}
@@ -4500,70 +4509,84 @@
for (size_t c = 0; c < matchingCodecs.size(); c++) {
const char *componentName = matchingCodecs.itemAt(c).string();
- if (strncmp(componentName, "OMX.", 4)) {
- // Not an OpenMax component but a software codec.
-
- results->push();
- CodecCapabilities *caps = &results->editItemAt(results->size() - 1);
- caps->mComponentName = componentName;
- continue;
- }
-
- sp<OMXCodecObserver> observer = new OMXCodecObserver;
- IOMX::node_id node;
- status_t err = omx->allocateNode(componentName, observer, &node);
-
- if (err != OK) {
- continue;
- }
-
- OMXCodec::setComponentRole(omx, node, !queryDecoders, mime);
-
results->push();
CodecCapabilities *caps = &results->editItemAt(results->size() - 1);
- caps->mComponentName = componentName;
- OMX_VIDEO_PARAM_PROFILELEVELTYPE param;
-    InitOMXParams(&param);
+ status_t err =
+ QueryCodec(omx, componentName, mime, !queryDecoders, caps);
- param.nPortIndex = queryDecoders ? 0 : 1;
-
- for (param.nProfileIndex = 0;; ++param.nProfileIndex) {
- err = omx->getParameter(
- node, OMX_IndexParamVideoProfileLevelQuerySupported,
-                &param, sizeof(param));
-
- if (err != OK) {
- break;
- }
-
- CodecProfileLevel profileLevel;
- profileLevel.mProfile = param.eProfile;
- profileLevel.mLevel = param.eLevel;
-
- caps->mProfileLevels.push(profileLevel);
+ if (err != OK) {
+ results->removeAt(results->size() - 1);
}
-
- // Color format query
- OMX_VIDEO_PARAM_PORTFORMATTYPE portFormat;
- InitOMXParams(&portFormat);
- portFormat.nPortIndex = queryDecoders ? 1 : 0;
- for (portFormat.nIndex = 0;; ++portFormat.nIndex) {
- err = omx->getParameter(
- node, OMX_IndexParamVideoPortFormat,
- &portFormat, sizeof(portFormat));
- if (err != OK) {
- break;
- }
- caps->mColorFormats.push(portFormat.eColorFormat);
- }
-
- CHECK_EQ(omx->freeNode(node), (status_t)OK);
}
return OK;
}
+status_t QueryCodec(
+ const sp<IOMX> &omx,
+ const char *componentName, const char *mime,
+ bool isEncoder,
+ CodecCapabilities *caps) {
+ if (strncmp(componentName, "OMX.", 4)) {
+ // Not an OpenMax component but a software codec.
+
+ caps->mComponentName = componentName;
+ return OK;
+ }
+
+ sp<OMXCodecObserver> observer = new OMXCodecObserver;
+ IOMX::node_id node;
+ status_t err = omx->allocateNode(componentName, observer, &node);
+
+ if (err != OK) {
+ return err;
+ }
+
+ OMXCodec::setComponentRole(omx, node, isEncoder, mime);
+
+ caps->mComponentName = componentName;
+
+ OMX_VIDEO_PARAM_PROFILELEVELTYPE param;
+    InitOMXParams(&param);
+
+ param.nPortIndex = !isEncoder ? 0 : 1;
+
+ for (param.nProfileIndex = 0;; ++param.nProfileIndex) {
+ err = omx->getParameter(
+ node, OMX_IndexParamVideoProfileLevelQuerySupported,
+                &param, sizeof(param));
+
+ if (err != OK) {
+ break;
+ }
+
+ CodecProfileLevel profileLevel;
+ profileLevel.mProfile = param.eProfile;
+ profileLevel.mLevel = param.eLevel;
+
+ caps->mProfileLevels.push(profileLevel);
+ }
+
+ // Color format query
+ OMX_VIDEO_PARAM_PORTFORMATTYPE portFormat;
+ InitOMXParams(&portFormat);
+ portFormat.nPortIndex = !isEncoder ? 1 : 0;
+ for (portFormat.nIndex = 0;; ++portFormat.nIndex) {
+ err = omx->getParameter(
+ node, OMX_IndexParamVideoPortFormat,
+ &portFormat, sizeof(portFormat));
+ if (err != OK) {
+ break;
+ }
+ caps->mColorFormats.push(portFormat.eColorFormat);
+ }
+
+ CHECK_EQ(omx->freeNode(node), (status_t)OK);
+
+ return OK;
+}
+
status_t QueryCodecs(
const sp<IOMX> &omx,
const char *mimeType, bool queryDecoders,
diff --git a/media/libstagefright/SkipCutBuffer.cpp b/media/libstagefright/SkipCutBuffer.cpp
index 6d331b0..773854f 100755
--- a/media/libstagefright/SkipCutBuffer.cpp
+++ b/media/libstagefright/SkipCutBuffer.cpp
@@ -24,12 +24,12 @@
namespace android {
-SkipCutBuffer::SkipCutBuffer(int32_t skip, int32_t cut, int32_t output_size) {
+SkipCutBuffer::SkipCutBuffer(int32_t skip, int32_t cut) {
mFrontPadding = skip;
mBackPadding = cut;
mWriteHead = 0;
mReadHead = 0;
- mCapacity = cut + output_size;
+ mCapacity = cut + 4096;
mCutBuffer = new char[mCapacity];
ALOGV("skipcutbuffer %d %d %d", skip, cut, mCapacity);
}
@@ -65,6 +65,33 @@
buffer->set_range(0, copied);
}
+void SkipCutBuffer::submit(const sp<ABuffer>& buffer) {
+ int32_t offset = buffer->offset();
+ int32_t buflen = buffer->size();
+
+ // drop the initial data from the buffer if needed
+ if (mFrontPadding > 0) {
+ // still data left to drop
+ int32_t to_drop = (buflen < mFrontPadding) ? buflen : mFrontPadding;
+ offset += to_drop;
+ buflen -= to_drop;
+ buffer->setRange(offset, buflen);
+ mFrontPadding -= to_drop;
+ }
+
+
+ // append data to cutbuffer
+ char *src = (char*) buffer->data();
+ write(src, buflen);
+
+
+ // the mediabuffer is now empty. Fill it from cutbuffer, always leaving
+ // at least mBackPadding bytes in the cutbuffer
+ char *dst = (char*) buffer->base();
+ size_t copied = read(dst, buffer->capacity());
+ buffer->setRange(0, copied);
+}
+
void SkipCutBuffer::clear() {
mWriteHead = mReadHead = 0;
}
@@ -73,8 +100,19 @@
int32_t sizeused = (mWriteHead - mReadHead);
if (sizeused < 0) sizeused += mCapacity;
- // everything must fit
- CHECK_GE((mCapacity - size_t(sizeused)), num);
+ // Everything must fit. Make sure the buffer is a little larger than needed,
+ // so there is no ambiguity as to whether mWriteHead == mReadHead means buffer
+ // full or empty
+ size_t available = mCapacity - sizeused - 32;
+ if (available < num) {
+ int32_t newcapacity = mCapacity + (num - available);
+ char * newbuffer = new char[newcapacity];
+ memcpy(newbuffer, mCutBuffer, mCapacity);
+ delete [] mCutBuffer;
+ mCapacity = newcapacity;
+ mCutBuffer = newbuffer;
+ ALOGV("reallocated buffer at size %d", newcapacity);
+ }
size_t copyfirst = (mCapacity - mWriteHead);
if (copyfirst > num) copyfirst = num;
@@ -100,7 +138,7 @@
if (available <=0) {
return 0;
}
- if (available < num) {
+ if (available < int32_t(num)) {
num = available;
}
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index 7481e2e..1345cd9 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -460,6 +460,7 @@
}
status_t SurfaceMediaSource::queueBuffer(int bufIndex, int64_t timestamp,
+ const Rect& crop, int scalingMode, uint32_t transform,
uint32_t* outWidth, uint32_t* outHeight, uint32_t* outTransform) {
ALOGV("queueBuffer");
diff --git a/media/libstagefright/codecs/raw/Android.mk b/media/libstagefright/codecs/raw/Android.mk
new file mode 100644
index 0000000..285c747
--- /dev/null
+++ b/media/libstagefright/codecs/raw/Android.mk
@@ -0,0 +1,17 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+ SoftRaw.cpp
+
+LOCAL_C_INCLUDES := \
+ frameworks/av/media/libstagefright/include \
+ frameworks/native/include/media/openmax
+
+LOCAL_SHARED_LIBRARIES := \
+ libstagefright_omx libstagefright_foundation libutils
+
+LOCAL_MODULE := libstagefright_soft_rawdec
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/raw/SoftRaw.cpp b/media/libstagefright/codecs/raw/SoftRaw.cpp
new file mode 100644
index 0000000..19d6f13
--- /dev/null
+++ b/media/libstagefright/codecs/raw/SoftRaw.cpp
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftRaw"
+#include <utils/Log.h>
+
+#include "SoftRaw.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
+
+namespace android {
+
+template<class T>
+static void InitOMXParams(T *params) {
+ params->nSize = sizeof(T);
+ params->nVersion.s.nVersionMajor = 1;
+ params->nVersion.s.nVersionMinor = 0;
+ params->nVersion.s.nRevision = 0;
+ params->nVersion.s.nStep = 0;
+}
+
+SoftRaw::SoftRaw(
+ const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component)
+ : SimpleSoftOMXComponent(name, callbacks, appData, component),
+ mSignalledError(false),
+ mChannelCount(2),
+ mSampleRate(44100) {
+ initPorts();
+ CHECK_EQ(initDecoder(), (status_t)OK);
+}
+
+SoftRaw::~SoftRaw() {
+}
+
+void SoftRaw::initPorts() {
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+
+ def.nPortIndex = 0;
+ def.eDir = OMX_DirInput;
+ def.nBufferCountMin = kNumBuffers;
+ def.nBufferCountActual = def.nBufferCountMin;
+ def.nBufferSize = 32 * 1024;
+ def.bEnabled = OMX_TRUE;
+ def.bPopulated = OMX_FALSE;
+ def.eDomain = OMX_PortDomainAudio;
+ def.bBuffersContiguous = OMX_FALSE;
+ def.nBufferAlignment = 1;
+
+ def.format.audio.cMIMEType = const_cast<char *>("audio/raw");
+ def.format.audio.pNativeRender = NULL;
+ def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
+
+ addPort(def);
+
+ def.nPortIndex = 1;
+ def.eDir = OMX_DirOutput;
+ def.nBufferCountMin = kNumBuffers;
+ def.nBufferCountActual = def.nBufferCountMin;
+ def.nBufferSize = 32 * 1024;
+ def.bEnabled = OMX_TRUE;
+ def.bPopulated = OMX_FALSE;
+ def.eDomain = OMX_PortDomainAudio;
+ def.bBuffersContiguous = OMX_FALSE;
+ def.nBufferAlignment = 2;
+
+ def.format.audio.cMIMEType = const_cast<char *>("audio/raw");
+ def.format.audio.pNativeRender = NULL;
+ def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
+
+ addPort(def);
+}
+
+status_t SoftRaw::initDecoder() {
+ return OK;
+}
+
+OMX_ERRORTYPE SoftRaw::internalGetParameter(
+ OMX_INDEXTYPE index, OMX_PTR params) {
+ switch (index) {
+ case OMX_IndexParamAudioPcm:
+ {
+ OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+ (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+ if (pcmParams->nPortIndex != 0 && pcmParams->nPortIndex != 1) {
+ return OMX_ErrorUndefined;
+ }
+
+ pcmParams->eNumData = OMX_NumericalDataSigned;
+ pcmParams->eEndian = OMX_EndianBig;
+ pcmParams->bInterleaved = OMX_TRUE;
+ pcmParams->nBitPerSample = 16;
+ pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
+ pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
+ pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
+
+ pcmParams->nChannels = mChannelCount;
+ pcmParams->nSamplingRate = mSampleRate;
+
+ return OMX_ErrorNone;
+ }
+
+ default:
+ return SimpleSoftOMXComponent::internalGetParameter(index, params);
+ }
+}
+
+OMX_ERRORTYPE SoftRaw::internalSetParameter(
+ OMX_INDEXTYPE index, const OMX_PTR params) {
+ switch (index) {
+ case OMX_IndexParamStandardComponentRole:
+ {
+ const OMX_PARAM_COMPONENTROLETYPE *roleParams =
+ (const OMX_PARAM_COMPONENTROLETYPE *)params;
+
+ if (strncmp((const char *)roleParams->cRole,
+ "audio_decoder.raw",
+ OMX_MAX_STRINGNAME_SIZE - 1)) {
+ return OMX_ErrorUndefined;
+ }
+
+ return OMX_ErrorNone;
+ }
+
+ case OMX_IndexParamAudioPcm:
+ {
+ const OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+ (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+ if (pcmParams->nPortIndex != 0) {
+ return OMX_ErrorUndefined;
+ }
+
+ mChannelCount = pcmParams->nChannels;
+ mSampleRate = pcmParams->nSamplingRate;
+
+ return OMX_ErrorNone;
+ }
+
+ default:
+ return SimpleSoftOMXComponent::internalSetParameter(index, params);
+ }
+}
+
+void SoftRaw::onQueueFilled(OMX_U32 portIndex) {
+ if (mSignalledError) {
+ return;
+ }
+
+ List<BufferInfo *> &inQueue = getPortQueue(0);
+ List<BufferInfo *> &outQueue = getPortQueue(1);
+
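+    // This decoder is a passthrough: each input buffer of PCM is copied verbatim
+    // to the output together with its flags and timestamp.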
+ while (!inQueue.empty() && !outQueue.empty()) {
+ BufferInfo *inInfo = *inQueue.begin();
+ OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+
+ BufferInfo *outInfo = *outQueue.begin();
+ OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+
+ CHECK_GE(outHeader->nAllocLen, inHeader->nFilledLen);
+ memcpy(outHeader->pBuffer,
+ inHeader->pBuffer + inHeader->nOffset,
+ inHeader->nFilledLen);
+
+ outHeader->nFlags = inHeader->nFlags;
+ outHeader->nOffset = 0;
+ outHeader->nFilledLen = inHeader->nFilledLen;
+ outHeader->nTimeStamp = inHeader->nTimeStamp;
+
+ bool sawEOS = (inHeader->nFlags & OMX_BUFFERFLAG_EOS) != 0;
+
+ inQueue.erase(inQueue.begin());
+ inInfo->mOwnedByUs = false;
+ notifyEmptyBufferDone(inHeader);
+
+ outQueue.erase(outQueue.begin());
+ outInfo->mOwnedByUs = false;
+ notifyFillBufferDone(outHeader);
+
+ if (sawEOS) {
+ break;
+ }
+ }
+}
+
+} // namespace android
+
+android::SoftOMXComponent *createSoftOMXComponent(
+ const char *name, const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData, OMX_COMPONENTTYPE **component) {
+ return new android::SoftRaw(name, callbacks, appData, component);
+}
diff --git a/media/libstagefright/codecs/raw/SoftRaw.h b/media/libstagefright/codecs/raw/SoftRaw.h
new file mode 100644
index 0000000..015c4a3
--- /dev/null
+++ b/media/libstagefright/codecs/raw/SoftRaw.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_RAW_H_
+
+#define SOFT_RAW_H_
+
+#include "SimpleSoftOMXComponent.h"
+
+struct tPVMP4AudioDecoderExternal;
+
+namespace android {
+
+struct SoftRaw : public SimpleSoftOMXComponent {
+ SoftRaw(const char *name,
+ const OMX_CALLBACKTYPE *callbacks,
+ OMX_PTR appData,
+ OMX_COMPONENTTYPE **component);
+
+protected:
+ virtual ~SoftRaw();
+
+ virtual OMX_ERRORTYPE internalGetParameter(
+ OMX_INDEXTYPE index, OMX_PTR params);
+
+ virtual OMX_ERRORTYPE internalSetParameter(
+ OMX_INDEXTYPE index, const OMX_PTR params);
+
+ virtual void onQueueFilled(OMX_U32 portIndex);
+
+private:
+ enum {
+ kNumBuffers = 4
+ };
+
+ bool mSignalledError;
+
+ int32_t mChannelCount;
+ int32_t mSampleRate;
+
+ void initPorts();
+ status_t initDecoder();
+
+ DISALLOW_EVIL_CONSTRUCTORS(SoftRaw);
+};
+
+} // namespace android
+
+#endif // SOFT_RAW_H_
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index a0db719..f8c272c 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -313,11 +313,94 @@
*actualFrameTimeUs = -1ll;
- int64_t seekTimeNs = seekTimeUs * 1000ll;
+ const int64_t seekTimeNs = seekTimeUs * 1000ll;
- mCluster = mExtractor->mSegment->FindCluster(seekTimeNs);
- mBlockEntry = NULL;
- mBlockEntryIndex = 0;
+ mkvparser::Segment* const pSegment = mExtractor->mSegment;
+
+ // Special case the 0 seek to avoid loading Cues when the application
+ // extraneously seeks to 0 before playing.
+ if (seekTimeNs <= 0) {
+ ALOGV("Seek to beginning: %lld", seekTimeUs);
+ mCluster = pSegment->GetFirst();
+ mBlockEntryIndex = 0;
+ do {
+ advance_l();
+ } while (!eos() && block()->GetTrackNumber() != mTrackNum);
+ return;
+ }
+
+ ALOGV("Seeking to: %lld", seekTimeUs);
+
+ // If the Cues have not been located then find them.
+ const mkvparser::Cues* pCues = pSegment->GetCues();
+ const mkvparser::SeekHead* pSH = pSegment->GetSeekHead();
+ if (!pCues && pSH) {
+ const size_t count = pSH->GetCount();
+ const mkvparser::SeekHead::Entry* pEntry;
+ ALOGV("No Cues yet");
+
+ for (size_t index = 0; index < count; index++) {
+ pEntry = pSH->GetEntry(index);
+
+ if (pEntry->id == 0x0C53BB6B) { // Cues ID
+ long len; long long pos;
+ pSegment->ParseCues(pEntry->pos, pos, len);
+ pCues = pSegment->GetCues();
+ ALOGV("Cues found");
+ break;
+ }
+ }
+
+ if (!pCues) {
+ ALOGE("No Cues in file");
+ return;
+ }
+ }
+ else if (!pSH) {
+ ALOGE("No SeekHead");
+ return;
+ }
+
+ const mkvparser::CuePoint* pCP;
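+    // Incrementally load cue points until one at or past the seek target has
+    // been parsed, so the Find() below can locate the right cluster.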
+ while (!pCues->DoneParsing()) {
+ pCues->LoadCuePoint();
+ pCP = pCues->GetLast();
+
+ if (pCP->GetTime(pSegment) >= seekTimeNs) {
+ ALOGV("Parsed past relevant Cue");
+ break;
+ }
+ }
+
+ // Find the video track for seeking. It doesn't make sense to search the
+ // audio track because we'd still want to make sure we're jumping to a
+ // keyframe in the video track.
+ mkvparser::Tracks const *pTracks = pSegment->GetTracks();
+ const mkvparser::Track *pTrack = NULL;
+ for (size_t index = 0; index < pTracks->GetTracksCount(); ++index) {
+ pTrack = pTracks->GetTrackByIndex(index);
+ if (pTrack && pTrack->GetType() == 1) { // VIDEO_TRACK
+ ALOGV("Video track located at %d", index);
+ break;
+ }
+ }
+
+ const mkvparser::CuePoint::TrackPosition* pTP;
+ if (pTrack) {
+ pCues->Find(seekTimeNs, pTrack, pCP, pTP);
+ } else {
+ ALOGE("Did not locate a VIDEO_TRACK");
+ return;
+ }
+
+ mCluster = pSegment->FindOrPreloadCluster(pTP->m_pos);
+ if (pTP->m_block > 0) {
+ // m_block starts at 1, but mBlockEntryIndex is expected to start at 0
+ mBlockEntryIndex = pTP->m_block - 1;
+ } else {
+ ALOGE("m_block must be > 0");
+ return;
+ }
long prevKeyFrameBlockEntryIndex = -1;
@@ -593,16 +676,12 @@
return;
}
- if (isLiveStreaming()) {
- ret = mSegment->ParseHeaders();
- CHECK_EQ(ret, 0);
+ ret = mSegment->ParseHeaders();
+ CHECK_EQ(ret, 0);
- long len;
- ret = mSegment->LoadCluster(pos, len);
- CHECK_EQ(ret, 0);
- } else {
- ret = mSegment->Load();
- }
+ long len;
+ ret = mSegment->LoadCluster(pos, len);
+ CHECK_EQ(ret, 0);
if (ret < 0) {
delete mSegment;
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
index 99ffe7d..d0c7346 100644
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -48,6 +48,7 @@
{ "OMX.google.mp3.decoder", "mp3dec", "audio_decoder.mp3" },
{ "OMX.google.vorbis.decoder", "vorbisdec", "audio_decoder.vorbis" },
{ "OMX.google.vpx.decoder", "vpxdec", "video_decoder.vpx" },
+ { "OMX.google.raw.decoder", "rawdec", "audio_decoder.raw" },
};
static const size_t kNumComponents =
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index c5ad0f5..9e6a6df 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -149,13 +149,6 @@
return rc;
}
-static const char * const audio_interfaces[] = {
- "primary",
- "a2dp",
- "usb",
-};
-#define ARRAY_SIZE(x) (sizeof((x))/sizeof(((x)[0])))
-
// ----------------------------------------------------------------------------
AudioFlinger::AudioFlinger()
@@ -191,87 +184,9 @@
}
}
- for (size_t i = 0; i < ARRAY_SIZE(audio_interfaces); i++) {
- const hw_module_t *mod;
- audio_hw_device_t *dev;
-
- rc = load_audio_interface(audio_interfaces[i], &mod, &dev);
- if (rc)
- continue;
-
- ALOGI("Loaded %s audio interface from %s (%s)", audio_interfaces[i],
- mod->name, mod->id);
- mAudioHwDevs.push(dev);
-
- if (mPrimaryHardwareDev == NULL) {
- mPrimaryHardwareDev = dev;
- ALOGI("Using '%s' (%s.%s) as the primary audio interface",
- mod->name, mod->id, audio_interfaces[i]);
- }
- }
-
- if (mPrimaryHardwareDev == NULL) {
- ALOGE("Primary audio interface not found");
- // proceed, all later accesses to mPrimaryHardwareDev verify it's safe with initCheck()
- }
-
- // Currently (mPrimaryHardwareDev == NULL) == (mAudioHwDevs.size() == 0), but the way the
- // primary HW dev is selected can change so these conditions might not always be equivalent.
- // When that happens, re-visit all the code that assumes this.
-
- AutoMutex lock(mHardwareLock);
-
- // Determine the level of master volume support the primary audio HAL has,
- // and set the initial master volume at the same time.
- float initialVolume = 1.0;
- mMasterVolumeSupportLvl = MVS_NONE;
- if (0 == mPrimaryHardwareDev->init_check(mPrimaryHardwareDev)) {
- audio_hw_device_t *dev = mPrimaryHardwareDev;
-
- mHardwareStatus = AUDIO_HW_GET_MASTER_VOLUME;
- if ((NULL != dev->get_master_volume) &&
- (NO_ERROR == dev->get_master_volume(dev, &initialVolume))) {
- mMasterVolumeSupportLvl = MVS_FULL;
- } else {
- mMasterVolumeSupportLvl = MVS_SETONLY;
- initialVolume = 1.0;
- }
-
- mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
- if ((NULL == dev->set_master_volume) ||
- (NO_ERROR != dev->set_master_volume(dev, initialVolume))) {
- mMasterVolumeSupportLvl = MVS_NONE;
- }
- mHardwareStatus = AUDIO_HW_IDLE;
- }
-
- // Set the mode for each audio HAL, and try to set the initial volume (if
- // supported) for all of the non-primary audio HALs.
- for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
- audio_hw_device_t *dev = mAudioHwDevs[i];
-
- mHardwareStatus = AUDIO_HW_INIT;
- rc = dev->init_check(dev);
- mHardwareStatus = AUDIO_HW_IDLE;
- if (rc == 0) {
- mMode = AUDIO_MODE_NORMAL; // assigned multiple times with same value
- mHardwareStatus = AUDIO_HW_SET_MODE;
- dev->set_mode(dev, mMode);
-
- if ((dev != mPrimaryHardwareDev) &&
- (NULL != dev->set_master_volume)) {
- mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
- dev->set_master_volume(dev, initialVolume);
- }
-
- mHardwareStatus = AUDIO_HW_IDLE;
- }
- }
-
- mMasterVolumeSW = (MVS_NONE == mMasterVolumeSupportLvl)
- ? initialVolume
- : 1.0;
- mMasterVolume = initialVolume;
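+    // Audio HAL modules are now loaded on demand (via loadHwModule() or
+    // findSuitableHwDev_l()), so only default state is set up here.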
+ mMode = AUDIO_MODE_NORMAL;
+ mMasterVolumeSW = 1.0;
+ mMasterVolume = 1.0;
mHardwareStatus = AUDIO_HW_IDLE;
}
@@ -289,18 +204,41 @@
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
// no mHardwareLock needed, as there are no other references to this
- audio_hw_device_close(mAudioHwDevs[i]);
+ audio_hw_device_close(mAudioHwDevs.valueAt(i)->hwDevice());
+ delete mAudioHwDevs.valueAt(i);
}
}
-audio_hw_device_t* AudioFlinger::findSuitableHwDev_l(uint32_t devices)
+static const char * const audio_interfaces[] = {
+ AUDIO_HARDWARE_MODULE_ID_PRIMARY,
+ AUDIO_HARDWARE_MODULE_ID_A2DP,
+ AUDIO_HARDWARE_MODULE_ID_USB,
+};
+#define ARRAY_SIZE(x) (sizeof((x))/sizeof(((x)[0])))
+
+audio_hw_device_t* AudioFlinger::findSuitableHwDev_l(audio_module_handle_t module, uint32_t devices)
{
- /* first matching HW device is returned */
+ // if module is 0, the request comes from an old policy manager and we should load
+ // well known modules
+ if (module == 0) {
+        ALOGW("findSuitableHwDev_l() loading well known audio hw modules");
+ for (size_t i = 0; i < ARRAY_SIZE(audio_interfaces); i++) {
+ loadHwModule_l(audio_interfaces[i]);
+ }
+ } else {
+ // check a match for the requested module handle
+ AudioHwDevice *audioHwdevice = mAudioHwDevs.valueFor(module);
+ if (audioHwdevice != NULL) {
+ return audioHwdevice->hwDevice();
+ }
+ }
+ // then try to find a module supporting the requested device.
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
- audio_hw_device_t *dev = mAudioHwDevs[i];
+ audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
if ((dev->get_supported_devices(dev) & devices) == devices)
return dev;
}
+
return NULL;
}
@@ -411,7 +349,7 @@
// dump all hardware devs
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
- audio_hw_device_t *dev = mAudioHwDevs[i];
+ audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
dev->dump(dev, fd);
}
if (locked) mLock.unlock();
@@ -504,7 +442,7 @@
bool isTimed = (flags & IAudioFlinger::TRACK_TIMED) != 0;
track = thread->createTrack_l(client, streamType, sampleRate, format,
- channelMask, frameCount, sharedBuffer, lSessionId, isTimed, &lStatus);
+ channelMask, frameCount, sharedBuffer, lSessionId, flags, &lStatus);
// move effect chain to this output thread if an effect on same session was waiting
// for a track to be created
@@ -513,6 +451,17 @@
Mutex::Autolock _sl(effectThread->mLock);
moveEffectChain_l(lSessionId, effectThread, thread, true);
}
+
+ // Look for sync events awaiting a session to be used.
+ for (int i = 0; i < (int)mPendingSyncEvents.size(); i++) {
+ if (mPendingSyncEvents[i]->triggerSession() == lSessionId) {
+ if (thread->isValidSyncEvent(mPendingSyncEvents[i])) {
+ track->setSyncEvent(mPendingSyncEvents[i]);
+ mPendingSyncEvents.removeAt(i);
+ i--;
+ }
+ }
+ }
}
if (lStatus == NO_ERROR) {
trackHandle = new TrackHandle(track);
@@ -599,11 +548,13 @@
float swmv = value;
+ Mutex::Autolock _l(mLock);
+
// when hw supports master volume, don't scale in sw mixer
if (MVS_NONE != mMasterVolumeSupportLvl) {
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
AutoMutex lock(mHardwareLock);
- audio_hw_device_t *dev = mAudioHwDevs[i];
+ audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
if (NULL != dev->set_master_volume) {
@@ -615,7 +566,6 @@
swmv = 1.0;
}
- Mutex::Autolock _l(mLock);
mMasterVolume = value;
mMasterVolumeSW = swmv;
for (size_t i = 0; i < mPlaybackThreads.size(); i++)
@@ -842,22 +792,22 @@
// ioHandle == 0 means the parameters are global to the audio hardware interface
if (ioHandle == 0) {
+ Mutex::Autolock _l(mLock);
status_t final_result = NO_ERROR;
{
- AutoMutex lock(mHardwareLock);
- mHardwareStatus = AUDIO_HW_SET_PARAMETER;
- for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
- audio_hw_device_t *dev = mAudioHwDevs[i];
- status_t result = dev->set_parameters(dev, keyValuePairs.string());
- final_result = result ?: final_result;
- }
- mHardwareStatus = AUDIO_HW_IDLE;
+ AutoMutex lock(mHardwareLock);
+ mHardwareStatus = AUDIO_HW_SET_PARAMETER;
+ for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+ audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
+ status_t result = dev->set_parameters(dev, keyValuePairs.string());
+ final_result = result ?: final_result;
+ }
+ mHardwareStatus = AUDIO_HW_IDLE;
}
// disable AEC and NS if the device is a BT SCO headset supporting those pre processings
AudioParameter param = AudioParameter(keyValuePairs);
String8 value;
if (param.get(String8(AUDIO_PARAMETER_KEY_BT_NREC), value) == NO_ERROR) {
- Mutex::Autolock _l(mLock);
bool btNrecIsOff = (value == AUDIO_PARAMETER_VALUE_OFF);
if (mBtNrecIsOff != btNrecIsOff) {
for (size_t i = 0; i < mRecordThreads.size(); i++) {
@@ -912,6 +862,8 @@
// ALOGV("getParameters() io %d, keys %s, tid %d, calling pid %d",
// ioHandle, keys.string(), gettid(), IPCThreadState::self()->getCallingPid());
+ Mutex::Autolock _l(mLock);
+
if (ioHandle == 0) {
String8 out_s8;
@@ -920,7 +872,7 @@
{
AutoMutex lock(mHardwareLock);
mHardwareStatus = AUDIO_HW_GET_PARAMETER;
- audio_hw_device_t *dev = mAudioHwDevs[i];
+ audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
s = dev->get_parameters(dev, keys.string());
mHardwareStatus = AUDIO_HW_IDLE;
}
@@ -930,8 +882,6 @@
return out_s8;
}
- Mutex::Autolock _l(mLock);
-
PlaybackThread *playbackThread = checkPlaybackThread_l(ioHandle);
if (playbackThread != NULL) {
return playbackThread->getParameters(keys);
@@ -1492,8 +1442,6 @@
stream = (audio_stream_type_t) (stream + 1)) {
mStreamTypes[stream].volume = mAudioFlinger->streamVolume_l(stream);
mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream);
- // initialized by stream_type_t default constructor
- // mStreamTypes[stream].valid = true;
}
// mStreamTypes[AUDIO_STREAM_CNT] exists but isn't explicitly initialized here,
// because mAudioFlinger doesn't have one to copy from
@@ -1597,12 +1545,50 @@
int frameCount,
const sp<IMemory>& sharedBuffer,
int sessionId,
- bool isTimed,
+ IAudioFlinger::track_flags_t flags,
status_t *status)
{
sp<Track> track;
status_t lStatus;
+ bool isTimed = (flags & IAudioFlinger::TRACK_TIMED) != 0;
+
+ // client expresses a preference for FAST, but we get the final say
+ if ((flags & IAudioFlinger::TRACK_FAST) &&
+ !(
+ // not timed
+ (!isTimed) &&
+ // either of these use cases:
+ (
+ // use case 1: shared buffer with any frame count
+ (
+ (sharedBuffer != 0)
+ ) ||
+ // use case 2: callback handler and small power-of-2 frame count
+ (
+ // unfortunately we can't verify that there's a callback until start()
+ // FIXME supported frame counts should not be hard-coded
+ (
+ (frameCount == 128) ||
+ (frameCount == 256) ||
+ (frameCount == 512)
+ )
+ )
+ ) &&
+ // PCM data
+ audio_is_linear_pcm(format) &&
+ // mono or stereo
+ ( (channelMask == AUDIO_CHANNEL_OUT_MONO) ||
+ (channelMask == AUDIO_CHANNEL_OUT_STEREO) ) &&
+ // hardware sample rate
+ (sampleRate == mSampleRate)
+ // FIXME test that MixerThread for this fast track has a capable output HAL
+ // FIXME add a permission test also?
+ ) ) {
+ ALOGW("AUDIO_POLICY_OUTPUT_FLAG_FAST denied");
+ flags &= ~IAudioFlinger::TRACK_FAST;
+ }
+
if (mType == DIRECT) {
if ((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_PCM) {
if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
@@ -1650,7 +1636,7 @@
if (!isTimed) {
track = new Track(this, client, streamType, sampleRate, format,
- channelMask, frameCount, sharedBuffer, sessionId);
+ channelMask, frameCount, sharedBuffer, sessionId, flags);
} else {
track = TimedTrack::create(this, client, streamType, sampleRate, format,
channelMask, frameCount, sharedBuffer, sessionId);
@@ -1668,14 +1654,6 @@
chain->setStrategy(AudioSystem::getStrategyForStream(track->streamType()));
chain->incTrackCnt();
}
-
- // invalidate track immediately if the stream type was moved to another thread since
- // createTrack() was called by the client process.
- if (!mStreamTypes[streamType].valid) {
- ALOGW("createTrack_l() on thread %p: invalidating track on stream %d",
- this, streamType);
- android_atomic_or(CBLK_INVALID_ON, &track->mCblk->flags);
- }
}
lStatus = NO_ERROR;
@@ -1913,7 +1891,7 @@
}
// this method must always be called either with ThreadBase mLock held or inside the thread loop
-audio_stream_t* AudioFlinger::PlaybackThread::stream()
+audio_stream_t* AudioFlinger::PlaybackThread::stream() const
{
if (mOutput == NULL) {
return NULL;
@@ -1921,7 +1899,7 @@
return &mOutput->stream->common;
}
-uint32_t AudioFlinger::PlaybackThread::activeSleepTimeUs()
+uint32_t AudioFlinger::PlaybackThread::activeSleepTimeUs() const
{
// A2DP output latency is not due only to buffering capacity. It also reflects encoding,
// decoding and transfer time. So sleeping for half of the latency would likely cause
@@ -1933,6 +1911,36 @@
}
}
+status_t AudioFlinger::PlaybackThread::setSyncEvent(const sp<SyncEvent>& event)
+{
+ if (!isValidSyncEvent(event)) {
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock _l(mLock);
+
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ sp<Track> track = mTracks[i];
+ if (event->triggerSession() == track->sessionId()) {
+ track->setSyncEvent(event);
+ return NO_ERROR;
+ }
+ }
+
+ return NAME_NOT_FOUND;
+}
+
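+// only "presentation complete" sync events are currently handled by playback threads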
+bool AudioFlinger::PlaybackThread::isValidSyncEvent(const sp<SyncEvent>& event)
+{
+ switch (event->type()) {
+ case AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
// ----------------------------------------------------------------------------
AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
@@ -2530,7 +2538,15 @@
if (track->isTerminated() || track->isStopped() || track->isPaused()) {
// We have consumed all the buffers of this track.
// Remove it from the list of active tracks.
- tracksToRemove->add(track);
+ // TODO: use actual buffer filling status instead of latency when available from
+ // audio HAL
+ size_t audioHALFrames =
+ (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
+ size_t framesWritten =
+ mBytesWritten / audio_stream_frame_size(&mOutput->stream->common);
+ if (track->presentationComplete(framesWritten, audioHALFrames)) {
+ tracksToRemove->add(track);
+ }
} else {
// No buffers for this track. Give it a few chances to
// fill a buffer, then remove it from active list.
@@ -2622,15 +2638,6 @@
}
}
-void AudioFlinger::PlaybackThread::setStreamValid(audio_stream_type_t streamType, bool valid)
-{
- ALOGV ("PlaybackThread::setStreamValid() thread %p, streamType %d, valid %d",
- this, streamType, valid);
- Mutex::Autolock _l(mLock);
-
- mStreamTypes[streamType].valid = valid;
-}
-
// getTrackName_l() must be called with ThreadBase::mLock held
int AudioFlinger::MixerThread::getTrackName_l()
{
@@ -2768,12 +2775,12 @@
return NO_ERROR;
}
-uint32_t AudioFlinger::MixerThread::idleSleepTimeUs()
+uint32_t AudioFlinger::MixerThread::idleSleepTimeUs() const
{
return (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000) / 2;
}
-uint32_t AudioFlinger::MixerThread::suspendSleepTimeUs()
+uint32_t AudioFlinger::MixerThread::suspendSleepTimeUs() const
{
return (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000);
}
@@ -2909,7 +2916,14 @@
if (track->isTerminated() || track->isStopped() || track->isPaused()) {
// We have consumed all the buffers of this track.
// Remove it from the list of active tracks.
- trackToRemove = track;
+ // TODO: implement behavior for compressed audio
+ size_t audioHALFrames =
+ (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
+ size_t framesWritten =
+ mBytesWritten / audio_stream_frame_size(&mOutput->stream->common);
+ if (track->presentationComplete(framesWritten, audioHALFrames)) {
+ trackToRemove = track;
+ }
} else {
// No buffers for this track. Give it a few chances to
// fill a buffer, then remove it from active list.
@@ -3110,7 +3124,7 @@
return reconfig;
}
-uint32_t AudioFlinger::DirectOutputThread::activeSleepTimeUs()
+uint32_t AudioFlinger::DirectOutputThread::activeSleepTimeUs() const
{
uint32_t time;
if (audio_is_linear_pcm(mFormat)) {
@@ -3121,7 +3135,7 @@
return time;
}
-uint32_t AudioFlinger::DirectOutputThread::idleSleepTimeUs()
+uint32_t AudioFlinger::DirectOutputThread::idleSleepTimeUs() const
{
uint32_t time;
if (audio_is_linear_pcm(mFormat)) {
@@ -3132,7 +3146,7 @@
return time;
}
-uint32_t AudioFlinger::DirectOutputThread::suspendSleepTimeUs()
+uint32_t AudioFlinger::DirectOutputThread::suspendSleepTimeUs() const
{
uint32_t time;
if (audio_is_linear_pcm(mFormat)) {
@@ -3295,7 +3309,7 @@
return true;
}
-uint32_t AudioFlinger::DuplicatingThread::activeSleepTimeUs()
+uint32_t AudioFlinger::DuplicatingThread::activeSleepTimeUs() const
{
return (mWaitTimeMs * 1000) / 2;
}
@@ -3353,6 +3367,11 @@
// clear all buffers
mCblk->frameCount = frameCount;
mCblk->sampleRate = sampleRate;
+// uncomment the following lines to quickly test 32-bit wraparound
+// mCblk->user = 0xffff0000;
+// mCblk->server = 0xffff0000;
+// mCblk->userBase = 0xffff0000;
+// mCblk->serverBase = 0xffff0000;
mChannelCount = channelCount;
mChannelMask = channelMask;
if (sharedBuffer == 0) {
@@ -3378,6 +3397,11 @@
// clear all buffers
mCblk->frameCount = frameCount;
mCblk->sampleRate = sampleRate;
+// uncomment the following lines to quickly test 32-bit wraparound
+// mCblk->user = 0xffff0000;
+// mCblk->server = 0xffff0000;
+// mCblk->userBase = 0xffff0000;
+// mCblk->serverBase = 0xffff0000;
mChannelCount = channelCount;
mChannelMask = channelMask;
mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
@@ -3457,7 +3481,7 @@
if (bufferStart < mBuffer || bufferStart > bufferEnd || bufferEnd > mBufferEnd ||
((unsigned long)bufferStart & (unsigned long)(frameSize - 1))) {
ALOGE("TrackBase::getBuffer buffer out of range:\n start: %p, end %p , mBuffer %p mBufferEnd %p\n \
- server %d, serverBase %d, user %d, userBase %d",
+ server %u, serverBase %u, user %u, userBase %u",
bufferStart, bufferEnd, mBuffer, mBufferEnd,
cblk->server, cblk->serverBase, cblk->user, cblk->userBase);
return NULL;
@@ -3466,6 +3490,12 @@
return bufferStart;
}
+status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
+{
+ mSyncEvents.add(event);
+ return NO_ERROR;
+}
+
// ----------------------------------------------------------------------------
// Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
@@ -3478,7 +3508,8 @@
uint32_t channelMask,
int frameCount,
const sp<IMemory>& sharedBuffer,
- int sessionId)
+ int sessionId,
+ IAudioFlinger::track_flags_t flags)
: TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer, sessionId),
mMute(false),
// mFillingUpStatus ?
@@ -3488,7 +3519,9 @@
mName(-1), // see note below
mMainBuffer(thread->mixBuffer()),
mAuxBuffer(NULL),
- mAuxEffectId(0), mHasVolumeController(false)
+ mAuxEffectId(0), mHasVolumeController(false),
+ mPresentationCompleteFrames(0),
+ mFlags(flags)
{
if (mCblk != NULL) {
// NOTE: audio_track_cblk_t::frameSize for 8 bit PCM data is based on a sample size of
@@ -3593,7 +3626,7 @@
if (framesReq > framesReady) {
framesReq = framesReady;
}
- if (s + framesReq > bufferEnd) {
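+ // equivalent check that avoids 32-bit unsigned overflow of s + framesReq near wraparound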
+ if (framesReq > bufferEnd - s) {
framesReq = bufferEnd - s;
}
@@ -3627,11 +3660,20 @@
return false;
}
-status_t AudioFlinger::PlaybackThread::Track::start(pid_t tid)
+status_t AudioFlinger::PlaybackThread::Track::start(pid_t tid,
+ AudioSystem::sync_event_t event,
+ int triggerSession)
{
status_t status = NO_ERROR;
ALOGV("start(%d), calling pid %d session %d tid %d",
mName, IPCThreadState::self()->getCallingPid(), mSessionId, tid);
+ // check for use case 2 with missing callback
+ if (isFastTrack() && (mSharedBuffer == 0) && (tid == 0)) {
+ ALOGW("AUDIO_POLICY_OUTPUT_FLAG_FAST denied");
+ mFlags &= ~IAudioFlinger::TRACK_FAST;
+ // FIXME the track must be invalidated and moved to another thread or
+ // attached directly to the normal mixer now
+ }
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
Mutex::Autolock _l(thread->mLock);
@@ -3756,6 +3798,7 @@
android_atomic_or(CBLK_UNDERRUN_ON, &mCblk->flags);
mFillingUpStatus = FS_FILLING;
mResetDone = true;
+ mPresentationCompleteFrames = 0;
}
}
@@ -3781,6 +3824,39 @@
mAuxBuffer = buffer;
}
+bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWritten,
+ size_t audioHalFrames)
+{
+ // a track is considered presented when the total number of frames written to the audio HAL
+ // reaches the number of frames that had been written when presentationComplete() was first
+ // called (mPresentationCompleteFrames == 0), plus the amount of data buffered in the HAL at
+ // that time.
+ if (mPresentationCompleteFrames == 0) {
+ mPresentationCompleteFrames = framesWritten + audioHalFrames;
+ ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d",
+ mPresentationCompleteFrames, audioHalFrames);
+ }
+ if (framesWritten >= mPresentationCompleteFrames) {
+ ALOGV("presentationComplete() session %d complete: framesWritten %d",
+ mSessionId, framesWritten);
+ triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
+ mPresentationCompleteFrames = 0;
+ return true;
+ }
+ return false;
+}
+
+void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
+{
+ for (int i = 0; i < (int)mSyncEvents.size(); i++) {
+ if (mSyncEvents[i]->type() == type) {
+ mSyncEvents[i]->trigger();
+ mSyncEvents.removeAt(i);
+ i--;
+ }
+ }
+}
+
+
// timed audio tracks
sp<AudioFlinger::PlaybackThread::TimedTrack>
@@ -3813,7 +3889,7 @@
const sp<IMemory>& sharedBuffer,
int sessionId)
: Track(thread, client, streamType, sampleRate, format, channelMask,
- frameCount, sharedBuffer, sessionId),
+ frameCount, sharedBuffer, sessionId, IAudioFlinger::TRACK_TIMED),
mTimedSilenceBuffer(NULL),
mTimedSilenceBufferSize(0),
mTimedAudioOutputOnTime(false),
@@ -4224,7 +4300,7 @@
if (framesReq > framesAvail) {
framesReq = framesAvail;
}
- if (s + framesReq > bufferEnd) {
+ if (framesReq > bufferEnd - s) {
framesReq = bufferEnd - s;
}
@@ -4241,12 +4317,14 @@
return NOT_ENOUGH_DATA;
}
-status_t AudioFlinger::RecordThread::RecordTrack::start(pid_t tid)
+status_t AudioFlinger::RecordThread::RecordTrack::start(pid_t tid,
+ AudioSystem::sync_event_t event,
+ int triggerSession)
{
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
RecordThread *recordThread = (RecordThread *)thread.get();
- return recordThread->start(this, tid);
+ return recordThread->start(this, tid, event, triggerSession);
} else {
return BAD_VALUE;
}
@@ -4289,7 +4367,8 @@
audio_format_t format,
uint32_t channelMask,
int frameCount)
- : Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount, NULL, 0),
+ : Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount,
+ NULL, 0, IAudioFlinger::TRACK_DEFAULT),
mActive(false), mSourceThread(sourceThread)
{
@@ -4312,9 +4391,11 @@
clearBufferQueue();
}
-status_t AudioFlinger::PlaybackThread::OutputTrack::start(pid_t tid)
+status_t AudioFlinger::PlaybackThread::OutputTrack::start(pid_t tid,
+ AudioSystem::sync_event_t event,
+ int triggerSession)
{
- status_t status = Track::start(tid);
+ status_t status = Track::start(tid, event, triggerSession);
if (status != NO_ERROR) {
return status;
}
@@ -4493,7 +4574,7 @@
uint32_t u = cblk->user;
uint32_t bufferEnd = cblk->userBase + cblk->frameCount;
- if (u + framesReq > bufferEnd) {
+ if (framesReq > bufferEnd - u) {
framesReq = bufferEnd - u;
}
@@ -4757,9 +4838,9 @@
return mRecordTrack->getCblk();
}
-status_t AudioFlinger::RecordHandle::start(pid_t tid) {
+status_t AudioFlinger::RecordHandle::start(pid_t tid, int event, int triggerSession) {
ALOGV("RecordHandle::start()");
- return mRecordTrack->start(tid);
+ return mRecordTrack->start(tid, (AudioSystem::sync_event_t)event, triggerSession);
}
void AudioFlinger::RecordHandle::stop() {
@@ -4968,7 +5049,16 @@
}
}
- mActiveTrack->releaseBuffer(&buffer);
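+ // while mFramestoDrop is non-zero the captured frames are not released to the client,
+ // i.e. they are dropped (see the sync start event handling in RecordThread::start())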
+ if (mFramestoDrop == 0) {
+ mActiveTrack->releaseBuffer(&buffer);
+ } else {
+ if (mFramestoDrop > 0) {
+ mFramestoDrop -= buffer.frameCount;
+ if (mFramestoDrop < 0) {
+ mFramestoDrop = 0;
+ }
+ }
+ }
mActiveTrack->overflow();
}
// client isn't retrieving buffers fast enough
@@ -5050,11 +5140,26 @@
return track;
}
-status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack, pid_t tid)
+status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack,
+ pid_t tid, AudioSystem::sync_event_t event,
+ int triggerSession)
{
- ALOGV("RecordThread::start tid=%d", tid);
+ ALOGV("RecordThread::start tid=%d, event %d, triggerSession %d", tid, event, triggerSession);
sp<ThreadBase> strongMe = this;
status_t status = NO_ERROR;
+
+ if (event == AudioSystem::SYNC_EVENT_NONE) {
+ mSyncStartEvent.clear();
+ mFramestoDrop = 0;
+ } else if (event != AudioSystem::SYNC_EVENT_SAME) {
+ mSyncStartEvent = mAudioFlinger->createSyncEvent(event,
+ triggerSession,
+ recordTrack->sessionId(),
+ syncStartEventCallback,
+ this);
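+ // drop all captured frames until the sync event is received in handleSyncStartEvent()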
+ mFramestoDrop = -1;
+ }
+
{
AutoMutex lock(mLock);
if (mActiveTrack != 0) {
@@ -5073,6 +5178,7 @@
mLock.lock();
if (status != NO_ERROR) {
mActiveTrack.clear();
+ clearSyncStartEvent();
return status;
}
mRsmpInIndex = mFrameCount;
@@ -5101,9 +5207,44 @@
}
startError:
AudioSystem::stopInput(mId);
+ clearSyncStartEvent();
return status;
}
+void AudioFlinger::RecordThread::clearSyncStartEvent()
+{
+ if (mSyncStartEvent != 0) {
+ mSyncStartEvent->cancel();
+ }
+ mSyncStartEvent.clear();
+}
+
+void AudioFlinger::RecordThread::syncStartEventCallback(const wp<SyncEvent>& event)
+{
+ sp<SyncEvent> strongEvent = event.promote();
+
+ if (strongEvent != 0) {
+ RecordThread *me = (RecordThread *)strongEvent->cookie();
+ me->handleSyncStartEvent(strongEvent);
+ }
+}
+
+void AudioFlinger::RecordThread::handleSyncStartEvent(const sp<SyncEvent>& event)
+{
+ ALOGV("handleSyncStartEvent() mActiveTrack %p session %d event->listenerSession() %d",
+ mActiveTrack.get(),
+ mActiveTrack.get() ? mActiveTrack->sessionId() : 0,
+ event->listenerSession());
+
+ if (mActiveTrack != 0 &&
+ event == mSyncStartEvent) {
+ // TODO: use actual buffer filling status instead of 2 buffers when info is available
+ // from audio HAL
+ mFramestoDrop = mFrameCount * 2;
+ mSyncStartEvent.clear();
+ }
+}
+
void AudioFlinger::RecordThread::stop(RecordThread::RecordTrack* recordTrack) {
ALOGV("RecordThread::stop");
sp<ThreadBase> strongMe = this;
@@ -5127,6 +5268,26 @@
}
}
+bool AudioFlinger::RecordThread::isValidSyncEvent(const sp<SyncEvent>& event)
+{
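+ // no sync event type is currently handled by record threads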
+ return false;
+}
+
+status_t AudioFlinger::RecordThread::setSyncEvent(const sp<SyncEvent>& event)
+{
+ if (!isValidSyncEvent(event)) {
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock _l(mLock);
+
+ if (mTrack != NULL && event->triggerSession() == mTrack->sessionId()) {
+ mTrack->setSyncEvent(event);
+ return NO_ERROR;
+ }
+ return NAME_NOT_FOUND;
+}
+
status_t AudioFlinger::RecordThread::dump(int fd, const Vector<String16>& args)
{
const size_t SIZE = 256;
@@ -5430,7 +5591,7 @@
}
// this method must always be called either with ThreadBase mLock held or inside the thread loop
-audio_stream_t* AudioFlinger::RecordThread::stream()
+audio_stream_t* AudioFlinger::RecordThread::stream() const
{
if (mInput == NULL) {
return NULL;
@@ -5441,28 +5602,84 @@
// ----------------------------------------------------------------------------
-audio_io_handle_t AudioFlinger::openOutput(uint32_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- uint32_t *pChannels,
- uint32_t *pLatencyMs,
- audio_policy_output_flags_t flags)
+audio_module_handle_t AudioFlinger::loadHwModule(const char *name)
+{
+ if (!settingsAllowed()) {
+ return 0;
+ }
+ Mutex::Autolock _l(mLock);
+ return loadHwModule_l(name);
+}
+
+// loadHwModule_l() must be called with AudioFlinger::mLock held
+audio_module_handle_t AudioFlinger::loadHwModule_l(const char *name)
+{
+ for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+ if (strncmp(mAudioHwDevs.valueAt(i)->moduleName(), name, strlen(name)) == 0) {
+ ALOGW("loadHwModule() module %s already loaded", name);
+ return mAudioHwDevs.keyAt(i);
+ }
+ }
+
+ const hw_module_t *mod;
+ audio_hw_device_t *dev;
+
+ int rc = load_audio_interface(name, &mod, &dev);
+ if (rc) {
+ ALOGI("loadHwModule() error %d loading module %s ", rc, name);
+ return 0;
+ }
+
+ mHardwareStatus = AUDIO_HW_INIT;
+ rc = dev->init_check(dev);
+ mHardwareStatus = AUDIO_HW_IDLE;
+ if (rc) {
+ ALOGI("loadHwModule() init check error %d for module %s ", rc, name);
+ return 0;
+ }
+
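+ // if the HAL layer handles master volume, apply the current master volume to the newly
+ // loaded device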
+ if ((mMasterVolumeSupportLvl != MVS_NONE) &&
+ (NULL != dev->set_master_volume)) {
+ AutoMutex lock(mHardwareLock);
+ mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
+ dev->set_master_volume(dev, mMasterVolume);
+ mHardwareStatus = AUDIO_HW_IDLE;
+ }
+
+ audio_module_handle_t handle = nextUniqueId();
+ mAudioHwDevs.add(handle, new AudioHwDevice(name, dev));
+
+ ALOGI("loadHwModule() Loaded %s audio interface from %s (%s) handle %d",
+ name, mod->name, mod->id, handle);
+
+ return handle;
+}
+
+audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module,
+ audio_devices_t *pDevices,
+ uint32_t *pSamplingRate,
+ audio_format_t *pFormat,
+ audio_channel_mask_t *pChannelMask,
+ uint32_t *pLatencyMs,
+ audio_policy_output_flags_t flags)
{
status_t status;
PlaybackThread *thread = NULL;
uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
audio_format_t format = pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT;
- uint32_t channels = pChannels ? *pChannels : 0;
+ audio_channel_mask_t channelMask = pChannelMask ? *pChannelMask : 0;
uint32_t latency = pLatencyMs ? *pLatencyMs : 0;
audio_stream_out_t *outStream;
audio_hw_device_t *outHwDev;
- ALOGV("openOutput(), Device %x, SamplingRate %d, Format %d, Channels %x, flags %x",
- pDevices ? *pDevices : 0,
- samplingRate,
- format,
- channels,
- flags);
+ ALOGV("openOutput(), module %d Device %x, SamplingRate %d, Format %d, Channels %x, flags %x",
+ module,
+ pDevices ? *pDevices : 0,
+ samplingRate,
+ format,
+ channelMask,
+ flags);
if (pDevices == NULL || *pDevices == 0) {
return 0;
@@ -5470,19 +5687,19 @@
Mutex::Autolock _l(mLock);
- outHwDev = findSuitableHwDev_l(*pDevices);
+ outHwDev = findSuitableHwDev_l(module, *pDevices);
if (outHwDev == NULL)
return 0;
mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
status = outHwDev->open_output_stream(outHwDev, *pDevices, &format,
- &channels, &samplingRate, &outStream);
+ &channelMask, &samplingRate, &outStream);
mHardwareStatus = AUDIO_HW_IDLE;
ALOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %d, Channels %x, status %d",
outStream,
samplingRate,
format,
- channels,
+ channelMask,
status);
if (outStream != NULL) {
@@ -5491,7 +5708,7 @@
if ((flags & AUDIO_POLICY_OUTPUT_FLAG_DIRECT) ||
(format != AUDIO_FORMAT_PCM_16_BIT) ||
- (channels != AUDIO_CHANNEL_OUT_STEREO)) {
+ (channelMask != AUDIO_CHANNEL_OUT_STEREO)) {
thread = new DirectOutputThread(this, output, id, *pDevices);
ALOGV("openOutput() created direct output: ID %d thread %p", id, thread);
} else {
@@ -5502,11 +5719,55 @@
if (pSamplingRate != NULL) *pSamplingRate = samplingRate;
if (pFormat != NULL) *pFormat = format;
- if (pChannels != NULL) *pChannels = channels;
+ if (pChannelMask != NULL) *pChannelMask = channelMask;
if (pLatencyMs != NULL) *pLatencyMs = thread->latency();
// notify client processes of the new output creation
thread->audioConfigChanged_l(AudioSystem::OUTPUT_OPENED);
+
+ // the first primary output opened designates the primary hw device
+ if ((mPrimaryHardwareDev == NULL) && (flags & AUDIO_POLICY_OUTPUT_FLAG_PRIMARY)) {
+ ALOGI("Using module %d as the primary audio interface", module);
+ mPrimaryHardwareDev = outHwDev;
+
+ AutoMutex lock(mHardwareLock);
+ mHardwareStatus = AUDIO_HW_SET_MODE;
+ outHwDev->set_mode(outHwDev, mMode);
+
+ // Determine the level of master volume support the primary audio HAL has,
+ // and set the initial master volume at the same time.
+ float initialVolume = 1.0;
+ mMasterVolumeSupportLvl = MVS_NONE;
+
+ mHardwareStatus = AUDIO_HW_GET_MASTER_VOLUME;
+ if ((NULL != outHwDev->get_master_volume) &&
+ (NO_ERROR == outHwDev->get_master_volume(outHwDev, &initialVolume))) {
+ mMasterVolumeSupportLvl = MVS_FULL;
+ } else {
+ mMasterVolumeSupportLvl = MVS_SETONLY;
+ initialVolume = 1.0;
+ }
+
+ mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
+ if ((NULL == outHwDev->set_master_volume) ||
+ (NO_ERROR != outHwDev->set_master_volume(outHwDev, initialVolume))) {
+ mMasterVolumeSupportLvl = MVS_NONE;
+ }
+ // now that we have a primary device, initialize master volume on other devices
+ for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+ audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
+
+ if ((dev != mPrimaryHardwareDev) &&
+ (NULL != dev->set_master_volume)) {
+ dev->set_master_volume(dev, initialVolume);
+ }
+ }
+ mHardwareStatus = AUDIO_HW_IDLE;
+ mMasterVolumeSW = (MVS_NONE == mMasterVolumeSupportLvl)
+ ? initialVolume
+ : 1.0;
+ mMasterVolume = initialVolume;
+ }
return id;
}
@@ -5604,20 +5865,20 @@
return NO_ERROR;
}
-audio_io_handle_t AudioFlinger::openInput(uint32_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- uint32_t *pChannels,
- audio_in_acoustics_t acoustics)
+audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module,
+ audio_devices_t *pDevices,
+ uint32_t *pSamplingRate,
+ audio_format_t *pFormat,
+ uint32_t *pChannelMask)
{
status_t status;
RecordThread *thread = NULL;
uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
audio_format_t format = pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT;
- uint32_t channels = pChannels ? *pChannels : 0;
+ audio_channel_mask_t channelMask = pChannelMask ? *pChannelMask : 0;
uint32_t reqSamplingRate = samplingRate;
audio_format_t reqFormat = format;
- uint32_t reqChannels = channels;
+ audio_channel_mask_t reqChannels = channelMask;
audio_stream_in_t *inStream;
audio_hw_device_t *inHwDev;
@@ -5627,20 +5888,19 @@
Mutex::Autolock _l(mLock);
- inHwDev = findSuitableHwDev_l(*pDevices);
+ inHwDev = findSuitableHwDev_l(module, *pDevices);
if (inHwDev == NULL)
return 0;
status = inHwDev->open_input_stream(inHwDev, *pDevices, &format,
- &channels, &samplingRate,
- acoustics,
+ &channelMask, &samplingRate,
+ (audio_in_acoustics_t)0,
&inStream);
- ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %d, Channels %x, acoustics %x, status %d",
+ ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %d, Channels %x, status %d",
inStream,
samplingRate,
format,
- channels,
- acoustics,
+ channelMask,
status);
// If the input could not be opened with the requested parameters and we can handle the conversion internally,
@@ -5649,11 +5909,11 @@
if (inStream == NULL && status == BAD_VALUE &&
reqFormat == format && format == AUDIO_FORMAT_PCM_16_BIT &&
(samplingRate <= 2 * reqSamplingRate) &&
- (popcount(channels) <= FCC_2) && (popcount(reqChannels) <= FCC_2)) {
+ (popcount(channelMask) <= FCC_2) && (popcount(reqChannels) <= FCC_2)) {
ALOGV("openInput() reopening with proposed sampling rate and channels");
status = inHwDev->open_input_stream(inHwDev, *pDevices, &format,
- &channels, &samplingRate,
- acoustics,
+ &channelMask, &samplingRate,
+ (audio_in_acoustics_t)0,
&inStream);
}
@@ -5675,7 +5935,7 @@
ALOGV("openInput() created record thread: ID %d thread %p", id, thread);
if (pSamplingRate != NULL) *pSamplingRate = reqSamplingRate;
if (pFormat != NULL) *pFormat = format;
- if (pChannels != NULL) *pChannels = reqChannels;
+ if (pChannelMask != NULL) *pChannelMask = reqChannels;
input->stream->common.standby(&input->stream->common);
@@ -5728,13 +5988,10 @@
ALOGV("setStreamOutput() stream %d to output %d", stream, output);
audioConfigChanged_l(AudioSystem::STREAM_CONFIG_CHANGED, output, &stream);
- dstThread->setStreamValid(stream, true);
-
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
if (thread != dstThread && thread->type() != ThreadBase::DIRECT) {
MixerThread *srcThread = (MixerThread *)thread;
- srcThread->setStreamValid(stream, false);
srcThread->invalidateTracks(stream);
}
}
@@ -5899,6 +6156,37 @@
return thread->device();
}
+sp<AudioFlinger::SyncEvent> AudioFlinger::createSyncEvent(AudioSystem::sync_event_t type,
+ int triggerSession,
+ int listenerSession,
+ sync_event_callback_t callBack,
+ void *cookie)
+{
+ Mutex::Autolock _l(mLock);
+
+ sp<SyncEvent> event = new SyncEvent(type, triggerSession, listenerSession, callBack, cookie);
+ status_t playStatus = NAME_NOT_FOUND;
+ status_t recStatus = NAME_NOT_FOUND;
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ playStatus = mPlaybackThreads.valueAt(i)->setSyncEvent(event);
+ if (playStatus == NO_ERROR) {
+ return event;
+ }
+ }
+ for (size_t i = 0; i < mRecordThreads.size(); i++) {
+ recStatus = mRecordThreads.valueAt(i)->setSyncEvent(event);
+ if (recStatus == NO_ERROR) {
+ return event;
+ }
+ }
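+ // the trigger session is not active on any thread yet: keep the event pending so it can
+ // be attached when a track with this session is created (see createTrack())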
+ if (playStatus == NAME_NOT_FOUND || recStatus == NAME_NOT_FOUND) {
+ mPendingSyncEvents.add(event);
+ } else {
+ ALOGV("createSyncEvent() invalid event %d", event->type());
+ event.clear();
+ }
+ return event;
+}
// ----------------------------------------------------------------------------
// Effect management
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 795807d..e493a9a 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -43,6 +43,7 @@
#include <system/audio.h>
#include <hardware/audio.h>
+#include <hardware/audio_policy.h>
#include "AudioBufferProvider.h"
@@ -137,12 +138,13 @@
virtual size_t getInputBufferSize(uint32_t sampleRate, audio_format_t format, int channelCount) const;
- virtual audio_io_handle_t openOutput(uint32_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- uint32_t *pChannels,
- uint32_t *pLatencyMs,
- audio_policy_output_flags_t flags);
+ virtual audio_io_handle_t openOutput(audio_module_handle_t module,
+ audio_devices_t *pDevices,
+ uint32_t *pSamplingRate,
+ audio_format_t *pFormat,
+ audio_channel_mask_t *pChannelMask,
+ uint32_t *pLatencyMs,
+ audio_policy_output_flags_t flags);
virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
audio_io_handle_t output2);
@@ -153,11 +155,11 @@
virtual status_t restoreOutput(audio_io_handle_t output);
- virtual audio_io_handle_t openInput(uint32_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- uint32_t *pChannels,
- audio_in_acoustics_t acoustics);
+ virtual audio_io_handle_t openInput(audio_module_handle_t module,
+ audio_devices_t *pDevices,
+ uint32_t *pSamplingRate,
+ audio_format_t *pFormat,
+ audio_channel_mask_t *pChannelMask);
virtual status_t closeInput(audio_io_handle_t input);
@@ -196,6 +198,8 @@
virtual status_t moveEffects(int sessionId, audio_io_handle_t srcOutput,
audio_io_handle_t dstOutput);
+ virtual audio_module_handle_t loadHwModule(const char *name);
+
virtual status_t onTransact(
uint32_t code,
const Parcel& data,
@@ -204,6 +208,44 @@
// end of IAudioFlinger interface
+ class SyncEvent;
+
+ typedef void (*sync_event_callback_t)(const wp<SyncEvent>& event);
+
+ class SyncEvent : public RefBase {
+ public:
+ SyncEvent(AudioSystem::sync_event_t type,
+ int triggerSession,
+ int listenerSession,
+ sync_event_callback_t callBack,
+ void *cookie)
+ : mType(type), mTriggerSession(triggerSession), mListenerSession(listenerSession),
+ mCallback(callBack), mCookie(cookie)
+ {}
+
+ virtual ~SyncEvent() {}
+
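+ // trigger() and cancel() are serialized by mLock so a cancelled event can never fire its callback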
+ void trigger() { Mutex::Autolock _l(mLock); if (mCallback) mCallback(this); }
+ void cancel() { Mutex::Autolock _l(mLock); mCallback = NULL; }
+ AudioSystem::sync_event_t type() const { return mType; }
+ int triggerSession() const { return mTriggerSession; }
+ int listenerSession() const { return mListenerSession; }
+ void *cookie() const { return mCookie; }
+
+ private:
+ const AudioSystem::sync_event_t mType;
+ const int mTriggerSession;
+ const int mListenerSession;
+ sync_event_callback_t mCallback;
+ void * const mCookie;
+ Mutex mLock;
+ };
+
+ sp<SyncEvent> createSyncEvent(AudioSystem::sync_event_t type,
+ int triggerSession,
+ int listenerSession,
+ sync_event_callback_t callBack,
+ void *cookie);
private:
audio_mode_t getMode() const { return mMode; }
@@ -218,7 +260,7 @@
// RefBase
virtual void onFirstRef();
- audio_hw_device_t* findSuitableHwDev_l(uint32_t devices);
+ audio_hw_device_t* findSuitableHwDev_l(audio_module_handle_t module, uint32_t devices);
void purgeStaleEffects_l();
// standby delay for MIXER and DUPLICATING playback threads is read from property
@@ -334,11 +376,14 @@
int sessionId);
virtual ~TrackBase();
- virtual status_t start(pid_t tid) = 0;
+ virtual status_t start(pid_t tid,
+ AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
+ int triggerSession = 0) = 0;
virtual void stop() = 0;
sp<IMemory> getCblk() const { return mCblkMemory; }
audio_track_cblk_t* cblk() const { return mCblk; }
int sessionId() const { return mSessionId; }
+ virtual status_t setSyncEvent(const sp<SyncEvent>& event);
protected:
TrackBase(const TrackBase&);
@@ -385,6 +430,7 @@
const int mSessionId;
uint8_t mChannelCount;
uint32_t mChannelMask;
+ Vector < sp<SyncEvent> > mSyncEvents;
};
class ConfigEvent {
@@ -430,7 +476,7 @@
audio_io_handle_t id() const { return mId;}
bool standby() const { return mStandby; }
uint32_t device() const { return mDevice; }
- virtual audio_stream_t* stream() = 0;
+ virtual audio_stream_t* stream() const = 0;
sp<EffectHandle> createEffect_l(
const sp<AudioFlinger::Client>& client,
@@ -499,6 +545,11 @@
void checkSuspendOnEffectEnabled_l(const sp<EffectModule>& effect,
bool enabled,
int sessionId = AUDIO_SESSION_OUTPUT_MIX);
+
+ virtual status_t setSyncEvent(const sp<SyncEvent>& event) = 0;
+ virtual bool isValidSyncEvent(const sp<SyncEvent>& event) = 0;
+
mutable Mutex mLock;
protected:
@@ -583,13 +634,11 @@
struct stream_type_t {
stream_type_t()
: volume(1.0f),
- mute(false),
- valid(true)
+ mute(false)
{
}
float volume;
bool mute;
- bool valid;
};
// --- PlaybackThread ---
@@ -615,11 +664,14 @@
uint32_t channelMask,
int frameCount,
const sp<IMemory>& sharedBuffer,
- int sessionId);
+ int sessionId,
+ IAudioFlinger::track_flags_t flags);
virtual ~Track();
void dump(char* buffer, size_t size);
- virtual status_t start(pid_t tid);
+ virtual status_t start(pid_t tid,
+ AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
+ int triggerSession = 0);
virtual void stop();
void pause();
@@ -640,6 +692,9 @@
int16_t *mainBuffer() const { return mMainBuffer; }
int auxEffectId() const { return mAuxEffectId; }
+ bool isFastTrack() const
+ { return (mFlags & IAudioFlinger::TRACK_FAST) != 0; }
+
protected:
// for numerous
friend class PlaybackThread;
@@ -670,6 +725,9 @@
return (mStreamType == AUDIO_STREAM_CNT);
}
+ bool presentationComplete(size_t framesWritten, size_t audioHalFrames);
+ void triggerEvents(AudioSystem::sync_event_t type);
+
public:
virtual bool isTimedTrack() const { return false; }
protected:
@@ -688,6 +746,10 @@
int32_t *mAuxBuffer;
int mAuxEffectId;
bool mHasVolumeController;
+ size_t mPresentationCompleteFrames; // number of frames written to the audio HAL
+ // when this track will be fully rendered
+ private:
+ IAudioFlinger::track_flags_t mFlags;
}; // end of Track
class TimedTrack : public Track {
@@ -782,7 +844,9 @@
int frameCount);
virtual ~OutputTrack();
- virtual status_t start(pid_t tid);
+ virtual status_t start(pid_t tid,
+ AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
+ int triggerSession = 0);
virtual void stop();
bool write(int16_t* data, uint32_t frames);
bool bufferQueueEmpty() const { return mBufferQueue.size() == 0; }
@@ -857,12 +921,12 @@
int frameCount,
const sp<IMemory>& sharedBuffer,
int sessionId,
- bool isTimed,
+ IAudioFlinger::track_flags_t flags,
status_t *status);
AudioStreamOut* getOutput() const;
AudioStreamOut* clearOutput();
- virtual audio_stream_t* stream();
+ virtual audio_stream_t* stream() const;
void suspend() { mSuspended++; }
void restore() { if (mSuspended > 0) mSuspended--; }
@@ -883,7 +947,9 @@
virtual uint32_t hasAudioSession(int sessionId);
virtual uint32_t getStrategyForSession_l(int sessionId);
- void setStreamValid(audio_stream_type_t streamType, bool valid);
+
+ virtual status_t setSyncEvent(const sp<SyncEvent>& event);
+ virtual bool isValidSyncEvent(const sp<SyncEvent>& event);
protected:
int16_t* mMixBuffer;
@@ -901,9 +967,13 @@
// Allocate a track name. Returns name >= 0 if successful, -1 on failure.
virtual int getTrackName_l() = 0;
virtual void deleteTrackName_l(int name) = 0;
- virtual uint32_t activeSleepTimeUs();
- virtual uint32_t idleSleepTimeUs() = 0;
- virtual uint32_t suspendSleepTimeUs() = 0;
+
+ // Time to sleep between cycles when:
+ virtual uint32_t activeSleepTimeUs() const; // mixer state MIXER_TRACKS_ENABLED
+ virtual uint32_t idleSleepTimeUs() const = 0; // mixer state MIXER_IDLE
+ virtual uint32_t suspendSleepTimeUs() const = 0; // audio policy manager suspended us
+ // No sleep when mixer state == MIXER_TRACKS_READY; relies on audio HAL stream->write()
+ // No sleep in standby mode; waits on a condition
// Code snippets that are temporarily lifted up out of threadLoop() until the merge
void checkSilentMode_l();
@@ -989,8 +1059,8 @@
virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
virtual int getTrackName_l();
virtual void deleteTrackName_l(int name);
- virtual uint32_t idleSleepTimeUs();
- virtual uint32_t suspendSleepTimeUs();
+ virtual uint32_t idleSleepTimeUs() const;
+ virtual uint32_t suspendSleepTimeUs() const;
virtual void cacheParameters_l();
// threadLoop snippets
@@ -1014,9 +1084,9 @@
protected:
virtual int getTrackName_l();
virtual void deleteTrackName_l(int name);
- virtual uint32_t activeSleepTimeUs();
- virtual uint32_t idleSleepTimeUs();
- virtual uint32_t suspendSleepTimeUs();
+ virtual uint32_t activeSleepTimeUs() const;
+ virtual uint32_t idleSleepTimeUs() const;
+ virtual uint32_t suspendSleepTimeUs() const;
virtual void cacheParameters_l();
// threadLoop snippets
@@ -1051,9 +1121,9 @@
// Thread virtuals
void addOutputTrack(MixerThread* thread);
void removeOutputTrack(MixerThread* thread);
- uint32_t waitTimeMs() { return mWaitTimeMs; }
+ uint32_t waitTimeMs() const { return mWaitTimeMs; }
protected:
- virtual uint32_t activeSleepTimeUs();
+ virtual uint32_t activeSleepTimeUs() const;
private:
bool outputsReady(const SortedVector< sp<OutputTrack> > &outputTracks);
@@ -1145,7 +1215,9 @@
int sessionId);
virtual ~RecordTrack();
- virtual status_t start(pid_t tid);
+ virtual status_t start(pid_t tid,
+ AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
+ int triggerSession = 0);
virtual void stop();
bool overflow() { bool tmp = mOverflow; mOverflow = false; return tmp; }
@@ -1192,13 +1264,14 @@
int sessionId,
status_t *status);
- status_t start(RecordTrack* recordTrack);
- status_t start(RecordTrack* recordTrack, pid_t tid);
+ status_t start(RecordTrack* recordTrack, pid_t tid,
+ AudioSystem::sync_event_t event,
+ int triggerSession);
void stop(RecordTrack* recordTrack);
status_t dump(int fd, const Vector<String16>& args);
AudioStreamIn* getInput() const;
AudioStreamIn* clearInput();
- virtual audio_stream_t* stream();
+ virtual audio_stream_t* stream() const;
// AudioBufferProvider interface
virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts);
@@ -1215,7 +1288,15 @@
virtual uint32_t hasAudioSession(int sessionId);
RecordTrack* track();
+ virtual status_t setSyncEvent(const sp<SyncEvent>& event);
+ virtual bool isValidSyncEvent(const sp<SyncEvent>& event);
+
+ static void syncStartEventCallback(const wp<SyncEvent>& event);
+ void handleSyncStartEvent(const sp<SyncEvent>& event);
+
private:
+ void clearSyncStartEvent();
+
RecordThread();
AudioStreamIn *mInput;
RecordTrack* mTrack;
@@ -1229,6 +1310,11 @@
const int mReqChannelCount;
const uint32_t mReqSampleRate;
ssize_t mBytesRead;
+ // sync event triggering actual audio capture. Frames read before this event will
+ // be dropped and therefore not read by the application.
+ sp<SyncEvent> mSyncStartEvent;
+ // number of captured frames to drop after the start sync event has been received.
+ ssize_t mFramestoDrop;
};
// server side of the client's IAudioRecord
@@ -1237,7 +1323,7 @@
RecordHandle(const sp<RecordThread::RecordTrack>& recordTrack);
virtual ~RecordHandle();
virtual sp<IMemory> getCblk() const;
- virtual status_t start(pid_t tid);
+ virtual status_t start(pid_t tid, int event, int triggerSession);
virtual void stop();
virtual status_t onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
@@ -1617,15 +1703,30 @@
MVS_FULL,
};
+ class AudioHwDevice {
+ public:
+ AudioHwDevice(const char *moduleName, audio_hw_device_t *hwDevice) :
+ mModuleName(strdup(moduleName)), mHwDevice(hwDevice) {}
+ ~AudioHwDevice() { free((void *)mModuleName); }
+
+ const char *moduleName() const { return mModuleName; }
+ audio_hw_device_t *hwDevice() const { return mHwDevice; }
+ private:
+ const char * const mModuleName;
+ audio_hw_device_t * const mHwDevice;
+ };
+
mutable Mutex mLock;
DefaultKeyedVector< pid_t, wp<Client> > mClients; // see ~Client()
mutable Mutex mHardwareLock;
+ // NOTE: If both mLock and mHardwareLock mutexes must be held,
+ // always take mLock before mHardwareLock
// These two fields are immutable after onFirstRef(), so no lock needed to access
audio_hw_device_t* mPrimaryHardwareDev; // mAudioHwDevs[0] or NULL
- Vector<audio_hw_device_t*> mAudioHwDevs;
+ DefaultKeyedVector<audio_module_handle_t, AudioHwDevice*> mAudioHwDevs;
// for dump, indicates which hardware operation is currently in progress (but not stream ops)
enum hardware_call_state {
@@ -1675,6 +1776,10 @@
float masterVolume_l() const;
float masterVolumeSW_l() const { return mMasterVolumeSW; }
bool masterMute_l() const { return mMasterMute; }
+ audio_module_handle_t loadHwModule_l(const char *name);
+
+ Vector < sp<SyncEvent> > mPendingSyncEvents; // sync events awaiting a session
+ // to be created
private:
sp<Client> registerPid_l(pid_t pid); // always returns non-0
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 3f4c19a..0e6ea12 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -66,32 +66,7 @@
// and mTrackNames is initially 0. However, leave it here until that's verified.
track_t* t = mState.tracks;
for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) {
- t->needs = 0;
- t->volume[0] = UNITY_GAIN;
- t->volume[1] = UNITY_GAIN;
- // no initialization needed
- // t->prevVolume[0]
- // t->prevVolume[1]
- t->volumeInc[0] = 0;
- t->volumeInc[1] = 0;
- t->auxLevel = 0;
- t->auxInc = 0;
- // no initialization needed
- // t->prevAuxLevel
- // t->frameCount
- t->channelCount = 2;
- t->enabled = false;
- t->format = 16;
- t->channelMask = AUDIO_CHANNEL_OUT_STEREO;
- t->bufferProvider = NULL;
- t->buffer.raw = NULL;
- // t->buffer.frameCount
- t->hook = NULL;
- t->in = NULL;
- t->resampler = NULL;
- t->sampleRate = mSampleRate;
- t->mainBuffer = NULL;
- t->auxBuffer = NULL;
+ // FIXME redundant per track
t->localTimeFreq = lc.getLocalFreq();
t++;
}
@@ -115,6 +90,38 @@
int n = __builtin_ctz(names);
ALOGV("add track (%d)", n);
mTrackNames |= 1 << n;
+ // assume default parameters for the track, except where noted below
+ track_t* t = &mState.tracks[n];
+ t->needs = 0;
+ t->volume[0] = UNITY_GAIN;
+ t->volume[1] = UNITY_GAIN;
+ // no initialization needed
+ // t->prevVolume[0]
+ // t->prevVolume[1]
+ t->volumeInc[0] = 0;
+ t->volumeInc[1] = 0;
+ t->auxLevel = 0;
+ t->auxInc = 0;
+ // no initialization needed
+ // t->prevAuxLevel
+ // t->frameCount
+ t->channelCount = 2;
+ t->enabled = false;
+ t->format = 16;
+ t->channelMask = AUDIO_CHANNEL_OUT_STEREO;
+ // setBufferProvider(name, AudioBufferProvider *) is required before enable(name)
+ t->bufferProvider = NULL;
+ t->buffer.raw = NULL;
+ // no initialization needed
+ // t->buffer.frameCount
+ t->hook = NULL;
+ t->in = NULL;
+ t->resampler = NULL;
+ t->sampleRate = mSampleRate;
+ // setParameter(name, TRACK, MAIN_BUFFER, mixBuffer) is required before enable(name)
+ t->mainBuffer = NULL;
+ t->auxBuffer = NULL;
+ // see t->localTimeFreq in constructor above
return TRACK0 + n;
}
return -1;
@@ -215,6 +222,9 @@
invalidateState(1 << name);
}
break;
+ case FORMAT:
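+ // only AUDIO_FORMAT_PCM_16_BIT is supported by the mixer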
+ ALOG_ASSERT(valueInt == AUDIO_FORMAT_PCM_16_BIT);
+ break;
default:
LOG_FATAL("bad param");
}
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index c23eb04..15f4349 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -31,7 +31,6 @@
#include <utils/threads.h>
#include "AudioPolicyService.h"
#include "ServiceUtilities.h"
-#include <cutils/properties.h>
#include <hardware_legacy/power.h>
#include <media/AudioEffect.h>
#include <media/EffectsFactoryApi.h>
@@ -1330,13 +1329,9 @@
/* implementation of the interface to the policy manager */
extern "C" {
-static audio_io_handle_t aps_open_output(void *service,
- uint32_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- uint32_t *pChannels,
- uint32_t *pLatencyMs,
- audio_policy_output_flags_t flags)
+
+static audio_module_handle_t aps_load_hw_module(void *service,
+ const char *name)
{
sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
if (af == 0) {
@@ -1344,7 +1339,44 @@
return 0;
}
- return af->openOutput(pDevices, pSamplingRate, pFormat, pChannels,
+ return af->loadHwModule(name);
+}
+
+// deprecated: replaced by aps_open_output_on_module()
+static audio_io_handle_t aps_open_output(void *service,
+ audio_devices_t *pDevices,
+ uint32_t *pSamplingRate,
+ audio_format_t *pFormat,
+ audio_channel_mask_t *pChannelMask,
+ uint32_t *pLatencyMs,
+ audio_policy_output_flags_t flags)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == 0) {
+ ALOGW("%s: could not get AudioFlinger", __func__);
+ return 0;
+ }
+
+ return af->openOutput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask,
+ pLatencyMs, flags);
+}
+
+static audio_io_handle_t aps_open_output_on_module(void *service,
+ audio_module_handle_t module,
+ audio_devices_t *pDevices,
+ uint32_t *pSamplingRate,
+ audio_format_t *pFormat,
+ audio_channel_mask_t *pChannelMask,
+ uint32_t *pLatencyMs,
+ audio_policy_output_flags_t flags)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == 0) {
+ ALOGW("%s: could not get AudioFlinger", __func__);
+ return 0;
+ }
+ ALOGW("%s: %d", __func__, module);
+ return af->openOutput(module, pDevices, pSamplingRate, pFormat, pChannelMask,
pLatencyMs, flags);
}
@@ -1391,12 +1423,13 @@
return af->restoreOutput(output);
}
+// deprecated: replaced by aps_open_input_on_module()
static audio_io_handle_t aps_open_input(void *service,
- uint32_t *pDevices,
- uint32_t *pSamplingRate,
- audio_format_t *pFormat,
- uint32_t *pChannels,
- audio_in_acoustics_t acoustics)
+ audio_devices_t *pDevices,
+ uint32_t *pSamplingRate,
+ audio_format_t *pFormat,
+ audio_channel_mask_t *pChannelMask,
+ audio_in_acoustics_t acoustics)
{
sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
if (af == 0) {
@@ -1404,8 +1437,23 @@
return 0;
}
- return af->openInput(pDevices, pSamplingRate, pFormat, pChannels,
- acoustics);
+ return af->openInput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask);
+}
+
+static audio_io_handle_t aps_open_input_on_module(void *service,
+ audio_module_handle_t module,
+ audio_devices_t *pDevices,
+ uint32_t *pSamplingRate,
+ audio_format_t *pFormat,
+ audio_channel_mask_t *pChannelMask)
+{
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == 0) {
+ ALOGW("%s: could not get AudioFlinger", __func__);
+ return 0;
+ }
+
+ return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask);
}
static int aps_close_input(void *service, audio_io_handle_t input)
@@ -1504,6 +1552,9 @@
stop_tone : aps_stop_tone,
set_voice_volume : aps_set_voice_volume,
move_effects : aps_move_effects,
+ load_hw_module : aps_load_hw_module,
+ open_output_on_module : aps_open_output_on_module,
+ open_input_on_module : aps_open_input_on_module,
};
}; // namespace <unnamed>