/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AAudioServiceEndpointMMAP"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <algorithm>
#include <assert.h>
#include <map>
#include <mutex>
#include <sstream>
#include <utils/Singleton.h>
#include <vector>

#include "AAudioEndpointManager.h"
#include "AAudioServiceEndpoint.h"

#include "core/AudioStreamBuilder.h"
#include "AAudioServiceStreamShared.h"
#include "AAudioServiceEndpointPlay.h"
#include "AAudioServiceEndpointMMAP.h"

#define AAUDIO_BUFFER_CAPACITY_MIN    (4 * 512)
#define AAUDIO_SAMPLE_RATE_DEFAULT    48000

// This is an estimate of the time difference between the HW and the MMAP time.
// TODO Get presentation timestamps from the HAL instead of using these estimates.
#define OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS  (3 * AAUDIO_NANOS_PER_MILLISECOND)
#define INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS  (-1 * AAUDIO_NANOS_PER_MILLISECOND)

using namespace android;  // TODO just import names needed
using namespace aaudio;   // TODO just import names needed

AAudioServiceEndpointMMAP::AAudioServiceEndpointMMAP()
        : mMmapStream(nullptr) {}

AAudioServiceEndpointMMAP::~AAudioServiceEndpointMMAP() {}

std::string AAudioServiceEndpointMMAP::dump() const {
    std::stringstream result;

    result << "  MMAP: framesTransferred = " << mFramesTransferred.get();
    result << ", HW nanos = " << mHardwareTimeOffsetNanos;
    result << ", port handle = " << mPortHandle;
    result << ", audio data FD = " << mAudioDataFileDescriptor;
    result << "\n";

    result << "    HW Offset Micros: "
           << (getHardwareTimeOffsetNanos() / AAUDIO_NANOS_PER_MICROSECOND) << "\n";

    result << AAudioServiceEndpoint::dump();
    return result.str();
}

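// Open an MMAP/NOIRQ stream through the HAL, map its shared memory buffer, and
// copy the resulting stream parameters (format, sample rate, channels, burst size)
// back into this endpoint so the caller can read them.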
aaudio_result_t AAudioServiceEndpointMMAP::open(const aaudio::AAudioStreamRequest &request) {
    aaudio_result_t result = AAUDIO_OK;
    const audio_attributes_t attributes = {
            .content_type = AUDIO_CONTENT_TYPE_MUSIC,
            .usage = AUDIO_USAGE_MEDIA,
            .source = AUDIO_SOURCE_VOICE_RECOGNITION,
            .flags = AUDIO_FLAG_LOW_LATENCY,
            .tags = ""
    };
    audio_config_base_t config;
    audio_port_handle_t deviceId;

    int32_t burstMinMicros = AAudioProperty_getHardwareBurstMinMicros();
    int32_t burstMicros = 0;

    copyFrom(request.getConstantConfiguration());

    mMmapClient.clientUid = request.getUserId();
    mMmapClient.clientPid = request.getProcessId();
    mMmapClient.packageName.setTo(String16(""));

    mRequestedDeviceId = deviceId = getDeviceId();

    // Fill in config
    aaudio_format_t aaudioFormat = getFormat();
    if (aaudioFormat == AAUDIO_UNSPECIFIED || aaudioFormat == AAUDIO_FORMAT_PCM_FLOAT) {
        aaudioFormat = AAUDIO_FORMAT_PCM_I16;
    }
    config.format = AAudioConvert_aaudioToAndroidDataFormat(aaudioFormat);

    int32_t aaudioSampleRate = getSampleRate();
    if (aaudioSampleRate == AAUDIO_UNSPECIFIED) {
        aaudioSampleRate = AAUDIO_SAMPLE_RATE_DEFAULT;
    }
    config.sample_rate = aaudioSampleRate;

    int32_t aaudioSamplesPerFrame = getSamplesPerFrame();

    aaudio_direction_t direction = getDirection();
    if (direction == AAUDIO_DIRECTION_OUTPUT) {
        config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
                              ? AUDIO_CHANNEL_OUT_STEREO
                              : audio_channel_out_mask_from_count(aaudioSamplesPerFrame);
        mHardwareTimeOffsetNanos = OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at DAC later

    } else if (direction == AAUDIO_DIRECTION_INPUT) {
        config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
                              ? AUDIO_CHANNEL_IN_STEREO
                              : audio_channel_in_mask_from_count(aaudioSamplesPerFrame);
        mHardwareTimeOffsetNanos = INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at ADC earlier

    } else {
        ALOGE("openMmapStream - invalid direction = %d", direction);
        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
    }

    MmapStreamInterface::stream_direction_t streamDirection =
            (direction == AAUDIO_DIRECTION_OUTPUT)
            ? MmapStreamInterface::DIRECTION_OUTPUT
            : MmapStreamInterface::DIRECTION_INPUT;

    // Open HAL stream. Set mMmapStream.
    status_t status = MmapStreamInterface::openMmapStream(streamDirection,
                                                          &attributes,
                                                          &config,
                                                          mMmapClient,
                                                          &deviceId,
                                                          this, // callback
                                                          mMmapStream,
                                                          &mPortHandle);
    ALOGD("AAudioServiceEndpointMMAP::open() mMmapClient.uid = %d, pid = %d => portHandle = %d\n",
          mMmapClient.clientUid, mMmapClient.clientPid, mPortHandle);
    if (status != OK) {
        ALOGE("openMmapStream returned status %d", status);
        return AAUDIO_ERROR_UNAVAILABLE;
    }

    if (deviceId == AAUDIO_UNSPECIFIED) {
        ALOGW("AAudioServiceEndpointMMAP::open() - openMmapStream() failed to set deviceId");
    }
    setDeviceId(deviceId);

    // Create MMAP/NOIRQ buffer.
    int32_t minSizeFrames = getBufferCapacity();
    if (minSizeFrames <= 0) { // zero will get rejected
        minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
    }
    status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
    if (status != OK) {
        ALOGE("AAudioServiceEndpointMMAP::open() - createMmapBuffer() failed with status %d %s",
              status, strerror(-status));
        result = AAUDIO_ERROR_UNAVAILABLE;
        goto error;
    } else {
        ALOGD("createMmapBuffer status = %d, buffer_size = %d, burst_size %d"
                      ", Sharable FD: %s",
              status,
              abs(mMmapBufferinfo.buffer_size_frames),
              mMmapBufferinfo.burst_size_frames,
              mMmapBufferinfo.buffer_size_frames < 0 ? "Yes" : "No");
    }

    setBufferCapacity(mMmapBufferinfo.buffer_size_frames);
    // The audio HAL indicates if the shared memory fd can be shared outside of audioserver
    // by returning a negative buffer size.
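    // For example (hypothetical value): buffer_size_frames == -2048 means a 2048 frame
    // buffer whose FD may be duped and handed to the client process for EXCLUSIVE mode.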
    if (getBufferCapacity() < 0) {
        // Exclusive mode can be used by client or service.
        setBufferCapacity(-getBufferCapacity());
    } else {
        // Exclusive mode can only be used by the service because the FD cannot be shared.
        uid_t audioServiceUid = getuid();
        if ((mMmapClient.clientUid != audioServiceUid) &&
            getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE) {
            // Fallback is handled by caller but indicate what is possible in case
            // this is used in the future.
            setSharingMode(AAUDIO_SHARING_MODE_SHARED);
            ALOGW("AAudioServiceEndpointMMAP::open() - exclusive FD cannot be used by client");
            result = AAUDIO_ERROR_UNAVAILABLE;
            goto error;
        }
    }

    // Get information about the stream and pass it back to the caller.
    setSamplesPerFrame((direction == AAUDIO_DIRECTION_OUTPUT)
                       ? audio_channel_count_from_out_mask(config.channel_mask)
                       : audio_channel_count_from_in_mask(config.channel_mask));

    // AAudio creates a copy of this FD and retains ownership of the copy.
    // Assume that AudioFlinger will close the original shared_memory_fd.
    mAudioDataFileDescriptor.reset(dup(mMmapBufferinfo.shared_memory_fd));
    if (mAudioDataFileDescriptor.get() == -1) {
        ALOGE("AAudioServiceEndpointMMAP::open() - could not dup shared_memory_fd");
        result = AAUDIO_ERROR_INTERNAL;
        goto error;
    }
    mFramesPerBurst = mMmapBufferinfo.burst_size_frames;
    setFormat(AAudioConvert_androidToAAudioDataFormat(config.format));
    setSampleRate(config.sample_rate);

    // Scale up the burst size to meet the minimum equivalent in microseconds.
    // This is to avoid waking the CPU too often when the HW burst is very small
    // or at high sample rates.
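    // Worked example (hypothetical values): a HW burst of 96 frames at 48000 Hz lasts
    // 2000 us. With burstMinMicros = 8000 us the loop doubles 96 -> 192 -> 384 frames,
    // i.e. 8000 us per burst, and then stops.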
    do {
        if (burstMicros > 0) { // skip first loop
            mFramesPerBurst *= 2;
        }
        burstMicros = mFramesPerBurst * static_cast<int64_t>(1000000) / getSampleRate();
    } while (burstMicros < burstMinMicros);

    ALOGD("AAudioServiceEndpointMMAP::open() original burst = %d, minMicros = %d, to burst = %d\n",
          mMmapBufferinfo.burst_size_frames, burstMinMicros, mFramesPerBurst);

    ALOGD("AAudioServiceEndpointMMAP::open() actual rate = %d, channels = %d"
                  ", deviceId = %d, capacity = %d\n",
          getSampleRate(), getSamplesPerFrame(), deviceId, getBufferCapacity());

    return result;

error:
    close();
    return result;
}

aaudio_result_t AAudioServiceEndpointMMAP::close() {

    if (mMmapStream != 0) {
        ALOGD("AAudioServiceEndpointMMAP::close() clear() endpoint");
        // Needs to be explicitly cleared or CTS will fail but it is not clear why.
        mMmapStream.clear();
        // Apparently the above close is asynchronous. An attempt to open a new device
        // right after a close can fail. Also some callbacks may still be in flight!
        // FIXME Make closing synchronous.
        AudioClock::sleepForNanos(100 * AAUDIO_NANOS_PER_MILLISECOND);
    }

    return AAUDIO_OK;
}

aaudio_result_t AAudioServiceEndpointMMAP::startStream(sp<AAudioServiceStreamBase> stream,
                                                       audio_port_handle_t *clientHandle) {
    // Start the client on behalf of the AAudio service.
    // Use the port handle that was provided by openMmapStream().
    return startClient(mMmapClient, &mPortHandle);
}

aaudio_result_t AAudioServiceEndpointMMAP::stopStream(sp<AAudioServiceStreamBase> stream,
                                                      audio_port_handle_t clientHandle) {
    mFramesTransferred.reset32();
    return stopClient(mPortHandle);
}

aaudio_result_t AAudioServiceEndpointMMAP::startClient(const android::AudioClient& client,
                                                       audio_port_handle_t *clientHandle) {
    if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
    ALOGD("AAudioServiceEndpointMMAP::startClient(%p(uid=%d, pid=%d))",
          &client, client.clientUid, client.clientPid);
    audio_port_handle_t originalHandle = *clientHandle;
    status_t status = mMmapStream->start(client, clientHandle);
    aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);
    ALOGD("AAudioServiceEndpointMMAP::startClient(), %d => %d returns %d",
          originalHandle, *clientHandle, result);
    return result;
}

aaudio_result_t AAudioServiceEndpointMMAP::stopClient(audio_port_handle_t clientHandle) {
    if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
    aaudio_result_t result = AAudioConvert_androidToAAudioResult(mMmapStream->stop(clientHandle));
    ALOGD("AAudioServiceEndpointMMAP::stopClient(%d) returns %d", clientHandle, result);
    return result;
}

// Get free-running DSP or DMA hardware position from the HAL.
aaudio_result_t AAudioServiceEndpointMMAP::getFreeRunningPosition(int64_t *positionFrames,
                                                                  int64_t *timeNanos) {
    struct audio_mmap_position position;
    if (mMmapStream == nullptr) {
        return AAUDIO_ERROR_NULL;
    }
    status_t status = mMmapStream->getMmapPosition(&position);
    ALOGV("AAudioServiceEndpointMMAP::getFreeRunningPosition() status = %d, pos = %d, nanos = %lld\n",
          status, position.position_frames, (long long) position.time_nanoseconds);
    aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);
    if (result == AAUDIO_ERROR_UNAVAILABLE) {
        ALOGW("sendCurrentTimestamp(): getMmapPosition() has no position data available");
    } else if (result != AAUDIO_OK) {
        ALOGE("sendCurrentTimestamp(): getMmapPosition() returned status %d", status);
    } else {
        // Convert 32-bit position to 64-bit position.
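        // mFramesTransferred is a monotonic counter that accumulates the 32-bit HAL
        // counter into a 64-bit value, which is assumed to handle 32-bit wraparound.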
        mFramesTransferred.update32(position.position_frames);
        *positionFrames = mFramesTransferred.get();
        *timeNanos = position.time_nanoseconds;
    }
    return result;
}

aaudio_result_t AAudioServiceEndpointMMAP::getTimestamp(int64_t *positionFrames,
                                                        int64_t *timeNanos) {
    return 0; // TODO
}

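// MmapStreamCallback: the HAL stream is being torn down, so disconnect every
// registered client stream from this endpoint.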
void AAudioServiceEndpointMMAP::onTearDown() {
    ALOGD("AAudioServiceEndpointMMAP::onTearDown() called");
    disconnectRegisteredStreams();
}

void AAudioServiceEndpointMMAP::onVolumeChanged(audio_channel_mask_t channels,
                                                android::Vector<float> values) {
    // TODO do we really need a different volume for each channel?
    float volume = values[0];
    ALOGD("AAudioServiceEndpointMMAP::onVolumeChanged() volume[0] = %f", volume);
    std::lock_guard<std::mutex> lock(mLockStreams);
    for (const auto& stream : mRegisteredStreams) {
        stream->onVolumeChanged(volume);
    }
}

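// MmapStreamCallback: audio routing changed. If the device actually changed,
// disconnect the registered streams so clients can reopen on the new device.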
void AAudioServiceEndpointMMAP::onRoutingChanged(audio_port_handle_t deviceId) {
    ALOGD("AAudioServiceEndpointMMAP::onRoutingChanged() called with %d, old = %d",
          deviceId, getDeviceId());
    if (getDeviceId() != AUDIO_PORT_HANDLE_NONE && getDeviceId() != deviceId) {
        disconnectRegisteredStreams();
    }
    setDeviceId(deviceId);
}

/**
 * Get an immutable description of the data queue from the HAL.
 */
aaudio_result_t AAudioServiceEndpointMMAP::getDownDataDescription(AudioEndpointParcelable &parcelable)
{
    // Gather information on the data queue based on HAL info.
    int32_t bytesPerFrame = calculateBytesPerFrame();
    int32_t capacityInBytes = getBufferCapacity() * bytesPerFrame;
    int fdIndex = parcelable.addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
    parcelable.mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
    parcelable.mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
    parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
    parcelable.mDownDataQueueParcelable.setCapacityInFrames(getBufferCapacity());
    return AAUDIO_OK;
}