/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AAudioServiceEndpointMMAP"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <algorithm>
#include <assert.h>
#include <map>
#include <mutex>
#include <sstream>
#include <utils/Singleton.h>
#include <vector>


#include "AAudioEndpointManager.h"
#include "AAudioServiceEndpoint.h"

#include "core/AudioStreamBuilder.h"
#include "AAudioServiceStreamShared.h"
#include "AAudioServiceEndpointPlay.h"
#include "AAudioServiceEndpointMMAP.h"


#define AAUDIO_BUFFER_CAPACITY_MIN    (4 * 512)
#define AAUDIO_SAMPLE_RATE_DEFAULT    48000

// This is an estimate of the time difference between the HW and the MMAP time.
// TODO Get presentation timestamps from the HAL instead of using these estimates.
#define OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS  (3 * AAUDIO_NANOS_PER_MILLISECOND)
#define INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS   (-1 * AAUDIO_NANOS_PER_MILLISECOND)

using namespace android;  // TODO just import names needed
using namespace aaudio;   // TODO just import names needed

AAudioServiceEndpointMMAP::AAudioServiceEndpointMMAP()
        : mMmapStream(nullptr) {}

AAudioServiceEndpointMMAP::~AAudioServiceEndpointMMAP() {}

std::string AAudioServiceEndpointMMAP::dump() const {
    std::stringstream result;

    result << " MMAP: framesTransferred = " << mFramesTransferred.get();
    result << ", HW nanos = " << mHardwareTimeOffsetNanos;
    result << ", port handle = " << mPortHandle;
    result << ", audio data FD = " << mAudioDataFileDescriptor;
    result << "\n";

    result << " HW Offset Micros: " <<
            (getHardwareTimeOffsetNanos() / AAUDIO_NANOS_PER_MICROSECOND) << "\n";

    result << AAudioServiceEndpoint::dump();
    return result.str();
}

aaudio_result_t AAudioServiceEndpointMMAP::open(const aaudio::AAudioStreamRequest &request) {
    aaudio_result_t result = AAUDIO_OK;
    audio_config_base_t config;
    audio_port_handle_t deviceId;

    int32_t burstMinMicros = AAudioProperty_getHardwareBurstMinMicros();
    int32_t burstMicros = 0;

    copyFrom(request.getConstantConfiguration());

    aaudio_direction_t direction = getDirection();

    const audio_content_type_t contentType =
            AAudioConvert_contentTypeToInternal(getContentType());
    const audio_usage_t usage = (direction == AAUDIO_DIRECTION_OUTPUT)
            ? AAudioConvert_usageToInternal(getUsage())
            : AUDIO_USAGE_UNKNOWN;
    const audio_source_t source = (direction == AAUDIO_DIRECTION_INPUT)
            ? AAudioConvert_inputPresetToAudioSource(getInputPreset())
            : AUDIO_SOURCE_DEFAULT;

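    // Combine the requested usage, content type and source into audio attributes for
    // the audio policy; AUDIO_FLAG_LOW_LATENCY marks this as a low latency stream.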
    const audio_attributes_t attributes = {
            .content_type = contentType,
            .usage = usage,
            .source = source,
            .flags = AUDIO_FLAG_LOW_LATENCY,
            .tags = ""
    };
    ALOGV("open() MMAP attributes.usage = %d, content_type = %d, source = %d",
          attributes.usage, attributes.content_type, attributes.source);

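    // Record the identity of the requesting client. It is passed to openMmapStream()
    // below and reused when the service starts the stream on the client's behalf.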
    mMmapClient.clientUid = request.getUserId();
    mMmapClient.clientPid = request.getProcessId();
    mMmapClient.packageName.setTo(String16(""));

    mRequestedDeviceId = deviceId = getDeviceId();

    // Fill in config
    aaudio_format_t aaudioFormat = getFormat();
    if (aaudioFormat == AAUDIO_UNSPECIFIED || aaudioFormat == AAUDIO_FORMAT_PCM_FLOAT) {
        aaudioFormat = AAUDIO_FORMAT_PCM_I16;
    }
    config.format = AAudioConvert_aaudioToAndroidDataFormat(aaudioFormat);

    int32_t aaudioSampleRate = getSampleRate();
    if (aaudioSampleRate == AAUDIO_UNSPECIFIED) {
        aaudioSampleRate = AAUDIO_SAMPLE_RATE_DEFAULT;
    }
    config.sample_rate = aaudioSampleRate;

    int32_t aaudioSamplesPerFrame = getSamplesPerFrame();

    if (direction == AAUDIO_DIRECTION_OUTPUT) {
        config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
                ? AUDIO_CHANNEL_OUT_STEREO
                : audio_channel_out_mask_from_count(aaudioSamplesPerFrame);
        mHardwareTimeOffsetNanos = OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at DAC later

    } else if (direction == AAUDIO_DIRECTION_INPUT) {
        config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
                ? AUDIO_CHANNEL_IN_STEREO
                : audio_channel_in_mask_from_count(aaudioSamplesPerFrame);
        mHardwareTimeOffsetNanos = INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at ADC earlier

    } else {
        ALOGE("openMmapStream - invalid direction = %d", direction);
        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
    }

    MmapStreamInterface::stream_direction_t streamDirection =
            (direction == AAUDIO_DIRECTION_OUTPUT)
            ? MmapStreamInterface::DIRECTION_OUTPUT
            : MmapStreamInterface::DIRECTION_INPUT;

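    // Translate the requested AAudio session id into its Android equivalent for
    // openMmapStream(); AUDIO_SESSION_ALLOCATE is checked below to see whether a
    // real session id was actually assigned.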
    aaudio_session_id_t requestedSessionId = getSessionId();
    audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);

    // Open HAL stream. Set mMmapStream
    status_t status = MmapStreamInterface::openMmapStream(streamDirection,
                                                          &attributes,
                                                          &config,
                                                          mMmapClient,
                                                          &deviceId,
                                                          &sessionId,
                                                          this, // callback
                                                          mMmapStream,
                                                          &mPortHandle);
    ALOGD("open() mMmapClient.uid = %d, pid = %d => portHandle = %d\n",
          mMmapClient.clientUid, mMmapClient.clientPid, mPortHandle);
    if (status != OK) {
        ALOGE("openMmapStream returned status %d", status);
        return AAUDIO_ERROR_UNAVAILABLE;
    }

    if (deviceId == AAUDIO_UNSPECIFIED) {
        ALOGW("open() - openMmapStream() failed to set deviceId");
    }
    setDeviceId(deviceId);

    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        ALOGW("open() - openMmapStream() failed to set sessionId");
    }

    aaudio_session_id_t actualSessionId =
            (requestedSessionId == AAUDIO_SESSION_ID_NONE)
            ? AAUDIO_SESSION_ID_NONE
            : (aaudio_session_id_t) sessionId;
    setSessionId(actualSessionId);
    ALOGD("open() deviceId = %d, sessionId = %d", getDeviceId(), getSessionId());

    // Create MMAP/NOIRQ buffer.
    int32_t minSizeFrames = getBufferCapacity();
    if (minSizeFrames <= 0) { // zero will get rejected
        minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
    }
    status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
    if (status != OK) {
        ALOGE("open() - createMmapBuffer() failed with status %d %s",
              status, strerror(-status));
        result = AAUDIO_ERROR_UNAVAILABLE;
        goto error;
    } else {
        ALOGD("createMmapBuffer status = %d, buffer_size = %d, burst_size %d"
              ", Sharable FD: %s",
              status,
              abs(mMmapBufferinfo.buffer_size_frames),
              mMmapBufferinfo.burst_size_frames,
              mMmapBufferinfo.buffer_size_frames < 0 ? "Yes" : "No");
    }

    setBufferCapacity(mMmapBufferinfo.buffer_size_frames);
    // The audio HAL indicates if the shared memory fd can be shared outside of audioserver
    // by returning a negative buffer size.
    if (getBufferCapacity() < 0) {
        // Exclusive mode can be used by client or service.
        setBufferCapacity(-getBufferCapacity());
    } else {
        // Exclusive mode can only be used by the service because the FD cannot be shared.
        uid_t audioServiceUid = getuid();
        if ((mMmapClient.clientUid != audioServiceUid) &&
            getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE) {
            // Fallback is handled by caller but indicate what is possible in case
            // this is used in the future.
            setSharingMode(AAUDIO_SHARING_MODE_SHARED);
            ALOGW("open() - exclusive FD cannot be used by client");
            result = AAUDIO_ERROR_UNAVAILABLE;
            goto error;
        }
    }

    // Get information about the stream and pass it back to the caller.
    setSamplesPerFrame((direction == AAUDIO_DIRECTION_OUTPUT)
            ? audio_channel_count_from_out_mask(config.channel_mask)
            : audio_channel_count_from_in_mask(config.channel_mask));

    // AAudio creates a copy of this FD and retains ownership of the copy.
    // Assume that AudioFlinger will close the original shared_memory_fd.
    mAudioDataFileDescriptor.reset(dup(mMmapBufferinfo.shared_memory_fd));
    if (mAudioDataFileDescriptor.get() == -1) {
        ALOGE("open() - could not dup shared_memory_fd");
        result = AAUDIO_ERROR_INTERNAL;
        goto error;
    }
    mFramesPerBurst = mMmapBufferinfo.burst_size_frames;
    setFormat(AAudioConvert_androidToAAudioDataFormat(config.format));
    setSampleRate(config.sample_rate);

    // Scale up the burst size to meet the minimum equivalent in microseconds.
    // This is to avoid waking the CPU too often when the HW burst is very small
    // or at high sample rates.
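    // For example, if the HAL reports an 8-frame burst at 48000 Hz (~167 usec) and the
    // minimum were 1000 usec (a hypothetical property value), the loop would double the
    // burst three times, to 64 frames (~1333 usec).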
    do {
        if (burstMicros > 0) { // skip first loop
            mFramesPerBurst *= 2;
        }
        burstMicros = mFramesPerBurst * static_cast<int64_t>(1000000) / getSampleRate();
    } while (burstMicros < burstMinMicros);

    ALOGD("open() original burst = %d, minMicros = %d, to burst = %d\n",
          mMmapBufferinfo.burst_size_frames, burstMinMicros, mFramesPerBurst);

    ALOGD("open() actual rate = %d, channels = %d"
          ", deviceId = %d, capacity = %d\n",
          getSampleRate(), getSamplesPerFrame(), deviceId, getBufferCapacity());

    return result;

error:
    close();
    return result;
}

aaudio_result_t AAudioServiceEndpointMMAP::close() {

    if (mMmapStream != 0) {
        ALOGD("close() clear() endpoint");
        // Needs to be explicitly cleared or CTS will fail but it is not clear why.
        mMmapStream.clear();
        // Apparently the above close is asynchronous. An attempt to open a new device
        // right after a close can fail. Also some callbacks may still be in flight!
        // FIXME Make closing synchronous.
        AudioClock::sleepForNanos(100 * AAUDIO_NANOS_PER_MILLISECOND);
    }

    return AAUDIO_OK;
}

aaudio_result_t AAudioServiceEndpointMMAP::startStream(sp<AAudioServiceStreamBase> stream,
                                                       audio_port_handle_t *clientHandle) {
    // Start the client on behalf of the AAudio service.
    // Use the port handle that was provided by openMmapStream().
    return startClient(mMmapClient, &mPortHandle);
}

aaudio_result_t AAudioServiceEndpointMMAP::stopStream(sp<AAudioServiceStreamBase> stream,
                                                      audio_port_handle_t clientHandle) {
    mFramesTransferred.reset32();

    // Round 64-bit counter up to a multiple of the buffer capacity.
    // This is required because the 64-bit counter is used as an index
    // into a circular buffer and the actual HW position is reset to zero
    // when the stream is stopped.
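    // For example, with a capacity of 2048 frames, a 64-bit count of 5000 would be
    // rounded up to 6144, the next multiple of the capacity.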
    mFramesTransferred.roundUp64(getBufferCapacity());

    return stopClient(mPortHandle);
}

aaudio_result_t AAudioServiceEndpointMMAP::startClient(const android::AudioClient& client,
                                                       audio_port_handle_t *clientHandle) {
    if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
    ALOGD("startClient(%p(uid=%d, pid=%d))",
          &client, client.clientUid, client.clientPid);
    audio_port_handle_t originalHandle = *clientHandle;
    status_t status = mMmapStream->start(client, clientHandle);
    aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);
    ALOGD("startClient(), %d => %d returns %d",
          originalHandle, *clientHandle, result);
    return result;
}

aaudio_result_t AAudioServiceEndpointMMAP::stopClient(audio_port_handle_t clientHandle) {
    if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
    aaudio_result_t result = AAudioConvert_androidToAAudioResult(mMmapStream->stop(clientHandle));
    ALOGD("stopClient(%d) returns %d", clientHandle, result);
    return result;
}

// Get free-running DSP or DMA hardware position from the HAL.
aaudio_result_t AAudioServiceEndpointMMAP::getFreeRunningPosition(int64_t *positionFrames,
                                                                  int64_t *timeNanos) {
    struct audio_mmap_position position;
    if (mMmapStream == nullptr) {
        return AAUDIO_ERROR_NULL;
    }
    status_t status = mMmapStream->getMmapPosition(&position);
    ALOGV("getFreeRunningPosition() status = %d, pos = %d, nanos = %lld\n",
          status, position.position_frames, (long long) position.time_nanoseconds);
    aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);
    if (result == AAUDIO_ERROR_UNAVAILABLE) {
        ALOGW("getFreeRunningPosition(): getMmapPosition() has no position data available");
    } else if (result != AAUDIO_OK) {
        ALOGE("getFreeRunningPosition(): getMmapPosition() returned status %d", status);
    } else {
        // Convert 32-bit position to 64-bit position.
        mFramesTransferred.update32(position.position_frames);
        *positionFrames = mFramesTransferred.get();
        *timeNanos = position.time_nanoseconds;
    }
    return result;
}

aaudio_result_t AAudioServiceEndpointMMAP::getTimestamp(int64_t *positionFrames,
                                                        int64_t *timeNanos) {
    return 0; // TODO Implement this, e.g. using HAL presentation timestamps.
}


void AAudioServiceEndpointMMAP::onTearDown() {
    ALOGD("onTearDown() called");
    disconnectRegisteredStreams();
}

void AAudioServiceEndpointMMAP::onVolumeChanged(audio_channel_mask_t channels,
                                                android::Vector<float> values) {
    // TODO do we really need a different volume for each channel?
    float volume = values[0];
    ALOGD("onVolumeChanged() volume[0] = %f", volume);
    std::lock_guard<std::mutex> lock(mLockStreams);
    for (const auto& stream : mRegisteredStreams) {
        stream->onVolumeChanged(volume);
    }
}

void AAudioServiceEndpointMMAP::onRoutingChanged(audio_port_handle_t deviceId) {
    ALOGD("onRoutingChanged() called with dev %d, old = %d",
          deviceId, getDeviceId());
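    // If the endpoint was already bound to a different device, disconnect the
    // registered streams so that clients can reopen on the new device.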
    if (getDeviceId() != AUDIO_PORT_HANDLE_NONE && getDeviceId() != deviceId) {
        disconnectRegisteredStreams();
    }
    setDeviceId(deviceId);
}

/**
 * Get an immutable description of the data queue from the HAL.
 */
aaudio_result_t AAudioServiceEndpointMMAP::getDownDataDescription(AudioEndpointParcelable &parcelable)
{
    // Gather information on the data queue based on HAL info.
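    // For example, 2048 frames of stereo 16-bit PCM (4 bytes per frame) would describe
    // a shared memory region of 2048 * 4 = 8192 bytes.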
    int32_t bytesPerFrame = calculateBytesPerFrame();
    int32_t capacityInBytes = getBufferCapacity() * bytesPerFrame;
    int fdIndex = parcelable.addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
    parcelable.mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
    parcelable.mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
    parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
    parcelable.mDownDataQueueParcelable.setCapacityInFrames(getBufferCapacity());
    return AAUDIO_OK;
}