Phil Burk | c0c70e3 | 2017-02-09 13:18:38 -0800 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2017 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
Eric Laurent | cb4dae2 | 2017-07-01 19:39:32 -0700 | [diff] [blame] | 17 | #define LOG_TAG "AAudioServiceStreamShared" |
Phil Burk | c0c70e3 | 2017-02-09 13:18:38 -0800 | [diff] [blame] | 18 | //#define LOG_NDEBUG 0 |
| 19 | #include <utils/Log.h> |
| 20 | |
Phil Burk | a5222e2 | 2017-07-28 13:31:14 -0700 | [diff] [blame] | 21 | #include <iomanip> |
| 22 | #include <iostream> |
Phil Burk | c0c70e3 | 2017-02-09 13:18:38 -0800 | [diff] [blame] | 23 | #include <mutex> |
| 24 | |
| 25 | #include <aaudio/AAudio.h> |
| 26 | |
| 27 | #include "binding/IAAudioService.h" |
| 28 | |
| 29 | #include "binding/AAudioServiceMessage.h" |
| 30 | #include "AAudioServiceStreamBase.h" |
| 31 | #include "AAudioServiceStreamShared.h" |
| 32 | #include "AAudioEndpointManager.h" |
| 33 | #include "AAudioService.h" |
| 34 | #include "AAudioServiceEndpoint.h" |
| 35 | |
| 36 | using namespace android; |
| 37 | using namespace aaudio; |
| 38 | |
Phil Burk | ec89b2e | 2017-06-20 15:05:06 -0700 | [diff] [blame] | 39 | #define MIN_BURSTS_PER_BUFFER 2 |
| 40 | #define DEFAULT_BURSTS_PER_BUFFER 16 |
| 41 | // This is an arbitrary range. TODO review. |
| 42 | #define MAX_FRAMES_PER_BUFFER (32 * 1024) |
Phil Burk | c0c70e3 | 2017-02-09 13:18:38 -0800 | [diff] [blame] | 43 | |
// A shared stream mixes into (or reads from) an endpoint that is shared with
// other AAudio client streams. Counters start at zero; the shared-memory data
// queue is allocated later in open().
AAudioServiceStreamShared::AAudioServiceStreamShared(AAudioService &audioService)
    : mAudioService(audioService)
    , mTimestampPositionOffset(0)
    , mXRunCount(0)
{
}
| 50 | |
Phil Burk | a5222e2 | 2017-07-28 13:31:14 -0700 | [diff] [blame] | 51 | std::string AAudioServiceStreamShared::dumpHeader() { |
| 52 | std::stringstream result; |
| 53 | result << AAudioServiceStreamBase::dumpHeader(); |
| 54 | result << " Write# Read# Avail XRuns"; |
| 55 | return result.str(); |
| 56 | } |
| 57 | |
| 58 | std::string AAudioServiceStreamShared::dump() const { |
| 59 | std::stringstream result; |
| 60 | result << AAudioServiceStreamBase::dump(); |
| 61 | |
| 62 | auto fifo = mAudioDataQueue->getFifoBuffer(); |
| 63 | int32_t readCounter = fifo->getReadCounter(); |
| 64 | int32_t writeCounter = fifo->getWriteCounter(); |
| 65 | result << std::setw(10) << writeCounter; |
| 66 | result << std::setw(10) << readCounter; |
| 67 | result << std::setw(8) << (writeCounter - readCounter); |
| 68 | result << std::setw(8) << getXRunCount(); |
| 69 | |
| 70 | return result.str(); |
| 71 | } |
| 72 | |
Phil Burk | ec89b2e | 2017-06-20 15:05:06 -0700 | [diff] [blame] | 73 | int32_t AAudioServiceStreamShared::calculateBufferCapacity(int32_t requestedCapacityFrames, |
| 74 | int32_t framesPerBurst) { |
| 75 | |
| 76 | if (requestedCapacityFrames > MAX_FRAMES_PER_BUFFER) { |
Phil Burk | 11e8d33 | 2017-05-24 09:59:02 -0700 | [diff] [blame] | 77 | ALOGE("AAudioServiceStreamShared::calculateBufferCapacity() requested capacity %d > max %d", |
Phil Burk | ec89b2e | 2017-06-20 15:05:06 -0700 | [diff] [blame] | 78 | requestedCapacityFrames, MAX_FRAMES_PER_BUFFER); |
| 79 | return AAUDIO_ERROR_OUT_OF_RANGE; |
| 80 | } |
| 81 | |
| 82 | // Determine how many bursts will fit in the buffer. |
| 83 | int32_t numBursts; |
| 84 | if (requestedCapacityFrames == AAUDIO_UNSPECIFIED) { |
| 85 | // Use fewer bursts if default is too many. |
| 86 | if ((DEFAULT_BURSTS_PER_BUFFER * framesPerBurst) > MAX_FRAMES_PER_BUFFER) { |
| 87 | numBursts = MAX_FRAMES_PER_BUFFER / framesPerBurst; |
| 88 | } else { |
| 89 | numBursts = DEFAULT_BURSTS_PER_BUFFER; |
| 90 | } |
| 91 | } else { |
| 92 | // round up to nearest burst boundary |
| 93 | numBursts = (requestedCapacityFrames + framesPerBurst - 1) / framesPerBurst; |
| 94 | } |
| 95 | |
| 96 | // Clip to bare minimum. |
| 97 | if (numBursts < MIN_BURSTS_PER_BUFFER) { |
| 98 | numBursts = MIN_BURSTS_PER_BUFFER; |
| 99 | } |
| 100 | // Check for numeric overflow. |
| 101 | if (numBursts > 0x8000 || framesPerBurst > 0x8000) { |
Phil Burk | 11e8d33 | 2017-05-24 09:59:02 -0700 | [diff] [blame] | 102 | ALOGE("AAudioServiceStreamShared::calculateBufferCapacity() overflow, capacity = %d * %d", |
Phil Burk | ec89b2e | 2017-06-20 15:05:06 -0700 | [diff] [blame] | 103 | numBursts, framesPerBurst); |
| 104 | return AAUDIO_ERROR_OUT_OF_RANGE; |
| 105 | } |
| 106 | int32_t capacityInFrames = numBursts * framesPerBurst; |
| 107 | |
| 108 | // Final sanity check. |
| 109 | if (capacityInFrames > MAX_FRAMES_PER_BUFFER) { |
Phil Burk | 11e8d33 | 2017-05-24 09:59:02 -0700 | [diff] [blame] | 110 | ALOGE("AAudioServiceStreamShared::calculateBufferCapacity() calc capacity %d > max %d", |
Phil Burk | ec89b2e | 2017-06-20 15:05:06 -0700 | [diff] [blame] | 111 | capacityInFrames, MAX_FRAMES_PER_BUFFER); |
| 112 | return AAUDIO_ERROR_OUT_OF_RANGE; |
| 113 | } |
Phil Burk | 11e8d33 | 2017-05-24 09:59:02 -0700 | [diff] [blame] | 114 | ALOGD("AAudioServiceStreamShared::calculateBufferCapacity() requested %d frames, actual = %d", |
Phil Burk | ec89b2e | 2017-06-20 15:05:06 -0700 | [diff] [blame] | 115 | requestedCapacityFrames, capacityInFrames); |
| 116 | return capacityInFrames; |
| 117 | } |
| 118 | |
/**
 * Open a shared stream: open the base class, obtain a shared endpoint that
 * matches the request, validate format/rate/channel-count against that
 * endpoint, allocate the shared-memory audio FIFO, and register this stream
 * with the endpoint.
 *
 * @param request             client's requested configuration
 * @param configurationOutput filled in with the actual configuration granted
 * @return AAUDIO_OK or a negative AAUDIO_ERROR_* code; on error the stream is
 *         closed via the error: label before returning.
 */
aaudio_result_t AAudioServiceStreamShared::open(const aaudio::AAudioStreamRequest &request,
                                                aaudio::AAudioStreamConfiguration &configurationOutput) {

    // Hold a strong reference so "this" cannot be destroyed while it is being
    // registered with the endpoint below.
    sp<AAudioServiceStreamShared> keep(this);

    aaudio_result_t result = AAudioServiceStreamBase::open(request, configurationOutput);
    if (result != AAUDIO_OK) {
        ALOGE("AAudioServiceStreamBase open() returned %d", result);
        return result;
    }

    const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
    aaudio_direction_t direction = request.getDirection();

    // Find or create a shared endpoint for this direction/configuration.
    // NOTE(review): local named "mEndpointManager" uses a member-style prefix;
    // consider renaming to endpointManager.
    AAudioEndpointManager &mEndpointManager = AAudioEndpointManager::getInstance();
    mServiceEndpoint = mEndpointManager.openEndpoint(mAudioService, configurationOutput, direction);
    if (mServiceEndpoint == nullptr) {
        ALOGE("AAudioServiceStreamShared::open() mServiceEndPoint = %p", mServiceEndpoint);
        return AAUDIO_ERROR_UNAVAILABLE;
    }

    // Is the request compatible with the shared endpoint?
    mAudioFormat = configurationInput.getFormat();
    if (mAudioFormat == AAUDIO_FORMAT_UNSPECIFIED) {
        mAudioFormat = AAUDIO_FORMAT_PCM_FLOAT;  // shared streams only support float here
    } else if (mAudioFormat != AAUDIO_FORMAT_PCM_FLOAT) {
        ALOGE("AAudioServiceStreamShared::open() mAudioFormat = %d, need FLOAT", mAudioFormat);
        result = AAUDIO_ERROR_INVALID_FORMAT;
        goto error;
    }

    // Sample rate must match the endpoint exactly (no resampling in shared path).
    mSampleRate = configurationInput.getSampleRate();
    if (mSampleRate == AAUDIO_UNSPECIFIED) {
        mSampleRate = mServiceEndpoint->getSampleRate();
    } else if (mSampleRate != mServiceEndpoint->getSampleRate()) {
        ALOGE("AAudioServiceStreamShared::open() mSampleRate = %d, need %d",
              mSampleRate, mServiceEndpoint->getSampleRate());
        result = AAUDIO_ERROR_INVALID_RATE;
        goto error;
    }

    // Channel count must also match the endpoint exactly.
    mSamplesPerFrame = configurationInput.getSamplesPerFrame();
    if (mSamplesPerFrame == AAUDIO_UNSPECIFIED) {
        mSamplesPerFrame = mServiceEndpoint->getSamplesPerFrame();
    } else if (mSamplesPerFrame != mServiceEndpoint->getSamplesPerFrame()) {
        ALOGE("AAudioServiceStreamShared::open() mSamplesPerFrame = %d, need %d",
              mSamplesPerFrame, mServiceEndpoint->getSamplesPerFrame());
        result = AAUDIO_ERROR_OUT_OF_RANGE;
        goto error;
    }

    mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
    ALOGD("AAudioServiceStreamShared::open() mSampleRate = %d, mFramesPerBurst = %d",
          mSampleRate, mFramesPerBurst);

    // calculateBufferCapacity() returns a negative error code on failure.
    mCapacityInFrames = calculateBufferCapacity(configurationInput.getBufferCapacity(),
                                                mFramesPerBurst);
    if (mCapacityInFrames < 0) {
        result = mCapacityInFrames; // negative error code
        mCapacityInFrames = 0;
        goto error;
    }

    // Create audio data shared memory buffer for client.
    mAudioDataQueue = new SharedRingBuffer();
    result = mAudioDataQueue->allocate(calculateBytesPerFrame(), mCapacityInFrames);
    if (result != AAUDIO_OK) {
        ALOGE("AAudioServiceStreamShared::open() could not allocate FIFO with %d frames",
              mCapacityInFrames);
        result = AAUDIO_ERROR_NO_MEMORY;
        goto error;
    }

    ALOGD("AAudioServiceStreamShared::open() actual rate = %d, channels = %d, deviceId = %d",
          mSampleRate, mSamplesPerFrame, mServiceEndpoint->getDeviceId());

    // Fill in configuration for client.
    configurationOutput.setSampleRate(mSampleRate);
    configurationOutput.setSamplesPerFrame(mSamplesPerFrame);
    configurationOutput.setFormat(mAudioFormat);
    configurationOutput.setDeviceId(mServiceEndpoint->getDeviceId());

    result = mServiceEndpoint->registerStream(keep);
    if (result != AAUDIO_OK) {
        goto error;
    }

    setState(AAUDIO_STREAM_STATE_OPEN);
    return AAUDIO_OK;

error:
    // close() tears down anything partially constructed above.
    close();
    return result;
}
| 213 | |
/**
 * Start the flow of audio data.
 *
 * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
 *
 * @return AAUDIO_OK, or a negative AAUDIO_ERROR_* code; on endpoint failure
 *         the stream is disconnected.
 */
aaudio_result_t AAudioServiceStreamShared::start() {
    // Starting twice is benign.
    if (isRunning()) {
        return AAUDIO_OK;
    }
    // Snapshot the endpoint pointer; close() may null the member.
    AAudioServiceEndpoint *endpoint = mServiceEndpoint;
    if (endpoint == nullptr) {
        ALOGE("AAudioServiceStreamShared::start() missing endpoint");
        return AAUDIO_ERROR_INVALID_STATE;
    }
    // For output streams, this will add the stream to the mixer.
    aaudio_result_t result = endpoint->startStream(this);
    if (result != AAUDIO_OK) {
        ALOGE("AAudioServiceStreamShared::start() mServiceEndpoint returned %d", result);
        disconnect();
    } else {
        // Register this client with the underlying MMAP stream, then let the
        // base class start its timestamp/event machinery.
        result = endpoint->getStreamInternal()->startClient(mMmapClient, &mClientHandle);
        if (result == AAUDIO_OK) {
            result = AAudioServiceStreamBase::start();
        }
    }
    return result;
}
| 241 | |
/**
 * Stop the flow of data so that start() can resume without loss of data.
 *
 * An AAUDIO_SERVICE_EVENT_PAUSED will be sent to the client when complete.
 *
 * @return result of AAudioServiceStreamBase::pause(), or AAUDIO_ERROR_* on
 *         earlier failure.
 */
aaudio_result_t AAudioServiceStreamShared::pause() {
    // Pausing a stream that is not running is benign.
    if (!isRunning()) {
        return AAUDIO_OK;
    }
    // Snapshot the endpoint pointer; close() may null the member.
    AAudioServiceEndpoint *endpoint = mServiceEndpoint;
    if (endpoint == nullptr) {
        ALOGE("AAudioServiceStreamShared::pause() missing endpoint");
        return AAUDIO_ERROR_INVALID_STATE;
    }
    // Unregister this client from the MMAP stream before stopping the mix.
    endpoint->getStreamInternal()->stopClient(mClientHandle);
    aaudio_result_t result = endpoint->stopStream(this);
    if (result != AAUDIO_OK) {
        ALOGE("AAudioServiceStreamShared::pause() mServiceEndpoint returned %d", result);
        disconnect(); // TODO should we return or pause Base first?
    }
    return AAudioServiceStreamBase::pause();
}
| 264 | |
/**
 * Stop the flow of audio data. Mirrors pause() but maps to the STOPPED state
 * via AAudioServiceStreamBase::stop().
 *
 * @return result of AAudioServiceStreamBase::stop(), or AAUDIO_ERROR_* on
 *         earlier failure.
 */
aaudio_result_t AAudioServiceStreamShared::stop() {
    // Stopping a stream that is not running is benign.
    if (!isRunning()) {
        return AAUDIO_OK;
    }
    // Snapshot the endpoint pointer; close() may null the member.
    AAudioServiceEndpoint *endpoint = mServiceEndpoint;
    if (endpoint == nullptr) {
        ALOGE("AAudioServiceStreamShared::stop() missing endpoint");
        return AAUDIO_ERROR_INVALID_STATE;
    }
    // Unregister this client from the MMAP stream before stopping the mix.
    endpoint->getStreamInternal()->stopClient(mClientHandle);
    aaudio_result_t result = endpoint->stopStream(this);
    if (result != AAUDIO_OK) {
        ALOGE("AAudioServiceStreamShared::stop() mServiceEndpoint returned %d", result);
        disconnect();
    }
    return AAudioServiceStreamBase::stop();
}
| 282 | |
| 283 | /** |
| 284 | * Discard any data held by the underlying HAL or Service. |
| 285 | * |
| 286 | * An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete. |
| 287 | */ |
| 288 | aaudio_result_t AAudioServiceStreamShared::flush() { |
Phil Burk | ec89b2e | 2017-06-20 15:05:06 -0700 | [diff] [blame] | 289 | AAudioServiceEndpoint *endpoint = mServiceEndpoint; |
| 290 | if (endpoint == nullptr) { |
Phil Burk | a5222e2 | 2017-07-28 13:31:14 -0700 | [diff] [blame] | 291 | ALOGE("AAudioServiceStreamShared::flush() missing endpoint"); |
Phil Burk | ec89b2e | 2017-06-20 15:05:06 -0700 | [diff] [blame] | 292 | return AAUDIO_ERROR_INVALID_STATE; |
| 293 | } |
| 294 | if (mState != AAUDIO_STREAM_STATE_PAUSED) { |
Eric Laurent | cb4dae2 | 2017-07-01 19:39:32 -0700 | [diff] [blame] | 295 | ALOGE("AAudioServiceStreamShared::flush() stream not paused, state = %s", |
Phil Burk | ec89b2e | 2017-06-20 15:05:06 -0700 | [diff] [blame] | 296 | AAudio_convertStreamStateToText(mState)); |
| 297 | return AAUDIO_ERROR_INVALID_STATE; |
| 298 | } |
| 299 | // Data will get flushed when the client receives the FLUSHED event. |
| 300 | return AAudioServiceStreamBase::flush(); |
Phil Burk | c0c70e3 | 2017-02-09 13:18:38 -0800 | [diff] [blame] | 301 | } |
| 302 | |
/**
 * Close the stream: stop data flow, unregister from the shared endpoint,
 * release the endpoint, and free the shared-memory FIFO.
 *
 * Safe to call more than once; a second close is a no-op.
 */
aaudio_result_t AAudioServiceStreamShared::close() {
    if (mState == AAUDIO_STREAM_STATE_CLOSED) {
        return AAUDIO_OK;
    }

    // Ensure data flow has stopped before tearing anything down.
    stop();

    AAudioServiceEndpoint *endpoint = mServiceEndpoint;
    if (endpoint == nullptr) {
        return AAUDIO_ERROR_INVALID_STATE;
    }

    endpoint->unregisterStream(this);

    // NOTE(review): local named "mEndpointManager" uses a member-style prefix;
    // consider renaming to endpointManager.
    AAudioEndpointManager &mEndpointManager = AAudioEndpointManager::getInstance();
    mEndpointManager.closeEndpoint(endpoint);
    mServiceEndpoint = nullptr;

    // Free the shared-memory FIFO created in open().
    if (mAudioDataQueue != nullptr) {
        delete mAudioDataQueue;
        mAudioDataQueue = nullptr;
    }
    return AAudioServiceStreamBase::close();
}
| 327 | |
| 328 | /** |
| 329 | * Get an immutable description of the data queue created by this service. |
| 330 | */ |
| 331 | aaudio_result_t AAudioServiceStreamShared::getDownDataDescription(AudioEndpointParcelable &parcelable) |
| 332 | { |
| 333 | // Gather information on the data queue. |
| 334 | mAudioDataQueue->fillParcelable(parcelable, |
| 335 | parcelable.mDownDataQueueParcelable); |
| 336 | parcelable.mDownDataQueueParcelable.setFramesPerBurst(getFramesPerBurst()); |
| 337 | return AAUDIO_OK; |
| 338 | } |
| 339 | |
// Publish the latest transfer position/time; stored atomically so the binder
// thread in getFreeRunningPosition() can read it without locking.
void AAudioServiceStreamShared::markTransferTime(Timestamp &timestamp) {
    mAtomicTimestamp.write(timestamp);
}
Phil Burk | c0c70e3 | 2017-02-09 13:18:38 -0800 | [diff] [blame] | 343 | |
Phil Burk | 97350f9 | 2017-07-21 15:59:44 -0700 | [diff] [blame] | 344 | // Get timestamp that was written by the real-time service thread, eg. mixer. |
Phil Burk | c0c70e3 | 2017-02-09 13:18:38 -0800 | [diff] [blame] | 345 | aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition(int64_t *positionFrames, |
| 346 | int64_t *timeNanos) { |
Phil Burk | 97350f9 | 2017-07-21 15:59:44 -0700 | [diff] [blame] | 347 | if (mAtomicTimestamp.isValid()) { |
| 348 | Timestamp timestamp = mAtomicTimestamp.read(); |
| 349 | *positionFrames = timestamp.getPosition(); |
| 350 | *timeNanos = timestamp.getNanoseconds(); |
| 351 | return AAUDIO_OK; |
| 352 | } else { |
| 353 | return AAUDIO_ERROR_UNAVAILABLE; |
| 354 | } |
| 355 | } |
| 356 | |
| 357 | // Get timestamp from lower level service. |
| 358 | aaudio_result_t AAudioServiceStreamShared::getHardwareTimestamp(int64_t *positionFrames, |
| 359 | int64_t *timeNanos) { |
| 360 | |
| 361 | aaudio_result_t result = mServiceEndpoint->getTimestamp(positionFrames, timeNanos); |
| 362 | if (result == AAUDIO_OK) { |
| 363 | *positionFrames -= mTimestampPositionOffset.load(); // Offset from shared MMAP stream |
| 364 | } |
| 365 | return result; |
Phil Burk | c0c70e3 | 2017-02-09 13:18:38 -0800 | [diff] [blame] | 366 | } |