/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AAudioServiceEndpointPlay"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <assert.h>
#include <map>
#include <mutex>
#include <utils/Singleton.h>

#include "AAudioEndpointManager.h"
#include "AAudioServiceEndpoint.h"
#include <algorithm>
#include <vector>

#include "core/AudioStreamBuilder.h"
#include "AAudioServiceStreamShared.h"
#include "AAudioServiceEndpointPlay.h"
#include "AAudioServiceEndpointShared.h"
#include "AAudioServiceStreamBase.h"

using namespace android;  // TODO just import names needed
using namespace aaudio;   // TODO just import names needed

#define BURSTS_PER_BUFFER_DEFAULT   2

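// A playback endpoint owns a single shared AudioStreamInternalPlay (the MMAP output
// stream) and mixes every registered client stream into it from callbackLoop().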
AAudioServiceEndpointPlay::AAudioServiceEndpointPlay(AAudioService& audioService)
        : AAudioServiceEndpointShared(
                new AudioStreamInternalPlay(audioService.asAAudioServiceInterface(), true)) {}

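// Open the shared MMAP stream, then size the mixer for one burst and the stream buffer
// for a few bursts. A zero value from AAudioProperty_getMixerBursts() enables latency
// tuning and falls back to BURSTS_PER_BUFFER_DEFAULT.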
aaudio_result_t AAudioServiceEndpointPlay::open(const aaudio::AAudioStreamRequest &request) {
    aaudio_result_t result = AAudioServiceEndpointShared::open(request);
    if (result == AAUDIO_OK) {
        mMixer.allocate(getStreamInternal()->getSamplesPerFrame(),
                        getStreamInternal()->getFramesPerBurst());

        int32_t burstsPerBuffer = AAudioProperty_getMixerBursts();
        if (burstsPerBuffer == 0) {
            mLatencyTuningEnabled = true;
            burstsPerBuffer = BURSTS_PER_BUFFER_DEFAULT;
        }
        int32_t desiredBufferSize = burstsPerBuffer * getStreamInternal()->getFramesPerBurst();
        getStreamInternal()->setBufferSize(desiredBufferSize);
    }
    return result;
}

// Mix data from each application stream and write result to the shared MMAP stream.
void *AAudioServiceEndpointPlay::callbackLoop() {
    ALOGD("%s() entering >>>>>>>>>>>>>>> MIXER", __func__);
    aaudio_result_t result = AAUDIO_OK;
    int64_t timeoutNanos = getStreamInternal()->calculateReasonableTimeout();

    // result might be a frame count
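    // Each pass mixes one burst from every active client stream and then does a blocking
    // write of that burst to the shared stream; the blocking write paces this loop.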
    while (mCallbackEnabled.load() && getStreamInternal()->isActive() && (result >= 0)) {
        // Mix data from each active stream.
        mMixer.clear();

        { // brackets are for lock_guard
            int index = 0;
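            // Snapshot the write position of the shared MMAP stream; each client's read
            // counter is compared against it to derive that stream's timestamp offset.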
            int64_t mmapFramesWritten = getStreamInternal()->getFramesWritten();

            std::lock_guard<std::mutex> lock(mLockStreams);
            for (const auto& clientStream : mRegisteredStreams) {
                int64_t clientFramesRead = 0;
                bool allowUnderflow = true;

                if (clientStream->isSuspended()) {
                    continue; // dead stream
                }

                aaudio_stream_state_t state = clientStream->getState();
                if (state == AAUDIO_STREAM_STATE_STOPPING) {
                    allowUnderflow = false; // just read what is already in the FIFO
                } else if (state != AAUDIO_STREAM_STATE_STARTED) {
                    continue; // this stream is not running so skip it.
                }

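                // Streams registered on a shared playback endpoint are expected to be
                // AAudioServiceStreamShared instances, hence the downcast.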
                sp<AAudioServiceStreamShared> streamShared =
                        static_cast<AAudioServiceStreamShared *>(clientStream.get());

                {
                    // Lock the AudioFifo to protect against close.
                    std::lock_guard<std::mutex> lock(streamShared->audioDataQueueLock);
                    std::shared_ptr<SharedRingBuffer> audioDataQueue
                            = streamShared->getAudioDataQueue_l();
                    std::shared_ptr<FifoBuffer> fifo;
                    if (audioDataQueue && (fifo = audioDataQueue->getFifoBuffer())) {

                        // Determine the offset between the framePosition in the client's
                        // stream and in the underlying MMAP stream.
                        clientFramesRead = fifo->getReadCounter();
                        // These two indices refer to the same frame.
                        int64_t positionOffset = mmapFramesWritten - clientFramesRead;
                        streamShared->setTimestampPositionOffset(positionOffset);

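                        // Mix up to one burst of frames from this client's FIFO into
                        // track 'index' of the mixer's output buffer.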
                        int32_t framesMixed = mMixer.mix(index, fifo, allowUnderflow);

                        if (streamShared->isFlowing()) {
                            // Consider it an underflow if we got less than a burst
                            // after the data started flowing.
                            bool underflowed = allowUnderflow
                                               && framesMixed < mMixer.getFramesPerBurst();
                            if (underflowed) {
                                streamShared->incrementXRunCount();
                            }
                        } else if (framesMixed > 0) {
                            // Mark beginning of data flow after a start.
                            streamShared->setFlowing(true);
                        }
                        clientFramesRead = fifo->getReadCounter();
                    }
                }

                if (clientFramesRead > 0) {
                    // This timestamp represents the completion of data being read out of the
                    // client buffer. It is sent to the client and used in the timing model
                    // to decide when the client has room to write more data.
                    Timestamp timestamp(clientFramesRead, AudioClock::getNanoseconds());
                    streamShared->markTransferTime(timestamp);
                }

                index++; // just used for labelling tracks in systrace
            }
        }

        // Write mixer output to stream using a blocking write.
        result = getStreamInternal()->write(mMixer.getOutputBuffer(),
                                            getFramesPerBurst(), timeoutNanos);
        if (result == AAUDIO_ERROR_DISCONNECTED) {
            ALOGD("%s() write() returned AAUDIO_ERROR_DISCONNECTED", __func__);
            // We do not need the returned vector.
            (void) AAudioServiceEndpointShared::disconnectRegisteredStreams();
            break;
        } else if (result != getFramesPerBurst()) {
            ALOGW("callbackLoop() wrote %d / %d",
                  result, getFramesPerBurst());
            break;
        }
    }

    ALOGD("%s() exiting, enabled = %d, state = %d, result = %d <<<<<<<<<<<<< MIXER",
          __func__, mCallbackEnabled.load(), getStreamInternal()->getState(), result);
    return NULL; // TODO review
}