Migrate buffers during surface change
Migrate graphic buffers during a surface change in order to avoid
BufferQueue handling complexity later on.
Test: Manually using Chrome and the Google Photos app
Bug: 132302078
Bug: 130862880
Change-Id: Ifb348b5d6a8f5a89dcc10a9f0be075057a5d3a6d
diff --git a/media/codec2/vndk/C2AllocatorGralloc.cpp b/media/codec2/vndk/C2AllocatorGralloc.cpp
index e698bf4..286c48a 100644
--- a/media/codec2/vndk/C2AllocatorGralloc.cpp
+++ b/media/codec2/vndk/C2AllocatorGralloc.cpp
@@ -201,6 +201,22 @@
return res;
}
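+ // Update the bufferqueue information (generation number, producer id and
+ // slot) stored in the wrapped handle's ExtraData. Returns false if the
+ // handle is not a valid wrapped codec2 handle.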
+ static bool MigrateNativeHandle(
+ native_handle_t *handle,
+ uint32_t generation, uint64_t igbp_id, uint32_t igbp_slot) {
+ if (handle == nullptr || !isValid(handle)) {
+ return false;
+ }
+ ExtraData *ed = getExtraData(handle);
+ if (!ed) return false;
+ ed->generation = generation;
+ ed->igbp_id_lo = uint32_t(igbp_id & 0xFFFFFFFF);
+ ed->igbp_id_hi = uint32_t(igbp_id >> 32);
+ ed->igbp_slot = igbp_slot;
+ return true;
+ }
+
+
static native_handle_t* UnwrapNativeHandle(
const C2Handle *const handle) {
const ExtraData *xd = getExtraData(handle);
@@ -270,6 +286,13 @@
generation, igbp_id, igbp_slot);
}
+bool MigrateNativeCodec2GrallocHandle(
+ native_handle_t *handle,
+ uint32_t generation, uint64_t igbp_id, uint32_t igbp_slot) {
+ return C2HandleGralloc::MigrateNativeHandle(handle, generation, igbp_id, igbp_slot);
+}
+
+
class C2AllocationGralloc : public C2GraphicAllocation {
public:
virtual ~C2AllocationGralloc() override;
diff --git a/media/codec2/vndk/include/C2AllocatorGralloc.h b/media/codec2/vndk/include/C2AllocatorGralloc.h
index 05d989e..ee7524e 100644
--- a/media/codec2/vndk/include/C2AllocatorGralloc.h
+++ b/media/codec2/vndk/include/C2AllocatorGralloc.h
@@ -45,6 +45,16 @@
uint32_t generation = 0, uint64_t igbp_id = 0, uint32_t igbp_slot = 0);
/**
+ * When the gralloc handle is migrated to another bufferqueue, update the
+ * bufferqueue information stored in the handle.
+ *
+ * @return {@code true} when the handle is a wrapped codec2 handle and the
+ * update succeeded, {@code false} otherwise.
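+ *
+ * A minimal usage sketch (illustrative only; "grallocHandle", "newGeneration",
+ * "newBqId" and "newSlot" are placeholders for values obtained from the
+ * destination bufferqueue):
+ *
+ *   if (!MigrateNativeCodec2GrallocHandle(
+ *           grallocHandle, newGeneration, newBqId, newSlot)) {
+ *       // not a wrapped codec2 handle; bufferqueue info was not updated
+ *   }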
+ */
+bool MigrateNativeCodec2GrallocHandle(
+ native_handle_t *handle,
+ uint32_t generation, uint64_t igbp_id, uint32_t igbp_slot);
+
+/**
* \todo Get this from the buffer
*/
void _UnwrapNativeCodec2GrallocMetadata(
diff --git a/media/codec2/vndk/internal/C2BlockInternal.h b/media/codec2/vndk/internal/C2BlockInternal.h
index 84ce70a..4ae946a 100644
--- a/media/codec2/vndk/internal/C2BlockInternal.h
+++ b/media/codec2/vndk/internal/C2BlockInternal.h
@@ -206,23 +206,19 @@
*
* - GetBufferQueueData(): Returns generation, bqId and bqSlot.
* - HoldBlockFromBufferQueue(): Sets "held" status to true.
- * - YieldBlockToBufferQueue(): Sets "held" status to false.
- * - AssignBlockToBufferQueue(): Sets the bufferqueue assignment and
- * "held" status.
+ * - BeginTransferBlockToClient()/EndTransferBlockToClient():
+ * Clear "held" status to false if transfer was successful,
+ * otherwise "held" status remains true.
+ * - BeginAttachBlockToBufferQueue()/EndAttachBlockToBufferQueue():
+ * The will keep "held" status true if attach was eligible.
+ * Otherwise, "held" status is cleared to false. In that case,
+ * ownership of buffer should be transferred to bufferqueue.
+ * - DisplayBlockToBufferQueue()
+ * This will clear "held" status to false.
*
* All these functions operate on _C2BlockPoolData, which can be obtained by
* calling GetGraphicBlockPoolData().
*
- * HoldBlockFromBufferQueue() will mark the block as held, while
- * YieldBlockToBufferQueue() will do the opposite. These two functions do
- * not modify the bufferqueue assignment, so it is not wrong to call
- * HoldBlockFromBufferQueue() after YieldBlockToBufferQueue() if it can be
- * guaranteed that the block is not destroyed during the period between the
- * two calls.
- *
- * AssingBlockToBufferQueue() has a "held" status as an optional argument.
- * The default value is true.
- *
* Maintaining Consistency with IGraphicBufferProducer Operations
* ==============================================================
*
@@ -232,16 +228,20 @@
* information for _C2BlockPoolData, with "held" status set to true.
*
* queueBuffer()
- * - After queueBuffer() is called, YieldBlockToBufferQueue() should be
- * called.
+ * - Before queueBuffer() is called, DisplayBlockToBufferQueue() should be
+ * called to test eligibility. If it's not eligible, do not call
+ * queueBuffer().
*
- * attachBuffer()
- * - After attachBuffer() is called, AssignBlockToBufferQueue() should be
- * called with "held" status set to true.
+ * attachBuffer() - remote migration only.
+ * - Local migration on the blockpool side is done automatically by the
+ * blockpool.
+ * - Before attachBuffer(), BeginAttachBlockToBufferQueue() should be called
+ * to test eligibility.
+ * - After attachBuffer() is called, EndAttachBlockToBufferQueue() should
+ * be called. This will set "held" status to true. If it returns
+ * false, cancelBuffer() should be called.
*
- * detachBuffer()
- * - After detachBuffer() is called, HoldBlockFromBufferQueue() should be
- * called.
+ * detachBuffer() - no-op.
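+ *
+ * An illustrative sketch of the queueBuffer() path (placeholder names;
+ * error handling elided):
+ *
+ *   if (_C2BlockFactory::DisplayBlockToBufferQueue(poolData)) {
+ *       // eligible: call IGraphicBufferProducer::queueBuffer() on the slot
+ *   } else {
+ *       // not eligible: do not queue the buffer
+ *   }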
*/
/**
@@ -261,43 +261,12 @@
*/
static
bool GetBufferQueueData(
- const std::shared_ptr<_C2BlockPoolData>& poolData,
+ const std::shared_ptr<const _C2BlockPoolData>& poolData,
uint32_t* generation = nullptr,
uint64_t* bqId = nullptr,
int32_t* bqSlot = nullptr);
/**
- * Set bufferqueue assignment and "held" status to a block created by a
- * bufferqueue-based blockpool.
- *
- * \param poolData blockpool data associated to the block.
- * \param igbp \c IGraphicBufferProducer instance from the designated
- * bufferqueue.
- * \param generation Generation number that the buffer belongs to.
- * \param bqId Id of the bufferqueue that will own the buffer (block).
- * \param bqSlot Slot number of the buffer.
- * \param held Whether the block is held. This "held" status can be
- * changed later by calling YieldBlockToBufferQueue() or
- * HoldBlockFromBufferQueue().
- *
- * \return \c true if \p poolData is valid bufferqueue data;
- * \c false otherwise.
- *
- * Note: \p generation should match the latest generation number set on the
- * bufferqueue, and \p bqId should match the unique id for the bufferqueue
- * (obtainable by calling igbp->getUniqueId()).
- */
- static
- bool AssignBlockToBufferQueue(
- const std::shared_ptr<_C2BlockPoolData>& poolData,
- const ::android::sp<::android::hardware::graphics::bufferqueue::
- V2_0::IGraphicBufferProducer>& igbp,
- uint32_t generation,
- uint64_t bqId,
- int32_t bqSlot,
- bool held = true);
-
- /**
* Hold a block from the designated bufferqueue. This causes the destruction
* of the block to trigger a call to cancelBuffer().
*
@@ -305,6 +274,9 @@
* block. It does not check if that is the case.
*
* \param poolData blockpool data associated to the block.
+ * \param owner block owner from the client bufferqueue manager.
+ * If this has expired, the block is no longer owned by
+ * the client.
* \param igbp \c IGraphicBufferProducer instance to be assigned to the
* block. This is not needed when the block is local.
*
@@ -313,24 +285,96 @@
static
bool HoldBlockFromBufferQueue(
const std::shared_ptr<_C2BlockPoolData>& poolData,
+ const std::shared_ptr<int>& owner,
const ::android::sp<::android::hardware::graphics::bufferqueue::
V2_0::IGraphicBufferProducer>& igbp = nullptr);
/**
- * Yield a block to the designated bufferqueue. This causes the destruction
- * of the block not to trigger a call to cancelBuffer();
+ * Prepare a block to be transferred to another process. This blocks
+ * bufferqueue migration from happening. The block should be held.
*
* This function assumes that \p poolData comes from a bufferqueue-based
* block. It does not check if that is the case.
*
* \param poolData blockpool data associated to the block.
*
- * \return The previous held status.
+ * \return true if transfer is eligible, false otherwise.
*/
static
- bool YieldBlockToBufferQueue(
+ bool BeginTransferBlockToClient(
const std::shared_ptr<_C2BlockPoolData>& poolData);
+ /**
+ * Called after the transfer of the specified block has finished. Make sure
+ * that BeginTransferBlockToClient() was called before this call.
+ *
+ * This will unblock bufferqueue migration. If transfer result was
+ * successful, this causes the destruction of the block not to trigger a
+ * call to cancelBuffer().
+ * This function assumes that \p poolData comes from a bufferqueue-based
+ * block. It does not check if that is the case.
+ *
+ * \param poolData blockpool data associated to the block.
+ *
+ * \return true if a transfer had previously begun, false otherwise.
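+ *
+ * A typical usage sketch (illustrative; "poolData" and "sendToClient()" are
+ * placeholders for the caller's own block data and transport):
+ *
+ *   if (_C2BlockFactory::BeginTransferBlockToClient(poolData)) {
+ *       bool ok = sendToClient(block);  // hypothetical IPC call
+ *       _C2BlockFactory::EndTransferBlockToClient(poolData, ok);
+ *   }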
+ */
+ static
+ bool EndTransferBlockToClient(
+ const std::shared_ptr<_C2BlockPoolData>& poolData,
+ bool transferred);
+
+ /**
+ * Prepare a block to be migrated to another bufferqueue. This blocks
+ * rendering until migration has been finished. The block should be
+ * held.
+ *
+ * This function assumes that \p poolData comes from a bufferqueue-based
+ * block. It does not check if that is the case.
+ *
+ * \param poolData blockpool data associated to the block.
+ *
+ * \return true if migration is eligible, false otherwise.
+ */
+ static
+ bool BeginAttachBlockToBufferQueue(
+ const std::shared_ptr<_C2BlockPoolData>& poolData);
+
+ /**
+ * Called after migration of the specified block is finished. Make sure
+ * that BeginAttachBlockToBufferQueue() was called before this call.
+ *
+ * This will unblock rendering. If rendering is attempted during migration,
+ * this returns false. In that case, cancelBuffer() should be called.
+ * This function assumes that \p poolData comes from a bufferqueue-based
+ * block. It does not check if that is the case.
+ *
+ * \param poolData blockpool data associated to the block.
+ *
+ * \return true if the attach was completed successfully, false otherwise.
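+ *
+ * A usage sketch (illustrative; "poolData", "owner", "igbp", "generation"
+ * and "bqId" are placeholders; attachBuffer() stands for the producer call):
+ *
+ *   if (_C2BlockFactory::BeginAttachBlockToBufferQueue(poolData)) {
+ *       // call igbp->attachBuffer(...) here to obtain "slot"
+ *       if (!_C2BlockFactory::EndAttachBlockToBufferQueue(
+ *               poolData, owner, igbp, generation, bqId, slot)) {
+ *           // display happened during the attach; call cancelBuffer()
+ *       }
+ *   }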
+ */
+ static
+ bool EndAttachBlockToBufferQueue(
+ const std::shared_ptr<_C2BlockPoolData>& poolData,
+ const std::shared_ptr<int>& owner,
+ const ::android::sp<::android::hardware::graphics::bufferqueue::
+ V2_0::IGraphicBufferProducer>& igbp,
+ uint32_t generation,
+ uint64_t bqId,
+ int32_t bqSlot);
+
+ /**
+ * Indicates that a block will be rendered very soon.
+ *
+ * This function assumes that \p poolData comes from a bufferqueue-based
+ * block. It does not check if that is the case.
+ *
+ * \param poolData blockpool data associated to the block.
+ *
+ * \return true if rendering is eligible, false otherwise.
+ */
+ static
+ bool DisplayBlockToBufferQueue(
+ const std::shared_ptr<_C2BlockPoolData>& poolData);
};
#endif // ANDROID_STAGEFRIGHT_C2BLOCK_INTERNAL_H_
diff --git a/media/codec2/vndk/platform/C2BqBuffer.cpp b/media/codec2/vndk/platform/C2BqBuffer.cpp
index 3f40e56..5fa48a8 100644
--- a/media/codec2/vndk/platform/C2BqBuffer.cpp
+++ b/media/codec2/vndk/platform/C2BqBuffer.cpp
@@ -61,8 +61,13 @@
uint32_t generation;
uint64_t bqId;
int32_t bqSlot;
+ bool transfer; // local transfer to remote
+ bool attach; // attach on remote
+ bool display; // display on remote
+ std::weak_ptr<int> owner;
sp<HGraphicBufferProducer> igbp;
std::shared_ptr<C2BufferQueueBlockPool::Impl> localPool;
+ mutable std::mutex lock;
virtual type_t getType() const override {
return TYPE_BUFFERQUEUE;
@@ -71,7 +76,8 @@
// Create a remote BlockPoolData.
C2BufferQueueBlockPoolData(
uint32_t generation, uint64_t bqId, int32_t bqSlot,
- const sp<HGraphicBufferProducer>& producer = nullptr);
+ const std::shared_ptr<int> &owner,
+ const sp<HGraphicBufferProducer>& producer);
// Create a local BlockPoolData.
C2BufferQueueBlockPoolData(
@@ -80,15 +86,19 @@
virtual ~C2BufferQueueBlockPoolData() override;
+ int migrate(const sp<HGraphicBufferProducer>& producer,
+ uint32_t toGeneration, uint64_t toBqId,
+ sp<GraphicBuffer> *buffers, uint32_t oldGeneration);
};
bool _C2BlockFactory::GetBufferQueueData(
- const std::shared_ptr<_C2BlockPoolData>& data,
+ const std::shared_ptr<const _C2BlockPoolData>& data,
uint32_t* generation, uint64_t* bqId, int32_t* bqSlot) {
if (data && data->getType() == _C2BlockPoolData::TYPE_BUFFERQUEUE) {
if (generation) {
- const std::shared_ptr<C2BufferQueueBlockPoolData> poolData =
- std::static_pointer_cast<C2BufferQueueBlockPoolData>(data);
+ const std::shared_ptr<const C2BufferQueueBlockPoolData> poolData =
+ std::static_pointer_cast<const C2BufferQueueBlockPoolData>(data);
+ std::scoped_lock<std::mutex> lock(poolData->lock);
*generation = poolData->generation;
if (bqId) {
*bqId = poolData->bqId;
@@ -102,32 +112,15 @@
return false;
}
-bool _C2BlockFactory::AssignBlockToBufferQueue(
- const std::shared_ptr<_C2BlockPoolData>& data,
- const sp<HGraphicBufferProducer>& igbp,
- uint32_t generation,
- uint64_t bqId,
- int32_t bqSlot,
- bool held) {
- if (data && data->getType() == _C2BlockPoolData::TYPE_BUFFERQUEUE) {
- const std::shared_ptr<C2BufferQueueBlockPoolData> poolData =
- std::static_pointer_cast<C2BufferQueueBlockPoolData>(data);
- poolData->igbp = igbp;
- poolData->generation = generation;
- poolData->bqId = bqId;
- poolData->bqSlot = bqSlot;
- poolData->held = held;
- return true;
- }
- return false;
-}
-
bool _C2BlockFactory::HoldBlockFromBufferQueue(
const std::shared_ptr<_C2BlockPoolData>& data,
+ const std::shared_ptr<int>& owner,
const sp<HGraphicBufferProducer>& igbp) {
const std::shared_ptr<C2BufferQueueBlockPoolData> poolData =
std::static_pointer_cast<C2BufferQueueBlockPoolData>(data);
+ std::scoped_lock<std::mutex> lock(poolData->lock);
if (!poolData->local) {
+ poolData->owner = owner;
poolData->igbp = igbp;
}
if (poolData->held) {
@@ -138,12 +131,86 @@
return true;
}
-bool _C2BlockFactory::YieldBlockToBufferQueue(
+bool _C2BlockFactory::BeginTransferBlockToClient(
const std::shared_ptr<_C2BlockPoolData>& data) {
const std::shared_ptr<C2BufferQueueBlockPoolData> poolData =
std::static_pointer_cast<C2BufferQueueBlockPoolData>(data);
- if (!poolData->held) {
+ std::scoped_lock<std::mutex> lock(poolData->lock);
+ poolData->transfer = true;
+ return true;
+}
+
+bool _C2BlockFactory::EndTransferBlockToClient(
+ const std::shared_ptr<_C2BlockPoolData>& data,
+ bool transfer) {
+ const std::shared_ptr<C2BufferQueueBlockPoolData> poolData =
+ std::static_pointer_cast<C2BufferQueueBlockPoolData>(data);
+ std::scoped_lock<std::mutex> lock(poolData->lock);
+ poolData->transfer = false;
+ if (transfer) {
poolData->held = false;
+ }
+ return true;
+}
+
+bool _C2BlockFactory::BeginAttachBlockToBufferQueue(
+ const std::shared_ptr<_C2BlockPoolData>& data) {
+ const std::shared_ptr<C2BufferQueueBlockPoolData> poolData =
+ std::static_pointer_cast<C2BufferQueueBlockPoolData>(data);
+ std::scoped_lock<std::mutex> lock(poolData->lock);
+ if (poolData->local || poolData->display ||
+ poolData->attach || !poolData->held) {
+ return false;
+ }
+ if (poolData->bqId == 0) {
+ return false;
+ }
+ poolData->attach = true;
+ return true;
+}
+
+// if display was tried during attach, buffer should be retired ASAP.
+bool _C2BlockFactory::EndAttachBlockToBufferQueue(
+ const std::shared_ptr<_C2BlockPoolData>& data,
+ const std::shared_ptr<int>& owner,
+ const sp<HGraphicBufferProducer>& igbp,
+ uint32_t generation,
+ uint64_t bqId,
+ int32_t bqSlot) {
+ const std::shared_ptr<C2BufferQueueBlockPoolData> poolData =
+ std::static_pointer_cast<C2BufferQueueBlockPoolData>(data);
+ std::scoped_lock<std::mutex> lock(poolData->lock);
+ if (poolData->local || !poolData->attach ) {
+ return false;
+ }
+ if (poolData->display) {
+ poolData->attach = false;
+ poolData->held = false;
+ return false;
+ }
+ poolData->attach = false;
+ poolData->held = true;
+ poolData->owner = owner;
+ poolData->igbp = igbp;
+ poolData->generation = generation;
+ poolData->bqId = bqId;
+ poolData->bqSlot = bqSlot;
+ return true;
+}
+
+bool _C2BlockFactory::DisplayBlockToBufferQueue(
+ const std::shared_ptr<_C2BlockPoolData>& data) {
+ const std::shared_ptr<C2BufferQueueBlockPoolData> poolData =
+ std::static_pointer_cast<C2BufferQueueBlockPoolData>(data);
+ std::scoped_lock<std::mutex> lock(poolData->lock);
+ if (poolData->local || poolData->display || !poolData->held) {
+ return false;
+ }
+ if (poolData->bqId == 0) {
+ return false;
+ }
+ poolData->display = true;
+ if (poolData->attach) {
return false;
}
poolData->held = false;
@@ -175,7 +242,9 @@
std::shared_ptr<C2BufferQueueBlockPoolData> poolData =
std::make_shared<C2BufferQueueBlockPoolData>(generation,
bqId,
- (int32_t)bqSlot);
+ (int32_t)bqSlot,
+ nullptr,
+ nullptr);
block = _C2BlockFactory::CreateGraphicBlock(alloc, poolData);
} else {
block = _C2BlockFactory::CreateGraphicBlock(alloc);
@@ -186,6 +255,78 @@
return nullptr;
}
+namespace {
+
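+// Returns the current CLOCK_MONOTONIC time in microseconds.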
+int64_t getTimestampNow() {
+ int64_t stamp;
+ struct timespec ts;
+ // TODO: CLOCK_MONOTONIC_COARSE?
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ stamp = ts.tv_nsec / 1000;
+ stamp += (ts.tv_sec * 1000000LL);
+ return stamp;
+}
+
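+// Probe the producer's current generation number: dequeue a small scratch
+// buffer, read the generation via requestBuffer(), then detach the buffer
+// so the slot is not left occupied.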
+bool getGenerationNumber(const sp<HGraphicBufferProducer> &producer,
+ uint32_t *generation) {
+ status_t status{};
+ int slot{};
+ bool bufferNeedsReallocation{};
+ sp<Fence> fence = new Fence();
+
+ using Input = HGraphicBufferProducer::DequeueBufferInput;
+ using Output = HGraphicBufferProducer::DequeueBufferOutput;
+ Return<void> transResult = producer->dequeueBuffer(
+ Input{640, 480, HAL_PIXEL_FORMAT_YCBCR_420_888, 0},
+ [&status, &slot, &bufferNeedsReallocation, &fence]
+ (HStatus hStatus, int32_t hSlot, Output const& hOutput) {
+ slot = static_cast<int>(hSlot);
+ if (!h2b(hStatus, &status) || !h2b(hOutput.fence, &fence)) {
+ status = ::android::BAD_VALUE;
+ } else {
+ bufferNeedsReallocation =
+ hOutput.bufferNeedsReallocation;
+ }
+ });
+ if (!transResult.isOk() || status != android::OK) {
+ return false;
+ }
+ HFenceWrapper hFenceWrapper{};
+ if (!b2h(fence, &hFenceWrapper)) {
+ (void)producer->detachBuffer(static_cast<int32_t>(slot)).isOk();
+ ALOGE("Invalid fence received from dequeueBuffer.");
+ return false;
+ }
+ sp<GraphicBuffer> slotBuffer = new GraphicBuffer();
+ // N.B. This assumes requestBuffer() returns an existing allocation
+ // instead of a new allocation.
+ transResult = producer->requestBuffer(
+ slot,
+ [&status, &slotBuffer, &generation](
+ HStatus hStatus,
+ HBuffer const& hBuffer,
+ uint32_t generationNumber){
+ if (h2b(hStatus, &status) &&
+ h2b(hBuffer, &slotBuffer) &&
+ slotBuffer) {
+ *generation = generationNumber;
+ slotBuffer->setGenerationNumber(generationNumber);
+ } else {
+ status = android::BAD_VALUE;
+ }
+ });
+ if (!transResult.isOk()) {
+ return false;
+ } else if (status != android::NO_ERROR) {
+ (void)producer->detachBuffer(static_cast<int32_t>(slot)).isOk();
+ return false;
+ }
+ (void)producer->detachBuffer(static_cast<int32_t>(slot)).isOk();
+ return true;
+}
+
+} // namespace
+
class C2BufferQueueBlockPool::Impl
: public std::enable_shared_from_this<C2BufferQueueBlockPool::Impl> {
private:
@@ -227,6 +368,7 @@
});
if (!transResult.isOk() || status != android::OK) {
if (transResult.isOk()) {
+ ++mDqFailure;
if (status == android::INVALID_OPERATION ||
status == android::TIMED_OUT ||
status == android::WOULD_BLOCK) {
@@ -238,6 +380,8 @@
ALOGD("cannot dequeue buffer %d", status);
return C2_BAD_VALUE;
}
+ mDqFailure = 0;
+ mLastDqTs = getTimestampNow();
}
HFenceWrapper hFenceWrapper{};
if (!b2h(fence, &hFenceWrapper)) {
@@ -319,6 +463,7 @@
slotBuffer->getGenerationNumber(),
mProducerId, slot,
shared_from_this());
+ mPoolDatas[slot] = poolData;
*block = _C2BlockFactory::CreateGraphicBlock(alloc, poolData);
return C2_OK;
}
@@ -331,7 +476,9 @@
public:
Impl(const std::shared_ptr<C2Allocator> &allocator)
- : mInit(C2_OK), mProducerId(0), mAllocator(allocator) {
+ : mInit(C2_OK), mProducerId(0), mGeneration(0),
+ mDqFailure(0), mLastDqTs(0), mLastDqLogTs(0),
+ mAllocator(allocator) {
}
~Impl() {
@@ -361,6 +508,19 @@
static int kMaxIgbpRetryDelayUs = 10000;
std::unique_lock<std::mutex> lock(mMutex);
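+ // Rate-limited diagnostics: at most every ~5 seconds, warn if
+ // dequeueBuffer() has not succeeded recently or keeps failing.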
+ if (mLastDqLogTs == 0) {
+ mLastDqLogTs = getTimestampNow();
+ } else {
+ int64_t now = getTimestampNow();
+ if (now >= mLastDqLogTs + 5000000) {
+ if (now >= mLastDqTs + 1000000 || mDqFailure > 5) {
+ ALOGW("last successful dequeue was %lld us ago, "
+ "%zu consecutive failures",
+ (long long)(now - mLastDqTs), mDqFailure);
+ }
+ mLastDqLogTs = now;
+ }
+ }
if (mProducerId == 0) {
std::shared_ptr<C2GraphicAllocation> alloc;
c2_status_t err = mAllocator->newGraphicAllocation(
@@ -386,12 +546,14 @@
}
void setRenderCallback(const OnRenderCallback &renderCallback) {
- std::lock_guard<std::mutex> lock(mMutex);
+ std::scoped_lock<std::mutex> lock(mMutex);
mRenderCallback = renderCallback;
}
void configureProducer(const sp<HGraphicBufferProducer> &producer) {
uint64_t producerId = 0;
+ uint32_t generation = 0;
+ bool haveGeneration = false;
if (producer) {
Return<uint64_t> transResult = producer->getUniqueId();
if (!transResult.isOk()) {
@@ -399,9 +561,15 @@
return;
}
producerId = static_cast<uint64_t>(transResult);
+ // TODO: provide generation number from parameter.
+ haveGeneration = getGenerationNumber(producer, &generation);
}
+ int migrated = 0;
{
- std::lock_guard<std::mutex> lock(mMutex);
+ sp<GraphicBuffer> buffers[NUM_BUFFER_SLOTS];
+ std::weak_ptr<C2BufferQueueBlockPoolData>
+ poolDatas[NUM_BUFFER_SLOTS];
+ std::scoped_lock<std::mutex> lock(mMutex);
bool noInit = false;
for (int i = 0; i < NUM_BUFFER_SLOTS; ++i) {
if (!noInit && mProducer) {
@@ -410,46 +578,88 @@
noInit = !transResult.isOk() ||
static_cast<HStatus>(transResult) == HStatus::NO_INIT;
}
- mBuffers[i].clear();
}
- if (producer) {
+ int32_t oldGeneration = mGeneration;
+ if (producer && haveGeneration) {
mProducer = producer;
mProducerId = producerId;
+ mGeneration = generation;
} else {
mProducer = nullptr;
mProducerId = 0;
+ mGeneration = 0;
+ ALOGW("invalid producer producer(%d), generation(%d)",
+ (bool)producer, haveGeneration);
}
+ if (mProducer) { // migrate buffers
+ for (int i = 0; i < NUM_BUFFER_SLOTS; ++i) {
+ std::shared_ptr<C2BufferQueueBlockPoolData> data =
+ mPoolDatas[i].lock();
+ if (data) {
+ int slot = data->migrate(
+ mProducer, generation,
+ producerId, mBuffers, oldGeneration);
+ if (slot >= 0) {
+ buffers[slot] = mBuffers[i];
+ poolDatas[slot] = data;
+ ++migrated;
+ }
+ }
+ }
+ }
+ for (int i = 0; i < NUM_BUFFER_SLOTS; ++i) {
+ mBuffers[i] = buffers[i];
+ mPoolDatas[i] = poolDatas[i];
+ }
+ }
+ if (producer && haveGeneration) {
+ ALOGD("local generation change %u , "
+ "bqId: %llu migrated buffers # %d",
+ generation, (unsigned long long)producerId, migrated);
}
}
private:
friend struct C2BufferQueueBlockPoolData;
- void cancel(uint64_t igbp_id, int32_t igbp_slot) {
- std::lock_guard<std::mutex> lock(mMutex);
- if (igbp_id == mProducerId && mProducer) {
+ void cancel(uint32_t generation, uint64_t igbp_id, int32_t igbp_slot) {
+ bool cancelled = false;
+ {
+ std::scoped_lock<std::mutex> lock(mMutex);
+ if (generation == mGeneration && igbp_id == mProducerId && mProducer) {
(void)mProducer->cancelBuffer(igbp_slot, hidl_handle{}).isOk();
+ cancelled = true;
+ }
}
}
c2_status_t mInit;
uint64_t mProducerId;
+ uint32_t mGeneration;
OnRenderCallback mRenderCallback;
+ size_t mDqFailure;
+ int64_t mLastDqTs;
+ int64_t mLastDqLogTs;
+
const std::shared_ptr<C2Allocator> mAllocator;
std::mutex mMutex;
sp<HGraphicBufferProducer> mProducer;
+ sp<HGraphicBufferProducer> mSavedProducer;
sp<GraphicBuffer> mBuffers[NUM_BUFFER_SLOTS];
+ std::weak_ptr<C2BufferQueueBlockPoolData> mPoolDatas[NUM_BUFFER_SLOTS];
};
C2BufferQueueBlockPoolData::C2BufferQueueBlockPoolData(
uint32_t generation, uint64_t bqId, int32_t bqSlot,
+ const std::shared_ptr<int>& owner,
const sp<HGraphicBufferProducer>& producer) :
held(producer && bqId != 0), local(false),
generation(generation), bqId(bqId), bqSlot(bqSlot),
- igbp(producer),
+ transfer(false), attach(false), display(false),
+ owner(owner), igbp(producer),
localPool() {
}
@@ -458,6 +668,7 @@
const std::shared_ptr<C2BufferQueueBlockPool::Impl>& pool) :
held(true), local(true),
generation(generation), bqId(bqId), bqSlot(bqSlot),
+ transfer(false), attach(false), display(false),
igbp(pool ? pool->mProducer : nullptr),
localPool(pool) {
}
@@ -466,12 +677,78 @@
if (!held || bqId == 0) {
return;
}
- if (local && localPool) {
- localPool->cancel(bqId, bqSlot);
- } else if (igbp) {
+ if (local) {
+ if (localPool) {
+ localPool->cancel(generation, bqId, bqSlot);
+ }
+ } else if (igbp && !owner.expired()) {
igbp->cancelBuffer(bqSlot, hidl_handle{}).isOk();
}
}
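+// Migrate this block to a new producer: bump the GraphicBuffer's generation
+// number and attach it to the new producer. Returns the new slot index on
+// success, or -1 if the block is not held, is not local, its slot is not in
+// effect, the target is the same bufferqueue, or a transfer is in progress.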
+int C2BufferQueueBlockPoolData::migrate(
+ const sp<HGraphicBufferProducer>& producer,
+ uint32_t toGeneration, uint64_t toBqId,
+ sp<GraphicBuffer> *buffers, uint32_t oldGeneration) {
+ std::scoped_lock<std::mutex> l(lock);
+ if (!held || bqId == 0) {
+ ALOGV("buffer is not owned");
+ return -1;
+ }
+ if (!local || !localPool) {
+ ALOGV("pool is not local");
+ return -1;
+ }
+ if (bqSlot < 0 || bqSlot >= NUM_BUFFER_SLOTS || !buffers[bqSlot]) {
+ ALOGV("slot is not in effect");
+ return -1;
+ }
+ if (toGeneration == generation && bqId == toBqId) {
+ ALOGV("cannot migrate to same bufferqueue");
+ return -1;
+ }
+ if (oldGeneration != generation) {
+ ALOGV("cannot migrate stale buffer");
+ return -1;
+ }
+ if (transfer) {
+ // either transferred or detached.
+ ALOGV("buffer is in transfer");
+ return -1;
+ }
+ sp<GraphicBuffer> const& graphicBuffer = buffers[bqSlot];
+ graphicBuffer->setGenerationNumber(toGeneration);
+
+ HBuffer hBuffer{};
+ uint32_t hGenerationNumber{};
+ if (!b2h(graphicBuffer, &hBuffer, &hGenerationNumber)) {
+ ALOGD("I to O conversion failed");
+ return -1;
+ }
+
+ bool converted{};
+ status_t bStatus{};
+ int slot;
+ int *outSlot = &slot;
+ Return<void> transResult =
+ producer->attachBuffer(hBuffer, hGenerationNumber,
+ [&converted, &bStatus, outSlot](
+ HStatus hStatus, int32_t hSlot, bool releaseAll) {
+ converted = h2b(hStatus, &bStatus);
+ *outSlot = static_cast<int>(hSlot);
+ if (converted && releaseAll && bStatus == android::OK) {
+ bStatus = android::INVALID_OPERATION;
+ }
+ });
+ if (!transResult.isOk() || !converted || bStatus != android::OK) {
+ ALOGD("attach failed %d", static_cast<int>(bStatus));
+ return -1;
+ }
+ ALOGV("local migration from gen %u : %u slot %d : %d",
+ generation, toGeneration, bqSlot, slot);
+ generation = toGeneration;
+ bqId = toBqId;
+ bqSlot = slot;
+ return slot;
+}
C2BufferQueueBlockPool::C2BufferQueueBlockPool(
const std::shared_ptr<C2Allocator> &allocator, const local_id_t localId)