Head-tracking library for Immersive Audio
See README.md for details.
Bug: 188502620
Test: atest --host libheadtracking-test
Change-Id: I34201c4780e5e581cc96449bd89863bcbc250783
diff --git a/media/libheadtracking/Android.bp b/media/libheadtracking/Android.bp
new file mode 100644
index 0000000..6141824
--- /dev/null
+++ b/media/libheadtracking/Android.bp
@@ -0,0 +1,40 @@
+cc_library {
+ name: "libheadtracking",
+ host_supported: true,
+ srcs: [
+ "HeadTrackingProcessor.cpp",
+ "ModeSelector.cpp",
+ "Pose.cpp",
+ "PoseDriftCompensator.cpp",
+ "PoseRateLimiter.cpp",
+ "QuaternionUtil.cpp",
+ "ScreenHeadFusion.cpp",
+ "Twist.cpp",
+ ],
+ export_include_dirs: [
+ "include",
+ ],
+ header_libs: [
+ "libeigen",
+ ],
+ export_header_lib_headers: [
+ "libeigen",
+ ],
+}
+
+cc_test_host {
+ name: "libheadtracking-test",
+ srcs: [
+ "HeadTrackingProcessor-test.cpp",
+ "ModeSelector-test.cpp",
+ "Pose-test.cpp",
+ "PoseDriftCompensator-test.cpp",
+ "PoseRateLimiter-test.cpp",
+ "QuaternionUtil-test.cpp",
+ "ScreenHeadFusion-test.cpp",
+ "Twist-test.cpp",
+ ],
+ shared_libs: [
+ "libheadtracking",
+ ],
+}
diff --git a/media/libheadtracking/HeadTrackingProcessor-test.cpp b/media/libheadtracking/HeadTrackingProcessor-test.cpp
new file mode 100644
index 0000000..755f415
--- /dev/null
+++ b/media/libheadtracking/HeadTrackingProcessor-test.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/HeadTrackingProcessor.h"
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+using Options = HeadTrackingProcessor::Options;
+
+TEST(HeadTrackingProcessor, Initial) {
+ for (auto mode : {HeadTrackingMode::STATIC, HeadTrackingMode::WORLD_RELATIVE,
+ HeadTrackingMode::SCREEN_RELATIVE}) {
+ std::unique_ptr<HeadTrackingProcessor> processor =
+ createHeadTrackingProcess(Options{}, mode);
+ processor->calculate(0);
+ EXPECT_EQ(processor->getActualMode(), HeadTrackingMode::STATIC);
+ EXPECT_EQ(processor->getHeadToStagePose(), Pose3f());
+ }
+}
+
+TEST(HeadTrackingProcessor, BasicComposition) {
+ const Pose3f worldToHead{{1, 2, 3}, Quaternionf::UnitRandom()};
+ const Pose3f worldToScreen{{4, 5, 6}, Quaternionf::UnitRandom()};
+ const Pose3f screenToStage{{7, 8, 9}, Quaternionf::UnitRandom()};
+ const float physicalToLogical = M_PI_2;
+
+ std::unique_ptr<HeadTrackingProcessor> processor =
+ createHeadTrackingProcess(Options{}, HeadTrackingMode::SCREEN_RELATIVE);
+ processor->setWorldToHeadPose(0, worldToHead, Twist3f());
+ processor->setWorldToScreenPose(0, worldToScreen);
+ processor->setScreenToStagePose(screenToStage);
+ processor->setDisplayOrientation(physicalToLogical);
+ processor->calculate(0);
+ ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::SCREEN_RELATIVE);
+ EXPECT_EQ(processor->getHeadToStagePose(), worldToHead.inverse() * worldToScreen *
+ Pose3f(rotateY(-physicalToLogical)) *
+ screenToStage);
+
+ processor->setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+ processor->calculate(0);
+ ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::WORLD_RELATIVE);
+ EXPECT_EQ(processor->getHeadToStagePose(), worldToHead.inverse() * screenToStage);
+
+ processor->setDesiredMode(HeadTrackingMode::STATIC);
+ processor->calculate(0);
+ ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::STATIC);
+ EXPECT_EQ(processor->getHeadToStagePose(), screenToStage);
+}
+
+TEST(HeadTrackingProcessor, Prediction) {
+ const Pose3f worldToHead{{1, 2, 3}, Quaternionf::UnitRandom()};
+ const Twist3f headTwist{{4, 5, 6}, quaternionToRotationVector(Quaternionf::UnitRandom()) / 10};
+ const Pose3f worldToScreen{{4, 5, 6}, Quaternionf::UnitRandom()};
+
+ std::unique_ptr<HeadTrackingProcessor> processor = createHeadTrackingProcess(
+ Options{.predictionDuration = 2.f}, HeadTrackingMode::WORLD_RELATIVE);
+ processor->setWorldToHeadPose(0, worldToHead, headTwist);
+ processor->setWorldToScreenPose(0, worldToScreen);
+ processor->calculate(0);
+ ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::WORLD_RELATIVE);
+ EXPECT_EQ(processor->getHeadToStagePose(), (worldToHead * integrate(headTwist, 2.f)).inverse());
+
+ processor->setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
+ processor->calculate(0);
+ ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::SCREEN_RELATIVE);
+ EXPECT_EQ(processor->getHeadToStagePose(),
+ (worldToHead * integrate(headTwist, 2.f)).inverse() * worldToScreen);
+
+ processor->setDesiredMode(HeadTrackingMode::STATIC);
+ processor->calculate(0);
+ ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::STATIC);
+ EXPECT_EQ(processor->getHeadToStagePose(), Pose3f());
+}
+
+TEST(HeadTrackingProcessor, SmoothModeSwitch) {
+ const Pose3f targetHeadToWorld = Pose3f({4, 0, 0}, rotateZ(M_PI / 2));
+
+ std::unique_ptr<HeadTrackingProcessor> processor = createHeadTrackingProcess(
+ Options{.maxTranslationalVelocity = 1}, HeadTrackingMode::STATIC);
+
+ processor->calculate(0);
+
+ processor->setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+ processor->setWorldToHeadPose(0, targetHeadToWorld.inverse(), Twist3f());
+
+ // We're expecting a gradual move to the target.
+ processor->calculate(0);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, processor->getActualMode());
+ EXPECT_EQ(processor->getHeadToStagePose(), Pose3f());
+
+ processor->calculate(2);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, processor->getActualMode());
+ EXPECT_EQ(processor->getHeadToStagePose(), Pose3f({2, 0, 0}, rotateZ(M_PI / 4)));
+
+ processor->calculate(4);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, processor->getActualMode());
+ EXPECT_EQ(processor->getHeadToStagePose(), targetHeadToWorld);
+
+ // Now that we've reached the target, we should no longer be rate limiting.
+ processor->setWorldToHeadPose(4, Pose3f(), Twist3f());
+ processor->calculate(5);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, processor->getActualMode());
+ EXPECT_EQ(processor->getHeadToStagePose(), Pose3f());
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/HeadTrackingProcessor.cpp b/media/libheadtracking/HeadTrackingProcessor.cpp
new file mode 100644
index 0000000..fe91419
--- /dev/null
+++ b/media/libheadtracking/HeadTrackingProcessor.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/HeadTrackingProcessor.h"
+
+#include "ModeSelector.h"
+#include "PoseDriftCompensator.h"
+#include "QuaternionUtil.h"
+#include "ScreenHeadFusion.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+class HeadTrackingProcessorImpl : public HeadTrackingProcessor {
+ public:
+ HeadTrackingProcessorImpl(const Options& options, HeadTrackingMode initialMode)
+ : mOptions(options),
+ mHeadPoseDriftCompensator(PoseDriftCompensator::Options{
+ .translationalDriftTimeConstant = options.translationalDriftTimeConstant,
+ .rotationalDriftTimeConstant = options.rotationalDriftTimeConstant,
+ }),
+ mScreenPoseDriftCompensator(PoseDriftCompensator::Options{
+ .translationalDriftTimeConstant = options.translationalDriftTimeConstant,
+ .rotationalDriftTimeConstant = options.rotationalDriftTimeConstant,
+ }),
+ mModeSelector(ModeSelector::Options{.freshnessTimeout = options.freshnessTimeout},
+ initialMode),
+ mRateLimiter(PoseRateLimiter::Options{
+ .maxTranslationalVelocity = options.maxTranslationalVelocity,
+ .maxRotationalVelocity = options.maxRotationalVelocity}) {}
+
+ void setDesiredMode(HeadTrackingMode mode) override { mModeSelector.setDesiredMode(mode); }
+
+ void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead,
+ const Twist3f& headTwist) override {
+ Pose3f predictedWorldToHead =
+ worldToHead * integrate(headTwist, mOptions.predictionDuration);
+ mHeadPoseDriftCompensator.setInput(timestamp, predictedWorldToHead);
+ mWorldToHeadTimestamp = timestamp;
+ }
+
+ void setWorldToScreenPose(int64_t timestamp, const Pose3f& worldToScreen) override {
+ mScreenPoseDriftCompensator.setInput(timestamp, worldToScreen);
+ mWorldToScreenTimestamp = timestamp;
+ }
+
+ void setScreenToStagePose(const Pose3f& screenToStage) override {
+ mModeSelector.setScreenToStagePose(screenToStage);
+ }
+
+ void setDisplayOrientation(float physicalToLogicalAngle) override {
+ if (mPhysicalToLogicalAngle != physicalToLogicalAngle) {
+ mRateLimiter.enable();
+ }
+ mPhysicalToLogicalAngle = physicalToLogicalAngle;
+ }
+
+ void calculate(int64_t timestamp) override {
+ if (mWorldToHeadTimestamp.has_value()) {
+ const Pose3f worldToHead = mHeadPoseDriftCompensator.getOutput();
+ mScreenHeadFusion.setWorldToHeadPose(mWorldToHeadTimestamp.value(), worldToHead);
+ mModeSelector.setWorldToHeadPose(mWorldToHeadTimestamp.value(), worldToHead);
+ }
+
+ if (mWorldToScreenTimestamp.has_value()) {
+ const Pose3f worldToLogicalScreen = mScreenPoseDriftCompensator.getOutput() *
+ Pose3f(rotateY(-mPhysicalToLogicalAngle));
+ mScreenHeadFusion.setWorldToScreenPose(mWorldToScreenTimestamp.value(),
+ worldToLogicalScreen);
+ }
+
+ auto maybeScreenToHead = mScreenHeadFusion.calculate();
+ if (maybeScreenToHead.has_value()) {
+ mModeSelector.setScreenToHeadPose(maybeScreenToHead->timestamp,
+ maybeScreenToHead->pose);
+ } else {
+ mModeSelector.setScreenToHeadPose(timestamp, std::nullopt);
+ }
+
+ HeadTrackingMode prevMode = mModeSelector.getActualMode();
+ mModeSelector.calculate(timestamp);
+ if (mModeSelector.getActualMode() != prevMode) {
+ // Mode has changed, enable rate limiting.
+ mRateLimiter.enable();
+ }
+ mRateLimiter.setTarget(mModeSelector.getHeadToStagePose());
+ mHeadToStagePose = mRateLimiter.calculatePose(timestamp);
+ }
+
+ Pose3f getHeadToStagePose() const override { return mHeadToStagePose; }
+
+ HeadTrackingMode getActualMode() const override { return mModeSelector.getActualMode(); }
+
+ void recenter() override {
+ mHeadPoseDriftCompensator.recenter();
+ mScreenPoseDriftCompensator.recenter();
+ mRateLimiter.enable();
+ }
+
+ private:
+ const Options mOptions;
+ float mPhysicalToLogicalAngle = 0;
+ std::optional<int64_t> mWorldToHeadTimestamp;
+ std::optional<int64_t> mWorldToScreenTimestamp;
+ Pose3f mHeadToStagePose;
+ PoseDriftCompensator mHeadPoseDriftCompensator;
+ PoseDriftCompensator mScreenPoseDriftCompensator;
+ ScreenHeadFusion mScreenHeadFusion;
+ ModeSelector mModeSelector;
+ PoseRateLimiter mRateLimiter;
+};
+
+} // namespace
+
+std::unique_ptr<HeadTrackingProcessor> createHeadTrackingProcess(
+ const HeadTrackingProcessor::Options& options, HeadTrackingMode initialMode) {
+ return std::make_unique<HeadTrackingProcessorImpl>(options, initialMode);
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/ModeSelector-test.cpp b/media/libheadtracking/ModeSelector-test.cpp
new file mode 100644
index 0000000..6247d84
--- /dev/null
+++ b/media/libheadtracking/ModeSelector-test.cpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ModeSelector.h"
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+TEST(ModeSelector, Initial) {
+ ModeSelector::Options options;
+ ModeSelector selector(options);
+
+ selector.calculate(0);
+ EXPECT_EQ(HeadTrackingMode::STATIC, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), Pose3f());
+}
+
+TEST(ModeSelector, InitialWorldRelative) {
+ const Pose3f worldToHead({1, 2, 3}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options;
+ ModeSelector selector(options, HeadTrackingMode::WORLD_RELATIVE);
+
+ selector.setWorldToHeadPose(0, worldToHead);
+ selector.calculate(0);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse());
+}
+
+TEST(ModeSelector, InitialScreenRelative) {
+ const Pose3f screenToHead({1, 2, 3}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options;
+ ModeSelector selector(options, HeadTrackingMode::SCREEN_RELATIVE);
+
+ selector.setScreenToHeadPose(0, screenToHead);
+ selector.calculate(0);
+ EXPECT_EQ(HeadTrackingMode::SCREEN_RELATIVE, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), screenToHead.inverse());
+}
+
+TEST(ModeSelector, WorldRelative) {
+ const Pose3f worldToHead({1, 2, 3}, Quaternionf::UnitRandom());
+ const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options;
+ ModeSelector selector(options);
+
+ selector.setScreenToStagePose(screenToStage);
+
+ selector.setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+ selector.setWorldToHeadPose(0, worldToHead);
+ selector.calculate(0);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse() * screenToStage);
+}
+
+TEST(ModeSelector, WorldRelativeStale) {
+ const Pose3f worldToHead({1, 2, 3}, Quaternionf::UnitRandom());
+ const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options{.freshnessTimeout = 100};
+ ModeSelector selector(options);
+
+ selector.setScreenToStagePose(screenToStage);
+
+ selector.setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+ selector.setWorldToHeadPose(0, worldToHead);
+ selector.calculate(101);
+ EXPECT_EQ(HeadTrackingMode::STATIC, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), screenToStage);
+}
+
+TEST(ModeSelector, ScreenRelative) {
+ const Pose3f screenToHead({1, 2, 3}, Quaternionf::UnitRandom());
+ const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options;
+ ModeSelector selector(options);
+
+ selector.setScreenToStagePose(screenToStage);
+
+ selector.setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
+ selector.setScreenToHeadPose(0, screenToHead);
+ selector.calculate(0);
+ EXPECT_EQ(HeadTrackingMode::SCREEN_RELATIVE, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), screenToHead.inverse() * screenToStage);
+}
+
+TEST(ModeSelector, ScreenRelativeStaleToWorldRelative) {
+ const Pose3f screenToHead({1, 2, 3}, Quaternionf::UnitRandom());
+ const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+ const Pose3f worldToHead({7, 8, 9}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options{.freshnessTimeout = 100};
+ ModeSelector selector(options);
+
+ selector.setScreenToStagePose(screenToStage);
+
+ selector.setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
+ selector.setScreenToHeadPose(0, screenToHead);
+ selector.setWorldToHeadPose(50, worldToHead);
+ selector.calculate(101);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse() * screenToStage);
+}
+
+TEST(ModeSelector, ScreenRelativeInvalidToWorldRelative) {
+ const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+ const Pose3f worldToHead({7, 8, 9}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options;
+ ModeSelector selector(options);
+
+ selector.setScreenToStagePose(screenToStage);
+
+ selector.setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
+ selector.setScreenToHeadPose(50, std::nullopt);
+ selector.setWorldToHeadPose(50, worldToHead);
+ selector.calculate(101);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse() * screenToStage);
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/ModeSelector.cpp b/media/libheadtracking/ModeSelector.cpp
new file mode 100644
index 0000000..16e1712
--- /dev/null
+++ b/media/libheadtracking/ModeSelector.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ModeSelector.h"
+
+namespace android {
+namespace media {
+
+ModeSelector::ModeSelector(const Options& options, HeadTrackingMode initialMode)
+ : mOptions(options), mDesiredMode(initialMode), mActualMode(initialMode) {}
+
+void ModeSelector::setDesiredMode(HeadTrackingMode mode) {
+ mDesiredMode = mode;
+}
+
+void ModeSelector::setScreenToStagePose(const Pose3f& screenToStage) {
+ mScreenToStage = screenToStage;
+}
+
+void ModeSelector::setScreenToHeadPose(int64_t timestamp,
+ const std::optional<Pose3f>& screenToHead) {
+ mScreenToHead = screenToHead;
+ mScreenToHeadTimestamp = timestamp;
+}
+
+void ModeSelector::setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead) {
+ mWorldToHead = worldToHead;
+ mWorldToHeadTimestamp = timestamp;
+}
+
+void ModeSelector::calculateActualMode(int64_t timestamp) {
+ bool isValidScreenToHead = mScreenToHead.has_value() &&
+ timestamp - mScreenToHeadTimestamp < mOptions.freshnessTimeout;
+ bool isValidWorldToHead = mWorldToHead.has_value() &&
+ timestamp - mWorldToHeadTimestamp < mOptions.freshnessTimeout;
+
+ HeadTrackingMode mode = mDesiredMode;
+
+ // Optional downgrade from screen-relative to world-relative.
+ if (mode == HeadTrackingMode::SCREEN_RELATIVE) {
+ if (!isValidScreenToHead) {
+ mode = HeadTrackingMode::WORLD_RELATIVE;
+ }
+ }
+
+ // Optional downgrade from world-relative to static.
+ if (mode == HeadTrackingMode::WORLD_RELATIVE) {
+ if (!isValidWorldToHead) {
+ mode = HeadTrackingMode::STATIC;
+ }
+ }
+
+ mActualMode = mode;
+}
+
+void ModeSelector::calculate(int64_t timestamp) {
+ calculateActualMode(timestamp);
+
+ switch (mActualMode) {
+ case HeadTrackingMode::STATIC:
+ mHeadToStage = mScreenToStage;
+ break;
+
+ case HeadTrackingMode::WORLD_RELATIVE:
+ mHeadToStage = mWorldToHead.value().inverse() * mScreenToStage;
+ break;
+
+ case HeadTrackingMode::SCREEN_RELATIVE:
+ mHeadToStage = mScreenToHead.value().inverse() * mScreenToStage;
+ break;
+ }
+}
+
+Pose3f ModeSelector::getHeadToStagePose() const {
+ return mHeadToStage;
+}
+
+HeadTrackingMode ModeSelector::getActualMode() const {
+ return mActualMode;
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/ModeSelector.h b/media/libheadtracking/ModeSelector.h
new file mode 100644
index 0000000..17a5142
--- /dev/null
+++ b/media/libheadtracking/ModeSelector.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <limits>
+#include <optional>
+
+#include "media/HeadTrackingMode.h"
+#include "media/Pose.h"
+
+#include "PoseRateLimiter.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Head-tracking mode selector.
+ *
+ * This class is responsible for determining the pose used for audio virtualization, based on a
+ * number of available sources and a selectable mode.
+ *
+ * Typical flow is:
+ * ModeSelector selector(...);
+ * while (...) {
+ * // Set inputs.
+ * selector.setFoo(...);
+ * selector.setBar(...);
+ *
+ * // Update outputs based on inputs.
+ * selector.calculate(...);
+ *
+ * // Get outputs.
+ * Pose3f pose = selector.getHeadToStagePose();
+ * }
+ *
+ * This class is not thread-safe, but thread-compatible.
+ *
+ * For details on the frames of reference involved, their composition and the definitions of the
+ * different modes, refer to:
+ * go/immersive-audio-frames
+ *
+ * The actual mode may deviate from the desired mode in the following cases:
+ * - When we cannot get a valid and fresh estimate of the screen-to-head pose, we will fall back
+ * from screen-relative to world-relative.
+ * - When we cannot get a fresh estimate of the world-to-head pose, we will fall back from
+ * world-relative to static.
+ *
+ * All the timestamps used here are of arbitrary units and origin. They just need to be consistent
+ * between all the calls and with the Options provided for determining freshness and rate limiting.
+ */
+class ModeSelector {
+ public:
+ struct Options {
+ int64_t freshnessTimeout = std::numeric_limits<int64_t>::max();
+ };
+
+ ModeSelector(const Options& options, HeadTrackingMode initialMode = HeadTrackingMode::STATIC);
+
+ /** Sets the desired head-tracking mode. */
+ void setDesiredMode(HeadTrackingMode mode);
+
+ /**
+ * Set the screen-to-stage pose, used in all modes.
+ */
+ void setScreenToStagePose(const Pose3f& screenToStage);
+
+ /**
+ * Set the screen-to-head pose, used in screen-relative mode.
+ * The timestamp needs to reflect how fresh the sample is (not necessarily which point in time
+ * it applies to). nullopt can be used if it is determined that the listener is not in front of
+ * the screen.
+ */
+ void setScreenToHeadPose(int64_t timestamp, const std::optional<Pose3f>& screenToHead);
+
+ /**
+ * Set the world-to-head pose, used in world-relative mode.
+ * The timestamp needs to reflect how fresh the sample is (not necessarily which point in time
+ * it applies to).
+ */
+ void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead);
+
+ /**
+ * Process all the previous inputs and update the outputs.
+ */
+ void calculate(int64_t timestamp);
+
+ /**
+ * Get the aggregate head-to-stage pose (primary output of this module).
+ */
+ Pose3f getHeadToStagePose() const;
+
+ /**
+ * Get the actual head-tracking mode (which may deviate from the desired one as mentioned in the
+ * class documentation above).
+ */
+ HeadTrackingMode getActualMode() const;
+
+ private:
+ const Options mOptions;
+
+ HeadTrackingMode mDesiredMode;
+ Pose3f mScreenToStage;
+ std::optional<Pose3f> mScreenToHead;
+ int64_t mScreenToHeadTimestamp;
+ std::optional<Pose3f> mWorldToHead;
+ int64_t mWorldToHeadTimestamp;
+
+ HeadTrackingMode mActualMode;
+ Pose3f mHeadToStage;
+
+ void calculateActualMode(int64_t timestamp);
+};
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/OWNERS b/media/libheadtracking/OWNERS
new file mode 100644
index 0000000..e5d0370
--- /dev/null
+++ b/media/libheadtracking/OWNERS
@@ -0,0 +1,2 @@
+ytai@google.com
+elaurent@google.com
diff --git a/media/libheadtracking/Pose-test.cpp b/media/libheadtracking/Pose-test.cpp
new file mode 100644
index 0000000..3ff6a9b
--- /dev/null
+++ b/media/libheadtracking/Pose-test.cpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/Pose.h"
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+using android::media::Pose3f;
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace android {
+namespace media {
+namespace {
+
+TEST(Pose, CtorDefault) {
+ Pose3f pose;
+ EXPECT_EQ(pose.translation(), Vector3f::Zero());
+ EXPECT_EQ(pose.rotation(), Quaternionf::Identity());
+}
+
+TEST(Pose, CtorRotation) {
+ Quaternionf rot = Quaternionf::UnitRandom();
+ Pose3f pose(rot);
+ EXPECT_EQ(pose.translation(), Vector3f::Zero());
+ EXPECT_EQ(pose.rotation(), rot);
+}
+
+TEST(Pose, CtorTranslation) {
+ Vector3f trans{1, 2, 3};
+ Pose3f pose(trans);
+ EXPECT_EQ(pose.translation(), trans);
+ EXPECT_EQ(pose.rotation(), Quaternionf::Identity());
+}
+
+TEST(Pose, CtorTranslationRotation) {
+ Quaternionf rot = Quaternionf::UnitRandom();
+ Vector3f trans{1, 2, 3};
+ Pose3f pose(trans, rot);
+ EXPECT_EQ(pose.translation(), trans);
+ EXPECT_EQ(pose.rotation(), rot);
+}
+
+TEST(Pose, Inverse) {
+ Pose3f pose({1, 2, 3}, Quaternionf::UnitRandom());
+ EXPECT_EQ(pose.inverse() * pose, Pose3f());
+ EXPECT_EQ(pose * pose.inverse(), Pose3f());
+}
+
+TEST(Pose, IsApprox) {
+ constexpr float eps = std::numeric_limits<float>::epsilon();
+
+ EXPECT_EQ(Pose3f({1, 2, 3}, rotationVectorToQuaternion({4, 5, 6})),
+ Pose3f({1 + eps, 2 + eps, 3 + eps},
+ rotationVectorToQuaternion({4 + eps, 5 + eps, 6 + eps})));
+
+ EXPECT_NE(Pose3f({1, 2, 3}, rotationVectorToQuaternion({4, 5, 6})),
+ Pose3f({1.01, 2, 3}, rotationVectorToQuaternion({4, 5, 6})));
+
+ EXPECT_NE(Pose3f({1, 2, 3}, rotationVectorToQuaternion({4, 5, 6})),
+ Pose3f({1, 2, 3}, rotationVectorToQuaternion({4.01, 5, 6})));
+}
+
+TEST(Pose, Compose) {
+ Pose3f p1({1, 2, 3}, rotateZ(M_PI_2));
+ Pose3f p2({4, 5, 6}, rotateX(M_PI_2));
+ Pose3f p3({-4, 6, 9}, p1.rotation() * p2.rotation());
+ EXPECT_EQ(p1 * p2, p3);
+}
+
+TEST(Pose, MoveWithRateLimit_NoLimit) {
+ Pose3f from({1, 1, 1}, Quaternionf::Identity());
+ Pose3f to({1, 1, 2}, rotateZ(M_PI_2));
+ auto result = moveWithRateLimit(from, to, 1, 10, 10);
+ EXPECT_EQ(std::get<0>(result), to);
+ EXPECT_FALSE(std::get<1>(result));
+}
+
+TEST(Pose, MoveWithRateLimit_TranslationLimit) {
+ Pose3f from({1, 1, 1}, Quaternionf::Identity());
+ Pose3f to({1, 1, 2}, rotateZ(M_PI_2));
+ auto result = moveWithRateLimit(from, to, 1, 0.5f, 10);
+ Pose3f expected({1, 1, 1.5f}, rotateZ(M_PI_4));
+ EXPECT_EQ(std::get<0>(result), expected);
+ EXPECT_TRUE(std::get<1>(result));
+}
+
+TEST(Pose, MoveWithRateLimit_RotationLimit) {
+ Pose3f from({1, 1, 1}, Quaternionf::Identity());
+ Pose3f to({1, 1, 2}, rotateZ(M_PI_2));
+ auto result = moveWithRateLimit(from, to, 1, 10, M_PI_4);
+ Pose3f expected({1, 1, 1.5f}, rotateZ(M_PI_4));
+ EXPECT_EQ(std::get<0>(result), expected);
+ EXPECT_TRUE(std::get<1>(result));
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/Pose.cpp b/media/libheadtracking/Pose.cpp
new file mode 100644
index 0000000..9eeb2b1
--- /dev/null
+++ b/media/libheadtracking/Pose.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/Pose.h"
+#include "media/Twist.h"
+
+namespace android {
+namespace media {
+
+std::tuple<Pose3f, bool> moveWithRateLimit(const Pose3f& from, const Pose3f& to, float t,
+ float maxTranslationalVelocity,
+ float maxRotationalVelocity) {
+ // Never rate limit if both limits are set to infinity.
+ if (std::isinf(maxTranslationalVelocity) && std::isinf(maxRotationalVelocity)) {
+ return {to, false};
+ }
+ // Always rate limit if t is 0 (required to avoid division by 0).
+ if (t == 0) {
+ return {from, true};
+ }
+
+ Pose3f fromToTo = from.inverse() * to;
+ Twist3f twist = differentiate(fromToTo, t);
+ float angularRotationalRatio = twist.scalarRotationalVelocity() / maxRotationalVelocity;
+ float translationalVelocityRatio =
+ twist.scalarTranslationalVelocity() / maxTranslationalVelocity;
+ float maxRatio = std::max(angularRotationalRatio, translationalVelocityRatio);
+ if (maxRatio <= 1) {
+ return {to, false};
+ }
+ return {from * integrate(twist, t / maxRatio), true};
+}
+
+std::ostream& operator<<(std::ostream& os, const Pose3f& pose) {
+ os << "translation: " << pose.translation().transpose()
+ << " quaternion: " << pose.rotation().coeffs().transpose();
+ return os;
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/PoseDriftCompensator-test.cpp b/media/libheadtracking/PoseDriftCompensator-test.cpp
new file mode 100644
index 0000000..74f4bee
--- /dev/null
+++ b/media/libheadtracking/PoseDriftCompensator-test.cpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <cmath>
+
+#include "PoseDriftCompensator.h"
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+using Options = PoseDriftCompensator::Options;
+
+TEST(PoseDriftCompensator, Initial) {
+ PoseDriftCompensator comp(Options{});
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+}
+
+TEST(PoseDriftCompensator, NoDrift) {
+ Pose3f pose1({1, 2, 3}, Quaternionf::UnitRandom());
+ Pose3f pose2({4, 5, 6}, Quaternionf::UnitRandom());
+ PoseDriftCompensator comp(Options{});
+
+ comp.setInput(1000, pose1);
+ EXPECT_EQ(comp.getOutput(), pose1);
+
+ comp.setInput(2000, pose2);
+ EXPECT_EQ(comp.getOutput(), pose2);
+
+ comp.recenter();
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(3000, pose1);
+ EXPECT_EQ(comp.getOutput(), pose2.inverse() * pose1);
+}
+
+TEST(PoseDriftCompensator, NoDriftZeroTime) {
+ Pose3f pose1({1, 2, 3}, Quaternionf::UnitRandom());
+ Pose3f pose2({4, 5, 6}, Quaternionf::UnitRandom());
+ PoseDriftCompensator comp(Options{});
+
+ comp.setInput(1000, pose1);
+ EXPECT_EQ(comp.getOutput(), pose1);
+
+ comp.setInput(1000, pose2);
+ EXPECT_EQ(comp.getOutput(), pose2);
+
+ comp.recenter();
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(1000, pose1);
+ EXPECT_EQ(comp.getOutput(), pose2.inverse() * pose1);
+}
+
+TEST(PoseDriftCompensator, Asymptotic) {
+ Pose3f pose({1, 2, 3}, Quaternionf::UnitRandom());
+
+ PoseDriftCompensator comp(
+ Options{.translationalDriftTimeConstant = 1, .rotationalDriftTimeConstant = 1});
+
+ // Set the same pose for a long time.
+ for (int64_t t = 0; t < 1000; ++t) {
+ comp.setInput(t, pose);
+ }
+
+ // Output would have faded to approx. identity.
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+}
+
+TEST(PoseDriftCompensator, Fast) {
+ Pose3f pose1({1, 2, 3}, Quaternionf::UnitRandom());
+ Pose3f pose2({4, 5, 6}, Quaternionf::UnitRandom());
+ PoseDriftCompensator comp(
+ Options{.translationalDriftTimeConstant = 1e7, .rotationalDriftTimeConstant = 1e7});
+
+ comp.setInput(0, pose1);
+ EXPECT_EQ(comp.getOutput(), pose1);
+
+ comp.setInput(1, pose2);
+ EXPECT_EQ(comp.getOutput(), pose2);
+
+ comp.recenter();
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(2, pose1);
+ EXPECT_EQ(comp.getOutput(), pose2.inverse() * pose1);
+}
+
+TEST(PoseDriftCompensator, Drift) {
+ Pose3f pose1({1, 2, 3}, rotateZ(-M_PI * 3 / 4));
+ PoseDriftCompensator comp(
+ Options{.translationalDriftTimeConstant = 500, .rotationalDriftTimeConstant = 1000});
+
+ // Initial pose is used as is.
+ comp.setInput(1000, pose1);
+ EXPECT_EQ(comp.getOutput(), pose1);
+
+ // After 1000 ticks, our rotation should be exp(-1) and translation exp(-2) from identity.
+ comp.setInput(2000, pose1);
+ EXPECT_EQ(comp.getOutput(),
+ Pose3f(Vector3f{1, 2, 3} * std::expf(-2), rotateZ(-M_PI * 3 / 4 * std::expf(-1))));
+
+ // As long as the input stays the same, we'll continue to advance towards identity.
+ comp.setInput(3000, pose1);
+ EXPECT_EQ(comp.getOutput(),
+ Pose3f(Vector3f{1, 2, 3} * std::expf(-4), rotateZ(-M_PI * 3 / 4 * std::expf(-2))));
+
+ comp.recenter();
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/PoseDriftCompensator.cpp b/media/libheadtracking/PoseDriftCompensator.cpp
new file mode 100644
index 0000000..9dfe172
--- /dev/null
+++ b/media/libheadtracking/PoseDriftCompensator.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PoseDriftCompensator.h"
+
+#include <cmath>
+
+#include "QuaternionUtil.h"
+
+namespace android {
+namespace media {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+PoseDriftCompensator::PoseDriftCompensator(const Options& options) : mOptions(options) {}
+
+void PoseDriftCompensator::setInput(int64_t timestamp, const Pose3f& input) {
+ if (!mTimestamp.has_value()) {
+ // First input sample sets the output directly.
+ mOutput = input;
+ } else {
+ Pose3f prevInputToInput = mPrevInput.inverse() * input;
+ mOutput = scale(mOutput, timestamp - mTimestamp.value()) * prevInputToInput;
+ }
+ mPrevInput = input;
+ mTimestamp = timestamp;
+}
+
+void PoseDriftCompensator::recenter() {
+ mOutput = Pose3f();
+}
+
+Pose3f PoseDriftCompensator::getOutput() const {
+ return mOutput;
+}
+
+Pose3f PoseDriftCompensator::scale(const Pose3f& pose, int64_t dt) {
+ // Translation.
+ Vector3f translation = pose.translation();
+ translation *= std::expf(-static_cast<float>(dt) / mOptions.translationalDriftTimeConstant);
+
+ // Rotation.
+ Vector3f rotationVec = quaternionToRotationVector(pose.rotation());
+ rotationVec *= std::expf(-static_cast<float>(dt) / mOptions.rotationalDriftTimeConstant);
+
+ return Pose3f(translation, rotationVectorToQuaternion(rotationVec));
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/PoseDriftCompensator.h b/media/libheadtracking/PoseDriftCompensator.h
new file mode 100644
index 0000000..a71483b
--- /dev/null
+++ b/media/libheadtracking/PoseDriftCompensator.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <limits>
+#include <optional>
+
+#include "media/Pose.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Drift compensator for a stream of poses.
+ *
+ * This is effectively a high-pass filter for a pose stream, removing any DC-offset / bias. The
+ * provided input stream will be "pulled" toward identity with an exponential decay filter with a
+ * configurable time constant. Rotation and translation are handled separately.
+ *
+ * Typical usage:
+ * PoseDriftCompensator comp(...);
+ *
+ * while (...) {
+ * comp.setInput(...);
+ * Pose3f output = comp.getOutput();
+ * }
+ *
+ * There doesn't need to be a 1:1 correspondence between setInput() and getOutput() calls. The
+ * output timestamp is always that of the last setInput() call. Calling recenter() will reset the
+ * bias to the current output, causing the output to be identity.
+ *
+ * The initial bias point is identity.
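+ *
+ * Concretely, in this implementation each setInput() call scales the previously accumulated
+ * output by exp(-dt / timeConstant), separately for the translational part and the rotation
+ * vector, before composing it with the delta between the previous and the current input pose.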
+ *
+ * This implementation is thread-compatible, but not thread-safe.
+ */
+class PoseDriftCompensator {
+ public:
+ struct Options {
+ float translationalDriftTimeConstant = std::numeric_limits<float>::infinity();
+ float rotationalDriftTimeConstant = std::numeric_limits<float>::infinity();
+ };
+
+ explicit PoseDriftCompensator(const Options& options);
+
+ void setInput(int64_t timestamp, const Pose3f& input);
+
+ void recenter();
+
+ Pose3f getOutput() const;
+
+ private:
+ const Options mOptions;
+
+ Pose3f mPrevInput;
+ Pose3f mOutput;
+ std::optional<int64_t> mTimestamp;
+
+ Pose3f scale(const Pose3f& pose, int64_t dt);
+};
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/PoseProcessingGraph.png b/media/libheadtracking/PoseProcessingGraph.png
new file mode 100644
index 0000000..8e6dfd2
--- /dev/null
+++ b/media/libheadtracking/PoseProcessingGraph.png
Binary files differ
diff --git a/media/libheadtracking/PoseRateLimiter-test.cpp b/media/libheadtracking/PoseRateLimiter-test.cpp
new file mode 100644
index 0000000..f306183
--- /dev/null
+++ b/media/libheadtracking/PoseRateLimiter-test.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "PoseRateLimiter.h"
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+using Options = PoseRateLimiter::Options;
+
+TEST(PoseRateLimiter, Initial) {
+ Pose3f target({1, 2, 3}, Quaternionf::UnitRandom());
+ PoseRateLimiter limiter(Options{.maxTranslationalVelocity = 10, .maxRotationalVelocity = 10});
+ limiter.setTarget(target);
+ EXPECT_EQ(limiter.calculatePose(1000), target);
+}
+
+TEST(PoseRateLimiter, UnlimitedZeroTime) {
+ Pose3f target1({1, 2, 3}, Quaternionf::UnitRandom());
+ Pose3f target2({4, 5, 6}, Quaternionf::UnitRandom());
+ PoseRateLimiter limiter(Options{});
+ limiter.setTarget(target1);
+ EXPECT_EQ(limiter.calculatePose(0), target1);
+ limiter.setTarget(target2);
+ EXPECT_EQ(limiter.calculatePose(0), target2);
+ limiter.setTarget(target1);
+ EXPECT_EQ(limiter.calculatePose(0), target1);
+}
+
+TEST(PoseRateLimiter, Limited) {
+ Pose3f pose1({1, 2, 3}, Quaternionf::Identity());
+ Pose3f pose2({1, 2, 8}, rotateZ(M_PI * 5 / 8));
+ PoseRateLimiter limiter(Options{.maxTranslationalVelocity = 1, .maxRotationalVelocity = 10});
+ limiter.setTarget(pose2);
+ EXPECT_EQ(limiter.calculatePose(1000), pose2);
+
+ // Rate limiting is inactive. Should track despite the violation.
+ limiter.setTarget(pose1);
+ EXPECT_EQ(limiter.calculatePose(1001), pose1);
+
+ // Enable rate limiting and observe gradual motion from pose1 to pose2.
+ limiter.enable();
+ limiter.setTarget(pose2);
+ EXPECT_EQ(limiter.calculatePose(1002), Pose3f({1, 2, 4}, rotateZ(M_PI * 1 / 8)));
+ limiter.setTarget(pose2);
+ EXPECT_EQ(limiter.calculatePose(1003), Pose3f({1, 2, 5}, rotateZ(M_PI * 2 / 8)));
+ // Skip a tick.
+ limiter.setTarget(pose2);
+ EXPECT_EQ(limiter.calculatePose(1005), Pose3f({1, 2, 7}, rotateZ(M_PI * 4 / 8)));
+ limiter.setTarget(pose2);
+ EXPECT_EQ(limiter.calculatePose(1006), pose2);
+
+ // We reached the target, so rate limiting should now be disabled.
+ limiter.setTarget(pose1);
+ EXPECT_EQ(limiter.calculatePose(1007), pose1);
+}
+
+TEST(PoseRateLimiter, Reset) {
+ Pose3f pose1({1, 2, 3}, Quaternionf::Identity());
+ Pose3f pose2({1, 2, 8}, rotateZ(M_PI * 5 / 8));
+ PoseRateLimiter limiter(Options{.maxTranslationalVelocity = 1, .maxRotationalVelocity = 10});
+ limiter.setTarget(pose1);
+ EXPECT_EQ(limiter.calculatePose(1000), pose1);
+
+ // Enable rate limiting and observe gradual motion from pose1 to pose2.
+ limiter.enable();
+ limiter.setTarget(pose2);
+ EXPECT_EQ(limiter.calculatePose(1001), Pose3f({1, 2, 4}, rotateZ(M_PI * 1 / 8)));
+
+ // Reset the pose and disable rate limiting.
+ limiter.reset(pose2);
+ EXPECT_EQ(limiter.calculatePose(1002), pose2);
+
+ // Rate limiting should now be disabled.
+ limiter.setTarget(pose1);
+ EXPECT_EQ(limiter.calculatePose(1003), pose1);
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/PoseRateLimiter.cpp b/media/libheadtracking/PoseRateLimiter.cpp
new file mode 100644
index 0000000..380e22b
--- /dev/null
+++ b/media/libheadtracking/PoseRateLimiter.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PoseRateLimiter.h"
+
+namespace android {
+namespace media {
+
+PoseRateLimiter::PoseRateLimiter(const Options& options) : mOptions(options), mLimiting(false) {}
+
+void PoseRateLimiter::enable() {
+ mLimiting = true;
+}
+
+void PoseRateLimiter::reset(const Pose3f& target) {
+ mLimiting = false;
+ mTargetPose = target;
+}
+
+void PoseRateLimiter::setTarget(const Pose3f& target) {
+ mTargetPose = target;
+}
+
+Pose3f PoseRateLimiter::calculatePose(int64_t timestamp) {
+ assert(mTargetPose.has_value());
+ Pose3f pose;
+ if (mLimiting && mOutput.has_value()) {
+ std::tie(pose, mLimiting) = moveWithRateLimit(
+ mOutput->pose, mTargetPose.value(), timestamp - mOutput->timestamp,
+ mOptions.maxTranslationalVelocity, mOptions.maxRotationalVelocity);
+ } else {
+ pose = mTargetPose.value();
+ }
+ mOutput = Point{pose, timestamp};
+ return pose;
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/PoseRateLimiter.h b/media/libheadtracking/PoseRateLimiter.h
new file mode 100644
index 0000000..aa2fe80
--- /dev/null
+++ b/media/libheadtracking/PoseRateLimiter.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <limits>
+#include <optional>
+
+#include "media/Pose.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Limits a stream of poses to given maximum translational and rotational velocities.
+ *
+ * Normal operation:
+ *
+ * Pose3f output;
+ * PoseRateLimiter limiter(...);
+ *
+ * // Limiting is disabled. Output will be the same as last input.
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ *
+ * // Enable limiting. Output will no longer be necessarily the same as last input.
+ * limiter.enable();
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ *
+ * // When the output eventually catches up with the last input, the limiter will be
+ * // automatically disabled again and the output will match the input again.
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ *
+ * As shown above, the limiter is turned on manually via enable(), but turns off automatically as
+ * soon as the output is able to catch up to the input. The intention is that rate limiting will be
+ * turned on at specific times to smooth out any artificial discontinuities introduced to the pose
+ * stream, while acting as a simple passthrough the rest of the time.
+ *
+ * setTarget(...) and calculatePose(...) don't have to be ordered in any particular way. However,
+ * setTarget or reset() must be called at least once prior to the first calculatePose().
+ *
+ * Calling reset() instead of setTarget() forces the output to the given pose and disables rate
+ * limiting.
+ *
+ * This implementation is thread-compatible, but not thread-safe.
+ */
+class PoseRateLimiter {
+ public:
+ struct Options {
+ float maxTranslationalVelocity = std::numeric_limits<float>::infinity();
+ float maxRotationalVelocity = std::numeric_limits<float>::infinity();
+ };
+
+ explicit PoseRateLimiter(const Options& options);
+
+ void enable();
+
+ void reset(const Pose3f& target);
+ void setTarget(const Pose3f& target);
+
+ Pose3f calculatePose(int64_t timestamp);
+
+ private:
+ struct Point {
+ Pose3f pose;
+ int64_t timestamp;
+ };
+
+ const Options mOptions;
+ bool mLimiting;
+ std::optional<Pose3f> mTargetPose;
+ std::optional<Point> mOutput;
+};
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/QuaternionUtil-test.cpp b/media/libheadtracking/QuaternionUtil-test.cpp
new file mode 100644
index 0000000..e79e54a
--- /dev/null
+++ b/media/libheadtracking/QuaternionUtil-test.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace android {
+namespace media {
+namespace {
+
+TEST(QuaternionUtil, RotationVectorToQuaternion) {
+ // 90 degrees around Z.
+ Vector3f rot = {0, 0, M_PI_2};
+ Quaternionf quat = rotationVectorToQuaternion(rot);
+ ASSERT_EQ(quat * Vector3f(1, 0, 0), Vector3f(0, 1, 0));
+ ASSERT_EQ(quat * Vector3f(0, 1, 0), Vector3f(-1, 0, 0));
+ ASSERT_EQ(quat * Vector3f(0, 0, 1), Vector3f(0, 0, 1));
+}
+
+TEST(QuaternionUtil, QuaternionToRotationVector) {
+ Quaternionf quat = Quaternionf::FromTwoVectors(Vector3f(1, 0, 0), Vector3f(0, 1, 0));
+ Vector3f rot = quaternionToRotationVector(quat);
+ ASSERT_EQ(rot, Vector3f(0, 0, M_PI_2));
+}
+
+TEST(QuaternionUtil, RoundTripFromQuaternion) {
+ Quaternionf quaternion = Quaternionf::UnitRandom();
+ EXPECT_EQ(quaternion, rotationVectorToQuaternion(quaternionToRotationVector(quaternion)));
+}
+
+TEST(QuaternionUtil, RoundTripFromVector) {
+ Vector3f vec{0.1, 0.2, 0.3};
+ EXPECT_EQ(vec, quaternionToRotationVector(rotationVectorToQuaternion(vec)));
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/QuaternionUtil.cpp b/media/libheadtracking/QuaternionUtil.cpp
new file mode 100644
index 0000000..5d090de
--- /dev/null
+++ b/media/libheadtracking/QuaternionUtil.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "QuaternionUtil.h"
+
+#include <cassert>
+#include <cmath>
+
+namespace android {
+namespace media {
+
+using Eigen::NumTraits;
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace {
+
+Vector3f LogSU2(const Quaternionf& q) {
+ // Implementation of the logarithmic map of SU(2) using atan.
+ // This follows Hertzberg et al. "Integrating Generic Sensor Fusion Algorithms
+ // with Sound State Representations through Encapsulation of Manifolds", Eq.
+ // (31)
+ // We use asin and acos instead of atan to enable the use of Eigen Autodiff
+ // with SU2.
+ const float sign_of_w = q.w() < 0.f ? -1.f : 1.f;
+ const float abs_w = sign_of_w * q.w();
+ const Vector3f v = sign_of_w * q.vec();
+ const float squared_norm_of_v = v.squaredNorm();
+
+ assert(std::abs(1.f - abs_w * abs_w - squared_norm_of_v) < NumTraits<float>::dummy_precision());
+
+ if (squared_norm_of_v > NumTraits<float>::dummy_precision()) {
+ const float norm_of_v = sqrt(squared_norm_of_v);
+ if (abs_w > NumTraits<float>::dummy_precision()) {
+ // asin(x) = acos(x) at x = 1/sqrt(2).
+ if (norm_of_v <= float(M_SQRT1_2)) {
+ return (asin(norm_of_v) / norm_of_v) * v;
+ }
+ return (acos(abs_w) / norm_of_v) * v;
+ }
+ return (M_PI_2 / norm_of_v) * v;
+ }
+
+ // Taylor expansion at squared_norm_of_v == 0
+ return (1.f / abs_w - squared_norm_of_v / (3.f * pow(abs_w, 3))) * v;
+}
+
+Quaternionf ExpSU2(const Vector3f& delta) {
+ Quaternionf q_delta;
+ const float theta_squared = delta.squaredNorm();
+ if (theta_squared > NumTraits<float>::dummy_precision()) {
+ const float theta = sqrt(theta_squared);
+ q_delta.w() = cos(theta);
+ q_delta.vec() = (sin(theta) / theta) * delta;
+ } else {
+ // Taylor expansion around theta == 0
+ q_delta.w() = 1.f - 0.5f * theta_squared;
+ q_delta.vec() = (1.f - 1.f / 6.f * theta_squared) * delta;
+ }
+ return q_delta;
+}
+
+} // namespace
+
+Quaternionf rotationVectorToQuaternion(const Vector3f& rotationVector) {
+ // SU(2) is a double cover of SO(3), thus we have to half the tangent vector
+ // delta
+ const Vector3f half_delta = 0.5f * rotationVector;
+ return ExpSU2(half_delta);
+}
+
+Vector3f quaternionToRotationVector(const Quaternionf& quaternion) {
+ // SU(2) is a double cover of SO(3), thus we have to multiply the tangent
+ // vector delta by two
+ return 2.f * LogSU2(quaternion);
+}
+
+Quaternionf rotateX(float angle) {
+ return rotationVectorToQuaternion(Vector3f(1, 0, 0) * angle);
+}
+
+Quaternionf rotateY(float angle) {
+ return rotationVectorToQuaternion(Vector3f(0, 1, 0) * angle);
+}
+
+Quaternionf rotateZ(float angle) {
+ return rotationVectorToQuaternion(Vector3f(0, 0, 1) * angle);
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/QuaternionUtil.h b/media/libheadtracking/QuaternionUtil.h
new file mode 100644
index 0000000..f7a2ca9
--- /dev/null
+++ b/media/libheadtracking/QuaternionUtil.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <Eigen/Geometry>
+
+namespace android {
+namespace media {
+
+/**
+ * Converts a rotation vector to an equivalent quaternion.
+ * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
+ * magnitude the rotation angle (in radians) around that axis.
+ */
+Eigen::Quaternionf rotationVectorToQuaternion(const Eigen::Vector3f& rotationVector);
+
+/**
+ * Converts a quaternion to an equivalent rotation vector.
+ * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
+ * magnitude the rotation angle (in radians) around that axis.
+ */
+Eigen::Vector3f quaternionToRotationVector(const Eigen::Quaternionf& quaternion);
+
+/**
+ * Returns a quaternion representing a rotation around the X-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateX(float angle);
+
+/**
+ * Returns a quaternion representing a rotation around the Y-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateY(float angle);
+
+/**
+ * Returns a quaternion representing a rotation around the Z-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateZ(float angle);
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/README.md b/media/libheadtracking/README.md
new file mode 100644
index 0000000..3d5b71a
--- /dev/null
+++ b/media/libheadtracking/README.md
@@ -0,0 +1,185 @@
+# Head-Tracking Library For Immersive Audio
+
+This library handles the processing of head-tracking information, necessary for
+Immersive Audio functionality. It turns raw sensor readings into the final
+pose that is fed into a virtualizer.
+
+## Basic Usage
+
+The main entry point into this library is the `HeadTrackingProcessor` class.
+This class is provided with the following inputs:
+
+- Head pose, relative to some arbitrary world frame.
+- Screen pose, relative to some arbitrary world frame.
+- Display orientation, defined as the angle between the "physical" screen and
+ the "logical" screen.
+- Transform between the screen and the sound stage.
+- Desired operational mode:
+ - Static: only the sound stage pose is taken into account. This will result
+ in an experience where the sound stage moves with the listener's head.
+ - World-relative: both the head pose and stage pose are taken into account.
+ This will result in an experience where the sound stage is perceived to be
+ located at a fixed place in the world.
+ - Screen-relative: the head pose, screen pose and stage pose are all taken
+ into account. This will result in an experience where the sound stage is
+ perceived to be located at a fixed place relative to the screen.
+
+Once inputs are provided, the `calculate()` method will make the following
+output available:
+
+- Stage pose, relative to the head. This aggregates all the inputs mentioned
+ above and is ready to be fed into a virtualizer.
+- Actual operational mode. May deviate from the desired one in cases where the
+ desired mode cannot be calculated (for example, as a result of dropped messages
+ from one of the sensors).
+
+A `recenter()` operation is also available, which indicates to the system that
+whatever pose the screen and head are currently at should be considered as the
+"center" pose, or frame of reference.
+
+## Pose-Related Conventions
+
+### Naming and Composition
+
+When referring to poses in code, it is always good practice to follow
+conventional naming, which highlights the reference and target frames clearly:
+
+Bad:
+
+```
+Pose3f headPose;
+```
+
+Good:
+
+```
+Pose3f worldToHead; // “world” is the reference frame,
+ // “head” is the target frame.
+```
+
+By following this convention, it is easy to compose poses correctly, by making
+sure adjacent frames are identical:
+
+```
+Pose3f aToD = aToB * bToC * cToD;
+```
+
+And similarly, inverting the transform simply flips the reference and target:
+
+```
+Pose3f aToB = bToA.inverse();
+```
+
+### Twist
+
+“Twist” is to pose what velocity is to distance: it is the time-derivative of a
+pose, representing the change in pose over a short period of time. Its naming
+convention always states one frame, e.g.:
+
+```
+Twist3f headTwist;
+```
+
+This means that this twist represents the head-at-time-T to head-at-time-T+dt
+transform. Twists are not composable in the same way as poses.
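+
+The `integrate()` and `differentiate()` helpers declared in `media/Twist.h`
+convert between a twist and the pose traversed over a time interval:
+
+```
+// Head translating along X while rotating at 45 deg/sec around Z.
+Twist3f headTwist({1, 0, 0}, {0, 0, M_PI_4});
+
+// Pose of head-at-time-T+dt relative to head-at-time-T, for dt = 2 seconds.
+Pose3f headTtoHeadTdt = integrate(headTwist, 2.f);
+
+// And back again.
+Twist3f recovered = differentiate(headTtoHeadTdt, 2.f);
+```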
+
+### Frames of Interest
+
+The frames of interest in this library are defined as follows:
+
+#### Head
+
+This is the listener’s head. The origin is at the center point between the
+ear-drums, the X-axis goes from left ear to right ear, Y-axis goes from the back
+of the head towards the face and Z-axis goes from the bottom of the head to the
+top.
+
+#### Screen
+
+This is the primary screen that the user will be looking at, which is relevant
+for some Immersive Audio use-cases, such as watching a movie. We will follow a
+different convention for this frame than what the Sensor framework uses. The
+origin is at the center of the screen. X-axis goes from left to right, Z-axis
+goes from the screen bottom to the screen top, and the Y-axis goes “into” the
+screen (from the direction of the viewer). The up/down/left/right of the screen
+are defined as the logical directions used for display. So when flipping the
+display orientation between “landscape” and “portrait”, the frame of reference
+will change with respect to the physical screen.
+
+#### Stage
+
+This is the frame of reference used by the virtualizer for positioning sound
+objects. It is not associated with any physical frame. In a typical
+multi-channel scenario, the listener is at the origin, the X-axis goes from left
+to right, Y-axis from back to front and Z-axis from down to up. For example, a
+front-right speaker is located at positive X and Y with Z = 0, and a height
+speaker has a positive Z.
+
+#### World
+
+It is sometimes convenient to use an intermediate frame when dealing with
+head-to-screen transforms. The “world” frame is an arbitrary frame of reference
+in the physical world, relative to which we can measure the head pose and screen
+pose. In (very common) cases when we can’t establish such an absolute frame, we
+can take each measurement relative to a separate, arbitrary frame and high-pass
+the result.
+
+## Processing Description
+
+
+
+The diagram above illustrates the processing that takes place from the inputs to
+the outputs.
+
+### Predictor
+
+The Predictor block receives a pose and a twist (pose derivative) and
+extrapolates to obtain a predicted head pose (with a given latency).
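+
+Conceptually (a sketch only, not necessarily the exact implementation), this
+composes the last known head pose with the twist integrated over the
+prediction duration:
+
+```
+// headTwist is expressed in the head frame, so the integrated motion is
+// composed on the right.
+Pose3f predictedWorldToHead =
+        worldToHead * integrate(headTwist, options.predictionDuration);
+```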
+
+### Drift / Bias Compensator
+
+The Drift / Bias Compensator blocks serve two purposes:
+
+- Compensate for floating reference axes by applying a high-pass filter, which
+ slowly pulls the pose toward identity (sketched below).
+- Establish the reference frame for the poses by having the ability to set the
+ current pose as the reference for future poses (recentering). Effectively,
+ this is resetting the filter state to identity.
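+
+One way to realize the high-pass behavior (a sketch only; the actual logic
+lives in `PoseDriftCompensator.cpp` and may differ) is to accumulate pose
+deltas while exponentially shrinking the accumulated pose toward identity:
+
+```
+// Sketch: 'output' starts at identity; recenter() resets it back to identity.
+// dt, driftTimeConstant, prevInput and input are illustrative names.
+float alpha = std::exp(-dt / driftTimeConstant);
+Pose3f delta = prevInput.inverse() * input;  // motion since the last sample
+output = Pose3f(alpha * output.translation(),
+                rotationVectorToQuaternion(
+                        alpha * quaternionToRotationVector(output.rotation()))) *
+         delta;
+```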
+
+### Orientation Compensation
+
+The Orientation Compensation block applies the display orientation to the screen
+pose to obtain the pose of the “logical screen” frame, in which the Y-axis is
+pointing in the direction of the logical screen “up” rather than the physical
+one.
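+
+As a sketch (assuming, per the screen-frame convention above, that the
+compensation is a rotation about the screen's Y axis; the exact sign
+convention is defined by the implementation):
+
+```
+Pose3f worldToLogicalScreen =
+        worldToScreen * Pose3f(rotateY(-physicalToLogicalAngle));
+```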
+
+### Screen-Relative Pose
+
+The Screen-Relative Pose block is provided with a head pose and a screen pose
+and estimates the pose of the head relative to the screen. Optionally, this
+module may indicate that the user is likely not in front of the screen via the
+“valid” output.
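+
+The core relation is a simple pose composition, as implemented in
+`ScreenHeadFusion`:
+
+```
+// screenToHead = screenToWorld * worldToHead
+Pose3f screenToHead = worldToScreen.inverse() * worldToHead;
+```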
+
+### Mode Selector
+
+The Mode Selector block aggregates the various sources of pose information into
+a head-to-stage pose that feeds the virtualizer. It is controlled by the
+“desired mode” signal, which indicates whether the preferred mode is static,
+world-relative or screen-relative.
+
+The actual mode may diverge from the desired mode. It is determined as follows
+(see the sketch after this list):
+
+- If the desired mode is static, the actual mode is static.
+- If the desired mode is world-relative:
+ - If head poses are fresh, the actual mode is world-relative.
+ - Otherwise the actual mode is static.
+- If the desired mode is screen-relative:
+ - If head and screen poses are fresh and the ‘valid’ signal is asserted, the
+ actual mode is screen-relative.
+ - Otherwise, the same rules apply as when the desired mode is world-relative.
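+
+A sketch of these rules in code (the actual logic lives in `ModeSelector.cpp`;
+the freshness and validity flags are illustrative names):
+
+```
+HeadTrackingMode actual = HeadTrackingMode::STATIC;
+if (desired == HeadTrackingMode::SCREEN_RELATIVE && headIsFresh && screenIsFresh &&
+    screenHeadPoseValid) {
+    actual = HeadTrackingMode::SCREEN_RELATIVE;
+} else if (desired != HeadTrackingMode::STATIC && headIsFresh) {
+    // Covers world-relative as well as the screen-relative fallback.
+    actual = HeadTrackingMode::WORLD_RELATIVE;
+}
+```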
+
+### Rate Limiter
+
+A Rate Limiter block is applied to the final output to smooth out any abrupt
+transitions caused by any of the following events (see the usage sketch after
+this list):
+
+- Mode switch.
+- Display orientation switch.
+- Recenter operation.
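+
+A usage sketch of the rate-limiting helper declared in `media/Pose.h` (the
+variable names are illustrative):
+
+```
+// Move from the previous output toward the newly computed pose over time step
+// dt, without exceeding the configured velocity limits.
+auto [limitedPose, wasLimited] = moveWithRateLimit(
+        previousOutput, newHeadToStage, dt,
+        options.maxTranslationalVelocity, options.maxRotationalVelocity);
+```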
diff --git a/media/libheadtracking/ScreenHeadFusion-test.cpp b/media/libheadtracking/ScreenHeadFusion-test.cpp
new file mode 100644
index 0000000..ecf27f5
--- /dev/null
+++ b/media/libheadtracking/ScreenHeadFusion-test.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "ScreenHeadFusion.h"
+#include "TestUtil.h"
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace android {
+namespace media {
+namespace {
+
+TEST(ScreenHeadFusion, Init) {
+ ScreenHeadFusion fusion;
+ EXPECT_FALSE(fusion.calculate().has_value());
+}
+
+TEST(ScreenHeadFusion, Calculate_NoHead) {
+ ScreenHeadFusion fusion;
+ fusion.setWorldToScreenPose(0, Pose3f());
+ EXPECT_FALSE(fusion.calculate().has_value());
+}
+
+TEST(ScreenHeadFusion, Calculate_NoScreen) {
+ ScreenHeadFusion fusion;
+ fusion.setWorldToHeadPose(0, Pose3f());
+ EXPECT_FALSE(fusion.calculate().has_value());
+}
+
+TEST(ScreenHeadFusion, Calculate) {
+ Pose3f worldToScreen1({1, 2, 3}, Quaternionf::UnitRandom());
+ Pose3f worldToHead1({4, 5, 6}, Quaternionf::UnitRandom());
+ Pose3f worldToScreen2({11, 12, 13}, Quaternionf::UnitRandom());
+ Pose3f worldToHead2({14, 15, 16}, Quaternionf::UnitRandom());
+
+ ScreenHeadFusion fusion;
+ fusion.setWorldToHeadPose(123, worldToHead1);
+ fusion.setWorldToScreenPose(456, worldToScreen1);
+ auto result = fusion.calculate();
+ ASSERT_TRUE(result.has_value());
+ EXPECT_EQ(123, result->timestamp);
+ EXPECT_EQ(worldToScreen1.inverse() * worldToHead1, result->pose);
+
+ fusion.setWorldToHeadPose(567, worldToHead2);
+ result = fusion.calculate();
+ ASSERT_TRUE(result.has_value());
+ EXPECT_EQ(456, result->timestamp);
+ EXPECT_EQ(worldToScreen1.inverse() * worldToHead2, result->pose);
+
+ fusion.setWorldToScreenPose(678, worldToScreen2);
+ result = fusion.calculate();
+ ASSERT_TRUE(result.has_value());
+ EXPECT_EQ(567, result->timestamp);
+ EXPECT_EQ(worldToScreen2.inverse() * worldToHead2, result->pose);
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/ScreenHeadFusion.cpp b/media/libheadtracking/ScreenHeadFusion.cpp
new file mode 100644
index 0000000..f023570
--- /dev/null
+++ b/media/libheadtracking/ScreenHeadFusion.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ScreenHeadFusion.h"
+
+namespace android {
+namespace media {
+
+void ScreenHeadFusion::setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead) {
+ mWorldToHead = TimestampedPose{.timestamp = timestamp, .pose = worldToHead};
+}
+
+void ScreenHeadFusion::setWorldToScreenPose(int64_t timestamp, const Pose3f& worldToScreen) {
+ mWorldToScreen = TimestampedPose{.timestamp = timestamp, .pose = worldToScreen};
+}
+
+std::optional<ScreenHeadFusion::TimestampedPose> ScreenHeadFusion::calculate() {
+ // TODO: this is temporary, simplistic logic.
+ if (!mWorldToHead.has_value() || !mWorldToScreen.has_value()) {
+ return std::nullopt;
+ }
+ return TimestampedPose{
+ .timestamp = std::min(mWorldToHead->timestamp, mWorldToScreen->timestamp),
+ .pose = mWorldToScreen->pose.inverse() * mWorldToHead->pose};
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/ScreenHeadFusion.h b/media/libheadtracking/ScreenHeadFusion.h
new file mode 100644
index 0000000..ee81100
--- /dev/null
+++ b/media/libheadtracking/ScreenHeadFusion.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <optional>
+
+#include "media/Pose.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Combines world-to-head pose with world-to-screen pose to obtain screen-to-head.
+ *
+ * Input poses may arrive separately. The last pose of each kind is taken into account. The
+ * timestamp of the output is the earlier (older) timestamp of the two inputs.
+ *
+ * Output may be nullopt in the following cases:
+ * - Either one of the inputs has not yet been provided.
+ * - It is estimated that the user is no longer facing the screen.
+ *
+ * Typical usage:
+ *
+ * ScreenHeadFusion fusion(...);
+ * fusion.setWorldToHeadPose(...);
+ * fusion.setWorldToScreenPose(...);
+ * auto output = fusion.calculate();
+ *
+ * This class is not thread-safe, but thread-compatible.
+ */
+class ScreenHeadFusion {
+ public:
+ struct TimestampedPose {
+ int64_t timestamp;
+ Pose3f pose;
+ };
+
+ void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead);
+
+ void setWorldToScreenPose(int64_t timestamp, const Pose3f& worldToScreen);
+
+ /**
+ * Returns the screen-to-head pose, or nullopt if invalid.
+ */
+ std::optional<TimestampedPose> calculate();
+
+ private:
+ std::optional<TimestampedPose> mWorldToHead;
+ std::optional<TimestampedPose> mWorldToScreen;
+};
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/TestUtil.h b/media/libheadtracking/TestUtil.h
new file mode 100644
index 0000000..4636d86
--- /dev/null
+++ b/media/libheadtracking/TestUtil.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include "media/Pose.h"
+#include "media/Twist.h"
+
+namespace {
+
+constexpr float kPoseComparisonPrecision = 1e-5;
+
+} // namespace
+
+// These specializations make {EXPECT,ASSERT}_{EQ,NE} work correctly for Pose3f, Twist3f, Vector3f
+// and Quaternionf.
+namespace testing {
+namespace internal {
+
+template <>
+inline AssertionResult CmpHelperEQ<android::media::Pose3f, android::media::Pose3f>(
+ const char* lhs_expression, const char* rhs_expression, const android::media::Pose3f& lhs,
+ const android::media::Pose3f& rhs) {
+ if (lhs.isApprox(rhs, kPoseComparisonPrecision)) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperNE<android::media::Pose3f, android::media::Pose3f>(
+ const char* lhs_expression, const char* rhs_expression, const android::media::Pose3f& lhs,
+ const android::media::Pose3f& rhs) {
+ if (!lhs.isApprox(rhs, kPoseComparisonPrecision)) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperEQ<android::media::Twist3f, android::media::Twist3f>(
+ const char* lhs_expression, const char* rhs_expression, const android::media::Twist3f& lhs,
+ const android::media::Twist3f& rhs) {
+ if (lhs.isApprox(rhs, kPoseComparisonPrecision)) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperNE<android::media::Twist3f, android::media::Twist3f>(
+ const char* lhs_expression, const char* rhs_expression, const android::media::Twist3f& lhs,
+ const android::media::Twist3f& rhs) {
+ if (!lhs.isApprox(rhs, kPoseComparisonPrecision)) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperEQ<Eigen::Vector3f, Eigen::Vector3f>(const char* lhs_expression,
+ const char* rhs_expression,
+ const Eigen::Vector3f& lhs,
+ const Eigen::Vector3f& rhs) {
+ if (lhs.isApprox(rhs)) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperNE<Eigen::Vector3f, Eigen::Vector3f>(const char* lhs_expression,
+ const char* rhs_expression,
+ const Eigen::Vector3f& lhs,
+ const Eigen::Vector3f& rhs) {
+ if (!lhs.isApprox(rhs)) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperEQ<Eigen::Quaternionf, Eigen::Quaternionf>(
+ const char* lhs_expression, const char* rhs_expression, const Eigen::Quaternionf& lhs,
+ const Eigen::Quaternionf& rhs) {
+ // Negating the coefs results in an equivalent quaternion.
+ if (lhs.isApprox(rhs) || lhs.isApprox(Eigen::Quaternionf(-rhs.coeffs()))) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperNE<Eigen::Quaternionf, Eigen::Quaternionf>(
+ const char* lhs_expression, const char* rhs_expression, const Eigen::Quaternionf& lhs,
+ const Eigen::Quaternionf& rhs) {
+ // Negating the coefs results in an equivalent quaternion.
+ if (!(lhs.isApprox(rhs) || lhs.isApprox(Eigen::Quaternionf(-rhs.coeffs())))) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+} // namespace internal
+} // namespace testing
diff --git a/media/libheadtracking/Twist-test.cpp b/media/libheadtracking/Twist-test.cpp
new file mode 100644
index 0000000..7984e1e
--- /dev/null
+++ b/media/libheadtracking/Twist-test.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/Twist.h"
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace android {
+namespace media {
+namespace {
+
+TEST(Twist, DefaultCtor) {
+ Twist3f twist;
+ EXPECT_EQ(twist.translationalVelocity(), Vector3f::Zero());
+ EXPECT_EQ(twist.rotationalVelocity(), Vector3f::Zero());
+ EXPECT_FLOAT_EQ(twist.scalarRotationalVelocity(), 0);
+ EXPECT_FLOAT_EQ(twist.scalarTranslationalVelocity(), 0);
+}
+
+TEST(Twist, FullCtor) {
+ Vector3f rot{1, 2, 3};
+ Vector3f trans{4, 5, 6};
+ Twist3f twist(trans, rot);
+ EXPECT_EQ(twist.translationalVelocity(), trans);
+ EXPECT_EQ(twist.rotationalVelocity(), rot);
+ EXPECT_FLOAT_EQ(twist.scalarRotationalVelocity(), std::sqrt(14.f));
+ EXPECT_FLOAT_EQ(twist.scalarTranslationalVelocity(), std::sqrt(77.f));
+}
+
+TEST(Twist, Integrate) {
+ Vector3f trans{1, 2, 3};
+ // 45 deg/sec around Z.
+ Vector3f rot{0, 0, M_PI_4};
+ Twist3f twist(trans, rot);
+ Pose3f pose = integrate(twist, 2.f);
+
+ EXPECT_EQ(pose, Pose3f(Vector3f{2, 4, 6}, rotateZ(M_PI_2)));
+}
+
+TEST(Twist, Differentiate) {
+ Pose3f pose(Vector3f{2, 4, 6}, rotateZ(M_PI_2));
+ Twist3f twist = differentiate(pose, 2.f);
+ EXPECT_EQ(twist, Twist3f(Vector3f(1, 2, 3), Vector3f(0, 0, M_PI_4)));
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/Twist.cpp b/media/libheadtracking/Twist.cpp
new file mode 100644
index 0000000..664c4d5
--- /dev/null
+++ b/media/libheadtracking/Twist.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/Twist.h"
+
+#include "QuaternionUtil.h"
+
+namespace android {
+namespace media {
+
+Pose3f integrate(const Twist3f& twist, float dt) {
+ Eigen::Vector3f translation = twist.translationalVelocity() * dt;
+ Eigen::Vector3f rotationVector = twist.rotationalVelocity() * dt;
+ return Pose3f(translation, rotationVectorToQuaternion(rotationVector));
+}
+
+Twist3f differentiate(const Pose3f& pose, float dt) {
+ Eigen::Vector3f translationalVelocity = pose.translation() / dt;
+ Eigen::Vector3f rotationalVelocity = quaternionToRotationVector(pose.rotation()) / dt;
+ return Twist3f(translationalVelocity, rotationalVelocity);
+}
+
+std::ostream& operator<<(std::ostream& os, const Twist3f& twist) {
+ os << "translation: " << twist.translationalVelocity().transpose()
+ << " rotation vector: " << twist.rotationalVelocity().transpose();
+ return os;
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/include/media/HeadTrackingMode.h b/media/libheadtracking/include/media/HeadTrackingMode.h
new file mode 100644
index 0000000..38496e8
--- /dev/null
+++ b/media/libheadtracking/include/media/HeadTrackingMode.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+namespace android {
+namespace media {
+
+/**
+ * Mode of head-tracking.
+ */
+enum class HeadTrackingMode {
+ /** No head-tracking - screen-to-head pose is assumed to be identity. */
+ STATIC,
+ /** Head tracking enabled - world-to-screen pose is assumed to be identity. */
+ WORLD_RELATIVE,
+ /** Full screen-to-head tracking enabled. */
+ SCREEN_RELATIVE,
+};
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/include/media/HeadTrackingProcessor.h b/media/libheadtracking/include/media/HeadTrackingProcessor.h
new file mode 100644
index 0000000..e9ce8a2
--- /dev/null
+++ b/media/libheadtracking/include/media/HeadTrackingProcessor.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <limits>
+#include <memory>
+
+#include "HeadTrackingMode.h"
+#include "Pose.h"
+#include "Twist.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Main entry-point for this library.
+ * This interface encompasses all the processing required for determining the head-to-stage pose
+ * used for audio virtualization.
+ * The usage involves periodic setting of the inputs, calling calculate() and obtaining the outputs.
+ * This class is not thread-safe, but thread-compatible.
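+ *
+ * A typical usage sketch (timestamp, pose and twist values are illustrative):
+ *
+ *   auto processor = createHeadTrackingProcess(HeadTrackingProcessor::Options{});
+ *   processor->setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+ *   processor->setWorldToHeadPose(timestamp, worldToHead, headTwist);
+ *   processor->calculate(timestamp);
+ *   Pose3f headToStage = processor->getHeadToStagePose();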
+ */
+class HeadTrackingProcessor {
+ public:
+ virtual ~HeadTrackingProcessor() = default;
+
+ struct Options {
+ float maxTranslationalVelocity = std::numeric_limits<float>::infinity();
+ float maxRotationalVelocity = std::numeric_limits<float>::infinity();
+ float translationalDriftTimeConstant = std::numeric_limits<float>::infinity();
+ float rotationalDriftTimeConstant = std::numeric_limits<float>::infinity();
+ int64_t freshnessTimeout = std::numeric_limits<int64_t>::max();
+ float predictionDuration = 0;
+ };
+
+ /** Sets the desired head-tracking mode. */
+ virtual void setDesiredMode(HeadTrackingMode mode) = 0;
+
+ /**
+ * Sets the world-to-head pose and head twist (velocity).
+ * headTwist is given in the head coordinate frame.
+ */
+ virtual void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead,
+ const Twist3f& headTwist) = 0;
+
+ /**
+ * Sets the world-to-screen pose.
+ */
+ virtual void setWorldToScreenPose(int64_t timestamp, const Pose3f& worldToScreen) = 0;
+
+ /**
+ * Set the screen-to-stage pose, used in all modes.
+ */
+ virtual void setScreenToStagePose(const Pose3f& screenToStage) = 0;
+
+ /**
+ * Sets the display orientation.
+ * Orientation is expressed as the angle of rotation from the physical "up" side of the screen
+ * to the logical "up" side of the content displayed on the screen. Counterclockwise angles, as
+ * viewed while facing the screen, are positive.
+ */
+ virtual void setDisplayOrientation(float physicalToLogicalAngle) = 0;
+
+ /**
+ * Process all the previous inputs and update the outputs.
+ */
+ virtual void calculate(int64_t timestamp) = 0;
+
+ /**
+ * Get the aggregate head-to-stage pose (primary output of this module).
+ */
+ virtual Pose3f getHeadToStagePose() const = 0;
+
+ /**
+ * Get the actual head-tracking mode (which may deviate from the desired one as mentioned in the
+ * class documentation above).
+ */
+ virtual HeadTrackingMode getActualMode() const = 0;
+
+ /**
+ * This causes the current poses for both the head and screen to be considered "center".
+ */
+ virtual void recenter() = 0;
+};
+
+/**
+ * Creates an instance featuring a default implementation of the HeadTrackingProcessor interface.
+ */
+std::unique_ptr<HeadTrackingProcessor> createHeadTrackingProcess(
+ const HeadTrackingProcessor::Options& options,
+ HeadTrackingMode initialMode = HeadTrackingMode::STATIC);
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/include/media/Pose.h b/media/libheadtracking/include/media/Pose.h
new file mode 100644
index 0000000..06b33f3
--- /dev/null
+++ b/media/libheadtracking/include/media/Pose.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <iosfwd>
+#include <tuple>
+
+#include <Eigen/Geometry>
+
+namespace android {
+namespace media {
+
+/**
+ * A 6-DoF pose.
+ * This class represents a proper rigid transformation (translation + rotation) between a reference
+ * frame and a target frame,
+ *
+ * See https://en.wikipedia.org/wiki/Six_degrees_of_freedom
+ */
+class Pose3f {
+ public:
+ /** Typical precision for isApprox comparisons. */
+ static constexpr float kDummyPrecision = 1e-5f;
+
+ Pose3f(const Eigen::Vector3f& translation, const Eigen::Quaternionf& rotation)
+ : mTranslation(translation), mRotation(rotation) {}
+
+ explicit Pose3f(const Eigen::Vector3f& translation)
+ : Pose3f(translation, Eigen::Quaternionf::Identity()) {}
+
+ explicit Pose3f(const Eigen::Quaternionf& rotation)
+ : Pose3f(Eigen::Vector3f::Zero(), rotation) {}
+
+ Pose3f() : Pose3f(Eigen::Vector3f::Zero(), Eigen::Quaternionf::Identity()) {}
+
+ Pose3f(const Pose3f& other) { *this = other; }
+
+ Pose3f& operator=(const Pose3f& other) {
+ mTranslation = other.mTranslation;
+ mRotation = other.mRotation;
+ return *this;
+ }
+
+ Eigen::Vector3f translation() const { return mTranslation; }
+ Eigen::Quaternionf rotation() const { return mRotation; }
+
+ /**
+ * Reverses the reference and target frames.
+ */
+ Pose3f inverse() const {
+ Eigen::Quaternionf invRotation = mRotation.inverse();
+ return Pose3f(-(invRotation * translation()), invRotation);
+ }
+
+ /**
+ * Composes (chains) together two poses. By convention, this only makes sense if the target
+ * frame of the left-hand pose is the same as the reference frame of the right-hand pose.
+ * Note that this operator is not commutative.
+ */
+ Pose3f operator*(const Pose3f& other) const {
+ Pose3f result = *this;
+ result *= other;
+ return result;
+ }
+
+ Pose3f& operator*=(const Pose3f& other) {
+ mTranslation += mRotation * other.mTranslation;
+ mRotation *= other.mRotation;
+ return *this;
+ }
+
+ /**
+ * This is an imprecise "fuzzy" comparison, which is only to be used for validity-testing
+ * purposes.
+ */
+ bool isApprox(const Pose3f& other, float prec = kDummyPrecision) const {
+ return (mTranslation - other.mTranslation).norm() < prec &&
+ // Quaternions are equivalent under sign inversion.
+ ((mRotation.coeffs() - other.mRotation.coeffs()).norm() < prec ||
+ (mRotation.coeffs() + other.mRotation.coeffs()).norm() < prec);
+ }
+
+ private:
+ Eigen::Vector3f mTranslation;
+ Eigen::Quaternionf mRotation;
+};
+
+/**
+ * Pretty-printer for Pose3f.
+ */
+std::ostream& operator<<(std::ostream& os, const Pose3f& pose);
+
+/**
+ * Move between the 'from' pose and the 'to' pose, while making sure velocity limits are enforced.
+ * If velocity limits are not violated, returns the 'to' pose and false.
+ * If velocity limits are violated, returns pose farthest along the path that can be reached within
+ * the limits, and true.
+ */
+std::tuple<Pose3f, bool> moveWithRateLimit(const Pose3f& from, const Pose3f& to, float t,
+ float maxTranslationalVelocity,
+ float maxRotationalVelocity);
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/include/media/Twist.h b/media/libheadtracking/include/media/Twist.h
new file mode 100644
index 0000000..e2fc203
--- /dev/null
+++ b/media/libheadtracking/include/media/Twist.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <iosfwd>
+
+#include <Eigen/Geometry>
+
+#include "Pose.h"
+
+namespace android {
+namespace media {
+
+/**
+ * A 6-DoF twist.
+ * This class represents the translational and rotational velocity of a rigid object, typically
+ * relative to its own coordinate-frame.
+ * It is constructed from two 3-vectors: one representing linear motion per time-unit and the
+ * other a rotation vector in radians per time-unit (right-handed).
+ */
+class Twist3f {
+ public:
+ Twist3f(const Eigen::Vector3f& translationalVelocity, const Eigen::Vector3f& rotationalVelocity)
+ : mTranslationalVelocity(translationalVelocity), mRotationalVelocity(rotationalVelocity) {}
+
+ Twist3f() : Twist3f(Eigen::Vector3f::Zero(), Eigen::Vector3f::Zero()) {}
+
+ Twist3f(const Twist3f& other) { *this = other; }
+
+ Twist3f& operator=(const Twist3f& other) {
+ mTranslationalVelocity = other.mTranslationalVelocity;
+ mRotationalVelocity = other.mRotationalVelocity;
+ return *this;
+ }
+
+ Eigen::Vector3f translationalVelocity() const { return mTranslationalVelocity; }
+ Eigen::Vector3f rotationalVelocity() const { return mRotationalVelocity; }
+
+ float scalarTranslationalVelocity() const { return mTranslationalVelocity.norm(); }
+ float scalarRotationalVelocity() const { return mRotationalVelocity.norm(); }
+
+ bool isApprox(const Twist3f& other,
+ float prec = Eigen::NumTraits<float>::dummy_precision()) const {
+ return mTranslationalVelocity.isApprox(other.mTranslationalVelocity, prec) &&
+ mRotationalVelocity.isApprox(other.mRotationalVelocity, prec);
+ }
+
+ private:
+ Eigen::Vector3f mTranslationalVelocity;
+ Eigen::Vector3f mRotationalVelocity;
+};
+
+/**
+ * Integrate a twist over time to obtain a pose.
+ * dt is the time over which to integrate.
+ * The resulting pose represents the transformation between the starting point and the ending point
+ * of the motion over the time period.
+ */
+Pose3f integrate(const Twist3f& twist, float dt);
+
+/**
+ * Differentiate a pose to obtain a twist.
+ * dt is the time of the motion between the reference and the target frames of the pose.
+ */
+Twist3f differentiate(const Pose3f& pose, float dt);
+
+/**
+ * Pretty-printer for twist.
+ */
+std::ostream& operator<<(std::ostream& os, const Twist3f& twist);
+
+} // namespace media
+} // namespace android