@@ -311,7 +311,7 @@ CameraDevice::Camera3RequestDescriptor::~Camera3RequestDescriptor() = default;
CameraDevice::CameraDevice(unsigned int id, std::shared_ptr<Camera> camera)
: id_(id), running_(false), camera_(std::move(camera)),
- facing_(CAMERA_FACING_FRONT), orientation_(0)
+ facing_(CAMERA_FACING_FRONT), orientation_(0), lastResultTimestamp_(0)
{
camera_->requestCompleted.connect(this, &CameraDevice::requestComplete);
@@ -1339,6 +1339,7 @@ const camera_metadata_t *CameraDevice::getStaticMetadata()
ANDROID_REQUEST_PIPELINE_DEPTH,
ANDROID_SCALER_CROP_REGION,
ANDROID_SENSOR_EXPOSURE_TIME,
+ ANDROID_SENSOR_FRAME_DURATION,
ANDROID_SENSOR_ROLLING_SHUTTER_SKEW,
ANDROID_SENSOR_TEST_PATTERN_MODE,
ANDROID_SENSOR_TIMESTAMP,
@@ -1944,6 +1945,7 @@ void CameraDevice::requestComplete(Request *request)
*/
uint64_t timestamp = buffers.begin()->second->metadata().timestamp;
resultMetadata = getResultMetadata(*descriptor, timestamp);
+ lastResultTimestamp_ = timestamp;
/* Handle any JPEG compression. */
for (camera3_stream_buffer_t &buffer : descriptor->buffers_) {
@@ -2074,7 +2076,7 @@ CameraDevice::getResultMetadata(const Camera3RequestDescriptor &descriptor,
* Total bytes for JPEG metadata: 82
*/
std::unique_ptr<CameraMetadata> resultMetadata =
- std::make_unique<CameraMetadata>(44, 166);
+ std::make_unique<CameraMetadata>(45, 174);
if (!resultMetadata->isValid()) {
LOG(HAL, Error) << "Failed to allocate result metadata";
return nullptr;
@@ -2197,6 +2199,13 @@ CameraDevice::getResultMetadata(const Camera3RequestDescriptor &descriptor,
value = ANDROID_NOISE_REDUCTION_MODE_OFF;
resultMetadata->addEntry(ANDROID_NOISE_REDUCTION_MODE, &value, 1);
+ /*
+ * \todo Use sliding window average
+ * int64_t value64 = timestamp - lastResultTimestamp_;
+ */
+ int64_t value64 = 33333333;
+ resultMetadata->addEntry(ANDROID_SENSOR_FRAME_DURATION, &value64, 1);
+
/* 33.3 msec */
const int64_t rolling_shutter_skew = 33300000;
resultMetadata->addEntry(ANDROID_SENSOR_ROLLING_SHUTTER_SKEW,
@@ -131,6 +131,8 @@ private:
unsigned int maxJpegBufferSize_;
+ int64_t lastResultTimestamp_;
+
CameraMetadata lastSettings_;
};
CTS tests that the frame durations for every frame (after the first 6) are within the frame duration range that we provide in the static metadata. One way it can do this, which it was doing previously, is to calculate the duration based on the timestamps that we report for each capture result. Another option is that we can report the frame duration ourselves. The frame durations that we report can be more "correct", so use this method to appease CTS. This is part of the fix to allow the following CTS test to pass: - android.hardware.camera2.cts.CaptureRequestTest#testNoiseReductionModeControl Signed-off-by: Paul Elder <paul.elder@ideasonboard.com> --- This probably isn't the right solution, but I can't seem to figure out what the correct route is. CTS checks if the frame duration that we report is within the frame duration range. The frame duration range comes from CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES (where the frame duration is less than or equal to 1e9/30). The frame duration for the current frame is either calculated by CTS by subtracting the SENSOR_TIMESTAMP of the current frame from the one of the last frame (which is what we did previously) or taken from what we report in SENSOR_FRAME_DURATION. The issue is that the frame duration calculated by CTS is less than the minimum available frame duration that we report. So should we lower that? Or report a different SENSOR_FRAME_DURATION, if at all? --- src/android/camera_device.cpp | 13 +++++++++++-- src/android/camera_device.h | 2 ++ 2 files changed, 13 insertions(+), 2 deletions(-)