From patchwork Mon Jan 3 17:09:56 2022
X-Patchwork-Id: 15238
From: Umang Jain
To: libcamera-devel@lists.libcamera.org
Date: Mon, 3 Jan 2022 22:39:56 +0530
Message-Id: <20220103170956.323025-5-umang.jain@ideasonboard.com>
In-Reply-To: <20220103170956.323025-1-umang.jain@ideasonboard.com>
References: <20220103170956.323025-1-umang.jain@ideasonboard.com>
Subject: [libcamera-devel] [PATCH 4/4] ipa: ipu3: Add a IPAFrameContext queue

Having a single IPAFrameContext is limiting, especially when we need to
preserve per-frame controls. Right now we do not process any controls on
the IPA side (processControls()), but sooner or later we will need to
preserve the control settings for each frame in the context, in a
retrievable fashion. Hence, a std::deque is introduced to preserve the
frame context of an incoming request's settings as soon as the request
is queued.

Since IPAIPU3::processControls() is executed on the
IPU3CameraData::queuePendingRequests() code path, we need to store the
incoming control settings in a separate IPAFrameContext and push that
into the queue. The IPAFrameContext is then dropped once processing for
that frame has finished.
Signed-off-by: Umang Jain
---
 src/ipa/ipu3/algorithms/agc.cpp          | 18 ++++----
 src/ipa/ipu3/algorithms/agc.h            |  2 +-
 src/ipa/ipu3/algorithms/awb.cpp          | 18 ++++----
 src/ipa/ipu3/algorithms/tone_mapping.cpp | 11 ++---
 src/ipa/ipu3/ipa_context.cpp             | 57 +++++++++++++++++++++---
 src/ipa/ipu3/ipa_context.h               | 13 +++++-
 src/ipa/ipu3/ipu3.cpp                    | 41 ++++++++++++++---
 7 files changed, 124 insertions(+), 36 deletions(-)

diff --git a/src/ipa/ipu3/algorithms/agc.cpp b/src/ipa/ipu3/algorithms/agc.cpp
index 1d0778d8..f8e1fef7 100644
--- a/src/ipa/ipu3/algorithms/agc.cpp
+++ b/src/ipa/ipu3/algorithms/agc.cpp
@@ -99,8 +99,9 @@ int Agc::configure(IPAContext &context, const IPAConfigInfo &configInfo)
 	maxAnalogueGain_ = std::min(context.configuration.agc.maxAnalogueGain, kMaxAnalogueGain);
 
 	/* Configure the default exposure and gain. */
-	context.frameContext.agc.gain = minAnalogueGain_;
-	context.frameContext.agc.exposure = minShutterSpeed_ / lineDuration_;
+	IPAFrameContext &frameContext = context.frameContextQueue.front();
+	frameContext.agc.gain = minAnalogueGain_;
+	frameContext.agc.exposure = minShutterSpeed_ / lineDuration_;
 
 	return 0;
 }
@@ -174,16 +175,17 @@ void Agc::filterExposure()
 
 /**
  * \brief Estimate the new exposure and gain values
+ * \param[in] frame The frame number
  * \param[inout] frameContext The shared IPA frame Context
  * \param[in] yGain The gain calculated based on the relative luminance target
  * \param[in] iqMeanGain The gain calculated based on the relative luminance target
  */
-void Agc::computeExposure(IPAFrameContext &frameContext, double yGain,
-			  double iqMeanGain)
+void Agc::computeExposure(const uint32_t frame, IPAContext &context, double yGain, double iqMeanGain)
 {
 	/* Get the effective exposure and gain applied on the sensor. */
-	uint32_t exposure = frameContext.sensor.exposure;
-	double analogueGain = frameContext.sensor.gain;
+	uint32_t exposure = context.prevFrameContext.sensor.exposure;
+	double analogueGain = context.prevFrameContext.sensor.gain;
+	IPAFrameContext &frameContext = context.getFrameContext(frame);
 
 	/* Use the highest of the two gain estimates. */
 	double evGain = std::max(yGain, iqMeanGain);
@@ -336,7 +338,7 @@ void Agc::process(const uint32_t frame, IPAContext &context, const ipu3_uapi_sta
 	double yTarget = kRelativeLuminanceTarget;
 
 	for (unsigned int i = 0; i < 8; i++) {
-		double yValue = estimateLuminance(context.frameContext,
+		double yValue = estimateLuminance(context.prevFrameContext,
 						  context.configuration.grid.bdsGrid,
 						  stats, yGain);
 		double extraGain = std::min(10.0, yTarget / (yValue + .001));
@@ -349,7 +351,7 @@
 			break;
 	}
 
-	computeExposure(context.frameContext, yGain, iqMeanGain);
+	computeExposure(frame, context, yGain, iqMeanGain);
 	frameCount_++;
 }
 
diff --git a/src/ipa/ipu3/algorithms/agc.h b/src/ipa/ipu3/algorithms/agc.h
index c6ab8e91..a3c52fc7 100644
--- a/src/ipa/ipu3/algorithms/agc.h
+++ b/src/ipa/ipu3/algorithms/agc.h
@@ -34,7 +34,7 @@ private:
 	double measureBrightness(const ipu3_uapi_stats_3a *stats,
 				 const ipu3_uapi_grid_config &grid) const;
 	void filterExposure();
-	void computeExposure(IPAFrameContext &frameContext, double yGain,
+	void computeExposure(const uint32_t frame, IPAContext &context, double yGain,
 			     double iqMeanGain);
 	double estimateLuminance(IPAFrameContext &frameContext,
 				 const ipu3_uapi_grid_config &grid,
diff --git a/src/ipa/ipu3/algorithms/awb.cpp b/src/ipa/ipu3/algorithms/awb.cpp
index 99fb5305..a8347d0f 100644
--- a/src/ipa/ipu3/algorithms/awb.cpp
+++ b/src/ipa/ipu3/algorithms/awb.cpp
@@ -382,16 +382,17 @@ void Awb::calculateWBGains(const ipu3_uapi_stats_3a *stats)
 void Awb::process(const uint32_t frame, IPAContext &context, const ipu3_uapi_stats_3a *stats)
 {
 	calculateWBGains(stats);
+	IPAFrameContext &frameContext = context.getFrameContext(frame);
 
 	/*
 	 * Gains are only recalculated if enough zones were detected.
 	 * The results are cached, so if no results were calculated, we set the
 	 * cached values from asyncResults_ here.
 	 */
-	context.frameContext.awb.gains.blue = asyncResults_.blueGain;
-	context.frameContext.awb.gains.green = asyncResults_.greenGain;
-	context.frameContext.awb.gains.red = asyncResults_.redGain;
-	context.frameContext.awb.temperatureK = asyncResults_.temperatureK;
+	frameContext.awb.gains.blue = asyncResults_.blueGain;
+	frameContext.awb.gains.green = asyncResults_.greenGain;
+	frameContext.awb.gains.red = asyncResults_.redGain;
+	frameContext.awb.temperatureK = asyncResults_.temperatureK;
 }
 
 constexpr uint16_t Awb::threshold(float value)
@@ -434,6 +435,7 @@ void Awb::prepare([[maybe_unused]] const uint32_t frame, IPAContext &context, ip
 	 */
 	params->acc_param.bnr = imguCssBnrDefaults;
 	Size &bdsOutputSize = context.configuration.grid.bdsOutputSize;
+	IPAFrameContext &frameContext = context.frameContextQueue.front();
 	params->acc_param.bnr.column_size = bdsOutputSize.width;
 	params->acc_param.bnr.opt_center.x_reset = grid.x_start - (bdsOutputSize.width / 2);
 	params->acc_param.bnr.opt_center.y_reset = grid.y_start - (bdsOutputSize.height / 2);
@@ -442,10 +444,10 @@ void Awb::prepare([[maybe_unused]] const uint32_t frame, IPAContext &context, ip
 	params->acc_param.bnr.opt_center_sqr.y_sqr_reset = params->acc_param.bnr.opt_center.y_reset
 						* params->acc_param.bnr.opt_center.y_reset;
 	/* Convert to u3.13 fixed point values */
-	params->acc_param.bnr.wb_gains.gr = 8192 * context.frameContext.awb.gains.green;
-	params->acc_param.bnr.wb_gains.r = 8192 * context.frameContext.awb.gains.red;
-	params->acc_param.bnr.wb_gains.b = 8192 * context.frameContext.awb.gains.blue;
-	params->acc_param.bnr.wb_gains.gb = 8192 * context.frameContext.awb.gains.green;
+	params->acc_param.bnr.wb_gains.gr = 8192 * frameContext.awb.gains.green;
+	params->acc_param.bnr.wb_gains.r = 8192 * frameContext.awb.gains.red;
+	params->acc_param.bnr.wb_gains.b = 8192 * frameContext.awb.gains.blue;
+	params->acc_param.bnr.wb_gains.gb = 8192 * frameContext.awb.gains.green;
 
 	LOG(IPU3Awb, Debug) << "Color temperature estimated: "
 			    << asyncResults_.temperatureK;
diff --git a/src/ipa/ipu3/algorithms/tone_mapping.cpp b/src/ipa/ipu3/algorithms/tone_mapping.cpp
index bba5bc9a..ce6c330d 100644
--- a/src/ipa/ipu3/algorithms/tone_mapping.cpp
+++ b/src/ipa/ipu3/algorithms/tone_mapping.cpp
@@ -42,7 +42,7 @@ int ToneMapping::configure(IPAContext &context,
 			   [[maybe_unused]] const IPAConfigInfo &configInfo)
 {
 	/* Initialise tone mapping gamma value. */
-	context.frameContext.toneMapping.gamma = 0.0;
+	context.frameContextQueue.front().toneMapping.gamma = 0.0;
 
 	return 0;
 }
@@ -62,7 +62,7 @@ void ToneMapping::prepare([[maybe_unused]] const uint32_t frame,
 {
 	/* Copy the calculated LUT into the parameters buffer. */
 	memcpy(params->acc_param.gamma.gc_lut.lut,
-	       context.frameContext.toneMapping.gammaCorrection.lut,
+	       context.frameContextQueue.front().toneMapping.gammaCorrection.lut,
 	       IPU3_UAPI_GAMMA_CORR_LUT_ENTRIES *
 	       sizeof(params->acc_param.gamma.gc_lut.lut[0]));
 
@@ -83,6 +83,7 @@ void ToneMapping::prepare([[maybe_unused]] const uint32_t frame,
 void ToneMapping::process(const uint32_t frame, IPAContext &context,
 			  [[maybe_unused]] const ipu3_uapi_stats_3a *stats)
 {
+	IPAFrameContext &frameContext = context.getFrameContext(frame);
 	/*
 	 * Hardcode gamma to 1.1 as a default for now.
 	 *
@@ -90,11 +91,11 @@ void ToneMapping::process(const uint32_t frame, IPAContext &context,
 	 */
 	gamma_ = 1.1;
 
-	if (context.frameContext.toneMapping.gamma == gamma_)
+	if (frameContext.toneMapping.gamma == gamma_)
 		return;
 
 	struct ipu3_uapi_gamma_corr_lut &lut =
-		context.frameContext.toneMapping.gammaCorrection;
+		frameContext.toneMapping.gammaCorrection;
 
 	for (uint32_t i = 0; i < std::size(lut.lut); i++) {
 		double j = static_cast<double>(i) / (std::size(lut.lut) - 1);
@@ -104,7 +105,7 @@ void ToneMapping::process(const uint32_t frame, IPAContext &context,
 		lut.lut[i] = gamma * 8191;
 	}
 
-	context.frameContext.toneMapping.gamma = gamma_;
+	frameContext.toneMapping.gamma = gamma_;
 }
 
 } /* namespace ipa::ipu3::algorithms */
diff --git a/src/ipa/ipu3/ipa_context.cpp b/src/ipa/ipu3/ipa_context.cpp
index 86794ac1..95a08547 100644
--- a/src/ipa/ipu3/ipa_context.cpp
+++ b/src/ipa/ipu3/ipa_context.cpp
@@ -39,6 +39,48 @@ namespace libcamera::ipa::ipu3 {
  * algorithm, but should only be written by its owner.
  */
 
+/**
+ * \brief Retrieve the context of a particular frame
+ * \param[in] frame Frame number
+ *
+ * Retrieve the frame context of the \a frame.
+ *
+ * \return The frame context of the given frame number or nullptr, if not found
+ */
+IPAFrameContext &IPAContext::getFrameContext(const uint32_t frame)
+{
+	auto iter = frameContextQueue.begin();
+	while (iter != frameContextQueue.end()) {
+		if (iter->frame == frame)
+			return *iter;
+
+		iter++;
+	}
+
+	/*
+	 * \todo Handle the case where frame-context is not found here.
+	 * Should we be FATAL ?
+	 */
+	return *iter; /* returns frameContextQueue.end() */
+}
+
+/**
+ * \brief Construct a IPAFrameContext instance
+ */
+IPAFrameContext::IPAFrameContext() = default;
+
+/**
+ * \brief Move constructor for IPAFrameContext
+ * \param[in] other The other IPAFrameContext
+ */
+IPAFrameContext::IPAFrameContext(IPAFrameContext &&other) = default;
+
+/**
+ * \brief Move assignment operator for IPAFrameContext
+ * \param[in] other The other IPAFrameContext
+ */
+IPAFrameContext &IPAFrameContext::operator=(IPAFrameContext &&other) = default;
+
 /**
  * \struct IPAContext
  * \brief Global IPA context data shared between all algorithms
@@ -46,13 +88,11 @@ namespace libcamera::ipa::ipu3 {
  * \var IPAContext::configuration
  * \brief The IPA session configuration, immutable during the session
  *
- * \var IPAContext::frameContext
- * \brief The frame context for the frame being processed
+ * \var IPAContext::frameContextQueue
+ * \brief A queue of frame contexts to be processed by the IPA
  *
- * \todo While the frame context is supposed to be per-frame, this
- * single frame context stores data related to both the current frame
- * and the previous frames, with fields being updated as the algorithms
- * are run. This needs to be turned into real per-frame data storage.
+ * \var IPAContext::prevFrameContext
+ * \brief The latest frame context which the IPA has finished processing
  */
 
 /**
@@ -86,6 +126,11 @@ namespace libcamera::ipa::ipu3 {
  * \brief Maximum analogue gain supported with the configured sensor
  */
 
+/**
+ * \var IPAFrameContext::frame
+ * \brief Frame number of the corresponding frame context
+ */
+
 /**
  * \var IPAFrameContext::agc
  * \brief Context for the Automatic Gain Control algorithm
diff --git a/src/ipa/ipu3/ipa_context.h b/src/ipa/ipu3/ipa_context.h
index c6dc0814..df2a9779 100644
--- a/src/ipa/ipu3/ipa_context.h
+++ b/src/ipa/ipu3/ipa_context.h
@@ -8,6 +8,8 @@
 
 #pragma once
 
+#include <deque>
+
 #include <linux/intel-ipu3.h>
 
 #include <libcamera/base/utils.h>
@@ -34,6 +36,12 @@ struct IPASessionConfiguration {
 };
 
 struct IPAFrameContext {
+	uint32_t frame;
+
+	IPAFrameContext();
+	IPAFrameContext(IPAFrameContext &&other);
+	IPAFrameContext &operator=(IPAFrameContext &&other);
+
 	struct {
 		uint32_t exposure;
 		double gain;
@@ -61,8 +69,11 @@ struct IPAFrameContext {
 };
 
 struct IPAContext {
+	IPAFrameContext &getFrameContext(const uint32_t frame);
+
 	IPASessionConfiguration configuration;
-	IPAFrameContext frameContext;
+	std::deque<IPAFrameContext> frameContextQueue;
+	IPAFrameContext prevFrameContext;
 };
 
 } /* namespace ipa::ipu3 */
diff --git a/src/ipa/ipu3/ipu3.cpp b/src/ipa/ipu3/ipu3.cpp
index fa40c41f..9c3d5ff4 100644
--- a/src/ipa/ipu3/ipu3.cpp
+++ b/src/ipa/ipu3/ipu3.cpp
@@ -336,6 +336,8 @@ int IPAIPU3::start()
  */
 void IPAIPU3::stop()
 {
+	while (!context_.frameContextQueue.empty())
+		context_.frameContextQueue.pop_front();
 }
 
 /**
@@ -469,6 +471,14 @@ int IPAIPU3::configure(const IPAConfigInfo &configInfo,
 	/* Clean context at configuration */
 	context_ = {};
 
+	/*
+	 * Insert an initial context into the queue to facilitate
+	 * algo->configure() below.
+	 */
+	IPAFrameContext initContext;
+	initContext.frame = 0;
+	context_.frameContextQueue.push_back(std::move(initContext));
+
 	calculateBdsGrid(configInfo.bdsOutputSize);
 
 	lineDuration_ = sensorInfo_.lineLength * 1.0s / sensorInfo_.pixelRate;
@@ -518,10 +528,25 @@ void IPAIPU3::unmapBuffers(const std::vector<unsigned int> &ids)
 
 void IPAIPU3::frameStarted([[maybe_unused]] const uint32_t frame)
 {
+	IPAFrameContext newContext;
+	newContext.frame = frame;
+
+	context_.frameContextQueue.push_back(std::move(newContext));
 }
 
 void IPAIPU3::frameCompleted([[maybe_unused]] const uint32_t frame)
 {
+	while (!context_.frameContextQueue.empty()) {
+		auto &fc = context_.frameContextQueue.front();
+		if (fc.frame < frame)
+			context_.frameContextQueue.pop_front();
+
+		/* Keep newer frames */
+		if (fc.frame >= frame) {
+			context_.prevFrameContext = std::move(fc);
+			break;
+		}
+	}
 }
 
 /**
@@ -564,8 +589,9 @@ void IPAIPU3::statsReady(const uint32_t frame, const int64_t frameTimestamp,
 	const ipu3_uapi_stats_3a *stats =
 		reinterpret_cast<ipu3_uapi_stats_3a *>(mem.data());
 
-	context_.frameContext.sensor.exposure = sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
-	context_.frameContext.sensor.gain = camHelper_->gain(sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>());
+	IPAFrameContext &curFrameContext = context_.frameContextQueue.front();
+	curFrameContext.sensor.exposure = sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
+	curFrameContext.sensor.gain = camHelper_->gain(sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>());
 
 	parseStatistics(frame, frameTimestamp, stats);
 }
@@ -645,11 +671,11 @@ void IPAIPU3::parseStatistics(unsigned int frame,
 	int64_t frameDuration = (defVBlank_ + sensorInfo_.outputSize.height) * lineDuration_.get<std::micro>();
 	ctrls.set(controls::FrameDuration, frameDuration);
 
-	ctrls.set(controls::AnalogueGain, context_.frameContext.sensor.gain);
+	ctrls.set(controls::AnalogueGain, context_.prevFrameContext.sensor.gain);
 
-	ctrls.set(controls::ColourTemperature, context_.frameContext.awb.temperatureK);
+	ctrls.set(controls::ColourTemperature, context_.prevFrameContext.awb.temperatureK);
 
-	ctrls.set(controls::ExposureTime, context_.frameContext.sensor.exposure * lineDuration_.get<std::micro>());
+	ctrls.set(controls::ExposureTime, context_.prevFrameContext.sensor.exposure * lineDuration_.get<std::micro>());
 
 	/*
 	 * \todo The Metadata provides a path to getting extended data
@@ -679,8 +705,9 @@ void IPAIPU3::parseStatistics(unsigned int frame,
  */
 void IPAIPU3::setControls(unsigned int frame)
 {
-	exposure_ = context_.frameContext.agc.exposure;
-	gain_ = camHelper_->gainCode(context_.frameContext.agc.gain);
+	IPAFrameContext &context = context_.frameContextQueue.front();
+	exposure_ = context.agc.exposure;
+	gain_ = camHelper_->gainCode(context.agc.gain);
 
 	ControlList ctrls(ctrls_);
 	ControlList lensCtrls
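
Below is a minimal, self-contained C++ sketch of the lifecycle the commit
message describes: push a context when a frame starts, look it up by frame
number while the algorithms run, and retire it to a "previous frame" slot
once the frame completes. It is not part of the patch; the names
(FrameContext, FrameContextQueue, get(), prev()) are illustrative
simplifications of the IPU3 IPA types, the not-found case is reduced to an
assert in place of the patch's open \todo, and the retirement step here
pops the completed entry from the queue, which the patch does not do.

#include <cassert>
#include <cstdint>
#include <deque>
#include <utility>

/* Simplified stand-in for IPAFrameContext, with only the fields needed here. */
struct FrameContext {
	uint32_t frame = 0;
	struct {
		uint32_t exposure = 0;
		double gain = 0.0;
	} sensor;
};

class FrameContextQueue
{
public:
	/* Push a fresh context when a new frame starts. */
	void frameStarted(uint32_t frame)
	{
		FrameContext ctx;
		ctx.frame = frame;
		queue_.push_back(std::move(ctx));
	}

	/* Look up the context of an in-flight frame by frame number. */
	FrameContext &get(uint32_t frame)
	{
		for (FrameContext &ctx : queue_) {
			if (ctx.frame == frame)
				return ctx;
		}

		/* The patch leaves the not-found case open; this sketch asserts. */
		assert(!queue_.empty() && "frame context not found");
		return queue_.back();
	}

	/*
	 * On completion, drop stale contexts and keep the completed one as
	 * the "previous" context (mirroring prevFrameContext). Unlike the
	 * patch, the completed entry is popped after being saved.
	 */
	void frameCompleted(uint32_t frame)
	{
		while (!queue_.empty() && queue_.front().frame < frame)
			queue_.pop_front();

		if (!queue_.empty() && queue_.front().frame == frame) {
			prev_ = std::move(queue_.front());
			queue_.pop_front();
		}
	}

	const FrameContext &prev() const { return prev_; }

private:
	std::deque<FrameContext> queue_;
	FrameContext prev_;
};

int main()
{
	FrameContextQueue queue;

	queue.frameStarted(0);
	queue.get(0).sensor.exposure = 1000;	/* e.g. filled in from statistics */
	queue.frameCompleted(0);

	/* prev() now holds frame 0's values for metadata reporting. */
	return queue.prev().sensor.exposure == 1000 ? 0 : 1;
}

A std::deque fits this access pattern: contexts are pushed at the back as
frames start and dropped from the front as they complete, while lookup by
frame number stays a short linear scan over the few in-flight frames.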