@@ -99,8 +99,9 @@ int Agc::configure(IPAContext &context, const IPAConfigInfo &configInfo)
maxAnalogueGain_ = std::min(context.configuration.agc.maxAnalogueGain, kMaxAnalogueGain);
/* Configure the default exposure and gain. */
- context.frameContext.agc.gain = minAnalogueGain_;
- context.frameContext.agc.exposure = minShutterSpeed_ / lineDuration_;
+ IPAFrameContext &frameContext = context.frameContextQueue.front();
+ frameContext.agc.gain = minAnalogueGain_;
+ frameContext.agc.exposure = minShutterSpeed_ / lineDuration_;
return 0;
}
@@ -178,12 +179,12 @@ void Agc::filterExposure()
* \param[in] yGain The gain calculated based on the relative luminance target
* \param[in] iqMeanGain The gain calculated based on the relative luminance target
*/
-void Agc::computeExposure(IPAFrameContext &frameContext, double yGain,
- double iqMeanGain)
+void Agc::computeExposure(IPAContext &context, double yGain, double iqMeanGain)
{
/* Get the effective exposure and gain applied on the sensor. */
- uint32_t exposure = frameContext.sensor.exposure;
- double analogueGain = frameContext.sensor.gain;
+ uint32_t exposure = context.prevFrameContext.sensor.exposure;
+ double analogueGain = context.prevFrameContext.sensor.gain;
+ IPAFrameContext &frameContext = context.frameContextQueue.front();
/* Use the highest of the two gain estimates. */
double evGain = std::max(yGain, iqMeanGain);
@@ -333,9 +334,9 @@ void Agc::process(IPAContext &context, const ipu3_uapi_stats_3a *stats)
*/
double yGain = 1.0;
double yTarget = kRelativeLuminanceTarget;
-
+ IPAFrameContext &frameContext = context.frameContextQueue.front();
for (unsigned int i = 0; i < 8; i++) {
- double yValue = estimateLuminance(context.frameContext,
+ double yValue = estimateLuminance(frameContext,
context.configuration.grid.bdsGrid,
stats, yGain);
double extraGain = std::min(10.0, yTarget / (yValue + .001));
@@ -348,7 +349,7 @@ void Agc::process(IPAContext &context, const ipu3_uapi_stats_3a *stats)
break;
}
- computeExposure(context.frameContext, yGain, iqMeanGain);
+ computeExposure(context, yGain, iqMeanGain);
frameCount_++;
}
@@ -34,7 +34,7 @@ private:
double measureBrightness(const ipu3_uapi_stats_3a *stats,
const ipu3_uapi_grid_config &grid) const;
void filterExposure();
- void computeExposure(IPAFrameContext &frameContext, double yGain,
+ void computeExposure(IPAContext &context, double yGain,
double iqMeanGain);
double estimateLuminance(IPAFrameContext &frameContext,
const ipu3_uapi_grid_config &grid,
@@ -382,16 +382,17 @@ void Awb::calculateWBGains(const ipu3_uapi_stats_3a *stats)
void Awb::process(IPAContext &context, const ipu3_uapi_stats_3a *stats)
{
calculateWBGains(stats);
+ IPAFrameContext &frameContext = context.frameContextQueue.front();
/*
* Gains are only recalculated if enough zones were detected.
* The results are cached, so if no results were calculated, we set the
* cached values from asyncResults_ here.
*/
- context.frameContext.awb.gains.blue = asyncResults_.blueGain;
- context.frameContext.awb.gains.green = asyncResults_.greenGain;
- context.frameContext.awb.gains.red = asyncResults_.redGain;
- context.frameContext.awb.temperatureK = asyncResults_.temperatureK;
+ frameContext.awb.gains.blue = asyncResults_.blueGain;
+ frameContext.awb.gains.green = asyncResults_.greenGain;
+ frameContext.awb.gains.red = asyncResults_.redGain;
+ frameContext.awb.temperatureK = asyncResults_.temperatureK;
}
constexpr uint16_t Awb::threshold(float value)
@@ -434,6 +435,7 @@ void Awb::prepare(IPAContext &context, ipu3_uapi_params *params)
*/
params->acc_param.bnr = imguCssBnrDefaults;
Size &bdsOutputSize = context.configuration.grid.bdsOutputSize;
+ IPAFrameContext &frameContext = context.frameContextQueue.front();
params->acc_param.bnr.column_size = bdsOutputSize.width;
params->acc_param.bnr.opt_center.x_reset = grid.x_start - (bdsOutputSize.width / 2);
params->acc_param.bnr.opt_center.y_reset = grid.y_start - (bdsOutputSize.height / 2);
@@ -442,10 +444,10 @@ void Awb::prepare(IPAContext &context, ipu3_uapi_params *params)
params->acc_param.bnr.opt_center_sqr.y_sqr_reset = params->acc_param.bnr.opt_center.y_reset
* params->acc_param.bnr.opt_center.y_reset;
/* Convert to u3.13 fixed point values */
- params->acc_param.bnr.wb_gains.gr = 8192 * context.frameContext.awb.gains.green;
- params->acc_param.bnr.wb_gains.r = 8192 * context.frameContext.awb.gains.red;
- params->acc_param.bnr.wb_gains.b = 8192 * context.frameContext.awb.gains.blue;
- params->acc_param.bnr.wb_gains.gb = 8192 * context.frameContext.awb.gains.green;
+ params->acc_param.bnr.wb_gains.gr = 8192 * frameContext.awb.gains.green;
+ params->acc_param.bnr.wb_gains.r = 8192 * frameContext.awb.gains.red;
+ params->acc_param.bnr.wb_gains.b = 8192 * frameContext.awb.gains.blue;
+ params->acc_param.bnr.wb_gains.gb = 8192 * frameContext.awb.gains.green;
LOG(IPU3Awb, Debug) << "Color temperature estimated: " << asyncResults_.temperatureK;
@@ -42,7 +42,7 @@ int ToneMapping::configure(IPAContext &context,
[[maybe_unused]] const IPAConfigInfo &configInfo)
{
/* Initialise tone mapping gamma value. */
- context.frameContext.toneMapping.gamma = 0.0;
+ context.frameContextQueue.front().toneMapping.gamma = 0.0;
return 0;
}
@@ -60,7 +60,7 @@ void ToneMapping::prepare([[maybe_unused]] IPAContext &context,
{
/* Copy the calculated LUT into the parameters buffer. */
memcpy(params->acc_param.gamma.gc_lut.lut,
- context.frameContext.toneMapping.gammaCorrection.lut,
+ context.frameContextQueue.front().toneMapping.gammaCorrection.lut,
IPU3_UAPI_GAMMA_CORR_LUT_ENTRIES *
sizeof(params->acc_param.gamma.gc_lut.lut[0]));
@@ -80,6 +80,7 @@ void ToneMapping::prepare([[maybe_unused]] IPAContext &context,
void ToneMapping::process(IPAContext &context,
[[maybe_unused]] const ipu3_uapi_stats_3a *stats)
{
+ IPAFrameContext &frameContext = context.frameContextQueue.front();
/*
* Hardcode gamma to 1.1 as a default for now.
*
@@ -87,11 +88,11 @@ void ToneMapping::process(IPAContext &context,
*/
gamma_ = 1.1;
- if (context.frameContext.toneMapping.gamma == gamma_)
+ if (frameContext.toneMapping.gamma == gamma_)
return;
struct ipu3_uapi_gamma_corr_lut &lut =
- context.frameContext.toneMapping.gammaCorrection;
+ frameContext.toneMapping.gammaCorrection;
for (uint32_t i = 0; i < std::size(lut.lut); i++) {
double j = static_cast<double>(i) / (std::size(lut.lut) - 1);
@@ -101,7 +102,7 @@ void ToneMapping::process(IPAContext &context,
lut.lut[i] = gamma * 8191;
}
- context.frameContext.toneMapping.gamma = gamma_;
+ frameContext.toneMapping.gamma = gamma_;
}
} /* namespace ipa::ipu3::algorithms */
@@ -39,6 +39,23 @@ namespace libcamera::ipa::ipu3 {
* algorithm, but should only be written by its owner.
*/
+/**
+ * \brief Construct an IPAFrameContext instance
+ */
+IPAFrameContext::IPAFrameContext() = default;
+
+/**
+ * \brief Move constructor for IPAFrameContext
+ * \param[in] other The other IPAFrameContext
+ */
+IPAFrameContext::IPAFrameContext(IPAFrameContext &&other) = default;
+
+/**
+ * \brief Move assignment operator for IPAFrameContext
+ * \param[in] other The other IPAFrameContext
+ */
+IPAFrameContext &IPAFrameContext::operator=(IPAFrameContext &&other) = default;
+
/**
* \struct IPAContext
* \brief Global IPA context data shared between all algorithms
@@ -46,13 +63,11 @@ namespace libcamera::ipa::ipu3 {
* \var IPAContext::configuration
* \brief The IPA session configuration, immutable during the session
*
- * \var IPAContext::frameContext
- * \brief The frame context for the frame being processed
+ * \var IPAContext::frameContextQueue
+ * \brief A queue of frame contexts to be processed by the IPA
*
- * \todo While the frame context is supposed to be per-frame, this
- * single frame context stores data related to both the current frame
- * and the previous frames, with fields being updated as the algorithms
- * are run. This needs to be turned into real per-frame data storage.
+ * \var IPAContext::prevFrameContext
+ * \brief The latest frame context which the IPA has finished processing
*/
/**
@@ -86,6 +101,11 @@ namespace libcamera::ipa::ipu3 {
* \brief Maximum analogue gain supported with the configured sensor
*/
+/**
+ * \var IPAFrameContext::frame
+ * \brief Frame number of the corresponding frame context
+ */
+
/**
* \var IPAFrameContext::agc
* \brief Context for the Automatic Gain Control algorithm
@@ -14,6 +14,8 @@
#include <libcamera/geometry.h>
+#include <queue>
+
namespace libcamera {
namespace ipa::ipu3 {
@@ -34,6 +36,12 @@ struct IPASessionConfiguration {
};
struct IPAFrameContext {
+ uint32_t frame;
+
+ IPAFrameContext();
+ IPAFrameContext(IPAFrameContext &&other);
+ IPAFrameContext &operator=(IPAFrameContext &&other);
+
struct {
uint32_t exposure;
double gain;
@@ -62,7 +70,8 @@ struct IPAFrameContext {
struct IPAContext {
IPASessionConfiguration configuration;
- IPAFrameContext frameContext;
+ std::queue<IPAFrameContext> frameContextQueue;
+ IPAFrameContext prevFrameContext;
};
} /* namespace ipa::ipu3 */
@@ -324,6 +324,8 @@ int IPAIPU3::start()
*/
void IPAIPU3::stop()
{
+ while (!context_.frameContextQueue.empty())
+ context_.frameContextQueue.pop();
}
/**
@@ -457,6 +459,14 @@ int IPAIPU3::configure(const IPAConfigInfo &configInfo,
/* Clean context at configuration */
context_ = {};
+ /*
+ * Insert an initial context into the queue to facilitate
+ * algo->configure() below.
+ */
+ IPAFrameContext initContext;
+ initContext.frame = 0;
+ context_.frameContextQueue.push(std::move(initContext));
+
calculateBdsGrid(configInfo.bdsOutputSize);
lineDuration_ = sensorInfo_.lineLength * 1.0s / sensorInfo_.pixelRate;
@@ -547,8 +557,9 @@ void IPAIPU3::processEvent(const IPU3Event &event)
const ipu3_uapi_stats_3a *stats =
reinterpret_cast<ipu3_uapi_stats_3a *>(mem.data());
- context_.frameContext.sensor.exposure = event.sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
- context_.frameContext.sensor.gain = camHelper_->gain(event.sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>());
+ IPAFrameContext &curFrameContext = context_.frameContextQueue.front();
+ curFrameContext.sensor.exposure = event.sensorControls.get(V4L2_CID_EXPOSURE).get<int32_t>();
+ curFrameContext.sensor.gain = camHelper_->gain(event.sensorControls.get(V4L2_CID_ANALOGUE_GAIN).get<int32_t>());
parseStatistics(event.frame, event.frameTimestamp, stats);
break;
@@ -571,6 +582,11 @@ void IPAIPU3::processControls([[maybe_unused]] unsigned int frame,
[[maybe_unused]] const ControlList &controls)
{
/* \todo Start processing for 'frame' based on 'controls'. */
+
+ IPAFrameContext newContext;
+ newContext.frame = frame;
+
+ context_.frameContextQueue.push(std::move(newContext));
}
/**
@@ -619,6 +635,9 @@ void IPAIPU3::parseStatistics(unsigned int frame,
{
ControlList ctrls(controls::controls);
+ context_.prevFrameContext = std::move(context_.frameContextQueue.front());
+ context_.frameContextQueue.pop();
+
for (auto const &algo : algorithms_)
algo->process(context_, stats);
@@ -628,11 +647,11 @@ void IPAIPU3::parseStatistics(unsigned int frame,
int64_t frameDuration = (defVBlank_ + sensorInfo_.outputSize.height) * lineDuration_.get<std::micro>();
ctrls.set(controls::FrameDuration, frameDuration);
- ctrls.set(controls::AnalogueGain, context_.frameContext.sensor.gain);
+ ctrls.set(controls::AnalogueGain, context_.prevFrameContext.sensor.gain);
- ctrls.set(controls::ColourTemperature, context_.frameContext.awb.temperatureK);
+ ctrls.set(controls::ColourTemperature, context_.prevFrameContext.awb.temperatureK);
- ctrls.set(controls::ExposureTime, context_.frameContext.sensor.exposure * lineDuration_.get<std::micro>());
+ ctrls.set(controls::ExposureTime, context_.prevFrameContext.sensor.exposure * lineDuration_.get<std::micro>());
/*
* \todo The Metadata provides a path to getting extended data
@@ -661,8 +680,9 @@ void IPAIPU3::setControls(unsigned int frame)
IPU3Action op;
op.op = ActionSetSensorControls;
- exposure_ = context_.frameContext.agc.exposure;
- gain_ = camHelper_->gainCode(context_.frameContext.agc.gain);
+ IPAFrameContext &context = context_.frameContextQueue.front();
+ exposure_ = context.agc.exposure;
+ gain_ = camHelper_->gainCode(context.agc.gain);
ControlList ctrls(ctrls_);
ctrls.set(V4L2_CID_EXPOSURE, static_cast<int32_t>(exposure_));
Having a single IPAFrameContext queue is limiting, especially when we need to preserve per-frame controls. Right now, we are not processing any controls on the IPA side (processControls()) but sooner or later we need to preserve the controls settings for the frames in a sequential fashion. Since IPAIPU3::processControls() is executed on the IPU3CameraData::queuePendingRequests() code path, we need to store the incoming control settings in a separate IPAFrameContext and push that into the queue. The controls for the next frame to be processed by the IPA are retrieved back from the queue in parseStatistics() and set via setControls(). Meanwhile, the last IPAFrameContext is preserved as prevFrameContext to populate the metadata (for ActionMetadataReady). Signed-off-by: Umang Jain <umang.jain@ideasonboard.com> --- - Patch is developed as a part of the CTS-FULL tests investigation - testing manual AE control (under W.I.P) - The test inputs a manual ExposureTime on Request X, and requires reading the same ExposureTime on X's capture result 'exactly'. - In order to facilitate that, we need per-frame context preservation (of controls settings) - No regressions on CTS (Limited). - Sometimes I now face an over-exposed condition of the stream, as a result of this patch. It's not obvious to me why it happens, but my educated guess is that it has to do with initial exposure / gain settings. It is also hard to reproduce (this issue) deterministically. --- src/ipa/ipu3/algorithms/agc.cpp | 19 ++++++------- src/ipa/ipu3/algorithms/agc.h | 2 +- src/ipa/ipu3/algorithms/awb.cpp | 18 +++++++------ src/ipa/ipu3/algorithms/tone_mapping.cpp | 11 ++++---- src/ipa/ipu3/ipa_context.cpp | 32 +++++++++++++++++----- src/ipa/ipu3/ipa_context.h | 11 +++++++- src/ipa/ipu3/ipu3.cpp | 34 +++++++++++++++++++----- 7 files changed, 90 insertions(+), 37 deletions(-)