Patch Detail
Show a patch.
GET /api/patches/13539/?format=api
{ "id": 13539, "url": "https://patchwork.libcamera.org/api/patches/13539/?format=api", "web_url": "https://patchwork.libcamera.org/patch/13539/", "project": { "id": 1, "url": "https://patchwork.libcamera.org/api/projects/1/?format=api", "name": "libcamera", "link_name": "libcamera", "list_id": "libcamera_core", "list_email": "libcamera-devel@lists.libcamera.org", "web_url": "", "scm_url": "", "webscm_url": "" }, "msgid": "<20210827080227.26370-5-jeanmichel.hautbois@ideasonboard.com>", "date": "2021-08-27T08:02:27", "name": "[libcamera-devel,v2,4/4] ipa: ipu3: Introduce a new AGC algorithm", "commit_ref": null, "pull_url": null, "state": "superseded", "archived": false, "hash": "38fc33e544f959821b074525bf20ec24a51a8a3a", "submitter": { "id": 75, "url": "https://patchwork.libcamera.org/api/people/75/?format=api", "name": "Jean-Michel Hautbois", "email": "jeanmichel.hautbois@ideasonboard.com" }, "delegate": null, "mbox": "https://patchwork.libcamera.org/patch/13539/mbox/", "series": [ { "id": 2407, "url": "https://patchwork.libcamera.org/api/series/2407/?format=api", "web_url": "https://patchwork.libcamera.org/project/libcamera/list/?series=2407", "date": "2021-08-27T08:02:23", "name": "IPU3: AWB and AGC improvements", "version": 2, "mbox": "https://patchwork.libcamera.org/series/2407/mbox/" } ], "comments": "https://patchwork.libcamera.org/api/patches/13539/comments/", "check": "pending", "checks": "https://patchwork.libcamera.org/api/patches/13539/checks/", "tags": {}, "headers": { "Return-Path": "<libcamera-devel-bounces@lists.libcamera.org>", "X-Original-To": "parsemail@patchwork.libcamera.org", "Delivered-To": "parsemail@patchwork.libcamera.org", "Received": [ "from lancelot.ideasonboard.com (lancelot.ideasonboard.com\n\t[92.243.16.209])\n\tby patchwork.libcamera.org (Postfix) with ESMTPS id 5E4F6BD87C\n\tfor <parsemail@patchwork.libcamera.org>;\n\tFri, 27 Aug 2021 08:02:40 +0000 (UTC)", "from lancelot.ideasonboard.com (localhost [IPv6:::1])\n\tby 
lancelot.ideasonboard.com (Postfix) with ESMTP id 24F466893B;\n\tFri, 27 Aug 2021 10:02:40 +0200 (CEST)", "from perceval.ideasonboard.com (perceval.ideasonboard.com\n\t[IPv6:2001:4b98:dc2:55:216:3eff:fef7:d647])\n\tby lancelot.ideasonboard.com (Postfix) with ESMTPS id 1DB7B68928\n\tfor <libcamera-devel@lists.libcamera.org>;\n\tFri, 27 Aug 2021 10:02:35 +0200 (CEST)", "from tatooine.ideasonboard.com (unknown\n\t[IPv6:2a01:e0a:169:7140:ccf5:c267:eba8:cbb5])\n\tby perceval.ideasonboard.com (Postfix) with ESMTPSA id BC2615A1;\n\tFri, 27 Aug 2021 10:02:34 +0200 (CEST)" ], "Authentication-Results": "lancelot.ideasonboard.com;\n\tdkim=fail reason=\"signature verification failed\" (1024-bit key;\n\tunprotected) header.d=ideasonboard.com header.i=@ideasonboard.com\n\theader.b=\"EkobqDm4\"; dkim-atps=neutral", "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple; d=ideasonboard.com;\n\ts=mail; t=1630051354;\n\tbh=AfXQ1+rt2Z0+KV0SZ9nHLOlbGSw6IL7JSbdkqFNVOKs=;\n\th=From:To:Cc:Subject:Date:In-Reply-To:References:From;\n\tb=EkobqDm4efPEbXX1t5XdqGF0DJcmkY1YosVGU9AanZUqJOGkgeKGD8CrBC+HMolSq\n\t1erevsylSwG3icnAhncS0rBKlTvtyZDvKafC68g7lzGVso44ED9nxw5fwTmtKFCXC1\n\t1s9Rqmev8gaLWz1I5M3CqO6yf9pINyiDhBUDvIoo=", "From": "Jean-Michel Hautbois <jeanmichel.hautbois@ideasonboard.com>", "To": "libcamera-devel@lists.libcamera.org", "Date": "Fri, 27 Aug 2021 10:02:27 +0200", "Message-Id": "<20210827080227.26370-5-jeanmichel.hautbois@ideasonboard.com>", "X-Mailer": "git-send-email 2.30.2", "In-Reply-To": "<20210827080227.26370-1-jeanmichel.hautbois@ideasonboard.com>", "References": "<20210827080227.26370-1-jeanmichel.hautbois@ideasonboard.com>", "MIME-Version": "1.0", "Content-Transfer-Encoding": "8bit", "Subject": "[libcamera-devel] [PATCH v2 4/4] ipa: ipu3: Introduce a new AGC\n\talgorithm", "X-BeenThere": "libcamera-devel@lists.libcamera.org", "X-Mailman-Version": "2.1.29", "Precedence": "list", "List-Id": "<libcamera-devel.lists.libcamera.org>", "List-Unsubscribe": 
"<https://lists.libcamera.org/options/libcamera-devel>,\n\t<mailto:libcamera-devel-request@lists.libcamera.org?subject=unsubscribe>", "List-Archive": "<https://lists.libcamera.org/pipermail/libcamera-devel/>", "List-Post": "<mailto:libcamera-devel@lists.libcamera.org>", "List-Help": "<mailto:libcamera-devel-request@lists.libcamera.org?subject=help>", "List-Subscribe": "<https://lists.libcamera.org/listinfo/libcamera-devel>,\n\t<mailto:libcamera-devel-request@lists.libcamera.org?subject=subscribe>", "Errors-To": "libcamera-devel-bounces@lists.libcamera.org", "Sender": "\"libcamera-devel\" <libcamera-devel-bounces@lists.libcamera.org>" }, "content": "The algorithm used until then is a simple one, let's introduce a new\none, based on the one used by the Raspberry Pi code. We can keep both\ncompiled, and chose to instanciate only one, which demonstrates the\nmodularity and ease to add functionnalities to the IPA.\n\nThis algorithm uses the IPAFrameContext to get the latest AWB gains\napplied and use them to estimate the next shutter time and gain values\nto set.\n\nFor the moment it is not activated as the default algorithm as it may be\na bit unstable. 
More testing is need ;-).\n\nSigned-off-by: Jean-Michel Hautbois <jeanmichel.hautbois@ideasonboard.com>\n---\n src/ipa/ipu3/algorithms/agc_metering.cpp | 427 +++++++++++++++++++++++\n src/ipa/ipu3/algorithms/agc_metering.h | 78 +++++\n src/ipa/ipu3/algorithms/meson.build | 1 +\n src/ipa/ipu3/ipa_context.h | 6 +\n src/ipa/ipu3/ipu3.cpp | 8 +\n 5 files changed, 520 insertions(+)\n create mode 100644 src/ipa/ipu3/algorithms/agc_metering.cpp\n create mode 100644 src/ipa/ipu3/algorithms/agc_metering.h", "diff": "diff --git a/src/ipa/ipu3/algorithms/agc_metering.cpp b/src/ipa/ipu3/algorithms/agc_metering.cpp\nnew file mode 100644\nindex 00000000..1dc05082\n--- /dev/null\n+++ b/src/ipa/ipu3/algorithms/agc_metering.cpp\n@@ -0,0 +1,427 @@\n+/* SPDX-License-Identifier: BSD-2-Clause */\n+/*\n+ * Based on the implementation from the Raspberry Pi IPA,\n+ * Copyright (C) 2019-2021, Raspberry Pi (Trading) Ltd.\n+ * Copyright (C) 2021, Google inc.\n+ *\n+ * agc_metering.cpp - AGC/AEC metering-based control algorithm\n+ */\n+\n+#include \"agc_metering.h\"\n+#include \"awb.h\"\n+\n+#include <algorithm>\n+#include <cmath>\n+#include <numeric>\n+#include <stdint.h>\n+\n+#include <linux/v4l2-controls.h>\n+\n+#include <libcamera/base/log.h>\n+#include <libcamera/base/utils.h>\n+\n+#include \"libipa/histogram.h\"\n+\n+/**\n+ * \\file agc_metering.h\n+ */\n+\n+namespace libcamera {\n+\n+using namespace std::literals::chrono_literals;\n+\n+namespace ipa::ipu3::algorithms {\n+\n+/**\n+ * \\class AgcMetering\n+ * \\brief The class to use the metering-based auto-exposure algorithm\n+ *\n+ * The metering-based algorithm is calculating an exposure and gain value such\n+ * as a given quantity of pixels lie in the top 2% of the histogram. The AWB\n+ * gains are also used here, and all cells in the grid are weighted using a\n+ * specific metering matrix. 
The default here is Spot metering.\n+ */\n+\n+LOG_DEFINE_CATEGORY(IPU3AgcMetering)\n+\n+/* Histogram constants */\n+static constexpr uint32_t knumHistogramBins = 256;\n+\n+/* seems to be a 10-bit pipeline */\n+static constexpr uint8_t kPipelineBits = 10;\n+\n+/* width of the AGC stats grid */\n+static constexpr uint32_t kAgcStatsSizeX = 7;\n+/* height of the AGC stats grid */\n+static constexpr uint32_t kAgcStatsSizeY = 5;\n+/* size of the AGC stats grid */\n+static constexpr uint32_t kAgcStatsSize = kAgcStatsSizeX * kAgcStatsSizeY;\n+\n+/**\n+ * The AGC algorithm uses region-based metering.\n+ * The image is divided up into regions as:\n+ *\n+ *\t+--+--------------+--+\n+ *\t|11| 9 |12|\n+ *\t+--+--+--------+--+--+\n+ *\t| | | 3 | | |\n+ *\t| | +--+--+--+ | |\n+ *\t|7 |5 |1 |0 |2 |6 |8 |\n+ *\t| | +--+--+--+ | |\n+ *\t| | | 4 | | |\n+ *\t+--+--+--------+--+--+\n+ *\t|13| 10 |14|\n+ *\t+--+--------------+--+\n+ *\n+ * The metering-based algorithm is calculating an exposure and gain value such\n+ * as a given quantity of weighted pixels lie in the top 2% of the histogram.The\n+ * AWB gains applied are also used to estimate the total gain to apply.\n+ *\n+ * An average luminance value for the image is calculated according to:\n+ * \\f$Y = \\frac{\\sum_{i=0}^{i=kNumAgcWeightedZones}{kCenteredWeights_{i}Y_{i}}}\n+ * {\\sum_{i=0}^{i=kNumAgcWeightedZones}{w_{i}}}\\f$\n+ */\n+\n+/* Weight applied on each region */\n+static constexpr double kSpotWeights[kNumAgcWeightedZones] = { 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };\n+\n+/* Region number repartition in the image */\n+static constexpr uint32_t kAgcStatsRegions[kAgcStatsSize] = {\n+\t11, 9, 9, 9, 9, 9, 12,\n+\t 7, 5, 3, 3, 3, 6, 8,\n+\t 7, 5, 1, 0, 2, 6, 8,\n+\t 7, 5, 4, 4, 4, 6, 8,\n+\t13, 10, 10, 10, 10, 10, 14\n+};\n+\n+/* Limit the speed of change between two exposure levels */\n+static constexpr double kFastReduceThreshold = 0.3;\n+\n+AgcMetering::AgcMetering()\n+\t: iqMean_(0.0), prevExposure_(0s), 
prevExposureNoDg_(0s),\n+\t currentExposure_(0s), currentExposureNoDg_(0s), currentShutter_(1.0s),\n+\t currentAnalogueGain_(1.0)\n+{\n+}\n+\n+/**\n+ * \\brief Configure the AGC given a configInfo\n+ * \\param[in] context The shared IPA context\n+ * \\param[in] configInfo The IPA configuration data, received from the pipeline\n+ * handler\n+ *\n+ * \\return 0\n+ */\n+int AgcMetering::configure(IPAContext &context, const IPAConfigInfo &configInfo)\n+{\n+\t/* Store the line duration in the IPASessionConfiguration */\n+\tcontext.configuration.agc.lineDuration = configInfo.sensorInfo.lineLength\n+\t\t\t\t\t * (1.0s / configInfo.sensorInfo.pixelRate);\n+\n+\t/* \\todo: those values need to be extracted from a configuration file */\n+\tshutterConstraints_.push_back(100us);\n+\tshutterConstraints_.push_back(10ms);\n+\tshutterConstraints_.push_back(33ms);\n+\tgainConstraints_.push_back(1.0);\n+\tgainConstraints_.push_back(4.0);\n+\tgainConstraints_.push_back(16.0);\n+\n+\tfixedShutter_ = 0s;\n+\tfixedAnalogueGain_ = 0.0;\n+\n+\treturn 0;\n+}\n+\n+/**\n+ * \\brief Translate the IPU3 statistics to AGC regions\n+ * \\param[in] stats The statistics buffer coming from the pipeline handler\n+ * \\param[in] grid The grid used to store the statistics in the IPU3\n+ */\n+void AgcMetering::generateStats(const ipu3_uapi_stats_3a *stats,\n+\t\t\t\tconst ipu3_uapi_grid_config &grid)\n+{\n+\t/* We need to have a AGC grid of kAgcStatsSizeX * kAgcStatsSizeY */\n+\tuint32_t regionWidth = round(grid.width / static_cast<double>(kAgcStatsSizeX));\n+\tuint32_t regionHeight = round(grid.height / static_cast<double>(kAgcStatsSizeY));\n+\tuint32_t hist[knumHistogramBins] = { 0 };\n+\n+\t/* Clear the statistics of the previous frame */\n+\tfor (unsigned int i = 0; i < kNumAgcWeightedZones; i++) {\n+\t\tagcStats_[i].bSum = 0;\n+\t\tagcStats_[i].rSum = 0;\n+\t\tagcStats_[i].gSum = 0;\n+\t\tagcStats_[i].counted = 0;\n+\t\tagcStats_[i].total = 0;\n+\t}\n+\n+\tLOG(IPU3AgcMetering, Debug) << \"[\" << 
(int)grid.width\n+\t\t\t\t << \"x\" << (int)grid.height << \"] cells\"\n+\t\t\t\t << \" scaled to [\" << regionWidth\n+\t\t\t\t << \"x\" << regionHeight << \"] AGC regions\";\n+\n+\t/*\n+\t * Generate a (kAgcStatsSizeX x kAgcStatsSizeY) array from the IPU3 grid\n+\t * which is (grid.width x grid.height).\n+\t */\n+\tfor (unsigned int j = 0; j < kAgcStatsSizeY * regionHeight; j++) {\n+\t\tfor (unsigned int i = 0; i < kAgcStatsSizeX * regionWidth; i++) {\n+\t\t\tuint32_t cellPosition = j * grid.width + i;\n+\t\t\tuint32_t cellX = (cellPosition / regionWidth)\n+\t\t\t\t % kAgcStatsSizeX;\n+\t\t\tuint32_t cellY = ((cellPosition / grid.width) / regionHeight)\n+\t\t\t\t % kAgcStatsSizeY;\n+\n+\t\t\tuint32_t agcRegionPosition = kAgcStatsRegions[cellY * kAgcStatsSizeX + cellX];\n+\t\t\tweights_[agcRegionPosition] = kSpotWeights[agcRegionPosition];\n+\t\t\tcellPosition *= sizeof(Ipu3AwbCell);\n+\n+\t\t\t/* Cast the initial IPU3 structure to simplify the reading */\n+\t\t\tIpu3AwbCell *currentCell = reinterpret_cast<Ipu3AwbCell *>(const_cast<uint8_t *>(&stats->awb_raw_buffer.meta_data[cellPosition]));\n+\t\t\tif (currentCell->satRatio == 0) {\n+\t\t\t\t/* The cell is not saturated, use the current cell */\n+\t\t\t\tagcStats_[agcRegionPosition].counted++;\n+\t\t\t\tuint32_t greenValue = currentCell->greenRedAvg + currentCell->greenBlueAvg;\n+\t\t\t\thist[greenValue / 2]++;\n+\t\t\t\tagcStats_[agcRegionPosition].gSum += greenValue / 2;\n+\t\t\t\tagcStats_[agcRegionPosition].rSum += currentCell->redAvg;\n+\t\t\t\tagcStats_[agcRegionPosition].bSum += currentCell->blueAvg;\n+\t\t\t}\n+\t\t}\n+\t}\n+\n+\t/* Estimate the quantile mean of the top 2% of the histogram */\n+\tiqMean_ = Histogram(Span<uint32_t>(hist)).interQuantileMean(0.98, 1.0);\n+}\n+\n+/**\n+ * \\brief Apply a filter on the exposure value to limit the speed of changes\n+ */\n+void AgcMetering::filterExposure()\n+{\n+\tdouble speed = 0.08;\n+\tif (prevExposure_ == 0s) {\n+\t\t/* DG stands for digital 
gain.*/\n+\t\tprevExposure_ = currentExposure_;\n+\t\tprevExposureNoDg_ = currentExposureNoDg_;\n+\t} else {\n+\t\t/*\n+\t\t * If we are close to the desired result, go faster to avoid\n+\t\t * making multiple micro-adjustments.\n+\t\t * \\todo: Make this customisable?\n+\t\t */\n+\t\tif (prevExposure_ < 1.2 * currentExposure_ &&\n+\t\t prevExposure_ > 0.8 * currentExposure_)\n+\t\t\tspeed = sqrt(speed);\n+\n+\t\tprevExposure_ = speed * currentExposure_ +\n+\t\t\t\tprevExposure_ * (1.0 - speed);\n+\t\tprevExposureNoDg_ = speed * currentExposureNoDg_ +\n+\t\t\t\tprevExposureNoDg_ * (1.0 - speed);\n+\t}\n+\t/*\n+\t * We can't let the no_dg exposure deviate too far below the\n+\t * total exposure, as there might not be enough digital gain available\n+\t * in the ISP to hide it (which will cause nasty oscillation).\n+\t * \\todo: add the support for digital gain\n+\t */\n+\tif (prevExposureNoDg_ <\n+\t prevExposure_ * kFastReduceThreshold)\n+\t\tprevExposureNoDg_ = prevExposure_ * kFastReduceThreshold;\n+\tLOG(IPU3AgcMetering, Debug) << \"After filtering, total_exposure \" << prevExposure_;\n+}\n+\n+/**\n+ * \\brief Estimate the weighted brightness\n+ * \\param[in] gain The current gain applied\n+ * \\param[in] context The shared IPA context\n+ */\n+double AgcMetering::computeInitialY(double gain, IPAContext &context)\n+{\n+\t/*\n+\t * Note how the calculation below means that equal weights_ give you\n+\t * \"average\" metering (i.e. 
all pixels equally important).\n+\t */\n+\tdouble redSum = 0, greenSum = 0, blueSum = 0, pixelSum = 0;\n+\tfor (unsigned int i = 0; i < kNumAgcWeightedZones; i++) {\n+\t\t/* We will exclude the saturated pixels from the sum */\n+\t\tdouble counted = agcStats_[i].counted;\n+\t\tdouble rSum = std::min(agcStats_[i].rSum * gain, ((1 << kPipelineBits) - 1) * counted);\n+\t\tdouble gSum = std::min(agcStats_[i].gSum * gain, ((1 << kPipelineBits) - 1) * counted);\n+\t\tdouble bSum = std::min(agcStats_[i].bSum * gain, ((1 << kPipelineBits) - 1) * counted);\n+\t\t/* Weight each channel with the selected metering method */\n+\t\tredSum += rSum * weights_[i];\n+\t\tgreenSum += gSum * weights_[i];\n+\t\tblueSum += bSum * weights_[i];\n+\t\tpixelSum += counted * weights_[i];\n+\t}\n+\t/* We don't want to have a division by 0.0 :-) */\n+\tif (pixelSum == 0.0) {\n+\t\tLOG(IPU3AgcMetering, Warning) << \"computeInitialY: pixel_sum is zero\";\n+\t\treturn 0;\n+\t}\n+\t/*\n+\t * Estimate the sum of the brightness values, weighted with the gains\n+\t * applied on the channels in AWB.\n+\t */\n+\tdouble Y_sum = redSum * context.frameContext.awb.gains.red * .299 +\n+\t\t greenSum * context.frameContext.awb.gains.green * .587 +\n+\t\t blueSum * context.frameContext.awb.gains.blue * .114;\n+\n+\t/* And return the average brightness */\n+\treturn Y_sum / pixelSum / (1 << kPipelineBits);\n+}\n+\n+/**\n+ * \\brief Compute the exposure value\n+ * \\param[in] gain The current gain applied\n+ */\n+void AgcMetering::computeTargetExposure(double gain)\n+{\n+\tcurrentExposure_ = currentExposureNoDg_ * gain;\n+\t/* \\todo: have a list of shutter speeds */\n+\tDuration maxShutterSpeed = shutterConstraints_.back();\n+\tDuration maxTotalExposure = maxShutterSpeed * gainConstraints_.back();\n+\n+\tcurrentExposure_ = std::min(currentExposure_, maxTotalExposure);\n+\tLOG(IPU3AgcMetering, Debug) << \"Target total_exposure \" << currentExposure_;\n+}\n+\n+/**\n+ * \\brief Split exposure value as shutter 
time and gain\n+ */\n+void AgcMetering::divideUpExposure()\n+{\n+\tDuration exposureValue = prevExposure_;\n+\tDuration shutterTime;\n+\tdouble analogueGain;\n+\tshutterTime = shutterConstraints_[0];\n+\tshutterTime = std::min(shutterTime, shutterConstraints_.back());\n+\tanalogueGain = gainConstraints_[0];\n+\n+\t/**\n+\t * We have an exposure profile with a list of shutter time and gains\n+\t * An example is graphed below:\n+\t *\n+\t * gain shutter time\n+\t * \t\t\t\t\t\t\t\t (ms)\n+\t * ^ ^\n+\t * | |\n+\t * 8x+---------------------------------------------------------xxxxx+30\n+\t * | xxxxx |\n+\t * | xxxxx |\n+\t * | xxxxx |\n+\t * | xxxxx |\n+\t * | xxxxx |\n+\t * | xxxxx |\n+\t * | xxxxx |\n+\t * | xxxxx |\n+\t * | xxxxx |\n+\t * 4x+----------xx--------------------------------------------------+10\n+\t * | xx |\n+\t * | xx |\n+\t * | xx |\n+\t * | xx |\n+\t * | xx |\n+\t * 1x+--xx----------------------------------------------------------+0.1\n+\t * | x |\n+\t * |x |\n+\t * +--------------------------------------------------------------->\n+\t *\t\t\t\ttotal exposure\n+\t */\n+\tif (shutterTime * analogueGain < exposureValue) {\n+\t\tfor (unsigned int stage = 1;\n+\t\t stage < gainConstraints_.size(); stage++) {\n+\t\t\tif (fixedShutter_ == 0s) {\n+\t\t\t\tDuration stageShutter =\n+\t\t\t\t\tstd::min(shutterConstraints_[stage], shutterConstraints_.back());\n+\t\t\t\tif (stageShutter * analogueGain >=\n+\t\t\t\t exposureValue) {\n+\t\t\t\t\tshutterTime =\n+\t\t\t\t\t\texposureValue / analogueGain;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\t\t\t\tshutterTime = stageShutter;\n+\t\t\t}\n+\t\t\tif (fixedAnalogueGain_ == 0.0) {\n+\t\t\t\tif (gainConstraints_[stage] * shutterTime >= exposureValue) {\n+\t\t\t\t\tanalogueGain = exposureValue / shutterTime;\n+\t\t\t\t\tbreak;\n+\t\t\t\t}\n+\t\t\t\tanalogueGain = gainConstraints_[stage];\n+\t\t\t}\n+\t\t}\n+\t}\n+\tLOG(IPU3AgcMetering, Debug) << \"Divided up shutter and gain are \"\n+\t\t\t\t << shutterTime << \" and \" << 
analogueGain;\n+\n+\t/* \todo: flickering avoidance ? */\n+\tfilteredShutter_ = shutterTime;\n+\tfilteredAnalogueGain_ = analogueGain;\n+}\n+\n+/**\n+ * \brief Calculate the gain for the target value to be in the top 2% of the\n+ * \t histogram\n+ * \param[in] currentGain The current gain applied\n+ * \param[in] context The shared IPA context\n+ */\n+void AgcMetering::computeGain(double &currentGain, IPAContext &context)\n+{\n+\tcurrentGain = 1.0;\n+\t/* \todo: the target Y needs to be grabbed from a configuration */\n+\tdouble targetY = 0.162;\n+\tfor (int i = 0; i < 8; i++) {\n+\t\tdouble initialY = computeInitialY(currentGain, context);\n+\t\tdouble extra_gain = std::min(10.0, targetY / (initialY + .001));\n+\n+\t\tcurrentGain *= extra_gain;\n+\t\tLOG(IPU3AgcMetering, Debug) << \"Initial Y \" << initialY\n+\t\t\t\t\t << \" target \" << targetY\n+\t\t\t\t\t << \" gives gain \" << currentGain;\n+\t\tif (extra_gain < 1.01)\n+\t\t\tbreak;\n+\t}\n+\n+\t/*\n+\t * Require the top 2% of pixels to lie at or below 0.5 in the pixel\n+\t * range (for a range from 0 to 255, it is 205). 
This lowers the\n+\t * exposure to stop pixels saturating.\n+\t */\n+\tdouble newGain = (0.5 * knumHistogramBins) / iqMean_;\n+\tLOG(IPU3AgcMetering, Debug) << \"gain: \" << currentGain\n+\t\t\t\t << \" new gain: \" << newGain;\n+\tif (newGain > currentGain)\n+\t\tcurrentGain = newGain;\n+}\n+\n+/**\n+ * \\brief Process IPU3 statistics, and run AGC operations\n+ * \\param[in] context The shared IPA context\n+ * \\param[in] stats The IPU3 statistics and ISP results\n+ */\n+void AgcMetering::process(IPAContext &context, const ipu3_uapi_stats_3a *stats)\n+{\n+\tASSERT(stats->stats_3a_status.awb_en);\n+\tgenerateStats(stats, context.configuration.grid.bdsGrid);\n+\n+\tcurrentShutter_ = context.frameContext.agc.exposure\n+\t\t\t* context.configuration.agc.lineDuration;\n+\tcurrentAnalogueGain_ = context.frameContext.agc.gain;\n+\n+\t/* Estimate the current exposure value */\n+\tcurrentExposureNoDg_ = currentShutter_ * currentAnalogueGain_;\n+\n+\tdouble currentGain = 1.0;\n+\tcomputeGain(currentGain, context);\n+\tcomputeTargetExposure(currentGain);\n+\tfilterExposure();\n+\tdivideUpExposure();\n+\n+\tcontext.frameContext.agc.exposure = filteredShutter_\n+\t\t\t\t\t / context.configuration.agc.lineDuration;\n+\tcontext.frameContext.agc.gain = filteredAnalogueGain_;\n+}\n+\n+} /* namespace ipa::ipu3::algorithms */\n+\n+} /* namespace libcamera */\ndiff --git a/src/ipa/ipu3/algorithms/agc_metering.h b/src/ipa/ipu3/algorithms/agc_metering.h\nnew file mode 100644\nindex 00000000..4fd603e1\n--- /dev/null\n+++ b/src/ipa/ipu3/algorithms/agc_metering.h\n@@ -0,0 +1,78 @@\n+/* SPDX-License-Identifier: LGPL-2.1-or-later */\n+/*\n+ * Based on the implementation from the Raspberry Pi IPA,\n+ * Copyright (C) 2019-2021, Raspberry Pi (Trading) Ltd.\n+ * Copyright (C) 2021, Ideas On Board\n+ *\n+ * agc_metering.h - IPU3 AGC/AEC control algorithm\n+ */\n+#ifndef __LIBCAMERA_IPU3_AGC_H__\n+#define __LIBCAMERA_IPU3_AGC_H__\n+\n+#include <linux/intel-ipu3.h>\n+\n+#include 
<libcamera/base/utils.h>\n+\n+#include <libcamera/geometry.h>\n+\n+#include \"algorithm.h\"\n+#include \"awb.h\"\n+\n+namespace libcamera {\n+\n+struct IPACameraSensorInfo;\n+\n+namespace ipa::ipu3::algorithms {\n+\n+using utils::Duration;\n+\n+/* Number of weighted zones for metering */\n+static constexpr uint32_t kNumAgcWeightedZones = 15;\n+\n+class AgcMetering : public Algorithm\n+{\n+public:\n+\tAgcMetering();\n+\t~AgcMetering() = default;\n+\n+\tint configure(IPAContext &context, const IPAConfigInfo &configInfo) override;\n+\tvoid process(IPAContext &context, const ipu3_uapi_stats_3a *stats) override;\n+\n+private:\n+\tvoid processBrightness(const ipu3_uapi_stats_3a *stats);\n+\tvoid filterExposure();\n+\tvoid lockExposureGain(uint32_t &exposure, double &gain);\n+\tvoid generateStats(const ipu3_uapi_stats_3a *stats,\n+\t\t\t   const ipu3_uapi_grid_config &grid);\n+\tvoid generateZones(std::vector<RGB> &zones);\n+\tdouble computeInitialY(double gain, IPAContext &context);\n+\tvoid computeTargetExposure(double currentGain);\n+\tvoid divideUpExposure();\n+\tvoid computeGain(double &currentGain, IPAContext &context);\n+\n+\tdouble weights_[kNumAgcWeightedZones];\n+\tstruct Accumulator agcStats_[kNumAgcWeightedZones];\n+\n+\tdouble iqMean_;\n+\n+\tDuration prevExposure_;\n+\tDuration prevExposureNoDg_;\n+\tDuration currentExposure_;\n+\tDuration currentExposureNoDg_;\n+\n+\tDuration currentShutter_;\n+\tstd::vector<Duration> shutterConstraints_;\n+\tDuration fixedShutter_;\n+\tDuration filteredShutter_;\n+\n+\tdouble currentAnalogueGain_;\n+\tstd::vector<double> gainConstraints_;\n+\tdouble fixedAnalogueGain_;\n+\tdouble filteredAnalogueGain_;\n+};\n+\n+} /* namespace ipa::ipu3::algorithms */\n+\n+} /* namespace libcamera */\n+\n+#endif /* __LIBCAMERA_IPU3_AGC_H__ */\ndiff --git a/src/ipa/ipu3/algorithms/meson.build b/src/ipa/ipu3/algorithms/meson.build\nindex 807b53ea..f31b2070 100644\n--- a/src/ipa/ipu3/algorithms/meson.build\n+++ 
b/src/ipa/ipu3/algorithms/meson.build\n@@ -2,6 +2,7 @@\n \n ipu3_ipa_algorithms = files([\n 'agc_mean.cpp',\n+ 'agc_metering.cpp',\n 'algorithm.cpp',\n 'awb.cpp',\n 'tone_mapping.cpp',\ndiff --git a/src/ipa/ipu3/ipa_context.h b/src/ipa/ipu3/ipa_context.h\nindex 3a292ad7..190a3468 100644\n--- a/src/ipa/ipu3/ipa_context.h\n+++ b/src/ipa/ipu3/ipa_context.h\n@@ -10,6 +10,8 @@\n \n #include <linux/intel-ipu3.h>\n \n+#include <libcamera/base/utils.h>\n+\n #include <libcamera/geometry.h>\n \n namespace libcamera {\n@@ -17,6 +19,10 @@ namespace libcamera {\n namespace ipa::ipu3 {\n \n struct IPASessionConfiguration {\n+\tstruct {\n+\t\tutils::Duration lineDuration;\n+\t} agc;\n+\n \tstruct {\n \t\tipu3_uapi_grid_config bdsGrid;\n \t\tSize bdsOutputSize;\ndiff --git a/src/ipa/ipu3/ipu3.cpp b/src/ipa/ipu3/ipu3.cpp\nindex 6332fc06..be4a082a 100644\n--- a/src/ipa/ipu3/ipu3.cpp\n+++ b/src/ipa/ipu3/ipu3.cpp\n@@ -81,6 +81,14 @@\n * are run. This needs to be turned into real per-frame data storage.\n */\n \n+/**\n+ * \\struct IPASessionConfiguration::agc\n+ * \\brief AGC configuration of the IPA\n+ *\n+ * \\var IPASessionConfiguration::agc::lineDuration\n+ * \\brief Duration of one line dependant on the sensor configuration\n+ */\n+\n /**\n * \\struct IPASessionConfiguration::grid\n * \\brief Grid configuration of the IPA\n", "prefixes": [ "libcamera-devel", "v2", "4/4" ] }