[libcamera-devel,v7,09/11] libcamera: pipeline: Don't rely on bufferCount

Message ID 20210722232851.747614-10-nfraprado@collabora.com
State Superseded
Series
  • lc-compliance: Add test to queue more requests than hardware depth

Commit Message

Nícolas F. R. A. Prado July 22, 2021, 11:28 p.m. UTC
Pipelines have relied on bufferCount to decide on the number of buffers
to allocate internally through allocateBuffers() and on the number of
V4L2 buffer slots to reserve through importBuffers(). Instead, the
number of internal buffers should be the minimum required by the
algorithms to avoid wasting memory, and the number of V4L2 buffer slots
should overallocate to avoid thrashing dmabuf mappings.

For now, just set them to constants and stop relying on bufferCount, to
allow for its removal.

Signed-off-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
---

No changes in v7

Changes in v6:
- Added pipeline name as prefix to each BUFFER_SLOT_COUNT and
  INTERNAL_BUFFER_COUNT constant

 src/libcamera/pipeline/ipu3/imgu.cpp              | 12 ++++++------
 src/libcamera/pipeline/ipu3/imgu.h                |  5 ++++-
 src/libcamera/pipeline/ipu3/ipu3.cpp              |  9 +--------
 .../pipeline/raspberrypi/raspberrypi.cpp          | 15 +++++----------
 src/libcamera/pipeline/rkisp1/rkisp1.cpp          |  9 ++-------
 src/libcamera/pipeline/rkisp1/rkisp1_path.cpp     |  2 +-
 src/libcamera/pipeline/rkisp1/rkisp1_path.h       |  3 +++
 src/libcamera/pipeline/simple/converter.cpp       |  4 ++--
 src/libcamera/pipeline/simple/converter.h         |  3 +++
 src/libcamera/pipeline/simple/simple.cpp          |  6 ++----
 src/libcamera/pipeline/uvcvideo/uvcvideo.cpp      |  5 +++--
 src/libcamera/pipeline/vimc/vimc.cpp              |  5 +++--
 12 files changed, 35 insertions(+), 43 deletions(-)

Comments

Laurent Pinchart Aug. 1, 2021, 11:42 p.m. UTC | #1
Hi Nícolas,

Thank you for the patch.

On Thu, Jul 22, 2021 at 08:28:49PM -0300, Nícolas F. R. A. Prado wrote:
> Pipelines have relied on bufferCount to decide on the number of buffers
> to allocate internally through allocateBuffers() and on the number of
> V4L2 buffer slots to reserve through importBuffers(). Instead, the
> number of internal buffers should be the minimum required by the
> algorithms to avoid wasting memory, and the number of V4L2 buffer slots
> should overallocate to avoid thrashing dmabuf mappings.
> 
> For now, just set them to constants and stop relying on bufferCount, to
> allow for its removal.
> 
> Signed-off-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
> ---
> 
> No changes in v7
> 
> Changes in v6:
> - Added pipeline name as prefix to each BUFFER_SLOT_COUNT and
>   INTERNAL_BUFFER_COUNT constant
> 
>  src/libcamera/pipeline/ipu3/imgu.cpp              | 12 ++++++------
>  src/libcamera/pipeline/ipu3/imgu.h                |  5 ++++-
>  src/libcamera/pipeline/ipu3/ipu3.cpp              |  9 +--------
>  .../pipeline/raspberrypi/raspberrypi.cpp          | 15 +++++----------
>  src/libcamera/pipeline/rkisp1/rkisp1.cpp          |  9 ++-------
>  src/libcamera/pipeline/rkisp1/rkisp1_path.cpp     |  2 +-
>  src/libcamera/pipeline/rkisp1/rkisp1_path.h       |  3 +++
>  src/libcamera/pipeline/simple/converter.cpp       |  4 ++--
>  src/libcamera/pipeline/simple/converter.h         |  3 +++
>  src/libcamera/pipeline/simple/simple.cpp          |  6 ++----
>  src/libcamera/pipeline/uvcvideo/uvcvideo.cpp      |  5 +++--
>  src/libcamera/pipeline/vimc/vimc.cpp              |  5 +++--
>  12 files changed, 35 insertions(+), 43 deletions(-)

Given that some of the pipeline handlers will need more intrusive
changes to address the comments below, you could split this with one
patch per pipeline handler (or perhaps grouping the easy ones together).

> 
> diff --git a/src/libcamera/pipeline/ipu3/imgu.cpp b/src/libcamera/pipeline/ipu3/imgu.cpp
> index e955bc3456ba..f36e99dacbe7 100644
> --- a/src/libcamera/pipeline/ipu3/imgu.cpp
> +++ b/src/libcamera/pipeline/ipu3/imgu.cpp
> @@ -593,22 +593,22 @@ int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
>  /**
>   * \brief Allocate buffers for all the ImgU video devices
>   */
> -int ImgUDevice::allocateBuffers(unsigned int bufferCount)
> +int ImgUDevice::allocateBuffers()
>  {
>  	/* Share buffers between CIO2 output and ImgU input. */
> -	int ret = input_->importBuffers(bufferCount);
> +	int ret = input_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
>  	if (ret) {
>  		LOG(IPU3, Error) << "Failed to import ImgU input buffers";
>  		return ret;
>  	}
>  
> -	ret = param_->allocateBuffers(bufferCount, &paramBuffers_);
> +	ret = param_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT, &paramBuffers_);
>  	if (ret < 0) {
>  		LOG(IPU3, Error) << "Failed to allocate ImgU param buffers";
>  		goto error;
>  	}
>  
> -	ret = stat_->allocateBuffers(bufferCount, &statBuffers_);
> +	ret = stat_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT, &statBuffers_);
>  	if (ret < 0) {
>  		LOG(IPU3, Error) << "Failed to allocate ImgU stat buffers";
>  		goto error;
> @@ -619,13 +619,13 @@ int ImgUDevice::allocateBuffers(unsigned int bufferCount)
>  	 * corresponding stream is active or inactive, as the driver needs
>  	 * buffers to be requested on the V4L2 devices in order to operate.
>  	 */
> -	ret = output_->importBuffers(bufferCount);
> +	ret = output_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
>  	if (ret < 0) {
>  		LOG(IPU3, Error) << "Failed to import ImgU output buffers";
>  		goto error;
>  	}
>  
> -	ret = viewfinder_->importBuffers(bufferCount);
> +	ret = viewfinder_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
>  	if (ret < 0) {
>  		LOG(IPU3, Error) << "Failed to import ImgU viewfinder buffers";
>  		goto error;
> diff --git a/src/libcamera/pipeline/ipu3/imgu.h b/src/libcamera/pipeline/ipu3/imgu.h
> index 9d4915116087..f934a951fc75 100644
> --- a/src/libcamera/pipeline/ipu3/imgu.h
> +++ b/src/libcamera/pipeline/ipu3/imgu.h
> @@ -61,7 +61,7 @@ public:
>  					    outputFormat);
>  	}
>  
> -	int allocateBuffers(unsigned int bufferCount);
> +	int allocateBuffers();
>  	void freeBuffers();
>  
>  	int start();
> @@ -86,6 +86,9 @@ private:
>  	static constexpr unsigned int PAD_VF = 3;
>  	static constexpr unsigned int PAD_STAT = 4;
>  
> +	static constexpr unsigned int IPU3_INTERNAL_BUFFER_COUNT = 4;
> +	static constexpr unsigned int IPU3_BUFFER_SLOT_COUNT = 5;

5 buffer slots is low. It means that if applications cycle more than 5
buffers, the V4L2VideoDevice cache that maintains associations between
dmabufs and buffer slots will be thrashed. Due to the internal queue of
requests in the IPU3 pipeline handler (similar to what you have
implemented in "[PATCH 0/3] libcamera: pipeline: Add internal request
queue" for other pipeline handlers), we won't fail at queuing requests,
but performance will suffer. I thus think we need to increase the number
of slots to what applications can be reasonably expected to use. We
could use 8, or even 16, as buffer slots are cheap. The same holds for
other pipeline handlers.

The number of slots for the CIO2 output should match the number of
buffer slots for the ImgU input, as the same buffers are used on the two
video devices. One option is to use IPU3_BUFFER_SLOT_COUNT for the CIO2,
instead of CIO2_BUFFER_COUNT. However, the number of internal CIO2
buffers that are allocated by exportBuffers() in CIO2Device::start(), to
be used in case the application doesn't provide any RAW buffer, should
be lower, as those are real buffers and are thus expensive. The number of
buffers and buffer slots on the CIO2 thus needs to be decoupled.

For proper operation, the CIO2 will require at least two queued buffers
(one being DMA'ed to, and one waiting). We need at least one extra
buffer queued to the ImgU to keep buffers flowing. Depending on
processing timings, it may be that the ImgU will complete processing of
its buffer before the CIO2 captures the next one, leading to a temporary
situation where the CIO2 will have three buffers queued, or the CIO2
will finish the capture first, leading to a temporary situation where
the CIO2 will have one buffer queued and the ImgU will have two buffers
queued. In either case, shortly afterwards, the other component will
complete capture or processing, and we'll get back to a situation with
two buffers queued in the CIO2 and one in the ImgU. That's thus a
minimum of three buffers for raw images.

From an ImgU point of view, we could probably get away with a single
parameter and a single stats buffer. This would however not allow
queuing the next frame for processing in the ImgU before the current
frame completes, so two buffers would be better. Now, if we take the IPA
into account, the statistics buffer will spend some time on the IPA side
for processing. It would thus be best to have an extra statistics buffer
to accommodate that, thus requiring three statistics buffers (and three
parameters buffers, as we associate them together).

This rationale leads to using the same number of internal buffers for
the CIO2, the parameters and the statistics. We currently use four, and
while the logic above indicates we could get away with three, it would
be safer to keep using four in this patch, and possibly reduce the
number of buffers later.

I know documentation isn't fun, but I think this rationale should be
captured in a comment in the IPU3 pipeline handler, along with a \todo
item to try and lower the number of internal buffers to three.
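
Something along these lines could capture it (just a sketch, with the
counts and comment wording as assumptions based on the rationale above):

	/*
	 * The internal buffers are shared between the CIO2 raw stream,
	 * the ImgU parameters and the ImgU statistics: two buffers
	 * queued on the capture side (one being DMA'ed to, one waiting),
	 * plus one in flight on the ImgU or IPA side.
	 *
	 * \todo Check if the internal buffer count can be lowered to 3.
	 */
	static constexpr unsigned int IPU3_INTERNAL_BUFFER_COUNT = 4;

	/*
	 * Buffer slots are cheap, overallocate them so that applications
	 * cycling through more buffers than slots don't thrash the
	 * dmabuf mapping cache.
	 */
	static constexpr unsigned int IPU3_BUFFER_SLOT_COUNT = 16;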

> +
>  	int linkSetup(const std::string &source, unsigned int sourcePad,
>  		      const std::string &sink, unsigned int sinkPad,
>  		      bool enable);
> diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp
> index 5fd1757bfe13..4efd201c05e5 100644
> --- a/src/libcamera/pipeline/ipu3/ipu3.cpp
> +++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
> @@ -681,16 +681,9 @@ int PipelineHandlerIPU3::allocateBuffers(Camera *camera)
>  {
>  	IPU3CameraData *data = cameraData(camera);
>  	ImgUDevice *imgu = data->imgu_;
> -	unsigned int bufferCount;
>  	int ret;
>  
> -	bufferCount = std::max({
> -		data->outStream_.configuration().bufferCount,
> -		data->vfStream_.configuration().bufferCount,
> -		data->rawStream_.configuration().bufferCount,
> -	});
> -
> -	ret = imgu->allocateBuffers(bufferCount);
> +	ret = imgu->allocateBuffers();
>  	if (ret < 0)
>  		return ret;
>  
> diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> index d1cd3d9dc082..776e0f92aed1 100644
> --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> @@ -1149,20 +1149,15 @@ int PipelineHandlerRPi::prepareBuffers(Camera *camera)
>  {
>  	RPiCameraData *data = cameraData(camera);
>  	int ret;
> +	constexpr unsigned int bufferCount = 4;
>  
>  	/*
> -	 * Decide how many internal buffers to allocate. For now, simply look
> -	 * at how many external buffers will be provided. We'll need to improve
> -	 * this logic. However, we really must have all streams allocate the same
> -	 * number of buffers to simplify error handling in queueRequestDevice().
> +	 * Allocate internal buffers. We really must have all streams allocate
> +	 * the same number of buffers to simplify error handling in
> +	 * queueRequestDevice().
>  	 */
> -	unsigned int maxBuffers = 0;
> -	for (const Stream *s : camera->streams())
> -		if (static_cast<const RPi::Stream *>(s)->isExternal())
> -			maxBuffers = std::max(maxBuffers, s->configuration().bufferCount);
> -
>  	for (auto const stream : data->streams_) {
> -		ret = stream->prepareBuffers(maxBuffers);
> +		ret = stream->prepareBuffers(bufferCount);

We have a similar problem here, 4 buffer slots is too little, but when
the stream has to allocate internal buffers (!importOnly), which is the
case for most streams, we don't want to overallocate.

I'd like to get feedback from Naush here, but I think this means we'll
have to relax the requirement documented in the comment above, and
accept a different number of buffers for each stream.
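
For instance, something like this (a rough sketch only, pending that
feedback; the counts are placeholders):

	for (auto const stream : data->streams_) {
		/*
		 * External streams only import application-provided
		 * buffers, so overallocate cheap buffer slots. Internal
		 * streams allocate real buffers, keep their count low.
		 */
		unsigned int count = stream->isExternal() ? 16 : 4;

		ret = stream->prepareBuffers(count);
		if (ret < 0)
			return ret;
	}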

>  		if (ret < 0)
>  			return ret;
>  	}
> diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> index 11325875b929..f4ea2fd4d4d0 100644
> --- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> +++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> @@ -690,16 +690,11 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
>  	unsigned int ipaBufferId = 1;
>  	int ret;
>  
> -	unsigned int maxCount = std::max({
> -		data->mainPathStream_.configuration().bufferCount,
> -		data->selfPathStream_.configuration().bufferCount,
> -	});
> -
> -	ret = param_->allocateBuffers(maxCount, &paramBuffers_);
> +	ret = param_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT, &paramBuffers_);
>  	if (ret < 0)
>  		goto error;
>  
> -	ret = stat_->allocateBuffers(maxCount, &statBuffers_);
> +	ret = stat_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT, &statBuffers_);
>  	if (ret < 0)
>  		goto error;
>  
> diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> index 25f482eb8d8e..fea330f72886 100644
> --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> @@ -172,7 +172,7 @@ int RkISP1Path::start()
>  		return -EBUSY;
>  
>  	/* \todo Make buffer count user configurable. */
> -	ret = video_->importBuffers(RKISP1_BUFFER_COUNT);
> +	ret = video_->importBuffers(RKISP1_BUFFER_SLOT_COUNT);
>  	if (ret)
>  		return ret;
>  
> diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.h b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> index 91757600ccdc..3c5891009c58 100644
> --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> @@ -27,6 +27,9 @@ class V4L2Subdevice;
>  struct StreamConfiguration;
>  struct V4L2SubdeviceFormat;
>  
> +static constexpr unsigned int RKISP1_INTERNAL_BUFFER_COUNT = 4;
> +static constexpr unsigned int RKISP1_BUFFER_SLOT_COUNT = 5;

The situation should be simpler for the rkisp1, as it has a different
pipeline model (inline ISP as opposed to offline ISP for the IPU3). We
can allocate more slots (8 or 16, as for other pipeline handlers), and
restrict the number of internal buffers (for stats and parameters) to
the number of requests we expect to queue to the device at once, plus
one for the IPA.  Four thus seems good. Capturing this rationale in a
comment would be good too.
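
In code that could boil down to something like (a sketch, with the slot
count as an assumption):

	/*
	 * Internal buffers hold ISP parameters and statistics: one per
	 * request expected to be queued to the device at once, plus one
	 * in use by the IPA.
	 */
	static constexpr unsigned int RKISP1_INTERNAL_BUFFER_COUNT = 4;

	/*
	 * Buffer slots are cheap, overallocate them to avoid thrashing
	 * the dmabuf mapping cache when applications cycle through many
	 * buffers.
	 */
	static constexpr unsigned int RKISP1_BUFFER_SLOT_COUNT = 16;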

BTW, I may be too tired to think properly, or just unable to see the
obvious, so please challenge any rationale you think is incorrect.

> +
>  class RkISP1Path
>  {
>  public:
> diff --git a/src/libcamera/pipeline/simple/converter.cpp b/src/libcamera/pipeline/simple/converter.cpp
> index b5e34c4cd0c5..b3bcf01483f7 100644
> --- a/src/libcamera/pipeline/simple/converter.cpp
> +++ b/src/libcamera/pipeline/simple/converter.cpp
> @@ -103,11 +103,11 @@ int SimpleConverter::Stream::exportBuffers(unsigned int count,
>  
>  int SimpleConverter::Stream::start()
>  {
> -	int ret = m2m_->output()->importBuffers(inputBufferCount_);
> +	int ret = m2m_->output()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);

Shouldn't this be SIMPLE_INTERNAL_BUFFER_COUNT ? Overallocating is not
much of an issue I suppose.

>  	if (ret < 0)
>  		return ret;
>  
> -	ret = m2m_->capture()->importBuffers(outputBufferCount_);
> +	ret = m2m_->capture()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
>  	if (ret < 0) {
>  		stop();
>  		return ret;
> diff --git a/src/libcamera/pipeline/simple/converter.h b/src/libcamera/pipeline/simple/converter.h
> index 276a2a291c21..7e1d60674f62 100644
> --- a/src/libcamera/pipeline/simple/converter.h
> +++ b/src/libcamera/pipeline/simple/converter.h
> @@ -29,6 +29,9 @@ class SizeRange;
>  struct StreamConfiguration;
>  class V4L2M2MDevice;
>  
> +constexpr unsigned int SIMPLE_INTERNAL_BUFFER_COUNT = 5;
> +constexpr unsigned int SIMPLE_BUFFER_SLOT_COUNT = 5;

Let's name the variables kSimpleInternalBufferCount and
kSimpleBufferSlotCount, as that's the naming scheme we're moving to for
non-macro constants. Same comment elsewhere in this patch.

Those constants don't belong to converter.h. Could you turn them into
member constants of the SimplePipelineHandler class, as
kNumInternalBuffers (which btw should be removed) ? The number of buffer
slots can be passed as a parameter to SimpleConverter::start().

There's no stats or parameters here, and no IPA, so the situation is
different than for IPU3 and RkISP1. The number of internal buffers
should just be one more than the minimum number of buffers required by
the capture device, I don't think there's another requirement.
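
Concretely, something like this (sketch only, exact values still to be
decided):

	/* In SimplePipelineHandler (simple.cpp), replacing kNumInternalBuffers: */
	static constexpr unsigned int kSimpleInternalBufferCount = 4;
	static constexpr unsigned int kSimpleBufferSlotCount = 16;

	/* SimpleConverter::start() then takes the slot count as a parameter: */
	int SimpleConverter::start(unsigned int bufferSlotCount);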

> +
>  class SimpleConverter
>  {
>  public:
> diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
> index 1c25a7344f5f..a1163eaf8be2 100644
> --- a/src/libcamera/pipeline/simple/simple.cpp
> +++ b/src/libcamera/pipeline/simple/simple.cpp
> @@ -803,12 +803,10 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
>  		 * When using the converter allocate a fixed number of internal
>  		 * buffers.
>  		 */
> -		ret = video->allocateBuffers(kNumInternalBuffers,
> +		ret = video->allocateBuffers(SIMPLE_INTERNAL_BUFFER_COUNT,
>  					     &data->converterBuffers_);
>  	} else {
> -		/* Otherwise, prepare for using buffers from the only stream. */
> -		Stream *stream = &data->streams_[0];
> -		ret = video->importBuffers(stream->configuration().bufferCount);
> +		ret = video->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
>  	}
>  	if (ret < 0)
>  		return ret;
> diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> index fd39b3d3c72c..755949e7a59a 100644
> --- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> +++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> @@ -91,6 +91,8 @@ private:
>  		return static_cast<UVCCameraData *>(
>  			PipelineHandler::cameraData(camera));
>  	}
> +
> +	static constexpr unsigned int UVC_BUFFER_SLOT_COUNT = 5;
>  };
>  
>  UVCCameraConfiguration::UVCCameraConfiguration(UVCCameraData *data)
> @@ -236,9 +238,8 @@ int PipelineHandlerUVC::exportFrameBuffers(Camera *camera,
>  int PipelineHandlerUVC::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
>  {
>  	UVCCameraData *data = cameraData(camera);
> -	unsigned int count = data->stream_.configuration().bufferCount;
>  
> -	int ret = data->video_->importBuffers(count);
> +	int ret = data->video_->importBuffers(UVC_BUFFER_SLOT_COUNT);

For the uvc and vimc pipeline handlers, we have no internal buffers, so
it's quite easy. We should have 8 or 16 slots, as for other pipeline
handlers.

>  	if (ret < 0)
>  		return ret;
>  
> diff --git a/src/libcamera/pipeline/vimc/vimc.cpp b/src/libcamera/pipeline/vimc/vimc.cpp
> index e89d53182c6d..24ba743a946c 100644
> --- a/src/libcamera/pipeline/vimc/vimc.cpp
> +++ b/src/libcamera/pipeline/vimc/vimc.cpp
> @@ -102,6 +102,8 @@ private:
>  		return static_cast<VimcCameraData *>(
>  			PipelineHandler::cameraData(camera));
>  	}
> +
> +	static constexpr unsigned int VIMC_BUFFER_SLOT_COUNT = 5;
>  };
>  
>  namespace {
> @@ -312,9 +314,8 @@ int PipelineHandlerVimc::exportFrameBuffers(Camera *camera,
>  int PipelineHandlerVimc::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
>  {
>  	VimcCameraData *data = cameraData(camera);
> -	unsigned int count = data->stream_.configuration().bufferCount;
>  
> -	int ret = data->video_->importBuffers(count);
> +	int ret = data->video_->importBuffers(VIMC_BUFFER_SLOT_COUNT);
>  	if (ret < 0)
>  		return ret;
>
Nícolas F. R. A. Prado Aug. 7, 2021, 3:03 p.m. UTC | #2
Hi Laurent,

On Mon, Aug 02, 2021 at 02:42:53AM +0300, Laurent Pinchart wrote:
> Hi Nícolas,
> 
> Thank you for the patch.
> 
> On Thu, Jul 22, 2021 at 08:28:49PM -0300, Nícolas F. R. A. Prado wrote:
> > Pipelines have relied on bufferCount to decide on the number of buffers
> > to allocate internally through allocateBuffers() and on the number of
> > V4L2 buffer slots to reserve through importBuffers(). Instead, the
> > number of internal buffers should be the minimum required by the
> > algorithms to avoid wasting memory, and the number of V4L2 buffer slots
> > should overallocate to avoid thrashing dmabuf mappings.
> > 
> > For now, just set them to constants and stop relying on bufferCount, to
> > allow for its removal.
> > 
> > Signed-off-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
> > ---
> > 
> > No changes in v7
> > 
> > Changes in v6:
> > - Added pipeline name as prefix to each BUFFER_SLOT_COUNT and
> >   INTERNAL_BUFFER_COUNT constant
> > 
> >  src/libcamera/pipeline/ipu3/imgu.cpp              | 12 ++++++------
> >  src/libcamera/pipeline/ipu3/imgu.h                |  5 ++++-
> >  src/libcamera/pipeline/ipu3/ipu3.cpp              |  9 +--------
> >  .../pipeline/raspberrypi/raspberrypi.cpp          | 15 +++++----------
> >  src/libcamera/pipeline/rkisp1/rkisp1.cpp          |  9 ++-------
> >  src/libcamera/pipeline/rkisp1/rkisp1_path.cpp     |  2 +-
> >  src/libcamera/pipeline/rkisp1/rkisp1_path.h       |  3 +++
> >  src/libcamera/pipeline/simple/converter.cpp       |  4 ++--
> >  src/libcamera/pipeline/simple/converter.h         |  3 +++
> >  src/libcamera/pipeline/simple/simple.cpp          |  6 ++----
> >  src/libcamera/pipeline/uvcvideo/uvcvideo.cpp      |  5 +++--
> >  src/libcamera/pipeline/vimc/vimc.cpp              |  5 +++--
> >  12 files changed, 35 insertions(+), 43 deletions(-)
> 
> Given that some of the pipeline handlers will need more intrusive
> changes to address the comments below, you could split this with one
> patch per pipeline handler (or perhaps grouping the easy ones together).
> 
> > 
> > diff --git a/src/libcamera/pipeline/ipu3/imgu.cpp b/src/libcamera/pipeline/ipu3/imgu.cpp
> > index e955bc3456ba..f36e99dacbe7 100644
> > --- a/src/libcamera/pipeline/ipu3/imgu.cpp
> > +++ b/src/libcamera/pipeline/ipu3/imgu.cpp
> > @@ -593,22 +593,22 @@ int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
> >  /**
> >   * \brief Allocate buffers for all the ImgU video devices
> >   */
> > -int ImgUDevice::allocateBuffers(unsigned int bufferCount)
> > +int ImgUDevice::allocateBuffers()
> >  {
> >  	/* Share buffers between CIO2 output and ImgU input. */
> > -	int ret = input_->importBuffers(bufferCount);
> > +	int ret = input_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> >  	if (ret) {
> >  		LOG(IPU3, Error) << "Failed to import ImgU input buffers";
> >  		return ret;
> >  	}
> >  
> > -	ret = param_->allocateBuffers(bufferCount, &paramBuffers_);
> > +	ret = param_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT, &paramBuffers_);
> >  	if (ret < 0) {
> >  		LOG(IPU3, Error) << "Failed to allocate ImgU param buffers";
> >  		goto error;
> >  	}
> >  
> > -	ret = stat_->allocateBuffers(bufferCount, &statBuffers_);
> > +	ret = stat_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT, &statBuffers_);
> >  	if (ret < 0) {
> >  		LOG(IPU3, Error) << "Failed to allocate ImgU stat buffers";
> >  		goto error;
> > @@ -619,13 +619,13 @@ int ImgUDevice::allocateBuffers(unsigned int bufferCount)
> >  	 * corresponding stream is active or inactive, as the driver needs
> >  	 * buffers to be requested on the V4L2 devices in order to operate.
> >  	 */
> > -	ret = output_->importBuffers(bufferCount);
> > +	ret = output_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> >  	if (ret < 0) {
> >  		LOG(IPU3, Error) << "Failed to import ImgU output buffers";
> >  		goto error;
> >  	}
> >  
> > -	ret = viewfinder_->importBuffers(bufferCount);
> > +	ret = viewfinder_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> >  	if (ret < 0) {
> >  		LOG(IPU3, Error) << "Failed to import ImgU viewfinder buffers";
> >  		goto error;
> > diff --git a/src/libcamera/pipeline/ipu3/imgu.h b/src/libcamera/pipeline/ipu3/imgu.h
> > index 9d4915116087..f934a951fc75 100644
> > --- a/src/libcamera/pipeline/ipu3/imgu.h
> > +++ b/src/libcamera/pipeline/ipu3/imgu.h
> > @@ -61,7 +61,7 @@ public:
> >  					    outputFormat);
> >  	}
> >  
> > -	int allocateBuffers(unsigned int bufferCount);
> > +	int allocateBuffers();
> >  	void freeBuffers();
> >  
> >  	int start();
> > @@ -86,6 +86,9 @@ private:
> >  	static constexpr unsigned int PAD_VF = 3;
> >  	static constexpr unsigned int PAD_STAT = 4;
> >  
> > +	static constexpr unsigned int IPU3_INTERNAL_BUFFER_COUNT = 4;
> > +	static constexpr unsigned int IPU3_BUFFER_SLOT_COUNT = 5;
> 
> 5 buffer slots is low. It means that if applications cycle more than 5
> buffers, the V4L2VideoDevice cache that maintains associations between
> dmabufs and buffer slots will be thrashed. Due to the internal queue of
> requests in the IPU3 pipeline handler (similar to what you have
> implemented in "[PATCH 0/3] libcamera: pipeline: Add internal request
> queue" for other pipeline handlers), we won't fail at queuing requests,
> but performance will suffer. I thus think we need to increase the number
> of slots to what applications can be reasonably expected to use. We
> could use 8, or even 16, as buffer slots are cheap. The same holds for
> other pipeline handlers.
> 
> The number of slots for the CIO2 output should match the number of
> buffer slots for the ImgU input, as the same buffers are used on the two
> video devices. One option is to use IPU3_BUFFER_SLOT_COUNT for the CIO2,
> instead of CIO2_BUFFER_COUNT. However, the number of internal CIO2
> buffers that are allocated by exportBuffers() in CIO2Device::start(), to
> be used in case the application doesn't provide any RAW buffer, should
> be lower, as those are real buffers and are thus expensive. The number of
> buffers and buffer slots on the CIO2 thus needs to be decoupled.
> 
> For proper operation, the CIO2 will require at least two queued buffers
> (one being DMA'ed to, and one waiting). We need at least one extra
> buffer queued to the ImgU to keep buffers flowing. Depending on
> processing timings, it may be that the ImgU will complete processing of
> its buffer before the CIO2 captures the next one, leading to a temporary
> situation where the CIO2 will have three buffers queued, or the CIO2
> will finish the capture first, leading to a temporary situation where
> the CIO2 will have one buffer queued and the ImgU will have two buffers
> queued. In either case, shortly afterwards, the other component will
> complete capture or processing, and we'll get back to a situation with
> two buffers queued in the CIO2 and one in the ImgU. That's thus a
> minimum of three buffers for raw images.
> 
> From an ImgU point of view, we could probably get away with a single
> parameter and a single stats buffer. This would however not allow
> queuing the next frame for processing in the ImgU before the current
> frame completes, so two buffers would be better. Now, if we take the IPA
> into account, the statistics buffer will spend some time on the IPA side
> for processing. It would thus be best to have an extra statistics buffer
> to accommodate that, thus requiring three statistics buffers (and three
> parameters buffers, as we associate them together).
> 
> This rationale leads to using the same number of internal buffers for
> the CIO2, the parameters and the statistics. We currently use four, and
> while the logic above indicates we could get away with three, it would
> be safer to keep using four in this patch, and possibly reduce the
> number of buffers later.
> 
> I know documentation isn't fun, but I think this rationale should be
> captured in a comment in the IPU3 pipeline handler, along with a \todo
> item to try and lower the number of internal buffers to three.

This is the IPU3 topology as I understand it:

      Output  .               .   Input        Output .
      +---+   .               .   +---+        +---+  .
      |   | --------------------> |   |        |   |  .
      +---+   .               .   +---+        +---+  .
CIO2          .   IPA         .          ImgU         .          IPA
              .        Param  .   Param        Stat   .   Stat
              .        +---+  .   +---+        +---+  .   +---+ 
              .        |   | ---> |   |        |   | ---> |   | 
              .        +---+  .   +---+        +---+  .   +---+ 
          
Your suggestions for the minimum number of buffers required are the following,
from what I understand:

CIO2 raw internal buffers:
- 2x on CIO2 Output (one being DMA'ed, one waiting)
- 1x on ImgU Input

ImgU Param/Stat internal buffers:
- 2x on ImgU Param/Stat (one being processed, one waiting)
- 1x on IPA Stat

This arrangement doesn't seem to take into account that IPU3Frames::Info binds
CIO2 internal buffers and ImgU Param/Stat buffers together. This means that each
raw buffer queued to CIO2 Output needs a Param/Stat buffer as well. And each
Param/Stat buffer queued to ImgU for processing needs a CIO2 raw buffer as well.
After ImgU processing though, the raw buffer gets released and reused, so the
Stat buffer queued to the IPA does not require a CIO2 raw buffer.

This means that to achieve the above minimum, due to the IPU3Frames::Info
constraint, we'd actually need:

CIO2 internal buffers:
- 2x on CIO2 Output (one being DMA'ed, one waiting)
- 2x on ImgU Input (for the two ImgU Param/Stat buffers we want to have there)

ImgU Param/Stat internal buffers:
- 2x on CIO2 Output (for the two CIO2 raw buffers we want to have there)
- 2x on ImgU Param/Stat (one being processed, one waiting)
- 1x on IPA Stat

Also we're not accounting for parameter filling in the IPA before we queue the
buffers to ImgU, but perhaps that's fast enough that it doesn't matter?

Does this make sense? Or am I missing something?

Thanks,
Nícolas

> 
> > +
> >  	int linkSetup(const std::string &source, unsigned int sourcePad,
> >  		      const std::string &sink, unsigned int sinkPad,
> >  		      bool enable);
> > diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp
> > index 5fd1757bfe13..4efd201c05e5 100644
> > --- a/src/libcamera/pipeline/ipu3/ipu3.cpp
> > +++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
> > @@ -681,16 +681,9 @@ int PipelineHandlerIPU3::allocateBuffers(Camera *camera)
> >  {
> >  	IPU3CameraData *data = cameraData(camera);
> >  	ImgUDevice *imgu = data->imgu_;
> > -	unsigned int bufferCount;
> >  	int ret;
> >  
> > -	bufferCount = std::max({
> > -		data->outStream_.configuration().bufferCount,
> > -		data->vfStream_.configuration().bufferCount,
> > -		data->rawStream_.configuration().bufferCount,
> > -	});
> > -
> > -	ret = imgu->allocateBuffers(bufferCount);
> > +	ret = imgu->allocateBuffers();
> >  	if (ret < 0)
> >  		return ret;
> >  
> > diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > index d1cd3d9dc082..776e0f92aed1 100644
> > --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > @@ -1149,20 +1149,15 @@ int PipelineHandlerRPi::prepareBuffers(Camera *camera)
> >  {
> >  	RPiCameraData *data = cameraData(camera);
> >  	int ret;
> > +	constexpr unsigned int bufferCount = 4;
> >  
> >  	/*
> > -	 * Decide how many internal buffers to allocate. For now, simply look
> > -	 * at how many external buffers will be provided. We'll need to improve
> > -	 * this logic. However, we really must have all streams allocate the same
> > -	 * number of buffers to simplify error handling in queueRequestDevice().
> > +	 * Allocate internal buffers. We really must have all streams allocate
> > +	 * the same number of buffers to simplify error handling in
> > +	 * queueRequestDevice().
> >  	 */
> > -	unsigned int maxBuffers = 0;
> > -	for (const Stream *s : camera->streams())
> > -		if (static_cast<const RPi::Stream *>(s)->isExternal())
> > -			maxBuffers = std::max(maxBuffers, s->configuration().bufferCount);
> > -
> >  	for (auto const stream : data->streams_) {
> > -		ret = stream->prepareBuffers(maxBuffers);
> > +		ret = stream->prepareBuffers(bufferCount);
> 
> We have a similar problem here, 4 buffer slots is too little, but when
> the stream has to allocate internal buffers (!importOnly), which is the
> case for most streams, we don't want to overallocate.
> 
> I'd like to get feedback from Naush here, but I think this means we'll
> have to relax the requirement documented in the comment above, and
> accept a different number of buffers for each stream.
> 
> >  		if (ret < 0)
> >  			return ret;
> >  	}
> > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > index 11325875b929..f4ea2fd4d4d0 100644
> > --- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > +++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > @@ -690,16 +690,11 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
> >  	unsigned int ipaBufferId = 1;
> >  	int ret;
> >  
> > -	unsigned int maxCount = std::max({
> > -		data->mainPathStream_.configuration().bufferCount,
> > -		data->selfPathStream_.configuration().bufferCount,
> > -	});
> > -
> > -	ret = param_->allocateBuffers(maxCount, &paramBuffers_);
> > +	ret = param_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT, &paramBuffers_);
> >  	if (ret < 0)
> >  		goto error;
> >  
> > -	ret = stat_->allocateBuffers(maxCount, &statBuffers_);
> > +	ret = stat_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT, &statBuffers_);
> >  	if (ret < 0)
> >  		goto error;
> >  
> > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > index 25f482eb8d8e..fea330f72886 100644
> > --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > @@ -172,7 +172,7 @@ int RkISP1Path::start()
> >  		return -EBUSY;
> >  
> >  	/* \todo Make buffer count user configurable. */
> > -	ret = video_->importBuffers(RKISP1_BUFFER_COUNT);
> > +	ret = video_->importBuffers(RKISP1_BUFFER_SLOT_COUNT);
> >  	if (ret)
> >  		return ret;
> >  
> > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.h b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > index 91757600ccdc..3c5891009c58 100644
> > --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > @@ -27,6 +27,9 @@ class V4L2Subdevice;
> >  struct StreamConfiguration;
> >  struct V4L2SubdeviceFormat;
> >  
> > +static constexpr unsigned int RKISP1_INTERNAL_BUFFER_COUNT = 4;
> > +static constexpr unsigned int RKISP1_BUFFER_SLOT_COUNT = 5;
> 
> The situation should be simpler for the rkisp1, as it has a different
> pipeline model (inline ISP as opposed to offline ISP for the IPU3). We
> can allocate more slots (8 or 16, as for other pipeline handlers), and
> restrict the number of internal buffers (for stats and parameters) to
> the number of requests we expect to queue to the device at once, plus
> one for the IPA.  Four thus seems good. Capturing this rationale in a
> comment would be good too.
> 
> BTW, I may be too tired to think properly, or just unable to see the
> obvious, so please challenge any rationale you think is incorrect.
> 
> > +
> >  class RkISP1Path
> >  {
> >  public:
> > diff --git a/src/libcamera/pipeline/simple/converter.cpp b/src/libcamera/pipeline/simple/converter.cpp
> > index b5e34c4cd0c5..b3bcf01483f7 100644
> > --- a/src/libcamera/pipeline/simple/converter.cpp
> > +++ b/src/libcamera/pipeline/simple/converter.cpp
> > @@ -103,11 +103,11 @@ int SimpleConverter::Stream::exportBuffers(unsigned int count,
> >  
> >  int SimpleConverter::Stream::start()
> >  {
> > -	int ret = m2m_->output()->importBuffers(inputBufferCount_);
> > +	int ret = m2m_->output()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> 
> Shouldn't this be SIMPLE_INTERNAL_BUFFER_COUNT ? Overallocating is not
> much of an issue I suppose.
> 
> >  	if (ret < 0)
> >  		return ret;
> >  
> > -	ret = m2m_->capture()->importBuffers(outputBufferCount_);
> > +	ret = m2m_->capture()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> >  	if (ret < 0) {
> >  		stop();
> >  		return ret;
> > diff --git a/src/libcamera/pipeline/simple/converter.h b/src/libcamera/pipeline/simple/converter.h
> > index 276a2a291c21..7e1d60674f62 100644
> > --- a/src/libcamera/pipeline/simple/converter.h
> > +++ b/src/libcamera/pipeline/simple/converter.h
> > @@ -29,6 +29,9 @@ class SizeRange;
> >  struct StreamConfiguration;
> >  class V4L2M2MDevice;
> >  
> > +constexpr unsigned int SIMPLE_INTERNAL_BUFFER_COUNT = 5;
> > +constexpr unsigned int SIMPLE_BUFFER_SLOT_COUNT = 5;
> 
> Let's name the variables kSimpleInternalBufferCount and
> kSimpleBufferSlotCount, as that's the naming scheme we're moving to for
> non-macro constants. Same comment elsewhere in this patch.
> 
> Those constants don't belong to converter.h. Could you turn them into
> member constants of the SimplePipelineHandler class, as
> kNumInternalBuffers (which btw should be removed) ? The number of buffer
> slots can be passed as a parameter to SimpleConverter::start().
> 
> There's no stats or parameters here, and no IPA, so the situation is
> different than for IPU3 and RkISP1. The number of internal buffers
> should just be one more than the minimum number of buffers required by
> the capture device, I don't think there's another requirement.
> 
> > +
> >  class SimpleConverter
> >  {
> >  public:
> > diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
> > index 1c25a7344f5f..a1163eaf8be2 100644
> > --- a/src/libcamera/pipeline/simple/simple.cpp
> > +++ b/src/libcamera/pipeline/simple/simple.cpp
> > @@ -803,12 +803,10 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
> >  		 * When using the converter allocate a fixed number of internal
> >  		 * buffers.
> >  		 */
> > -		ret = video->allocateBuffers(kNumInternalBuffers,
> > +		ret = video->allocateBuffers(SIMPLE_INTERNAL_BUFFER_COUNT,
> >  					     &data->converterBuffers_);
> >  	} else {
> > -		/* Otherwise, prepare for using buffers from the only stream. */
> > -		Stream *stream = &data->streams_[0];
> > -		ret = video->importBuffers(stream->configuration().bufferCount);
> > +		ret = video->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> >  	}
> >  	if (ret < 0)
> >  		return ret;
> > diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > index fd39b3d3c72c..755949e7a59a 100644
> > --- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > +++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > @@ -91,6 +91,8 @@ private:
> >  		return static_cast<UVCCameraData *>(
> >  			PipelineHandler::cameraData(camera));
> >  	}
> > +
> > +	static constexpr unsigned int UVC_BUFFER_SLOT_COUNT = 5;
> >  };
> >  
> >  UVCCameraConfiguration::UVCCameraConfiguration(UVCCameraData *data)
> > @@ -236,9 +238,8 @@ int PipelineHandlerUVC::exportFrameBuffers(Camera *camera,
> >  int PipelineHandlerUVC::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
> >  {
> >  	UVCCameraData *data = cameraData(camera);
> > -	unsigned int count = data->stream_.configuration().bufferCount;
> >  
> > -	int ret = data->video_->importBuffers(count);
> > +	int ret = data->video_->importBuffers(UVC_BUFFER_SLOT_COUNT);
> 
> For the uvc and vimc pipeline handlers, we have no internal buffers, so
> it's quite easy. We should have 8 or 16 slots, as for other pipeline
> handlers.
> 
> >  	if (ret < 0)
> >  		return ret;
> >  
> > diff --git a/src/libcamera/pipeline/vimc/vimc.cpp b/src/libcamera/pipeline/vimc/vimc.cpp
> > index e89d53182c6d..24ba743a946c 100644
> > --- a/src/libcamera/pipeline/vimc/vimc.cpp
> > +++ b/src/libcamera/pipeline/vimc/vimc.cpp
> > @@ -102,6 +102,8 @@ private:
> >  		return static_cast<VimcCameraData *>(
> >  			PipelineHandler::cameraData(camera));
> >  	}
> > +
> > +	static constexpr unsigned int VIMC_BUFFER_SLOT_COUNT = 5;
> >  };
> >  
> >  namespace {
> > @@ -312,9 +314,8 @@ int PipelineHandlerVimc::exportFrameBuffers(Camera *camera,
> >  int PipelineHandlerVimc::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
> >  {
> >  	VimcCameraData *data = cameraData(camera);
> > -	unsigned int count = data->stream_.configuration().bufferCount;
> >  
> > -	int ret = data->video_->importBuffers(count);
> > +	int ret = data->video_->importBuffers(VIMC_BUFFER_SLOT_COUNT);
> >  	if (ret < 0)
> >  		return ret;
> >  
> 
> -- 
> Regards,
> 
> Laurent Pinchart
Nícolas F. R. A. Prado Aug. 9, 2021, 8:26 p.m. UTC | #3
A few more comments:

On Sat, Aug 07, 2021 at 12:03:52PM -0300, Nícolas F. R. A. Prado wrote:
> Hi Laurent,
> 
> On Mon, Aug 02, 2021 at 02:42:53AM +0300, Laurent Pinchart wrote:
> > Hi Nícolas,
> > 
> > Thank you for the patch.
> > 
> > On Thu, Jul 22, 2021 at 08:28:49PM -0300, Nícolas F. R. A. Prado wrote:
> > > Pipelines have relied on bufferCount to decide on the number of buffers
> > > to allocate internally through allocateBuffers() and on the number of
> > > V4L2 buffer slots to reserve through importBuffers(). Instead, the
> > > number of internal buffers should be the minimum required by the
> > > algorithms to avoid wasting memory, and the number of V4L2 buffer slots
> > > should overallocate to avoid thrashing dmabuf mappings.
> > > 
> > > For now, just set them to constants and stop relying on bufferCount, to
> > > allow for its removal.
> > > 
> > > Signed-off-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
> > > ---
> > > 
> > > No changes in v7
> > > 
> > > Changes in v6:
> > > - Added pipeline name as prefix to each BUFFER_SLOT_COUNT and
> > >   INTERNAL_BUFFER_COUNT constant
> > > 
> > >  src/libcamera/pipeline/ipu3/imgu.cpp              | 12 ++++++------
> > >  src/libcamera/pipeline/ipu3/imgu.h                |  5 ++++-
> > >  src/libcamera/pipeline/ipu3/ipu3.cpp              |  9 +--------
> > >  .../pipeline/raspberrypi/raspberrypi.cpp          | 15 +++++----------
> > >  src/libcamera/pipeline/rkisp1/rkisp1.cpp          |  9 ++-------
> > >  src/libcamera/pipeline/rkisp1/rkisp1_path.cpp     |  2 +-
> > >  src/libcamera/pipeline/rkisp1/rkisp1_path.h       |  3 +++
> > >  src/libcamera/pipeline/simple/converter.cpp       |  4 ++--
> > >  src/libcamera/pipeline/simple/converter.h         |  3 +++
> > >  src/libcamera/pipeline/simple/simple.cpp          |  6 ++----
> > >  src/libcamera/pipeline/uvcvideo/uvcvideo.cpp      |  5 +++--
> > >  src/libcamera/pipeline/vimc/vimc.cpp              |  5 +++--
> > >  12 files changed, 35 insertions(+), 43 deletions(-)
> > 
> > Given that some of the pipeline handlers will need more intrusive
> > changes to address the comments below, you could split this with one
> > patch per pipeline handler (or perhaps grouping the easy ones together).
> > 
> > > 
> > > diff --git a/src/libcamera/pipeline/ipu3/imgu.cpp b/src/libcamera/pipeline/ipu3/imgu.cpp
> > > index e955bc3456ba..f36e99dacbe7 100644
> > > --- a/src/libcamera/pipeline/ipu3/imgu.cpp
> > > +++ b/src/libcamera/pipeline/ipu3/imgu.cpp
> > > @@ -593,22 +593,22 @@ int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
> > >  /**
> > >   * \brief Allocate buffers for all the ImgU video devices
> > >   */
> > > -int ImgUDevice::allocateBuffers(unsigned int bufferCount)
> > > +int ImgUDevice::allocateBuffers()
> > >  {
> > >  	/* Share buffers between CIO2 output and ImgU input. */
> > > -	int ret = input_->importBuffers(bufferCount);
> > > +	int ret = input_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> > >  	if (ret) {
> > >  		LOG(IPU3, Error) << "Failed to import ImgU input buffers";
> > >  		return ret;
> > >  	}
> > >  
> > > -	ret = param_->allocateBuffers(bufferCount, &paramBuffers_);
> > > +	ret = param_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT, &paramBuffers_);
> > >  	if (ret < 0) {
> > >  		LOG(IPU3, Error) << "Failed to allocate ImgU param buffers";
> > >  		goto error;
> > >  	}
> > >  
> > > -	ret = stat_->allocateBuffers(bufferCount, &statBuffers_);
> > > +	ret = stat_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT, &statBuffers_);
> > >  	if (ret < 0) {
> > >  		LOG(IPU3, Error) << "Failed to allocate ImgU stat buffers";
> > >  		goto error;
> > > @@ -619,13 +619,13 @@ int ImgUDevice::allocateBuffers(unsigned int bufferCount)
> > >  	 * corresponding stream is active or inactive, as the driver needs
> > >  	 * buffers to be requested on the V4L2 devices in order to operate.
> > >  	 */
> > > -	ret = output_->importBuffers(bufferCount);
> > > +	ret = output_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> > >  	if (ret < 0) {
> > >  		LOG(IPU3, Error) << "Failed to import ImgU output buffers";
> > >  		goto error;
> > >  	}
> > >  
> > > -	ret = viewfinder_->importBuffers(bufferCount);
> > > +	ret = viewfinder_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> > >  	if (ret < 0) {
> > >  		LOG(IPU3, Error) << "Failed to import ImgU viewfinder buffers";
> > >  		goto error;
> > > diff --git a/src/libcamera/pipeline/ipu3/imgu.h b/src/libcamera/pipeline/ipu3/imgu.h
> > > index 9d4915116087..f934a951fc75 100644
> > > --- a/src/libcamera/pipeline/ipu3/imgu.h
> > > +++ b/src/libcamera/pipeline/ipu3/imgu.h
> > > @@ -61,7 +61,7 @@ public:
> > >  					    outputFormat);
> > >  	}
> > >  
> > > -	int allocateBuffers(unsigned int bufferCount);
> > > +	int allocateBuffers();
> > >  	void freeBuffers();
> > >  
> > >  	int start();
> > > @@ -86,6 +86,9 @@ private:
> > >  	static constexpr unsigned int PAD_VF = 3;
> > >  	static constexpr unsigned int PAD_STAT = 4;
> > >  
> > > +	static constexpr unsigned int IPU3_INTERNAL_BUFFER_COUNT = 4;
> > > +	static constexpr unsigned int IPU3_BUFFER_SLOT_COUNT = 5;
> > 
> > 5 buffer slots is low. It means that if applications cycle more than 5
> > buffers, the V4L2VideoDevice cache that maintains associations between
> > dmabufs and buffer slots will be thrashed. Due to the internal queue of
> > requests in the IPU3 pipeline handler (similar to what you have
> > implemented in "[PATCH 0/3] libcamera: pipeline: Add internal request
> > queue" for other pipeline handlers), we won't fail at queuing requests,
> > but performance will suffer. I thus think we need to increase the number
> > of slots to what applications can be reasonably expected to use. We
> > could use 8, or even 16, as buffer slots are cheap. The same holds for
> > other pipeline handlers.
> > 
> > The number of slots for the CIO2 output should match the number of
> > buffer slots for the ImgU input, as the same buffers are used on the two
> > video devices. One option is to use IPU3_BUFFER_SLOT_COUNT for the CIO2,
> > instead of CIO2_BUFFER_COUNT. However, the number of internal CIO2
> > buffers that are allocated by exportBuffers() in CIO2Device::start(), to
> > be used in case the application doesn't provide any RAW buffer, should
> > be lower, as those are real buffers and are thus expensive. The number of
> > buffers and buffer slots on the CIO2 thus needs to be decoupled.
> > 
> > For proper operation, the CIO2 will require at least two queued buffers
> > (one being DMA'ed to, and one waiting). We need at least one extra
> > buffer queued to the ImgU to keep buffers flowing. Depending on
> > processing timings, it may be that the ImgU will complete processing of
> > its buffer before the CIO2 captures the next one, leading to a temporary
> > situation where the CIO2 will have three buffers queued, or the CIO2
> > will finish the capture first, leading to a temporary situation where
> > the CIO2 will have one buffer queued and the ImgU will have two buffers
> > queued. In either case, shortly afterwards, the other component will
> > complete capture or processing, and we'll get back to a situation with
> > two buffers queued in the CIO2 and one in the ImgU. That's thus a
> > minimum of three buffers for raw images.
> > 
> > From an ImgU point of view, we could probably get away with a single
> > parameter and a single stats buffer. This would however not allow
> > queuing the next frame for processing in the ImgU before the current
> > frame completes, so two buffers would be better. Now, if we take the IPA
> > into account, the statistics buffer will spend some time on the IPA side
> > for processing. It would thus be best to have an extra statistics buffer
> > to accommodate that, thus requiring three statistics buffers (and three
> > parameters buffers, as we associate them together).
> > 
> > This rationale leads to using the same number of internal buffers for
> > the CIO2, the parameters and the statistics. We currently use four, and
> > while the logic above indicates we could get away with three, it would
> > be safer to keep using four in this patch, and possibly reduce the
> > number of buffers later.
> > 
> > I know documentation isn't fun, but I think this rationale should be
> > captured in a comment in the IPU3 pipeline handler, along with a \todo
> > item to try and lower the number of internal buffers to three.
> 
> This is the IPU3 topology as I understand it:
> 
>       Output  .               .   Input        Output .
>       +---+   .               .   +---+        +---+  .
>       |   | --------------------> |   |        |   |  .
>       +---+   .               .   +---+        +---+  .
> CIO2          .   IPA         .          ImgU         .          IPA
>               .        Param  .   Param        Stat   .   Stat
>               .        +---+  .   +---+        +---+  .   +---+ 
>               .        |   | ---> |   |        |   | ---> |   | 
>               .        +---+  .   +---+        +---+  .   +---+ 
>           
> Your suggestions for the minimum number of buffers required are the following,
> from what I understand:
> 
> CIO2 raw internal buffers:
> - 2x on CIO2 Output (one being DMA'ed, one waiting)
> - 1x on ImgU Input
> 
> ImgU Param/Stat internal buffers:
> - 2x on ImgU Param/Stat (one being processed, one waiting)
> - 1x on IPA Stat
> 
> This arrangement doesn't seem to take into account that IPU3Frames::Info binds
> CIO2 internal buffers and ImgU Param/Stat buffers together. This means that each
> raw buffer queued to CIO2 Output needs a Param/Stat buffer as well. And each
> Param/Stat buffer queued to ImgU for processing needs a CIO2 raw buffer as well.
> After ImgU processing though, the raw buffer gets released and reused, so the
> Stat buffer queued to the IPA does not require a CIO2 raw buffer.
> 
> This means that to achieve the above minimum, due to the IPU3Frames::Info
> constraint, we'd actually need:
> 
> CIO2 internal buffers:
> - 2x on CIO2 Output (one being DMA'ed, one waiting)
> - 2x on ImgU Input (for the two ImgU Param/Stat buffers we want to have there)
> 
> ImgU Param/Stat internal buffers:
> - 2x on CIO2 Output (for the two CIO2 raw buffers we want to have there)
> - 2x on ImgU Param/Stat (one being processed, one waiting)
> - 1x on IPA Stat
> 
> Also we're not accounting for parameter filling in the IPA before we queue the
> buffers to ImgU, but perhaps that's fast enough that it doesn't matter?
> 
> Does this make sense? Or am I missing something?
> 
> Thanks,
> Nícolas
> 
> > 
> > > +
> > >  	int linkSetup(const std::string &source, unsigned int sourcePad,
> > >  		      const std::string &sink, unsigned int sinkPad,
> > >  		      bool enable);
> > > diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp
> > > index 5fd1757bfe13..4efd201c05e5 100644
> > > --- a/src/libcamera/pipeline/ipu3/ipu3.cpp
> > > +++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
> > > @@ -681,16 +681,9 @@ int PipelineHandlerIPU3::allocateBuffers(Camera *camera)
> > >  {
> > >  	IPU3CameraData *data = cameraData(camera);
> > >  	ImgUDevice *imgu = data->imgu_;
> > > -	unsigned int bufferCount;
> > >  	int ret;
> > >  
> > > -	bufferCount = std::max({
> > > -		data->outStream_.configuration().bufferCount,
> > > -		data->vfStream_.configuration().bufferCount,
> > > -		data->rawStream_.configuration().bufferCount,
> > > -	});
> > > -
> > > -	ret = imgu->allocateBuffers(bufferCount);
> > > +	ret = imgu->allocateBuffers();
> > >  	if (ret < 0)
> > >  		return ret;
> > >  
> > > diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > index d1cd3d9dc082..776e0f92aed1 100644
> > > --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > @@ -1149,20 +1149,15 @@ int PipelineHandlerRPi::prepareBuffers(Camera *camera)
> > >  {
> > >  	RPiCameraData *data = cameraData(camera);
> > >  	int ret;
> > > +	constexpr unsigned int bufferCount = 4;
> > >  
> > >  	/*
> > > -	 * Decide how many internal buffers to allocate. For now, simply look
> > > -	 * at how many external buffers will be provided. We'll need to improve
> > > -	 * this logic. However, we really must have all streams allocate the same
> > > -	 * number of buffers to simplify error handling in queueRequestDevice().
> > > +	 * Allocate internal buffers. We really must have all streams allocate
> > > +	 * the same number of buffers to simplify error handling in
> > > +	 * queueRequestDevice().
> > >  	 */
> > > -	unsigned int maxBuffers = 0;
> > > -	for (const Stream *s : camera->streams())
> > > -		if (static_cast<const RPi::Stream *>(s)->isExternal())
> > > -			maxBuffers = std::max(maxBuffers, s->configuration().bufferCount);
> > > -
> > >  	for (auto const stream : data->streams_) {
> > > -		ret = stream->prepareBuffers(maxBuffers);
> > > +		ret = stream->prepareBuffers(bufferCount);
> > 
> > We have a similar problem here, 4 buffer slots is too little, but when
> > the stream has to allocate internal buffers (!importOnly), which is the
> > case for most streams, we don't want to overallocate.
> > 
> > I'd like to get feedback from Naush here, but I think this means we'll
> > have to relax the requirement documented in the comment above, and
> > accept a different number of buffers for each stream.
> > 
> > >  		if (ret < 0)
> > >  			return ret;
> > >  	}
> > > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > > index 11325875b929..f4ea2fd4d4d0 100644
> > > --- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > > +++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > > @@ -690,16 +690,11 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
> > >  	unsigned int ipaBufferId = 1;
> > >  	int ret;
> > >  
> > > -	unsigned int maxCount = std::max({
> > > -		data->mainPathStream_.configuration().bufferCount,
> > > -		data->selfPathStream_.configuration().bufferCount,
> > > -	});
> > > -
> > > -	ret = param_->allocateBuffers(maxCount, &paramBuffers_);
> > > +	ret = param_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT, &paramBuffers_);
> > >  	if (ret < 0)
> > >  		goto error;
> > >  
> > > -	ret = stat_->allocateBuffers(maxCount, &statBuffers_);
> > > +	ret = stat_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT, &statBuffers_);
> > >  	if (ret < 0)
> > >  		goto error;
> > >  
> > > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > > index 25f482eb8d8e..fea330f72886 100644
> > > --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > > +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > > @@ -172,7 +172,7 @@ int RkISP1Path::start()
> > >  		return -EBUSY;
> > >  
> > >  	/* \todo Make buffer count user configurable. */
> > > -	ret = video_->importBuffers(RKISP1_BUFFER_COUNT);
> > > +	ret = video_->importBuffers(RKISP1_BUFFER_SLOT_COUNT);
> > >  	if (ret)
> > >  		return ret;
> > >  
> > > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.h b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > > index 91757600ccdc..3c5891009c58 100644
> > > --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > > +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > > @@ -27,6 +27,9 @@ class V4L2Subdevice;
> > >  struct StreamConfiguration;
> > >  struct V4L2SubdeviceFormat;
> > >  
> > > +static constexpr unsigned int RKISP1_INTERNAL_BUFFER_COUNT = 4;
> > > +static constexpr unsigned int RKISP1_BUFFER_SLOT_COUNT = 5;
> > 
> > The situation should be simpler for the rkisp1, as it has a different
> > pipeline model (inline ISP as opposed to offline ISP for the IPU3). We
> > can allocate more slots (8 or 16, as for other pipeline handlers), and
> > restrict the number of internal buffers (for stats and parameters) to
> > the number of requests we expect to queue to the device at once, plus
> > one for the IPA.  Four thus seems good. Capturing this rationale in a
> > comment would be good too.

Shouldn't we also have one extra buffer queued to the capture device, like for
the others, totalling five (four on the capture, one on the IPA)? Or since the
driver already requires three buffers the extra one isn't needed?

I'm not sure how it works, but if the driver requires three buffers at all times
to keep streaming, then I think we indeed should have the extra buffer to avoid
dropping frames. Otherwise, if that requirement is only for starting the stream,
then for drivers that require at least two buffers we shouldn't need an extra
one, I'd think.
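
To make that concrete, the rkisp1 constants and their rationale comment
could end up looking roughly like this. This is only a sketch: the kFoo
names follow the naming convention suggested further down, the slot count
of 16 follows the "8 or 16" suggestion, and whether the internal count
should be four or five is exactly the open question above.

	/*
	 * Overallocate buffer slots: they are cheap, and a larger number
	 * avoids thrashing the dmabuf mapping cache when applications
	 * cycle through more buffers than the pipeline keeps queued.
	 */
	static constexpr unsigned int kRkISP1BufferSlotCount = 16;

	/*
	 * The internal stats and params buffers only need to cover the
	 * requests queued to the ISP at once, plus one buffer being
	 * processed by the IPA.
	 */
	static constexpr unsigned int kRkISP1InternalBufferCount = 4;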

> > 
> > BTW, I may be too tired to think properly, or just unable to see the
> > obvious, so please challenge any rationale you think is incorrect.
> > 
> > > +
> > >  class RkISP1Path
> > >  {
> > >  public:
> > > diff --git a/src/libcamera/pipeline/simple/converter.cpp b/src/libcamera/pipeline/simple/converter.cpp
> > > index b5e34c4cd0c5..b3bcf01483f7 100644
> > > --- a/src/libcamera/pipeline/simple/converter.cpp
> > > +++ b/src/libcamera/pipeline/simple/converter.cpp
> > > @@ -103,11 +103,11 @@ int SimpleConverter::Stream::exportBuffers(unsigned int count,
> > >  
> > >  int SimpleConverter::Stream::start()
> > >  {
> > > -	int ret = m2m_->output()->importBuffers(inputBufferCount_);
> > > +	int ret = m2m_->output()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> > 
> > Shouldn't this be SIMPLE_INTERNAL_BUFFER_COUNT ? Overallocating is not
> > much of an issue I suppose.

Indeed. I was under the impression that we should always importBuffers() using
BUFFER_SLOT_COUNT, but now, after reading more code, I understand that's not
always the case (although this seems to be the only case, due to the presence of
the converter).
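
In other words, the corrected start() would presumably end up along these
lines (just a sketch, keeping the constant names from this version of the
patch; the rest of the function is unchanged):

	int SimpleConverter::Stream::start()
	{
		/*
		 * The m2m output queue (the converter's input side) receives
		 * the pipeline's internally allocated buffers, so the
		 * internal count is enough there. The capture queue faces
		 * application buffers and keeps using buffer slots.
		 */
		int ret = m2m_->output()->importBuffers(SIMPLE_INTERNAL_BUFFER_COUNT);
		if (ret < 0)
			return ret;

		ret = m2m_->capture()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
		if (ret < 0) {
			stop();
			return ret;
		}
		...
	}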

> > 
> > >  	if (ret < 0)
> > >  		return ret;
> > >  
> > > -	ret = m2m_->capture()->importBuffers(outputBufferCount_);
> > > +	ret = m2m_->capture()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> > >  	if (ret < 0) {
> > >  		stop();
> > >  		return ret;
> > > diff --git a/src/libcamera/pipeline/simple/converter.h b/src/libcamera/pipeline/simple/converter.h
> > > index 276a2a291c21..7e1d60674f62 100644
> > > --- a/src/libcamera/pipeline/simple/converter.h
> > > +++ b/src/libcamera/pipeline/simple/converter.h
> > > @@ -29,6 +29,9 @@ class SizeRange;
> > >  struct StreamConfiguration;
> > >  class V4L2M2MDevice;
> > >  
> > > +constexpr unsigned int SIMPLE_INTERNAL_BUFFER_COUNT = 5;
> > > +constexpr unsigned int SIMPLE_BUFFER_SLOT_COUNT = 5;
> > 
> > Let's name the variables kSimpleInternalBufferCount and
> > kSimpleBufferSlotCount, as that's the naming scheme we're moving to for
> > non-macro constants. Same comment elsewhere in this patch.
> > 
> > Those constants don't belong to converter.h. Could you turn them into
> > member constants of the SimplePipelineHandler class, as
> > kNumInternalBuffers (which btw should be removed) ? The number of buffer
> > slots can be passed as a parameter to SimpleConverter::start().
> > 
> > There's no stats or parameters here, and no IPA, so the situation is
> > different than for IPU3 and RkISP1. The number of internal buffers
> > should just be one more than the minimum number of buffers required by
> > the capture device, I don't think there's another requirement.

Plus one extra to have queued at the converter's 'output' node (which is its
input, confusingly)?
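
For what it's worth, a rough sketch of where those constants could live
instead, as suggested above (the names, the exact values and the start()
parameter are illustrative only, not a settled proposal):

	class SimplePipelineHandler : public PipelineHandler
	{
		/* ... */
	private:
		/*
		 * With no stats, params or IPA involved, the internal buffer
		 * count only needs to be one more than the minimum required
		 * by the capture device.
		 */
		static constexpr unsigned int kSimpleInternalBufferCount = 4;
		static constexpr unsigned int kSimpleBufferSlotCount = 16;
	};

	/* The slot count would then be passed down to the converter. */
	int SimpleConverter::start(unsigned int bufferSlotCount);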

Thanks,
Nícolas

> > 
> > > +
> > >  class SimpleConverter
> > >  {
> > >  public:
> > > diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
> > > index 1c25a7344f5f..a1163eaf8be2 100644
> > > --- a/src/libcamera/pipeline/simple/simple.cpp
> > > +++ b/src/libcamera/pipeline/simple/simple.cpp
> > > @@ -803,12 +803,10 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
> > >  		 * When using the converter allocate a fixed number of internal
> > >  		 * buffers.
> > >  		 */
> > > -		ret = video->allocateBuffers(kNumInternalBuffers,
> > > +		ret = video->allocateBuffers(SIMPLE_INTERNAL_BUFFER_COUNT,
> > >  					     &data->converterBuffers_);
> > >  	} else {
> > > -		/* Otherwise, prepare for using buffers from the only stream. */
> > > -		Stream *stream = &data->streams_[0];
> > > -		ret = video->importBuffers(stream->configuration().bufferCount);
> > > +		ret = video->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> > >  	}
> > >  	if (ret < 0)
> > >  		return ret;
> > > diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > > index fd39b3d3c72c..755949e7a59a 100644
> > > --- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > > +++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > > @@ -91,6 +91,8 @@ private:
> > >  		return static_cast<UVCCameraData *>(
> > >  			PipelineHandler::cameraData(camera));
> > >  	}
> > > +
> > > +	static constexpr unsigned int UVC_BUFFER_SLOT_COUNT = 5;
> > >  };
> > >  
> > >  UVCCameraConfiguration::UVCCameraConfiguration(UVCCameraData *data)
> > > @@ -236,9 +238,8 @@ int PipelineHandlerUVC::exportFrameBuffers(Camera *camera,
> > >  int PipelineHandlerUVC::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
> > >  {
> > >  	UVCCameraData *data = cameraData(camera);
> > > -	unsigned int count = data->stream_.configuration().bufferCount;
> > >  
> > > -	int ret = data->video_->importBuffers(count);
> > > +	int ret = data->video_->importBuffers(UVC_BUFFER_SLOT_COUNT);
> > 
> > For the uvc and vimc pipeline handlers, we have no internal buffers, so
> > it's quite easy. We should have 8 or 16 slots, as for other pipeline
> > handlers.
> > 
> > >  	if (ret < 0)
> > >  		return ret;
> > >  
> > > diff --git a/src/libcamera/pipeline/vimc/vimc.cpp b/src/libcamera/pipeline/vimc/vimc.cpp
> > > index e89d53182c6d..24ba743a946c 100644
> > > --- a/src/libcamera/pipeline/vimc/vimc.cpp
> > > +++ b/src/libcamera/pipeline/vimc/vimc.cpp
> > > @@ -102,6 +102,8 @@ private:
> > >  		return static_cast<VimcCameraData *>(
> > >  			PipelineHandler::cameraData(camera));
> > >  	}
> > > +
> > > +	static constexpr unsigned int VIMC_BUFFER_SLOT_COUNT = 5;
> > >  };
> > >  
> > >  namespace {
> > > @@ -312,9 +314,8 @@ int PipelineHandlerVimc::exportFrameBuffers(Camera *camera,
> > >  int PipelineHandlerVimc::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
> > >  {
> > >  	VimcCameraData *data = cameraData(camera);
> > > -	unsigned int count = data->stream_.configuration().bufferCount;
> > >  
> > > -	int ret = data->video_->importBuffers(count);
> > > +	int ret = data->video_->importBuffers(VIMC_BUFFER_SLOT_COUNT);
> > >  	if (ret < 0)
> > >  		return ret;
> > >  
> > 
> > -- 
> > Regards,
> > 
> > Laurent Pinchart
Naushir Patuck Aug. 12, 2021, 11:32 a.m. UTC | #4
Hi Laurent and Nicolas,


On Mon, 2 Aug 2021 at 00:43, Laurent Pinchart <laurent.pinchart@ideasonboard.com> wrote:

> Hi Nícolas,
>
> Thank you for the patch.
>
> On Thu, Jul 22, 2021 at 08:28:49PM -0300, Nícolas F. R. A. Prado wrote:
> > Pipelines have relied on bufferCount to decide on the number of buffers
> > to allocate internally through allocateBuffers() and on the number of
> > V4L2 buffer slots to reserve through importBuffers(). Instead, the
> > number of internal buffers should be the minimum required by the
> > algorithms to avoid wasting memory, and the number of V4L2 buffer slots
> > should overallocate to avoid thrashing dmabuf mappings.
> >
> > For now, just set them to constants and stop relying on bufferCount, to
> > allow for its removal.
> >
> > Signed-off-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
> > ---
> >
> > No changes in v7
> >
> > Changes in v6:
> > - Added pipeline name as prefix to each BUFFER_SLOT_COUNT and
> >   INTERNAL_BUFFER_COUNT constant
> >
> >  src/libcamera/pipeline/ipu3/imgu.cpp              | 12 ++++++------
> >  src/libcamera/pipeline/ipu3/imgu.h                |  5 ++++-
> >  src/libcamera/pipeline/ipu3/ipu3.cpp              |  9 +--------
> >  .../pipeline/raspberrypi/raspberrypi.cpp          | 15 +++++----------
> >  src/libcamera/pipeline/rkisp1/rkisp1.cpp          |  9 ++-------
> >  src/libcamera/pipeline/rkisp1/rkisp1_path.cpp     |  2 +-
> >  src/libcamera/pipeline/rkisp1/rkisp1_path.h       |  3 +++
> >  src/libcamera/pipeline/simple/converter.cpp       |  4 ++--
> >  src/libcamera/pipeline/simple/converter.h         |  3 +++
> >  src/libcamera/pipeline/simple/simple.cpp          |  6 ++----
> >  src/libcamera/pipeline/uvcvideo/uvcvideo.cpp      |  5 +++--
> >  src/libcamera/pipeline/vimc/vimc.cpp              |  5 +++--
> >  12 files changed, 35 insertions(+), 43 deletions(-)
>
> Given that some of the pipeline handlers will need more intrusive
> changes to address the comments below, you could split this with one
> patch per pipeline handler (or perhaps grouping the easy ones together).
>
> >
> > diff --git a/src/libcamera/pipeline/ipu3/imgu.cpp
> b/src/libcamera/pipeline/ipu3/imgu.cpp
> > index e955bc3456ba..f36e99dacbe7 100644
> > --- a/src/libcamera/pipeline/ipu3/imgu.cpp
> > +++ b/src/libcamera/pipeline/ipu3/imgu.cpp
> > @@ -593,22 +593,22 @@ int
> ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
> >  /**
> >   * \brief Allocate buffers for all the ImgU video devices
> >   */
> > -int ImgUDevice::allocateBuffers(unsigned int bufferCount)
> > +int ImgUDevice::allocateBuffers()
> >  {
> >       /* Share buffers between CIO2 output and ImgU input. */
> > -     int ret = input_->importBuffers(bufferCount);
> > +     int ret = input_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> >       if (ret) {
> >               LOG(IPU3, Error) << "Failed to import ImgU input buffers";
> >               return ret;
> >       }
> >
> > -     ret = param_->allocateBuffers(bufferCount, &paramBuffers_);
> > +     ret = param_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT,
> &paramBuffers_);
> >       if (ret < 0) {
> >               LOG(IPU3, Error) << "Failed to allocate ImgU param
> buffers";
> >               goto error;
> >       }
> >
> > -     ret = stat_->allocateBuffers(bufferCount, &statBuffers_);
> > +     ret = stat_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT,
> &statBuffers_);
> >       if (ret < 0) {
> >               LOG(IPU3, Error) << "Failed to allocate ImgU stat buffers";
> >               goto error;
> > @@ -619,13 +619,13 @@ int ImgUDevice::allocateBuffers(unsigned int
> bufferCount)
> >        * corresponding stream is active or inactive, as the driver needs
> >        * buffers to be requested on the V4L2 devices in order to operate.
> >        */
> > -     ret = output_->importBuffers(bufferCount);
> > +     ret = output_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> >       if (ret < 0) {
> >               LOG(IPU3, Error) << "Failed to import ImgU output buffers";
> >               goto error;
> >       }
> >
> > -     ret = viewfinder_->importBuffers(bufferCount);
> > +     ret = viewfinder_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> >       if (ret < 0) {
> >               LOG(IPU3, Error) << "Failed to import ImgU viewfinder
> buffers";
> >               goto error;
> > diff --git a/src/libcamera/pipeline/ipu3/imgu.h
> b/src/libcamera/pipeline/ipu3/imgu.h
> > index 9d4915116087..f934a951fc75 100644
> > --- a/src/libcamera/pipeline/ipu3/imgu.h
> > +++ b/src/libcamera/pipeline/ipu3/imgu.h
> > @@ -61,7 +61,7 @@ public:
> >                                           outputFormat);
> >       }
> >
> > -     int allocateBuffers(unsigned int bufferCount);
> > +     int allocateBuffers();
> >       void freeBuffers();
> >
> >       int start();
> > @@ -86,6 +86,9 @@ private:
> >       static constexpr unsigned int PAD_VF = 3;
> >       static constexpr unsigned int PAD_STAT = 4;
> >
> > +     static constexpr unsigned int IPU3_INTERNAL_BUFFER_COUNT = 4;
> > +     static constexpr unsigned int IPU3_BUFFER_SLOT_COUNT = 5;
>
> 5 buffer slots is low. It means that if applications cycle more than 5
> buffers, the V4L2VideoDevice cache that maintains associations between
> dmabufs and buffer slots will be trashed. Due to the internal queue of
> requests in the IPU3 pipeline handler (similar to what you have
> implemented in "[PATCH 0/3] libcamera: pipeline: Add internal request
> queue" for other pipeline handlers), we won't fail at queuing requests,
> but performance will suffer. I thus think we need to increase the number
> of slots to what applications can be reasonably expected to use. We
> could use 8, or even 16, as buffer slots are cheap. The same holds for
> other pipeline handlers.
>
> The number of slots for the CIO2 output should match the number of
> buffer slots for the ImgU input, as the same buffers are used on the two
> video devices. One option is to use IPU3_BUFFER_SLOT_COUNT for the CIO2,
> instead of CIO2_BUFFER_COUNT. However, the number of internal CIO2
> buffers that are allocated by exportBuffers() in CIO2Device::start(), to
> be used in case the application doesn't provide any RAW buffer, should
> be lower, as those are real buffer and are thus expensive. The number of
> buffers and buffer slots on the CIO2 thus needs to be decoupled.
>
> For proper operation, the CIO2 will require at least two queued buffers
> (one being DMA'ed to, and one waiting). We need at least one extra
> buffer queued to the ImgU to keep buffers flowing. Depending on
> processing timings, it may be that the ImgU will complete processing of
> its buffer before the CIO2 captures the next one, leading to a temporary
> situation where the CIO2 will have three buffers queued, or the CIO2
> will finish the capture first, leading to a temporary situation where
> the CIO2 will have one buffer queued and the ImgU will have two buffers
> queued. In either case, shortly afterwards, the other component will
> complete capture or processing, and we'll get back to a situation with
> two buffers queued in the CIO2 and one in the ImgU. That's thus a
> minimum of three buffers for raw images.
>
> From an ImgU point of view, we could probably get away with a single
> parameter and a single stats buffer. This would however not allow
> queuing the next frame for processing in the ImgU before the current
> frame completes, so two buffers would be better. Now, if we take the IPA
> into account, the statistics buffer will spend some time on the IPA side
> for processing. It would thus be best to have an extra statistics buffer
> to accommodate that, thus requiring three statistics buffers (and three
> parameters buffers, as we associate them together).
>
> This rationale leads to using the same number of internal buffers for
> the CIO2, the parameters and the statistics. We currently use four, and
> while the logic above indicates we could get away with three, it would
> be safer to keep using four in this patch, and possibly reduce the
> number of buffers later.
>
> I know documentation isn't fun, but I think this rationale should be
> captured in a comment in the IPU3 pipeline handler, along with a \todo
> item to try and lower the number of internal buffers to three.
>
> > +
> >       int linkSetup(const std::string &source, unsigned int sourcePad,
> >                     const std::string &sink, unsigned int sinkPad,
> >                     bool enable);
> > diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp
> b/src/libcamera/pipeline/ipu3/ipu3.cpp
> > index 5fd1757bfe13..4efd201c05e5 100644
> > --- a/src/libcamera/pipeline/ipu3/ipu3.cpp
> > +++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
> > @@ -681,16 +681,9 @@ int PipelineHandlerIPU3::allocateBuffers(Camera
> *camera)
> >  {
> >       IPU3CameraData *data = cameraData(camera);
> >       ImgUDevice *imgu = data->imgu_;
> > -     unsigned int bufferCount;
> >       int ret;
> >
> > -     bufferCount = std::max({
> > -             data->outStream_.configuration().bufferCount,
> > -             data->vfStream_.configuration().bufferCount,
> > -             data->rawStream_.configuration().bufferCount,
> > -     });
> > -
> > -     ret = imgu->allocateBuffers(bufferCount);
> > +     ret = imgu->allocateBuffers();
> >       if (ret < 0)
> >               return ret;
> >
> > diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > index d1cd3d9dc082..776e0f92aed1 100644
> > --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > @@ -1149,20 +1149,15 @@ int PipelineHandlerRPi::prepareBuffers(Camera
> *camera)
> >  {
> >       RPiCameraData *data = cameraData(camera);
> >       int ret;
> > +     constexpr unsigned int bufferCount = 4;
> >
> >       /*
> > -      * Decide how many internal buffers to allocate. For now, simply
> look
> > -      * at how many external buffers will be provided. We'll need to
> improve
> > -      * this logic. However, we really must have all streams allocate
> the same
> > -      * number of buffers to simplify error handling in
> queueRequestDevice().
> > +      * Allocate internal buffers. We really must have all streams
> allocate
> > +      * the same number of buffers to simplify error handling in
> > +      * queueRequestDevice().
> >        */
> > -     unsigned int maxBuffers = 0;
> > -     for (const Stream *s : camera->streams())
> > -             if (static_cast<const RPi::Stream *>(s)->isExternal())
> > -                     maxBuffers = std::max(maxBuffers,
> s->configuration().bufferCount);
> > -
> >       for (auto const stream : data->streams_) {
> > -             ret = stream->prepareBuffers(maxBuffers);
> > +             ret = stream->prepareBuffers(bufferCount);
>
> We have a similar problem here, 4 buffer slots is too little, but when
> the stream has to allocate internal buffers (!importOnly), which is the
> case for most streams, we don't want to overallocate.
>
> I'd like to get feedback from Naush here, but I think this means we'll
> have to relax the requirement documented in the comment above, and
> accept a different number of buffers for each stream.
>

Sorry for the late reply to this thread!

As is evident from the above comment, this bit of code does need to be
improved to avoid over-allocation, which I will get to at some point.
However, to address this change and the comments, 4 buffer slots sounds
like it might be too little. Regarding the requirement on having streams
allocate the same number of buffers - that can be relaxed (and the
comment removed) as we do handle it correctly.

Regards,
Naush



>
> >               if (ret < 0)
> >                       return ret;
> >       }
> > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > index 11325875b929..f4ea2fd4d4d0 100644
> > --- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > +++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > @@ -690,16 +690,11 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera
> *camera)
> >       unsigned int ipaBufferId = 1;
> >       int ret;
> >
> > -     unsigned int maxCount = std::max({
> > -             data->mainPathStream_.configuration().bufferCount,
> > -             data->selfPathStream_.configuration().bufferCount,
> > -     });
> > -
> > -     ret = param_->allocateBuffers(maxCount, &paramBuffers_);
> > +     ret = param_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT,
> &paramBuffers_);
> >       if (ret < 0)
> >               goto error;
> >
> > -     ret = stat_->allocateBuffers(maxCount, &statBuffers_);
> > +     ret = stat_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT,
> &statBuffers_);
> >       if (ret < 0)
> >               goto error;
> >
> > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > index 25f482eb8d8e..fea330f72886 100644
> > --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > @@ -172,7 +172,7 @@ int RkISP1Path::start()
> >               return -EBUSY;
> >
> >       /* \todo Make buffer count user configurable. */
> > -     ret = video_->importBuffers(RKISP1_BUFFER_COUNT);
> > +     ret = video_->importBuffers(RKISP1_BUFFER_SLOT_COUNT);
> >       if (ret)
> >               return ret;
> >
> > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > index 91757600ccdc..3c5891009c58 100644
> > --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > @@ -27,6 +27,9 @@ class V4L2Subdevice;
> >  struct StreamConfiguration;
> >  struct V4L2SubdeviceFormat;
> >
> > +static constexpr unsigned int RKISP1_INTERNAL_BUFFER_COUNT = 4;
> > +static constexpr unsigned int RKISP1_BUFFER_SLOT_COUNT = 5;
>
> The situation should be simpler for the rkisp1, as it has a different
> pipeline model (inline ISP as opposed to offline ISP for the IPU3). We
> can allocate more slots (8 or 16, as for other pipeline handlers), and
> restrict the number of internal buffers (for stats and parameters) to
> the number of requests we expect to queue to the device at once, plus
> one for the IPA.  Four thus seems good. Capturing this rationale in a
> comment would be good too.
>
> BTW, I may be too tired to think properly, or just unable to see the
> obvious, so please challenge any rationale you think is incorrect.
>
> > +
> >  class RkISP1Path
> >  {
> >  public:
> > diff --git a/src/libcamera/pipeline/simple/converter.cpp
> b/src/libcamera/pipeline/simple/converter.cpp
> > index b5e34c4cd0c5..b3bcf01483f7 100644
> > --- a/src/libcamera/pipeline/simple/converter.cpp
> > +++ b/src/libcamera/pipeline/simple/converter.cpp
> > @@ -103,11 +103,11 @@ int
> SimpleConverter::Stream::exportBuffers(unsigned int count,
> >
> >  int SimpleConverter::Stream::start()
> >  {
> > -     int ret = m2m_->output()->importBuffers(inputBufferCount_);
> > +     int ret = m2m_->output()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
>
> Shouldn't this be SIMPLE_INTERNAL_BUFFER_COUNT ? Overallocating is not
> much of an issue I suppose.
>
> >       if (ret < 0)
> >               return ret;
> >
> > -     ret = m2m_->capture()->importBuffers(outputBufferCount_);
> > +     ret = m2m_->capture()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> >       if (ret < 0) {
> >               stop();
> >               return ret;
> > diff --git a/src/libcamera/pipeline/simple/converter.h
> b/src/libcamera/pipeline/simple/converter.h
> > index 276a2a291c21..7e1d60674f62 100644
> > --- a/src/libcamera/pipeline/simple/converter.h
> > +++ b/src/libcamera/pipeline/simple/converter.h
> > @@ -29,6 +29,9 @@ class SizeRange;
> >  struct StreamConfiguration;
> >  class V4L2M2MDevice;
> >
> > +constexpr unsigned int SIMPLE_INTERNAL_BUFFER_COUNT = 5;
> > +constexpr unsigned int SIMPLE_BUFFER_SLOT_COUNT = 5;
>
> Let's name the variables kSimpleInternalBufferCount and
> kSimpleBufferSlotCount, as that's the naming scheme we're moving to for
> non-macro constants. Same comment elsewhere in this patch.
>
> Those constants don't belong to converter.h. Could you turn them into
> member constants of the SimplePipelineHandler class, as
> kNumInternalBuffers (which btw should be removed) ? The number of buffer
> slots can be passed as a parameter to SimpleConverter::start().
>
> There's no stats or parameters here, and no IPA, so the situation is
> different than for IPU3 and RkISP1. The number of internal buffers
> should just be one more than the minimum number of buffers required by
> the capture device, I don't think there's another requirement.
>
> > +
> >  class SimpleConverter
> >  {
> >  public:
> > diff --git a/src/libcamera/pipeline/simple/simple.cpp
> b/src/libcamera/pipeline/simple/simple.cpp
> > index 1c25a7344f5f..a1163eaf8be2 100644
> > --- a/src/libcamera/pipeline/simple/simple.cpp
> > +++ b/src/libcamera/pipeline/simple/simple.cpp
> > @@ -803,12 +803,10 @@ int SimplePipelineHandler::start(Camera *camera,
> [[maybe_unused]] const ControlL
> >                * When using the converter allocate a fixed number of
> internal
> >                * buffers.
> >                */
> > -             ret = video->allocateBuffers(kNumInternalBuffers,
> > +             ret = video->allocateBuffers(SIMPLE_INTERNAL_BUFFER_COUNT,
> >                                            &data->converterBuffers_);
> >       } else {
> > -             /* Otherwise, prepare for using buffers from the only
> stream. */
> > -             Stream *stream = &data->streams_[0];
> > -             ret =
> video->importBuffers(stream->configuration().bufferCount);
> > +             ret = video->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> >       }
> >       if (ret < 0)
> >               return ret;
> > diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > index fd39b3d3c72c..755949e7a59a 100644
> > --- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > +++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > @@ -91,6 +91,8 @@ private:
> >               return static_cast<UVCCameraData *>(
> >                       PipelineHandler::cameraData(camera));
> >       }
> > +
> > +     static constexpr unsigned int UVC_BUFFER_SLOT_COUNT = 5;
> >  };
> >
> >  UVCCameraConfiguration::UVCCameraConfiguration(UVCCameraData *data)
> > @@ -236,9 +238,8 @@ int PipelineHandlerUVC::exportFrameBuffers(Camera
> *camera,
> >  int PipelineHandlerUVC::start(Camera *camera, [[maybe_unused]] const
> ControlList *controls)
> >  {
> >       UVCCameraData *data = cameraData(camera);
> > -     unsigned int count = data->stream_.configuration().bufferCount;
> >
> > -     int ret = data->video_->importBuffers(count);
> > +     int ret = data->video_->importBuffers(UVC_BUFFER_SLOT_COUNT);
>
> For the uvc and vimc pipeline handlers, we have no internal buffers, so
> it's quite easy. We should have 8 or 16 slots, as for other pipeline
> handlers.
>
> >       if (ret < 0)
> >               return ret;
> >
> > diff --git a/src/libcamera/pipeline/vimc/vimc.cpp
> b/src/libcamera/pipeline/vimc/vimc.cpp
> > index e89d53182c6d..24ba743a946c 100644
> > --- a/src/libcamera/pipeline/vimc/vimc.cpp
> > +++ b/src/libcamera/pipeline/vimc/vimc.cpp
> > @@ -102,6 +102,8 @@ private:
> >               return static_cast<VimcCameraData *>(
> >                       PipelineHandler::cameraData(camera));
> >       }
> > +
> > +     static constexpr unsigned int VIMC_BUFFER_SLOT_COUNT = 5;
> >  };
> >
> >  namespace {
> > @@ -312,9 +314,8 @@ int PipelineHandlerVimc::exportFrameBuffers(Camera
> *camera,
> >  int PipelineHandlerVimc::start(Camera *camera, [[maybe_unused]] const
> ControlList *controls)
> >  {
> >       VimcCameraData *data = cameraData(camera);
> > -     unsigned int count = data->stream_.configuration().bufferCount;
> >
> > -     int ret = data->video_->importBuffers(count);
> > +     int ret = data->video_->importBuffers(VIMC_BUFFER_SLOT_COUNT);
> >       if (ret < 0)
> >               return ret;
> >
>
> --
> Regards,
>
> Laurent Pinchart
>
Laurent Pinchart Aug. 17, 2021, 12:21 a.m. UTC | #5
Hi Naush,

On Thu, Aug 12, 2021 at 12:32:28PM +0100, Naushir Patuck wrote:
> On Mon, 2 Aug 2021 at 00:43, Laurent Pinchart wrote:
> > On Thu, Jul 22, 2021 at 08:28:49PM -0300, Nícolas F. R. A. Prado wrote:
> > > Pipelines have relied on bufferCount to decide on the number of buffers
> > > to allocate internally through allocateBuffers() and on the number of
> > > V4L2 buffer slots to reserve through importBuffers(). Instead, the
> > > number of internal buffers should be the minimum required by the
> > > algorithms to avoid wasting memory, and the number of V4L2 buffer slots
> > > should overallocate to avoid thrashing dmabuf mappings.
> > >
> > > For now, just set them to constants and stop relying on bufferCount, to
> > > allow for its removal.
> > >
> > > Signed-off-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
> > > ---
> > >
> > > No changes in v7
> > >
> > > Changes in v6:
> > > - Added pipeline name as prefix to each BUFFER_SLOT_COUNT and
> > >   INTERNAL_BUFFER_COUNT constant
> > >
> > >  src/libcamera/pipeline/ipu3/imgu.cpp              | 12 ++++++------
> > >  src/libcamera/pipeline/ipu3/imgu.h                |  5 ++++-
> > >  src/libcamera/pipeline/ipu3/ipu3.cpp              |  9 +--------
> > >  .../pipeline/raspberrypi/raspberrypi.cpp          | 15 +++++----------
> > >  src/libcamera/pipeline/rkisp1/rkisp1.cpp          |  9 ++-------
> > >  src/libcamera/pipeline/rkisp1/rkisp1_path.cpp     |  2 +-
> > >  src/libcamera/pipeline/rkisp1/rkisp1_path.h       |  3 +++
> > >  src/libcamera/pipeline/simple/converter.cpp       |  4 ++--
> > >  src/libcamera/pipeline/simple/converter.h         |  3 +++
> > >  src/libcamera/pipeline/simple/simple.cpp          |  6 ++----
> > >  src/libcamera/pipeline/uvcvideo/uvcvideo.cpp      |  5 +++--
> > >  src/libcamera/pipeline/vimc/vimc.cpp              |  5 +++--
> > >  12 files changed, 35 insertions(+), 43 deletions(-)
> >
> > Given that some of the pipeline handlers will need more intrusive
> > changes to address the comments below, you could split this with one
> > patch per pipeline handler (or perhaps grouping the easy ones together).
> >
> > > diff --git a/src/libcamera/pipeline/ipu3/imgu.cpp b/src/libcamera/pipeline/ipu3/imgu.cpp
> > > index e955bc3456ba..f36e99dacbe7 100644
> > > --- a/src/libcamera/pipeline/ipu3/imgu.cpp
> > > +++ b/src/libcamera/pipeline/ipu3/imgu.cpp
> > > @@ -593,22 +593,22 @@ int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
> > >  /**
> > >   * \brief Allocate buffers for all the ImgU video devices
> > >   */
> > > -int ImgUDevice::allocateBuffers(unsigned int bufferCount)
> > > +int ImgUDevice::allocateBuffers()
> > >  {
> > >       /* Share buffers between CIO2 output and ImgU input. */
> > > -     int ret = input_->importBuffers(bufferCount);
> > > +     int ret = input_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> > >       if (ret) {
> > >               LOG(IPU3, Error) << "Failed to import ImgU input buffers";
> > >               return ret;
> > >       }
> > >
> > > -     ret = param_->allocateBuffers(bufferCount, &paramBuffers_);
> > > +     ret = param_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT, &paramBuffers_);
> > >       if (ret < 0) {
> > >               LOG(IPU3, Error) << "Failed to allocate ImgU param buffers";
> > >               goto error;
> > >       }
> > >
> > > -     ret = stat_->allocateBuffers(bufferCount, &statBuffers_);
> > > +     ret = stat_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT, &statBuffers_);
> > >       if (ret < 0) {
> > >               LOG(IPU3, Error) << "Failed to allocate ImgU stat buffers";
> > >               goto error;
> > > @@ -619,13 +619,13 @@ int ImgUDevice::allocateBuffers(unsigned int bufferCount)
> > >        * corresponding stream is active or inactive, as the driver needs
> > >        * buffers to be requested on the V4L2 devices in order to operate.
> > >        */
> > > -     ret = output_->importBuffers(bufferCount);
> > > +     ret = output_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> > >       if (ret < 0) {
> > >               LOG(IPU3, Error) << "Failed to import ImgU output buffers";
> > >               goto error;
> > >       }
> > >
> > > -     ret = viewfinder_->importBuffers(bufferCount);
> > > +     ret = viewfinder_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> > >       if (ret < 0) {
> > >               LOG(IPU3, Error) << "Failed to import ImgU viewfinder buffers";
> > >               goto error;
> > > diff --git a/src/libcamera/pipeline/ipu3/imgu.h b/src/libcamera/pipeline/ipu3/imgu.h
> > > index 9d4915116087..f934a951fc75 100644
> > > --- a/src/libcamera/pipeline/ipu3/imgu.h
> > > +++ b/src/libcamera/pipeline/ipu3/imgu.h
> > > @@ -61,7 +61,7 @@ public:
> > >                                           outputFormat);
> > >       }
> > >
> > > -     int allocateBuffers(unsigned int bufferCount);
> > > +     int allocateBuffers();
> > >       void freeBuffers();
> > >
> > >       int start();
> > > @@ -86,6 +86,9 @@ private:
> > >       static constexpr unsigned int PAD_VF = 3;
> > >       static constexpr unsigned int PAD_STAT = 4;
> > >
> > > +     static constexpr unsigned int IPU3_INTERNAL_BUFFER_COUNT = 4;
> > > +     static constexpr unsigned int IPU3_BUFFER_SLOT_COUNT = 5;
> >
> > 5 buffer slots is low. It means that if applications cycle more than 5
> > buffers, the V4L2VideoDevice cache that maintains associations between
> > dmabufs and buffer slots will be trashed. Due to the internal queue of
> > requests in the IPU3 pipeline handler (similar to what you have
> > implemented in "[PATCH 0/3] libcamera: pipeline: Add internal request
> > queue" for other pipeline handlers), we won't fail at queuing requests,
> > but performance will suffer. I thus think we need to increase the number
> > of slots to what applications can be reasonably expected to use. We
> > could use 8, or even 16, as buffer slots are cheap. The same holds for
> > other pipeline handlers.
> >
> > The number of slots for the CIO2 output should match the number of
> > buffer slots for the ImgU input, as the same buffers are used on the two
> > video devices. One option is to use IPU3_BUFFER_SLOT_COUNT for the CIO2,
> > instead of CIO2_BUFFER_COUNT. However, the number of internal CIO2
> > buffers that are allocated by exportBuffers() in CIO2Device::start(), to
> > be used in case the application doesn't provide any RAW buffer, should
> > be lower, as those are real buffer and are thus expensive. The number of
> > buffers and buffer slots on the CIO2 thus needs to be decoupled.
> >
> > For proper operation, the CIO2 will require at least two queued buffers
> > (one being DMA'ed to, and one waiting). We need at least one extra
> > buffer queued to the ImgU to keep buffers flowing. Depending on
> > processing timings, it may be that the ImgU will complete processing of
> > its buffer before the CIO2 captures the next one, leading to a temporary
> > situation where the CIO2 will have three buffers queued, or the CIO2
> > will finish the capture first, leading to a temporary situation where
> > the CIO2 will have one buffer queued and the ImgU will have two buffers
> > queued. In either case, shortly afterwards, the other component will
> > complete capture or processing, and we'll get back to a situation with
> > two buffers queued in the CIO2 and one in the ImgU. That's thus a
> > minimum of three buffers for raw images.
> >
> > From an ImgU point of view, we could probably get away with a single
> > parameter and a single stats buffer. This would however not allow
> > queuing the next frame for processing in the ImgU before the current
> > frame completes, so two buffers would be better. Now, if we take the IPA
> > into account, the statistics buffer will spend some time on the IPA side
> > for processing. It would thus be best to have an extra statistics buffer
> > to accommodate that, thus requiring three statistics buffers (and three
> > parameters buffers, as we associate them together).
> >
> > This rationale leads to using the same number of internal buffers for
> > the CIO2, the parameters and the statistics. We currently use four, and
> > while the logic above indicates we could get away with three, it would
> > be safer to keep using four in this patch, and possibly reduce the
> > number of buffers later.
> >
> > I know documentation isn't fun, but I think this rationale should be
> > captured in a comment in the IPU3 pipeline handler, along with a \todo
> > item to try and lower the number of internal buffers to three.
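
Something along these lines, perhaps (only a sketch to show the shape of
the comment; the kFoo names follow the naming convention discussed for
the simple pipeline handler, the values reflect the rationale above, and
the CIO2 internal buffer vs. buffer slot decoupling would need a similar
treatment on the CIO2Device side):

	/*
	 * Buffer slots are cheap, so overallocate them to avoid thrashing
	 * the dmabuf mapping cache when applications cycle through more
	 * buffers than the pipeline keeps queued internally.
	 */
	static constexpr unsigned int kIPU3BufferSlotCount = 16;

	/*
	 * The CIO2 needs two queued raw buffers (one being written by DMA,
	 * one waiting) and the ImgU needs at least one more queued for
	 * processing, for a minimum of three raw buffers in flight. The
	 * parameters and statistics follow the same reasoning: two sets
	 * queued to the ImgU plus one statistics buffer being processed by
	 * the IPA. Keep a fourth buffer for safety for now.
	 *
	 * \todo Try to lower the internal buffer count to three once the
	 * rationale above has been validated.
	 */
	static constexpr unsigned int kIPU3InternalBufferCount = 4;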
> >
> > > +
> > >       int linkSetup(const std::string &source, unsigned int sourcePad,
> > >                     const std::string &sink, unsigned int sinkPad,
> > >                     bool enable);
> > > diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp
> > > index 5fd1757bfe13..4efd201c05e5 100644
> > > --- a/src/libcamera/pipeline/ipu3/ipu3.cpp
> > > +++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
> > > @@ -681,16 +681,9 @@ int PipelineHandlerIPU3::allocateBuffers(Camera *camera)
> > >  {
> > >       IPU3CameraData *data = cameraData(camera);
> > >       ImgUDevice *imgu = data->imgu_;
> > > -     unsigned int bufferCount;
> > >       int ret;
> > >
> > > -     bufferCount = std::max({
> > > -             data->outStream_.configuration().bufferCount,
> > > -             data->vfStream_.configuration().bufferCount,
> > > -             data->rawStream_.configuration().bufferCount,
> > > -     });
> > > -
> > > -     ret = imgu->allocateBuffers(bufferCount);
> > > +     ret = imgu->allocateBuffers();
> > >       if (ret < 0)
> > >               return ret;
> > >
> > > diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > index d1cd3d9dc082..776e0f92aed1 100644
> > > --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > @@ -1149,20 +1149,15 @@ int PipelineHandlerRPi::prepareBuffers(Camera *camera)
> > >  {
> > >       RPiCameraData *data = cameraData(camera);
> > >       int ret;
> > > +     constexpr unsigned int bufferCount = 4;
> > >
> > >       /*
> > > -      * Decide how many internal buffers to allocate. For now, simply look
> > > -      * at how many external buffers will be provided. We'll need to improve
> > > -      * this logic. However, we really must have all streams allocate the same
> > > -      * number of buffers to simplify error handling in queueRequestDevice().
> > > +      * Allocate internal buffers. We really must have all streams allocate
> > > +      * the same number of buffers to simplify error handling in
> > > +      * queueRequestDevice().
> > >        */
> > > -     unsigned int maxBuffers = 0;
> > > -     for (const Stream *s : camera->streams())
> > > -             if (static_cast<const RPi::Stream *>(s)->isExternal())
> > > -                     maxBuffers = std::max(maxBuffers, s->configuration().bufferCount);
> > > -
> > >       for (auto const stream : data->streams_) {
> > > -             ret = stream->prepareBuffers(maxBuffers);
> > > +             ret = stream->prepareBuffers(bufferCount);
> >
> > We have a similar problem here, 4 buffer slots is too little, but when
> > the stream has to allocate internal buffers (!importOnly), which is the
> > case for most streams, we don't want to overallocate.
> >
> > I'd like to get feedback from Naush here, but I think this means we'll
> > have to relax the requirement documented in the comment above, and
> > accept a different number of buffers for each stream.
> 
> Sorry for the late reply to this thread!
> 
> As is evident from the above comment, this bit of code does need to be improved
> to avoid over-allocation, which I will get to at some point. However, to address this
> change and the comments, 4 buffer slots sounds like it might be too little.  Regarding
> the requirement on having streams allocate the same number of buffers - that can be
> relaxed (and the comment removed) as we do handle it correctly.

Thanks for the information. I understand that this means that we can
drop the comment and have different numbers of buffers for different
streams without any other change to the pipeline handler. If that's
incorrect, please let me know.

> > >               if (ret < 0)
> > >                       return ret;
> > >       }
> > > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > > index 11325875b929..f4ea2fd4d4d0 100644
> > > --- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > > +++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > > @@ -690,16 +690,11 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
> > >       unsigned int ipaBufferId = 1;
> > >       int ret;
> > >
> > > -     unsigned int maxCount = std::max({
> > > -             data->mainPathStream_.configuration().bufferCount,
> > > -             data->selfPathStream_.configuration().bufferCount,
> > > -     });
> > > -
> > > -     ret = param_->allocateBuffers(maxCount, &paramBuffers_);
> > > +     ret = param_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT, &paramBuffers_);
> > >       if (ret < 0)
> > >               goto error;
> > >
> > > -     ret = stat_->allocateBuffers(maxCount, &statBuffers_);
> > > +     ret = stat_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT, &statBuffers_);
> > >       if (ret < 0)
> > >               goto error;
> > >
> > > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > > index 25f482eb8d8e..fea330f72886 100644
> > > --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > > +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > > @@ -172,7 +172,7 @@ int RkISP1Path::start()
> > >               return -EBUSY;
> > >
> > >       /* \todo Make buffer count user configurable. */
> > > -     ret = video_->importBuffers(RKISP1_BUFFER_COUNT);
> > > +     ret = video_->importBuffers(RKISP1_BUFFER_SLOT_COUNT);
> > >       if (ret)
> > >               return ret;
> > >
> > > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.h b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > > index 91757600ccdc..3c5891009c58 100644
> > > --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > > +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > > @@ -27,6 +27,9 @@ class V4L2Subdevice;
> > >  struct StreamConfiguration;
> > >  struct V4L2SubdeviceFormat;
> > >
> > > +static constexpr unsigned int RKISP1_INTERNAL_BUFFER_COUNT = 4;
> > > +static constexpr unsigned int RKISP1_BUFFER_SLOT_COUNT = 5;
> >
> > The situation should be simpler for the rkisp1, as it has a different
> > pipeline model (inline ISP as opposed to offline ISP for the IPU3). We
> > can allocate more slots (8 or 16, as for other pipeline handlers), and
> > restrict the number of internal buffers (for stats and parameters) to
> > the number of requests we expect to queue to the device at once, plus
> > one for the IPA.  Four thus seems good. Capturing this rationale in a
> > comment would be good too.
> >
> > BTW, I may be too tired to think properly, or just unable to see the
> > obvious, so please challenge any rationale you think is incorrect.
> >
> > > +
> > >  class RkISP1Path
> > >  {
> > >  public:
> > > diff --git a/src/libcamera/pipeline/simple/converter.cpp b/src/libcamera/pipeline/simple/converter.cpp
> > > index b5e34c4cd0c5..b3bcf01483f7 100644
> > > --- a/src/libcamera/pipeline/simple/converter.cpp
> > > +++ b/src/libcamera/pipeline/simple/converter.cpp
> > > @@ -103,11 +103,11 @@ int SimpleConverter::Stream::exportBuffers(unsigned int count,
> > >
> > >  int SimpleConverter::Stream::start()
> > >  {
> > > -     int ret = m2m_->output()->importBuffers(inputBufferCount_);
> > > +     int ret = m2m_->output()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> >
> > Shouldn't this be SIMPLE_INTERNAL_BUFFER_COUNT ? Overallocating is not
> > much of an issue I suppose.
> >
> > >       if (ret < 0)
> > >               return ret;
> > >
> > > -     ret = m2m_->capture()->importBuffers(outputBufferCount_);
> > > +     ret = m2m_->capture()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> > >       if (ret < 0) {
> > >               stop();
> > >               return ret;
> > > diff --git a/src/libcamera/pipeline/simple/converter.h b/src/libcamera/pipeline/simple/converter.h
> > > index 276a2a291c21..7e1d60674f62 100644
> > > --- a/src/libcamera/pipeline/simple/converter.h
> > > +++ b/src/libcamera/pipeline/simple/converter.h
> > > @@ -29,6 +29,9 @@ class SizeRange;
> > >  struct StreamConfiguration;
> > >  class V4L2M2MDevice;
> > >
> > > +constexpr unsigned int SIMPLE_INTERNAL_BUFFER_COUNT = 5;
> > > +constexpr unsigned int SIMPLE_BUFFER_SLOT_COUNT = 5;
> >
> > Let's name the variables kSimpleInternalBufferCount and
> > kSimpleBufferSlotCount, as that's the naming scheme we're moving to for
> > non-macro constants. Same comment elsewhere in this patch.
> >
> > Those constants don't belong to converter.h. Could you turn them into
> > member constants of the SimplePipelineHandler class, as
> > kNumInternalBuffers (which btw should be removed) ? The number of buffer
> > slots can be passed as a parameter to SimpleConverter::start().
> >
> > There's no stats or parameters here, and no IPA, so the situation is
> > different than for IPU3 and RkISP1. The number of internal buffers
> > should just be one more than the minimum number of buffers required by
> > the capture device, I don't think there's another requirement.
> >
> > > +
> > >  class SimpleConverter
> > >  {
> > >  public:
> > > diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
> > > index 1c25a7344f5f..a1163eaf8be2 100644
> > > --- a/src/libcamera/pipeline/simple/simple.cpp
> > > +++ b/src/libcamera/pipeline/simple/simple.cpp
> > > @@ -803,12 +803,10 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
> > >                * When using the converter allocate a fixed number of internal
> > >                * buffers.
> > >                */
> > > -             ret = video->allocateBuffers(kNumInternalBuffers,
> > > +             ret = video->allocateBuffers(SIMPLE_INTERNAL_BUFFER_COUNT,
> > >                                            &data->converterBuffers_);
> > >       } else {
> > > -             /* Otherwise, prepare for using buffers from the only stream. */
> > > -             Stream *stream = &data->streams_[0];
> > > -             ret = video->importBuffers(stream->configuration().bufferCount);
> > > +             ret = video->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> > >       }
> > >       if (ret < 0)
> > >               return ret;
> > > diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > > index fd39b3d3c72c..755949e7a59a 100644
> > > --- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > > +++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > > @@ -91,6 +91,8 @@ private:
> > >               return static_cast<UVCCameraData *>(
> > >                       PipelineHandler::cameraData(camera));
> > >       }
> > > +
> > > +     static constexpr unsigned int UVC_BUFFER_SLOT_COUNT = 5;
> > >  };
> > >
> > >  UVCCameraConfiguration::UVCCameraConfiguration(UVCCameraData *data)
> > > @@ -236,9 +238,8 @@ int PipelineHandlerUVC::exportFrameBuffers(Camera *camera,
> > >  int PipelineHandlerUVC::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
> > >  {
> > >       UVCCameraData *data = cameraData(camera);
> > > -     unsigned int count = data->stream_.configuration().bufferCount;
> > >
> > > -     int ret = data->video_->importBuffers(count);
> > > +     int ret = data->video_->importBuffers(UVC_BUFFER_SLOT_COUNT);
> >
> > For the uvc and vimc pipeline handlers, we have no internal buffers, so
> > it's quite easy. We should have 8 or 16 slots, as for other pipeline
> > handlers.
> >
> > >       if (ret < 0)
> > >               return ret;
> > >
> > > diff --git a/src/libcamera/pipeline/vimc/vimc.cpp b/src/libcamera/pipeline/vimc/vimc.cpp
> > > index e89d53182c6d..24ba743a946c 100644
> > > --- a/src/libcamera/pipeline/vimc/vimc.cpp
> > > +++ b/src/libcamera/pipeline/vimc/vimc.cpp
> > > @@ -102,6 +102,8 @@ private:
> > >               return static_cast<VimcCameraData *>(
> > >                       PipelineHandler::cameraData(camera));
> > >       }
> > > +
> > > +     static constexpr unsigned int VIMC_BUFFER_SLOT_COUNT = 5;
> > >  };
> > >
> > >  namespace {
> > > @@ -312,9 +314,8 @@ int PipelineHandlerVimc::exportFrameBuffers(Camera *camera,
> > >  int PipelineHandlerVimc::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
> > >  {
> > >       VimcCameraData *data = cameraData(camera);
> > > -     unsigned int count = data->stream_.configuration().bufferCount;
> > >
> > > -     int ret = data->video_->importBuffers(count);
> > > +     int ret = data->video_->importBuffers(VIMC_BUFFER_SLOT_COUNT);
> > >       if (ret < 0)
> > >               return ret;
> > >
Laurent Pinchart Aug. 17, 2021, 2:33 a.m. UTC | #6
Hi Nícolas,

On Mon, Aug 09, 2021 at 05:26:46PM -0300, Nícolas F. R. A. Prado wrote:
> On Sat, Aug 07, 2021 at 12:03:52PM -0300, Nícolas F. R. A. Prado wrote:
> > On Mon, Aug 02, 2021 at 02:42:53AM +0300, Laurent Pinchart wrote:
> > > On Thu, Jul 22, 2021 at 08:28:49PM -0300, Nícolas F. R. A. Prado wrote:
> > > > Pipelines have relied on bufferCount to decide on the number of buffers
> > > > to allocate internally through allocateBuffers() and on the number of
> > > > V4L2 buffer slots to reserve through importBuffers(). Instead, the
> > > > number of internal buffers should be the minimum required by the
> > > > algorithms to avoid wasting memory, and the number of V4L2 buffer slots
> > > > should overallocate to avoid thrashing dmabuf mappings.
> > > > 
> > > > For now, just set them to constants and stop relying on bufferCount, to
> > > > allow for its removal.
> > > > 
> > > > Signed-off-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
> > > > ---
> > > > 
> > > > No changes in v7
> > > > 
> > > > Changes in v6:
> > > > - Added pipeline name as prefix to each BUFFER_SLOT_COUNT and
> > > >   INTERNAL_BUFFER_COUNT constant
> > > > 
> > > >  src/libcamera/pipeline/ipu3/imgu.cpp              | 12 ++++++------
> > > >  src/libcamera/pipeline/ipu3/imgu.h                |  5 ++++-
> > > >  src/libcamera/pipeline/ipu3/ipu3.cpp              |  9 +--------
> > > >  .../pipeline/raspberrypi/raspberrypi.cpp          | 15 +++++----------
> > > >  src/libcamera/pipeline/rkisp1/rkisp1.cpp          |  9 ++-------
> > > >  src/libcamera/pipeline/rkisp1/rkisp1_path.cpp     |  2 +-
> > > >  src/libcamera/pipeline/rkisp1/rkisp1_path.h       |  3 +++
> > > >  src/libcamera/pipeline/simple/converter.cpp       |  4 ++--
> > > >  src/libcamera/pipeline/simple/converter.h         |  3 +++
> > > >  src/libcamera/pipeline/simple/simple.cpp          |  6 ++----
> > > >  src/libcamera/pipeline/uvcvideo/uvcvideo.cpp      |  5 +++--
> > > >  src/libcamera/pipeline/vimc/vimc.cpp              |  5 +++--
> > > >  12 files changed, 35 insertions(+), 43 deletions(-)
> > > 
> > > Given that some of the pipeline handlers will need more intrusive
> > > changes to address the comments below, you could split this with one
> > > patch per pipeline handler (or perhaps grouping the easy ones together).
> > > 
> > > > 
> > > > diff --git a/src/libcamera/pipeline/ipu3/imgu.cpp b/src/libcamera/pipeline/ipu3/imgu.cpp
> > > > index e955bc3456ba..f36e99dacbe7 100644
> > > > --- a/src/libcamera/pipeline/ipu3/imgu.cpp
> > > > +++ b/src/libcamera/pipeline/ipu3/imgu.cpp
> > > > @@ -593,22 +593,22 @@ int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
> > > >  /**
> > > >   * \brief Allocate buffers for all the ImgU video devices
> > > >   */
> > > > -int ImgUDevice::allocateBuffers(unsigned int bufferCount)
> > > > +int ImgUDevice::allocateBuffers()
> > > >  {
> > > >  	/* Share buffers between CIO2 output and ImgU input. */
> > > > -	int ret = input_->importBuffers(bufferCount);
> > > > +	int ret = input_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> > > >  	if (ret) {
> > > >  		LOG(IPU3, Error) << "Failed to import ImgU input buffers";
> > > >  		return ret;
> > > >  	}
> > > >  
> > > > -	ret = param_->allocateBuffers(bufferCount, &paramBuffers_);
> > > > +	ret = param_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT, &paramBuffers_);
> > > >  	if (ret < 0) {
> > > >  		LOG(IPU3, Error) << "Failed to allocate ImgU param buffers";
> > > >  		goto error;
> > > >  	}
> > > >  
> > > > -	ret = stat_->allocateBuffers(bufferCount, &statBuffers_);
> > > > +	ret = stat_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT, &statBuffers_);
> > > >  	if (ret < 0) {
> > > >  		LOG(IPU3, Error) << "Failed to allocate ImgU stat buffers";
> > > >  		goto error;
> > > > @@ -619,13 +619,13 @@ int ImgUDevice::allocateBuffers(unsigned int bufferCount)
> > > >  	 * corresponding stream is active or inactive, as the driver needs
> > > >  	 * buffers to be requested on the V4L2 devices in order to operate.
> > > >  	 */
> > > > -	ret = output_->importBuffers(bufferCount);
> > > > +	ret = output_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> > > >  	if (ret < 0) {
> > > >  		LOG(IPU3, Error) << "Failed to import ImgU output buffers";
> > > >  		goto error;
> > > >  	}
> > > >  
> > > > -	ret = viewfinder_->importBuffers(bufferCount);
> > > > +	ret = viewfinder_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> > > >  	if (ret < 0) {
> > > >  		LOG(IPU3, Error) << "Failed to import ImgU viewfinder buffers";
> > > >  		goto error;
> > > > diff --git a/src/libcamera/pipeline/ipu3/imgu.h b/src/libcamera/pipeline/ipu3/imgu.h
> > > > index 9d4915116087..f934a951fc75 100644
> > > > --- a/src/libcamera/pipeline/ipu3/imgu.h
> > > > +++ b/src/libcamera/pipeline/ipu3/imgu.h
> > > > @@ -61,7 +61,7 @@ public:
> > > >  					    outputFormat);
> > > >  	}
> > > >  
> > > > -	int allocateBuffers(unsigned int bufferCount);
> > > > +	int allocateBuffers();
> > > >  	void freeBuffers();
> > > >  
> > > >  	int start();
> > > > @@ -86,6 +86,9 @@ private:
> > > >  	static constexpr unsigned int PAD_VF = 3;
> > > >  	static constexpr unsigned int PAD_STAT = 4;
> > > >  
> > > > +	static constexpr unsigned int IPU3_INTERNAL_BUFFER_COUNT = 4;
> > > > +	static constexpr unsigned int IPU3_BUFFER_SLOT_COUNT = 5;
> > > 
> > > 5 buffer slots is low. It means that if applications cycle more than 5
> > > buffers, the V4L2VideoDevice cache that maintains associations between
> > > dmabufs and buffer slots will be trashed. Due to the internal queue of
> > > requests in the IPU3 pipeline handler (similar to what you have
> > > implemented in "[PATCH 0/3] libcamera: pipeline: Add internal request
> > > queue" for other pipeline handlers), we won't fail at queuing requests,
> > > but performance will suffer. I thus think we need to increase the number
> > > of slots to what applications can be reasonably expected to use. We
> > > could use 8, or even 16, as buffer slots are cheap. The same holds for
> > > other pipeline handlers.
> > > 
> > > The number of slots for the CIO2 output should match the number of
> > > buffer slots for the ImgU input, as the same buffers are used on the two
> > > video devices. One option is to use IPU3_BUFFER_SLOT_COUNT for the CIO2,
> > > instead of CIO2_BUFFER_COUNT. However, the number of internal CIO2
> > > buffers that are allocated by exportBuffers() in CIO2Device::start(), to
> > > be used in case the application doesn't provide any RAW buffer, should
> > > be lower, as those are real buffer and are thus expensive. The number of
> > > buffers and buffer slots on the CIO2 thus needs to be decoupled.
> > > 
> > > For proper operation, the CIO2 will require at least two queued buffers
> > > (one being DMA'ed to, and one waiting). We need at least one extra
> > > buffer queued to the ImgU to keep buffers flowing. Depending on
> > > processing timings, it may be that the ImgU will complete processing of
> > > its buffer before the CIO2 captures the next one, leading to a temporary
> > > situation where the CIO2 will have three buffers queued, or the CIO2
> > > will finish the capture first, leading to a temporary situation where
> > > the CIO2 will have one buffer queued and the ImgU will have two buffers
> > > queued. In either case, shortly afterwards, the other component will
> > > complete capture or processing, and we'll get back to a situation with
> > > two buffers queued in the CIO2 and one in the ImgU. That's thus a
> > > minimum of three buffers for raw images.
> > > 
> > > From an ImgU point of view, we could probably get away with a single
> > > parameter and a single stats buffer. This would however not allow
> > > queuing the next frame for processing in the ImgU before the current
> > > frame completes, so two buffers would be better. Now, if we take the IPA
> > > into account, the statistics buffer will spend some time on the IPA side
> > > for processing. It would thus be best to have an extra statistics buffer
> > > to accommodate that, thus requiring three statistics buffers (and three
> > > parameters buffers, as we associate them together).
> > > 
> > > This rationale leads to using the same number of internal buffers for
> > > the CIO2, the parameters and the statistics. We currently use four, and
> > > while the logic above indicates we could get away with three, it would
> > > be safer to keep using four in this patch, and possibly reduce the
> > > number of buffers later.
> > > 
> > > I know documentation isn't fun, but I think this rationale should be
> > > captured in a comment in the IPU3 pipeline handler, along with a \todo
> > > item to try and lower the number of internal buffers to three.
> > 
> > This is the IPU3 topology as I understand it:
> > 
> >       Output  .               .   Input        Output .
> >       +---+   .               .   +---+        +---+  .
> >       |   | --------------------> |   |        |   |  .
> >       +---+   .               .   +---+        +---+  .
> > CIO2          .   IPA         .          ImgU         .          IPA
> >               .        Param  .   Param        Stat   .   Stat
> >               .        +---+  .   +---+        +---+  .   +---+ 
> >               .        |   | ---> |   |        |   | ---> |   | 
> >               .        +---+  .   +---+        +---+  .   +---+ 
> >           
> > Your suggestions for the minimum number of buffers required are the following,
> > from what I understand:
> > 
> > CIO2 raw internal buffers:
> > - 2x on CIO2 Output (one being DMA'ed, one waiting)
> > - 1x on ImgU Input
> > 
> > ImgU Param/Stat internal buffers:
> > - 2x on ImgU Param/Stat (one being processed, one waiting)
> > - 1x on IPA Stat
> > 
> > This arrangement doesn't seem to take into account that IPU3Frames::Info binds
> > CIO2 internal buffers and ImgU Param/Stat buffers together. This means that each
> > raw buffer queued to CIO2 Output needs a Param/Stat buffer as well. And each
> > Param/Stat buffer queued to ImgU for processing needs a CIO2 raw buffer as well.
> > After ImgU processing though, the raw buffer gets released and reused, so the
> > Stat buffer queued to the IPA does not require a CIO2 raw buffer.
> > 
> > This means that to achieve the above minimum, due to the IPU3Frames::Info
> > constraint, we'd actually need:
> > 
> > CIO2 internal buffers:
> > - 2x on CIO2 Output (one being DMA'ed, one waiting)
> > - 2x on ImgU Input (for the two ImgU Param/Stat buffers we want to have there)
> > 
> > ImgU Param/Stat internal buffers:
> > - 2x on CIO2 Output (for the two CIO2 raw buffers we want to have there)
> > - 2x on ImgU Param/Stat (one being processed, one waiting)

Note that the need to have two buffers here is to ensure back-to-back
processing of frames on the ImgU and thus avoid delays, but this need
actually depends on how fast the ImgU is. With a very fast ImgU
(compared to the frame duration), inter-frame delays may not be an
issue. There's more on this below.

> > - 1x on IPA Stat

Processing of the statistics can occur after the corresponding raw image
buffer has been requeued to the CIO2; the only hard requirement is that
the buffer needs to be available by the time the ImgU will process the
corresponding raw frame buffer again.

> > Also we're not accounting for parameter filling in the IPA before we queue the
> > buffers to ImgU, but perhaps that's fast enough that it doesn't matter?

That's one of the questions we need to answer; I don't think we have
numbers at this time. If filling the parameters buffer takes a
significant amount of time, then that would need to be taken into
account as an additional step in the pipeline, with an additional set of
buffers.

> > Does this make sense? Or am I missing something?

One thing that you may not have taken into account is that the two
buffers queued on the CIO2 output and the two buffers queued on the ImgU
are not necessarily queued at the same time. I'll try to explain.

On the CIO2 side, we have a strong realtime requirement to always keep
the CIO2 fed with buffers. The details depend a bit on the hardware and
driver implementations, but the base idea is that once a buffer is
complete and the time comes to move to the next buffer for the next
frame, there has to be a next buffer available. When exactly this occurs
can vary. Some drivers will give the buffer for the next frame to the
device when capture for the current frame starts, and some will give it
when the hardware signals completion of the capture of the current frame
(frame end). In theory this could be delayed even a bit more, but it has
to happen before the hardware needs the new buffer, and giving it when
the DMA completes is often too risky already as vertical blanking can be
short and interrupts can be delayed a bit. I tried to check the driver
to see what the exact requirement is, but I'm not familiar with the
hardware and the code is not very easy to follow.

Note that frame start is the time when the first pixel of the frame is
written to memory, and frame end the time when the last pixel of the
frame is written to memory. The end of frame N and the start of frame
N+1 are separated by the vertical blanking time.

Let's assume that the CIO2 needs to be programmed with the buffer for
frame N+1 at the start of frame N (Edit: I've written all the
explanation below based on this assumption, but after further
investigation, I *think* the CIO2 only requires the buffer for frame N+1
at the beginning of frame N+1, but the driver enforces that the buffer
must be present just before the start of frame N to avoid race
conditions - just before the start of frame N and at the start of frame
N are, practically speaking, the same thing. Sakari, do you know if this is
correct ?). We'll constantly transition between the following states,
from the CIO2 point of view.

0. (Initial state) 2x idle buffers in the queue, hardware stopped. The
   CIO2 is then started, the first buffer in the queue is given to the
   device to capture the first frame, and the second buffer in the queue
   is given to the device to capture the second frame. The first frame
   starts.

1. 1x active buffer being DMA'ed to, 1x pending buffer already given to
   the hardware for the next frame, 0x idle buffers in the queue. Two
   events can occur at this point, either completion of the current
   frame (-> 2), or a new buffer being queued by userspace (-> 4).

2. 0x active buffer being DMA'ed to, 1x pending buffer already given to
   the hardware for the next frame, 0x idle buffers in the queue. Two
   events can occur at this point, either start of the next frame (->
   3), or a new buffer being queued by userspace (-> 5).

   This state lasts for the duration of the vertical blanking only, and
   can thus be short-lived.

3. The next frame starts. The pending buffer becomes active. We have no
   buffer in the queue to give to the hardware for the next frame. An
   underrun has occurred, a frame will be dropped. Game over.

4. 1x active buffer being DMA'ed to, 1x pending buffer already given to
   the hardware for the next frame, 1x idle buffers in the queue. The
   next event that will occur is the start of the next frame (as the
   other option, a new buffer being queued, will give us additional
   safety by increasing the number of queued buffers, but isn't
   meaningful when considering the case where we try to run with the
   minimum number of buffers possible).

   As the current frame ends, the active buffer is given back to
   userspace. There's no active buffer (the DMA will start soon, after
   the vertical blanking, when the next frame starts), the pending
   buffer stays pending, and the idle buffer stays idle (-> 5).

5. 0x active buffer being DMA'ed to, 1x pending buffer already given to
   the hardware for the next frame, 1x idle buffers in the queue. The
   next event that will occur is the start of the next frame (for the
   same reason as in 4).

   As the next frame starts, the pending buffer becomes active. The
   queued buffer is given to the hardware for the subsequent frame. The
   queue of idle buffers becomes empty (-> 1).

   If this state is reached from state 2, it lasts for the remainder of
   the vertical blanking only. If it is reached from state 4, it lasts
   for the whole vertical blanking. In both cases, it can be
   short-lived.

We can thus cycle either through 1 -> 2 -> 5 -> 1 or through 1 -> 4 -> 5
-> 1. The first cycle requires two buffers for the CIO2, with an
intermediate state (2) that has a single buffer only. This is unsafe, as
a failure to queue a second buffer in the short-lived state 2 will lead
to state 3 and frame drops.

The second cycle requires three buffers for the CIO2. This is the cycle
we want to use, to avoid frame drops. Note that only state 4 requires
all three buffers, and userspace can queue the third buffer at any point
in state 1 (before the end of the current frame). If userspace queues
the buffer slightly too late, after the completion of the current frame
but before the start of the next one, we'll go to the unsafe cycle but
will still not lose frames.
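
To make the state transitions above easier to follow, here's a small
standalone model of them (not libcamera code; Cio2Model and its members
are purely illustrative, and the model hardcodes the assumption that the
buffer for frame N+1 has to be given to the hardware at the start of
frame N):

#include <cassert>
#include <iostream>

struct Cio2Model {
	unsigned int active = 0;  /* buffer currently being DMA'ed to */
	unsigned int pending = 0; /* buffer already given to the hardware */
	unsigned int idle = 0;    /* buffers queued by userspace, not given yet */
	bool underrun = false;

	void queueBuffer() { idle++; }

	void start()
	{
		/* State 0: both queued buffers are handed to the hardware,
		 * one for the first frame, one for the second. */
		assert(idle >= 2);
		active = 1;
		pending = 1;
		idle -= 2;
	}

	void frameEnd()
	{
		/* The active buffer completes and goes back to userspace. */
		assert(active == 1);
		active = 0;
	}

	void frameStart()
	{
		/* The pending buffer becomes active, and the hardware needs
		 * a buffer for the next frame right away. Having no idle
		 * buffer at this point is state 3: game over. */
		assert(pending == 1);
		active = 1;
		pending = 0;
		if (idle) {
			pending = 1;
			idle--;
		} else {
			underrun = true;
		}
	}
};

int main()
{
	Cio2Model cio2;

	cio2.queueBuffer();
	cio2.queueBuffer();
	cio2.start();                /* -> state 1 */

	/* Safe cycle 1 -> 4 -> 5 -> 1: the third buffer is requeued before
	 * the end of the current frame. */
	for (unsigned int frame = 0; frame < 5 && !cio2.underrun; frame++) {
		cio2.queueBuffer();  /* -> state 4; drop this to hit state 3 */
		cio2.frameEnd();     /* -> state 5 */
		cio2.frameStart();   /* -> state 1 */
	}

	std::cout << (cio2.underrun ? "underrun" : "no underrun") << std::endl;
	return 0;
}

Dropping the queueBuffer() call in the loop reproduces the 1 -> 2 -> 3
underrun case, and moving it after frameEnd() reproduces the unsafe but
working 1 -> 2 -> 5 -> 1 cycle.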

Now, let's look at the ImgU side, and assume we use three buffers in
total. The ImgU operates from memory to memory, and thus has no realtime
requirement. It only starts processing a frame when the frame is given
to it. This occurs, from a CIO2 point of view, in the transition from
state 4 to state 5, plus all delays introduced by delivering the CIO2
frame completion event to userspace, queueing the frame to the ImgU (I'm
ignoring the IPA here), and starting the ImgU itself. The ImgU
processing time will, on average, be lower than the frame duration;
otherwise it won't be able to process all frames. Once the ImgU
completes processing of the frame, it will signal this to userspace.
There's also a processing delay there (signalling, task switching, ...),
and userspace will requeue the frame to the CIO2. This has to occur at
the latest before the end of the current frame, otherwise state 1 will
transition to state 2.

We thus see that, in the 3-buffer case, we need to ensure that the
total time to process the frame on the ImgU, from the CIO2 interrupt
signalling the end of state 4 to the buffer being requeued to the CIO2,
thus including all task switching and other delays, doesn't exceed the
duration of states 5 + 1, which is equal to the duration of a frame. The
ImgU processing time itself is guaranteed to be lower than that, but the
additional delays may be problematic. We also need to include a possible
round-trip to the IPA after end of buffer capture by the CIO2 and start
of processing by the ImgU to retrieve the ImgU parameters for the frame.
Three buffers starts to sound quite risky. I'm thus correcting myself:
four buffers seem safer.
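
As a back-of-the-envelope illustration of that budget (all the numbers
below are made up for the example; they're not measurements):

#include <chrono>
#include <iostream>

int main()
{
	using std::chrono::microseconds;

	const microseconds frameDuration(33333);         /* 30 fps */

	/* Hypothetical per-frame costs for the 3-buffer case. */
	const microseconds cio2IrqToUserspace(500);
	const microseconds ipaParamsRoundTrip(2000);
	const microseconds imguProcessing(29000);
	const microseconds imguCompletionToRequeue(500);

	const microseconds total = cio2IrqToUserspace + ipaParamsRoundTrip
				 + imguProcessing + imguCompletionToRequeue;

	/* With three raw buffers, the whole round trip has to fit in the
	 * duration of states 5 + 1, i.e. one frame duration. */
	std::cout << "budget " << frameDuration.count() << " us, used "
		  << total.count() << " us: "
		  << (total <= frameDuration ? "3 buffers fit" : "need a 4th buffer")
		  << std::endl;
	return 0;
}

With 29 ms of hypothetical ImgU processing at 30 fps this already leaves
almost no margin, which is why four buffers look safer.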

None of this takes the parameters or statistics buffers into account,
but I don't think they're particularly problematic in the sense that the
most strict realtime constraints come from the raw image buffers. Feel
free to prove me wrong though :-)

Let's however note that we can probably fetch the ImgU parameters for
the frame that has just been captured before the end of the frame, so
that would remove a delay in the ImgU processing. This assumes that the
algorithms wouldn't need to know the exact exposure time and analog gain
that have been used to capture the current frame in order to compute the
ImgU parameters. This leads to a first question to David: does the
Raspberry Pi IPA require the sensor metadata to calculate ISP
parameters, or are they needed only when processing statistics from
frame N to calculate sensor and ISP parameters of subsequent frames ?

The next question is for everybody (and that's why I've expanded the CC
list to Kieran, Jean-Michel and Sakari too): what did I get wrong in the
above explanation ? :-)

> > > > +
> > > >  	int linkSetup(const std::string &source, unsigned int sourcePad,
> > > >  		      const std::string &sink, unsigned int sinkPad,
> > > >  		      bool enable);
> > > > diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp
> > > > index 5fd1757bfe13..4efd201c05e5 100644
> > > > --- a/src/libcamera/pipeline/ipu3/ipu3.cpp
> > > > +++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
> > > > @@ -681,16 +681,9 @@ int PipelineHandlerIPU3::allocateBuffers(Camera *camera)
> > > >  {
> > > >  	IPU3CameraData *data = cameraData(camera);
> > > >  	ImgUDevice *imgu = data->imgu_;
> > > > -	unsigned int bufferCount;
> > > >  	int ret;
> > > >  
> > > > -	bufferCount = std::max({
> > > > -		data->outStream_.configuration().bufferCount,
> > > > -		data->vfStream_.configuration().bufferCount,
> > > > -		data->rawStream_.configuration().bufferCount,
> > > > -	});
> > > > -
> > > > -	ret = imgu->allocateBuffers(bufferCount);
> > > > +	ret = imgu->allocateBuffers();
> > > >  	if (ret < 0)
> > > >  		return ret;
> > > >  
> > > > diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > index d1cd3d9dc082..776e0f92aed1 100644
> > > > --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > @@ -1149,20 +1149,15 @@ int PipelineHandlerRPi::prepareBuffers(Camera *camera)
> > > >  {
> > > >  	RPiCameraData *data = cameraData(camera);
> > > >  	int ret;
> > > > +	constexpr unsigned int bufferCount = 4;
> > > >  
> > > >  	/*
> > > > -	 * Decide how many internal buffers to allocate. For now, simply look
> > > > -	 * at how many external buffers will be provided. We'll need to improve
> > > > -	 * this logic. However, we really must have all streams allocate the same
> > > > -	 * number of buffers to simplify error handling in queueRequestDevice().
> > > > +	 * Allocate internal buffers. We really must have all streams allocate
> > > > +	 * the same number of buffers to simplify error handling in
> > > > +	 * queueRequestDevice().
> > > >  	 */
> > > > -	unsigned int maxBuffers = 0;
> > > > -	for (const Stream *s : camera->streams())
> > > > -		if (static_cast<const RPi::Stream *>(s)->isExternal())
> > > > -			maxBuffers = std::max(maxBuffers, s->configuration().bufferCount);
> > > > -
> > > >  	for (auto const stream : data->streams_) {
> > > > -		ret = stream->prepareBuffers(maxBuffers);
> > > > +		ret = stream->prepareBuffers(bufferCount);
> > > 
> > > We have a similar problem here, 4 buffer slots is too little, but when
> > > the stream has to allocate internal buffers (!importOnly), which is the
> > > case for most streams, we don't want to overallocate.
> > > 
> > > I'd like to get feedback from Naush here, but I think this means we'll
> > > have to relax the requirement documented in the comment above, and
> > > accept a different number of buffers for each stream.
> > > 
> > > >  		if (ret < 0)
> > > >  			return ret;
> > > >  	}
> > > > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > > > index 11325875b929..f4ea2fd4d4d0 100644
> > > > --- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > > > +++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > > > @@ -690,16 +690,11 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
> > > >  	unsigned int ipaBufferId = 1;
> > > >  	int ret;
> > > >  
> > > > -	unsigned int maxCount = std::max({
> > > > -		data->mainPathStream_.configuration().bufferCount,
> > > > -		data->selfPathStream_.configuration().bufferCount,
> > > > -	});
> > > > -
> > > > -	ret = param_->allocateBuffers(maxCount, &paramBuffers_);
> > > > +	ret = param_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT, &paramBuffers_);
> > > >  	if (ret < 0)
> > > >  		goto error;
> > > >  
> > > > -	ret = stat_->allocateBuffers(maxCount, &statBuffers_);
> > > > +	ret = stat_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT, &statBuffers_);
> > > >  	if (ret < 0)
> > > >  		goto error;
> > > >  
> > > > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > > > index 25f482eb8d8e..fea330f72886 100644
> > > > --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > > > +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > > > @@ -172,7 +172,7 @@ int RkISP1Path::start()
> > > >  		return -EBUSY;
> > > >  
> > > >  	/* \todo Make buffer count user configurable. */
> > > > -	ret = video_->importBuffers(RKISP1_BUFFER_COUNT);
> > > > +	ret = video_->importBuffers(RKISP1_BUFFER_SLOT_COUNT);
> > > >  	if (ret)
> > > >  		return ret;
> > > >  
> > > > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.h b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > > > index 91757600ccdc..3c5891009c58 100644
> > > > --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > > > +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > > > @@ -27,6 +27,9 @@ class V4L2Subdevice;
> > > >  struct StreamConfiguration;
> > > >  struct V4L2SubdeviceFormat;
> > > >  
> > > > +static constexpr unsigned int RKISP1_INTERNAL_BUFFER_COUNT = 4;
> > > > +static constexpr unsigned int RKISP1_BUFFER_SLOT_COUNT = 5;
> > > 
> > > The situation should be simpler for the rkisp1, as it has a different
> > > pipeline model (inline ISP as opposed to offline ISP for the IPU3). We
> > > can allocate more slots (8 or 16, as for other pipeline handlers), and
> > > restrict the number of internal buffers (for stats and parameters) to
> > > the number of requests we expect to queue to the device at once, plus
> > > one for the IPA.  Four thus seems good. Capturing this rationale in a
> > > comment would be good too.
> 
> Shouldn't we also have one extra buffer queued to the capture device, like for
> the others, totalling five (four on the capture, one on the IPA)? Or since the
> driver already requires three buffers the extra one isn't needed?
>
> I'm not sure how it works, but if the driver requires three buffers at all times
> to keep streaming, then I think we indeed should have the extra buffer to avoid
> dropping frames. Otherwise, if that requirement is only for starting the stream,
> then for drivers that require at least two buffers we shouldn't need an extra
> one, I'd think.

It seems to be only needed to start capture. Even then I think it could
be lowered to two buffers; I don't see anything in the driver that
requires three. Maybe someone from Collabora could comment on this ? And
maybe you could give it a try by modifying the driver ?

By the way, if you try to apply the CIO2 reasoning above to the RkISP1,
you will need to take into account the fact that the driver programs the
hardware with the buffer for frame N+1 not at the beginning of frame N,
but at the end of frame N-1.

I think four buffers is enough. We currently use four buffers and it
seems to work :-) Granted, the RkISP1 IPA is a skeleton, so this
argument isn't very strong, but given that the driver only needs two
buffers except at start time, four should be fine.
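
Something along these lines in rkisp1_path.h could capture that rationale
(a sketch only, not a patch; the k-prefixed names follow the naming scheme
suggested earlier in the review, and the values are the ones discussed
above):

/*
 * The RkISP1 is an inline ISP, so the parameters and statistics buffers
 * only need to cover the requests queued to the driver at once, plus one
 * buffer being processed by the IPA. Buffer slots on the capture paths
 * are cheap, so overallocate them to avoid thrashing dmabuf mappings when
 * applications cycle through more buffers.
 */
static constexpr unsigned int kRkISP1InternalBufferCount = 4;
static constexpr unsigned int kRkISP1BufferSlotCount = 16;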

> > > BTW, I may be too tired to think properly, or just unable to see the
> > > obvious, so please challenge any rationale you think is incorrect.
> > > 
> > > > +
> > > >  class RkISP1Path
> > > >  {
> > > >  public:
> > > > diff --git a/src/libcamera/pipeline/simple/converter.cpp b/src/libcamera/pipeline/simple/converter.cpp
> > > > index b5e34c4cd0c5..b3bcf01483f7 100644
> > > > --- a/src/libcamera/pipeline/simple/converter.cpp
> > > > +++ b/src/libcamera/pipeline/simple/converter.cpp
> > > > @@ -103,11 +103,11 @@ int SimpleConverter::Stream::exportBuffers(unsigned int count,
> > > >  
> > > >  int SimpleConverter::Stream::start()
> > > >  {
> > > > -	int ret = m2m_->output()->importBuffers(inputBufferCount_);
> > > > +	int ret = m2m_->output()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> > > 
> > > Shouldn't this be SIMPLE_INTERNAL_BUFFER_COUNT ? Overallocating is not
> > > much of an issue I suppose.
> 
> Indeed. I was under the impression that we should always importBuffers() using
> BUFFER_SLOT_COUNT, but now, after reading more code, I understand that's not
> always the case (although this seems to be the only case, due to the presence of
> the converter).
> 
> > > >  	if (ret < 0)
> > > >  		return ret;
> > > >  
> > > > -	ret = m2m_->capture()->importBuffers(outputBufferCount_);
> > > > +	ret = m2m_->capture()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> > > >  	if (ret < 0) {
> > > >  		stop();
> > > >  		return ret;
> > > > diff --git a/src/libcamera/pipeline/simple/converter.h b/src/libcamera/pipeline/simple/converter.h
> > > > index 276a2a291c21..7e1d60674f62 100644
> > > > --- a/src/libcamera/pipeline/simple/converter.h
> > > > +++ b/src/libcamera/pipeline/simple/converter.h
> > > > @@ -29,6 +29,9 @@ class SizeRange;
> > > >  struct StreamConfiguration;
> > > >  class V4L2M2MDevice;
> > > >  
> > > > +constexpr unsigned int SIMPLE_INTERNAL_BUFFER_COUNT = 5;
> > > > +constexpr unsigned int SIMPLE_BUFFER_SLOT_COUNT = 5;
> > > 
> > > Let's name the variables kSimpleInternalBufferCount and
> > > kSimpleBufferSlotCount, as that's the naming scheme we're moving to for
> > > non-macro constants. Same comment elsewhere in this patch.
> > > 
> > > Those constants don't belong to converter.h. Could you turn them into
> > > member constants of the SimplePipelineHandler class, as
> > > kNumInternalBuffers (which btw should be removed) ? The number of buffer
> > > slots can be passed as a parameter to SimpleConverter::start().
> > > 
> > > There's no stats or parameters here, and no IPA, so the situation is
> > > different than for IPU3 and RkISP1. The number of internal buffers
> > > should just be one more than the minimum number of buffers required by
> > > the capture device, I don't think there's another requirement.
> 
> Plus one extra to have queued at the converter's 'output' node (which is its
> input, confusingly)?

It depends a bit on the exact timings of the capture device, as is
probably clear from the explanation above (or at least is now clearly
seen as a complicated topic :-)). We need to ensure that the realtime
requirements of the device are met, and that the capture buffers that
complete, and are then processed by the converter, will be requeued in
time to the capture device to meet those requirements.

As the simple pipeline handler deals with a variety of devices, we have
two options, either checking the requirements of each device and
recording them in the supportedDevices array, or picking a common number of
buffers that should be good enough for everybody. I'd start with the
second option for simplicity, and as the pipeline handler currently uses
3 buffers, I'd stick to that for now.
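
For the second option, that could look like the following member
constants in the private section of SimplePipelineHandler (again just a
sketch; the names follow the k-prefixed scheme, the internal count keeps
the current three buffers, and the slot count is overallocated as for the
other pipeline handlers):

/*
 * The converter uses a fixed number of internal buffers; keep the current
 * value of three until per-device requirements are recorded in
 * supportedDevices. Buffer slots are cheap and can be overallocated.
 */
static constexpr unsigned int kSimpleInternalBufferCount = 3;
static constexpr unsigned int kSimpleBufferSlotCount = 16;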

> > > > +
> > > >  class SimpleConverter
> > > >  {
> > > >  public:
> > > > diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
> > > > index 1c25a7344f5f..a1163eaf8be2 100644
> > > > --- a/src/libcamera/pipeline/simple/simple.cpp
> > > > +++ b/src/libcamera/pipeline/simple/simple.cpp
> > > > @@ -803,12 +803,10 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
> > > >  		 * When using the converter allocate a fixed number of internal
> > > >  		 * buffers.
> > > >  		 */
> > > > -		ret = video->allocateBuffers(kNumInternalBuffers,
> > > > +		ret = video->allocateBuffers(SIMPLE_INTERNAL_BUFFER_COUNT,
> > > >  					     &data->converterBuffers_);
> > > >  	} else {
> > > > -		/* Otherwise, prepare for using buffers from the only stream. */
> > > > -		Stream *stream = &data->streams_[0];
> > > > -		ret = video->importBuffers(stream->configuration().bufferCount);
> > > > +		ret = video->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> > > >  	}
> > > >  	if (ret < 0)
> > > >  		return ret;
> > > > diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > > > index fd39b3d3c72c..755949e7a59a 100644
> > > > --- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > > > +++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > > > @@ -91,6 +91,8 @@ private:
> > > >  		return static_cast<UVCCameraData *>(
> > > >  			PipelineHandler::cameraData(camera));
> > > >  	}
> > > > +
> > > > +	static constexpr unsigned int UVC_BUFFER_SLOT_COUNT = 5;
> > > >  };
> > > >  
> > > >  UVCCameraConfiguration::UVCCameraConfiguration(UVCCameraData *data)
> > > > @@ -236,9 +238,8 @@ int PipelineHandlerUVC::exportFrameBuffers(Camera *camera,
> > > >  int PipelineHandlerUVC::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
> > > >  {
> > > >  	UVCCameraData *data = cameraData(camera);
> > > > -	unsigned int count = data->stream_.configuration().bufferCount;
> > > >  
> > > > -	int ret = data->video_->importBuffers(count);
> > > > +	int ret = data->video_->importBuffers(UVC_BUFFER_SLOT_COUNT);
> > > 
> > > For the uvc and vimc pipeline handlers, we have no internal buffers, so
> > > it's quite easy. We should have 8 or 16 slots, as for other pipeline
> > > handlers.
> > > 
> > > >  	if (ret < 0)
> > > >  		return ret;
> > > >  
> > > > diff --git a/src/libcamera/pipeline/vimc/vimc.cpp b/src/libcamera/pipeline/vimc/vimc.cpp
> > > > index e89d53182c6d..24ba743a946c 100644
> > > > --- a/src/libcamera/pipeline/vimc/vimc.cpp
> > > > +++ b/src/libcamera/pipeline/vimc/vimc.cpp
> > > > @@ -102,6 +102,8 @@ private:
> > > >  		return static_cast<VimcCameraData *>(
> > > >  			PipelineHandler::cameraData(camera));
> > > >  	}
> > > > +
> > > > +	static constexpr unsigned int VIMC_BUFFER_SLOT_COUNT = 5;
> > > >  };
> > > >  
> > > >  namespace {
> > > > @@ -312,9 +314,8 @@ int PipelineHandlerVimc::exportFrameBuffers(Camera *camera,
> > > >  int PipelineHandlerVimc::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
> > > >  {
> > > >  	VimcCameraData *data = cameraData(camera);
> > > > -	unsigned int count = data->stream_.configuration().bufferCount;
> > > >  
> > > > -	int ret = data->video_->importBuffers(count);
> > > > +	int ret = data->video_->importBuffers(VIMC_BUFFER_SLOT_COUNT);
> > > >  	if (ret < 0)
> > > >  		return ret;
> > > >
Naushir Patuck Aug. 17, 2021, 6:47 a.m. UTC | #7
Hi Laurent,



On Tue, 17 Aug 2021, 1:21 am Laurent Pinchart, <
laurent.pinchart@ideasonboard.com> wrote:

> Hi Naush,
>
> On Thu, Aug 12, 2021 at 12:32:28PM +0100, Naushir Patuck wrote:
> > On Mon, 2 Aug 2021 at 00:43, Laurent Pinchart wrote:
> > > On Thu, Jul 22, 2021 at 08:28:49PM -0300, Nícolas F. R. A. Prado wrote:
> > > > Pipelines have relied on bufferCount to decide on the number of
> buffers
> > > > to allocate internally through allocateBuffers() and on the number of
> > > > V4L2 buffer slots to reserve through importBuffers(). Instead, the
> > > > number of internal buffers should be the minimum required by the
> > > > algorithms to avoid wasting memory, and the number of V4L2 buffer
> slots
> > > > should overallocate to avoid thrashing dmabuf mappings.
> > > >
> > > > For now, just set them to constants and stop relying on bufferCount,
> to
> > > > allow for its removal.
> > > >
> > > > Signed-off-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
> > > > ---
> > > >
> > > > No changes in v7
> > > >
> > > > Changes in v6:
> > > > - Added pipeline name as prefix to each BUFFER_SLOT_COUNT and
> > > >   INTERNAL_BUFFER_COUNT constant
> > > >
> > > >  src/libcamera/pipeline/ipu3/imgu.cpp              | 12 ++++++------
> > > >  src/libcamera/pipeline/ipu3/imgu.h                |  5 ++++-
> > > >  src/libcamera/pipeline/ipu3/ipu3.cpp              |  9 +--------
> > > >  .../pipeline/raspberrypi/raspberrypi.cpp          | 15
> +++++----------
> > > >  src/libcamera/pipeline/rkisp1/rkisp1.cpp          |  9 ++-------
> > > >  src/libcamera/pipeline/rkisp1/rkisp1_path.cpp     |  2 +-
> > > >  src/libcamera/pipeline/rkisp1/rkisp1_path.h       |  3 +++
> > > >  src/libcamera/pipeline/simple/converter.cpp       |  4 ++--
> > > >  src/libcamera/pipeline/simple/converter.h         |  3 +++
> > > >  src/libcamera/pipeline/simple/simple.cpp          |  6 ++----
> > > >  src/libcamera/pipeline/uvcvideo/uvcvideo.cpp      |  5 +++--
> > > >  src/libcamera/pipeline/vimc/vimc.cpp              |  5 +++--
> > > >  12 files changed, 35 insertions(+), 43 deletions(-)
> > >
> > > Given that some of the pipeline handlers will need more intrusive
> > > changes to address the comments below, you could split this with one
> > > patch per pipeline handler (or perhaps grouping the easy ones
> together).
> > >
> > > > diff --git a/src/libcamera/pipeline/ipu3/imgu.cpp
> b/src/libcamera/pipeline/ipu3/imgu.cpp
> > > > index e955bc3456ba..f36e99dacbe7 100644
> > > > --- a/src/libcamera/pipeline/ipu3/imgu.cpp
> > > > +++ b/src/libcamera/pipeline/ipu3/imgu.cpp
> > > > @@ -593,22 +593,22 @@ int
> ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
> > > >  /**
> > > >   * \brief Allocate buffers for all the ImgU video devices
> > > >   */
> > > > -int ImgUDevice::allocateBuffers(unsigned int bufferCount)
> > > > +int ImgUDevice::allocateBuffers()
> > > >  {
> > > >       /* Share buffers between CIO2 output and ImgU input. */
> > > > -     int ret = input_->importBuffers(bufferCount);
> > > > +     int ret = input_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> > > >       if (ret) {
> > > >               LOG(IPU3, Error) << "Failed to import ImgU input
> buffers";
> > > >               return ret;
> > > >       }
> > > >
> > > > -     ret = param_->allocateBuffers(bufferCount, &paramBuffers_);
> > > > +     ret = param_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT,
> &paramBuffers_);
> > > >       if (ret < 0) {
> > > >               LOG(IPU3, Error) << "Failed to allocate ImgU param
> buffers";
> > > >               goto error;
> > > >       }
> > > >
> > > > -     ret = stat_->allocateBuffers(bufferCount, &statBuffers_);
> > > > +     ret = stat_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT,
> &statBuffers_);
> > > >       if (ret < 0) {
> > > >               LOG(IPU3, Error) << "Failed to allocate ImgU stat
> buffers";
> > > >               goto error;
> > > > @@ -619,13 +619,13 @@ int ImgUDevice::allocateBuffers(unsigned int
> bufferCount)
> > > >        * corresponding stream is active or inactive, as the driver
> needs
> > > >        * buffers to be requested on the V4L2 devices in order to
> operate.
> > > >        */
> > > > -     ret = output_->importBuffers(bufferCount);
> > > > +     ret = output_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> > > >       if (ret < 0) {
> > > >               LOG(IPU3, Error) << "Failed to import ImgU output
> buffers";
> > > >               goto error;
> > > >       }
> > > >
> > > > -     ret = viewfinder_->importBuffers(bufferCount);
> > > > +     ret = viewfinder_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> > > >       if (ret < 0) {
> > > >               LOG(IPU3, Error) << "Failed to import ImgU viewfinder
> buffers";
> > > >               goto error;
> > > > diff --git a/src/libcamera/pipeline/ipu3/imgu.h
> b/src/libcamera/pipeline/ipu3/imgu.h
> > > > index 9d4915116087..f934a951fc75 100644
> > > > --- a/src/libcamera/pipeline/ipu3/imgu.h
> > > > +++ b/src/libcamera/pipeline/ipu3/imgu.h
> > > > @@ -61,7 +61,7 @@ public:
> > > >                                           outputFormat);
> > > >       }
> > > >
> > > > -     int allocateBuffers(unsigned int bufferCount);
> > > > +     int allocateBuffers();
> > > >       void freeBuffers();
> > > >
> > > >       int start();
> > > > @@ -86,6 +86,9 @@ private:
> > > >       static constexpr unsigned int PAD_VF = 3;
> > > >       static constexpr unsigned int PAD_STAT = 4;
> > > >
> > > > +     static constexpr unsigned int IPU3_INTERNAL_BUFFER_COUNT = 4;
> > > > +     static constexpr unsigned int IPU3_BUFFER_SLOT_COUNT = 5;
> > >
> > > 5 buffer slots is low. It means that if applications cycle more than 5
> > > buffers, the V4L2VideoDevice cache that maintains associations between
> > > dmabufs and buffer slots will be trashed. Due to the internal queue of
> > > requests in the IPU3 pipeline handler (similar to what you have
> > > implemented in "[PATCH 0/3] libcamera: pipeline: Add internal request
> > > queue" for other pipeline handlers), we won't fail at queuing requests,
> > > but performance will suffer. I thus think we need to increase the
> number
> > > of slots to what applications can be reasonably expected to use. We
> > > could use 8, or even 16, as buffer slots are cheap. The same holds for
> > > other pipeline handlers.
> > >
> > > The number of slots for the CIO2 output should match the number of
> > > buffer slots for the ImgU input, as the same buffers are used on the
> two
> > > video devices. One option is to use IPU3_BUFFER_SLOT_COUNT for the
> CIO2,
> > > instead of CIO2_BUFFER_COUNT. However, the number of internal CIO2
> > > buffers that are allocated by exportBuffers() in CIO2Device::start(),
> to
> > > be used in case the application doesn't provide any RAW buffer, should
> > > be lower, as those are real buffers and are thus expensive. The number
> of
> > > buffers and buffer slots on the CIO2 thus needs to be decoupled.
> > >
> > > For proper operation, the CIO2 will require at least two queued buffers
> > > (one being DMA'ed to, and one waiting). We need at least one extra
> > > buffer queued to the ImgU to keep buffers flowing. Depending on
> > > processing timings, it may be that the ImgU will complete processing of
> > > its buffer before the CIO2 captures the next one, leading to a
> temporary
> > > situation where the CIO2 will have three buffers queued, or the CIO2
> > > will finish the capture first, leading to a temporary situation where
> > > the CIO2 will have one buffer queued and the ImgU will have two buffers
> > > queued. In either case, shortly afterwards, the other component will
> > > complete capture or processing, and we'll get back to a situation with
> > > two buffers queued in the CIO2 and one in the ImgU. That's thus a
> > > minimum of three buffers for raw images.
> > >
> > > From an ImgU point of view, we could probably get away with a single
> > > parameter and a single stats buffer. This would however not allow
> > > queuing the next frame for processing in the ImgU before the current
> > > frame completes, so two buffers would be better. Now, if we take the
> IPA
> > > into account, the statistics buffer will spend some time on the IPA
> side
> > > for processing. It would thus be best to have an extra statistics
> buffer
> > > to accommodate that, thus requiring three statistics buffers (and three
> > > parameters buffers, as we associate them together).
> > >
> > > This rationale leads to using the same number of internal buffers for
> > > the CIO2, the parameters and the statistics. We currently use four, and
> > > while the logic above indicates we could get away with three, it would
> > > be safer to keep using four in this patch, and possibly reduce the
> > > number of buffers later.
> > >
> > > I know documentation isn't fun, but I think this rationale should be
> > > captured in a comment in the IPU3 pipeline handler, along with a \todo
> > > item to try and lower the number of internal buffers to three.
> > >
> > > > +
> > > >       int linkSetup(const std::string &source, unsigned int
> sourcePad,
> > > >                     const std::string &sink, unsigned int sinkPad,
> > > >                     bool enable);
> > > > diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp
> b/src/libcamera/pipeline/ipu3/ipu3.cpp
> > > > index 5fd1757bfe13..4efd201c05e5 100644
> > > > --- a/src/libcamera/pipeline/ipu3/ipu3.cpp
> > > > +++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
> > > > @@ -681,16 +681,9 @@ int PipelineHandlerIPU3::allocateBuffers(Camera
> *camera)
> > > >  {
> > > >       IPU3CameraData *data = cameraData(camera);
> > > >       ImgUDevice *imgu = data->imgu_;
> > > > -     unsigned int bufferCount;
> > > >       int ret;
> > > >
> > > > -     bufferCount = std::max({
> > > > -             data->outStream_.configuration().bufferCount,
> > > > -             data->vfStream_.configuration().bufferCount,
> > > > -             data->rawStream_.configuration().bufferCount,
> > > > -     });
> > > > -
> > > > -     ret = imgu->allocateBuffers(bufferCount);
> > > > +     ret = imgu->allocateBuffers();
> > > >       if (ret < 0)
> > > >               return ret;
> > > >
> > > > diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > index d1cd3d9dc082..776e0f92aed1 100644
> > > > --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > @@ -1149,20 +1149,15 @@ int
> PipelineHandlerRPi::prepareBuffers(Camera *camera)
> > > >  {
> > > >       RPiCameraData *data = cameraData(camera);
> > > >       int ret;
> > > > +     constexpr unsigned int bufferCount = 4;
> > > >
> > > >       /*
> > > > -      * Decide how many internal buffers to allocate. For now,
> simply look
> > > > -      * at how many external buffers will be provided. We'll need
> to improve
> > > > -      * this logic. However, we really must have all streams
> allocate the same
> > > > -      * number of buffers to simplify error handling in
> queueRequestDevice().
> > > > +      * Allocate internal buffers. We really must have all streams
> allocate
> > > > +      * the same number of buffers to simplify error handling in
> > > > +      * queueRequestDevice().
> > > >        */
> > > > -     unsigned int maxBuffers = 0;
> > > > -     for (const Stream *s : camera->streams())
> > > > -             if (static_cast<const RPi::Stream *>(s)->isExternal())
> > > > -                     maxBuffers = std::max(maxBuffers,
> s->configuration().bufferCount);
> > > > -
> > > >       for (auto const stream : data->streams_) {
> > > > -             ret = stream->prepareBuffers(maxBuffers);
> > > > +             ret = stream->prepareBuffers(bufferCount);
> > >
> > > We have a similar problem here, 4 buffer slots is too little, but when
> > > the stream has to allocate internal buffers (!importOnly), which is the
> > > case for most streams, we don't want to overallocate.
> > >
> > > I'd like to get feedback from Naush here, but I think this means we'll
> > > have to relax the requirement documented in the comment above, and
> > > accept a different number of buffers for each stream.
> >
> > Sorry for the late reply to this thread!
> >
> > As is evident from the above comment, this bit of code does need to be
> improved
> > to avoid over-allocations, which I will get to at some point. However,
> to address this
> > change and the comments, 4 buffer slots sounds like it might be too
> little.  Regarding
> > the requirement on having streams allocate the same number of buffers -
> that can be
> > relaxed (and the comment removed) as we do handle it correctly.
>
> Thanks for the information. I understand that this means that we can
> drop the comment and have different numbers of buffers for different
> streams without any other change to the pipeline handler. If that's
> incorrect, please let me know.
>


Yes, that should be the case now.

However, I would probably still prefer to keep the number of Unicam Image
and Unicam Embedded buffers the same for symmetry.
I don't think that should cause any issue with this rework.

Regards,
Naush

>
> > > >               if (ret < 0)
> > > >                       return ret;
> > > >       }
> > > > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > > > index 11325875b929..f4ea2fd4d4d0 100644
> > > > --- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > > > +++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > > > @@ -690,16 +690,11 @@ int
> PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
> > > >       unsigned int ipaBufferId = 1;
> > > >       int ret;
> > > >
> > > > -     unsigned int maxCount = std::max({
> > > > -             data->mainPathStream_.configuration().bufferCount,
> > > > -             data->selfPathStream_.configuration().bufferCount,
> > > > -     });
> > > > -
> > > > -     ret = param_->allocateBuffers(maxCount, &paramBuffers_);
> > > > +     ret = param_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT,
> &paramBuffers_);
> > > >       if (ret < 0)
> > > >               goto error;
> > > >
> > > > -     ret = stat_->allocateBuffers(maxCount, &statBuffers_);
> > > > +     ret = stat_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT,
> &statBuffers_);
> > > >       if (ret < 0)
> > > >               goto error;
> > > >
> > > > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > > > index 25f482eb8d8e..fea330f72886 100644
> > > > --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > > > +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > > > @@ -172,7 +172,7 @@ int RkISP1Path::start()
> > > >               return -EBUSY;
> > > >
> > > >       /* \todo Make buffer count user configurable. */
> > > > -     ret = video_->importBuffers(RKISP1_BUFFER_COUNT);
> > > > +     ret = video_->importBuffers(RKISP1_BUFFER_SLOT_COUNT);
> > > >       if (ret)
> > > >               return ret;
> > > >
> > > > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > > > index 91757600ccdc..3c5891009c58 100644
> > > > --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > > > +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > > > @@ -27,6 +27,9 @@ class V4L2Subdevice;
> > > >  struct StreamConfiguration;
> > > >  struct V4L2SubdeviceFormat;
> > > >
> > > > +static constexpr unsigned int RKISP1_INTERNAL_BUFFER_COUNT = 4;
> > > > +static constexpr unsigned int RKISP1_BUFFER_SLOT_COUNT = 5;
> > >
> > > The situation should be simpler for the rkisp1, as it has a different
> > > pipeline model (inline ISP as opposed to offline ISP for the IPU3). We
> > > can allocate more slots (8 or 16, as for other pipeline handlers), and
> > > restrict the number of internal buffers (for stats and parameters) to
> > > the number of requests we expect to queue to the device at once, plus
> > > one for the IPA.  Four thus seems good. Capturing this rationale in a
> > > comment would be good too.
> > >
> > > BTW, I may be too tired to think properly, or just unable to see the
> > > obvious, so please challenge any rationale you think is incorrect.
> > >
> > > > +
> > > >  class RkISP1Path
> > > >  {
> > > >  public:
> > > > diff --git a/src/libcamera/pipeline/simple/converter.cpp
> b/src/libcamera/pipeline/simple/converter.cpp
> > > > index b5e34c4cd0c5..b3bcf01483f7 100644
> > > > --- a/src/libcamera/pipeline/simple/converter.cpp
> > > > +++ b/src/libcamera/pipeline/simple/converter.cpp
> > > > @@ -103,11 +103,11 @@ int
> SimpleConverter::Stream::exportBuffers(unsigned int count,
> > > >
> > > >  int SimpleConverter::Stream::start()
> > > >  {
> > > > -     int ret = m2m_->output()->importBuffers(inputBufferCount_);
> > > > +     int ret =
> m2m_->output()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> > >
> > > Shouldn't this be SIMPLE_INTERNAL_BUFFER_COUNT ? Overallocating is not
> > > much of an issue I suppose.
> > >
> > > >       if (ret < 0)
> > > >               return ret;
> > > >
> > > > -     ret = m2m_->capture()->importBuffers(outputBufferCount_);
> > > > +     ret = m2m_->capture()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> > > >       if (ret < 0) {
> > > >               stop();
> > > >               return ret;
> > > > diff --git a/src/libcamera/pipeline/simple/converter.h
> b/src/libcamera/pipeline/simple/converter.h
> > > > index 276a2a291c21..7e1d60674f62 100644
> > > > --- a/src/libcamera/pipeline/simple/converter.h
> > > > +++ b/src/libcamera/pipeline/simple/converter.h
> > > > @@ -29,6 +29,9 @@ class SizeRange;
> > > >  struct StreamConfiguration;
> > > >  class V4L2M2MDevice;
> > > >
> > > > +constexpr unsigned int SIMPLE_INTERNAL_BUFFER_COUNT = 5;
> > > > +constexpr unsigned int SIMPLE_BUFFER_SLOT_COUNT = 5;
> > >
> > > Let's name the variables kSimpleInternalBufferCount and
> > > kSimpleBufferSlotCount, as that's the naming scheme we're moving to for
> > > non-macro constants. Same comment elsewhere in this patch.
> > >
> > > Those constants don't belong to converter.h. Could you turn them into
> > > member constants of the SimplePipelineHandler class, as
> > > kNumInternalBuffers (which btw should be removed) ? The number of
> buffer
> > > slots can be passed as a parameter to SimpleConverter::start().
> > >
> > > There's no stats or parameters here, and no IPA, so the situation is
> > > different than for IPU3 and RkISP1. The number of internal buffers
> > > should just be one more than the minimum number of buffers required by
> > > the capture device, I don't think there's another requirement.
> > >
> > > > +
> > > >  class SimpleConverter
> > > >  {
> > > >  public:
> > > > diff --git a/src/libcamera/pipeline/simple/simple.cpp
> b/src/libcamera/pipeline/simple/simple.cpp
> > > > index 1c25a7344f5f..a1163eaf8be2 100644
> > > > --- a/src/libcamera/pipeline/simple/simple.cpp
> > > > +++ b/src/libcamera/pipeline/simple/simple.cpp
> > > > @@ -803,12 +803,10 @@ int SimplePipelineHandler::start(Camera
> *camera, [[maybe_unused]] const ControlL
> > > >                * When using the converter allocate a fixed number of
> internal
> > > >                * buffers.
> > > >                */
> > > > -             ret = video->allocateBuffers(kNumInternalBuffers,
> > > > +             ret =
> video->allocateBuffers(SIMPLE_INTERNAL_BUFFER_COUNT,
> > > >                                            &data->converterBuffers_);
> > > >       } else {
> > > > -             /* Otherwise, prepare for using buffers from the only
> stream. */
> > > > -             Stream *stream = &data->streams_[0];
> > > > -             ret =
> video->importBuffers(stream->configuration().bufferCount);
> > > > +             ret = video->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> > > >       }
> > > >       if (ret < 0)
> > > >               return ret;
> > > > diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > > > index fd39b3d3c72c..755949e7a59a 100644
> > > > --- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > > > +++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > > > @@ -91,6 +91,8 @@ private:
> > > >               return static_cast<UVCCameraData *>(
> > > >                       PipelineHandler::cameraData(camera));
> > > >       }
> > > > +
> > > > +     static constexpr unsigned int UVC_BUFFER_SLOT_COUNT = 5;
> > > >  };
> > > >
> > > >  UVCCameraConfiguration::UVCCameraConfiguration(UVCCameraData *data)
> > > > @@ -236,9 +238,8 @@ int
> PipelineHandlerUVC::exportFrameBuffers(Camera *camera,
> > > >  int PipelineHandlerUVC::start(Camera *camera, [[maybe_unused]]
> const ControlList *controls)
> > > >  {
> > > >       UVCCameraData *data = cameraData(camera);
> > > > -     unsigned int count = data->stream_.configuration().bufferCount;
> > > >
> > > > -     int ret = data->video_->importBuffers(count);
> > > > +     int ret = data->video_->importBuffers(UVC_BUFFER_SLOT_COUNT);
> > >
> > > For the uvc and vimc pipeline handlers, we have no internal buffers, so
> > > it's quite easy. We should have 8 or 16 slots, as for other pipeline
> > > handlers.
> > >
> > > >       if (ret < 0)
> > > >               return ret;
> > > >
> > > > diff --git a/src/libcamera/pipeline/vimc/vimc.cpp
> b/src/libcamera/pipeline/vimc/vimc.cpp
> > > > index e89d53182c6d..24ba743a946c 100644
> > > > --- a/src/libcamera/pipeline/vimc/vimc.cpp
> > > > +++ b/src/libcamera/pipeline/vimc/vimc.cpp
> > > > @@ -102,6 +102,8 @@ private:
> > > >               return static_cast<VimcCameraData *>(
> > > >                       PipelineHandler::cameraData(camera));
> > > >       }
> > > > +
> > > > +     static constexpr unsigned int VIMC_BUFFER_SLOT_COUNT = 5;
> > > >  };
> > > >
> > > >  namespace {
> > > > @@ -312,9 +314,8 @@ int
> PipelineHandlerVimc::exportFrameBuffers(Camera *camera,
> > > >  int PipelineHandlerVimc::start(Camera *camera, [[maybe_unused]]
> const ControlList *controls)
> > > >  {
> > > >       VimcCameraData *data = cameraData(camera);
> > > > -     unsigned int count = data->stream_.configuration().bufferCount;
> > > >
> > > > -     int ret = data->video_->importBuffers(count);
> > > > +     int ret = data->video_->importBuffers(VIMC_BUFFER_SLOT_COUNT);
> > > >       if (ret < 0)
> > > >               return ret;
> > > >
>
> --
> Regards,
>
> Laurent Pinchart
>
Nícolas F. R. A. Prado Aug. 19, 2021, 1:12 p.m. UTC | #8
Hi Laurent,

On Tue, Aug 17, 2021 at 05:33:43AM +0300, Laurent Pinchart wrote:
> Hi Nícolas,
> 
> On Mon, Aug 09, 2021 at 05:26:46PM -0300, Nícolas F. R. A. Prado wrote:
> > On Sat, Aug 07, 2021 at 12:03:52PM -0300, Nícolas F. R. A. Prado wrote:
> > > On Mon, Aug 02, 2021 at 02:42:53AM +0300, Laurent Pinchart wrote:
> > > > On Thu, Jul 22, 2021 at 08:28:49PM -0300, Nícolas F. R. A. Prado wrote:
> > > > > Pipelines have relied on bufferCount to decide on the number of buffers
> > > > > to allocate internally through allocateBuffers() and on the number of
> > > > > V4L2 buffer slots to reserve through importBuffers(). Instead, the
> > > > > number of internal buffers should be the minimum required by the
> > > > > algorithms to avoid wasting memory, and the number of V4L2 buffer slots
> > > > > should overallocate to avoid thrashing dmabuf mappings.
> > > > > 
> > > > > For now, just set them to constants and stop relying on bufferCount, to
> > > > > allow for its removal.
> > > > > 
> > > > > Signed-off-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
> > > > > ---
> > > > > 
> > > > > No changes in v7
> > > > > 
> > > > > Changes in v6:
> > > > > - Added pipeline name as prefix to each BUFFER_SLOT_COUNT and
> > > > >   INTERNAL_BUFFER_COUNT constant
> > > > > 
> > > > >  src/libcamera/pipeline/ipu3/imgu.cpp              | 12 ++++++------
> > > > >  src/libcamera/pipeline/ipu3/imgu.h                |  5 ++++-
> > > > >  src/libcamera/pipeline/ipu3/ipu3.cpp              |  9 +--------
> > > > >  .../pipeline/raspberrypi/raspberrypi.cpp          | 15 +++++----------
> > > > >  src/libcamera/pipeline/rkisp1/rkisp1.cpp          |  9 ++-------
> > > > >  src/libcamera/pipeline/rkisp1/rkisp1_path.cpp     |  2 +-
> > > > >  src/libcamera/pipeline/rkisp1/rkisp1_path.h       |  3 +++
> > > > >  src/libcamera/pipeline/simple/converter.cpp       |  4 ++--
> > > > >  src/libcamera/pipeline/simple/converter.h         |  3 +++
> > > > >  src/libcamera/pipeline/simple/simple.cpp          |  6 ++----
> > > > >  src/libcamera/pipeline/uvcvideo/uvcvideo.cpp      |  5 +++--
> > > > >  src/libcamera/pipeline/vimc/vimc.cpp              |  5 +++--
> > > > >  12 files changed, 35 insertions(+), 43 deletions(-)
> > > > 
> > > > Given that some of the pipeline handlers will need more intrusive
> > > > changes to address the comments below, you could split this with one
> > > > patch per pipeline handler (or perhaps grouping the easy ones together).
> > > > 
> > > > > 
> > > > > diff --git a/src/libcamera/pipeline/ipu3/imgu.cpp b/src/libcamera/pipeline/ipu3/imgu.cpp
> > > > > index e955bc3456ba..f36e99dacbe7 100644
> > > > > --- a/src/libcamera/pipeline/ipu3/imgu.cpp
> > > > > +++ b/src/libcamera/pipeline/ipu3/imgu.cpp
> > > > > @@ -593,22 +593,22 @@ int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
> > > > >  /**
> > > > >   * \brief Allocate buffers for all the ImgU video devices
> > > > >   */
> > > > > -int ImgUDevice::allocateBuffers(unsigned int bufferCount)
> > > > > +int ImgUDevice::allocateBuffers()
> > > > >  {
> > > > >  	/* Share buffers between CIO2 output and ImgU input. */
> > > > > -	int ret = input_->importBuffers(bufferCount);
> > > > > +	int ret = input_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> > > > >  	if (ret) {
> > > > >  		LOG(IPU3, Error) << "Failed to import ImgU input buffers";
> > > > >  		return ret;
> > > > >  	}
> > > > >  
> > > > > -	ret = param_->allocateBuffers(bufferCount, &paramBuffers_);
> > > > > +	ret = param_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT, &paramBuffers_);
> > > > >  	if (ret < 0) {
> > > > >  		LOG(IPU3, Error) << "Failed to allocate ImgU param buffers";
> > > > >  		goto error;
> > > > >  	}
> > > > >  
> > > > > -	ret = stat_->allocateBuffers(bufferCount, &statBuffers_);
> > > > > +	ret = stat_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT, &statBuffers_);
> > > > >  	if (ret < 0) {
> > > > >  		LOG(IPU3, Error) << "Failed to allocate ImgU stat buffers";
> > > > >  		goto error;
> > > > > @@ -619,13 +619,13 @@ int ImgUDevice::allocateBuffers(unsigned int bufferCount)
> > > > >  	 * corresponding stream is active or inactive, as the driver needs
> > > > >  	 * buffers to be requested on the V4L2 devices in order to operate.
> > > > >  	 */
> > > > > -	ret = output_->importBuffers(bufferCount);
> > > > > +	ret = output_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> > > > >  	if (ret < 0) {
> > > > >  		LOG(IPU3, Error) << "Failed to import ImgU output buffers";
> > > > >  		goto error;
> > > > >  	}
> > > > >  
> > > > > -	ret = viewfinder_->importBuffers(bufferCount);
> > > > > +	ret = viewfinder_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> > > > >  	if (ret < 0) {
> > > > >  		LOG(IPU3, Error) << "Failed to import ImgU viewfinder buffers";
> > > > >  		goto error;
> > > > > diff --git a/src/libcamera/pipeline/ipu3/imgu.h b/src/libcamera/pipeline/ipu3/imgu.h
> > > > > index 9d4915116087..f934a951fc75 100644
> > > > > --- a/src/libcamera/pipeline/ipu3/imgu.h
> > > > > +++ b/src/libcamera/pipeline/ipu3/imgu.h
> > > > > @@ -61,7 +61,7 @@ public:
> > > > >  					    outputFormat);
> > > > >  	}
> > > > >  
> > > > > -	int allocateBuffers(unsigned int bufferCount);
> > > > > +	int allocateBuffers();
> > > > >  	void freeBuffers();
> > > > >  
> > > > >  	int start();
> > > > > @@ -86,6 +86,9 @@ private:
> > > > >  	static constexpr unsigned int PAD_VF = 3;
> > > > >  	static constexpr unsigned int PAD_STAT = 4;
> > > > >  
> > > > > +	static constexpr unsigned int IPU3_INTERNAL_BUFFER_COUNT = 4;
> > > > > +	static constexpr unsigned int IPU3_BUFFER_SLOT_COUNT = 5;
> > > > 
> > > > 5 buffer slots is low. It means that if applications cycle more than 5
> > > > buffers, the V4L2VideoDevice cache that maintains associations between
> > > > dmabufs and buffer slots will be trashed. Due to the internal queue of
> > > > requests in the IPU3 pipeline handler (similar to what you have
> > > > implemented in "[PATCH 0/3] libcamera: pipeline: Add internal request
> > > > queue" for other pipeline handlers), we won't fail at queuing requests,
> > > > but performance will suffer. I thus think we need to increase the number
> > > > of slots to what applications can be reasonably expected to use. We
> > > > could use 8, or even 16, as buffer slots are cheap. The same holds for
> > > > other pipeline handlers.
> > > > 
> > > > The number of slots for the CIO2 output should match the number of
> > > > buffer slots for the ImgU input, as the same buffers are used on the two
> > > > video devices. One option is to use IPU3_BUFFER_SLOT_COUNT for the CIO2,
> > > > instead of CIO2_BUFFER_COUNT. However, the number of internal CIO2
> > > > buffers that are allocated by exportBuffers() in CIO2Device::start(), to
> > > > be used in case the application doesn't provide any RAW buffer, should
> > > > be lower, as those are real buffers and are thus expensive. The number of
> > > > buffers and buffer slots on the CIO2 thus needs to be decoupled.
> > > > 
> > > > For proper operation, the CIO2 will require at least two queued buffers
> > > > (one being DMA'ed to, and one waiting). We need at least one extra
> > > > buffer queued to the ImgU to keep buffers flowing. Depending on
> > > > processing timings, it may be that the ImgU will complete processing of
> > > > its buffer before the CIO2 captures the next one, leading to a temporary
> > > > situation where the CIO2 will have three buffers queued, or the CIO2
> > > > will finish the capture first, leading to a temporary situation where
> > > > the CIO2 will have one buffer queued and the ImgU will have two buffers
> > > > queued. In either case, shortly afterwards, the other component will
> > > > complete capture or processing, and we'll get back to a situation with
> > > > two buffers queued in the CIO2 and one in the ImgU. That's thus a
> > > > minimum of three buffers for raw images.
> > > > 
> > > > From an ImgU point of view, we could probably get away with a single
> > > > parameter and a single stats buffer. This would however not allow
> > > > queuing the next frame for processing in the ImgU before the current
> > > > frame completes, so two buffers would be better. Now, if we take the IPA
> > > > into account, the statistics buffer will spend some time on the IPA side
> > > > for processing. It would thus be best to have an extra statistics buffer
> > > > to accommodate that, thus requiring three statistics buffers (and three
> > > > parameters buffers, as we associate them together).
> > > > 
> > > > This rationale leads to using the same number of internal buffers for
> > > > the CIO2, the parameters and the statistics. We currently use four, and
> > > > while the logic above indicates we could get away with three, it would
> > > > be safer to keep using four in this patch, and possibly reduce the
> > > > number of buffers later.
> > > > 
> > > > I know documentation isn't fun, but I think this rationale should be
> > > > captured in a comment in the IPU3 pipeline handler, along with a \todo
> > > > item to try and lower the number of internal buffers to three.
> > > 
> > > This is the IPU3 topology as I understand it:
> > > 
> > >       Output  .               .   Input        Output .
> > >       +---+   .               .   +---+        +---+  .
> > >       |   | --------------------> |   |        |   |  .
> > >       +---+   .               .   +---+        +---+  .
> > > CIO2          .   IPA         .          ImgU         .          IPA
> > >               .        Param  .   Param        Stat   .   Stat
> > >               .        +---+  .   +---+        +---+  .   +---+ 
> > >               .        |   | ---> |   |        |   | ---> |   | 
> > >               .        +---+  .   +---+        +---+  .   +---+ 
> > >           
> > > Your suggestions for the minimum number of buffers required are the following,
> > > from what I understand:
> > > 
> > > CIO2 raw internal buffers:
> > > - 2x on CIO2 Output (one being DMA'ed, one waiting)
> > > - 1x on ImgU Input
> > > 
> > > ImgU Param/Stat internal buffers:
> > > - 2x on ImgU Param/Stat (one being processed, one waiting)
> > > - 1x on IPA Stat
> > > 
> > > This arrangement doesn't seem to take into account that IPU3Frames::Info binds
> > > CIO2 internal buffers and ImgU Param/Stat buffers together. This means that each
> > > raw buffer queued to CIO2 Output needs a Param/Stat buffer as well. And each
> > > Param/Stat buffer queued to ImgU for processing needs a CIO2 raw buffer as well.
> > > After ImgU processing though, the raw buffer gets released and reused, so the
> > > Stat buffer queued to the IPA does not require a CIO2 raw buffer.
> > > 
> > > This means that to achieve the above minimum, due to the IPU3Frames::Info
> > > constraint, we'd actually need:
> > > 
> > > CIO2 internal buffers:
> > > - 2x on CIO2 Output (one being DMA'ed, one waiting)
> > > - 2x on ImgU Input (for the two ImgU Param/Stat buffers we want to have there)
> > > 
> > > ImgU Param/Stat internal buffers:
> > > - 2x on CIO2 Output (for the two CIO2 raw buffers we want to have there)
> > > - 2x on ImgU Param/Stat (one being processed, one waiting)
> 
> Note that the need to have two buffers here is to ensure back-to-back
> processing of frames on the ImgU and thus avoid delays, but this need
> actually depends on how fast the ImgU is. With a very fast ImgU
> (compared to the frame duration), inter-frame delays may not be an
> issue. There's more on this below.
> 
> > > - 1x on IPA Stat
> 
> Processing of the statistics can occur after the corresponding raw image
> buffer has been requeued to the CIO2, the only hard requirement is that
> the buffer needs to be available by the time the ImgU will process the
> corresponding raw frame buffer again.

IPU3CameraData::queuePendingRequests() creates an IPU3Frames::Info with param
and stat buffers before adding a raw buffer to it and queuing it to the CIO2.
So in order to have the statistics processing by the IPA happen after the
raw buffer has been requeued to the CIO2, we would either need one extra
param/stat buffer compared to the number of CIO2 internal buffers, so 5
param/stat buffers, or change that code so that the param/stat buffers are only
added to the FrameInfo after we receive the buffer ready signal from the CIO2.
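A rough, self-contained sketch of that second option follows; all names here
(FrameInfoSketch, onRawBufferReady(), the free param/stat queue) are
hypothetical and only illustrate the deferred association, they are not the
IPU3 pipeline handler code:

#include <deque>
#include <iostream>
#include <optional>

/*
 * Sketch: pair a param/stat buffer with a frame only once the CIO2 reports
 * its raw buffer as complete, instead of doing it when the request is
 * queued. A stat buffer still held by the IPA then never delays requeueing
 * raw buffers to the CIO2.
 */
struct FrameInfoSketch {
	int rawBuffer;
	std::optional<int> paramStat; /* attached only on raw completion */
};

static std::deque<int> freeParamStat{ 0, 1, 2, 3 };

/* Called when the CIO2 completes a raw buffer. */
static bool onRawBufferReady(FrameInfoSketch &info)
{
	if (freeParamStat.empty())
		return false; /* stat still with the IPA: wait, don't block the CIO2 */

	info.paramStat = freeParamStat.front();
	freeParamStat.pop_front();
	return true;
}

int main()
{
	FrameInfoSketch frame{ 42, std::nullopt };
	if (onRawBufferReady(frame))
		std::cout << "raw buffer " << frame.rawBuffer
			  << " queued to the ImgU with param/stat "
			  << *frame.paramStat << "\n";
	return 0;
}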

In any case, as you've mentioned, we currently use four for both and it works
well, so I'll leave it that way. I just wanted to point out that, technically,
the IPA stat processing is currently part of the requeue delay for CIO2 buffers.

Thanks,
Nícolas

> 
> > > Also we're not accounting for parameter filling in the IPA before we queue the
> > > buffers to ImgU, but perhaps that's fast enough that it doesn't matter?
> 
> That's one of the questions we need to answer, I don't think we have
> numbers at this time. If filling the parameters buffer takes a
> significant amount of time, then that would need to be taken into
> account as an additional step in the pipeline, with an additional set of
> buffers.
> 
> > > Does this make sense? Or am I missing something?
> 
> One thing that you may not have taken into account is that the two
> buffers queued on the CIO2 output and the two buffers queued on the ImgU
> are not necessarily queued at the same time. I'll try to explain.
> 
> On the CIO2 side, we have a strong real time requirement to always keep
> the CIO2 fed with buffers. The details depend a bit on the hardware and
> driver implementations, but the base idea is that once a buffer is
> complete and the time comes to move to the next buffer for the next
> frame, there has to be a next buffer available. When exactly this occurs
> can vary. Some drivers will give the buffer for the next frame to the
> device when capture for the current frame starts, and some will give it
> when the hardware signals completion of the capture of the current frame
> (frame end). In theory this could be delayed even a bit more, but it has
> to happen before the hardware needs the new buffer, and giving it when
> the DMA completes is often too risky already as vertical blanking can be
> short and interrupts can be delayed a bit. I tried to check the driver
> to see what the exact requirement is, but I'm not familiar with the
> hardware and the code is not very easy to follow.
> 
> Note that frame start is the time when the first pixel of the frame is
> written to memory, and frame end the time when the last pixel of the
> frame is written to memory. The end of frame N and the start of frame
> N+1 are separated by the vertical blanking time.
> 
> Let's assume that the CIO2 needs to be programmed with the buffer for
> frame N+1 at the start of frame N (Edit: I've written all the
> explanation below based on this assumption, but after further
> investigation, I *think* the CIO2 only requires the buffer for frame N+1
> at the beginning of frame N+1, but the driver enforces that the buffer
> must be present just before the start of frame N to avoid race
> conditions - just before the start of frame N and at the start of frame
> N are practically speaking the same thing. Sakari, do you know if this is
> correct ?). We'll constantly transition between the following states,
> from the CIO2 point of view.
> 
> 0. (Initial state) 2x idle buffers in the queue, hardware stopped. The
>    CIO2 is then started, the first buffer in the queue is given to the
>    device to capture the first frame, and the second buffer in the queue
>    is given to the device to capture the second frame. The first frame
>    starts.
> 
> 1. 1x active buffer being DMA'ed to, 1x pending buffer already given to
>    the hardware for the next frame, 0x idle buffers in the queue. Two
>    events can occur at this point, either completion of the current
>    frame (-> 2), or a new buffer being queued by userspace (-> 4).
> 
> 2. 0x active buffer being DMA'ed to, 1x pending buffer already given to
>    the hardware for the next frame, 0x idle buffers in the queue. Two
>    events can occur at this point, either start of the next frame (->
>    3), or a new buffer being queued by userspace (-> 5).
> 
>    This state lasts for the duration of the vertical blanking only, and
>    can thus be short-lived.
> 
> 3. The next frame starts. The pending buffer becomes active. We have no
>    buffer in the queue to give to the hardware for the next frame. An
>    underrun has occurred, a frame will be dropped. Game over.
> 
> 4. 1x active buffer being DMA'ed to, 1x pending buffer already given to
>    the hardware for the next frame, 1x idle buffers in the queue. The
>    next event that will occur is the start of the next frame (as the
>    other option, a new buffer being queued, will give us additional
>    safety by increasing the number of queued buffers, but isn't
>    meaningful when considering the case where we try to run with the
>    minimum number of buffers possible).
> 
>    As the current frame ends, the active buffer is given back to the
>    userspace. There's no active buffer (the DMA will start soon, after
>    the vertical blanking, when the next frame starts), the pending
>    buffer stays pending, and the idle buffer stays idle (-> 5).
> 
> 5. 0x active buffer being DMA'ed to, 1x pending buffer already given to
>    the hardware for the next frame, 1x idle buffers in the queue. The
>    next event that will occur is the start of the next frame (for the
>    same reason as in 4).
> 
>    As the next frame starts, the pending buffer becomes active. The
>    queued buffer is given to the hardware for the subsequent frame. The
>    queue of idle buffers becomes empty (-> 1).
> 
>    If this state is reached from state 2, it lasts for the remainder of
>    the vertical blanking only. If it is reached from state 4, it lasts
>    for the whole vertical blanking. In both cases, it can be
>    short-lived.
> 
> We can thus cycle either through 1 -> 2 -> 5 -> 1 or through 1 -> 4 -> 5
> -> 1. The first cycle requires two buffers for the CIO2, with an
> intermediate state (2) that has a single buffer only. This is unsafe, as
> a failure to queue a second buffer in the short-lived state 2 will lead
> to state 3 and frame drops.
> 
> The second cycle requires three buffers for the CIO2. This is the cycle
> we want to use, to avoid frame drops. Note that only state 4 requires
> all three buffers, and userspace can queue the third buffer at any point
> in state 1 (before the end of the current frame). If userspace queues
> the frame slightly too late, after the completion of the current frame
> but before the start of the next one, we'll go to the unsafe cycle but
> will still not lose frames.
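To make the cycle above easier to follow, here is a small self-contained
simulation of the CIO2 buffer states as described; it only models the
reasoning in this thread (the struct and its members are hypothetical), it is
not driver code:

#include <iostream>

/*
 * Model of the CIO2 buffer states described above. 'active' is the buffer
 * being DMA'ed to, 'pending' the buffer already given to the hardware for
 * the next frame, 'idle' the buffers still waiting in the driver queue.
 */
struct Cio2Model {
	bool active;
	bool pending;
	unsigned int idle;

	void queueBuffer() { idle++; }          /* userspace requeues a buffer */
	void frameEnd() { active = false; }     /* active buffer returns to userspace */

	/* Returns false when the frame has no buffer and is dropped (state 3). */
	bool frameStart()
	{
		if (!pending)
			return false;
		active = true;          /* the pending buffer becomes active */
		pending = false;
		if (idle) {             /* hand a queued buffer to the hardware */
			pending = true;
			idle--;
		}
		return true;
	}
};

int main()
{
	/* Safe cycle 1 -> 4 -> 5 -> 1: a third buffer is queued in time. */
	Cio2Model safe{ true, true, 0 };        /* state 1 */
	safe.queueBuffer();                     /* state 4 */
	safe.frameEnd();                        /* state 5 */
	std::cout << "safe cycle, frame captured: " << safe.frameStart() << "\n";

	/* No buffer requeued at all: 1 -> 2 -> (3) -> frame dropped. */
	Cio2Model starved{ true, true, 0 };     /* state 1 */
	starved.frameEnd();                     /* state 2 */
	starved.frameStart();                   /* pending consumed, queue empty */
	starved.frameEnd();
	std::cout << "starved cycle, frame captured: " << starved.frameStart() << "\n";

	return 0;
}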
> 
> Now, let's look at the ImgU side, and assume we use three buffers in
> total. The ImgU operates from memory to memory, it thus has no realtime
> requirement. It only starts processing a frame when the frame is given
> to it. This occurs, from a CIO2 point of view, in the transition from
> state 4 to state 5, plus all delays introduced by delivering the CIO2
> frame completion event to userspace, queueing the frame to the ImgU (I'm
> ignoring the IPA here), and starting the ImgU itself. The ImgU
> processing time will, on average, be lower than the frame duration,
> otherwise it won't be able to process all frames. Once the ImgU
> completes processing of the frame, it will signal this to userspace.
> There's also a processing delay there (signalling, task switching, ...),
> and userspace will requeue the frame to the CIO2. This has to occur at
> the latest before the end of the current frame, otherwise state 1 will
> transition to state 2.
> 
> We thus see that, in the 3 buffers case, we need to ensure that the
> total time to process the frame on the ImgU, from the CIO2 interrupt
> signalling the end of state 4 to the buffer being requeued to the CIO2,
> thus including all task switching and other delays, doesn't exceed the
> duration of states 5 + 1, which is equal to the duration of a frame. The
> ImgU processing time itself is guaranteed to be lower than that, but the
> additional delays may be problematic. We also need to include a possible
> round-trip to the IPA after end of buffer capture by the CIO2 and start
> of processing by the ImgU to retrieve the ImgU parameters for the frame.
> Three buffers start sounding quite risky. I'm thus correcting myself,
> four buffers seem safer.
> 
> None of this takes the parameters or statistics buffers into account,
> but I don't think they're particularly problematic in the sense that the
> most strict realtime constraints come from the raw image buffers. Feel
> free to prove me wrong though :-)
> 
> Let's however note that we can probably fetch the ImgU parameters for
> the frame that has just been captured before the end of the frame, so
> that would remove a delay in the ImgU processing. This assumes that the
> algorithms wouldn't need to know the exact exposure time and analog gain
> that have been used to capture the current frame in order to compute the
> ImgU parameters. This leads to a first question to David: does the
> Raspberry Pi IPA require the sensor metadata to calculate ISP
> parameters, or are they needed only when processing statistics from
> frame N to calculate sensor and ISP parameters of subsequent frames ?
> 
> The next question is for everybody (and that's why I've expanded the CC
> list to Kieran, Jean-Michel and Sakari too): what did I get wrong in the
> above explanation ? :-)
> 
> > > > > +
> > > > >  	int linkSetup(const std::string &source, unsigned int sourcePad,
> > > > >  		      const std::string &sink, unsigned int sinkPad,
> > > > >  		      bool enable);
> > > > > diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp
> > > > > index 5fd1757bfe13..4efd201c05e5 100644
> > > > > --- a/src/libcamera/pipeline/ipu3/ipu3.cpp
> > > > > +++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
> > > > > @@ -681,16 +681,9 @@ int PipelineHandlerIPU3::allocateBuffers(Camera *camera)
> > > > >  {
> > > > >  	IPU3CameraData *data = cameraData(camera);
> > > > >  	ImgUDevice *imgu = data->imgu_;
> > > > > -	unsigned int bufferCount;
> > > > >  	int ret;
> > > > >  
> > > > > -	bufferCount = std::max({
> > > > > -		data->outStream_.configuration().bufferCount,
> > > > > -		data->vfStream_.configuration().bufferCount,
> > > > > -		data->rawStream_.configuration().bufferCount,
> > > > > -	});
> > > > > -
> > > > > -	ret = imgu->allocateBuffers(bufferCount);
> > > > > +	ret = imgu->allocateBuffers();
> > > > >  	if (ret < 0)
> > > > >  		return ret;
> > > > >  
> > > > > diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > > index d1cd3d9dc082..776e0f92aed1 100644
> > > > > --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > > +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > > @@ -1149,20 +1149,15 @@ int PipelineHandlerRPi::prepareBuffers(Camera *camera)
> > > > >  {
> > > > >  	RPiCameraData *data = cameraData(camera);
> > > > >  	int ret;
> > > > > +	constexpr unsigned int bufferCount = 4;
> > > > >  
> > > > >  	/*
> > > > > -	 * Decide how many internal buffers to allocate. For now, simply look
> > > > > -	 * at how many external buffers will be provided. We'll need to improve
> > > > > -	 * this logic. However, we really must have all streams allocate the same
> > > > > -	 * number of buffers to simplify error handling in queueRequestDevice().
> > > > > +	 * Allocate internal buffers. We really must have all streams allocate
> > > > > +	 * the same number of buffers to simplify error handling in
> > > > > +	 * queueRequestDevice().
> > > > >  	 */
> > > > > -	unsigned int maxBuffers = 0;
> > > > > -	for (const Stream *s : camera->streams())
> > > > > -		if (static_cast<const RPi::Stream *>(s)->isExternal())
> > > > > -			maxBuffers = std::max(maxBuffers, s->configuration().bufferCount);
> > > > > -
> > > > >  	for (auto const stream : data->streams_) {
> > > > > -		ret = stream->prepareBuffers(maxBuffers);
> > > > > +		ret = stream->prepareBuffers(bufferCount);
> > > > 
> > > > We have a similar problem here, 4 buffer slots is too little, but when
> > > > the stream has to allocate internal buffers (!importOnly), which is the
> > > > case for most streams, we don't want to overallocate.
> > > > 
> > > > I'd like to get feedback from Naush here, but I think this means we'll
> > > > have to relax the requirement documented in the comment above, and
> > > > accept a different number of buffers for each stream.
> > > > 
> > > > >  		if (ret < 0)
> > > > >  			return ret;
> > > > >  	}
> > > > > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > > > > index 11325875b929..f4ea2fd4d4d0 100644
> > > > > --- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > > > > +++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > > > > @@ -690,16 +690,11 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
> > > > >  	unsigned int ipaBufferId = 1;
> > > > >  	int ret;
> > > > >  
> > > > > -	unsigned int maxCount = std::max({
> > > > > -		data->mainPathStream_.configuration().bufferCount,
> > > > > -		data->selfPathStream_.configuration().bufferCount,
> > > > > -	});
> > > > > -
> > > > > -	ret = param_->allocateBuffers(maxCount, &paramBuffers_);
> > > > > +	ret = param_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT, &paramBuffers_);
> > > > >  	if (ret < 0)
> > > > >  		goto error;
> > > > >  
> > > > > -	ret = stat_->allocateBuffers(maxCount, &statBuffers_);
> > > > > +	ret = stat_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT, &statBuffers_);
> > > > >  	if (ret < 0)
> > > > >  		goto error;
> > > > >  
> > > > > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > > > > index 25f482eb8d8e..fea330f72886 100644
> > > > > --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > > > > +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > > > > @@ -172,7 +172,7 @@ int RkISP1Path::start()
> > > > >  		return -EBUSY;
> > > > >  
> > > > >  	/* \todo Make buffer count user configurable. */
> > > > > -	ret = video_->importBuffers(RKISP1_BUFFER_COUNT);
> > > > > +	ret = video_->importBuffers(RKISP1_BUFFER_SLOT_COUNT);
> > > > >  	if (ret)
> > > > >  		return ret;
> > > > >  
> > > > > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.h b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > > > > index 91757600ccdc..3c5891009c58 100644
> > > > > --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > > > > +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > > > > @@ -27,6 +27,9 @@ class V4L2Subdevice;
> > > > >  struct StreamConfiguration;
> > > > >  struct V4L2SubdeviceFormat;
> > > > >  
> > > > > +static constexpr unsigned int RKISP1_INTERNAL_BUFFER_COUNT = 4;
> > > > > +static constexpr unsigned int RKISP1_BUFFER_SLOT_COUNT = 5;
> > > > 
> > > > The situation should be simpler for the rkisp1, as it has a different
> > > > pipeline model (inline ISP as opposed to offline ISP for the IPU3). We
> > > > can allocate more slots (8 or 16, as for other pipeline handlers), and
> > > > restrict the number of internal buffers (for stats and parameters) to
> > > > the number of requests we expect to queue to the device at once, plus
> > > > one for the IPA.  Four thus seems good. Capturing this rationale in a
> > > > comment would be good too.
> > 
> > Shouldn't we also have one extra buffer queued to the capture device, like for
> > the others, totalling five (four on the capture, one on the IPA)? Or since the
> > driver already requires three buffers the extra one isn't needed?
> >
> > I'm not sure how it works, but if the driver requires three buffers at all times
> > to keep streaming, then I think we indeed should have the extra buffer to avoid
> > dropping frames. Otherwise, if that requirement is only for starting the stream,
> > then for drivers that require at least two buffers we shouldn't need an extra
> > one, I'd think.
> 
> It seems to be only needed to start capture. Even then I think it could
> be lowered to two buffers, I don't see anything in the driver that
> requires three. Maybe someone from Collabora could comment on this ? And
> maybe you could give it a try by modifying the driver ?
> 
> By the way, if you try to apply the CIO2 reasoning above to the RkISP1,
> you will need to take into account the fact the the driver programs the
> hardware with the buffer for frame N+1 not at the beginning of frame N,
> but at the end of frame N-1.
> 
> I think four buffers is enough. We currently use four buffers and it
> seems to work :-) Granted, the RkISP1 IPA is a skeleton, so this
> argument isn't very strong, but given that the driver only needs two
> buffers except at start time, four should be fine.
> 
> > > > BTW, I may be too tired to think properly, or just unable to see the
> > > > obvious, so please challenge any rationale you think is incorrect.
> > > > 
> > > > > +
> > > > >  class RkISP1Path
> > > > >  {
> > > > >  public:
> > > > > diff --git a/src/libcamera/pipeline/simple/converter.cpp b/src/libcamera/pipeline/simple/converter.cpp
> > > > > index b5e34c4cd0c5..b3bcf01483f7 100644
> > > > > --- a/src/libcamera/pipeline/simple/converter.cpp
> > > > > +++ b/src/libcamera/pipeline/simple/converter.cpp
> > > > > @@ -103,11 +103,11 @@ int SimpleConverter::Stream::exportBuffers(unsigned int count,
> > > > >  
> > > > >  int SimpleConverter::Stream::start()
> > > > >  {
> > > > > -	int ret = m2m_->output()->importBuffers(inputBufferCount_);
> > > > > +	int ret = m2m_->output()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> > > > 
> > > > Shouldn't this be SIMPLE_INTERNAL_BUFFER_COUNT ? Overallocating is not
> > > > much of an issue I suppose.
> > 
> > Indeed. I was under the impression that we should always importBuffers() using
> > BUFFER_SLOT_COUNT, but now, after reading more code, I understand that's not
> > always the case (although this seems to be the only case, due to the presence of
> > the converter).
> > 
> > > > >  	if (ret < 0)
> > > > >  		return ret;
> > > > >  
> > > > > -	ret = m2m_->capture()->importBuffers(outputBufferCount_);
> > > > > +	ret = m2m_->capture()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> > > > >  	if (ret < 0) {
> > > > >  		stop();
> > > > >  		return ret;
> > > > > diff --git a/src/libcamera/pipeline/simple/converter.h b/src/libcamera/pipeline/simple/converter.h
> > > > > index 276a2a291c21..7e1d60674f62 100644
> > > > > --- a/src/libcamera/pipeline/simple/converter.h
> > > > > +++ b/src/libcamera/pipeline/simple/converter.h
> > > > > @@ -29,6 +29,9 @@ class SizeRange;
> > > > >  struct StreamConfiguration;
> > > > >  class V4L2M2MDevice;
> > > > >  
> > > > > +constexpr unsigned int SIMPLE_INTERNAL_BUFFER_COUNT = 5;
> > > > > +constexpr unsigned int SIMPLE_BUFFER_SLOT_COUNT = 5;
> > > > 
> > > > Let's name the variables kSimpleInternalBufferCount and
> > > > kSimpleBufferSlotCount, as that's the naming scheme we're moving to for
> > > > non-macro constants. Same comment elsewhere in this patch.
> > > > 
> > > > Those constants don't belong to converter.h. Could you turn them into
> > > > member constants of the SimplePipelineHandler class, as
> > > > kNumInternalBuffers (which btw should be removed) ? The number of buffer
> > > > slots can be passed as a parameter to SimpleConverter::start().
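A minimal, self-contained sketch of what that could look like, with stubbed
types and illustrative values (the names, the constant values and the start()
parameter below only mirror the suggestion above, they are not the real
libcamera API):

#include <iostream>

struct StubVideoDevice {
	int importBuffers(unsigned int count)
	{
		std::cout << "importBuffers(" << count << ")\n";
		return 0;
	}
};

class ConverterSketch
{
public:
	/* The slot count becomes a parameter instead of a header constant. */
	int start(unsigned int bufferSlotCount)
	{
		int ret = output_.importBuffers(bufferSlotCount);
		if (ret < 0)
			return ret;
		return capture_.importBuffers(bufferSlotCount);
	}

private:
	StubVideoDevice output_;
	StubVideoDevice capture_;
};

class PipelineHandlerSketch
{
public:
	int start()
	{
		std::cout << "allocate " << kSimpleInternalBufferCount
			  << " internal buffers\n";
		return converter_.start(kSimpleBufferSlotCount);
	}

private:
	/* Member constants instead of file-scope constants in converter.h. */
	static constexpr unsigned int kSimpleInternalBufferCount = 3;
	static constexpr unsigned int kSimpleBufferSlotCount = 16;

	ConverterSketch converter_;
};

int main()
{
	PipelineHandlerSketch pipe;
	return pipe.start();
}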
> > > > 
> > > > There's no stats or parameters here, and no IPA, so the situation is
> > > > different than for IPU3 and RkISP1. The number of internal buffers
> > > > should just be one more than the minimum number of buffers required by
> > > > the capture device, I don't think there's another requirement.
> > 
> > Plus one extra to have queued at the converter's 'output' node (which is its
> > input, confusingly)?
> 
> It depends a bit on the exact timings of the capture device, as is
> probably clear with the explanation above (or at least is now clearly
> seen as a complicated topic :-)). We need to ensure that the realtime
> requirements of the device are met, and that the capture buffers that
> complete, and are then processed by the converter, will be requeued in
> time to the capture device to meet those requirements.
> 
> As the simple pipeline handler deals with a variety of devices, we have
> two options, either checking the requirements of each device and
> recording them in the supportedDevices array, or picking a common number of
> buffers that should be good enough for everybody. I'd start with the
> second option for simplicity, and as the pipeline handler currently uses
> 3 buffers, I'd stick to that for now.
> 
> > > > > +
> > > > >  class SimpleConverter
> > > > >  {
> > > > >  public:
> > > > > diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
> > > > > index 1c25a7344f5f..a1163eaf8be2 100644
> > > > > --- a/src/libcamera/pipeline/simple/simple.cpp
> > > > > +++ b/src/libcamera/pipeline/simple/simple.cpp
> > > > > @@ -803,12 +803,10 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
> > > > >  		 * When using the converter allocate a fixed number of internal
> > > > >  		 * buffers.
> > > > >  		 */
> > > > > -		ret = video->allocateBuffers(kNumInternalBuffers,
> > > > > +		ret = video->allocateBuffers(SIMPLE_INTERNAL_BUFFER_COUNT,
> > > > >  					     &data->converterBuffers_);
> > > > >  	} else {
> > > > > -		/* Otherwise, prepare for using buffers from the only stream. */
> > > > > -		Stream *stream = &data->streams_[0];
> > > > > -		ret = video->importBuffers(stream->configuration().bufferCount);
> > > > > +		ret = video->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> > > > >  	}
> > > > >  	if (ret < 0)
> > > > >  		return ret;
> > > > > diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > > > > index fd39b3d3c72c..755949e7a59a 100644
> > > > > --- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > > > > +++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > > > > @@ -91,6 +91,8 @@ private:
> > > > >  		return static_cast<UVCCameraData *>(
> > > > >  			PipelineHandler::cameraData(camera));
> > > > >  	}
> > > > > +
> > > > > +	static constexpr unsigned int UVC_BUFFER_SLOT_COUNT = 5;
> > > > >  };
> > > > >  
> > > > >  UVCCameraConfiguration::UVCCameraConfiguration(UVCCameraData *data)
> > > > > @@ -236,9 +238,8 @@ int PipelineHandlerUVC::exportFrameBuffers(Camera *camera,
> > > > >  int PipelineHandlerUVC::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
> > > > >  {
> > > > >  	UVCCameraData *data = cameraData(camera);
> > > > > -	unsigned int count = data->stream_.configuration().bufferCount;
> > > > >  
> > > > > -	int ret = data->video_->importBuffers(count);
> > > > > +	int ret = data->video_->importBuffers(UVC_BUFFER_SLOT_COUNT);
> > > > 
> > > > For the uvc and vimc pipeline handlers, we have no internal buffers, so
> > > > it's quite easy. We should have 8 or 16 slots, as for other pipeline
> > > > handlers.
> > > > 
> > > > >  	if (ret < 0)
> > > > >  		return ret;
> > > > >  
> > > > > diff --git a/src/libcamera/pipeline/vimc/vimc.cpp b/src/libcamera/pipeline/vimc/vimc.cpp
> > > > > index e89d53182c6d..24ba743a946c 100644
> > > > > --- a/src/libcamera/pipeline/vimc/vimc.cpp
> > > > > +++ b/src/libcamera/pipeline/vimc/vimc.cpp
> > > > > @@ -102,6 +102,8 @@ private:
> > > > >  		return static_cast<VimcCameraData *>(
> > > > >  			PipelineHandler::cameraData(camera));
> > > > >  	}
> > > > > +
> > > > > +	static constexpr unsigned int VIMC_BUFFER_SLOT_COUNT = 5;
> > > > >  };
> > > > >  
> > > > >  namespace {
> > > > > @@ -312,9 +314,8 @@ int PipelineHandlerVimc::exportFrameBuffers(Camera *camera,
> > > > >  int PipelineHandlerVimc::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
> > > > >  {
> > > > >  	VimcCameraData *data = cameraData(camera);
> > > > > -	unsigned int count = data->stream_.configuration().bufferCount;
> > > > >  
> > > > > -	int ret = data->video_->importBuffers(count);
> > > > > +	int ret = data->video_->importBuffers(VIMC_BUFFER_SLOT_COUNT);
> > > > >  	if (ret < 0)
> > > > >  		return ret;
> > > > >  
> 
> -- 
> Regards,
> 
> Laurent Pinchart
> 
Nícolas F. R. A. Prado Aug. 19, 2021, 8:36 p.m. UTC | #9
Hi again,

On Tue, Aug 17, 2021 at 05:33:43AM +0300, Laurent Pinchart wrote:
> Hi Nícolas,
> 
> On Mon, Aug 09, 2021 at 05:26:46PM -0300, Nícolas F. R. A. Prado wrote:
> > On Sat, Aug 07, 2021 at 12:03:52PM -0300, Nícolas F. R. A. Prado wrote:
> > > On Mon, Aug 02, 2021 at 02:42:53AM +0300, Laurent Pinchart wrote:
> > > > On Thu, Jul 22, 2021 at 08:28:49PM -0300, Nícolas F. R. A. Prado wrote:
> > > > > Pipelines have relied on bufferCount to decide on the number of buffers
> > > > > to allocate internally through allocateBuffers() and on the number of
> > > > > V4L2 buffer slots to reserve through importBuffers(). Instead, the
> > > > > number of internal buffers should be the minimum required by the
> > > > > algorithms to avoid wasting memory, and the number of V4L2 buffer slots
> > > > > should overallocate to avoid thrashing dmabuf mappings.
> > > > > 
> > > > > For now, just set them to constants and stop relying on bufferCount, to
> > > > > allow for its removal.
> > > > > 
> > > > > Signed-off-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
> > > > > ---
> > > > > 
> > > > > No changes in v7
> > > > > 
> > > > > Changes in v6:
> > > > > - Added pipeline name as prefix to each BUFFER_SLOT_COUNT and
> > > > >   INTERNAL_BUFFER_COUNT constant
> > > > > 
> > > > >  src/libcamera/pipeline/ipu3/imgu.cpp              | 12 ++++++------
> > > > >  src/libcamera/pipeline/ipu3/imgu.h                |  5 ++++-
> > > > >  src/libcamera/pipeline/ipu3/ipu3.cpp              |  9 +--------
> > > > >  .../pipeline/raspberrypi/raspberrypi.cpp          | 15 +++++----------
> > > > >  src/libcamera/pipeline/rkisp1/rkisp1.cpp          |  9 ++-------
> > > > >  src/libcamera/pipeline/rkisp1/rkisp1_path.cpp     |  2 +-
> > > > >  src/libcamera/pipeline/rkisp1/rkisp1_path.h       |  3 +++
> > > > >  src/libcamera/pipeline/simple/converter.cpp       |  4 ++--
> > > > >  src/libcamera/pipeline/simple/converter.h         |  3 +++
> > > > >  src/libcamera/pipeline/simple/simple.cpp          |  6 ++----
> > > > >  src/libcamera/pipeline/uvcvideo/uvcvideo.cpp      |  5 +++--
> > > > >  src/libcamera/pipeline/vimc/vimc.cpp              |  5 +++--
> > > > >  12 files changed, 35 insertions(+), 43 deletions(-)
> > > > 
> > > > Given that some of the pipeline handlers will need more intrusive
> > > > changes to address the comments below, you could split this with one
> > > > patch per pipeline handler (or perhaps grouping the easy ones together).
> > > > 
> > > > > 
> > > > > diff --git a/src/libcamera/pipeline/ipu3/imgu.cpp b/src/libcamera/pipeline/ipu3/imgu.cpp
> > > > > index e955bc3456ba..f36e99dacbe7 100644
> > > > > --- a/src/libcamera/pipeline/ipu3/imgu.cpp
> > > > > +++ b/src/libcamera/pipeline/ipu3/imgu.cpp
> > > > > @@ -593,22 +593,22 @@ int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
> > > > >  /**
> > > > >   * \brief Allocate buffers for all the ImgU video devices
> > > > >   */
> > > > > -int ImgUDevice::allocateBuffers(unsigned int bufferCount)
> > > > > +int ImgUDevice::allocateBuffers()
> > > > >  {
> > > > >  	/* Share buffers between CIO2 output and ImgU input. */
> > > > > -	int ret = input_->importBuffers(bufferCount);
> > > > > +	int ret = input_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> > > > >  	if (ret) {
> > > > >  		LOG(IPU3, Error) << "Failed to import ImgU input buffers";
> > > > >  		return ret;
> > > > >  	}
> > > > >  
> > > > > -	ret = param_->allocateBuffers(bufferCount, &paramBuffers_);
> > > > > +	ret = param_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT, &paramBuffers_);
> > > > >  	if (ret < 0) {
> > > > >  		LOG(IPU3, Error) << "Failed to allocate ImgU param buffers";
> > > > >  		goto error;
> > > > >  	}
> > > > >  
> > > > > -	ret = stat_->allocateBuffers(bufferCount, &statBuffers_);
> > > > > +	ret = stat_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT, &statBuffers_);
> > > > >  	if (ret < 0) {
> > > > >  		LOG(IPU3, Error) << "Failed to allocate ImgU stat buffers";
> > > > >  		goto error;
> > > > > @@ -619,13 +619,13 @@ int ImgUDevice::allocateBuffers(unsigned int bufferCount)
> > > > >  	 * corresponding stream is active or inactive, as the driver needs
> > > > >  	 * buffers to be requested on the V4L2 devices in order to operate.
> > > > >  	 */
> > > > > -	ret = output_->importBuffers(bufferCount);
> > > > > +	ret = output_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> > > > >  	if (ret < 0) {
> > > > >  		LOG(IPU3, Error) << "Failed to import ImgU output buffers";
> > > > >  		goto error;
> > > > >  	}
> > > > >  
> > > > > -	ret = viewfinder_->importBuffers(bufferCount);
> > > > > +	ret = viewfinder_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
> > > > >  	if (ret < 0) {
> > > > >  		LOG(IPU3, Error) << "Failed to import ImgU viewfinder buffers";
> > > > >  		goto error;
> > > > > diff --git a/src/libcamera/pipeline/ipu3/imgu.h b/src/libcamera/pipeline/ipu3/imgu.h
> > > > > index 9d4915116087..f934a951fc75 100644
> > > > > --- a/src/libcamera/pipeline/ipu3/imgu.h
> > > > > +++ b/src/libcamera/pipeline/ipu3/imgu.h
> > > > > @@ -61,7 +61,7 @@ public:
> > > > >  					    outputFormat);
> > > > >  	}
> > > > >  
> > > > > -	int allocateBuffers(unsigned int bufferCount);
> > > > > +	int allocateBuffers();
> > > > >  	void freeBuffers();
> > > > >  
> > > > >  	int start();
> > > > > @@ -86,6 +86,9 @@ private:
> > > > >  	static constexpr unsigned int PAD_VF = 3;
> > > > >  	static constexpr unsigned int PAD_STAT = 4;
> > > > >  
> > > > > +	static constexpr unsigned int IPU3_INTERNAL_BUFFER_COUNT = 4;
> > > > > +	static constexpr unsigned int IPU3_BUFFER_SLOT_COUNT = 5;
> > > > 
> > > > 5 buffer slots is low. It means that if applications cycle more than 5
> > > > buffers, the V4L2VideoDevice cache that maintains associations between
> > > > dmabufs and buffer slots will be thrashed. Due to the internal queue of
> > > > requests in the IPU3 pipeline handler (similar to what you have
> > > > implemented in "[PATCH 0/3] libcamera: pipeline: Add internal request
> > > > queue" for other pipeline handlers), we won't fail at queuing requests,
> > > > but performance will suffer. I thus think we need to increase the number
> > > > of slots to what applications can be reasonably expected to use. We
> > > > could use 8, or even 16, as buffer slots are cheap. The same holds for
> > > > other pipeline handlers.
> > > > 
> > > > The number of slots for the CIO2 output should match the number of
> > > > buffer slots for the ImgU input, as the same buffers are used on the two
> > > > video devices. One option is to use IPU3_BUFFER_SLOT_COUNT for the CIO2,
> > > > instead of CIO2_BUFFER_COUNT. However, the number of internal CIO2
> > > > buffers that are allocated by exportBuffers() in CIO2Device::start(), to
> > > > be used in case the application doesn't provide any RAW buffer, should
> > > > be lower, as those are real buffer and are thus expensive. The number of
> > > > buffers and buffer slots on the CIO2 thus needs to be decoupled.
> > > > 
> > > > For proper operation, the CIO2 will require at least two queued buffers
> > > > (one being DMA'ed to, and one waiting). We need at least one extra
> > > > buffer queued to the ImgU to keep buffers flowing. Depending on
> > > > processing timings, it may be that the ImgU will complete processing of
> > > > its buffer before the CIO2 captures the next one, leading to a temporary
> > > > situation where the CIO2 will have three buffers queued, or the CIO2
> > > > will finish the capture first, leading to a temporary situation where
> > > > the CIO2 will have one buffer queued and the ImgU will have two buffers
> > > > queued. In either case, shortly afterwards, the other component will
> > > > complete capture or processing, and we'll get back to a situation with
> > > > two buffers queued in the CIO2 and one in the ImgU. That's thus a
> > > > minimum of three buffers for raw images.
> > > > 
> > > > From an ImgU point of view, we could probably get away with a single
> > > > parameter and a single stats buffer. This would however not allow
> > > > queuing the next frame for processing in the ImgU before the current
> > > > frame completes, so two buffers would be better. Now, if we take the IPA
> > > > into account, the statistics buffer will spend some time on the IPA side
> > > > for processing. It would thus be best to have an extra statistics buffer
> > > > to accommodate that, thus requiring three statistics buffers (and three
> > > > parameters buffers, as we associate them together).
> > > > 
> > > > This rationale leads to using the same number of internal buffers for
> > > > the CIO2, the parameters and the statistics. We currently use four, and
> > > > while the logic above indicates we could get away with three, it would
> > > > be safer to keep using four in this patch, and possibly reduce the
> > > > number of buffers later.
> > > > 
> > > > I know documentation isn't fun, but I think this rationale should be
> > > > captured in a comment in the IPU3 pipeline handler, along with a \todo
> > > > item to try and lower the number of internal buffers to three.
> > > 
> > > This is the IPU3 topology as I understand it:
> > > 
> > >       Output  .               .   Input        Output .
> > >       +---+   .               .   +---+        +---+  .
> > >       |   | --------------------> |   |        |   |  .
> > >       +---+   .               .   +---+        +---+  .
> > > CIO2          .   IPA         .          ImgU         .          IPA
> > >               .        Param  .   Param        Stat   .   Stat
> > >               .        +---+  .   +---+        +---+  .   +---+ 
> > >               .        |   | ---> |   |        |   | ---> |   | 
> > >               .        +---+  .   +---+        +---+  .   +---+ 
> > >           
> > > Your suggestions for the minimum number of buffers required are the following,
> > > from what I understand:
> > > 
> > > CIO2 raw internal buffers:
> > > - 2x on CIO2 Output (one being DMA'ed, one waiting)
> > > - 1x on ImgU Input
> > > 
> > > ImgU Param/Stat internal buffers:
> > > - 2x on ImgU Param/Stat (one being processed, one waiting)
> > > - 1x on IPA Stat
> > > 
> > > This arrangement doesn't seem to take into account that IPU3Frames::Info binds
> > > CIO2 internal buffers and ImgU Param/Stat buffers together. This means that each
> > > raw buffer queued to CIO2 Output needs a Param/Stat buffer as well. And each
> > > Param/Stat buffer queued to ImgU for processing needs a CIO2 raw buffer as well.
> > > After ImgU processing though, the raw buffer gets released and reused, so the
> > > Stat buffer queued to the IPA does not require a CIO2 raw buffer.
> > > 
> > > This means that to achieve the above minimum, due to the IPU3Frames::Info
> > > constraint, we'd actually need:
> > > 
> > > CIO2 internal buffers:
> > > - 2x on CIO2 Output (one being DMA'ed, one waiting)
> > > - 2x on ImgU Input (for the two ImgU Param/Stat buffers we want to have there)
> > > 
> > > ImgU Param/Stat internal buffers:
> > > - 2x on CIO2 Output (for the two CIO2 raw buffers we want to have there)
> > > - 2x on ImgU Param/Stat (one being processed, one waiting)
> 
> Note that the need to have two buffers here is to ensure back-to-back
> processing of frames on the ImgU and thus avoid delays, but this need
> actually depends on how fast the ImgU is. With a very fast ImgU
> (compared to the frame duration), inter-frame delays may not be an
> issue. There's more on this below.
> 
> > > - 1x on IPA Stat
> 
> Processing of the statistics can occur after the corresponding raw image
> buffer has been requeued to the CIO2, the only hard requirement is that
> the buffer needs to be available by the time the ImgU will process the
> corresponding raw frame buffer again.
> 
> > > Also we're not accounting for parameter filling in the IPA before we queue the
> > > buffers to ImgU, but perhaps that's fast enough that it doesn't matter?
> 
> That's one of the questions we need to answer, I don't think we have
> numbers at this time. If filling the parameters buffer takes a
> significant amount of time, then that would need to be taken into
> account as an additional step in the pipeline, with an additional set of
> buffers.
> 
> > > Does this make sense? Or am I missing something?
> 
> One thing that you may not have taken into account is that the two
> buffers queued on the CIO2 output and the two buffers queued on the ImgU
> are not necessarily queued at the same time. I'll try to explain.
> 
> On the CIO2 side, we have a strong real time requirement to always keep
> the CIO2 fed with buffers. The details depend a bit on the hardware and
> driver implementations, but the base idea is that once a buffer is
> complete and the time comes to move to the next buffer for the next
> frame, there has to be a next buffer available. When exactly this occurs
> can vary. Some drivers will give the buffer for the next frame to the
> device when capture for the current frame starts, and some will give it
> when the hardware signals completion of the capture of the current frame
> (frame end). In theory this could be delayed even a bit more, but it has
> to happen before the hardware needs the new buffer, and giving it when
> the DMA completes is often too risky already as vertical blanking can be
> short and interrupts can be delayed a bit. I tried to check the driver
> to see what the exact requirement is, but I'm not familiar with the
> hardware and the code is not very easy to follow.
> 
> Note that frame start is the time when the first pixel of the frame is
> written to memory, and frame end the time when the last pixel of the
> frame is written to memory. The end of frame N and the start of frame
> N+1 are separated by the vertical blanking time.
> 
> Let's assume that the CIO2 needs to be programmed with the buffer for
> frame N+1 at the start of frame N (Edit: I've written all the
> explanation below based on this assumption, but after further
> investigation, I *think* the CIO2 only requires the buffer for frame N+1
> at the beginning of frame N+1, but the driver enforces that the buffer
> must be present just before the start of frame N to avoid race
> conditions - just before the start of frame N and at the start of frame
> N are practically speaking the same thing. Sakari, do you know if this is
> correct ?). We'll constantly transition between the following states,
> from the CIO2 point of view.
> 
> 0. (Initial state) 2x idle buffers in the queue, hardware stopped. The
>    CIO2 is then started, the first buffer in the queue is given to the
>    device to capture the first frame, and the second buffer in the queue
>    is given to the device to capture the second frame. The first frame
>    starts.
> 
> 1. 1x active buffer being DMA'ed to, 1x pending buffer already given to
>    the hardware for the next frame, 0x idle buffers in the queue. Two
>    events can occur at this point, either completion of the current
>    frame (-> 2), or a new buffer being queued by userspace (-> 4).
> 
> 2. 0x active buffer being DMA'ed to, 1x pending buffer already given to
>    the hardware for the next frame, 0x idle buffers in the queue. Two
>    events can occur at this point, either start of the next frame (->
>    3), or a new buffer being queued by userspace (-> 5).
> 
>    This state lasts for the duration of the vertical blanking only, and
>    can thus be short-lived.
> 
> 3. The next frame starts. The pending buffer becomes active. We have no
>    buffer in the queue to give to the hardware for the next frame. An
>    underrun has occurred, a frame will be dropped. Game over.
> 
> 4. 1x active buffer being DMA'ed to, 1x pending buffer already given to
>    the hardware for the next frame, 1x idle buffers in the queue. The
>    next event that will occur is the start of the next frame (as the
>    other option, a new buffer being queued, will give us additional
>    safety by increasing the number of queued buffers, but isn't
>    meaningful when considering the case where we try to run with the
>    minimum number of buffers possible).
> 
>    As the current frame ends, the active buffer is given back to the
>    userspace. There's no active buffer (the DMA will start soon, after
>    the vertical blanking, when the next frame starts), the pending
>    buffer stays pending, and the idle buffer stays idle (-> 5).
> 
> 5. 0x active buffer being DMA'ed to, 1x pending buffer already given to
>    the hardware for the next frame, 1x idle buffers in the queue. The
>    next event that will occur is the start of the next frame (for the
>    same reason as in 4).
> 
>    As the next frame starts, the pending buffer becomes active. The
>    queued buffer is given to the hardware for the subsequent frame. The
>    queue of idle buffers becomes empty (-> 1).
> 
>    If this state is reached from state 2, it lasts for the remainder of
>    the vertical blanking only. If it is reached from state 4, it lasts
>    for the whole vertical blanking. In both cases, it can be
>    short-lived.
> 
> We can thus cycle either through 1 -> 2 -> 5 -> 1 or through 1 -> 4 -> 5
> -> 1. The first cycle requires two buffers for the CIO2, with an
> intermediate state (2) that has a single buffer only. This is unsafe, as
> a failure to queue a second buffer in the short-lived state 2 will lead
> to state 3 and frame drops.
> 
> The second cycle requires three buffers for the CIO2. This is the cycle
> we want to use, to avoid frame drops. Note that only state 4 requires
> all three buffers, and userspace can queue the third buffer at any point
> in state 1 (before the end of the current frame). If userspace queues
> the frame slightly too late, after the completion of the current frame
> but before the start of the next one, we'll go to the unsafe cycle but
> will still not lose frames.
> 
> Now, let's look at the ImgU side, and assume we use three buffers in
> total. The ImgU operates from memory to memory, it thus has no realtime
> requirement. It only starts processing a frame when the frame is given
> to it. This occurs, from a CIO2 point of view, in the transition from
> state 4 to state 5, plus all delays introduced by delivering the CIO2
> frame completion event to userspace, queueing the frame to the ImgU (I'm
> ignoring the IPA here), and starting the ImgU itself. The ImgU
> processing time will, on average, be lower than the frame duration,
> otherwise it won't be able to process all frames. Once the ImgU
> completes processing of the frame, it will signal this to userspace.
> There's also a processing delay there (signalling, task switching, ...),
> and userspace will requeue the frame to the CIO2. This has to occur at
> the latest before the end of the current frame, otherwise state 1 will
> transition to state 2.
> 
> We thus see that, in the 3 buffers case, we need to ensure that the
> total time to process the frame on the ImgU, from the CIO2 interrupt
> signalling the end of state 4 to the buffer being requeued to the CIO2,
> thus including all task switching and other delays, doesn't exceed the
> duration of states 5 + 1, which is equal to the duration of a frame. The
> ImgU processing time itself is guaranteed to be lower than that, but the
> additional delays may be problematic. We also need to include a possible
> round-trip to the IPA after end of buffer capture by the CIO2 and start
> of processing by the ImgU to retrieve the ImgU parameters for the frame.
> Three buffers start sounding quite risky. I'm thus correcting myself,
> four buffers seem safer.
> 
> None of this takes the parameters or statistics buffers into account,
> but I don't think they're particularly problematic in the sense that the
> most strict realtime constraints come from the raw image buffers. Feel
> free to prove me wrong though :-)
> 
> Let's however note that we can probably fetch the ImgU parameters for
> the frame that has just been captured before the end of the frame, so
> that would remove a delay in the ImgU processing. This assumes that the
> algorithms wouldn't need to know the exact exposure time and analog gain
> that have been used to capture the current frame in order to compute the
> ImgU parameters. This leads to a first question to David: does the
> Raspberry Pi IPA require the sensor metadata to calculate ISP
> parameters, or are they needed only when processing statistics from
> frame N to calculate sensor and ISP parameters of subsequent frames ?
> 
> The next question is for everybody (and that's why I've expanded the CC
> list to Kieran, Jean-Michel and Sakari too): what did I get wrong in the
> above explanation ? :-)
> 
> > > > > +
> > > > >  	int linkSetup(const std::string &source, unsigned int sourcePad,
> > > > >  		      const std::string &sink, unsigned int sinkPad,
> > > > >  		      bool enable);
> > > > > diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp
> > > > > index 5fd1757bfe13..4efd201c05e5 100644
> > > > > --- a/src/libcamera/pipeline/ipu3/ipu3.cpp
> > > > > +++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
> > > > > @@ -681,16 +681,9 @@ int PipelineHandlerIPU3::allocateBuffers(Camera *camera)
> > > > >  {
> > > > >  	IPU3CameraData *data = cameraData(camera);
> > > > >  	ImgUDevice *imgu = data->imgu_;
> > > > > -	unsigned int bufferCount;
> > > > >  	int ret;
> > > > >  
> > > > > -	bufferCount = std::max({
> > > > > -		data->outStream_.configuration().bufferCount,
> > > > > -		data->vfStream_.configuration().bufferCount,
> > > > > -		data->rawStream_.configuration().bufferCount,
> > > > > -	});
> > > > > -
> > > > > -	ret = imgu->allocateBuffers(bufferCount);
> > > > > +	ret = imgu->allocateBuffers();
> > > > >  	if (ret < 0)
> > > > >  		return ret;
> > > > >  
> > > > > diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > > index d1cd3d9dc082..776e0f92aed1 100644
> > > > > --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > > +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > > @@ -1149,20 +1149,15 @@ int PipelineHandlerRPi::prepareBuffers(Camera *camera)
> > > > >  {
> > > > >  	RPiCameraData *data = cameraData(camera);
> > > > >  	int ret;
> > > > > +	constexpr unsigned int bufferCount = 4;
> > > > >  
> > > > >  	/*
> > > > > -	 * Decide how many internal buffers to allocate. For now, simply look
> > > > > -	 * at how many external buffers will be provided. We'll need to improve
> > > > > -	 * this logic. However, we really must have all streams allocate the same
> > > > > -	 * number of buffers to simplify error handling in queueRequestDevice().
> > > > > +	 * Allocate internal buffers. We really must have all streams allocate
> > > > > +	 * the same number of buffers to simplify error handling in
> > > > > +	 * queueRequestDevice().
> > > > >  	 */
> > > > > -	unsigned int maxBuffers = 0;
> > > > > -	for (const Stream *s : camera->streams())
> > > > > -		if (static_cast<const RPi::Stream *>(s)->isExternal())
> > > > > -			maxBuffers = std::max(maxBuffers, s->configuration().bufferCount);
> > > > > -
> > > > >  	for (auto const stream : data->streams_) {
> > > > > -		ret = stream->prepareBuffers(maxBuffers);
> > > > > +		ret = stream->prepareBuffers(bufferCount);
> > > > 
> > > > We have a similar problem here, 4 buffer slots is too little, but when
> > > > the stream has to allocate internal buffers (!importOnly), which is the
> > > > case for most streams, we don't want to overallocate.
> > > > 
> > > > I'd like to get feedback from Naush here, but I think this means we'll
> > > > have to relax the requirement documented in the comment above, and
> > > > accept a different number of buffers for each stream.
> > > > 
> > > > >  		if (ret < 0)
> > > > >  			return ret;
> > > > >  	}
> > > > > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > > > > index 11325875b929..f4ea2fd4d4d0 100644
> > > > > --- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > > > > +++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > > > > @@ -690,16 +690,11 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
> > > > >  	unsigned int ipaBufferId = 1;
> > > > >  	int ret;
> > > > >  
> > > > > -	unsigned int maxCount = std::max({
> > > > > -		data->mainPathStream_.configuration().bufferCount,
> > > > > -		data->selfPathStream_.configuration().bufferCount,
> > > > > -	});
> > > > > -
> > > > > -	ret = param_->allocateBuffers(maxCount, &paramBuffers_);
> > > > > +	ret = param_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT, &paramBuffers_);
> > > > >  	if (ret < 0)
> > > > >  		goto error;
> > > > >  
> > > > > -	ret = stat_->allocateBuffers(maxCount, &statBuffers_);
> > > > > +	ret = stat_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT, &statBuffers_);
> > > > >  	if (ret < 0)
> > > > >  		goto error;
> > > > >  
> > > > > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > > > > index 25f482eb8d8e..fea330f72886 100644
> > > > > --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > > > > +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > > > > @@ -172,7 +172,7 @@ int RkISP1Path::start()
> > > > >  		return -EBUSY;
> > > > >  
> > > > >  	/* \todo Make buffer count user configurable. */
> > > > > -	ret = video_->importBuffers(RKISP1_BUFFER_COUNT);
> > > > > +	ret = video_->importBuffers(RKISP1_BUFFER_SLOT_COUNT);
> > > > >  	if (ret)
> > > > >  		return ret;
> > > > >  
> > > > > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.h b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > > > > index 91757600ccdc..3c5891009c58 100644
> > > > > --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > > > > +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
> > > > > @@ -27,6 +27,9 @@ class V4L2Subdevice;
> > > > >  struct StreamConfiguration;
> > > > >  struct V4L2SubdeviceFormat;
> > > > >  
> > > > > +static constexpr unsigned int RKISP1_INTERNAL_BUFFER_COUNT = 4;
> > > > > +static constexpr unsigned int RKISP1_BUFFER_SLOT_COUNT = 5;
> > > > 
> > > > The situation should be simpler for the rkisp1, as it has a different
> > > > pipeline model (inline ISP as opposed to offline ISP for the IPU3). We
> > > > can allocate more slots (8 or 16, as for other pipeline handlers), and
> > > > restrict the number of internal buffers (for stats and parameters) to
> > > > the number of requests we expect to queue to the device at once, plus
> > > > one for the IPA.  Four thus seems good. Capturing this rationale in a
> > > > comment would be good too.
> > 
> > Shouldn't we also have one extra buffer queued to the capture device, like for
> > the others, totalling five (four on the capture, one on the IPA)? Or since the
> > driver already requires three buffers the extra one isn't needed?
> >
> > I'm not sure how it works, but if the driver requires three buffers at all times
> > to keep streaming, then I think we indeed should have the extra buffer to avoid
> > dropping frames. Otherwise, if that requirement is only for starting the stream,
> > then for drivers that require at least two buffers we shouldn't need an extra
> > one, I'd think.
> 
> It seems to be only needed to start capture. Even then I think it could
> be lowered to two buffers, I don't see anything in the driver that
> requires three. Maybe someone from Collabora could comment on this ? And
> maybe you could give it a try by modifying the driver ?
> 
> By the way, if you try to apply the CIO2 reasoning above to the RkISP1,
> you will need to take into account the fact that the driver programs the
> hardware with the buffer for frame N+1 not at the beginning of frame N,
> but at the end of frame N-1.
> 
> I think four buffers is enough. We currently use four buffers and it
> seems to work :-) Granted, the RkISP1 IPA is a skeleton, so this
> argument isn't very strong, but given that the driver only needs two
> buffers except at start time, four should be fine.

Just to give some feedback, I lowered the RKISP1_MIN_BUFFERS_NEEDED constant in
the driver to 1 and tested capture using cam and everything still works as
expected.

Using a single request obviously causes a lot of frame drops, and with two
there are still a few. But three requests work completely fine, which
seems to suggest that two buffers are used internally, with the third one
covering the propagation delays to and from userspace (and through the
skeleton IPA) while the first buffer is requeued.

So four buffers should also cover the case when the IPA is further developed,
like you said.

(That said, I was able to get frame drops by choosing a resolution of at least
600x400 and saving the frames to disk with -F, but that much extra delay may
already be enough that the application should consider overallocating buffers.)

Thanks,
Nícolas

> 
> > > > BTW, I may be too tired to think properly, or just unable to see the
> > > > obvious, so please challenge any rationale you think is incorrect.
> > > > 
> > > > > +
> > > > >  class RkISP1Path
> > > > >  {
> > > > >  public:
> > > > > diff --git a/src/libcamera/pipeline/simple/converter.cpp b/src/libcamera/pipeline/simple/converter.cpp
> > > > > index b5e34c4cd0c5..b3bcf01483f7 100644
> > > > > --- a/src/libcamera/pipeline/simple/converter.cpp
> > > > > +++ b/src/libcamera/pipeline/simple/converter.cpp
> > > > > @@ -103,11 +103,11 @@ int SimpleConverter::Stream::exportBuffers(unsigned int count,
> > > > >  
> > > > >  int SimpleConverter::Stream::start()
> > > > >  {
> > > > > -	int ret = m2m_->output()->importBuffers(inputBufferCount_);
> > > > > +	int ret = m2m_->output()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> > > > 
> > > > Shouldn't this be SIMPLE_INTERNAL_BUFFER_COUNT ? Overallocating is not
> > > > much of an issue I suppose.
> > 
> > Indeed. I was under the impression that we should always importBuffers() using
> > BUFFER_SLOT_COUNT, but now, after reading more code, I understand that's not
> > always the case (although this seems to be the only case, due to the presence of
> > the converter).
> > 
> > > > >  	if (ret < 0)
> > > > >  		return ret;
> > > > >  
> > > > > -	ret = m2m_->capture()->importBuffers(outputBufferCount_);
> > > > > +	ret = m2m_->capture()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> > > > >  	if (ret < 0) {
> > > > >  		stop();
> > > > >  		return ret;
> > > > > diff --git a/src/libcamera/pipeline/simple/converter.h b/src/libcamera/pipeline/simple/converter.h
> > > > > index 276a2a291c21..7e1d60674f62 100644
> > > > > --- a/src/libcamera/pipeline/simple/converter.h
> > > > > +++ b/src/libcamera/pipeline/simple/converter.h
> > > > > @@ -29,6 +29,9 @@ class SizeRange;
> > > > >  struct StreamConfiguration;
> > > > >  class V4L2M2MDevice;
> > > > >  
> > > > > +constexpr unsigned int SIMPLE_INTERNAL_BUFFER_COUNT = 5;
> > > > > +constexpr unsigned int SIMPLE_BUFFER_SLOT_COUNT = 5;
> > > > 
> > > > Let's name the variables kSimpleInternalBufferCount and
> > > > kSimpleBufferSlotCount, as that's the naming scheme we're moving to for
> > > > non-macro constants. Same comment elsewhere in this patch.
> > > > 
> > > > Those constants don't belong to converter.h. Could you turn them into
> > > > member constants of the SimplePipelineHandler class, as
> > > > kNumInternalBuffers (which btw should be removed) ? The number of buffer
> > > > slots can be passed as a parameter to SimpleConverter::start().
> > > > 
> > > > There are no stats or parameters here, and no IPA, so the situation is
> > > > different from the IPU3 and RkISP1 cases. The number of internal buffers
> > > > should just be one more than the minimum number of buffers required by
> > > > the capture device; I don't think there's another requirement.
> > 
> > Plus one extra to have queued at the converter's 'output' node (which is its
> > input, confusingly)?
> 
> It depends a bit on the exact timings of the capture device, as is
> probably clear from the explanation above (or at least it's now clearly
> a complicated topic :-)). We need to ensure that the realtime
> requirements of the device are met, and that the capture buffers that
> complete, and are then processed by the converter, are requeued to the
> capture device in time to meet those requirements.
> 
> As the simple pipeline handler deals with a variety of devices, we have
> two options: either checking the requirements of each device and
> recording them in the supportedDevices array, or picking a common number
> of buffers that should be good enough for everybody. I'd start with the
> second option for simplicity, and as the pipeline handler currently uses
> 3 buffers, I'd stick to that for now.
> 
> > > > > +
> > > > >  class SimpleConverter
> > > > >  {
> > > > >  public:
> > > > > diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
> > > > > index 1c25a7344f5f..a1163eaf8be2 100644
> > > > > --- a/src/libcamera/pipeline/simple/simple.cpp
> > > > > +++ b/src/libcamera/pipeline/simple/simple.cpp
> > > > > @@ -803,12 +803,10 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
> > > > >  		 * When using the converter allocate a fixed number of internal
> > > > >  		 * buffers.
> > > > >  		 */
> > > > > -		ret = video->allocateBuffers(kNumInternalBuffers,
> > > > > +		ret = video->allocateBuffers(SIMPLE_INTERNAL_BUFFER_COUNT,
> > > > >  					     &data->converterBuffers_);
> > > > >  	} else {
> > > > > -		/* Otherwise, prepare for using buffers from the only stream. */
> > > > > -		Stream *stream = &data->streams_[0];
> > > > > -		ret = video->importBuffers(stream->configuration().bufferCount);
> > > > > +		ret = video->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
> > > > >  	}
> > > > >  	if (ret < 0)
> > > > >  		return ret;
> > > > > diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > > > > index fd39b3d3c72c..755949e7a59a 100644
> > > > > --- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > > > > +++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > > > > @@ -91,6 +91,8 @@ private:
> > > > >  		return static_cast<UVCCameraData *>(
> > > > >  			PipelineHandler::cameraData(camera));
> > > > >  	}
> > > > > +
> > > > > +	static constexpr unsigned int UVC_BUFFER_SLOT_COUNT = 5;
> > > > >  };
> > > > >  
> > > > >  UVCCameraConfiguration::UVCCameraConfiguration(UVCCameraData *data)
> > > > > @@ -236,9 +238,8 @@ int PipelineHandlerUVC::exportFrameBuffers(Camera *camera,
> > > > >  int PipelineHandlerUVC::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
> > > > >  {
> > > > >  	UVCCameraData *data = cameraData(camera);
> > > > > -	unsigned int count = data->stream_.configuration().bufferCount;
> > > > >  
> > > > > -	int ret = data->video_->importBuffers(count);
> > > > > +	int ret = data->video_->importBuffers(UVC_BUFFER_SLOT_COUNT);
> > > > 
> > > > For the uvc and vimc pipeline handlers, we have no internal buffers, so
> > > > it's quite easy. We should have 8 or 16 slots, as for other pipeline
> > > > handlers.
> > > > 
> > > > >  	if (ret < 0)
> > > > >  		return ret;
> > > > >  
> > > > > diff --git a/src/libcamera/pipeline/vimc/vimc.cpp b/src/libcamera/pipeline/vimc/vimc.cpp
> > > > > index e89d53182c6d..24ba743a946c 100644
> > > > > --- a/src/libcamera/pipeline/vimc/vimc.cpp
> > > > > +++ b/src/libcamera/pipeline/vimc/vimc.cpp
> > > > > @@ -102,6 +102,8 @@ private:
> > > > >  		return static_cast<VimcCameraData *>(
> > > > >  			PipelineHandler::cameraData(camera));
> > > > >  	}
> > > > > +
> > > > > +	static constexpr unsigned int VIMC_BUFFER_SLOT_COUNT = 5;
> > > > >  };
> > > > >  
> > > > >  namespace {
> > > > > @@ -312,9 +314,8 @@ int PipelineHandlerVimc::exportFrameBuffers(Camera *camera,
> > > > >  int PipelineHandlerVimc::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
> > > > >  {
> > > > >  	VimcCameraData *data = cameraData(camera);
> > > > > -	unsigned int count = data->stream_.configuration().bufferCount;
> > > > >  
> > > > > -	int ret = data->video_->importBuffers(count);
> > > > > +	int ret = data->video_->importBuffers(VIMC_BUFFER_SLOT_COUNT);
> > > > >  	if (ret < 0)
> > > > >  		return ret;
> > > > >  
> 
> -- 
> Regards,
> 
> Laurent Pinchart

Patch
diff mbox series

diff --git a/src/libcamera/pipeline/ipu3/imgu.cpp b/src/libcamera/pipeline/ipu3/imgu.cpp
index e955bc3456ba..f36e99dacbe7 100644
--- a/src/libcamera/pipeline/ipu3/imgu.cpp
+++ b/src/libcamera/pipeline/ipu3/imgu.cpp
@@ -593,22 +593,22 @@  int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
 /**
  * \brief Allocate buffers for all the ImgU video devices
  */
-int ImgUDevice::allocateBuffers(unsigned int bufferCount)
+int ImgUDevice::allocateBuffers()
 {
 	/* Share buffers between CIO2 output and ImgU input. */
-	int ret = input_->importBuffers(bufferCount);
+	int ret = input_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
 	if (ret) {
 		LOG(IPU3, Error) << "Failed to import ImgU input buffers";
 		return ret;
 	}
 
-	ret = param_->allocateBuffers(bufferCount, &paramBuffers_);
+	ret = param_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT, &paramBuffers_);
 	if (ret < 0) {
 		LOG(IPU3, Error) << "Failed to allocate ImgU param buffers";
 		goto error;
 	}
 
-	ret = stat_->allocateBuffers(bufferCount, &statBuffers_);
+	ret = stat_->allocateBuffers(IPU3_INTERNAL_BUFFER_COUNT, &statBuffers_);
 	if (ret < 0) {
 		LOG(IPU3, Error) << "Failed to allocate ImgU stat buffers";
 		goto error;
@@ -619,13 +619,13 @@  int ImgUDevice::allocateBuffers(unsigned int bufferCount)
 	 * corresponding stream is active or inactive, as the driver needs
 	 * buffers to be requested on the V4L2 devices in order to operate.
 	 */
-	ret = output_->importBuffers(bufferCount);
+	ret = output_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
 	if (ret < 0) {
 		LOG(IPU3, Error) << "Failed to import ImgU output buffers";
 		goto error;
 	}
 
-	ret = viewfinder_->importBuffers(bufferCount);
+	ret = viewfinder_->importBuffers(IPU3_BUFFER_SLOT_COUNT);
 	if (ret < 0) {
 		LOG(IPU3, Error) << "Failed to import ImgU viewfinder buffers";
 		goto error;
diff --git a/src/libcamera/pipeline/ipu3/imgu.h b/src/libcamera/pipeline/ipu3/imgu.h
index 9d4915116087..f934a951fc75 100644
--- a/src/libcamera/pipeline/ipu3/imgu.h
+++ b/src/libcamera/pipeline/ipu3/imgu.h
@@ -61,7 +61,7 @@  public:
 					    outputFormat);
 	}
 
-	int allocateBuffers(unsigned int bufferCount);
+	int allocateBuffers();
 	void freeBuffers();
 
 	int start();
@@ -86,6 +86,9 @@  private:
 	static constexpr unsigned int PAD_VF = 3;
 	static constexpr unsigned int PAD_STAT = 4;
 
+	static constexpr unsigned int IPU3_INTERNAL_BUFFER_COUNT = 4;
+	static constexpr unsigned int IPU3_BUFFER_SLOT_COUNT = 5;
+
 	int linkSetup(const std::string &source, unsigned int sourcePad,
 		      const std::string &sink, unsigned int sinkPad,
 		      bool enable);
diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp
index 5fd1757bfe13..4efd201c05e5 100644
--- a/src/libcamera/pipeline/ipu3/ipu3.cpp
+++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
@@ -681,16 +681,9 @@  int PipelineHandlerIPU3::allocateBuffers(Camera *camera)
 {
 	IPU3CameraData *data = cameraData(camera);
 	ImgUDevice *imgu = data->imgu_;
-	unsigned int bufferCount;
 	int ret;
 
-	bufferCount = std::max({
-		data->outStream_.configuration().bufferCount,
-		data->vfStream_.configuration().bufferCount,
-		data->rawStream_.configuration().bufferCount,
-	});
-
-	ret = imgu->allocateBuffers(bufferCount);
+	ret = imgu->allocateBuffers();
 	if (ret < 0)
 		return ret;
 
diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
index d1cd3d9dc082..776e0f92aed1 100644
--- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
+++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
@@ -1149,20 +1149,15 @@  int PipelineHandlerRPi::prepareBuffers(Camera *camera)
 {
 	RPiCameraData *data = cameraData(camera);
 	int ret;
+	constexpr unsigned int bufferCount = 4;
 
 	/*
-	 * Decide how many internal buffers to allocate. For now, simply look
-	 * at how many external buffers will be provided. We'll need to improve
-	 * this logic. However, we really must have all streams allocate the same
-	 * number of buffers to simplify error handling in queueRequestDevice().
+	 * Allocate internal buffers. We really must have all streams allocate
+	 * the same number of buffers to simplify error handling in
+	 * queueRequestDevice().
 	 */
-	unsigned int maxBuffers = 0;
-	for (const Stream *s : camera->streams())
-		if (static_cast<const RPi::Stream *>(s)->isExternal())
-			maxBuffers = std::max(maxBuffers, s->configuration().bufferCount);
-
 	for (auto const stream : data->streams_) {
-		ret = stream->prepareBuffers(maxBuffers);
+		ret = stream->prepareBuffers(bufferCount);
 		if (ret < 0)
 			return ret;
 	}
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
index 11325875b929..f4ea2fd4d4d0 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
+++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
@@ -690,16 +690,11 @@  int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
 	unsigned int ipaBufferId = 1;
 	int ret;
 
-	unsigned int maxCount = std::max({
-		data->mainPathStream_.configuration().bufferCount,
-		data->selfPathStream_.configuration().bufferCount,
-	});
-
-	ret = param_->allocateBuffers(maxCount, &paramBuffers_);
+	ret = param_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT, &paramBuffers_);
 	if (ret < 0)
 		goto error;
 
-	ret = stat_->allocateBuffers(maxCount, &statBuffers_);
+	ret = stat_->allocateBuffers(RKISP1_INTERNAL_BUFFER_COUNT, &statBuffers_);
 	if (ret < 0)
 		goto error;
 
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
index 25f482eb8d8e..fea330f72886 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
+++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
@@ -172,7 +172,7 @@  int RkISP1Path::start()
 		return -EBUSY;
 
 	/* \todo Make buffer count user configurable. */
-	ret = video_->importBuffers(RKISP1_BUFFER_COUNT);
+	ret = video_->importBuffers(RKISP1_BUFFER_SLOT_COUNT);
 	if (ret)
 		return ret;
 
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.h b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
index 91757600ccdc..3c5891009c58 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1_path.h
+++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.h
@@ -27,6 +27,9 @@  class V4L2Subdevice;
 struct StreamConfiguration;
 struct V4L2SubdeviceFormat;
 
+static constexpr unsigned int RKISP1_INTERNAL_BUFFER_COUNT = 4;
+static constexpr unsigned int RKISP1_BUFFER_SLOT_COUNT = 5;
+
 class RkISP1Path
 {
 public:
diff --git a/src/libcamera/pipeline/simple/converter.cpp b/src/libcamera/pipeline/simple/converter.cpp
index b5e34c4cd0c5..b3bcf01483f7 100644
--- a/src/libcamera/pipeline/simple/converter.cpp
+++ b/src/libcamera/pipeline/simple/converter.cpp
@@ -103,11 +103,11 @@  int SimpleConverter::Stream::exportBuffers(unsigned int count,
 
 int SimpleConverter::Stream::start()
 {
-	int ret = m2m_->output()->importBuffers(inputBufferCount_);
+	int ret = m2m_->output()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
 	if (ret < 0)
 		return ret;
 
-	ret = m2m_->capture()->importBuffers(outputBufferCount_);
+	ret = m2m_->capture()->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
 	if (ret < 0) {
 		stop();
 		return ret;
diff --git a/src/libcamera/pipeline/simple/converter.h b/src/libcamera/pipeline/simple/converter.h
index 276a2a291c21..7e1d60674f62 100644
--- a/src/libcamera/pipeline/simple/converter.h
+++ b/src/libcamera/pipeline/simple/converter.h
@@ -29,6 +29,9 @@  class SizeRange;
 struct StreamConfiguration;
 class V4L2M2MDevice;
 
+constexpr unsigned int SIMPLE_INTERNAL_BUFFER_COUNT = 5;
+constexpr unsigned int SIMPLE_BUFFER_SLOT_COUNT = 5;
+
 class SimpleConverter
 {
 public:
diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
index 1c25a7344f5f..a1163eaf8be2 100644
--- a/src/libcamera/pipeline/simple/simple.cpp
+++ b/src/libcamera/pipeline/simple/simple.cpp
@@ -803,12 +803,10 @@  int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
 		 * When using the converter allocate a fixed number of internal
 		 * buffers.
 		 */
-		ret = video->allocateBuffers(kNumInternalBuffers,
+		ret = video->allocateBuffers(SIMPLE_INTERNAL_BUFFER_COUNT,
 					     &data->converterBuffers_);
 	} else {
-		/* Otherwise, prepare for using buffers from the only stream. */
-		Stream *stream = &data->streams_[0];
-		ret = video->importBuffers(stream->configuration().bufferCount);
+		ret = video->importBuffers(SIMPLE_BUFFER_SLOT_COUNT);
 	}
 	if (ret < 0)
 		return ret;
diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
index fd39b3d3c72c..755949e7a59a 100644
--- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
+++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
@@ -91,6 +91,8 @@  private:
 		return static_cast<UVCCameraData *>(
 			PipelineHandler::cameraData(camera));
 	}
+
+	static constexpr unsigned int UVC_BUFFER_SLOT_COUNT = 5;
 };
 
 UVCCameraConfiguration::UVCCameraConfiguration(UVCCameraData *data)
@@ -236,9 +238,8 @@  int PipelineHandlerUVC::exportFrameBuffers(Camera *camera,
 int PipelineHandlerUVC::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
 {
 	UVCCameraData *data = cameraData(camera);
-	unsigned int count = data->stream_.configuration().bufferCount;
 
-	int ret = data->video_->importBuffers(count);
+	int ret = data->video_->importBuffers(UVC_BUFFER_SLOT_COUNT);
 	if (ret < 0)
 		return ret;
 
diff --git a/src/libcamera/pipeline/vimc/vimc.cpp b/src/libcamera/pipeline/vimc/vimc.cpp
index e89d53182c6d..24ba743a946c 100644
--- a/src/libcamera/pipeline/vimc/vimc.cpp
+++ b/src/libcamera/pipeline/vimc/vimc.cpp
@@ -102,6 +102,8 @@  private:
 		return static_cast<VimcCameraData *>(
 			PipelineHandler::cameraData(camera));
 	}
+
+	static constexpr unsigned int VIMC_BUFFER_SLOT_COUNT = 5;
 };
 
 namespace {
@@ -312,9 +314,8 @@  int PipelineHandlerVimc::exportFrameBuffers(Camera *camera,
 int PipelineHandlerVimc::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
 {
 	VimcCameraData *data = cameraData(camera);
-	unsigned int count = data->stream_.configuration().bufferCount;
 
-	int ret = data->video_->importBuffers(count);
+	int ret = data->video_->importBuffers(VIMC_BUFFER_SLOT_COUNT);
 	if (ret < 0)
 		return ret;