[libcamera-devel,v3,4/4] libcamera: stream: Remove bufferCount

Message ID 20210421165139.318432-5-nfraprado@collabora.com
State Superseded
Series
  • lc-compliance: Add test to queue more requests than hardware depth

Commit Message

Nícolas F. R. A. Prado April 21, 2021, 4:51 p.m. UTC
Now that the number of internal buffers allocated is hardcoded by the
pipelines, and the number of buffers allocated by the
FrameBufferAllocator helper is passed through
FrameBufferAllocator::allocate(), we no longer need bufferCount in the
StreamConfiguration, so remove it.

Signed-off-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
---
 include/libcamera/stream.h                        |  2 --
 src/libcamera/pipeline/ipu3/cio2.cpp              |  1 -
 src/libcamera/pipeline/ipu3/ipu3.cpp              | 15 +--------------
 .../pipeline/raspberrypi/raspberrypi.cpp          |  9 ++-------
 src/libcamera/pipeline/rkisp1/rkisp1.cpp          |  9 ++-------
 src/libcamera/pipeline/rkisp1/rkisp1_path.cpp     |  2 --
 src/libcamera/pipeline/simple/converter.cpp       |  7 ++-----
 src/libcamera/pipeline/simple/converter.h         |  5 ++---
 src/libcamera/pipeline/simple/simple.cpp          | 15 ++++-----------
 src/libcamera/pipeline/uvcvideo/uvcvideo.cpp      |  5 +----
 src/libcamera/pipeline/vimc/vimc.cpp              |  5 +----
 src/libcamera/stream.cpp                          |  9 ++-------
 src/v4l2/v4l2_camera.cpp                          | 14 ++++++++++----
 src/v4l2/v4l2_camera.h                            |  5 +++--
 src/v4l2/v4l2_camera_proxy.cpp                    |  8 +++-----
 test/camera/buffer_import.cpp                     | 10 +++++++---
 test/libtest/buffer_source.cpp                    |  4 ++--
 test/libtest/buffer_source.h                      |  2 +-
 test/v4l2_videodevice/buffer_cache.cpp            |  4 ++--
 19 files changed, 45 insertions(+), 86 deletions(-)

Comments

Laurent Pinchart April 26, 2021, 3:19 a.m. UTC | #1
Hi Nícolas,

Thank you for the patch.

On Wed, Apr 21, 2021 at 01:51:39PM -0300, Nícolas F. R. A. Prado wrote:
> Now that the amount of internal buffers allocated is hardcoded by the
> pipelines, and the amount of buffers allocated by the
> FrameBufferAllocator helper is passed through
> FrameBufferAllocator::allocate(), we no longer need to have bufferCount
> in the StreamConfiguration, so remove it.
> 
> Signed-off-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
> ---
>  include/libcamera/stream.h                        |  2 --
>  src/libcamera/pipeline/ipu3/cio2.cpp              |  1 -
>  src/libcamera/pipeline/ipu3/ipu3.cpp              | 15 +--------------
>  .../pipeline/raspberrypi/raspberrypi.cpp          |  9 ++-------
>  src/libcamera/pipeline/rkisp1/rkisp1.cpp          |  9 ++-------
>  src/libcamera/pipeline/rkisp1/rkisp1_path.cpp     |  2 --
>  src/libcamera/pipeline/simple/converter.cpp       |  7 ++-----
>  src/libcamera/pipeline/simple/converter.h         |  5 ++---
>  src/libcamera/pipeline/simple/simple.cpp          | 15 ++++-----------
>  src/libcamera/pipeline/uvcvideo/uvcvideo.cpp      |  5 +----
>  src/libcamera/pipeline/vimc/vimc.cpp              |  5 +----
>  src/libcamera/stream.cpp                          |  9 ++-------
>  src/v4l2/v4l2_camera.cpp                          | 14 ++++++++++----
>  src/v4l2/v4l2_camera.h                            |  5 +++--
>  src/v4l2/v4l2_camera_proxy.cpp                    |  8 +++-----
>  test/camera/buffer_import.cpp                     | 10 +++++++---
>  test/libtest/buffer_source.cpp                    |  4 ++--
>  test/libtest/buffer_source.h                      |  2 +-
>  test/v4l2_videodevice/buffer_cache.cpp            |  4 ++--

An update to src/android/ is missing, which breaks compilation of the
HAL :-S

>  19 files changed, 45 insertions(+), 86 deletions(-)
> 
> diff --git a/include/libcamera/stream.h b/include/libcamera/stream.h
> index bb47c390f8a1..f36aeffd9540 100644
> --- a/include/libcamera/stream.h
> +++ b/include/libcamera/stream.h
> @@ -45,8 +45,6 @@ struct StreamConfiguration {
>  	unsigned int stride;
>  	unsigned int frameSize;
>  
> -	unsigned int bufferCount;
> -
>  	Stream *stream() const { return stream_; }
>  	void setStream(Stream *stream) { stream_ = stream; }
>  	const StreamFormats &formats() const { return formats_; }
> diff --git a/src/libcamera/pipeline/ipu3/cio2.cpp b/src/libcamera/pipeline/ipu3/cio2.cpp
> index 3cd777d1b742..1e110fe0c189 100644
> --- a/src/libcamera/pipeline/ipu3/cio2.cpp
> +++ b/src/libcamera/pipeline/ipu3/cio2.cpp
> @@ -213,7 +213,6 @@ StreamConfiguration CIO2Device::generateConfiguration(Size size) const
>  
>  	cfg.size = sensorFormat.size;
>  	cfg.pixelFormat = mbusCodesToPixelFormat.at(sensorFormat.mbus_code);
> -	cfg.bufferCount = CIO2_BUFFER_COUNT;
>  
>  	return cfg;
>  }
> diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp
> index c8fcc2fda75f..f0a17a553bd3 100644
> --- a/src/libcamera/pipeline/ipu3/ipu3.cpp
> +++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
> @@ -291,7 +291,6 @@ CameraConfiguration::Status IPU3CameraConfiguration::validate()
>  			/* Initialize the RAW stream with the CIO2 configuration. */
>  			cfg->size = cio2Configuration_.size;
>  			cfg->pixelFormat = cio2Configuration_.pixelFormat;
> -			cfg->bufferCount = cio2Configuration_.bufferCount;
>  			cfg->stride = info.stride(cfg->size.width, 0, 64);
>  			cfg->frameSize = info.frameSize(cfg->size, 64);
>  			cfg->setStream(const_cast<Stream *>(&data_->rawStream_));
> @@ -335,7 +334,6 @@ CameraConfiguration::Status IPU3CameraConfiguration::validate()
>  					      IMGU_OUTPUT_HEIGHT_ALIGN);
>  
>  			cfg->pixelFormat = formats::NV12;
> -			cfg->bufferCount = IPU3_BUFFER_COUNT;
>  			cfg->stride = info.stride(cfg->size.width, 0, 1);
>  			cfg->frameSize = info.frameSize(cfg->size, 1);
>  
> @@ -403,7 +401,6 @@ CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
>  	Size sensorResolution = data->cio2_.sensor()->resolution();
>  	for (const StreamRole role : roles) {
>  		std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
> -		unsigned int bufferCount;
>  		PixelFormat pixelFormat;
>  		Size size;
>  
> @@ -424,7 +421,6 @@ CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
>  			size.height = utils::alignDown(size.height - 1,
>  						       IMGU_OUTPUT_HEIGHT_MARGIN);
>  			pixelFormat = formats::NV12;
> -			bufferCount = IPU3_BUFFER_COUNT;
>  			streamFormats[pixelFormat] = { { IMGU_OUTPUT_MIN_SIZE, size } };
>  
>  			break;
> @@ -434,7 +430,6 @@ CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
>  				data->cio2_.generateConfiguration(sensorResolution);
>  			pixelFormat = cio2Config.pixelFormat;
>  			size = cio2Config.size;
> -			bufferCount = cio2Config.bufferCount;
>  
>  			for (const PixelFormat &format : data->cio2_.formats())
>  				streamFormats[format] = data->cio2_.sizes();
> @@ -453,7 +448,6 @@ CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
>  					       .alignedDownTo(IMGU_OUTPUT_WIDTH_ALIGN,
>  							      IMGU_OUTPUT_HEIGHT_ALIGN);
>  			pixelFormat = formats::NV12;
> -			bufferCount = IPU3_BUFFER_COUNT;
>  			streamFormats[pixelFormat] = { { IMGU_OUTPUT_MIN_SIZE, size } };
>  
>  			break;
> @@ -470,7 +464,6 @@ CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
>  		StreamConfiguration cfg(formats);
>  		cfg.size = size;
>  		cfg.pixelFormat = pixelFormat;
> -		cfg.bufferCount = bufferCount;
>  		config->addConfiguration(cfg);
>  	}
>  
> @@ -668,15 +661,9 @@ int PipelineHandlerIPU3::allocateBuffers(Camera *camera)
>  {
>  	IPU3CameraData *data = cameraData(camera);
>  	ImgUDevice *imgu = data->imgu_;
> -	unsigned int bufferCount;
> +	unsigned int bufferCount = data->properties_.get(properties::QueueDepth);
>  	int ret;
>  
> -	bufferCount = std::max({
> -		data->outStream_.configuration().bufferCount,
> -		data->vfStream_.configuration().bufferCount,
> -		data->rawStream_.configuration().bufferCount,
> -	});

I'm not sure properties::QueueDepth is the right value here. This deals
with both the allocation of stats and params buffers, which are internal,
and the number of V4L2 buffer slots for the ImgU input and output. For
the latter, we should probably overallocate, as underallocation would
mean thrashing dmabuf mappings. For the former, we shouldn't allocate too
much, to avoid wasting memory, so we should take into account how long
the IPA would need to hold on to the params and stats buffers.

Jacopo, Jean-Michel, Niklas, any comment ?

> -
>  	ret = imgu->allocateBuffers(bufferCount);
>  	if (ret < 0)
>  		return ret;
> diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> index 3f35596fe550..44a8a472ae4f 100644
> --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> @@ -471,7 +471,6 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
>  	RPiCameraData *data = cameraData(camera);
>  	CameraConfiguration *config = new RPiCameraConfiguration(data);
>  	V4L2DeviceFormat sensorFormat;
> -	unsigned int bufferCount;
>  	PixelFormat pixelFormat;
>  	V4L2VideoDevice::Formats fmts;
>  	Size size;
> @@ -489,7 +488,6 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
>  			sensorFormat = findBestMode(fmts, size);
>  			pixelFormat = sensorFormat.fourcc.toPixelFormat();
>  			ASSERT(pixelFormat.isValid());
> -			bufferCount = 2;
>  			rawCount++;
>  			break;
>  
> @@ -498,7 +496,6 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
>  			pixelFormat = formats::NV12;
>  			/* Return the largest sensor resolution. */
>  			size = data->sensor_->resolution();
> -			bufferCount = 1;
>  			outCount++;
>  			break;
>  
> @@ -514,7 +511,6 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
>  			fmts = data->isp_[Isp::Output0].dev()->formats();
>  			pixelFormat = formats::YUV420;
>  			size = { 1920, 1080 };
> -			bufferCount = 4;
>  			outCount++;
>  			break;
>  
> @@ -522,7 +518,6 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
>  			fmts = data->isp_[Isp::Output0].dev()->formats();
>  			pixelFormat = formats::ARGB8888;
>  			size = { 800, 600 };
> -			bufferCount = 4;
>  			outCount++;
>  			break;
>  
> @@ -552,7 +547,6 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
>  		StreamConfiguration cfg(formats);
>  		cfg.size = size;
>  		cfg.pixelFormat = pixelFormat;
> -		cfg.bufferCount = bufferCount;
>  		config->addConfiguration(cfg);
>  	}
>  
> @@ -1142,6 +1136,7 @@ int PipelineHandlerRPi::prepareBuffers(Camera *camera)
>  {
>  	RPiCameraData *data = cameraData(camera);
>  	int ret;
> +	unsigned int bufferCount = data->properties_.get(properties::QueueDepth);
>  
>  	/*
>  	 * Decide how many internal buffers to allocate. For now, simply look
> @@ -1152,7 +1147,7 @@ int PipelineHandlerRPi::prepareBuffers(Camera *camera)
>  	unsigned int maxBuffers = 0;
>  	for (const Stream *s : camera->streams())
>  		if (static_cast<const RPi::Stream *>(s)->isExternal())
> -			maxBuffers = std::max(maxBuffers, s->configuration().bufferCount);
> +			maxBuffers = std::max(maxBuffers, bufferCount);

That now looks a bit weird, as bufferCount is constant. This being said,
the above comment for the IPU3 applies here. David, Naush, any comment ?

>  
>  	for (auto const stream : data->streams_) {
>  		ret = stream->prepareBuffers(maxBuffers);
> diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> index 2d95c1ca2a43..73d4ea6ba8f5 100644
> --- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> +++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> @@ -686,16 +686,11 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
>  	unsigned int ipaBufferId = 1;
>  	int ret;
>  
> -	unsigned int maxCount = std::max({
> -		data->mainPathStream_.configuration().bufferCount,
> -		data->selfPathStream_.configuration().bufferCount,
> -	});
> -
> -	ret = param_->allocateBuffers(maxCount, &paramBuffers_);
> +	ret = param_->allocateBuffers(RKISP1_BUFFER_COUNT, &paramBuffers_);
>  	if (ret < 0)
>  		goto error;
>  
> -	ret = stat_->allocateBuffers(maxCount, &statBuffers_);
> +	ret = stat_->allocateBuffers(RKISP1_BUFFER_COUNT, &statBuffers_);

Here it should be fine.

>  	if (ret < 0)
>  		goto error;
>  
> diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> index 25f482eb8d8e..200e3c2c4cca 100644
> --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> @@ -61,7 +61,6 @@ StreamConfiguration RkISP1Path::generateConfiguration(const Size &resolution)
>  	StreamConfiguration cfg(formats);
>  	cfg.pixelFormat = formats::NV12;
>  	cfg.size = maxResolution;
> -	cfg.bufferCount = RKISP1_BUFFER_COUNT;
>  
>  	return cfg;
>  }
> @@ -77,7 +76,6 @@ CameraConfiguration::Status RkISP1Path::validate(StreamConfiguration *cfg)
>  
>  	cfg->size.boundTo(maxResolution_);
>  	cfg->size.expandTo(minResolution_);
> -	cfg->bufferCount = RKISP1_BUFFER_COUNT;
>  
>  	V4L2DeviceFormat format;
>  	format.fourcc = video_->toV4L2PixelFormat(cfg->pixelFormat);
> diff --git a/src/libcamera/pipeline/simple/converter.cpp b/src/libcamera/pipeline/simple/converter.cpp
> index 68644ef6477f..54e7f1b051f7 100644
> --- a/src/libcamera/pipeline/simple/converter.cpp
> +++ b/src/libcamera/pipeline/simple/converter.cpp
> @@ -86,9 +86,6 @@ int SimpleConverter::Stream::configure(const StreamConfiguration &inputCfg,
>  		return -EINVAL;
>  	}
>  
> -	inputBufferCount_ = inputCfg.bufferCount;
> -	outputBufferCount_ = outputCfg.bufferCount;
> -
>  	return 0;
>  }
>  
> @@ -100,11 +97,11 @@ int SimpleConverter::Stream::exportBuffers(unsigned int count,
>  
>  int SimpleConverter::Stream::start()
>  {
> -	int ret = m2m_->output()->importBuffers(inputBufferCount_);
> +	int ret = m2m_->output()->importBuffers(SIMPLE_QUEUE_DEPTH);
>  	if (ret < 0)
>  		return ret;
>  
> -	ret = m2m_->capture()->importBuffers(outputBufferCount_);
> +	ret = m2m_->capture()->importBuffers(SIMPLE_QUEUE_DEPTH);

As above, we should probably overallocate here to avoid thrashing the
dmabuf mappings.

>  	if (ret < 0) {
>  		stop();
>  		return ret;
> diff --git a/src/libcamera/pipeline/simple/converter.h b/src/libcamera/pipeline/simple/converter.h
> index 480e528d2210..32313748cd24 100644
> --- a/src/libcamera/pipeline/simple/converter.h
> +++ b/src/libcamera/pipeline/simple/converter.h
> @@ -29,6 +29,8 @@ class SizeRange;
>  struct StreamConfiguration;
>  class V4L2M2MDevice;
>  
> +constexpr unsigned int SIMPLE_QUEUE_DEPTH = 3;
> +
>  class SimpleConverter
>  {
>  public:
> @@ -84,9 +86,6 @@ private:
>  		SimpleConverter *converter_;
>  		unsigned int index_;
>  		std::unique_ptr<V4L2M2MDevice> m2m_;
> -
> -		unsigned int inputBufferCount_;
> -		unsigned int outputBufferCount_;
>  	};
>  
>  	std::string deviceNode_;
> diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
> index b9f14be6733f..9f28bb66e2e7 100644
> --- a/src/libcamera/pipeline/simple/simple.cpp
> +++ b/src/libcamera/pipeline/simple/simple.cpp
> @@ -142,8 +142,6 @@ static const SimplePipelineInfo supportedDevices[] = {
>  
>  } /* namespace */
>  
> -static constexpr unsigned int kNumInternalBuffers = 3;
> -
>  class SimpleCameraData : public CameraData
>  {
>  public:
> @@ -425,7 +423,7 @@ int SimpleCameraData::init()
>  	}
>  
>  	properties_ = sensor_->properties();
> -	properties_.set(properties::QueueDepth, kNumInternalBuffers);
> +	properties_.set(properties::QueueDepth, SIMPLE_QUEUE_DEPTH);
>  
>  	return 0;
>  }
> @@ -616,7 +614,7 @@ CameraConfiguration::Status SimpleCameraConfiguration::validate()
>  		    cfg.size != pipeConfig_->captureSize)
>  			needConversion_ = true;
>  
> -		/* Set the stride, frameSize and bufferCount. */
> +		/* Set the stride and frameSize. */
>  		if (needConversion_) {
>  			std::tie(cfg.stride, cfg.frameSize) =
>  				converter->strideAndFrameSize(cfg.pixelFormat, cfg.size);
> @@ -634,8 +632,6 @@ CameraConfiguration::Status SimpleCameraConfiguration::validate()
>  			cfg.stride = format.planes[0].bpl;
>  			cfg.frameSize = format.planes[0].size;
>  		}
> -
> -		cfg.bufferCount = 3;
>  	}
>  
>  	return status;
> @@ -758,7 +754,6 @@ int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
>  	inputCfg.pixelFormat = pipeConfig->captureFormat;
>  	inputCfg.size = pipeConfig->captureSize;
>  	inputCfg.stride = captureFormat.planes[0].bpl;
> -	inputCfg.bufferCount = kNumInternalBuffers;
>  
>  	return converter_->configure(inputCfg, outputCfgs);
>  }
> @@ -791,12 +786,10 @@ int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
>  		 * When using the converter allocate a fixed number of internal
>  		 * buffers.
>  		 */
> -		ret = video->allocateBuffers(kNumInternalBuffers,
> +		ret = video->allocateBuffers(SIMPLE_QUEUE_DEPTH,

This should definitely not overallocate :-) 3 sounds good.

>  					     &data->converterBuffers_);
>  	} else {
> -		/* Otherwise, prepare for using buffers from the only stream. */
> -		Stream *stream = &data->streams_[0];
> -		ret = video->importBuffers(stream->configuration().bufferCount);
> +		ret = video->importBuffers(SIMPLE_QUEUE_DEPTH);

Here we can overallocate too.

>  	}
>  	if (ret < 0)
>  		return ret;
> diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> index a148c35f1265..94e6fd9d2d56 100644
> --- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> +++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> @@ -148,8 +148,6 @@ CameraConfiguration::Status UVCCameraConfiguration::validate()
>  		status = Adjusted;
>  	}
>  
> -	cfg.bufferCount = 4;
> -
>  	V4L2DeviceFormat format;
>  	format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
>  	format.size = cfg.size;
> @@ -191,7 +189,6 @@ CameraConfiguration *PipelineHandlerUVC::generateConfiguration(Camera *camera,
>  
>  	cfg.pixelFormat = formats.pixelformats().front();
>  	cfg.size = formats.sizes(cfg.pixelFormat).back();
> -	cfg.bufferCount = 4;
>  
>  	config->addConfiguration(cfg);
>  
> @@ -236,7 +233,7 @@ int PipelineHandlerUVC::exportFrameBuffers(Camera *camera,
>  int PipelineHandlerUVC::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
>  {
>  	UVCCameraData *data = cameraData(camera);
> -	unsigned int count = data->stream_.configuration().bufferCount;
> +	unsigned int count = data->properties_.get(properties::QueueDepth);
>  
>  	int ret = data->video_->importBuffers(count);

Same here, overallocation is best.

>  	if (ret < 0)
> diff --git a/src/libcamera/pipeline/vimc/vimc.cpp b/src/libcamera/pipeline/vimc/vimc.cpp
> index 22d6fdcbb141..891571afb3e5 100644
> --- a/src/libcamera/pipeline/vimc/vimc.cpp
> +++ b/src/libcamera/pipeline/vimc/vimc.cpp
> @@ -165,8 +165,6 @@ CameraConfiguration::Status VimcCameraConfiguration::validate()
>  		status = Adjusted;
>  	}
>  
> -	cfg.bufferCount = 4;
> -
>  	V4L2DeviceFormat format;
>  	format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
>  	format.size = cfg.size;
> @@ -222,7 +220,6 @@ CameraConfiguration *PipelineHandlerVimc::generateConfiguration(Camera *camera,
>  
>  	cfg.pixelFormat = formats::BGR888;
>  	cfg.size = { 1920, 1080 };
> -	cfg.bufferCount = 4;
>  
>  	config->addConfiguration(cfg);
>  
> @@ -312,7 +309,7 @@ int PipelineHandlerVimc::exportFrameBuffers(Camera *camera,
>  int PipelineHandlerVimc::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
>  {
>  	VimcCameraData *data = cameraData(camera);
> -	unsigned int count = data->stream_.configuration().bufferCount;
> +	unsigned int count = data->properties_.get(properties::QueueDepth);
>  
>  	int ret = data->video_->importBuffers(count);

I think you know by now what I would say :-)

>  	if (ret < 0)
> diff --git a/src/libcamera/stream.cpp b/src/libcamera/stream.cpp
> index f7bafcf8fc97..be57abce4eb3 100644
> --- a/src/libcamera/stream.cpp
> +++ b/src/libcamera/stream.cpp
> @@ -279,7 +279,7 @@ SizeRange StreamFormats::range(const PixelFormat &pixelformat) const
>   * handlers provide StreamFormats.
>   */
>  StreamConfiguration::StreamConfiguration()
> -	: pixelFormat(0), stride(0), frameSize(0), bufferCount(0),
> +	: pixelFormat(0), stride(0), frameSize(0),
>  	  stream_(nullptr)
>  {
>  }
> @@ -288,7 +288,7 @@ StreamConfiguration::StreamConfiguration()
>   * \brief Construct a configuration with stream formats
>   */
>  StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
> -	: pixelFormat(0), stride(0), frameSize(0), bufferCount(0),
> +	: pixelFormat(0), stride(0), frameSize(0),
>  	  stream_(nullptr), formats_(formats)
>  {
>  }
> @@ -323,11 +323,6 @@ StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
>   * validating the configuration with a call to CameraConfiguration::validate().
>   */
>  
> -/**
> - * \var StreamConfiguration::bufferCount
> - * \brief Requested number of buffers to allocate for the stream
> - */
> -
>  /**
>   * \fn StreamConfiguration::stream()
>   * \brief Retrieve the stream associated with the configuration
> diff --git a/src/v4l2/v4l2_camera.cpp b/src/v4l2/v4l2_camera.cpp
> index 53d97f3e6b86..79bf7aec7782 100644
> --- a/src/v4l2/v4l2_camera.cpp
> +++ b/src/v4l2/v4l2_camera.cpp
> @@ -10,6 +10,8 @@
>  #include <errno.h>
>  #include <unistd.h>
>  
> +#include <libcamera/property_ids.h>
> +
>  #include "libcamera/internal/log.h"
>  
>  using namespace libcamera;
> @@ -107,14 +109,12 @@ void V4L2Camera::requestComplete(Request *request)
>  }
>  
>  int V4L2Camera::configure(StreamConfiguration *streamConfigOut,
> -			  const Size &size, const PixelFormat &pixelformat,
> -			  unsigned int bufferCount)
> +			  const Size &size, const PixelFormat &pixelformat)
>  {
>  	StreamConfiguration &streamConfig = config_->at(0);
>  	streamConfig.size.width = size.width;
>  	streamConfig.size.height = size.height;
>  	streamConfig.pixelFormat = pixelformat;
> -	streamConfig.bufferCount = bufferCount;
>  	/* \todo memoryType (interval vs external) */
>  
>  	CameraConfiguration::Status validation = config_->validate();
> @@ -146,7 +146,6 @@ int V4L2Camera::validateConfiguration(const PixelFormat &pixelFormat,
>  	StreamConfiguration &cfg = config->at(0);
>  	cfg.size = size;
>  	cfg.pixelFormat = pixelFormat;
> -	cfg.bufferCount = 1;
>  
>  	CameraConfiguration::Status validation = config->validate();
>  	if (validation == CameraConfiguration::Invalid)
> @@ -299,3 +298,10 @@ bool V4L2Camera::isRunning()
>  {
>  	return isRunning_;
>  }
> +
> +int V4L2Camera::getBufCount(int count)
> +{
> +	int min = camera_->properties().get(properties::QueueDepth);
> +
> +	return std::max(count, min);
> +}

I'd name this function queueDepth() (we don't usually use a "get" prefix
for getters) and return the value of the property only. The caller can
then decide what to do with it.
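
For illustration, something along these lines (only a sketch, assuming
the QueueDepth property introduced earlier in the series; the exact
signature is up for discussion):

	/* v4l2_camera.cpp -- hypothetical shape of the suggested accessor */
	unsigned int V4L2Camera::queueDepth()
	{
		/* Return the property value only; callers decide how to use it. */
		return camera_->properties().get(properties::QueueDepth);
	}

The reqbufs handler could then clamp the requested count itself, e.g.
arg->count = std::max(arg->count, vcam_->queueDepth());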

> diff --git a/src/v4l2/v4l2_camera.h b/src/v4l2/v4l2_camera.h
> index d238046250e3..68df8ad05917 100644
> --- a/src/v4l2/v4l2_camera.h
> +++ b/src/v4l2/v4l2_camera.h
> @@ -45,8 +45,7 @@ public:
>  	std::vector<Buffer> completedBuffers();
>  
>  	int configure(StreamConfiguration *streamConfigOut,
> -		      const Size &size, const PixelFormat &pixelformat,
> -		      unsigned int bufferCount);
> +		      const Size &size, const PixelFormat &pixelformat);
>  	int validateConfiguration(const PixelFormat &pixelformat,
>  				  const Size &size,
>  				  StreamConfiguration *streamConfigOut);
> @@ -65,6 +64,8 @@ public:
>  
>  	bool isRunning();
>  
> +	int getBufCount(int count);
> +
>  private:
>  	void requestComplete(Request *request);
>  
> diff --git a/src/v4l2/v4l2_camera_proxy.cpp b/src/v4l2/v4l2_camera_proxy.cpp
> index f8bfe595e90e..cd32e44a01ad 100644
> --- a/src/v4l2/v4l2_camera_proxy.cpp
> +++ b/src/v4l2/v4l2_camera_proxy.cpp
> @@ -348,8 +348,7 @@ int V4L2CameraProxy::vidioc_s_fmt(V4L2CameraFile *file, struct v4l2_format *arg)
>  	Size size(arg->fmt.pix.width, arg->fmt.pix.height);
>  	V4L2PixelFormat v4l2Format = V4L2PixelFormat(arg->fmt.pix.pixelformat);
>  	ret = vcam_->configure(&streamConfig_, size,
> -			       PixelFormatInfo::info(v4l2Format).format,
> -			       bufferCount_);
> +			       PixelFormatInfo::info(v4l2Format).format);
>  	if (ret < 0)
>  		return -EINVAL;
>  
> @@ -490,14 +489,13 @@ int V4L2CameraProxy::vidioc_reqbufs(V4L2CameraFile *file, struct v4l2_requestbuf
>  	Size size(v4l2PixFormat_.width, v4l2PixFormat_.height);
>  	V4L2PixelFormat v4l2Format = V4L2PixelFormat(v4l2PixFormat_.pixelformat);
>  	int ret = vcam_->configure(&streamConfig_, size,
> -				   PixelFormatInfo::info(v4l2Format).format,
> -				   arg->count);
> +				   PixelFormatInfo::info(v4l2Format).format);
>  	if (ret < 0)
>  		return -EINVAL;
>  
>  	setFmtFromConfig(streamConfig_);
>  
> -	arg->count = streamConfig_.bufferCount;
> +	arg->count = vcam_->getBufCount(arg->count);
>  	bufferCount_ = arg->count;
>  
>  	ret = vcam_->allocBuffers(arg->count);
> diff --git a/test/camera/buffer_import.cpp b/test/camera/buffer_import.cpp
> index 61f4eb92ae95..f2c38bfb0b72 100644
> --- a/test/camera/buffer_import.cpp
> +++ b/test/camera/buffer_import.cpp
> @@ -12,6 +12,8 @@
>  #include <numeric>
>  #include <vector>
>  
> +#include <libcamera/property_ids.h>
> +
>  #include "libcamera/internal/device_enumerator.h"
>  #include "libcamera/internal/event_dispatcher.h"
>  #include "libcamera/internal/media_device.h"
> @@ -91,10 +93,12 @@ protected:
>  			return TestFail;
>  		}
>  
> +		unsigned int bufCount = camera_->properties().get(properties::QueueDepth);

Let's name the variable bufferCount or numBuffers for consistency.

There are a few issues to solve, but this is a really nice change !

> +
>  		Stream *stream = cfg.stream();
>  
>  		BufferSource source;
> -		int ret = source.allocate(cfg);
> +		int ret = source.allocate(cfg, bufCount);
>  		if (ret != TestPass)
>  			return ret;
>  
> @@ -138,10 +142,10 @@ protected:
>  		while (timer.isRunning())
>  			dispatcher->processEvents();
>  
> -		if (completeRequestsCount_ < cfg.bufferCount * 2) {
> +		if (completeRequestsCount_ < bufCount * 2) {
>  			std::cout << "Failed to capture enough frames (got "
>  				  << completeRequestsCount_ << " expected at least "
> -				  << cfg.bufferCount * 2 << ")" << std::endl;
> +				  << bufCount * 2 << ")" << std::endl;
>  			return TestFail;
>  		}
>  
> diff --git a/test/libtest/buffer_source.cpp b/test/libtest/buffer_source.cpp
> index 73563f2fc39d..c3d5286a2462 100644
> --- a/test/libtest/buffer_source.cpp
> +++ b/test/libtest/buffer_source.cpp
> @@ -24,7 +24,7 @@ BufferSource::~BufferSource()
>  		media_->release();
>  }
>  
> -int BufferSource::allocate(const StreamConfiguration &config)
> +int BufferSource::allocate(const StreamConfiguration &config, unsigned int count)
>  {
>  	/* Locate and open the video device. */
>  	std::string videoDeviceName = "vivid-000-vid-out";
> @@ -77,7 +77,7 @@ int BufferSource::allocate(const StreamConfiguration &config)
>  		return TestFail;
>  	}
>  
> -	if (video->allocateBuffers(config.bufferCount, &buffers_) < 0) {
> +	if (video->allocateBuffers(count, &buffers_) < 0) {
>  		std::cout << "Failed to allocate buffers" << std::endl;
>  		return TestFail;
>  	}
> diff --git a/test/libtest/buffer_source.h b/test/libtest/buffer_source.h
> index 14b4770e8d8a..6a18e269a575 100644
> --- a/test/libtest/buffer_source.h
> +++ b/test/libtest/buffer_source.h
> @@ -20,7 +20,7 @@ public:
>  	BufferSource();
>  	~BufferSource();
>  
> -	int allocate(const StreamConfiguration &config);
> +	int allocate(const StreamConfiguration &config, unsigned int count);
>  	const std::vector<std::unique_ptr<FrameBuffer>> &buffers();
>  
>  private:
> diff --git a/test/v4l2_videodevice/buffer_cache.cpp b/test/v4l2_videodevice/buffer_cache.cpp
> index b3f2bec11783..07fddfd2617c 100644
> --- a/test/v4l2_videodevice/buffer_cache.cpp
> +++ b/test/v4l2_videodevice/buffer_cache.cpp
> @@ -10,6 +10,7 @@
>  #include <vector>
>  
>  #include <libcamera/formats.h>
> +#include <libcamera/property_ids.h>
>  #include <libcamera/stream.h>
>  
>  #include "buffer_source.h"
> @@ -145,10 +146,9 @@ public:
>  		StreamConfiguration cfg;
>  		cfg.pixelFormat = formats::YUYV;
>  		cfg.size = Size(600, 800);
> -		cfg.bufferCount = numBuffers;
>  
>  		BufferSource source;
> -		int ret = source.allocate(cfg);
> +		int ret = source.allocate(cfg, numBuffers);
>  		if (ret != TestPass)
>  			return ret;
>
Niklas Söderlund April 26, 2021, 9:28 a.m. UTC | #2
Hello,

On 2021-04-26 06:19:57 +0300, Laurent Pinchart wrote:

<snip>

> > @@ -668,15 +661,9 @@ int PipelineHandlerIPU3::allocateBuffers(Camera 
> > *camera)
> >  {
> >  	IPU3CameraData *data = cameraData(camera);
> >  	ImgUDevice *imgu = data->imgu_;
> > -	unsigned int bufferCount;
> > +	unsigned int bufferCount = data->properties_.get(properties::QueueDepth);
> >  	int ret;
> >  
> > -	bufferCount = std::max({
> > -		data->outStream_.configuration().bufferCount,
> > -		data->vfStream_.configuration().bufferCount,
> > -		data->rawStream_.configuration().bufferCount,
> > -	});
> 
> I'm not sure properties::QueueDepth is the right value here. This deals
> with both allocation of stats and params buffers, which are internal,
> and the number of V4L2 buffer slots for the ImgU input and output. For
> the latter, we probably should overallocate, as underallocation would
> mean trashing dmabuf mappings. For the former, we shouldn't allocate too
> much to avoid wasting memory, so we should take into account how long
> the IPA would need to hold on the params and stats buffers.
> 
> Jacopo, Jean-Michel, Niklas, any comment ?

I agree that QueueDepth is likely not the best fit here, but neither was 
bufferCount :-) IIRC this was done very early on, when bufferCount was 
not really defined, and added as a first stepping stone. I'm very happy 
to see this being thought about.

Maybe the best solution for this series is to add some const numerical 
values to the IPU3 pipeline handler to replace the usage of bufferCount, 
but without making it depend on QueueDepth?

Maybe even separate it into two values, as Laurent outlines: one for the 
output and viewfinder slot allocation and one for the raw stream? Maybe 
it makes sense to link the raw stream count with the value used for the 
stats and param buffer counts?
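
Something along these lines, perhaps (names and values below are purely
illustrative, not taken from this series):

	/* ipu3.cpp -- hypothetical split of the former bufferCount usage */
	namespace {

	/*
	 * V4L2 buffer slots for the ImgU input and outputs. Slots only cache
	 * dmabuf mappings, so overallocating is cheap and avoids re-mapping.
	 */
	constexpr unsigned int kImgUBufferSlotCount = 16;

	/*
	 * Params and stats buffers allocated for the IPA. These consume real
	 * memory, so the count should stay close to how long the IPA holds
	 * on to them.
	 */
	constexpr unsigned int kIPAInternalBufferCount = 4;

	} /* namespace */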
Jean-Michel Hautbois April 26, 2021, 10:17 a.m. UTC | #3
Hi,

On 26/04/2021 11:28, Niklas Söderlund wrote:
> Hello,
> 
> On 2021-04-26 06:19:57 +0300, Laurent Pinchart wrote:
> 
> <snip>
> 
>>> @@ -668,15 +661,9 @@ int PipelineHandlerIPU3::allocateBuffers(Camera 
>>> *camera)
>>>  {
>>>  	IPU3CameraData *data = cameraData(camera);
>>>  	ImgUDevice *imgu = data->imgu_;
>>> -	unsigned int bufferCount;
>>> +	unsigned int bufferCount = data->properties_.get(properties::QueueDepth);
>>>  	int ret;
>>>  
>>> -	bufferCount = std::max({
>>> -		data->outStream_.configuration().bufferCount,
>>> -		data->vfStream_.configuration().bufferCount,
>>> -		data->rawStream_.configuration().bufferCount,
>>> -	});
>>
>> I'm not sure properties::QueueDepth is the right value here. This deals
>> with both allocation of stats and params buffers, which are internal,
>> and the number of V4L2 buffer slots for the ImgU input and output. For
>> the latter, we probably should overallocate, as underallocation would
>> mean trashing dmabuf mappings. For the former, we shouldn't allocate too
>> much to avoid wasting memory, so we should take into account how long
>> the IPA would need to hold on the params and stats buffers.
>>
>> Jacopo, Jean-Michel, Niklas, any comment ?
> 
> I agree that QueueDepth is likely not the best fit here, but neither was 
> bufferCount :-) IIRC this was done very early on where bufferCount was 
> not really defined and added as a first steeping stone. I'm very happy 
> to see this being thought about.
> 
> Maybe the best solution for this series is to add some const numerical 
> values to the IPU3 pipeline handler to replace the usage of bufferCount 
> but not making it depending on QueueDepth?
> 
> Maybe even separating it in two values as Laurent outlines, one for the 
> output and viewfinder slots allocation and one for the raw stream? Maybe 
> it make sens to link the raw streams count with the value used for the 
> stats and param buffer counts?

Indeed, the IPA may need some buffers, and even now it is not satisfying :-).
Overallocation is needed, and we should not go lower than the highest
frame delay in the delayedControls list. For IPU3, EXPOSURE has a delay
of 2, so we should not have fewer than 3 buffers to be "comfortable".
My 2 cents :-).
David Plowman April 26, 2021, 10:46 a.m. UTC | #4
Hi everyone

On Mon, 26 Apr 2021 at 11:17, Jean-Michel Hautbois
<jeanmichel.hautbois@ideasonboard.com> wrote:
>
> Hi,
>
> On 26/04/2021 11:28, Niklas Söderlund wrote:
> > Hello,
> >
> > On 2021-04-26 06:19:57 +0300, Laurent Pinchart wrote:
> >
> > <snip>
> >
> >>> @@ -668,15 +661,9 @@ int PipelineHandlerIPU3::allocateBuffers(Camera
> >>> *camera)
> >>>  {
> >>>     IPU3CameraData *data = cameraData(camera);
> >>>     ImgUDevice *imgu = data->imgu_;
> >>> -   unsigned int bufferCount;
> >>> +   unsigned int bufferCount = data->properties_.get(properties::QueueDepth);
> >>>     int ret;
> >>>
> >>> -   bufferCount = std::max({
> >>> -           data->outStream_.configuration().bufferCount,
> >>> -           data->vfStream_.configuration().bufferCount,
> >>> -           data->rawStream_.configuration().bufferCount,
> >>> -   });
> >>
> >> I'm not sure properties::QueueDepth is the right value here. This deals
> >> with both allocation of stats and params buffers, which are internal,
> >> and the number of V4L2 buffer slots for the ImgU input and output. For
> >> the latter, we probably should overallocate, as underallocation would
> >> mean trashing dmabuf mappings. For the former, we shouldn't allocate too
> >> much to avoid wasting memory, so we should take into account how long
> >> the IPA would need to hold on the params and stats buffers.
> >>
> >> Jacopo, Jean-Michel, Niklas, any comment ?
> >
> > I agree that QueueDepth is likely not the best fit here, but neither was
> > bufferCount :-) IIRC this was done very early on where bufferCount was
> > not really defined and added as a first steeping stone. I'm very happy
> > to see this being thought about.
> >
> > Maybe the best solution for this series is to add some const numerical
> > values to the IPU3 pipeline handler to replace the usage of bufferCount
> > but not making it depending on QueueDepth?
> >
> > Maybe even separating it in two values as Laurent outlines, one for the
> > output and viewfinder slots allocation and one for the raw stream? Maybe
> > it make sens to link the raw streams count with the value used for the
> > stats and param buffer counts?
>
> Indeed IPA may need some buffers, and even now it is not satisfying :-).
> Overallocating is a need, and we should not go lower than the highest
> number of frame delay in the delayedControls list. For IPU3, EXPOSURE
> has a delay of 2, so we should not have less than 3 buffers to be
> "comfortable".
> My 2 cents :-).

I've been following this discussion and there seem to be a lot of
queues and delays flying around! Maybe someone can help me to
understand - at the moment I believe there is:

* A notion of how many buffers need to be allocated for each stream.
The more you have, the more resilient processing tends to be to
latencies and delays.

* This number can be different for different streams. You could
imagine an application requiring fewer raw stream buffers, for
example.

* There's a minimum number of buffers that are required for the
pipeline to work. For the Raspberry Pi this is 1.

* I don't understand how delayed controls are related to this. They're
purely to cope with the fact that the sensor has delays before
settings that have been applied take effect, and that this delay may
vary between controls. They also vary with the sensor; they're not
fixed for a particular pipeline handler.

Best regards
David

Jean-Michel Hautbois April 26, 2021, 10:52 a.m. UTC | #5
Hi David,

On 26/04/2021 12:46, David Plowman wrote:
> Hi everyone
> 
> On Mon, 26 Apr 2021 at 11:17, Jean-Michel Hautbois
> <jeanmichel.hautbois@ideasonboard.com> wrote:
>>
>> Hi,
>>
>> On 26/04/2021 11:28, Niklas Söderlund wrote:
>>> Hello,
>>>
>>> On 2021-04-26 06:19:57 +0300, Laurent Pinchart wrote:
>>>
>>> <snip>
>>>
>>>>> @@ -668,15 +661,9 @@ int PipelineHandlerIPU3::allocateBuffers(Camera
>>>>> *camera)
>>>>>  {
>>>>>     IPU3CameraData *data = cameraData(camera);
>>>>>     ImgUDevice *imgu = data->imgu_;
>>>>> -   unsigned int bufferCount;
>>>>> +   unsigned int bufferCount = data->properties_.get(properties::QueueDepth);
>>>>>     int ret;
>>>>>
>>>>> -   bufferCount = std::max({
>>>>> -           data->outStream_.configuration().bufferCount,
>>>>> -           data->vfStream_.configuration().bufferCount,
>>>>> -           data->rawStream_.configuration().bufferCount,
>>>>> -   });
>>>>
>>>> I'm not sure properties::QueueDepth is the right value here. This deals
>>>> with both allocation of stats and params buffers, which are internal,
>>>> and the number of V4L2 buffer slots for the ImgU input and output. For
>>>> the latter, we probably should overallocate, as underallocation would
>>>> mean trashing dmabuf mappings. For the former, we shouldn't allocate too
>>>> much to avoid wasting memory, so we should take into account how long
>>>> the IPA would need to hold on the params and stats buffers.
>>>>
>>>> Jacopo, Jean-Michel, Niklas, any comment ?
>>>
>>> I agree that QueueDepth is likely not the best fit here, but neither was
>>> bufferCount :-) IIRC this was done very early on where bufferCount was
>>> not really defined and added as a first steeping stone. I'm very happy
>>> to see this being thought about.
>>>
>>> Maybe the best solution for this series is to add some const numerical
>>> values to the IPU3 pipeline handler to replace the usage of bufferCount
>>> but not making it depending on QueueDepth?
>>>
>>> Maybe even separating it in two values as Laurent outlines, one for the
>>> output and viewfinder slots allocation and one for the raw stream? Maybe
>>> it make sens to link the raw streams count with the value used for the
>>> stats and param buffer counts?
>>
>> Indeed IPA may need some buffers, and even now it is not satisfying :-).
>> Overallocating is a need, and we should not go lower than the highest
>> number of frame delay in the delayedControls list. For IPU3, EXPOSURE
>> has a delay of 2, so we should not have less than 3 buffers to be
>> "comfortable".
>> My 2 cents :-).
> 
> I've been following this discussion and there seem to be a lot of
> queues and delays flying around! Maybe someone can help me to
> understand - at the moment I believe there is:
> 
> * A notion of how many buffers need to be allocated for each
> stream.The more you have, the more resilient processing tends to be to
> latencies and delays.
> 
> * This number can be different for different streams. You could
> imagine an application requiring fewer raw stream buffers, for
> example.
> 
> * There's a minimum number of buffers that are required for the
> pipeline to work. For the Raspberry Pi this is 1.
> 
> * I don't understand how delayed controls are related to this. They're
> purely to cope with the fact that the sensor has delays before
> settings that have been applied take effect, and that this delay may
> vary between controls. They also vary with the sensor, they're not
> fixed for a particular pipeline handler.

True, I read too fast, sorry for having disturbed you :-/.
There is no relation to delayedControls.

JM
Naushir Patuck April 26, 2021, 11:56 a.m. UTC | #6
Hi Laurent, and Nicolas,

On Mon, 26 Apr 2021 at 04:20, Laurent Pinchart <
laurent.pinchart@ideasonboard.com> wrote:

> Hi Nícolas,
>
> Thank you for the patch.
>
> On Wed, Apr 21, 2021 at 01:51:39PM -0300, Nícolas F. R. A. Prado wrote:
> > Now that the amount of internal buffers allocated is hardcoded by the
> > pipelines, and the amount of buffers allocated by the
> > FrameBufferAllocator helper is passed through
> > FrameBufferAllocator::allocate(), we no longer need to have bufferCount
> > in the StreamConfiguration, so remove it.
> >
> > Signed-off-by: Nícolas F. R. A. Prado <nfraprado@collabora.com>
> > ---
> >  include/libcamera/stream.h                        |  2 --
> >  src/libcamera/pipeline/ipu3/cio2.cpp              |  1 -
> >  src/libcamera/pipeline/ipu3/ipu3.cpp              | 15 +--------------
> >  .../pipeline/raspberrypi/raspberrypi.cpp          |  9 ++-------
> >  src/libcamera/pipeline/rkisp1/rkisp1.cpp          |  9 ++-------
> >  src/libcamera/pipeline/rkisp1/rkisp1_path.cpp     |  2 --
> >  src/libcamera/pipeline/simple/converter.cpp       |  7 ++-----
> >  src/libcamera/pipeline/simple/converter.h         |  5 ++---
> >  src/libcamera/pipeline/simple/simple.cpp          | 15 ++++-----------
> >  src/libcamera/pipeline/uvcvideo/uvcvideo.cpp      |  5 +----
> >  src/libcamera/pipeline/vimc/vimc.cpp              |  5 +----
> >  src/libcamera/stream.cpp                          |  9 ++-------
> >  src/v4l2/v4l2_camera.cpp                          | 14 ++++++++++----
> >  src/v4l2/v4l2_camera.h                            |  5 +++--
> >  src/v4l2/v4l2_camera_proxy.cpp                    |  8 +++-----
> >  test/camera/buffer_import.cpp                     | 10 +++++++---
> >  test/libtest/buffer_source.cpp                    |  4 ++--
> >  test/libtest/buffer_source.h                      |  2 +-
> >  test/v4l2_videodevice/buffer_cache.cpp            |  4 ++--
>
> An update to src/android/ is missing, which breaks compilation of the
> HAL :-S
>
> >  19 files changed, 45 insertions(+), 86 deletions(-)
> >
> > diff --git a/include/libcamera/stream.h b/include/libcamera/stream.h
> > index bb47c390f8a1..f36aeffd9540 100644
> > --- a/include/libcamera/stream.h
> > +++ b/include/libcamera/stream.h
> > @@ -45,8 +45,6 @@ struct StreamConfiguration {
> >       unsigned int stride;
> >       unsigned int frameSize;
> >
> > -     unsigned int bufferCount;
> > -
> >       Stream *stream() const { return stream_; }
> >       void setStream(Stream *stream) { stream_ = stream; }
> >       const StreamFormats &formats() const { return formats_; }
> > diff --git a/src/libcamera/pipeline/ipu3/cio2.cpp
> b/src/libcamera/pipeline/ipu3/cio2.cpp
> > index 3cd777d1b742..1e110fe0c189 100644
> > --- a/src/libcamera/pipeline/ipu3/cio2.cpp
> > +++ b/src/libcamera/pipeline/ipu3/cio2.cpp
> > @@ -213,7 +213,6 @@ StreamConfiguration
> CIO2Device::generateConfiguration(Size size) const
> >
> >       cfg.size = sensorFormat.size;
> >       cfg.pixelFormat =
> mbusCodesToPixelFormat.at(sensorFormat.mbus_code);
> > -     cfg.bufferCount = CIO2_BUFFER_COUNT;
> >
> >       return cfg;
> >  }
> > diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp
> b/src/libcamera/pipeline/ipu3/ipu3.cpp
> > index c8fcc2fda75f..f0a17a553bd3 100644
> > --- a/src/libcamera/pipeline/ipu3/ipu3.cpp
> > +++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
> > @@ -291,7 +291,6 @@ CameraConfiguration::Status
> IPU3CameraConfiguration::validate()
> >                       /* Initialize the RAW stream with the CIO2
> configuration. */
> >                       cfg->size = cio2Configuration_.size;
> >                       cfg->pixelFormat = cio2Configuration_.pixelFormat;
> > -                     cfg->bufferCount = cio2Configuration_.bufferCount;
> >                       cfg->stride = info.stride(cfg->size.width, 0, 64);
> >                       cfg->frameSize = info.frameSize(cfg->size, 64);
> >                       cfg->setStream(const_cast<Stream
> *>(&data_->rawStream_));
> > @@ -335,7 +334,6 @@ CameraConfiguration::Status
> IPU3CameraConfiguration::validate()
> >                                             IMGU_OUTPUT_HEIGHT_ALIGN);
> >
> >                       cfg->pixelFormat = formats::NV12;
> > -                     cfg->bufferCount = IPU3_BUFFER_COUNT;
> >                       cfg->stride = info.stride(cfg->size.width, 0, 1);
> >                       cfg->frameSize = info.frameSize(cfg->size, 1);
> >
> > @@ -403,7 +401,6 @@ CameraConfiguration
> *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
> >       Size sensorResolution = data->cio2_.sensor()->resolution();
> >       for (const StreamRole role : roles) {
> >               std::map<PixelFormat, std::vector<SizeRange>>
> streamFormats;
> > -             unsigned int bufferCount;
> >               PixelFormat pixelFormat;
> >               Size size;
> >
> > @@ -424,7 +421,6 @@ CameraConfiguration
> *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
> >                       size.height = utils::alignDown(size.height - 1,
> >
> IMGU_OUTPUT_HEIGHT_MARGIN);
> >                       pixelFormat = formats::NV12;
> > -                     bufferCount = IPU3_BUFFER_COUNT;
> >                       streamFormats[pixelFormat] = { {
> IMGU_OUTPUT_MIN_SIZE, size } };
> >
> >                       break;
> > @@ -434,7 +430,6 @@ CameraConfiguration
> *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
> >
>  data->cio2_.generateConfiguration(sensorResolution);
> >                       pixelFormat = cio2Config.pixelFormat;
> >                       size = cio2Config.size;
> > -                     bufferCount = cio2Config.bufferCount;
> >
> >                       for (const PixelFormat &format :
> data->cio2_.formats())
> >                               streamFormats[format] =
> data->cio2_.sizes();
> > @@ -453,7 +448,6 @@ CameraConfiguration
> *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
> >
> .alignedDownTo(IMGU_OUTPUT_WIDTH_ALIGN,
> >
>  IMGU_OUTPUT_HEIGHT_ALIGN);
> >                       pixelFormat = formats::NV12;
> > -                     bufferCount = IPU3_BUFFER_COUNT;
> >                       streamFormats[pixelFormat] = { {
> IMGU_OUTPUT_MIN_SIZE, size } };
> >
> >                       break;
> > @@ -470,7 +464,6 @@ CameraConfiguration
> *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
> >               StreamConfiguration cfg(formats);
> >               cfg.size = size;
> >               cfg.pixelFormat = pixelFormat;
> > -             cfg.bufferCount = bufferCount;
> >               config->addConfiguration(cfg);
> >       }
> >
> > @@ -668,15 +661,9 @@ int PipelineHandlerIPU3::allocateBuffers(Camera
> *camera)
> >  {
> >       IPU3CameraData *data = cameraData(camera);
> >       ImgUDevice *imgu = data->imgu_;
> > -     unsigned int bufferCount;
> > +     unsigned int bufferCount =
> data->properties_.get(properties::QueueDepth);
> >       int ret;
> >
> > -     bufferCount = std::max({
> > -             data->outStream_.configuration().bufferCount,
> > -             data->vfStream_.configuration().bufferCount,
> > -             data->rawStream_.configuration().bufferCount,
> > -     });
>
> I'm not sure properties::QueueDepth is the right value here. This deals
> with both allocation of stats and params buffers, which are internal,
> and the number of V4L2 buffer slots for the ImgU input and output. For
> the latter, we probably should overallocate, as underallocation would
> mean trashing dmabuf mappings. For the former, we shouldn't allocate too
> much to avoid wasting memory, so we should take into account how long
> the IPA would need to hold on the params and stats buffers.
>
> Jacopo, Jean-Michel, Niklas, any comment ?
>
> > -
> >       ret = imgu->allocateBuffers(bufferCount);
> >       if (ret < 0)
> >               return ret;
> > diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > index 3f35596fe550..44a8a472ae4f 100644
> > --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > @@ -471,7 +471,6 @@ CameraConfiguration
> *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> >       RPiCameraData *data = cameraData(camera);
> >       CameraConfiguration *config = new RPiCameraConfiguration(data);
> >       V4L2DeviceFormat sensorFormat;
> > -     unsigned int bufferCount;
> >       PixelFormat pixelFormat;
> >       V4L2VideoDevice::Formats fmts;
> >       Size size;
> > @@ -489,7 +488,6 @@ CameraConfiguration
> *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> >                       sensorFormat = findBestMode(fmts, size);
> >                       pixelFormat = sensorFormat.fourcc.toPixelFormat();
> >                       ASSERT(pixelFormat.isValid());
> > -                     bufferCount = 2;
> >                       rawCount++;
> >                       break;
> >
> > @@ -498,7 +496,6 @@ CameraConfiguration
> *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> >                       pixelFormat = formats::NV12;
> >                       /* Return the largest sensor resolution. */
> >                       size = data->sensor_->resolution();
> > -                     bufferCount = 1;
> >                       outCount++;
> >                       break;
> >
> > @@ -514,7 +511,6 @@ CameraConfiguration
> *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> >                       fmts = data->isp_[Isp::Output0].dev()->formats();
> >                       pixelFormat = formats::YUV420;
> >                       size = { 1920, 1080 };
> > -                     bufferCount = 4;
> >                       outCount++;
> >                       break;
> >
> > @@ -522,7 +518,6 @@ CameraConfiguration
> *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> >                       fmts = data->isp_[Isp::Output0].dev()->formats();
> >                       pixelFormat = formats::ARGB8888;
> >                       size = { 800, 600 };
> > -                     bufferCount = 4;
> >                       outCount++;
> >                       break;
> >
> > @@ -552,7 +547,6 @@ CameraConfiguration
> *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> >               StreamConfiguration cfg(formats);
> >               cfg.size = size;
> >               cfg.pixelFormat = pixelFormat;
> > -             cfg.bufferCount = bufferCount;
> >               config->addConfiguration(cfg);
> >       }
> >
> > @@ -1142,6 +1136,7 @@ int PipelineHandlerRPi::prepareBuffers(Camera
> *camera)
> >  {
> >       RPiCameraData *data = cameraData(camera);
> >       int ret;
> > +     unsigned int bufferCount =
> data->properties_.get(properties::QueueDepth);
> >
> >       /*
> >        * Decide how many internal buffers to allocate. For now, simply
> look
> > @@ -1152,7 +1147,7 @@ int PipelineHandlerRPi::prepareBuffers(Camera
> *camera)
> >       unsigned int maxBuffers = 0;
> >       for (const Stream *s : camera->streams())
> >               if (static_cast<const RPi::Stream *>(s)->isExternal())
> > -                     maxBuffers = std::max(maxBuffers,
> s->configuration().bufferCount);
> > +                     maxBuffers = std::max(maxBuffers, bufferCount);
>
> That now looks a bit weird, as bufferCount is constant. This being said,
> the above comment for the IPU3 applies here. David, Naush, any comment ?
>

Similar to what others have mentioned, I don't think properties::QueueDepth
should be used over here. This bit of code may need reworking after these
changes have gone in, but for now, a const value for bufferCount might be
more suitable.

Naush



> >
> >       for (auto const stream : data->streams_) {
> >               ret = stream->prepareBuffers(maxBuffers);
> > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > index 2d95c1ca2a43..73d4ea6ba8f5 100644
> > --- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > +++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
> > @@ -686,16 +686,11 @@ int PipelineHandlerRkISP1::allocateBuffers(Camera
> *camera)
> >       unsigned int ipaBufferId = 1;
> >       int ret;
> >
> > -     unsigned int maxCount = std::max({
> > -             data->mainPathStream_.configuration().bufferCount,
> > -             data->selfPathStream_.configuration().bufferCount,
> > -     });
> > -
> > -     ret = param_->allocateBuffers(maxCount, &paramBuffers_);
> > +     ret = param_->allocateBuffers(RKISP1_BUFFER_COUNT, &paramBuffers_);
> >       if (ret < 0)
> >               goto error;
> >
> > -     ret = stat_->allocateBuffers(maxCount, &statBuffers_);
> > +     ret = stat_->allocateBuffers(RKISP1_BUFFER_COUNT, &statBuffers_);
>
> Here it should be fine.
>
> >       if (ret < 0)
> >               goto error;
> >
> > diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > index 25f482eb8d8e..200e3c2c4cca 100644
> > --- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > +++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
> > @@ -61,7 +61,6 @@ StreamConfiguration
> RkISP1Path::generateConfiguration(const Size &resolution)
> >       StreamConfiguration cfg(formats);
> >       cfg.pixelFormat = formats::NV12;
> >       cfg.size = maxResolution;
> > -     cfg.bufferCount = RKISP1_BUFFER_COUNT;
> >
> >       return cfg;
> >  }
> > @@ -77,7 +76,6 @@ CameraConfiguration::Status
> RkISP1Path::validate(StreamConfiguration *cfg)
> >
> >       cfg->size.boundTo(maxResolution_);
> >       cfg->size.expandTo(minResolution_);
> > -     cfg->bufferCount = RKISP1_BUFFER_COUNT;
> >
> >       V4L2DeviceFormat format;
> >       format.fourcc = video_->toV4L2PixelFormat(cfg->pixelFormat);
> > diff --git a/src/libcamera/pipeline/simple/converter.cpp
> b/src/libcamera/pipeline/simple/converter.cpp
> > index 68644ef6477f..54e7f1b051f7 100644
> > --- a/src/libcamera/pipeline/simple/converter.cpp
> > +++ b/src/libcamera/pipeline/simple/converter.cpp
> > @@ -86,9 +86,6 @@ int SimpleConverter::Stream::configure(const
> StreamConfiguration &inputCfg,
> >               return -EINVAL;
> >       }
> >
> > -     inputBufferCount_ = inputCfg.bufferCount;
> > -     outputBufferCount_ = outputCfg.bufferCount;
> > -
> >       return 0;
> >  }
> >
> > @@ -100,11 +97,11 @@ int SimpleConverter::Stream::exportBuffers(unsigned
> int count,
> >
> >  int SimpleConverter::Stream::start()
> >  {
> > -     int ret = m2m_->output()->importBuffers(inputBufferCount_);
> > +     int ret = m2m_->output()->importBuffers(SIMPLE_QUEUE_DEPTH);
> >       if (ret < 0)
> >               return ret;
> >
> > -     ret = m2m_->capture()->importBuffers(outputBufferCount_);
> > +     ret = m2m_->capture()->importBuffers(SIMPLE_QUEUE_DEPTH);
>
> As above, we should probably overallocate here to avoid trashing the
> dmabuf mappings.
>
> >       if (ret < 0) {
> >               stop();
> >               return ret;
> > diff --git a/src/libcamera/pipeline/simple/converter.h
> b/src/libcamera/pipeline/simple/converter.h
> > index 480e528d2210..32313748cd24 100644
> > --- a/src/libcamera/pipeline/simple/converter.h
> > +++ b/src/libcamera/pipeline/simple/converter.h
> > @@ -29,6 +29,8 @@ class SizeRange;
> >  struct StreamConfiguration;
> >  class V4L2M2MDevice;
> >
> > +constexpr unsigned int SIMPLE_QUEUE_DEPTH = 3;
> > +
> >  class SimpleConverter
> >  {
> >  public:
> > @@ -84,9 +86,6 @@ private:
> >               SimpleConverter *converter_;
> >               unsigned int index_;
> >               std::unique_ptr<V4L2M2MDevice> m2m_;
> > -
> > -             unsigned int inputBufferCount_;
> > -             unsigned int outputBufferCount_;
> >       };
> >
> >       std::string deviceNode_;
> > diff --git a/src/libcamera/pipeline/simple/simple.cpp
> b/src/libcamera/pipeline/simple/simple.cpp
> > index b9f14be6733f..9f28bb66e2e7 100644
> > --- a/src/libcamera/pipeline/simple/simple.cpp
> > +++ b/src/libcamera/pipeline/simple/simple.cpp
> > @@ -142,8 +142,6 @@ static const SimplePipelineInfo supportedDevices[] =
> {
> >
> >  } /* namespace */
> >
> > -static constexpr unsigned int kNumInternalBuffers = 3;
> > -
> >  class SimpleCameraData : public CameraData
> >  {
> >  public:
> > @@ -425,7 +423,7 @@ int SimpleCameraData::init()
> >       }
> >
> >       properties_ = sensor_->properties();
> > -     properties_.set(properties::QueueDepth, kNumInternalBuffers);
> > +     properties_.set(properties::QueueDepth, SIMPLE_QUEUE_DEPTH);
> >
> >       return 0;
> >  }
> > @@ -616,7 +614,7 @@ CameraConfiguration::Status
> SimpleCameraConfiguration::validate()
> >                   cfg.size != pipeConfig_->captureSize)
> >                       needConversion_ = true;
> >
> > -             /* Set the stride, frameSize and bufferCount. */
> > +             /* Set the stride and frameSize. */
> >               if (needConversion_) {
> >                       std::tie(cfg.stride, cfg.frameSize) =
> >
>  converter->strideAndFrameSize(cfg.pixelFormat, cfg.size);
> > @@ -634,8 +632,6 @@ CameraConfiguration::Status
> SimpleCameraConfiguration::validate()
> >                       cfg.stride = format.planes[0].bpl;
> >                       cfg.frameSize = format.planes[0].size;
> >               }
> > -
> > -             cfg.bufferCount = 3;
> >       }
> >
> >       return status;
> > @@ -758,7 +754,6 @@ int SimplePipelineHandler::configure(Camera *camera,
> CameraConfiguration *c)
> >       inputCfg.pixelFormat = pipeConfig->captureFormat;
> >       inputCfg.size = pipeConfig->captureSize;
> >       inputCfg.stride = captureFormat.planes[0].bpl;
> > -     inputCfg.bufferCount = kNumInternalBuffers;
> >
> >       return converter_->configure(inputCfg, outputCfgs);
> >  }
> > @@ -791,12 +786,10 @@ int SimplePipelineHandler::start(Camera *camera,
> [[maybe_unused]] const ControlL
> >                * When using the converter allocate a fixed number of
> internal
> >                * buffers.
> >                */
> > -             ret = video->allocateBuffers(kNumInternalBuffers,
> > +             ret = video->allocateBuffers(SIMPLE_QUEUE_DEPTH,
>
> This should definitely not overallocate :-) 3 sounds good.
>
> >                                            &data->converterBuffers_);
> >       } else {
> > -             /* Otherwise, prepare for using buffers from the only
> stream. */
> > -             Stream *stream = &data->streams_[0];
> > -             ret =
> video->importBuffers(stream->configuration().bufferCount);
> > +             ret = video->importBuffers(SIMPLE_QUEUE_DEPTH);
>
> Here we can overallocate too.
>
> >       }
> >       if (ret < 0)
> >               return ret;
> > diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > index a148c35f1265..94e6fd9d2d56 100644
> > --- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > +++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
> > @@ -148,8 +148,6 @@ CameraConfiguration::Status
> UVCCameraConfiguration::validate()
> >               status = Adjusted;
> >       }
> >
> > -     cfg.bufferCount = 4;
> > -
> >       V4L2DeviceFormat format;
> >       format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
> >       format.size = cfg.size;
> > @@ -191,7 +189,6 @@ CameraConfiguration
> *PipelineHandlerUVC::generateConfiguration(Camera *camera,
> >
> >       cfg.pixelFormat = formats.pixelformats().front();
> >       cfg.size = formats.sizes(cfg.pixelFormat).back();
> > -     cfg.bufferCount = 4;
> >
> >       config->addConfiguration(cfg);
> >
> > @@ -236,7 +233,7 @@ int PipelineHandlerUVC::exportFrameBuffers(Camera
> *camera,
> >  int PipelineHandlerUVC::start(Camera *camera, [[maybe_unused]] const
> ControlList *controls)
> >  {
> >       UVCCameraData *data = cameraData(camera);
> > -     unsigned int count = data->stream_.configuration().bufferCount;
> > +     unsigned int count = data->properties_.get(properties::QueueDepth);
> >
> >       int ret = data->video_->importBuffers(count);
>
> Same here, overallocation is best.
>
> >       if (ret < 0)
> > diff --git a/src/libcamera/pipeline/vimc/vimc.cpp
> b/src/libcamera/pipeline/vimc/vimc.cpp
> > index 22d6fdcbb141..891571afb3e5 100644
> > --- a/src/libcamera/pipeline/vimc/vimc.cpp
> > +++ b/src/libcamera/pipeline/vimc/vimc.cpp
> > @@ -165,8 +165,6 @@ CameraConfiguration::Status
> VimcCameraConfiguration::validate()
> >               status = Adjusted;
> >       }
> >
> > -     cfg.bufferCount = 4;
> > -
> >       V4L2DeviceFormat format;
> >       format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
> >       format.size = cfg.size;
> > @@ -222,7 +220,6 @@ CameraConfiguration
> *PipelineHandlerVimc::generateConfiguration(Camera *camera,
> >
> >       cfg.pixelFormat = formats::BGR888;
> >       cfg.size = { 1920, 1080 };
> > -     cfg.bufferCount = 4;
> >
> >       config->addConfiguration(cfg);
> >
> > @@ -312,7 +309,7 @@ int PipelineHandlerVimc::exportFrameBuffers(Camera
> *camera,
> >  int PipelineHandlerVimc::start(Camera *camera, [[maybe_unused]] const
> ControlList *controls)
> >  {
> >       VimcCameraData *data = cameraData(camera);
> > -     unsigned int count = data->stream_.configuration().bufferCount;
> > +     unsigned int count = data->properties_.get(properties::QueueDepth);
> >
> >       int ret = data->video_->importBuffers(count);
>
> I think you know by now what I would say :-)
>
> >       if (ret < 0)
> > diff --git a/src/libcamera/stream.cpp b/src/libcamera/stream.cpp
> > index f7bafcf8fc97..be57abce4eb3 100644
> > --- a/src/libcamera/stream.cpp
> > +++ b/src/libcamera/stream.cpp
> > @@ -279,7 +279,7 @@ SizeRange StreamFormats::range(const PixelFormat
> &pixelformat) const
> >   * handlers provide StreamFormats.
> >   */
> >  StreamConfiguration::StreamConfiguration()
> > -     : pixelFormat(0), stride(0), frameSize(0), bufferCount(0),
> > +     : pixelFormat(0), stride(0), frameSize(0),
> >         stream_(nullptr)
> >  {
> >  }
> > @@ -288,7 +288,7 @@ StreamConfiguration::StreamConfiguration()
> >   * \brief Construct a configuration with stream formats
> >   */
> >  StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
> > -     : pixelFormat(0), stride(0), frameSize(0), bufferCount(0),
> > +     : pixelFormat(0), stride(0), frameSize(0),
> >         stream_(nullptr), formats_(formats)
> >  {
> >  }
> > @@ -323,11 +323,6 @@ StreamConfiguration::StreamConfiguration(const
> StreamFormats &formats)
> >   * validating the configuration with a call to
> CameraConfiguration::validate().
> >   */
> >
> > -/**
> > - * \var StreamConfiguration::bufferCount
> > - * \brief Requested number of buffers to allocate for the stream
> > - */
> > -
> >  /**
> >   * \fn StreamConfiguration::stream()
> >   * \brief Retrieve the stream associated with the configuration
> > diff --git a/src/v4l2/v4l2_camera.cpp b/src/v4l2/v4l2_camera.cpp
> > index 53d97f3e6b86..79bf7aec7782 100644
> > --- a/src/v4l2/v4l2_camera.cpp
> > +++ b/src/v4l2/v4l2_camera.cpp
> > @@ -10,6 +10,8 @@
> >  #include <errno.h>
> >  #include <unistd.h>
> >
> > +#include <libcamera/property_ids.h>
> > +
> >  #include "libcamera/internal/log.h"
> >
> >  using namespace libcamera;
> > @@ -107,14 +109,12 @@ void V4L2Camera::requestComplete(Request *request)
> >  }
> >
> >  int V4L2Camera::configure(StreamConfiguration *streamConfigOut,
> > -                       const Size &size, const PixelFormat &pixelformat,
> > -                       unsigned int bufferCount)
> > +                       const Size &size, const PixelFormat &pixelformat)
> >  {
> >       StreamConfiguration &streamConfig = config_->at(0);
> >       streamConfig.size.width = size.width;
> >       streamConfig.size.height = size.height;
> >       streamConfig.pixelFormat = pixelformat;
> > -     streamConfig.bufferCount = bufferCount;
> >       /* \todo memoryType (interval vs external) */
> >
> >       CameraConfiguration::Status validation = config_->validate();
> > @@ -146,7 +146,6 @@ int V4L2Camera::validateConfiguration(const
> PixelFormat &pixelFormat,
> >       StreamConfiguration &cfg = config->at(0);
> >       cfg.size = size;
> >       cfg.pixelFormat = pixelFormat;
> > -     cfg.bufferCount = 1;
> >
> >       CameraConfiguration::Status validation = config->validate();
> >       if (validation == CameraConfiguration::Invalid)
> > @@ -299,3 +298,10 @@ bool V4L2Camera::isRunning()
> >  {
> >       return isRunning_;
> >  }
> > +
> > +int V4L2Camera::getBufCount(int count)
> > +{
> > +     int min = camera_->properties().get(properties::QueueDepth);
> > +
> > +     return std::max(count, min);
> > +}
>
> I'd name this function queueDepth() (we don't usually use a "get" prefix
> for getters) and return the value of the property only. The caller can
> then decide what to do with it.
>
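A minimal sketch of that suggestion, assuming the QueueDepth property is always populated by the pipeline handler; the function name and the call site shown are illustrative, not the final code:

	/* Sketch only: return the property value and let the caller decide. */
	int V4L2Camera::queueDepth()
	{
		return camera_->properties().get(properties::QueueDepth);
	}

	/* A caller such as vidioc_reqbufs() could then clamp the count itself: */
	arg->count = std::max<unsigned int>(arg->count, vcam_->queueDepth());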
> > diff --git a/src/v4l2/v4l2_camera.h b/src/v4l2/v4l2_camera.h
> > index d238046250e3..68df8ad05917 100644
> > --- a/src/v4l2/v4l2_camera.h
> > +++ b/src/v4l2/v4l2_camera.h
> > @@ -45,8 +45,7 @@ public:
> >       std::vector<Buffer> completedBuffers();
> >
> >       int configure(StreamConfiguration *streamConfigOut,
> > -                   const Size &size, const PixelFormat &pixelformat,
> > -                   unsigned int bufferCount);
> > +                   const Size &size, const PixelFormat &pixelformat);
> >       int validateConfiguration(const PixelFormat &pixelformat,
> >                                 const Size &size,
> >                                 StreamConfiguration *streamConfigOut);
> > @@ -65,6 +64,8 @@ public:
> >
> >       bool isRunning();
> >
> > +     int getBufCount(int count);
> > +
> >  private:
> >       void requestComplete(Request *request);
> >
> > diff --git a/src/v4l2/v4l2_camera_proxy.cpp
> b/src/v4l2/v4l2_camera_proxy.cpp
> > index f8bfe595e90e..cd32e44a01ad 100644
> > --- a/src/v4l2/v4l2_camera_proxy.cpp
> > +++ b/src/v4l2/v4l2_camera_proxy.cpp
> > @@ -348,8 +348,7 @@ int V4L2CameraProxy::vidioc_s_fmt(V4L2CameraFile
> *file, struct v4l2_format *arg)
> >       Size size(arg->fmt.pix.width, arg->fmt.pix.height);
> >       V4L2PixelFormat v4l2Format =
> V4L2PixelFormat(arg->fmt.pix.pixelformat);
> >       ret = vcam_->configure(&streamConfig_, size,
> > -                            PixelFormatInfo::info(v4l2Format).format,
> > -                            bufferCount_);
> > +                            PixelFormatInfo::info(v4l2Format).format);
> >       if (ret < 0)
> >               return -EINVAL;
> >
> > @@ -490,14 +489,13 @@ int V4L2CameraProxy::vidioc_reqbufs(V4L2CameraFile
> *file, struct v4l2_requestbuf
> >       Size size(v4l2PixFormat_.width, v4l2PixFormat_.height);
> >       V4L2PixelFormat v4l2Format =
> V4L2PixelFormat(v4l2PixFormat_.pixelformat);
> >       int ret = vcam_->configure(&streamConfig_, size,
> > -
> PixelFormatInfo::info(v4l2Format).format,
> > -                                arg->count);
> > +
> PixelFormatInfo::info(v4l2Format).format);
> >       if (ret < 0)
> >               return -EINVAL;
> >
> >       setFmtFromConfig(streamConfig_);
> >
> > -     arg->count = streamConfig_.bufferCount;
> > +     arg->count = vcam_->getBufCount(arg->count);
> >       bufferCount_ = arg->count;
> >
> >       ret = vcam_->allocBuffers(arg->count);
> > diff --git a/test/camera/buffer_import.cpp
> b/test/camera/buffer_import.cpp
> > index 61f4eb92ae95..f2c38bfb0b72 100644
> > --- a/test/camera/buffer_import.cpp
> > +++ b/test/camera/buffer_import.cpp
> > @@ -12,6 +12,8 @@
> >  #include <numeric>
> >  #include <vector>
> >
> > +#include <libcamera/property_ids.h>
> > +
> >  #include "libcamera/internal/device_enumerator.h"
> >  #include "libcamera/internal/event_dispatcher.h"
> >  #include "libcamera/internal/media_device.h"
> > @@ -91,10 +93,12 @@ protected:
> >                       return TestFail;
> >               }
> >
> > +             unsigned int bufCount =
> camera_->properties().get(properties::QueueDepth);
>
> Let's name the variable bufferCount or numBuffers for consistency.
>
> There are a few issues to solve, but this is a really nice change!
>
> > +
> >               Stream *stream = cfg.stream();
> >
> >               BufferSource source;
> > -             int ret = source.allocate(cfg);
> > +             int ret = source.allocate(cfg, bufCount);
> >               if (ret != TestPass)
> >                       return ret;
> >
> > @@ -138,10 +142,10 @@ protected:
> >               while (timer.isRunning())
> >                       dispatcher->processEvents();
> >
> > -             if (completeRequestsCount_ < cfg.bufferCount * 2) {
> > +             if (completeRequestsCount_ < bufCount * 2) {
> >                       std::cout << "Failed to capture enough frames (got
> "
> >                                 << completeRequestsCount_ << " expected
> at least "
> > -                               << cfg.bufferCount * 2 << ")" <<
> std::endl;
> > +                               << bufCount * 2 << ")" << std::endl;
> >                       return TestFail;
> >               }
> >
> > diff --git a/test/libtest/buffer_source.cpp
> b/test/libtest/buffer_source.cpp
> > index 73563f2fc39d..c3d5286a2462 100644
> > --- a/test/libtest/buffer_source.cpp
> > +++ b/test/libtest/buffer_source.cpp
> > @@ -24,7 +24,7 @@ BufferSource::~BufferSource()
> >               media_->release();
> >  }
> >
> > -int BufferSource::allocate(const StreamConfiguration &config)
> > +int BufferSource::allocate(const StreamConfiguration &config, unsigned
> int count)
> >  {
> >       /* Locate and open the video device. */
> >       std::string videoDeviceName = "vivid-000-vid-out";
> > @@ -77,7 +77,7 @@ int BufferSource::allocate(const StreamConfiguration
> &config)
> >               return TestFail;
> >       }
> >
> > -     if (video->allocateBuffers(config.bufferCount, &buffers_) < 0) {
> > +     if (video->allocateBuffers(count, &buffers_) < 0) {
> >               std::cout << "Failed to allocate buffers" << std::endl;
> >               return TestFail;
> >       }
> > diff --git a/test/libtest/buffer_source.h b/test/libtest/buffer_source.h
> > index 14b4770e8d8a..6a18e269a575 100644
> > --- a/test/libtest/buffer_source.h
> > +++ b/test/libtest/buffer_source.h
> > @@ -20,7 +20,7 @@ public:
> >       BufferSource();
> >       ~BufferSource();
> >
> > -     int allocate(const StreamConfiguration &config);
> > +     int allocate(const StreamConfiguration &config, unsigned int
> count);
> >       const std::vector<std::unique_ptr<FrameBuffer>> &buffers();
> >
> >  private:
> > diff --git a/test/v4l2_videodevice/buffer_cache.cpp
> b/test/v4l2_videodevice/buffer_cache.cpp
> > index b3f2bec11783..07fddfd2617c 100644
> > --- a/test/v4l2_videodevice/buffer_cache.cpp
> > +++ b/test/v4l2_videodevice/buffer_cache.cpp
> > @@ -10,6 +10,7 @@
> >  #include <vector>
> >
> >  #include <libcamera/formats.h>
> > +#include <libcamera/property_ids.h>
> >  #include <libcamera/stream.h>
> >
> >  #include "buffer_source.h"
> > @@ -145,10 +146,9 @@ public:
> >               StreamConfiguration cfg;
> >               cfg.pixelFormat = formats::YUYV;
> >               cfg.size = Size(600, 800);
> > -             cfg.bufferCount = numBuffers;
> >
> >               BufferSource source;
> > -             int ret = source.allocate(cfg);
> > +             int ret = source.allocate(cfg, numBuffers);
> >               if (ret != TestPass)
> >                       return ret;
> >
>
> --
> Regards,
>
> Laurent Pinchart
>
Nícolas F. R. A. Prado April 27, 2021, 4:56 p.m. UTC | #7
Hi,

On 2021-04-26 06:28, Niklas Söderlund wrote:

> Hello,
>
> On 2021-04-26 06:19:57 +0300, Laurent Pinchart wrote:
>
> <snip>
>
> > > @@ -668,15 +661,9 @@ int PipelineHandlerIPU3::allocateBuffers(Camera 
> > > *camera)
> > >  {
> > >  	IPU3CameraData *data = cameraData(camera);
> > >  	ImgUDevice *imgu = data->imgu_;
> > > -	unsigned int bufferCount;
> > > +	unsigned int bufferCount = data->properties_.get(properties::QueueDepth);
> > >  	int ret;
> > >  
> > > -	bufferCount = std::max({
> > > -		data->outStream_.configuration().bufferCount,
> > > -		data->vfStream_.configuration().bufferCount,
> > > -		data->rawStream_.configuration().bufferCount,
> > > -	});
> > 
> > I'm not sure properties::QueueDepth is the right value here. This deals
> > with both allocation of stats and params buffers, which are internal,
> > and the number of V4L2 buffer slots for the ImgU input and output. For
> > the latter, we probably should overallocate, as underallocation would
> > mean trashing dmabuf mappings. For the former, we shouldn't allocate too
> > much to avoid wasting memory, so we should take into account how long
> > the IPA would need to hold on to the params and stats buffers.
> > 
> > Jacopo, Jean-Michel, Niklas, any comment ?
>
> I agree that QueueDepth is likely not the best fit here, but neither was
> bufferCount :-) IIRC this was done very early on, when bufferCount was
> not really defined and was added as a first stepping stone. I'm very happy
> to see this being thought about.
>
> Maybe the best solution for this series is to add some const numerical
> values to the IPU3 pipeline handler to replace the usage of bufferCount,
> rather than making them depend on QueueDepth?
>
> Maybe even separating it into two values as Laurent outlines, one for the
> output and viewfinder slot allocation and one for the raw stream? Maybe
> it makes sense to link the raw stream's count with the value used for the
> stats and param buffer counts?

Okay, this makes perfect sense. So if I'm understanding it correctly, in general
we don't want to use QueueDepth in importBuffers() and allocateBuffers().
importBuffers() should err on the larger side, since having fewer buffer slots
than the number of queued requests hurts performance. allocateBuffers(), on the
other hand, is only for internal buffers, so it doesn't need to scale with the
number of requests sent, and how many are needed concerns only the IPA.

Now, my first question is: in an ideal world importBuffers() should always
"overallocate", meaning that however many requests are sent to the camera
simultaneously, there are always enough buffer slots, so performance is
unaffected. In reality resources are limited, so we should create a fixed
number of buffer slots, and by "overallocate" you just meant keeping it on
the larger side, right Laurent?

So I guess my doubt is: how many are we talking about? Do importBuffers(5) and
allocateBuffers(3) sound reasonable? Or maybe we could keep allocateBuffers at
4 for now, since I'm not sure whether the IPA would work with less, and 4 is
the count that has been used all along, so it's probably a safer bet.
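
To make the distinction concrete, a sketch of how the IPU3 handler could split the two roles that bufferCount used to conflate; the constant names and values are placeholders for whatever the discussion settles on, and how ImgUDevice would consume them is left open:

	/* Sketch only: separate counts for the two roles. */
	static constexpr unsigned int kIPU3InternalBufferCount = 4; /* params/stats buffers held by the IPA */
	static constexpr unsigned int kIPU3BufferSlotCount = 8;     /* V4L2 buffer slots for the ImgU queues */

PipelineHandlerIPU3::allocateBuffers() would then size the params/stats allocation with the first constant and the ImgU import slots with the second, instead of deriving both from QueueDepth.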

Thanks,
Nícolas

Patch
diff mbox series

diff --git a/include/libcamera/stream.h b/include/libcamera/stream.h
index bb47c390f8a1..f36aeffd9540 100644
--- a/include/libcamera/stream.h
+++ b/include/libcamera/stream.h
@@ -45,8 +45,6 @@  struct StreamConfiguration {
 	unsigned int stride;
 	unsigned int frameSize;
 
-	unsigned int bufferCount;
-
 	Stream *stream() const { return stream_; }
 	void setStream(Stream *stream) { stream_ = stream; }
 	const StreamFormats &formats() const { return formats_; }
diff --git a/src/libcamera/pipeline/ipu3/cio2.cpp b/src/libcamera/pipeline/ipu3/cio2.cpp
index 3cd777d1b742..1e110fe0c189 100644
--- a/src/libcamera/pipeline/ipu3/cio2.cpp
+++ b/src/libcamera/pipeline/ipu3/cio2.cpp
@@ -213,7 +213,6 @@  StreamConfiguration CIO2Device::generateConfiguration(Size size) const
 
 	cfg.size = sensorFormat.size;
 	cfg.pixelFormat = mbusCodesToPixelFormat.at(sensorFormat.mbus_code);
-	cfg.bufferCount = CIO2_BUFFER_COUNT;
 
 	return cfg;
 }
diff --git a/src/libcamera/pipeline/ipu3/ipu3.cpp b/src/libcamera/pipeline/ipu3/ipu3.cpp
index c8fcc2fda75f..f0a17a553bd3 100644
--- a/src/libcamera/pipeline/ipu3/ipu3.cpp
+++ b/src/libcamera/pipeline/ipu3/ipu3.cpp
@@ -291,7 +291,6 @@  CameraConfiguration::Status IPU3CameraConfiguration::validate()
 			/* Initialize the RAW stream with the CIO2 configuration. */
 			cfg->size = cio2Configuration_.size;
 			cfg->pixelFormat = cio2Configuration_.pixelFormat;
-			cfg->bufferCount = cio2Configuration_.bufferCount;
 			cfg->stride = info.stride(cfg->size.width, 0, 64);
 			cfg->frameSize = info.frameSize(cfg->size, 64);
 			cfg->setStream(const_cast<Stream *>(&data_->rawStream_));
@@ -335,7 +334,6 @@  CameraConfiguration::Status IPU3CameraConfiguration::validate()
 					      IMGU_OUTPUT_HEIGHT_ALIGN);
 
 			cfg->pixelFormat = formats::NV12;
-			cfg->bufferCount = IPU3_BUFFER_COUNT;
 			cfg->stride = info.stride(cfg->size.width, 0, 1);
 			cfg->frameSize = info.frameSize(cfg->size, 1);
 
@@ -403,7 +401,6 @@  CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
 	Size sensorResolution = data->cio2_.sensor()->resolution();
 	for (const StreamRole role : roles) {
 		std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
-		unsigned int bufferCount;
 		PixelFormat pixelFormat;
 		Size size;
 
@@ -424,7 +421,6 @@  CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
 			size.height = utils::alignDown(size.height - 1,
 						       IMGU_OUTPUT_HEIGHT_MARGIN);
 			pixelFormat = formats::NV12;
-			bufferCount = IPU3_BUFFER_COUNT;
 			streamFormats[pixelFormat] = { { IMGU_OUTPUT_MIN_SIZE, size } };
 
 			break;
@@ -434,7 +430,6 @@  CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
 				data->cio2_.generateConfiguration(sensorResolution);
 			pixelFormat = cio2Config.pixelFormat;
 			size = cio2Config.size;
-			bufferCount = cio2Config.bufferCount;
 
 			for (const PixelFormat &format : data->cio2_.formats())
 				streamFormats[format] = data->cio2_.sizes();
@@ -453,7 +448,6 @@  CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
 					       .alignedDownTo(IMGU_OUTPUT_WIDTH_ALIGN,
 							      IMGU_OUTPUT_HEIGHT_ALIGN);
 			pixelFormat = formats::NV12;
-			bufferCount = IPU3_BUFFER_COUNT;
 			streamFormats[pixelFormat] = { { IMGU_OUTPUT_MIN_SIZE, size } };
 
 			break;
@@ -470,7 +464,6 @@  CameraConfiguration *PipelineHandlerIPU3::generateConfiguration(Camera *camera,
 		StreamConfiguration cfg(formats);
 		cfg.size = size;
 		cfg.pixelFormat = pixelFormat;
-		cfg.bufferCount = bufferCount;
 		config->addConfiguration(cfg);
 	}
 
@@ -668,15 +661,9 @@  int PipelineHandlerIPU3::allocateBuffers(Camera *camera)
 {
 	IPU3CameraData *data = cameraData(camera);
 	ImgUDevice *imgu = data->imgu_;
-	unsigned int bufferCount;
+	unsigned int bufferCount = data->properties_.get(properties::QueueDepth);
 	int ret;
 
-	bufferCount = std::max({
-		data->outStream_.configuration().bufferCount,
-		data->vfStream_.configuration().bufferCount,
-		data->rawStream_.configuration().bufferCount,
-	});
-
 	ret = imgu->allocateBuffers(bufferCount);
 	if (ret < 0)
 		return ret;
diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
index 3f35596fe550..44a8a472ae4f 100644
--- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
+++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
@@ -471,7 +471,6 @@  CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
 	RPiCameraData *data = cameraData(camera);
 	CameraConfiguration *config = new RPiCameraConfiguration(data);
 	V4L2DeviceFormat sensorFormat;
-	unsigned int bufferCount;
 	PixelFormat pixelFormat;
 	V4L2VideoDevice::Formats fmts;
 	Size size;
@@ -489,7 +488,6 @@  CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
 			sensorFormat = findBestMode(fmts, size);
 			pixelFormat = sensorFormat.fourcc.toPixelFormat();
 			ASSERT(pixelFormat.isValid());
-			bufferCount = 2;
 			rawCount++;
 			break;
 
@@ -498,7 +496,6 @@  CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
 			pixelFormat = formats::NV12;
 			/* Return the largest sensor resolution. */
 			size = data->sensor_->resolution();
-			bufferCount = 1;
 			outCount++;
 			break;
 
@@ -514,7 +511,6 @@  CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
 			fmts = data->isp_[Isp::Output0].dev()->formats();
 			pixelFormat = formats::YUV420;
 			size = { 1920, 1080 };
-			bufferCount = 4;
 			outCount++;
 			break;
 
@@ -522,7 +518,6 @@  CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
 			fmts = data->isp_[Isp::Output0].dev()->formats();
 			pixelFormat = formats::ARGB8888;
 			size = { 800, 600 };
-			bufferCount = 4;
 			outCount++;
 			break;
 
@@ -552,7 +547,6 @@  CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
 		StreamConfiguration cfg(formats);
 		cfg.size = size;
 		cfg.pixelFormat = pixelFormat;
-		cfg.bufferCount = bufferCount;
 		config->addConfiguration(cfg);
 	}
 
@@ -1142,6 +1136,7 @@  int PipelineHandlerRPi::prepareBuffers(Camera *camera)
 {
 	RPiCameraData *data = cameraData(camera);
 	int ret;
+	unsigned int bufferCount = data->properties_.get(properties::QueueDepth);
 
 	/*
 	 * Decide how many internal buffers to allocate. For now, simply look
@@ -1152,7 +1147,7 @@  int PipelineHandlerRPi::prepareBuffers(Camera *camera)
 	unsigned int maxBuffers = 0;
 	for (const Stream *s : camera->streams())
 		if (static_cast<const RPi::Stream *>(s)->isExternal())
-			maxBuffers = std::max(maxBuffers, s->configuration().bufferCount);
+			maxBuffers = std::max(maxBuffers, bufferCount);
 
 	for (auto const stream : data->streams_) {
 		ret = stream->prepareBuffers(maxBuffers);
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1.cpp b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
index 2d95c1ca2a43..73d4ea6ba8f5 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1.cpp
+++ b/src/libcamera/pipeline/rkisp1/rkisp1.cpp
@@ -686,16 +686,11 @@  int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
 	unsigned int ipaBufferId = 1;
 	int ret;
 
-	unsigned int maxCount = std::max({
-		data->mainPathStream_.configuration().bufferCount,
-		data->selfPathStream_.configuration().bufferCount,
-	});
-
-	ret = param_->allocateBuffers(maxCount, &paramBuffers_);
+	ret = param_->allocateBuffers(RKISP1_BUFFER_COUNT, &paramBuffers_);
 	if (ret < 0)
 		goto error;
 
-	ret = stat_->allocateBuffers(maxCount, &statBuffers_);
+	ret = stat_->allocateBuffers(RKISP1_BUFFER_COUNT, &statBuffers_);
 	if (ret < 0)
 		goto error;
 
diff --git a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
index 25f482eb8d8e..200e3c2c4cca 100644
--- a/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
+++ b/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp
@@ -61,7 +61,6 @@  StreamConfiguration RkISP1Path::generateConfiguration(const Size &resolution)
 	StreamConfiguration cfg(formats);
 	cfg.pixelFormat = formats::NV12;
 	cfg.size = maxResolution;
-	cfg.bufferCount = RKISP1_BUFFER_COUNT;
 
 	return cfg;
 }
@@ -77,7 +76,6 @@  CameraConfiguration::Status RkISP1Path::validate(StreamConfiguration *cfg)
 
 	cfg->size.boundTo(maxResolution_);
 	cfg->size.expandTo(minResolution_);
-	cfg->bufferCount = RKISP1_BUFFER_COUNT;
 
 	V4L2DeviceFormat format;
 	format.fourcc = video_->toV4L2PixelFormat(cfg->pixelFormat);
diff --git a/src/libcamera/pipeline/simple/converter.cpp b/src/libcamera/pipeline/simple/converter.cpp
index 68644ef6477f..54e7f1b051f7 100644
--- a/src/libcamera/pipeline/simple/converter.cpp
+++ b/src/libcamera/pipeline/simple/converter.cpp
@@ -86,9 +86,6 @@  int SimpleConverter::Stream::configure(const StreamConfiguration &inputCfg,
 		return -EINVAL;
 	}
 
-	inputBufferCount_ = inputCfg.bufferCount;
-	outputBufferCount_ = outputCfg.bufferCount;
-
 	return 0;
 }
 
@@ -100,11 +97,11 @@  int SimpleConverter::Stream::exportBuffers(unsigned int count,
 
 int SimpleConverter::Stream::start()
 {
-	int ret = m2m_->output()->importBuffers(inputBufferCount_);
+	int ret = m2m_->output()->importBuffers(SIMPLE_QUEUE_DEPTH);
 	if (ret < 0)
 		return ret;
 
-	ret = m2m_->capture()->importBuffers(outputBufferCount_);
+	ret = m2m_->capture()->importBuffers(SIMPLE_QUEUE_DEPTH);
 	if (ret < 0) {
 		stop();
 		return ret;
diff --git a/src/libcamera/pipeline/simple/converter.h b/src/libcamera/pipeline/simple/converter.h
index 480e528d2210..32313748cd24 100644
--- a/src/libcamera/pipeline/simple/converter.h
+++ b/src/libcamera/pipeline/simple/converter.h
@@ -29,6 +29,8 @@  class SizeRange;
 struct StreamConfiguration;
 class V4L2M2MDevice;
 
+constexpr unsigned int SIMPLE_QUEUE_DEPTH = 3;
+
 class SimpleConverter
 {
 public:
@@ -84,9 +86,6 @@  private:
 		SimpleConverter *converter_;
 		unsigned int index_;
 		std::unique_ptr<V4L2M2MDevice> m2m_;
-
-		unsigned int inputBufferCount_;
-		unsigned int outputBufferCount_;
 	};
 
 	std::string deviceNode_;
diff --git a/src/libcamera/pipeline/simple/simple.cpp b/src/libcamera/pipeline/simple/simple.cpp
index b9f14be6733f..9f28bb66e2e7 100644
--- a/src/libcamera/pipeline/simple/simple.cpp
+++ b/src/libcamera/pipeline/simple/simple.cpp
@@ -142,8 +142,6 @@  static const SimplePipelineInfo supportedDevices[] = {
 
 } /* namespace */
 
-static constexpr unsigned int kNumInternalBuffers = 3;
-
 class SimpleCameraData : public CameraData
 {
 public:
@@ -425,7 +423,7 @@  int SimpleCameraData::init()
 	}
 
 	properties_ = sensor_->properties();
-	properties_.set(properties::QueueDepth, kNumInternalBuffers);
+	properties_.set(properties::QueueDepth, SIMPLE_QUEUE_DEPTH);
 
 	return 0;
 }
@@ -616,7 +614,7 @@  CameraConfiguration::Status SimpleCameraConfiguration::validate()
 		    cfg.size != pipeConfig_->captureSize)
 			needConversion_ = true;
 
-		/* Set the stride, frameSize and bufferCount. */
+		/* Set the stride and frameSize. */
 		if (needConversion_) {
 			std::tie(cfg.stride, cfg.frameSize) =
 				converter->strideAndFrameSize(cfg.pixelFormat, cfg.size);
@@ -634,8 +632,6 @@  CameraConfiguration::Status SimpleCameraConfiguration::validate()
 			cfg.stride = format.planes[0].bpl;
 			cfg.frameSize = format.planes[0].size;
 		}
-
-		cfg.bufferCount = 3;
 	}
 
 	return status;
@@ -758,7 +754,6 @@  int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
 	inputCfg.pixelFormat = pipeConfig->captureFormat;
 	inputCfg.size = pipeConfig->captureSize;
 	inputCfg.stride = captureFormat.planes[0].bpl;
-	inputCfg.bufferCount = kNumInternalBuffers;
 
 	return converter_->configure(inputCfg, outputCfgs);
 }
@@ -791,12 +786,10 @@  int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlL
 		 * When using the converter allocate a fixed number of internal
 		 * buffers.
 		 */
-		ret = video->allocateBuffers(kNumInternalBuffers,
+		ret = video->allocateBuffers(SIMPLE_QUEUE_DEPTH,
 					     &data->converterBuffers_);
 	} else {
-		/* Otherwise, prepare for using buffers from the only stream. */
-		Stream *stream = &data->streams_[0];
-		ret = video->importBuffers(stream->configuration().bufferCount);
+		ret = video->importBuffers(SIMPLE_QUEUE_DEPTH);
 	}
 	if (ret < 0)
 		return ret;
diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
index a148c35f1265..94e6fd9d2d56 100644
--- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
+++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp
@@ -148,8 +148,6 @@  CameraConfiguration::Status UVCCameraConfiguration::validate()
 		status = Adjusted;
 	}
 
-	cfg.bufferCount = 4;
-
 	V4L2DeviceFormat format;
 	format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
 	format.size = cfg.size;
@@ -191,7 +189,6 @@  CameraConfiguration *PipelineHandlerUVC::generateConfiguration(Camera *camera,
 
 	cfg.pixelFormat = formats.pixelformats().front();
 	cfg.size = formats.sizes(cfg.pixelFormat).back();
-	cfg.bufferCount = 4;
 
 	config->addConfiguration(cfg);
 
@@ -236,7 +233,7 @@  int PipelineHandlerUVC::exportFrameBuffers(Camera *camera,
 int PipelineHandlerUVC::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
 {
 	UVCCameraData *data = cameraData(camera);
-	unsigned int count = data->stream_.configuration().bufferCount;
+	unsigned int count = data->properties_.get(properties::QueueDepth);
 
 	int ret = data->video_->importBuffers(count);
 	if (ret < 0)
diff --git a/src/libcamera/pipeline/vimc/vimc.cpp b/src/libcamera/pipeline/vimc/vimc.cpp
index 22d6fdcbb141..891571afb3e5 100644
--- a/src/libcamera/pipeline/vimc/vimc.cpp
+++ b/src/libcamera/pipeline/vimc/vimc.cpp
@@ -165,8 +165,6 @@  CameraConfiguration::Status VimcCameraConfiguration::validate()
 		status = Adjusted;
 	}
 
-	cfg.bufferCount = 4;
-
 	V4L2DeviceFormat format;
 	format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
 	format.size = cfg.size;
@@ -222,7 +220,6 @@  CameraConfiguration *PipelineHandlerVimc::generateConfiguration(Camera *camera,
 
 	cfg.pixelFormat = formats::BGR888;
 	cfg.size = { 1920, 1080 };
-	cfg.bufferCount = 4;
 
 	config->addConfiguration(cfg);
 
@@ -312,7 +309,7 @@  int PipelineHandlerVimc::exportFrameBuffers(Camera *camera,
 int PipelineHandlerVimc::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
 {
 	VimcCameraData *data = cameraData(camera);
-	unsigned int count = data->stream_.configuration().bufferCount;
+	unsigned int count = data->properties_.get(properties::QueueDepth);
 
 	int ret = data->video_->importBuffers(count);
 	if (ret < 0)
diff --git a/src/libcamera/stream.cpp b/src/libcamera/stream.cpp
index f7bafcf8fc97..be57abce4eb3 100644
--- a/src/libcamera/stream.cpp
+++ b/src/libcamera/stream.cpp
@@ -279,7 +279,7 @@  SizeRange StreamFormats::range(const PixelFormat &pixelformat) const
  * handlers provide StreamFormats.
  */
 StreamConfiguration::StreamConfiguration()
-	: pixelFormat(0), stride(0), frameSize(0), bufferCount(0),
+	: pixelFormat(0), stride(0), frameSize(0),
 	  stream_(nullptr)
 {
 }
@@ -288,7 +288,7 @@  StreamConfiguration::StreamConfiguration()
  * \brief Construct a configuration with stream formats
  */
 StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
-	: pixelFormat(0), stride(0), frameSize(0), bufferCount(0),
+	: pixelFormat(0), stride(0), frameSize(0),
 	  stream_(nullptr), formats_(formats)
 {
 }
@@ -323,11 +323,6 @@  StreamConfiguration::StreamConfiguration(const StreamFormats &formats)
  * validating the configuration with a call to CameraConfiguration::validate().
  */
 
-/**
- * \var StreamConfiguration::bufferCount
- * \brief Requested number of buffers to allocate for the stream
- */
-
 /**
  * \fn StreamConfiguration::stream()
  * \brief Retrieve the stream associated with the configuration
diff --git a/src/v4l2/v4l2_camera.cpp b/src/v4l2/v4l2_camera.cpp
index 53d97f3e6b86..79bf7aec7782 100644
--- a/src/v4l2/v4l2_camera.cpp
+++ b/src/v4l2/v4l2_camera.cpp
@@ -10,6 +10,8 @@ 
 #include <errno.h>
 #include <unistd.h>
 
+#include <libcamera/property_ids.h>
+
 #include "libcamera/internal/log.h"
 
 using namespace libcamera;
@@ -107,14 +109,12 @@  void V4L2Camera::requestComplete(Request *request)
 }
 
 int V4L2Camera::configure(StreamConfiguration *streamConfigOut,
-			  const Size &size, const PixelFormat &pixelformat,
-			  unsigned int bufferCount)
+			  const Size &size, const PixelFormat &pixelformat)
 {
 	StreamConfiguration &streamConfig = config_->at(0);
 	streamConfig.size.width = size.width;
 	streamConfig.size.height = size.height;
 	streamConfig.pixelFormat = pixelformat;
-	streamConfig.bufferCount = bufferCount;
 	/* \todo memoryType (interval vs external) */
 
 	CameraConfiguration::Status validation = config_->validate();
@@ -146,7 +146,6 @@  int V4L2Camera::validateConfiguration(const PixelFormat &pixelFormat,
 	StreamConfiguration &cfg = config->at(0);
 	cfg.size = size;
 	cfg.pixelFormat = pixelFormat;
-	cfg.bufferCount = 1;
 
 	CameraConfiguration::Status validation = config->validate();
 	if (validation == CameraConfiguration::Invalid)
@@ -299,3 +298,10 @@  bool V4L2Camera::isRunning()
 {
 	return isRunning_;
 }
+
+int V4L2Camera::getBufCount(int count)
+{
+	int min = camera_->properties().get(properties::QueueDepth);
+
+	return std::max(count, min);
+}
diff --git a/src/v4l2/v4l2_camera.h b/src/v4l2/v4l2_camera.h
index d238046250e3..68df8ad05917 100644
--- a/src/v4l2/v4l2_camera.h
+++ b/src/v4l2/v4l2_camera.h
@@ -45,8 +45,7 @@  public:
 	std::vector<Buffer> completedBuffers();
 
 	int configure(StreamConfiguration *streamConfigOut,
-		      const Size &size, const PixelFormat &pixelformat,
-		      unsigned int bufferCount);
+		      const Size &size, const PixelFormat &pixelformat);
 	int validateConfiguration(const PixelFormat &pixelformat,
 				  const Size &size,
 				  StreamConfiguration *streamConfigOut);
@@ -65,6 +64,8 @@  public:
 
 	bool isRunning();
 
+	int getBufCount(int count);
+
 private:
 	void requestComplete(Request *request);
 
diff --git a/src/v4l2/v4l2_camera_proxy.cpp b/src/v4l2/v4l2_camera_proxy.cpp
index f8bfe595e90e..cd32e44a01ad 100644
--- a/src/v4l2/v4l2_camera_proxy.cpp
+++ b/src/v4l2/v4l2_camera_proxy.cpp
@@ -348,8 +348,7 @@  int V4L2CameraProxy::vidioc_s_fmt(V4L2CameraFile *file, struct v4l2_format *arg)
 	Size size(arg->fmt.pix.width, arg->fmt.pix.height);
 	V4L2PixelFormat v4l2Format = V4L2PixelFormat(arg->fmt.pix.pixelformat);
 	ret = vcam_->configure(&streamConfig_, size,
-			       PixelFormatInfo::info(v4l2Format).format,
-			       bufferCount_);
+			       PixelFormatInfo::info(v4l2Format).format);
 	if (ret < 0)
 		return -EINVAL;
 
@@ -490,14 +489,13 @@  int V4L2CameraProxy::vidioc_reqbufs(V4L2CameraFile *file, struct v4l2_requestbuf
 	Size size(v4l2PixFormat_.width, v4l2PixFormat_.height);
 	V4L2PixelFormat v4l2Format = V4L2PixelFormat(v4l2PixFormat_.pixelformat);
 	int ret = vcam_->configure(&streamConfig_, size,
-				   PixelFormatInfo::info(v4l2Format).format,
-				   arg->count);
+				   PixelFormatInfo::info(v4l2Format).format);
 	if (ret < 0)
 		return -EINVAL;
 
 	setFmtFromConfig(streamConfig_);
 
-	arg->count = streamConfig_.bufferCount;
+	arg->count = vcam_->getBufCount(arg->count);
 	bufferCount_ = arg->count;
 
 	ret = vcam_->allocBuffers(arg->count);
diff --git a/test/camera/buffer_import.cpp b/test/camera/buffer_import.cpp
index 61f4eb92ae95..f2c38bfb0b72 100644
--- a/test/camera/buffer_import.cpp
+++ b/test/camera/buffer_import.cpp
@@ -12,6 +12,8 @@ 
 #include <numeric>
 #include <vector>
 
+#include <libcamera/property_ids.h>
+
 #include "libcamera/internal/device_enumerator.h"
 #include "libcamera/internal/event_dispatcher.h"
 #include "libcamera/internal/media_device.h"
@@ -91,10 +93,12 @@  protected:
 			return TestFail;
 		}
 
+		unsigned int bufCount = camera_->properties().get(properties::QueueDepth);
+
 		Stream *stream = cfg.stream();
 
 		BufferSource source;
-		int ret = source.allocate(cfg);
+		int ret = source.allocate(cfg, bufCount);
 		if (ret != TestPass)
 			return ret;
 
@@ -138,10 +142,10 @@  protected:
 		while (timer.isRunning())
 			dispatcher->processEvents();
 
-		if (completeRequestsCount_ < cfg.bufferCount * 2) {
+		if (completeRequestsCount_ < bufCount * 2) {
 			std::cout << "Failed to capture enough frames (got "
 				  << completeRequestsCount_ << " expected at least "
-				  << cfg.bufferCount * 2 << ")" << std::endl;
+				  << bufCount * 2 << ")" << std::endl;
 			return TestFail;
 		}
 
diff --git a/test/libtest/buffer_source.cpp b/test/libtest/buffer_source.cpp
index 73563f2fc39d..c3d5286a2462 100644
--- a/test/libtest/buffer_source.cpp
+++ b/test/libtest/buffer_source.cpp
@@ -24,7 +24,7 @@  BufferSource::~BufferSource()
 		media_->release();
 }
 
-int BufferSource::allocate(const StreamConfiguration &config)
+int BufferSource::allocate(const StreamConfiguration &config, unsigned int count)
 {
 	/* Locate and open the video device. */
 	std::string videoDeviceName = "vivid-000-vid-out";
@@ -77,7 +77,7 @@  int BufferSource::allocate(const StreamConfiguration &config)
 		return TestFail;
 	}
 
-	if (video->allocateBuffers(config.bufferCount, &buffers_) < 0) {
+	if (video->allocateBuffers(count, &buffers_) < 0) {
 		std::cout << "Failed to allocate buffers" << std::endl;
 		return TestFail;
 	}
diff --git a/test/libtest/buffer_source.h b/test/libtest/buffer_source.h
index 14b4770e8d8a..6a18e269a575 100644
--- a/test/libtest/buffer_source.h
+++ b/test/libtest/buffer_source.h
@@ -20,7 +20,7 @@  public:
 	BufferSource();
 	~BufferSource();
 
-	int allocate(const StreamConfiguration &config);
+	int allocate(const StreamConfiguration &config, unsigned int count);
 	const std::vector<std::unique_ptr<FrameBuffer>> &buffers();
 
 private:
diff --git a/test/v4l2_videodevice/buffer_cache.cpp b/test/v4l2_videodevice/buffer_cache.cpp
index b3f2bec11783..07fddfd2617c 100644
--- a/test/v4l2_videodevice/buffer_cache.cpp
+++ b/test/v4l2_videodevice/buffer_cache.cpp
@@ -10,6 +10,7 @@ 
 #include <vector>
 
 #include <libcamera/formats.h>
+#include <libcamera/property_ids.h>
 #include <libcamera/stream.h>
 
 #include "buffer_source.h"
@@ -145,10 +146,9 @@  public:
 		StreamConfiguration cfg;
 		cfg.pixelFormat = formats::YUYV;
 		cfg.size = Size(600, 800);
-		cfg.bufferCount = numBuffers;
 
 		BufferSource source;
-		int ret = source.allocate(cfg);
+		int ret = source.allocate(cfg, numBuffers);
 		if (ret != TestPass)
 			return ret;