[libcamera-devel] libcamera: pipeline: raspberrypi: Add StreamFormats to StreamConfiguration

Message ID 20200610142640.576529-1-naush@raspberrypi.com
State Superseded
Series
  • [libcamera-devel] libcamera: pipeline: raspberrypi: Add StreamFormats to StreamConfiguration

Commit Message

Naushir Patuck June 10, 2020, 2:26 p.m. UTC
In generateConfiguration(), add the device node specific formats to the
StreamConfiguration for each StreamRole requested.

Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
---
 .../pipeline/raspberrypi/raspberrypi.cpp      | 52 +++++++++++++------
 1 file changed, 36 insertions(+), 16 deletions(-)

Comments

Naushir Patuck June 18, 2020, 7:31 a.m. UTC | #1
Hi all,

Gentle ping to see if anyone can review this when they get a chance.

Thanks,

Naush

On Wed, 10 Jun 2020 at 15:26, Naushir Patuck <naush@raspberrypi.com> wrote:
>
> In generateConfiguration(), add the device node specific formats to the
> StreamConfiguration for each StreamRole requested.
>
> Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
> ---
>  .../pipeline/raspberrypi/raspberrypi.cpp      | 52 +++++++++++++------
>  1 file changed, 36 insertions(+), 16 deletions(-)
>
> diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> index e16a9c7f..03a1e641 100644
> --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> @@ -518,41 +518,45 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
>         RPiCameraData *data = cameraData(camera);
>         CameraConfiguration *config = new RPiCameraConfiguration(data);
>         V4L2DeviceFormat sensorFormat;
> +       unsigned int bufferCount;
> +       PixelFormat pixelFormat;
>         V4L2PixFmtMap fmts;
> +       Size size;
>
>         if (roles.empty())
>                 return config;
>
>         for (const StreamRole role : roles) {
> -               StreamConfiguration cfg{};
> -
>                 switch (role) {
>                 case StreamRole::StillCaptureRaw:
> -                       cfg.size = data->sensor_->resolution();
> +                       size = data->sensor_->resolution();
>                         fmts = data->unicam_[Unicam::Image].dev()->formats();
> -                       sensorFormat = findBestMode(fmts, cfg.size);
> -                       cfg.pixelFormat = sensorFormat.fourcc.toPixelFormat();
> -                       ASSERT(cfg.pixelFormat.isValid());
> -                       cfg.bufferCount = 1;
> +                       sensorFormat = findBestMode(fmts, size);
> +                       pixelFormat = sensorFormat.fourcc.toPixelFormat();
> +                       ASSERT(pixelFormat.isValid());
> +                       bufferCount = 1;
>                         break;
>
>                 case StreamRole::StillCapture:
> -                       cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> +                       fmts = data->isp_[Isp::Output0].dev()->formats();
> +                       pixelFormat = PixelFormat(DRM_FORMAT_NV12);
>                         /* Return the largest sensor resolution. */
> -                       cfg.size = data->sensor_->resolution();
> -                       cfg.bufferCount = 1;
> +                       size = data->sensor_->resolution();
> +                       bufferCount = 1;
>                         break;
>
>                 case StreamRole::VideoRecording:
> -                       cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> -                       cfg.size = { 1920, 1080 };
> -                       cfg.bufferCount = 4;
> +                       fmts = data->isp_[Isp::Output0].dev()->formats();
> +                       pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> +                       size = { 1920, 1080 };
> +                       bufferCount = 4;
>                         break;
>
>                 case StreamRole::Viewfinder:
> -                       cfg.pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> -                       cfg.size = { 800, 600 };
> -                       cfg.bufferCount = 4;
> +                       fmts = data->isp_[Isp::Output0].dev()->formats();
> +                       pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> +                       size = { 800, 600 };
> +                       bufferCount = 4;
>                         break;
>
>                 default:
> @@ -561,6 +565,22 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
>                         break;
>                 }
>
> +               /* Translate the V4L2PixelFormat to PixelFormat. */
> +               std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
> +               std::transform(fmts.begin(), fmts.end(), std::inserter(deviceFormats, deviceFormats.begin()),
> +                              [&](const decltype(fmts)::value_type &format) {
> +                                       return decltype(deviceFormats)::value_type{
> +                                               format.first.toPixelFormat(),
> +                                               format.second
> +                                       };
> +                              });
> +
> +               /* Add the stream format based on the device node used for the use case. */
> +               StreamFormats formats(deviceFormats);
> +               StreamConfiguration cfg(formats);
> +               cfg.size = size;
> +               cfg.pixelFormat = pixelFormat;
> +               cfg.bufferCount = bufferCount;
>                 config->addConfiguration(cfg);
>         }
>
> --
> 2.25.1
>
Jacopo Mondi June 18, 2020, 8:41 a.m. UTC | #2
Hi Naush,
   sorry for the delay, this fell through the cracks

On Wed, Jun 10, 2020 at 03:26:40PM +0100, Naushir Patuck wrote:
> In generateConfiguration(), add the device node specific formats to the
> StreamConfiguration for each StreamRole requested.
>
> Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
> ---
>  .../pipeline/raspberrypi/raspberrypi.cpp      | 52 +++++++++++++------
>  1 file changed, 36 insertions(+), 16 deletions(-)
>
> diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> index e16a9c7f..03a1e641 100644
> --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> @@ -518,41 +518,45 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
>  	RPiCameraData *data = cameraData(camera);
>  	CameraConfiguration *config = new RPiCameraConfiguration(data);
>  	V4L2DeviceFormat sensorFormat;
> +	unsigned int bufferCount;
> +	PixelFormat pixelFormat;
>  	V4L2PixFmtMap fmts;
> +	Size size;
>
>  	if (roles.empty())
>  		return config;
>
>  	for (const StreamRole role : roles) {
> -		StreamConfiguration cfg{};
> -
>  		switch (role) {
>  		case StreamRole::StillCaptureRaw:
> -			cfg.size = data->sensor_->resolution();
> +			size = data->sensor_->resolution();
>  			fmts = data->unicam_[Unicam::Image].dev()->formats();
> -			sensorFormat = findBestMode(fmts, cfg.size);
> -			cfg.pixelFormat = sensorFormat.fourcc.toPixelFormat();
> -			ASSERT(cfg.pixelFormat.isValid());
> -			cfg.bufferCount = 1;
> +			sensorFormat = findBestMode(fmts, size);
> +			pixelFormat = sensorFormat.fourcc.toPixelFormat();
> +			ASSERT(pixelFormat.isValid());
> +			bufferCount = 1;
>  			break;
>
>  		case StreamRole::StillCapture:
> -			cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> +			fmts = data->isp_[Isp::Output0].dev()->formats();
> +			pixelFormat = PixelFormat(DRM_FORMAT_NV12);
>  			/* Return the largest sensor resolution. */
> -			cfg.size = data->sensor_->resolution();
> -			cfg.bufferCount = 1;
> +			size = data->sensor_->resolution();
> +			bufferCount = 1;
>  			break;
>
>  		case StreamRole::VideoRecording:
> -			cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> -			cfg.size = { 1920, 1080 };
> -			cfg.bufferCount = 4;
> +			fmts = data->isp_[Isp::Output0].dev()->formats();
> +			pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> +			size = { 1920, 1080 };
> +			bufferCount = 4;
>  			break;
>
>  		case StreamRole::Viewfinder:
> -			cfg.pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> -			cfg.size = { 800, 600 };
> -			cfg.bufferCount = 4;
> +			fmts = data->isp_[Isp::Output0].dev()->formats();
> +			pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> +			size = { 800, 600 };
> +			bufferCount = 4;
>  			break;
>
>  		default:
> @@ -561,6 +565,22 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
>  			break;
>  		}
>
> +		/* Translate the V4L2PixelFormat to PixelFormat. */
> +		std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
> +		std::transform(fmts.begin(), fmts.end(), std::inserter(deviceFormats, deviceFormats.begin()),
> +			       [&](const decltype(fmts)::value_type &format) {
> +					return decltype(deviceFormats)::value_type{
> +						format.first.toPixelFormat(),
> +						format.second
> +					};
> +			       });

It really took me a while to parse this, as I was not expecting to see
std::inserter() there but just deviceFormats.begin(). Then I learned
about std::inserter(), and indeed if I remove it I get an error because
the copy assignment operator of the map's std::pair value type is
deleted.
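
For reference, a minimal standalone sketch of why std::inserter() is
needed here (plain std types only, nothing libcamera-specific):

    #include <algorithm>
    #include <iterator>
    #include <map>
    #include <string>
    #include <vector>

    int main()
    {
            std::map<int, std::vector<int>> src{ { 1, { 10 } }, { 2, { 20 } } };
            std::map<std::string, std::vector<int>> dst;

            /*
             * The map's value_type is std::pair<const Key, T>, so the
             * assignment performed through a plain dst.begin() output
             * iterator is ill-formed; std::inserter() calls dst.insert()
             * for each transformed element instead.
             */
            std::transform(src.begin(), src.end(),
                           std::inserter(dst, dst.begin()),
                           [](const auto &entry) {
                                   return std::make_pair(std::to_string(entry.first),
                                                         entry.second);
                           });

            return 0;
    }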

> +
> +		/* Add the stream format based on the device node used for the use case. */
> +		StreamFormats formats(deviceFormats);
> +		StreamConfiguration cfg(formats);
> +		cfg.size = size;
> +		cfg.pixelFormat = pixelFormat;
> +		cfg.bufferCount = bufferCount;

Patch looks good to me

Reviewed-by: Jacopo Mondi <jacopo@jmondi.org>

Thanks
  j

>  		config->addConfiguration(cfg);
>  	}
>
> --
> 2.25.1
>
> _______________________________________________
> libcamera-devel mailing list
> libcamera-devel@lists.libcamera.org
> https://lists.libcamera.org/listinfo/libcamera-devel
Kieran Bingham June 18, 2020, 9:03 a.m. UTC | #3
Hi Naush, Jacopo,

On 18/06/2020 09:41, Jacopo Mondi wrote:
> Hi Naush,
>    sorry for the delay, this fell through the cracks
> 
> On Wed, Jun 10, 2020 at 03:26:40PM +0100, Naushir Patuck wrote:
>> In generateConfiguration(), add the device node specific formats to the
>> StreamConfiguration for each StreamRole requested.
>>
>> Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
>> ---
>>  .../pipeline/raspberrypi/raspberrypi.cpp      | 52 +++++++++++++------
>>  1 file changed, 36 insertions(+), 16 deletions(-)
>>
>> diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
>> index e16a9c7f..03a1e641 100644
>> --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
>> +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
>> @@ -518,41 +518,45 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
>>  	RPiCameraData *data = cameraData(camera);
>>  	CameraConfiguration *config = new RPiCameraConfiguration(data);
>>  	V4L2DeviceFormat sensorFormat;
>> +	unsigned int bufferCount;
>> +	PixelFormat pixelFormat;
>>  	V4L2PixFmtMap fmts;
>> +	Size size;
>>
>>  	if (roles.empty())
>>  		return config;
>>
>>  	for (const StreamRole role : roles) {
>> -		StreamConfiguration cfg{};
>> -
>>  		switch (role) {
>>  		case StreamRole::StillCaptureRaw:
>> -			cfg.size = data->sensor_->resolution();
>> +			size = data->sensor_->resolution();
>>  			fmts = data->unicam_[Unicam::Image].dev()->formats();
>> -			sensorFormat = findBestMode(fmts, cfg.size);
>> -			cfg.pixelFormat = sensorFormat.fourcc.toPixelFormat();
>> -			ASSERT(cfg.pixelFormat.isValid());
>> -			cfg.bufferCount = 1;
>> +			sensorFormat = findBestMode(fmts, size);
>> +			pixelFormat = sensorFormat.fourcc.toPixelFormat();
>> +			ASSERT(pixelFormat.isValid());
>> +			bufferCount = 1;
>>  			break;
>>
>>  		case StreamRole::StillCapture:
>> -			cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
>> +			fmts = data->isp_[Isp::Output0].dev()->formats();
>> +			pixelFormat = PixelFormat(DRM_FORMAT_NV12);
>>  			/* Return the largest sensor resolution. */
>> -			cfg.size = data->sensor_->resolution();
>> -			cfg.bufferCount = 1;
>> +			size = data->sensor_->resolution();
>> +			bufferCount = 1;
>>  			break;
>>
>>  		case StreamRole::VideoRecording:
>> -			cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
>> -			cfg.size = { 1920, 1080 };
>> -			cfg.bufferCount = 4;
>> +			fmts = data->isp_[Isp::Output0].dev()->formats();
>> +			pixelFormat = PixelFormat(DRM_FORMAT_NV12);
>> +			size = { 1920, 1080 };
>> +			bufferCount = 4;
>>  			break;
>>
>>  		case StreamRole::Viewfinder:
>> -			cfg.pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
>> -			cfg.size = { 800, 600 };
>> -			cfg.bufferCount = 4;
>> +			fmts = data->isp_[Isp::Output0].dev()->formats();
>> +			pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
>> +			size = { 800, 600 };
>> +			bufferCount = 4;
>>  			break;
>>
>>  		default:
>> @@ -561,6 +565,22 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
>>  			break;
>>  		}
>>
>> +		/* Translate the V4L2PixelFormat to PixelFormat. */
>> +		std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
>> +		std::transform(fmts.begin(), fmts.end(), std::inserter(deviceFormats, deviceFormats.begin()),
>> +			       [&](const decltype(fmts)::value_type &format) {
>> +					return decltype(deviceFormats)::value_type{
>> +						format.first.toPixelFormat(),
>> +						format.second
>> +					};
>> +			       });
> 
> Really took me a while to parse this, as I was not expecting to see
> std::inserter() but just deviceFormats.begin() there, but then I
> learned about inserter, and indeed if I remove std::inserter() I get
> an error due to the fact the copy operator of std::pair is deleted.

I think that's the same as the conversion routine in the UVC pipeline
handler (which I think came from Laurent).

Not for this patch (I think this can go in as is), but we should really
make a helper for that conversion as it's likely to be used in multiple
places, and it is quite hard to parse on its own ;-(.
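
A possible shape for such a helper, purely as a sketch (the name and
where it would live are hypothetical, and it assumes the types already
used in this patch: V4L2PixFmtMap, PixelFormat and SizeRange):

    /* Hypothetical helper, not part of this patch. */
    std::map<PixelFormat, std::vector<SizeRange>>
    toDeviceFormats(const V4L2PixFmtMap &fmts)
    {
            std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;

            /* Convert each V4L2PixelFormat key to its PixelFormat. */
            for (const auto &format : fmts)
                    deviceFormats.emplace(format.first.toPixelFormat(),
                                          format.second);

            return deviceFormats;
    }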

However, I think that overlaps with changes that you (Jacopo) were
working on with ImageFormats anyway.

Either way, this patch will help support more formats and enumeration on
the RPi pipeline handler:

Reviewed-by: Kieran Bingham <kieran.bingham@ideasonboard.com>


> 
>> +
>> +		/* Add the stream format based on the device node used for the use case. */
>> +		StreamFormats formats(deviceFormats);
>> +		StreamConfiguration cfg(formats);
>> +		cfg.size = size;
>> +		cfg.pixelFormat = pixelFormat;
>> +		cfg.bufferCount = bufferCount;
> 
> Patch looks good to me
> 
> Reviewed-by: Jacopo Mondi <jacopo@jmondi.org>
> 
> Thanks
>   j
> 
>>  		config->addConfiguration(cfg);
>>  	}
>>
>> --
>> 2.25.1
>>
>> _______________________________________________
>> libcamera-devel mailing list
>> libcamera-devel@lists.libcamera.org
>> https://lists.libcamera.org/listinfo/libcamera-devel
> _______________________________________________
> libcamera-devel mailing list
> libcamera-devel@lists.libcamera.org
> https://lists.libcamera.org/listinfo/libcamera-devel
>
Naushir Patuck June 18, 2020, 9:18 a.m. UTC | #4
Hi,

On Thu, 18 Jun 2020 at 10:03, Kieran Bingham
<kieran.bingham@ideasonboard.com> wrote:
>
> Hi Naush, Jacopo,
>
> On 18/06/2020 09:41, Jacopo Mondi wrote:
> > Hi Naush,
> >    sorry for the delay, this fell through the cracks
> >
> > On Wed, Jun 10, 2020 at 03:26:40PM +0100, Naushir Patuck wrote:
> >> In generateConfiguration(), add the device node specific formats to the
> >> StreamConfiguration for each StreamRole requested.
> >>
> >> Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
> >> ---
> >>  .../pipeline/raspberrypi/raspberrypi.cpp      | 52 +++++++++++++------
> >>  1 file changed, 36 insertions(+), 16 deletions(-)
> >>
> >> diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> >> index e16a9c7f..03a1e641 100644
> >> --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> >> +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> >> @@ -518,41 +518,45 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> >>      RPiCameraData *data = cameraData(camera);
> >>      CameraConfiguration *config = new RPiCameraConfiguration(data);
> >>      V4L2DeviceFormat sensorFormat;
> >> +    unsigned int bufferCount;
> >> +    PixelFormat pixelFormat;
> >>      V4L2PixFmtMap fmts;
> >> +    Size size;
> >>
> >>      if (roles.empty())
> >>              return config;
> >>
> >>      for (const StreamRole role : roles) {
> >> -            StreamConfiguration cfg{};
> >> -
> >>              switch (role) {
> >>              case StreamRole::StillCaptureRaw:
> >> -                    cfg.size = data->sensor_->resolution();
> >> +                    size = data->sensor_->resolution();
> >>                      fmts = data->unicam_[Unicam::Image].dev()->formats();
> >> -                    sensorFormat = findBestMode(fmts, cfg.size);
> >> -                    cfg.pixelFormat = sensorFormat.fourcc.toPixelFormat();
> >> -                    ASSERT(cfg.pixelFormat.isValid());
> >> -                    cfg.bufferCount = 1;
> >> +                    sensorFormat = findBestMode(fmts, size);
> >> +                    pixelFormat = sensorFormat.fourcc.toPixelFormat();
> >> +                    ASSERT(pixelFormat.isValid());
> >> +                    bufferCount = 1;
> >>                      break;
> >>
> >>              case StreamRole::StillCapture:
> >> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> >> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >>                      /* Return the largest sensor resolution. */
> >> -                    cfg.size = data->sensor_->resolution();
> >> -                    cfg.bufferCount = 1;
> >> +                    size = data->sensor_->resolution();
> >> +                    bufferCount = 1;
> >>                      break;
> >>
> >>              case StreamRole::VideoRecording:
> >> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >> -                    cfg.size = { 1920, 1080 };
> >> -                    cfg.bufferCount = 4;
> >> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> >> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >> +                    size = { 1920, 1080 };
> >> +                    bufferCount = 4;
> >>                      break;
> >>
> >>              case StreamRole::Viewfinder:
> >> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> >> -                    cfg.size = { 800, 600 };
> >> -                    cfg.bufferCount = 4;
> >> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> >> +                    pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> >> +                    size = { 800, 600 };
> >> +                    bufferCount = 4;
> >>                      break;
> >>
> >>              default:
> >> @@ -561,6 +565,22 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> >>                      break;
> >>              }
> >>
> >> +            /* Translate the V4L2PixelFormat to PixelFormat. */
> >> +            std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
> >> +            std::transform(fmts.begin(), fmts.end(), std::inserter(deviceFormats, deviceFormats.begin()),
> >> +                           [&](const decltype(fmts)::value_type &format) {
> >> +                                    return decltype(deviceFormats)::value_type{
> >> +                                            format.first.toPixelFormat(),
> >> +                                            format.second
> >> +                                    };
> >> +                           });
> >
> > Really took me a while to parse this, as I was not expecting to see
> > std::inserter() but just deviceFormats.begin() there, but then I
> > learned about inserter, and indeed if I remove std::inserter() I get
> > an error due to the fact the copy operator of std::pair is deleted.
>
> I think that's the same as the conversion routine in the UVC pipeline
> handler (which I think came from Laurent).

Yes, I picked this up from the uvc pipeline handler.  It took me a few
goes to understand it as well :)

>
> Not for this patch (I think this can go in as is) but we should really
> make a helper for that conversion as it's likely to be used in multiple
> places, and it is quite hard to parse on it's own ;-(.

That would make sense.  All pipeline handlers will require this
conversion, I think!

>
> However, I think that overlaps with changes that you (Jacopo) were
> working on with ImageFormats anyway.
>
> Eitherway, this patch will help support more formats and enumeration on
> the RPi pipeline handler:
>
> Reviewed-by: Kieran Bingham <kieran.bingham@ideasonboard.com>
>
>
> >
> >> +
> >> +            /* Add the stream format based on the device node used for the use case. */
> >> +            StreamFormats formats(deviceFormats);
> >> +            StreamConfiguration cfg(formats);
> >> +            cfg.size = size;
> >> +            cfg.pixelFormat = pixelFormat;
> >> +            cfg.bufferCount = bufferCount;
> >
> > Patch looks good to me
> >
> > Reviewed-by: Jacopo Mondi <jacopo@jmondi.org>
> >
> > Thanks
> >   j
> >
> >>              config->addConfiguration(cfg);
> >>      }
> >>
> >> --
> >> 2.25.1
> >>
> >> _______________________________________________
> >> libcamera-devel mailing list
> >> libcamera-devel@lists.libcamera.org
> >> https://lists.libcamera.org/listinfo/libcamera-devel
> > _______________________________________________
> > libcamera-devel mailing list
> > libcamera-devel@lists.libcamera.org
> > https://lists.libcamera.org/listinfo/libcamera-devel
> >
>
> --
> Regards
> --
> Kieran

Regards,
Naush
Laurent Pinchart June 22, 2020, 4:58 a.m. UTC | #5
Hi Naush and everybody,

Thank you for the patch.

On Thu, Jun 18, 2020 at 10:18:38AM +0100, Naushir Patuck wrote:
> On Thu, 18 Jun 2020 at 10:03, Kieran Bingham wrote:
> > On 18/06/2020 09:41, Jacopo Mondi wrote:
> >> On Wed, Jun 10, 2020 at 03:26:40PM +0100, Naushir Patuck wrote:
> >>> In generateConfiguration(), add the device node specific formats to the
> >>> StreamConfiguration for each StreamRole requested.
> >>>
> >>> Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
> >>> ---
> >>>  .../pipeline/raspberrypi/raspberrypi.cpp      | 52 +++++++++++++------
> >>>  1 file changed, 36 insertions(+), 16 deletions(-)
> >>>
> >>> diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> >>> index e16a9c7f..03a1e641 100644
> >>> --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> >>> +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> >>> @@ -518,41 +518,45 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> >>>      RPiCameraData *data = cameraData(camera);
> >>>      CameraConfiguration *config = new RPiCameraConfiguration(data);
> >>>      V4L2DeviceFormat sensorFormat;
> >>> +    unsigned int bufferCount;
> >>> +    PixelFormat pixelFormat;
> >>>      V4L2PixFmtMap fmts;
> >>> +    Size size;
> >>>
> >>>      if (roles.empty())
> >>>              return config;
> >>>
> >>>      for (const StreamRole role : roles) {
> >>> -            StreamConfiguration cfg{};
> >>> -
> >>>              switch (role) {
> >>>              case StreamRole::StillCaptureRaw:
> >>> -                    cfg.size = data->sensor_->resolution();
> >>> +                    size = data->sensor_->resolution();
> >>>                      fmts = data->unicam_[Unicam::Image].dev()->formats();
> >>> -                    sensorFormat = findBestMode(fmts, cfg.size);
> >>> -                    cfg.pixelFormat = sensorFormat.fourcc.toPixelFormat();
> >>> -                    ASSERT(cfg.pixelFormat.isValid());
> >>> -                    cfg.bufferCount = 1;
> >>> +                    sensorFormat = findBestMode(fmts, size);
> >>> +                    pixelFormat = sensorFormat.fourcc.toPixelFormat();
> >>> +                    ASSERT(pixelFormat.isValid());
> >>> +                    bufferCount = 1;
> >>>                      break;
> >>>
> >>>              case StreamRole::StillCapture:
> >>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> >>> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);

This conflicts with the recent rework of format handling; this line
should now be

+			pixelFormat = formats::NV12;

Same for the other formats below.

> >>>                      /* Return the largest sensor resolution. */
> >>> -                    cfg.size = data->sensor_->resolution();
> >>> -                    cfg.bufferCount = 1;
> >>> +                    size = data->sensor_->resolution();
> >>> +                    bufferCount = 1;
> >>>                      break;
> >>>
> >>>              case StreamRole::VideoRecording:
> >>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >>> -                    cfg.size = { 1920, 1080 };
> >>> -                    cfg.bufferCount = 4;
> >>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> >>> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >>> +                    size = { 1920, 1080 };
> >>> +                    bufferCount = 4;
> >>>                      break;
> >>>
> >>>              case StreamRole::Viewfinder:
> >>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> >>> -                    cfg.size = { 800, 600 };
> >>> -                    cfg.bufferCount = 4;
> >>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> >>> +                    pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> >>> +                    size = { 800, 600 };
> >>> +                    bufferCount = 4;
> >>>                      break;
> >>>
> >>>              default:
> >>> @@ -561,6 +565,22 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> >>>                      break;
> >>>              }
> >>>
> >>> +            /* Translate the V4L2PixelFormat to PixelFormat. */
> >>> +            std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
> >>> +            std::transform(fmts.begin(), fmts.end(), std::inserter(deviceFormats, deviceFormats.begin()),
> >>> +                           [&](const decltype(fmts)::value_type &format) {
> >>> +                                    return decltype(deviceFormats)::value_type{
> >>> +                                            format.first.toPixelFormat(),
> >>> +                                            format.second
> >>> +                                    };
> >>> +                           });

For the unicam device this looks fine to me, as the driver will
give us formats and sizes that match the capabilities of the sensor. For
the ISP, the list of supported pixel formats is queried from the
firmware, filtered against the list of pixel formats supported by the
driver, and all should be fine. However, for the frame sizes, the ISP
driver reports hardcoded minimum and maximum values of 64 and 16384
respectively. Shouldn't we instead, in the pipeline handler, take the
capabilities of both the sensor and the ISP into account to create a
range of sizes that would be closer to what can actually be achieved?

For UVC the situation is different, as the kernel directly reports the
list of formats and sizes supported by the device.

> >> Really took me a while to parse this, as I was not expecting to see
> >> std::inserter() but just deviceFormats.begin() there, but then I
> >> learned about inserter, and indeed if I remove std::inserter() I get
> >> an error due to the fact the copy operator of std::pair is deleted.
> >
> > I think that's the same as the conversion routine in the UVC pipeline
> > handler (which I think came from Laurent).
> 
> Yes, I picked this from the uvc pipeline handler.  Took me some goes
> to understand it as well :)
> 
> > Not for this patch (I think this can go in as is) but we should really
> > make a helper for that conversion as it's likely to be used in multiple
> > places, and it is quite hard to parse on it's own ;-(.
> 
> That would make sense.  All pipeline handlers will require this
> conversion, I think!
> 
> > However, I think that overlaps with changes that you (Jacopo) were
> > working on with ImageFormats anyway.

Yes, we know this API needs to be reworked, so I wouldn't spend time on
creating a helper right now, but would rather land the ImageFormats
series first, and then discuss how we should rework stream configuration
handling. The fact that this code should consider both the sensor and
the ISP, as explained above, also makes it more difficult to create a
single helper function.

> > Eitherway, this patch will help support more formats and enumeration on
> > the RPi pipeline handler:
> >
> > Reviewed-by: Kieran Bingham <kieran.bingham@ideasonboard.com>
> >
> >>> +
> >>> +            /* Add the stream format based on the device node used for the use case. */
> >>> +            StreamFormats formats(deviceFormats);
> >>> +            StreamConfiguration cfg(formats);
> >>> +            cfg.size = size;
> >>> +            cfg.pixelFormat = pixelFormat;
> >>> +            cfg.bufferCount = bufferCount;
> >>
> >> Patch looks good to me
> >>
> >> Reviewed-by: Jacopo Mondi <jacopo@jmondi.org>
> >>
> >>>              config->addConfiguration(cfg);
> >>>      }
> >>>
Naushir Patuck June 22, 2020, 10:25 a.m. UTC | #6
Hi Laurent,

Thank you for the review.

On Mon, 22 Jun 2020 at 05:59, Laurent Pinchart
<laurent.pinchart@ideasonboard.com> wrote:
>
> Hi Naush and everybody,
>
> Thank you for the patch.
>
> On Thu, Jun 18, 2020 at 10:18:38AM +0100, Naushir Patuck wrote:
> > On Thu, 18 Jun 2020 at 10:03, Kieran Bingham wrote:
> > > On 18/06/2020 09:41, Jacopo Mondi wrote:
> > >> On Wed, Jun 10, 2020 at 03:26:40PM +0100, Naushir Patuck wrote:
> > >>> In generateConfiguration(), add the device node specific formats to the
> > >>> StreamConfiguration for each StreamRole requested.
> > >>>
> > >>> Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
> > >>> ---
> > >>>  .../pipeline/raspberrypi/raspberrypi.cpp      | 52 +++++++++++++------
> > >>>  1 file changed, 36 insertions(+), 16 deletions(-)
> > >>>
> > >>> diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > >>> index e16a9c7f..03a1e641 100644
> > >>> --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > >>> +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > >>> @@ -518,41 +518,45 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> > >>>      RPiCameraData *data = cameraData(camera);
> > >>>      CameraConfiguration *config = new RPiCameraConfiguration(data);
> > >>>      V4L2DeviceFormat sensorFormat;
> > >>> +    unsigned int bufferCount;
> > >>> +    PixelFormat pixelFormat;
> > >>>      V4L2PixFmtMap fmts;
> > >>> +    Size size;
> > >>>
> > >>>      if (roles.empty())
> > >>>              return config;
> > >>>
> > >>>      for (const StreamRole role : roles) {
> > >>> -            StreamConfiguration cfg{};
> > >>> -
> > >>>              switch (role) {
> > >>>              case StreamRole::StillCaptureRaw:
> > >>> -                    cfg.size = data->sensor_->resolution();
> > >>> +                    size = data->sensor_->resolution();
> > >>>                      fmts = data->unicam_[Unicam::Image].dev()->formats();
> > >>> -                    sensorFormat = findBestMode(fmts, cfg.size);
> > >>> -                    cfg.pixelFormat = sensorFormat.fourcc.toPixelFormat();
> > >>> -                    ASSERT(cfg.pixelFormat.isValid());
> > >>> -                    cfg.bufferCount = 1;
> > >>> +                    sensorFormat = findBestMode(fmts, size);
> > >>> +                    pixelFormat = sensorFormat.fourcc.toPixelFormat();
> > >>> +                    ASSERT(pixelFormat.isValid());
> > >>> +                    bufferCount = 1;
> > >>>                      break;
> > >>>
> > >>>              case StreamRole::StillCapture:
> > >>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > >>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> > >>> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
>
> This conflicts with recent rework of format handling, this line should
> now be
>
> +                       pixelFormat = formats::NV12;
>
> Same for the other formats below.

Will update in the next patch.

>
> > >>>                      /* Return the largest sensor resolution. */
> > >>> -                    cfg.size = data->sensor_->resolution();
> > >>> -                    cfg.bufferCount = 1;
> > >>> +                    size = data->sensor_->resolution();
> > >>> +                    bufferCount = 1;
> > >>>                      break;
> > >>>
> > >>>              case StreamRole::VideoRecording:
> > >>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > >>> -                    cfg.size = { 1920, 1080 };
> > >>> -                    cfg.bufferCount = 4;
> > >>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> > >>> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > >>> +                    size = { 1920, 1080 };
> > >>> +                    bufferCount = 4;
> > >>>                      break;
> > >>>
> > >>>              case StreamRole::Viewfinder:
> > >>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> > >>> -                    cfg.size = { 800, 600 };
> > >>> -                    cfg.bufferCount = 4;
> > >>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> > >>> +                    pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> > >>> +                    size = { 800, 600 };
> > >>> +                    bufferCount = 4;
> > >>>                      break;
> > >>>
> > >>>              default:
> > >>> @@ -561,6 +565,22 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> > >>>                      break;
> > >>>              }
> > >>>
> > >>> +            /* Translate the V4L2PixelFormat to PixelFormat. */
> > >>> +            std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
> > >>> +            std::transform(fmts.begin(), fmts.end(), std::inserter(deviceFormats, deviceFormats.begin()),
> > >>> +                           [&](const decltype(fmts)::value_type &format) {
> > >>> +                                    return decltype(deviceFormats)::value_type{
> > >>> +                                            format.first.toPixelFormat(),
> > >>> +                                            format.second
> > >>> +                                    };
> > >>> +                           });
>
> For the unicam device this looks fine to me, as the driver will
> give us formats and sizes that match the capabilities of the sensor. For
> the ISP, the list of supported pixel formats is queried from the
> firmware, filtered against the list of pixel formats supported by the
> driver, and all should be fine. However, for the frame sizes, the ISP
> driver reports hardcoded minimum and maximum values of 64 and 16384
> respectively. Shouldn't we instead, in the pipeline handler, take the
> capabilities of both the sensor and the ISP into account to create a
> range of sizes that would be closer to what can actually be achieved ?

Just checking: should I populate the StreamFormats
std::vector<SizeRange> with the device-reported ranges?  And what about
the StreamConfiguration::size field that I currently populate with a
single value, should I leave that blank/empty?

Regards,
Naush


>
> For UVC the situation is different, as the kernel directly reports the
> list of formats and sizes supported by the device.
>
> > >> Really took me a while to parse this, as I was not expecting to see
> > >> std::inserter() but just deviceFormats.begin() there, but then I
> > >> learned about inserter, and indeed if I remove std::inserter() I get
> > >> an error due to the fact the copy operator of std::pair is deleted.
> > >
> > > I think that's the same as the conversion routine in the UVC pipeline
> > > handler (which I think came from Laurent).
> >
> > Yes, I picked this from the uvc pipeline handler.  Took me some goes
> > to understand it as well :)
> >
> > > Not for this patch (I think this can go in as is) but we should really
> > > make a helper for that conversion as it's likely to be used in multiple
> > > places, and it is quite hard to parse on it's own ;-(.
> >
> > That would make sense.  All pipeline handlers will require this
> > conversion, I think!
> >
> > > However, I think that overlaps with changes that you (Jacopo) were
> > > working on with ImageFormats anyway.
>
> Yes, we know this API needs to be reworked, so I wouldn't spend time on
> creating a helper right now, but would rather land the ImageFormats
> series first, and then discuss how we should rework stream configuration
> handling. The fact that this code should consider both the sensor and
> the ISP, as explained above, also makes it more difficult to create a
> single helper function.
>
> > > Eitherway, this patch will help support more formats and enumeration on
> > > the RPi pipeline handler:
> > >
> > > Reviewed-by: Kieran Bingham <kieran.bingham@ideasonboard.com>
> > >
> > >>> +
> > >>> +            /* Add the stream format based on the device node used for the use case. */
> > >>> +            StreamFormats formats(deviceFormats);
> > >>> +            StreamConfiguration cfg(formats);
> > >>> +            cfg.size = size;
> > >>> +            cfg.pixelFormat = pixelFormat;
> > >>> +            cfg.bufferCount = bufferCount;
> > >>
> > >> Patch looks good to me
> > >>
> > >> Reviewed-by: Jacopo Mondi <jacopo@jmondi.org>
> > >>
> > >>>              config->addConfiguration(cfg);
> > >>>      }
> > >>>
>
> --
> Regards,
>
> Laurent Pinchart
Laurent Pinchart June 22, 2020, 11:46 p.m. UTC | #7
Hi Naush,

On Mon, Jun 22, 2020 at 11:25:47AM +0100, Naushir Patuck wrote:
> On Mon, 22 Jun 2020 at 05:59, Laurent Pinchart wrote:
> > On Thu, Jun 18, 2020 at 10:18:38AM +0100, Naushir Patuck wrote:
> >> On Thu, 18 Jun 2020 at 10:03, Kieran Bingham wrote:
> >>> On 18/06/2020 09:41, Jacopo Mondi wrote:
> >>>> On Wed, Jun 10, 2020 at 03:26:40PM +0100, Naushir Patuck wrote:
> >>>>> In generateConfiguration(), add the device node specific formats to the
> >>>>> StreamConfiguration for each StreamRole requested.
> >>>>>
> >>>>> Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
> >>>>> ---
> >>>>>  .../pipeline/raspberrypi/raspberrypi.cpp      | 52 +++++++++++++------
> >>>>>  1 file changed, 36 insertions(+), 16 deletions(-)
> >>>>>
> >>>>> diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> >>>>> index e16a9c7f..03a1e641 100644
> >>>>> --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> >>>>> +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> >>>>> @@ -518,41 +518,45 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> >>>>>      RPiCameraData *data = cameraData(camera);
> >>>>>      CameraConfiguration *config = new RPiCameraConfiguration(data);
> >>>>>      V4L2DeviceFormat sensorFormat;
> >>>>> +    unsigned int bufferCount;
> >>>>> +    PixelFormat pixelFormat;
> >>>>>      V4L2PixFmtMap fmts;
> >>>>> +    Size size;
> >>>>>
> >>>>>      if (roles.empty())
> >>>>>              return config;
> >>>>>
> >>>>>      for (const StreamRole role : roles) {
> >>>>> -            StreamConfiguration cfg{};
> >>>>> -
> >>>>>              switch (role) {
> >>>>>              case StreamRole::StillCaptureRaw:
> >>>>> -                    cfg.size = data->sensor_->resolution();
> >>>>> +                    size = data->sensor_->resolution();
> >>>>>                      fmts = data->unicam_[Unicam::Image].dev()->formats();
> >>>>> -                    sensorFormat = findBestMode(fmts, cfg.size);
> >>>>> -                    cfg.pixelFormat = sensorFormat.fourcc.toPixelFormat();
> >>>>> -                    ASSERT(cfg.pixelFormat.isValid());
> >>>>> -                    cfg.bufferCount = 1;
> >>>>> +                    sensorFormat = findBestMode(fmts, size);
> >>>>> +                    pixelFormat = sensorFormat.fourcc.toPixelFormat();
> >>>>> +                    ASSERT(pixelFormat.isValid());
> >>>>> +                    bufferCount = 1;
> >>>>>                      break;
> >>>>>
> >>>>>              case StreamRole::StillCapture:
> >>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> >>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >
> > This conflicts with recent rework of format handling, this line should
> > now be
> >
> > +                       pixelFormat = formats::NV12;
> >
> > Same for the other formats below.
> 
> Will update in the next patch.
> 
> >>>>>                      /* Return the largest sensor resolution. */
> >>>>> -                    cfg.size = data->sensor_->resolution();
> >>>>> -                    cfg.bufferCount = 1;
> >>>>> +                    size = data->sensor_->resolution();
> >>>>> +                    bufferCount = 1;
> >>>>>                      break;
> >>>>>
> >>>>>              case StreamRole::VideoRecording:
> >>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >>>>> -                    cfg.size = { 1920, 1080 };
> >>>>> -                    cfg.bufferCount = 4;
> >>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> >>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >>>>> +                    size = { 1920, 1080 };
> >>>>> +                    bufferCount = 4;
> >>>>>                      break;
> >>>>>
> >>>>>              case StreamRole::Viewfinder:
> >>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> >>>>> -                    cfg.size = { 800, 600 };
> >>>>> -                    cfg.bufferCount = 4;
> >>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> >>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> >>>>> +                    size = { 800, 600 };
> >>>>> +                    bufferCount = 4;
> >>>>>                      break;
> >>>>>
> >>>>>              default:
> >>>>> @@ -561,6 +565,22 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> >>>>>                      break;
> >>>>>              }
> >>>>>
> >>>>> +            /* Translate the V4L2PixelFormat to PixelFormat. */
> >>>>> +            std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
> >>>>> +            std::transform(fmts.begin(), fmts.end(), std::inserter(deviceFormats, deviceFormats.begin()),
> >>>>> +                           [&](const decltype(fmts)::value_type &format) {
> >>>>> +                                    return decltype(deviceFormats)::value_type{
> >>>>> +                                            format.first.toPixelFormat(),
> >>>>> +                                            format.second
> >>>>> +                                    };
> >>>>> +                           });
> >
> > For the unicam device this looks fine to me, as the driver will
> > give us formats and sizes that match the capabilities of the sensor. For
> > the ISP, the list of supported pixel formats is queried from the
> > firmware, filtered against the list of pixel formats supported by the
> > driver, and all should be fine. However, for the frame sizes, the ISP
> > driver reports hardcoded minimum and maximum values of 64 and 16384
> > respectively. Shouldn't we instead, in the pipeline handler, take the
> > capabilities of both the sensor and the ISP into account to create a
> > range of sizes that would be closer to what can actually be achieved ?
> 
> Just checking, so I should populate
> StreamFormat::std::vector<SizeRange> with the device ranges?  What
> about the StreamConfiguration::size field that I currently populate
> with a single value, should I leave that blank/empty?

StreamConfiguration::size is the recommended size for the stream.
StreamFormats is a map containing all the supported pixel formats, and
for each of them, the supported sizes. The sizes are expressed as a
vector of SizeRange, which must contain at least one entry.

You populate StreamConfiguration::size correctly as far as I can tell,
so there's no reason to change it. The StreamFormats, however, should be
populated with what the camera can produce, and that should be
conditioned by the sizes supported by the sensor and the ISP
capabilities. You could, for instance, take all the sizes supported by
the sensor, and for each of them, create a SizeRange with the minimum
set to the sensor size downscaled as much as possible by the ISP, and
the maximum set to the sensor size.
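
A rough sketch of that idea (sensorSizes() and kMaxIspDownscale are
made-up placeholders for whatever the pipeline handler can query from
the sensor and the ISP, not real libcamera APIs or hardware limits):

    /* Hypothetical sketch only, names and factor are illustrative. */
    constexpr unsigned int kMaxIspDownscale = 16;
    std::vector<SizeRange> ranges;

    for (const Size &sensorSize : sensorSizes()) {
            /* Lower bound: sensor size downscaled as far as the ISP allows. */
            Size minSize{ sensorSize.width / kMaxIspDownscale,
                          sensorSize.height / kMaxIspDownscale };
            /* Upper bound: the sensor size itself. */
            ranges.emplace_back(minSize, sensorSize);
    }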

> > For UVC the situation is different, as the kernel directly reports the
> > list of formats and sizes supported by the device.
> >
> >>>> Really took me a while to parse this, as I was not expecting to see
> >>>> std::inserter() but just deviceFormats.begin() there, but then I
> >>>> learned about inserter, and indeed if I remove std::inserter() I get
> >>>> an error due to the fact the copy operator of std::pair is deleted.
> >>>
> >>> I think that's the same as the conversion routine in the UVC pipeline
> >>> handler (which I think came from Laurent).
> >>
> >> Yes, I picked this from the uvc pipeline handler.  Took me some goes
> >> to understand it as well :)
> >>
> >>> Not for this patch (I think this can go in as is) but we should really
> >>> make a helper for that conversion as it's likely to be used in multiple
> >>> places, and it is quite hard to parse on it's own ;-(.
> >>
> >> That would make sense.  All pipeline handlers will require this
> >> conversion, I think!
> >>
> >>> However, I think that overlaps with changes that you (Jacopo) were
> >>> working on with ImageFormats anyway.
> >
> > Yes, we know this API needs to be reworked, so I wouldn't spend time on
> > creating a helper right now, but would rather land the ImageFormats
> > series first, and then discuss how we should rework stream configuration
> > handling. The fact that this code should consider both the sensor and
> > the ISP, as explained above, also makes it more difficult to create a
> > single helper function.
> >
> >>> Eitherway, this patch will help support more formats and enumeration on
> >>> the RPi pipeline handler:
> >>>
> >>> Reviewed-by: Kieran Bingham <kieran.bingham@ideasonboard.com>
> >>>
> >>>>> +
> >>>>> +            /* Add the stream format based on the device node used for the use case. */
> >>>>> +            StreamFormats formats(deviceFormats);
> >>>>> +            StreamConfiguration cfg(formats);
> >>>>> +            cfg.size = size;
> >>>>> +            cfg.pixelFormat = pixelFormat;
> >>>>> +            cfg.bufferCount = bufferCount;
> >>>>
> >>>> Patch looks good to me
> >>>>
> >>>> Reviewed-by: Jacopo Mondi <jacopo@jmondi.org>
> >>>>
> >>>>>              config->addConfiguration(cfg);
> >>>>>      }
> >>>>>
Naushir Patuck June 23, 2020, 7:59 a.m. UTC | #8
Hi Laurent,

On Tue, 23 Jun 2020 at 00:46, Laurent Pinchart
<laurent.pinchart@ideasonboard.com> wrote:
>
> Hi Naush,
>
> On Mon, Jun 22, 2020 at 11:25:47AM +0100, Naushir Patuck wrote:
> > On Mon, 22 Jun 2020 at 05:59, Laurent Pinchart wrote:
> > > On Thu, Jun 18, 2020 at 10:18:38AM +0100, Naushir Patuck wrote:
> > >> On Thu, 18 Jun 2020 at 10:03, Kieran Bingham wrote:
> > >>> On 18/06/2020 09:41, Jacopo Mondi wrote:
> > >>>> On Wed, Jun 10, 2020 at 03:26:40PM +0100, Naushir Patuck wrote:
> > >>>>> In generateConfiguration(), add the device node specific formats to the
> > >>>>> StreamConfiguration for each StreamRole requested.
> > >>>>>
> > >>>>> Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
> > >>>>> ---
> > >>>>>  .../pipeline/raspberrypi/raspberrypi.cpp      | 52 +++++++++++++------
> > >>>>>  1 file changed, 36 insertions(+), 16 deletions(-)
> > >>>>>
> > >>>>> diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > >>>>> index e16a9c7f..03a1e641 100644
> > >>>>> --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > >>>>> +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > >>>>> @@ -518,41 +518,45 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> > >>>>>      RPiCameraData *data = cameraData(camera);
> > >>>>>      CameraConfiguration *config = new RPiCameraConfiguration(data);
> > >>>>>      V4L2DeviceFormat sensorFormat;
> > >>>>> +    unsigned int bufferCount;
> > >>>>> +    PixelFormat pixelFormat;
> > >>>>>      V4L2PixFmtMap fmts;
> > >>>>> +    Size size;
> > >>>>>
> > >>>>>      if (roles.empty())
> > >>>>>              return config;
> > >>>>>
> > >>>>>      for (const StreamRole role : roles) {
> > >>>>> -            StreamConfiguration cfg{};
> > >>>>> -
> > >>>>>              switch (role) {
> > >>>>>              case StreamRole::StillCaptureRaw:
> > >>>>> -                    cfg.size = data->sensor_->resolution();
> > >>>>> +                    size = data->sensor_->resolution();
> > >>>>>                      fmts = data->unicam_[Unicam::Image].dev()->formats();
> > >>>>> -                    sensorFormat = findBestMode(fmts, cfg.size);
> > >>>>> -                    cfg.pixelFormat = sensorFormat.fourcc.toPixelFormat();
> > >>>>> -                    ASSERT(cfg.pixelFormat.isValid());
> > >>>>> -                    cfg.bufferCount = 1;
> > >>>>> +                    sensorFormat = findBestMode(fmts, size);
> > >>>>> +                    pixelFormat = sensorFormat.fourcc.toPixelFormat();
> > >>>>> +                    ASSERT(pixelFormat.isValid());
> > >>>>> +                    bufferCount = 1;
> > >>>>>                      break;
> > >>>>>
> > >>>>>              case StreamRole::StillCapture:
> > >>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > >>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> > >>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > >
> > > This conflicts with recent rework of format handling, this line should
> > > now be
> > >
> > > +                       pixelFormat = formats::NV12;
> > >
> > > Same for the other formats below.
> >
> > Will update in the next patch.
> >
> > >>>>>                      /* Return the largest sensor resolution. */
> > >>>>> -                    cfg.size = data->sensor_->resolution();
> > >>>>> -                    cfg.bufferCount = 1;
> > >>>>> +                    size = data->sensor_->resolution();
> > >>>>> +                    bufferCount = 1;
> > >>>>>                      break;
> > >>>>>
> > >>>>>              case StreamRole::VideoRecording:
> > >>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > >>>>> -                    cfg.size = { 1920, 1080 };
> > >>>>> -                    cfg.bufferCount = 4;
> > >>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> > >>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > >>>>> +                    size = { 1920, 1080 };
> > >>>>> +                    bufferCount = 4;
> > >>>>>                      break;
> > >>>>>
> > >>>>>              case StreamRole::Viewfinder:
> > >>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> > >>>>> -                    cfg.size = { 800, 600 };
> > >>>>> -                    cfg.bufferCount = 4;
> > >>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> > >>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> > >>>>> +                    size = { 800, 600 };
> > >>>>> +                    bufferCount = 4;
> > >>>>>                      break;
> > >>>>>
> > >>>>>              default:
> > >>>>> @@ -561,6 +565,22 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> > >>>>>                      break;
> > >>>>>              }
> > >>>>>
> > >>>>> +            /* Translate the V4L2PixelFormat to PixelFormat. */
> > >>>>> +            std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
> > >>>>> +            std::transform(fmts.begin(), fmts.end(), std::inserter(deviceFormats, deviceFormats.begin()),
> > >>>>> +                           [&](const decltype(fmts)::value_type &format) {
> > >>>>> +                                    return decltype(deviceFormats)::value_type{
> > >>>>> +                                            format.first.toPixelFormat(),
> > >>>>> +                                            format.second
> > >>>>> +                                    };
> > >>>>> +                           });
> > >
> > > For the unicam device this looks fine to me, as the driver will
> > > give us formats and sizes that match the capabilities of the sensor. For
> > > the ISP, the list of supported pixel formats is queried from the
> > > firmware, filtered against the list of pixel formats supported by the
> > > driver, and all should be fine. However, for the frame sizes, the ISP
> > > driver reports hardcoded minimum and maximum values of 64 and 16384
> > > respectively. Shouldn't we instead, in the pipeline handler, take the
> > > capabilities of both the sensor and the ISP into account to create a
> > > range of sizes that would be closer to what can actually be achieved ?
> >
> > Just checking, so I should populate
> > StreamFormat::std::vector<SizeRange> with the device ranges?  What
> > about the StreamConfiguration::size field that I currently populate
> > with a single value, should I leave that blank/empty?
>
> StreamConfigure::size is the recommended size for the stream.
> StreamFormats is a map containing all the supported pixel formats, and
> for each of them, the supported sizes. The sizes are expressed as a
> vector of SizeRange, which must contain at least one entry.
>
> You populate StreamConfigure::size correctly as far as I can tell,
> there's no reason to change. The StreamFormats, however, should be
> populated with what the camera can produce, and that should be
> conditioned by the sizes supported by the sensor and the ISP
> capabilities. You could, for instance, take all the sizes supported by
> the sensor, and for each of them, create a SizeRange with the minimum
> set to the sensor size downscaled as much as possible by the ISP, and
> the maximum set to the sensor size.
>

Apologies if this is a silly question, but since the ISP hardware
genuinely does allow frame sizes in the range [64,16384] in both
dimensions irrespective of the input size, is what I currently have
for populating SizeRange not correct?

Regards,
Naush


> > > For UVC the situation is different, as the kernel directly reports the
> > > list of formats and sizes supported by the device.
> > >
> > >>>> Really took me a while to parse this, as I was not expecting to see
> > >>>> std::inserter() but just deviceFormats.begin() there, but then I
> > >>>> learned about inserter, and indeed if I remove std::inserter() I get
> > >>>> an error due to the fact the copy operator of std::pair is deleted.
> > >>>
> > >>> I think that's the same as the conversion routine in the UVC pipeline
> > >>> handler (which I think came from Laurent).
> > >>
> > >> Yes, I picked this from the uvc pipeline handler.  Took me some goes
> > >> to understand it as well :)
> > >>
> > >>> Not for this patch (I think this can go in as is) but we should really
> > >>> make a helper for that conversion as it's likely to be used in multiple
> > >>> places, and it is quite hard to parse on it's own ;-(.
> > >>
> > >> That would make sense.  All pipeline handlers will require this
> > >> conversion, I think!
> > >>
> > >>> However, I think that overlaps with changes that you (Jacopo) were
> > >>> working on with ImageFormats anyway.
> > >
> > > Yes, we know this API needs to be reworked, so I wouldn't spend time on
> > > creating a helper right now, but would rather land the ImageFormats
> > > series first, and then discuss how we should rework stream configuration
> > > handling. The fact that this code should consider both the sensor and
> > > the ISP, as explained above, also makes it more difficult to create a
> > > single helper function.
> > >
> > >>> Eitherway, this patch will help support more formats and enumeration on
> > >>> the RPi pipeline handler:
> > >>>
> > >>> Reviewed-by: Kieran Bingham <kieran.bingham@ideasonboard.com>
> > >>>
> > >>>>> +
> > >>>>> +            /* Add the stream format based on the device node used for the use case. */
> > >>>>> +            StreamFormats formats(deviceFormats);
> > >>>>> +            StreamConfiguration cfg(formats);
> > >>>>> +            cfg.size = size;
> > >>>>> +            cfg.pixelFormat = pixelFormat;
> > >>>>> +            cfg.bufferCount = bufferCount;
> > >>>>
> > >>>> Patch looks good to me
> > >>>>
> > >>>> Reviewed-by: Jacopo Mondi <jacopo@jmondi.org>
> > >>>>
> > >>>>>              config->addConfiguration(cfg);
> > >>>>>      }
> > >>>>>
>
> --
> Regards,
>
> Laurent Pinchart
Laurent Pinchart June 25, 2020, 2:21 a.m. UTC | #9
Hi Naush,

On Tue, Jun 23, 2020 at 08:59:49AM +0100, Naushir Patuck wrote:
> On Tue, 23 Jun 2020 at 00:46, Laurent Pinchart wrote:
> > On Mon, Jun 22, 2020 at 11:25:47AM +0100, Naushir Patuck wrote:
> >> On Mon, 22 Jun 2020 at 05:59, Laurent Pinchart wrote:
> >>> On Thu, Jun 18, 2020 at 10:18:38AM +0100, Naushir Patuck wrote:
> >>>> On Thu, 18 Jun 2020 at 10:03, Kieran Bingham wrote:
> >>>>> On 18/06/2020 09:41, Jacopo Mondi wrote:
> >>>>>> On Wed, Jun 10, 2020 at 03:26:40PM +0100, Naushir Patuck wrote:
> >>>>>>> In generateConfiguration(), add the device node specific formats to the
> >>>>>>> StreamConfiguration for each StreamRole requested.
> >>>>>>>
> >>>>>>> Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
> >>>>>>> ---
> >>>>>>>  .../pipeline/raspberrypi/raspberrypi.cpp      | 52 +++++++++++++------
> >>>>>>>  1 file changed, 36 insertions(+), 16 deletions(-)
> >>>>>>>
> >>>>>>> diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> >>>>>>> index e16a9c7f..03a1e641 100644
> >>>>>>> --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> >>>>>>> +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> >>>>>>> @@ -518,41 +518,45 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> >>>>>>>      RPiCameraData *data = cameraData(camera);
> >>>>>>>      CameraConfiguration *config = new RPiCameraConfiguration(data);
> >>>>>>>      V4L2DeviceFormat sensorFormat;
> >>>>>>> +    unsigned int bufferCount;
> >>>>>>> +    PixelFormat pixelFormat;
> >>>>>>>      V4L2PixFmtMap fmts;
> >>>>>>> +    Size size;
> >>>>>>>
> >>>>>>>      if (roles.empty())
> >>>>>>>              return config;
> >>>>>>>
> >>>>>>>      for (const StreamRole role : roles) {
> >>>>>>> -            StreamConfiguration cfg{};
> >>>>>>> -
> >>>>>>>              switch (role) {
> >>>>>>>              case StreamRole::StillCaptureRaw:
> >>>>>>> -                    cfg.size = data->sensor_->resolution();
> >>>>>>> +                    size = data->sensor_->resolution();
> >>>>>>>                      fmts = data->unicam_[Unicam::Image].dev()->formats();
> >>>>>>> -                    sensorFormat = findBestMode(fmts, cfg.size);
> >>>>>>> -                    cfg.pixelFormat = sensorFormat.fourcc.toPixelFormat();
> >>>>>>> -                    ASSERT(cfg.pixelFormat.isValid());
> >>>>>>> -                    cfg.bufferCount = 1;
> >>>>>>> +                    sensorFormat = findBestMode(fmts, size);
> >>>>>>> +                    pixelFormat = sensorFormat.fourcc.toPixelFormat();
> >>>>>>> +                    ASSERT(pixelFormat.isValid());
> >>>>>>> +                    bufferCount = 1;
> >>>>>>>                      break;
> >>>>>>>
> >>>>>>>              case StreamRole::StillCapture:
> >>>>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >>>>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> >>>>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >>>
> >>> This conflicts with recent rework of format handling, this line should
> >>> now be
> >>>
> >>> +                       pixelFormat = formats::NV12;
> >>>
> >>> Same for the other formats below.
> >>
> >> Will update in the next patch.
> >>
> >>>>>>>                      /* Return the largest sensor resolution. */
> >>>>>>> -                    cfg.size = data->sensor_->resolution();
> >>>>>>> -                    cfg.bufferCount = 1;
> >>>>>>> +                    size = data->sensor_->resolution();
> >>>>>>> +                    bufferCount = 1;
> >>>>>>>                      break;
> >>>>>>>
> >>>>>>>              case StreamRole::VideoRecording:
> >>>>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >>>>>>> -                    cfg.size = { 1920, 1080 };
> >>>>>>> -                    cfg.bufferCount = 4;
> >>>>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> >>>>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >>>>>>> +                    size = { 1920, 1080 };
> >>>>>>> +                    bufferCount = 4;
> >>>>>>>                      break;
> >>>>>>>
> >>>>>>>              case StreamRole::Viewfinder:
> >>>>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> >>>>>>> -                    cfg.size = { 800, 600 };
> >>>>>>> -                    cfg.bufferCount = 4;
> >>>>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> >>>>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> >>>>>>> +                    size = { 800, 600 };
> >>>>>>> +                    bufferCount = 4;
> >>>>>>>                      break;
> >>>>>>>
> >>>>>>>              default:
> >>>>>>> @@ -561,6 +565,22 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> >>>>>>>                      break;
> >>>>>>>              }
> >>>>>>>
> >>>>>>> +            /* Translate the V4L2PixelFormat to PixelFormat. */
> >>>>>>> +            std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
> >>>>>>> +            std::transform(fmts.begin(), fmts.end(), std::inserter(deviceFormats, deviceFormats.begin()),
> >>>>>>> +                           [&](const decltype(fmts)::value_type &format) {
> >>>>>>> +                                    return decltype(deviceFormats)::value_type{
> >>>>>>> +                                            format.first.toPixelFormat(),
> >>>>>>> +                                            format.second
> >>>>>>> +                                    };
> >>>>>>> +                           });
> >>>
> >>> For the unicam device this looks fine to me, as the driver will
> >>> give us formats and sizes that match the capabilities of the sensor. For
> >>> the ISP, the list of supported pixel formats is queried from the
> >>> firmware, filtered against the list of pixel formats supported by the
> >>> driver, and all should be fine. However, for the frame sizes, the ISP
> >>> driver reports hardcoded minimum and maximum values of 64 and 16384
> >>> respectively. Shouldn't we instead, in the pipeline handler, take the
> >>> capabilities of both the sensor and the ISP into account to create a
> >>> range of sizes that would be closer to what can actually be achieved ?
> >>
> >> Just checking, so I should populate
> >> StreamFormat::std::vector<SizeRange> with the device ranges?  What
> >> about the StreamConfiguration::size field that I currently populate
> >> with a single value, should I leave that blank/empty?
> >
> > StreamConfigure::size is the recommended size for the stream.
> > StreamFormats is a map containing all the supported pixel formats, and
> > for each of them, the supported sizes. The sizes are expressed as a
> > vector of SizeRange, which must contain at least one entry.
> >
> > You populate StreamConfigure::size correctly as far as I can tell,
> > there's no reason to change. The StreamFormats, however, should be
> > populated with what the camera can produce, and that should be
> > conditioned by the sizes supported by the sensor and the ISP
> > capabilities. You could, for instance, take all the sizes supported by
> > the sensor, and for each of them, create a SizeRange with the minimum
> > set to the sensor size downscaled as much as possible by the ISP, and
> > the maximum set to the sensor size.
> 
> Apologies if this is a silly question, but since the ISP hardware
> genuinely does allow frame sizes in the range [64,16384] in both
> dimensions irrespective of the input size, is what I currently have
> for populating SizeRange not correct?

Then I have to apologize for considering that the code wasn't correct :-) I
assumed the scaling ratios were somehow limited.
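
(For reference, a minimal sketch of the StreamFormats / StreamConfiguration
relationship described above; the includes and the helper name are
illustrative assumptions, not part of the patch:)

	#include <libcamera/formats.h>
	#include <libcamera/geometry.h>
	#include <libcamera/stream.h>

	#include <map>
	#include <vector>

	using namespace libcamera;

	StreamConfiguration makeExampleConfig(
		const std::map<PixelFormat, std::vector<SizeRange>> &deviceFormats)
	{
		/* Every supported pixel format, each with at least one SizeRange. */
		StreamFormats streamFormats(deviceFormats);

		/* StreamConfiguration::size only carries the recommended size. */
		StreamConfiguration cfg(streamFormats);
		cfg.size = { 1920, 1080 };
		cfg.pixelFormat = formats::NV12;
		cfg.bufferCount = 4;

		return cfg;
	}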

> >>> For UVC the situation is different, as the kernel directly reports the
> >>> list of formats and sizes supported by the device.
> >>>
> >>>>>> Really took me a while to parse this, as I was not expecting to see
> >>>>>> std::inserter() but just deviceFormats.begin() there, but then I
> >>>>>> learned about inserter, and indeed if I remove std::inserter() I get
> >>>>>> an error due to the fact the copy operator of std::pair is deleted.
> >>>>>
> >>>>> I think that's the same as the conversion routine in the UVC pipeline
> >>>>> handler (which I think came from Laurent).
> >>>>
> >>>> Yes, I picked this from the uvc pipeline handler.  Took me some goes
> >>>> to understand it as well :)
> >>>>
> >>>>> Not for this patch (I think this can go in as is) but we should really
> >>>>> make a helper for that conversion as it's likely to be used in multiple
> >>>>> places, and it is quite hard to parse on it's own ;-(.
> >>>>
> >>>> That would make sense.  All pipeline handlers will require this
> >>>> conversion, I think!
> >>>>
> >>>>> However, I think that overlaps with changes that you (Jacopo) were
> >>>>> working on with ImageFormats anyway.
> >>>
> >>> Yes, we know this API needs to be reworked, so I wouldn't spend time on
> >>> creating a helper right now, but would rather land the ImageFormats
> >>> series first, and then discuss how we should rework stream configuration
> >>> handling. The fact that this code should consider both the sensor and
> >>> the ISP, as explained above, also makes it more difficult to create a
> >>> single helper function.
> >>>
> >>>>> Eitherway, this patch will help support more formats and enumeration on
> >>>>> the RPi pipeline handler:
> >>>>>
> >>>>> Reviewed-by: Kieran Bingham <kieran.bingham@ideasonboard.com>
> >>>>>
> >>>>>>> +
> >>>>>>> +            /* Add the stream format based on the device node used for the use case. */
> >>>>>>> +            StreamFormats formats(deviceFormats);
> >>>>>>> +            StreamConfiguration cfg(formats);
> >>>>>>> +            cfg.size = size;
> >>>>>>> +            cfg.pixelFormat = pixelFormat;
> >>>>>>> +            cfg.bufferCount = bufferCount;
> >>>>>>
> >>>>>> Patch looks good to me
> >>>>>>
> >>>>>> Reviewed-by: Jacopo Mondi <jacopo@jmondi.org>
> >>>>>>
> >>>>>>>              config->addConfiguration(cfg);
> >>>>>>>      }
> >>>>>>>
Laurent Pinchart June 25, 2020, 2:37 a.m. UTC | #10
Hi Naush,

On Thu, Jun 25, 2020 at 05:21:26AM +0300, Laurent Pinchart wrote:
> On Tue, Jun 23, 2020 at 08:59:49AM +0100, Naushir Patuck wrote:
> > On Tue, 23 Jun 2020 at 00:46, Laurent Pinchart wrote:
> >> On Mon, Jun 22, 2020 at 11:25:47AM +0100, Naushir Patuck wrote:
> >>> On Mon, 22 Jun 2020 at 05:59, Laurent Pinchart wrote:
> >>>> On Thu, Jun 18, 2020 at 10:18:38AM +0100, Naushir Patuck wrote:
> >>>>> On Thu, 18 Jun 2020 at 10:03, Kieran Bingham wrote:
> >>>>>> On 18/06/2020 09:41, Jacopo Mondi wrote:
> >>>>>>> On Wed, Jun 10, 2020 at 03:26:40PM +0100, Naushir Patuck wrote:
> >>>>>>>> In generateConfiguration(), add the device node specific formats to the
> >>>>>>>> StreamConfiguration for each StreamRole requested.
> >>>>>>>>
> >>>>>>>> Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
> >>>>>>>> ---
> >>>>>>>>  .../pipeline/raspberrypi/raspberrypi.cpp      | 52 +++++++++++++------
> >>>>>>>>  1 file changed, 36 insertions(+), 16 deletions(-)
> >>>>>>>>
> >>>>>>>> diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> >>>>>>>> index e16a9c7f..03a1e641 100644
> >>>>>>>> --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> >>>>>>>> +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> >>>>>>>> @@ -518,41 +518,45 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> >>>>>>>>      RPiCameraData *data = cameraData(camera);
> >>>>>>>>      CameraConfiguration *config = new RPiCameraConfiguration(data);
> >>>>>>>>      V4L2DeviceFormat sensorFormat;
> >>>>>>>> +    unsigned int bufferCount;
> >>>>>>>> +    PixelFormat pixelFormat;
> >>>>>>>>      V4L2PixFmtMap fmts;
> >>>>>>>> +    Size size;
> >>>>>>>>
> >>>>>>>>      if (roles.empty())
> >>>>>>>>              return config;
> >>>>>>>>
> >>>>>>>>      for (const StreamRole role : roles) {
> >>>>>>>> -            StreamConfiguration cfg{};
> >>>>>>>> -
> >>>>>>>>              switch (role) {
> >>>>>>>>              case StreamRole::StillCaptureRaw:
> >>>>>>>> -                    cfg.size = data->sensor_->resolution();
> >>>>>>>> +                    size = data->sensor_->resolution();
> >>>>>>>>                      fmts = data->unicam_[Unicam::Image].dev()->formats();
> >>>>>>>> -                    sensorFormat = findBestMode(fmts, cfg.size);
> >>>>>>>> -                    cfg.pixelFormat = sensorFormat.fourcc.toPixelFormat();
> >>>>>>>> -                    ASSERT(cfg.pixelFormat.isValid());
> >>>>>>>> -                    cfg.bufferCount = 1;
> >>>>>>>> +                    sensorFormat = findBestMode(fmts, size);
> >>>>>>>> +                    pixelFormat = sensorFormat.fourcc.toPixelFormat();
> >>>>>>>> +                    ASSERT(pixelFormat.isValid());
> >>>>>>>> +                    bufferCount = 1;
> >>>>>>>>                      break;
> >>>>>>>>
> >>>>>>>>              case StreamRole::StillCapture:
> >>>>>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >>>>>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> >>>>>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >>>>
> >>>> This conflicts with recent rework of format handling, this line should
> >>>> now be
> >>>>
> >>>> +                       pixelFormat = formats::NV12;
> >>>>
> >>>> Same for the other formats below.
> >>>
> >>> Will update in the next patch.
> >>>
> >>>>>>>>                      /* Return the largest sensor resolution. */
> >>>>>>>> -                    cfg.size = data->sensor_->resolution();
> >>>>>>>> -                    cfg.bufferCount = 1;
> >>>>>>>> +                    size = data->sensor_->resolution();
> >>>>>>>> +                    bufferCount = 1;
> >>>>>>>>                      break;
> >>>>>>>>
> >>>>>>>>              case StreamRole::VideoRecording:
> >>>>>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >>>>>>>> -                    cfg.size = { 1920, 1080 };
> >>>>>>>> -                    cfg.bufferCount = 4;
> >>>>>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> >>>>>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> >>>>>>>> +                    size = { 1920, 1080 };
> >>>>>>>> +                    bufferCount = 4;
> >>>>>>>>                      break;
> >>>>>>>>
> >>>>>>>>              case StreamRole::Viewfinder:
> >>>>>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> >>>>>>>> -                    cfg.size = { 800, 600 };
> >>>>>>>> -                    cfg.bufferCount = 4;
> >>>>>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> >>>>>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> >>>>>>>> +                    size = { 800, 600 };
> >>>>>>>> +                    bufferCount = 4;
> >>>>>>>>                      break;
> >>>>>>>>
> >>>>>>>>              default:
> >>>>>>>> @@ -561,6 +565,22 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> >>>>>>>>                      break;
> >>>>>>>>              }
> >>>>>>>>
> >>>>>>>> +            /* Translate the V4L2PixelFormat to PixelFormat. */
> >>>>>>>> +            std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
> >>>>>>>> +            std::transform(fmts.begin(), fmts.end(), std::inserter(deviceFormats, deviceFormats.begin()),

One additional comment, should this be deviceFormats.end(), to insert
the new entries at the end of the map ? It will make a difference in
efficiency *if* new entries get inserted right before the iterator.
There's no such guarantee, as fmts is sorted by V4L2PixelFormat, and we
convert that to a PixelFormat, but I think there's a bigger chance that
insertion would happen at the end than at the beginning. If this
analysis is correct, I'll fix the UVC pipeline handler accordingly.
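
For reference, a minimal sketch of the suggested change, reusing fmts,
deviceFormats and the lambda exactly as in the patch above; the only
difference is the deviceFormats.end() hint passed to std::inserter():

	std::transform(fmts.begin(), fmts.end(),
		       std::inserter(deviceFormats, deviceFormats.end()),
		       [&](const decltype(fmts)::value_type &format) {
				return decltype(deviceFormats)::value_type{
					format.first.toPixelFormat(),
					format.second
				};
		       });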

I can address both the formats:: conflict and this issue when applying,
but maybe it would be best if you could test a v2, in which case you
might as well send it out :-)

> >>>>>>>> +                           [&](const decltype(fmts)::value_type &format) {
> >>>>>>>> +                                    return decltype(deviceFormats)::value_type{
> >>>>>>>> +                                            format.first.toPixelFormat(),
> >>>>>>>> +                                            format.second
> >>>>>>>> +                                    };
> >>>>>>>> +                           });
> >>>>
> >>>> For the unicam device this looks fine to me, as the driver will
> >>>> give us formats and sizes that match the capabilities of the sensor. For
> >>>> the ISP, the list of supported pixel formats is queried from the
> >>>> firmware, filtered against the list of pixel formats supported by the
> >>>> driver, and all should be fine. However, for the frame sizes, the ISP
> >>>> driver reports hardcoded minimum and maximum values of 64 and 16384
> >>>> respectively. Shouldn't we instead, in the pipeline handler, take the
> >>>> capabilities of both the sensor and the ISP into account to create a
> >>>> range of sizes that would be closer to what can actually be achieved ?
> >>>
> >>> Just checking, so I should populate
> >>> StreamFormat::std::vector<SizeRange> with the device ranges?  What
> >>> about the StreamConfiguration::size field that I currently populate
> >>> with a single value, should I leave that blank/empty?
> >>
> >> StreamConfigure::size is the recommended size for the stream.
> >> StreamFormats is a map containing all the supported pixel formats, and
> >> for each of them, the supported sizes. The sizes are expressed as a
> >> vector of SizeRange, which must contain at least one entry.
> >>
> >> You populate StreamConfigure::size correctly as far as I can tell,
> >> there's no reason to change. The StreamFormats, however, should be
> >> populated with what the camera can produce, and that should be
> >> conditioned by the sizes supported by the sensor and the ISP
> >> capabilities. You could, for instance, take all the sizes supported by
> >> the sensor, and for each of them, create a SizeRange with the minimum
> >> set to the sensor size downscaled as much as possible by the ISP, and
> >> the maximum set to the sensor size.
> > 
> > Apologies if this is a silly question, but since the ISP hardware
> > genuinely does allow frame sizes in the range [64,16384] in both
> > dimensions irrespective of the input size, is what I currently have
> > for populating SizeRange not correct?
> 
> Then I have to apologize for considering the code wasn't correct :-) I
> assumed the scaling ratios were somehow limited.
> 
> >>>> For UVC the situation is different, as the kernel directly reports the
> >>>> list of formats and sizes supported by the device.
> >>>>
> >>>>>>> Really took me a while to parse this, as I was not expecting to see
> >>>>>>> std::inserter() but just deviceFormats.begin() there, but then I
> >>>>>>> learned about inserter, and indeed if I remove std::inserter() I get
> >>>>>>> an error due to the fact the copy operator of std::pair is deleted.
> >>>>>>
> >>>>>> I think that's the same as the conversion routine in the UVC pipeline
> >>>>>> handler (which I think came from Laurent).
> >>>>>
> >>>>> Yes, I picked this from the uvc pipeline handler.  Took me some goes
> >>>>> to understand it as well :)
> >>>>>
> >>>>>> Not for this patch (I think this can go in as is) but we should really
> >>>>>> make a helper for that conversion as it's likely to be used in multiple
> >>>>>> places, and it is quite hard to parse on it's own ;-(.
> >>>>>
> >>>>> That would make sense.  All pipeline handlers will require this
> >>>>> conversion, I think!
> >>>>>
> >>>>>> However, I think that overlaps with changes that you (Jacopo) were
> >>>>>> working on with ImageFormats anyway.
> >>>>
> >>>> Yes, we know this API needs to be reworked, so I wouldn't spend time on
> >>>> creating a helper right now, but would rather land the ImageFormats
> >>>> series first, and then discuss how we should rework stream configuration
> >>>> handling. The fact that this code should consider both the sensor and
> >>>> the ISP, as explained above, also makes it more difficult to create a
> >>>> single helper function.
> >>>>
> >>>>>> Eitherway, this patch will help support more formats and enumeration on
> >>>>>> the RPi pipeline handler:
> >>>>>>
> >>>>>> Reviewed-by: Kieran Bingham <kieran.bingham@ideasonboard.com>
> >>>>>>
> >>>>>>>> +
> >>>>>>>> +            /* Add the stream format based on the device node used for the use case. */
> >>>>>>>> +            StreamFormats formats(deviceFormats);
> >>>>>>>> +            StreamConfiguration cfg(formats);
> >>>>>>>> +            cfg.size = size;
> >>>>>>>> +            cfg.pixelFormat = pixelFormat;
> >>>>>>>> +            cfg.bufferCount = bufferCount;
> >>>>>>>
> >>>>>>> Patch looks good to me
> >>>>>>>
> >>>>>>> Reviewed-by: Jacopo Mondi <jacopo@jmondi.org>
> >>>>>>>
> >>>>>>>>              config->addConfiguration(cfg);
> >>>>>>>>      }
> >>>>>>>>
Naushir Patuck June 25, 2020, 7:30 a.m. UTC | #11
Hi Laurent,



On Thu, 25 Jun 2020 at 03:37, Laurent Pinchart
<laurent.pinchart@ideasonboard.com> wrote:
>

<snip>

> > >>>>>>>> +            /* Translate the V4L2PixelFormat to PixelFormat. */
> > >>>>>>>> +            std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
> > >>>>>>>> +            std::transform(fmts.begin(), fmts.end(), std::inserter(deviceFormats, deviceFormats.begin()),
>
> One additional comment, should this be deviceFormats.end(), to insert
> the new entries at the end of the map ? It will make a difference in
> efficiency *if* new entries gets inserted right before the iterator.
> There's no such guarantee, as fmts is sorted by V4L2PixelFormat, and we
> convert that to a PixelFormat, but I think there's a bigger chance that
> insertion would happen at the end than at the beginning. If this
> analysis is correct, I'll fix the UVC pipeline handler accordingly.

Yes, that seems reasonable.

>
> I can address both the formats:: conflict and this issue when applying,
> but maybe it would be best if you could test a v2, in which case you
> could as well send it out :-)
>

I'll push version 2 of this patch shortly.

Regards,
Naush
Jacopo Mondi June 25, 2020, 7:39 a.m. UTC | #12
Hi Laurent, Naush

On Thu, Jun 25, 2020 at 05:37:46AM +0300, Laurent Pinchart wrote:
> Hi Naush,
>
> On Thu, Jun 25, 2020 at 05:21:26AM +0300, Laurent Pinchart wrote:
> > On Tue, Jun 23, 2020 at 08:59:49AM +0100, Naushir Patuck wrote:
> > > On Tue, 23 Jun 2020 at 00:46, Laurent Pinchart wrote:
> > >> On Mon, Jun 22, 2020 at 11:25:47AM +0100, Naushir Patuck wrote:
> > >>> On Mon, 22 Jun 2020 at 05:59, Laurent Pinchart wrote:
> > >>>> On Thu, Jun 18, 2020 at 10:18:38AM +0100, Naushir Patuck wrote:
> > >>>>> On Thu, 18 Jun 2020 at 10:03, Kieran Bingham wrote:
> > >>>>>> On 18/06/2020 09:41, Jacopo Mondi wrote:
> > >>>>>>> On Wed, Jun 10, 2020 at 03:26:40PM +0100, Naushir Patuck wrote:
> > >>>>>>>> In generateConfiguration(), add the device node specific formats to the
> > >>>>>>>> StreamConfiguration for each StreamRole requested.
> > >>>>>>>>
> > >>>>>>>> Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
> > >>>>>>>> ---
> > >>>>>>>>  .../pipeline/raspberrypi/raspberrypi.cpp      | 52 +++++++++++++------
> > >>>>>>>>  1 file changed, 36 insertions(+), 16 deletions(-)
> > >>>>>>>>
> > >>>>>>>> diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > >>>>>>>> index e16a9c7f..03a1e641 100644
> > >>>>>>>> --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > >>>>>>>> +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > >>>>>>>> @@ -518,41 +518,45 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> > >>>>>>>>      RPiCameraData *data = cameraData(camera);
> > >>>>>>>>      CameraConfiguration *config = new RPiCameraConfiguration(data);
> > >>>>>>>>      V4L2DeviceFormat sensorFormat;
> > >>>>>>>> +    unsigned int bufferCount;
> > >>>>>>>> +    PixelFormat pixelFormat;
> > >>>>>>>>      V4L2PixFmtMap fmts;
> > >>>>>>>> +    Size size;
> > >>>>>>>>
> > >>>>>>>>      if (roles.empty())
> > >>>>>>>>              return config;
> > >>>>>>>>
> > >>>>>>>>      for (const StreamRole role : roles) {
> > >>>>>>>> -            StreamConfiguration cfg{};
> > >>>>>>>> -
> > >>>>>>>>              switch (role) {
> > >>>>>>>>              case StreamRole::StillCaptureRaw:
> > >>>>>>>> -                    cfg.size = data->sensor_->resolution();
> > >>>>>>>> +                    size = data->sensor_->resolution();
> > >>>>>>>>                      fmts = data->unicam_[Unicam::Image].dev()->formats();
> > >>>>>>>> -                    sensorFormat = findBestMode(fmts, cfg.size);
> > >>>>>>>> -                    cfg.pixelFormat = sensorFormat.fourcc.toPixelFormat();
> > >>>>>>>> -                    ASSERT(cfg.pixelFormat.isValid());
> > >>>>>>>> -                    cfg.bufferCount = 1;
> > >>>>>>>> +                    sensorFormat = findBestMode(fmts, size);
> > >>>>>>>> +                    pixelFormat = sensorFormat.fourcc.toPixelFormat();
> > >>>>>>>> +                    ASSERT(pixelFormat.isValid());
> > >>>>>>>> +                    bufferCount = 1;
> > >>>>>>>>                      break;
> > >>>>>>>>
> > >>>>>>>>              case StreamRole::StillCapture:
> > >>>>>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > >>>>>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> > >>>>>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > >>>>
> > >>>> This conflicts with recent rework of format handling, this line should
> > >>>> now be
> > >>>>
> > >>>> +                       pixelFormat = formats::NV12;
> > >>>>
> > >>>> Same for the other formats below.
> > >>>
> > >>> Will update in the next patch.
> > >>>
> > >>>>>>>>                      /* Return the largest sensor resolution. */
> > >>>>>>>> -                    cfg.size = data->sensor_->resolution();
> > >>>>>>>> -                    cfg.bufferCount = 1;
> > >>>>>>>> +                    size = data->sensor_->resolution();
> > >>>>>>>> +                    bufferCount = 1;
> > >>>>>>>>                      break;
> > >>>>>>>>
> > >>>>>>>>              case StreamRole::VideoRecording:
> > >>>>>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > >>>>>>>> -                    cfg.size = { 1920, 1080 };
> > >>>>>>>> -                    cfg.bufferCount = 4;
> > >>>>>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> > >>>>>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > >>>>>>>> +                    size = { 1920, 1080 };
> > >>>>>>>> +                    bufferCount = 4;
> > >>>>>>>>                      break;
> > >>>>>>>>
> > >>>>>>>>              case StreamRole::Viewfinder:
> > >>>>>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> > >>>>>>>> -                    cfg.size = { 800, 600 };
> > >>>>>>>> -                    cfg.bufferCount = 4;
> > >>>>>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> > >>>>>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> > >>>>>>>> +                    size = { 800, 600 };
> > >>>>>>>> +                    bufferCount = 4;
> > >>>>>>>>                      break;
> > >>>>>>>>
> > >>>>>>>>              default:
> > >>>>>>>> @@ -561,6 +565,22 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> > >>>>>>>>                      break;
> > >>>>>>>>              }
> > >>>>>>>>
> > >>>>>>>> +            /* Translate the V4L2PixelFormat to PixelFormat. */
> > >>>>>>>> +            std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
> > >>>>>>>> +            std::transform(fmts.begin(), fmts.end(), std::inserter(deviceFormats, deviceFormats.begin()),
>
> One additional comment, should this be deviceFormats.end(), to insert
> the new entries at the end of the map ? It will make a difference in

Shouldn't this be handled by using back_inserter() if that's what we
want ?
Naushir Patuck June 25, 2020, 7:47 a.m. UTC | #13
Hi all,

On Thu, 25 Jun 2020 at 08:35, Jacopo Mondi <jacopo@jmondi.org> wrote:
>
> Hi Laurent, Naush
>
> On Thu, Jun 25, 2020 at 05:37:46AM +0300, Laurent Pinchart wrote:
> > Hi Naush,
> >
> > On Thu, Jun 25, 2020 at 05:21:26AM +0300, Laurent Pinchart wrote:
> > > On Tue, Jun 23, 2020 at 08:59:49AM +0100, Naushir Patuck wrote:
> > > > On Tue, 23 Jun 2020 at 00:46, Laurent Pinchart wrote:
> > > >> On Mon, Jun 22, 2020 at 11:25:47AM +0100, Naushir Patuck wrote:
> > > >>> On Mon, 22 Jun 2020 at 05:59, Laurent Pinchart wrote:
> > > >>>> On Thu, Jun 18, 2020 at 10:18:38AM +0100, Naushir Patuck wrote:
> > > >>>>> On Thu, 18 Jun 2020 at 10:03, Kieran Bingham wrote:
> > > >>>>>> On 18/06/2020 09:41, Jacopo Mondi wrote:
> > > >>>>>>> On Wed, Jun 10, 2020 at 03:26:40PM +0100, Naushir Patuck wrote:
> > > >>>>>>>> In generateConfiguration(), add the device node specific formats to the
> > > >>>>>>>> StreamConfiguration for each StreamRole requested.
> > > >>>>>>>>
> > > >>>>>>>> Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
> > > >>>>>>>> ---
> > > >>>>>>>>  .../pipeline/raspberrypi/raspberrypi.cpp      | 52 +++++++++++++------
> > > >>>>>>>>  1 file changed, 36 insertions(+), 16 deletions(-)
> > > >>>>>>>>
> > > >>>>>>>> diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > >>>>>>>> index e16a9c7f..03a1e641 100644
> > > >>>>>>>> --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > >>>>>>>> +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > >>>>>>>> @@ -518,41 +518,45 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> > > >>>>>>>>      RPiCameraData *data = cameraData(camera);
> > > >>>>>>>>      CameraConfiguration *config = new RPiCameraConfiguration(data);
> > > >>>>>>>>      V4L2DeviceFormat sensorFormat;
> > > >>>>>>>> +    unsigned int bufferCount;
> > > >>>>>>>> +    PixelFormat pixelFormat;
> > > >>>>>>>>      V4L2PixFmtMap fmts;
> > > >>>>>>>> +    Size size;
> > > >>>>>>>>
> > > >>>>>>>>      if (roles.empty())
> > > >>>>>>>>              return config;
> > > >>>>>>>>
> > > >>>>>>>>      for (const StreamRole role : roles) {
> > > >>>>>>>> -            StreamConfiguration cfg{};
> > > >>>>>>>> -
> > > >>>>>>>>              switch (role) {
> > > >>>>>>>>              case StreamRole::StillCaptureRaw:
> > > >>>>>>>> -                    cfg.size = data->sensor_->resolution();
> > > >>>>>>>> +                    size = data->sensor_->resolution();
> > > >>>>>>>>                      fmts = data->unicam_[Unicam::Image].dev()->formats();
> > > >>>>>>>> -                    sensorFormat = findBestMode(fmts, cfg.size);
> > > >>>>>>>> -                    cfg.pixelFormat = sensorFormat.fourcc.toPixelFormat();
> > > >>>>>>>> -                    ASSERT(cfg.pixelFormat.isValid());
> > > >>>>>>>> -                    cfg.bufferCount = 1;
> > > >>>>>>>> +                    sensorFormat = findBestMode(fmts, size);
> > > >>>>>>>> +                    pixelFormat = sensorFormat.fourcc.toPixelFormat();
> > > >>>>>>>> +                    ASSERT(pixelFormat.isValid());
> > > >>>>>>>> +                    bufferCount = 1;
> > > >>>>>>>>                      break;
> > > >>>>>>>>
> > > >>>>>>>>              case StreamRole::StillCapture:
> > > >>>>>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > > >>>>>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> > > >>>>>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > > >>>>
> > > >>>> This conflicts with recent rework of format handling, this line should
> > > >>>> now be
> > > >>>>
> > > >>>> +                       pixelFormat = formats::NV12;
> > > >>>>
> > > >>>> Same for the other formats below.
> > > >>>
> > > >>> Will update in the next patch.
> > > >>>
> > > >>>>>>>>                      /* Return the largest sensor resolution. */
> > > >>>>>>>> -                    cfg.size = data->sensor_->resolution();
> > > >>>>>>>> -                    cfg.bufferCount = 1;
> > > >>>>>>>> +                    size = data->sensor_->resolution();
> > > >>>>>>>> +                    bufferCount = 1;
> > > >>>>>>>>                      break;
> > > >>>>>>>>
> > > >>>>>>>>              case StreamRole::VideoRecording:
> > > >>>>>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > > >>>>>>>> -                    cfg.size = { 1920, 1080 };
> > > >>>>>>>> -                    cfg.bufferCount = 4;
> > > >>>>>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> > > >>>>>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > > >>>>>>>> +                    size = { 1920, 1080 };
> > > >>>>>>>> +                    bufferCount = 4;
> > > >>>>>>>>                      break;
> > > >>>>>>>>
> > > >>>>>>>>              case StreamRole::Viewfinder:
> > > >>>>>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> > > >>>>>>>> -                    cfg.size = { 800, 600 };
> > > >>>>>>>> -                    cfg.bufferCount = 4;
> > > >>>>>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> > > >>>>>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> > > >>>>>>>> +                    size = { 800, 600 };
> > > >>>>>>>> +                    bufferCount = 4;
> > > >>>>>>>>                      break;
> > > >>>>>>>>
> > > >>>>>>>>              default:
> > > >>>>>>>> @@ -561,6 +565,22 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> > > >>>>>>>>                      break;
> > > >>>>>>>>              }
> > > >>>>>>>>
> > > >>>>>>>> +            /* Translate the V4L2PixelFormat to PixelFormat. */
> > > >>>>>>>> +            std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
> > > >>>>>>>> +            std::transform(fmts.begin(), fmts.end(), std::inserter(deviceFormats, deviceFormats.begin()),
> >
> > One additional comment, should this be deviceFormats.end(), to insert
> > the new entries at the end of the map ? It will make a difference in
>
> Shouldn't this be handled by using back_inserter() if that's what we
> want ?

From my understanding, yes, this is probably cleaner.  However, shouldn't
they both be equivalent?

Regards,
Naush
Jacopo Mondi June 25, 2020, 8:03 a.m. UTC | #14
Hi Naush,

On Thu, Jun 25, 2020 at 08:47:36AM +0100, Naushir Patuck wrote:
> Hi all,
>
> On Thu, 25 Jun 2020 at 08:35, Jacopo Mondi <jacopo@jmondi.org> wrote:
> >
> > Hi Laurent, Naush
> >
> > On Thu, Jun 25, 2020 at 05:37:46AM +0300, Laurent Pinchart wrote:
> > > Hi Naush,
> > >
> > > On Thu, Jun 25, 2020 at 05:21:26AM +0300, Laurent Pinchart wrote:
> > > > On Tue, Jun 23, 2020 at 08:59:49AM +0100, Naushir Patuck wrote:
> > > > > On Tue, 23 Jun 2020 at 00:46, Laurent Pinchart wrote:
> > > > >> On Mon, Jun 22, 2020 at 11:25:47AM +0100, Naushir Patuck wrote:
> > > > >>> On Mon, 22 Jun 2020 at 05:59, Laurent Pinchart wrote:
> > > > >>>> On Thu, Jun 18, 2020 at 10:18:38AM +0100, Naushir Patuck wrote:
> > > > >>>>> On Thu, 18 Jun 2020 at 10:03, Kieran Bingham wrote:
> > > > >>>>>> On 18/06/2020 09:41, Jacopo Mondi wrote:
> > > > >>>>>>> On Wed, Jun 10, 2020 at 03:26:40PM +0100, Naushir Patuck wrote:
> > > > >>>>>>>> In generateConfiguration(), add the device node specific formats to the
> > > > >>>>>>>> StreamConfiguration for each StreamRole requested.
> > > > >>>>>>>>
> > > > >>>>>>>> Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
> > > > >>>>>>>> ---
> > > > >>>>>>>>  .../pipeline/raspberrypi/raspberrypi.cpp      | 52 +++++++++++++------
> > > > >>>>>>>>  1 file changed, 36 insertions(+), 16 deletions(-)
> > > > >>>>>>>>
> > > > >>>>>>>> diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > >>>>>>>> index e16a9c7f..03a1e641 100644
> > > > >>>>>>>> --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > >>>>>>>> +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > >>>>>>>> @@ -518,41 +518,45 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> > > > >>>>>>>>      RPiCameraData *data = cameraData(camera);
> > > > >>>>>>>>      CameraConfiguration *config = new RPiCameraConfiguration(data);
> > > > >>>>>>>>      V4L2DeviceFormat sensorFormat;
> > > > >>>>>>>> +    unsigned int bufferCount;
> > > > >>>>>>>> +    PixelFormat pixelFormat;
> > > > >>>>>>>>      V4L2PixFmtMap fmts;
> > > > >>>>>>>> +    Size size;
> > > > >>>>>>>>
> > > > >>>>>>>>      if (roles.empty())
> > > > >>>>>>>>              return config;
> > > > >>>>>>>>
> > > > >>>>>>>>      for (const StreamRole role : roles) {
> > > > >>>>>>>> -            StreamConfiguration cfg{};
> > > > >>>>>>>> -
> > > > >>>>>>>>              switch (role) {
> > > > >>>>>>>>              case StreamRole::StillCaptureRaw:
> > > > >>>>>>>> -                    cfg.size = data->sensor_->resolution();
> > > > >>>>>>>> +                    size = data->sensor_->resolution();
> > > > >>>>>>>>                      fmts = data->unicam_[Unicam::Image].dev()->formats();
> > > > >>>>>>>> -                    sensorFormat = findBestMode(fmts, cfg.size);
> > > > >>>>>>>> -                    cfg.pixelFormat = sensorFormat.fourcc.toPixelFormat();
> > > > >>>>>>>> -                    ASSERT(cfg.pixelFormat.isValid());
> > > > >>>>>>>> -                    cfg.bufferCount = 1;
> > > > >>>>>>>> +                    sensorFormat = findBestMode(fmts, size);
> > > > >>>>>>>> +                    pixelFormat = sensorFormat.fourcc.toPixelFormat();
> > > > >>>>>>>> +                    ASSERT(pixelFormat.isValid());
> > > > >>>>>>>> +                    bufferCount = 1;
> > > > >>>>>>>>                      break;
> > > > >>>>>>>>
> > > > >>>>>>>>              case StreamRole::StillCapture:
> > > > >>>>>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > > > >>>>>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> > > > >>>>>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > > > >>>>
> > > > >>>> This conflicts with recent rework of format handling, this line should
> > > > >>>> now be
> > > > >>>>
> > > > >>>> +                       pixelFormat = formats::NV12;
> > > > >>>>
> > > > >>>> Same for the other formats below.
> > > > >>>
> > > > >>> Will update in the next patch.
> > > > >>>
> > > > >>>>>>>>                      /* Return the largest sensor resolution. */
> > > > >>>>>>>> -                    cfg.size = data->sensor_->resolution();
> > > > >>>>>>>> -                    cfg.bufferCount = 1;
> > > > >>>>>>>> +                    size = data->sensor_->resolution();
> > > > >>>>>>>> +                    bufferCount = 1;
> > > > >>>>>>>>                      break;
> > > > >>>>>>>>
> > > > >>>>>>>>              case StreamRole::VideoRecording:
> > > > >>>>>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > > > >>>>>>>> -                    cfg.size = { 1920, 1080 };
> > > > >>>>>>>> -                    cfg.bufferCount = 4;
> > > > >>>>>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> > > > >>>>>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_NV12);
> > > > >>>>>>>> +                    size = { 1920, 1080 };
> > > > >>>>>>>> +                    bufferCount = 4;
> > > > >>>>>>>>                      break;
> > > > >>>>>>>>
> > > > >>>>>>>>              case StreamRole::Viewfinder:
> > > > >>>>>>>> -                    cfg.pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> > > > >>>>>>>> -                    cfg.size = { 800, 600 };
> > > > >>>>>>>> -                    cfg.bufferCount = 4;
> > > > >>>>>>>> +                    fmts = data->isp_[Isp::Output0].dev()->formats();
> > > > >>>>>>>> +                    pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
> > > > >>>>>>>> +                    size = { 800, 600 };
> > > > >>>>>>>> +                    bufferCount = 4;
> > > > >>>>>>>>                      break;
> > > > >>>>>>>>
> > > > >>>>>>>>              default:
> > > > >>>>>>>> @@ -561,6 +565,22 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> > > > >>>>>>>>                      break;
> > > > >>>>>>>>              }
> > > > >>>>>>>>
> > > > >>>>>>>> +            /* Translate the V4L2PixelFormat to PixelFormat. */
> > > > >>>>>>>> +            std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
> > > > >>>>>>>> +            std::transform(fmts.begin(), fmts.end(), std::inserter(deviceFormats, deviceFormats.begin()),
> > >
> > > One additional comment, should this be deviceFormats.end(), to insert
> > > the new entries at the end of the map ? It will make a difference in
> >
> > Shouldn't this be handled by using back_inserter() if that's what we
> > want ?
>
> From my understanding, yes, this is probably cleaner.  However, they
> should be both equivalent?

Ah wait, possibly not; I overlooked the type of deviceFormats.
back_inserter() creates an std::back_insert_iterator, which calls the
container's push_back() operation, and std::map does not provide one. I
think std::inserter() with deviceFormats.end() is the way to go.
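
(For illustration, a self-contained example with plain std:: types, not the
pipeline handler code, showing the difference:)

	#include <algorithm>
	#include <iterator>
	#include <map>

	int main()
	{
		std::map<int, int> src{ { 1, 10 }, { 2, 20 } };
		std::map<int, int> dst;

		/* std::insert_iterator calls dst.insert(hint, value), which
		 * std::map provides. */
		std::copy(src.begin(), src.end(),
			  std::inserter(dst, dst.end()));

		/*
		 * std::back_insert_iterator would call dst.push_back(value),
		 * which std::map does not provide, so the line below would
		 * not compile:
		 *
		 * std::copy(src.begin(), src.end(), std::back_inserter(dst));
		 */

		return 0;
	}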

Sorry for the noise.

>
> Regards,
> Naush

Patch

diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
index e16a9c7f..03a1e641 100644
--- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
+++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
@@ -518,41 +518,45 @@  CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
 	RPiCameraData *data = cameraData(camera);
 	CameraConfiguration *config = new RPiCameraConfiguration(data);
 	V4L2DeviceFormat sensorFormat;
+	unsigned int bufferCount;
+	PixelFormat pixelFormat;
 	V4L2PixFmtMap fmts;
+	Size size;
 
 	if (roles.empty())
 		return config;
 
 	for (const StreamRole role : roles) {
-		StreamConfiguration cfg{};
-
 		switch (role) {
 		case StreamRole::StillCaptureRaw:
-			cfg.size = data->sensor_->resolution();
+			size = data->sensor_->resolution();
 			fmts = data->unicam_[Unicam::Image].dev()->formats();
-			sensorFormat = findBestMode(fmts, cfg.size);
-			cfg.pixelFormat = sensorFormat.fourcc.toPixelFormat();
-			ASSERT(cfg.pixelFormat.isValid());
-			cfg.bufferCount = 1;
+			sensorFormat = findBestMode(fmts, size);
+			pixelFormat = sensorFormat.fourcc.toPixelFormat();
+			ASSERT(pixelFormat.isValid());
+			bufferCount = 1;
 			break;
 
 		case StreamRole::StillCapture:
-			cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
+			fmts = data->isp_[Isp::Output0].dev()->formats();
+			pixelFormat = PixelFormat(DRM_FORMAT_NV12);
 			/* Return the largest sensor resolution. */
-			cfg.size = data->sensor_->resolution();
-			cfg.bufferCount = 1;
+			size = data->sensor_->resolution();
+			bufferCount = 1;
 			break;
 
 		case StreamRole::VideoRecording:
-			cfg.pixelFormat = PixelFormat(DRM_FORMAT_NV12);
-			cfg.size = { 1920, 1080 };
-			cfg.bufferCount = 4;
+			fmts = data->isp_[Isp::Output0].dev()->formats();
+			pixelFormat = PixelFormat(DRM_FORMAT_NV12);
+			size = { 1920, 1080 };
+			bufferCount = 4;
 			break;
 
 		case StreamRole::Viewfinder:
-			cfg.pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
-			cfg.size = { 800, 600 };
-			cfg.bufferCount = 4;
+			fmts = data->isp_[Isp::Output0].dev()->formats();
+			pixelFormat = PixelFormat(DRM_FORMAT_ARGB8888);
+			size = { 800, 600 };
+			bufferCount = 4;
 			break;
 
 		default:
@@ -561,6 +565,22 @@  CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
 			break;
 		}
 
+		/* Translate the V4L2PixelFormat to PixelFormat. */
+		std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
+		std::transform(fmts.begin(), fmts.end(), std::inserter(deviceFormats, deviceFormats.begin()),
+			       [&](const decltype(fmts)::value_type &format) {
+					return decltype(deviceFormats)::value_type{
+						format.first.toPixelFormat(),
+						format.second
+					};
+			       });
+
+		/* Add the stream format based on the device node used for the use case. */
+		StreamFormats formats(deviceFormats);
+		StreamConfiguration cfg(formats);
+		cfg.size = size;
+		cfg.pixelFormat = pixelFormat;
+		cfg.bufferCount = bufferCount;
 		config->addConfiguration(cfg);
 	}