[libcamera-devel,v2,2/6] pipeline: raspberrypi: Convert the pipeline handler to use media controller

Message ID 20211022143907.3089419-3-naush@raspberrypi.com
State Superseded
Series
  • Raspberry Pi: Conversion to media controller

Commit Message

Naushir Patuck Oct. 22, 2021, 2:39 p.m. UTC
Switch the pipeline handler to use the new Unicam media controller based driver.
With this change, we directly talk to the sensor device driver to set controls
and set/get formats in the pipeline handler.

This change requires the accompanying Raspberry Pi linux kernel change at
https://github.com/raspberrypi/linux/pull/4645. If this kernel change is not
present, the pipeline handler will fail to run with an error message informing
the user to update the kernel build.

Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
---
 .../pipeline/raspberrypi/raspberrypi.cpp      | 146 +++++++++++-------
 1 file changed, 90 insertions(+), 56 deletions(-)

Comments

David Plowman Oct. 22, 2021, 3:16 p.m. UTC | #1
Hi Naush

Thanks for the new version. I think a lot of stuff is tidier now that
we're clear that we're selecting a subdev format.

On Fri, 22 Oct 2021 at 15:40, Naushir Patuck <naush@raspberrypi.com> wrote:
>
> Switch the pipeline handler to use the new Unicam media controller based driver.
> With this change, we directly talk to the sensor device driver to set controls
> and set/get formats in the pipeline handler.
>
> This change requires the accompanying Raspberry Pi linux kernel change at
> https://github.com/raspberrypi/linux/pull/4645. If this kernel change is not
> present, the pipeline handler will fail to run with an error message informing
> the user to update the kernel build.
>
> Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
> ---
>  .../pipeline/raspberrypi/raspberrypi.cpp      | 146 +++++++++++-------
>  1 file changed, 90 insertions(+), 56 deletions(-)
>
> diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> index 1634ca98f481..a31b0f81eba7 100644
> --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> @@ -48,6 +48,29 @@ LOG_DEFINE_CATEGORY(RPI)
>
>  namespace {
>
> +/* Map of mbus codes to supported sizes reported by the sensor. */
> +using SensorFormats = std::map<unsigned int, std::vector<Size>>;
> +
> +SensorFormats populateSensorFormats(std::unique_ptr<CameraSensor> &sensor)
> +{
> +       SensorFormats formats;
> +
> +       for (auto const mbusCode : sensor->mbusCodes())
> +               formats.emplace(mbusCode, sensor->sizes(mbusCode));
> +
> +       return formats;
> +}
> +
> +inline V4L2DeviceFormat toV4L2DeviceFormat(V4L2SubdeviceFormat &mode)
> +{
> +       V4L2DeviceFormat deviceFormat;
> +       BayerFormat bayer = BayerFormat::fromMbusCode(mode.mbus_code);
> +
> +       deviceFormat.fourcc = bayer.toV4L2PixelFormat();
> +       deviceFormat.size = mode.size;
> +       return deviceFormat;
> +}
> +

I guess I wonder slightly whether this should be generally available,
but it could easily be a change for another time. (Actually, same
question for isRaw just below...)

>  bool isRaw(PixelFormat &pixFmt)
>  {
>         /*
> @@ -74,11 +97,10 @@ double scoreFormat(double desired, double actual)
>         return score;
>  }
>
> -V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,
> -                             const Size &req)
> +V4L2SubdeviceFormat findBestMode(const SensorFormats &formatsMap, const Size &req)
>  {
>         double bestScore = std::numeric_limits<double>::max(), score;
> -       V4L2DeviceFormat bestMode;
> +       V4L2SubdeviceFormat bestMode;
>
>  #define PENALTY_AR             1500.0
>  #define PENALTY_8BIT           2000.0
> @@ -88,18 +110,17 @@ V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,
>
>         /* Calculate the closest/best mode from the user requested size. */
>         for (const auto &iter : formatsMap) {
> -               V4L2PixelFormat v4l2Format = iter.first;
> -               const PixelFormatInfo &info = PixelFormatInfo::info(v4l2Format);
> +               const unsigned int mbus_code = iter.first;
> +               const PixelFormat format = BayerFormat::fromMbusCode(mbus_code).toPixelFormat();
> +               const PixelFormatInfo &info = PixelFormatInfo::info(format);
>
> -               for (const SizeRange &sz : iter.second) {
> -                       double modeWidth = sz.contains(req) ? req.width : sz.max.width;
> -                       double modeHeight = sz.contains(req) ? req.height : sz.max.height;
> +               for (const Size &sz : iter.second) {
>                         double reqAr = static_cast<double>(req.width) / req.height;
> -                       double modeAr = modeWidth / modeHeight;
> +                       double modeAr = sz.width / sz.height;
>
>                         /* Score the dimensions for closeness. */
> -                       score = scoreFormat(req.width, modeWidth);
> -                       score += scoreFormat(req.height, modeHeight);
> +                       score = scoreFormat(req.width, sz.width);
> +                       score += scoreFormat(req.height, sz.height);
>                         score += PENALTY_AR * scoreFormat(reqAr, modeAr);
>
>                         /* Add any penalties... this is not an exact science! */
> @@ -115,12 +136,12 @@ V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,
>
>                         if (score <= bestScore) {
>                                 bestScore = score;
> -                               bestMode.fourcc = v4l2Format;
> -                               bestMode.size = Size(modeWidth, modeHeight);
> +                               bestMode.mbus_code = mbus_code;
> +                               bestMode.size = sz;
>                         }
>
> -                       LOG(RPI, Info) << "Mode: " << modeWidth << "x" << modeHeight
> -                                      << " fmt " << v4l2Format.toString()
> +                       LOG(RPI, Info) << "Mode: " << sz.width << "x" << sz.height
> +                                      << " fmt " << format.toString()
>                                        << " Score: " << score
>                                        << " (best " << bestScore << ")";
>                 }
> @@ -170,6 +191,7 @@ public:
>         std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;
>
>         std::unique_ptr<CameraSensor> sensor_;
> +       SensorFormats sensorFormats_;
>         /* Array of Unicam and ISP device streams and associated buffers/streams. */
>         RPi::Device<Unicam, 2> unicam_;
>         RPi::Device<Isp, 4> isp_;
> @@ -352,9 +374,9 @@ CameraConfiguration::Status RPiCameraConfiguration::validate()
>                          * Calculate the best sensor mode we can use based on
>                          * the user request.
>                          */
> -                       V4L2VideoDevice::Formats fmts = data_->unicam_[Unicam::Image].dev()->formats();
> -                       V4L2DeviceFormat sensorFormat = findBestMode(fmts, cfg.size);
> -                       int ret = data_->unicam_[Unicam::Image].dev()->tryFormat(&sensorFormat);
> +                       V4L2SubdeviceFormat sensorFormat = findBestMode(data_->sensorFormats_, cfg.size);
> +                       V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat);
> +                       int ret = data_->unicam_[Unicam::Image].dev()->tryFormat(&unicamFormat);
>                         if (ret)
>                                 return Invalid;
>
> @@ -366,7 +388,7 @@ CameraConfiguration::Status RPiCameraConfiguration::validate()
>                          * fetch the "native" (i.e. untransformed) Bayer order,
>                          * because the sensor may currently be flipped!
>                          */
> -                       V4L2PixelFormat fourcc = sensorFormat.fourcc;
> +                       V4L2PixelFormat fourcc = unicamFormat.fourcc;
>                         if (data_->flipsAlterBayerOrder_) {
>                                 BayerFormat bayer = BayerFormat::fromV4L2PixelFormat(fourcc);
>                                 bayer.order = data_->nativeBayerOrder_;
> @@ -375,15 +397,15 @@ CameraConfiguration::Status RPiCameraConfiguration::validate()
>                         }
>
>                         PixelFormat sensorPixFormat = fourcc.toPixelFormat();
> -                       if (cfg.size != sensorFormat.size ||
> +                       if (cfg.size != unicamFormat.size ||
>                             cfg.pixelFormat != sensorPixFormat) {
> -                               cfg.size = sensorFormat.size;
> +                               cfg.size = unicamFormat.size;
>                                 cfg.pixelFormat = sensorPixFormat;
>                                 status = Adjusted;
>                         }
>
> -                       cfg.stride = sensorFormat.planes[0].bpl;
> -                       cfg.frameSize = sensorFormat.planes[0].size;
> +                       cfg.stride = unicamFormat.planes[0].bpl;
> +                       cfg.frameSize = unicamFormat.planes[0].size;
>
>                         rawCount++;
>                 } else {
> @@ -472,7 +494,8 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
>  {
>         RPiCameraData *data = cameraData(camera);
>         CameraConfiguration *config = new RPiCameraConfiguration(data);
> -       V4L2DeviceFormat sensorFormat;
> +       V4L2SubdeviceFormat sensorFormat;
> +       V4L2DeviceFormat unicamFormat;
>         unsigned int bufferCount;
>         PixelFormat pixelFormat;
>         V4L2VideoDevice::Formats fmts;
> @@ -487,9 +510,9 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
>                 switch (role) {
>                 case StreamRole::Raw:
>                         size = data->sensor_->resolution();
> -                       fmts = data->unicam_[Unicam::Image].dev()->formats();
> -                       sensorFormat = findBestMode(fmts, size);
> -                       pixelFormat = sensorFormat.fourcc.toPixelFormat();
> +                       sensorFormat = findBestMode(data->sensorFormats_, size);
> +                       unicamFormat = toV4L2DeviceFormat(sensorFormat);
> +                       pixelFormat = BayerFormat::fromMbusCode(sensorFormat.mbus_code).toPixelFormat();
>                         ASSERT(pixelFormat.isValid());
>                         bufferCount = 2;
>                         rawCount++;
> @@ -599,32 +622,30 @@ int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
>         }
>
>         /* First calculate the best sensor mode we can use based on the user request. */
> -       V4L2VideoDevice::Formats fmts = data->unicam_[Unicam::Image].dev()->formats();
> -       V4L2DeviceFormat sensorFormat = findBestMode(fmts, rawStream ? sensorSize : maxSize);
> +       V4L2SubdeviceFormat sensorFormat = findBestMode(data->sensorFormats_, rawStream ? sensorSize : maxSize);
> +       V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat);
> +
> +       ret = data->sensor_->setFormat(&sensorFormat);
> +       if (ret)
> +               return ret;
>
>         /*
>          * Unicam image output format. The ISP input format gets set at start,
>          * just in case we have swapped bayer orders due to flips.
>          */
> -       ret = data->unicam_[Unicam::Image].dev()->setFormat(&sensorFormat);
> +       ret = data->unicam_[Unicam::Image].dev()->setFormat(&unicamFormat);
>         if (ret)
>                 return ret;
>
> -       /*
> -        * The control ranges associated with the sensor may need updating
> -        * after a format change.
> -        * \todo Use the CameraSensor::setFormat API instead.
> -        */
> -       data->sensor_->updateControlInfo();
> -
>         LOG(RPI, Info) << "Sensor: " << camera->id()
> -                      << " - Selected mode: " << sensorFormat.toString();
> +                      << " - Selected sensor mode: " << sensorFormat.toString()
> +                      << " - Selected unicam mode: " << unicamFormat.toString();
>
>         /*
>          * This format may be reset on start() if the bayer order has changed
>          * because of flips in the sensor.
>          */
> -       ret = data->isp_[Isp::Input].dev()->setFormat(&sensorFormat);
> +       ret = data->isp_[Isp::Input].dev()->setFormat(&unicamFormat);
>         if (ret)
>                 return ret;
>
> @@ -746,8 +767,8 @@ int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
>         data->ispMinCropSize_ = testCrop.size();
>
>         /* Adjust aspect ratio by providing crops on the input image. */
> -       Size size = sensorFormat.size.boundedToAspectRatio(maxSize);
> -       Rectangle crop = size.centeredTo(Rectangle(sensorFormat.size).center());
> +       Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
> +       Rectangle crop = size.centeredTo(Rectangle(unicamFormat.size).center());
>         data->ispCrop_ = crop;
>
>         data->isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &crop);
> @@ -761,8 +782,11 @@ int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
>          * supports it.
>          */
>         if (data->sensorMetadata_) {
> -               format = {};
> +               V4L2SubdeviceFormat embeddedFormat;
> +
> +               data->sensor_->device()->getFormat(1, &embeddedFormat);
>                 format.fourcc = V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);
> +               format.planes[0].size = embeddedFormat.size.width * embeddedFormat.size.height;
>
>                 LOG(RPI, Debug) << "Setting embedded data format.";
>                 ret = data->unicam_[Unicam::Embedded].dev()->setFormat(&format);
> @@ -847,9 +871,14 @@ int PipelineHandlerRPi::start(Camera *camera, const ControlList *controls)
>          * IPA configure may have changed the sensor flips - hence the bayer
>          * order. Get the sensor format and set the ISP input now.
>          */
> -       V4L2DeviceFormat sensorFormat;
> -       data->unicam_[Unicam::Image].dev()->getFormat(&sensorFormat);
> -       ret = data->isp_[Isp::Input].dev()->setFormat(&sensorFormat);
> +       V4L2SubdeviceFormat sensorFormat;
> +       data->sensor_->device()->getFormat(0, &sensorFormat);
> +
> +       V4L2DeviceFormat ispFormat;
> +       ispFormat.fourcc = BayerFormat::fromMbusCode(sensorFormat.mbus_code).toV4L2PixelFormat();

Generally there's the occasional assumption that fromMbusCode won't
return invalid Bayer formats, and I guess we're comfortable with that?
It certainly shouldn't be possible.

But it seems good to me, so:

Reviewed-by: David Plowman <david.plowman@raspberrypi.com>

Thanks!
David

> +       ispFormat.size = sensorFormat.size;
> +
> +       ret = data->isp_[Isp::Input].dev()->setFormat(&ispFormat);
>         if (ret) {
>                 stop(camera);
>                 return ret;
> @@ -1004,6 +1033,8 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
>         if (data->sensor_->init())
>                 return false;
>
> +       data->sensorFormats_ = populateSensorFormats(data->sensor_);
> +
>         ipa::RPi::SensorConfig sensorConfig;
>         if (data->loadIPA(&sensorConfig)) {
>                 LOG(RPI, Error) << "Failed to load a suitable IPA library";
> @@ -1030,6 +1061,11 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
>                         return false;
>         }
>
> +       if (!(data->unicam_[Unicam::Image].dev()->caps().device_caps() & V4L2_CAP_IO_MC)) {
> +               LOG(RPI, Error) << "Unicam driver did not advertise V4L2_CAP_IO_MC, please update your kernel!";
> +               return false;
> +       }
> +
>         /*
>          * Setup our delayed control writer with the sensor default
>          * gain and exposure delays. Mark VBLANK for priority write.
> @@ -1039,7 +1075,7 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
>                 { V4L2_CID_EXPOSURE, { sensorConfig.exposureDelay, false } },
>                 { V4L2_CID_VBLANK, { sensorConfig.vblankDelay, true } }
>         };
> -       data->delayedCtrls_ = std::make_unique<DelayedControls>(data->unicam_[Unicam::Image].dev(), params);
> +       data->delayedCtrls_ = std::make_unique<DelayedControls>(data->sensor_->device(), params);
>         data->sensorMetadata_ = sensorConfig.sensorMetadata;
>
>         /* Register the controls that the Raspberry Pi IPA can handle. */
> @@ -1066,15 +1102,14 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
>          * As part of answering the final question, we reset the camera to
>          * no transform at all.
>          */
> -
> -       V4L2VideoDevice *dev = data->unicam_[Unicam::Image].dev();
> -       const struct v4l2_query_ext_ctrl *hflipCtrl = dev->controlInfo(V4L2_CID_HFLIP);
> +       const V4L2Subdevice *sensor = data->sensor_->device();
> +       const struct v4l2_query_ext_ctrl *hflipCtrl = sensor->controlInfo(V4L2_CID_HFLIP);
>         if (hflipCtrl) {
>                 /* We assume it will support vflips too... */
>                 data->supportsFlips_ = true;
>                 data->flipsAlterBayerOrder_ = hflipCtrl->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT;
>
> -               ControlList ctrls(dev->controls());
> +               ControlList ctrls(data->sensor_->controls());
>                 ctrls.set(V4L2_CID_HFLIP, 0);
>                 ctrls.set(V4L2_CID_VFLIP, 0);
>                 data->setSensorControls(ctrls);
> @@ -1082,9 +1117,8 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
>
>         /* Look for a valid Bayer format. */
>         BayerFormat bayerFormat;
> -       for (const auto &iter : dev->formats()) {
> -               V4L2PixelFormat v4l2Format = iter.first;
> -               bayerFormat = BayerFormat::fromV4L2PixelFormat(v4l2Format);
> +       for (const auto &iter : data->sensorFormats_) {
> +               bayerFormat = BayerFormat::fromMbusCode(iter.first);
>                 if (bayerFormat.isValid())
>                         break;
>         }
> @@ -1271,7 +1305,7 @@ int RPiCameraData::configureIPA(const CameraConfiguration *config)
>                 }
>         }
>
> -       entityControls.emplace(0, unicam_[Unicam::Image].dev()->controls());
> +       entityControls.emplace(0, sensor_->controls());
>         entityControls.emplace(1, isp_[Isp::Input].dev()->controls());
>
>         /* Always send the user transform to the IPA. */
> @@ -1406,10 +1440,10 @@ void RPiCameraData::setSensorControls(ControlList &controls)
>                 ControlList vblank_ctrl;
>
>                 vblank_ctrl.set(V4L2_CID_VBLANK, controls.get(V4L2_CID_VBLANK));
> -               unicam_[Unicam::Image].dev()->setControls(&vblank_ctrl);
> +               sensor_->setControls(&vblank_ctrl);
>         }
>
> -       unicam_[Unicam::Image].dev()->setControls(&controls);
> +       sensor_->setControls(&controls);
>  }
>
>  void RPiCameraData::unicamBufferDequeue(FrameBuffer *buffer)
> --
> 2.25.1
>
Naushir Patuck Oct. 22, 2021, 3:29 p.m. UTC | #2
Hi David,

Thank you for the feedback.

On Fri, 22 Oct 2021 at 16:16, David Plowman <david.plowman@raspberrypi.com> wrote:

[snip]

> I guess I wonder slightly whether this should be generally available,
> but it could easily be a change for another time. (Actually, same
> question for isRaw just below...)

Agree, this can be addressed separately.


[snip]

> Generally there's the occasional assumption that fromMbusCode won't
> return invalid Bayer formats, and I guess we're comfortable with that?
> It certainly shouldn't be possible.

Yes, I ought to add an assert to ensure the bayer format is valid.
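
For instance (just a sketch), something along these lines in start(), using
the ASSERT macro and BayerFormat::isValid() that are already used elsewhere
in the handler:

        V4L2SubdeviceFormat sensorFormat;
        data->sensor_->device()->getFormat(0, &sensorFormat);

        BayerFormat bayer = BayerFormat::fromMbusCode(sensorFormat.mbus_code);
        /* The sensor only advertises Bayer mbus codes, so this should always hold. */
        ASSERT(bayer.isValid());

        V4L2DeviceFormat ispFormat;
        ispFormat.fourcc = bayer.toV4L2PixelFormat();
        ispFormat.size = sensorFormat.size;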

Regards,
Naush


>
> But it seems good to me, so:
>
> Reviewed-by: David Plowman <david.plowman@raspberrypi.com>
>
> Thanks!
> David
Laurent Pinchart Oct. 25, 2021, 4:44 p.m. UTC | #3
Hi Naush,

Thank you for the patch.

On Fri, Oct 22, 2021 at 04:29:34PM +0100, Naushir Patuck wrote:
[snip]

> > > +inline V4L2DeviceFormat toV4L2DeviceFormat(V4L2SubdeviceFormat &mode)

s/mode/format/

(you know how much I like the concept of sensor mode)

> > > +{
> > > +       V4L2DeviceFormat deviceFormat;
> > > +       BayerFormat bayer = BayerFormat::fromMbusCode(mode.mbus_code);
> > > +
> > > +       deviceFormat.fourcc = bayer.toV4L2PixelFormat();
> > > +       deviceFormat.size = mode.size;
> > > +       return deviceFormat;
> > > +}
> > > +
> >
> > I guess I wonder slightly whether this should be generally available,
> > but it could easily be a change for another time. (Actually, same
> > question for isRaw just below...)
> 
> Agree, this can be addressed separately.

I'm afraid it can't be this simple :-( There's no 1:1 mapping between
media bus codes and V4L2 pixel formats. How a given format received on a
bus by a DMA engine is stored in memory depends on the device. For
instance, unicam will store BGGR RAW10 as V4L2_PIX_FMT_SBGGR10P, while
another device may store it as V4L2_PIX_FMT_SBGGR10 (expanded to 2 bytes
per pixel in memory). Yet another device may left-align the 10 bits,
storing the data in V4L2_PIX_FMT_SBGGR16 format. This mapping can't be
provided by the libcamera core. Using the BayerFormat class to perform
the conversion with BayerFormat::fromMbusCode().toV4L2PixelFormat() may
work in some cases, but not universally.

I'm actually surprised the conversion works here, as I think you'll get
V4L2_PIX_FMT_SBGGR10 when you expect V4L2_PIX_FMT_SBGGR10P. One
possibility to fix this would be to set bayer.packing before calling
toV4L2PixelFormat(). The API is fragile though, and the BayerFormat
class a bit ill-defined (it doesn't tell whether it's supposed to
represent a bus format or a pixel format, and as explained above, it
can't be used interchangeably). This doesn't have to be fixed as part of
this patch series, but I'd like to see that problem addressed, and we
shouldn't build more technical debt.
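
Something along these lines, for instance (just a sketch, the exact name of
the CSI-2 packed enumerator is from memory and may differ):

        inline V4L2DeviceFormat toV4L2DeviceFormat(const V4L2SubdeviceFormat &format)
        {
                V4L2DeviceFormat deviceFormat;
                BayerFormat bayer = BayerFormat::fromMbusCode(format.mbus_code);

                /* Unicam writes raw frames to memory CSI-2 packed. */
                bayer.packing = BayerFormat::Packing::CSI2;

                deviceFormat.fourcc = bayer.toV4L2PixelFormat();
                deviceFormat.size = format.size;
                return deviceFormat;
        }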

> > >  bool isRaw(PixelFormat &pixFmt)
> > >  {
> > >         /*
> > > @@ -74,11 +97,10 @@ double scoreFormat(double desired, double actual)
> > >         return score;
> > >  }
> > >
> > > -V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,
> > > -                             const Size &req)
> > > +V4L2SubdeviceFormat findBestMode(const SensorFormats &formatsMap, const Size &req)
> > >  {
> > >         double bestScore = std::numeric_limits<double>::max(), score;
> > > -       V4L2DeviceFormat bestMode;
> > > +       V4L2SubdeviceFormat bestMode;
> > >
> > >  #define PENALTY_AR             1500.0
> > >  #define PENALTY_8BIT           2000.0
> > > @@ -88,18 +110,17 @@ V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,
> > >
> > >         /* Calculate the closest/best mode from the user requested size. */
> > >         for (const auto &iter : formatsMap) {
> > > -               V4L2PixelFormat v4l2Format = iter.first;
> > > -               const PixelFormatInfo &info = PixelFormatInfo::info(v4l2Format);
> > > +               const unsigned int mbus_code = iter.first;

s/mbus_code/mbusCode/

> > > +               const PixelFormat format = BayerFormat::fromMbusCode(mbus_code).toPixelFormat();

Same issue as above.

> > > +               const PixelFormatInfo &info = PixelFormatInfo::info(format);
> > >
> > > -               for (const SizeRange &sz : iter.second) {

s/sz/size/

> > > -                       double modeWidth = sz.contains(req) ? req.width : sz.max.width;
> > > -                       double modeHeight = sz.contains(req) ? req.height : sz.max.height;
> > > +               for (const Size &sz : iter.second) {
> > >                         double reqAr = static_cast<double>(req.width) / req.height;
> > > -                       double modeAr = modeWidth / modeHeight;
> > > +                       double modeAr = sz.width / sz.height;
> > >
> > >                         /* Score the dimensions for closeness. */
> > > -                       score = scoreFormat(req.width, modeWidth);
> > > -                       score += scoreFormat(req.height, modeHeight);
> > > +                       score = scoreFormat(req.width, sz.width);
> > > +                       score += scoreFormat(req.height, sz.height);
> > >                         score += PENALTY_AR * scoreFormat(reqAr, modeAr);
> > >
> > >                         /* Add any penalties... this is not an exact science! */
> > > @@ -115,12 +136,12 @@ V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,
> > >
> > >                         if (score <= bestScore) {
> > >                                 bestScore = score;
> > > -                               bestMode.fourcc = v4l2Format;
> > > -                               bestMode.size = Size(modeWidth, modeHeight);
> > > +                               bestMode.mbus_code = mbus_code;
> > > +                               bestMode.size = sz;
> > >                         }
> > >
> > > -                       LOG(RPI, Info) << "Mode: " << modeWidth << "x" << modeHeight
> > > -                                      << " fmt " << v4l2Format.toString()
> > > +                       LOG(RPI, Info) << "Mode: " << sz.width << "x" << sz.height

You can use sz.toString().

[snip]

> > > +                       sensorFormat = findBestMode(data->sensorFormats_, size);
> > > +                       unicamFormat = toV4L2DeviceFormat(sensorFormat);
> > > +                       pixelFormat = BayerFormat::fromMbusCode(sensorFormat.mbus_code).toPixelFormat();

Same issue as above.

> > >                         ASSERT(pixelFormat.isValid());
> > >                         bufferCount = 2;
> > >                         rawCount++;
> > > @@ -599,32 +622,30 @@ int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
> > >         }
> > >
> > >         /* First calculate the best sensor mode we can use based on the user request. */
> > > -       V4L2VideoDevice::Formats fmts = data->unicam_[Unicam::Image].dev()->formats();
> > > -       V4L2DeviceFormat sensorFormat = findBestMode(fmts, rawStream ? sensorSize : maxSize);
> > > +       V4L2SubdeviceFormat sensorFormat = findBestMode(data->sensorFormats_, rawStream ? sensorSize : maxSize);
> > > +       V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat);

Shouldn't this go after sensor_->setFormat() in case the sensor adjusts
the format? Actually, as adjustments are not allowed in configure(),
you should check that the format set on the sensor matches the requested
format.
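
Something like this, as a rough sketch (the requestedSize and requestedCode
locals are only illustrative):

        V4L2SubdeviceFormat sensorFormat = findBestMode(data->sensorFormats_,
                                                        rawStream ? sensorSize : maxSize);
        const Size requestedSize = sensorFormat.size;
        const unsigned int requestedCode = sensorFormat.mbus_code;

        ret = data->sensor_->setFormat(&sensorFormat);
        if (ret)
                return ret;

        /* configure() may not adjust, so the sensor must accept the format unchanged. */
        if (sensorFormat.size != requestedSize || sensorFormat.mbus_code != requestedCode)
                return -EINVAL;

        /* Only now derive the Unicam format from what the sensor accepted. */
        V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat);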

> > > +
> > > +       ret = data->sensor_->setFormat(&sensorFormat);
> > > +       if (ret)
> > > +               return ret;
> > >
> > >         /*
> > >          * Unicam image output format. The ISP input format gets set at start,
> > >          * just in case we have swapped bayer orders due to flips.
> > >          */
> > > -       ret = data->unicam_[Unicam::Image].dev()->setFormat(&sensorFormat);
> > > +       ret = data->unicam_[Unicam::Image].dev()->setFormat(&unicamFormat);
> > >         if (ret)
> > >                 return ret;
> > >
> > > -       /*
> > > -        * The control ranges associated with the sensor may need updating
> > > -        * after a format change.
> > > -        * \todo Use the CameraSensor::setFormat API instead.
> > > -        */
> > > -       data->sensor_->updateControlInfo();
> > > -
> > >         LOG(RPI, Info) << "Sensor: " << camera->id()
> > > -                      << " - Selected mode: " << sensorFormat.toString();
> > > +                      << " - Selected sensor mode: " << sensorFormat.toString()
> > > +                      << " - Selected unicam mode: " << unicamFormat.toString();

s/mode/format/ on both lines

Naushir Patuck Oct. 26, 2021, 7:47 a.m. UTC | #4
Hi Laurent,

Thank you for your feedback.

On Mon, 25 Oct 2021 at 17:44, Laurent Pinchart <laurent.pinchart@ideasonboard.com> wrote:

> Hi Naush,
>
> Thank you for the patch.
>
> On Fri, Oct 22, 2021 at 04:29:34PM +0100, Naushir Patuck wrote:
> > On Fri, 22 Oct 2021 at 16:16, David Plowman wrote:
> >
> > > Hi Naush
> > >
> > > Thanks for the new version. I think a lot of stuff is tidier now that
> > > we're clear that we're selecting a subdev format.
> > >
> > > On Fri, 22 Oct 2021 at 15:40, Naushir Patuck wrote:
> > > >
> > > > Switch the pipeline handler to use the new Unicam media controller
> based driver.
> > > > With this change, we directly talk to the sensor device driver to
> set controls
> > > > and set/get formats in the pipeline handler.
> > > >
> > > > This change requires the accompanying Raspberry Pi linux kernel
> change at
> > > > https://github.com/raspberrypi/linux/pull/4645. If this kernel
> change is not
> > > > present, the pipeline handler will fail to run with an error message
> informing
> > > > the user to update the kernel build.
> > > >
> > > > Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
> > > > ---
> > > >  .../pipeline/raspberrypi/raspberrypi.cpp      | 146
> +++++++++++-------
> > > >  1 file changed, 90 insertions(+), 56 deletions(-)
> > > >
> > > > diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > index 1634ca98f481..a31b0f81eba7 100644
> > > > --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > @@ -48,6 +48,29 @@ LOG_DEFINE_CATEGORY(RPI)
> > > >
> > > >  namespace {
> > > >
> > > > +/* Map of mbus codes to supported sizes reported by the sensor. */
> > > > +using SensorFormats = std::map<unsigned int, std::vector<Size>>;
> > > > +
> > > > +SensorFormats populateSensorFormats(std::unique_ptr<CameraSensor>
> &sensor)
> > > > +{
> > > > +       SensorFormats formats;
> > > > +
> > > > +       for (auto const mbusCode : sensor->mbusCodes())
> > > > +               formats.emplace(mbusCode, sensor->sizes(mbusCode));
> > > > +
> > > > +       return formats;
> > > > +}
> > > > +
> > > > +inline V4L2DeviceFormat toV4L2DeviceFormat(V4L2SubdeviceFormat
> &mode)
>
> s/mode/format/
>
> (you know how much I like the concept of sensor mode)
>
> > > > +{
> > > > +       V4L2DeviceFormat deviceFormat;
> > > > +       BayerFormat bayer =
> BayerFormat::fromMbusCode(mode.mbus_code);
> > > > +
> > > > +       deviceFormat.fourcc = bayer.toV4L2PixelFormat();
> > > > +       deviceFormat.size = mode.size;
> > > > +       return deviceFormat;
> > > > +}
> > > > +
> > >
> > > I guess I wonder slightly whether this should be generally available,
> > > but it could easily be a change for another time. (Actually, same
> > > question for isRaw just below...)
> >
> > Agree, this can be addressed separately.
>
> I'm afraid it can't be this simple :-( There's no 1:1 mapping between
> media bus codes and V4L2 pixel formats. How a given format received on a
> bus by a DMA engine is stored in memory depends on the device. For
> instance, unicam will store BGGR RAW10 as V4L2_PIX_FMT_SBGGR10P, while
> another device may store it as V4L2_PIX_FMT_SBGGR10 (expanded to 2 bytes
> per pixel in memory). Yet another device may left-align the 10 bits,
> storing the data in V4L2_PIX_FMT_SBGGR16 format. This mapping can't be
> provided by the libcamera core. Using the BayerFormat class to perform
> the conversion with BayerFormat::fromMbusCode().toV4L2PixelFormat() may
> work in some cases, but not universally.
>

I was also a bit unsure about what to do about mbus to v4l2 format
conversions. I went with the BayerFormat class table as it was readily
available and avoids duplicating the table in the pipeline handler.
Luckily, I think in all (standard) cases the conversion from mbus codes
to v4l2 4CCs through the BayerFormat class conversion table will be
valid for Unicam, but I do take the point that this is not entirely
correct.

Perhaps I will just bite the bullet and add a similar table into our
pipeline handler. As you said, avoiding technical debt is a good thing.
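
To make the idea concrete, a minimal sketch of how the toV4L2DeviceFormat()
helper above could sit on top of a Unicam-local table is shown below. The
entries and include list are purely illustrative (not taken from the series),
and the real table would also cover the 8-bit, 12-bit and unpacked variants:

#include <map>

#include <linux/media-bus-format.h>
#include <linux/videodev2.h>

/* Plus the usual libcamera internal V4L2 format/device headers. */

/* Illustrative only: mbus code -> Unicam memory (4CC) format. */
static const std::map<unsigned int, V4L2PixelFormat> mbusCodeToUnicamFourcc = {
	{ MEDIA_BUS_FMT_SBGGR10_1X10, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10P) },
	{ MEDIA_BUS_FMT_SGBRG10_1X10, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10P) },
	{ MEDIA_BUS_FMT_SGRBG10_1X10, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10P) },
	{ MEDIA_BUS_FMT_SRGGB10_1X10, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10P) },
	/* ... 8-bit, 12-bit and unpacked entries would follow ... */
};

V4L2DeviceFormat toV4L2DeviceFormat(const V4L2SubdeviceFormat &format)
{
	V4L2DeviceFormat deviceFormat;

	/* A real implementation would handle unknown codes gracefully. */
	deviceFormat.fourcc = mbusCodeToUnicamFourcc.at(format.mbus_code);
	deviceFormat.size = format.size;

	return deviceFormat;
}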


>
> I'm actually surprised the conversion works here, as I think you'll get
> V4L2_PIX_FMT_SBGGR10 when you expect V4L2_PIX_FMT_SBGGR10P. One
> possibility to fix this would be to set bayer.packing before calling
> toV4L2PixelFormat().


This works because Unicam can support both packed and unpacked formats
when writing out to SDRAM. As you have noticed, a subsequent change in
this series adds the option of choosing which one to use.

I'll add the new table in the next revision.

Regards,
Naush


> The API is fragile though, and the BayerFormat
> class a bit ill-defined (it doesn't tell whether it's supposed to
> represent a bus format or a pixel format, and as explained above, it
> can't be used interchangeably). This doesn't have to be fixed as part of
> this patch series, but I'd like to see that problem addressed, and we
> shouldn't build more technical debt.
>
> > > >  bool isRaw(PixelFormat &pixFmt)
> > > >  {
> > > >         /*
> > > > @@ -74,11 +97,10 @@ double scoreFormat(double desired, double actual)
> > > >         return score;
> > > >  }
> > > >
> > > > -V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,
> > > > -                             const Size &req)
> > > > +V4L2SubdeviceFormat findBestMode(const SensorFormats &formatsMap,
> const Size &req)
> > > >  {
> > > >         double bestScore = std::numeric_limits<double>::max(), score;
> > > > -       V4L2DeviceFormat bestMode;
> > > > +       V4L2SubdeviceFormat bestMode;
> > > >
> > > >  #define PENALTY_AR             1500.0
> > > >  #define PENALTY_8BIT           2000.0
> > > > @@ -88,18 +110,17 @@ V4L2DeviceFormat
> findBestMode(V4L2VideoDevice::Formats &formatsMap,
> > > >
> > > >         /* Calculate the closest/best mode from the user requested
> size. */
> > > >         for (const auto &iter : formatsMap) {
> > > > -               V4L2PixelFormat v4l2Format = iter.first;
> > > > -               const PixelFormatInfo &info =
> PixelFormatInfo::info(v4l2Format);
> > > > +               const unsigned int mbus_code = iter.first;
>
> s/mbus_code/mbusCode/
>
> > > > +               const PixelFormat format =
> BayerFormat::fromMbusCode(mbus_code).toPixelFormat();
>
> Same issue as above.
>
> > > > +               const PixelFormatInfo &info =
> PixelFormatInfo::info(format);
> > > >
> > > > -               for (const SizeRange &sz : iter.second) {
>
> s/sz/size/
>
> > > > -                       double modeWidth = sz.contains(req) ?
> req.width : sz.max.width;
> > > > -                       double modeHeight = sz.contains(req) ?
> req.height : sz.max.height;
> > > > +               for (const Size &sz : iter.second) {
> > > >                         double reqAr =
> static_cast<double>(req.width) / req.height;
> > > > -                       double modeAr = modeWidth / modeHeight;
> > > > +                       double modeAr = sz.width / sz.height;
> > > >
> > > >                         /* Score the dimensions for closeness. */
> > > > -                       score = scoreFormat(req.width, modeWidth);
> > > > -                       score += scoreFormat(req.height, modeHeight);
> > > > +                       score = scoreFormat(req.width, sz.width);
> > > > +                       score += scoreFormat(req.height, sz.height);
> > > >                         score += PENALTY_AR * scoreFormat(reqAr,
> modeAr);
> > > >
> > > >                         /* Add any penalties... this is not an exact
> science! */
> > > > @@ -115,12 +136,12 @@ V4L2DeviceFormat
> findBestMode(V4L2VideoDevice::Formats &formatsMap,
> > > >
> > > >                         if (score <= bestScore) {
> > > >                                 bestScore = score;
> > > > -                               bestMode.fourcc = v4l2Format;
> > > > -                               bestMode.size = Size(modeWidth,
> modeHeight);
> > > > +                               bestMode.mbus_code = mbus_code;
> > > > +                               bestMode.size = sz;
> > > >                         }
> > > >
> > > > -                       LOG(RPI, Info) << "Mode: " << modeWidth <<
> "x" << modeHeight
> > > > -                                      << " fmt " <<
> v4l2Format.toString()
> > > > +                       LOG(RPI, Info) << "Mode: " << sz.width <<
> "x" << sz.height
>
> You can use sz.toString().
>
> > > > +                                      << " fmt " <<
> format.toString()
> > > >                                        << " Score: " << score
> > > >                                        << " (best " << bestScore <<
> ")";
> > > >                 }
> > > > @@ -170,6 +191,7 @@ public:
> > > >         std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;
> > > >
> > > >         std::unique_ptr<CameraSensor> sensor_;
> > > > +       SensorFormats sensorFormats_;
> > > >         /* Array of Unicam and ISP device streams and associated
> buffers/streams. */
> > > >         RPi::Device<Unicam, 2> unicam_;
> > > >         RPi::Device<Isp, 4> isp_;
> > > > @@ -352,9 +374,9 @@ CameraConfiguration::Status
> RPiCameraConfiguration::validate()
> > > >                          * Calculate the best sensor mode we can use
> based on
> > > >                          * the user request.
> > > >                          */
> > > > -                       V4L2VideoDevice::Formats fmts =
> data_->unicam_[Unicam::Image].dev()->formats();
> > > > -                       V4L2DeviceFormat sensorFormat =
> findBestMode(fmts, cfg.size);
> > > > -                       int ret =
> data_->unicam_[Unicam::Image].dev()->tryFormat(&sensorFormat);
> > > > +                       V4L2SubdeviceFormat sensorFormat =
> findBestMode(data_->sensorFormats_, cfg.size);
> > > > +                       V4L2DeviceFormat unicamFormat =
> toV4L2DeviceFormat(sensorFormat);
> > > > +                       int ret =
> data_->unicam_[Unicam::Image].dev()->tryFormat(&unicamFormat);
> > > >                         if (ret)
> > > >                                 return Invalid;
> > > >
> > > > @@ -366,7 +388,7 @@ CameraConfiguration::Status
> RPiCameraConfiguration::validate()
> > > >                          * fetch the "native" (i.e. untransformed)
> Bayer order,
> > > >                          * because the sensor may currently be
> flipped!
> > > >                          */
> > > > -                       V4L2PixelFormat fourcc = sensorFormat.fourcc;
> > > > +                       V4L2PixelFormat fourcc = unicamFormat.fourcc;
> > > >                         if (data_->flipsAlterBayerOrder_) {
> > > >                                 BayerFormat bayer =
> BayerFormat::fromV4L2PixelFormat(fourcc);
> > > >                                 bayer.order =
> data_->nativeBayerOrder_;
> > > > @@ -375,15 +397,15 @@ CameraConfiguration::Status
> RPiCameraConfiguration::validate()
> > > >                         }
> > > >
> > > >                         PixelFormat sensorPixFormat =
> fourcc.toPixelFormat();
> > > > -                       if (cfg.size != sensorFormat.size ||
> > > > +                       if (cfg.size != unicamFormat.size ||
> > > >                             cfg.pixelFormat != sensorPixFormat) {
> > > > -                               cfg.size = sensorFormat.size;
> > > > +                               cfg.size = unicamFormat.size;
> > > >                                 cfg.pixelFormat = sensorPixFormat;
> > > >                                 status = Adjusted;
> > > >                         }
> > > >
> > > > -                       cfg.stride = sensorFormat.planes[0].bpl;
> > > > -                       cfg.frameSize = sensorFormat.planes[0].size;
> > > > +                       cfg.stride = unicamFormat.planes[0].bpl;
> > > > +                       cfg.frameSize = unicamFormat.planes[0].size;
> > > >
> > > >                         rawCount++;
> > > >                 } else {
> > > > @@ -472,7 +494,8 @@ CameraConfiguration
> *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> > > >  {
> > > >         RPiCameraData *data = cameraData(camera);
> > > >         CameraConfiguration *config = new
> RPiCameraConfiguration(data);
> > > > -       V4L2DeviceFormat sensorFormat;
> > > > +       V4L2SubdeviceFormat sensorFormat;
> > > > +       V4L2DeviceFormat unicamFormat;
> > > >         unsigned int bufferCount;
> > > >         PixelFormat pixelFormat;
> > > >         V4L2VideoDevice::Formats fmts;
> > > > @@ -487,9 +510,9 @@ CameraConfiguration
> *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> > > >                 switch (role) {
> > > >                 case StreamRole::Raw:
> > > >                         size = data->sensor_->resolution();
> > > > -                       fmts =
> data->unicam_[Unicam::Image].dev()->formats();
> > > > -                       sensorFormat = findBestMode(fmts, size);
> > > > -                       pixelFormat =
> sensorFormat.fourcc.toPixelFormat();
> > > > +                       sensorFormat =
> findBestMode(data->sensorFormats_, size);
> > > > +                       unicamFormat =
> toV4L2DeviceFormat(sensorFormat);
> > > > +                       pixelFormat =
> BayerFormat::fromMbusCode(sensorFormat.mbus_code).toPixelFormat();
>
> Same issue as above.
>
> > > >                         ASSERT(pixelFormat.isValid());
> > > >                         bufferCount = 2;
> > > >                         rawCount++;
> > > > @@ -599,32 +622,30 @@ int PipelineHandlerRPi::configure(Camera
> *camera, CameraConfiguration *config)
> > > >         }
> > > >
> > > >         /* First calculate the best sensor mode we can use based on
> the user request. */
> > > > -       V4L2VideoDevice::Formats fmts =
> data->unicam_[Unicam::Image].dev()->formats();
> > > > -       V4L2DeviceFormat sensorFormat = findBestMode(fmts, rawStream
> ? sensorSize : maxSize);
> > > > +       V4L2SubdeviceFormat sensorFormat =
> findBestMode(data->sensorFormats_, rawStream ? sensorSize : maxSize);
> > > > +       V4L2DeviceFormat unicamFormat =
> toV4L2DeviceFormat(sensorFormat);
>
> Shouldn't this go after sensor_->setFormat() in case the sensor adjusts
> the format ? Actually, as adjustments are not allowed in configure(),
> you should check that the format set on the sensor matches the requested
> format.
>
> > > > +
> > > > +       ret = data->sensor_->setFormat(&sensorFormat);
> > > > +       if (ret)
> > > > +               return ret;
> > > >
> > > >         /*
> > > >          * Unicam image output format. The ISP input format gets set
> at start,
> > > >          * just in case we have swapped bayer orders due to flips.
> > > >          */
> > > > -       ret =
> data->unicam_[Unicam::Image].dev()->setFormat(&sensorFormat);
> > > > +       ret =
> data->unicam_[Unicam::Image].dev()->setFormat(&unicamFormat);
> > > >         if (ret)
> > > >                 return ret;
> > > >
> > > > -       /*
> > > > -        * The control ranges associated with the sensor may need
> updating
> > > > -        * after a format change.
> > > > -        * \todo Use the CameraSensor::setFormat API instead.
> > > > -        */
> > > > -       data->sensor_->updateControlInfo();
> > > > -
> > > >         LOG(RPI, Info) << "Sensor: " << camera->id()
> > > > -                      << " - Selected mode: " <<
> sensorFormat.toString();
> > > > +                      << " - Selected sensor mode: " <<
> sensorFormat.toString()
> > > > +                      << " - Selected unicam mode: " <<
> unicamFormat.toString();
>
> s/mode/format/ on both lines
>
> > > >
> > > >         /*
> > > >          * This format may be reset on start() if the bayer order
> has changed
> > > >          * because of flips in the sensor.
> > > >          */
> > > > -       ret = data->isp_[Isp::Input].dev()->setFormat(&sensorFormat);
> > > > +       ret = data->isp_[Isp::Input].dev()->setFormat(&unicamFormat);
> > > >         if (ret)
> > > >                 return ret;
> > > >
> > > > @@ -746,8 +767,8 @@ int PipelineHandlerRPi::configure(Camera
> *camera, CameraConfiguration *config)
> > > >         data->ispMinCropSize_ = testCrop.size();
> > > >
> > > >         /* Adjust aspect ratio by providing crops on the input
> image. */
> > > > -       Size size = sensorFormat.size.boundedToAspectRatio(maxSize);
> > > > -       Rectangle crop =
> size.centeredTo(Rectangle(sensorFormat.size).center());
> > > > +       Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
> > > > +       Rectangle crop =
> size.centeredTo(Rectangle(unicamFormat.size).center());
> > > >         data->ispCrop_ = crop;
> > > >
> > > >
>  data->isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &crop);
> > > > @@ -761,8 +782,11 @@ int PipelineHandlerRPi::configure(Camera
> *camera, CameraConfiguration *config)
> > > >          * supports it.
> > > >          */
> > > >         if (data->sensorMetadata_) {
> > > > -               format = {};
> > > > +               V4L2SubdeviceFormat embeddedFormat;
> > > > +
> > > > +               data->sensor_->device()->getFormat(1,
> &embeddedFormat);
> > > >                 format.fourcc =
> V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);
> > > > +               format.planes[0].size = embeddedFormat.size.width *
> embeddedFormat.size.height;
> > > >
> > > >                 LOG(RPI, Debug) << "Setting embedded data format.";
> > > >                 ret =
> data->unicam_[Unicam::Embedded].dev()->setFormat(&format);
> > > > @@ -847,9 +871,14 @@ int PipelineHandlerRPi::start(Camera *camera,
> const ControlList *controls)
> > > >          * IPA configure may have changed the sensor flips - hence
> the bayer
> > > >          * order. Get the sensor format and set the ISP input now.
> > > >          */
> > > > -       V4L2DeviceFormat sensorFormat;
> > > > -       data->unicam_[Unicam::Image].dev()->getFormat(&sensorFormat);
> > > > -       ret = data->isp_[Isp::Input].dev()->setFormat(&sensorFormat);
> > > > +       V4L2SubdeviceFormat sensorFormat;
> > > > +       data->sensor_->device()->getFormat(0, &sensorFormat);
> > > > +
> > > > +       V4L2DeviceFormat ispFormat;
> > > > +       ispFormat.fourcc =
> BayerFormat::fromMbusCode(sensorFormat.mbus_code).toV4L2PixelFormat();
> > >
> > > Generally there's the occasional assumption that fromMbusCode won't
> > > return invalid Bayer formats, and I guess we're comfortable with that?
> > > It certainly shouldn't be possible.
> >
> > Yes, I ought to add an assert to ensure the bayer format is valid.
> >
> > > But it seems good to me, so:
> > >
> > > Reviewed-by: David Plowman <david.plowman@raspberrypi.com>
> > >
> > > > +       ispFormat.size = sensorFormat.size;
> > > > +
> > > > +       ret = data->isp_[Isp::Input].dev()->setFormat(&ispFormat);
> > > >         if (ret) {
> > > >                 stop(camera);
> > > >                 return ret;
> > > > @@ -1004,6 +1033,8 @@ bool
> PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
> > > >         if (data->sensor_->init())
> > > >                 return false;
> > > >
> > > > +       data->sensorFormats_ = populateSensorFormats(data->sensor_);
> > > > +
> > > >         ipa::RPi::SensorConfig sensorConfig;
> > > >         if (data->loadIPA(&sensorConfig)) {
> > > >                 LOG(RPI, Error) << "Failed to load a suitable IPA
> library";
> > > > @@ -1030,6 +1061,11 @@ bool
> PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
> > > >                         return false;
> > > >         }
> > > >
> > > > +       if
> (!(data->unicam_[Unicam::Image].dev()->caps().device_caps() &
> V4L2_CAP_IO_MC)) {
> > > > +               LOG(RPI, Error) << "Unicam driver did not advertise
> V4L2_CAP_IO_MC, please update your kernel!";
> > > > +               return false;
> > > > +       }
> > > > +
> > > >         /*
> > > >          * Setup our delayed control writer with the sensor default
> > > >          * gain and exposure delays. Mark VBLANK for priority write.
> > > > @@ -1039,7 +1075,7 @@ bool
> PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
> > > >                 { V4L2_CID_EXPOSURE, { sensorConfig.exposureDelay,
> false } },
> > > >                 { V4L2_CID_VBLANK, { sensorConfig.vblankDelay, true
> } }
> > > >         };
> > > > -       data->delayedCtrls_ =
> std::make_unique<DelayedControls>(data->unicam_[Unicam::Image].dev(),
> params);
> > > > +       data->delayedCtrls_ =
> std::make_unique<DelayedControls>(data->sensor_->device(), params);
> > > >         data->sensorMetadata_ = sensorConfig.sensorMetadata;
> > > >
> > > >         /* Register the controls that the Raspberry Pi IPA can
> handle. */
> > > > @@ -1066,15 +1102,14 @@ bool
> PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
> > > >          * As part of answering the final question, we reset the
> camera to
> > > >          * no transform at all.
> > > >          */
> > > > -
> > > > -       V4L2VideoDevice *dev = data->unicam_[Unicam::Image].dev();
> > > > -       const struct v4l2_query_ext_ctrl *hflipCtrl =
> dev->controlInfo(V4L2_CID_HFLIP);
> > > > +       const V4L2Subdevice *sensor = data->sensor_->device();
> > > > +       const struct v4l2_query_ext_ctrl *hflipCtrl =
> sensor->controlInfo(V4L2_CID_HFLIP);
> > > >         if (hflipCtrl) {
> > > >                 /* We assume it will support vflips too... */
> > > >                 data->supportsFlips_ = true;
> > > >                 data->flipsAlterBayerOrder_ = hflipCtrl->flags &
> V4L2_CTRL_FLAG_MODIFY_LAYOUT;
> > > >
> > > > -               ControlList ctrls(dev->controls());
> > > > +               ControlList ctrls(data->sensor_->controls());
> > > >                 ctrls.set(V4L2_CID_HFLIP, 0);
> > > >                 ctrls.set(V4L2_CID_VFLIP, 0);
> > > >                 data->setSensorControls(ctrls);
> > > > @@ -1082,9 +1117,8 @@ bool
> PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
> > > >
> > > >         /* Look for a valid Bayer format. */
> > > >         BayerFormat bayerFormat;
> > > > -       for (const auto &iter : dev->formats()) {
> > > > -               V4L2PixelFormat v4l2Format = iter.first;
> > > > -               bayerFormat =
> BayerFormat::fromV4L2PixelFormat(v4l2Format);
> > > > +       for (const auto &iter : data->sensorFormats_) {
> > > > +               bayerFormat = BayerFormat::fromMbusCode(iter.first);
> > > >                 if (bayerFormat.isValid())
> > > >                         break;
> > > >         }
> > > > @@ -1271,7 +1305,7 @@ int RPiCameraData::configureIPA(const
> CameraConfiguration *config)
> > > >                 }
> > > >         }
> > > >
> > > > -       entityControls.emplace(0,
> unicam_[Unicam::Image].dev()->controls());
> > > > +       entityControls.emplace(0, sensor_->controls());
> > > >         entityControls.emplace(1,
> isp_[Isp::Input].dev()->controls());
> > > >
> > > >         /* Always send the user transform to the IPA. */
> > > > @@ -1406,10 +1440,10 @@ void
> RPiCameraData::setSensorControls(ControlList &controls)
> > > >                 ControlList vblank_ctrl;
> > > >
> > > >                 vblank_ctrl.set(V4L2_CID_VBLANK,
> controls.get(V4L2_CID_VBLANK));
> > > > -
>  unicam_[Unicam::Image].dev()->setControls(&vblank_ctrl);
> > > > +               sensor_->setControls(&vblank_ctrl);
> > > >         }
> > > >
> > > > -       unicam_[Unicam::Image].dev()->setControls(&controls);
> > > > +       sensor_->setControls(&controls);
> > > >  }
> > > >
> > > >  void RPiCameraData::unicamBufferDequeue(FrameBuffer *buffer)
>
> --
> Regards,
>
> Laurent Pinchart
>
Laurent Pinchart Oct. 26, 2021, 9:34 a.m. UTC | #5
Hi Naush,
On Tue, Oct 26, 2021 at 08:47:15AM +0100, Naushir Patuck wrote:
> On Mon, 25 Oct 2021 at 17:44, Laurent Pinchart wrote:
> > On Fri, Oct 22, 2021 at 04:29:34PM +0100, Naushir Patuck wrote:
> > > On Fri, 22 Oct 2021 at 16:16, David Plowman wrote:
> > >
> > > > Hi Naush
> > > >
> > > > Thanks for the new version. I think a lot of stuff is tidier now that
> > > > we're clear that we're selecting a subdev format.
> > > >
> > > > On Fri, 22 Oct 2021 at 15:40, Naushir Patuck wrote:
> > > > >
> > > > > Switch the pipeline handler to use the new Unicam media controller based driver.
> > > > > With this change, we directly talk to the sensor device driver to set controls
> > > > > and set/get formats in the pipeline handler.
> > > > >
> > > > > This change requires the accompanying Raspberry Pi linux kernel change at
> > > > > https://github.com/raspberrypi/linux/pull/4645. If this kernel change is not
> > > > > present, the pipeline handler will fail to run with an error message informing
> > > > > the user to update the kernel build.
> > > > >
> > > > > Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
> > > > > ---
> > > > >  .../pipeline/raspberrypi/raspberrypi.cpp      | 146 +++++++++++-------
> > > > >  1 file changed, 90 insertions(+), 56 deletions(-)
> > > > >
> > > > > diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > > index 1634ca98f481..a31b0f81eba7 100644
> > > > > --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > > +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > > @@ -48,6 +48,29 @@ LOG_DEFINE_CATEGORY(RPI)
> > > > >
> > > > >  namespace {
> > > > >
> > > > > +/* Map of mbus codes to supported sizes reported by the sensor. */
> > > > > +using SensorFormats = std::map<unsigned int, std::vector<Size>>;
> > > > > +
> > > > > +SensorFormats populateSensorFormats(std::unique_ptr<CameraSensor> &sensor)
> > > > > +{
> > > > > +       SensorFormats formats;
> > > > > +
> > > > > +       for (auto const mbusCode : sensor->mbusCodes())
> > > > > +               formats.emplace(mbusCode, sensor->sizes(mbusCode));
> > > > > +
> > > > > +       return formats;
> > > > > +}
> > > > > +
> > > > > +inline V4L2DeviceFormat toV4L2DeviceFormat(V4L2SubdeviceFormat &mode)
> >
> > s/mode/format/
> >
> > (you know how much I like the concept of sensor mode)
> >
> > > > > +{
> > > > > +       V4L2DeviceFormat deviceFormat;
> > > > > +       BayerFormat bayer = BayerFormat::fromMbusCode(mode.mbus_code);
> > > > > +
> > > > > +       deviceFormat.fourcc = bayer.toV4L2PixelFormat();
> > > > > +       deviceFormat.size = mode.size;
> > > > > +       return deviceFormat;
> > > > > +}
> > > > > +
> > > >
> > > > I guess I wonder slightly whether this should be generally available,
> > > > but it could easily be a change for another time. (Actually, same
> > > > question for isRaw just below...)
> > >
> > > Agree, this can be addressed separately.
> >
> > I'm afraid it can't be this simple :-( There's no 1:1 mapping between
> > media bus codes and V4L2 pixel formats. How a given format received on a
> > bus by a DMA engine is stored in memory depends on the device. For
> > instance, unicam will store BGGR RAW10 as V4L2_PIX_FMT_SBGGR10P, while
> > another device may store it as V4L2_PIX_FMT_SBGGR10 (expanded to 2 bytes
> > per pixel in memory). Yet another device may left-align the 10 bits,
> > storing the data in V4L2_PIX_FMT_SBGGR16 format. This mapping can't be
> > provided by the libcamera core. Using the BayerFormat class to perform
> > the conversion with BayerFormat::fromMbusCode().toV4L2PixelFormat() may
> > work in some cases, but not universally.
> 
> I was also a bit unsure about what to do about mbus to v4l2 format
> conversions. I went with the BayerFormat class table as it was readily
> available and avoids duplicating the table in the pipeline handler.
> Luckily, I think in all (standard) cases the conversion from mbus codes
> to v4l2 4CCs through the BayerFormat class conversion table will be
> valid for Unicam, but I do take the point that this is not entirely
> correct.
> 
> Perhaps I will just bite the bullet and add a similar table into our
> pipeline handler. As you said, avoiding technical debt is a good thing.

I think that would be best, to avoid introducing code that relies on the
assumption that device-specific conversion can be handled in a standard
class. It would be more difficult to refactor the BayerFormat class
later otherwise.

This being said, what bothers me slightly is that the BayerFormat class
can do the job if the pipeline handler carefully sets the different
members: for instance, setting the packing member after converting from
the mbus code to BayerFormat and before converting to PixelFormat or
V4L2PixelFormat. A different pipeline handler may also need to override
the bitDepth member, when the hardware always expands RAW10 to 16 bits
with left alignment. I think the existing BayerFormat API is error-prone
in that regard; it's easy to use it incorrectly. I'm not sure how we
could improve that though, and it would be nice to use the class to
perform conversions.
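
To make that concrete, careful use of the current API would look roughly
like the sketch below. This is only a sketch: the helper name is invented,
the exact Packing enumerator spelling is an assumption, and each pipeline
handler would pick the packing and bit depth its hardware actually
produces.

/* Illustrative only: derive the DMA engine's memory format from the sensor format. */
V4L2DeviceFormat toUnicamFormat(const V4L2SubdeviceFormat &sensorFormat)
{
	BayerFormat bayer = BayerFormat::fromMbusCode(sensorFormat.mbus_code);
	ASSERT(bayer.isValid());

	/*
	 * Unicam stores CSI-2 packed data, so request the packed layout
	 * before converting (enumerator name assumed).
	 */
	bayer.packing = BayerFormat::Packing::CSI2;

	/*
	 * A device that always expands raw data to 16 bits in memory would
	 * instead override the bit depth, e.g. bayer.bitDepth = 16.
	 */

	V4L2DeviceFormat format;
	format.fourcc = bayer.toV4L2PixelFormat();
	format.size = sensorFormat.size;

	return format;
}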

On the other hand, the problem of converting from media bus codes to
pixel formats isn't specific to bayer formats (there are YUV sensors
too), so in the general case it needs to be handled manually in pipeline
handlers anyway.

If you have any clever idea to fix all that, feel free to include it in
the next version :-)

> > I'm actually surprised the conversion works here, as I think you'll get
> > V4L2_PIX_FMT_SBGGR10 when you expect V4L2_PIX_FMT_SBGGR10P. One
> > possibility to fix this would be to set bayer.packing before calling
> > toV4L2PixelFormat().
> 
> This works because Unicam can support both packed and unpacked formats
> when writing out to SDRAM. As you have noticed, a subsequent change in
> this series adds the option of choosing which one to use.

Indeed, I noticed later :-)

By the way, on a semi-related note, I was wondering if unicam was able
to convert RAW10 to RAW8.

> I'll add the new table in the next revision.
> 
> > The API is fragile though, and the BayerFormat
> > class a bit ill-defined (it doesn't tell whether it's supposed to
> > represent a bus format or a pixel format, and as explained above, it
> > can't be used interchangeably). This doesn't have to be fixed as part of
> > this patch series, but I'd like to see that problem addressed, and we
> > shouldn't build more technical debt.
> >
> > > > >  bool isRaw(PixelFormat &pixFmt)
> > > > >  {
> > > > >         /*
> > > > > @@ -74,11 +97,10 @@ double scoreFormat(double desired, double actual)
> > > > >         return score;
> > > > >  }
> > > > >
> > > > > -V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,
> > > > > -                             const Size &req)
> > > > > +V4L2SubdeviceFormat findBestMode(const SensorFormats &formatsMap, const Size &req)
> > > > >  {
> > > > >         double bestScore = std::numeric_limits<double>::max(), score;
> > > > > -       V4L2DeviceFormat bestMode;
> > > > > +       V4L2SubdeviceFormat bestMode;
> > > > >
> > > > >  #define PENALTY_AR             1500.0
> > > > >  #define PENALTY_8BIT           2000.0
> > > > > @@ -88,18 +110,17 @@ V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,
> > > > >
> > > > >         /* Calculate the closest/best mode from the user requested size. */
> > > > >         for (const auto &iter : formatsMap) {
> > > > > -               V4L2PixelFormat v4l2Format = iter.first;
> > > > > -               const PixelFormatInfo &info = PixelFormatInfo::info(v4l2Format);
> > > > > +               const unsigned int mbus_code = iter.first;
> >
> > s/mbus_code/mbusCode/
> >
> > > > > +               const PixelFormat format = BayerFormat::fromMbusCode(mbus_code).toPixelFormat();
> >
> > Same issue as above.
> >
> > > > > +               const PixelFormatInfo &info = PixelFormatInfo::info(format);
> > > > >
> > > > > -               for (const SizeRange &sz : iter.second) {
> >
> > s/sz/size/
> >
> > > > > -                       double modeWidth = sz.contains(req) ? req.width : sz.max.width;
> > > > > -                       double modeHeight = sz.contains(req) ? req.height : sz.max.height;
> > > > > +               for (const Size &sz : iter.second) {
> > > > >                         double reqAr = static_cast<double>(req.width) / req.height;
> > > > > -                       double modeAr = modeWidth / modeHeight;
> > > > > +                       double modeAr = sz.width / sz.height;
> > > > >
> > > > >                         /* Score the dimensions for closeness. */
> > > > > -                       score = scoreFormat(req.width, modeWidth);
> > > > > -                       score += scoreFormat(req.height, modeHeight);
> > > > > +                       score = scoreFormat(req.width, sz.width);
> > > > > +                       score += scoreFormat(req.height, sz.height);
> > > > >                         score += PENALTY_AR * scoreFormat(reqAr, modeAr);
> > > > >
> > > > >                         /* Add any penalties... this is not an exact science! */
> > > > > @@ -115,12 +136,12 @@ V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,
> > > > >
> > > > >                         if (score <= bestScore) {
> > > > >                                 bestScore = score;
> > > > > -                               bestMode.fourcc = v4l2Format;
> > > > > -                               bestMode.size = Size(modeWidth, modeHeight);
> > > > > +                               bestMode.mbus_code = mbus_code;
> > > > > +                               bestMode.size = sz;
> > > > >                         }
> > > > >
> > > > > -                       LOG(RPI, Info) << "Mode: " << modeWidth << "x" << modeHeight
> > > > > -                                      << " fmt " << v4l2Format.toString()
> > > > > +                       LOG(RPI, Info) << "Mode: " << sz.width << "x" << sz.height
> >
> > You can use sz.toString().
> >
> > > > > +                                      << " fmt " << format.toString()
> > > > >                                        << " Score: " << score
> > > > >                                        << " (best " << bestScore << ")";
> > > > >                 }
> > > > > @@ -170,6 +191,7 @@ public:
> > > > >         std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;
> > > > >
> > > > >         std::unique_ptr<CameraSensor> sensor_;
> > > > > +       SensorFormats sensorFormats_;
> > > > >         /* Array of Unicam and ISP device streams and associated buffers/streams. */
> > > > >         RPi::Device<Unicam, 2> unicam_;
> > > > >         RPi::Device<Isp, 4> isp_;
> > > > > @@ -352,9 +374,9 @@ CameraConfiguration::Status RPiCameraConfiguration::validate()
> > > > >                          * Calculate the best sensor mode we can use based on
> > > > >                          * the user request.
> > > > >                          */
> > > > > -                       V4L2VideoDevice::Formats fmts = data_->unicam_[Unicam::Image].dev()->formats();
> > > > > -                       V4L2DeviceFormat sensorFormat = findBestMode(fmts, cfg.size);
> > > > > -                       int ret = data_->unicam_[Unicam::Image].dev()->tryFormat(&sensorFormat);
> > > > > +                       V4L2SubdeviceFormat sensorFormat = findBestMode(data_->sensorFormats_, cfg.size);
> > > > > +                       V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat);
> > > > > +                       int ret = data_->unicam_[Unicam::Image].dev()->tryFormat(&unicamFormat);
> > > > >                         if (ret)
> > > > >                                 return Invalid;
> > > > >
> > > > > @@ -366,7 +388,7 @@ CameraConfiguration::Status RPiCameraConfiguration::validate()
> > > > >                          * fetch the "native" (i.e. untransformed) Bayer order,
> > > > >                          * because the sensor may currently be flipped!
> > > > >                          */
> > > > > -                       V4L2PixelFormat fourcc = sensorFormat.fourcc;
> > > > > +                       V4L2PixelFormat fourcc = unicamFormat.fourcc;
> > > > >                         if (data_->flipsAlterBayerOrder_) {
> > > > >                                 BayerFormat bayer = BayerFormat::fromV4L2PixelFormat(fourcc);
> > > > >                                 bayer.order = data_->nativeBayerOrder_;
> > > > > @@ -375,15 +397,15 @@ CameraConfiguration::Status RPiCameraConfiguration::validate()
> > > > >                         }
> > > > >
> > > > >                         PixelFormat sensorPixFormat = fourcc.toPixelFormat();
> > > > > -                       if (cfg.size != sensorFormat.size ||
> > > > > +                       if (cfg.size != unicamFormat.size ||
> > > > >                             cfg.pixelFormat != sensorPixFormat) {
> > > > > -                               cfg.size = sensorFormat.size;
> > > > > +                               cfg.size = unicamFormat.size;
> > > > >                                 cfg.pixelFormat = sensorPixFormat;
> > > > >                                 status = Adjusted;
> > > > >                         }
> > > > >
> > > > > -                       cfg.stride = sensorFormat.planes[0].bpl;
> > > > > -                       cfg.frameSize = sensorFormat.planes[0].size;
> > > > > +                       cfg.stride = unicamFormat.planes[0].bpl;
> > > > > +                       cfg.frameSize = unicamFormat.planes[0].size;
> > > > >
> > > > >                         rawCount++;
> > > > >                 } else {
> > > > > @@ -472,7 +494,8 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> > > > >  {
> > > > >         RPiCameraData *data = cameraData(camera);
> > > > >         CameraConfiguration *config = new RPiCameraConfiguration(data);
> > > > > -       V4L2DeviceFormat sensorFormat;
> > > > > +       V4L2SubdeviceFormat sensorFormat;
> > > > > +       V4L2DeviceFormat unicamFormat;
> > > > >         unsigned int bufferCount;
> > > > >         PixelFormat pixelFormat;
> > > > >         V4L2VideoDevice::Formats fmts;
> > > > > @@ -487,9 +510,9 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
> > > > >                 switch (role) {
> > > > >                 case StreamRole::Raw:
> > > > >                         size = data->sensor_->resolution();
> > > > > -                       fmts = data->unicam_[Unicam::Image].dev()->formats();
> > > > > -                       sensorFormat = findBestMode(fmts, size);
> > > > > -                       pixelFormat = sensorFormat.fourcc.toPixelFormat();
> > > > > +                       sensorFormat = findBestMode(data->sensorFormats_, size);
> > > > > +                       unicamFormat = toV4L2DeviceFormat(sensorFormat);
> > > > > +                       pixelFormat = BayerFormat::fromMbusCode(sensorFormat.mbus_code).toPixelFormat();
> >
> > Same issue as above.
> >
> > > > >                         ASSERT(pixelFormat.isValid());
> > > > >                         bufferCount = 2;
> > > > >                         rawCount++;
> > > > > @@ -599,32 +622,30 @@ int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
> > > > >         }
> > > > >
> > > > >         /* First calculate the best sensor mode we can use based on the user request. */
> > > > > -       V4L2VideoDevice::Formats fmts = data->unicam_[Unicam::Image].dev()->formats();
> > > > > -       V4L2DeviceFormat sensorFormat = findBestMode(fmts, rawStream ? sensorSize : maxSize);
> > > > > +       V4L2SubdeviceFormat sensorFormat = findBestMode(data->sensorFormats_, rawStream ? sensorSize : maxSize);
> > > > > +       V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat);
> >
> > Shouldn't this go after sensor_->setFormat() in case the sensor adjusts
> > the format ? Actually, as adjustments are not allowed in configure(),
> > you should check that the format set on the sensor matches the requested
> > format.
> >
> > > > > +
> > > > > +       ret = data->sensor_->setFormat(&sensorFormat);
> > > > > +       if (ret)
> > > > > +               return ret;
> > > > >
> > > > >         /*
> > > > >          * Unicam image output format. The ISP input format gets set at start,
> > > > >          * just in case we have swapped bayer orders due to flips.
> > > > >          */
> > > > > -       ret = data->unicam_[Unicam::Image].dev()->setFormat(&sensorFormat);
> > > > > +       ret = data->unicam_[Unicam::Image].dev()->setFormat(&unicamFormat);
> > > > >         if (ret)
> > > > >                 return ret;
> > > > >
> > > > > -       /*
> > > > > -        * The control ranges associated with the sensor may need updating
> > > > > -        * after a format change.
> > > > > -        * \todo Use the CameraSensor::setFormat API instead.
> > > > > -        */
> > > > > -       data->sensor_->updateControlInfo();
> > > > > -
> > > > >         LOG(RPI, Info) << "Sensor: " << camera->id()
> > > > > -                      << " - Selected mode: " << sensorFormat.toString();
> > > > > +                      << " - Selected sensor mode: " << sensorFormat.toString()
> > > > > +                      << " - Selected unicam mode: " << unicamFormat.toString();
> > 
> > s/mode/format/ on both lines
> >
> > > > >
> > > > >         /*
> > > > >          * This format may be reset on start() if the bayer order has changed
> > > > >          * because of flips in the sensor.
> > > > >          */
> > > > > -       ret = data->isp_[Isp::Input].dev()->setFormat(&sensorFormat);
> > > > > +       ret = data->isp_[Isp::Input].dev()->setFormat(&unicamFormat);
> > > > >         if (ret)
> > > > >                 return ret;
> > > > >
> > > > > @@ -746,8 +767,8 @@ int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
> > > > >         data->ispMinCropSize_ = testCrop.size();
> > > > >
> > > > >         /* Adjust aspect ratio by providing crops on the input image. */
> > > > > -       Size size = sensorFormat.size.boundedToAspectRatio(maxSize);
> > > > > -       Rectangle crop = size.centeredTo(Rectangle(sensorFormat.size).center());
> > > > > +       Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
> > > > > +       Rectangle crop = size.centeredTo(Rectangle(unicamFormat.size).center());
> > > > >         data->ispCrop_ = crop;
> > > > >
> > > > >  data->isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &crop);
> > > > > @@ -761,8 +782,11 @@ int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
> > > > >          * supports it.
> > > > >          */
> > > > >         if (data->sensorMetadata_) {
> > > > > -               format = {};
> > > > > +               V4L2SubdeviceFormat embeddedFormat;
> > > > > +
> > > > > +               data->sensor_->device()->getFormat(1, &embeddedFormat);
> > > > >                 format.fourcc = V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);
> > > > > +               format.planes[0].size = embeddedFormat.size.width * embeddedFormat.size.height;
> > > > >
> > > > >                 LOG(RPI, Debug) << "Setting embedded data format.";
> > > > >                 ret = data->unicam_[Unicam::Embedded].dev()->setFormat(&format);
> > > > > @@ -847,9 +871,14 @@ int PipelineHandlerRPi::start(Camera *camera, const ControlList *controls)
> > > > >          * IPA configure may have changed the sensor flips - hence the bayer
> > > > >          * order. Get the sensor format and set the ISP input now.
> > > > >          */
> > > > > -       V4L2DeviceFormat sensorFormat;
> > > > > -       data->unicam_[Unicam::Image].dev()->getFormat(&sensorFormat);
> > > > > -       ret = data->isp_[Isp::Input].dev()->setFormat(&sensorFormat);
> > > > > +       V4L2SubdeviceFormat sensorFormat;
> > > > > +       data->sensor_->device()->getFormat(0, &sensorFormat);
> > > > > +
> > > > > +       V4L2DeviceFormat ispFormat;
> > > > > +       ispFormat.fourcc = BayerFormat::fromMbusCode(sensorFormat.mbus_code).toV4L2PixelFormat();
> > > >
> > > > Generally there's the occasional assumption that fromMbusCode won't
> > > > return invalid Bayer formats, and I guess we're comfortable with that?
> > > > It certainly shouldn't be possible.
> > >
> > > Yes, I ought to add an assert to ensure the bayer format is valid.
> > >
> > > > But it seems good to me, so:
> > > >
> > > > Reviewed-by: David Plowman <david.plowman@raspberrypi.com>
> > > >
> > > > > +       ispFormat.size = sensorFormat.size;
> > > > > +
> > > > > +       ret = data->isp_[Isp::Input].dev()->setFormat(&ispFormat);
> > > > >         if (ret) {
> > > > >                 stop(camera);
> > > > >                 return ret;
> > > > > @@ -1004,6 +1033,8 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
> > > > >         if (data->sensor_->init())
> > > > >                 return false;
> > > > >
> > > > > +       data->sensorFormats_ = populateSensorFormats(data->sensor_);
> > > > > +
> > > > >         ipa::RPi::SensorConfig sensorConfig;
> > > > >         if (data->loadIPA(&sensorConfig)) {
> > > > >                 LOG(RPI, Error) << "Failed to load a suitable IPA library";
> > > > > @@ -1030,6 +1061,11 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
> > > > >                         return false;
> > > > >         }
> > > > >
> > > > > +       if (!(data->unicam_[Unicam::Image].dev()->caps().device_caps() & V4L2_CAP_IO_MC)) {
> > > > > +               LOG(RPI, Error) << "Unicam driver did not advertise V4L2_CAP_IO_MC, please update your kernel!";
> > > > > +               return false;
> > > > > +       }
> > > > > +
> > > > >         /*
> > > > >          * Setup our delayed control writer with the sensor default
> > > > >          * gain and exposure delays. Mark VBLANK for priority write.
> > > > > @@ -1039,7 +1075,7 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
> > > > >                 { V4L2_CID_EXPOSURE, { sensorConfig.exposureDelay, false } },
> > > > >                 { V4L2_CID_VBLANK, { sensorConfig.vblankDelay, true } }
> > > > >         };
> > > > > -       data->delayedCtrls_ = std::make_unique<DelayedControls>(data->unicam_[Unicam::Image].dev(), params);
> > > > > +       data->delayedCtrls_ = std::make_unique<DelayedControls>(data->sensor_->device(), params);
> > > > >         data->sensorMetadata_ = sensorConfig.sensorMetadata;
> > > > >
> > > > >         /* Register the controls that the Raspberry Pi IPA can handle. */
> > > > > @@ -1066,15 +1102,14 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
> > > > >          * As part of answering the final question, we reset the camera to
> > > > >          * no transform at all.
> > > > >          */
> > > > > -
> > > > > -       V4L2VideoDevice *dev = data->unicam_[Unicam::Image].dev();
> > > > > -       const struct v4l2_query_ext_ctrl *hflipCtrl = dev->controlInfo(V4L2_CID_HFLIP);
> > > > > +       const V4L2Subdevice *sensor = data->sensor_->device();
> > > > > +       const struct v4l2_query_ext_ctrl *hflipCtrl = sensor->controlInfo(V4L2_CID_HFLIP);
> > > > >         if (hflipCtrl) {
> > > > >                 /* We assume it will support vflips too... */
> > > > >                 data->supportsFlips_ = true;
> > > > >                 data->flipsAlterBayerOrder_ = hflipCtrl->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT;
> > > > >
> > > > > -               ControlList ctrls(dev->controls());
> > > > > +               ControlList ctrls(data->sensor_->controls());
> > > > >                 ctrls.set(V4L2_CID_HFLIP, 0);
> > > > >                 ctrls.set(V4L2_CID_VFLIP, 0);
> > > > >                 data->setSensorControls(ctrls);
> > > > > @@ -1082,9 +1117,8 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
> > > > >
> > > > >         /* Look for a valid Bayer format. */
> > > > >         BayerFormat bayerFormat;
> > > > > -       for (const auto &iter : dev->formats()) {
> > > > > -               V4L2PixelFormat v4l2Format = iter.first;
> > > > > -               bayerFormat = BayerFormat::fromV4L2PixelFormat(v4l2Format);
> > > > > +       for (const auto &iter : data->sensorFormats_) {
> > > > > +               bayerFormat = BayerFormat::fromMbusCode(iter.first);
> > > > >                 if (bayerFormat.isValid())
> > > > >                         break;
> > > > >         }
> > > > > @@ -1271,7 +1305,7 @@ int RPiCameraData::configureIPA(const CameraConfiguration *config)
> > > > >                 }
> > > > >         }
> > > > >
> > > > > -       entityControls.emplace(0, unicam_[Unicam::Image].dev()->controls());
> > > > > +       entityControls.emplace(0, sensor_->controls());
> > > > >         entityControls.emplace(1, isp_[Isp::Input].dev()->controls());
> > > > >
> > > > >         /* Always send the user transform to the IPA. */
> > > > > @@ -1406,10 +1440,10 @@ void RPiCameraData::setSensorControls(ControlList &controls)
> > > > >                 ControlList vblank_ctrl;
> > > > >
> > > > >                 vblank_ctrl.set(V4L2_CID_VBLANK, controls.get(V4L2_CID_VBLANK));
> > > > > -  unicam_[Unicam::Image].dev()->setControls(&vblank_ctrl);
> > > > > +               sensor_->setControls(&vblank_ctrl);
> > > > >         }
> > > > >
> > > > > -       unicam_[Unicam::Image].dev()->setControls(&controls);
> > > > > +       sensor_->setControls(&controls);
> > > > >  }
> > > > >
> > > > >  void RPiCameraData::unicamBufferDequeue(FrameBuffer *buffer)
Naushir Patuck Oct. 26, 2021, 9:50 a.m. UTC | #6
On Tue, 26 Oct 2021 at 10:34, Laurent Pinchart <laurent.pinchart@ideasonboard.com> wrote:

> Hi Naush,
> On Tue, Oct 26, 2021 at 08:47:15AM +0100, Naushir Patuck wrote:
> > On Mon, 25 Oct 2021 at 17:44, Laurent Pinchart wrote:
> > > On Fri, Oct 22, 2021 at 04:29:34PM +0100, Naushir Patuck wrote:
> > > > On Fri, 22 Oct 2021 at 16:16, David Plowman wrote:
> > > >
> > > > > Hi Naush
> > > > >
> > > > > Thanks for the new version. I think a lot of stuff is tidier now
> that
> > > > > we're clear that we're selecting a subdev format.
> > > > >
> > > > > On Fri, 22 Oct 2021 at 15:40, Naushir Patuck wrote:
> > > > > >
> > > > > > Switch the pipeline handler to use the new Unicam media
> controller based driver.
> > > > > > With this change, we directly talk to the sensor device driver
> to set controls
> > > > > > and set/get formats in the pipeline handler.
> > > > > >
> > > > > > This change requires the accompanying Raspberry Pi linux kernel
> change at
> > > > > > https://github.com/raspberrypi/linux/pull/4645. If this kernel
> change is not
> > > > > > present, the pipeline handler will fail to run with an error
> message informing
> > > > > > the user to update the kernel build.
> > > > > >
> > > > > > Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
> > > > > > ---
> > > > > >  .../pipeline/raspberrypi/raspberrypi.cpp      | 146
> +++++++++++-------
> > > > > >  1 file changed, 90 insertions(+), 56 deletions(-)
> > > > > >
> > > > > > diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > > > index 1634ca98f481..a31b0f81eba7 100644
> > > > > > --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > > > +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
> > > > > > @@ -48,6 +48,29 @@ LOG_DEFINE_CATEGORY(RPI)
> > > > > >
> > > > > >  namespace {
> > > > > >
> > > > > > +/* Map of mbus codes to supported sizes reported by the sensor.
> */
> > > > > > +using SensorFormats = std::map<unsigned int, std::vector<Size>>;
> > > > > > +
> > > > > > +SensorFormats
> populateSensorFormats(std::unique_ptr<CameraSensor> &sensor)
> > > > > > +{
> > > > > > +       SensorFormats formats;
> > > > > > +
> > > > > > +       for (auto const mbusCode : sensor->mbusCodes())
> > > > > > +               formats.emplace(mbusCode,
> sensor->sizes(mbusCode));
> > > > > > +
> > > > > > +       return formats;
> > > > > > +}
> > > > > > +
> > > > > > +inline V4L2DeviceFormat toV4L2DeviceFormat(V4L2SubdeviceFormat
> &mode)
> > >
> > > s/mode/format/
> > >
> > > (you know how much I like the concept of sensor mode)
> > >
> > > > > > +{
> > > > > > +       V4L2DeviceFormat deviceFormat;
> > > > > > +       BayerFormat bayer =
> BayerFormat::fromMbusCode(mode.mbus_code);
> > > > > > +
> > > > > > +       deviceFormat.fourcc = bayer.toV4L2PixelFormat();
> > > > > > +       deviceFormat.size = mode.size;
> > > > > > +       return deviceFormat;
> > > > > > +}
> > > > > > +
> > > > >
> > > > > I guess I wonder slightly whether this should be generally
> available,
> > > > > but it could easily be a change for another time. (Actually, same
> > > > > question for isRaw just below...)
> > > >
> > > > Agree, this can be addressed separately.
> > >
> > > I'm afraid it can't be this simple :-( There's no 1:1 mapping between
> > > media bus codes and V4L2 pixel formats. How a given format received on
> a
> > > bus by a DMA engine is stored in memory depends on the device. For
> > > instance, unicam will store BGGR RAW10 as V4L2_PIX_FMT_SBGGR10P, while
> > > another device may store it as V4L2_PIX_FMT_SBGGR10 (expanded to 2
> bytes
> > > per pixel in memory). Yet another device may left-align the 10 bits,
> > > storing the data in V4L2_PIX_FMT_SBGGR16 format. This mapping can't be
> > > provided by the libcamera core. Using the BayerFormat class to perform
> > > the conversion with BayerFormat::fromMbusCode().toV4L2PixelFormat() may
> > > work in some cases, but not universally.
> >
> > I was also a bit unsure about what to do about mbus to v4l2 format
> > conversions. I went with the BayerFormat class table as it was readily
> > available and avoids duplicating the table in the pipeline handler.
> > Luckily, I think in all (standard) cases the conversion from mbus codes
> > to v4l2 4CCs through the BayerFormat class conversion table will be
> > valid for Unicam, but I do take the point that this is not entirely
> > correct.
> >
> > Perhaps I will just bite the bullet and add a similar table into our
> pipeline handler.  As
> > you said, avoiding technical debt is a good thing.
>
> I think that would be best, to avoid introducing code that relies on the
> assumption that device-specific conversion can be handled in a standard
> class. It would be more difficult to refactor the BayerFormat class
> later otherwise.
>
> This being said, what bothers me slightly is that the BayerFormat class
> can do the job if the pipeline handler carefully sets the different
> members (for instance setting the packing member after converting from
> mbus code to BayerFormat and before converting to PixelFormat or
> V4L2PixelFormat, but in a different pipeline handler it may also require
> overriding the bitDepth member, when the hardware always expands RAW10
> to 16 bits with left alignment). I think the existing BayerFormat API is
> error-prone in that regard, it's easy to use it incorrectly. I'm not
> sure how we could improve that though, and it would be nice to use the
> class to perform conversions.
>
> On the other hand, the problem of converting from media bus codes to
> pixel formats isn't specific to bayer formats (there are YUV sensors
> too), so in the general case it needs to be handled manually in pipeline
> handlers anyway.
>
> If you have any clever idea to fix all that, feel free to include it in
> the next version :-)
>
> > > I'm actually surprised the conversion works here, as I think you'll get
> > > V4L2_PIX_FMT_SBGGR10 when you expect V4L2_PIX_FMT_SBGGR10P. One
> > > possibility to fix this would be to set bayer.packing before calling
> > > toV4L2PixelFormat().
> >
> > This works because Unicam can support both packed and unpacked formats
> > when writing out to SDRAM.  As you have noticed, a subsequent change in
> this
> > series adds the option of choosing which to select.
>
> Indeed, I noticed later :-)
>
> By the way, on a semi-related note, I was wondering if unicam was about
> to convert RAW10 to RAW8.
>

Yes, it should be possible, although I can't say I've ever attempted to do
this
myself :-)
Dave Stevenson Oct. 26, 2021, 10:47 a.m. UTC | #7
On Tue, 26 Oct 2021 at 10:50, Naushir Patuck <naush@raspberrypi.com> wrote:
>
>
>
> On Tue, 26 Oct 2021 at 10:34, Laurent Pinchart <laurent.pinchart@ideasonboard.com> wrote:
>>
>> Hi Naush,
>> On Tue, Oct 26, 2021 at 08:47:15AM +0100, Naushir Patuck wrote:
>> > On Mon, 25 Oct 2021 at 17:44, Laurent Pinchart wrote:
>> > > On Fri, Oct 22, 2021 at 04:29:34PM +0100, Naushir Patuck wrote:
>> > > > On Fri, 22 Oct 2021 at 16:16, David Plowman wrote:
>> > > >
>> > > > > Hi Naush
>> > > > >
>> > > > > Thanks for the new version. I think a lot of stuff is tidier now that
>> > > > > we're clear that we're selecting a subdev format.
>> > > > >
>> > > > > On Fri, 22 Oct 2021 at 15:40, Naushir Patuck wrote:
>> > > > > >
>> > > > > > Switch the pipeline handler to use the new Unicam media controller based driver.
>> > > > > > With this change, we directly talk to the sensor device driver to set controls
>> > > > > > and set/get formats in the pipeline handler.
>> > > > > >
>> > > > > > This change requires the accompanying Raspberry Pi linux kernel change at
>> > > > > > https://github.com/raspberrypi/linux/pull/4645. If this kernel change is not
>> > > > > > present, the pipeline handler will fail to run with an error message informing
>> > > > > > the user to update the kernel build.
>> > > > > >
>> > > > > > Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
>> > > > > > ---
>> > > > > >  .../pipeline/raspberrypi/raspberrypi.cpp      | 146 +++++++++++-------
>> > > > > >  1 file changed, 90 insertions(+), 56 deletions(-)
>> > > > > >
>> > > > > > diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
>> > > > > > index 1634ca98f481..a31b0f81eba7 100644
>> > > > > > --- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
>> > > > > > +++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
>> > > > > > @@ -48,6 +48,29 @@ LOG_DEFINE_CATEGORY(RPI)
>> > > > > >
>> > > > > >  namespace {
>> > > > > >
>> > > > > > +/* Map of mbus codes to supported sizes reported by the sensor. */
>> > > > > > +using SensorFormats = std::map<unsigned int, std::vector<Size>>;
>> > > > > > +
>> > > > > > +SensorFormats populateSensorFormats(std::unique_ptr<CameraSensor> &sensor)
>> > > > > > +{
>> > > > > > +       SensorFormats formats;
>> > > > > > +
>> > > > > > +       for (auto const mbusCode : sensor->mbusCodes())
>> > > > > > +               formats.emplace(mbusCode, sensor->sizes(mbusCode));
>> > > > > > +
>> > > > > > +       return formats;
>> > > > > > +}
>> > > > > > +
>> > > > > > +inline V4L2DeviceFormat toV4L2DeviceFormat(V4L2SubdeviceFormat &mode)
>> > >
>> > > s/mode/format/
>> > >
>> > > (you know how much I like the concept of sensor mode)
>> > >
>> > > > > > +{
>> > > > > > +       V4L2DeviceFormat deviceFormat;
>> > > > > > +       BayerFormat bayer = BayerFormat::fromMbusCode(mode.mbus_code);
>> > > > > > +
>> > > > > > +       deviceFormat.fourcc = bayer.toV4L2PixelFormat();
>> > > > > > +       deviceFormat.size = mode.size;
>> > > > > > +       return deviceFormat;
>> > > > > > +}
>> > > > > > +
>> > > > >
>> > > > > I guess I wonder slightly whether this should be generally available,
>> > > > > but it could easily be a change for another time. (Actually, same
>> > > > > question for isRaw just below...)
>> > > >
>> > > > Agree, this can be addressed separately.
>> > >
>> > > I'm afraid it can't be this simple :-( There's no 1:1 mapping between
>> > > media bus codes and V4L2 pixel formats. How a given format received on a
>> > > bus by a DMA engine is stored in memory depends on the device. For
>> > > instance, unicam will store BGGR RAW10 as V4L2_PIX_FMT_SBGGR10P, while
>> > > another device may store it as V4L2_PIX_FMT_SBGGR10 (expanded to 2 bytes
>> > > per pixel in memory). Yet another device may left-align the 10 bits,
>> > > storing the data in V4L2_PIX_FMT_SBGGR16 format. This mapping can't be
>> > > provided by the libcamera core. Using the BayerFormat class to perform
>> > > the conversion with BayerFormat::fromMbusCode().toV4L2PixelFormat() may
>> > > work in some cases, but not universally.
>> >
>> > I was also a bit unsure about what to do about mbus to v4l2 format conversions.
>> > I went with using the BayerFormat class table as that was readily available, and
>> > not needing to duplicate the table in the pipeline handler. Luckily, I think in all (standard)
>> > cases, the conversion from mbus codes to v4l2 4cc's through the BayerFormat
>> > class conversion table will be valid for Unicam, but I do take the point this is
>> > not entirely correct.
>> >
>> > Perhaps I will just bite the bullet and add a similar table into our pipeline handler.  As
>> > you said, avoiding technical debt is a good thing.
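
(For illustration only, a minimal sketch of what such a Unicam-specific table could
look like; the table name and entry list are assumptions, not the actual change:)

    /* Sketch: per-device mapping from sensor mbus codes to the V4L2 pixel
     * formats that Unicam writes to memory. Needs <map>, the kernel's
     * media-bus-format.h / videodev2.h headers and libcamera's V4L2PixelFormat. */
    static const std::map<unsigned int, V4L2PixelFormat> mbusCodeToPixelFormat = {
            { MEDIA_BUS_FMT_SBGGR10_1X10, V4L2PixelFormat(V4L2_PIX_FMT_SBGGR10P) },
            { MEDIA_BUS_FMT_SGBRG10_1X10, V4L2PixelFormat(V4L2_PIX_FMT_SGBRG10P) },
            { MEDIA_BUS_FMT_SGRBG10_1X10, V4L2PixelFormat(V4L2_PIX_FMT_SGRBG10P) },
            { MEDIA_BUS_FMT_SRGGB10_1X10, V4L2PixelFormat(V4L2_PIX_FMT_SRGGB10P) },
            /* ... 8-bit and 12-bit entries omitted ... */
    };
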
>>
>> I think that would be best, to avoid introducing code that relies on the
>> assumption that device-specific conversion can be handled in a standard
>> class. It would be more difficult to refactor the BayerFormat class
>> later otherwise.
>>
>> This being said, what bothers me slightly is that the BayerFormat class
>> can do the job if the pipeline handler carefully sets the different
>> members (for instance setting the packing member after converting from
>> mbus code to BayerFormat and before converting to PixelFormat or
>> V4L2PixelFormat, but in a different pipeline handler it may also require
>> overriding the bitDepth member, when the hardware always expands RAW10
>> to 16 bits with left alignment). I think the existing BayerFormat API is
>> error-prone in that regard, it's easy to use it incorrectly. I'm not
>> sure how we could improve that though, and it would be nice to use the
>> class to perform conversions.
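
(As an aside, a minimal sketch of the "careful" usage described above; the Packing
enumerator name is an assumption and may not match the exact BayerFormat API:)

    /* Make the memory layout decision explicit instead of relying on the
     * default layout that fromMbusCode() happens to produce. */
    BayerFormat bayer = BayerFormat::fromMbusCode(sensorFormat.mbus_code);
    bayer.packing = BayerFormat::Packing::CSI2;  /* assumed name; None for unpacked */
    V4L2PixelFormat fourcc = bayer.toV4L2PixelFormat();
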
>>
>> On the other hand, the problem of converting from media bus codes to
>> pixel formats isn't specific to bayer formats (there are YUV sensors
>> too), so in the general case it needs to be handled manually in pipeline
>> handlers anyway.
>>
>> If you have any clever idea to fix all that, feel free to include it in
>> the next version :-)
>>
>> > > I'm actually surprised the conversion works here, as I think you'll get
>> > > V4L2_PIX_FMT_SBGGR10 when you expect V4L2_PIX_FMT_SBGGR10P. One
>> > > possibility to fix this would be to set bayer.packing before calling
>> > > toV4L2PixelFormat().
>> >
>> > This works because Unicam can support both packed and unpacked formats
>> > when writing out to SDRAM.  As you have noticed, a subsequent change in this
>> > series adds the option of choosing which to select.
>>
>> Indeed, I noticed later :-)
>>
>> By the way, on a semi-related note, I was wondering if unicam was able
>> to convert RAW10 to RAW8.
>
>
> Yes, it should be possible, although I can't say I've ever attempted to do this
> myself :-)

Having tried it, I'm going to disagree with you :-)

It only repacks the data with no shifting, so if you take raw 10 and
repack to raw 8 you'll lose the top two bits of each pixel, and gain
some very odd image effects in the process.
Repacking to raw 12, raw 14, or raw 16 would be fine, but it would
only be using the bottom 10 bits in each value. This is why we only
support converting V4L2_PIX_FMT_Sxxxx10P to V4L2_PIX_FMT_Sxxxx10
(unpack to 16-bit packing with no shifting).
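
(A small numeric sketch of the repacking behaviour described above; illustration
only, not Unicam code, just a C++ fragment needing <cstdint>:)

    uint16_t sample10 = 0x3a7;             /* a 10-bit Bayer sample                 */
    uint8_t repacked8 = sample10 & 0xff;   /* 0xa7: the top two bits are lost       */
    uint16_t unpacked16 = sample10;        /* 0x03a7: all 10 bits kept in the LSBs  */
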

You may be able to compress raw 10 to DPCM10 to 8
(V4L2_PIX_FMT_Sxxxx10DPCM8) if you wanted to save the memory, but
that's a whole different can of worms that I don't want to open.

  Dave

>>
>>
>> > I'll add the new table in the next revision.
>> >
>> > > The API is fragile though, and the BayerFormat
>> > > class a bit ill-defined (it doesn't tell whether it's supposed to
>> > > represent a bus format or a pixel format, and as explained above, it
>> > > can't be used interchangeably). This doesn't have to be fixed as part of
>> > > this patch series, but I'd like to see that problem addressed, and we
>> > > shouldn't build more technical debt.
>> > >
>> > > > > >  bool isRaw(PixelFormat &pixFmt)
>> > > > > >  {
>> > > > > >         /*
>> > > > > > @@ -74,11 +97,10 @@ double scoreFormat(double desired, double actual)
>> > > > > >         return score;
>> > > > > >  }
>> > > > > >
>> > > > > > -V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,
>> > > > > > -                             const Size &req)
>> > > > > > +V4L2SubdeviceFormat findBestMode(const SensorFormats &formatsMap, const Size &req)
>> > > > > >  {
>> > > > > >         double bestScore = std::numeric_limits<double>::max(), score;
>> > > > > > -       V4L2DeviceFormat bestMode;
>> > > > > > +       V4L2SubdeviceFormat bestMode;
>> > > > > >
>> > > > > >  #define PENALTY_AR             1500.0
>> > > > > >  #define PENALTY_8BIT           2000.0
>> > > > > > @@ -88,18 +110,17 @@ V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,
>> > > > > >
>> > > > > >         /* Calculate the closest/best mode from the user requested size. */
>> > > > > >         for (const auto &iter : formatsMap) {
>> > > > > > -               V4L2PixelFormat v4l2Format = iter.first;
>> > > > > > -               const PixelFormatInfo &info = PixelFormatInfo::info(v4l2Format);
>> > > > > > +               const unsigned int mbus_code = iter.first;
>> > >
>> > > s/mbus_code/mbusCode/
>> > >
>> > > > > > +               const PixelFormat format = BayerFormat::fromMbusCode(mbus_code).toPixelFormat();
>> > >
>> > > Same issue as above.
>> > >
>> > > > > > +               const PixelFormatInfo &info = PixelFormatInfo::info(format);
>> > > > > >
>> > > > > > -               for (const SizeRange &sz : iter.second) {
>> > >
>> > > s/sz/size/
>> > >
>> > > > > > -                       double modeWidth = sz.contains(req) ? req.width : sz.max.width;
>> > > > > > -                       double modeHeight = sz.contains(req) ? req.height : sz.max.height;
>> > > > > > +               for (const Size &sz : iter.second) {
>> > > > > >                         double reqAr = static_cast<double>(req.width) / req.height;
>> > > > > > -                       double modeAr = modeWidth / modeHeight;
>> > > > > > +                       double modeAr = sz.width / sz.height;
>> > > > > >
>> > > > > >                         /* Score the dimensions for closeness. */
>> > > > > > -                       score = scoreFormat(req.width, modeWidth);
>> > > > > > -                       score += scoreFormat(req.height, modeHeight);
>> > > > > > +                       score = scoreFormat(req.width, sz.width);
>> > > > > > +                       score += scoreFormat(req.height, sz.height);
>> > > > > >                         score += PENALTY_AR * scoreFormat(reqAr, modeAr);
>> > > > > >
>> > > > > >                         /* Add any penalties... this is not an exact science! */
>> > > > > > @@ -115,12 +136,12 @@ V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,
>> > > > > >
>> > > > > >                         if (score <= bestScore) {
>> > > > > >                                 bestScore = score;
>> > > > > > -                               bestMode.fourcc = v4l2Format;
>> > > > > > -                               bestMode.size = Size(modeWidth, modeHeight);
>> > > > > > +                               bestMode.mbus_code = mbus_code;
>> > > > > > +                               bestMode.size = sz;
>> > > > > >                         }
>> > > > > >
>> > > > > > -                       LOG(RPI, Info) << "Mode: " << modeWidth << "x" << modeHeight
>> > > > > > -                                      << " fmt " << v4l2Format.toString()
>> > > > > > +                       LOG(RPI, Info) << "Mode: " << sz.width << "x" << sz.height
>> > >
>> > > You can use sz.toString().
>> > >
>> > > > > > +                                      << " fmt " << format.toString()
>> > > > > >                                        << " Score: " << score
>> > > > > >                                        << " (best " << bestScore << ")";
>> > > > > >                 }
>> > > > > > @@ -170,6 +191,7 @@ public:
>> > > > > >         std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;
>> > > > > >
>> > > > > >         std::unique_ptr<CameraSensor> sensor_;
>> > > > > > +       SensorFormats sensorFormats_;
>> > > > > >         /* Array of Unicam and ISP device streams and associated buffers/streams. */
>> > > > > >         RPi::Device<Unicam, 2> unicam_;
>> > > > > >         RPi::Device<Isp, 4> isp_;
>> > > > > > @@ -352,9 +374,9 @@ CameraConfiguration::Status RPiCameraConfiguration::validate()
>> > > > > >                          * Calculate the best sensor mode we can use based on
>> > > > > >                          * the user request.
>> > > > > >                          */
>> > > > > > -                       V4L2VideoDevice::Formats fmts = data_->unicam_[Unicam::Image].dev()->formats();
>> > > > > > -                       V4L2DeviceFormat sensorFormat = findBestMode(fmts, cfg.size);
>> > > > > > -                       int ret = data_->unicam_[Unicam::Image].dev()->tryFormat(&sensorFormat);
>> > > > > > +                       V4L2SubdeviceFormat sensorFormat = findBestMode(data_->sensorFormats_, cfg.size);
>> > > > > > +                       V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat);
>> > > > > > +                       int ret = data_->unicam_[Unicam::Image].dev()->tryFormat(&unicamFormat);
>> > > > > >                         if (ret)
>> > > > > >                                 return Invalid;
>> > > > > >
>> > > > > > @@ -366,7 +388,7 @@ CameraConfiguration::Status RPiCameraConfiguration::validate()
>> > > > > >                          * fetch the "native" (i.e. untransformed) Bayer order,
>> > > > > >                          * because the sensor may currently be flipped!
>> > > > > >                          */
>> > > > > > -                       V4L2PixelFormat fourcc = sensorFormat.fourcc;
>> > > > > > +                       V4L2PixelFormat fourcc = unicamFormat.fourcc;
>> > > > > >                         if (data_->flipsAlterBayerOrder_) {
>> > > > > >                                 BayerFormat bayer = BayerFormat::fromV4L2PixelFormat(fourcc);
>> > > > > >                                 bayer.order = data_->nativeBayerOrder_;
>> > > > > > @@ -375,15 +397,15 @@ CameraConfiguration::Status RPiCameraConfiguration::validate()
>> > > > > >                         }
>> > > > > >
>> > > > > >                         PixelFormat sensorPixFormat = fourcc.toPixelFormat();
>> > > > > > -                       if (cfg.size != sensorFormat.size ||
>> > > > > > +                       if (cfg.size != unicamFormat.size ||
>> > > > > >                             cfg.pixelFormat != sensorPixFormat) {
>> > > > > > -                               cfg.size = sensorFormat.size;
>> > > > > > +                               cfg.size = unicamFormat.size;
>> > > > > >                                 cfg.pixelFormat = sensorPixFormat;
>> > > > > >                                 status = Adjusted;
>> > > > > >                         }
>> > > > > >
>> > > > > > -                       cfg.stride = sensorFormat.planes[0].bpl;
>> > > > > > -                       cfg.frameSize = sensorFormat.planes[0].size;
>> > > > > > +                       cfg.stride = unicamFormat.planes[0].bpl;
>> > > > > > +                       cfg.frameSize = unicamFormat.planes[0].size;
>> > > > > >
>> > > > > >                         rawCount++;
>> > > > > >                 } else {
>> > > > > > @@ -472,7 +494,8 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
>> > > > > >  {
>> > > > > >         RPiCameraData *data = cameraData(camera);
>> > > > > >         CameraConfiguration *config = new RPiCameraConfiguration(data);
>> > > > > > -       V4L2DeviceFormat sensorFormat;
>> > > > > > +       V4L2SubdeviceFormat sensorFormat;
>> > > > > > +       V4L2DeviceFormat unicamFormat;
>> > > > > >         unsigned int bufferCount;
>> > > > > >         PixelFormat pixelFormat;
>> > > > > >         V4L2VideoDevice::Formats fmts;
>> > > > > > @@ -487,9 +510,9 @@ CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
>> > > > > >                 switch (role) {
>> > > > > >                 case StreamRole::Raw:
>> > > > > >                         size = data->sensor_->resolution();
>> > > > > > -                       fmts = data->unicam_[Unicam::Image].dev()->formats();
>> > > > > > -                       sensorFormat = findBestMode(fmts, size);
>> > > > > > -                       pixelFormat = sensorFormat.fourcc.toPixelFormat();
>> > > > > > +                       sensorFormat = findBestMode(data->sensorFormats_, size);
>> > > > > > +                       unicamFormat = toV4L2DeviceFormat(sensorFormat);
>> > > > > > +                       pixelFormat = BayerFormat::fromMbusCode(sensorFormat.mbus_code).toPixelFormat();
>> > >
>> > > Same issue as above.
>> > >
>> > > > > >                         ASSERT(pixelFormat.isValid());
>> > > > > >                         bufferCount = 2;
>> > > > > >                         rawCount++;
>> > > > > > @@ -599,32 +622,30 @@ int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
>> > > > > >         }
>> > > > > >
>> > > > > >         /* First calculate the best sensor mode we can use based on the user request. */
>> > > > > > -       V4L2VideoDevice::Formats fmts = data->unicam_[Unicam::Image].dev()->formats();
>> > > > > > -       V4L2DeviceFormat sensorFormat = findBestMode(fmts, rawStream ? sensorSize : maxSize);
>> > > > > > +       V4L2SubdeviceFormat sensorFormat = findBestMode(data->sensorFormats_, rawStream ? sensorSize : maxSize);
>> > > > > > +       V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat);
>> > >
>> > > Shouldn't this go after sensor_->setFormat() in case the sensor adjusts
>> > > the format ? Actually, as adjustments are not allowed in configure(),
>> > > you should check that the format set on the sensor matches the requested
>> > > format.
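
(A rough sketch of that check, assuming CameraSensor::setFormat() updates its
argument with the format the sensor actually applied; illustrative only, not the
actual patch:)

    V4L2SubdeviceFormat sensorFormat = findBestMode(data->sensorFormats_, rawStream ? sensorSize : maxSize);
    const V4L2SubdeviceFormat requested = sensorFormat;

    ret = data->sensor_->setFormat(&sensorFormat);
    if (ret)
            return ret;

    /* Adjustments are not allowed in configure(), so reject any change. */
    if (sensorFormat.size != requested.size ||
        sensorFormat.mbus_code != requested.mbus_code)
            return -EINVAL;

    /* Derive the Unicam format only from the format the sensor accepted. */
    V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat);
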
>> > >
>> > > > > > +
>> > > > > > +       ret = data->sensor_->setFormat(&sensorFormat);
>> > > > > > +       if (ret)
>> > > > > > +               return ret;
>> > > > > >
>> > > > > >         /*
>> > > > > >          * Unicam image output format. The ISP input format gets set at start,
>> > > > > >          * just in case we have swapped bayer orders due to flips.
>> > > > > >          */
>> > > > > > -       ret = data->unicam_[Unicam::Image].dev()->setFormat(&sensorFormat);
>> > > > > > +       ret = data->unicam_[Unicam::Image].dev()->setFormat(&unicamFormat);
>> > > > > >         if (ret)
>> > > > > >                 return ret;
>> > > > > >
>> > > > > > -       /*
>> > > > > > -        * The control ranges associated with the sensor may need updating
>> > > > > > -        * after a format change.
>> > > > > > -        * \todo Use the CameraSensor::setFormat API instead.
>> > > > > > -        */
>> > > > > > -       data->sensor_->updateControlInfo();
>> > > > > > -
>> > > > > >         LOG(RPI, Info) << "Sensor: " << camera->id()
>> > > > > > -                      << " - Selected mode: " << sensorFormat.toString();
>> > > > > > +                      << " - Selected sensor mode: " << sensorFormat.toString()
>> > > > > > +                      << " - Selected unicam mode: " << unicamFormat.toString();
>> > >
>> > > s/mode/format/ on both lines
>> > >
>> > > > > >
>> > > > > >         /*
>> > > > > >          * This format may be reset on start() if the bayer order has changed
>> > > > > >          * because of flips in the sensor.
>> > > > > >          */
>> > > > > > -       ret = data->isp_[Isp::Input].dev()->setFormat(&sensorFormat);
>> > > > > > +       ret = data->isp_[Isp::Input].dev()->setFormat(&unicamFormat);
>> > > > > >         if (ret)
>> > > > > >                 return ret;
>> > > > > >
>> > > > > > @@ -746,8 +767,8 @@ int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
>> > > > > >         data->ispMinCropSize_ = testCrop.size();
>> > > > > >
>> > > > > >         /* Adjust aspect ratio by providing crops on the input image. */
>> > > > > > -       Size size = sensorFormat.size.boundedToAspectRatio(maxSize);
>> > > > > > -       Rectangle crop = size.centeredTo(Rectangle(sensorFormat.size).center());
>> > > > > > +       Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
>> > > > > > +       Rectangle crop = size.centeredTo(Rectangle(unicamFormat.size).center());
>> > > > > >         data->ispCrop_ = crop;
>> > > > > >
>> > > > > >  data->isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &crop);
>> > > > > > @@ -761,8 +782,11 @@ int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
>> > > > > >          * supports it.
>> > > > > >          */
>> > > > > >         if (data->sensorMetadata_) {
>> > > > > > -               format = {};
>> > > > > > +               V4L2SubdeviceFormat embeddedFormat;
>> > > > > > +
>> > > > > > +               data->sensor_->device()->getFormat(1, &embeddedFormat);
>> > > > > >                 format.fourcc = V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);
>> > > > > > +               format.planes[0].size = embeddedFormat.size.width * embeddedFormat.size.height;
>> > > > > >
>> > > > > >                 LOG(RPI, Debug) << "Setting embedded data format.";
>> > > > > >                 ret = data->unicam_[Unicam::Embedded].dev()->setFormat(&format);
>> > > > > > @@ -847,9 +871,14 @@ int PipelineHandlerRPi::start(Camera *camera, const ControlList *controls)
>> > > > > >          * IPA configure may have changed the sensor flips - hence the bayer
>> > > > > >          * order. Get the sensor format and set the ISP input now.
>> > > > > >          */
>> > > > > > -       V4L2DeviceFormat sensorFormat;
>> > > > > > -       data->unicam_[Unicam::Image].dev()->getFormat(&sensorFormat);
>> > > > > > -       ret = data->isp_[Isp::Input].dev()->setFormat(&sensorFormat);
>> > > > > > +       V4L2SubdeviceFormat sensorFormat;
>> > > > > > +       data->sensor_->device()->getFormat(0, &sensorFormat);
>> > > > > > +
>> > > > > > +       V4L2DeviceFormat ispFormat;
>> > > > > > +       ispFormat.fourcc = BayerFormat::fromMbusCode(sensorFormat.mbus_code).toV4L2PixelFormat();
>> > > > >
>> > > > > Generally there's the occasional assumption that fromMbusCode won't
>> > > > > return invalid Bayer formats, and I guess we're comfortable with that?
>> > > > > It certainly shouldn't be possible.
>> > > >
>> > > > Yes, I ought to add an assert to ensure the bayer format is valid.
>> > > >
>> > > > > But it seems good to me, so:
>> > > > >
>> > > > > Reviewed-by: David Plowman <david.plowman@raspberrypi.com>
>> > > > >
>> > > > > > +       ispFormat.size = sensorFormat.size;
>> > > > > > +
>> > > > > > +       ret = data->isp_[Isp::Input].dev()->setFormat(&ispFormat);
>> > > > > >         if (ret) {
>> > > > > >                 stop(camera);
>> > > > > >                 return ret;
>> > > > > > @@ -1004,6 +1033,8 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
>> > > > > >         if (data->sensor_->init())
>> > > > > >                 return false;
>> > > > > >
>> > > > > > +       data->sensorFormats_ = populateSensorFormats(data->sensor_);
>> > > > > > +
>> > > > > >         ipa::RPi::SensorConfig sensorConfig;
>> > > > > >         if (data->loadIPA(&sensorConfig)) {
>> > > > > >                 LOG(RPI, Error) << "Failed to load a suitable IPA library";
>> > > > > > @@ -1030,6 +1061,11 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
>> > > > > >                         return false;
>> > > > > >         }
>> > > > > >
>> > > > > > +       if (!(data->unicam_[Unicam::Image].dev()->caps().device_caps() & V4L2_CAP_IO_MC)) {
>> > > > > > +               LOG(RPI, Error) << "Unicam driver did not advertise V4L2_CAP_IO_MC, please update your kernel!";
>> > > > > > +               return false;
>> > > > > > +       }
>> > > > > > +
>> > > > > >         /*
>> > > > > >          * Setup our delayed control writer with the sensor default
>> > > > > >          * gain and exposure delays. Mark VBLANK for priority write.
>> > > > > > @@ -1039,7 +1075,7 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
>> > > > > >                 { V4L2_CID_EXPOSURE, { sensorConfig.exposureDelay, false } },
>> > > > > >                 { V4L2_CID_VBLANK, { sensorConfig.vblankDelay, true } }
>> > > > > >         };
>> > > > > > -       data->delayedCtrls_ = std::make_unique<DelayedControls>(data->unicam_[Unicam::Image].dev(), params);
>> > > > > > +       data->delayedCtrls_ = std::make_unique<DelayedControls>(data->sensor_->device(), params);
>> > > > > >         data->sensorMetadata_ = sensorConfig.sensorMetadata;
>> > > > > >
>> > > > > >         /* Register the controls that the Raspberry Pi IPA can handle. */
>> > > > > > @@ -1066,15 +1102,14 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
>> > > > > >          * As part of answering the final question, we reset the camera to
>> > > > > >          * no transform at all.
>> > > > > >          */
>> > > > > > -
>> > > > > > -       V4L2VideoDevice *dev = data->unicam_[Unicam::Image].dev();
>> > > > > > -       const struct v4l2_query_ext_ctrl *hflipCtrl = dev->controlInfo(V4L2_CID_HFLIP);
>> > > > > > +       const V4L2Subdevice *sensor = data->sensor_->device();
>> > > > > > +       const struct v4l2_query_ext_ctrl *hflipCtrl = sensor->controlInfo(V4L2_CID_HFLIP);
>> > > > > >         if (hflipCtrl) {
>> > > > > >                 /* We assume it will support vflips too... */
>> > > > > >                 data->supportsFlips_ = true;
>> > > > > >                 data->flipsAlterBayerOrder_ = hflipCtrl->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT;
>> > > > > >
>> > > > > > -               ControlList ctrls(dev->controls());
>> > > > > > +               ControlList ctrls(data->sensor_->controls());
>> > > > > >                 ctrls.set(V4L2_CID_HFLIP, 0);
>> > > > > >                 ctrls.set(V4L2_CID_VFLIP, 0);
>> > > > > >                 data->setSensorControls(ctrls);
>> > > > > > @@ -1082,9 +1117,8 @@ bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
>> > > > > >
>> > > > > >         /* Look for a valid Bayer format. */
>> > > > > >         BayerFormat bayerFormat;
>> > > > > > -       for (const auto &iter : dev->formats()) {
>> > > > > > -               V4L2PixelFormat v4l2Format = iter.first;
>> > > > > > -               bayerFormat = BayerFormat::fromV4L2PixelFormat(v4l2Format);
>> > > > > > +       for (const auto &iter : data->sensorFormats_) {
>> > > > > > +               bayerFormat = BayerFormat::fromMbusCode(iter.first);
>> > > > > >                 if (bayerFormat.isValid())
>> > > > > >                         break;
>> > > > > >         }
>> > > > > > @@ -1271,7 +1305,7 @@ int RPiCameraData::configureIPA(const CameraConfiguration *config)
>> > > > > >                 }
>> > > > > >         }
>> > > > > >
>> > > > > > -       entityControls.emplace(0, unicam_[Unicam::Image].dev()->controls());
>> > > > > > +       entityControls.emplace(0, sensor_->controls());
>> > > > > >         entityControls.emplace(1, isp_[Isp::Input].dev()->controls());
>> > > > > >
>> > > > > >         /* Always send the user transform to the IPA. */
>> > > > > > @@ -1406,10 +1440,10 @@ void RPiCameraData::setSensorControls(ControlList &controls)
>> > > > > >                 ControlList vblank_ctrl;
>> > > > > >
>> > > > > >                 vblank_ctrl.set(V4L2_CID_VBLANK, controls.get(V4L2_CID_VBLANK));
>> > > > > > -  unicam_[Unicam::Image].dev()->setControls(&vblank_ctrl);
>> > > > > > +               sensor_->setControls(&vblank_ctrl);
>> > > > > >         }
>> > > > > >
>> > > > > > -       unicam_[Unicam::Image].dev()->setControls(&controls);
>> > > > > > +       sensor_->setControls(&controls);
>> > > > > >  }
>> > > > > >
>> > > > > >  void RPiCameraData::unicamBufferDequeue(FrameBuffer *buffer)
>>
>> --
>> Regards,
>>
>> Laurent Pinchart

Patch
diff mbox series

diff --git a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
index 1634ca98f481..a31b0f81eba7 100644
--- a/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
+++ b/src/libcamera/pipeline/raspberrypi/raspberrypi.cpp
@@ -48,6 +48,29 @@  LOG_DEFINE_CATEGORY(RPI)
 
 namespace {
 
+/* Map of mbus codes to supported sizes reported by the sensor. */
+using SensorFormats = std::map<unsigned int, std::vector<Size>>;
+
+SensorFormats populateSensorFormats(std::unique_ptr<CameraSensor> &sensor)
+{
+	SensorFormats formats;
+
+	for (auto const mbusCode : sensor->mbusCodes())
+		formats.emplace(mbusCode, sensor->sizes(mbusCode));
+
+	return formats;
+}
+
+inline V4L2DeviceFormat toV4L2DeviceFormat(V4L2SubdeviceFormat &mode)
+{
+	V4L2DeviceFormat deviceFormat;
+	BayerFormat bayer = BayerFormat::fromMbusCode(mode.mbus_code);
+
+	deviceFormat.fourcc = bayer.toV4L2PixelFormat();
+	deviceFormat.size = mode.size;
+	return deviceFormat;
+}
+
 bool isRaw(PixelFormat &pixFmt)
 {
 	/*
@@ -74,11 +97,10 @@  double scoreFormat(double desired, double actual)
 	return score;
 }
 
-V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,
-			      const Size &req)
+V4L2SubdeviceFormat findBestMode(const SensorFormats &formatsMap, const Size &req)
 {
 	double bestScore = std::numeric_limits<double>::max(), score;
-	V4L2DeviceFormat bestMode;
+	V4L2SubdeviceFormat bestMode;
 
 #define PENALTY_AR		1500.0
 #define PENALTY_8BIT		2000.0
@@ -88,18 +110,17 @@  V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,
 
 	/* Calculate the closest/best mode from the user requested size. */
 	for (const auto &iter : formatsMap) {
-		V4L2PixelFormat v4l2Format = iter.first;
-		const PixelFormatInfo &info = PixelFormatInfo::info(v4l2Format);
+		const unsigned int mbus_code = iter.first;
+		const PixelFormat format = BayerFormat::fromMbusCode(mbus_code).toPixelFormat();
+		const PixelFormatInfo &info = PixelFormatInfo::info(format);
 
-		for (const SizeRange &sz : iter.second) {
-			double modeWidth = sz.contains(req) ? req.width : sz.max.width;
-			double modeHeight = sz.contains(req) ? req.height : sz.max.height;
+		for (const Size &sz : iter.second) {
 			double reqAr = static_cast<double>(req.width) / req.height;
-			double modeAr = modeWidth / modeHeight;
+			double modeAr = sz.width / sz.height;
 
 			/* Score the dimensions for closeness. */
-			score = scoreFormat(req.width, modeWidth);
-			score += scoreFormat(req.height, modeHeight);
+			score = scoreFormat(req.width, sz.width);
+			score += scoreFormat(req.height, sz.height);
 			score += PENALTY_AR * scoreFormat(reqAr, modeAr);
 
 			/* Add any penalties... this is not an exact science! */
@@ -115,12 +136,12 @@  V4L2DeviceFormat findBestMode(V4L2VideoDevice::Formats &formatsMap,
 
 			if (score <= bestScore) {
 				bestScore = score;
-				bestMode.fourcc = v4l2Format;
-				bestMode.size = Size(modeWidth, modeHeight);
+				bestMode.mbus_code = mbus_code;
+				bestMode.size = sz;
 			}
 
-			LOG(RPI, Info) << "Mode: " << modeWidth << "x" << modeHeight
-				       << " fmt " << v4l2Format.toString()
+			LOG(RPI, Info) << "Mode: " << sz.width << "x" << sz.height
+				       << " fmt " << format.toString()
 				       << " Score: " << score
 				       << " (best " << bestScore << ")";
 		}
@@ -170,6 +191,7 @@  public:
 	std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;
 
 	std::unique_ptr<CameraSensor> sensor_;
+	SensorFormats sensorFormats_;
 	/* Array of Unicam and ISP device streams and associated buffers/streams. */
 	RPi::Device<Unicam, 2> unicam_;
 	RPi::Device<Isp, 4> isp_;
@@ -352,9 +374,9 @@  CameraConfiguration::Status RPiCameraConfiguration::validate()
 			 * Calculate the best sensor mode we can use based on
 			 * the user request.
 			 */
-			V4L2VideoDevice::Formats fmts = data_->unicam_[Unicam::Image].dev()->formats();
-			V4L2DeviceFormat sensorFormat = findBestMode(fmts, cfg.size);
-			int ret = data_->unicam_[Unicam::Image].dev()->tryFormat(&sensorFormat);
+			V4L2SubdeviceFormat sensorFormat = findBestMode(data_->sensorFormats_, cfg.size);
+			V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat);
+			int ret = data_->unicam_[Unicam::Image].dev()->tryFormat(&unicamFormat);
 			if (ret)
 				return Invalid;
 
@@ -366,7 +388,7 @@  CameraConfiguration::Status RPiCameraConfiguration::validate()
 			 * fetch the "native" (i.e. untransformed) Bayer order,
 			 * because the sensor may currently be flipped!
 			 */
-			V4L2PixelFormat fourcc = sensorFormat.fourcc;
+			V4L2PixelFormat fourcc = unicamFormat.fourcc;
 			if (data_->flipsAlterBayerOrder_) {
 				BayerFormat bayer = BayerFormat::fromV4L2PixelFormat(fourcc);
 				bayer.order = data_->nativeBayerOrder_;
@@ -375,15 +397,15 @@  CameraConfiguration::Status RPiCameraConfiguration::validate()
 			}
 
 			PixelFormat sensorPixFormat = fourcc.toPixelFormat();
-			if (cfg.size != sensorFormat.size ||
+			if (cfg.size != unicamFormat.size ||
 			    cfg.pixelFormat != sensorPixFormat) {
-				cfg.size = sensorFormat.size;
+				cfg.size = unicamFormat.size;
 				cfg.pixelFormat = sensorPixFormat;
 				status = Adjusted;
 			}
 
-			cfg.stride = sensorFormat.planes[0].bpl;
-			cfg.frameSize = sensorFormat.planes[0].size;
+			cfg.stride = unicamFormat.planes[0].bpl;
+			cfg.frameSize = unicamFormat.planes[0].size;
 
 			rawCount++;
 		} else {
@@ -472,7 +494,8 @@  CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
 {
 	RPiCameraData *data = cameraData(camera);
 	CameraConfiguration *config = new RPiCameraConfiguration(data);
-	V4L2DeviceFormat sensorFormat;
+	V4L2SubdeviceFormat sensorFormat;
+	V4L2DeviceFormat unicamFormat;
 	unsigned int bufferCount;
 	PixelFormat pixelFormat;
 	V4L2VideoDevice::Formats fmts;
@@ -487,9 +510,9 @@  CameraConfiguration *PipelineHandlerRPi::generateConfiguration(Camera *camera,
 		switch (role) {
 		case StreamRole::Raw:
 			size = data->sensor_->resolution();
-			fmts = data->unicam_[Unicam::Image].dev()->formats();
-			sensorFormat = findBestMode(fmts, size);
-			pixelFormat = sensorFormat.fourcc.toPixelFormat();
+			sensorFormat = findBestMode(data->sensorFormats_, size);
+			unicamFormat = toV4L2DeviceFormat(sensorFormat);
+			pixelFormat = BayerFormat::fromMbusCode(sensorFormat.mbus_code).toPixelFormat();
 			ASSERT(pixelFormat.isValid());
 			bufferCount = 2;
 			rawCount++;
@@ -599,32 +622,30 @@  int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
 	}
 
 	/* First calculate the best sensor mode we can use based on the user request. */
-	V4L2VideoDevice::Formats fmts = data->unicam_[Unicam::Image].dev()->formats();
-	V4L2DeviceFormat sensorFormat = findBestMode(fmts, rawStream ? sensorSize : maxSize);
+	V4L2SubdeviceFormat sensorFormat = findBestMode(data->sensorFormats_, rawStream ? sensorSize : maxSize);
+	V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(sensorFormat);
+
+	ret = data->sensor_->setFormat(&sensorFormat);
+	if (ret)
+		return ret;
 
 	/*
 	 * Unicam image output format. The ISP input format gets set at start,
 	 * just in case we have swapped bayer orders due to flips.
 	 */
-	ret = data->unicam_[Unicam::Image].dev()->setFormat(&sensorFormat);
+	ret = data->unicam_[Unicam::Image].dev()->setFormat(&unicamFormat);
 	if (ret)
 		return ret;
 
-	/*
-	 * The control ranges associated with the sensor may need updating
-	 * after a format change.
-	 * \todo Use the CameraSensor::setFormat API instead.
-	 */
-	data->sensor_->updateControlInfo();
-
 	LOG(RPI, Info) << "Sensor: " << camera->id()
-		       << " - Selected mode: " << sensorFormat.toString();
+		       << " - Selected sensor mode: " << sensorFormat.toString()
+		       << " - Selected unicam mode: " << unicamFormat.toString();
 
 	/*
 	 * This format may be reset on start() if the bayer order has changed
 	 * because of flips in the sensor.
 	 */
-	ret = data->isp_[Isp::Input].dev()->setFormat(&sensorFormat);
+	ret = data->isp_[Isp::Input].dev()->setFormat(&unicamFormat);
 	if (ret)
 		return ret;
 
@@ -746,8 +767,8 @@  int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
 	data->ispMinCropSize_ = testCrop.size();
 
 	/* Adjust aspect ratio by providing crops on the input image. */
-	Size size = sensorFormat.size.boundedToAspectRatio(maxSize);
-	Rectangle crop = size.centeredTo(Rectangle(sensorFormat.size).center());
+	Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
+	Rectangle crop = size.centeredTo(Rectangle(unicamFormat.size).center());
 	data->ispCrop_ = crop;
 
 	data->isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &crop);
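
The ispCrop_ computed above is simply the sensor frame bounded to the aspect ratio of the largest output and centred. A standalone sketch with example numbers (a 1920x1440 mode and a 1920x1080 largest output); it keeps the width fixed for simplicity, whereas Size::boundedToAspectRatio() handles the general case:

#include <iostream>

struct Sz { unsigned int w, h; };

int main()
{
	Sz mode{ 1920, 1440 };   /* example unicamFormat.size (4:3) */
	Sz maxOut{ 1920, 1080 }; /* example largest requested output (16:9) */

	/* Bound the sensor frame to the output aspect ratio (width kept here). */
	Sz crop{ mode.w, mode.w * maxOut.h / maxOut.w }; /* 1920x1080 */

	/* Centre the crop within the full frame. */
	unsigned int x = (mode.w - crop.w) / 2; /* 0 */
	unsigned int y = (mode.h - crop.h) / 2; /* 180 */

	std::cout << crop.w << "x" << crop.h << " @ (" << x << ", " << y << ")\n";
}
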
@@ -761,8 +782,11 @@  int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
 	 * supports it.
 	 */
 	if (data->sensorMetadata_) {
-		format = {};
+		V4L2SubdeviceFormat embeddedFormat;
+
+		data->sensor_->device()->getFormat(1, &embeddedFormat);
 		format.fourcc = V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);
+		format.planes[0].size = embeddedFormat.size.width * embeddedFormat.size.height;
 
 		LOG(RPI, Debug) << "Setting embedded data format.";
 		ret = data->unicam_[Unicam::Embedded].dev()->setFormat(&format);
@@ -847,9 +871,14 @@  int PipelineHandlerRPi::start(Camera *camera, const ControlList *controls)
 	 * IPA configure may have changed the sensor flips - hence the bayer
 	 * order. Get the sensor format and set the ISP input now.
 	 */
-	V4L2DeviceFormat sensorFormat;
-	data->unicam_[Unicam::Image].dev()->getFormat(&sensorFormat);
-	ret = data->isp_[Isp::Input].dev()->setFormat(&sensorFormat);
+	V4L2SubdeviceFormat sensorFormat;
+	data->sensor_->device()->getFormat(0, &sensorFormat);
+
+	V4L2DeviceFormat ispFormat;
+	ispFormat.fourcc = BayerFormat::fromMbusCode(sensorFormat.mbus_code).toV4L2PixelFormat();
+	ispFormat.size = sensorFormat.size;
+
+	ret = data->isp_[Isp::Input].dev()->setFormat(&ispFormat);
 	if (ret) {
 		stop(camera);
 		return ret;
@@ -1004,6 +1033,8 @@  bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
 	if (data->sensor_->init())
 		return false;
 
+	data->sensorFormats_ = populateSensorFormats(data->sensor_);
+
 	ipa::RPi::SensorConfig sensorConfig;
 	if (data->loadIPA(&sensorConfig)) {
 		LOG(RPI, Error) << "Failed to load a suitable IPA library";
@@ -1030,6 +1061,11 @@  bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
 			return false;
 	}
 
+	if (!(data->unicam_[Unicam::Image].dev()->caps().device_caps() & V4L2_CAP_IO_MC)) {
+		LOG(RPI, Error) << "Unicam driver did not advertise V4L2_CAP_IO_MC, please update your kernel!";
+		return false;
+	}
+
 	/*
 	 * Setup our delayed control writer with the sensor default
 	 * gain and exposure delays. Mark VBLANK for priority write.
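
The V4L2_CAP_IO_MC test above is what produces the "update your kernel" failure mentioned in the commit message: only the media-controller build of the Unicam driver reports that flag on the image node. For reference, the same capability can be checked with plain V4L2 as sketched below; the device node path is a placeholder and V4L2_CAP_IO_MC requires kernel headers from v5.7 or later.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>
#include <cstdio>

int main()
{
	int fd = open("/dev/video0", O_RDWR); /* placeholder for the Unicam image node */
	if (fd < 0)
		return 1;

	struct v4l2_capability caps = {};
	if (ioctl(fd, VIDIOC_QUERYCAP, &caps) == 0)
		printf("V4L2_CAP_IO_MC: %s\n",
		       (caps.device_caps & V4L2_CAP_IO_MC) ? "yes" : "no");

	close(fd);
	return 0;
}
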
@@ -1039,7 +1075,7 @@  bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
 		{ V4L2_CID_EXPOSURE, { sensorConfig.exposureDelay, false } },
 		{ V4L2_CID_VBLANK, { sensorConfig.vblankDelay, true } }
 	};
-	data->delayedCtrls_ = std::make_unique<DelayedControls>(data->unicam_[Unicam::Image].dev(), params);
+	data->delayedCtrls_ = std::make_unique<DelayedControls>(data->sensor_->device(), params);
 	data->sensorMetadata_ = sensorConfig.sensorMetadata;
 
 	/* Register the controls that the Raspberry Pi IPA can handle. */
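
DelayedControls is now constructed on the sensor subdevice rather than the Unicam video node, with per-control delays taken from the IPA's SensorConfig. Those delays express how many frames it takes for a value written to the sensor to affect an exposed frame. The toy model below illustrates only that bookkeeping idea, with made-up numbers; it is not the DelayedControls API.

#include <cstdio>
#include <deque>

struct DelayLine {
	std::deque<int> inFlight; /* values already written, oldest first */

	DelayLine(unsigned int delay, int initial)
	{
		/* Assume 'initial' has been on the sensor long enough to be active. */
		inFlight.assign(delay, initial);
	}

	/* Write a new value; return the one in effect for this frame. */
	int advance(int newValue)
	{
		inFlight.push_back(newValue);
		int active = inFlight.front();
		inFlight.pop_front();
		return active;
	}
};

int main()
{
	DelayLine exposure(2, 100); /* e.g. exposure takes 2 frames to land */

	for (int frame = 0; frame < 4; frame++) {
		int requested = 100 + frame * 10;
		printf("frame %d: requested %d, active %d\n",
		       frame, requested, exposure.advance(requested));
	}

	/* The value requested at frame 1 (110) only takes effect on frame 3. */
	return 0;
}
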
@@ -1066,15 +1102,14 @@  bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
 	 * As part of answering the final question, we reset the camera to
 	 * no transform at all.
 	 */
-
-	V4L2VideoDevice *dev = data->unicam_[Unicam::Image].dev();
-	const struct v4l2_query_ext_ctrl *hflipCtrl = dev->controlInfo(V4L2_CID_HFLIP);
+	const V4L2Subdevice *sensor = data->sensor_->device();
+	const struct v4l2_query_ext_ctrl *hflipCtrl = sensor->controlInfo(V4L2_CID_HFLIP);
 	if (hflipCtrl) {
 		/* We assume it will support vflips too... */
 		data->supportsFlips_ = true;
 		data->flipsAlterBayerOrder_ = hflipCtrl->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT;
 
-		ControlList ctrls(dev->controls());
+		ControlList ctrls(data->sensor_->controls());
 		ctrls.set(V4L2_CID_HFLIP, 0);
 		ctrls.set(V4L2_CID_VFLIP, 0);
 		data->setSensorControls(ctrls);
@@ -1082,9 +1117,8 @@  bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
 
 	/* Look for a valid Bayer format. */
 	BayerFormat bayerFormat;
-	for (const auto &iter : dev->formats()) {
-		V4L2PixelFormat v4l2Format = iter.first;
-		bayerFormat = BayerFormat::fromV4L2PixelFormat(v4l2Format);
+	for (const auto &iter : data->sensorFormats_) {
+		bayerFormat = BayerFormat::fromMbusCode(iter.first);
 		if (bayerFormat.isValid())
 			break;
 	}
@@ -1271,7 +1305,7 @@  int RPiCameraData::configureIPA(const CameraConfiguration *config)
 		}
 	}
 
-	entityControls.emplace(0, unicam_[Unicam::Image].dev()->controls());
+	entityControls.emplace(0, sensor_->controls());
 	entityControls.emplace(1, isp_[Isp::Input].dev()->controls());
 
 	/* Always send the user transform to the IPA. */
@@ -1406,10 +1440,10 @@  void RPiCameraData::setSensorControls(ControlList &controls)
 		ControlList vblank_ctrl;
 
 		vblank_ctrl.set(V4L2_CID_VBLANK, controls.get(V4L2_CID_VBLANK));
-		unicam_[Unicam::Image].dev()->setControls(&vblank_ctrl);
+		sensor_->setControls(&vblank_ctrl);
 	}
 
-	unicam_[Unicam::Image].dev()->setControls(&controls);
+	sensor_->setControls(&controls);
 }
 
 void RPiCameraData::unicamBufferDequeue(FrameBuffer *buffer)
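
setSensorControls() now talks straight to the sensor subdevice, and pushes VBLANK out in its own ControlList ahead of the rest. The ordering matters because (as an assumption here, not spelled out in this hunk) sensor drivers typically clamp exposure to the current frame length, so a larger VBLANK has to land before the longer exposure it enables. A toy illustration with hypothetical numbers:

#include <algorithm>
#include <cstdio>

int main()
{
	unsigned int height = 3040, margin = 20;     /* hypothetical sensor numbers */
	unsigned int vblankOld = 60, vblankNew = 2000;
	unsigned int wantedExposure = 4500;          /* in lines */

	unsigned int maxOld = height + vblankOld - margin; /* 3080 */
	unsigned int maxNew = height + vblankNew - margin; /* 5020 */

	printf("exposure clamped to %u before the VBLANK write, %u after\n",
	       std::min(wantedExposure, maxOld), std::min(wantedExposure, maxNew));
	return 0;
}
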