diff --git a/src/ipa/rpi/vc4/vc4.cpp b/src/ipa/rpi/vc4/vc4.cpp
index 0e022c2aeed3..f3d83b2afadf 100644
--- a/src/ipa/rpi/vc4/vc4.cpp
+++ b/src/ipa/rpi/vc4/vc4.cpp
@@ -538,7 +538,7 @@ extern "C" {
 const struct IPAModuleInfo ipaModuleInfo = {
 	IPA_MODULE_API_VERSION,
 	1,
-	"PipelineHandlerRPi",
+	"PipelineHandlerVc4",
 	"vc4",
 };
 
diff --git a/src/libcamera/pipeline/rpi/common/meson.build b/src/libcamera/pipeline/rpi/common/meson.build
index 1dec6d3d028b..f8ea790b42a1 100644
--- a/src/libcamera/pipeline/rpi/common/meson.build
+++ b/src/libcamera/pipeline/rpi/common/meson.build
@@ -2,6 +2,7 @@
 
 libcamera_sources += files([
     'delayed_controls.cpp',
+    'pipeline_base.cpp',
     'rpi_stream.cpp',
 ])
 
diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.cpp b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp
new file mode 100644
index 000000000000..012766b38c32
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp
@@ -0,0 +1,1447 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * pipeline_base.cpp - Pipeline handler base class for Raspberry Pi devices
+ */
+
+#include "pipeline_base.h"
+
+#include <chrono>
+
+#include <linux/media-bus-format.h>
+#include <linux/videodev2.h>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/utils.h>
+
+#include <libcamera/formats.h>
+#include <libcamera/logging.h>
+#include <libcamera/property_ids.h>
+
+#include "libcamera/internal/camera_lens.h"
+#include "libcamera/internal/ipa_manager.h"
+#include "libcamera/internal/v4l2_subdevice.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+using namespace RPi;
+
+LOG_DEFINE_CATEGORY(RPI)
+
+namespace {
+
+constexpr unsigned int defaultRawBitDepth = 12;
+
+bool isRaw(const PixelFormat &pixFmt)
+{
+	/* This test works for both Bayer and raw mono formats. */
+	return BayerFormat::fromPixelFormat(pixFmt).isValid();
+}
+
+PixelFormat mbusCodeToPixelFormat(unsigned int mbus_code,
+				  BayerFormat::Packing packingReq)
+{
+	BayerFormat bayer = BayerFormat::fromMbusCode(mbus_code);
+
+	ASSERT(bayer.isValid());
+
+	bayer.packing = packingReq;
+	PixelFormat pix = bayer.toPixelFormat();
+
+	/*
+	 * Not all formats (e.g. 8-bit or 16-bit Bayer formats) can have packed
+	 * variants. So if the PixelFormat returns as invalid, use the non-packed
+	 * conversion instead.
+	 */
+	if (!pix.isValid()) {
+		bayer.packing = BayerFormat::Packing::None;
+		pix = bayer.toPixelFormat();
+	}
+
+	return pix;
+}
+
+SensorFormats populateSensorFormats(std::unique_ptr<CameraSensor> &sensor)
+{
+	SensorFormats formats;
+
+	for (auto const mbusCode : sensor->mbusCodes())
+		formats.emplace(mbusCode, sensor->sizes(mbusCode));
+
+	return formats;
+}
+
+bool isMonoSensor(std::unique_ptr<CameraSensor> &sensor)
+{
+	unsigned int mbusCode = sensor->mbusCodes()[0];
+	const BayerFormat &bayer = BayerFormat::fromMbusCode(mbusCode);
+
+	return bayer.order == BayerFormat::Order::MONO;
+}
+
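+/*
+ * Score how well an actual dimension matches the desired one; lower is
+ * better. A worked example with illustrative values: for a desired width of
+ * 1920, an actual width of 2028 scores ((2028 - 1920) / 8) * 2 = 27, while
+ * an actual width of 1280 scores (1920 - 1280) * 2 = 1280, so modes larger
+ * than the request are strongly preferred over smaller ones.
+ */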
+double scoreFormat(double desired, double actual)
+{
+	double score = desired - actual;
+	/* Smaller desired dimensions are preferred. */
+	if (score < 0.0)
+		score = (-score) / 8;
+	/* Penalise non-exact matches. */
+	if (actual != desired)
+		score *= 2;
+
+	return score;
+}
+
+V4L2SubdeviceFormat findBestFormat(const SensorFormats &formatsMap, const Size &req, unsigned int bitDepth)
+{
+	double bestScore = std::numeric_limits<double>::max(), score;
+	V4L2SubdeviceFormat bestFormat;
+	bestFormat.colorSpace = ColorSpace::Raw;
+
+	constexpr float penaltyAr = 1500.0;
+	constexpr float penaltyBitDepth = 500.0;
+
+	/* Calculate the closest/best mode from the user requested size. */
+	for (const auto &iter : formatsMap) {
+		const unsigned int mbusCode = iter.first;
+		const PixelFormat format = mbusCodeToPixelFormat(mbusCode,
+								 BayerFormat::Packing::None);
+		const PixelFormatInfo &info = PixelFormatInfo::info(format);
+
+		for (const Size &size : iter.second) {
+			double reqAr = static_cast<double>(req.width) / req.height;
+			double fmtAr = static_cast<double>(size.width) / size.height;
+
+			/* Score the dimensions for closeness. */
+			score = scoreFormat(req.width, size.width);
+			score += scoreFormat(req.height, size.height);
+			score += penaltyAr * scoreFormat(reqAr, fmtAr);
+
+			/* Add any penalties... this is not an exact science! */
+			score += utils::abs_diff(info.bitsPerPixel, bitDepth) * penaltyBitDepth;
+
+			if (score <= bestScore) {
+				bestScore = score;
+				bestFormat.mbus_code = mbusCode;
+				bestFormat.size = size;
+			}
+
+			LOG(RPI, Debug) << "Format: " << size
+					<< " fmt " << format
+					<< " Score: " << score
+					<< " (best " << bestScore << ")";
+		}
+	}
+
+	return bestFormat;
+}
+
+const std::vector<ColorSpace> validColorSpaces = {
+	ColorSpace::Sycc,
+	ColorSpace::Smpte170m,
+	ColorSpace::Rec709
+};
+
+std::optional<ColorSpace> findValidColorSpace(const ColorSpace &colourSpace)
+{
+	for (auto cs : validColorSpaces) {
+		if (colourSpace.primaries == cs.primaries &&
+		    colourSpace.transferFunction == cs.transferFunction)
+			return cs;
+	}
+
+	return std::nullopt;
+}
+
+bool isRgb(const PixelFormat &pixFmt)
+{
+	const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+	return info.colourEncoding == PixelFormatInfo::ColourEncodingRGB;
+}
+
+bool isYuv(const PixelFormat &pixFmt)
+{
+	/* The code below would return true for raw mono streams, so weed those out first. */
+	if (isRaw(pixFmt))
+		return false;
+
+	const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
+	return info.colourEncoding == PixelFormatInfo::ColourEncodingYUV;
+}
+
+} /* namespace */
+
+/*
+ * Raspberry Pi drivers expect the following colour spaces:
+ * - V4L2_COLORSPACE_RAW for raw streams.
+ * - One of V4L2_COLORSPACE_JPEG, V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_REC709 for
+ *   non-raw streams. Other fields such as transfer function, YCbCr encoding and
+ *   quantisation are not used.
+ *
+ * The libcamera colour spaces that we wish to use corresponding to these are therefore:
+ * - ColorSpace::Raw for V4L2_COLORSPACE_RAW
+ * - ColorSpace::Sycc for V4L2_COLORSPACE_JPEG
+ * - ColorSpace::Smpte170m for V4L2_COLORSPACE_SMPTE170M
+ * - ColorSpace::Rec709 for V4L2_COLORSPACE_REC709
+ */
+CameraConfiguration::Status RPiCameraConfiguration::validateColorSpaces([[maybe_unused]] ColorSpaceFlags flags)
+{
+	Status status = Valid;
+	yuvColorSpace_.reset();
+
+	for (auto &cfg : config_) {
+		/* First fix up raw streams to have the "raw" colour space. */
+		if (isRaw(cfg.pixelFormat)) {
+			/* If there was no value here, that doesn't count as "adjusted". */
+			if (cfg.colorSpace && cfg.colorSpace != ColorSpace::Raw)
+				status = Adjusted;
+			cfg.colorSpace = ColorSpace::Raw;
+			continue;
+		}
+
+		/* Next we need to find our shared colour space. The first valid one will do. */
+		if (cfg.colorSpace && !yuvColorSpace_)
+			yuvColorSpace_ = findValidColorSpace(cfg.colorSpace.value());
+	}
+
+	/* If no colour space was given anywhere, choose sYCC. */
+	if (!yuvColorSpace_)
+		yuvColorSpace_ = ColorSpace::Sycc;
+
+	/* Note the version of this that any RGB streams will have to use. */
+	rgbColorSpace_ = yuvColorSpace_;
+	rgbColorSpace_->ycbcrEncoding = ColorSpace::YcbcrEncoding::None;
+	rgbColorSpace_->range = ColorSpace::Range::Full;
+
+	/* Go through the streams again and force everyone to the same colour space. */
+	for (auto &cfg : config_) {
+		if (cfg.colorSpace == ColorSpace::Raw)
+			continue;
+
+		if (isYuv(cfg.pixelFormat) && cfg.colorSpace != yuvColorSpace_) {
+			/* Again, no value means "not adjusted". */
+			if (cfg.colorSpace)
+				status = Adjusted;
+			cfg.colorSpace = yuvColorSpace_;
+		}
+		if (isRgb(cfg.pixelFormat) && cfg.colorSpace != rgbColorSpace_) {
+			/* Be nice, and let the YUV version count as non-adjusted too. */
+			if (cfg.colorSpace && cfg.colorSpace != yuvColorSpace_)
+				status = Adjusted;
+			cfg.colorSpace = rgbColorSpace_;
+		}
+	}
+
+	return status;
+}
+
+CameraConfiguration::Status RPiCameraConfiguration::validate()
+{
+	Status status = Valid;
+
+	if (config_.empty())
+		return Invalid;
+
+	status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
+
+	/*
+	 * Validate the requested transform against the sensor capabilities and
+	 * rotation and store the final combined transform that configure() will
+	 * need to apply to the sensor to save us working it out again.
+	 */
+	Transform requestedTransform = transform;
+	combinedTransform_ = data_->sensor_->validateTransform(&transform);
+	if (transform != requestedTransform)
+		status = Adjusted;
+
+	std::vector<CameraData::StreamParams> rawStreams, outStreams;
+	for (const auto &[index, cfg] : utils::enumerate(config_)) {
+		if (isRaw(cfg.pixelFormat))
+			rawStreams.emplace_back(index, &cfg);
+		else
+			outStreams.emplace_back(index, &cfg);
+	}
+
+	/* Sort the streams so the highest resolution is first. */
+	std::sort(rawStreams.begin(), rawStreams.end(),
+		  [](auto &l, auto &r) { return l.cfg->size > r.cfg->size; });
+
+	std::sort(outStreams.begin(), outStreams.end(),
+		  [](auto &l, auto &r) { return l.cfg->size > r.cfg->size; });
+
+	/* Do any platform specific fixups. */
+	Status platformStatus = data_->platformValidate(rawStreams, outStreams);
+	if (platformStatus == Invalid)
+		return Invalid;
+	if (platformStatus == Adjusted)
+		status = Adjusted;
+
+	/* Further fixups on the RAW streams. */
+	for (auto &raw : rawStreams) {
+		StreamConfiguration &cfg = config_.at(raw.index);
+		V4L2DeviceFormat rawFormat;
+
+		const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
+		unsigned int bitDepth = info.isValid() ? info.bitsPerPixel : defaultRawBitDepth;
+		V4L2SubdeviceFormat sensorFormat = findBestFormat(data_->sensorFormats_, cfg.size, bitDepth);
+
+		rawFormat.size = sensorFormat.size;
+		rawFormat.fourcc = raw.dev->toV4L2PixelFormat(cfg.pixelFormat);
+
+		int ret = raw.dev->tryFormat(&rawFormat);
+		if (ret)
+			return Invalid;
+		/*
+		 * Some sensors change their Bayer order when they are h-flipped
+		 * or v-flipped, according to the transform. If this one does, we
+		 * must advertise the transformed Bayer order in the raw stream.
+		 * Note how we must fetch the "native" (i.e. untransformed) Bayer
+		 * order, because the sensor may currently be flipped!
+		 */
+		V4L2PixelFormat fourcc = rawFormat.fourcc;
+		if (data_->flipsAlterBayerOrder_) {
+			BayerFormat bayer = BayerFormat::fromV4L2PixelFormat(fourcc);
+			bayer.order = data_->nativeBayerOrder_;
+			bayer = bayer.transform(combinedTransform_);
+			fourcc = bayer.toV4L2PixelFormat();
+		}
+
+		PixelFormat inputPixFormat = fourcc.toPixelFormat();
+		if (raw.cfg->size != rawFormat.size || raw.cfg->pixelFormat != inputPixFormat) {
+			raw.cfg->size = rawFormat.size;
+			raw.cfg->pixelFormat = inputPixFormat;
+			status = Adjusted;
+		}
+
+		raw.cfg->stride = rawFormat.planes[0].bpl;
+		raw.cfg->frameSize = rawFormat.planes[0].size;
+	}
+
+	/* Further fixups on the ISP output streams. */
+	for (auto &out : outStreams) {
+		StreamConfiguration &cfg = config_.at(out.index);
+		PixelFormat &cfgPixFmt = cfg.pixelFormat;
+		V4L2VideoDevice::Formats fmts = out.dev->formats();
+
+		if (fmts.find(out.dev->toV4L2PixelFormat(cfgPixFmt)) == fmts.end()) {
+			/* If we cannot find a native format, use a default one. */
+			cfgPixFmt = formats::NV12;
+			status = Adjusted;
+		}
+
+		V4L2DeviceFormat format;
+		format.fourcc = out.dev->toV4L2PixelFormat(cfg.pixelFormat);
+		format.size = cfg.size;
+		/* We want to send the associated YCbCr info through to the driver. */
+		format.colorSpace = yuvColorSpace_;
+
+		LOG(RPI, Debug)
+			<< "Try color space " << ColorSpace::toString(cfg.colorSpace);
+
+		int ret = out.dev->tryFormat(&format);
+		if (ret)
+			return Invalid;
+
+		/*
+		 * But for RGB streams, the YCbCr info gets overwritten on the way back
+		 * so we must check against what the stream cfg says, not what we actually
+		 * requested (which carefully included the YCbCr info)!
+		 */
+		if (cfg.colorSpace != format.colorSpace) {
+			status = Adjusted;
+			LOG(RPI, Debug)
+				<< "Color space changed from "
+				<< ColorSpace::toString(cfg.colorSpace) << " to "
+				<< ColorSpace::toString(format.colorSpace);
+		}
+
+		cfg.colorSpace = format.colorSpace;
+		cfg.stride = format.planes[0].bpl;
+		cfg.frameSize = format.planes[0].size;
+	}
+
+	return status;
+}
+
+V4L2DeviceFormat PipelineHandlerBase::toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+							 const V4L2SubdeviceFormat &format,
+							 BayerFormat::Packing packingReq)
+{
+	unsigned int mbus_code = format.mbus_code;
+	const PixelFormat pix = mbusCodeToPixelFormat(mbus_code, packingReq);
+	V4L2DeviceFormat deviceFormat;
+
+	deviceFormat.fourcc = dev->toV4L2PixelFormat(pix);
+	deviceFormat.size = format.size;
+	deviceFormat.colorSpace = format.colorSpace;
+	return deviceFormat;
+}
+
+std::unique_ptr<CameraConfiguration>
+PipelineHandlerBase::generateConfiguration(Camera *camera, const StreamRoles &roles)
+{
+	CameraData *data = cameraData(camera);
+	std::unique_ptr<CameraConfiguration> config =
+		std::make_unique<RPiCameraConfiguration>(data);
+	V4L2SubdeviceFormat sensorFormat;
+	unsigned int bufferCount;
+	PixelFormat pixelFormat;
+	V4L2VideoDevice::Formats fmts;
+	Size size;
+	std::optional<ColorSpace> colorSpace;
+
+	if (roles.empty())
+		return config;
+
+	Size sensorSize = data->sensor_->resolution();
+	for (const StreamRole role : roles) {
+		switch (role) {
+		case StreamRole::Raw:
+			size = sensorSize;
+			sensorFormat = findBestFormat(data->sensorFormats_, size, defaultRawBitDepth);
+			pixelFormat = mbusCodeToPixelFormat(sensorFormat.mbus_code,
+							    BayerFormat::Packing::CSI2);
+			ASSERT(pixelFormat.isValid());
+			colorSpace = ColorSpace::Raw;
+			bufferCount = 2;
+			break;
+
+		case StreamRole::StillCapture:
+			fmts = data->ispFormats();
+			pixelFormat = formats::NV12;
+			/*
+			 * Still image codecs usually expect the sYCC color space.
+			 * Even RGB codecs will be fine as the RGB we get with the
+			 * sYCC color space is the same as sRGB.
+			 */
+			colorSpace = ColorSpace::Sycc;
+			/* Return the largest sensor resolution. */
+			size = sensorSize;
+			bufferCount = 1;
+			break;
+
+		case StreamRole::VideoRecording:
+			/*
+			 * The colour denoise algorithm requires the analysis
+			 * image, produced by the second ISP output, to be in
+			 * YUV420 format. Select this format as the default, to
+			 * maximize chances that it will be picked by
+			 * applications and enable usage of the colour denoise
+			 * algorithm.
+			 */
+			fmts = data->ispFormats();
+			pixelFormat = formats::YUV420;
+			/*
+			 * Choose a color space appropriate for video recording.
+			 * Rec.709 will be a good default for HD resolutions.
+			 */
+			colorSpace = ColorSpace::Rec709;
+			size = { 1920, 1080 };
+			bufferCount = 4;
+			break;
+
+		case StreamRole::Viewfinder:
+			fmts = data->ispFormats();
+			pixelFormat = formats::ARGB8888;
+			colorSpace = ColorSpace::Sycc;
+			size = { 800, 600 };
+			bufferCount = 4;
+			break;
+
+		default:
+			LOG(RPI, Error) << "Requested stream role not supported: "
+					<< role;
+			return nullptr;
+		}
+
+		std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
+		if (role == StreamRole::Raw) {
+			/* Translate the MBUS codes to a PixelFormat. */
+			for (const auto &format : data->sensorFormats_) {
+				PixelFormat pf = mbusCodeToPixelFormat(format.first,
+								       BayerFormat::Packing::CSI2);
+				if (pf.isValid())
+					deviceFormats.emplace(std::piecewise_construct, std::forward_as_tuple(pf),
+							      std::forward_as_tuple(format.second.begin(), format.second.end()));
+			}
+		} else {
+			/*
+			 * Translate the V4L2PixelFormat to PixelFormat. Note that we
+			 * limit the recommended largest ISP output size to match the
+			 * sensor resolution.
+			 */
+			for (const auto &format : fmts) {
+				PixelFormat pf = format.first.toPixelFormat();
+				if (pf.isValid()) {
+					const SizeRange &ispSizes = format.second[0];
+					deviceFormats[pf].emplace_back(ispSizes.min, sensorSize,
+								       ispSizes.hStep, ispSizes.vStep);
+				}
+			}
+		}
+
+		/* Add the stream format based on the device node used for the use case. */
+		StreamFormats formats(deviceFormats);
+		StreamConfiguration cfg(formats);
+		cfg.size = size;
+		cfg.pixelFormat = pixelFormat;
+		cfg.colorSpace = colorSpace;
+		cfg.bufferCount = bufferCount;
+		config->addConfiguration(cfg);
+	}
+
+	config->validate();
+
+	return config;
+}
+
+int PipelineHandlerBase::configure(Camera *camera, CameraConfiguration *config)
+{
+	CameraData *data = cameraData(camera);
+	int ret;
+
+	/* Start by freeing all buffers and reset the stream states. */
+	data->freeBuffers();
+	for (auto const stream : data->streams_)
+		stream->setExternal(false);
+
+	std::vector<CameraData::StreamParams> rawStreams, ispStreams;
+	std::optional<BayerFormat::Packing> packing;
+	unsigned int bitDepth = defaultRawBitDepth;
+
+	for (unsigned i = 0; i < config->size(); i++) {
+		StreamConfiguration *cfg = &config->at(i);
+
+		if (isRaw(cfg->pixelFormat))
+			rawStreams.emplace_back(i, cfg);
+		else
+			ispStreams.emplace_back(i, cfg);
+	}
+
+	/* Sort the streams so the highest resolution is first. */
+	std::sort(rawStreams.begin(), rawStreams.end(),
+		  [](auto &l, auto &r) { return l.cfg->size > r.cfg->size; });
+
+	std::sort(ispStreams.begin(), ispStreams.end(),
+		  [](auto &l, auto &r) { return l.cfg->size > r.cfg->size; });
+
+	/*
+	 * Calculate the best sensor mode we can use based on the user's request,
+	 * and apply it to the sensor with the cached transform, if any.
+	 *
+	 * If we have been given a RAW stream, use that size for setting up the sensor.
+	 */
+	if (!rawStreams.empty()) {
+		BayerFormat bayerFormat = BayerFormat::fromPixelFormat(rawStreams[0].cfg->pixelFormat);
+		/* Replace the user requested packing/bit-depth. */
+		packing = bayerFormat.packing;
+		bitDepth = bayerFormat.bitDepth;
+	}
+
+	V4L2SubdeviceFormat sensorFormat = findBestFormat(data->sensorFormats_,
+							  rawStreams.empty() ? ispStreams[0].cfg->size
+									     : rawStreams[0].cfg->size,
+							  bitDepth);
+	/* Apply any cached transform. */
+	const RPiCameraConfiguration *rpiConfig = static_cast<const RPiCameraConfiguration *>(config);
+
+	/* Then apply the format on the sensor. */
+	ret = data->sensor_->setFormat(&sensorFormat, rpiConfig->combinedTransform_);
+	if (ret)
+		return ret;
+
+	/*
+	 * Platform specific internal stream configuration. This also assigns
+	 * external streams which get configured below.
+	 */
+	ret = data->platformConfigure(sensorFormat, packing, rawStreams, ispStreams);
+	if (ret)
+		return ret;
+
+	ipa::RPi::ConfigResult result;
+	ret = data->configureIPA(config, &result);
+	if (ret) {
+		LOG(RPI, Error) << "Failed to configure the IPA: " << ret;
+		return ret;
+	}
+
+	/*
+	 * Set the scaler crop to the value we are using (scaled to native sensor
+	 * coordinates).
+	 */
+	data->scalerCrop_ = data->scaleIspCrop(data->ispCrop_);
+
+	/*
+	 * Update the ScalerCropMaximum to the correct value for this camera mode.
+	 * For us, it's the same as the "analogue crop".
+	 *
+	 * \todo Make this property the ScalerCrop maximum value when dynamic
+	 * controls are available and set it at validate() time
+	 */
+	data->properties_.set(properties::ScalerCropMaximum, data->sensorInfo_.analogCrop);
+
+	/* Store the mode sensitivity for the application. */
+	data->properties_.set(properties::SensorSensitivity, result.modeSensitivity);
+
+	/* Update the controls that the Raspberry Pi IPA can handle. */
+	ControlInfoMap::Map ctrlMap;
+	for (auto const &c : result.controlInfo)
+		ctrlMap.emplace(c.first, c.second);
+
+	/* Add the ScalerCrop control limits based on the current mode. */
+	Rectangle ispMinCrop = data->scaleIspCrop(Rectangle(data->ispMinCropSize_));
+	ctrlMap[&controls::ScalerCrop] = ControlInfo(ispMinCrop, data->sensorInfo_.analogCrop, data->scalerCrop_);
+
+	data->controlInfo_ = ControlInfoMap(std::move(ctrlMap), result.controlInfo.idmap());
+
+	/* Setup the Video Mux/Bridge entities. */
+	for (auto &[device, link] : data->bridgeDevices_) {
+		/*
+		 * Start by disabling all the sink pad links on the devices in the
+		 * cascade, with the exception of the link connecting the device.
+		 */
+		for (const MediaPad *p : device->entity()->pads()) {
+			if (!(p->flags() & MEDIA_PAD_FL_SINK))
+				continue;
+
+			for (MediaLink *l : p->links()) {
+				if (l != link)
+					l->setEnabled(false);
+			}
+		}
+
+		/*
+		 * Next, enable the entity -> entity links, and setup the pad format.
+		 *
+	 * \todo Some bridge devices may change the media bus code, so we
+		 * ought to read the source pad format and propagate it to the sink pad.
+		 */
+		link->setEnabled(true);
+		const MediaPad *sinkPad = link->sink();
+		ret = device->setFormat(sinkPad->index(), &sensorFormat);
+		if (ret) {
+			LOG(RPI, Error) << "Failed to set format on " << device->entity()->name()
+					<< " pad " << sinkPad->index()
+					<< " with format " << sensorFormat
+					<< ": " << ret;
+			return ret;
+		}
+
+		LOG(RPI, Debug) << "Configured media link on device " << device->entity()->name()
+				<< " on pad " << sinkPad->index();
+	}
+
+	return 0;
+}
+
+int PipelineHandlerBase::exportFrameBuffers([[maybe_unused]] Camera *camera, libcamera::Stream *stream,
+					    std::vector<std::unique_ptr<FrameBuffer>> *buffers)
+{
+	RPi::Stream *s = static_cast<RPi::Stream *>(stream);
+	unsigned int count = stream->configuration().bufferCount;
+	int ret = s->dev()->exportBuffers(count, buffers);
+
+	s->setExportedBuffers(buffers);
+
+	return ret;
+}
+
+int PipelineHandlerBase::start(Camera *camera, const ControlList *controls)
+{
+	CameraData *data = cameraData(camera);
+	int ret;
+
+	/* Check if a ScalerCrop control was specified. */
+	if (controls)
+		data->calculateScalerCrop(*controls);
+
+	/* Start the IPA. */
+	ipa::RPi::StartResult result;
+	data->ipa_->start(controls ? *controls : ControlList{ controls::controls },
+			  &result);
+
+	/* Apply any gain/exposure settings that the IPA may have passed back. */
+	if (!result.controls.empty())
+		data->setSensorControls(result.controls);
+
+	/* Configure the number of dropped frames required on startup. */
+	data->dropFrameCount_ = data->config_.disableStartupFrameDrops ? 0
+								       : result.dropFrameCount;
+
+	for (auto const stream : data->streams_)
+		stream->resetBuffers();
+
+	if (!data->buffersAllocated_) {
+		/* Allocate buffers for internal pipeline usage. */
+		ret = prepareBuffers(camera);
+		if (ret) {
+			LOG(RPI, Error) << "Failed to allocate buffers";
+			data->freeBuffers();
+			stop(camera);
+			return ret;
+		}
+		data->buffersAllocated_ = true;
+	}
+
+	/* We need to set the dropFrameCount_ before queueing buffers. */
+	ret = queueAllBuffers(camera);
+	if (ret) {
+		LOG(RPI, Error) << "Failed to queue buffers";
+		stop(camera);
+		return ret;
+	}
+
+	/*
+	 * Reset the delayed controls with the gain and exposure values set by
+	 * the IPA.
+	 */
+	data->delayedCtrls_->reset(0);
+	data->state_ = CameraData::State::Idle;
+
+	/* Enable SOF event generation. */
+	data->frontendDevice()->setFrameStartEnabled(true);
+
+	data->platformStart();
+
+	/* Start all streams. */
+	for (auto const stream : data->streams_) {
+		ret = stream->dev()->streamOn();
+		if (ret) {
+			stop(camera);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+void PipelineHandlerBase::stopDevice(Camera *camera)
+{
+	CameraData *data = cameraData(camera);
+
+	data->state_ = CameraData::State::Stopped;
+	data->platformStop();
+
+	for (auto const stream : data->streams_)
+		stream->dev()->streamOff();
+
+	/* Disable SOF event generation. */
+	data->frontendDevice()->setFrameStartEnabled(false);
+
+	data->clearIncompleteRequests();
+
+	/* Stop the IPA. */
+	data->ipa_->stop();
+}
+
+void PipelineHandlerBase::releaseDevice(Camera *camera)
+{
+	CameraData *data = cameraData(camera);
+	data->freeBuffers();
+}
+
+int PipelineHandlerBase::queueRequestDevice(Camera *camera, Request *request)
+{
+	CameraData *data = cameraData(camera);
+
+	if (!data->isRunning())
+		return -EINVAL;
+
+	LOG(RPI, Debug) << "queueRequestDevice: New request.";
+
+	/* Push all buffers supplied in the Request to the respective streams. */
+	for (auto stream : data->streams_) {
+		if (!stream->isExternal())
+			continue;
+
+		FrameBuffer *buffer = request->findBuffer(stream);
+		if (buffer && !stream->getBufferId(buffer)) {
+			/*
+			 * This buffer is not recognised, so it must have been allocated
+			 * outside the v4l2 device. Store it in the stream buffer list
+			 * so we can track it.
+			 */
+			stream->setExternalBuffer(buffer);
+		}
+
+		/*
+		 * If no buffer is provided by the request for this stream, we
+		 * queue a nullptr to the stream to signify that it must use an
+		 * internally allocated buffer for this capture request. This
+		 * buffer will not be given back to the application, but is used
+		 * to support the internal pipeline flow.
+		 *
+		 * The below queueBuffer() call will do nothing if there are not
+		 * enough internal buffers allocated, but this will be handled by
+		 * queuing the request for buffers in the RPiStream object.
+		 */
+		int ret = stream->queueBuffer(buffer);
+		if (ret)
+			return ret;
+	}
+
+	/* Push the request to the back of the queue. */
+	data->requestQueue_.push(request);
+	data->handleState();
+
+	return 0;
+}
+
+int PipelineHandlerBase::registerCamera(MediaDevice *frontend, const std::string &frontendName,
+					MediaDevice *backend, MediaEntity *sensorEntity)
+{
+	std::unique_ptr<CameraData> cameraData = allocateCameraData();
+	CameraData *data = cameraData.get();
+	int ret;
+
+	data->sensor_ = std::make_unique<CameraSensor>(sensorEntity);
+	if (!data->sensor_)
+		return -EINVAL;
+
+	if (data->sensor_->init())
+		return -EINVAL;
+
+	data->sensorFormats_ = populateSensorFormats(data->sensor_);
+
+	/*
+	 * Enumerate all the Video Mux/Bridge devices across the sensor ->
+	 * frontend chain. There may be a cascade of devices in this chain!
+	 */
+	MediaLink *link = sensorEntity->getPadByIndex(0)->links()[0];
+	data->enumerateVideoDevices(link, frontendName);
+
+	ipa::RPi::InitResult result;
+	if (data->loadIPA(&result)) {
+		LOG(RPI, Error) << "Failed to load a suitable IPA library";
+		return -EINVAL;
+	}
+
+	/*
+	 * Setup our delayed control writer with the sensor default
+	 * gain and exposure delays. Mark VBLANK for priority write.
+	 */
+	std::unordered_map<uint32_t, RPi::DelayedControls::ControlParams> params = {
+		{ V4L2_CID_ANALOGUE_GAIN, { result.sensorConfig.gainDelay, false } },
+		{ V4L2_CID_EXPOSURE, { result.sensorConfig.exposureDelay, false } },
+		{ V4L2_CID_HBLANK, { result.sensorConfig.hblankDelay, false } },
+		{ V4L2_CID_VBLANK, { result.sensorConfig.vblankDelay, true } }
+	};
+	data->delayedCtrls_ = std::make_unique<RPi::DelayedControls>(data->sensor_->device(), params);
+	data->sensorMetadata_ = result.sensorConfig.sensorMetadata;
+
+	/* Register initial controls that the Raspberry Pi IPA can handle. */
+	data->controlInfo_ = std::move(result.controlInfo);
+
+	/* Initialize the camera properties. */
+	data->properties_ = data->sensor_->properties();
+
+	/*
+	 * The V4L2_CID_NOTIFY_GAINS control, if present, is used to inform the
+	 * sensor of the colour gains. It is defined to be a linear gain where
+	 * the default value represents a gain of exactly one.
+	 */
+	auto it = data->sensor_->controls().find(V4L2_CID_NOTIFY_GAINS);
+	if (it != data->sensor_->controls().end())
+		data->notifyGainsUnity_ = it->second.def().get<int32_t>();
+
+	/*
+	 * Set a default value for the ScalerCropMaximum property to show
+	 * that we support its use, however, initialise it to zero because
+	 * it's not meaningful until a camera mode has been chosen.
+	 */
+	data->properties_.set(properties::ScalerCropMaximum, Rectangle{});
+
+	/*
+	 * We cache two things about the sensor in relation to transforms
+	 * (meaning horizontal and vertical flips): whether they affect the
+	 * Bayer ordering, and what the "native" Bayer order is when no
+	 * transforms are applied.
+	 *
+	 * We note that the sensor's cached list of supported formats is
+	 * already in the "native" order, with any flips having been undone.
+	 */
+	const V4L2Subdevice *sensor = data->sensor_->device();
+	const struct v4l2_query_ext_ctrl *hflipCtrl = sensor->controlInfo(V4L2_CID_HFLIP);
+	if (hflipCtrl) {
+		/* We assume it will support vflips too... */
+		data->flipsAlterBayerOrder_ = hflipCtrl->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+	}
+
+	/* Look for a valid Bayer format. */
+	BayerFormat bayerFormat;
+	for (const auto &iter : data->sensorFormats_) {
+		bayerFormat = BayerFormat::fromMbusCode(iter.first);
+		if (bayerFormat.isValid())
+			break;
+	}
+
+	if (!bayerFormat.isValid()) {
+		LOG(RPI, Error) << "No Bayer format found";
+		return -EINVAL;
+	}
+	data->nativeBayerOrder_ = bayerFormat.order;
+
+	ret = data->loadPipelineConfiguration();
+	if (ret) {
+		LOG(RPI, Error) << "Unable to load pipeline configuration";
+		return ret;
+	}
+
+	ret = platformRegister(cameraData, frontend, backend);
+	if (ret)
+		return ret;
+
+	/* Setup the general IPA signal handlers. */
+	data->frontendDevice()->dequeueTimeout.connect(data, &RPi::CameraData::cameraTimeout);
+	data->frontendDevice()->frameStart.connect(data, &RPi::CameraData::frameStarted);
+	data->ipa_->setDelayedControls.connect(data, &CameraData::setDelayedControls);
+	data->ipa_->setLensControls.connect(data, &CameraData::setLensControls);
+
+	return 0;
+}
+
+void PipelineHandlerBase::mapBuffers(Camera *camera, const BufferMap &buffers, unsigned int mask)
+{
+	CameraData *data = cameraData(camera);
+	std::vector<IPABuffer> bufferIds;
+	/*
+	 * Link the FrameBuffers with the id (key value) in the map stored in
+	 * the RPi stream object - along with an identifier mask.
+	 *
+	 * This will allow us to identify buffers passed between the pipeline
+	 * handler and the IPA.
+	 */
+	for (auto const &it : buffers) {
+		bufferIds.push_back(IPABuffer(mask | it.first,
+					      it.second->planes()));
+		data->bufferIds_.insert(mask | it.first);
+	}
+
+	data->ipa_->mapBuffers(bufferIds);
+}
+
+int PipelineHandlerBase::queueAllBuffers(Camera *camera)
+{
+	CameraData *data = cameraData(camera);
+	int ret;
+
+	for (auto const stream : data->streams_) {
+		if (!stream->isExternal()) {
+			ret = stream->queueAllBuffers();
+			if (ret < 0)
+				return ret;
+		} else {
+			/*
+			 * For external streams, we must queue up a set of internal
+			 * buffers to handle the number of drop frames requested by
+			 * the IPA. This is done by passing nullptr in queueBuffer().
+			 *
+			 * The below queueBuffer() call will do nothing if there
+			 * are not enough internal buffers allocated, but this will
+			 * be handled by queuing the request for buffers in the
+			 * RPiStream object.
+			 */
+			unsigned int i;
+			for (i = 0; i < data->dropFrameCount_; i++) {
+				ret = stream->queueBuffer(nullptr);
+				if (ret)
+					return ret;
+			}
+		}
+	}
+
+	return 0;
+}
+
+void CameraData::freeBuffers()
+{
+	if (ipa_) {
+		/*
+		 * Copy the buffer ids from the unordered_set to a vector to
+		 * pass to the IPA.
+		 */
+		std::vector<unsigned int> bufferIds(bufferIds_.begin(),
+						    bufferIds_.end());
+		ipa_->unmapBuffers(bufferIds);
+		bufferIds_.clear();
+	}
+
+	for (auto const stream : streams_)
+		stream->releaseBuffers();
+
+	platformFreeBuffers();
+
+	buffersAllocated_ = false;
+}
+
+/*
+ * enumerateVideoDevices() iterates over the Media Controller topology, starting
+ * at the sensor and finishing at the frontend. For each sensor, CameraData stores
+ * a unique list of any intermediate video mux or bridge devices connected in a
+ * cascade, together with the entity to entity link.
+ *
+ * Entity pad configuration and link enabling happen at the end of configure().
+ * We first disable all pad links on each entity device in the chain, and then
+ * selectively enable the specific links connecting the sensor to the frontend
+ * across all intermediate muxes and bridges.
+ *
+ * In the cascaded topology below, if Sensor1 is used, the Mux2 -> Mux1 link
+ * will be disabled, and Sensor1 -> Mux1 -> Frontend links enabled. Alternatively,
+ * if Sensor3 is used, the Sensor2 -> Mux2 and Sensor1 -> Mux1 links are disabled,
+ * and Sensor3 -> Mux2 -> Mux1 -> Frontend links are enabled. All other links will
+ * remain unchanged.
+ *
+ *  +----------+
+ *  |     FE   |
+ *  +-----^----+
+ *        |
+ *    +---+---+
+ *    | Mux1  |<------+
+ *    +--^----+       |
+ *       |            |
+ * +-----+---+    +---+---+
+ * | Sensor1 |    |  Mux2 |<--+
+ * +---------+    +-^-----+   |
+ *                  |         |
+ *          +-------+-+   +---+-----+
+ *          | Sensor2 |   | Sensor3 |
+ *          +---------+   +---------+
+ */
+void CameraData::enumerateVideoDevices(MediaLink *link, const std::string &frontend)
+{
+	const MediaPad *sinkPad = link->sink();
+	const MediaEntity *entity = sinkPad->entity();
+	bool frontendFound = false;
+
+	/* We only deal with Video Mux and Bridge devices in cascade. */
+	if (entity->function() != MEDIA_ENT_F_VID_MUX &&
+	    entity->function() != MEDIA_ENT_F_VID_IF_BRIDGE)
+		return;
+
+	/* Find the source pad for this Video Mux or Bridge device. */
+	const MediaPad *sourcePad = nullptr;
+	for (const MediaPad *pad : entity->pads()) {
+		if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
+			/*
+			 * We can only deal with devices that have a single source
+			 * pad. If this device has multiple source pads, ignore it
+			 * and this branch in the cascade.
+			 */
+			if (sourcePad)
+				return;
+
+			sourcePad = pad;
+		}
+	}
+
+	LOG(RPI, Debug) << "Found video mux device " << entity->name()
+			<< " linked to sink pad " << sinkPad->index();
+
+	bridgeDevices_.emplace_back(std::make_unique<V4L2Subdevice>(entity), link);
+	bridgeDevices_.back().first->open();
+
+	/*
+	 * Iterate through all the sink pad links down the cascade to find any
+	 * other Video Mux and Bridge devices.
+	 */
+	for (MediaLink *l : sourcePad->links()) {
+		enumerateVideoDevices(l, frontend);
+		/* Once we reach the Frontend entity, we are done. */
+		if (l->sink()->entity()->name() == frontend) {
+			frontendFound = true;
+			break;
+		}
+	}
+
+	/* This identifies the end of our entity enumeration recursion. */
+	if (link->source()->entity()->function() == MEDIA_ENT_F_CAM_SENSOR) {
+		/*
+		 * If the frontend is not at the end of this cascade, we cannot
+		 * configure this topology automatically, so remove all entity
+		 * references.
+		 */
+		if (!frontendFound) {
+			LOG(RPI, Warning) << "Cannot automatically configure this MC topology!";
+			bridgeDevices_.clear();
+		}
+	}
+}
+
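+/*
+ * Read the optional pipeline configuration file named by the
+ * LIBCAMERA_RPI_CONFIG_FILE environment variable. A minimal file matching
+ * the keys parsed below might look as follows (illustrative values):
+ *
+ * {
+ *         "version": 1.0,
+ *         "pipeline_handler": {
+ *                 "disable_startup_frame_drops": false,
+ *                 "camera_timeout_value_ms": 0
+ *         }
+ * }
+ */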
+int CameraData::loadPipelineConfiguration()
+{
+	config_ = {
+		.disableStartupFrameDrops = false,
+		.cameraTimeoutValue = 0,
+	};
+
+	/* Initial configuration of the platform, in case no config file is present */
+	platformPipelineConfigure({});
+
+	char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_CONFIG_FILE");
+	if (!configFromEnv || *configFromEnv == '\0')
+		return 0;
+
+	std::string filename = std::string(configFromEnv);
+	File file(filename);
+
+	if (!file.open(File::OpenModeFlag::ReadOnly)) {
+		LOG(RPI, Error) << "Failed to open configuration file '" << filename << "'";
+		return -EIO;
+	}
+
+	LOG(RPI, Info) << "Using configuration file '" << filename << "'";
+
+	std::unique_ptr<YamlObject> root = YamlParser::parse(file);
+	if (!root) {
+		LOG(RPI, Warning) << "Failed to parse configuration file, using defaults";
+		return 0;
+	}
+
+	std::optional<double> ver = (*root)["version"].get<double>();
+	if (!ver || *ver != 1.0) {
+		LOG(RPI, Error) << "Unexpected configuration file version reported";
+		return -EINVAL;
+	}
+
+	const YamlObject &phConfig = (*root)["pipeline_handler"];
+
+	config_.disableStartupFrameDrops =
+		phConfig["disable_startup_frame_drops"].get<bool>(config_.disableStartupFrameDrops);
+
+	config_.cameraTimeoutValue =
+		phConfig["camera_timeout_value_ms"].get<unsigned int>(config_.cameraTimeoutValue);
+
+	if (config_.cameraTimeoutValue) {
+		/* Disable the IPA signal to control timeout and set the user requested value. */
+		ipa_->setCameraTimeout.disconnect();
+		frontendDevice()->setDequeueTimeout(config_.cameraTimeoutValue * 1ms);
+	}
+
+	return platformPipelineConfigure(root);
+}
+
+int CameraData::loadIPA(ipa::RPi::InitResult *result)
+{
+	ipa_ = IPAManager::createIPA<ipa::RPi::IPAProxyRPi>(pipe(), 1, 1);
+
+	if (!ipa_)
+		return -ENOENT;
+
+	/*
+	 * The configuration (tuning file) is made from the sensor name unless
+	 * the environment variable overrides it.
+	 */
+	std::string configurationFile;
+	char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_TUNING_FILE");
+	if (!configFromEnv || *configFromEnv == '\0') {
+		std::string model = sensor_->model();
+		if (isMonoSensor(sensor_))
+			model += "_mono";
+		configurationFile = ipa_->configurationFile(model + ".json", "rpi");
+	} else {
+		configurationFile = std::string(configFromEnv);
+	}
+
+	IPASettings settings(configurationFile, sensor_->model());
+	ipa::RPi::InitParams params;
+
+	params.lensPresent = !!sensor_->focusLens();
+	int ret = platformInitIpa(params);
+	if (ret)
+		return ret;
+
+	return ipa_->init(settings, params, result);
+}
+
+int CameraData::configureIPA(const CameraConfiguration *config, ipa::RPi::ConfigResult *result)
+{
+	ipa::RPi::ConfigParams params;
+	int ret;
+
+	params.sensorControls = sensor_->controls();
+	if (sensor_->focusLens())
+		params.lensControls = sensor_->focusLens()->controls();
+
+	ret = platformConfigureIpa(params);
+	if (ret)
+		return ret;
+
+	/* We store the IPACameraSensorInfo for digital zoom calculations. */
+	ret = sensor_->sensorInfo(&sensorInfo_);
+	if (ret) {
+		LOG(RPI, Error) << "Failed to retrieve camera sensor info";
+		return ret;
+	}
+
+	/* Always send the user transform to the IPA. */
+	params.transform = static_cast<unsigned int>(config->transform);
+
+	/* Ready the IPA - it must know about the sensor resolution. */
+	ret = ipa_->configure(sensorInfo_, params, result);
+	if (ret < 0) {
+		LOG(RPI, Error) << "IPA configuration failed!";
+		return -EPIPE;
+	}
+
+	if (!result->controls.empty())
+		setSensorControls(result->controls);
+
+	return 0;
+}
+
+void CameraData::setDelayedControls(const ControlList &controls, uint32_t delayContext)
+{
+	if (!delayedCtrls_->push(controls, delayContext))
+		LOG(RPI, Error) << "V4L2 DelayedControl set failed";
+}
+
+void CameraData::setLensControls(const ControlList &controls)
+{
+	CameraLens *lens = sensor_->focusLens();
+
+	if (lens && controls.contains(V4L2_CID_FOCUS_ABSOLUTE)) {
+		ControlValue const &focusValue = controls.get(V4L2_CID_FOCUS_ABSOLUTE);
+		lens->setFocusPosition(focusValue.get<int32_t>());
+	}
+}
+
+void CameraData::setSensorControls(ControlList &controls)
+{
+	/*
+	 * We need to ensure that if both VBLANK and EXPOSURE are present, the
+	 * former must be written ahead of, and separately from EXPOSURE to avoid
+	 * V4L2 rejecting the latter. This is identical to what DelayedControls
+	 * does with the priority write flag.
+	 *
+	 * As a consequence of the below logic, VBLANK gets set twice, and we
+	 * rely on the v4l2 framework to not pass the second control set to the
+	 * driver as the actual control value has not changed.
+	 */
+	if (controls.contains(V4L2_CID_EXPOSURE) && controls.contains(V4L2_CID_VBLANK)) {
+		ControlList vblank_ctrl;
+
+		vblank_ctrl.set(V4L2_CID_VBLANK, controls.get(V4L2_CID_VBLANK));
+		sensor_->setControls(&vblank_ctrl);
+	}
+
+	sensor_->setControls(&controls);
+}
+
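+/*
+ * A worked example with illustrative numbers: in a 2x binned camera mode
+ * where analogCrop is 4056x3040 and outputSize is 2028x1520, an ISP crop of
+ * 200x200 at (100, 100) scales to 400x400 at (200, 200), and is then offset
+ * by the analogue crop's top-left corner into native sensor coordinates.
+ */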
+Rectangle CameraData::scaleIspCrop(const Rectangle &ispCrop) const
+{
+	/*
+	 * Scale a crop rectangle defined in the ISP's coordinates into native sensor
+	 * coordinates.
+	 */
+	Rectangle nativeCrop = ispCrop.scaledBy(sensorInfo_.analogCrop.size(),
+						sensorInfo_.outputSize);
+	nativeCrop.translateBy(sensorInfo_.analogCrop.topLeft());
+	return nativeCrop;
+}
+
+void CameraData::calculateScalerCrop(const ControlList &controls)
+{
+	const auto &scalerCrop = controls.get<Rectangle>(controls::ScalerCrop);
+	if (scalerCrop) {
+		Rectangle nativeCrop = *scalerCrop;
+
+		if (!nativeCrop.width || !nativeCrop.height)
+			nativeCrop = { 0, 0, 1, 1 };
+
+		/* Create a version of the crop scaled to ISP (camera mode) pixels. */
+		Rectangle ispCrop = nativeCrop.translatedBy(-sensorInfo_.analogCrop.topLeft());
+		ispCrop.scaleBy(sensorInfo_.outputSize, sensorInfo_.analogCrop.size());
+
+		/*
+		 * The crop that we set must be:
+		 * 1. At least as big as ispMinCropSize_, once that's been
+		 *    enlarged to the same aspect ratio.
+		 * 2. With the same mid-point, if possible.
+		 * 3. But it can't go outside the sensor area.
+		 */
+		Size minSize = ispMinCropSize_.expandedToAspectRatio(nativeCrop.size());
+		Size size = ispCrop.size().expandedTo(minSize);
+		ispCrop = size.centeredTo(ispCrop.center()).enclosedIn(Rectangle(sensorInfo_.outputSize));
+
+		if (ispCrop != ispCrop_) {
+			ispCrop_ = ispCrop;
+			platformIspCrop();
+
+			/*
+			 * Also update the ScalerCrop in the metadata with what we actually
+			 * used. But we must first rescale that from ISP (camera mode) pixels
+			 * back into sensor native pixels.
+			 */
+			scalerCrop_ = scaleIspCrop(ispCrop_);
+		}
+	}
+}
+
+void CameraData::cameraTimeout()
+{
+	LOG(RPI, Error) << "Camera frontend has timed out!";
+	LOG(RPI, Error) << "Please check that your camera sensor connector is attached securely.";
+	LOG(RPI, Error) << "Alternatively, try another cable and/or sensor.";
+
+	state_ = CameraData::State::Error;
+	platformStop();
+
+	/*
+	 * To allow the application to attempt a recovery from this timeout,
+	 * stop all devices streaming, and return any outstanding requests as
+	 * incomplete and cancelled.
+	 */
+	for (auto const stream : streams_)
+		stream->dev()->streamOff();
+
+	clearIncompleteRequests();
+}
+
+void CameraData::frameStarted(uint32_t sequence)
+{
+	LOG(RPI, Debug) << "Frame start " << sequence;
+
+	/* Write any controls for the next frame as soon as we can. */
+	delayedCtrls_->applyControls(sequence);
+}
+
+void CameraData::clearIncompleteRequests()
+{
+	/*
+	 * All outstanding requests (and associated buffers) must be returned
+	 * back to the application.
+	 */
+	while (!requestQueue_.empty()) {
+		Request *request = requestQueue_.front();
+
+		for (auto &b : request->buffers()) {
+			FrameBuffer *buffer = b.second;
+			/*
+			 * Has the buffer already been handed back to the
+			 * request? If not, do so now.
+			 */
+			if (buffer->request()) {
+				buffer->_d()->cancel();
+				pipe()->completeBuffer(request, buffer);
+			}
+		}
+
+		pipe()->completeRequest(request);
+		requestQueue_.pop();
+	}
+}
+
+void CameraData::handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream)
+{
+	/*
+	 * It is possible to be here without a pending request, so check
+	 * that we actually have one to action, otherwise we just return
+	 * buffer back to the stream.
+	 */
+	Request *request = requestQueue_.empty() ? nullptr : requestQueue_.front();
+	if (!dropFrameCount_ && request && request->findBuffer(stream) == buffer) {
+		/*
+		 * Check if this is an externally provided buffer, and if
+		 * so, we must stop tracking it in the pipeline handler.
+		 */
+		handleExternalBuffer(buffer, stream);
+		/*
+		 * Tag the buffer as completed, returning it to the
+		 * application.
+		 */
+		pipe()->completeBuffer(request, buffer);
+	} else {
+		/*
+		 * This buffer was not part of the Request (which happens if an
+		 * internal buffer was used for an external stream, or
+		 * unconditionally for internal streams), or there is no pending
+		 * request, so we can recycle it.
+		 */
+		stream->returnBuffer(buffer);
+	}
+}
+
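+/*
+ * State machine summary: Stopped, Busy and Error take no action. IpaComplete
+ * first checks whether the current request can be completed (moving to Idle
+ * if so), then falls through to Idle to try running the pipeline again.
+ */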
+void CameraData::handleState()
+{
+	switch (state_) {
+	case State::Stopped:
+	case State::Busy:
+	case State::Error:
+		break;
+
+	case State::IpaComplete:
+		/* If the request is completed, we will switch to Idle state. */
+		checkRequestCompleted();
+		/*
+		 * No break here, we want to try running the pipeline again.
+		 * The fallthrough clause below suppresses compiler warnings.
+		 */
+		[[fallthrough]];
+
+	case State::Idle:
+		tryRunPipeline();
+		break;
+	}
+}
+
+void CameraData::handleExternalBuffer(FrameBuffer *buffer, RPi::Stream *stream)
+{
+	unsigned int id = stream->getBufferId(buffer);
+
+	if (!(id & MaskExternalBuffer))
+		return;
+
+	/* Stop the Stream object from tracking the buffer. */
+	stream->removeExternalBuffer(buffer);
+}
+
+void CameraData::checkRequestCompleted()
+{
+	bool requestCompleted = false;
+	/*
+	 * If we are dropping this frame, do not touch the request, simply
+	 * change the state to IDLE when ready.
+	 */
+	if (!dropFrameCount_) {
+		Request *request = requestQueue_.front();
+		if (request->hasPendingBuffers())
+			return;
+
+		/* Must wait for metadata to be filled in before completing. */
+		if (state_ != State::IpaComplete)
+			return;
+
+		pipe()->completeRequest(request);
+		requestQueue_.pop();
+		requestCompleted = true;
+	}
+
+	/*
+	 * Make sure we have three outputs completed in the case of a dropped
+	 * frame.
+	 */
+	if (state_ == State::IpaComplete &&
+	    ((ispOutputCount_ == ispOutputTotal_ && dropFrameCount_) || requestCompleted)) {
+		state_ = State::Idle;
+		if (dropFrameCount_) {
+			dropFrameCount_--;
+			LOG(RPI, Debug) << "Dropping frame at the request of the IPA ("
+					<< dropFrameCount_ << " left)";
+		}
+	}
+}
+
+void CameraData::fillRequestMetadata(const ControlList &bufferControls, Request *request)
+{
+	request->metadata().set(controls::SensorTimestamp,
+				bufferControls.get(controls::SensorTimestamp).value_or(0));
+
+	request->metadata().set(controls::ScalerCrop, scalerCrop_);
+}
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.h b/src/libcamera/pipeline/rpi/common/pipeline_base.h
new file mode 100644
index 000000000000..318266a6fb51
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/common/pipeline_base.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * pipeline_base.h - Pipeline handler base class for Raspberry Pi devices
+ */
+
+#include <map>
+#include <memory>
+#include <optional>
+#include <queue>
+#include <string>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include <libcamera/controls.h>
+#include <libcamera/request.h>
+
+#include "libcamera/internal/bayer_format.h"
+#include "libcamera/internal/camera.h"
+#include "libcamera/internal/camera_sensor.h"
+#include "libcamera/internal/framebuffer.h"
+#include "libcamera/internal/media_device.h"
+#include "libcamera/internal/media_object.h"
+#include "libcamera/internal/pipeline_handler.h"
+#include "libcamera/internal/v4l2_videodevice.h"
+#include "libcamera/internal/yaml_parser.h"
+
+#include <libcamera/ipa/raspberrypi_ipa_interface.h>
+#include <libcamera/ipa/raspberrypi_ipa_proxy.h>
+
+#include "delayed_controls.h"
+#include "rpi_stream.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+namespace RPi {
+
+/* Map of mbus codes to supported sizes reported by the sensor. */
+using SensorFormats = std::map<unsigned int, std::vector<Size>>;
+
+class CameraData : public Camera::Private
+{
+public:
+	CameraData(PipelineHandler *pipe)
+		: Camera::Private(pipe), state_(State::Stopped),
+		  flipsAlterBayerOrder_(false), dropFrameCount_(0), buffersAllocated_(false),
+		  ispOutputCount_(0), ispOutputTotal_(0)
+	{
+	}
+
+	virtual ~CameraData()
+	{
+	}
+
+	struct StreamParams {
+		StreamParams()
+			: index(0), cfg(nullptr), dev(nullptr)
+		{
+		}
+
+		StreamParams(unsigned int index_, StreamConfiguration *cfg_)
+			: index(index_), cfg(cfg_), dev(nullptr)
+		{
+		}
+
+		unsigned int index;
+		StreamConfiguration *cfg;
+		V4L2VideoDevice *dev;
+	};
+
+	virtual CameraConfiguration::Status platformValidate(std::vector<StreamParams> &rawStreams,
+							     std::vector<StreamParams> &outStreams) const = 0;
+	virtual int platformConfigure(const V4L2SubdeviceFormat &sensorFormat,
+				      std::optional<BayerFormat::Packing> packing,
+				      std::vector<StreamParams> &rawStreams,
+				      std::vector<StreamParams> &outStreams) = 0;
+	virtual void platformStart() = 0;
+	virtual void platformStop() = 0;
+
+	void freeBuffers();
+	virtual void platformFreeBuffers() = 0;
+
+	void enumerateVideoDevices(MediaLink *link, const std::string &frontend);
+
+	int loadPipelineConfiguration();
+	int loadIPA(ipa::RPi::InitResult *result);
+	int configureIPA(const CameraConfiguration *config, ipa::RPi::ConfigResult *result);
+	virtual int platformInitIpa(ipa::RPi::InitParams &params) = 0;
+	virtual int platformConfigureIpa(ipa::RPi::ConfigParams &params) = 0;
+
+	void setDelayedControls(const ControlList &controls, uint32_t delayContext);
+	void setLensControls(const ControlList &controls);
+	void setSensorControls(ControlList &controls);
+
+	Rectangle scaleIspCrop(const Rectangle &ispCrop) const;
+	void calculateScalerCrop(const ControlList &controls);
+	virtual void platformIspCrop() = 0;
+
+	void cameraTimeout();
+	void frameStarted(uint32_t sequence);
+
+	void clearIncompleteRequests();
+	void handleStreamBuffer(FrameBuffer *buffer, Stream *stream);
+	void handleState();
+
+	virtual V4L2VideoDevice::Formats ispFormats() const = 0;
+	virtual V4L2VideoDevice::Formats rawFormats() const = 0;
+	virtual V4L2VideoDevice *frontendDevice() = 0;
+
+	virtual int platformPipelineConfigure(const std::unique_ptr<YamlObject> &root) = 0;
+
+	std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;
+
+	std::unique_ptr<CameraSensor> sensor_;
+	SensorFormats sensorFormats_;
+
+	/* The vector below is just for convenience when iterating over all streams. */
+	std::vector<Stream *> streams_;
+	/* Stores the ids of the buffers mapped in the IPA. */
+	std::unordered_set<unsigned int> bufferIds_;
+	/*
+	 * Stores a cascade of Video Mux or Bridge devices between the sensor and
+	 * Unicam together with media link across the entities.
+	 */
+	std::vector<std::pair<std::unique_ptr<V4L2Subdevice>, MediaLink *>> bridgeDevices_;
+
+	std::unique_ptr<DelayedControls> delayedCtrls_;
+	bool sensorMetadata_;
+
+	/*
+	 * All the functions in this class are called from a single calling
+	 * thread. So, we do not need to have any mutex to protect access to any
+	 * of the variables below.
+	 */
+	enum class State { Stopped, Idle, Busy, IpaComplete, Error };
+	State state_;
+
+	bool isRunning()
+	{
+		return state_ != State::Stopped && state_ != State::Error;
+	}
+
+	std::queue<Request *> requestQueue_;
+
+	/*
+	 * Whether sensor flips alter the Bayer order, and the "native" Bayer
+	 * order itself (that is, with no transforms applied).
+	 */
+	bool flipsAlterBayerOrder_;
+	BayerFormat::Order nativeBayerOrder_;
+
+	/* For handling digital zoom. */
+	IPACameraSensorInfo sensorInfo_;
+	Rectangle ispCrop_; /* crop in ISP (camera mode) pixels */
+	Rectangle scalerCrop_; /* crop in sensor native pixels */
+	Size ispMinCropSize_;
+
+	unsigned int dropFrameCount_;
+
+	/*
+	 * If set, this stores the value that represents a gain of one for
+	 * the V4L2_CID_NOTIFY_GAINS control.
+	 */
+	std::optional<int32_t> notifyGainsUnity_;
+
+	/* Have internal buffers been allocated? */
+	bool buffersAllocated_;
+
+	struct Config {
+		/*
+		 * Override any request from the IPA to drop a number of startup
+		 * frames.
+		 */
+		bool disableStartupFrameDrops;
+		/*
+		 * Override the camera timeout value calculated by the IPA based
+		 * on frame durations.
+		 */
+		unsigned int cameraTimeoutValue;
+	};
+
+	Config config_;
+
+protected:
+	void fillRequestMetadata(const ControlList &bufferControls,
+				 Request *request);
+
+	virtual void tryRunPipeline() = 0;
+
+	unsigned int ispOutputCount_;
+	unsigned int ispOutputTotal_;
+
+private:
+	void handleExternalBuffer(FrameBuffer *buffer, Stream *stream);
+	void checkRequestCompleted();
+};
+
+class PipelineHandlerBase : public PipelineHandler
+{
+public:
+	PipelineHandlerBase(CameraManager *manager)
+		: PipelineHandler(manager)
+	{
+	}
+
+	virtual ~PipelineHandlerBase()
+	{
+	}
+
+	static V4L2DeviceFormat toV4L2DeviceFormat(const V4L2VideoDevice *dev,
+						   const V4L2SubdeviceFormat &format,
+						   BayerFormat::Packing packingReq);
+
+	std::unique_ptr<CameraConfiguration>
+	generateConfiguration(Camera *camera, const StreamRoles &roles) override;
+	int configure(Camera *camera, CameraConfiguration *config) override;
+
+	int exportFrameBuffers(Camera *camera, libcamera::Stream *stream,
+			       std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
+
+	int start(Camera *camera, const ControlList *controls) override;
+	void stopDevice(Camera *camera) override;
+	void releaseDevice(Camera *camera) override;
+
+	int queueRequestDevice(Camera *camera, Request *request) override;
+
+protected:
+	int registerCamera(MediaDevice *frontend, const std::string &frontendName,
+			   MediaDevice *backend, MediaEntity *sensorEntity);
+
+	void mapBuffers(Camera *camera, const BufferMap &buffers, unsigned int mask);
+
+	virtual std::unique_ptr<CameraData> allocateCameraData() = 0;
+	virtual int platformRegister(std::unique_ptr<CameraData> &cameraData,
+				     MediaDevice *frontend, MediaDevice *backend) = 0;
+
+private:
+	CameraData *cameraData(Camera *camera)
+	{
+		return static_cast<CameraData *>(camera->_d());
+	}
+
+	int queueAllBuffers(Camera *camera);
+	virtual int prepareBuffers(Camera *camera) = 0;
+};
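+
+/*
+ * A minimal sketch of a platform derivation (the derived class names below
+ * are illustrative only):
+ *
+ * class PipelineHandlerVc4 : public PipelineHandlerBase
+ * {
+ * protected:
+ *	std::unique_ptr<CameraData> allocateCameraData() override
+ *	{
+ *		return std::make_unique<Vc4CameraData>(this);
+ *	}
+ *
+ *	int platformRegister(std::unique_ptr<CameraData> &cameraData,
+ *			     MediaDevice *frontend, MediaDevice *backend) override;
+ *	...
+ * };
+ */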
+
+class RPiCameraConfiguration final : public CameraConfiguration
+{
+public:
+	RPiCameraConfiguration(const CameraData *data)
+		: CameraConfiguration(), data_(data)
+	{
+	}
+
+	CameraConfiguration::Status validateColorSpaces(ColorSpaceFlags flags);
+	Status validate() override;
+
+	/* Cache the combinedTransform_ that will be applied to the sensor */
+	Transform combinedTransform_;
+
+private:
+	const CameraData *data_;
+
+	/*
+	 * Store the colour spaces that all our streams will have. RGB format
+	 * streams will have the same colour space as YUV streams, with the
+	 * YCbCr field cleared and range set to full.
+	 */
+	std::optional<ColorSpace> yuvColorSpace_;
+	std::optional<ColorSpace> rgbColorSpace_;
+};
+
+} /* namespace RPi */
+
+} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/vc4/data/example.yaml b/src/libcamera/pipeline/rpi/vc4/data/example.yaml
index c90f518f8849..b8e01adeaf40 100644
--- a/src/libcamera/pipeline/rpi/vc4/data/example.yaml
+++ b/src/libcamera/pipeline/rpi/vc4/data/example.yaml
@@ -34,13 +34,13 @@
                 #
                 # "disable_startup_frame_drops": false,
 
-                # Custom timeout value (in ms) for Unicam to use. This overrides
+                # Custom timeout value (in ms) for camera to use. This overrides
                 # the value computed by the pipeline handler based on frame
                 # durations.
                 #
                 # Set this value to 0 to use the pipeline handler computed
                 # timeout value.
                 #
-                # "unicam_timeout_value_ms": 0,
+                # "camera_timeout_value_ms": 0,
         }
 }
diff --git a/src/libcamera/pipeline/rpi/vc4/meson.build b/src/libcamera/pipeline/rpi/vc4/meson.build
index 228823f30922..cdb049c58d2c 100644
--- a/src/libcamera/pipeline/rpi/vc4/meson.build
+++ b/src/libcamera/pipeline/rpi/vc4/meson.build
@@ -2,7 +2,7 @@
 
 libcamera_sources += files([
     'dma_heaps.cpp',
-    'raspberrypi.cpp',
+    'vc4.cpp',
 ])
 
 subdir('data')
diff --git a/src/libcamera/pipeline/rpi/vc4/raspberrypi.cpp b/src/libcamera/pipeline/rpi/vc4/raspberrypi.cpp
deleted file mode 100644
index bd66468683df..000000000000
--- a/src/libcamera/pipeline/rpi/vc4/raspberrypi.cpp
+++ /dev/null
@@ -1,2428 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1-or-later */
-/*
- * Copyright (C) 2019-2021, Raspberry Pi Ltd
- *
- * raspberrypi.cpp - Pipeline handler for VC4 based Raspberry Pi devices
- */
-#include <algorithm>
-#include <assert.h>
-#include <cmath>
-#include <fcntl.h>
-#include <memory>
-#include <mutex>
-#include <queue>
-#include <unordered_set>
-#include <utility>
-
-#include <libcamera/base/file.h>
-#include <libcamera/base/shared_fd.h>
-#include <libcamera/base/utils.h>
-
-#include <libcamera/camera.h>
-#include <libcamera/control_ids.h>
-#include <libcamera/formats.h>
-#include <libcamera/ipa/raspberrypi_ipa_interface.h>
-#include <libcamera/ipa/raspberrypi_ipa_proxy.h>
-#include <libcamera/logging.h>
-#include <libcamera/property_ids.h>
-#include <libcamera/request.h>
-
-#include <linux/bcm2835-isp.h>
-#include <linux/media-bus-format.h>
-#include <linux/videodev2.h>
-
-#include "libcamera/internal/bayer_format.h"
-#include "libcamera/internal/camera.h"
-#include "libcamera/internal/camera_lens.h"
-#include "libcamera/internal/camera_sensor.h"
-#include "libcamera/internal/device_enumerator.h"
-#include "libcamera/internal/framebuffer.h"
-#include "libcamera/internal/ipa_manager.h"
-#include "libcamera/internal/media_device.h"
-#include "libcamera/internal/pipeline_handler.h"
-#include "libcamera/internal/v4l2_videodevice.h"
-#include "libcamera/internal/yaml_parser.h"
-
-#include "delayed_controls.h"
-#include "dma_heaps.h"
-#include "rpi_stream.h"
-
-using namespace std::chrono_literals;
-
-namespace libcamera {
-
-LOG_DEFINE_CATEGORY(RPI)
-
-namespace {
-
-constexpr unsigned int defaultRawBitDepth = 12;
-
-/* Map of mbus codes to supported sizes reported by the sensor. */
-using SensorFormats = std::map<unsigned int, std::vector<Size>>;
-
-SensorFormats populateSensorFormats(std::unique_ptr<CameraSensor> &sensor)
-{
-	SensorFormats formats;
-
-	for (auto const mbusCode : sensor->mbusCodes())
-		formats.emplace(mbusCode, sensor->sizes(mbusCode));
-
-	return formats;
-}
-
-bool isMonoSensor(std::unique_ptr<CameraSensor> &sensor)
-{
-	unsigned int mbusCode = sensor->mbusCodes()[0];
-	const BayerFormat &bayer = BayerFormat::fromMbusCode(mbusCode);
-
-	return bayer.order == BayerFormat::Order::MONO;
-}
-
-PixelFormat mbusCodeToPixelFormat(unsigned int mbus_code,
-				  BayerFormat::Packing packingReq)
-{
-	BayerFormat bayer = BayerFormat::fromMbusCode(mbus_code);
-
-	ASSERT(bayer.isValid());
-
-	bayer.packing = packingReq;
-	PixelFormat pix = bayer.toPixelFormat();
-
-	/*
-	 * Not all formats (e.g. 8-bit or 16-bit Bayer formats) can have packed
-	 * variants. So if the PixelFormat returns as invalid, use the non-packed
-	 * conversion instead.
-	 */
-	if (!pix.isValid()) {
-		bayer.packing = BayerFormat::Packing::None;
-		pix = bayer.toPixelFormat();
-	}
-
-	return pix;
-}
-
-V4L2DeviceFormat toV4L2DeviceFormat(const V4L2VideoDevice *dev,
-				    const V4L2SubdeviceFormat &format,
-				    BayerFormat::Packing packingReq)
-{
-	const PixelFormat pix = mbusCodeToPixelFormat(format.mbus_code, packingReq);
-	V4L2DeviceFormat deviceFormat;
-
-	deviceFormat.fourcc = dev->toV4L2PixelFormat(pix);
-	deviceFormat.size = format.size;
-	deviceFormat.colorSpace = format.colorSpace;
-	return deviceFormat;
-}
-
-bool isRaw(const PixelFormat &pixFmt)
-{
-	/* This test works for both Bayer and raw mono formats. */
-	return BayerFormat::fromPixelFormat(pixFmt).isValid();
-}
-
-double scoreFormat(double desired, double actual)
-{
-	double score = desired - actual;
-	/* Smaller desired dimensions are preferred. */
-	if (score < 0.0)
-		score = (-score) / 8;
-	/* Penalise non-exact matches. */
-	if (actual != desired)
-		score *= 2;
-
-	return score;
-}
-
-V4L2SubdeviceFormat findBestFormat(const SensorFormats &formatsMap, const Size &req, unsigned int bitDepth)
-{
-	double bestScore = std::numeric_limits<double>::max(), score;
-	V4L2SubdeviceFormat bestFormat;
-	bestFormat.colorSpace = ColorSpace::Raw;
-
-	constexpr float penaltyAr = 1500.0;
-	constexpr float penaltyBitDepth = 500.0;
-
-	/* Calculate the closest/best mode from the user requested size. */
-	for (const auto &iter : formatsMap) {
-		const unsigned int mbusCode = iter.first;
-		const PixelFormat format = mbusCodeToPixelFormat(mbusCode,
-								 BayerFormat::Packing::None);
-		const PixelFormatInfo &info = PixelFormatInfo::info(format);
-
-		for (const Size &size : iter.second) {
-			double reqAr = static_cast<double>(req.width) / req.height;
-			double fmtAr = static_cast<double>(size.width) / size.height;
-
-			/* Score the dimensions for closeness. */
-			score = scoreFormat(req.width, size.width);
-			score += scoreFormat(req.height, size.height);
-			score += penaltyAr * scoreFormat(reqAr, fmtAr);
-
-			/* Add any penalties... this is not an exact science! */
-			score += utils::abs_diff(info.bitsPerPixel, bitDepth) * penaltyBitDepth;
-
-			if (score <= bestScore) {
-				bestScore = score;
-				bestFormat.mbus_code = mbusCode;
-				bestFormat.size = size;
-			}
-
-			LOG(RPI, Debug) << "Format: " << size
-					<< " fmt " << format
-					<< " Score: " << score
-					<< " (best " << bestScore << ")";
-		}
-	}
-
-	return bestFormat;
-}
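
The scoring heuristic above is worth a worked example. The standalone sketch below reproduces the arithmetic with plain structs instead of the libcamera Size and V4L2SubdeviceFormat types: modes larger than the requested size incur a mild penalty (the difference divided by 8), smaller ones the full difference, any non-exact match is doubled, and aspect-ratio error is weighted by penaltyAr.

```cpp
// Standalone reproduction of the scoreFormat()/findBestFormat() scoring.
#include <iostream>
#include <limits>
#include <vector>

static double scoreFormat(double desired, double actual)
{
	double score = desired - actual;
	/* A mode larger than requested is only mildly penalised... */
	if (score < 0.0)
		score = -score / 8;
	/* ...but any non-exact match doubles the score. */
	if (actual != desired)
		score *= 2;
	return score;
}

int main()
{
	struct Mode { unsigned int width, height; };
	const std::vector<Mode> modes = {
		{ 640, 480 }, { 1920, 1080 }, { 3280, 2464 }
	};
	const Mode req = { 1920, 1080 };
	constexpr double penaltyAr = 1500.0;

	double bestScore = std::numeric_limits<double>::max();
	Mode best = {};
	for (const Mode &m : modes) {
		double score = scoreFormat(req.width, m.width) +
			       scoreFormat(req.height, m.height);
		/* Aspect ratio mismatches are heavily penalised. */
		score += penaltyAr * scoreFormat(double(req.width) / req.height,
						 double(m.width) / m.height);
		if (score <= bestScore) {
			bestScore = score;
			best = m;
		}
	}
	std::cout << "Best mode: " << best.width << "x" << best.height << "\n";
}
```

Here the exact 1920x1080 match scores zero and wins; the bit-depth penalty is omitted for brevity.
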
-
-enum class Unicam : unsigned int { Image, Embedded };
-enum class Isp : unsigned int { Input, Output0, Output1, Stats };
-
-} /* namespace */
-
-class RPiCameraData : public Camera::Private
-{
-public:
-	RPiCameraData(PipelineHandler *pipe)
-		: Camera::Private(pipe), state_(State::Stopped),
-		  flipsAlterBayerOrder_(false), dropFrameCount_(0),
-		  buffersAllocated_(false), ispOutputCount_(0)
-	{
-	}
-
-	~RPiCameraData()
-	{
-		freeBuffers();
-	}
-
-	void freeBuffers();
-	void frameStarted(uint32_t sequence);
-
-	int loadIPA(ipa::RPi::InitResult *result);
-	int configureIPA(const CameraConfiguration *config, ipa::RPi::ConfigResult *result);
-	int loadPipelineConfiguration();
-
-	void enumerateVideoDevices(MediaLink *link);
-
-	void processStatsComplete(const ipa::RPi::BufferIds &buffers);
-	void prepareIspComplete(const ipa::RPi::BufferIds &buffers);
-	void setIspControls(const ControlList &controls);
-	void setDelayedControls(const ControlList &controls, uint32_t delayContext);
-	void setLensControls(const ControlList &controls);
-	void setCameraTimeout(uint32_t maxExposureTimeMs);
-	void setSensorControls(ControlList &controls);
-	void unicamTimeout();
-
-	/* bufferComplete signal handlers. */
-	void unicamBufferDequeue(FrameBuffer *buffer);
-	void ispInputDequeue(FrameBuffer *buffer);
-	void ispOutputDequeue(FrameBuffer *buffer);
-
-	void clearIncompleteRequests();
-	void handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream);
-	void handleExternalBuffer(FrameBuffer *buffer, RPi::Stream *stream);
-	void handleState();
-	Rectangle scaleIspCrop(const Rectangle &ispCrop) const;
-	void applyScalerCrop(const ControlList &controls);
-
-	std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;
-
-	std::unique_ptr<CameraSensor> sensor_;
-	SensorFormats sensorFormats_;
-	/* Array of Unicam and ISP device streams and associated buffers/streams. */
-	RPi::Device<Unicam, 2> unicam_;
-	RPi::Device<Isp, 4> isp_;
-	/* The vector below is just for convenience when iterating over all streams. */
-	std::vector<RPi::Stream *> streams_;
-	/* Stores the ids of the buffers mapped in the IPA. */
-	std::unordered_set<unsigned int> bufferIds_;
-	/*
-	 * Stores a cascade of Video Mux or Bridge devices between the sensor and
-	 * Unicam, together with the media links between the entities.
-	 */
-	std::vector<std::pair<std::unique_ptr<V4L2Subdevice>, MediaLink *>> bridgeDevices_;
-
-	/* DMAHEAP allocation helper. */
-	RPi::DmaHeap dmaHeap_;
-	SharedFD lsTable_;
-
-	std::unique_ptr<RPi::DelayedControls> delayedCtrls_;
-	bool sensorMetadata_;
-
-	/*
-	 * All the functions in this class are called from a single thread, so
-	 * we do not need any mutex to protect access to any of the variables
-	 * below.
-	 */
-	enum class State { Stopped, Idle, Busy, IpaComplete, Error };
-	State state_;
-
-	bool isRunning()
-	{
-		return state_ != State::Stopped && state_ != State::Error;
-	}
-
-	struct BayerFrame {
-		FrameBuffer *buffer;
-		ControlList controls;
-		unsigned int delayContext;
-	};
-
-	std::queue<BayerFrame> bayerQueue_;
-	std::queue<FrameBuffer *> embeddedQueue_;
-	std::deque<Request *> requestQueue_;
-
-	/*
-	 * Store the "native" Bayer order (that is, with no transforms
-	 * applied).
-	 */
-	bool flipsAlterBayerOrder_;
-	BayerFormat::Order nativeBayerOrder_;
-
-	/* For handling digital zoom. */
-	IPACameraSensorInfo sensorInfo_;
-	Rectangle ispCrop_; /* crop in ISP (camera mode) pixels */
-	Rectangle scalerCrop_; /* crop in sensor native pixels */
-	Size ispMinCropSize_;
-
-	unsigned int dropFrameCount_;
-
-	/*
-	 * If set, this stores the value that represents a gain of one for
-	 * the V4L2_CID_NOTIFY_GAINS control.
-	 */
-	std::optional<int32_t> notifyGainsUnity_;
-
-	/* Have internal buffers been allocated? */
-	bool buffersAllocated_;
-
-	struct Config {
-		/*
-		 * The minimum number of internal buffers to be allocated for
-		 * the Unicam Image stream.
-		 */
-		unsigned int minUnicamBuffers;
-		/*
-		 * The minimum total (internal + external) buffer count used for
-		 * the Unicam Image stream.
-		 *
-		 * Note that:
-		 * minTotalUnicamBuffers must be >= 1, and
-		 * minTotalUnicamBuffers >= minUnicamBuffers
-		 */
-		unsigned int minTotalUnicamBuffers;
-		/*
-		 * Override any request from the IPA to drop a number of startup
-		 * frames.
-		 */
-		bool disableStartupFrameDrops;
-		/*
-		 * Override the Unicam timeout value calculated by the IPA based
-		 * on frame durations.
-		 */
-		unsigned int unicamTimeoutValue;
-	};
-
-	Config config_;
-
-private:
-	void checkRequestCompleted();
-	void fillRequestMetadata(const ControlList &bufferControls,
-				 Request *request);
-	void tryRunPipeline();
-	bool findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer);
-
-	unsigned int ispOutputCount_;
-};
-
-class RPiCameraConfiguration : public CameraConfiguration
-{
-public:
-	RPiCameraConfiguration(const RPiCameraData *data);
-
-	CameraConfiguration::Status validateColorSpaces(ColorSpaceFlags flags);
-	Status validate() override;
-
-	/* Cache the combinedTransform_ that will be applied to the sensor */
-	Transform combinedTransform_;
-
-private:
-	const RPiCameraData *data_;
-
-	/*
-	 * Store the colour spaces that all our streams will have. RGB format streams
-	 * will have the same colorspace as YUV streams, with YCbCr field cleared and
-	 * range set to full.
-	 */
-	std::optional<ColorSpace> yuvColorSpace_;
-	std::optional<ColorSpace> rgbColorSpace_;
-};
-
-class PipelineHandlerRPi : public PipelineHandler
-{
-public:
-	PipelineHandlerRPi(CameraManager *manager);
-
-	std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
-		const StreamRoles &roles) override;
-	int configure(Camera *camera, CameraConfiguration *config) override;
-
-	int exportFrameBuffers(Camera *camera, Stream *stream,
-			       std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
-
-	int start(Camera *camera, const ControlList *controls) override;
-	void stopDevice(Camera *camera) override;
-
-	int queueRequestDevice(Camera *camera, Request *request) override;
-
-	bool match(DeviceEnumerator *enumerator) override;
-
-	void releaseDevice(Camera *camera) override;
-
-private:
-	RPiCameraData *cameraData(Camera *camera)
-	{
-		return static_cast<RPiCameraData *>(camera->_d());
-	}
-
-	int registerCamera(MediaDevice *unicam, MediaDevice *isp, MediaEntity *sensorEntity);
-	int queueAllBuffers(Camera *camera);
-	int prepareBuffers(Camera *camera);
-	void mapBuffers(Camera *camera, const RPi::BufferMap &buffers, unsigned int mask);
-};
-
-RPiCameraConfiguration::RPiCameraConfiguration(const RPiCameraData *data)
-	: CameraConfiguration(), data_(data)
-{
-}
-
-static const std::vector<ColorSpace> validColorSpaces = {
-	ColorSpace::Sycc,
-	ColorSpace::Smpte170m,
-	ColorSpace::Rec709
-};
-
-static std::optional<ColorSpace> findValidColorSpace(const ColorSpace &colourSpace)
-{
-	for (auto cs : validColorSpaces) {
-		if (colourSpace.primaries == cs.primaries &&
-		    colourSpace.transferFunction == cs.transferFunction)
-			return cs;
-	}
-
-	return std::nullopt;
-}
-
-static bool isRgb(const PixelFormat &pixFmt)
-{
-	const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
-	return info.colourEncoding == PixelFormatInfo::ColourEncodingRGB;
-}
-
-static bool isYuv(const PixelFormat &pixFmt)
-{
-	/* The code below would return true for raw mono streams, so weed those out first. */
-	if (isRaw(pixFmt))
-		return false;
-
-	const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
-	return info.colourEncoding == PixelFormatInfo::ColourEncodingYUV;
-}
-
-/*
- * Raspberry Pi drivers expect the following colour spaces:
- * - V4L2_COLORSPACE_RAW for raw streams.
- * - One of V4L2_COLORSPACE_JPEG, V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_REC709 for
- *   non-raw streams. Other fields such as transfer function, YCbCr encoding and
- *   quantisation are not used.
- *
- * The libcamera colour spaces that we wish to use corresponding to these are therefore:
- * - ColorSpace::Raw for V4L2_COLORSPACE_RAW
- * - ColorSpace::Sycc for V4L2_COLORSPACE_JPEG
- * - ColorSpace::Smpte170m for V4L2_COLORSPACE_SMPTE170M
- * - ColorSpace::Rec709 for V4L2_COLORSPACE_REC709
- */
-
-CameraConfiguration::Status RPiCameraConfiguration::validateColorSpaces([[maybe_unused]] ColorSpaceFlags flags)
-{
-	Status status = Valid;
-	yuvColorSpace_.reset();
-
-	for (auto cfg : config_) {
-		/* First fix up raw streams to have the "raw" colour space. */
-		if (isRaw(cfg.pixelFormat)) {
-			/* If there was no value here, that doesn't count as "adjusted". */
-			if (cfg.colorSpace && cfg.colorSpace != ColorSpace::Raw)
-				status = Adjusted;
-			cfg.colorSpace = ColorSpace::Raw;
-			continue;
-		}
-
-		/* Next we need to find our shared colour space. The first valid one will do. */
-		if (cfg.colorSpace && !yuvColorSpace_)
-			yuvColorSpace_ = findValidColorSpace(cfg.colorSpace.value());
-	}
-
-	/* If no colour space was given anywhere, choose sYCC. */
-	if (!yuvColorSpace_)
-		yuvColorSpace_ = ColorSpace::Sycc;
-
-	/* Note the version of this that any RGB streams will have to use. */
-	rgbColorSpace_ = yuvColorSpace_;
-	rgbColorSpace_->ycbcrEncoding = ColorSpace::YcbcrEncoding::None;
-	rgbColorSpace_->range = ColorSpace::Range::Full;
-
-	/* Go through the streams again and force everyone to the same colour space. */
-	for (auto cfg : config_) {
-		if (cfg.colorSpace == ColorSpace::Raw)
-			continue;
-
-		if (isYuv(cfg.pixelFormat) && cfg.colorSpace != yuvColorSpace_) {
-			/* Again, no value means "not adjusted". */
-			if (cfg.colorSpace)
-				status = Adjusted;
-			cfg.colorSpace = yuvColorSpace_;
-		}
-		if (isRgb(cfg.pixelFormat) && cfg.colorSpace != rgbColorSpace_) {
-			/* Be nice, and let the YUV version count as non-adjusted too. */
-			if (cfg.colorSpace && cfg.colorSpace != yuvColorSpace_)
-				status = Adjusted;
-			cfg.colorSpace = rgbColorSpace_;
-		}
-	}
-
-	return status;
-}
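
In other words, validateColorSpaces() picks one shared colour space from the first YUV stream that names a valid one (defaulting to sYCC), and derives a full-range variant for RGB streams. A simplified standalone model of that rule, with stand-in types rather than the libcamera ColorSpace class:

```cpp
// Toy model of the shared-colour-space rule, reduced to the two
// fields the rule touches.
#include <iostream>
#include <optional>
#include <string>
#include <vector>

enum class Encoding { Raw, Yuv, Rgb };

struct ColorSpace {
	std::string primaries;
	bool fullRange;
};

int main()
{
	struct Stream { Encoding encoding; std::optional<ColorSpace> cs; };
	std::vector<Stream> streams = {
		{ Encoding::Yuv, ColorSpace{ "rec709", false } },
		{ Encoding::Rgb, std::nullopt },
		{ Encoding::Raw, std::nullopt },
	};

	/* The first valid YUV colour space wins; default to sYCC otherwise. */
	ColorSpace yuv{ "sycc", false };
	for (const Stream &s : streams) {
		if (s.encoding == Encoding::Yuv && s.cs) {
			yuv = *s.cs;
			break;
		}
	}

	/* RGB streams get the same colour space, forced to full range. */
	ColorSpace rgb = yuv;
	rgb.fullRange = true;

	for (Stream &s : streams) {
		if (s.encoding == Encoding::Raw)
			continue;
		s.cs = s.encoding == Encoding::Rgb ? rgb : yuv;
		std::cout << s.cs->primaries
			  << (s.cs->fullRange ? " (full range)\n" : " (limited range)\n");
	}
}
```
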
-
-CameraConfiguration::Status RPiCameraConfiguration::validate()
-{
-	Status status = Valid;
-
-	if (config_.empty())
-		return Invalid;
-
-	status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
-
-	/*
-	 * Validate the requested transform against the sensor capabilities and
-	 * rotation and store the final combined transform that configure() will
-	 * need to apply to the sensor to save us working it out again.
-	 */
-	Transform requestedTransform = transform;
-	combinedTransform_ = data_->sensor_->validateTransform(&transform);
-	if (transform != requestedTransform)
-		status = Adjusted;
-
-	unsigned int rawCount = 0, outCount = 0, count = 0, maxIndex = 0;
-	std::pair<int, Size> outSize[2];
-	Size maxSize;
-	for (StreamConfiguration &cfg : config_) {
-		if (isRaw(cfg.pixelFormat)) {
-			/*
-			 * Calculate the best sensor mode we can use based on
-			 * the user request.
-			 */
-			V4L2VideoDevice *unicam = data_->unicam_[Unicam::Image].dev();
-			const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
-			unsigned int bitDepth = info.isValid() ? info.bitsPerPixel : defaultRawBitDepth;
-			V4L2SubdeviceFormat sensorFormat = findBestFormat(data_->sensorFormats_, cfg.size, bitDepth);
-			BayerFormat::Packing packing = BayerFormat::Packing::CSI2;
-			if (info.isValid() && !info.packed)
-				packing = BayerFormat::Packing::None;
-			V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(unicam, sensorFormat, packing);
-			int ret = unicam->tryFormat(&unicamFormat);
-			if (ret)
-				return Invalid;
-
-			/*
-			 * Some sensors change their Bayer order when they are
-			 * h-flipped or v-flipped, according to the transform.
-			 * If this one does, we must advertise the transformed
-			 * Bayer order in the raw stream. Note how we must
-			 * fetch the "native" (i.e. untransformed) Bayer order,
-			 * because the sensor may currently be flipped!
-			 */
-			V4L2PixelFormat fourcc = unicamFormat.fourcc;
-			if (data_->flipsAlterBayerOrder_) {
-				BayerFormat bayer = BayerFormat::fromV4L2PixelFormat(fourcc);
-				bayer.order = data_->nativeBayerOrder_;
-				bayer = bayer.transform(combinedTransform_);
-				fourcc = bayer.toV4L2PixelFormat();
-			}
-
-			PixelFormat unicamPixFormat = fourcc.toPixelFormat();
-			if (cfg.size != unicamFormat.size ||
-			    cfg.pixelFormat != unicamPixFormat) {
-				cfg.size = unicamFormat.size;
-				cfg.pixelFormat = unicamPixFormat;
-				status = Adjusted;
-			}
-
-			cfg.stride = unicamFormat.planes[0].bpl;
-			cfg.frameSize = unicamFormat.planes[0].size;
-
-			rawCount++;
-		} else {
-			outSize[outCount] = std::make_pair(count, cfg.size);
-			/* Record the largest resolution for fixups later. */
-			if (maxSize < cfg.size) {
-				maxSize = cfg.size;
-				maxIndex = outCount;
-			}
-			outCount++;
-		}
-
-		count++;
-
-		/* Can only output 1 RAW stream, or 2 YUV/RGB streams. */
-		if (rawCount > 1 || outCount > 2) {
-			LOG(RPI, Error) << "Invalid number of streams requested";
-			return Invalid;
-		}
-	}
-
-	/*
-	 * Now do any fixups needed. For the two ISP outputs, one stream must be
-	 * equal or smaller than the other in all dimensions.
-	 */
-	for (unsigned int i = 0; i < outCount; i++) {
-		outSize[i].second.width = std::min(outSize[i].second.width,
-						   maxSize.width);
-		outSize[i].second.height = std::min(outSize[i].second.height,
-						    maxSize.height);
-
-		if (config_.at(outSize[i].first).size != outSize[i].second) {
-			config_.at(outSize[i].first).size = outSize[i].second;
-			status = Adjusted;
-		}
-
-		/*
-		 * Also validate the correct pixel formats here.
-		 * Note that Output0 and Output1 support a different
-		 * set of formats.
-		 *
-		 * Output 0 must be for the largest resolution. We will
-		 * have that fixed up in the code above.
-		 */
-		StreamConfiguration &cfg = config_.at(outSize[i].first);
-		PixelFormat &cfgPixFmt = cfg.pixelFormat;
-		V4L2VideoDevice *dev;
-
-		if (i == maxIndex)
-			dev = data_->isp_[Isp::Output0].dev();
-		else
-			dev = data_->isp_[Isp::Output1].dev();
-
-		V4L2VideoDevice::Formats fmts = dev->formats();
-
-		if (fmts.find(dev->toV4L2PixelFormat(cfgPixFmt)) == fmts.end()) {
-			/* If we cannot find a native format, use a default one. */
-			cfgPixFmt = formats::NV12;
-			status = Adjusted;
-		}
-
-		V4L2DeviceFormat format;
-		format.fourcc = dev->toV4L2PixelFormat(cfg.pixelFormat);
-		format.size = cfg.size;
-		/* We want to send the associated YCbCr info through to the driver. */
-		format.colorSpace = yuvColorSpace_;
-
-		LOG(RPI, Debug)
-			<< "Try color space " << ColorSpace::toString(cfg.colorSpace);
-
-		int ret = dev->tryFormat(&format);
-		if (ret)
-			return Invalid;
-
-		/*
-		 * But for RGB streams, the YCbCr info gets overwritten on the way back
-		 * so we must check against what the stream cfg says, not what we actually
-		 * requested (which carefully included the YCbCr info)!
-		 */
-		if (cfg.colorSpace != format.colorSpace) {
-			status = Adjusted;
-			LOG(RPI, Debug)
-				<< "Color space changed from "
-				<< ColorSpace::toString(cfg.colorSpace) << " to "
-				<< ColorSpace::toString(format.colorSpace);
-		}
-
-		cfg.colorSpace = format.colorSpace;
-
-		cfg.stride = format.planes[0].bpl;
-		cfg.frameSize = format.planes[0].size;
-	}
-
-	return status;
-}
-
-PipelineHandlerRPi::PipelineHandlerRPi(CameraManager *manager)
-	: PipelineHandler(manager)
-{
-}
-
-std::unique_ptr<CameraConfiguration>
-PipelineHandlerRPi::generateConfiguration(Camera *camera, const StreamRoles &roles)
-{
-	RPiCameraData *data = cameraData(camera);
-	std::unique_ptr<CameraConfiguration> config =
-		std::make_unique<RPiCameraConfiguration>(data);
-	V4L2SubdeviceFormat sensorFormat;
-	unsigned int bufferCount;
-	PixelFormat pixelFormat;
-	V4L2VideoDevice::Formats fmts;
-	Size size;
-	std::optional<ColorSpace> colorSpace;
-
-	if (roles.empty())
-		return config;
-
-	unsigned int rawCount = 0;
-	unsigned int outCount = 0;
-	Size sensorSize = data->sensor_->resolution();
-	for (const StreamRole role : roles) {
-		switch (role) {
-		case StreamRole::Raw:
-			size = sensorSize;
-			sensorFormat = findBestFormat(data->sensorFormats_, size, defaultRawBitDepth);
-			pixelFormat = mbusCodeToPixelFormat(sensorFormat.mbus_code,
-							    BayerFormat::Packing::CSI2);
-			ASSERT(pixelFormat.isValid());
-			colorSpace = ColorSpace::Raw;
-			bufferCount = 2;
-			rawCount++;
-			break;
-
-		case StreamRole::StillCapture:
-			fmts = data->isp_[Isp::Output0].dev()->formats();
-			pixelFormat = formats::NV12;
-			/*
-			 * Still image codecs usually expect the sYCC color space.
-			 * Even RGB codecs will be fine as the RGB we get with the
-			 * sYCC color space is the same as sRGB.
-			 */
-			colorSpace = ColorSpace::Sycc;
-			/* Return the largest sensor resolution. */
-			size = sensorSize;
-			bufferCount = 1;
-			outCount++;
-			break;
-
-		case StreamRole::VideoRecording:
-			/*
-			 * The colour denoise algorithm requires the analysis
-			 * image, produced by the second ISP output, to be in
-			 * YUV420 format. Select this format as the default, to
-			 * maximize chances that it will be picked by
-			 * applications and enable usage of the colour denoise
-			 * algorithm.
-			 */
-			fmts = data->isp_[Isp::Output0].dev()->formats();
-			pixelFormat = formats::YUV420;
-			/*
-			 * Choose a color space appropriate for video recording.
-			 * Rec.709 will be a good default for HD resolutions.
-			 */
-			colorSpace = ColorSpace::Rec709;
-			size = { 1920, 1080 };
-			bufferCount = 4;
-			outCount++;
-			break;
-
-		case StreamRole::Viewfinder:
-			fmts = data->isp_[Isp::Output0].dev()->formats();
-			pixelFormat = formats::ARGB8888;
-			colorSpace = ColorSpace::Sycc;
-			size = { 800, 600 };
-			bufferCount = 4;
-			outCount++;
-			break;
-
-		default:
-			LOG(RPI, Error) << "Requested stream role not supported: "
-					<< role;
-			return nullptr;
-		}
-
-		if (rawCount > 1 || outCount > 2) {
-			LOG(RPI, Error) << "Invalid stream roles requested";
-			return nullptr;
-		}
-
-		std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
-		if (role == StreamRole::Raw) {
-			/* Translate the MBUS codes to a PixelFormat. */
-			for (const auto &format : data->sensorFormats_) {
-				PixelFormat pf = mbusCodeToPixelFormat(format.first,
-								       BayerFormat::Packing::CSI2);
-				if (pf.isValid())
-					deviceFormats.emplace(std::piecewise_construct,	std::forward_as_tuple(pf),
-						std::forward_as_tuple(format.second.begin(), format.second.end()));
-			}
-		} else {
-			/*
-			 * Translate the V4L2PixelFormat to PixelFormat. Note that we
-			 * limit the recommended largest ISP output size to match the
-			 * sensor resolution.
-			 */
-			for (const auto &format : fmts) {
-				PixelFormat pf = format.first.toPixelFormat();
-				if (pf.isValid()) {
-					const SizeRange &ispSizes = format.second[0];
-					deviceFormats[pf].emplace_back(ispSizes.min, sensorSize,
-								       ispSizes.hStep, ispSizes.vStep);
-				}
-			}
-		}
-
-		/* Add the stream format based on the device node used for the use case. */
-		StreamFormats formats(deviceFormats);
-		StreamConfiguration cfg(formats);
-		cfg.size = size;
-		cfg.pixelFormat = pixelFormat;
-		cfg.colorSpace = colorSpace;
-		cfg.bufferCount = bufferCount;
-		config->addConfiguration(cfg);
-	}
-
-	config->validate();
-
-	return config;
-}
-
-int PipelineHandlerRPi::configure(Camera *camera, CameraConfiguration *config)
-{
-	RPiCameraData *data = cameraData(camera);
-	int ret;
-
-	/* Start by freeing all buffers and resetting the Unicam and ISP stream states. */
-	data->freeBuffers();
-	for (auto const stream : data->streams_)
-		stream->setExternal(false);
-
-	BayerFormat::Packing packing = BayerFormat::Packing::CSI2;
-	Size maxSize, sensorSize;
-	unsigned int maxIndex = 0;
-	bool rawStream = false;
-	unsigned int bitDepth = defaultRawBitDepth;
-
-	/*
-	 * Look for the size of the RAW stream (if given), as well as the
-	 * largest ISP output size.
-	 */
-	for (unsigned i = 0; i < config->size(); i++) {
-		StreamConfiguration &cfg = config->at(i);
-
-		if (isRaw(cfg.pixelFormat)) {
-			/*
-			 * If we have been given a RAW stream, use that size
-			 * for setting up the sensor.
-			 */
-			sensorSize = cfg.size;
-			rawStream = true;
-			/* Check if the user has explicitly set an unpacked format. */
-			BayerFormat bayerFormat = BayerFormat::fromPixelFormat(cfg.pixelFormat);
-			packing = bayerFormat.packing;
-			bitDepth = bayerFormat.bitDepth;
-		} else {
-			if (cfg.size > maxSize) {
-				maxSize = cfg.size;
-				maxIndex = i;
-			}
-		}
-	}
-
-	/*
-	 * Calculate the best sensor mode we can use based on the user's
-	 * request, and apply it to the sensor with the cached transform, if
-	 * any.
-	 */
-	V4L2SubdeviceFormat sensorFormat = findBestFormat(data->sensorFormats_, rawStream ? sensorSize : maxSize, bitDepth);
-	const RPiCameraConfiguration *rpiConfig = static_cast<const RPiCameraConfiguration *>(config);
-	ret = data->sensor_->setFormat(&sensorFormat, rpiConfig->combinedTransform_);
-	if (ret)
-		return ret;
-
-	V4L2VideoDevice *unicam = data->unicam_[Unicam::Image].dev();
-	V4L2DeviceFormat unicamFormat = toV4L2DeviceFormat(unicam, sensorFormat, packing);
-	ret = unicam->setFormat(&unicamFormat);
-	if (ret)
-		return ret;
-
-	LOG(RPI, Info) << "Sensor: " << camera->id()
-		       << " - Selected sensor format: " << sensorFormat
-		       << " - Selected unicam format: " << unicamFormat;
-
-	ret = data->isp_[Isp::Input].dev()->setFormat(&unicamFormat);
-	if (ret)
-		return ret;
-
-	/*
-	 * See which streams are requested, and route the user
-	 * StreamConfiguration appropriately.
-	 */
-	V4L2DeviceFormat format;
-	bool output0Set = false, output1Set = false;
-	for (unsigned i = 0; i < config->size(); i++) {
-		StreamConfiguration &cfg = config->at(i);
-
-		if (isRaw(cfg.pixelFormat)) {
-			cfg.setStream(&data->unicam_[Unicam::Image]);
-			data->unicam_[Unicam::Image].setExternal(true);
-			continue;
-		}
-
-		/* The largest resolution gets routed to the ISP Output 0 node. */
-		RPi::Stream *stream = i == maxIndex ? &data->isp_[Isp::Output0]
-						    : &data->isp_[Isp::Output1];
-
-		V4L2PixelFormat fourcc = stream->dev()->toV4L2PixelFormat(cfg.pixelFormat);
-		format.size = cfg.size;
-		format.fourcc = fourcc;
-		format.colorSpace = cfg.colorSpace;
-
-		LOG(RPI, Debug) << "Setting " << stream->name() << " to "
-				<< format;
-
-		ret = stream->dev()->setFormat(&format);
-		if (ret)
-			return -EINVAL;
-
-		if (format.size != cfg.size || format.fourcc != fourcc) {
-			LOG(RPI, Error)
-				<< "Failed to set requested format on " << stream->name()
-				<< ", returned " << format;
-			return -EINVAL;
-		}
-
-		LOG(RPI, Debug)
-			<< "Stream " << stream->name() << " has color space "
-			<< ColorSpace::toString(cfg.colorSpace);
-
-		cfg.setStream(stream);
-		stream->setExternal(true);
-
-		if (i != maxIndex)
-			output1Set = true;
-		else
-			output0Set = true;
-	}
-
-	/*
-	 * If ISP::Output0 stream has not been configured by the application,
-	 * we must allow the hardware to generate an output so that the data
-	 * flow in the pipeline handler remains consistent, and we still generate
-	 * statistics for the IPA to use. So enable the output at a very low
-	 * resolution for internal use.
-	 *
-	 * \todo Allow the pipeline to work correctly without Output0 and only
-	 * statistics coming from the hardware.
-	 */
-	if (!output0Set) {
-		V4L2VideoDevice *dev = data->isp_[Isp::Output0].dev();
-
-		maxSize = Size(320, 240);
-		format = {};
-		format.size = maxSize;
-		format.fourcc = dev->toV4L2PixelFormat(formats::YUV420);
-		/* No one asked for output, so the color space doesn't matter. */
-		format.colorSpace = ColorSpace::Sycc;
-		ret = dev->setFormat(&format);
-		if (ret) {
-			LOG(RPI, Error)
-				<< "Failed to set default format on ISP Output0: "
-				<< ret;
-			return -EINVAL;
-		}
-
-		LOG(RPI, Debug) << "Defaulting ISP Output0 format to "
-				<< format;
-	}
-
-	/*
-	 * If ISP::Output1 stream has not been requested by the application, we
-	 * set it up for internal use now. This second stream will be used for
-	 * fast colour denoise, and must be a quarter of the resolution of the
-	 * ISP::Output0 stream. We also limit the maximum size to 1200 pixels in
-	 * the larger dimension, to avoid being wasteful with buffer allocations
-	 * and memory bandwidth.
-	 *
-	 * \todo If Output 1 format is not YUV420, Output 1 ought to be disabled as
-	 * colour denoise will not run.
-	 */
-	if (!output1Set) {
-		V4L2VideoDevice *dev = data->isp_[Isp::Output1].dev();
-
-		V4L2DeviceFormat output1Format;
-		constexpr Size maxDimensions(1200, 1200);
-		const Size limit = maxDimensions.boundedToAspectRatio(format.size);
-
-		output1Format.size = (format.size / 2).boundedTo(limit).alignedDownTo(2, 2);
-		output1Format.colorSpace = format.colorSpace;
-		output1Format.fourcc = dev->toV4L2PixelFormat(formats::YUV420);
-
-		LOG(RPI, Debug) << "Setting ISP Output1 (internal) to "
-				<< output1Format;
-
-		ret = dev->setFormat(&output1Format);
-		if (ret) {
-			LOG(RPI, Error) << "Failed to set format on ISP Output1: "
-					<< ret;
-			return -EINVAL;
-		}
-	}
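
The Output1 sizing rule just above is easy to sanity-check by hand. The standalone snippet below re-derives it for a 1920x1080 Output0: the 1200x1200 cap bounded to the 16:9 aspect ratio gives 1200x675, half of Output0 is 960x540, which already fits, and even alignment leaves it unchanged.

```cpp
// Standalone arithmetic check of the internal Output1 sizing rule:
// half the Output0 size, capped to 1200 pixels in the larger dimension
// (preserving aspect ratio), then aligned down to even dimensions.
#include <algorithm>
#include <iostream>

int main()
{
	const unsigned int w0 = 1920, h0 = 1080; /* Output0 size */
	constexpr unsigned int maxDim = 1200;

	/* Bound 1200x1200 to the Output0 aspect ratio: 1200x675 here. */
	unsigned int limitW = maxDim, limitH = maxDim;
	if (w0 >= h0)
		limitH = maxDim * h0 / w0;
	else
		limitW = maxDim * w0 / h0;

	/* Half of Output0, bounded to the limit, aligned down to even. */
	const unsigned int w1 = std::min(w0 / 2, limitW) & ~1u;
	const unsigned int h1 = std::min(h0 / 2, limitH) & ~1u;

	std::cout << "Output1: " << w1 << "x" << h1 << "\n"; /* 960x540 */
}
```
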
-
-	/* ISP statistics output format. */
-	format = {};
-	format.fourcc = V4L2PixelFormat(V4L2_META_FMT_BCM2835_ISP_STATS);
-	ret = data->isp_[Isp::Stats].dev()->setFormat(&format);
-	if (ret) {
-		LOG(RPI, Error) << "Failed to set format on ISP stats stream: "
-				<< format;
-		return ret;
-	}
-
-	/* Figure out the smallest selection the ISP will allow. */
-	Rectangle testCrop(0, 0, 1, 1);
-	data->isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &testCrop);
-	data->ispMinCropSize_ = testCrop.size();
-
-	/* Adjust aspect ratio by providing crops on the input image. */
-	Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
-	Rectangle crop = size.centeredTo(Rectangle(unicamFormat.size).center());
-	Rectangle defaultCrop = crop;
-	data->ispCrop_ = crop;
-
-	data->isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &crop);
-
-	ipa::RPi::ConfigResult result;
-	ret = data->configureIPA(config, &result);
-	if (ret)
-		LOG(RPI, Error) << "Failed to configure the IPA: " << ret;
-
-	/*
-	 * Set the scaler crop to the value we are using (scaled to native sensor
-	 * coordinates).
-	 */
-	data->scalerCrop_ = data->scaleIspCrop(data->ispCrop_);
-
-	/*
-	 * Configure the Unicam embedded data output format only if the sensor
-	 * supports it.
-	 */
-	if (data->sensorMetadata_) {
-		V4L2SubdeviceFormat embeddedFormat;
-
-		data->sensor_->device()->getFormat(1, &embeddedFormat);
-		format.fourcc = V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);
-		format.planes[0].size = embeddedFormat.size.width * embeddedFormat.size.height;
-
-		LOG(RPI, Debug) << "Setting embedded data format.";
-		ret = data->unicam_[Unicam::Embedded].dev()->setFormat(&format);
-		if (ret) {
-			LOG(RPI, Error) << "Failed to set format on Unicam embedded: "
-					<< format;
-			return ret;
-		}
-	}
-
-	/*
-	 * Update the ScalerCropMaximum to the correct value for this camera mode.
-	 * For us, it's the same as the "analogue crop".
-	 *
-	 * \todo Make this property the ScalerCrop maximum value when dynamic
-	 * controls are available and set it at validate() time
-	 */
-	data->properties_.set(properties::ScalerCropMaximum, data->sensorInfo_.analogCrop);
-
-	/* Store the mode sensitivity for the application. */
-	data->properties_.set(properties::SensorSensitivity, result.modeSensitivity);
-
-	/* Update the controls that the Raspberry Pi IPA can handle. */
-	ControlInfoMap::Map ctrlMap;
-	for (auto const &c : result.controlInfo)
-		ctrlMap.emplace(c.first, c.second);
-
-	/* Add the ScalerCrop control limits based on the current mode. */
-	Rectangle ispMinCrop = data->scaleIspCrop(Rectangle(data->ispMinCropSize_));
-	defaultCrop = data->scaleIspCrop(defaultCrop);
-	ctrlMap[&controls::ScalerCrop] = ControlInfo(ispMinCrop, data->sensorInfo_.analogCrop, defaultCrop);
-
-	data->controlInfo_ = ControlInfoMap(std::move(ctrlMap), result.controlInfo.idmap());
-
-	/* Setup the Video Mux/Bridge entities. */
-	for (auto &[device, link] : data->bridgeDevices_) {
-		/*
-		 * Start by disabling all the sink pad links on the devices in the
-		 * cascade, with the exception of the link that connects this device.
-		 */
-		for (const MediaPad *p : device->entity()->pads()) {
-			if (!(p->flags() & MEDIA_PAD_FL_SINK))
-				continue;
-
-			for (MediaLink *l : p->links()) {
-				if (l != link)
-					l->setEnabled(false);
-			}
-		}
-
-		/*
-		 * Next, enable the entity -> entity links, and set up the pad format.
-		 *
-		 * \todo Some bridge devices may change the media bus code, so we
-		 * ought to read the source pad format and propagate it to the sink pad.
-		 */
-		link->setEnabled(true);
-		const MediaPad *sinkPad = link->sink();
-		ret = device->setFormat(sinkPad->index(), &sensorFormat);
-		if (ret) {
-			LOG(RPI, Error) << "Failed to set format on " << device->entity()->name()
-					<< " pad " << sinkPad->index()
-					<< " with format  " << format
-					<< ": " << ret;
-			return ret;
-		}
-
-		LOG(RPI, Debug) << "Configured media link on device " << device->entity()->name()
-				<< " on pad " << sinkPad->index();
-	}
-
-	return ret;
-}
-
-int PipelineHandlerRPi::exportFrameBuffers([[maybe_unused]] Camera *camera, Stream *stream,
-					   std::vector<std::unique_ptr<FrameBuffer>> *buffers)
-{
-	RPi::Stream *s = static_cast<RPi::Stream *>(stream);
-	unsigned int count = stream->configuration().bufferCount;
-	int ret = s->dev()->exportBuffers(count, buffers);
-
-	s->setExportedBuffers(buffers);
-
-	return ret;
-}
-
-int PipelineHandlerRPi::start(Camera *camera, const ControlList *controls)
-{
-	RPiCameraData *data = cameraData(camera);
-	int ret;
-
-	/* Check if a ScalerCrop control was specified. */
-	if (controls)
-		data->applyScalerCrop(*controls);
-
-	/* Start the IPA. */
-	ipa::RPi::StartResult result;
-	data->ipa_->start(controls ? *controls : ControlList{ controls::controls },
-			  &result);
-
-	/* Apply any gain/exposure settings that the IPA may have passed back. */
-	if (!result.controls.empty())
-		data->setSensorControls(result.controls);
-
-	/* Configure the number of dropped frames required on startup. */
-	data->dropFrameCount_ = data->config_.disableStartupFrameDrops
-				? 0 : result.dropFrameCount;
-
-	for (auto const stream : data->streams_)
-		stream->resetBuffers();
-
-	if (!data->buffersAllocated_) {
-		/* Allocate buffers for internal pipeline usage. */
-		ret = prepareBuffers(camera);
-		if (ret) {
-			LOG(RPI, Error) << "Failed to allocate buffers";
-			data->freeBuffers();
-			stop(camera);
-			return ret;
-		}
-		data->buffersAllocated_ = true;
-	}
-
-	/* We need to set the dropFrameCount_ before queueing buffers. */
-	ret = queueAllBuffers(camera);
-	if (ret) {
-		LOG(RPI, Error) << "Failed to queue buffers";
-		stop(camera);
-		return ret;
-	}
-
-	/* Enable SOF event generation. */
-	data->unicam_[Unicam::Image].dev()->setFrameStartEnabled(true);
-
-	/*
-	 * Reset the delayed controls with the gain and exposure values set by
-	 * the IPA.
-	 */
-	data->delayedCtrls_->reset(0);
-
-	data->state_ = RPiCameraData::State::Idle;
-
-	/* Start all streams. */
-	for (auto const stream : data->streams_) {
-		ret = stream->dev()->streamOn();
-		if (ret) {
-			stop(camera);
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
-void PipelineHandlerRPi::stopDevice(Camera *camera)
-{
-	RPiCameraData *data = cameraData(camera);
-
-	data->state_ = RPiCameraData::State::Stopped;
-
-	/* Disable SOF event generation. */
-	data->unicam_[Unicam::Image].dev()->setFrameStartEnabled(false);
-
-	for (auto const stream : data->streams_)
-		stream->dev()->streamOff();
-
-	data->clearIncompleteRequests();
-	data->bayerQueue_ = {};
-	data->embeddedQueue_ = {};
-
-	/* Stop the IPA. */
-	data->ipa_->stop();
-}
-
-int PipelineHandlerRPi::queueRequestDevice(Camera *camera, Request *request)
-{
-	RPiCameraData *data = cameraData(camera);
-
-	if (!data->isRunning())
-		return -EINVAL;
-
-	LOG(RPI, Debug) << "queueRequestDevice: New request.";
-
-	/* Push all buffers supplied in the Request to the respective streams. */
-	for (auto stream : data->streams_) {
-		if (!stream->isExternal())
-			continue;
-
-		FrameBuffer *buffer = request->findBuffer(stream);
-		if (buffer && !stream->getBufferId(buffer)) {
-			/*
-			 * This buffer is not recognised, so it must have been allocated
-			 * outside the v4l2 device. Store it in the stream buffer list
-			 * so we can track it.
-			 */
-			stream->setExternalBuffer(buffer);
-		}
-
-		/*
-		 * If no buffer is provided by the request for this stream, we
-		 * queue a nullptr to the stream to signify that it must use an
-		 * internally allocated buffer for this capture request. This
-		 * buffer will not be given back to the application, but is used
-		 * to support the internal pipeline flow.
-		 *
-		 * The below queueBuffer() call will do nothing if there are not
-		 * enough internal buffers allocated, but this will be handled by
-		 * queuing the request for buffers in the RPiStream object.
-		 */
-		int ret = stream->queueBuffer(buffer);
-		if (ret)
-			return ret;
-	}
-
-	/* Push the request to the back of the queue. */
-	data->requestQueue_.push_back(request);
-	data->handleState();
-
-	return 0;
-}
-
-bool PipelineHandlerRPi::match(DeviceEnumerator *enumerator)
-{
-	constexpr unsigned int numUnicamDevices = 2;
-
-	/*
-	 * Loop over all Unicam instances, but return as soon as a match is found.
-	 * This ensures we correctly enumerate the camera when an instance of
-	 * Unicam has registered with the media controller but has not registered
-	 * device nodes due to a sensor subdevice failure.
-	 */
-	for (unsigned int i = 0; i < numUnicamDevices; i++) {
-		DeviceMatch unicam("unicam");
-		MediaDevice *unicamDevice = acquireMediaDevice(enumerator, unicam);
-
-		if (!unicamDevice) {
-			LOG(RPI, Debug) << "Unable to acquire a Unicam instance";
-			continue;
-		}
-
-		DeviceMatch isp("bcm2835-isp");
-		MediaDevice *ispDevice = acquireMediaDevice(enumerator, isp);
-
-		if (!ispDevice) {
-			LOG(RPI, Debug) << "Unable to acquire ISP instance";
-			continue;
-		}
-
-		/*
-		 * The loop below is used to register multiple cameras behind one or more
-		 * video mux devices that are attached to a particular Unicam instance.
-		 * Obviously these cameras cannot be used simultaneously.
-		 */
-		unsigned int numCameras = 0;
-		for (MediaEntity *entity : unicamDevice->entities()) {
-			if (entity->function() != MEDIA_ENT_F_CAM_SENSOR)
-				continue;
-
-			int ret = registerCamera(unicamDevice, ispDevice, entity);
-			if (ret)
-				LOG(RPI, Error) << "Failed to register camera "
-						<< entity->name() << ": " << ret;
-			else
-				numCameras++;
-		}
-
-		if (numCameras)
-			return true;
-	}
-
-	return false;
-}
-
-void PipelineHandlerRPi::releaseDevice(Camera *camera)
-{
-	RPiCameraData *data = cameraData(camera);
-	data->freeBuffers();
-}
-
-int PipelineHandlerRPi::registerCamera(MediaDevice *unicam, MediaDevice *isp, MediaEntity *sensorEntity)
-{
-	std::unique_ptr<RPiCameraData> data = std::make_unique<RPiCameraData>(this);
-
-	if (!data->dmaHeap_.isValid())
-		return -ENOMEM;
-
-	MediaEntity *unicamImage = unicam->getEntityByName("unicam-image");
-	MediaEntity *ispOutput0 = isp->getEntityByName("bcm2835-isp0-output0");
-	MediaEntity *ispCapture1 = isp->getEntityByName("bcm2835-isp0-capture1");
-	MediaEntity *ispCapture2 = isp->getEntityByName("bcm2835-isp0-capture2");
-	MediaEntity *ispCapture3 = isp->getEntityByName("bcm2835-isp0-capture3");
-
-	if (!unicamImage || !ispOutput0 || !ispCapture1 || !ispCapture2 || !ispCapture3)
-		return -ENOENT;
-
-	/* Locate and open the unicam video streams. */
-	data->unicam_[Unicam::Image] = RPi::Stream("Unicam Image", unicamImage);
-
-	/* An embedded data node will not be present if the sensor does not support it. */
-	MediaEntity *unicamEmbedded = unicam->getEntityByName("unicam-embedded");
-	if (unicamEmbedded) {
-		data->unicam_[Unicam::Embedded] = RPi::Stream("Unicam Embedded", unicamEmbedded);
-		data->unicam_[Unicam::Embedded].dev()->bufferReady.connect(data.get(),
-									   &RPiCameraData::unicamBufferDequeue);
-	}
-
-	/* Tag the ISP input stream as an import stream. */
-	data->isp_[Isp::Input] = RPi::Stream("ISP Input", ispOutput0, true);
-	data->isp_[Isp::Output0] = RPi::Stream("ISP Output0", ispCapture1);
-	data->isp_[Isp::Output1] = RPi::Stream("ISP Output1", ispCapture2);
-	data->isp_[Isp::Stats] = RPi::Stream("ISP Stats", ispCapture3);
-
-	/* Wire up all the buffer connections. */
-	data->unicam_[Unicam::Image].dev()->dequeueTimeout.connect(data.get(), &RPiCameraData::unicamTimeout);
-	data->unicam_[Unicam::Image].dev()->frameStart.connect(data.get(), &RPiCameraData::frameStarted);
-	data->unicam_[Unicam::Image].dev()->bufferReady.connect(data.get(), &RPiCameraData::unicamBufferDequeue);
-	data->isp_[Isp::Input].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispInputDequeue);
-	data->isp_[Isp::Output0].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispOutputDequeue);
-	data->isp_[Isp::Output1].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispOutputDequeue);
-	data->isp_[Isp::Stats].dev()->bufferReady.connect(data.get(), &RPiCameraData::ispOutputDequeue);
-
-	data->sensor_ = std::make_unique<CameraSensor>(sensorEntity);
-	if (!data->sensor_)
-		return -EINVAL;
-
-	if (data->sensor_->init())
-		return -EINVAL;
-
-	/*
-	 * Enumerate all the Video Mux/Bridge devices across the sensor -> unicam
-	 * chain. There may be a cascade of devices in this chain!
-	 */
-	MediaLink *link = sensorEntity->getPadByIndex(0)->links()[0];
-	data->enumerateVideoDevices(link);
-
-	data->sensorFormats_ = populateSensorFormats(data->sensor_);
-
-	ipa::RPi::InitResult result;
-	if (data->loadIPA(&result)) {
-		LOG(RPI, Error) << "Failed to load a suitable IPA library";
-		return -EINVAL;
-	}
-
-	if (result.sensorConfig.sensorMetadata ^ !!unicamEmbedded) {
-		LOG(RPI, Warning) << "Mismatch between Unicam and CamHelper for embedded data usage!";
-		result.sensorConfig.sensorMetadata = false;
-		if (unicamEmbedded)
-			data->unicam_[Unicam::Embedded].dev()->bufferReady.disconnect();
-	}
-
-	/*
-	 * Open all Unicam and ISP streams. The exception is the embedded data
-	 * stream, which only gets opened below if the IPA reports that the sensor
-	 * supports embedded data.
-	 *
-	 * The below grouping is just for convenience so that we can easily
-	 * iterate over all streams in one go.
-	 */
-	data->streams_.push_back(&data->unicam_[Unicam::Image]);
-	if (result.sensorConfig.sensorMetadata)
-		data->streams_.push_back(&data->unicam_[Unicam::Embedded]);
-
-	for (auto &stream : data->isp_)
-		data->streams_.push_back(&stream);
-
-	for (auto stream : data->streams_) {
-		int ret = stream->dev()->open();
-		if (ret)
-			return ret;
-	}
-
-	if (!data->unicam_[Unicam::Image].dev()->caps().hasMediaController()) {
-		LOG(RPI, Error) << "Unicam driver does not use the MediaController, please update your kernel!";
-		return -EINVAL;
-	}
-
-	/*
-	 * Set up our delayed control writer with the sensor default
-	 * gain and exposure delays. Mark VBLANK for priority write.
-	 */
-	std::unordered_map<uint32_t, RPi::DelayedControls::ControlParams> params = {
-		{ V4L2_CID_ANALOGUE_GAIN, { result.sensorConfig.gainDelay, false } },
-		{ V4L2_CID_EXPOSURE, { result.sensorConfig.exposureDelay, false } },
-		{ V4L2_CID_HBLANK, { result.sensorConfig.hblankDelay, false } },
-		{ V4L2_CID_VBLANK, { result.sensorConfig.vblankDelay, true } }
-	};
-	data->delayedCtrls_ = std::make_unique<RPi::DelayedControls>(data->sensor_->device(), params);
-	data->sensorMetadata_ = result.sensorConfig.sensorMetadata;
-
-	/* Register initial controls that the Raspberry Pi IPA can handle. */
-	data->controlInfo_ = std::move(result.controlInfo);
-
-	/* Initialize the camera properties. */
-	data->properties_ = data->sensor_->properties();
-
-	/*
-	 * The V4L2_CID_NOTIFY_GAINS control, if present, is used to inform the
-	 * sensor of the colour gains. It is defined to be a linear gain where
-	 * the default value represents a gain of exactly one.
-	 */
-	auto it = data->sensor_->controls().find(V4L2_CID_NOTIFY_GAINS);
-	if (it != data->sensor_->controls().end())
-		data->notifyGainsUnity_ = it->second.def().get<int32_t>();
-
-	/*
-	 * Set a default value for the ScalerCropMaximum property to show
-	 * that we support its use; however, initialise it to zero because
-	 * it's not meaningful until a camera mode has been chosen.
-	 */
-	data->properties_.set(properties::ScalerCropMaximum, Rectangle{});
-
-	/*
-	 * We cache two things about the sensor in relation to transforms
-	 * (meaning horizontal and vertical flips): if they affect the Bayer
-	 * ordering, and what the "native" Bayer order is, when no transforms
-	 * are applied.
-	 *
-	 * We note that the sensor's cached list of supported formats is
-	 * already in the "native" order, with any flips having been undone.
-	 */
-	const V4L2Subdevice *sensor = data->sensor_->device();
-	const struct v4l2_query_ext_ctrl *hflipCtrl = sensor->controlInfo(V4L2_CID_HFLIP);
-	if (hflipCtrl) {
-		/* We assume it will support vflips too... */
-		data->flipsAlterBayerOrder_ = hflipCtrl->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT;
-	}
-
-	/* Look for a valid Bayer format. */
-	BayerFormat bayerFormat;
-	for (const auto &iter : data->sensorFormats_) {
-		bayerFormat = BayerFormat::fromMbusCode(iter.first);
-		if (bayerFormat.isValid())
-			break;
-	}
-
-	if (!bayerFormat.isValid()) {
-		LOG(RPI, Error) << "No Bayer format found";
-		return -EINVAL;
-	}
-	data->nativeBayerOrder_ = bayerFormat.order;
-
-	/*
-	 * List the available streams an application may request. At present, we
-	 * do not advertise Unicam Embedded and ISP Statistics streams, as there
-	 * is no mechanism for the application to request non-image buffer formats.
-	 */
-	std::set<Stream *> streams;
-	streams.insert(&data->unicam_[Unicam::Image]);
-	streams.insert(&data->isp_[Isp::Output0]);
-	streams.insert(&data->isp_[Isp::Output1]);
-
-	int ret = data->loadPipelineConfiguration();
-	if (ret) {
-		LOG(RPI, Error) << "Unable to load pipeline configuration";
-		return ret;
-	}
-
-	/* Create and register the camera. */
-	const std::string &id = data->sensor_->id();
-	std::shared_ptr<Camera> camera =
-		Camera::create(std::move(data), id, streams);
-	PipelineHandler::registerCamera(std::move(camera));
-
-	LOG(RPI, Info) << "Registered camera " << id
-		       << " to Unicam device " << unicam->deviceNode()
-		       << " and ISP device " << isp->deviceNode();
-	return 0;
-}
-
-int PipelineHandlerRPi::queueAllBuffers(Camera *camera)
-{
-	RPiCameraData *data = cameraData(camera);
-	int ret;
-
-	for (auto const stream : data->streams_) {
-		if (!stream->isExternal()) {
-			ret = stream->queueAllBuffers();
-			if (ret < 0)
-				return ret;
-		} else {
-			/*
-			 * For external streams, we must queue up a set of internal
-			 * buffers to handle the number of drop frames requested by
-			 * the IPA. This is done by passing nullptr in queueBuffer().
-			 *
-			 * The below queueBuffer() call will do nothing if there
-			 * are not enough internal buffers allocated, but this will
-			 * be handled by queuing the request for buffers in the
-			 * RPiStream object.
-			 */
-			unsigned int i;
-			for (i = 0; i < data->dropFrameCount_; i++) {
-				ret = stream->queueBuffer(nullptr);
-				if (ret)
-					return ret;
-			}
-		}
-	}
-
-	return 0;
-}
-
-int PipelineHandlerRPi::prepareBuffers(Camera *camera)
-{
-	RPiCameraData *data = cameraData(camera);
-	unsigned int numRawBuffers = 0;
-	int ret;
-
-	for (Stream *s : camera->streams()) {
-		if (isRaw(s->configuration().pixelFormat)) {
-			numRawBuffers = s->configuration().bufferCount;
-			break;
-		}
-	}
-
-	/* Decide how many internal buffers to allocate. */
-	for (auto const stream : data->streams_) {
-		unsigned int numBuffers;
-		/*
-		 * For Unicam, allocate a minimum number of buffers for internal
-		 * use as we want to avoid any frame drops.
-		 */
-		const unsigned int minBuffers = data->config_.minTotalUnicamBuffers;
-		if (stream == &data->unicam_[Unicam::Image]) {
-			/*
-			 * If an application has configured a RAW stream, allocate
-			 * additional buffers to make up the minimum, but ensure
-			 * we have at least minUnicamBuffers of internal buffers
-			 * to use to minimise frame drops.
-			 */
-			numBuffers = std::max<int>(data->config_.minUnicamBuffers,
-						   minBuffers - numRawBuffers);
-		} else if (stream == &data->isp_[Isp::Input]) {
-			/*
-			 * ISP input buffers are imported from Unicam, so follow
-			 * similar logic as above to count all the RAW buffers
-			 * available.
-			 */
-			numBuffers = numRawBuffers +
-				     std::max<int>(data->config_.minUnicamBuffers,
-						   minBuffers - numRawBuffers);
-
-		} else if (stream == &data->unicam_[Unicam::Embedded]) {
-			/*
-			 * Embedded data buffers are (currently) for internal use,
-			 * so allocate the minimum required to avoid frame drops.
-			 */
-			numBuffers = minBuffers;
-		} else {
-			/*
-			 * Since the ISP runs synchronous with the IPA and requests,
-			 * we only ever need one set of internal buffers. Any buffers
-			 * the application wants to hold onto will already be exported
-			 * through PipelineHandlerRPi::exportFrameBuffers().
-			 */
-			numBuffers = 1;
-		}
-
-		ret = stream->prepareBuffers(numBuffers);
-		if (ret < 0)
-			return ret;
-	}
-
-	/*
-	 * Pass the stats and embedded data buffers to the IPA. No other
-	 * buffers need to be passed.
-	 */
-	mapBuffers(camera, data->isp_[Isp::Stats].getBuffers(), RPi::MaskStats);
-	if (data->sensorMetadata_)
-		mapBuffers(camera, data->unicam_[Unicam::Embedded].getBuffers(),
-			   RPi::MaskEmbeddedData);
-
-	return 0;
-}
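
The buffer-count policy in prepareBuffers() trades memory for resilience to frame drops. With the default configuration (minUnicamBuffers = 2, minTotalUnicamBuffers = 4), the standalone check below shows the resulting counts for a few RAW buffer requests:

```cpp
// Standalone check of the internal buffer-count policy: the Unicam Image
// stream tops the total up to the configured minimum, and the ISP input
// stream must cover every RAW buffer that can be in flight.
#include <algorithm>
#include <iostream>

int main()
{
	const int minUnicamBuffers = 2;
	const int minTotalUnicamBuffers = 4;

	const int rawCounts[] = { 0, 2, 6 };
	for (int numRawBuffers : rawCounts) {
		int unicamImage = std::max(minUnicamBuffers,
					   minTotalUnicamBuffers - numRawBuffers);
		int ispInput = numRawBuffers + unicamImage;
		std::cout << "raw=" << numRawBuffers
			  << " unicam-image=" << unicamImage
			  << " isp-input=" << ispInput << "\n";
	}
	/* Prints: raw=0 -> 4/4, raw=2 -> 2/4, raw=6 -> 2/8. */
}
```
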
-
-void PipelineHandlerRPi::mapBuffers(Camera *camera, const RPi::BufferMap &buffers, unsigned int mask)
-{
-	RPiCameraData *data = cameraData(camera);
-	std::vector<IPABuffer> bufferIds;
-	/*
-	 * Link the FrameBuffers with the id (key value) in the map stored in
-	 * the RPi stream object - along with an identifier mask.
-	 *
-	 * This will allow us to identify buffers passed between the pipeline
-	 * handler and the IPA.
-	 */
-	for (auto const &it : buffers) {
-		bufferIds.push_back(IPABuffer(mask | it.first,
-					       it.second->planes()));
-		data->bufferIds_.insert(mask | it.first);
-	}
-
-	data->ipa_->mapBuffers(bufferIds);
-}
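
mapBuffers() relies on a simple tagging convention: the stream mask occupies the high bits of the IPA buffer id and the per-stream index the low bits, so a single integer identifies a buffer unambiguously on both sides of the IPC boundary. A standalone illustration follows; the mask values here are hypothetical stand-ins, not the constants libcamera actually defines alongside RPi::MaskStats.

```cpp
// Illustration of the mask | index buffer-id scheme used by mapBuffers().
#include <cstdint>
#include <iostream>

constexpr uint32_t MaskStats = 0x010000;        /* hypothetical values */
constexpr uint32_t MaskEmbeddedData = 0x020000;
constexpr uint32_t MaskID = 0x00ffff;

int main()
{
	const uint32_t id = MaskStats | 3; /* stats buffer, index 3 */

	std::cout << "index " << (id & MaskID)
		  << ((id & MaskStats) ? " from the stats stream" : "")
		  << "\n";
}
```
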
-
-void RPiCameraData::freeBuffers()
-{
-	if (ipa_) {
-		/*
-		 * Copy the buffer ids from the unordered_set to a vector to
-		 * pass to the IPA.
-		 */
-		std::vector<unsigned int> bufferIds(bufferIds_.begin(),
-						    bufferIds_.end());
-		ipa_->unmapBuffers(bufferIds);
-		bufferIds_.clear();
-	}
-
-	for (auto const stream : streams_)
-		stream->releaseBuffers();
-
-	buffersAllocated_ = false;
-}
-
-void RPiCameraData::frameStarted(uint32_t sequence)
-{
-	LOG(RPI, Debug) << "frame start " << sequence;
-
-	/* Write any controls for the next frame as soon as we can. */
-	delayedCtrls_->applyControls(sequence);
-}
-
-int RPiCameraData::loadIPA(ipa::RPi::InitResult *result)
-{
-	ipa_ = IPAManager::createIPA<ipa::RPi::IPAProxyRPi>(pipe(), 1, 1);
-
-	if (!ipa_)
-		return -ENOENT;
-
-	ipa_->processStatsComplete.connect(this, &RPiCameraData::processStatsComplete);
-	ipa_->prepareIspComplete.connect(this, &RPiCameraData::prepareIspComplete);
-	ipa_->setIspControls.connect(this, &RPiCameraData::setIspControls);
-	ipa_->setDelayedControls.connect(this, &RPiCameraData::setDelayedControls);
-	ipa_->setLensControls.connect(this, &RPiCameraData::setLensControls);
-	ipa_->setCameraTimeout.connect(this, &RPiCameraData::setCameraTimeout);
-
-	/*
-	 * The configuration (tuning) file name is derived from the sensor name
-	 * unless the environment variable overrides it.
-	 */
-	std::string configurationFile;
-	char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_TUNING_FILE");
-	if (!configFromEnv || *configFromEnv == '\0') {
-		std::string model = sensor_->model();
-		if (isMonoSensor(sensor_))
-			model += "_mono";
-		configurationFile = ipa_->configurationFile(model + ".json", "rpi");
-	} else {
-		configurationFile = std::string(configFromEnv);
-	}
-
-	IPASettings settings(configurationFile, sensor_->model());
-	ipa::RPi::InitParams params;
-
-	params.lensPresent = !!sensor_->focusLens();
-	return ipa_->init(settings, params, result);
-}
-
-int RPiCameraData::configureIPA(const CameraConfiguration *config, ipa::RPi::ConfigResult *result)
-{
-	std::map<unsigned int, ControlInfoMap> entityControls;
-	ipa::RPi::ConfigParams params;
-
-	/* \todo Move passing of ispControls and lensControls to ipa::init() */
-	params.sensorControls = sensor_->controls();
-	params.ispControls = isp_[Isp::Input].dev()->controls();
-	if (sensor_->focusLens())
-		params.lensControls = sensor_->focusLens()->controls();
-
-	/* Always send the user transform to the IPA. */
-	params.transform = static_cast<unsigned int>(config->transform);
-
-	/* Allocate the lens shading table via dmaHeap and pass to the IPA. */
-	if (!lsTable_.isValid()) {
-		lsTable_ = SharedFD(dmaHeap_.alloc("ls_grid", ipa::RPi::MaxLsGridSize));
-		if (!lsTable_.isValid())
-			return -ENOMEM;
-
-		/*
-		 * Allow the IPA to mmap the LS table via the file descriptor.
-		 *
-		 * \todo Investigate if mapping the lens shading table buffer
-		 * could be handled with mapBuffers().
-		 */
-		params.lsTableHandle = lsTable_;
-	}
-
-	/* We store the IPACameraSensorInfo for digital zoom calculations. */
-	int ret = sensor_->sensorInfo(&sensorInfo_);
-	if (ret) {
-		LOG(RPI, Error) << "Failed to retrieve camera sensor info";
-		return ret;
-	}
-
-	/* Ready the IPA - it must know about the sensor resolution. */
-	ret = ipa_->configure(sensorInfo_, params, result);
-	if (ret < 0) {
-		LOG(RPI, Error) << "IPA configuration failed!";
-		return -EPIPE;
-	}
-
-	if (!result->controls.empty())
-		setSensorControls(result->controls);
-
-	return 0;
-}
-
-int RPiCameraData::loadPipelineConfiguration()
-{
-	config_ = {
-		.minUnicamBuffers = 2,
-		.minTotalUnicamBuffers = 4,
-		.disableStartupFrameDrops = false,
-		.unicamTimeoutValue = 0,
-	};
-
-	char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_CONFIG_FILE");
-	if (!configFromEnv || *configFromEnv == '\0')
-		return 0;
-
-	std::string filename = std::string(configFromEnv);
-	File file(filename);
-
-	if (!file.open(File::OpenModeFlag::ReadOnly)) {
-		LOG(RPI, Error) << "Failed to open configuration file '" << filename << "'";
-		return -EIO;
-	}
-
-	LOG(RPI, Info) << "Using configuration file '" << filename << "'";
-
-	std::unique_ptr<YamlObject> root = YamlParser::parse(file);
-	if (!root) {
-		LOG(RPI, Warning) << "Failed to parse configuration file, using defaults";
-		return 0;
-	}
-
-	std::optional<double> ver = (*root)["version"].get<double>();
-	if (!ver || *ver != 1.0) {
-		LOG(RPI, Error) << "Unexpected configuration file version reported";
-		return -EINVAL;
-	}
-
-	const YamlObject &phConfig = (*root)["pipeline_handler"];
-	config_.minUnicamBuffers =
-		phConfig["min_unicam_buffers"].get<unsigned int>(config_.minUnicamBuffers);
-	config_.minTotalUnicamBuffers =
-		phConfig["min_total_unicam_buffers"].get<unsigned int>(config_.minTotalUnicamBuffers);
-	config_.disableStartupFrameDrops =
-		phConfig["disable_startup_frame_drops"].get<bool>(config_.disableStartupFrameDrops);
-	config_.unicamTimeoutValue =
-		phConfig["unicam_timeout_value_ms"].get<unsigned int>(config_.unicamTimeoutValue);
-
-	if (config_.unicamTimeoutValue) {
-		/* Disable the IPA signal to control timeout and set the user requested value. */
-		ipa_->setCameraTimeout.disconnect();
-		unicam_[Unicam::Image].dev()->setDequeueTimeout(config_.unicamTimeoutValue * 1ms);
-	}
-
-	if (config_.minTotalUnicamBuffers < config_.minUnicamBuffers) {
-		LOG(RPI, Error) << "Invalid configuration: min_total_unicam_buffers must be >= min_unicam_buffers";
-		return -EINVAL;
-	}
-
-	if (config_.minTotalUnicamBuffers < 1) {
-		LOG(RPI, Error) << "Invalid configuration: min_total_unicam_buffers must be >= 1";
-		return -EINVAL;
-	}
-
-	return 0;
-}
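
loadPipelineConfiguration() follows a load-then-validate pattern: every key falls back to a compiled-in default, and cross-field constraints are only checked once parsing is done. A standalone sketch of that shape, with the YAML parser stubbed out by std::optional values (getOr() is a hypothetical stand-in for YamlObject::get<T>() with a default):

```cpp
// Sketch of the load-then-validate pattern; no real file is parsed.
#include <iostream>
#include <optional>

struct Config {
	unsigned int minUnicamBuffers = 2;
	unsigned int minTotalUnicamBuffers = 4;
};

template<typename T>
T getOr(const std::optional<T> &value, T fallback)
{
	return value.value_or(fallback);
}

int main()
{
	Config config;

	/* Pretend the file only sets min_total_unicam_buffers. */
	config.minUnicamBuffers =
		getOr<unsigned int>(std::nullopt, config.minUnicamBuffers);
	config.minTotalUnicamBuffers =
		getOr<unsigned int>(6, config.minTotalUnicamBuffers);

	/* Cross-field constraints are checked after all keys are read. */
	if (config.minTotalUnicamBuffers < 1 ||
	    config.minTotalUnicamBuffers < config.minUnicamBuffers) {
		std::cerr << "invalid configuration\n";
		return 1;
	}

	std::cout << "buffers: " << config.minUnicamBuffers << "/"
		  << config.minTotalUnicamBuffers << "\n";
}
```
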
-
-/*
- * enumerateVideoDevices() iterates over the Media Controller topology, starting
- * at the sensor and finishing at Unicam. For each sensor, RPiCameraData stores
- * a unique list of any intermediate video mux or bridge devices connected in a
- * cascade, together with the entity to entity link.
- *
- * Entity pad configuration and link enabling happens at the end of configure().
- * We first disable all pad links on each entity device in the chain, and then
- * selectively enabling the specific links to link sensor to Unicam across all
- * intermediate muxes and bridges.
- *
- * In the cascaded topology below, if Sensor1 is used, the Mux2 -> Mux1 link
- * will be disabled, and Sensor1 -> Mux1 -> Unicam links enabled. Alternatively,
- * if Sensor3 is used, the Sensor2 -> Mux2 and Sensor1 -> Mux1 links are disabled,
- * and Sensor3 -> Mux2 -> Mux1 -> Unicam links are enabled. All other links will
- * remain unchanged.
- *
- *  +----------+
- *  |  Unicam  |
- *  +-----^----+
- *        |
- *    +---+---+
- *    |  Mux1 <-------+
- *    +--^----+       |
- *       |            |
- * +-----+---+    +---+---+
- * | Sensor1 |    |  Mux2 |<--+
- * +---------+    +-^-----+   |
- *                  |         |
- *          +-------+-+   +---+-----+
- *          | Sensor2 |   | Sensor3 |
- *          +---------+   +---------+
- */
-void RPiCameraData::enumerateVideoDevices(MediaLink *link)
-{
-	const MediaPad *sinkPad = link->sink();
-	const MediaEntity *entity = sinkPad->entity();
-	bool unicamFound = false;
-
-	/* We only deal with Video Mux and Bridge devices in cascade. */
-	if (entity->function() != MEDIA_ENT_F_VID_MUX &&
-	    entity->function() != MEDIA_ENT_F_VID_IF_BRIDGE)
-		return;
-
-	/* Find the source pad for this Video Mux or Bridge device. */
-	const MediaPad *sourcePad = nullptr;
-	for (const MediaPad *pad : entity->pads()) {
-		if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
-			/*
-			 * We can only deal with devices that have a single source
-			 * pad. If this device has multiple source pads, ignore it
-			 * and this branch in the cascade.
-			 */
-			if (sourcePad)
-				return;
-
-			sourcePad = pad;
-		}
-	}
-
-	LOG(RPI, Debug) << "Found video mux device " << entity->name()
-			<< " linked to sink pad " << sinkPad->index();
-
-	bridgeDevices_.emplace_back(std::make_unique<V4L2Subdevice>(entity), link);
-	bridgeDevices_.back().first->open();
-
-	/*
-	 * Iterate through all the sink pad links down the cascade to find any
-	 * other Video Mux and Bridge devices.
-	 */
-	for (MediaLink *l : sourcePad->links()) {
-		enumerateVideoDevices(l);
-		/* Once we reach the Unicam entity, we are done. */
-		if (l->sink()->entity()->name() == "unicam-image") {
-			unicamFound = true;
-			break;
-		}
-	}
-
-	/* This identifies the end of our entity enumeration recursion. */
-	if (link->source()->entity()->function() == MEDIA_ENT_F_CAM_SENSOR) {
-		/*
-		* If Unicam is not at the end of this cascade, we cannot configure
-		* this topology automatically, so remove all entity references.
-		*/
-		if (!unicamFound) {
-			LOG(RPI, Warning) << "Cannot automatically configure this MC topology!";
-			bridgeDevices_.clear();
-		}
-	}
-}
-
-void RPiCameraData::processStatsComplete(const ipa::RPi::BufferIds &buffers)
-{
-	if (!isRunning())
-		return;
-
-	FrameBuffer *buffer = isp_[Isp::Stats].getBuffers().at(buffers.stats & RPi::MaskID);
-
-	handleStreamBuffer(buffer, &isp_[Isp::Stats]);
-
-	/* Last thing to do is to fill up the request metadata. */
-	Request *request = requestQueue_.front();
-	ControlList metadata;
-
-	ipa_->reportMetadata(request->sequence(), &metadata);
-	request->metadata().merge(metadata);
-
-	/*
-	 * Inform the sensor of the latest colour gains if it has the
-	 * V4L2_CID_NOTIFY_GAINS control (which means notifyGainsUnity_ is set).
-	 */
-	const auto &colourGains = metadata.get(libcamera::controls::ColourGains);
-	if (notifyGainsUnity_ && colourGains) {
-		/* The control wants linear gains in the order B, Gb, Gr, R. */
-		ControlList ctrls(sensor_->controls());
-		std::array<int32_t, 4> gains{
-			static_cast<int32_t>((*colourGains)[1] * *notifyGainsUnity_),
-			*notifyGainsUnity_,
-			*notifyGainsUnity_,
-			static_cast<int32_t>((*colourGains)[0] * *notifyGainsUnity_)
-		};
-		ctrls.set(V4L2_CID_NOTIFY_GAINS, Span<const int32_t>{ gains });
-
-		sensor_->setControls(&ctrls);
-	}
-
-	state_ = State::IpaComplete;
-	handleState();
-}
-
-void RPiCameraData::prepareIspComplete(const ipa::RPi::BufferIds &buffers)
-{
-	unsigned int embeddedId = buffers.embedded & RPi::MaskID;
-	unsigned int bayer = buffers.bayer & RPi::MaskID;
-	FrameBuffer *buffer;
-
-	if (!isRunning())
-		return;
-
-	buffer = unicam_[Unicam::Image].getBuffers().at(bayer & RPi::MaskID);
-	LOG(RPI, Debug) << "Input re-queue to ISP, buffer id " << (bayer & RPi::MaskID)
-			<< ", timestamp: " << buffer->metadata().timestamp;
-
-	isp_[Isp::Input].queueBuffer(buffer);
-	ispOutputCount_ = 0;
-
-	if (sensorMetadata_ && embeddedId) {
-		buffer = unicam_[Unicam::Embedded].getBuffers().at(embeddedId & RPi::MaskID);
-		handleStreamBuffer(buffer, &unicam_[Unicam::Embedded]);
-	}
-
-	handleState();
-}
-
-void RPiCameraData::setIspControls(const ControlList &controls)
-{
-	ControlList ctrls = controls;
-
-	if (ctrls.contains(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING)) {
-		ControlValue &value =
-			const_cast<ControlValue &>(ctrls.get(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING));
-		Span<uint8_t> s = value.data();
-		bcm2835_isp_lens_shading *ls =
-			reinterpret_cast<bcm2835_isp_lens_shading *>(s.data());
-		ls->dmabuf = lsTable_.get();
-	}
-
-	isp_[Isp::Input].dev()->setControls(&ctrls);
-	handleState();
-}
-
-void RPiCameraData::setDelayedControls(const ControlList &controls, uint32_t delayContext)
-{
-	if (!delayedCtrls_->push(controls, delayContext))
-		LOG(RPI, Error) << "V4L2 DelayedControl set failed";
-	handleState();
-}
-
-void RPiCameraData::setLensControls(const ControlList &controls)
-{
-	CameraLens *lens = sensor_->focusLens();
-
-	if (lens && controls.contains(V4L2_CID_FOCUS_ABSOLUTE)) {
-		ControlValue const &focusValue = controls.get(V4L2_CID_FOCUS_ABSOLUTE);
-		lens->setFocusPosition(focusValue.get<int32_t>());
-	}
-}
-
-void RPiCameraData::setCameraTimeout(uint32_t maxFrameLengthMs)
-{
-	/*
-	 * Set the dequeue timeout to the larger of 5x the maximum reported
-	 * frame length advertised by the IPA over a number of frames. Allow
-	 * a minimum timeout value of 1s.
-	 */
-	utils::Duration timeout =
-		std::max<utils::Duration>(1s, 5 * maxFrameLengthMs * 1ms);
-
-	LOG(RPI, Debug) << "Setting Unicam timeout to " << timeout;
-	unicam_[Unicam::Image].dev()->setDequeueTimeout(timeout);
-}
-
-void RPiCameraData::setSensorControls(ControlList &controls)
-{
-	/*
-	 * We need to ensure that if both VBLANK and EXPOSURE are present, the
-	 * former must be written ahead of, and separately from EXPOSURE to avoid
-	 * V4L2 rejecting the latter. This is identical to what DelayedControls
-	 * does with the priority write flag.
-	 *
-	 * As a consequence of the below logic, VBLANK gets set twice, and we
-	 * rely on the v4l2 framework to not pass the second control set to the
-	 * driver as the actual control value has not changed.
-	 */
-	if (controls.contains(V4L2_CID_EXPOSURE) && controls.contains(V4L2_CID_VBLANK)) {
-		ControlList vblank_ctrl;
-
-		vblank_ctrl.set(V4L2_CID_VBLANK, controls.get(V4L2_CID_VBLANK));
-		sensor_->setControls(&vblank_ctrl);
-	}
-
-	sensor_->setControls(&controls);
-}
-
-void RPiCameraData::unicamTimeout()
-{
-	LOG(RPI, Error) << "Unicam has timed out!";
-	LOG(RPI, Error) << "Please check that your camera sensor connector is attached securely.";
-	LOG(RPI, Error) << "Alternatively, try another cable and/or sensor.";
-
-	state_ = RPiCameraData::State::Error;
-	/*
-	 * To allow the application to attempt a recovery from this timeout,
-	 * stop all devices streaming, and return any outstanding requests as
-	 * incomplete and cancelled.
-	 */
-	for (auto const stream : streams_)
-		stream->dev()->streamOff();
-
-	clearIncompleteRequests();
-}
-
-void RPiCameraData::unicamBufferDequeue(FrameBuffer *buffer)
-{
-	RPi::Stream *stream = nullptr;
-	int index;
-
-	if (!isRunning())
-		return;
-
-	for (RPi::Stream &s : unicam_) {
-		index = s.getBufferId(buffer);
-		if (index) {
-			stream = &s;
-			break;
-		}
-	}
-
-	/* The buffer must belong to one of our streams. */
-	ASSERT(stream);
-
-	LOG(RPI, Debug) << "Stream " << stream->name() << " buffer dequeue"
-			<< ", buffer id " << index
-			<< ", timestamp: " << buffer->metadata().timestamp;
-
-	if (stream == &unicam_[Unicam::Image]) {
-		/*
-		 * Lookup the sensor controls used for this frame sequence from
-		 * DelayedControl and queue them along with the frame buffer.
-		 */
-		auto [ctrl, delayContext] = delayedCtrls_->get(buffer->metadata().sequence);
-		/*
-		 * Add the frame timestamp to the ControlList for the IPA to use
-		 * as it does not receive the FrameBuffer object.
-		 */
-		ctrl.set(controls::SensorTimestamp, buffer->metadata().timestamp);
-		bayerQueue_.push({ buffer, std::move(ctrl), delayContext });
-	} else {
-		embeddedQueue_.push(buffer);
-	}
-
-	handleState();
-}
-
-void RPiCameraData::ispInputDequeue(FrameBuffer *buffer)
-{
-	if (!isRunning())
-		return;
-
-	LOG(RPI, Debug) << "Stream ISP Input buffer complete"
-			<< ", buffer id " << unicam_[Unicam::Image].getBufferId(buffer)
-			<< ", timestamp: " << buffer->metadata().timestamp;
-
-	/* The ISP input buffer gets re-queued into Unicam. */
-	handleStreamBuffer(buffer, &unicam_[Unicam::Image]);
-	handleState();
-}
-
-void RPiCameraData::ispOutputDequeue(FrameBuffer *buffer)
-{
-	RPi::Stream *stream = nullptr;
-	int index;
-
-	if (!isRunning())
-		return;
-
-	for (RPi::Stream &s : isp_) {
-		index = s.getBufferId(buffer);
-		if (index) {
-			stream = &s;
-			break;
-		}
-	}
-
-	/* The buffer must belong to one of our ISP output streams. */
-	ASSERT(stream);
-
-	LOG(RPI, Debug) << "Stream " << stream->name() << " buffer complete"
-			<< ", buffer id " << index
-			<< ", timestamp: " << buffer->metadata().timestamp;
-
-	/*
-	 * ISP statistics buffer must not be re-queued or sent back to the
-	 * application until after the IPA signals so.
-	 */
-	if (stream == &isp_[Isp::Stats]) {
-		ipa::RPi::ProcessParams params;
-		params.buffers.stats = index | RPi::MaskStats;
-		params.ipaContext = requestQueue_.front()->sequence();
-		ipa_->processStats(params);
-	} else {
-		/* Any other ISP output can be handed back to the application now. */
-		handleStreamBuffer(buffer, stream);
-	}
-
-	/*
-	 * Increment the number of ISP outputs generated.
-	 * This is needed to track dropped frames.
-	 */
-	ispOutputCount_++;
-
-	handleState();
-}
-
-void RPiCameraData::clearIncompleteRequests()
-{
-	/*
-	 * All outstanding requests (and associated buffers) must be returned
-	 * back to the application.
-	 */
-	while (!requestQueue_.empty()) {
-		Request *request = requestQueue_.front();
-
-		for (auto &b : request->buffers()) {
-			FrameBuffer *buffer = b.second;
-			/*
-			 * Has the buffer already been handed back to the
-			 * request? If not, do so now.
-			 */
-			if (buffer->request()) {
-				buffer->_d()->cancel();
-				pipe()->completeBuffer(request, buffer);
-			}
-		}
-
-		pipe()->completeRequest(request);
-		requestQueue_.pop_front();
-	}
-}
-
-void RPiCameraData::handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream)
-{
-	/*
-	 * It is possible to be here without a pending request, so check
-	 * that we actually have one to action, otherwise we just return
-	 * buffer back to the stream.
-	 */
-	Request *request = requestQueue_.empty() ? nullptr : requestQueue_.front();
-	if (!dropFrameCount_ && request && request->findBuffer(stream) == buffer) {
-		/*
-		 * Check if this is an externally provided buffer, and if
-		 * so, we must stop tracking it in the pipeline handler.
-		 */
-		handleExternalBuffer(buffer, stream);
-		/*
-		 * Tag the buffer as completed, returning it to the
-		 * application.
-		 */
-		pipe()->completeBuffer(request, buffer);
-	} else {
-		/*
-		 * This buffer was not part of the Request (which happens if an
-		 * internal buffer was used for an external stream, or
-		 * unconditionally for internal streams), or there is no pending
-		 * request, so we can recycle it.
-		 */
-		stream->returnBuffer(buffer);
-	}
-}
-
-void RPiCameraData::handleExternalBuffer(FrameBuffer *buffer, RPi::Stream *stream)
-{
-	unsigned int id = stream->getBufferId(buffer);
-
-	if (!(id & RPi::MaskExternalBuffer))
-		return;
-
-	/* Stop the Stream object from tracking the buffer. */
-	stream->removeExternalBuffer(buffer);
-}
-
-void RPiCameraData::handleState()
-{
-	switch (state_) {
-	case State::Stopped:
-	case State::Busy:
-	case State::Error:
-		break;
-
-	case State::IpaComplete:
-		/* If the request is completed, we will switch to Idle state. */
-		checkRequestCompleted();
-		/*
-		 * No break here, we want to try running the pipeline again.
-		 * The fallthrough clause below suppresses compiler warnings.
-		 */
-		[[fallthrough]];
-
-	case State::Idle:
-		tryRunPipeline();
-		break;
-	}
-}
-
-void RPiCameraData::checkRequestCompleted()
-{
-	bool requestCompleted = false;
-	/*
-	 * If we are dropping this frame, do not touch the request, simply
-	 * change the state to IDLE when ready.
-	 */
-	if (!dropFrameCount_) {
-		Request *request = requestQueue_.front();
-		if (request->hasPendingBuffers())
-			return;
-
-		/* Must wait for metadata to be filled in before completing. */
-		if (state_ != State::IpaComplete)
-			return;
-
-		pipe()->completeRequest(request);
-		requestQueue_.pop_front();
-		requestCompleted = true;
-	}
-
-	/*
-	 * Make sure we have three outputs completed in the case of a dropped
-	 * frame.
-	 */
-	if (state_ == State::IpaComplete &&
-	    ((ispOutputCount_ == 3 && dropFrameCount_) || requestCompleted)) {
-		state_ = State::Idle;
-		if (dropFrameCount_) {
-			dropFrameCount_--;
-			LOG(RPI, Debug) << "Dropping frame at the request of the IPA ("
-					<< dropFrameCount_ << " left)";
-		}
-	}
-}
-
-Rectangle RPiCameraData::scaleIspCrop(const Rectangle &ispCrop) const
-{
-	/*
-	 * Scale a crop rectangle defined in the ISP's coordinates into native sensor
-	 * coordinates.
-	 */
-	Rectangle nativeCrop = ispCrop.scaledBy(sensorInfo_.analogCrop.size(),
-						sensorInfo_.outputSize);
-	nativeCrop.translateBy(sensorInfo_.analogCrop.topLeft());
-	return nativeCrop;
-}
-
-void RPiCameraData::applyScalerCrop(const ControlList &controls)
-{
-	const auto &scalerCrop = controls.get<Rectangle>(controls::ScalerCrop);
-	if (scalerCrop) {
-		Rectangle nativeCrop = *scalerCrop;
-
-		if (!nativeCrop.width || !nativeCrop.height)
-			nativeCrop = { 0, 0, 1, 1 };
-
-		/* Create a version of the crop scaled to ISP (camera mode) pixels. */
-		Rectangle ispCrop = nativeCrop.translatedBy(-sensorInfo_.analogCrop.topLeft());
-		ispCrop.scaleBy(sensorInfo_.outputSize, sensorInfo_.analogCrop.size());
-
-		/*
-		 * The crop that we set must be:
-		 * 1. At least as big as ispMinCropSize_, once that's been
-		 *    enlarged to the same aspect ratio.
-		 * 2. With the same mid-point, if possible.
-		 * 3. But it can't go outside the sensor area.
-		 */
-		Size minSize = ispMinCropSize_.expandedToAspectRatio(nativeCrop.size());
-		Size size = ispCrop.size().expandedTo(minSize);
-		ispCrop = size.centeredTo(ispCrop.center()).enclosedIn(Rectangle(sensorInfo_.outputSize));
-
-		if (ispCrop != ispCrop_) {
-			isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &ispCrop);
-			ispCrop_ = ispCrop;
-
-			/*
-			 * Also update the ScalerCrop in the metadata with what we actually
-			 * used. But we must first rescale that from ISP (camera mode) pixels
-			 * back into sensor native pixels.
-			 */
-			scalerCrop_ = scaleIspCrop(ispCrop_);
-		}
-	}
-}
-
-void RPiCameraData::fillRequestMetadata(const ControlList &bufferControls,
-					Request *request)
-{
-	request->metadata().set(controls::SensorTimestamp,
-				bufferControls.get(controls::SensorTimestamp).value_or(0));
-
-	request->metadata().set(controls::ScalerCrop, scalerCrop_);
-}
-
-void RPiCameraData::tryRunPipeline()
-{
-	FrameBuffer *embeddedBuffer;
-	BayerFrame bayerFrame;
-
-	/* If any of our request or buffer queues are empty, we cannot proceed. */
-	if (state_ != State::Idle || requestQueue_.empty() ||
-	    bayerQueue_.empty() || (embeddedQueue_.empty() && sensorMetadata_))
-		return;
-
-	if (!findMatchingBuffers(bayerFrame, embeddedBuffer))
-		return;
-
-	/* Take the first request from the queue and action the IPA. */
-	Request *request = requestQueue_.front();
-
-	/* See if a new ScalerCrop value needs to be applied. */
-	applyScalerCrop(request->controls());
-
-	/*
-	 * Clear the request metadata and fill it with some initial non-IPA
-	 * related controls. We clear it first because the request metadata
-	 * may have been populated if we have dropped the previous frame.
-	 */
-	request->metadata().clear();
-	fillRequestMetadata(bayerFrame.controls, request);
-
-	/* Set our state to say the pipeline is active. */
-	state_ = State::Busy;
-
-	unsigned int bayer = unicam_[Unicam::Image].getBufferId(bayerFrame.buffer);
-
-	LOG(RPI, Debug) << "Signalling prepareIsp:"
-			<< " Bayer buffer id: " << bayer;
-
-	ipa::RPi::PrepareParams params;
-	params.buffers.bayer = RPi::MaskBayerData | bayer;
-	params.sensorControls = std::move(bayerFrame.controls);
-	params.requestControls = request->controls();
-	params.ipaContext = request->sequence();
-	params.delayContext = bayerFrame.delayContext;
-
-	if (embeddedBuffer) {
-		unsigned int embeddedId = unicam_[Unicam::Embedded].getBufferId(embeddedBuffer);
-
-		params.buffers.embedded = RPi::MaskEmbeddedData | embeddedId;
-		LOG(RPI, Debug) << "Signalling prepareIsp:"
-				<< " Embedded buffer id: " << embeddedId;
-	}
-
-	ipa_->prepareIsp(params);
-}
-
-bool RPiCameraData::findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer)
-{
-	if (bayerQueue_.empty())
-		return false;
-
-	/*
-	 * Find the embedded data buffer with a matching timestamp to pass to
-	 * the IPA. Any embedded buffers with a timestamp lower than the
-	 * current bayer buffer will be removed and re-queued to the driver.
-	 */
-	uint64_t ts = bayerQueue_.front().buffer->metadata().timestamp;
-	embeddedBuffer = nullptr;
-	while (!embeddedQueue_.empty()) {
-		FrameBuffer *b = embeddedQueue_.front();
-		if (b->metadata().timestamp < ts) {
-			embeddedQueue_.pop();
-			unicam_[Unicam::Embedded].returnBuffer(b);
-			LOG(RPI, Debug) << "Dropping unmatched input frame in stream "
-					<< unicam_[Unicam::Embedded].name();
-		} else if (b->metadata().timestamp == ts) {
-			/* Found a match! */
-			embeddedBuffer = b;
-			embeddedQueue_.pop();
-			break;
-		} else {
-			break; /* Only higher timestamps from here. */
-		}
-	}
-
-	if (!embeddedBuffer && sensorMetadata_) {
-		if (embeddedQueue_.empty()) {
-			/*
-			 * If the embedded buffer queue is empty, wait for the next
-			 * buffer to arrive - dequeue ordering may send the image
-			 * buffer first.
-			 */
-			LOG(RPI, Debug) << "Waiting for next embedded buffer.";
-			return false;
-		}
-
-		/* Log if there is no matching embedded data buffer found. */
-		LOG(RPI, Debug) << "Returning bayer frame without a matching embedded buffer.";
-	}
-
-	bayerFrame = std::move(bayerQueue_.front());
-	bayerQueue_.pop();
-
-	return true;
-}
-
-REGISTER_PIPELINE_HANDLER(PipelineHandlerRPi)
-
-} /* namespace libcamera */
diff --git a/src/libcamera/pipeline/rpi/vc4/vc4.cpp b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
new file mode 100644
index 000000000000..a085a06376a8
--- /dev/null
+++ b/src/libcamera/pipeline/rpi/vc4/vc4.cpp
@@ -0,0 +1,1001 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2019-2023, Raspberry Pi Ltd
+ *
+ * vc4.cpp - Pipeline handler for VC4 based Raspberry Pi devices
+ */
+
+#include <linux/bcm2835-isp.h>
+#include <linux/v4l2-controls.h>
+#include <linux/videodev2.h>
+
+#include <libcamera/formats.h>
+
+#include "libcamera/internal/device_enumerator.h"
+
+#include "dma_heaps.h"
+#include "pipeline_base.h"
+#include "rpi_stream.h"
+
+using namespace std::chrono_literals;
+
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(RPI)
+
+namespace {
+
+enum class Unicam : unsigned int { Image, Embedded };
+enum class Isp : unsigned int { Input, Output0, Output1, Stats };
+
+} /* namespace */
+
+class Vc4CameraData final : public RPi::CameraData
+{
+public:
+	Vc4CameraData(PipelineHandler *pipe)
+		: RPi::CameraData(pipe)
+	{
+	}
+
+	~Vc4CameraData()
+	{
+		freeBuffers();
+	}
+
+	V4L2VideoDevice::Formats ispFormats() const override
+	{
+		return isp_[Isp::Output0].dev()->formats();
+	}
+
+	V4L2VideoDevice::Formats rawFormats() const override
+	{
+		return unicam_[Unicam::Image].dev()->formats();
+	}
+
+	V4L2VideoDevice *frontendDevice() override
+	{
+		return unicam_[Unicam::Image].dev();
+	}
+
+	void platformFreeBuffers() override
+	{
+	}
+
+	CameraConfiguration::Status platformValidate(std::vector<StreamParams> &rawStreams,
+						     std::vector<StreamParams> &outStreams) const override;
+
+	int platformPipelineConfigure(const std::unique_ptr<YamlObject> &root) override;
+
+	void platformStart() override;
+	void platformStop() override;
+
+	void unicamBufferDequeue(FrameBuffer *buffer);
+	void ispInputDequeue(FrameBuffer *buffer);
+	void ispOutputDequeue(FrameBuffer *buffer);
+
+	void processStatsComplete(const ipa::RPi::BufferIds &buffers);
+	void prepareIspComplete(const ipa::RPi::BufferIds &buffers);
+	void setIspControls(const ControlList &controls);
+	void setCameraTimeout(uint32_t maxFrameLengthMs);
+
+	/* Arrays of Unicam and ISP device streams and their associated buffers. */
+	RPi::Device<Unicam, 2> unicam_;
+	RPi::Device<Isp, 4> isp_;
+
+	/* DMA heap allocation helper. */
+	RPi::DmaHeap dmaHeap_;
+	SharedFD lsTable_;
+
+	struct Config {
+		/*
+		 * The minimum number of internal buffers to be allocated for
+		 * the Unicam Image stream.
+		 */
+		unsigned int minUnicamBuffers;
+		/*
+		 * The minimum total (internal + external) buffer count used for
+		 * the Unicam Image stream.
+		 *
+		 * Note that:
+		 * minTotalUnicamBuffers must be >= 1, and
+		 * minTotalUnicamBuffers >= minUnicamBuffers
+		 */
+		unsigned int minTotalUnicamBuffers;
+	};
+
+	Config config_;
+
+private:
+	void platformIspCrop() override
+	{
+		isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &ispCrop_);
+	}
+
+	int platformConfigure(const V4L2SubdeviceFormat &sensorFormat,
+			      std::optional<BayerFormat::Packing> packing,
+			      std::vector<StreamParams> &rawStreams,
+			      std::vector<StreamParams> &outStreams) override;
+	int platformConfigureIpa(ipa::RPi::ConfigParams &params) override;
+
+	int platformInitIpa([[maybe_unused]] ipa::RPi::InitParams &params) override
+	{
+		return 0;
+	}
+
+	struct BayerFrame {
+		FrameBuffer *buffer;
+		ControlList controls;
+		unsigned int delayContext;
+	};
+
+	void tryRunPipeline() override;
+	bool findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer);
+
+	std::queue<BayerFrame> bayerQueue_;
+	std::queue<FrameBuffer *> embeddedQueue_;
+};
+
+class PipelineHandlerVc4 : public RPi::PipelineHandlerBase
+{
+public:
+	PipelineHandlerVc4(CameraManager *manager)
+		: RPi::PipelineHandlerBase(manager)
+	{
+	}
+
+	~PipelineHandlerVc4()
+	{
+	}
+
+	bool match(DeviceEnumerator *enumerator) override;
+
+private:
+	Vc4CameraData *cameraData(Camera *camera)
+	{
+		return static_cast<Vc4CameraData *>(camera->_d());
+	}
+
+	std::unique_ptr<RPi::CameraData> allocateCameraData() override
+	{
+		return std::make_unique<Vc4CameraData>(this);
+	}
+
+	int prepareBuffers(Camera *camera) override;
+	int platformRegister(std::unique_ptr<RPi::CameraData> &cameraData,
+			     MediaDevice *unicam, MediaDevice *isp) override;
+};
+
+bool PipelineHandlerVc4::match(DeviceEnumerator *enumerator)
+{
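+	/* VC4 platforms expose at most two Unicam instances. */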
+	constexpr unsigned int numUnicamDevices = 2;
+
+	/*
+	 * Loop over all Unicam instances, but return as soon as a match is
+	 * found. This is to ensure we correctly enumerate the camera when an
+	 * instance of Unicam has registered with the media controller, but
+	 * has not registered device nodes due to a sensor subdevice failure.
+	 */
+	for (unsigned int i = 0; i < numUnicamDevices; i++) {
+		DeviceMatch unicam("unicam");
+		MediaDevice *unicamDevice = acquireMediaDevice(enumerator, unicam);
+
+		if (!unicamDevice) {
+			LOG(RPI, Debug) << "Unable to acquire a Unicam instance";
+			break;
+		}
+
+		DeviceMatch isp("bcm2835-isp");
+		MediaDevice *ispDevice = acquireMediaDevice(enumerator, isp);
+
+		if (!ispDevice) {
+			LOG(RPI, Debug) << "Unable to acquire ISP instance";
+			break;
+		}
+
+		/*
+		 * The loop below is used to register multiple cameras behind one or more
+		 * video mux devices that are attached to a particular Unicam instance.
+		 * Obviously these cameras cannot be used simultaneously.
+		 */
+		unsigned int numCameras = 0;
+		for (MediaEntity *entity : unicamDevice->entities()) {
+			if (entity->function() != MEDIA_ENT_F_CAM_SENSOR)
+				continue;
+
+			int ret = RPi::PipelineHandlerBase::registerCamera(unicamDevice, "unicam-image",
+									   ispDevice, entity);
+			if (ret)
+				LOG(RPI, Error) << "Failed to register camera "
+						<< entity->name() << ": " << ret;
+			else
+				numCameras++;
+		}
+
+		if (numCameras)
+			return true;
+	}
+
+	return false;
+}
+
+int PipelineHandlerVc4::prepareBuffers(Camera *camera)
+{
+	Vc4CameraData *data = cameraData(camera);
+	unsigned int numRawBuffers = 0;
+	int ret;
+
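+	/*
+	 * Find the number of buffers the application requested for the RAW
+	 * stream, if one was configured.
+	 */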
+	for (Stream *s : camera->streams()) {
+		if (BayerFormat::fromPixelFormat(s->configuration().pixelFormat).isValid()) {
+			numRawBuffers = s->configuration().bufferCount;
+			break;
+		}
+	}
+
+	/* Decide how many internal buffers to allocate. */
+	for (auto const stream : data->streams_) {
+		unsigned int numBuffers;
+		/*
+		 * For Unicam, allocate a minimum number of buffers for internal
+		 * use as we want to avoid any frame drops.
+		 */
+		const unsigned int minBuffers = data->config_.minTotalUnicamBuffers;
+		if (stream == &data->unicam_[Unicam::Image]) {
+			/*
+			 * If an application has configured a RAW stream, allocate
+			 * additional buffers to make up the minimum, but ensure
+			 * we have at least minUnicamBuffers of internal buffers
+			 * to use to minimise frame drops.
+			 */
+			numBuffers = std::max<int>(data->config_.minUnicamBuffers,
+						   minBuffers - numRawBuffers);
+		} else if (stream == &data->isp_[Isp::Input]) {
+			/*
+			 * ISP input buffers are imported from Unicam, so follow
+			 * similar logic as above to count all the RAW buffers
+			 * available.
+			 */
+			numBuffers = numRawBuffers +
+				     std::max<int>(data->config_.minUnicamBuffers,
+						   minBuffers - numRawBuffers);
+
+		} else if (stream == &data->unicam_[Unicam::Embedded]) {
+			/*
+			 * Embedded data buffers are (currently) for internal use,
+			 * so allocate the minimum required to avoid frame drops.
+			 */
+			numBuffers = minBuffers;
+		} else {
+			/*
+			 * Since the ISP runs synchronous with the IPA and requests,
+			 * we only ever need one set of internal buffers. Any buffers
+			 * the application wants to hold onto will already be exported
+			 * through PipelineHandlerBase::exportFrameBuffers().
+			 */
+			numBuffers = 1;
+		}
+
+		ret = stream->prepareBuffers(numBuffers);
+		if (ret < 0)
+			return ret;
+	}
+
+	/*
+	 * Pass the stats and embedded data buffers to the IPA. No other
+	 * buffers need to be passed.
+	 */
+	mapBuffers(camera, data->isp_[Isp::Stats].getBuffers(), RPi::MaskStats);
+	if (data->sensorMetadata_)
+		mapBuffers(camera, data->unicam_[Unicam::Embedded].getBuffers(),
+			   RPi::MaskEmbeddedData);
+
+	return 0;
+}
+
+int PipelineHandlerVc4::platformRegister(std::unique_ptr<RPi::CameraData> &cameraData, MediaDevice *unicam, MediaDevice *isp)
+{
+	Vc4CameraData *data = static_cast<Vc4CameraData *>(cameraData.get());
+
+	if (!data->dmaHeap_.isValid())
+		return -ENOMEM;
+
+	MediaEntity *unicamImage = unicam->getEntityByName("unicam-image");
+	MediaEntity *ispOutput0 = isp->getEntityByName("bcm2835-isp0-output0");
+	MediaEntity *ispCapture1 = isp->getEntityByName("bcm2835-isp0-capture1");
+	MediaEntity *ispCapture2 = isp->getEntityByName("bcm2835-isp0-capture2");
+	MediaEntity *ispCapture3 = isp->getEntityByName("bcm2835-isp0-capture3");
+
+	if (!unicamImage || !ispOutput0 || !ispCapture1 || !ispCapture2 || !ispCapture3)
+		return -ENOENT;
+
+	/* Locate and open the unicam video streams. */
+	data->unicam_[Unicam::Image] = RPi::Stream("Unicam Image", unicamImage);
+
+	/* An embedded data node will not be present if the sensor does not support it. */
+	MediaEntity *unicamEmbedded = unicam->getEntityByName("unicam-embedded");
+	if (unicamEmbedded) {
+		data->unicam_[Unicam::Embedded] = RPi::Stream("Unicam Embedded", unicamEmbedded);
+		data->unicam_[Unicam::Embedded].dev()->bufferReady.connect(data,
+									   &Vc4CameraData::unicamBufferDequeue);
+	}
+
+	/* Tag the ISP input stream as an import stream. */
+	data->isp_[Isp::Input] = RPi::Stream("ISP Input", ispOutput0, true);
+	data->isp_[Isp::Output0] = RPi::Stream("ISP Output0", ispCapture1);
+	data->isp_[Isp::Output1] = RPi::Stream("ISP Output1", ispCapture2);
+	data->isp_[Isp::Stats] = RPi::Stream("ISP Stats", ispCapture3);
+
+	/* Wire up all the buffer connections. */
+	data->unicam_[Unicam::Image].dev()->bufferReady.connect(data, &Vc4CameraData::unicamBufferDequeue);
+	data->isp_[Isp::Input].dev()->bufferReady.connect(data, &Vc4CameraData::ispInputDequeue);
+	data->isp_[Isp::Output0].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+	data->isp_[Isp::Output1].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+	data->isp_[Isp::Stats].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
+
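+	/*
+	 * sensorMetadata_ reflects what the CamHelper reports about embedded
+	 * data support, while the unicam-embedded node reflects what the
+	 * driver provides. If exactly one of the two is present, fall back to
+	 * running without sensor metadata.
+	 */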
+	if (data->sensorMetadata_ ^ !!data->unicam_[Unicam::Embedded].dev()) {
+		LOG(RPI, Warning) << "Mismatch between Unicam and CamHelper for embedded data usage!";
+		data->sensorMetadata_ = false;
+		if (data->unicam_[Unicam::Embedded].dev())
+			data->unicam_[Unicam::Embedded].dev()->bufferReady.disconnect();
+	}
+
+	/*
+	 * Open all Unicam and ISP streams. The exception is the embedded data
+	 * stream, which is only opened below if the IPA reports that the
+	 * sensor supports embedded data.
+	 *
+	 * The grouping below is just for convenience so that we can easily
+	 * iterate over all streams in one go.
+	 */
+	data->streams_.push_back(&data->unicam_[Unicam::Image]);
+	if (data->sensorMetadata_)
+		data->streams_.push_back(&data->unicam_[Unicam::Embedded]);
+
+	for (auto &stream : data->isp_)
+		data->streams_.push_back(&stream);
+
+	for (auto stream : data->streams_) {
+		int ret = stream->dev()->open();
+		if (ret)
+			return ret;
+	}
+
+	if (!data->unicam_[Unicam::Image].dev()->caps().hasMediaController()) {
+		LOG(RPI, Error) << "Unicam driver does not use the MediaController, please update your kernel!";
+		return -EINVAL;
+	}
+
+	/* Wire up all the IPA connections. */
+	data->ipa_->processStatsComplete.connect(data, &Vc4CameraData::processStatsComplete);
+	data->ipa_->prepareIspComplete.connect(data, &Vc4CameraData::prepareIspComplete);
+	data->ipa_->setIspControls.connect(data, &Vc4CameraData::setIspControls);
+	data->ipa_->setCameraTimeout.connect(data, &Vc4CameraData::setCameraTimeout);
+
+	/*
+	 * List the available streams an application may request. At present, we
+	 * do not advertise Unicam Embedded and ISP Statistics streams, as there
+	 * is no mechanism for the application to request non-image buffer formats.
+	 */
+	std::set<Stream *> streams;
+	streams.insert(&data->unicam_[Unicam::Image]);
+	streams.insert(&data->isp_[Isp::Output0]);
+	streams.insert(&data->isp_[Isp::Output1]);
+
+	/* Create and register the camera. */
+	const std::string &id = data->sensor_->id();
+	std::shared_ptr<Camera> camera =
+		Camera::create(std::move(cameraData), id, streams);
+	PipelineHandler::registerCamera(std::move(camera));
+
+	LOG(RPI, Info) << "Registered camera " << id
+		       << " to Unicam device " << unicam->deviceNode()
+		       << " and ISP device " << isp->deviceNode();
+
+	return 0;
+}
+
+CameraConfiguration::Status Vc4CameraData::platformValidate(std::vector<StreamParams> &rawStreams,
+							    std::vector<StreamParams> &outStreams) const
+{
+	CameraConfiguration::Status status = CameraConfiguration::Status::Valid;
+
+	/* Can only output 1 RAW stream, or 2 YUV/RGB streams. */
+	if (rawStreams.size() > 1 || outStreams.size() > 2) {
+		LOG(RPI, Error) << "Invalid number of streams requested";
+		return CameraConfiguration::Status::Invalid;
+	}
+
+	if (!rawStreams.empty())
+		rawStreams[0].dev = unicam_[Unicam::Image].dev();
+
+	/*
+	 * For the two ISP outputs, one stream must be equal to or smaller
+	 * than the other in all dimensions.
+	 *
+	 * Index 0 contains the largest requested resolution.
+	 */
+	for (unsigned int i = 0; i < outStreams.size(); i++) {
+		Size size;
+
+		size.width = std::min(outStreams[i].cfg->size.width,
+				      outStreams[0].cfg->size.width);
+		size.height = std::min(outStreams[i].cfg->size.height,
+				       outStreams[0].cfg->size.height);
+
+		if (outStreams[i].cfg->size != size) {
+			outStreams[i].cfg->size = size;
+			status = CameraConfiguration::Status::Adjusted;
+		}
+
+		/*
+		 * Output 0 must take the largest resolution; the adjustment
+		 * above guarantees this.
+		 */
+		outStreams[i].dev = isp_[i == 0 ? Isp::Output0 : Isp::Output1].dev();
+	}
+
+	return status;
+}
+
+int Vc4CameraData::platformPipelineConfigure(const std::unique_ptr<YamlObject> &root)
+{
+	config_ = {
+		.minUnicamBuffers = 2,
+		.minTotalUnicamBuffers = 4,
+	};
+
+	if (!root)
+		return 0;
+
+	std::optional<double> ver = (*root)["version"].get<double>();
+	if (!ver || *ver != 1.0) {
+		LOG(RPI, Error) << "Unexpected configuration file version reported";
+		return -EINVAL;
+	}
+
+	std::optional<std::string> target = (*root)["target"].get<std::string>();
+	if (!target || *target != "bcm2835") {
+		LOG(RPI, Error) << "Unexpected target reported: expected \"bcm2835\", got "
+				<< target.value_or("(none)");
+		return -EINVAL;
+	}
+
+	const YamlObject &phConfig = (*root)["pipeline_handler"];
+	config_.minUnicamBuffers =
+		phConfig["min_unicam_buffers"].get<unsigned int>(config_.minUnicamBuffers);
+	config_.minTotalUnicamBuffers =
+		phConfig["min_total_unicam_buffers"].get<unsigned int>(config_.minTotalUnicamBuffers);
+
+	if (config_.minTotalUnicamBuffers < config_.minUnicamBuffers) {
+		LOG(RPI, Error) << "Invalid configuration: min_total_unicam_buffers must be >= min_unicam_buffers";
+		return -EINVAL;
+	}
+
+	if (config_.minTotalUnicamBuffers < 1) {
+		LOG(RPI, Error) << "Invalid configuration: min_total_unicam_buffers must be >= 1";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int Vc4CameraData::platformConfigure(const V4L2SubdeviceFormat &sensorFormat,
+				     std::optional<BayerFormat::Packing> packing,
+				     std::vector<StreamParams> &rawStreams,
+				     std::vector<StreamParams> &outStreams)
+{
+	int ret;
+
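+	/*
+	 * Default to CSI-2 packed raw frames unless the application requested
+	 * a specific packing.
+	 */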
+	if (!packing)
+		packing = BayerFormat::Packing::CSI2;
+
+	V4L2VideoDevice *unicam = unicam_[Unicam::Image].dev();
+	V4L2DeviceFormat unicamFormat = RPi::PipelineHandlerBase::toV4L2DeviceFormat(unicam, sensorFormat, *packing);
+
+	ret = unicam->setFormat(&unicamFormat);
+	if (ret)
+		return ret;
+
+	/*
+	 * See which streams are requested, and route the user
+	 * StreamConfiguration appropriately.
+	 */
+	if (!rawStreams.empty()) {
+		rawStreams[0].cfg->setStream(&unicam_[Unicam::Image]);
+		unicam_[Unicam::Image].setExternal(true);
+	}
+
+	ret = isp_[Isp::Input].dev()->setFormat(&unicamFormat);
+	if (ret)
+		return ret;
+
+	LOG(RPI, Info) << "Sensor: " << sensor_->id()
+		       << " - Selected sensor format: " << sensorFormat
+		       << " - Selected unicam format: " << unicamFormat;
+
+	/* Use a sensible small default size if no output streams are configured. */
+	Size maxSize = outStreams.empty() ? Size(320, 240) : outStreams[0].cfg->size;
+	V4L2DeviceFormat format;
+
+	for (unsigned int i = 0; i < outStreams.size(); i++) {
+		StreamConfiguration *cfg = outStreams[i].cfg;
+
+		/* The largest resolution gets routed to the ISP Output 0 node. */
+		RPi::Stream *stream = i == 0 ? &isp_[Isp::Output0] : &isp_[Isp::Output1];
+
+		V4L2PixelFormat fourcc = stream->dev()->toV4L2PixelFormat(cfg->pixelFormat);
+		format.size = cfg->size;
+		format.fourcc = fourcc;
+		format.colorSpace = cfg->colorSpace;
+
+		LOG(RPI, Debug) << "Setting " << stream->name() << " to "
+				<< format;
+
+		ret = stream->dev()->setFormat(&format);
+		if (ret)
+			return -EINVAL;
+
+		if (format.size != cfg->size || format.fourcc != fourcc) {
+			LOG(RPI, Error)
+				<< "Failed to set requested format on " << stream->name()
+				<< ", returned " << format;
+			return -EINVAL;
+		}
+
+		LOG(RPI, Debug)
+			<< "Stream " << stream->name() << " has color space "
+			<< ColorSpace::toString(cfg->colorSpace);
+
+		cfg->setStream(stream);
+		stream->setExternal(true);
+	}
+
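+	/*
+	 * ispOutputTotal_ counts every ISP output node in use for this
+	 * configuration, including any set up internally below.
+	 */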
+	ispOutputTotal_ = outStreams.size();
+
+	/*
+	 * If the ISP::Output0 stream has not been configured by the
+	 * application, we must allow the hardware to generate an output so
+	 * that the data flow in the pipeline handler remains consistent, and
+	 * we still generate statistics for the IPA to use. So enable the
+	 * output at a very low resolution for internal use.
+	 *
+	 * \todo Allow the pipeline to work correctly without Output0 and only
+	 * statistics coming from the hardware.
+	 */
+	if (outStreams.empty()) {
+		V4L2VideoDevice *dev = isp_[Isp::Output0].dev();
+
+		format = {};
+		format.size = maxSize;
+		format.fourcc = dev->toV4L2PixelFormat(formats::YUV420);
+		/* No one asked for output, so the color space doesn't matter. */
+		format.colorSpace = ColorSpace::Sycc;
+		ret = dev->setFormat(&format);
+		if (ret) {
+			LOG(RPI, Error)
+				<< "Failed to set default format on ISP Output0: "
+				<< ret;
+			return -EINVAL;
+		}
+
+		ispOutputTotal_++;
+
+		LOG(RPI, Debug) << "Defaulting ISP Output0 format to "
+				<< format;
+	}
+
+	/*
+	 * If the ISP::Output1 stream has not been requested by the
+	 * application, we set it up for internal use now. This second stream
+	 * will be used for fast colour denoise, and must be a quarter of the
+	 * resolution of the ISP::Output0 stream. However, also limit the
+	 * maximum size to 1200 pixels in the larger dimension, just to avoid
+	 * being wasteful with buffer allocations and memory bandwidth.
+	 *
+	 * \todo If the Output 1 format is not YUV420, Output 1 ought to be
+	 * disabled, as colour denoise will not run.
+	 */
+	if (outStreams.size() == 1) {
+		V4L2VideoDevice *dev = isp_[Isp::Output1].dev();
+
+		V4L2DeviceFormat output1Format;
+		constexpr Size maxDimensions(1200, 1200);
+		const Size limit = maxDimensions.boundedToAspectRatio(format.size);
+
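+		/*
+		 * Halving each dimension of Output0 gives the quarter
+		 * resolution needed for fast colour denoise; align down to
+		 * even dimensions to satisfy YUV420 chroma subsampling.
+		 */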
+		output1Format.size = (format.size / 2).boundedTo(limit).alignedDownTo(2, 2);
+		output1Format.colorSpace = format.colorSpace;
+		output1Format.fourcc = dev->toV4L2PixelFormat(formats::YUV420);
+
+		LOG(RPI, Debug) << "Setting ISP Output1 (internal) to "
+				<< output1Format;
+
+		ret = dev->setFormat(&output1Format);
+		if (ret) {
+			LOG(RPI, Error) << "Failed to set format on ISP Output1: "
+					<< ret;
+			return -EINVAL;
+		}
+
+		ispOutputTotal_++;
+	}
+
+	/* ISP statistics output format. */
+	format = {};
+	format.fourcc = V4L2PixelFormat(V4L2_META_FMT_BCM2835_ISP_STATS);
+	ret = isp_[Isp::Stats].dev()->setFormat(&format);
+	if (ret) {
+		LOG(RPI, Error) << "Failed to set format on ISP stats stream: "
+				<< format;
+		return ret;
+	}
+
+	ispOutputTotal_++;
+
+	/*
+	 * Configure the Unicam embedded data output format only if the sensor
+	 * supports it.
+	 */
+	if (sensorMetadata_) {
+		V4L2SubdeviceFormat embeddedFormat;
+
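+		/*
+		 * The embedded data format is exposed on pad 1 of the sensor
+		 * subdevice.
+		 */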
+		sensor_->device()->getFormat(1, &embeddedFormat);
+		format = {};
+		format.fourcc = V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);
+		format.planes[0].size = embeddedFormat.size.width * embeddedFormat.size.height;
+
+		LOG(RPI, Debug) << "Setting embedded data format " << format.toString();
+		ret = unicam_[Unicam::Embedded].dev()->setFormat(&format);
+		if (ret) {
+			LOG(RPI, Error) << "Failed to set format on Unicam embedded: "
+					<< format;
+			return ret;
+		}
+	}
+
+	/* Figure out the smallest selection the ISP will allow. */
+	Rectangle testCrop(0, 0, 1, 1);
+	isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &testCrop);
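+	/* The driver adjusts the 1x1 request up to the smallest crop it supports. */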
+	ispMinCropSize_ = testCrop.size();
+
+	/* Adjust aspect ratio by providing crops on the input image. */
+	Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
+	ispCrop_ = size.centeredTo(Rectangle(unicamFormat.size).center());
+
+	platformIspCrop();
+
+	return 0;
+}
+
+int Vc4CameraData::platformConfigureIpa(ipa::RPi::ConfigParams &params)
+{
+	params.ispControls = isp_[Isp::Input].dev()->controls();
+
+	/* Allocate the lens shading table via dmaHeap and pass to the IPA. */
+	if (!lsTable_.isValid()) {
+		lsTable_ = SharedFD(dmaHeap_.alloc("ls_grid", ipa::RPi::MaxLsGridSize));
+		if (!lsTable_.isValid())
+			return -ENOMEM;
+
+		/*
+		 * Allow the IPA to mmap the LS table via the file descriptor.
+		 *
+		 * \todo Investigate if mapping the lens shading table buffer
+		 * could be handled with mapBuffers().
+		 */
+		params.lsTableHandle = lsTable_;
+	}
+
+	return 0;
+}
+
+void Vc4CameraData::platformStart()
+{
+}
+
+void Vc4CameraData::platformStop()
+{
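+	/* Discard any queued bayer frames and embedded data buffers. */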
+	bayerQueue_ = {};
+	embeddedQueue_ = {};
+}
+
+void Vc4CameraData::unicamBufferDequeue(FrameBuffer *buffer)
+{
+	RPi::Stream *stream = nullptr;
+	unsigned int index;
+
+	if (!isRunning())
+		return;
+
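+	/* Identify which Unicam stream this buffer belongs to. */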
+	for (RPi::Stream &s : unicam_) {
+		index = s.getBufferId(buffer);
+		if (index) {
+			stream = &s;
+			break;
+		}
+	}
+
+	/* The buffer must belong to one of our streams. */
+	ASSERT(stream);
+
+	LOG(RPI, Debug) << "Stream " << stream->name() << " buffer dequeue"
+			<< ", buffer id " << index
+			<< ", timestamp: " << buffer->metadata().timestamp;
+
+	if (stream == &unicam_[Unicam::Image]) {
+		/*
+		 * Lookup the sensor controls used for this frame sequence from
+		 * DelayedControl and queue them along with the frame buffer.
+		 */
+		auto [ctrl, delayContext] = delayedCtrls_->get(buffer->metadata().sequence);
+		/*
+		 * Add the frame timestamp to the ControlList for the IPA to use
+		 * as it does not receive the FrameBuffer object.
+		 */
+		ctrl.set(controls::SensorTimestamp, buffer->metadata().timestamp);
+		bayerQueue_.push({ buffer, std::move(ctrl), delayContext });
+	} else {
+		embeddedQueue_.push(buffer);
+	}
+
+	handleState();
+}
+
+void Vc4CameraData::ispInputDequeue(FrameBuffer *buffer)
+{
+	if (!isRunning())
+		return;
+
+	LOG(RPI, Debug) << "Stream ISP Input buffer complete"
+			<< ", buffer id " << unicam_[Unicam::Image].getBufferId(buffer)
+			<< ", timestamp: " << buffer->metadata().timestamp;
+
+	/* The ISP input buffer gets re-queued into Unicam. */
+	handleStreamBuffer(buffer, &unicam_[Unicam::Image]);
+	handleState();
+}
+
+void Vc4CameraData::ispOutputDequeue(FrameBuffer *buffer)
+{
+	RPi::Stream *stream = nullptr;
+	unsigned int index;
+
+	if (!isRunning())
+		return;
+
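+	/* Identify which ISP stream this buffer belongs to. */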
+	for (RPi::Stream &s : isp_) {
+		index = s.getBufferId(buffer);
+		if (index) {
+			stream = &s;
+			break;
+		}
+	}
+
+	/* The buffer must belong to one of our ISP output streams. */
+	ASSERT(stream);
+
+	LOG(RPI, Debug) << "Stream " << stream->name() << " buffer complete"
+			<< ", buffer id " << index
+			<< ", timestamp: " << buffer->metadata().timestamp;
+
+	/*
+	 * The ISP statistics buffer must not be re-queued or sent back to the
+	 * application until the IPA signals that it has finished with it.
+	 */
+	if (stream == &isp_[Isp::Stats]) {
+		ipa::RPi::ProcessParams params;
+		params.buffers.stats = index | RPi::MaskStats;
+		params.ipaContext = requestQueue_.front()->sequence();
+		ipa_->processStats(params);
+	} else {
+		/* Any other ISP output can be handed back to the application now. */
+		handleStreamBuffer(buffer, stream);
+	}
+
+	/*
+	 * Increment the number of ISP outputs generated.
+	 * This is needed to track dropped frames.
+	 */
+	ispOutputCount_++;
+
+	handleState();
+}
+
+void Vc4CameraData::processStatsComplete(const ipa::RPi::BufferIds &buffers)
+{
+	if (!isRunning())
+		return;
+
+	FrameBuffer *buffer = isp_[Isp::Stats].getBuffers().at(buffers.stats & RPi::MaskID);
+
+	handleStreamBuffer(buffer, &isp_[Isp::Stats]);
+
+	/* Last thing to do is to fill up the request metadata. */
+	Request *request = requestQueue_.front();
+	ControlList metadata(controls::controls);
+
+	ipa_->reportMetadata(request->sequence(), &metadata);
+	request->metadata().merge(metadata);
+
+	/*
+	 * Inform the sensor of the latest colour gains if it has the
+	 * V4L2_CID_NOTIFY_GAINS control (which means notifyGainsUnity_ is set).
+	 */
+	const auto &colourGains = metadata.get(libcamera::controls::ColourGains);
+	if (notifyGainsUnity_ && colourGains) {
+		/* The control wants linear gains in the order B, Gb, Gr, R. */
+		ControlList ctrls(sensor_->controls());
+		std::array<int32_t, 4> gains{
+			static_cast<int32_t>((*colourGains)[1] * *notifyGainsUnity_),
+			*notifyGainsUnity_,
+			*notifyGainsUnity_,
+			static_cast<int32_t>((*colourGains)[0] * *notifyGainsUnity_)
+		};
+		ctrls.set(V4L2_CID_NOTIFY_GAINS, Span<const int32_t>{ gains });
+
+		sensor_->setControls(&ctrls);
+	}
+
+	state_ = State::IpaComplete;
+	handleState();
+}
+
+void Vc4CameraData::prepareIspComplete(const ipa::RPi::BufferIds &buffers)
+{
+	unsigned int embeddedId = buffers.embedded & RPi::MaskID;
+	unsigned int bayer = buffers.bayer & RPi::MaskID;
+	FrameBuffer *buffer;
+
+	if (!isRunning())
+		return;
+
+	buffer = unicam_[Unicam::Image].getBuffers().at(bayer);
+	LOG(RPI, Debug) << "Input re-queue to ISP, buffer id " << bayer
+			<< ", timestamp: " << buffer->metadata().timestamp;
+
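+	/*
+	 * Re-queue the bayer buffer to the ISP input and restart the
+	 * per-frame output count.
+	 */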
+	isp_[Isp::Input].queueBuffer(buffer);
+	ispOutputCount_ = 0;
+
+	if (sensorMetadata_ && embeddedId) {
+		buffer = unicam_[Unicam::Embedded].getBuffers().at(embeddedId);
+		handleStreamBuffer(buffer, &unicam_[Unicam::Embedded]);
+	}
+
+	handleState();
+}
+
+void Vc4CameraData::setIspControls(const ControlList &controls)
+{
+	ControlList ctrls = controls;
+
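+	/*
+	 * The lens shading parameters must point at our dmaHeap-allocated
+	 * table, so patch its dmabuf fd into the control value before handing
+	 * it to the ISP driver.
+	 */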
+	if (ctrls.contains(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING)) {
+		ControlValue &value =
+			const_cast<ControlValue &>(ctrls.get(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING));
+		Span<uint8_t> s = value.data();
+		bcm2835_isp_lens_shading *ls =
+			reinterpret_cast<bcm2835_isp_lens_shading *>(s.data());
+		ls->dmabuf = lsTable_.get();
+	}
+
+	isp_[Isp::Input].dev()->setControls(&ctrls);
+	handleState();
+}
+
+void Vc4CameraData::setCameraTimeout(uint32_t maxFrameLengthMs)
+{
+	/*
+	 * Set the dequeue timeout to 5x the maximum frame length reported by
+	 * the IPA over a number of frames, allowing a minimum timeout value
+	 * of 1s.
+	 */
+	utils::Duration timeout =
+		std::max<utils::Duration>(1s, 5 * maxFrameLengthMs * 1ms);
+
+	LOG(RPI, Debug) << "Setting Unicam timeout to " << timeout;
+	unicam_[Unicam::Image].dev()->setDequeueTimeout(timeout);
+}
+
+void Vc4CameraData::tryRunPipeline()
+{
+	FrameBuffer *embeddedBuffer;
+	BayerFrame bayerFrame;
+
+	/* If any of our request or buffer queues are empty, we cannot proceed. */
+	if (state_ != State::Idle || requestQueue_.empty() ||
+	    bayerQueue_.empty() || (embeddedQueue_.empty() && sensorMetadata_))
+		return;
+
+	if (!findMatchingBuffers(bayerFrame, embeddedBuffer))
+		return;
+
+	/* Take the first request from the queue and action the IPA. */
+	Request *request = requestQueue_.front();
+
+	/* See if a new ScalerCrop value needs to be applied. */
+	calculateScalerCrop(request->controls());
+
+	/*
+	 * Clear the request metadata and fill it with some initial non-IPA
+	 * related controls. We clear it first because the request metadata
+	 * may have been populated if we have dropped the previous frame.
+	 */
+	request->metadata().clear();
+	fillRequestMetadata(bayerFrame.controls, request);
+
+	/* Set our state to say the pipeline is active. */
+	state_ = State::Busy;
+
+	unsigned int bayer = unicam_[Unicam::Image].getBufferId(bayerFrame.buffer);
+
+	LOG(RPI, Debug) << "Signalling prepareIsp:"
+			<< " Bayer buffer id: " << bayer;
+
+	ipa::RPi::PrepareParams params;
+	params.buffers.bayer = RPi::MaskBayerData | bayer;
+	params.sensorControls = std::move(bayerFrame.controls);
+	params.requestControls = request->controls();
+	params.ipaContext = request->sequence();
+	params.delayContext = bayerFrame.delayContext;
+
+	if (embeddedBuffer) {
+		unsigned int embeddedId = unicam_[Unicam::Embedded].getBufferId(embeddedBuffer);
+
+		params.buffers.embedded = RPi::MaskEmbeddedData | embeddedId;
+		LOG(RPI, Debug) << "Signalling prepareIsp:"
+				<< " Embedded buffer id: " << embeddedId;
+	}
+
+	ipa_->prepareIsp(params);
+}
+
+bool Vc4CameraData::findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer)
+{
+	if (bayerQueue_.empty())
+		return false;
+
+	/*
+	 * Find the embedded data buffer with a matching timestamp to pass to
+	 * the IPA. Any embedded buffers with a timestamp lower than the
+	 * current bayer buffer will be removed and re-queued to the driver.
+	 */
+	uint64_t ts = bayerQueue_.front().buffer->metadata().timestamp;
+	embeddedBuffer = nullptr;
+	while (!embeddedQueue_.empty()) {
+		FrameBuffer *b = embeddedQueue_.front();
+		if (b->metadata().timestamp < ts) {
+			embeddedQueue_.pop();
+			unicam_[Unicam::Embedded].returnBuffer(b);
+			LOG(RPI, Debug) << "Dropping unmatched input frame in stream "
+					<< unicam_[Unicam::Embedded].name();
+		} else if (b->metadata().timestamp == ts) {
+			/* Found a match! */
+			embeddedBuffer = b;
+			embeddedQueue_.pop();
+			break;
+		} else {
+			break; /* Only higher timestamps from here. */
+		}
+	}
+
+	if (!embeddedBuffer && sensorMetadata_) {
+		if (embeddedQueue_.empty()) {
+			/*
+			 * If the embedded buffer queue is empty, wait for the next
+			 * buffer to arrive - dequeue ordering may send the image
+			 * buffer first.
+			 */
+			LOG(RPI, Debug) << "Waiting for next embedded buffer.";
+			return false;
+		}
+
+		/* Log that no matching embedded data buffer was found. */
+		LOG(RPI, Debug) << "Returning bayer frame without a matching embedded buffer.";
+	}
+
+	bayerFrame = std::move(bayerQueue_.front());
+	bayerQueue_.pop();
+
+	return true;
+}
+
+REGISTER_PIPELINE_HANDLER(PipelineHandlerVc4)
+
+} /* namespace libcamera */
