@@ -15,8 +15,13 @@ Each camera block is a dictionary, containing the following keys:
- `supported_formats` (list of `VirtualCameraData::Resolution`, optional): List of supported resolutions and frame rates of the emulated camera
- `width` (`unsigned int`, default=1920): Width of the window resolution. This needs to be even.
- `height` (`unsigned int`, default=1080): Height of the window resolution.
- - `frame_rates` (list of `int`, default=`[30,60]` ): Range of the frame rate. The list has to be two values of the lower bound and the upper bound of the frame rate.
-- `test_pattern` (`string`, default="bars"): Which test pattern to use as frames. The options are "bars", "lines".
+ - `frame_rates` (list of `int`, default=`[30,60]`): Range of the frame rate. The list must contain exactly two values: the lower bound and the upper bound of the frame rate. For now, this does not affect the actual frame rate.
+- `frames` (dictionary, optional): Source of the frames produced by the emulated camera.
+ - `path` (`string`, default="bars"): Name of a test pattern, path to an image, or path to a directory containing a series of images.
+ - The available test patterns are "bars" (color bars) and "lines" (diagonal lines).
+ - A path to an image must have a ".jpg" or ".jpeg" extension.
+ - A path to a directory must end with "/". The images in the directory must be named "{n}.jpg", where {n} is the frame index starting from 0 (e.g. "0.jpg", "1.jpg", ...).
+ - `scale_mode` (`string`, default="fill"): Scale mode used when the frames are images. The supported modes are "fill", "contain", and "cover". This is ignored when `path` is a test pattern. For now, only "fill" is implemented.
- `location` (`string`, default="front"): The location of the camera. Supported values are "front" and "back". This is displayed in the qcam camera selection window but does not change the output.
- `model` (`string`, default="Unknown"): The model name of the camera. This is displayed in the qcam camera selection window but does not change the output.
@@ -35,13 +40,16 @@ A sample config file:
frame_rates:
- 70
- 80
- test_pattern: "bars"
+ frames:
+ path: "lines"
location: "front"
model: "Virtual Video Device"
"Virtual1":
supported_formats:
- width: 800
- test_pattern: "lines"
+ frames:
+ path: "path/to/directory_of_images/"
+ scale_mode: "contain"
location: "back"
model: "Virtual Video Device1"
"Virtual2":
@@ -61,7 +69,7 @@ This is the procedure of the Parser class:
- If the config file contains invalid configuration, this method returns nullptr. The camera will be skipped.
3. Parse each property and register the data.
- `parseSupportedFormats()`: Parses `supported_formats` in the config, which contains resolutions and frame rates.
- - `parseTestPattern()`: Parses `test_pattern` in the config.
+ - `parseFrame()`: Parses `frames` in the config.
- `parseLocation()`: Parses `location` in the config.
- `parseModel()`: Parses `model` in the config.
4. Control returns to `parseConfigFile()`, which appends the camera configuration.
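
In `parseCameraConfigData()`, steps 2 and 3 reduce to the following shape (a sketch using the helper names from parser.cpp; the allocation of `data` is omitted):

    if (parseSupportedFormats(cameraConfigData, data.get()))
            return nullptr;

    if (parseFrame(cameraConfigData, data.get()))
            return nullptr;

    if (parseLocation(cameraConfigData, data.get()))
            return nullptr;

    if (parseModel(cameraConfigData, data.get()))
            return nullptr;

    return data;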
new file mode 100644
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Google Inc.
+ *
+ * common_functions.cpp - Helpers that do not depend on any class
+ */
+
+#include "common_functions.h"
+
+namespace libcamera {
+
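+/*
+ * Count the entries in a directory. Note that every directory entry is
+ * counted, not only regular image files.
+ */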
+std::size_t numberOfFilesInDirectory(std::filesystem::path path)
+{
+ using std::filesystem::directory_iterator;
+ return std::distance(directory_iterator(path), directory_iterator{});
+}
+
+} // namespace libcamera
new file mode 100644
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Google Inc.
+ *
+ * common_functions.h - Helpers that do not depend on any class
+ */
+
+#pragma once
+
+#include <filesystem>
+
+namespace libcamera {
+
+std::size_t numberOfFilesInDirectory(std::filesystem::path path);
+
+} // namespace libcamera
@@ -23,7 +23,7 @@ public:
/** Fill the output frame buffer.
 * Use the frame at index frameCount of the image frames
*/
- virtual void generateFrame(const Size &size,
+ virtual void generateFrame(unsigned int &frameCount, const Size &size,
const FrameBuffer *buffer) = 0;
protected:
new file mode 100644
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Google Inc.
+ *
+ * image_frame_generator.cpp - Derived class of FrameGenerator for
+ * generating frames from images
+ */
+
+#include "image_frame_generator.h"
+
+#include <filesystem>
+#include <memory>
+#include <optional>
+#include <string>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+
+#include <libcamera/framebuffer.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include "libyuv/convert.h"
+#include "libyuv/scale.h"
+namespace libcamera {
+
+namespace {
+std::filesystem::path constructPath(std::filesystem::path &name, unsigned int i)
+{
+ return name / (std::to_string(i) + ".jpg");
+}
+} // namespace
+
+LOG_DECLARE_CATEGORY(Virtual)
+
+std::unique_ptr<ImageFrameGenerator> ImageFrameGenerator::create(
+ ImageFrames &imageFrames)
+{
+ std::unique_ptr<ImageFrameGenerator> imageFrameGenerator =
+ std::make_unique<ImageFrameGenerator>();
+ imageFrameGenerator->imageFrames_ = &imageFrames;
+
+ /*
+ * For each frame, load the image, convert it to NV12, and store the
+ * buffers.
+ */
+ for (unsigned int i = 0; i < imageFrames.number.value_or(1); i++) {
+ std::filesystem::path path;
+ if (!imageFrames.number.has_value()) {
+ /* If the path is to an image */
+ path = imageFrames.path;
+ } else {
+ /* If the path is to a directory */
+ path = constructPath(imageFrames.path, i);
+ }
+
+ File file(path);
+ if (!file.open(File::OpenModeFlag::ReadOnly)) {
+ LOG(Virtual, Error) << "Failed to open image file " << file.fileName()
+ << ": " << strerror(file.error());
+ return nullptr;
+ }
+
+ /* Read the image file to data */
+ auto fileSize = file.size();
+ if (fileSize <= 0) {
+ LOG(Virtual, Error) << "Invalid image file " << file.fileName()
+ << " with size: " << fileSize;
+ return nullptr;
+ }
+ auto buffer = std::make_unique<uint8_t[]>(file.size());
+ Span<unsigned char> data{ buffer.get(), (unsigned long)fileSize };
+ long dataSize = file.read(data);
+
+ /* Get the width and height of the image */
+ int width, height;
+ if (libyuv::MJPGSize(data.data(), dataSize, &width, &height)) {
+ LOG(Virtual, Error) << "Failed to get the size of the image file: "
+ << file.fileName();
+ return nullptr;
+ }
+
+ /* Convert to NV12 and write the data to dstY and dstUV */
+ int halfWidth = (width + 1) / 2;
+ int halfHeight = (height + 1) / 2;
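+ /*
+ * NV12 stores a full-resolution Y plane followed by an interleaved
+ * UV plane at half resolution in both dimensions, hence the two
+ * buffer sizes below.
+ */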
+ std::unique_ptr<uint8_t[]> dstY =
+ std::make_unique<uint8_t[]>(width * height);
+ std::unique_ptr<uint8_t[]> dstUV =
+ std::make_unique<uint8_t[]>(halfWidth * halfHeight * 2);
+ int ret = libyuv::MJPGToNV12(data.data(), dataSize,
+ dstY.get(), width, dstUV.get(),
+ width, width, height, width, height);
+ if (ret != 0) {
+ LOG(Virtual, Error) << "MJPGToNV12() failed with " << ret;
+ }
+
+ imageFrameGenerator->imageFrameDatas_.emplace_back(
+ ImageFrameData{ std::move(dstY), std::move(dstUV),
+ Size(width, height) });
+ }
+ return imageFrameGenerator;
+}
+
+void ImageFrameGenerator::configure(const Size &size)
+{
+ for (unsigned int i = 0; i < imageFrames_->number.value_or(1); i++) {
+ /* Scale the imageFrameDatas_ to scaledY and scaledUV */
+ int halfSizeWidth = (size.width + 1) / 2;
+ int halfSizeHeight = (size.height + 1) / 2;
+ std::unique_ptr<uint8_t[]> scaledY =
+ std::make_unique<uint8_t[]>(size.width * size.height);
+ std::unique_ptr<uint8_t[]> scaledUV =
+ std::make_unique<uint8_t[]>(halfSizeWidth * halfSizeHeight * 2);
+ auto &src = imageFrameDatas_[i];
+
+ /*
+ * \todo Implement "contain" & "cover", based on
+ * |imageFrames_[i].scaleMode|.
+ */
+
+ /*
+ * \todo Some platforms might enforce stride due to GPU, like
+ * ChromeOS ciri (64). The width needs to be a multiple of
+ * the stride to work properly for now.
+ */
+ libyuv::NV12Scale(src.Y.get(), src.size.width,
+ src.UV.get(), src.size.width,
+ src.size.width, src.size.height,
+ scaledY.get(), size.width, scaledUV.get(), size.width,
+ size.width, size.height, libyuv::FilterMode::kFilterBilinear);
+
+ /* Store the pointers to member variable */
+ scaledFrameDatas_.emplace_back(
+ ImageFrameData{ std::move(scaledY), std::move(scaledUV), size });
+ }
+}
+
+void ImageFrameGenerator::generateFrame(unsigned int &frameCount, const Size &size, const FrameBuffer *buffer)
+{
+ /* Don't do anything when the list of scaled frames is empty */
+ if (scaledFrameDatas_.size() == 0)
+ return;
+
+ MappedFrameBuffer mappedFrameBuffer(buffer, MappedFrameBuffer::MapFlag::Write);
+
+ auto planes = mappedFrameBuffer.planes();
+
+ /* Make sure frameCount does not exceed the number of images */
+ frameCount %= imageFrames_->number.value_or(1);
+
+ /* Write the scaledY and scaledUV to the mapped frame buffer */
+ libyuv::NV12Copy(scaledFrameDatas_[frameCount].Y.get(), size.width,
+ scaledFrameDatas_[frameCount].UV.get(), size.width, planes[0].begin(),
+ size.width, planes[1].begin(), size.width,
+ size.width, size.height);
+
+ /* Proceed to the next image every 4 frames. */
+ /* \todo Read parameter_ from the configuration file? */
+ parameter_++;
+ if (parameter_ % 4 == 0)
+ frameCount++;
+}
+
+} // namespace libcamera
new file mode 100644
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Google Inc.
+ *
+ * image_frame_generator.h - Derived class of FrameGenerator for
+ * generating frames from images
+ */
+
+#pragma once
+
+#include <filesystem>
+#include <memory>
+#include <optional>
+#include <stdint.h>
+#include <sys/types.h>
+
+#include "frame_generator.h"
+
+namespace libcamera {
+
+enum class ScaleMode : char {
+ Fill = 0,
+ Contain = 1,
+ Cover = 2,
+};
+
+/* Frame configuration provided by the config file */
+struct ImageFrames {
+ std::filesystem::path path;
+ ScaleMode scaleMode;
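+ /* std::nullopt for a single image; number of images when path is a directory */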
+ std::optional<unsigned int> number;
+};
+
+class ImageFrameGenerator : public FrameGenerator
+{
+public:
+ /** Factory function to create an ImageFrameGenerator object.
+ * Read the images and convert them to buffers in NV12 format.
+ * Store the buffers in a list (imageFrameDatas_).
+ */
+ static std::unique_ptr<ImageFrameGenerator> create(ImageFrames &imageFrames);
+
+private:
+ struct ImageFrameData {
+ std::unique_ptr<uint8_t[]> Y;
+ std::unique_ptr<uint8_t[]> UV;
+ Size size;
+ };
+
+ /* Scale the buffers for image frames. */
+ void configure(const Size &size) override;
+ void generateFrame(unsigned int &frameCount, const Size &size, const FrameBuffer *buffer) override;
+
+ /* List of the original, unscaled image buffers */
+ std::vector<ImageFrameData> imageFrameDatas_;
+ /* List of pointers to the scaled image buffers */
+ std::vector<ImageFrameData> scaledFrameDatas_;
+ /* Pointer to the ImageFrames configuration in VirtualCameraData */
+ ImageFrames *imageFrames_;
+ /* Frame counter used to advance to the next image every few frames. */
+ int parameter_ = 0;
+};
+
+} /* namespace libcamera */
@@ -2,11 +2,14 @@
libcamera_internal_sources += files([
'virtual.cpp',
- 'test_pattern_generator.cpp',
'parser.cpp',
+ 'test_pattern_generator.cpp',
+ 'image_frame_generator.cpp',
+ 'common_functions.cpp',
])
libyuv_dep = dependency('libyuv', required : false)
+libjpeg = dependency('libjpeg', required : false)
# Fallback to a subproject if libyuv isn't found, as it's typically not
# provided by distributions.
@@ -26,3 +29,4 @@ if not libyuv_dep.found()
endif
libcamera_deps += [libyuv_dep]
+libcamera_deps += [libjpeg]
@@ -18,6 +18,7 @@
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/yaml_parser.h"
+#include "common_functions.h"
#include "virtual.h"
namespace libcamera {
@@ -52,12 +53,12 @@ std::vector<std::unique_ptr<VirtualCameraData>> Parser::parseConfigFile(
continue;
}
- data->id_ = cameraId;
+ data->config_.id = cameraId;
ControlInfoMap::Map controls;
/* todo: Check which resolution's frame rate to be reported */
controls[&controls::FrameDurationLimits] =
- ControlInfo(int64_t(1000 / data->supportedResolutions_[0].frameRates[1]),
- int64_t(1000 / data->supportedResolutions_[0].frameRates[0]));
+ ControlInfo(int64_t(1000 / data->config_.resolutions[0].frameRates[1]),
+ int64_t(1000 / data->config_.resolutions[0].frameRates[0]));
data->controlInfo_ = ControlInfoMap(std::move(controls), controls::controls);
configurations.push_back(std::move(data));
}
@@ -72,7 +73,7 @@ std::unique_ptr<VirtualCameraData> Parser::parseCameraConfigData(
if (parseSupportedFormats(cameraConfigData, data.get()))
return nullptr;
- if (parseTestPattern(cameraConfigData, data.get()))
+ if (parseFrame(cameraConfigData, data.get()))
return nullptr;
if (parseLocation(cameraConfigData, data.get()))
@@ -106,30 +107,30 @@ int Parser::parseSupportedFormats(
std::vector<int> frameRates;
if (supportedResolution.contains("frame_rates")) {
auto frameRatesList =
- supportedResolution["frame_rates"].getList<int>().value();
- if (frameRatesList.size() != 2) {
+ supportedResolution["frame_rates"].getList<int>();
+ if (!frameRatesList.has_value() || frameRatesList->size() != 2) {
LOG(Virtual, Error) << "frame_rates needs to be the two edge values of a range";
return -EINVAL;
}
- if (frameRatesList[0] > frameRatesList[1]) {
+ if (frameRatesList.value()[0] > frameRatesList.value()[1]) {
LOG(Virtual, Error) << "frame_rates's first value(lower bound) is higher than the second value(upper bound)";
return -EINVAL;
}
- frameRates.push_back(frameRatesList[0]);
- frameRates.push_back(frameRatesList[1]);
+ frameRates.push_back(frameRatesList.value()[0]);
+ frameRates.push_back(frameRatesList.value()[1]);
} else {
frameRates.push_back(30);
frameRates.push_back(60);
}
- data->supportedResolutions_.emplace_back(
+ data->config_.resolutions.emplace_back(
VirtualCameraData::Resolution{ Size{ width, height },
frameRates });
activeResolution = std::max(activeResolution, Size{ width, height });
}
} else {
- data->supportedResolutions_.emplace_back(
+ data->config_.resolutions.emplace_back(
VirtualCameraData::Resolution{ Size{ 1920, 1080 },
{ 30, 60 } });
activeResolution = Size(1920, 1080);
@@ -141,28 +142,73 @@ int Parser::parseSupportedFormats(
return 0;
}
-int Parser::parseTestPattern(
+int Parser::parseFrame(
const YamlObject &cameraConfigData, VirtualCameraData *data)
{
- std::string testPattern = cameraConfigData["test_pattern"].get<std::string>().value();
+ const YamlObject &frames = cameraConfigData["frames"];
+ /* When no frames are provided in the config file, use the color bars test pattern */
+ if (frames.size() == 0) {
+ data->config_.frame = TestPattern::ColorBars;
+ return 0;
+ }
+
+ if (!frames.isDictionary()) {
+ LOG(Virtual, Error) << "'frames' is not a dictionary.";
+ return -EINVAL;
+ }
+
+ std::string path = frames["path"].get<std::string>("");
+
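+ /*
+ * Determine the frame source from the path: a single image file, a
+ * directory of images, or a named test pattern.
+ */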
+ if (auto ext = std::filesystem::path(path).extension();
+ ext == ".jpg" || ext == ".jpeg") {
+ ScaleMode scaleMode;
+ if (parseScaleMode(frames, &scaleMode))
+ return -EINVAL;
+ data->config_.frame = ImageFrames{ path, scaleMode, std::nullopt };
+ } else if (std::filesystem::is_directory(std::filesystem::symlink_status(path))) {
+ ScaleMode scaleMode;
+ if (parseScaleMode(frames, &scaleMode))
+ return -EINVAL;
+ data->config_.frame = ImageFrames{ path, scaleMode,
+ numberOfFilesInDirectory(path) };
+ } else if (path == "bars" || path == "") {
+ /* Default value is "bars" */
+ data->config_.frame = TestPattern::ColorBars;
+ } else if (path == "lines") {
+ data->config_.frame = TestPattern::DiagonalLines;
+ } else {
+ LOG(Virtual, Error) << "Frame: " << path
+ << " is not supported";
+ return -EINVAL;
+ }
+ return 0;
+}
- /* Default value is "bars" */
- if (testPattern == "bars" || testPattern == "") {
- data->testPattern_ = TestPattern::ColorBars;
- } else if (testPattern == "lines") {
- data->testPattern_ = TestPattern::DiagonalLines;
+int Parser::parseScaleMode(
+ const YamlObject &framesConfigData, ScaleMode *scaleMode)
+{
+ std::string mode = framesConfigData["scale_mode"].get<std::string>("");
+
+ /* Default value is fill */
+ if (mode == "fill" || mode == "") {
+ *scaleMode = ScaleMode::Fill;
+ } else if (mode == "contain") {
+ *scaleMode = ScaleMode::Contain;
+ } else if (mode == "cover") {
+ *scaleMode = ScaleMode::Cover;
} else {
- LOG(Virtual, Error) << "Test pattern: " << testPattern
- << "is not supported";
+ LOG(Virtual, Error) << "scaleMode: " << mode
+ << " is not supported";
return -EINVAL;
}
+
return 0;
}
int Parser::parseLocation(
const YamlObject &cameraConfigData, VirtualCameraData *data)
{
- std::string location = cameraConfigData["location"].get<std::string>().value();
+ std::string location = cameraConfigData["location"].get<std::string>("");
/* Default value is properties::CameraLocationFront */
if (location == "front" || location == "") {
@@ -184,7 +230,7 @@ int Parser::parseModel(
const YamlObject &cameraConfigData, VirtualCameraData *data)
{
std::string model =
- cameraConfigData["model"].get<std::string>().value();
+ cameraConfigData["model"].get<std::string>("");
/* Default value is "Unknown" */
if (model == "")
@@ -195,4 +241,4 @@ int Parser::parseModel(
return 0;
}
-} /* namespace libcamera */
+} // namespace libcamera
@@ -34,12 +34,15 @@ private:
int parseSupportedFormats(
const YamlObject &cameraConfigData, VirtualCameraData *data);
- int parseTestPattern(
+ int parseFrame(
const YamlObject &cameraConfigData, VirtualCameraData *data);
int parseLocation(
const YamlObject &cameraConfigData, VirtualCameraData *data);
int parseModel(
const YamlObject &cameraConfigData, VirtualCameraData *data);
+
+ int parseScaleMode(
+ const YamlObject &framesConfigData, ScaleMode *scaleMode);
};
} // namespace libcamera
@@ -20,7 +20,7 @@ LOG_DECLARE_CATEGORY(Virtual)
static const unsigned int kARGBSize = 4;
void TestPatternGenerator::generateFrame(
- const Size &size,
+ [[maybe_unused]] unsigned int &frameCount, const Size &size,
const FrameBuffer *buffer)
{
MappedFrameBuffer mappedFrameBuffer(buffer,
@@ -29,7 +29,7 @@ void TestPatternGenerator::generateFrame(
auto planes = mappedFrameBuffer.planes();
/* TODO: select whether to do shifting or not */
- shiftLeft(size);
+ // shiftLeft(size);
/* Convert the template_ to the frame buffer */
int ret = libyuv::ARGBToNV12(
@@ -25,7 +25,7 @@ enum class TestPattern : char {
class TestPatternGenerator : public FrameGenerator
{
private:
- void generateFrame(const Size &size,
+ void generateFrame(unsigned int &frameCount, const Size &size,
const FrameBuffer *buffer) override;
protected:
new file mode 100644
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2024, Google Inc.
+ *
+ * utils.h - Utility types for Virtual Pipeline Handler
+ */
+
+#pragma once
+
+namespace libcamera {
+
+template<class... Ts>
+struct overloaded : Ts... {
+ using Ts::operator()...;
+};
+template<class... Ts>
+overloaded(Ts...) -> overloaded<Ts...>;
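+
+/*
+ * Example usage (illustrative), dispatching on a std::variant holding a
+ * frame source:
+ *
+ *	std::visit(overloaded{
+ *			   [](TestPattern &) { ... },
+ *			   [](ImageFrames &) { ... } },
+ *		   frame);
+ */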
+
+} // namespace libcamera
@@ -20,8 +20,8 @@
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/yaml_parser.h"
-#include "frame_generator.h"
#include "parser.h"
+#include "utils.h"
namespace libcamera {
@@ -63,12 +63,12 @@ CameraConfiguration::Status VirtualCameraConfiguration::validate()
}
Size maxSize;
- for (const auto &resolution : data_->supportedResolutions_)
+ for (const auto &resolution : data_->config_.resolutions)
maxSize = std::max(maxSize, resolution.size);
for (StreamConfiguration &cfg : config_) {
bool found = false;
- for (const auto &resolution : data_->supportedResolutions_) {
+ for (const auto &resolution : data_->config_.resolutions) {
if (resolution.size.width == cfg.size.width &&
resolution.size.height == cfg.size.height) {
found = true;
@@ -110,7 +110,7 @@ PipelineHandlerVirtual::generateConfiguration(Camera *camera,
return config;
Size minSize, sensorResolution;
- for (const auto &resolution : data->supportedResolutions_) {
+ for (const auto &resolution : data->config_.resolutions) {
if (minSize.isNull() || minSize > resolution.size)
minSize = resolution.size;
@@ -199,7 +199,7 @@ int PipelineHandlerVirtual::exportFrameBuffers(
int PipelineHandlerVirtual::start(Camera *camera,
[[maybe_unused]] const ControlList *controls)
{
- /* \todo Start reading the virtual video if any. */
+ /* Start reading the images/generating test patterns */
VirtualCameraData *data = cameraData(camera);
data->frameGenerator_->configure(data->stream_.configuration().size);
@@ -219,8 +219,8 @@ int PipelineHandlerVirtual::queueRequestDevice([[maybe_unused]] Camera *camera,
/* \todo Read from the virtual video if any. */
for (auto const &[stream, buffer] : request->buffers()) {
- /* map buffer and fill test patterns */
- data->frameGenerator_->generateFrame(stream->configuration().size, buffer);
+ /* Map the buffer and fill it with a test pattern or image */
+ data->frameGenerator_->generateFrame(data->frameCount_, stream->configuration().size, buffer);
completeBuffer(request, buffer);
}
@@ -250,9 +250,10 @@ bool PipelineHandlerVirtual::match([[maybe_unused]] DeviceEnumerator *enumerator
/* Configure and register cameras with configData */
for (auto &data : configData) {
std::set<Stream *> streams{ &data->stream_ };
- std::string id = data->id_;
+ std::string id = data->config_.id;
std::shared_ptr<Camera> camera = Camera::create(std::move(data), id, streams);
+ /* Initialize the FrameGenerator */
initFrameGenerator(camera.get());
registerCamera(std::move(camera));
@@ -264,13 +265,21 @@ bool PipelineHandlerVirtual::match([[maybe_unused]] DeviceEnumerator *enumerator
void PipelineHandlerVirtual::initFrameGenerator(Camera *camera)
{
auto data = cameraData(camera);
- if (data->testPattern_ == TestPattern::DiagonalLines) {
- data->frameGenerator_ = DiagonalLinesGenerator::create();
- } else {
- data->frameGenerator_ = ColorBarsGenerator::create();
- }
+ auto &frame = data->config_.frame;
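+ /* Pick the generator matching the configured frame source. */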
+ std::visit(overloaded{
+ [&](TestPattern &testPattern) {
+ if (testPattern == TestPattern::DiagonalLines) {
+ data->frameGenerator_ = DiagonalLinesGenerator::create();
+ } else {
+ data->frameGenerator_ = ColorBarsGenerator::create();
+ }
+ },
+ [&](ImageFrames &imageFrames) {
+ data->frameGenerator_ = ImageFrameGenerator::create(imageFrames);
+ } },
+ frame);
}
REGISTER_PIPELINE_HANDLER(PipelineHandlerVirtual, "virtual")
-} /* namespace libcamera */
+} // namespace libcamera
@@ -7,16 +7,22 @@
#pragma once
+#include <variant>
+
#include <libcamera/base/file.h>
#include "libcamera/internal/camera.h"
#include "libcamera/internal/dma_buf_allocator.h"
#include "libcamera/internal/pipeline_handler.h"
+#include "frame_generator.h"
+#include "image_frame_generator.h"
#include "test_pattern_generator.h"
namespace libcamera {
+using VirtualFrame = std::variant<TestPattern, ImageFrames>;
+
class VirtualCameraData : public Camera::Private
{
public:
@@ -24,6 +30,13 @@ public:
Size size;
std::vector<int> frameRates;
};
+ /* The config file is parsed into this Configuration struct */
+ struct Configuration {
+ std::string id;
+ std::vector<Resolution> resolutions;
+ VirtualFrame frame;
+ };
+
VirtualCameraData(PipelineHandler *pipe)
: Camera::Private(pipe)
{
@@ -31,12 +44,9 @@ public:
~VirtualCameraData() = default;
- std::string id_;
- std::vector<Resolution> supportedResolutions_;
- TestPattern testPattern_;
-
+ unsigned int frameCount_ = 0;
+ Configuration config_;
Stream stream_;
-
std::unique_ptr<FrameGenerator> frameGenerator_;
};