@@ -15,8 +15,13 @@ Each camera block is a dictionary, containing the following keys:
- `supported_formats` (list of `VirtualCameraData::Resolution`, optional) : List of supported resolution and frame rates of the emulated camera
- `width` (`unsigned int`, default=1920): Width of the window resolution. This needs to be even.
- `height` (`unsigned int`, default=1080): Height of the window resolution.
- - `frame_rates` (list of `int`, default=`[30,60]` ): Range of the frame rate. The list has to be two values of the lower bound and the upper bound of the frame rate.
-- `test_pattern` (`string`, default="bars"): Which test pattern to use as frames. The options are "bars", "lines".
+ - `frame_rates` (list of `int`, default=`[30, 60]`): Range of the frame rate. The list must contain exactly two values: the lower bound and the upper bound of the frame rate. Note: this setting is parsed but does not affect the produced frame rate yet.
+- `frames` (dictionary):
+ - `path` (`string`, default="bars"): Name of a test pattern, path to an image, or path to a directory containing a sequence of images.
+   - The available test patterns are "bars" (color bars) and "lines" (diagonal lines).
+   - An image path must have a ".jpg" or ".jpeg" extension.
+   - A directory path must end with "/". The images in the directory must be named "{n}.jpg", where {n} is the frame index, starting from 0.
+ - `scale_mode` (`string`, default="fill"): Scale mode applied when the frames are images. The available modes are "fill", "contain", and "cover". This key is ignored when `path` is a test pattern. Note: only "fill" is currently implemented; the other modes are parsed but not yet applied.
- `location` (`string`, default="front"): The location of the camera. Support "front" and "back". This is displayed in qcam camera selection window but this does not change the output.
- `model` (`string`, default="Unknown"): The model name of the camera. This is displayed in qcam camera selection window but this does not change the output.
@@ -35,13 +40,16 @@ A sample config file:
frame_rates:
- 70
- 80
- test_pattern: "bars"
+ frames:
+ path: "lines"
location: "front"
model: "Virtual Video Device"
"Virtual1":
supported_formats:
- width: 800
- test_pattern: "lines"
+ frames:
+ path: "path/to/directory_of_images/"
+ scale_mode: "contain"
location: "back"
model: "Virtual Video Device1"
"Virtual2":
@@ -61,7 +69,7 @@ This is the procedure of the Parser class:
- If the config file contains invalid configuration, this method returns nullptr. The camera will be skipped.
3. Parse each property and register the data.
- `parseSupportedFormats()`: Parses `supported_formats` in the config, which contains resolutions and frame rates.
- - `parseTestPattern()`: Parses `test_pattern` in the config.
+ - `parseFrame()`: Parses `frames` in the config.
- `parseLocation()`: Parses `location` in the config.
- `parseModel()`: Parses `model` in the config.
4. Back to `parseConfigFile()` and append the camera configuration.
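To make step 3 more concrete, the way `parseFrame()` interprets the `frames.path` value can be sketched as follows. This is an illustrative sketch only: the names `FrameSourceKind`, `endsWith()` and `classifyFramesPath()` are not part of the patch, and the real implementation in `parser.cpp` below additionally parses `scale_mode` and counts the images in a directory.

```cpp
#include <string>

/* Illustrative only: mirrors the checks parseFrame() performs on frames.path. */
enum class FrameSourceKind { ColorBars, DiagonalLines, SingleImage, ImageDirectory, Invalid };

static bool endsWith(const std::string &s, const std::string &suffix)
{
	return s.size() >= suffix.size() &&
	       s.compare(s.size() - suffix.size(), suffix.size(), suffix) == 0;
}

static FrameSourceKind classifyFramesPath(const std::string &path)
{
	if (endsWith(path, ".jpg") || endsWith(path, ".jpeg"))
		return FrameSourceKind::SingleImage;
	if (!path.empty() && path.back() == '/')
		return FrameSourceKind::ImageDirectory;
	if (path.empty() || path == "bars")
		return FrameSourceKind::ColorBars;	/* default */
	if (path == "lines")
		return FrameSourceKind::DiagonalLines;
	return FrameSourceKind::Invalid;
}
```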
new file mode 100644
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Google Inc.
+ *
+ * common_functions.cpp - Helpers that do not depend on any class
+ */
+
+#include "common_functions.h"
+
+namespace libcamera {
+
+std::string getExtension(const std::string &path)
+{
+ /* Use the last '.', ignoring dots that are part of a directory name */
+ size_t i = path.rfind('.');
+ if (i != std::string::npos && path.find('/', i) == std::string::npos)
+ return path.substr(i);
+ return "";
+}
+
+std::size_t numberOfFilesInDirectory(std::filesystem::path path)
+{
+ using std::filesystem::directory_iterator;
+ return std::distance(directory_iterator(path), directory_iterator{});
+}
+
+} // namespace libcamera
new file mode 100644
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Google Inc.
+ *
+ * common_functions.h - Helpers that do not depend on any class
+ */
+
+#pragma once
+
+#include <filesystem>
+#include <string>
+
+namespace libcamera {
+
+std::string getExtension(const std::string &path);
+
+std::size_t numberOfFilesInDirectory(std::filesystem::path path);
+
+} // namespace libcamera
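A quick usage sketch of the two helpers (a hypothetical standalone program; the paths come from the sample configuration and are assumed to exist):

```cpp
#include <iostream>

#include "common_functions.h"

int main()
{
	using namespace libcamera;

	/* Prints ".jpeg", including the leading dot */
	std::cout << getExtension("files/bike-min.jpeg") << std::endl;

	/* Prints the number of directory entries under the given path */
	std::cout << numberOfFilesInDirectory("files/libcamera_short_anime/") << std::endl;

	return 0;
}
```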
@@ -13,7 +13,9 @@
frame_rates:
- 70
- 80
- test_pattern: "lines"
+ frames:
+ path: "files/libcamera_short_anime/"
+ scale_mode: "fill"
location: "front"
model: "Virtual Video Device"
"Virtual1":
@@ -23,27 +25,27 @@
frame_rates:
- 30
- 60
- test_pattern:
+ frames:
+ path: "files/bike-min.jpeg"
+ scale_mode: "contain"
location: "back"
model: "Virtual Video Device1"
"Virtual2":
supported_formats:
- width: 400
height: 300
- test_pattern: "lines"
+ frames:
+ path: "lines"
+ scale_mode: "cover"
location: "front"
model: "Virtual Video Device2"
"Virtual3":
- supported_formats:
- - width: 100
- height: 100
- - width: 800
- height: 600
- - width: 1920
- height: 1080
- frame_rates:
- - 20
- - 30
location: "a"
- model: "Virtual Video Device3"
"Virtual4":
+ supported_formats:
+ - width: 100
+ height: 101
+"Virtual5":
+ frames:
+ path: "files/chrome_anniversary.jpg"
+ scale_mode: "contain"
@@ -23,7 +23,7 @@ public:
/** Fill the output frame buffer.
* Use the frame at the frameCount of image frames
*/
- virtual void generateFrame(const Size &size,
+ virtual void generateFrame(unsigned int &frameCount, const Size &size,
const FrameBuffer *buffer) = 0;
protected:
new file mode 100644
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Google Inc.
+ *
+ * image_frame_generator.cpp - Derived class of FrameGenerator for
+ * generating frames from images
+ */
+
+#include "image_frame_generator.h"
+
+#include <memory>
+#include <optional>
+#include <string>
+#include <vector>
+
+#include <libcamera/base/file.h>
+#include <libcamera/base/log.h>
+
+#include <libcamera/framebuffer.h>
+
+#include "libcamera/internal/mapped_framebuffer.h"
+
+#include "libyuv/convert.h"
+#include "libyuv/scale.h"
+namespace libcamera {
+
+LOG_DECLARE_CATEGORY(Virtual)
+
+std::unique_ptr<ImageFrameGenerator> ImageFrameGenerator::create(
+ ImageFrames &imageFrames)
+{
+ std::unique_ptr<ImageFrameGenerator> imageFrameGenerator =
+ std::make_unique<ImageFrameGenerator>();
+ imageFrameGenerator->imageFrames_ = &imageFrames;
+
+ /*
+ * For each input image, load the file, convert it to NV12, and
+ * store the resulting buffers in imageFrameDatas_.
+ */
+ for (unsigned int i = 0; i < imageFrames.number.value_or(1); i++) {
+ std::string path;
+ if (!imageFrames.number.has_value()) {
+ /* If the path is to an image */
+ path = imageFrames.path;
+ } else {
+ /* If the path is to a directory */
+ path = constructPath(imageFrames.path, i);
+ }
+
+ File file(path);
+ bool isOpen = file.open(File::OpenModeFlag::ReadOnly);
+ if (!isOpen) {
+ LOG(Virtual, Error) << "Failed to open image file: " << file.fileName();
+ return nullptr;
+ }
+
+ /* Read the image file to data */
+ std::vector<uint8_t> buffer(file.size());
+ Span<uint8_t> data{ buffer.data(), buffer.size() };
+ ssize_t dataSize = file.read(data);
+
+ /* Get the width and height of the image */
+ int width, height;
+ if (libyuv::MJPGSize(data.data(), dataSize, &width, &height)) {
+ LOG(Virtual, Error) << "Failed to get the size of the image file: "
+ << file.fileName();
+ return nullptr;
+ }
+
+ /* Convert to NV12 and write the data to dstY and dstUV */
+ int halfWidth = (width + 1) / 2;
+ int halfHeight = (height + 1) / 2;
+ std::unique_ptr<uint8_t[]> dstY =
+ std::make_unique<uint8_t[]>(width * height);
+ std::unique_ptr<uint8_t[]> dstUV =
+ std::make_unique<uint8_t[]>(halfWidth * halfHeight * 2);
+ int ret = libyuv::MJPGToNV12(data.data(), dataSize,
+ dstY.get(), width, dstUV.get(),
+ width, width, height, width, height);
+ if (ret != 0) {
+ LOG(Virtual, Error) << "MJPGToNV12() failed with " << ret;
+ }
+
+ imageFrameGenerator->imageFrameDatas_.emplace_back(
+ ImageFrameData{ std::move(dstY), std::move(dstUV),
+ Size(width, height) });
+ }
+ return imageFrameGenerator;
+}
+
+std::string ImageFrameGenerator::constructPath(std::string &name, unsigned int &i)
+{
+ return name + std::to_string(i) + ".jpg";
+}
+
+void ImageFrameGenerator::configure(const Size &size)
+{
+ for (unsigned int i = 0; i < imageFrames_->number.value_or(1); i++) {
+ /* Scale the imageFrameDatas_ to scaledY and scaledUV */
+ int halfSizeWidth = (size.width + 1) / 2;
+ int halfSizeHeight = (size.height + 1) / 2;
+ std::unique_ptr<uint8_t[]> scaledY =
+ std::make_unique<uint8_t[]>(size.width * size.height);
+ std::unique_ptr<uint8_t[]> scaledUV =
+ std::make_unique<uint8_t[]>(halfSizeWidth * halfSizeHeight * 2);
+ auto &src = imageFrameDatas_[i];
+
+ /*
+ * \todo Implement "contain" & "cover", based on
+ * |imageFrames_[i].scaleMode|.
+ */
+
+ /*
+ * \todo Some platforms might enforce stride due to GPU, like
+ * ChromeOS ciri (64). The width needs to be a multiple of
+ * the stride to work properly for now.
+ */
+ libyuv::NV12Scale(src.Y.get(), src.size.width,
+ src.UV.get(), src.size.width,
+ src.size.width, src.size.height,
+ scaledY.get(), size.width, scaledUV.get(), size.width,
+ size.width, size.height, libyuv::FilterMode::kFilterBilinear);
+
+ /* Store the scaled buffers in scaledFrameDatas_ */
+ scaledFrameDatas_.emplace_back(
+ ImageFrameData{ std::move(scaledY), std::move(scaledUV), size });
+ }
+}
+
+void ImageFrameGenerator::generateFrame(unsigned int &frameCount, const Size &size, const FrameBuffer *buffer)
+{
+ /* Don't do anything when the list of buffers is empty */
+ if (scaledFrameDatas_.empty())
+ return;
+
+ MappedFrameBuffer mappedFrameBuffer(buffer, MappedFrameBuffer::MapFlag::Write);
+
+ auto planes = mappedFrameBuffer.planes();
+
+ /* Make sure frameCount does not exceed the number of images */
+ frameCount %= imageFrames_->number.value_or(1);
+
+ /* Write the scaledY and scaledUV to the mapped frame buffer */
+ libyuv::NV12Copy(scaledFrameDatas_[frameCount].Y.get(), size.width,
+ scaledFrameDatas_[frameCount].UV.get(), size.width, planes[0].begin(),
+ size.width, planes[1].begin(), size.width,
+ size.width, size.height);
+
+ /* Advance to the next image every 4 frames */
+ /* \todo Read the interval (currently 4) from the configuration file? */
+ parameter_++;
+ if (parameter_ % 4 == 0)
+ frameCount++;
+}
+
+} // namespace libcamera
new file mode 100644
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+/*
+ * Copyright (C) 2023, Google Inc.
+ *
+ * image_frame_generator.h - Derived class of FrameGenerator for
+ * generating frames from images
+ */
+
+#pragma once
+
+#include <memory>
+#include <optional>
+#include <stdint.h>
+#include <string>
+#include <sys/types.h>
+#include <vector>
+
+#include "frame_generator.h"
+
+namespace libcamera {
+
+enum class ScaleMode : char {
+ Fill = 0,
+ Contain = 1,
+ Cover = 2,
+};
+
+/* Frame configuration provided by the config file */
+struct ImageFrames {
+ std::string path;
+ ScaleMode scaleMode;
+ std::optional<unsigned int> number;
+};
+
+class ImageFrameGenerator : public FrameGenerator
+{
+public:
+ /** Factory function to create an ImageFrameGenerator object.
+ * Read the images and convert them to buffers in NV12 format.
+ * Store the buffers in a list (imageFrameDatas_).
+ */
+ static std::unique_ptr<ImageFrameGenerator> create(ImageFrames &imageFrames);
+
+private:
+ struct ImageFrameData {
+ std::unique_ptr<uint8_t[]> Y;
+ std::unique_ptr<uint8_t[]> UV;
+ Size size;
+ };
+
+ /* Scale the buffers for image frames. */
+ void configure(const Size &size) override;
+ void generateFrame(unsigned int &frameCount, const Size &size, const FrameBuffer *buffer) override;
+
+ static std::string constructPath(std::string &name, unsigned int &i);
+
+ /* List of the unscaled image buffers */
+ std::vector<ImageFrameData> imageFrameDatas_;
+ /* List of the scaled image buffers */
+ std::vector<ImageFrameData> scaledFrameDatas_;
+ /* Pointer to the ImageFrames configuration owned by VirtualCameraData */
+ ImageFrames *imageFrames_;
+ /* Frame counter used to advance to the next image every few frames */
+ int parameter_ = 0;
+};
+
+} /* namespace libcamera */
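For orientation, here is a minimal sketch of how a caller (in practice the pipeline handler below) is expected to drive this class: `create()`, then `configure()` with the output size, then `generateFrame()` once per request. The directory path, image count, output size and the way the `FrameBuffer` is obtained are placeholders, not values from the patch:

```cpp
#include <memory>

#include <libcamera/geometry.h>

#include "image_frame_generator.h"

using namespace libcamera;

/* "buffer" must be an NV12 FrameBuffer matching outputSize; allocating it is out of scope here. */
void runImageFrames(const FrameBuffer *buffer)
{
	/* Assumed directory containing 10 images named 0.jpg ... 9.jpg */
	ImageFrames frames{ "files/libcamera_short_anime/", ScaleMode::Fill, 10 };

	/* The generator keeps a pointer to |frames|, which must outlive it */
	std::unique_ptr<FrameGenerator> generator = ImageFrameGenerator::create(frames);
	if (!generator)
		return;

	/* Decoding and NV12 conversion happened in create(); scale to the output size */
	const Size outputSize{ 1920, 1080 };
	generator->configure(outputSize);

	/* The generator advances frameCount itself every few calls */
	unsigned int frameCount = 0;
	for (unsigned int i = 0; i < 40; i++)
		generator->generateFrame(frameCount, outputSize, buffer);
}
```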
@@ -2,11 +2,14 @@
libcamera_sources += files([
'virtual.cpp',
- 'test_pattern_generator.cpp',
'parser.cpp',
+ 'test_pattern_generator.cpp',
+ 'image_frame_generator.cpp',
+ 'common_functions.cpp',
])
libyuv_dep = dependency('libyuv', required : false)
+libjpeg_dep = dependency('libjpeg', required : false)
# Fallback to a subproject if libyuv isn't found, as it's typically not
# provided by distributions.
@@ -26,3 +29,4 @@ if not libyuv_dep.found()
endif
libcamera_deps += [libyuv_dep]
+libcamera_deps += [libjpeg_dep]
@@ -18,6 +18,7 @@
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/yaml_parser.h"
+#include "common_functions.h"
#include "virtual.h"
namespace libcamera {
@@ -52,12 +53,12 @@ std::vector<std::unique_ptr<VirtualCameraData>> Parser::parseConfigFile(
continue;
}
- data->id_ = cameraId;
+ data->config_.id = cameraId;
ControlInfoMap::Map controls;
/* todo: Check which resolution's frame rate to be reported */
controls[&controls::FrameDurationLimits] =
- ControlInfo(int64_t(1000 / data->supportedResolutions_[0].frameRates[1]),
- int64_t(1000 / data->supportedResolutions_[0].frameRates[0]));
+ ControlInfo(int64_t(1000 / data->config_.resolutions[0].frameRates[1]),
+ int64_t(1000 / data->config_.resolutions[0].frameRates[0]));
data->controlInfo_ = ControlInfoMap(std::move(controls), controls::controls);
configurations.push_back(std::move(data));
}
@@ -72,7 +73,7 @@ std::unique_ptr<VirtualCameraData> Parser::parseCameraConfigData(
if (parseSupportedFormats(cameraConfigData, data.get()))
return nullptr;
- if (parseTestPattern(cameraConfigData, data.get()))
+ if (parseFrame(cameraConfigData, data.get()))
return nullptr;
if (parseLocation(cameraConfigData, data.get()))
@@ -122,14 +123,14 @@ int Parser::parseSupportedFormats(
frameRates.push_back(60);
}
- data->supportedResolutions_.emplace_back(
+ data->config_.resolutions.emplace_back(
VirtualCameraData::Resolution{ Size{ width, height },
frameRates });
activeResolution = std::max(activeResolution, Size{ width, height });
}
} else {
- data->supportedResolutions_.emplace_back(
+ data->config_.resolutions.emplace_back(
VirtualCameraData::Resolution{ Size{ 1920, 1080 },
{ 30, 60 } });
activeResolution = Size(1920, 1080);
@@ -141,21 +142,65 @@ int Parser::parseSupportedFormats(
return 0;
}
-int Parser::parseTestPattern(
+int Parser::parseFrame(
const YamlObject &cameraConfigData, VirtualCameraData *data)
{
- std::string testPattern = cameraConfigData["test_pattern"].get<std::string>().value();
+ const YamlObject &frames = cameraConfigData["frames"];
+ /*
+ * When no frames are provided in the config file, use the color bars
+ * test pattern.
+ */
+ if (frames.size() == 0) {
+ data->config_.frame = TestPattern::ColorBars;
+ return 0;
+ }
+
+ if (!frames.isDictionary()) {
+ LOG(Virtual, Error) << "'frames' is not a dictionary.";
+ return -EINVAL;
+ }
+
+ std::string path = frames["path"].get<std::string>().value_or("");
+
+ if (auto ext = getExtension(path); ext == ".jpg" || ext == ".jpeg") {
+ ScaleMode scaleMode;
+ if (parseScaleMode(frames, &scaleMode))
+ return -EINVAL;
+ data->config_.frame = ImageFrames{ path, scaleMode, std::nullopt };
+ } else if (!path.empty() && path.back() == '/') {
+ ScaleMode scaleMode;
+ if (parseScaleMode(frames, &scaleMode))
+ return -EINVAL;
+ data->config_.frame = ImageFrames{ path, scaleMode,
+ numberOfFilesInDirectory(path) };
+ } else if (path == "bars" || path == "") {
+ /* Default value is "bars" */
+ data->config_.frame = TestPattern::ColorBars;
+ } else if (path == "lines") {
+ data->config_.frame = TestPattern::DiagonalLines;
+ } else {
+ LOG(Virtual, Error) << "Frame: " << path
+ << " is not supported";
+ return -EINVAL;
+ }
+ return 0;
+}
- /* Default value is "bars" */
- if (testPattern == "bars" || testPattern == "") {
- data->testPattern_ = TestPattern::ColorBars;
- } else if (testPattern == "lines") {
- data->testPattern_ = TestPattern::DiagonalLines;
+int Parser::parseScaleMode(
+ const YamlObject &framesConfigData, ScaleMode *scaleMode)
+{
+ std::string mode = framesConfigData["scale_mode"].get<std::string>().value_or("");
+
+ /* Default value is fill */
+ if (mode == "fill" || mode == "") {
+ *scaleMode = ScaleMode::Fill;
+ } else if (mode == "contain") {
+ *scaleMode = ScaleMode::Contain;
+ } else if (mode == "cover") {
+ *scaleMode = ScaleMode::Cover;
} else {
- LOG(Virtual, Error) << "Test pattern: " << testPattern
- << "is not supported";
+ LOG(Virtual, Error) << "scaleMode: " << mode
+ << " is not supported";
return -EINVAL;
}
+
return 0;
}
@@ -195,4 +240,4 @@ int Parser::parseModel(
return 0;
}
-} /* namespace libcamera */
+} // namespace libcamera
@@ -34,12 +34,15 @@ private:
int parseSupportedFormats(
const YamlObject &cameraConfigData, VirtualCameraData *data);
- int parseTestPattern(
+ int parseFrame(
const YamlObject &cameraConfigData, VirtualCameraData *data);
int parseLocation(
const YamlObject &cameraConfigData, VirtualCameraData *data);
int parseModel(
const YamlObject &cameraConfigData, VirtualCameraData *data);
+
+ int parseScaleMode(
+ const YamlObject &framesConfigData, ScaleMode *scaleMode);
};
} // namespace libcamera
@@ -20,7 +20,7 @@ LOG_DECLARE_CATEGORY(Virtual)
static const unsigned int kARGBSize = 4;
void TestPatternGenerator::generateFrame(
- const Size &size,
+ [[maybe_unused]] unsigned int &frameCount, const Size &size,
const FrameBuffer *buffer)
{
MappedFrameBuffer mappedFrameBuffer(buffer,
@@ -29,7 +29,7 @@ void TestPatternGenerator::generateFrame(
auto planes = mappedFrameBuffer.planes();
/* TODO: select whether to do shifting or not */
- shiftLeft(size);
+ // shiftLeft(size);
/* Convert the template_ to the frame buffer */
int ret = libyuv::ARGBToNV12(
@@ -25,7 +25,7 @@ enum class TestPattern : char {
class TestPatternGenerator : public FrameGenerator
{
private:
- void generateFrame(const Size &size,
+ void generateFrame(unsigned int &frameCount, const Size &size,
const FrameBuffer *buffer) override;
protected:
@@ -20,9 +20,13 @@
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/yaml_parser.h"
-#include "frame_generator.h"
#include "parser.h"
namespace libcamera {
LOG_DEFINE_CATEGORY(Virtual)
@@ -63,12 +67,12 @@ CameraConfiguration::Status VirtualCameraConfiguration::validate()
}
Size maxSize;
- for (const auto &resolution : data_->supportedResolutions_)
+ for (const auto &resolution : data_->config_.resolutions)
maxSize = std::max(maxSize, resolution.size);
for (StreamConfiguration &cfg : config_) {
bool found = false;
- for (const auto &resolution : data_->supportedResolutions_) {
+ for (const auto &resolution : data_->config_.resolutions) {
if (resolution.size.width == cfg.size.width &&
resolution.size.height == cfg.size.height) {
found = true;
@@ -110,7 +114,7 @@ PipelineHandlerVirtual::generateConfiguration(Camera *camera,
return config;
Size minSize, sensorResolution;
- for (const auto &resolution : data->supportedResolutions_) {
+ for (const auto &resolution : data->config_.resolutions) {
if (minSize.isNull() || minSize > resolution.size)
minSize = resolution.size;
@@ -191,7 +195,7 @@ int PipelineHandlerVirtual::exportFrameBuffers(
int PipelineHandlerVirtual::start(Camera *camera,
[[maybe_unused]] const ControlList *controls)
{
- /* \todo Start reading the virtual video if any. */
+ /* Start reading the images/generating test patterns */
VirtualCameraData *data = cameraData(camera);
data->frameGenerator_->configure(data->stream_.configuration().size);
@@ -211,8 +215,8 @@ int PipelineHandlerVirtual::queueRequestDevice([[maybe_unused]] Camera *camera,
/* \todo Read from the virtual video if any. */
for (auto const &[stream, buffer] : request->buffers()) {
- /* map buffer and fill test patterns */
- data->frameGenerator_->generateFrame(stream->configuration().size, buffer);
+ /* Map buffer. Fill test patterns or images */
+ data->frameGenerator_->generateFrame(data->frameCount_, stream->configuration().size, buffer);
completeBuffer(request, buffer);
}
@@ -242,9 +246,10 @@ bool PipelineHandlerVirtual::match([[maybe_unused]] DeviceEnumerator *enumerator
/* Configure and register cameras with configData */
for (auto &data : configData) {
std::set<Stream *> streams{ &data->stream_ };
- std::string id = data->id_;
+ std::string id = data->config_.id;
std::shared_ptr<Camera> camera = Camera::create(std::move(data), id, streams);
+ /* Initialize the FrameGenerator */
initFrameGenerator(camera.get());
registerCamera(std::move(camera));
@@ -256,13 +261,19 @@ bool PipelineHandlerVirtual::match([[maybe_unused]] DeviceEnumerator *enumerator
void PipelineHandlerVirtual::initFrameGenerator(Camera *camera)
{
auto data = cameraData(camera);
- if (data->testPattern_ == TestPattern::DiagonalLines) {
- data->frameGenerator_ = DiagonalLinesGenerator::create();
- } else {
- data->frameGenerator_ = ColorBarsGenerator::create();
+ auto &frame = data->config_.frame;
+ if (std::holds_alternative<TestPattern>(frame)) {
+ TestPattern &testPattern = std::get<TestPattern>(frame);
+ if (testPattern == TestPattern::DiagonalLines) {
+ data->frameGenerator_ = DiagonalLinesGenerator::create();
+ } else {
+ data->frameGenerator_ = ColorBarsGenerator::create();
+ }
+ } else if (std::holds_alternative<ImageFrames>(frame)) {
+ data->frameGenerator_ = ImageFrameGenerator::create(std::get<ImageFrames>(frame));
}
}
REGISTER_PIPELINE_HANDLER(PipelineHandlerVirtual, "virtual")
-} /* namespace libcamera */
+} // namespace libcamera
@@ -7,16 +7,22 @@
#pragma once
+#include <variant>
+
#include <libcamera/base/file.h>
#include "libcamera/internal/camera.h"
#include "libcamera/internal/dma_buf_allocator.h"
#include "libcamera/internal/pipeline_handler.h"
+#include "frame_generator.h"
+#include "image_frame_generator.h"
#include "test_pattern_generator.h"
namespace libcamera {
+using VirtualFrame = std::variant<TestPattern, ImageFrames>;
+
class VirtualCameraData : public Camera::Private
{
public:
@@ -24,6 +30,13 @@ public:
Size size;
std::vector<int> frameRates;
};
+ /* The config file is parsed into this Configuration struct */
+ struct Configuration {
+ std::string id;
+ std::vector<Resolution> resolutions;
+ VirtualFrame frame;
+ };
+
VirtualCameraData(PipelineHandler *pipe)
: Camera::Private(pipe)
{
@@ -31,12 +44,9 @@ public:
~VirtualCameraData() = default;
- std::string id_;
- std::vector<Resolution> supportedResolutions_;
- TestPattern testPattern_;
-
+ unsigned int frameCount_ = 0;
+ Configuration config_;
Stream stream_;
-
std::unique_ptr<FrameGenerator> frameGenerator_;
};