From patchwork Tue Apr 26 11:02:33 2022 From: Tomi Valkeinen To: libcamera-devel@lists.libcamera.org, David Plowman, Kieran Bingham, Laurent Pinchart, Jacopo Mondi Date: Tue, 26 Apr 2022 14:02:33 +0300 Message-Id: <20220426110236.104511-2-tomi.valkeinen@ideasonboard.com> In-Reply-To: <20220426110236.104511-1-tomi.valkeinen@ideasonboard.com> Subject: [libcamera-devel] [PATCH v6 1/4] Add Python bindings Add libcamera Python bindings. pybind11 is used to generate the C++ <-> Python layer. We use the pybind11 'smart_holder' branch to avoid issues with private destructors and shared_ptr; an alternative upstream solution is proposed in https://github.com/pybind/pybind11/pull/2067. Only a subset of the libcamera classes is exposed: implementing and testing the wrapper classes takes effort, so only the classes I have needed so far have been added.
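For illustration, capturing a few frames with these bindings is expected to look roughly like the sketch below. It mirrors the flow exercised by the unit tests added later in this series; the 'vimc' camera name and the fixed sleep are placeholders for a real application's event loop on CameraManager.efd.

import time
import libcamera as libcam

cm = libcam.CameraManager.singleton()
cam = cm.find('vimc')                  # substring match against the camera id
cam.acquire()

camconfig = cam.generateConfiguration([libcam.StreamRole.Viewfinder])
cam.configure(camconfig)
stream = camconfig.at(0).stream

allocator = libcam.FrameBufferAllocator(cam)
allocator.allocate(stream)

reqs = []
for i, buf in enumerate(allocator.buffers(stream)):
    req = cam.createRequest(i)         # the cookie identifies the request
    req.addBuffer(stream, buf)
    reqs.append(req)

cam.start()
for req in reqs:
    cam.queueRequest(req)

time.sleep(0.5)                        # a real app would poll cm.efd instead
for req in cm.getReadyRequests():
    print(req.cookie, req.status)

cam.stop()
cam.release()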
Signed-off-by: Tomi Valkeinen --- meson.build | 1 + meson_options.txt | 5 + src/meson.build | 1 + src/py/libcamera/__init__.py | 12 + src/py/libcamera/meson.build | 43 ++++ src/py/libcamera/pyenums.cpp | 53 ++++ src/py/libcamera/pymain.cpp | 452 +++++++++++++++++++++++++++++++++++ src/py/meson.build | 1 + subprojects/.gitignore | 3 +- subprojects/pybind11.wrap | 6 + 10 files changed, 576 insertions(+), 1 deletion(-) create mode 100644 src/py/libcamera/__init__.py create mode 100644 src/py/libcamera/meson.build create mode 100644 src/py/libcamera/pyenums.cpp create mode 100644 src/py/libcamera/pymain.cpp create mode 100644 src/py/meson.build create mode 100644 subprojects/pybind11.wrap diff --git a/meson.build b/meson.build index 29d8542d..ff6c2ad6 100644 --- a/meson.build +++ b/meson.build @@ -179,6 +179,7 @@ summary({ 'Tracing support': tracing_enabled, 'Android support': android_enabled, 'GStreamer support': gst_enabled, + 'Python bindings': pycamera_enabled, 'V4L2 emulation support': v4l2_enabled, 'cam application': cam_enabled, 'qcam application': qcam_enabled, diff --git a/meson_options.txt b/meson_options.txt index 2c80ad8b..ca00c78e 100644 --- a/meson_options.txt +++ b/meson_options.txt @@ -58,3 +58,8 @@ option('v4l2', type : 'boolean', value : false, description : 'Compile the V4L2 compatibility layer') + +option('pycamera', + type : 'feature', + value : 'auto', + description : 'Enable libcamera Python bindings (experimental)') diff --git a/src/meson.build b/src/meson.build index e0ea9c35..34663a6f 100644 --- a/src/meson.build +++ b/src/meson.build @@ -37,4 +37,5 @@ subdir('cam') subdir('qcam') subdir('gstreamer') +subdir('py') subdir('v4l2') diff --git a/src/py/libcamera/__init__.py b/src/py/libcamera/__init__.py new file mode 100644 index 00000000..cd7512a2 --- /dev/null +++ b/src/py/libcamera/__init__.py @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: LGPL-2.1-or-later +# Copyright (C) 2021, Tomi Valkeinen + +from ._libcamera import * +import mmap + + +def __FrameBuffer__mmap(self, plane): + return mmap.mmap(self.fd(plane), self.length(plane), mmap.MAP_SHARED, mmap.PROT_READ) + + +FrameBuffer.mmap = __FrameBuffer__mmap diff --git a/src/py/libcamera/meson.build b/src/py/libcamera/meson.build new file mode 100644 index 00000000..82388efb --- /dev/null +++ b/src/py/libcamera/meson.build @@ -0,0 +1,43 @@ +# SPDX-License-Identifier: CC0-1.0 + +py3_dep = dependency('python3', required : get_option('pycamera')) + +if not py3_dep.found() + pycamera_enabled = false + subdir_done() +endif + +pycamera_enabled = true + +pybind11_proj = subproject('pybind11') +pybind11_dep = pybind11_proj.get_variable('pybind11_dep') + +pycamera_sources = files([ + 'pymain.cpp', + 'pyenums.cpp', +]) + +pycamera_deps = [ + libcamera_public, + py3_dep, + pybind11_dep, +] + +pycamera_args = ['-fvisibility=hidden'] +pycamera_args += ['-Wno-shadow'] +pycamera_args += ['-DPYBIND11_USE_SMART_HOLDER_AS_DEFAULT'] + +destdir = get_option('libdir') + '/python' + py3_dep.version() + '/site-packages/libcamera' + +pycamera = shared_module('_libcamera', + pycamera_sources, + install : true, + install_dir : destdir, + name_prefix : '', + dependencies : pycamera_deps, + cpp_args : pycamera_args) + +run_command('ln', '-fsT', '../../../../src/py/libcamera/__init__.py', + meson.current_build_dir() / '__init__.py') + +install_data(['__init__.py'], install_dir : destdir) diff --git a/src/py/libcamera/pyenums.cpp b/src/py/libcamera/pyenums.cpp new file mode 100644 index 00000000..39886656 --- /dev/null +++ 
b/src/py/libcamera/pyenums.cpp @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Tomi Valkeinen + * + * Python bindings + */ + +#include + +#include + +namespace py = pybind11; + +using namespace libcamera; + +void init_pyenums(py::module &m) +{ + py::enum_(m, "ConfigurationStatus") + .value("Valid", CameraConfiguration::Valid) + .value("Adjusted", CameraConfiguration::Adjusted) + .value("Invalid", CameraConfiguration::Invalid); + + py::enum_(m, "StreamRole") + .value("StillCapture", StreamRole::StillCapture) + .value("Raw", StreamRole::Raw) + .value("VideoRecording", StreamRole::VideoRecording) + .value("Viewfinder", StreamRole::Viewfinder); + + py::enum_(m, "RequestStatus") + .value("Pending", Request::RequestPending) + .value("Complete", Request::RequestComplete) + .value("Cancelled", Request::RequestCancelled); + + py::enum_(m, "FrameMetadataStatus") + .value("Success", FrameMetadata::FrameSuccess) + .value("Error", FrameMetadata::FrameError) + .value("Cancelled", FrameMetadata::FrameCancelled); + + py::enum_(m, "ReuseFlag") + .value("Default", Request::ReuseFlag::Default) + .value("ReuseBuffers", Request::ReuseFlag::ReuseBuffers); + + py::enum_(m, "ControlType") + .value("None", ControlType::ControlTypeNone) + .value("Bool", ControlType::ControlTypeBool) + .value("Byte", ControlType::ControlTypeByte) + .value("Integer32", ControlType::ControlTypeInteger32) + .value("Integer64", ControlType::ControlTypeInteger64) + .value("Float", ControlType::ControlTypeFloat) + .value("String", ControlType::ControlTypeString) + .value("Rectangle", ControlType::ControlTypeRectangle) + .value("Size", ControlType::ControlTypeSize); +} diff --git a/src/py/libcamera/pymain.cpp b/src/py/libcamera/pymain.cpp new file mode 100644 index 00000000..54674caf --- /dev/null +++ b/src/py/libcamera/pymain.cpp @@ -0,0 +1,452 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Tomi Valkeinen + * + * Python bindings + */ + +/* + * To generate pylibcamera stubs: + * PYTHONPATH=build/src/py pybind11-stubgen --no-setup-py -o build/src/py libcamera + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +namespace py = pybind11; + +using namespace std; +using namespace libcamera; + +template +static py::object ValueOrTuple(const ControlValue &cv) +{ + if (cv.isArray()) { + const T *v = reinterpret_cast(cv.data().data()); + auto t = py::tuple(cv.numElements()); + + for (size_t i = 0; i < cv.numElements(); ++i) + t[i] = v[i]; + + return t; + } + + return py::cast(cv.get()); +} + +static py::object ControlValueToPy(const ControlValue &cv) +{ + switch (cv.type()) { + case ControlTypeBool: + return ValueOrTuple(cv); + case ControlTypeByte: + return ValueOrTuple(cv); + case ControlTypeInteger32: + return ValueOrTuple(cv); + case ControlTypeInteger64: + return ValueOrTuple(cv); + case ControlTypeFloat: + return ValueOrTuple(cv); + case ControlTypeString: + return py::cast(cv.get()); + case ControlTypeRectangle: { + const Rectangle *v = reinterpret_cast(cv.data().data()); + return py::make_tuple(v->x, v->y, v->width, v->height); + } + case ControlTypeSize: { + const Size *v = reinterpret_cast(cv.data().data()); + return py::make_tuple(v->width, v->height); + } + case ControlTypeNone: + default: + throw runtime_error("Unsupported ControlValue type"); + } +} + +static ControlValue PyToControlValue(const py::object &ob, ControlType type) +{ + switch (type) { + case ControlTypeBool: + 
return ControlValue(ob.cast()); + case ControlTypeByte: + return ControlValue(ob.cast()); + case ControlTypeInteger32: + return ControlValue(ob.cast()); + case ControlTypeInteger64: + return ControlValue(ob.cast()); + case ControlTypeFloat: + return ControlValue(ob.cast()); + case ControlTypeString: + return ControlValue(ob.cast()); + case ControlTypeRectangle: + case ControlTypeSize: + case ControlTypeNone: + default: + throw runtime_error("Control type not implemented"); + } +} + +static weak_ptr g_camera_manager; +static int g_eventfd; +static mutex g_reqlist_mutex; +static vector g_reqlist; + +static void handleRequestCompleted(Request *req) +{ + { + lock_guard guard(g_reqlist_mutex); + g_reqlist.push_back(req); + } + + uint64_t v = 1; + write(g_eventfd, &v, 8); +} + +void init_pyenums(py::module &m); + +PYBIND11_MODULE(_libcamera, m) +{ + init_pyenums(m); + + /* Forward declarations */ + + /* + * We need to declare all the classes here so that Python docstrings + * can be generated correctly. + * https://pybind11.readthedocs.io/en/latest/advanced/misc.html#avoiding-c-types-in-docstrings + */ + + auto pyCameraManager = py::class_(m, "CameraManager"); + auto pyCamera = py::class_(m, "Camera"); + auto pyCameraConfiguration = py::class_(m, "CameraConfiguration"); + auto pyStreamConfiguration = py::class_(m, "StreamConfiguration"); + auto pyStreamFormats = py::class_(m, "StreamFormats"); + auto pyFrameBufferAllocator = py::class_(m, "FrameBufferAllocator"); + auto pyFrameBuffer = py::class_(m, "FrameBuffer"); + auto pyStream = py::class_(m, "Stream"); + auto pyControlId = py::class_(m, "ControlId"); + auto pyRequest = py::class_(m, "Request"); + auto pyFrameMetadata = py::class_(m, "FrameMetadata"); + + /* Global functions */ + m.def("logSetLevel", &logSetLevel); + + /* Classes */ + pyCameraManager + .def_static("singleton", []() { + shared_ptr cm = g_camera_manager.lock(); + if (cm) + return cm; + + int fd = eventfd(0, 0); + if (fd == -1) + throw std::system_error(errno, std::generic_category(), "Failed to create eventfd"); + + cm = shared_ptr(new CameraManager, [](auto p) { + close(g_eventfd); + g_eventfd = -1; + delete p; + }); + + g_eventfd = fd; + g_camera_manager = cm; + + int ret = cm->start(); + if (ret) + throw std::system_error(-ret, std::generic_category(), "Failed to start CameraManager"); + + return cm; + }) + + .def_property_readonly("version", &CameraManager::version) + + .def_property_readonly("efd", [](CameraManager &) { + return g_eventfd; + }) + + .def("getReadyRequests", [](CameraManager &) { + vector v; + + { + lock_guard guard(g_reqlist_mutex); + swap(v, g_reqlist); + } + + vector ret; + + for (Request *req : v) { + py::object o = py::cast(req); + /* decrease the ref increased in Camera::queueRequest() */ + o.dec_ref(); + ret.push_back(o); + } + + return ret; + }) + + .def("get", py::overload_cast(&CameraManager::get), py::keep_alive<0, 1>()) + + .def("find", [](CameraManager &self, string str) { + std::transform(str.begin(), str.end(), str.begin(), ::tolower); + + for (auto c : self.cameras()) { + string id = c->id(); + + std::transform(id.begin(), id.end(), id.begin(), ::tolower); + + if (id.find(str) != string::npos) + return c; + } + + return shared_ptr(); + }, py::keep_alive<0, 1>()) + + /* Create a list of Cameras, where each camera has a keep-alive to CameraManager */ + .def_property_readonly("cameras", [](CameraManager &self) { + py::list l; + + for (auto &c : self.cameras()) { + py::object py_cm = py::cast(self); + py::object py_cam = py::cast(c); + 
py::detail::keep_alive_impl(py_cam, py_cm); + l.append(py_cam); + } + + return l; + }); + + pyCamera + .def_property_readonly("id", &Camera::id) + .def("acquire", &Camera::acquire) + .def("release", &Camera::release) + .def("start", [](Camera &self) { + self.requestCompleted.connect(handleRequestCompleted); + + int ret = self.start(); + if (ret) + self.requestCompleted.disconnect(handleRequestCompleted); + + return ret; + }) + + .def("stop", [](Camera &self) { + int ret = self.stop(); + if (!ret) + self.requestCompleted.disconnect(handleRequestCompleted); + + return ret; + }) + + .def("__repr__", [](Camera &self) { + return ""; + }) + + /* Keep the camera alive, as StreamConfiguration contains a Stream* */ + .def("generateConfiguration", &Camera::generateConfiguration, py::keep_alive<0, 1>()) + .def("configure", &Camera::configure) + + .def("createRequest", &Camera::createRequest, py::arg("cookie") = 0) + + .def("queueRequest", [](Camera &self, Request *req) { + py::object py_req = py::cast(req); + + py_req.inc_ref(); + + int ret = self.queueRequest(req); + if (ret) + py_req.dec_ref(); + + return ret; + }) + + .def_property_readonly("streams", [](Camera &self) { + py::set set; + for (auto &s : self.streams()) { + py::object py_self = py::cast(self); + py::object py_s = py::cast(s); + py::detail::keep_alive_impl(py_s, py_self); + set.add(py_s); + } + return set; + }) + + .def("find_control", [](Camera &self, const string &name) { + const auto &controls = self.controls(); + + auto it = find_if(controls.begin(), controls.end(), + [&name](const auto &kvp) { return kvp.first->name() == name; }); + + if (it == controls.end()) + throw runtime_error("Control not found"); + + return it->first; + }, py::return_value_policy::reference_internal) + + .def_property_readonly("controls", [](Camera &self) { + py::dict ret; + + for (const auto &[id, ci] : self.controls()) { + ret[id->name().c_str()] = make_tuple(ControlValueToPy(ci.min()), + ControlValueToPy(ci.max()), + ControlValueToPy(ci.def())); + } + + return ret; + }) + + .def_property_readonly("properties", [](Camera &self) { + py::dict ret; + + for (const auto &[key, cv] : self.properties()) { + const ControlId *id = properties::properties.at(key); + py::object ob = ControlValueToPy(cv); + + ret[id->name().c_str()] = ob; + } + + return ret; + }); + + pyCameraConfiguration + .def("__iter__", [](CameraConfiguration &self) { + return py::make_iterator(self); + }, py::keep_alive<0, 1>()) + .def("__len__", [](CameraConfiguration &self) { + return self.size(); + }) + .def("validate", &CameraConfiguration::validate) + .def("at", py::overload_cast(&CameraConfiguration::at), py::return_value_policy::reference_internal) + .def_property_readonly("size", &CameraConfiguration::size) + .def_property_readonly("empty", &CameraConfiguration::empty); + + pyStreamConfiguration + .def("toString", &StreamConfiguration::toString) + .def_property_readonly("stream", &StreamConfiguration::stream, py::return_value_policy::reference_internal) + .def_property( + "size", + [](StreamConfiguration &self) { return make_tuple(self.size.width, self.size.height); }, + [](StreamConfiguration &self, tuple size) { self.size.width = get<0>(size); self.size.height = get<1>(size); }) + .def_property( + "pixelFormat", + [](StreamConfiguration &self) { return self.pixelFormat.toString(); }, + [](StreamConfiguration &self, string fmt) { self.pixelFormat = PixelFormat::fromString(fmt); }) + .def_readwrite("stride", &StreamConfiguration::stride) + .def_readwrite("frameSize", 
&StreamConfiguration::frameSize) + .def_readwrite("bufferCount", &StreamConfiguration::bufferCount) + .def_property_readonly("formats", &StreamConfiguration::formats, py::return_value_policy::reference_internal); + + pyStreamFormats + .def_property_readonly("pixelFormats", [](StreamFormats &self) { + vector fmts; + for (auto &fmt : self.pixelformats()) + fmts.push_back(fmt.toString()); + return fmts; + }) + .def("sizes", [](StreamFormats &self, const string &pixelFormat) { + auto fmt = PixelFormat::fromString(pixelFormat); + vector> fmts; + for (const auto &s : self.sizes(fmt)) + fmts.push_back(make_tuple(s.width, s.height)); + return fmts; + }) + .def("range", [](StreamFormats &self, const string &pixelFormat) { + auto fmt = PixelFormat::fromString(pixelFormat); + const auto &range = self.range(fmt); + return make_tuple(make_tuple(range.hStep, range.vStep), + make_tuple(range.min.width, range.min.height), + make_tuple(range.max.width, range.max.height)); + }); + + pyFrameBufferAllocator + .def(py::init>(), py::keep_alive<1, 2>()) + .def("allocate", &FrameBufferAllocator::allocate) + .def_property_readonly("allocated", &FrameBufferAllocator::allocated) + /* Create a list of FrameBuffers, where each FrameBuffer has a keep-alive to FrameBufferAllocator */ + .def("buffers", [](FrameBufferAllocator &self, Stream *stream) { + py::object py_self = py::cast(self); + py::list l; + for (auto &ub : self.buffers(stream)) { + py::object py_buf = py::cast(ub.get(), py::return_value_policy::reference_internal, py_self); + l.append(py_buf); + } + return l; + }); + + pyFrameBuffer + /* TODO: implement FrameBuffer::Plane properly */ + .def(py::init([](vector> planes, unsigned int cookie) { + vector v; + for (const auto &t : planes) + v.push_back({ SharedFD(get<0>(t)), FrameBuffer::Plane::kInvalidOffset, get<1>(t) }); + return new FrameBuffer(v, cookie); + })) + .def_property_readonly("metadata", &FrameBuffer::metadata, py::return_value_policy::reference_internal) + .def("length", [](FrameBuffer &self, uint32_t idx) { + const FrameBuffer::Plane &plane = self.planes()[idx]; + return plane.length; + }) + .def("fd", [](FrameBuffer &self, uint32_t idx) { + const FrameBuffer::Plane &plane = self.planes()[idx]; + return plane.fd.get(); + }) + .def_property("cookie", &FrameBuffer::cookie, &FrameBuffer::setCookie); + + pyStream + .def_property_readonly("configuration", &Stream::configuration); + + pyControlId + .def_property_readonly("id", &ControlId::id) + .def_property_readonly("name", &ControlId::name) + .def_property_readonly("type", &ControlId::type); + + pyRequest + /* Fence is not supported, so we cannot expose addBuffer() directly */ + .def("addBuffer", [](Request &self, const Stream *stream, FrameBuffer *buffer) { + return self.addBuffer(stream, buffer); + }, py::keep_alive<1, 3>()) /* Request keeps Framebuffer alive */ + .def_property_readonly("status", &Request::status) + .def_property_readonly("buffers", &Request::buffers) + .def_property_readonly("cookie", &Request::cookie) + .def_property_readonly("hasPendingBuffers", &Request::hasPendingBuffers) + .def("set_control", [](Request &self, ControlId &id, py::object value) { + self.controls().set(id.id(), PyToControlValue(value, id.type())); + }) + .def_property_readonly("metadata", [](Request &self) { + py::dict ret; + + for (const auto &[key, cv] : self.metadata()) { + const ControlId *id = controls::controls.at(key); + py::object ob = ControlValueToPy(cv); + + ret[id->name().c_str()] = ob; + } + + return ret; + }) + /* As we add a keep_alive to the fb 
in addBuffers(), we can only allow reuse with ReuseBuffers. */ + .def("reuse", [](Request &self) { self.reuse(Request::ReuseFlag::ReuseBuffers); }); + + pyFrameMetadata + .def_readonly("status", &FrameMetadata::status) + .def_readonly("sequence", &FrameMetadata::sequence) + .def_readonly("timestamp", &FrameMetadata::timestamp) + /* temporary helper, to be removed */ + .def_property_readonly("bytesused", [](FrameMetadata &self) { + vector v; + v.resize(self.planes().size()); + transform(self.planes().begin(), self.planes().end(), v.begin(), [](const auto &p) { return p.bytesused; }); + return v; + }); +} diff --git a/src/py/meson.build b/src/py/meson.build new file mode 100644 index 00000000..4ce9668c --- /dev/null +++ b/src/py/meson.build @@ -0,0 +1 @@ +subdir('libcamera') diff --git a/subprojects/.gitignore b/subprojects/.gitignore index 391fde2c..757bb072 100644 --- a/subprojects/.gitignore +++ b/subprojects/.gitignore @@ -1,3 +1,4 @@ /googletest-release* /libyuv -/packagecache \ No newline at end of file +/packagecache +/pybind11*/ diff --git a/subprojects/pybind11.wrap b/subprojects/pybind11.wrap new file mode 100644 index 00000000..ebf942ff --- /dev/null +++ b/subprojects/pybind11.wrap @@ -0,0 +1,6 @@ +[wrap-git] +url = https://github.com/tomba/pybind11.git +revision = smart_holder + +[provide] +pybind11 = pybind11_dep From patchwork Tue Apr 26 11:02:34 2022 From: Tomi Valkeinen To: libcamera-devel@lists.libcamera.org, David Plowman, Kieran Bingham, Laurent Pinchart, Jacopo Mondi Date: Tue, 26 Apr 2022 14:02:34 +0300 Message-Id: <20220426110236.104511-3-tomi.valkeinen@ideasonboard.com> In-Reply-To: <20220426110236.104511-1-tomi.valkeinen@ideasonboard.com> Subject: [libcamera-devel] [PATCH v6 2/4] py: generate control enums from yaml Generate enums for controls from control_ids.yaml. The generator script has some heuristics to generate nicer enum names. E.g. instead of having "LensShadingMapMode.LensShadingMapModeOff" we get "LensShadingMapMode.Off". These heuristics may need to be updated when the yaml file is changed or new controls are added. Signed-off-by: Tomi Valkeinen --- src/py/libcamera/gen-py-control-enums.py | 95 +++++++++++++++++++++++ src/py/libcamera/meson.build | 9 +++ src/py/libcamera/pyenums_generated.cpp.in | 21 +++++ src/py/libcamera/pymain.cpp | 2 + 4 files changed, 127 insertions(+) create mode 100755 src/py/libcamera/gen-py-control-enums.py create mode 100644 src/py/libcamera/pyenums_generated.cpp.in diff --git a/src/py/libcamera/gen-py-control-enums.py b/src/py/libcamera/gen-py-control-enums.py new file mode 100755 index 00000000..f1b18389 --- /dev/null +++ b/src/py/libcamera/gen-py-control-enums.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Generate Python bindings enums for controls from YAML + +import argparse +import string +import sys +import yaml + + +def find_common_prefix(strings): + prefix = strings[0] + + for string in strings[1:]: + while string[:len(prefix)] != prefix and prefix: + prefix = prefix[:len(prefix) - 1] + if not prefix: + break + + return prefix + + +def generate_py(controls): + out = "" + + for ctrl in controls: + name, ctrl = ctrl.popitem() + + enum = ctrl.get('enum') + if not enum: + continue + + if ctrl.get('draft'): + ns = "libcamera::controls::draft::" + else: + ns = "libcamera::controls::" + + cpp_enum = name + "Enum" + + out += "\tpy::enum_<{}{}>(m, \"{}\")\n".format(ns, cpp_enum, name) + + if name == "LensShadingMapMode": + prefix = "LensShadingMapMode" + else: + prefix = find_common_prefix([e["name"] for e in enum]) + + for entry in enum: + cpp_enum = entry["name"] + py_enum = entry["name"][len(prefix):] + + out += "\t\t.value(\"{}\", {}{})\n".format(py_enum, ns, cpp_enum) + + out += "\t;\n" + + return {"enums": out} + + +def fill_template(template, data): + template = open(template, 'rb').read() + template = template.decode('utf-8') + template = string.Template(template) + return template.substitute(data) + + +def main(argv): + # Parse command line arguments + parser = argparse.ArgumentParser() + parser.add_argument('-o', dest='output', metavar='file', type=str, + help='Output file name.
Defaults to standard output if not specified.') + parser.add_argument('input', type=str, + help='Input file name.') + parser.add_argument('template', type=str, + help='Template file name.') + args = parser.parse_args(argv[1:]) + + data = open(args.input, 'rb').read() + controls = yaml.safe_load(data)['controls'] + + data = generate_py(controls) + + data = fill_template(args.template, data) + + if args.output: + output = open(args.output, 'wb') + output.write(data.encode('utf-8')) + output.close() + else: + sys.stdout.write(data) + + return 0 + + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff --git a/src/py/libcamera/meson.build b/src/py/libcamera/meson.build index 82388efb..fe15a675 100644 --- a/src/py/libcamera/meson.build +++ b/src/py/libcamera/meson.build @@ -17,6 +17,15 @@ pycamera_sources = files([ 'pyenums.cpp', ]) +gen_input_files = [meson.source_root() / 'src/libcamera/control_ids.yaml', 'pyenums_generated.cpp.in'] + +generated_sources = custom_target('py_gen_controls', + input : gen_input_files, + output : ['pyenums_generated.cpp'], + command : ['gen-py-control-enums.py', '-o', '@OUTPUT@', '@INPUT@']) + +pycamera_sources += generated_sources + pycamera_deps = [ libcamera_public, py3_dep, diff --git a/src/py/libcamera/pyenums_generated.cpp.in b/src/py/libcamera/pyenums_generated.cpp.in new file mode 100644 index 00000000..96daf257 --- /dev/null +++ b/src/py/libcamera/pyenums_generated.cpp.in @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2021, Tomi Valkeinen + * + * Python bindings + * + * This file is auto-generated. Do not edit. + */ + +#include + +#include + +namespace py = pybind11; + +using namespace libcamera; + +void init_pyenums_generated(py::module& m) +{ +${enums} +} diff --git a/src/py/libcamera/pymain.cpp b/src/py/libcamera/pymain.cpp index 54674caf..81d48a20 100644 --- a/src/py/libcamera/pymain.cpp +++ b/src/py/libcamera/pymain.cpp @@ -115,10 +115,12 @@ static void handleRequestCompleted(Request *req) } void init_pyenums(py::module &m); +void init_pyenums_generated(py::module &m); PYBIND11_MODULE(_libcamera, m) { init_pyenums(m); + init_pyenums_generated(m); /* Forward declarations */ From patchwork Tue Apr 26 11:02:35 2022 From: Tomi Valkeinen To: libcamera-devel@lists.libcamera.org, David Plowman, Kieran Bingham, Laurent Pinchart, Jacopo Mondi Date: Tue, 26 Apr 2022 14:02:35 +0300 Message-Id: <20220426110236.104511-4-tomi.valkeinen@ideasonboard.com> In-Reply-To: <20220426110236.104511-1-tomi.valkeinen@ideasonboard.com> Subject: [libcamera-devel] [PATCH v6 3/4] py: add unittests.py Add a simple unittests.py as a base for python unittests.
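The tests rely on weakref and gc to verify the keep-alive relationships set up by the bindings. As a rough sketch of the pattern (based on test_find_ref below, using the vimc camera):

import gc
import weakref
import libcamera as libcam

cm = libcam.CameraManager.singleton()
wr_cm = weakref.ref(cm)

cam = cm.find('vimc')
wr_cam = weakref.ref(cam)

# The Camera keeps the CameraManager alive, so dropping our own
# reference to the manager must not destroy it yet.
cm = None
gc.collect()
assert wr_cm() is not None

# Dropping the last Camera reference releases both objects.
cam = None
gc.collect()
assert wr_cm() is None
assert wr_cam() is None

The script is registered as a meson test in the 'pybindings' suite, so it can be run with e.g. 'meson test -C <builddir> --suite pybindings'.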
Signed-off-by: Tomi Valkeinen --- test/meson.build | 1 + test/py/meson.build | 17 ++ test/py/unittests.py | 368 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 386 insertions(+) create mode 100644 test/py/meson.build create mode 100755 test/py/unittests.py diff --git a/test/meson.build b/test/meson.build index fd4c5ca0..623f3baa 100644 --- a/test/meson.build +++ b/test/meson.build @@ -18,6 +18,7 @@ subdir('log') subdir('media_device') subdir('pipeline') subdir('process') +subdir('py') subdir('serialization') subdir('stream') subdir('v4l2_compat') diff --git a/test/py/meson.build b/test/py/meson.build new file mode 100644 index 00000000..8f0a38c6 --- /dev/null +++ b/test/py/meson.build @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: CC0-1.0 + +if not pycamera_enabled + subdir_done() +endif + +pymod = import('python') +py3 = pymod.find_installation('python3') + +pypathdir = meson.build_root() / 'src/py' + +test('pyunittests', + py3, + args : files('unittests.py'), + env : ['PYTHONPATH=' + pypathdir], + suite : 'pybindings', + is_parallel : false) diff --git a/test/py/unittests.py b/test/py/unittests.py new file mode 100755 index 00000000..15d5b4a7 --- /dev/null +++ b/test/py/unittests.py @@ -0,0 +1,368 @@ +#!/usr/bin/env python3 + +# SPDX-License-Identifier: GPL-2.0-or-later +# Copyright (C) 2021, Tomi Valkeinen + +from collections import defaultdict +import errno +import gc +import libcamera as libcam +import os +import selectors +import time +import unittest +import weakref + + +class MyTestCase(unittest.TestCase): + def assertZero(self, a, msg=None): + self.assertEqual(a, 0, msg) + + +class SimpleTestMethods(MyTestCase): + def test_find_ref(self): + cm = libcam.CameraManager.singleton() + wr_cm = weakref.ref(cm) + + cam = cm.find("platform/vimc") + self.assertIsNotNone(cam) + wr_cam = weakref.ref(cam) + + cm = None + gc.collect() + self.assertIsNotNone(wr_cm()) + + cam = None + gc.collect() + self.assertIsNone(wr_cm()) + self.assertIsNone(wr_cam()) + + def test_get_ref(self): + cm = libcam.CameraManager.singleton() + wr_cm = weakref.ref(cm) + + cam = cm.get("platform/vimc.0 Sensor B") + self.assertTrue(cam is not None) + wr_cam = weakref.ref(cam) + + cm = None + gc.collect() + self.assertIsNotNone(wr_cm()) + + cam = None + gc.collect() + self.assertIsNone(wr_cm()) + self.assertIsNone(wr_cam()) + + def test_acquire_release(self): + cm = libcam.CameraManager.singleton() + cam = cm.get("platform/vimc.0 Sensor B") + self.assertTrue(cam is not None) + + ret = cam.acquire() + self.assertZero(ret) + + ret = cam.release() + self.assertZero(ret) + + def test_double_acquire(self): + cm = libcam.CameraManager.singleton() + cam = cm.get("platform/vimc.0 Sensor B") + self.assertTrue(cam is not None) + + ret = cam.acquire() + self.assertZero(ret) + + libcam.logSetLevel("Camera", "FATAL") + ret = cam.acquire() + self.assertEqual(ret, -errno.EBUSY) + libcam.logSetLevel("Camera", "ERROR") + + ret = cam.release() + self.assertZero(ret) + + ret = cam.release() + # I expected EBUSY, but looks like double release works fine + self.assertZero(ret) + + +class CameraTesterBase(MyTestCase): + def setUp(self): + self.cm = libcam.CameraManager.singleton() + self.cam = self.cm.find("platform/vimc") + if self.cam is None: + self.cm = None + raise Exception("No vimc found") + + ret = self.cam.acquire() + if ret != 0: + self.cam = None + self.cm = None + raise Exception("Failed to acquire camera") + + def tearDown(self): + # If a test fails, the camera may be in running state. So always stop. 
+ self.cam.stop() + + ret = self.cam.release() + if ret != 0: + raise Exception("Failed to release camera") + + self.cam = None + self.cm = None + + +class AllocatorTestMethods(CameraTesterBase): + def test_allocator(self): + cam = self.cam + + camconfig = cam.generateConfiguration([libcam.StreamRole.StillCapture]) + self.assertTrue(camconfig.size == 1) + wr_camconfig = weakref.ref(camconfig) + + streamconfig = camconfig.at(0) + wr_streamconfig = weakref.ref(streamconfig) + + ret = cam.configure(camconfig) + self.assertZero(ret) + + stream = streamconfig.stream + wr_stream = weakref.ref(stream) + + # stream should keep streamconfig and camconfig alive + streamconfig = None + camconfig = None + gc.collect() + self.assertIsNotNone(wr_camconfig()) + self.assertIsNotNone(wr_streamconfig()) + + allocator = libcam.FrameBufferAllocator(cam) + ret = allocator.allocate(stream) + self.assertTrue(ret > 0) + wr_allocator = weakref.ref(allocator) + + buffers = allocator.buffers(stream) + buffers = None + + buffer = allocator.buffers(stream)[0] + self.assertIsNotNone(buffer) + wr_buffer = weakref.ref(buffer) + + allocator = None + gc.collect() + self.assertIsNotNone(wr_buffer()) + self.assertIsNotNone(wr_allocator()) + self.assertIsNotNone(wr_stream()) + + buffer = None + gc.collect() + self.assertIsNone(wr_buffer()) + self.assertIsNone(wr_allocator()) + self.assertIsNotNone(wr_stream()) + + stream = None + gc.collect() + self.assertIsNone(wr_stream()) + self.assertIsNone(wr_camconfig()) + self.assertIsNone(wr_streamconfig()) + + +class SimpleCaptureMethods(CameraTesterBase): + def test_sleep(self): + cm = self.cm + cam = self.cam + + camconfig = cam.generateConfiguration([libcam.StreamRole.StillCapture]) + self.assertTrue(camconfig.size == 1) + + streamconfig = camconfig.at(0) + fmts = streamconfig.formats + + ret = cam.configure(camconfig) + self.assertZero(ret) + + stream = streamconfig.stream + + allocator = libcam.FrameBufferAllocator(cam) + ret = allocator.allocate(stream) + self.assertTrue(ret > 0) + + num_bufs = len(allocator.buffers(stream)) + + reqs = [] + for i in range(num_bufs): + req = cam.createRequest(i) + self.assertIsNotNone(req) + + buffer = allocator.buffers(stream)[i] + ret = req.addBuffer(stream, buffer) + self.assertZero(ret) + + reqs.append(req) + + buffer = None + + ret = cam.start() + self.assertZero(ret) + + for req in reqs: + ret = cam.queueRequest(req) + self.assertZero(ret) + + reqs = None + gc.collect() + + time.sleep(0.5) + + reqs = cm.getReadyRequests() + + self.assertTrue(len(reqs) == num_bufs) + + for i, req in enumerate(reqs): + self.assertTrue(i == req.cookie) + + reqs = None + gc.collect() + + ret = cam.stop() + self.assertZero(ret) + + def test_select(self): + cm = self.cm + cam = self.cam + + camconfig = cam.generateConfiguration([libcam.StreamRole.StillCapture]) + self.assertTrue(camconfig.size == 1) + + streamconfig = camconfig.at(0) + fmts = streamconfig.formats + + ret = cam.configure(camconfig) + self.assertZero(ret) + + stream = streamconfig.stream + + allocator = libcam.FrameBufferAllocator(cam) + ret = allocator.allocate(stream) + self.assertTrue(ret > 0) + + num_bufs = len(allocator.buffers(stream)) + + reqs = [] + for i in range(num_bufs): + req = cam.createRequest(i) + self.assertIsNotNone(req) + + buffer = allocator.buffers(stream)[i] + ret = req.addBuffer(stream, buffer) + self.assertZero(ret) + + reqs.append(req) + + buffer = None + + ret = cam.start() + self.assertZero(ret) + + for req in reqs: + ret = cam.queueRequest(req) + self.assertZero(ret) + 
+ reqs = None + gc.collect() + + sel = selectors.DefaultSelector() + sel.register(cm.efd, selectors.EVENT_READ, 123) + + reqs = [] + + running = True + while running: + events = sel.select() + for key, mask in events: + os.read(key.fileobj, 8) + + ready_reqs = cm.getReadyRequests() + + self.assertTrue(len(ready_reqs) > 0) + + reqs += ready_reqs + + if len(reqs) == num_bufs: + running = False + + self.assertTrue(len(reqs) == num_bufs) + + for i, req in enumerate(reqs): + self.assertTrue(i == req.cookie) + + reqs = None + gc.collect() + + ret = cam.stop() + self.assertZero(ret) + + +# Recursively expand slist's objects into olist, using seen to track already +# processed objects. +def _getr(slist, olist, seen): + for e in slist: + if id(e) in seen: + continue + seen.add(id(e)) + olist.append(e) + tl = gc.get_referents(e) + if tl: + _getr(tl, olist, seen) + + +def get_all_objects(ignored=[]): + gcl = gc.get_objects() + olist = [] + seen = set() + + seen.add(id(gcl)) + seen.add(id(olist)) + seen.add(id(seen)) + seen.update(set([id(o) for o in ignored])) + + _getr(gcl, olist, seen) + + return olist + + +def create_type_count_map(olist): + map = defaultdict(int) + for o in olist: + map[type(o)] += 1 + return map + + +def diff_type_count_maps(before, after): + return [(k, after[k] - before[k]) for k in after if after[k] != before[k]] + + +if __name__ == '__main__': + # doesn't work very well, as things always leak a bit + test_leaks = False + + if test_leaks: + gc.unfreeze() + gc.collect() + + obs_before = get_all_objects() + + unittest.main(exit=False) + + if test_leaks: + gc.unfreeze() + gc.collect() + + obs_after = get_all_objects([obs_before]) + + before = create_type_count_map(obs_before) + after = create_type_count_map(obs_after) + + leaks = diff_type_count_maps(before, after) + if len(leaks) > 0: + print(leaks) From patchwork Tue Apr 26 11:02:36 2022 From: Tomi Valkeinen To: libcamera-devel@lists.libcamera.org, David Plowman, Kieran Bingham, Laurent Pinchart, Jacopo Mondi Date: Tue, 26 Apr 2022 14:02:36 +0300 Message-Id: <20220426110236.104511-5-tomi.valkeinen@ideasonboard.com> In-Reply-To: <20220426110236.104511-1-tomi.valkeinen@ideasonboard.com> Subject: [libcamera-devel] [PATCH v6 4/4] py: Add cam.py Add cam.py, which mimics the 'cam' tool. Four rendering backends are added: * null - Do nothing * kms - Use KMS with dmabufs * qt - SW render on a Qt window * qtgl - OpenGL render on a Qt window All the renderers handle only a few pixel formats, and especially the GL renderer is just a prototype. Signed-off-by: Tomi Valkeinen --- src/py/cam/cam.py | 483 +++++++++++++++++++++++++++++++++++++++ src/py/cam/cam_kms.py | 183 +++++++++++++++ src/py/cam/cam_null.py | 47 ++++ src/py/cam/cam_qt.py | 354 ++++++++++++++++++++++++++++ src/py/cam/cam_qtgl.py | 385 +++++++++++++++++++++++++++++++ src/py/cam/gl_helpers.py | 74 ++++++ 6 files changed, 1526 insertions(+) create mode 100755 src/py/cam/cam.py create mode 100644 src/py/cam/cam_kms.py create mode 100644 src/py/cam/cam_null.py create mode 100644 src/py/cam/cam_qt.py create mode 100644 src/py/cam/cam_qtgl.py create mode 100644 src/py/cam/gl_helpers.py diff --git a/src/py/cam/cam.py b/src/py/cam/cam.py new file mode 100755 index 00000000..4efa6459 --- /dev/null +++ b/src/py/cam/cam.py @@ -0,0 +1,483 @@ +#!/usr/bin/env python3 + +# SPDX-License-Identifier: GPL-2.0-or-later +# Copyright (C) 2021, Tomi Valkeinen + +import argparse +import binascii +import libcamera as libcam +import os +import sys + + +class CustomCameraAction(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + print(self.dest, values) + + if "camera" not in namespace or namespace.camera is None: + setattr(namespace, "camera", []) + + previous = namespace.camera + previous.append((self.dest, values)) + setattr(namespace, "camera", previous) + + +class CustomAction(argparse.Action): + def __init__(self, option_strings, dest, **kwargs): + super().__init__(option_strings, dest, default={}, **kwargs) + + def __call__(self, parser, namespace, values, option_string=None): + if len(namespace.camera) == 0: + print(f"Option {option_string} requires a --camera context") + sys.exit(-1) + + if self.type == bool: + values = True + + current = namespace.camera[-1] + + data = getattr(namespace, self.dest) + + if self.nargs == "+": + if current not in data: + data[current] = [] + + data[current] += values + else: + data[current]
= values + + +def do_cmd_list(cm): + print("Available cameras:") + + for idx, c in enumerate(cm.cameras): + print(f"{idx + 1}: {c.id}") + + +def do_cmd_list_props(ctx): + camera = ctx["camera"] + + print("Properties for", ctx["id"]) + + for name, prop in camera.properties.items(): + print("\t{}: {}".format(name, prop)) + + +def do_cmd_list_controls(ctx): + camera = ctx["camera"] + + print("Controls for", ctx["id"]) + + for name, prop in camera.controls.items(): + print("\t{}: {}".format(name, prop)) + + +def do_cmd_info(ctx): + camera = ctx["camera"] + + print("Stream info for", ctx["id"]) + + roles = [libcam.StreamRole.Viewfinder] + + camconfig = camera.generateConfiguration(roles) + if camconfig is None: + raise Exception("Generating config failed") + + for i, stream_config in enumerate(camconfig): + print("\t{}: {}".format(i, stream_config.toString())) + + formats = stream_config.formats + for fmt in formats.pixelFormats: + print("\t * Pixelformat:", fmt, formats.range(fmt)) + + for size in formats.sizes(fmt): + print("\t -", size) + + +def acquire(ctx): + camera = ctx["camera"] + + camera.acquire() + + +def release(ctx): + camera = ctx["camera"] + + camera.release() + + +def parse_streams(ctx): + streams = [] + + for stream_desc in ctx["opt-stream"]: + stream_opts = {"role": libcam.StreamRole.Viewfinder} + + for stream_opt in stream_desc.split(","): + if stream_opt == 0: + continue + + arr = stream_opt.split("=") + if len(arr) != 2: + print("Bad stream option", stream_opt) + sys.exit(-1) + + key = arr[0] + value = arr[1] + + if key in ["width", "height"]: + value = int(value) + elif key == "role": + rolemap = { + "still": libcam.StreamRole.StillCapture, + "raw": libcam.StreamRole.Raw, + "video": libcam.StreamRole.VideoRecording, + "viewfinder": libcam.StreamRole.Viewfinder, + } + + role = rolemap.get(value.lower(), None) + + if role is None: + print("Bad stream role", value) + sys.exit(-1) + + value = role + elif key == "pixelformat": + pass + else: + print("Bad stream option key", key) + sys.exit(-1) + + stream_opts[key] = value + + streams.append(stream_opts) + + return streams + + +def configure(ctx): + camera = ctx["camera"] + + streams = parse_streams(ctx) + + roles = [opts["role"] for opts in streams] + + camconfig = camera.generateConfiguration(roles) + if camconfig is None: + raise Exception("Generating config failed") + + for idx, stream_opts in enumerate(streams): + stream_config = camconfig.at(idx) + + if "width" in stream_opts and "height" in stream_opts: + stream_config.size = (stream_opts["width"], stream_opts["height"]) + + if "pixelformat" in stream_opts: + stream_config.pixelFormat = stream_opts["pixelformat"] + + stat = camconfig.validate() + + if stat == libcam.ConfigurationStatus.Invalid: + print("Camera configuration invalid") + exit(-1) + elif stat == libcam.ConfigurationStatus.Adjusted: + if ctx["opt-strict-formats"]: + print("Adjusting camera configuration disallowed by --strict-formats argument") + exit(-1) + + print("Camera configuration adjusted") + + r = camera.configure(camconfig) + if r != 0: + raise Exception("Configure failed") + + ctx["stream-names"] = {} + ctx["streams"] = [] + + for idx, stream_config in enumerate(camconfig): + stream = stream_config.stream + ctx["streams"].append(stream) + ctx["stream-names"][stream] = "stream" + str(idx) + print("{}-{}: stream config {}".format(ctx["id"], ctx["stream-names"][stream], stream.configuration.toString())) + + +def alloc_buffers(ctx): + camera = ctx["camera"] + + allocator = 
libcam.FrameBufferAllocator(camera) + + for idx, stream in enumerate(ctx["streams"]): + ret = allocator.allocate(stream) + if ret < 0: + print("Can't allocate buffers") + exit(-1) + + allocated = len(allocator.buffers(stream)) + + print("{}-{}: Allocated {} buffers".format(ctx["id"], ctx["stream-names"][stream], allocated)) + + ctx["allocator"] = allocator + + +def create_requests(ctx): + camera = ctx["camera"] + + ctx["requests"] = [] + + # Identify the stream with the least number of buffers + num_bufs = min([len(ctx["allocator"].buffers(stream)) for stream in ctx["streams"]]) + + requests = [] + + for buf_num in range(num_bufs): + request = camera.createRequest(ctx["idx"]) + + if request is None: + print("Can't create request") + exit(-1) + + for stream in ctx["streams"]: + buffers = ctx["allocator"].buffers(stream) + buffer = buffers[buf_num] + + ret = request.addBuffer(stream, buffer) + if ret < 0: + print("Can't set buffer for request") + exit(-1) + + requests.append(request) + + ctx["requests"] = requests + + +def start(ctx): + camera = ctx["camera"] + + camera.start() + + +def stop(ctx): + camera = ctx["camera"] + + camera.stop() + + +def queue_requests(ctx): + camera = ctx["camera"] + + for request in ctx["requests"]: + camera.queueRequest(request) + ctx["reqs-queued"] += 1 + + del ctx["requests"] + + +def capture_init(contexts): + for ctx in contexts: + acquire(ctx) + + for ctx in contexts: + configure(ctx) + + for ctx in contexts: + alloc_buffers(ctx) + + for ctx in contexts: + create_requests(ctx) + + +def capture_start(contexts): + for ctx in contexts: + start(ctx) + + for ctx in contexts: + queue_requests(ctx) + + +# Called from renderer when there is a libcamera event +def event_handler(state): + cm = state["cm"] + contexts = state["contexts"] + + os.read(cm.efd, 8) + + reqs = cm.getReadyRequests() + + for req in reqs: + ctx = next(ctx for ctx in contexts if ctx["idx"] == req.cookie) + request_handler(state, ctx, req) + + running = any(ctx["reqs-completed"] < ctx["opt-capture"] for ctx in contexts) + return running + + +def request_handler(state, ctx, req): + if req.status != libcam.RequestStatus.Complete: + raise Exception("{}: Request failed: {}".format(ctx["id"], req.status)) + + buffers = req.buffers + + # Compute the frame rate. The timestamp is arbitrarily retrieved from + # the first buffer, as all buffers should have matching timestamps. 
+ ts = buffers[next(iter(buffers))].metadata.timestamp + last = ctx.get("last", 0) + fps = 1000000000.0 / (ts - last) if (last != 0 and (ts - last) != 0) else 0 + ctx["last"] = ts + ctx["fps"] = fps + + for stream, fb in buffers.items(): + stream_name = ctx["stream-names"][stream] + + crcs = [] + if ctx["opt-crc"]: + with fb.mmap(0) as b: + crc = binascii.crc32(b) + crcs.append(crc) + + meta = fb.metadata + + print("{:.6f} ({:.2f} fps) {}-{}: seq {}, bytes {}, CRCs {}" + .format(ts / 1000000000, fps, + ctx["id"], stream_name, + meta.sequence, meta.bytesused, + crcs)) + + if ctx["opt-metadata"]: + reqmeta = req.metadata + for ctrl, val in reqmeta.items(): + print(f"\t{ctrl} = {val}") + + if ctx["opt-save-frames"]: + with fb.mmap(0) as b: + filename = "frame-{}-{}-{}.data".format(ctx["id"], stream_name, ctx["reqs-completed"]) + with open(filename, "wb") as f: + f.write(b) + + state["renderer"].request_handler(ctx, req) + + ctx["reqs-completed"] += 1 + + +# Called from renderer when it has finished with a request +def request_prcessed(ctx, req): + camera = ctx["camera"] + + if ctx["reqs-queued"] < ctx["opt-capture"]: + req.reuse() + camera.queueRequest(req) + ctx["reqs-queued"] += 1 + + +def capture_deinit(contexts): + for ctx in contexts: + stop(ctx) + + for ctx in contexts: + release(ctx) + + +def do_cmd_capture(state): + capture_init(state["contexts"]) + + renderer = state["renderer"] + + renderer.setup() + + capture_start(state["contexts"]) + + renderer.run() + + capture_deinit(state["contexts"]) + + +def main(): + parser = argparse.ArgumentParser() + # global options + parser.add_argument("-l", "--list", action="store_true", help="List all cameras") + parser.add_argument("-c", "--camera", type=int, action="extend", nargs=1, default=[], help="Specify which camera to operate on, by index") + parser.add_argument("-p", "--list-properties", action="store_true", help="List cameras properties") + parser.add_argument("--list-controls", action="store_true", help="List cameras controls") + parser.add_argument("-I", "--info", action="store_true", help="Display information about stream(s)") + parser.add_argument("-R", "--renderer", default="null", help="Renderer (null, kms, qt, qtgl)") + + # per camera options + parser.add_argument("-C", "--capture", nargs="?", type=int, const=1000000, action=CustomAction, help="Capture until interrupted by user or until CAPTURE frames captured") + parser.add_argument("--crc", nargs=0, type=bool, action=CustomAction, help="Print CRC32 for captured frames") + parser.add_argument("--save-frames", nargs=0, type=bool, action=CustomAction, help="Save captured frames to files") + parser.add_argument("--metadata", nargs=0, type=bool, action=CustomAction, help="Print the metadata for completed requests") + parser.add_argument("--strict-formats", type=bool, nargs=0, action=CustomAction, help="Do not allow requested stream format(s) to be adjusted") + parser.add_argument("-s", "--stream", nargs="+", action=CustomAction) + args = parser.parse_args() + + cm = libcam.CameraManager.singleton() + + if args.list: + do_cmd_list(cm) + + contexts = [] + + for cam_idx in args.camera: + camera = next((c for i, c in enumerate(cm.cameras) if i + 1 == cam_idx), None) + + if camera is None: + print("Unable to find camera", cam_idx) + return -1 + + contexts.append({ + "camera": camera, + "idx": cam_idx, + "id": "cam" + str(cam_idx), + "reqs-queued": 0, + "reqs-completed": 0, + "opt-capture": args.capture.get(cam_idx, False), + "opt-crc": args.crc.get(cam_idx, False), + "opt-save-frames": 
args.save_frames.get(cam_idx, False), + "opt-metadata": args.metadata.get(cam_idx, False), + "opt-strict-formats": args.strict_formats.get(cam_idx, False), + "opt-stream": args.stream.get(cam_idx, ["role=viewfinder"]), + }) + + for ctx in contexts: + print("Using camera {} as {}".format(ctx["camera"].id, ctx["id"])) + + for ctx in contexts: + if args.list_properties: + do_cmd_list_props(ctx) + if args.list_controls: + do_cmd_list_controls(ctx) + if args.info: + do_cmd_info(ctx) + + if args.capture: + + state = { + "cm": cm, + "contexts": contexts, + "event_handler": event_handler, + "request_prcessed": request_prcessed, + } + + if args.renderer == "null": + import cam_null + renderer = cam_null.NullRenderer(state) + elif args.renderer == "kms": + import cam_kms + renderer = cam_kms.KMSRenderer(state) + elif args.renderer == "qt": + import cam_qt + renderer = cam_qt.QtRenderer(state) + elif args.renderer == "qtgl": + import cam_qtgl + renderer = cam_qtgl.QtRenderer(state) + else: + print("Bad renderer", args.renderer) + return -1 + + state["renderer"] = renderer + + do_cmd_capture(state) + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/src/py/cam/cam_kms.py b/src/py/cam/cam_kms.py new file mode 100644 index 00000000..ee9fe6c7 --- /dev/null +++ b/src/py/cam/cam_kms.py @@ -0,0 +1,183 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# Copyright (C) 2021, Tomi Valkeinen + +import pykms +import selectors +import sys + +FMT_MAP = { + "RGB888": pykms.PixelFormat.RGB888, + "YUYV": pykms.PixelFormat.YUYV, + "ARGB8888": pykms.PixelFormat.ARGB8888, + "XRGB8888": pykms.PixelFormat.XRGB8888, +} + + +class KMSRenderer: + def __init__(self, state): + self.state = state + + self.cm = state["cm"] + self.contexts = state["contexts"] + self.running = False + + card = pykms.Card() + + res = pykms.ResourceManager(card) + conn = res.reserve_connector() + crtc = res.reserve_crtc(conn) + mode = conn.get_default_mode() + modeb = mode.to_blob(card) + + req = pykms.AtomicReq(card) + req.add_connector(conn, crtc) + req.add_crtc(crtc, modeb) + r = req.commit_sync(allow_modeset=True) + assert(r == 0) + + self.card = card + self.resman = res + self.crtc = crtc + self.mode = mode + + self.bufqueue = [] + self.current = None + self.next = None + self.cam_2_drm = {} + + # KMS + + def close(self): + req = pykms.AtomicReq(self.card) + for s in self.streams: + req.add_plane(s["plane"], None, None, dst=(0, 0, 0, 0)) + req.commit() + + def add_plane(self, req, stream, fb): + s = next(s for s in self.streams if s["stream"] == stream) + idx = s["idx"] + plane = s["plane"] + + if idx % 2 == 0: + x = 0 + else: + x = self.mode.hdisplay - fb.width + + if idx // 2 == 0: + y = 0 + else: + y = self.mode.vdisplay - fb.height + + req.add_plane(plane, fb, self.crtc, dst=(x, y, fb.width, fb.height)) + + def apply_request(self, drmreq): + + buffers = drmreq["camreq"].buffers + + for stream, fb in buffers.items(): + drmfb = self.cam_2_drm.get(fb, None) + + req = pykms.AtomicReq(self.card) + self.add_plane(req, stream, drmfb) + req.commit() + + def handle_page_flip(self, frame, time): + old = self.current + self.current = self.next + + if len(self.bufqueue) > 0: + self.next = self.bufqueue.pop(0) + else: + self.next = None + + if self.next: + drmreq = self.next + + self.apply_request(drmreq) + + if old: + req = old["camreq"] + ctx = old["camctx"] + self.state["request_prcessed"](ctx, req) + + def queue(self, drmreq): + if not self.next: + self.next = drmreq + self.apply_request(drmreq) + else: + 
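+            # A page flip is already pending; hold the request in the queue
+            # until handle_page_flip() picks it up.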
self.bufqueue.append(drmreq) + + # libcamera + + def setup(self): + self.streams = [] + + idx = 0 + for ctx in self.contexts: + for stream in ctx["streams"]: + + cfg = stream.configuration + fmt = cfg.pixelFormat + fmt = FMT_MAP[fmt] + + plane = self.resman.reserve_generic_plane(self.crtc, fmt) + assert(plane is not None) + + self.streams.append({ + "idx": idx, + "stream": stream, + "plane": plane, + "fmt": fmt, + "size": cfg.size, + }) + + for fb in ctx["allocator"].buffers(stream): + w, h = cfg.size + stride = cfg.stride + fd = fb.fd(0) + drmfb = pykms.DmabufFramebuffer(self.card, w, h, fmt, + [fd], [stride], [0]) + self.cam_2_drm[fb] = drmfb + + idx += 1 + + def readdrm(self, fileobj): + for ev in self.card.read_events(): + if ev.type == pykms.DrmEventType.FLIP_COMPLETE: + self.handle_page_flip(ev.seq, ev.time) + + def readcam(self, fd): + self.running = self.state["event_handler"](self.state) + + def readkey(self, fileobj): + sys.stdin.readline() + self.running = False + + def run(self): + print("Capturing...") + + self.running = True + + sel = selectors.DefaultSelector() + sel.register(self.card.fd, selectors.EVENT_READ, self.readdrm) + sel.register(self.cm.efd, selectors.EVENT_READ, self.readcam) + sel.register(sys.stdin, selectors.EVENT_READ, self.readkey) + + print("Press enter to exit") + + while self.running: + events = sel.select() + for key, mask in events: + callback = key.data + callback(key.fileobj) + + print("Exiting...") + + def request_handler(self, ctx, req): + + drmreq = { + "camctx": ctx, + "camreq": req, + } + + self.queue(drmreq) diff --git a/src/py/cam/cam_null.py b/src/py/cam/cam_null.py new file mode 100644 index 00000000..f6e30835 --- /dev/null +++ b/src/py/cam/cam_null.py @@ -0,0 +1,47 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# Copyright (C) 2021, Tomi Valkeinen + +import selectors +import sys + + +class NullRenderer: + def __init__(self, state): + self.state = state + + self.cm = state["cm"] + self.contexts = state["contexts"] + + self.running = False + + def setup(self): + pass + + def run(self): + print("Capturing...") + + self.running = True + + sel = selectors.DefaultSelector() + sel.register(self.cm.efd, selectors.EVENT_READ, self.readcam) + sel.register(sys.stdin, selectors.EVENT_READ, self.readkey) + + print("Press enter to exit") + + while self.running: + events = sel.select() + for key, mask in events: + callback = key.data + callback(key.fileobj) + + print("Exiting...") + + def readcam(self, fd): + self.running = self.state["event_handler"](self.state) + + def readkey(self, fileobj): + sys.stdin.readline() + self.running = False + + def request_handler(self, ctx, req): + self.state["request_prcessed"](ctx, req) diff --git a/src/py/cam/cam_qt.py b/src/py/cam/cam_qt.py new file mode 100644 index 00000000..30fb7a1d --- /dev/null +++ b/src/py/cam/cam_qt.py @@ -0,0 +1,354 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# Copyright (C) 2021, Tomi Valkeinen +# +# Debayering code from PiCamera documentation + +from io import BytesIO +from numpy.lib.stride_tricks import as_strided +from PIL import Image +from PIL.ImageQt import ImageQt +from PyQt5 import QtCore, QtGui, QtWidgets +import numpy as np +import sys + + +def rgb_to_pix(rgb): + img = Image.frombuffer("RGB", (rgb.shape[1], rgb.shape[0]), rgb) + qim = ImageQt(img).copy() + pix = QtGui.QPixmap.fromImage(qim) + return pix + + +def separate_components(data, r0, g0, g1, b0): + # Now to split the data up into its red, green, and blue components. The + # Bayer pattern of the OV5647 sensor is BGGR. 
In other words the first + # row contains alternating green/blue elements, the second row contains + # alternating red/green elements, and so on as illustrated below: + # + # GBGBGBGBGBGBGB + # RGRGRGRGRGRGRG + # GBGBGBGBGBGBGB + # RGRGRGRGRGRGRG + # + # Please note that if you use vflip or hflip to change the orientation + # of the capture, you must flip the Bayer pattern accordingly + + rgb = np.zeros(data.shape + (3,), dtype=data.dtype) + rgb[r0[1]::2, r0[0]::2, 0] = data[r0[1]::2, r0[0]::2] # Red + rgb[g0[1]::2, g0[0]::2, 1] = data[g0[1]::2, g0[0]::2] # Green + rgb[g1[1]::2, g1[0]::2, 1] = data[g1[1]::2, g1[0]::2] # Green + rgb[b0[1]::2, b0[0]::2, 2] = data[b0[1]::2, b0[0]::2] # Blue + + return rgb + + +def demosaic(rgb, r0, g0, g1, b0): + # At this point we now have the raw Bayer data with the correct values + # and colors but the data still requires de-mosaicing and + # post-processing. If you wish to do this yourself, end the script here! + # + # Below we present a fairly naive de-mosaic method that simply + # calculates the weighted average of a pixel based on the pixels + # surrounding it. The weighting is provided b0[1] a b0[1]te representation of + # the Bayer filter which we construct first: + + bayer = np.zeros(rgb.shape, dtype=np.uint8) + bayer[r0[1]::2, r0[0]::2, 0] = 1 # Red + bayer[g0[1]::2, g0[0]::2, 1] = 1 # Green + bayer[g1[1]::2, g1[0]::2, 1] = 1 # Green + bayer[b0[1]::2, b0[0]::2, 2] = 1 # Blue + + # Allocate an array to hold our output with the same shape as the input + # data. After this we define the size of window that will be used to + # calculate each weighted average (3x3). Then we pad out the rgb and + # bayer arrays, adding blank pixels at their edges to compensate for the + # size of the window when calculating averages for edge pixels. + + output = np.empty(rgb.shape, dtype=rgb.dtype) + window = (3, 3) + borders = (window[0] - 1, window[1] - 1) + border = (borders[0] // 2, borders[1] // 2) + + # rgb_pad = np.zeros(( + # rgb.shape[0] + borders[0], + # rgb.shape[1] + borders[1], + # rgb.shape[2]), dtype=rgb.dtype) + # rgb_pad[ + # border[0]:rgb_pad.shape[0] - border[0], + # border[1]:rgb_pad.shape[1] - border[1], + # :] = rgb + # rgb = rgb_pad + # + # bayer_pad = np.zeros(( + # bayer.shape[0] + borders[0], + # bayer.shape[1] + borders[1], + # bayer.shape[2]), dtype=bayer.dtype) + # bayer_pad[ + # border[0]:bayer_pad.shape[0] - border[0], + # border[1]:bayer_pad.shape[1] - border[1], + # :] = bayer + # bayer = bayer_pad + + # In numpy >=1.7.0 just use np.pad (version in Raspbian is 1.6.2 at the + # time of writing...) + # + rgb = np.pad(rgb, [ + (border[0], border[0]), + (border[1], border[1]), + (0, 0), + ], 'constant') + bayer = np.pad(bayer, [ + (border[0], border[0]), + (border[1], border[1]), + (0, 0), + ], 'constant') + + # For each plane in the RGB data, we use a nifty numpy trick + # (as_strided) to construct a view over the plane of 3x3 matrices. 
We do + # the same for the bayer array, then use Einstein summation on each + # (np.sum is simpler, but copies the data so it's slower), and divide + # the results to get our weighted average: + + for plane in range(3): + p = rgb[..., plane] + b = bayer[..., plane] + pview = as_strided(p, shape=( + p.shape[0] - borders[0], + p.shape[1] - borders[1]) + window, strides=p.strides * 2) + bview = as_strided(b, shape=( + b.shape[0] - borders[0], + b.shape[1] - borders[1]) + window, strides=b.strides * 2) + psum = np.einsum('ijkl->ij', pview) + bsum = np.einsum('ijkl->ij', bview) + output[..., plane] = psum // bsum + + return output + + +def to_rgb(fmt, size, data): + w = size[0] + h = size[1] + + if fmt == "YUYV": + # YUV422 + yuyv = data.reshape((h, w // 2 * 4)) + + # YUV444 + yuv = np.empty((h, w, 3), dtype=np.uint8) + yuv[:, :, 0] = yuyv[:, 0::2] # Y + yuv[:, :, 1] = yuyv[:, 1::4].repeat(2, axis=1) # U + yuv[:, :, 2] = yuyv[:, 3::4].repeat(2, axis=1) # V + + m = np.array([ + [ 1.0, 1.0, 1.0], + [-0.000007154783816076815, -0.3441331386566162, 1.7720025777816772], + [ 1.4019975662231445, -0.7141380310058594 , 0.00001542569043522235] + ]) + + rgb = np.dot(yuv, m) + rgb[:, :, 0] -= 179.45477266423404 + rgb[:, :, 1] += 135.45870971679688 + rgb[:, :, 2] -= 226.8183044444304 + rgb = rgb.astype(np.uint8) + + elif fmt == "RGB888": + rgb = data.reshape((h, w, 3)) + rgb[:, :, [0, 1, 2]] = rgb[:, :, [2, 1, 0]] + + elif fmt == "BGR888": + rgb = data.reshape((h, w, 3)) + + elif fmt in ["ARGB8888", "XRGB8888"]: + rgb = data.reshape((h, w, 4)) + rgb = np.flip(rgb, axis=2) + # drop alpha component + rgb = np.delete(rgb, np.s_[0::4], axis=2) + + elif fmt.startswith("S"): + bayer_pattern = fmt[1:5] + bitspp = int(fmt[5:]) + + # TODO: shifting leaves the lowest bits 0 + if bitspp == 8: + data = data.reshape((h, w)) + data = data.astype(np.uint16) << 8 + elif bitspp in [10, 12]: + data = data.view(np.uint16) + data = data.reshape((h, w)) + data = data << (16 - bitspp) + else: + raise Exception("Bad bitspp:" + str(bitspp)) + + idx = bayer_pattern.find("R") + assert(idx != -1) + r0 = (idx % 2, idx // 2) + + idx = bayer_pattern.find("G") + assert(idx != -1) + g0 = (idx % 2, idx // 2) + + idx = bayer_pattern.find("G", idx + 1) + assert(idx != -1) + g1 = (idx % 2, idx // 2) + + idx = bayer_pattern.find("B") + assert(idx != -1) + b0 = (idx % 2, idx // 2) + + rgb = separate_components(data, r0, g0, g1, b0) + rgb = demosaic(rgb, r0, g0, g1, b0) + rgb = (rgb >> 8).astype(np.uint8) + + else: + rgb = None + + return rgb + + +class QtRenderer: + def __init__(self, state): + self.state = state + + self.cm = state["cm"] + self.contexts = state["contexts"] + + def setup(self): + self.app = QtWidgets.QApplication([]) + + windows = [] + + for ctx in self.contexts: + camera = ctx["camera"] + + for stream in ctx["streams"]: + fmt = stream.configuration.pixelFormat + size = stream.configuration.size + + window = MainWindow(ctx, stream) + window.setAttribute(QtCore.Qt.WA_ShowWithoutActivating) + window.show() + windows.append(window) + + self.windows = windows + + def run(self): + camnotif = QtCore.QSocketNotifier(self.cm.efd, QtCore.QSocketNotifier.Read) + camnotif.activated.connect(lambda x: self.readcam()) + + keynotif = QtCore.QSocketNotifier(sys.stdin.fileno(), QtCore.QSocketNotifier.Read) + keynotif.activated.connect(lambda x: self.readkey()) + + print("Capturing...") + + self.app.exec() + + print("Exiting...") + + def readcam(self): + running = self.state["event_handler"](self.state) + + if not running: + self.app.quit() + + 
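+    # A newline on stdin stops the capture and quits the Qt event loop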
def readkey(self): + sys.stdin.readline() + self.app.quit() + + def request_handler(self, ctx, req): + buffers = req.buffers + + for stream, fb in buffers.items(): + wnd = next(wnd for wnd in self.windows if wnd.stream == stream) + + wnd.handle_request(stream, fb) + + self.state["request_prcessed"](ctx, req) + + def cleanup(self): + for w in self.windows: + w.close() + + +class MainWindow(QtWidgets.QWidget): + def __init__(self, ctx, stream): + super().__init__() + + self.ctx = ctx + self.stream = stream + + self.label = QtWidgets.QLabel() + + windowLayout = QtWidgets.QHBoxLayout() + self.setLayout(windowLayout) + + windowLayout.addWidget(self.label) + + controlsLayout = QtWidgets.QVBoxLayout() + windowLayout.addLayout(controlsLayout) + + windowLayout.addStretch() + + group = QtWidgets.QGroupBox("Info") + groupLayout = QtWidgets.QVBoxLayout() + group.setLayout(groupLayout) + controlsLayout.addWidget(group) + + lab = QtWidgets.QLabel(ctx["id"]) + groupLayout.addWidget(lab) + + self.frameLabel = QtWidgets.QLabel() + groupLayout.addWidget(self.frameLabel) + + group = QtWidgets.QGroupBox("Properties") + groupLayout = QtWidgets.QVBoxLayout() + group.setLayout(groupLayout) + controlsLayout.addWidget(group) + + camera = ctx["camera"] + + for k, v in camera.properties.items(): + lab = QtWidgets.QLabel() + lab.setText(k + " = " + str(v)) + groupLayout.addWidget(lab) + + group = QtWidgets.QGroupBox("Controls") + groupLayout = QtWidgets.QVBoxLayout() + group.setLayout(groupLayout) + controlsLayout.addWidget(group) + + for k, (min, max, default) in camera.controls.items(): + lab = QtWidgets.QLabel() + lab.setText("{} = {}/{}/{}".format(k, min, max, default)) + groupLayout.addWidget(lab) + + controlsLayout.addStretch() + + def buf_to_qpixmap(self, stream, fb): + with fb.mmap(0) as b: + cfg = stream.configuration + w, h = cfg.size + pitch = cfg.stride + + if cfg.pixelFormat == "MJPEG": + img = Image.open(BytesIO(b)) + qim = ImageQt(img).copy() + pix = QtGui.QPixmap.fromImage(qim) + else: + data = np.array(b, dtype=np.uint8) + rgb = to_rgb(cfg.pixelFormat, cfg.size, data) + + if rgb is None: + raise Exception("Format not supported: " + cfg.pixelFormat) + + pix = rgb_to_pix(rgb) + + return pix + + def handle_request(self, stream, fb): + ctx = self.ctx + + pix = self.buf_to_qpixmap(stream, fb) + self.label.setPixmap(pix) + + self.frameLabel.setText("Queued: {}\nDone: {}\nFps: {:.2f}" + .format(ctx["reqs-queued"], ctx["reqs-completed"], ctx["fps"])) diff --git a/src/py/cam/cam_qtgl.py b/src/py/cam/cam_qtgl.py new file mode 100644 index 00000000..8f9ab457 --- /dev/null +++ b/src/py/cam/cam_qtgl.py @@ -0,0 +1,385 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# Copyright (C) 2021, Tomi Valkeinen + +from PyQt5 import QtCore, QtWidgets +from PyQt5.QtCore import Qt + +import math +import numpy as np +import os +import sys + +os.environ["PYOPENGL_PLATFORM"] = "egl" + +import OpenGL +# OpenGL.FULL_LOGGING = True + +from OpenGL import GL as gl +from OpenGL.EGL.EXT.image_dma_buf_import import * +from OpenGL.EGL.KHR.image import * +from OpenGL.EGL.VERSION.EGL_1_0 import * +from OpenGL.EGL.VERSION.EGL_1_2 import * +from OpenGL.EGL.VERSION.EGL_1_3 import * + +from OpenGL.GLES2.OES.EGL_image import * +from OpenGL.GLES2.OES.EGL_image_external import * +from OpenGL.GLES2.VERSION.GLES2_2_0 import * +from OpenGL.GLES3.VERSION.GLES3_3_0 import * + +from OpenGL.GL import shaders + +from gl_helpers import * + +# libcamera format string -> DRM fourcc +FMT_MAP = { + "RGB888": "RG24", + "XRGB8888": "XR24", + "ARGB8888": "AR24", 
+ "YUYV": "YUYV", +} + + +class EglState: + def __init__(self): + self.create_display() + self.choose_config() + self.create_context() + self.check_extensions() + + def create_display(self): + xdpy = getEGLNativeDisplay() + dpy = eglGetDisplay(xdpy) + self.display = dpy + + def choose_config(self): + dpy = self.display + + major, minor = EGLint(), EGLint() + + b = eglInitialize(dpy, major, minor) + assert(b) + + print("EGL {} {}".format( + eglQueryString(dpy, EGL_VENDOR).decode(), + eglQueryString(dpy, EGL_VERSION).decode())) + + check_egl_extensions(dpy, ["EGL_EXT_image_dma_buf_import"]) + + b = eglBindAPI(EGL_OPENGL_ES_API) + assert(b) + + def print_config(dpy, cfg): + + def _getconf(dpy, cfg, a): + value = ctypes.c_long() + eglGetConfigAttrib(dpy, cfg, a, value) + return value.value + + getconf = lambda a: _getconf(dpy, cfg, a) + + print("EGL Config {}: color buf {}/{}/{}/{} = {}, depth {}, stencil {}, native visualid {}, native visualtype {}".format( + getconf(EGL_CONFIG_ID), + getconf(EGL_ALPHA_SIZE), + getconf(EGL_RED_SIZE), + getconf(EGL_GREEN_SIZE), + getconf(EGL_BLUE_SIZE), + getconf(EGL_BUFFER_SIZE), + getconf(EGL_DEPTH_SIZE), + getconf(EGL_STENCIL_SIZE), + getconf(EGL_NATIVE_VISUAL_ID), + getconf(EGL_NATIVE_VISUAL_TYPE))) + + if False: + num_configs = ctypes.c_long() + eglGetConfigs(dpy, None, 0, num_configs) + print("{} configs".format(num_configs.value)) + + configs = (EGLConfig * num_configs.value)() + eglGetConfigs(dpy, configs, num_configs.value, num_configs) + for config_id in configs: + print_config(dpy, config_id) + + config_attribs = [ + EGL_SURFACE_TYPE, EGL_WINDOW_BIT, + EGL_RED_SIZE, 8, + EGL_GREEN_SIZE, 8, + EGL_BLUE_SIZE, 8, + EGL_ALPHA_SIZE, 0, + EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, + EGL_NONE, + ] + + n = EGLint() + configs = (EGLConfig * 1)() + b = eglChooseConfig(dpy, config_attribs, configs, 1, n) + assert(b and n.value == 1) + config = configs[0] + + print("Chosen Config:") + print_config(dpy, config) + + self.config = config + + def create_context(self): + dpy = self.display + + context_attribs = [ + EGL_CONTEXT_CLIENT_VERSION, 2, + EGL_NONE, + ] + + context = eglCreateContext(dpy, self.config, EGL_NO_CONTEXT, context_attribs) + assert(context) + + b = eglMakeCurrent(dpy, EGL_NO_SURFACE, EGL_NO_SURFACE, context) + assert(b) + + self.context = context + + def check_extensions(self): + check_gl_extensions(["GL_OES_EGL_image"]) + + assert(eglCreateImageKHR) + assert(eglDestroyImageKHR) + assert(glEGLImageTargetTexture2DOES) + + +class QtRenderer: + def __init__(self, state): + self.state = state + + def setup(self): + self.app = QtWidgets.QApplication([]) + + window = MainWindow(self.state) + window.setAttribute(QtCore.Qt.WA_ShowWithoutActivating) + window.show() + + self.window = window + + def run(self): + camnotif = QtCore.QSocketNotifier(self.state["cm"].efd, QtCore.QSocketNotifier.Read) + camnotif.activated.connect(lambda x: self.readcam()) + + keynotif = QtCore.QSocketNotifier(sys.stdin.fileno(), QtCore.QSocketNotifier.Read) + keynotif.activated.connect(lambda x: self.readkey()) + + print("Capturing...") + + self.app.exec() + + print("Exiting...") + + def readcam(self): + running = self.state["event_handler"](self.state) + + if not running: + self.app.quit() + + def readkey(self): + sys.stdin.readline() + self.app.quit() + + def request_handler(self, ctx, req): + self.window.handle_request(ctx, req) + + def cleanup(self): + self.window.close() + + +class MainWindow(QtWidgets.QWidget): + def __init__(self, state): + super().__init__() + + 
self.setAttribute(Qt.WA_PaintOnScreen) + self.setAttribute(Qt.WA_NativeWindow) + + self.state = state + + self.textures = {} + self.reqqueue = {} + self.current = {} + + for ctx in self.state["contexts"]: + + self.reqqueue[ctx["idx"]] = [] + self.current[ctx["idx"]] = [] + + for stream in ctx["streams"]: + fmt = stream.configuration.pixelFormat + size = stream.configuration.size + + if fmt not in FMT_MAP: + raise Exception("Unsupported pixel format: " + str(fmt)) + + self.textures[stream] = None + + num_tiles = len(self.textures) + self.num_columns = math.ceil(math.sqrt(num_tiles)) + self.num_rows = math.ceil(num_tiles / self.num_columns) + + self.egl = EglState() + + self.surface = None + + def paintEngine(self): + return None + + def create_surface(self): + native_surface = c_void_p(self.winId().__int__()) + surface = eglCreateWindowSurface(self.egl.display, self.egl.config, + native_surface, None) + + b = eglMakeCurrent(self.egl.display, self.surface, self.surface, self.egl.context) + assert(b) + + self.surface = surface + + def init_gl(self): + self.create_surface() + + vertShaderSrc = """ + attribute vec2 aPosition; + varying vec2 texcoord; + + void main() + { + gl_Position = vec4(aPosition * 2.0 - 1.0, 0.0, 1.0); + texcoord.x = aPosition.x; + texcoord.y = 1.0 - aPosition.y; + } + """ + fragShaderSrc = """ + #extension GL_OES_EGL_image_external : enable + precision mediump float; + varying vec2 texcoord; + uniform samplerExternalOES texture; + + void main() + { + gl_FragColor = texture2D(texture, texcoord); + } + """ + + program = shaders.compileProgram( + shaders.compileShader(vertShaderSrc, GL_VERTEX_SHADER), + shaders.compileShader(fragShaderSrc, GL_FRAGMENT_SHADER) + ) + + glUseProgram(program) + + glClearColor(0.5, 0.8, 0.7, 1.0) + + vertPositions = [ + 0.0, 0.0, + 1.0, 0.0, + 1.0, 1.0, + 0.0, 1.0 + ] + + inputAttrib = glGetAttribLocation(program, "aPosition") + glVertexAttribPointer(inputAttrib, 2, GL_FLOAT, GL_FALSE, 0, vertPositions) + glEnableVertexAttribArray(inputAttrib) + + def create_texture(self, stream, fb): + cfg = stream.configuration + fmt = cfg.pixelFormat + fmt = str_to_fourcc(FMT_MAP[fmt]) + w, h = cfg.size + + attribs = [ + EGL_WIDTH, w, + EGL_HEIGHT, h, + EGL_LINUX_DRM_FOURCC_EXT, fmt, + EGL_DMA_BUF_PLANE0_FD_EXT, fb.fd(0), + EGL_DMA_BUF_PLANE0_OFFSET_EXT, 0, + EGL_DMA_BUF_PLANE0_PITCH_EXT, cfg.stride, + EGL_NONE, + ] + + image = eglCreateImageKHR(self.egl.display, + EGL_NO_CONTEXT, + EGL_LINUX_DMA_BUF_EXT, + None, + attribs) + assert(image) + + textures = glGenTextures(1) + glBindTexture(GL_TEXTURE_EXTERNAL_OES, textures) + glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_LINEAR) + glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_LINEAR) + glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE) + glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE) + glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, image) + + return textures + + def resizeEvent(self, event): + size = event.size() + + print("Resize", size) + + super().resizeEvent(event) + + if self.surface is None: + return + + glViewport(0, 0, size.width() // 2, size.height()) + + def paintEvent(self, event): + if self.surface is None: + self.init_gl() + + for ctx_idx, queue in self.reqqueue.items(): + if len(queue) == 0: + continue + + ctx = next(ctx for ctx in self.state["contexts"] if ctx["idx"] == ctx_idx) + + if self.current[ctx_idx]: + old = self.current[ctx_idx] + self.current[ctx_idx] = None + 
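+                # Hand the request that was on screen back to cam.py so it
+                # can be reused and re-queued.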
self.state["request_prcessed"](ctx, old) + + next_req = queue.pop(0) + self.current[ctx_idx] = next_req + + stream, fb = next(iter(next_req.buffers.items())) + + self.textures[stream] = self.create_texture(stream, fb) + + self.paint_gl() + + def paint_gl(self): + b = eglMakeCurrent(self.egl.display, self.surface, self.surface, self.egl.context) + assert(b) + + glClear(GL_COLOR_BUFFER_BIT) + + size = self.size() + + for idx, ctx in enumerate(self.state["contexts"]): + for stream in ctx["streams"]: + if self.textures[stream] is None: + continue + + w = size.width() // self.num_columns + h = size.height() // self.num_rows + + x = idx % self.num_columns + y = idx // self.num_columns + + x *= w + y *= h + + glViewport(x, y, w, h) + + glBindTexture(GL_TEXTURE_EXTERNAL_OES, self.textures[stream]) + glDrawArrays(GL_TRIANGLE_FAN, 0, 4) + + b = eglSwapBuffers(self.egl.display, self.surface) + assert(b) + + def handle_request(self, ctx, req): + self.reqqueue[ctx["idx"]].append(req) + self.update() diff --git a/src/py/cam/gl_helpers.py b/src/py/cam/gl_helpers.py new file mode 100644 index 00000000..925901dd --- /dev/null +++ b/src/py/cam/gl_helpers.py @@ -0,0 +1,74 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# Copyright (C) 2021, Tomi Valkeinen + +from OpenGL.EGL.VERSION.EGL_1_0 import EGLNativeDisplayType, eglGetProcAddress, eglQueryString, EGL_EXTENSIONS + +from OpenGL.raw.GLES2 import _types as _cs +from OpenGL.GLES2.VERSION.GLES2_2_0 import * +from OpenGL.GLES3.VERSION.GLES3_3_0 import * +from OpenGL import GL as gl + +from ctypes import c_int, c_char_p, c_void_p, cdll, POINTER, util, \ + pointer, CFUNCTYPE, c_bool + + +def getEGLNativeDisplay(): + _x11lib = cdll.LoadLibrary(util.find_library("X11")) + XOpenDisplay = _x11lib.XOpenDisplay + XOpenDisplay.argtypes = [c_char_p] + XOpenDisplay.restype = POINTER(EGLNativeDisplayType) + + xdpy = XOpenDisplay(None) + + +# Hack. PyOpenGL doesn't seem to manage to find glEGLImageTargetTexture2DOES. +def getglEGLImageTargetTexture2DOES(): + funcptr = eglGetProcAddress("glEGLImageTargetTexture2DOES") + prototype = CFUNCTYPE(None, _cs.GLenum, _cs.GLeglImageOES) + return prototype(funcptr) + + +glEGLImageTargetTexture2DOES = getglEGLImageTargetTexture2DOES() + + +def str_to_fourcc(str): + assert(len(str) == 4) + fourcc = 0 + for i, v in enumerate([ord(c) for c in str]): + fourcc |= v << (i * 8) + return fourcc + + +def get_gl_extensions(): + n = GLint() + glGetIntegerv(GL_NUM_EXTENSIONS, n) + gl_extensions = [] + for i in range(n.value): + gl_extensions.append(gl.glGetStringi(GL_EXTENSIONS, i).decode()) + return gl_extensions + + +def check_gl_extensions(required_extensions): + extensions = get_gl_extensions() + + if False: + print("GL EXTENSIONS: ", " ".join(extensions)) + + for ext in required_extensions: + if ext not in extensions: + raise Exception(ext + " missing") + + +def get_egl_extensions(egl_display): + return eglQueryString(egl_display, EGL_EXTENSIONS).decode().split(" ") + + +def check_egl_extensions(egl_display, required_extensions): + extensions = get_egl_extensions(egl_display) + + if False: + print("EGL EXTENSIONS: ", " ".join(extensions)) + + for ext in required_extensions: + if ext not in extensions: + raise Exception(ext + " missing")