From patchwork Wed Oct 26 16:32:52 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tomi Valkeinen X-Patchwork-Id: 17690 Return-Path: X-Original-To: parsemail@patchwork.libcamera.org Delivered-To: parsemail@patchwork.libcamera.org Received: from lancelot.ideasonboard.com (lancelot.ideasonboard.com [92.243.16.209]) by patchwork.libcamera.org (Postfix) with ESMTPS id 74D6FBD16B for ; Wed, 26 Oct 2022 16:33:07 +0000 (UTC) Received: from lancelot.ideasonboard.com (localhost [IPv6:::1]) by lancelot.ideasonboard.com (Postfix) with ESMTP id C715362F5C; Wed, 26 Oct 2022 18:33:06 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=libcamera.org; s=mail; t=1666801986; bh=Win9EzSik4UiD6vhwhMXD8O5nm1PHSeXO5R8O7ajqIc=; h=To:Date:Subject:List-Id:List-Unsubscribe:List-Archive:List-Post: List-Help:List-Subscribe:From:Reply-To:From; b=OFhiqENvANehNP/WVGrtDXq5vyNkaP2eQ0Bn2ElVliQyJO5W9EZgN2yoUOvpZv1Sb ye3SofTqJeKuYA+f2GgXUyQOZBf3yuyUtKmHsET9MAg/jcocAZYkiKz4MoBUQyfdNF h4/fYRCdrHBZL8RdAYcNUuH43LoVNanyRazY3Ak56PKQLgG47ATn9mQJ+QlyaR5Pl4 6qRTssYQNFD3lICPdLeKthoyECTYHn7Iq6M5phjTswCGezcbCtQP2q/FijQvk4vYxx o1Cd2il67EggDl0tOdVAS7KJWcxe31eLg4VCKZCb2JLCkfnrrPDTuJ3q2DHc/kCf1t sNjw03RK61j5g== Received: from perceval.ideasonboard.com (perceval.ideasonboard.com [213.167.242.64]) by lancelot.ideasonboard.com (Postfix) with ESMTPS id F29F661F4B for ; Wed, 26 Oct 2022 18:33:05 +0200 (CEST) Authentication-Results: lancelot.ideasonboard.com; dkim=pass (1024-bit key; unprotected) header.d=ideasonboard.com header.i=@ideasonboard.com header.b="lvY9Cngc"; dkim-atps=neutral Received: from desky.lan (91-154-32-225.elisa-laajakaista.fi [91.154.32.225]) by perceval.ideasonboard.com (Postfix) with ESMTPSA id 15E6E4F8; Wed, 26 Oct 2022 18:33:05 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=ideasonboard.com; s=mail; t=1666801985; bh=Win9EzSik4UiD6vhwhMXD8O5nm1PHSeXO5R8O7ajqIc=; 
h=From:To:Cc:Subject:Date:From; b=lvY9CngcRFYuW1irTfYBjsXtiC7REK78KkvsIqVtRcYRmPimeRqMZ3bkz96VRkN/c CvlRyYC/Zap6B2niSEWQR3UR5n2DP7VBdAV36kAJMbxLV8kavTf/IMiUHbqkWFOGms gE1PAxzcn+OjhX6Nh3O6FUNGqUuHZPb4Y/AXETaI= To: libcamera-devel@lists.libcamera.org, Laurent Pinchart , Kieran Bingham , David Plowman Date: Wed, 26 Oct 2022 19:32:52 +0300 Message-Id: <20221026163252.563851-1-tomi.valkeinen@ideasonboard.com> X-Mailer: git-send-email 2.34.1 MIME-Version: 1.0 Subject: [libcamera-devel] [PATCH] py: examples: Add simple-capture-opencv.py X-BeenThere: libcamera-devel@lists.libcamera.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-Patchwork-Original-From: Tomi Valkeinen via libcamera-devel From: Tomi Valkeinen Reply-To: Tomi Valkeinen Errors-To: libcamera-devel-bounces@lists.libcamera.org Sender: "libcamera-devel" Add a simple example showing how the captured frame can be imported into opencv and processed there. This is only a simple example, and it has the same issue as all the other examples: pixel format conversions. Each pixel format requires custom conversion code, and I have implemented the conversion only for a few of them. Also, this is almost identical to simple-capture.py. Normally it would make sense to add common helpers, but so far I have stayed away from it to keep the examples independent and in a single file. Making the examples "too fancy" will easily make them more difficult to understand for a newcomer. 
#!/usr/bin/env python3

# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) 2022, Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>

# A simple capture example with opencv. This is almost identical to
# simple-capture.py, except we use opencv to process and show the image.

import argparse
import cv2
import libcamera as libcam
import libcamera.utils
import numpy as np
import selectors
import sys

# Number of frames to capture
TOTAL_FRAMES = 100


def cartoonify(cvbuf):
    """Return a cartoon-style edge map of the given BGR image.

    Grayscale + median blur + adaptive threshold produces a black-and-white
    "pencil sketch" of the input frame.
    """
    gray = cv2.cvtColor(cvbuf, cv2.COLOR_BGR2GRAY)
    smoothed = cv2.medianBlur(gray, 5)
    edges = cv2.adaptiveThreshold(smoothed, 255,
                                  cv2.ADAPTIVE_THRESH_MEAN_C,
                                  cv2.THRESH_BINARY, 9, 9)
    return edges


def libcam_to_cv(mfb: libcamera.utils.MappedFrameBuffer, cfg: libcam.StreamConfiguration):
    """Convert a mapped libcamera frame buffer to an OpenCV (numpy) image.

    OpenCV's imshow() and its COLOR_BGR2* conversions interpret 3-channel
    arrays as BGR, so every branch below produces BGR channel order. (The
    previous version produced RGB, which imshow then displayed with red and
    blue swapped — that was the unexplained "bluish tint".)
    """
    w = cfg.size.width
    h = cfg.size.height
    fmt = cfg.pixel_format

    data = np.array(mfb.planes[0], dtype=np.uint8)

    if fmt == libcam.formats.YUYV:
        # Repack the interleaved Y0 U Y1 V byte stream into the 2-channel
        # (Y, interleaved UV) layout that cvtColor expects for the *_YUYV
        # conversion codes, then convert directly to BGR for display.
        yuyv = data.reshape((h, w // 2 * 4))
        yuv = np.empty((h, w, 2), dtype=np.uint8)
        yuv[:, :, 0] = yuyv[:, 0::2]  # Y
        yuv[:, :, 1] = yuyv[:, 1::2]  # UV
        image = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR_YUYV)
    elif fmt == libcam.formats.RGB888:
        # libcamera RGB888 (DRM fourcc RG24) is stored B, G, R in memory,
        # which is already OpenCV's BGR byte order — no swap needed.
        image = data.reshape((h, w, 3))
    elif fmt == libcam.formats.BGR888:
        # BGR888 (DRM fourcc BG24) is stored R, G, B in memory; swap the
        # first and last channels to get BGR for OpenCV.
        image = data.reshape((h, w, 3))
        image[:, :, [0, 2]] = image[:, :, [2, 0]]
    elif fmt == libcam.formats.MJPEG:
        # imdecode() already returns a BGR image.
        image = cv2.imdecode(data, cv2.IMREAD_COLOR)
    else:
        raise Exception('Unsupported pixel format')

    return image


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--camera', type=str, default='1',
                        help='Camera index number (starting from 1) or part of the name')
    parser.add_argument('-f', '--format', type=str, help='Pixel format')
    parser.add_argument('-s', '--size', type=str, help='Size ("WxH")')
    args = parser.parse_args()

    cm = libcam.CameraManager.singleton()

    try:
        if args.camera.isnumeric():
            cam_idx = int(args.camera)
            cam = next((cam for i, cam in enumerate(cm.cameras) if i + 1 == cam_idx))
        else:
            cam = next((cam for cam in cm.cameras if args.camera in cam.id))
    except Exception:
        print(f'Failed to find camera "{args.camera}"')
        return -1

    # Acquire the camera for our use

    ret = cam.acquire()
    assert ret == 0

    # Configure the camera

    cam_config = cam.generate_configuration([libcam.StreamRole.Viewfinder])

    stream_config = cam_config.at(0)

    if args.format:
        fmt = libcam.PixelFormat(args.format)
        stream_config.pixel_format = fmt

    if args.size:
        w, h = [int(v) for v in args.size.split('x')]
        stream_config.size = libcam.Size(w, h)

    ret = cam.configure(cam_config)
    assert ret == 0

    print(f'Capturing {TOTAL_FRAMES} frames with {stream_config}')

    stream = stream_config.stream

    # Allocate the buffers for capture

    allocator = libcam.FrameBufferAllocator(cam)
    ret = allocator.allocate(stream)
    assert ret > 0

    num_bufs = len(allocator.buffers(stream))

    # Create the requests and assign a buffer for each request

    reqs = []
    for i in range(num_bufs):
        # Use the buffer index as the cookie
        req = cam.create_request(i)

        buffer = allocator.buffers(stream)[i]
        ret = req.add_buffer(stream, buffer)
        assert ret == 0

        reqs.append(req)

    # Start the camera

    ret = cam.start()
    assert ret == 0

    # frames_queued and frames_done track the number of frames queued and done

    frames_queued = 0
    frames_done = 0

    # Queue the requests to the camera

    for req in reqs:
        ret = cam.queue_request(req)
        assert ret == 0
        frames_queued += 1

    # The main loop. Wait for the queued Requests to complete, process them,
    # and re-queue them again.

    sel = selectors.DefaultSelector()
    sel.register(cm.event_fd, selectors.EVENT_READ)

    while frames_done < TOTAL_FRAMES:
        # cm.get_ready_requests() does not block, so we use a Selector to wait
        # for a camera event. Here we should almost always get a single
        # Request, but in some cases there could be multiple or none.

        events = sel.select()
        if not events:
            continue

        reqs = cm.get_ready_requests()

        for req in reqs:
            frames_done += 1

            buffers = req.buffers

            # A ready Request could contain multiple buffers if multiple streams
            # were being used. Here we know we only have a single stream,
            # and we use next(iter()) to get the first and only buffer.

            assert len(buffers) == 1

            stream, fb = next(iter(buffers.items()))

            with libcamera.utils.MappedFrameBuffer(fb) as mfb:
                # Convert the raw buffer to an opencv (numpy) BGR buffer
                image = libcam_to_cv(mfb, stream.configuration)

                # Process the image
                image = cartoonify(image)

                # Show the image in a window
                cv2.imshow('Image', image)

                # This one is needed for the opencv to show the window...
                cv2.waitKey(1)

            meta = fb.metadata

            print("seq {:3}, bytes {}, frames queued/done {:3}/{:<3}"
                  .format(meta.sequence,
                          '/'.join([str(p.bytes_used) for p in meta.planes]),
                          frames_queued, frames_done))

            # If we want to capture more frames we need to queue more Requests.
            # We could create a totally new Request, but it is more efficient
            # to reuse the existing one that we just received.
            if frames_queued < TOTAL_FRAMES:
                req.reuse()
                cam.queue_request(req)
                frames_queued += 1

    # Stop the camera

    ret = cam.stop()
    assert ret == 0

    # Release the camera

    ret = cam.release()
    assert ret == 0

    return 0


if __name__ == '__main__':
    sys.exit(main())