From patchwork Fri Jan 13 15:17:07 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Daniel Oakley X-Patchwork-Id: 18107 Return-Path: X-Original-To: parsemail@patchwork.libcamera.org Delivered-To: parsemail@patchwork.libcamera.org Received: from lancelot.ideasonboard.com (lancelot.ideasonboard.com [92.243.16.209]) by patchwork.libcamera.org (Postfix) with ESMTPS id 5B79EC3292 for ; Fri, 13 Jan 2023 15:17:53 +0000 (UTC) Received: from lancelot.ideasonboard.com (localhost [IPv6:::1]) by lancelot.ideasonboard.com (Postfix) with ESMTP id B4BBF625DD; Fri, 13 Jan 2023 16:17:52 +0100 (CET) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=libcamera.org; s=mail; t=1673623072; bh=H6OVkaWkiDgsMo9YtqJcuXXbZXnvxdKw5pRuHxljg2g=; h=To:Date:Subject:List-Id:List-Unsubscribe:List-Archive:List-Post: List-Help:List-Subscribe:From:Reply-To:Cc:From; b=E6xE+bRyhNDc4buP77nO7hLvvnKZyDlOFLNU6n+njWKwaVsO7a8tgijpBJ3TA7ZnV zr+deCX8ubOmFTspMEtLnPsaus0wnUiUAV8rOacQ8Xiw1YAMHn3mK5bkclKxxecAhy SJQHi2LUV7uO7QVjEUlyZ1woaPiK8e04XD6BGq7CsQjfCOIp7dru/b7CJczOrzQ44F 4Yz36p60gMkcJs4aQ2gT+l2ch+xTAu6T4JjeHla+0EWBwARoYEhiTN4uLxTIuL7eH6 A7rv+1GFTdZXD6DN01+hYfUZJxA8blRbqgep/P6WmPX4teAVLtgdnmFuW+m0OtdRUl Gw4Nwne1hCmjg== Received: from perceval.ideasonboard.com (perceval.ideasonboard.com [213.167.242.64]) by lancelot.ideasonboard.com (Postfix) with ESMTPS id 93CF161EFF for ; Fri, 13 Jan 2023 16:17:48 +0100 (CET) Authentication-Results: lancelot.ideasonboard.com; dkim=pass (1024-bit key; unprotected) header.d=ideasonboard.com header.i=@ideasonboard.com header.b="E2KVGhQr"; dkim-atps=neutral Received: from danielLaptop.tendawifi.com (unknown [90.242.103.36]) by perceval.ideasonboard.com (Postfix) with ESMTPSA id BE005890; Fri, 13 Jan 2023 16:17:47 +0100 (CET) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=ideasonboard.com; s=mail; t=1673623068; bh=H6OVkaWkiDgsMo9YtqJcuXXbZXnvxdKw5pRuHxljg2g=; h=From:To:Cc:Subject:Date:From; 
b=E2KVGhQrNFfngzmO15orGXE93FPPEEnofpxRAwvm5Toe+ULPTd6FtIehhQydYS9nR fL0kpR/4gQnsd8i3uVrw+DBnvq1d3Whq78XJ2KWgehhYlQA4dlDUfwOTpfQyf6Xf+/ E1yGVkzFzZzCWecv4JOEiEm3nduNH+n3s//UfT5k= To: libcamera-devel@lists.libcamera.org Date: Fri, 13 Jan 2023 15:17:07 +0000 Message-Id: <20230113151707.5694-1-daniel.oakley@ideasonboard.com> X-Mailer: git-send-email 2.38.2 MIME-Version: 1.0 Subject: [libcamera-devel] [PATCH] py: cam.py: Provide live graph of request metadata X-BeenThere: libcamera-devel@lists.libcamera.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-Patchwork-Original-From: Daniel Oakley via libcamera-devel From: Daniel Oakley Reply-To: Daniel Oakley Cc: Daniel Oakley Errors-To: libcamera-devel-bounces@lists.libcamera.org Sender: "libcamera-devel" Metadata is very useful when improving specific camera configurations. Currently, there is an argument to display the metadata in text form, however this can be hard to visualise and spot changes or patterns over time. Therefore this proposed patch adds an argument to display this metadata in graph form. The metadata graph has 3 optional parameters: - refresh, number of times a second to update the graph - buffer, amount of historic/previous data to show - graphs, number of graphs to split the metadata between Displaying the graph does have some performance penalty, however this has been mostly mitigated through the refresh parameter. Despite this, graphing might not be the best of ideas when using the camera to record or save data. This is mainly for debugging purposes. 
Suggested-by: Kieran Bingham Signed-off-by: Daniel Oakley --- This patch is on top of f3f683c9 (py: cam.py: Fix duplicate metadata output if more than one steam, 2023-01-12) src/py/cam/cam.py | 241 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 241 insertions(+) diff --git a/src/py/cam/cam.py b/src/py/cam/cam.py index 967a72f5..abb6729a 100755 --- a/src/py/cam/cam.py +++ b/src/py/cam/cam.py @@ -21,6 +21,7 @@ class CameraContext: opt_strict_formats: bool opt_crc: bool opt_metadata: bool + opt_metadata_graph: dict opt_save_frames: bool opt_capture: int @@ -39,6 +40,7 @@ class CameraContext: self.id = 'cam' + str(idx) self.reqs_queued = 0 self.reqs_completed = 0 + self.graph_drawer = None def do_cmd_list_props(self): print('Properties for', self.id) @@ -171,6 +173,9 @@ class CameraContext: self.stream_names[stream] = 'stream' + str(idx) print('{}-{}: stream config {}'.format(self.id, self.stream_names[stream], stream.configuration)) + def initialize_graph_drawer(self, graph_arguments): + self.graph_drawer = GraphDrawer(graph_arguments, self.camera.id) + def alloc_buffers(self): allocator = libcam.FrameBufferAllocator(self.camera) @@ -271,6 +276,12 @@ class CaptureState: for ctrl, val in reqmeta.items(): print(f'\t{ctrl} = {val}') + if ctx.opt_metadata_graph: + if not ctx.graph_drawer: + ctx.initialize_graph_drawer(ctx.opt_metadata_graph) + + ctx.graph_drawer.update_graph(ts, req.metadata) + for stream, fb in buffers.items(): stream_name = ctx.stream_names[stream] @@ -345,6 +356,212 @@ class CaptureState: self.__capture_deinit() +# GraphDrawer is a class that manages drawing the graph. It sets up the graph; +# manages graph settings; holds the metadata buffer (previous + current values); +# and updates the graph. +# We store physical lines for each metadata as their x (time) and y (data) can +# be directly updated instead of redrawing each axis - improves performance. 
+class GraphDrawer: + graph_number: int + buffer_size: int + autoscale_axis: bool + refresh_time: float + + BLOCKED_TYPES = [list, tuple, libcamera._libcamera.Rectangle] + + def __init__(self, graph_arguments, camera_name): + # We only import matplotlib here to reduce mandatory dependencies + import matplotlib.pyplot as plt + self.plt = plt + + self.graph_number = graph_arguments["graphs"] + self.buffer_size = graph_arguments["buffer"] + self.autoscale_axis = graph_arguments["autoscale"] + # Convert FPS into a duration in nanoseconds + self.refresh_time = 1 / graph_arguments["refresh"] * 10**9 + self.previous_time = 0 + + self.figure = plt.figure(camera_name) + self.axis = [] + self.axis_group = {} + self.metadata_buffer = {} + self.lines = {} + self.blit_manager = self.BlitManager(self.figure.canvas) + + # Define the margins for drawing the graphs + self.figure.subplots_adjust( + top=0.95, + bottom=0.05, + left=0.1, + right=0.95, + hspace=0.2, + wspace=0.2 + ) + + self.plt.show(block=False) + + def update_graph(self, time, metadata): + # On the first request, allocate each metadata entry to an axis + if len(self.axis) == 0: + self.__divide_subplots(self.graph_number, metadata) + self.plt.draw() + + for ctrl, val in metadata.items(): + if type(val) in self.BLOCKED_TYPES or ctrl.name in ["SensorTimestamp"]: + continue + + # The metadata_buffer holds an x array and a y array for each ctrl + # where x is the time series and y is the metadata value + if not self.metadata_buffer.get(ctrl.name, False): + self.metadata_buffer[ctrl.name] = [[], []] + + # Create the lines and configure their style on the graph + if not self.lines.get(ctrl.name, False): + self.lines[ctrl.name], = self.axis_group[ctrl].plot([], label=ctrl.name) + self.blit_manager.add_artist(self.lines[ctrl.name]) + self.lines[ctrl.name].set_alpha(0.7) + self.lines[ctrl.name].set_linewidth(2.4) + + self.metadata_buffer[ctrl.name][0].append(time) + self.metadata_buffer[ctrl.name][1].append(val) + # Remove 
the oldest entry to keep the buffer fixed at its max size + if len(self.metadata_buffer[ctrl.name][0]) > self.buffer_size: + del self.metadata_buffer[ctrl.name][0][0] + del self.metadata_buffer[ctrl.name][1][0] + + if time - self.previous_time >= self.refresh_time: + self.__animate() + self.previous_time = time + + # This method allocates the metadata into the correct number of graphs based + # on how different their example metadata is from their consecutive neighbour + def __divide_subplots(self, number_of_graphs, example_metadata): + # Create the correct number of axis positioned in a vertical stack + for i in range(1, number_of_graphs + 1): + axis = self.plt.subplot(number_of_graphs, 1, i) + # Remove the visible x axis as it is not redrawn with blit + axis.get_xaxis().set_ticks([]) + self.axis.append(axis) + + cleansed_metadata = {} + for ctrl, val in example_metadata.items(): + if not (type(val) in self.BLOCKED_TYPES or ctrl.name in ["SensorTimestamp"]): + cleansed_metadata[ctrl] = val + + # Summary of what the following code does: + # We first sort the metadata items by value so we can identify the + # difference between them. + # From there we can split them up based on the differences between them. + # We do this by sorting the ctrls by their differences, and then + # adding N breaks into the standardly sorted list of ctrls (by values) + # next to those names. Where N is the number of graphs we want. 
+ # We then go through the ctrls in order, adding them to the correct + # axis group (increasing their group if a break is found) + + # Sort the metadata lowest to highest so consecutive values can be compared + sorted_metadata = dict(sorted(cleansed_metadata.items(), + key=lambda item: item[1])) + + # Create the dictionary containing the {ctrl:percentage difference} + percent_diff = {} + prev_val = None + for ctrl,val in sorted_metadata.items(): + if prev_val: + percent_diff[ctrl] = val/prev_val + + prev_val = val + + # Sort those percentage differences, highest to lowest, so we can find + # the appropriate break points to separate the metadata by + sorted_diffs = dict(sorted(percent_diff.items(), + key=lambda item: item[1], reverse=True)) + # This is the list of ctrls ordered by value, lowest to highest + sorted_ctrl = sorted(sorted_metadata, key=sorted_metadata.get) + + # Add the correct number of breaks in, starting with the break that + # separates the greatest distance + i = 0 + for ctrl, _ in sorted_diffs.items(): + i += 1 + if i >= number_of_graphs: + break + + sorted_ctrl.insert(sorted_ctrl.index(ctrl), "~BREAK~") + + # Put the ctrls with their correct axis group, incrementing group when + # a break is found. 
+ group = 0 + for ctrl in sorted_ctrl: + if ctrl == "~BREAK~": + group += 1 + else: + self.axis_group[ctrl] = self.axis[group] + + def __animate(self): + for ctrl, series in self.metadata_buffer.items(): + self.lines[ctrl].set_xdata(series[0]) + self.lines[ctrl].set_ydata(series[1]) + + # Scale and display the legend on the axis + for axis in self.axis: + axis.relim() + axis.legend(loc="upper left") + axis.autoscale_view() + + # Adjust the y scale to be bigger once if manual scaling + if axis.get_autoscaley_on() and not self.autoscale_axis: + axis_ymin, axis_ymax = axis.get_ybound() + axis_yrange = axis_ymax - axis_ymin + axis.set_ybound([axis_ymin - axis_yrange * 0.25, + axis_ymax + axis_yrange * 0.25]) + + axis.set_autoscaley_on(self.autoscale_axis) + axis.set_autoscalex_on(True) + + self.blit_manager.update() + + # This BlitManager is derived from: (comments removed and init simplified) + # matplotlib.org/devdocs/tutorials/advanced/blitting.html#class-based-example + # The BlitManager manages redrawing the graph in a performant way. It also + # prompts a full re-draw (axis and legend) whenever the window is resized. 
+ class BlitManager: + def __init__(self, canvas): + self.canvas = canvas + self._bg = None + self._artists = [] + + self.cid = canvas.mpl_connect("draw_event", self.on_draw) + + def on_draw(self, event): + cv = self.canvas + if event is not None: + if event.canvas != cv: + raise RuntimeError + self._bg = cv.copy_from_bbox(cv.figure.bbox) + self._draw_animated() + + def add_artist(self, art): + if art.figure != self.canvas.figure: + raise RuntimeError + art.set_animated(True) + self._artists.append(art) + + def _draw_animated(self): + fig = self.canvas.figure + for a in self._artists: + fig.draw_artist(a) + + def update(self): + cv = self.canvas + fig = cv.figure + if self._bg is None: + self.on_draw(None) + else: + cv.restore_region(self._bg) + self._draw_animated() + cv.blit(fig.bbox) + cv.flush_events() + class CustomAction(argparse.Action): def __init__(self, option_strings, dest, **kwargs): @@ -393,6 +610,7 @@ def main(): parser.add_argument('--crc', nargs=0, type=bool, action=CustomAction, help='Print CRC32 for captured frames') parser.add_argument('--save-frames', nargs=0, type=bool, action=CustomAction, help='Save captured frames to files') parser.add_argument('--metadata', nargs=0, type=bool, action=CustomAction, help='Print the metadata for completed requests') + parser.add_argument('--metadata-graph', nargs='*', type=str, const=True, action=CustomAction, help='Live graph of metadata. 
Default args: graphs=3 buffer=100 autoscale=true refresh=10') parser.add_argument('--strict-formats', type=bool, nargs=0, action=CustomAction, help='Do not allow requested stream format(s) to be adjusted') parser.add_argument('-s', '--stream', nargs='+', action=CustomAction) args = parser.parse_args() @@ -418,6 +636,29 @@ def main(): ctx.opt_metadata = args.metadata.get(cam_idx, False) ctx.opt_strict_formats = args.strict_formats.get(cam_idx, False) ctx.opt_stream = args.stream.get(cam_idx, ['role=viewfinder']) + + metadata_graph = args.metadata_graph.get(cam_idx, False) + if metadata_graph is not False: + mdg_args = [] + for i in metadata_graph: + mdg_args += i.split("=") + try: + metadata_graph = { + "graphs": (3 if "graphs" not in mdg_args else + int(mdg_args[mdg_args.index("graphs") + 1])), + "buffer": (100 if "buffer" not in mdg_args else + int(mdg_args[mdg_args.index("buffer") + 1])), + "refresh": (10.0 if "refresh" not in mdg_args else + float(mdg_args[mdg_args.index("refresh") + 1])), + "autoscale": (True if "autoscale" not in mdg_args else + (mdg_args[mdg_args.index("autoscale") + 1]).lower() == "true") + } + except ValueError: + print('Invalid --metadata_graph arguments, see --help') + sys.exit(-1) + + ctx.opt_metadata_graph = metadata_graph + contexts.append(ctx) for ctx in contexts: