Show a patch. The example below retrieves patch 20462 from the libcamera Patchwork instance; the response body is the patch's full JSON representation, including the original mail headers, the commit message ("content") and the diff.

GET /api/patches/20462/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 20462,
    "url": "https://patchwork.libcamera.org/api/patches/20462/?format=api",
    "web_url": "https://patchwork.libcamera.org/patch/20462/",
    "project": {
        "id": 1,
        "url": "https://patchwork.libcamera.org/api/projects/1/?format=api",
        "name": "libcamera",
        "link_name": "libcamera",
        "list_id": "libcamera_core",
        "list_email": "libcamera-devel@lists.libcamera.org",
        "web_url": "",
        "scm_url": "",
        "webscm_url": ""
    },
    "msgid": "<20240628104828.2928109-4-stefan.klug@ideasonboard.com>",
    "date": "2024-06-28T10:46:56",
    "name": "[v2,03/25] libtuning: Copy files from raspberrypi",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": false,
    "hash": "2923aae613a5600034a312237863aab4f3549e19",
    "submitter": {
        "id": 184,
        "url": "https://patchwork.libcamera.org/api/people/184/?format=api",
        "name": "Stefan Klug",
        "email": "stefan.klug@ideasonboard.com"
    },
    "delegate": null,
    "mbox": "https://patchwork.libcamera.org/patch/20462/mbox/",
    "series": [
        {
            "id": 4430,
            "url": "https://patchwork.libcamera.org/api/series/4430/?format=api",
            "web_url": "https://patchwork.libcamera.org/project/libcamera/list/?series=4430",
            "date": "2024-06-28T10:46:53",
            "name": "Add ccm calibration to libtuning",
            "version": 2,
            "mbox": "https://patchwork.libcamera.org/series/4430/mbox/"
        }
    ],
    "comments": "https://patchwork.libcamera.org/api/patches/20462/comments/",
    "check": "pending",
    "checks": "https://patchwork.libcamera.org/api/patches/20462/checks/",
    "tags": {},
    "headers": {
        "Return-Path": "<libcamera-devel-bounces@lists.libcamera.org>",
        "X-Original-To": "parsemail@patchwork.libcamera.org",
        "Delivered-To": "parsemail@patchwork.libcamera.org",
        "Received": [
            "from lancelot.ideasonboard.com (lancelot.ideasonboard.com\n\t[92.243.16.209])\n\tby patchwork.libcamera.org (Postfix) with ESMTPS id 1D55CBD87C\n\tfor <parsemail@patchwork.libcamera.org>;\n\tFri, 28 Jun 2024 10:48:50 +0000 (UTC)",
            "from lancelot.ideasonboard.com (localhost [IPv6:::1])\n\tby lancelot.ideasonboard.com (Postfix) with ESMTP id 8D46E62E1C;\n\tFri, 28 Jun 2024 12:48:49 +0200 (CEST)",
            "from perceval.ideasonboard.com (perceval.ideasonboard.com\n\t[IPv6:2001:4b98:dc2:55:216:3eff:fef7:d647])\n\tby lancelot.ideasonboard.com (Postfix) with ESMTPS id C4BF062C97\n\tfor <libcamera-devel@lists.libcamera.org>;\n\tFri, 28 Jun 2024 12:48:47 +0200 (CEST)",
            "from ideasonboard.com (unknown\n\t[IPv6:2a00:6020:448c:6c00:82ab:924:d918:cd24])\n\tby perceval.ideasonboard.com (Postfix) with ESMTPSA id 3DE14735;\n\tFri, 28 Jun 2024 12:48:23 +0200 (CEST)"
        ],
        "Authentication-Results": "lancelot.ideasonboard.com; dkim=pass (1024-bit key;\n\tunprotected) header.d=ideasonboard.com header.i=@ideasonboard.com\n\theader.b=\"u1JdTbQK\"; dkim-atps=neutral",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple; d=ideasonboard.com;\n\ts=mail; t=1719571703;\n\tbh=KfwR1Zlw7EhbHUh3w6JkbMol3iTRIulaDUREP34UOHs=;\n\th=From:To:Cc:Subject:Date:In-Reply-To:References:From;\n\tb=u1JdTbQKxsHDOyE1yK1MIEjPKk5UtXGYA12ALAFLTyd4wctytJ2QXgw0SH86/cdp6\n\tap+xzUuTApqnDHPE191rhLHAiwy05l0psFCSohhiE6nahs/haSgmjCIPLwVHBx82in\n\tJ/6sTncaQPFgTTKhusOAhNiMrYrFUxpo01VpIcY8=",
        "From": "Stefan Klug <stefan.klug@ideasonboard.com>",
        "To": "libcamera-devel@lists.libcamera.org",
        "Cc": "Stefan Klug <stefan.klug@ideasonboard.com>",
        "Subject": "[PATCH v2 03/25] libtuning: Copy files from raspberrypi",
        "Date": "Fri, 28 Jun 2024 12:46:56 +0200",
        "Message-ID": "<20240628104828.2928109-4-stefan.klug@ideasonboard.com>",
        "X-Mailer": "git-send-email 2.43.0",
        "In-Reply-To": "<20240628104828.2928109-1-stefan.klug@ideasonboard.com>",
        "References": "<20240628104828.2928109-1-stefan.klug@ideasonboard.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain; charset=UTF-8",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "libcamera-devel@lists.libcamera.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "<libcamera-devel.lists.libcamera.org>",
        "List-Unsubscribe": "<https://lists.libcamera.org/options/libcamera-devel>,\n\t<mailto:libcamera-devel-request@lists.libcamera.org?subject=unsubscribe>",
        "List-Archive": "<https://lists.libcamera.org/pipermail/libcamera-devel/>",
        "List-Post": "<mailto:libcamera-devel@lists.libcamera.org>",
        "List-Help": "<mailto:libcamera-devel-request@lists.libcamera.org?subject=help>",
        "List-Subscribe": "<https://lists.libcamera.org/listinfo/libcamera-devel>,\n\t<mailto:libcamera-devel-request@lists.libcamera.org?subject=subscribe>",
        "Errors-To": "libcamera-devel-bounces@lists.libcamera.org",
        "Sender": "\"libcamera-devel\" <libcamera-devel-bounces@lists.libcamera.org>"
    },
    "content": "Copy ctt_{awb,ccm,colors,ransac} from the raspberrypi tuning scripts as\nbasis for the libcamera implementation. color.py was renamed to\nctt_colors.py to better express the origin.\n\nThe files were taken from commit 66479605baca4a22e2b\n\nSigned-off-by: Stefan Klug <stefan.klug@ideasonboard.com>\n---\n utils/tuning/libtuning/ctt_awb.py    | 376 +++++++++++++++++++++++++\n utils/tuning/libtuning/ctt_ccm.py    | 406 +++++++++++++++++++++++++++\n utils/tuning/libtuning/ctt_colors.py |  30 ++\n utils/tuning/libtuning/ctt_ransac.py |  71 +++++\n 4 files changed, 883 insertions(+)\n create mode 100644 utils/tuning/libtuning/ctt_awb.py\n create mode 100644 utils/tuning/libtuning/ctt_ccm.py\n create mode 100644 utils/tuning/libtuning/ctt_colors.py\n create mode 100644 utils/tuning/libtuning/ctt_ransac.py",
    "diff": "diff --git a/utils/tuning/libtuning/ctt_awb.py b/utils/tuning/libtuning/ctt_awb.py\nnew file mode 100644\nindex 000000000000..5ba6f978a228\n--- /dev/null\n+++ b/utils/tuning/libtuning/ctt_awb.py\n@@ -0,0 +1,376 @@\n+# SPDX-License-Identifier: BSD-2-Clause\n+#\n+# Copyright (C) 2019, Raspberry Pi Ltd\n+#\n+# camera tuning tool for AWB\n+\n+from ctt_image_load import *\n+import matplotlib.pyplot as plt\n+from bisect import bisect_left\n+from scipy.optimize import fmin\n+\n+\n+\"\"\"\n+obtain piecewise linear approximation for colour curve\n+\"\"\"\n+def awb(Cam, cal_cr_list, cal_cb_list, plot):\n+    imgs = Cam.imgs\n+    \"\"\"\n+    condense alsc calibration tables into one dictionary\n+    \"\"\"\n+    if cal_cr_list is None:\n+        colour_cals = None\n+    else:\n+        colour_cals = {}\n+        for cr, cb in zip(cal_cr_list, cal_cb_list):\n+            cr_tab = cr['table']\n+            cb_tab = cb['table']\n+            \"\"\"\n+            normalise tables so min value is 1\n+            \"\"\"\n+            cr_tab = cr_tab/np.min(cr_tab)\n+            cb_tab = cb_tab/np.min(cb_tab)\n+            colour_cals[cr['ct']] = [cr_tab, cb_tab]\n+    \"\"\"\n+    obtain data from greyscale macbeth patches\n+    \"\"\"\n+    rb_raw = []\n+    rbs_hat = []\n+    for Img in imgs:\n+        Cam.log += '\\nProcessing '+Img.name\n+        \"\"\"\n+        get greyscale patches with alsc applied if alsc enabled.\n+        Note: if alsc is disabled then colour_cals will be set to None and the\n+        function will just return the greyscale patches\n+        \"\"\"\n+        r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals)\n+        \"\"\"\n+        calculate ratio of r, b to g\n+        \"\"\"\n+        r_g = np.mean(r_patchs/g_patchs)\n+        b_g = np.mean(b_patchs/g_patchs)\n+        Cam.log += '\\n       r : {:.4f}       b : {:.4f}'.format(r_g, b_g)\n+        \"\"\"\n+        The curve tends to be better behaved in so-called hatspace.\n+        R, B, G represent the individual channels. The colour curve is plotted in\n+        r, b space, where:\n+            r = R/G\n+            b = B/G\n+        This will be referred to as dehatspace... 
(sorry)\n+        Hatspace is defined as:\n+            r_hat = R/(R+B+G)\n+            b_hat = B/(R+B+G)\n+        To convert from dehatspace to hastpace (hat operation):\n+            r_hat = r/(1+r+b)\n+            b_hat = b/(1+r+b)\n+        To convert from hatspace to dehatspace (dehat operation):\n+            r = r_hat/(1-r_hat-b_hat)\n+            b = b_hat/(1-r_hat-b_hat)\n+        Proof is left as an excercise to the reader...\n+        Throughout the code, r and b are sometimes referred to as r_g and b_g\n+        as a reminder that they are ratios\n+        \"\"\"\n+        r_g_hat = r_g/(1+r_g+b_g)\n+        b_g_hat = b_g/(1+r_g+b_g)\n+        Cam.log += '\\n   r_hat : {:.4f}   b_hat : {:.4f}'.format(r_g_hat, b_g_hat)\n+        rbs_hat.append((r_g_hat, b_g_hat, Img.col))\n+        rb_raw.append((r_g, b_g))\n+        Cam.log += '\\n'\n+\n+    Cam.log += '\\nFinished processing images'\n+    \"\"\"\n+    sort all lits simultaneously by r_hat\n+    \"\"\"\n+    rbs_zip = list(zip(rbs_hat, rb_raw))\n+    rbs_zip.sort(key=lambda x: x[0][0])\n+    rbs_hat, rb_raw = list(zip(*rbs_zip))\n+    \"\"\"\n+    unzip tuples ready for processing\n+    \"\"\"\n+    rbs_hat = list(zip(*rbs_hat))\n+    rb_raw = list(zip(*rb_raw))\n+    \"\"\"\n+    fit quadratic fit to r_g hat and b_g_hat\n+    \"\"\"\n+    a, b, c = np.polyfit(rbs_hat[0], rbs_hat[1], 2)\n+    Cam.log += '\\nFit quadratic curve in hatspace'\n+    \"\"\"\n+    the algorithm now approximates the shortest distance from each point to the\n+    curve in dehatspace. Since the fit is done in hatspace, it is easier to\n+    find the actual shortest distance in hatspace and use the projection back\n+    into dehatspace as an overestimate.\n+    The distance will be used for two things:\n+        1) In the case that colour temperature does not strictly decrease with\n+        increasing r/g, the closest point to the line will be chosen out of an\n+        increasing pair of colours.\n+\n+        2) To calculate transverse negative an dpositive, the maximum positive\n+        and negative distance from the line are chosen. This benefits from the\n+        overestimate as the transverse pos/neg are upper bound values.\n+    \"\"\"\n+    \"\"\"\n+    define fit function\n+    \"\"\"\n+    def f(x):\n+        return a*x**2 + b*x + c\n+    \"\"\"\n+    iterate over points (R, B are x and y coordinates of points) and calculate\n+    distance to line in dehatspace\n+    \"\"\"\n+    dists = []\n+    for i, (R, B) in enumerate(zip(rbs_hat[0], rbs_hat[1])):\n+        \"\"\"\n+        define function to minimise as square distance between datapoint and\n+        point on curve. 
Squaring is monotonic so minimising radius squared is\n+        equivalent to minimising radius\n+        \"\"\"\n+        def f_min(x):\n+            y = f(x)\n+            return((x-R)**2+(y-B)**2)\n+        \"\"\"\n+        perform optimisation with scipy.optmisie.fmin\n+        \"\"\"\n+        x_hat = fmin(f_min, R, disp=0)[0]\n+        y_hat = f(x_hat)\n+        \"\"\"\n+        dehat\n+        \"\"\"\n+        x = x_hat/(1-x_hat-y_hat)\n+        y = y_hat/(1-x_hat-y_hat)\n+        rr = R/(1-R-B)\n+        bb = B/(1-R-B)\n+        \"\"\"\n+        calculate euclidean distance in dehatspace\n+        \"\"\"\n+        dist = ((x-rr)**2+(y-bb)**2)**0.5\n+        \"\"\"\n+        return negative if point is below the fit curve\n+        \"\"\"\n+        if (x+y) > (rr+bb):\n+            dist *= -1\n+        dists.append(dist)\n+    Cam.log += '\\nFound closest point on fit line to each point in dehatspace'\n+    \"\"\"\n+    calculate wiggle factors in awb. 10% added since this is an upper bound\n+    \"\"\"\n+    transverse_neg = - np.min(dists) * 1.1\n+    transverse_pos = np.max(dists) * 1.1\n+    Cam.log += '\\nTransverse pos : {:.5f}'.format(transverse_pos)\n+    Cam.log += '\\nTransverse neg : {:.5f}'.format(transverse_neg)\n+    \"\"\"\n+    set minimum transverse wiggles to 0.1 .\n+    Wiggle factors dictate how far off of the curve the algorithm searches. 0.1\n+    is a suitable minimum that gives better results for lighting conditions not\n+    within calibration dataset. Anything less will generalise poorly.\n+    \"\"\"\n+    if transverse_pos < 0.01:\n+        transverse_pos = 0.01\n+        Cam.log += '\\nForced transverse pos to 0.01'\n+    if transverse_neg < 0.01:\n+        transverse_neg = 0.01\n+        Cam.log += '\\nForced transverse neg to 0.01'\n+\n+    \"\"\"\n+    generate new b_hat values at each r_hat according to fit\n+    \"\"\"\n+    r_hat_fit = np.array(rbs_hat[0])\n+    b_hat_fit = a*r_hat_fit**2 + b*r_hat_fit + c\n+    \"\"\"\n+    transform from hatspace to dehatspace\n+    \"\"\"\n+    r_fit = r_hat_fit/(1-r_hat_fit-b_hat_fit)\n+    b_fit = b_hat_fit/(1-r_hat_fit-b_hat_fit)\n+    c_fit = np.round(rbs_hat[2], 0)\n+    \"\"\"\n+    round to 4dp\n+    \"\"\"\n+    r_fit = np.where((1000*r_fit) % 1 <= 0.05, r_fit+0.0001, r_fit)\n+    r_fit = np.where((1000*r_fit) % 1 >= 0.95, r_fit-0.0001, r_fit)\n+    b_fit = np.where((1000*b_fit) % 1 <= 0.05, b_fit+0.0001, b_fit)\n+    b_fit = np.where((1000*b_fit) % 1 >= 0.95, b_fit-0.0001, b_fit)\n+    r_fit = np.round(r_fit, 4)\n+    b_fit = np.round(b_fit, 4)\n+    \"\"\"\n+    The following code ensures that colour temperature decreases with\n+    increasing r/g\n+    \"\"\"\n+    \"\"\"\n+    iterate backwards over list for easier indexing\n+    \"\"\"\n+    i = len(c_fit) - 1\n+    while i > 0:\n+        if c_fit[i] > c_fit[i-1]:\n+            Cam.log += '\\nColour temperature increase found\\n'\n+            Cam.log += '{} K at r = {} to '.format(c_fit[i-1], r_fit[i-1])\n+            Cam.log += '{} K at r = {}'.format(c_fit[i], r_fit[i])\n+            \"\"\"\n+            if colour temperature increases then discard point furthest from\n+            the transformed fit (dehatspace)\n+            \"\"\"\n+            error_1 = abs(dists[i-1])\n+            error_2 = abs(dists[i])\n+            Cam.log += '\\nDistances from fit:\\n'\n+            Cam.log += '{} K : {:.5f} , '.format(c_fit[i], error_1)\n+            Cam.log += '{} K : {:.5f}'.format(c_fit[i-1], error_2)\n+            \"\"\"\n+            find bad 
index\n+            note that in python false = 0 and true = 1\n+            \"\"\"\n+            bad = i - (error_1 < error_2)\n+            Cam.log += '\\nPoint at {} K deleted as '.format(c_fit[bad])\n+            Cam.log += 'it is furthest from fit'\n+            \"\"\"\n+            delete bad point\n+            \"\"\"\n+            r_fit = np.delete(r_fit, bad)\n+            b_fit = np.delete(b_fit, bad)\n+            c_fit = np.delete(c_fit, bad).astype(np.uint16)\n+        \"\"\"\n+        note that if a point has been discarded then the length has decreased\n+        by one, meaning that decreasing the index by one will reassess the kept\n+        point against the next point. It is therefore possible, in theory, for\n+        two adjacent points to be discarded, although probably rare\n+        \"\"\"\n+        i -= 1\n+\n+    \"\"\"\n+    return formatted ct curve, ordered by increasing colour temperature\n+    \"\"\"\n+    ct_curve = list(np.array(list(zip(b_fit, r_fit, c_fit))).flatten())[::-1]\n+    Cam.log += '\\nFinal CT curve:'\n+    for i in range(len(ct_curve)//3):\n+        j = 3*i\n+        Cam.log += '\\n  ct: {}  '.format(ct_curve[j])\n+        Cam.log += '  r: {}  '.format(ct_curve[j+1])\n+        Cam.log += '  b: {}  '.format(ct_curve[j+2])\n+\n+    \"\"\"\n+    plotting code for debug\n+    \"\"\"\n+    if plot:\n+        x = np.linspace(np.min(rbs_hat[0]), np.max(rbs_hat[0]), 100)\n+        y = a*x**2 + b*x + c\n+        plt.subplot(2, 1, 1)\n+        plt.title('hatspace')\n+        plt.plot(rbs_hat[0], rbs_hat[1], ls='--', color='blue')\n+        plt.plot(x, y, color='green', ls='-')\n+        plt.scatter(rbs_hat[0], rbs_hat[1], color='red')\n+        for i, ct in enumerate(rbs_hat[2]):\n+            plt.annotate(str(ct), (rbs_hat[0][i], rbs_hat[1][i]))\n+        plt.xlabel('$\\\\hat{r}$')\n+        plt.ylabel('$\\\\hat{b}$')\n+        \"\"\"\n+        optional set axes equal to shortest distance so line really does\n+        looks perpendicular and everybody is happy\n+        \"\"\"\n+        # ax = plt.gca()\n+        # ax.set_aspect('equal')\n+        plt.grid()\n+        plt.subplot(2, 1, 2)\n+        plt.title('dehatspace - indoors?')\n+        plt.plot(r_fit, b_fit, color='blue')\n+        plt.scatter(rb_raw[0], rb_raw[1], color='green')\n+        plt.scatter(r_fit, b_fit, color='red')\n+        for i, ct in enumerate(c_fit):\n+            plt.annotate(str(ct), (r_fit[i], b_fit[i]))\n+        plt.xlabel('$r$')\n+        plt.ylabel('$b$')\n+        \"\"\"\n+        optional set axes equal to shortest distance so line really does\n+        looks perpendicular and everybody is happy\n+        \"\"\"\n+        # ax = plt.gca()\n+        # ax.set_aspect('equal')\n+        plt.subplots_adjust(hspace=0.5)\n+        plt.grid()\n+        plt.show()\n+    \"\"\"\n+    end of plotting code\n+    \"\"\"\n+    return(ct_curve, np.round(transverse_pos, 5), np.round(transverse_neg, 5))\n+\n+\n+\"\"\"\n+obtain greyscale patches and perform alsc colour correction\n+\"\"\"\n+def get_alsc_patches(Img, colour_cals, grey=True):\n+    \"\"\"\n+    get patch centre coordinates, image colour and the actual\n+    patches for each channel, remembering to subtract blacklevel\n+    If grey then only greyscale patches considered\n+    \"\"\"\n+    if grey:\n+        cen_coords = Img.cen_coords[3::4]\n+        col = Img.col\n+        patches = [np.array(Img.patches[i]) for i in Img.order]\n+        r_patchs = patches[0][3::4] - Img.blacklevel_16\n+        b_patchs = patches[3][3::4] 
- Img.blacklevel_16\n+        \"\"\"\n+        note two green channels are averages\n+        \"\"\"\n+        g_patchs = (patches[1][3::4]+patches[2][3::4])/2 - Img.blacklevel_16\n+    else:\n+        cen_coords = Img.cen_coords\n+        col = Img.col\n+        patches = [np.array(Img.patches[i]) for i in Img.order]\n+        r_patchs = patches[0] - Img.blacklevel_16\n+        b_patchs = patches[3] - Img.blacklevel_16\n+        g_patchs = (patches[1]+patches[2])/2 - Img.blacklevel_16\n+\n+    if colour_cals is None:\n+        return r_patchs, b_patchs, g_patchs\n+    \"\"\"\n+    find where image colour fits in alsc colour calibration tables\n+    \"\"\"\n+    cts = list(colour_cals.keys())\n+    pos = bisect_left(cts, col)\n+    \"\"\"\n+    if img colour is below minimum or above maximum alsc calibration colour, simply\n+    pick extreme closest to img colour\n+    \"\"\"\n+    if pos % len(cts) == 0:\n+        \"\"\"\n+        this works because -0 = 0 = first and -1 = last index\n+        \"\"\"\n+        col_tabs = np.array(colour_cals[cts[-pos//len(cts)]])\n+        \"\"\"\n+    else, perform linear interpolation between existing alsc colour\n+    calibration tables\n+    \"\"\"\n+    else:\n+        bef = cts[pos-1]\n+        aft = cts[pos]\n+        da = col-bef\n+        db = aft-col\n+        bef_tabs = np.array(colour_cals[bef])\n+        aft_tabs = np.array(colour_cals[aft])\n+        col_tabs = (bef_tabs*db + aft_tabs*da)/(da+db)\n+    col_tabs = np.reshape(col_tabs, (2, 12, 16))\n+    \"\"\"\n+    calculate dx, dy used to calculate alsc table\n+    \"\"\"\n+    w, h = Img.w/2, Img.h/2\n+    dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))\n+    \"\"\"\n+    make list of pairs of gains for each patch by selecting the correct value\n+    in alsc colour calibration table\n+    \"\"\"\n+    patch_gains = []\n+    for cen in cen_coords:\n+        x, y = cen[0]//dx, cen[1]//dy\n+        # We could probably do with some better spatial interpolation here?\n+        col_gains = (col_tabs[0][y][x], col_tabs[1][y][x])\n+        patch_gains.append(col_gains)\n+\n+    \"\"\"\n+    multiply the r and b channels in each patch by the respective gain, finally\n+    performing the alsc colour correction\n+    \"\"\"\n+    for i, gains in enumerate(patch_gains):\n+        r_patchs[i] = r_patchs[i] * gains[0]\n+        b_patchs[i] = b_patchs[i] * gains[1]\n+\n+    \"\"\"\n+    return greyscale patches, g channel and correct r, b channels\n+    \"\"\"\n+    return r_patchs, b_patchs, g_patchs\ndiff --git a/utils/tuning/libtuning/ctt_ccm.py b/utils/tuning/libtuning/ctt_ccm.py\nnew file mode 100644\nindex 000000000000..59753e332ee9\n--- /dev/null\n+++ b/utils/tuning/libtuning/ctt_ccm.py\n@@ -0,0 +1,406 @@\n+# SPDX-License-Identifier: BSD-2-Clause\n+#\n+# Copyright (C) 2019, Raspberry Pi Ltd\n+#\n+# camera tuning tool for CCM (colour correction matrix)\n+\n+from ctt_image_load import *\n+from ctt_awb import get_alsc_patches\n+import colors\n+from scipy.optimize import minimize\n+from ctt_visualise import visualise_macbeth_chart\n+import numpy as np\n+\"\"\"\n+takes 8-bit macbeth chart values, degammas and returns 16 bit\n+\"\"\"\n+\n+'''\n+This program has many options from which to derive the color matrix from.\n+The first is average. This minimises the average delta E across all patches of\n+the macbeth chart. Testing across all cameras yeilded this as the most color\n+accurate and vivid. Other options are avalible however.\n+Maximum minimises the maximum Delta E of the patches. 
It iterates through till\n+a minimum maximum is found (so that there is\n+not one patch that deviates wildly.)\n+This yields generally good results but overall the colors are less accurate\n+Have a fiddle with maximum and see what you think.\n+The final option allows you to select the patches for which to average across.\n+This means that you can bias certain patches, for instance if you want the\n+reds to be more accurate.\n+'''\n+\n+matrix_selection_types = [\"average\", \"maximum\", \"patches\"]\n+typenum = 0  # select from array above, 0 = average, 1 = maximum, 2 = patches\n+test_patches = [1, 2, 5, 8, 9, 12, 14]\n+\n+'''\n+Enter patches to test for. Can also be entered twice if you\n+would like twice as much bias on one patch.\n+'''\n+\n+\n+def degamma(x):\n+    x = x / ((2 ** 8) - 1)  # takes 255 and scales it down to one\n+    x = np.where(x < 0.04045, x / 12.92, ((x + 0.055) / 1.055) ** 2.4)\n+    x = x * ((2 ** 16) - 1)  # takes one and scales up to 65535, 16 bit color\n+    return x\n+\n+\n+def gamma(x):\n+    # Take 3 long array of color values and gamma them\n+    return [((colour / 255) ** (1 / 2.4) * 1.055 - 0.055) * 255 for colour in x]\n+\n+\n+\"\"\"\n+FInds colour correction matrices for list of images\n+\"\"\"\n+\n+\n+def ccm(Cam, cal_cr_list, cal_cb_list):\n+    global matrix_selection_types, typenum\n+    imgs = Cam.imgs\n+    \"\"\"\n+    standard macbeth chart colour values\n+    \"\"\"\n+    m_rgb = np.array([  # these are in RGB\n+        [116, 81, 67],    # dark skin\n+        [199, 147, 129],  # light skin\n+        [91, 122, 156],   # blue sky\n+        [90, 108, 64],    # foliage\n+        [130, 128, 176],  # blue flower\n+        [92, 190, 172],   # bluish green\n+        [224, 124, 47],   # orange\n+        [68, 91, 170],    # purplish blue\n+        [198, 82, 97],    # moderate red\n+        [94, 58, 106],    # purple\n+        [159, 189, 63],   # yellow green\n+        [230, 162, 39],   # orange yellow\n+        [35, 63, 147],    # blue\n+        [67, 149, 74],    # green\n+        [180, 49, 57],    # red\n+        [238, 198, 20],   # yellow\n+        [193, 84, 151],   # magenta\n+        [0, 136, 170],    # cyan (goes out of gamut)\n+        [245, 245, 243],  # white 9.5\n+        [200, 202, 202],  # neutral 8\n+        [161, 163, 163],  # neutral 6.5\n+        [121, 121, 122],  # neutral 5\n+        [82, 84, 86],     # neutral 3.5\n+        [49, 49, 51]      # black 2\n+    ])\n+    \"\"\"\n+    convert reference colours from srgb to rgb\n+    \"\"\"\n+    m_srgb = degamma(m_rgb)  # now in 16 bit color.\n+\n+    # Produce array of LAB values for ideal color chart\n+    m_lab = [colors.RGB_to_LAB(color / 256) for color in m_srgb]\n+\n+    \"\"\"\n+    reorder reference values to match how patches are ordered\n+    \"\"\"\n+    m_srgb = np.array([m_srgb[i::6] for i in range(6)]).reshape((24, 3))\n+    m_lab = np.array([m_lab[i::6] for i in range(6)]).reshape((24, 3))\n+    m_rgb = np.array([m_rgb[i::6] for i in range(6)]).reshape((24, 3))\n+    \"\"\"\n+    reformat alsc correction tables or set colour_cals to None if alsc is\n+    deactivated\n+    \"\"\"\n+    if cal_cr_list is None:\n+        colour_cals = None\n+    else:\n+        colour_cals = {}\n+        for cr, cb in zip(cal_cr_list, cal_cb_list):\n+            cr_tab = cr['table']\n+            cb_tab = cb['table']\n+            \"\"\"\n+            normalise tables so min value is 1\n+            \"\"\"\n+            cr_tab = cr_tab / np.min(cr_tab)\n+            cb_tab = cb_tab / 
np.min(cb_tab)\n+            colour_cals[cr['ct']] = [cr_tab, cb_tab]\n+\n+    \"\"\"\n+    for each image, perform awb and alsc corrections.\n+    Then calculate the colour correction matrix for that image, recording the\n+    ccm and the colour tempertaure.\n+    \"\"\"\n+    ccm_tab = {}\n+    for Img in imgs:\n+        Cam.log += '\\nProcessing image: ' + Img.name\n+        \"\"\"\n+        get macbeth patches with alsc applied if alsc enabled.\n+        Note: if alsc is disabled then colour_cals will be set to None and no\n+        the function will simply return the macbeth patches\n+        \"\"\"\n+        r, b, g = get_alsc_patches(Img, colour_cals, grey=False)\n+        # 256 values for each patch of sRGB values\n+\n+        \"\"\"\n+        do awb\n+        Note: awb is done by measuring the macbeth chart in the image, rather\n+        than from the awb calibration. This is done so the awb will be perfect\n+        and the ccm matrices will be more accurate.\n+        \"\"\"\n+        r_greys, b_greys, g_greys = r[3::4], b[3::4], g[3::4]\n+        r_g = np.mean(r_greys / g_greys)\n+        b_g = np.mean(b_greys / g_greys)\n+        r = r / r_g\n+        b = b / b_g\n+        \"\"\"\n+        normalise brightness wrt reference macbeth colours and then average\n+        each channel for each patch\n+        \"\"\"\n+        gain = np.mean(m_srgb) / np.mean((r, g, b))\n+        Cam.log += '\\nGain with respect to standard colours: {:.3f}'.format(gain)\n+        r = np.mean(gain * r, axis=1)\n+        b = np.mean(gain * b, axis=1)\n+        g = np.mean(gain * g, axis=1)\n+        \"\"\"\n+        calculate ccm matrix\n+        \"\"\"\n+        # ==== All of below should in sRGB ===##\n+        sumde = 0\n+        ccm = do_ccm(r, g, b, m_srgb)\n+        # This is the initial guess that our optimisation code works with.\n+        original_ccm = ccm\n+        r1 = ccm[0]\n+        r2 = ccm[1]\n+        g1 = ccm[3]\n+        g2 = ccm[4]\n+        b1 = ccm[6]\n+        b2 = ccm[7]\n+        '''\n+        COLOR MATRIX LOOKS AS BELOW\n+        R1 R2 R3   Rval     Outr\n+        G1 G2 G3  *  Gval  =  G\n+        B1 B2 B3   Bval     B\n+        Will be optimising 6 elements and working out the third element using 1-r1-r2 = r3\n+        '''\n+\n+        x0 = [r1, r2, g1, g2, b1, b2]\n+        '''\n+        We use our old CCM as the initial guess for the program to find the\n+        optimised matrix\n+        '''\n+        result = minimize(guess, x0, args=(r, g, b, m_lab), tol=0.01)\n+        '''\n+        This produces a color matrix which has the lowest delta E possible,\n+        based off the input data. Note it is impossible for this to reach\n+        zero since the input data is imperfect\n+        '''\n+\n+        Cam.log += (\"\\n \\n Optimised Matrix Below: \\n \\n\")\n+        [r1, r2, g1, g2, b1, b2] = result.x\n+        # The new, optimised color correction matrix values\n+        optimised_ccm = [r1, r2, (1 - r1 - r2), g1, g2, (1 - g1 - g2), b1, b2, (1 - b1 - b2)]\n+\n+        # This is the optimised Color Matrix (preserving greys by summing rows up to 1)\n+        Cam.log += str(optimised_ccm)\n+        Cam.log += \"\\n Old Color Correction Matrix Below \\n\"\n+        Cam.log += str(ccm)\n+\n+        formatted_ccm = np.array(original_ccm).reshape((3, 3))\n+\n+        '''\n+        below is a whole load of code that then applies the latest color\n+        matrix, and returns LAB values for color. 
This can then be used\n+        to calculate the final delta E\n+        '''\n+        optimised_ccm_rgb = []  # Original Color Corrected Matrix RGB / LAB\n+        optimised_ccm_lab = []\n+\n+        formatted_optimised_ccm = np.array(optimised_ccm).reshape((3, 3))\n+        after_gamma_rgb = []\n+        after_gamma_lab = []\n+\n+        for RGB in zip(r, g, b):\n+            ccm_applied_rgb = np.dot(formatted_ccm, (np.array(RGB) / 256))\n+            optimised_ccm_rgb.append(gamma(ccm_applied_rgb))\n+            optimised_ccm_lab.append(colors.RGB_to_LAB(ccm_applied_rgb))\n+\n+            optimised_ccm_applied_rgb = np.dot(formatted_optimised_ccm, np.array(RGB) / 256)\n+            after_gamma_rgb.append(gamma(optimised_ccm_applied_rgb))\n+            after_gamma_lab.append(colors.RGB_to_LAB(optimised_ccm_applied_rgb))\n+        '''\n+        Gamma After RGB / LAB - not used in calculations, only used for visualisation\n+        We now want to spit out some data that shows\n+        how the optimisation has improved the color matrices\n+        '''\n+        Cam.log += \"Here are the Improvements\"\n+\n+        # CALCULATE WORST CASE delta e\n+        old_worst_delta_e = 0\n+        before_average = transform_and_evaluate(formatted_ccm, r, g, b, m_lab)\n+        new_worst_delta_e = 0\n+        after_average = transform_and_evaluate(formatted_optimised_ccm, r, g, b, m_lab)\n+        for i in range(24):\n+            old_delta_e = deltae(optimised_ccm_lab[i], m_lab[i])  # Current Old Delta E\n+            new_delta_e = deltae(after_gamma_lab[i], m_lab[i])  # Current New Delta E\n+            if old_delta_e > old_worst_delta_e:\n+                old_worst_delta_e = old_delta_e\n+            if new_delta_e > new_worst_delta_e:\n+                new_worst_delta_e = new_delta_e\n+\n+        Cam.log += \"Before color correction matrix was optimised, we got an average delta E of \" + str(before_average) + \" and a maximum delta E of \" + str(old_worst_delta_e)\n+        Cam.log += \"After color correction matrix was optimised, we got an average delta E of \" + str(after_average) + \" and a maximum delta E of \" + str(new_worst_delta_e)\n+\n+        visualise_macbeth_chart(m_rgb, optimised_ccm_rgb, after_gamma_rgb, str(Img.col) + str(matrix_selection_types[typenum]))\n+        '''\n+        The program will also save some visualisations of improvements.\n+        Very pretty to look at. Top rectangle is ideal, Left square is\n+        before optimisation, right square is after.\n+        '''\n+\n+        \"\"\"\n+        if a ccm has already been calculated for that temperature then don't\n+        overwrite but save both. 
They will then be averaged later on\n+        \"\"\"  # Now going to use optimised color matrix, optimised_ccm\n+        if Img.col in ccm_tab.keys():\n+            ccm_tab[Img.col].append(optimised_ccm)\n+        else:\n+            ccm_tab[Img.col] = [optimised_ccm]\n+        Cam.log += '\\n'\n+\n+    Cam.log += '\\nFinished processing images'\n+    \"\"\"\n+    average any ccms that share a colour temperature\n+    \"\"\"\n+    for k, v in ccm_tab.items():\n+        tab = np.mean(v, axis=0)\n+        tab = np.where((10000 * tab) % 1 <= 0.05, tab + 0.00001, tab)\n+        tab = np.where((10000 * tab) % 1 >= 0.95, tab - 0.00001, tab)\n+        ccm_tab[k] = list(np.round(tab, 5))\n+        Cam.log += '\\nMatrix calculated for colour temperature of {} K'.format(k)\n+\n+    \"\"\"\n+    return all ccms with respective colour temperature in the correct format,\n+    sorted by their colour temperature\n+    \"\"\"\n+    sorted_ccms = sorted(ccm_tab.items(), key=lambda kv: kv[0])\n+    ccms = []\n+    for i in sorted_ccms:\n+        ccms.append({\n+            'ct': i[0],\n+            'ccm': i[1]\n+        })\n+    return ccms\n+\n+\n+def guess(x0, r, g, b, m_lab):       # provides a method of numerical feedback for the optimisation code\n+    [r1, r2, g1, g2, b1, b2] = x0\n+    ccm = np.array([r1, r2, (1 - r1 - r2),\n+                    g1, g2, (1 - g1 - g2),\n+                    b1, b2, (1 - b1 - b2)]).reshape((3, 3))  # format the matrix correctly\n+    return transform_and_evaluate(ccm, r, g, b, m_lab)\n+\n+\n+def transform_and_evaluate(ccm, r, g, b, m_lab):  # Transforms colors to LAB and applies the correction matrix\n+    # create list of matrix changed colors\n+    realrgb = []\n+    for RGB in zip(r, g, b):\n+        rgb_post_ccm = np.dot(ccm, np.array(RGB) / 256)  # This is RGB values after the color correction matrix has been applied\n+        realrgb.append(colors.RGB_to_LAB(rgb_post_ccm))\n+    # now compare that with m_lab and return numeric result, averaged for each patch\n+    return (sumde(realrgb, m_lab) / 24)  # returns an average result of delta E\n+\n+\n+def sumde(listA, listB):\n+    global typenum, test_patches\n+    sumde = 0\n+    maxde = 0\n+    patchde = []  # Create array of the delta E values for each patch. useful for optimisation of certain patches\n+    for listA_item, listB_item in zip(listA, listB):\n+        if maxde < (deltae(listA_item, listB_item)):\n+            maxde = deltae(listA_item, listB_item)\n+        patchde.append(deltae(listA_item, listB_item))\n+        sumde += deltae(listA_item, listB_item)\n+    '''\n+    The different options specified at the start allow for\n+    the maximum to be returned, average or specific patches\n+    '''\n+    if typenum == 0:\n+        return sumde\n+    if typenum == 1:\n+        return maxde\n+    if typenum == 2:\n+        output = sum([patchde[test_patch] for test_patch in test_patches])\n+        # Selects only certain patches and returns the output for them\n+        return output\n+\n+\n+\"\"\"\n+calculates the ccm for an individual image.\n+ccms are calculated in rgb space, and are fit by hand. 
Although it is a 3x3\n+matrix, each row must add up to 1 in order to conserve greyness, simplifying\n+calculation.\n+The initial CCM is calculated in RGB, and then optimised in LAB color space\n+This simplifies the initial calculation but then gets us the accuracy of\n+using LAB color space.\n+\"\"\"\n+\n+\n+def do_ccm(r, g, b, m_srgb):\n+    rb = r-b\n+    gb = g-b\n+    rb_2s = (rb * rb)\n+    rb_gbs = (rb * gb)\n+    gb_2s = (gb * gb)\n+\n+    r_rbs = rb * (m_srgb[..., 0] - b)\n+    r_gbs = gb * (m_srgb[..., 0] - b)\n+    g_rbs = rb * (m_srgb[..., 1] - b)\n+    g_gbs = gb * (m_srgb[..., 1] - b)\n+    b_rbs = rb * (m_srgb[..., 2] - b)\n+    b_gbs = gb * (m_srgb[..., 2] - b)\n+\n+    \"\"\"\n+    Obtain least squares fit\n+    \"\"\"\n+    rb_2 = np.sum(rb_2s)\n+    gb_2 = np.sum(gb_2s)\n+    rb_gb = np.sum(rb_gbs)\n+    r_rb = np.sum(r_rbs)\n+    r_gb = np.sum(r_gbs)\n+    g_rb = np.sum(g_rbs)\n+    g_gb = np.sum(g_gbs)\n+    b_rb = np.sum(b_rbs)\n+    b_gb = np.sum(b_gbs)\n+\n+    det = rb_2 * gb_2 - rb_gb * rb_gb\n+\n+    \"\"\"\n+    Raise error if matrix is singular...\n+    This shouldn't really happen with real data but if it does just take new\n+    pictures and try again, not much else to be done unfortunately...\n+    \"\"\"\n+    if det < 0.001:\n+        raise ArithmeticError\n+\n+    r_a = (gb_2 * r_rb - rb_gb * r_gb) / det\n+    r_b = (rb_2 * r_gb - rb_gb * r_rb) / det\n+    \"\"\"\n+    Last row can be calculated by knowing the sum must be 1\n+    \"\"\"\n+    r_c = 1 - r_a - r_b\n+\n+    g_a = (gb_2 * g_rb - rb_gb * g_gb) / det\n+    g_b = (rb_2 * g_gb - rb_gb * g_rb) / det\n+    g_c = 1 - g_a - g_b\n+\n+    b_a = (gb_2 * b_rb - rb_gb * b_gb) / det\n+    b_b = (rb_2 * b_gb - rb_gb * b_rb) / det\n+    b_c = 1 - b_a - b_b\n+\n+    \"\"\"\n+    format ccm\n+    \"\"\"\n+    ccm = [r_a, r_b, r_c, g_a, g_b, g_c, b_a, b_b, b_c]\n+\n+    return ccm\n+\n+\n+def deltae(colorA, colorB):\n+    return ((colorA[0] - colorB[0]) ** 2 + (colorA[1] - colorB[1]) ** 2 + (colorA[2] - colorB[2]) ** 2) ** 0.5\n+    # return ((colorA[1]-colorB[1]) *  * 2 + (colorA[2]-colorB[2]) *  * 2) *  * 0.5\n+    # UNCOMMENT IF YOU WANT TO NEGLECT LUMINANCE FROM CALCULATION OF DELTA E\ndiff --git a/utils/tuning/libtuning/ctt_colors.py b/utils/tuning/libtuning/ctt_colors.py\nnew file mode 100644\nindex 000000000000..cb4d236b04d7\n--- /dev/null\n+++ b/utils/tuning/libtuning/ctt_colors.py\n@@ -0,0 +1,30 @@\n+# Program to convert from RGB to LAB color space\n+def RGB_to_LAB(RGB):  # where RGB is a 1x3 array.   
e.g RGB = [100, 255, 230]\n+    num = 0\n+    XYZ = [0, 0, 0]\n+    # converted all the three R, G, B to X, Y, Z\n+    X = RGB[0] * 0.4124 + RGB[1] * 0.3576 + RGB[2] * 0.1805\n+    Y = RGB[0] * 0.2126 + RGB[1] * 0.7152 + RGB[2] * 0.0722\n+    Z = RGB[0] * 0.0193 + RGB[1] * 0.1192 + RGB[2] * 0.9505\n+\n+    XYZ[0] = X / 255 * 100\n+    XYZ[1] = Y / 255 * 100  # XYZ Must be in range 0 -> 100, so scale down from 255\n+    XYZ[2] = Z / 255 * 100\n+    XYZ[0] = XYZ[0] / 95.047  # ref_X =  95.047   Observer= 2°, Illuminant= D65\n+    XYZ[1] = XYZ[1] / 100.0  # ref_Y = 100.000\n+    XYZ[2] = XYZ[2] / 108.883  # ref_Z = 108.883\n+    num = 0\n+    for value in XYZ:\n+        if value > 0.008856:\n+            value = value ** (0.3333333333333333)\n+        else:\n+            value = (7.787 * value) + (16 / 116)\n+        XYZ[num] = value\n+        num = num + 1\n+\n+    # L, A, B, values calculated below\n+    L = (116 * XYZ[1]) - 16\n+    a = 500 * (XYZ[0] - XYZ[1])\n+    b = 200 * (XYZ[1] - XYZ[2])\n+\n+    return [L, a, b]\ndiff --git a/utils/tuning/libtuning/ctt_ransac.py b/utils/tuning/libtuning/ctt_ransac.py\nnew file mode 100644\nindex 000000000000..01bba3022ef0\n--- /dev/null\n+++ b/utils/tuning/libtuning/ctt_ransac.py\n@@ -0,0 +1,71 @@\n+# SPDX-License-Identifier: BSD-2-Clause\n+#\n+# Copyright (C) 2019, Raspberry Pi Ltd\n+#\n+# camera tuning tool RANSAC selector for Macbeth chart locator\n+\n+import numpy as np\n+\n+scale = 2\n+\n+\n+\"\"\"\n+constructs normalised macbeth chart corners for ransac algorithm\n+\"\"\"\n+def get_square_verts(c_err=0.05, scale=scale):\n+    \"\"\"\n+    define macbeth chart corners\n+    \"\"\"\n+    b_bord_x, b_bord_y = scale*8.5, scale*13\n+    s_bord = 6*scale\n+    side = 41*scale\n+    x_max = side*6 + 5*s_bord + 2*b_bord_x\n+    y_max = side*4 + 3*s_bord + 2*b_bord_y\n+    c1 = (0, 0)\n+    c2 = (0, y_max)\n+    c3 = (x_max, y_max)\n+    c4 = (x_max, 0)\n+    mac_norm = np.array((c1, c2, c3, c4), np.float32)\n+    mac_norm = np.array([mac_norm])\n+\n+    square_verts = []\n+    square_0 = np.array(((0, 0), (0, side),\n+                         (side, side), (side, 0)), np.float32)\n+    offset_0 = np.array((b_bord_x, b_bord_y), np.float32)\n+    c_off = side * c_err\n+    offset_cont = np.array(((c_off, c_off), (c_off, -c_off),\n+                            (-c_off, -c_off), (-c_off, c_off)), np.float32)\n+    square_0 += offset_0\n+    square_0 += offset_cont\n+    \"\"\"\n+    define macbeth square corners\n+    \"\"\"\n+    for i in range(6):\n+        shift_i = np.array(((i*side, 0), (i*side, 0),\n+                            (i*side, 0), (i*side, 0)), np.float32)\n+        shift_bord = np.array(((i*s_bord, 0), (i*s_bord, 0),\n+                               (i*s_bord, 0), (i*s_bord, 0)), np.float32)\n+        square_i = square_0 + shift_i + shift_bord\n+        for j in range(4):\n+            shift_j = np.array(((0, j*side), (0, j*side),\n+                                (0, j*side), (0, j*side)), np.float32)\n+            shift_bord = np.array(((0, j*s_bord),\n+                                   (0, j*s_bord), (0, j*s_bord),\n+                                   (0, j*s_bord)), np.float32)\n+            square_j = square_i + shift_j + shift_bord\n+            square_verts.append(square_j)\n+    # print('square_verts')\n+    # print(square_verts)\n+    return np.array(square_verts, np.float32), mac_norm\n+\n+\n+def get_square_centres(c_err=0.05, scale=scale):\n+    \"\"\"\n+    define macbeth square centres\n+    \"\"\"\n+    verts, mac_norm = 
get_square_verts(c_err, scale=scale)\n+\n+    centres = np.mean(verts, axis=1)\n+    # print('centres')\n+    # print(centres)\n+    return np.array(centres, np.float32)\n",
    "prefixes": [
        "v2",
        "03/25"
    ]
}
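
The same resource can be fetched programmatically. Below is a minimal sketch using the third-party Python "requests" package (an assumption; any HTTP client works) that retrieves this patch and prints a few of the fields shown above. Unauthenticated GET requests are sufficient for read-only access on a public instance such as this one.

#!/usr/bin/env python3
# Fetch a patch from the Patchwork REST API and print a short summary.
# Minimal sketch; assumes the "requests" package is installed.

import requests

PATCH_URL = "https://patchwork.libcamera.org/api/patches/20462/"

resp = requests.get(PATCH_URL, headers={"Accept": "application/json"}, timeout=30)
resp.raise_for_status()
patch = resp.json()

print(patch["name"])                     # "[v2,03/25] libtuning: Copy files from raspberrypi"
print("state:", patch["state"])          # e.g. "superseded"
print("submitter:", patch["submitter"]["name"], patch["submitter"]["email"])
for series in patch["series"]:
    print("series:", series["name"], "v%d" % series["version"])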
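
The "mbox" field points at the raw patch in mbox format, which can be saved and applied locally (for example with git am). The Allow header above also advertises PUT and PATCH, which maintainers can use to change fields such as "state". The sketch below assumes token-based authentication via an "Authorization: Token ..." header and a hypothetical PATCHWORK_TOKEN environment variable, and "accepted" is only an illustrative target state; treat it as a sketch under those assumptions, not a definitive recipe.

# Download the patch as an mbox and (optionally) update its state.
# Sketch only: PATCHWORK_TOKEN is a hypothetical environment variable, and
# changing the state requires maintainer rights on the project.

import os
import requests

patch = requests.get("https://patchwork.libcamera.org/api/patches/20462/", timeout=30).json()

# Save the mbox; the file can then be applied with "git am 20462.mbox".
mbox = requests.get(patch["mbox"], timeout=30)
mbox.raise_for_status()
with open("20462.mbox", "wb") as f:
    f.write(mbox.content)

# Update the patch state (PATCH is listed in the Allow header above).
token = os.environ.get("PATCHWORK_TOKEN")
if token:
    resp = requests.patch(
        patch["url"],
        headers={"Authorization": f"Token {token}"},
        json={"state": "accepted"},   # illustrative state value
        timeout=30,
    )
    resp.raise_for_status()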