[v2,03/25] libtuning: Copy files from raspberrypi

Message ID 20240628104828.2928109-4-stefan.klug@ideasonboard.com
State Superseded
Series
  • Add ccm calibration to libtuning

Commit Message

Stefan Klug June 28, 2024, 10:46 a.m. UTC
Copy ctt_{awb,ccm,colors,ransac} from the raspberrypi tuning scripts as a
basis for the libcamera implementation. color.py was renamed to
ctt_colors.py to better express the origin.

The files were taken from commit 66479605baca4a22e2b

Signed-off-by: Stefan Klug <stefan.klug@ideasonboard.com>
---
 utils/tuning/libtuning/ctt_awb.py    | 376 +++++++++++++++++++++++++
 utils/tuning/libtuning/ctt_ccm.py    | 406 +++++++++++++++++++++++++++
 utils/tuning/libtuning/ctt_colors.py |  30 ++
 utils/tuning/libtuning/ctt_ransac.py |  71 +++++
 4 files changed, 883 insertions(+)
 create mode 100644 utils/tuning/libtuning/ctt_awb.py
 create mode 100644 utils/tuning/libtuning/ctt_ccm.py
 create mode 100644 utils/tuning/libtuning/ctt_colors.py
 create mode 100644 utils/tuning/libtuning/ctt_ransac.py

Comments

Kieran Bingham June 28, 2024, 10:53 a.m. UTC | #1
Quoting Stefan Klug (2024-06-28 11:46:56)
> Copy ctt_{awb,ccm,colors,ransac} from the raspberrypi tuning scripts as a
> basis for the libcamera implementation. color.py was renamed to
> ctt_colors.py to better express the origin.
> 
> The files were taken from commit 66479605baca4a22e2b
> 
> Signed-off-by: Stefan Klug <stefan.klug@ideasonboard.com>

As this is a copy, I won't 'review'. 

Acked-by: Kieran Bingham <kieran.bingham@ideasonboard.com>

> ---
>  utils/tuning/libtuning/ctt_awb.py    | 376 +++++++++++++++++++++++++
>  utils/tuning/libtuning/ctt_ccm.py    | 406 +++++++++++++++++++++++++++
>  utils/tuning/libtuning/ctt_colors.py |  30 ++
>  utils/tuning/libtuning/ctt_ransac.py |  71 +++++
>  4 files changed, 883 insertions(+)
>  create mode 100644 utils/tuning/libtuning/ctt_awb.py
>  create mode 100644 utils/tuning/libtuning/ctt_ccm.py
>  create mode 100644 utils/tuning/libtuning/ctt_colors.py
>  create mode 100644 utils/tuning/libtuning/ctt_ransac.py
> 
> diff --git a/utils/tuning/libtuning/ctt_awb.py b/utils/tuning/libtuning/ctt_awb.py
> new file mode 100644
> index 000000000000..5ba6f978a228
> --- /dev/null
> +++ b/utils/tuning/libtuning/ctt_awb.py
> @@ -0,0 +1,376 @@
> +# SPDX-License-Identifier: BSD-2-Clause
> +#
> +# Copyright (C) 2019, Raspberry Pi Ltd
> +#
> +# camera tuning tool for AWB
> +
> +from ctt_image_load import *
> +import matplotlib.pyplot as plt
> +from bisect import bisect_left
> +from scipy.optimize import fmin
> +
> +
> +"""
> +obtain piecewise linear approximation for colour curve
> +"""
> +def awb(Cam, cal_cr_list, cal_cb_list, plot):
> +    imgs = Cam.imgs
> +    """
> +    condense alsc calibration tables into one dictionary
> +    """
> +    if cal_cr_list is None:
> +        colour_cals = None
> +    else:
> +        colour_cals = {}
> +        for cr, cb in zip(cal_cr_list, cal_cb_list):
> +            cr_tab = cr['table']
> +            cb_tab = cb['table']
> +            """
> +            normalise tables so min value is 1
> +            """
> +            cr_tab = cr_tab/np.min(cr_tab)
> +            cb_tab = cb_tab/np.min(cb_tab)
> +            colour_cals[cr['ct']] = [cr_tab, cb_tab]
> +    """
> +    obtain data from greyscale macbeth patches
> +    """
> +    rb_raw = []
> +    rbs_hat = []
> +    for Img in imgs:
> +        Cam.log += '\nProcessing '+Img.name
> +        """
> +        get greyscale patches with alsc applied if alsc enabled.
> +        Note: if alsc is disabled then colour_cals will be set to None and the
> +        function will just return the greyscale patches
> +        """
> +        r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals)
> +        """
> +        calculate ratio of r, b to g
> +        """
> +        r_g = np.mean(r_patchs/g_patchs)
> +        b_g = np.mean(b_patchs/g_patchs)
> +        Cam.log += '\n       r : {:.4f}       b : {:.4f}'.format(r_g, b_g)
> +        """
> +        The curve tends to be better behaved in so-called hatspace.
> +        R, B, G represent the individual channels. The colour curve is plotted in
> +        r, b space, where:
> +            r = R/G
> +            b = B/G
> +        This will be referred to as dehatspace... (sorry)
> +        Hatspace is defined as:
> +            r_hat = R/(R+B+G)
> +            b_hat = B/(R+B+G)
> +        To convert from dehatspace to hatspace (hat operation):
> +            r_hat = r/(1+r+b)
> +            b_hat = b/(1+r+b)
> +        To convert from hatspace to dehatspace (dehat operation):
> +            r = r_hat/(1-r_hat-b_hat)
> +            b = b_hat/(1-r_hat-b_hat)
> +        Proof is left as an exercise to the reader...
> +        Throughout the code, r and b are sometimes referred to as r_g and b_g
> +        as a reminder that they are ratios
> +        """
> +        r_g_hat = r_g/(1+r_g+b_g)
> +        b_g_hat = b_g/(1+r_g+b_g)
> +        Cam.log += '\n   r_hat : {:.4f}   b_hat : {:.4f}'.format(r_g_hat, b_g_hat)
> +        rbs_hat.append((r_g_hat, b_g_hat, Img.col))
> +        rb_raw.append((r_g, b_g))
> +        Cam.log += '\n'
> +
> +    Cam.log += '\nFinished processing images'
> +    """
> +    sort all lists simultaneously by r_hat
> +    """
> +    rbs_zip = list(zip(rbs_hat, rb_raw))
> +    rbs_zip.sort(key=lambda x: x[0][0])
> +    rbs_hat, rb_raw = list(zip(*rbs_zip))
> +    """
> +    unzip tuples ready for processing
> +    """
> +    rbs_hat = list(zip(*rbs_hat))
> +    rb_raw = list(zip(*rb_raw))
> +    """
> +    fit quadratic curve to r_g_hat and b_g_hat
> +    """
> +    a, b, c = np.polyfit(rbs_hat[0], rbs_hat[1], 2)
> +    Cam.log += '\nFit quadratic curve in hatspace'
> +    """
> +    the algorithm now approximates the shortest distance from each point to the
> +    curve in dehatspace. Since the fit is done in hatspace, it is easier to
> +    find the actual shortest distance in hatspace and use the projection back
> +    into dehatspace as an overestimate.
> +    The distance will be used for two things:
> +        1) In the case that colour temperature does not strictly decrease with
> +        increasing r/g, the closest point to the line will be chosen out of an
> +        increasing pair of colours.
> +
> +        2) To calculate transverse negative and positive, the maximum positive
> +        and negative distance from the line are chosen. This benefits from the
> +        overestimate as the transverse pos/neg are upper bound values.
> +    """
> +    """
> +    define fit function
> +    """
> +    def f(x):
> +        return a*x**2 + b*x + c
> +    """
> +    iterate over points (R, B are x and y coordinates of points) and calculate
> +    distance to line in dehatspace
> +    """
> +    dists = []
> +    for i, (R, B) in enumerate(zip(rbs_hat[0], rbs_hat[1])):
> +        """
> +        define function to minimise as square distance between datapoint and
> +        point on curve. Squaring is monotonic so minimising radius squared is
> +        equivalent to minimising radius
> +        """
> +        def f_min(x):
> +            y = f(x)
> +            return((x-R)**2+(y-B)**2)
> +        """
> +        perform optimisation with scipy.optimize.fmin
> +        """
> +        x_hat = fmin(f_min, R, disp=0)[0]
> +        y_hat = f(x_hat)
> +        """
> +        dehat
> +        """
> +        x = x_hat/(1-x_hat-y_hat)
> +        y = y_hat/(1-x_hat-y_hat)
> +        rr = R/(1-R-B)
> +        bb = B/(1-R-B)
> +        """
> +        calculate euclidean distance in dehatspace
> +        """
> +        dist = ((x-rr)**2+(y-bb)**2)**0.5
> +        """
> +        return negative if point is below the fit curve
> +        """
> +        if (x+y) > (rr+bb):
> +            dist *= -1
> +        dists.append(dist)
> +    Cam.log += '\nFound closest point on fit line to each point in dehatspace'
> +    """
> +    calculate wiggle factors in awb. 10% added since this is an upper bound
> +    """
> +    transverse_neg = - np.min(dists) * 1.1
> +    transverse_pos = np.max(dists) * 1.1
> +    Cam.log += '\nTransverse pos : {:.5f}'.format(transverse_pos)
> +    Cam.log += '\nTransverse neg : {:.5f}'.format(transverse_neg)
> +    """
> +    set minimum transverse wiggles to 0.01.
> +    Wiggle factors dictate how far off of the curve the algorithm searches.
> +    0.01 is a suitable minimum that gives better results for lighting
> +    conditions not within the calibration dataset. Anything less will
> +    generalise poorly.
> +    """
> +    if transverse_pos < 0.01:
> +        transverse_pos = 0.01
> +        Cam.log += '\nForced transverse pos to 0.01'
> +    if transverse_neg < 0.01:
> +        transverse_neg = 0.01
> +        Cam.log += '\nForced transverse neg to 0.01'
> +
> +    """
> +    generate new b_hat values at each r_hat according to fit
> +    """
> +    r_hat_fit = np.array(rbs_hat[0])
> +    b_hat_fit = a*r_hat_fit**2 + b*r_hat_fit + c
> +    """
> +    transform from hatspace to dehatspace
> +    """
> +    r_fit = r_hat_fit/(1-r_hat_fit-b_hat_fit)
> +    b_fit = b_hat_fit/(1-r_hat_fit-b_hat_fit)
> +    c_fit = np.round(rbs_hat[2], 0)
> +    """
> +    round to 4dp
> +    """
> +    r_fit = np.where((1000*r_fit) % 1 <= 0.05, r_fit+0.0001, r_fit)
> +    r_fit = np.where((1000*r_fit) % 1 >= 0.95, r_fit-0.0001, r_fit)
> +    b_fit = np.where((1000*b_fit) % 1 <= 0.05, b_fit+0.0001, b_fit)
> +    b_fit = np.where((1000*b_fit) % 1 >= 0.95, b_fit-0.0001, b_fit)
> +    r_fit = np.round(r_fit, 4)
> +    b_fit = np.round(b_fit, 4)
> +    """
> +    The following code ensures that colour temperature decreases with
> +    increasing r/g
> +    """
> +    """
> +    iterate backwards over list for easier indexing
> +    """
> +    i = len(c_fit) - 1
> +    while i > 0:
> +        if c_fit[i] > c_fit[i-1]:
> +            Cam.log += '\nColour temperature increase found\n'
> +            Cam.log += '{} K at r = {} to '.format(c_fit[i-1], r_fit[i-1])
> +            Cam.log += '{} K at r = {}'.format(c_fit[i], r_fit[i])
> +            """
> +            if colour temperature increases then discard point furthest from
> +            the transformed fit (dehatspace)
> +            """
> +            error_1 = abs(dists[i])
> +            error_2 = abs(dists[i-1])
> +            Cam.log += '\nDistances from fit:\n'
> +            Cam.log += '{} K : {:.5f} , '.format(c_fit[i], error_1)
> +            Cam.log += '{} K : {:.5f}'.format(c_fit[i-1], error_2)
> +            """
> +            find bad index
> +            note that in python false = 0 and true = 1
> +            """
> +            bad = i - (error_1 < error_2)
> +            Cam.log += '\nPoint at {} K deleted as '.format(c_fit[bad])
> +            Cam.log += 'it is furthest from fit'
> +            """
> +            delete bad point
> +            """
> +            r_fit = np.delete(r_fit, bad)
> +            b_fit = np.delete(b_fit, bad)
> +            c_fit = np.delete(c_fit, bad).astype(np.uint16)
> +        """
> +        note that if a point has been discarded then the length has decreased
> +        by one, meaning that decreasing the index by one will reassess the kept
> +        point against the next point. It is therefore possible, in theory, for
> +        two adjacent points to be discarded, although probably rare
> +        """
> +        i -= 1
> +
> +    """
> +    return formatted ct curve, ordered by increasing colour temperature
> +    """
> +    ct_curve = list(np.array(list(zip(b_fit, r_fit, c_fit))).flatten())[::-1]
> +    Cam.log += '\nFinal CT curve:'
> +    for i in range(len(ct_curve)//3):
> +        j = 3*i
> +        Cam.log += '\n  ct: {}  '.format(ct_curve[j])
> +        Cam.log += '  r: {}  '.format(ct_curve[j+1])
> +        Cam.log += '  b: {}  '.format(ct_curve[j+2])
> +
> +    """
> +    plotting code for debug
> +    """
> +    if plot:
> +        x = np.linspace(np.min(rbs_hat[0]), np.max(rbs_hat[0]), 100)
> +        y = a*x**2 + b*x + c
> +        plt.subplot(2, 1, 1)
> +        plt.title('hatspace')
> +        plt.plot(rbs_hat[0], rbs_hat[1], ls='--', color='blue')
> +        plt.plot(x, y, color='green', ls='-')
> +        plt.scatter(rbs_hat[0], rbs_hat[1], color='red')
> +        for i, ct in enumerate(rbs_hat[2]):
> +            plt.annotate(str(ct), (rbs_hat[0][i], rbs_hat[1][i]))
> +        plt.xlabel('$\\hat{r}$')
> +        plt.ylabel('$\\hat{b}$')
> +        """
> +        optionally set axes equal to shortest distance so the line really
> +        does look perpendicular and everybody is happy
> +        """
> +        # ax = plt.gca()
> +        # ax.set_aspect('equal')
> +        plt.grid()
> +        plt.subplot(2, 1, 2)
> +        plt.title('dehatspace - indoors?')
> +        plt.plot(r_fit, b_fit, color='blue')
> +        plt.scatter(rb_raw[0], rb_raw[1], color='green')
> +        plt.scatter(r_fit, b_fit, color='red')
> +        for i, ct in enumerate(c_fit):
> +            plt.annotate(str(ct), (r_fit[i], b_fit[i]))
> +        plt.xlabel('$r$')
> +        plt.ylabel('$b$')
> +        """
> +        optionally set axes equal to shortest distance so the line really
> +        does look perpendicular and everybody is happy
> +        """
> +        # ax = plt.gca()
> +        # ax.set_aspect('equal')
> +        plt.subplots_adjust(hspace=0.5)
> +        plt.grid()
> +        plt.show()
> +    """
> +    end of plotting code
> +    """
> +    return(ct_curve, np.round(transverse_pos, 5), np.round(transverse_neg, 5))
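
To make the flattened return format concrete, a minimal sketch with two
invented points; the curve reads as consecutive (ct, r, b) triples, ordered
by increasing colour temperature:

    import numpy as np
    b_fit, r_fit, c_fit = [0.9, 0.5], [0.4, 0.8], [6500, 2800]
    ct_curve = list(np.array(list(zip(b_fit, r_fit, c_fit))).flatten())[::-1]
    # -> [2800.0, 0.8, 0.5, 6500.0, 0.4, 0.9]
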
> +
> +
> +"""
> +obtain greyscale patches and perform alsc colour correction
> +"""
> +def get_alsc_patches(Img, colour_cals, grey=True):
> +    """
> +    get patch centre coordinates, image colour and the actual
> +    patches for each channel, remembering to subtract blacklevel
> +    If grey then only greyscale patches are considered
> +    """
> +    if grey:
> +        cen_coords = Img.cen_coords[3::4]
> +        col = Img.col
> +        patches = [np.array(Img.patches[i]) for i in Img.order]
> +        r_patchs = patches[0][3::4] - Img.blacklevel_16
> +        b_patchs = patches[3][3::4] - Img.blacklevel_16
> +        """
> +        note two green channels are averaged
> +        """
> +        g_patchs = (patches[1][3::4]+patches[2][3::4])/2 - Img.blacklevel_16
> +    else:
> +        cen_coords = Img.cen_coords
> +        col = Img.col
> +        patches = [np.array(Img.patches[i]) for i in Img.order]
> +        r_patchs = patches[0] - Img.blacklevel_16
> +        b_patchs = patches[3] - Img.blacklevel_16
> +        g_patchs = (patches[1]+patches[2])/2 - Img.blacklevel_16
> +
> +    if colour_cals is None:
> +        return r_patchs, b_patchs, g_patchs
> +    """
> +    find where image colour fits in alsc colour calibration tables
> +    """
> +    cts = list(colour_cals.keys())
> +    pos = bisect_left(cts, col)
> +    """
> +    if img colour is below the minimum or above the maximum alsc calibration
> +    colour, simply pick the extreme closest to img colour
> +    """
> +    if pos % len(cts) == 0:
> +        """
> +        this works because -0 = 0 = first and -1 = last index
> +        """
> +        col_tabs = np.array(colour_cals[cts[-pos//len(cts)]])
> +        """
> +    else, perform linear interpolation between existing alsc colour
> +    calibration tables
> +    """
> +    else:
> +        bef = cts[pos-1]
> +        aft = cts[pos]
> +        da = col-bef
> +        db = aft-col
> +        bef_tabs = np.array(colour_cals[bef])
> +        aft_tabs = np.array(colour_cals[aft])
> +        col_tabs = (bef_tabs*db + aft_tabs*da)/(da+db)
> +    col_tabs = np.reshape(col_tabs, (2, 12, 16))
> +    """
> +    calculate dx, dy used to calculate alsc table
> +    """
> +    w, h = Img.w/2, Img.h/2
> +    dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
> +    """
> +    make list of pairs of gains for each patch by selecting the correct value
> +    in alsc colour calibration table
> +    """
> +    patch_gains = []
> +    for cen in cen_coords:
> +        x, y = cen[0]//dx, cen[1]//dy
> +        # We could probably do with some better spatial interpolation here?
> +        col_gains = (col_tabs[0][y][x], col_tabs[1][y][x])
> +        patch_gains.append(col_gains)
> +
> +    """
> +    multiply the r and b channels in each patch by the respective gain, finally
> +    performing the alsc colour correction
> +    """
> +    for i, gains in enumerate(patch_gains):
> +        r_patchs[i] = r_patchs[i] * gains[0]
> +        b_patchs[i] = b_patchs[i] * gains[1]
> +
> +    """
> +    return greyscale patches, g channel and corrected r, b channels
> +    """
> +    return r_patchs, b_patchs, g_patchs
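
The colour-temperature interpolation above can be exercised on its own; a
standalone sketch with two constant dummy tables (shape matching the 12x16
grid used here):

    import numpy as np
    from bisect import bisect_left

    colour_cals = {2500: np.full((12, 16), 1.0), 6500: np.full((12, 16), 2.0)}
    col = 4500                                 # image colour temperature
    cts = sorted(colour_cals.keys())
    pos = bisect_left(cts, col)                # 1, i.e. between the two tables
    bef, aft = cts[pos - 1], cts[pos]
    da, db = col - bef, aft - col
    tab = (colour_cals[bef] * db + colour_cals[aft] * da) / (da + db)
    # halfway between 2500 K and 6500 K, so every gain is 1.5
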
> diff --git a/utils/tuning/libtuning/ctt_ccm.py b/utils/tuning/libtuning/ctt_ccm.py
> new file mode 100644
> index 000000000000..59753e332ee9
> --- /dev/null
> +++ b/utils/tuning/libtuning/ctt_ccm.py
> @@ -0,0 +1,406 @@
> +# SPDX-License-Identifier: BSD-2-Clause
> +#
> +# Copyright (C) 2019, Raspberry Pi Ltd
> +#
> +# camera tuning tool for CCM (colour correction matrix)
> +
> +from ctt_image_load import *
> +from ctt_awb import get_alsc_patches
> +import colors
> +from scipy.optimize import minimize
> +from ctt_visualise import visualise_macbeth_chart
> +import numpy as np
> +"""
> +takes 8-bit macbeth chart values, degammas them and returns 16-bit values
> +"""
> +
> +'''
> +This program has several options for deriving the color matrix.
> +The first is average. This minimises the average delta E across all patches
> +of the macbeth chart. Testing across all cameras yielded this as the most
> +color accurate and vivid. Other options are available, however.
> +Maximum minimises the maximum delta E of the patches. It iterates until the
> +minimum maximum is found (so that there is not one patch that deviates
> +wildly).
> +This yields generally good results but overall the colors are less accurate.
> +Have a fiddle with maximum and see what you think.
> +The final option allows you to select the patches to average across.
> +This means that you can bias certain patches, for instance if you want the
> +reds to be more accurate.
> +'''
> +
> +matrix_selection_types = ["average", "maximum", "patches"]
> +typenum = 0  # select from array above, 0 = average, 1 = maximum, 2 = patches
> +test_patches = [1, 2, 5, 8, 9, 12, 14]
> +
> +'''
> +Enter the patches to test for. A patch can also be entered twice if you
> +would like twice as much bias on it.
> +'''
> +
> +
> +def degamma(x):
> +    x = x / ((2 ** 8) - 1)  # takes 255 and scales it down to one
> +    x = np.where(x < 0.04045, x / 12.92, ((x + 0.055) / 1.055) ** 2.4)
> +    x = x * ((2 ** 16) - 1)  # takes one and scales up to 65535, 16 bit color
> +    return x
> +
> +
> +def gamma(x):
> +    # Take 3 long array of color values and gamma them
> +    return [((colour / 255) ** (1 / 2.4) * 1.055 - 0.055) * 255 for colour in x]
> +
> +
> +"""
> +Finds colour correction matrices for a list of images
> +"""
> +
> +
> +def ccm(Cam, cal_cr_list, cal_cb_list):
> +    global matrix_selection_types, typenum
> +    imgs = Cam.imgs
> +    """
> +    standard macbeth chart colour values
> +    """
> +    m_rgb = np.array([  # these are in RGB
> +        [116, 81, 67],    # dark skin
> +        [199, 147, 129],  # light skin
> +        [91, 122, 156],   # blue sky
> +        [90, 108, 64],    # foliage
> +        [130, 128, 176],  # blue flower
> +        [92, 190, 172],   # bluish green
> +        [224, 124, 47],   # orange
> +        [68, 91, 170],    # purplish blue
> +        [198, 82, 97],    # moderate red
> +        [94, 58, 106],    # purple
> +        [159, 189, 63],   # yellow green
> +        [230, 162, 39],   # orange yellow
> +        [35, 63, 147],    # blue
> +        [67, 149, 74],    # green
> +        [180, 49, 57],    # red
> +        [238, 198, 20],   # yellow
> +        [193, 84, 151],   # magenta
> +        [0, 136, 170],    # cyan (goes out of gamut)
> +        [245, 245, 243],  # white 9.5
> +        [200, 202, 202],  # neutral 8
> +        [161, 163, 163],  # neutral 6.5
> +        [121, 121, 122],  # neutral 5
> +        [82, 84, 86],     # neutral 3.5
> +        [49, 49, 51]      # black 2
> +    ])
> +    """
> +    convert reference colours from sRGB to linear RGB
> +    """
> +    m_srgb = degamma(m_rgb)  # now in 16 bit color.
> +
> +    # Produce array of LAB values for ideal color chart
> +    m_lab = [colors.RGB_to_LAB(color / 256) for color in m_srgb]
> +
> +    """
> +    reorder reference values to match how patches are ordered
> +    """
> +    m_srgb = np.array([m_srgb[i::6] for i in range(6)]).reshape((24, 3))
> +    m_lab = np.array([m_lab[i::6] for i in range(6)]).reshape((24, 3))
> +    m_rgb = np.array([m_rgb[i::6] for i in range(6)]).reshape((24, 3))
> +    """
> +    reformat alsc correction tables or set colour_cals to None if alsc is
> +    deactivated
> +    """
> +    if cal_cr_list is None:
> +        colour_cals = None
> +    else:
> +        colour_cals = {}
> +        for cr, cb in zip(cal_cr_list, cal_cb_list):
> +            cr_tab = cr['table']
> +            cb_tab = cb['table']
> +            """
> +            normalise tables so min value is 1
> +            """
> +            cr_tab = cr_tab / np.min(cr_tab)
> +            cb_tab = cb_tab / np.min(cb_tab)
> +            colour_cals[cr['ct']] = [cr_tab, cb_tab]
> +
> +    """
> +    for each image, perform awb and alsc corrections.
> +    Then calculate the colour correction matrix for that image, recording the
> +    ccm and the colour temperature.
> +    """
> +    ccm_tab = {}
> +    for Img in imgs:
> +        Cam.log += '\nProcessing image: ' + Img.name
> +        """
> +        get macbeth patches with alsc applied if alsc enabled.
> +        Note: if alsc is disabled then colour_cals will be set to None and
> +        the function will simply return the macbeth patches
> +        """
> +        r, b, g = get_alsc_patches(Img, colour_cals, grey=False)
> +        # 256 values for each patch of sRGB values
> +
> +        """
> +        do awb
> +        Note: awb is done by measuring the macbeth chart in the image, rather
> +        than from the awb calibration. This is done so the awb will be perfect
> +        and the ccm matrices will be more accurate.
> +        """
> +        r_greys, b_greys, g_greys = r[3::4], b[3::4], g[3::4]
> +        r_g = np.mean(r_greys / g_greys)
> +        b_g = np.mean(b_greys / g_greys)
> +        r = r / r_g
> +        b = b / b_g
> +        """
> +        normalise brightness wrt reference macbeth colours and then average
> +        each channel for each patch
> +        """
> +        gain = np.mean(m_srgb) / np.mean((r, g, b))
> +        Cam.log += '\nGain with respect to standard colours: {:.3f}'.format(gain)
> +        r = np.mean(gain * r, axis=1)
> +        b = np.mean(gain * b, axis=1)
> +        g = np.mean(gain * g, axis=1)
> +        """
> +        calculate ccm matrix
> +        """
> +        # ==== All of the below should be in sRGB ====
> +        sumde = 0
> +        ccm = do_ccm(r, g, b, m_srgb)
> +        # This is the initial guess that our optimisation code works with.
> +        original_ccm = ccm
> +        r1 = ccm[0]
> +        r2 = ccm[1]
> +        g1 = ccm[3]
> +        g2 = ccm[4]
> +        b1 = ccm[6]
> +        b2 = ccm[7]
> +        '''
> +        COLOR MATRIX LOOKS AS BELOW
> +            [R1 R2 R3]   [Rval]   [Rout]
> +            [G1 G2 G3] * [Gval] = [Gout]
> +            [B1 B2 B3]   [Bval]   [Bout]
> +        Will be optimising 6 elements and working out the third element of
> +        each row using 1 - r1 - r2 = r3
> +        '''
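
Why six free elements suffice: the third element of each row is fixed by the
row-sum constraint, and rows summing to 1 are exactly what keeps greys
unchanged. A standalone sketch with made-up numbers:

    import numpy as np

    x = [1.8, -0.5, -0.25, 1.4, 0.05, -0.3]   # hypothetical optimiser output
    ccm = np.array([x[0], x[1], 1 - x[0] - x[1],
                    x[2], x[3], 1 - x[2] - x[3],
                    x[4], x[5], 1 - x[4] - x[5]]).reshape(3, 3)
    grey = np.array([0.5, 0.5, 0.5])
    assert np.allclose(ccm @ grey, grey)      # greys pass through unchanged
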
> +
> +        x0 = [r1, r2, g1, g2, b1, b2]
> +        '''
> +        We use our old CCM as the initial guess for the program to find the
> +        optimised matrix
> +        '''
> +        result = minimize(guess, x0, args=(r, g, b, m_lab), tol=0.01)
> +        '''
> +        This produces a color matrix which has the lowest delta E possible,
> +        based off the input data. Note it is impossible for this to reach
> +        zero since the input data is imperfect
> +        '''
> +
> +        Cam.log += ("\n \n Optimised Matrix Below: \n \n")
> +        [r1, r2, g1, g2, b1, b2] = result.x
> +        # The new, optimised color correction matrix values
> +        optimised_ccm = [r1, r2, (1 - r1 - r2), g1, g2, (1 - g1 - g2), b1, b2, (1 - b1 - b2)]
> +
> +        # This is the optimised Color Matrix (preserving greys by summing rows up to 1)
> +        Cam.log += str(optimised_ccm)
> +        Cam.log += "\n Old Color Correction Matrix Below \n"
> +        Cam.log += str(ccm)
> +
> +        formatted_ccm = np.array(original_ccm).reshape((3, 3))
> +
> +        '''
> +        below is a whole load of code that then applies the latest color
> +        matrix, and returns LAB values for color. This can then be used
> +        to calculate the final delta E
> +        '''
> +        optimised_ccm_rgb = []  # Original Color Corrected Matrix RGB / LAB
> +        optimised_ccm_lab = []
> +
> +        formatted_optimised_ccm = np.array(optimised_ccm).reshape((3, 3))
> +        after_gamma_rgb = []
> +        after_gamma_lab = []
> +
> +        for RGB in zip(r, g, b):
> +            ccm_applied_rgb = np.dot(formatted_ccm, (np.array(RGB) / 256))
> +            optimised_ccm_rgb.append(gamma(ccm_applied_rgb))
> +            optimised_ccm_lab.append(colors.RGB_to_LAB(ccm_applied_rgb))
> +
> +            optimised_ccm_applied_rgb = np.dot(formatted_optimised_ccm, np.array(RGB) / 256)
> +            after_gamma_rgb.append(gamma(optimised_ccm_applied_rgb))
> +            after_gamma_lab.append(colors.RGB_to_LAB(optimised_ccm_applied_rgb))
> +        '''
> +        Gamma After RGB / LAB - not used in calculations, only used for visualisation
> +        We now want to spit out some data that shows
> +        how the optimisation has improved the color matrices
> +        '''
> +        Cam.log += "Here are the Improvements"
> +
> +        # CALCULATE WORST CASE delta e
> +        old_worst_delta_e = 0
> +        before_average = transform_and_evaluate(formatted_ccm, r, g, b, m_lab)
> +        new_worst_delta_e = 0
> +        after_average = transform_and_evaluate(formatted_optimised_ccm, r, g, b, m_lab)
> +        for i in range(24):
> +            old_delta_e = deltae(optimised_ccm_lab[i], m_lab[i])  # Current Old Delta E
> +            new_delta_e = deltae(after_gamma_lab[i], m_lab[i])  # Current New Delta E
> +            if old_delta_e > old_worst_delta_e:
> +                old_worst_delta_e = old_delta_e
> +            if new_delta_e > new_worst_delta_e:
> +                new_worst_delta_e = new_delta_e
> +
> +        Cam.log += "Before color correction matrix was optimised, we got an average delta E of " + str(before_average) + " and a maximum delta E of " + str(old_worst_delta_e)
> +        Cam.log += "After color correction matrix was optimised, we got an average delta E of " + str(after_average) + " and a maximum delta E of " + str(new_worst_delta_e)
> +
> +        visualise_macbeth_chart(m_rgb, optimised_ccm_rgb, after_gamma_rgb, str(Img.col) + str(matrix_selection_types[typenum]))
> +        '''
> +        The program will also save some visualisations of improvements.
> +        Very pretty to look at. Top rectangle is ideal, Left square is
> +        before optimisation, right square is after.
> +        '''
> +
> +        """
> +        if a ccm has already been calculated for that temperature then don't
> +        overwrite but save both. They will then be averaged later on
> +        """  # Now going to use optimised color matrix, optimised_ccm
> +        if Img.col in ccm_tab.keys():
> +            ccm_tab[Img.col].append(optimised_ccm)
> +        else:
> +            ccm_tab[Img.col] = [optimised_ccm]
> +        Cam.log += '\n'
> +
> +    Cam.log += '\nFinished processing images'
> +    """
> +    average any ccms that share a colour temperature
> +    """
> +    for k, v in ccm_tab.items():
> +        tab = np.mean(v, axis=0)
> +        tab = np.where((10000 * tab) % 1 <= 0.05, tab + 0.00001, tab)
> +        tab = np.where((10000 * tab) % 1 >= 0.95, tab - 0.00001, tab)
> +        ccm_tab[k] = list(np.round(tab, 5))
> +        Cam.log += '\nMatrix calculated for colour temperature of {} K'.format(k)
> +
> +    """
> +    return all ccms with respective colour temperature in the correct format,
> +    sorted by their colour temperature
> +    """
> +    sorted_ccms = sorted(ccm_tab.items(), key=lambda kv: kv[0])
> +    ccms = []
> +    for i in sorted_ccms:
> +        ccms.append({
> +            'ct': i[0],
> +            'ccm': i[1]
> +        })
> +    return ccms
> +
> +
> +def guess(x0, r, g, b, m_lab):       # provides a method of numerical feedback for the optimisation code
> +    [r1, r2, g1, g2, b1, b2] = x0
> +    ccm = np.array([r1, r2, (1 - r1 - r2),
> +                    g1, g2, (1 - g1 - g2),
> +                    b1, b2, (1 - b1 - b2)]).reshape((3, 3))  # format the matrix correctly
> +    return transform_and_evaluate(ccm, r, g, b, m_lab)
> +
> +
> +def transform_and_evaluate(ccm, r, g, b, m_lab):  # Transforms colors to LAB and applies the correction matrix
> +    # create list of matrix changed colors
> +    realrgb = []
> +    for RGB in zip(r, g, b):
> +        rgb_post_ccm = np.dot(ccm, np.array(RGB) / 256)  # This is RGB values after the color correction matrix has been applied
> +        realrgb.append(colors.RGB_to_LAB(rgb_post_ccm))
> +    # now compare that with m_lab and return numeric result, averaged for each patch
> +    return (sumde(realrgb, m_lab) / 24)  # returns an average result of delta E
> +
> +
> +def sumde(listA, listB):
> +    global typenum, test_patches
> +    sumde = 0
> +    maxde = 0
> +    patchde = []  # Create array of the delta E values for each patch. useful for optimisation of certain patches
> +    for listA_item, listB_item in zip(listA, listB):
> +        if maxde < (deltae(listA_item, listB_item)):
> +            maxde = deltae(listA_item, listB_item)
> +        patchde.append(deltae(listA_item, listB_item))
> +        sumde += deltae(listA_item, listB_item)
> +    '''
> +    The different options specified at the start allow the maximum, the
> +    average or specific patches to be returned
> +    '''
> +    if typenum == 0:
> +        return sumde
> +    if typenum == 1:
> +        return maxde
> +    if typenum == 2:
> +        output = sum([patchde[test_patch] for test_patch in test_patches])
> +        # Selects only certain patches and returns the output for them
> +        return output
> +
> +
> +"""
> +calculates the ccm for an individual image.
> +ccms are calculated in rgb space, and are fit by hand. Although it is a 3x3
> +matrix, each row must add up to 1 in order to conserve greyness, simplifying
> +calculation.
> +The initial CCM is calculated in RGB, and then optimised in LAB color space
> +This simplifies the initial calculation but then gets us the accuracy of
> +using LAB color space.
> +"""
> +
> +
> +def do_ccm(r, g, b, m_srgb):
> +    rb = r-b
> +    gb = g-b
> +    rb_2s = (rb * rb)
> +    rb_gbs = (rb * gb)
> +    gb_2s = (gb * gb)
> +
> +    r_rbs = rb * (m_srgb[..., 0] - b)
> +    r_gbs = gb * (m_srgb[..., 0] - b)
> +    g_rbs = rb * (m_srgb[..., 1] - b)
> +    g_gbs = gb * (m_srgb[..., 1] - b)
> +    b_rbs = rb * (m_srgb[..., 2] - b)
> +    b_gbs = gb * (m_srgb[..., 2] - b)
> +
> +    """
> +    Obtain least squares fit
> +    """
> +    rb_2 = np.sum(rb_2s)
> +    gb_2 = np.sum(gb_2s)
> +    rb_gb = np.sum(rb_gbs)
> +    r_rb = np.sum(r_rbs)
> +    r_gb = np.sum(r_gbs)
> +    g_rb = np.sum(g_rbs)
> +    g_gb = np.sum(g_gbs)
> +    b_rb = np.sum(b_rbs)
> +    b_gb = np.sum(b_gbs)
> +
> +    det = rb_2 * gb_2 - rb_gb * rb_gb
> +
> +    """
> +    Raise error if matrix is singular...
> +    This shouldn't really happen with real data but if it does just take new
> +    pictures and try again, not much else to be done unfortunately...
> +    """
> +    if det < 0.001:
> +        raise ArithmeticError
> +
> +    r_a = (gb_2 * r_rb - rb_gb * r_gb) / det
> +    r_b = (rb_2 * r_gb - rb_gb * r_rb) / det
> +    """
> +    Last row can be calculated by knowing the sum must be 1
> +    """
> +    r_c = 1 - r_a - r_b
> +
> +    g_a = (gb_2 * g_rb - rb_gb * g_gb) / det
> +    g_b = (rb_2 * g_gb - rb_gb * g_rb) / det
> +    g_c = 1 - g_a - g_b
> +
> +    b_a = (gb_2 * b_rb - rb_gb * b_gb) / det
> +    b_b = (rb_2 * b_gb - rb_gb * b_rb) / det
> +    b_c = 1 - b_a - b_b
> +
> +    """
> +    format ccm
> +    """
> +    ccm = [r_a, r_b, r_c, g_a, g_b, g_c, b_a, b_b, b_c]
> +
> +    return ccm
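
The closed-form fit above is ordinary least squares with two unknowns per
output row (the third being fixed by the row-sum constraint). A sketch
showing np.linalg.lstsq reaching the same answer on synthetic data:

    import numpy as np

    rng = np.random.default_rng(0)
    r, g, b = rng.random(24), rng.random(24), rng.random(24)
    target = 0.7 * r + 0.2 * g + 0.1 * b      # invented ground-truth row
    A = np.stack([r - b, g - b], axis=1)
    r_a, r_b = np.linalg.lstsq(A, target - b, rcond=None)[0]
    # r_a ~= 0.7, r_b ~= 0.2, and r_c = 1 - r_a - r_b ~= 0.1
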
> +
> +
> +def deltae(colorA, colorB):
> +    return ((colorA[0] - colorB[0]) ** 2 + (colorA[1] - colorB[1]) ** 2 + (colorA[2] - colorB[2]) ** 2) ** 0.5
> +    # return ((colorA[1] - colorB[1]) ** 2 + (colorA[2] - colorB[2]) ** 2) ** 0.5
> +    # UNCOMMENT IF YOU WANT TO NEGLECT LUMINANCE FROM CALCULATION OF DELTA E
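
deltae() above is the CIE76 colour difference, i.e. plain Euclidean distance
in LAB. For example, two colours at equal lightness offset by (3, 4) in
(a, b):

    print(deltae([50, 0, 0], [50, 3, 4]))   # 5.0
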
> diff --git a/utils/tuning/libtuning/ctt_colors.py b/utils/tuning/libtuning/ctt_colors.py
> new file mode 100644
> index 000000000000..cb4d236b04d7
> --- /dev/null
> +++ b/utils/tuning/libtuning/ctt_colors.py
> @@ -0,0 +1,30 @@
> +# Program to convert from RGB to LAB color space
> +def RGB_to_LAB(RGB):  # where RGB is a 1x3 array.   e.g RGB = [100, 255, 230]
> +    num = 0
> +    XYZ = [0, 0, 0]
> +    # convert the R, G, B values to X, Y, Z
> +    X = RGB[0] * 0.4124 + RGB[1] * 0.3576 + RGB[2] * 0.1805
> +    Y = RGB[0] * 0.2126 + RGB[1] * 0.7152 + RGB[2] * 0.0722
> +    Z = RGB[0] * 0.0193 + RGB[1] * 0.1192 + RGB[2] * 0.9505
> +
> +    XYZ[0] = X / 255 * 100
> +    XYZ[1] = Y / 255 * 100  # XYZ Must be in range 0 -> 100, so scale down from 255
> +    XYZ[2] = Z / 255 * 100
> +    XYZ[0] = XYZ[0] / 95.047  # ref_X =  95.047   Observer= 2°, Illuminant= D65
> +    XYZ[1] = XYZ[1] / 100.0  # ref_Y = 100.000
> +    XYZ[2] = XYZ[2] / 108.883  # ref_Z = 108.883
> +    num = 0
> +    for value in XYZ:
> +        if value > 0.008856:
> +            value = value ** (0.3333333333333333)
> +        else:
> +            value = (7.787 * value) + (16 / 116)
> +        XYZ[num] = value
> +        num = num + 1
> +
> +    # L, A, B, values calculated below
> +    L = (116 * XYZ[1]) - 16
> +    a = 500 * (XYZ[0] - XYZ[1])
> +    b = 200 * (XYZ[1] - XYZ[2])
> +
> +    return [L, a, b]
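
A quick sanity check of the conversion: full-scale linear RGB white should
land very close to the D65 reference white, L = 100 with a and b near 0:

    print(RGB_to_LAB([255, 255, 255]))   # approx [100.0, 0.0, 0.0]
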
> diff --git a/utils/tuning/libtuning/ctt_ransac.py b/utils/tuning/libtuning/ctt_ransac.py
> new file mode 100644
> index 000000000000..01bba3022ef0
> --- /dev/null
> +++ b/utils/tuning/libtuning/ctt_ransac.py
> @@ -0,0 +1,71 @@
> +# SPDX-License-Identifier: BSD-2-Clause
> +#
> +# Copyright (C) 2019, Raspberry Pi Ltd
> +#
> +# camera tuning tool RANSAC selector for Macbeth chart locator
> +
> +import numpy as np
> +
> +scale = 2
> +
> +
> +"""
> +constructs normalised macbeth chart corners for ransac algorithm
> +"""
> +def get_square_verts(c_err=0.05, scale=scale):
> +    """
> +    define macbeth chart corners
> +    """
> +    b_bord_x, b_bord_y = scale*8.5, scale*13
> +    s_bord = 6*scale
> +    side = 41*scale
> +    x_max = side*6 + 5*s_bord + 2*b_bord_x
> +    y_max = side*4 + 3*s_bord + 2*b_bord_y
> +    c1 = (0, 0)
> +    c2 = (0, y_max)
> +    c3 = (x_max, y_max)
> +    c4 = (x_max, 0)
> +    mac_norm = np.array((c1, c2, c3, c4), np.float32)
> +    mac_norm = np.array([mac_norm])
> +
> +    square_verts = []
> +    square_0 = np.array(((0, 0), (0, side),
> +                         (side, side), (side, 0)), np.float32)
> +    offset_0 = np.array((b_bord_x, b_bord_y), np.float32)
> +    c_off = side * c_err
> +    offset_cont = np.array(((c_off, c_off), (c_off, -c_off),
> +                            (-c_off, -c_off), (-c_off, c_off)), np.float32)
> +    square_0 += offset_0
> +    square_0 += offset_cont
> +    """
> +    define macbeth square corners
> +    """
> +    for i in range(6):
> +        shift_i = np.array(((i*side, 0), (i*side, 0),
> +                            (i*side, 0), (i*side, 0)), np.float32)
> +        shift_bord = np.array(((i*s_bord, 0), (i*s_bord, 0),
> +                               (i*s_bord, 0), (i*s_bord, 0)), np.float32)
> +        square_i = square_0 + shift_i + shift_bord
> +        for j in range(4):
> +            shift_j = np.array(((0, j*side), (0, j*side),
> +                                (0, j*side), (0, j*side)), np.float32)
> +            shift_bord = np.array(((0, j*s_bord),
> +                                   (0, j*s_bord), (0, j*s_bord),
> +                                   (0, j*s_bord)), np.float32)
> +            square_j = square_i + shift_j + shift_bord
> +            square_verts.append(square_j)
> +    # print('square_verts')
> +    # print(square_verts)
> +    return np.array(square_verts, np.float32), mac_norm
> +
> +
> +def get_square_centres(c_err=0.05, scale=scale):
> +    """
> +    define macbeth square centres
> +    """
> +    verts, mac_norm = get_square_verts(c_err, scale=scale)
> +
> +    centres = np.mean(verts, axis=1)
> +    # print('centres')
> +    # print(centres)
> +    return np.array(centres, np.float32)
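
Usage sketch: the helpers produce one quadrilateral per Macbeth patch
(6 columns by 4 rows) plus the normalised chart outline:

    verts, mac_norm = get_square_verts()
    centres = get_square_centres()
    print(verts.shape, centres.shape)   # (24, 4, 2) (24, 2)
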
> -- 
> 2.43.0
>

Patch
diff mbox series

diff --git a/utils/tuning/libtuning/ctt_awb.py b/utils/tuning/libtuning/ctt_awb.py
new file mode 100644
index 000000000000..5ba6f978a228
--- /dev/null
+++ b/utils/tuning/libtuning/ctt_awb.py
@@ -0,0 +1,376 @@ 
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# camera tuning tool for AWB
+
+from ctt_image_load import *
+import matplotlib.pyplot as plt
+from bisect import bisect_left
+from scipy.optimize import fmin
+
+
+"""
+obtain piecewise linear approximation for colour curve
+"""
+def awb(Cam, cal_cr_list, cal_cb_list, plot):
+    imgs = Cam.imgs
+    """
+    condense alsc calibration tables into one dictionary
+    """
+    if cal_cr_list is None:
+        colour_cals = None
+    else:
+        colour_cals = {}
+        for cr, cb in zip(cal_cr_list, cal_cb_list):
+            cr_tab = cr['table']
+            cb_tab = cb['table']
+            """
+            normalise tables so min value is 1
+            """
+            cr_tab = cr_tab/np.min(cr_tab)
+            cb_tab = cb_tab/np.min(cb_tab)
+            colour_cals[cr['ct']] = [cr_tab, cb_tab]
+    """
+    obtain data from greyscale macbeth patches
+    """
+    rb_raw = []
+    rbs_hat = []
+    for Img in imgs:
+        Cam.log += '\nProcessing '+Img.name
+        """
+        get greyscale patches with alsc applied if alsc enabled.
+        Note: if alsc is disabled then colour_cals will be set to None and the
+        function will just return the greyscale patches
+        """
+        r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals)
+        """
+        calculate ratio of r, b to g
+        """
+        r_g = np.mean(r_patchs/g_patchs)
+        b_g = np.mean(b_patchs/g_patchs)
+        Cam.log += '\n       r : {:.4f}       b : {:.4f}'.format(r_g, b_g)
+        """
+        The curve tends to be better behaved in so-called hatspace.
+        R, B, G represent the individual channels. The colour curve is plotted in
+        r, b space, where:
+            r = R/G
+            b = B/G
+        This will be referred to as dehatspace... (sorry)
+        Hatspace is defined as:
+            r_hat = R/(R+B+G)
+            b_hat = B/(R+B+G)
+        To convert from dehatspace to hastpace (hat operation):
+            r_hat = r/(1+r+b)
+            b_hat = b/(1+r+b)
+        To convert from hatspace to dehatspace (dehat operation):
+            r = r_hat/(1-r_hat-b_hat)
+            b = b_hat/(1-r_hat-b_hat)
+        Proof is left as an excercise to the reader...
+        Throughout the code, r and b are sometimes referred to as r_g and b_g
+        as a reminder that they are ratios
+        """
+        r_g_hat = r_g/(1+r_g+b_g)
+        b_g_hat = b_g/(1+r_g+b_g)
+        Cam.log += '\n   r_hat : {:.4f}   b_hat : {:.4f}'.format(r_g_hat, b_g_hat)
+        rbs_hat.append((r_g_hat, b_g_hat, Img.col))
+        rb_raw.append((r_g, b_g))
+        Cam.log += '\n'
+
+    Cam.log += '\nFinished processing images'
+    """
+    sort all lits simultaneously by r_hat
+    """
+    rbs_zip = list(zip(rbs_hat, rb_raw))
+    rbs_zip.sort(key=lambda x: x[0][0])
+    rbs_hat, rb_raw = list(zip(*rbs_zip))
+    """
+    unzip tuples ready for processing
+    """
+    rbs_hat = list(zip(*rbs_hat))
+    rb_raw = list(zip(*rb_raw))
+    """
+    fit quadratic fit to r_g hat and b_g_hat
+    """
+    a, b, c = np.polyfit(rbs_hat[0], rbs_hat[1], 2)
+    Cam.log += '\nFit quadratic curve in hatspace'
+    """
+    the algorithm now approximates the shortest distance from each point to the
+    curve in dehatspace. Since the fit is done in hatspace, it is easier to
+    find the actual shortest distance in hatspace and use the projection back
+    into dehatspace as an overestimate.
+    The distance will be used for two things:
+        1) In the case that colour temperature does not strictly decrease with
+        increasing r/g, the closest point to the line will be chosen out of an
+        increasing pair of colours.
+
+        2) To calculate transverse negative an dpositive, the maximum positive
+        and negative distance from the line are chosen. This benefits from the
+        overestimate as the transverse pos/neg are upper bound values.
+    """
+    """
+    define fit function
+    """
+    def f(x):
+        return a*x**2 + b*x + c
+    """
+    iterate over points (R, B are x and y coordinates of points) and calculate
+    distance to line in dehatspace
+    """
+    dists = []
+    for i, (R, B) in enumerate(zip(rbs_hat[0], rbs_hat[1])):
+        """
+        define function to minimise as square distance between datapoint and
+        point on curve. Squaring is monotonic so minimising radius squared is
+        equivalent to minimising radius
+        """
+        def f_min(x):
+            y = f(x)
+            return((x-R)**2+(y-B)**2)
+        """
+        perform optimisation with scipy.optmisie.fmin
+        """
+        x_hat = fmin(f_min, R, disp=0)[0]
+        y_hat = f(x_hat)
+        """
+        dehat
+        """
+        x = x_hat/(1-x_hat-y_hat)
+        y = y_hat/(1-x_hat-y_hat)
+        rr = R/(1-R-B)
+        bb = B/(1-R-B)
+        """
+        calculate euclidean distance in dehatspace
+        """
+        dist = ((x-rr)**2+(y-bb)**2)**0.5
+        """
+        return negative if point is below the fit curve
+        """
+        if (x+y) > (rr+bb):
+            dist *= -1
+        dists.append(dist)
+    Cam.log += '\nFound closest point on fit line to each point in dehatspace'
+    """
+    calculate wiggle factors in awb. 10% added since this is an upper bound
+    """
+    transverse_neg = - np.min(dists) * 1.1
+    transverse_pos = np.max(dists) * 1.1
+    Cam.log += '\nTransverse pos : {:.5f}'.format(transverse_pos)
+    Cam.log += '\nTransverse neg : {:.5f}'.format(transverse_neg)
+    """
+    set minimum transverse wiggles to 0.1 .
+    Wiggle factors dictate how far off of the curve the algorithm searches. 0.1
+    is a suitable minimum that gives better results for lighting conditions not
+    within calibration dataset. Anything less will generalise poorly.
+    """
+    if transverse_pos < 0.01:
+        transverse_pos = 0.01
+        Cam.log += '\nForced transverse pos to 0.01'
+    if transverse_neg < 0.01:
+        transverse_neg = 0.01
+        Cam.log += '\nForced transverse neg to 0.01'
+
+    """
+    generate new b_hat values at each r_hat according to fit
+    """
+    r_hat_fit = np.array(rbs_hat[0])
+    b_hat_fit = a*r_hat_fit**2 + b*r_hat_fit + c
+    """
+    transform from hatspace to dehatspace
+    """
+    r_fit = r_hat_fit/(1-r_hat_fit-b_hat_fit)
+    b_fit = b_hat_fit/(1-r_hat_fit-b_hat_fit)
+    c_fit = np.round(rbs_hat[2], 0)
+    """
+    round to 4dp
+    """
+    r_fit = np.where((1000*r_fit) % 1 <= 0.05, r_fit+0.0001, r_fit)
+    r_fit = np.where((1000*r_fit) % 1 >= 0.95, r_fit-0.0001, r_fit)
+    b_fit = np.where((1000*b_fit) % 1 <= 0.05, b_fit+0.0001, b_fit)
+    b_fit = np.where((1000*b_fit) % 1 >= 0.95, b_fit-0.0001, b_fit)
+    r_fit = np.round(r_fit, 4)
+    b_fit = np.round(b_fit, 4)
+    """
+    The following code ensures that colour temperature decreases with
+    increasing r/g
+    """
+    """
+    iterate backwards over list for easier indexing
+    """
+    i = len(c_fit) - 1
+    while i > 0:
+        if c_fit[i] > c_fit[i-1]:
+            Cam.log += '\nColour temperature increase found\n'
+            Cam.log += '{} K at r = {} to '.format(c_fit[i-1], r_fit[i-1])
+            Cam.log += '{} K at r = {}'.format(c_fit[i], r_fit[i])
+            """
+            if colour temperature increases then discard point furthest from
+            the transformed fit (dehatspace)
+            """
+            error_1 = abs(dists[i-1])
+            error_2 = abs(dists[i])
+            Cam.log += '\nDistances from fit:\n'
+            Cam.log += '{} K : {:.5f} , '.format(c_fit[i], error_1)
+            Cam.log += '{} K : {:.5f}'.format(c_fit[i-1], error_2)
+            """
+            find bad index
+            note that in python false = 0 and true = 1
+            """
+            bad = i - (error_1 < error_2)
+            Cam.log += '\nPoint at {} K deleted as '.format(c_fit[bad])
+            Cam.log += 'it is furthest from fit'
+            """
+            delete bad point
+            """
+            r_fit = np.delete(r_fit, bad)
+            b_fit = np.delete(b_fit, bad)
+            c_fit = np.delete(c_fit, bad).astype(np.uint16)
+        """
+        note that if a point has been discarded then the length has decreased
+        by one, meaning that decreasing the index by one will reassess the kept
+        point against the next point. It is therefore possible, in theory, for
+        two adjacent points to be discarded, although probably rare
+        """
+        i -= 1
+
+    """
+    return formatted ct curve, ordered by increasing colour temperature
+    """
+    ct_curve = list(np.array(list(zip(b_fit, r_fit, c_fit))).flatten())[::-1]
+    Cam.log += '\nFinal CT curve:'
+    for i in range(len(ct_curve)//3):
+        j = 3*i
+        Cam.log += '\n  ct: {}  '.format(ct_curve[j])
+        Cam.log += '  r: {}  '.format(ct_curve[j+1])
+        Cam.log += '  b: {}  '.format(ct_curve[j+2])
+
+    """
+    plotting code for debug
+    """
+    if plot:
+        x = np.linspace(np.min(rbs_hat[0]), np.max(rbs_hat[0]), 100)
+        y = a*x**2 + b*x + c
+        plt.subplot(2, 1, 1)
+        plt.title('hatspace')
+        plt.plot(rbs_hat[0], rbs_hat[1], ls='--', color='blue')
+        plt.plot(x, y, color='green', ls='-')
+        plt.scatter(rbs_hat[0], rbs_hat[1], color='red')
+        for i, ct in enumerate(rbs_hat[2]):
+            plt.annotate(str(ct), (rbs_hat[0][i], rbs_hat[1][i]))
+        plt.xlabel('$\\hat{r}$')
+        plt.ylabel('$\\hat{b}$')
+        """
+        optional set axes equal to shortest distance so line really does
+        looks perpendicular and everybody is happy
+        """
+        # ax = plt.gca()
+        # ax.set_aspect('equal')
+        plt.grid()
+        plt.subplot(2, 1, 2)
+        plt.title('dehatspace - indoors?')
+        plt.plot(r_fit, b_fit, color='blue')
+        plt.scatter(rb_raw[0], rb_raw[1], color='green')
+        plt.scatter(r_fit, b_fit, color='red')
+        for i, ct in enumerate(c_fit):
+            plt.annotate(str(ct), (r_fit[i], b_fit[i]))
+        plt.xlabel('$r$')
+        plt.ylabel('$b$')
+        """
+        optional set axes equal to shortest distance so line really does
+        looks perpendicular and everybody is happy
+        """
+        # ax = plt.gca()
+        # ax.set_aspect('equal')
+        plt.subplots_adjust(hspace=0.5)
+        plt.grid()
+        plt.show()
+    """
+    end of plotting code
+    """
+    return(ct_curve, np.round(transverse_pos, 5), np.round(transverse_neg, 5))
+
+
+"""
+obtain greyscale patches and perform alsc colour correction
+"""
+def get_alsc_patches(Img, colour_cals, grey=True):
+    """
+    get patch centre coordinates, image colour and the actual
+    patches for each channel, remembering to subtract blacklevel
+    If grey then only greyscale patches considered
+    """
+    if grey:
+        cen_coords = Img.cen_coords[3::4]
+        col = Img.col
+        patches = [np.array(Img.patches[i]) for i in Img.order]
+        r_patchs = patches[0][3::4] - Img.blacklevel_16
+        b_patchs = patches[3][3::4] - Img.blacklevel_16
+        """
+        note two green channels are averages
+        """
+        g_patchs = (patches[1][3::4]+patches[2][3::4])/2 - Img.blacklevel_16
+    else:
+        cen_coords = Img.cen_coords
+        col = Img.col
+        patches = [np.array(Img.patches[i]) for i in Img.order]
+        r_patchs = patches[0] - Img.blacklevel_16
+        b_patchs = patches[3] - Img.blacklevel_16
+        g_patchs = (patches[1]+patches[2])/2 - Img.blacklevel_16
+
+    if colour_cals is None:
+        return r_patchs, b_patchs, g_patchs
+    """
+    find where image colour fits in alsc colour calibration tables
+    """
+    cts = list(colour_cals.keys())
+    pos = bisect_left(cts, col)
+    """
+    if img colour is below minimum or above maximum alsc calibration colour, simply
+    pick extreme closest to img colour
+    """
+    if pos % len(cts) == 0:
+        """
+        this works because -0 = 0 = first and -1 = last index
+        """
+        col_tabs = np.array(colour_cals[cts[-pos//len(cts)]])
+        """
+    else, perform linear interpolation between existing alsc colour
+    calibration tables
+    """
+    else:
+        bef = cts[pos-1]
+        aft = cts[pos]
+        da = col-bef
+        db = aft-col
+        bef_tabs = np.array(colour_cals[bef])
+        aft_tabs = np.array(colour_cals[aft])
+        col_tabs = (bef_tabs*db + aft_tabs*da)/(da+db)
+    col_tabs = np.reshape(col_tabs, (2, 12, 16))
+    """
+    calculate dx, dy used to calculate alsc table
+    """
+    w, h = Img.w/2, Img.h/2
+    dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
+    """
+    make list of pairs of gains for each patch by selecting the correct value
+    in alsc colour calibration table
+    """
+    patch_gains = []
+    for cen in cen_coords:
+        x, y = cen[0]//dx, cen[1]//dy
+        # We could probably do with some better spatial interpolation here?
+        col_gains = (col_tabs[0][y][x], col_tabs[1][y][x])
+        patch_gains.append(col_gains)
+
+    """
+    multiply the r and b channels in each patch by the respective gain, finally
+    performing the alsc colour correction
+    """
+    for i, gains in enumerate(patch_gains):
+        r_patchs[i] = r_patchs[i] * gains[0]
+        b_patchs[i] = b_patchs[i] * gains[1]
+
+    """
+    return greyscale patches, g channel and correct r, b channels
+    """
+    return r_patchs, b_patchs, g_patchs
diff --git a/utils/tuning/libtuning/ctt_ccm.py b/utils/tuning/libtuning/ctt_ccm.py
new file mode 100644
index 000000000000..59753e332ee9
--- /dev/null
+++ b/utils/tuning/libtuning/ctt_ccm.py
@@ -0,0 +1,406 @@ 
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# camera tuning tool for CCM (colour correction matrix)
+
+from ctt_image_load import *
+from ctt_awb import get_alsc_patches
+import colors
+from scipy.optimize import minimize
+from ctt_visualise import visualise_macbeth_chart
+import numpy as np
+"""
+takes 8-bit macbeth chart values, degammas and returns 16 bit
+"""
+
+'''
+This program has many options from which to derive the color matrix from.
+The first is average. This minimises the average delta E across all patches of
+the macbeth chart. Testing across all cameras yeilded this as the most color
+accurate and vivid. Other options are avalible however.
+Maximum minimises the maximum Delta E of the patches. It iterates through till
+a minimum maximum is found (so that there is
+not one patch that deviates wildly.)
+This yields generally good results but overall the colors are less accurate
+Have a fiddle with maximum and see what you think.
+The final option allows you to select the patches for which to average across.
+This means that you can bias certain patches, for instance if you want the
+reds to be more accurate.
+'''
+
+matrix_selection_types = ["average", "maximum", "patches"]
+typenum = 0  # select from array above, 0 = average, 1 = maximum, 2 = patches
+test_patches = [1, 2, 5, 8, 9, 12, 14]
+
+'''
+Enter patches to test for. Can also be entered twice if you
+would like twice as much bias on one patch.
+'''
+
+
+def degamma(x):
+    x = x / ((2 ** 8) - 1)  # takes 255 and scales it down to one
+    x = np.where(x < 0.04045, x / 12.92, ((x + 0.055) / 1.055) ** 2.4)
+    x = x * ((2 ** 16) - 1)  # takes one and scales up to 65535, 16 bit color
+    return x
+
+
+def gamma(x):
+    # Take 3 long array of color values and gamma them
+    return [((colour / 255) ** (1 / 2.4) * 1.055 - 0.055) * 255 for colour in x]
+
+
+"""
+FInds colour correction matrices for list of images
+"""
+
+
+def ccm(Cam, cal_cr_list, cal_cb_list):
+    global matrix_selection_types, typenum
+    imgs = Cam.imgs
+    """
+    standard macbeth chart colour values
+    """
+    m_rgb = np.array([  # these are in RGB
+        [116, 81, 67],    # dark skin
+        [199, 147, 129],  # light skin
+        [91, 122, 156],   # blue sky
+        [90, 108, 64],    # foliage
+        [130, 128, 176],  # blue flower
+        [92, 190, 172],   # bluish green
+        [224, 124, 47],   # orange
+        [68, 91, 170],    # purplish blue
+        [198, 82, 97],    # moderate red
+        [94, 58, 106],    # purple
+        [159, 189, 63],   # yellow green
+        [230, 162, 39],   # orange yellow
+        [35, 63, 147],    # blue
+        [67, 149, 74],    # green
+        [180, 49, 57],    # red
+        [238, 198, 20],   # yellow
+        [193, 84, 151],   # magenta
+        [0, 136, 170],    # cyan (goes out of gamut)
+        [245, 245, 243],  # white 9.5
+        [200, 202, 202],  # neutral 8
+        [161, 163, 163],  # neutral 6.5
+        [121, 121, 122],  # neutral 5
+        [82, 84, 86],     # neutral 3.5
+        [49, 49, 51]      # black 2
+    ])
+    """
+    convert reference colours from srgb to rgb
+    """
+    m_srgb = degamma(m_rgb)  # now in 16 bit color.
+
+    # Produce array of LAB values for ideal color chart
+    m_lab = [colors.RGB_to_LAB(color / 256) for color in m_srgb]
+
+    """
+    reorder reference values to match how patches are ordered
+    """
+    m_srgb = np.array([m_srgb[i::6] for i in range(6)]).reshape((24, 3))
+    m_lab = np.array([m_lab[i::6] for i in range(6)]).reshape((24, 3))
+    m_rgb = np.array([m_rgb[i::6] for i in range(6)]).reshape((24, 3))
+    """
+    reformat alsc correction tables or set colour_cals to None if alsc is
+    deactivated
+    """
+    if cal_cr_list is None:
+        colour_cals = None
+    else:
+        colour_cals = {}
+        for cr, cb in zip(cal_cr_list, cal_cb_list):
+            cr_tab = cr['table']
+            cb_tab = cb['table']
+            """
+            normalise tables so min value is 1
+            """
+            cr_tab = cr_tab / np.min(cr_tab)
+            cb_tab = cb_tab / np.min(cb_tab)
+            colour_cals[cr['ct']] = [cr_tab, cb_tab]
+
+    """
+    for each image, perform awb and alsc corrections.
+    Then calculate the colour correction matrix for that image, recording the
+    ccm and the colour tempertaure.
+    """
+    ccm_tab = {}
+    for Img in imgs:
+        Cam.log += '\nProcessing image: ' + Img.name
+        """
+        get macbeth patches with alsc applied if alsc enabled.
+        Note: if alsc is disabled then colour_cals will be set to None and no
+        the function will simply return the macbeth patches
+        """
+        r, b, g = get_alsc_patches(Img, colour_cals, grey=False)
+        # 256 values for each patch of sRGB values
+
+        """
+        do awb
+        Note: awb is done by measuring the macbeth chart in the image, rather
+        than from the awb calibration. This is done so the awb will be perfect
+        and the ccm matrices will be more accurate.
+        """
+        r_greys, b_greys, g_greys = r[3::4], b[3::4], g[3::4]
+        r_g = np.mean(r_greys / g_greys)
+        b_g = np.mean(b_greys / g_greys)
+        r = r / r_g
+        b = b / b_g
+        """
+        normalise brightness wrt reference macbeth colours and then average
+        each channel for each patch
+        """
+        gain = np.mean(m_srgb) / np.mean((r, g, b))
+        Cam.log += '\nGain with respect to standard colours: {:.3f}'.format(gain)
+        r = np.mean(gain * r, axis=1)
+        b = np.mean(gain * b, axis=1)
+        g = np.mean(gain * g, axis=1)
+        """
+        calculate ccm matrix
+        """
+        # ==== All of the below should be in sRGB ====
+        ccm = do_ccm(r, g, b, m_srgb)
+        # This is the initial guess that our optimisation code works with.
+        original_ccm = ccm
+        r1 = ccm[0]
+        r2 = ccm[1]
+        g1 = ccm[3]
+        g2 = ccm[4]
+        b1 = ccm[6]
+        b2 = ccm[7]
+        '''
+        The colour matrix multiplies the input RGB vector:
+            [R1 R2 R3]   [Rval]   [Rout]
+            [G1 G2 G3] * [Gval] = [Gout]
+            [B1 B2 B3]   [Bval]   [Bout]
+        Only six elements are optimised; the third element of each row is
+        recovered from the constraint that each row sums to one
+        (e.g. r3 = 1 - r1 - r2).
+        '''
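+        # A matrix whose rows each sum to one maps grey to grey; a quick
+        # illustrative check (editor's sketch, not part of the original
+        # script):
+        #   np.dot([[0.8, 0.3, -0.1],
+        #           [0.1, 0.7, 0.2],
+        #           [-0.2, 0.4, 0.8]], [0.5, 0.5, 0.5]) -> [0.5, 0.5, 0.5]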
+
+        x0 = [r1, r2, g1, g2, b1, b2]
+        '''
+        We use our old CCM as the initial guess for the program to find the
+        optimised matrix
+        '''
+        result = minimize(guess, x0, args=(r, g, b, m_lab), tol=0.01)
+        '''
+        This produces the colour matrix with the lowest delta E achievable
+        for the input data. Note that it cannot reach zero, since the input
+        data is imperfect
+        '''
+
+        Cam.log += ("\n \n Optimised Matrix Below: \n \n")
+        [r1, r2, g1, g2, b1, b2] = result.x
+        # The new, optimised color correction matrix values
+        optimised_ccm = [r1, r2, (1 - r1 - r2), g1, g2, (1 - g1 - g2), b1, b2, (1 - b1 - b2)]
+
+        # This is the optimised colour matrix (greys are preserved because each row sums to 1)
+        Cam.log += str(optimised_ccm)
+        Cam.log += "\n Old Color Correction Matrix Below \n"
+        Cam.log += str(ccm)
+
+        formatted_ccm = np.array(original_ccm).reshape((3, 3))
+
+        '''
+        the code below applies both the original and the optimised colour
+        matrices to the measured patches and converts the results to LAB,
+        which is then used to calculate the final delta E
+        '''
+        optimised_ccm_rgb = []  # despite the name, these two lists hold the
+        optimised_ccm_lab = []  # outputs of the original (unoptimised) matrix
+
+        formatted_optimised_ccm = np.array(optimised_ccm).reshape((3, 3))
+        after_gamma_rgb = []
+        after_gamma_lab = []
+
+        for RGB in zip(r, g, b):
+            ccm_applied_rgb = np.dot(formatted_ccm, (np.array(RGB) / 256))
+            optimised_ccm_rgb.append(gamma(ccm_applied_rgb))
+            optimised_ccm_lab.append(colors.RGB_to_LAB(ccm_applied_rgb))
+
+            optimised_ccm_applied_rgb = np.dot(formatted_optimised_ccm, np.array(RGB) / 256)
+            after_gamma_rgb.append(gamma(optimised_ccm_applied_rgb))
+            after_gamma_lab.append(colors.RGB_to_LAB(optimised_ccm_applied_rgb))
+        '''
+        The after_gamma lists are not used in the calculations, only for
+        visualisation. We now log some data showing how the optimisation
+        has improved the colour matrices
+        '''
+        Cam.log += '\nHere are the improvements:'
+
+        # CALCULATE WORST CASE delta e
+        old_worst_delta_e = 0
+        before_average = transform_and_evaluate(formatted_ccm, r, g, b, m_lab)
+        new_worst_delta_e = 0
+        after_average = transform_and_evaluate(formatted_optimised_ccm, r, g, b, m_lab)
+        for i in range(24):
+            old_delta_e = deltae(optimised_ccm_lab[i], m_lab[i])  # Current Old Delta E
+            new_delta_e = deltae(after_gamma_lab[i], m_lab[i])  # Current New Delta E
+            if old_delta_e > old_worst_delta_e:
+                old_worst_delta_e = old_delta_e
+            if new_delta_e > new_worst_delta_e:
+                new_worst_delta_e = new_delta_e
+
+        Cam.log += "Before color correction matrix was optimised, we got an average delta E of " + str(before_average) + " and a maximum delta E of " + str(old_worst_delta_e)
+        Cam.log += "After color correction matrix was optimised, we got an average delta E of " + str(after_average) + " and a maximum delta E of " + str(new_worst_delta_e)
+
+        visualise_macbeth_chart(m_rgb, optimised_ccm_rgb, after_gamma_rgb, str(Img.col) + str(matrix_selection_types[typenum]))
+        '''
+        The program also saves some visualisations of the improvements.
+        Very pretty to look at. The top rectangle is the ideal chart, the
+        left square is before optimisation and the right square is after.
+        '''
+
+        """
+        if a ccm has already been calculated for that temperature then don't
+        overwrite but save both. They will then be averaged later on
+        """  # Now going to use optimised color matrix, optimised_ccm
+        if Img.col in ccm_tab:
+            ccm_tab[Img.col].append(optimised_ccm)
+        else:
+            ccm_tab[Img.col] = [optimised_ccm]
+        Cam.log += '\n'
+
+    Cam.log += '\nFinished processing images'
+    """
+    average any ccms that share a colour temperature
+    """
+    for k, v in ccm_tab.items():
+        tab = np.mean(v, axis=0)
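+        # The two np.where() calls below nudge values lying almost exactly on
+        # a multiple of 1e-4 slightly off that grid before rounding to five
+        # decimal places (editor's reading of the original intent: avoid
+        # borderline rounding of such values).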
+        tab = np.where((10000 * tab) % 1 <= 0.05, tab + 0.00001, tab)
+        tab = np.where((10000 * tab) % 1 >= 0.95, tab - 0.00001, tab)
+        ccm_tab[k] = list(np.round(tab, 5))
+        Cam.log += '\nMatrix calculated for colour temperature of {} K'.format(k)
+
+    """
+    return all ccms with respective colour temperature in the correct format,
+    sorted by their colour temperature
+    """
+    sorted_ccms = sorted(ccm_tab.items(), key=lambda kv: kv[0])
+    ccms = []
+    for i in sorted_ccms:
+        ccms.append({
+            'ct': i[0],
+            'ccm': i[1]
+        })
+    return ccms
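+
+# Editor's note: an illustrative sketch (not part of the original script) of
+# the structure returned above; the temperatures and matrix values are
+# hypothetical:
+#
+#   [{'ct': 2960, 'ccm': [1.52, -0.41, -0.11,
+#                         -0.30, 1.45, -0.15,
+#                         -0.05, -0.42, 1.47]},
+#    {'ct': 5600, 'ccm': [...]}]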
+
+
+def guess(x0, r, g, b, m_lab):       # provides a method of numerical feedback for the optimisation code
+    [r1, r2, g1, g2, b1, b2] = x0
+    ccm = np.array([r1, r2, (1 - r1 - r2),
+                    g1, g2, (1 - g1 - g2),
+                    b1, b2, (1 - b1 - b2)]).reshape((3, 3))  # format the matrix correctly
+    return transform_and_evaluate(ccm, r, g, b, m_lab)
+
+
+def transform_and_evaluate(ccm, r, g, b, m_lab):  # Applies the correction matrix, then converts to LAB and scores the result
+    # create list of matrix changed colors
+    realrgb = []
+    for RGB in zip(r, g, b):
+        rgb_post_ccm = np.dot(ccm, np.array(RGB) / 256)  # This is RGB values after the color correction matrix has been applied
+        realrgb.append(colors.RGB_to_LAB(rgb_post_ccm))
+    # now compare that with m_lab and return numeric result, averaged for each patch
+    return (sumde(realrgb, m_lab) / 24)  # returns an average result of delta E
+
+
+def sumde(listA, listB):
+    global typenum, test_patches
+    sumde = 0
+    maxde = 0
+    patchde = []  # Create array of the delta E values for each patch. Useful for optimisation of certain patches
+    for listA_item, listB_item in zip(listA, listB):
+        de = deltae(listA_item, listB_item)  # compute the delta E once per patch pair
+        maxde = max(maxde, de)
+        patchde.append(de)
+        sumde += de
+    '''
+    The different options specified at the start allow for
+    the maximum to be returned, average or specific patches
+    '''
+    if typenum == 0:
+        return sumde
+    if typenum == 1:
+        return maxde
+    if typenum == 2:
+        output = sum([patchde[test_patch] for test_patch in test_patches])
+        # Selects only certain patches and returns the output for them
+        return output
+
+
+"""
+calculates the ccm for an individual image.
+ccms are calculated in rgb space, and are fit by hand. Although it is a 3x3
+matrix, each row must add up to 1 in order to conserve greyness, simplifying
+calculation.
+The initial CCM is calculated in RGB, and then optimised in LAB color space
+This simplifies the initial calculation but then gets us the accuracy of
+using LAB color space.
+"""
+
+
+def do_ccm(r, g, b, m_srgb):
+    rb = r-b
+    gb = g-b
+    rb_2s = (rb * rb)
+    rb_gbs = (rb * gb)
+    gb_2s = (gb * gb)
+
+    r_rbs = rb * (m_srgb[..., 0] - b)
+    r_gbs = gb * (m_srgb[..., 0] - b)
+    g_rbs = rb * (m_srgb[..., 1] - b)
+    g_gbs = gb * (m_srgb[..., 1] - b)
+    b_rbs = rb * (m_srgb[..., 2] - b)
+    b_gbs = gb * (m_srgb[..., 2] - b)
+
+    """
+    Obtain least squares fit
+    """
+    rb_2 = np.sum(rb_2s)
+    gb_2 = np.sum(gb_2s)
+    rb_gb = np.sum(rb_gbs)
+    r_rb = np.sum(r_rbs)
+    r_gb = np.sum(r_gbs)
+    g_rb = np.sum(g_rbs)
+    g_gb = np.sum(g_gbs)
+    b_rb = np.sum(b_rbs)
+    b_gb = np.sum(b_gbs)
+
+    det = rb_2 * gb_2 - rb_gb * rb_gb
+
+    """
+    Raise error if matrix is singular...
+    This shouldn't really happen with real data but if it does just take new
+    pictures and try again, not much else to be done unfortunately...
+    """
+    if det < 0.001:
+        raise ArithmeticError('CCM fit matrix is singular')
+
+    r_a = (gb_2 * r_rb - rb_gb * r_gb) / det
+    r_b = (rb_2 * r_gb - rb_gb * r_rb) / det
+    """
+    Last row can be calculated by knowing the sum must be 1
+    """
+    r_c = 1 - r_a - r_b
+
+    g_a = (gb_2 * g_rb - rb_gb * g_gb) / det
+    g_b = (rb_2 * g_gb - rb_gb * g_rb) / det
+    g_c = 1 - g_a - g_b
+
+    b_a = (gb_2 * b_rb - rb_gb * b_gb) / det
+    b_b = (rb_2 * b_gb - rb_gb * b_rb) / det
+    b_c = 1 - b_a - b_b
+
+    """
+    format ccm
+    """
+    ccm = [r_a, r_b, r_c, g_a, g_b, g_c, b_a, b_b, b_c]
+
+    return ccm
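+
+# Editor's sketch (not part of the original script): a quick sanity check of
+# do_ccm(). If the measured channels already equal the reference colours, the
+# constrained least-squares fit returns approximately the identity matrix:
+#
+#   ref = np.array([[50, 100, 150], [200, 80, 40],
+#                   [30, 160, 90], [120, 60, 200]], dtype=float)
+#   do_ccm(ref[:, 0], ref[:, 1], ref[:, 2], ref)
+#   # -> [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0] (up to float error)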
+
+
+def deltae(colorA, colorB):
+    return ((colorA[0] - colorB[0]) ** 2 + (colorA[1] - colorB[1]) ** 2 + (colorA[2] - colorB[2]) ** 2) ** 0.5
+    # return ((colorA[1] - colorB[1]) ** 2 + (colorA[2] - colorB[2]) ** 2) ** 0.5
+    # UNCOMMENT IF YOU WANT TO NEGLECT LUMINANCE FROM CALCULATION OF DELTA E
diff --git a/utils/tuning/libtuning/ctt_colors.py b/utils/tuning/libtuning/ctt_colors.py
new file mode 100644
index 000000000000..cb4d236b04d7
--- /dev/null
+++ b/utils/tuning/libtuning/ctt_colors.py
@@ -0,0 +1,30 @@ 
+# Program to convert from RGB to LAB color space
+def RGB_to_LAB(RGB):  # where RGB is a 1x3 array, e.g. RGB = [100, 255, 230]
+    XYZ = [0, 0, 0]
+    # convert the three R, G, B values to X, Y, Z
+    X = RGB[0] * 0.4124 + RGB[1] * 0.3576 + RGB[2] * 0.1805
+    Y = RGB[0] * 0.2126 + RGB[1] * 0.7152 + RGB[2] * 0.0722
+    Z = RGB[0] * 0.0193 + RGB[1] * 0.1192 + RGB[2] * 0.9505
+
+    XYZ[0] = X / 255 * 100
+    XYZ[1] = Y / 255 * 100  # XYZ Must be in range 0 -> 100, so scale down from 255
+    XYZ[2] = Z / 255 * 100
+    XYZ[0] = XYZ[0] / 95.047  # ref_X =  95.047   Observer= 2°, Illuminant= D65
+    XYZ[1] = XYZ[1] / 100.0  # ref_Y = 100.000
+    XYZ[2] = XYZ[2] / 108.883  # ref_Z = 108.883
+    for num, value in enumerate(XYZ):
+        if value > 0.008856:
+            value = value ** (1 / 3)
+        else:
+            value = (7.787 * value) + (16 / 116)
+        XYZ[num] = value
+
+    # L, A, B, values calculated below
+    L = (116 * XYZ[1]) - 16
+    a = 500 * (XYZ[0] - XYZ[1])
+    b = 200 * (XYZ[1] - XYZ[2])
+
+    return [L, a, b]
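+
+# Editor's note, an illustrative check (not part of the original script):
+# RGB_to_LAB([255, 255, 255]) returns approximately [100.0, 0.0, 0.0],
+# i.e. D65 reference white maps to L* = 100 with near-zero chroma.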
diff --git a/utils/tuning/libtuning/ctt_ransac.py b/utils/tuning/libtuning/ctt_ransac.py
new file mode 100644
index 000000000000..01bba3022ef0
--- /dev/null
+++ b/utils/tuning/libtuning/ctt_ransac.py
@@ -0,0 +1,71 @@ 
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2019, Raspberry Pi Ltd
+#
+# camera tuning tool RANSAC selector for Macbeth chart locator
+
+import numpy as np
+
+scale = 2
+
+
+"""
+constructs normalised macbeth chart corners for ransac algorithm
+"""
+def get_square_verts(c_err=0.05, scale=scale):
+    """
+    define macbeth chart corners
+    """
+    b_bord_x, b_bord_y = scale*8.5, scale*13
+    s_bord = 6*scale
+    side = 41*scale
+    x_max = side*6 + 5*s_bord + 2*b_bord_x
+    y_max = side*4 + 3*s_bord + 2*b_bord_y
+    c1 = (0, 0)
+    c2 = (0, y_max)
+    c3 = (x_max, y_max)
+    c4 = (x_max, 0)
+    mac_norm = np.array((c1, c2, c3, c4), np.float32)
+    mac_norm = np.array([mac_norm])
+
+    square_verts = []
+    square_0 = np.array(((0, 0), (0, side),
+                         (side, side), (side, 0)), np.float32)
+    offset_0 = np.array((b_bord_x, b_bord_y), np.float32)
+    c_off = side * c_err
+    offset_cont = np.array(((c_off, c_off), (c_off, -c_off),
+                            (-c_off, -c_off), (-c_off, c_off)), np.float32)
+    square_0 += offset_0
+    square_0 += offset_cont
+    """
+    define macbeth square corners
+    """
+    for i in range(6):
+        shift_i = np.array(((i*side, 0), (i*side, 0),
+                            (i*side, 0), (i*side, 0)), np.float32)
+        shift_bord = np.array(((i*s_bord, 0), (i*s_bord, 0),
+                               (i*s_bord, 0), (i*s_bord, 0)), np.float32)
+        square_i = square_0 + shift_i + shift_bord
+        for j in range(4):
+            shift_j = np.array(((0, j*side), (0, j*side),
+                                (0, j*side), (0, j*side)), np.float32)
+            shift_bord = np.array(((0, j*s_bord),
+                                   (0, j*s_bord), (0, j*s_bord),
+                                   (0, j*s_bord)), np.float32)
+            square_j = square_i + shift_j + shift_bord
+            square_verts.append(square_j)
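+    # Editor's note: square_verts has shape (24, 4, 2), one quad of (x, y)
+    # vertices per chart square, and mac_norm has shape (1, 4, 2) holding
+    # the four outer chart corners.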
+    return np.array(square_verts, np.float32), mac_norm
+
+
+def get_square_centres(c_err=0.05, scale=scale):
+    """
+    define macbeth square centres
+    """
+    verts, mac_norm = get_square_verts(c_err, scale=scale)
+
+    centres = np.mean(verts, axis=1)
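+    # Editor's note: centres has shape (24, 2), the mean of each square's
+    # four vertices.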
+    return np.array(centres, np.float32)