[2/6] utils: raspberrypi: ctt: Added CAC support to the CTT

Message ID 20240606101512.375178-3-david.plowman@raspberrypi.com
State Accepted
Series: Raspberry Pi Camera Tuning Tool updates

Commit Message

David Plowman June 6, 2024, 10:15 a.m. UTC
From: Ben Benson <benbenson2004@gmail.com>

Added the ability to tune the chromatic aberration correction
within the CTT. It can be run standalone via cac_only.py or as part
of a larger tuning process. The CTT will now recognise files with
"cac" in their name as chromatic aberration tuning files.

Signed-off-by: Ben Benson <ben.benson@raspberrypi.com>
Reviewed-by: Naushir Patuck <naush@raspberrypi.com>
---
 utils/raspberrypi/ctt/alsc_pisp.py            |   2 +-
 utils/raspberrypi/ctt/cac_only.py             | 143 +++++++++++
 utils/raspberrypi/ctt/ctt_cac.py              | 228 ++++++++++++++++++
 utils/raspberrypi/ctt/ctt_dots_locator.py     | 118 +++++++++
 utils/raspberrypi/ctt/ctt_image_load.py       |   2 +
 utils/raspberrypi/ctt/ctt_log.txt             |  31 +++
 utils/raspberrypi/ctt/ctt_pisp.py             |   2 +
 .../raspberrypi/ctt/ctt_pretty_print_json.py  |   4 +
 utils/raspberrypi/ctt/ctt_run.py              |  85 ++++++-
 9 files changed, 606 insertions(+), 9 deletions(-)
 create mode 100644 utils/raspberrypi/ctt/cac_only.py
 create mode 100644 utils/raspberrypi/ctt/ctt_cac.py
 create mode 100644 utils/raspberrypi/ctt/ctt_dots_locator.py
 create mode 100644 utils/raspberrypi/ctt/ctt_log.txt
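
For reference, the classification added in ctt_run.py keys off the filename. A minimal sketch (not part of the patch; it assumes the pre-existing ALSC branch matches the same way, and the filename is illustrative):

    # Sketch of how ctt_run.py sorts calibration images by filename
    def classify(filename):
        if 'alsc' in filename:
            return 'alsc'
        elif 'cac' in filename:    # the branch this patch adds
            return 'cac'
        return 'macbeth'

    assert classify('cac_3000k_0.dng') == 'cac'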

Comments

Kieran Bingham June 13, 2024, 9:54 a.m. UTC | #1
Quoting David Plowman (2024-06-06 11:15:08)
> From: Ben Benson <benbenson2004@gmail.com>
> 
> Added the ability to tune the chromatic aberration correction
> within the CTT. It can be run standalone via cac_only.py or as part
> of a larger tuning process. The CTT will now recognise files with
> "cac" in their name as chromatic aberration tuning files.
> 
> Signed-off-by: Ben Benson <ben.benson@raspberrypi.com>

This and the successive patch were rejected by our push commit hooks as
the author (From: Ben Benson <benbenson2004@gmail.com>) has not signed
off on the commit. However, rather conveniently ... "Ben Benson
<ben.benson@raspberrypi.com>" has.

I'll take the liberty to re-author the commits under the identity "Ben
Benson <ben.benson@raspberrypi.com>" as I believe that's the original
intent here.

--
Kieran


> Reviewed-by: Naushir Patuck <naush@raspberrypi.com>
> ---
>  utils/raspberrypi/ctt/alsc_pisp.py            |   2 +-
>  utils/raspberrypi/ctt/cac_only.py             | 143 +++++++++++
>  utils/raspberrypi/ctt/ctt_cac.py              | 228 ++++++++++++++++++
>  utils/raspberrypi/ctt/ctt_dots_locator.py     | 118 +++++++++
>  utils/raspberrypi/ctt/ctt_image_load.py       |   2 +
>  utils/raspberrypi/ctt/ctt_log.txt             |  31 +++
>  utils/raspberrypi/ctt/ctt_pisp.py             |   2 +
>  .../raspberrypi/ctt/ctt_pretty_print_json.py  |   4 +
>  utils/raspberrypi/ctt/ctt_run.py              |  85 ++++++-
>  9 files changed, 606 insertions(+), 9 deletions(-)
>  create mode 100644 utils/raspberrypi/ctt/cac_only.py
>  create mode 100644 utils/raspberrypi/ctt/ctt_cac.py
>  create mode 100644 utils/raspberrypi/ctt/ctt_dots_locator.py
>  create mode 100644 utils/raspberrypi/ctt/ctt_log.txt
> 
> diff --git a/utils/raspberrypi/ctt/alsc_pisp.py b/utils/raspberrypi/ctt/alsc_pisp.py
> index 499aecd1..d0034ae1 100755
> --- a/utils/raspberrypi/ctt/alsc_pisp.py
> +++ b/utils/raspberrypi/ctt/alsc_pisp.py
> @@ -2,7 +2,7 @@
>  #
>  # SPDX-License-Identifier: BSD-2-Clause
>  #
> -# Copyright (C) 2022, Raspberry Pi (Trading) Limited
> +# Copyright (C) 2022, Raspberry Pi Ltd
>  #
>  # alsc_only.py - alsc tuning tool
>  
> diff --git a/utils/raspberrypi/ctt/cac_only.py b/utils/raspberrypi/ctt/cac_only.py
> new file mode 100644
> index 00000000..2bb11ccc
> --- /dev/null
> +++ b/utils/raspberrypi/ctt/cac_only.py
> @@ -0,0 +1,143 @@
> +#!/usr/bin/env python3
> +#
> +# SPDX-License-Identifier: BSD-2-Clause
> +#
> +# Copyright (C) 2023, Raspberry Pi (Trading) Limited
> +#
> +# cac_only.py - cac tuning tool
> +
> +
> +# This file allows you to tune only the chromatic aberration correction.
> +# Specify any number of files in the command line args, and it shall iterate through
> +# and generate an averaged cac table from all the input images, which you can then
> +# input into your tuning file.
> +
> +# Takes .dng files of the dots grid produced by the camera module and calculates the chromatic aberration of each dot.
> +# Then takes each dot, works out where it was in the image, and uses that to output tables of the shifts
> +# across the whole image.
> +
> +from PIL import Image
> +import numpy as np
> +import rawpy
> +import sys
> +import getopt
> +
> +from ctt_cac import *
> +
> +
> +def cac(filelist, output_filepath, plot_results=False):
> +    np.set_printoptions(precision=3)
> +    np.set_printoptions(suppress=True)
> +
> +    # Create arrays to hold all the dots data and their colour offsets
> +    red_shift = []  # Format is: [[Dot Center X, Dot Center Y, x shift, y shift]]
> +    blue_shift = []
> +    # Iterate through the files
> +    # Multiple files are recommended to average out the lens aberration through rotations
> +    for file in filelist:
> +        print("\n Processing file " + str(file))
> +        # Read the raw RGB values from the .dng file
> +        with rawpy.imread(file) as raw:
> +            rgb = raw.postprocess()
> +            sizes = (raw.sizes)
> +
> +        image_size = [sizes[2], sizes[3]]  # Image size, X, Y
> +        # Create a colour copy of the RGB values to use later in the calibration
> +        imout = Image.new(mode="RGB", size=image_size)
> +        rgb_image = np.array(imout)
> +        # The rgb values need reshaping from a 1d array to a 3d array to be worked with easily
> +        rgb.reshape((image_size[0], image_size[1], 3))
> +        rgb_image = rgb
> +
> +        # Pass the RGB image through to the dots locating program
> +        # Returns an array of the dots (colour rectangles around the dots), and an array of their locations
> +        print("Finding dots")
> +        dots, dots_locations = find_dots_locations(rgb_image)
> +
> +        # Now, analyse each dot. Work out the centroid of each colour channel, and use that to work out
> +        # by how far the chromatic aberration has shifted each channel
> +        print('Dots found: ' + str(len(dots)))
> +
> +        for dot, dot_location in zip(dots, dots_locations):
> +            if len(dot) > 0:
> +                if (dot_location[0] > 0) and (dot_location[1] > 0):
> +                    ret = analyse_dot(dot, dot_location)
> +                    red_shift.append(ret[0])
> +                    blue_shift.append(ret[1])
> +
> +    # Take our arrays of red shifts and locations, push them through to be interpolated into a 9x9 matrix
> +    # for the CAC block to handle and then store these as a .json file to be added to the camera
> +    # tuning file
> +    print("\nCreating output grid")
> +    rx, ry, bx, by = shifts_to_yaml(red_shift, blue_shift, image_size)
> +
> +    print("CAC correction complete!")
> +
> +    # The json format that we then paste into the tuning file (manually)
> +    sample = '''
> +    {
> +        "rpi.cac" :
> +        {
> +            "strength": 1.0,
> +            "lut_rx" : [
> +                   rx_vals
> +            ],
> +            "lut_ry" : [
> +                   ry_vals
> +            ],
> +            "lut_bx" : [
> +                   bx_vals
> +            ],
> +            "lut_by" : [
> +                   by_vals
> +            ]
> +        }
> +    }
> +    '''
> +
> +    # The below may look incorrect; however, the PiSP (standard) dimensions are flipped in comparison to
> +    # PIL image coordinate directions, hence why xr -> yr. Also, the shifts calculated are colour shifts,
> +    # and the PiSP block asks for the values it should shift by (hence the * -1, to convert from a colour shift to a pixel shift)
> +    sample = sample.replace("rx_vals", pprint_array(ry * -1))
> +    sample = sample.replace("ry_vals", pprint_array(rx * -1))
> +    sample = sample.replace("bx_vals", pprint_array(by * -1))
> +    sample = sample.replace("by_vals", pprint_array(bx * -1))
> +    print("Successfully converted to YAML")
> +    f = open(str(output_filepath), "w+")
> +    f.write(sample)
> +    f.close()
> +    print("Successfully written to yaml file")
> +    '''
> +    If you wish to see a plot of the colour channel shifts, add the -p or --plots option
> +    Can be a quick way of validating if the data/dots you've got are good, or if you need to
> +    change some parameters/take some better images
> +    '''
> +    if plot_results:
> +        plot_shifts(red_shift, blue_shift)
> +
> +
> +if __name__ == "__main__":
> +    argv = sys.argv
> +    # Detect the input and output file paths
> +    arg_output = "output.json"
> +    arg_help = "{0} -i <input> -o <output> -p <plot results>".format(argv[0])
> +    opts, args = getopt.getopt(argv[1:], "hi:o:p", ["help", "input=", "output=", "plot"])
> +
> +    output_location = 0
> +    input_location = 0
> +    filelist = []
> +    plot_results = False
> +    for i in range(len(argv)):
> +        if ("-h") in argv[i]:
> +            print(arg_help)  # print the help message
> +            sys.exit(2)
> +        if "-o" in argv[i]:
> +            output_location = i
> +        if ".dng" in argv[i]:
> +            filelist.append(argv[i])
> +        if "-p" in argv[i]:
> +            plot_results = True
> +
> +    arg_output = argv[output_location + 1]
> +    # cac() does not take a logfile argument; find_dots_locations() opens its own log
> +    cac(filelist, arg_output, plot_results)
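
In terms of usage, cac_only.py treats every argument containing ".dng" as an input, the argument after -o as the output path, and -p as a request for the plots, e.g. ./cac_only.py -o cac.json dots_0.dng dots_90.dng. The cac() entry point can also be driven directly; a sketch, with illustrative filenames:

    from cac_only import cac

    # Several rotations of the dot grid help average out per-capture noise
    filelist = ['dots_0.dng', 'dots_90.dng', 'dots_180.dng']
    cac(filelist, 'cac.json', plot_results=False)
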
> diff --git a/utils/raspberrypi/ctt/ctt_cac.py b/utils/raspberrypi/ctt/ctt_cac.py
> new file mode 100644
> index 00000000..5a4c5101
> --- /dev/null
> +++ b/utils/raspberrypi/ctt/ctt_cac.py
> @@ -0,0 +1,228 @@
> +# SPDX-License-Identifier: BSD-2-Clause
> +#
> +# Copyright (C) 2023, Raspberry Pi Ltd
> +#
> +# ctt_cac.py - CAC (Chromatic Aberration Correction) tuning tool
> +
> +from PIL import Image
> +import numpy as np
> +import matplotlib.pyplot as plt
> +from matplotlib import cm
> +
> +from ctt_dots_locator import find_dots_locations
> +
> +
> +# This is the wrapper file that creates a JSON entry for you to append
> +# to your camera tuning file.
> +# It calculates the chromatic aberration at different points throughout
> +# the image and uses that to produce a martix that can then be used
> +# in the camera tuning files to correct this aberration.
> +
> +
> +def pprint_array(array):
> +    # Function to print the array in a tidier format
> +
> +    output = ""
> +    for i in range(len(array)):
> +        for j in range(len(array[0])):
> +            output += str(round(array[i, j], 2)) + ", "
> +        # Add the necessary indentation to the array
> +        output += "\n                   "
> +    # Cut off the end of the array (nicely formats it)
> +    return output[:-22]
> +
> +
> +def plot_shifts(red_shifts, blue_shifts):
> +    # If users want, they can pass a command line option to show the shifts on a graph
> +    # Can be useful to check that the functions are all working, and that the sample
> +    # images are doing the right thing
> +    Xs = np.array(red_shifts)[:, 0]
> +    Ys = np.array(red_shifts)[:, 1]
> +    Zs = np.array(red_shifts)[:, 2]
> +    Zs2 = np.array(red_shifts)[:, 3]
> +    Zs3 = np.array(blue_shifts)[:, 2]
> +    Zs4 = np.array(blue_shifts)[:, 3]
> +
> +    fig, axs = plt.subplots(2, 2)
> +    ax = fig.add_subplot(2, 2, 1, projection='3d')
> +    ax.scatter(Xs, Ys, Zs, cmap=cm.jet, linewidth=0)
> +    ax.set_title('Red X Shift')
> +    ax = fig.add_subplot(2, 2, 2, projection='3d')
> +    ax.scatter(Xs, Ys, Zs2, cmap=cm.jet, linewidth=0)
> +    ax.set_title('Red Y Shift')
> +    ax = fig.add_subplot(2, 2, 3, projection='3d')
> +    ax.scatter(Xs, Ys, Zs3, cmap=cm.jet, linewidth=0)
> +    ax.set_title('Blue X Shift')
> +    ax = fig.add_subplot(2, 2, 4, projection='3d')
> +    ax.scatter(Xs, Ys, Zs4, cmap=cm.jet, linewidth=0)
> +    ax.set_title('Blue Y Shift')
> +    fig.tight_layout()
> +    plt.show()
> +
> +
> +def shifts_to_yaml(red_shift, blue_shift, image_dimensions, output_grid_size=9):
> +    # Convert the shifts to a numpy array for easier handling and initialise other variables
> +    red_shifts = np.array(red_shift)
> +    blue_shifts = np.array(blue_shift)
> +    # create a grid that's smaller than the output grid, which we then interpolate from to get the output values
> +    xrgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
> +    xbgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
> +    yrgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
> +    ybgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
> +
> +    xrsgrid = []
> +    xbsgrid = []
> +    yrsgrid = []
> +    ybsgrid = []
> +    xg = np.zeros((output_grid_size - 1, output_grid_size - 1))
> +    yg = np.zeros((output_grid_size - 1, output_grid_size - 1))
> +
> +    # Format the grids - numpy doesn't work well for this: it wants a
> +    # nice uniformly spaced grid, which we can't guarantee we have yet, hence the rather manual setup
> +    for x in range(output_grid_size - 1):
> +        xrsgrid.append([])
> +        yrsgrid.append([])
> +        xbsgrid.append([])
> +        ybsgrid.append([])
> +        for y in range(output_grid_size - 1):
> +            xrsgrid[x].append([])
> +            yrsgrid[x].append([])
> +            xbsgrid[x].append([])
> +            ybsgrid[x].append([])
> +
> +    image_size = (image_dimensions[0], image_dimensions[1])
> +    gridxsize = image_size[0] / (output_grid_size - 1)
> +    gridysize = image_size[1] / (output_grid_size - 1)
> +
> +    # Iterate through each dot and its shift values, and put these into the correct grid location
> +    for red_shift in red_shifts:
> +        xgridloc = int(red_shift[0] / gridxsize)
> +        ygridloc = int(red_shift[1] / gridysize)
> +        xrsgrid[xgridloc][ygridloc].append(red_shift[2])
> +        yrsgrid[xgridloc][ygridloc].append(red_shift[3])
> +
> +    for blue_shift in blue_shifts:
> +        xgridloc = int(blue_shift[0] / gridxsize)
> +        ygridloc = int(blue_shift[1] / gridysize)
> +        xbsgrid[xgridloc][ygridloc].append(blue_shift[2])
> +        ybsgrid[xgridloc][ygridloc].append(blue_shift[3])
> +
> +    # Now calculate the average pixel shift for each square in the grid
> +    for x in range(output_grid_size - 1):
> +        for y in range(output_grid_size - 1):
> +            xrgrid[x, y] = np.mean(xrsgrid[x][y])
> +            yrgrid[x, y] = np.mean(yrsgrid[x][y])
> +            xbgrid[x, y] = np.mean(xbsgrid[x][y])
> +            ybgrid[x, y] = np.mean(ybsgrid[x][y])
> +
> +    # Next, we start to interpolate the central points of the grid that gets passed to the tuning file
> +    input_grids = np.array([xrgrid, yrgrid, xbgrid, ybgrid])
> +    output_grids = np.zeros((4, output_grid_size, output_grid_size))
> +
> +    # Interpolate the centre of the grid
> +    output_grids[:, 1:-1, 1:-1] = (input_grids[:, 1:, :-1] + input_grids[:, 1:, 1:] + input_grids[:, :-1, 1:] + input_grids[:, :-1, :-1]) / 4
> +
> +    # Edge cases:
> +    output_grids[:, 1:-1, 0] = ((input_grids[:, :-1, 0] + input_grids[:, 1:, 0]) / 2 - output_grids[:, 1:-1, 1]) * 2 + output_grids[:, 1:-1, 1]
> +    output_grids[:, 1:-1, -1] = ((input_grids[:, :-1, 7] + input_grids[:, 1:, 7]) / 2 - output_grids[:, 1:-1, -2]) * 2 + output_grids[:, 1:-1, -2]
> +    output_grids[:, 0, 1:-1] = ((input_grids[:, 0, :-1] + input_grids[:, 0, 1:]) / 2 - output_grids[:, 1, 1:-1]) * 2 + output_grids[:, 1, 1:-1]
> +    output_grids[:, -1, 1:-1] = ((input_grids[:, 7, :-1] + input_grids[:, 7, 1:]) / 2 - output_grids[:, -2, 1:-1]) * 2 + output_grids[:, -2, 1:-1]
> +
> +    # Corner Cases:
> +    output_grids[:, 0, 0] = (output_grids[:, 0, 1] - output_grids[:, 1, 1]) + (output_grids[:, 1, 0] - output_grids[:, 1, 1]) + output_grids[:, 1, 1]
> +    output_grids[:, 0, -1] = (output_grids[:, 0, -2] - output_grids[:, 1, -2]) + (output_grids[:, 1, -1] - output_grids[:, 1, -2]) + output_grids[:, 1, -2]
> +    output_grids[:, -1, 0] = (output_grids[:, -1, 1] - output_grids[:, -2, 1]) + (output_grids[:, -2, 0] - output_grids[:, -2, 1]) + output_grids[:, -2, 1]
> +    output_grids[:, -1, -1] = (output_grids[:, -2, -1] - output_grids[:, -2, -2]) + (output_grids[:, -1, -2] - output_grids[:, -2, -2]) + output_grids[:, -2, -2]
> +
> +    # Below, we swap the x and the y coordinates, and also multiply by a factor of -1
> +    # This is due to the PiSP (standard) dimensions being flipped in comparison to
> +    # PIL image coordinate directions, hence why xr -> yr. Also, the shifts calculated are colour shifts,
> +    # and the PiSP block asks for the values it should shift by (hence the * -1, to convert from colour shift to a pixel shift)
> +
> +    output_grid_yr, output_grid_xr, output_grid_yb, output_grid_xb = output_grids * -1
> +    return output_grid_xr, output_grid_yr, output_grid_xb, output_grid_yb
> +
> +
> +def analyse_dot(dot, dot_location=[0, 0]):
> +    # Scan through the dot, calculate the centroid of each colour channel by doing:
> +    # pixel channel brightness * distance from top left corner
> +    # Sum these, and divide by the sum of each channel's brightnesses to get a centroid for each channel
> +    red_channel = np.array(dot)[:, :, 0]
> +    y_num_pixels = len(red_channel[0])
> +    x_num_pixels = len(red_channel)
> +    yred_weight = np.sum(np.dot(red_channel, np.arange(y_num_pixels)))
> +    xred_weight = np.sum(np.dot(np.arange(x_num_pixels), red_channel))
> +    red_sum = np.sum(red_channel)
> +
> +    green_channel = np.array(dot)[:, :, 1]
> +    ygreen_weight = np.sum(np.dot(green_channel, np.arange(y_num_pixels)))
> +    xgreen_weight = np.sum(np.dot(np.arange(x_num_pixels), green_channel))
> +    green_sum = np.sum(green_channel)
> +
> +    blue_channel = np.array(dot)[:, :, 2]
> +    yblue_weight = np.sum(np.dot(blue_channel, np.arange(y_num_pixels)))
> +    xblue_weight = np.sum(np.dot(np.arange(x_num_pixels), blue_channel))
> +    blue_sum = np.sum(blue_channel)
> +
> +    # We return this structure. It contains two arrays, one for red and one for blue,
> +    # each holding the dot centre location and that channel's x and y shift relative to green:
> +    # [ [red_center_x, red_center_y, red_x_shift, red_y_shift], [blue_center_x, blue_center_y, blue_x_shift, blue_y_shift] ]
> +
> +    return [[int(dot_location[0]) + int(len(dot) / 2), int(dot_location[1]) + int(len(dot[0]) / 2), xred_weight / red_sum - xgreen_weight / green_sum, yred_weight / red_sum - ygreen_weight / green_sum], [dot_location[0] + int(len(dot) / 2), dot_location[1] + int(len(dot[0]) / 2), xblue_weight / blue_sum - xgreen_weight / green_sum, yblue_weight / blue_sum - ygreen_weight / green_sum]]
> +
> +
> +def cac(Cam):
> +    filelist = Cam.imgs_cac
> +
> +    Cam.log += '\nCAC analysing files: {}'.format(str(filelist))
> +    np.set_printoptions(precision=3)
> +    np.set_printoptions(suppress=True)
> +
> +    # Create arrays to hold all the dots data and their colour offsets
> +    red_shift = []  # Format is: [[Dot Center X, Dot Center Y, x shift, y shift]]
> +    blue_shift = []
> +    # Iterate through the files
> +    # Multiple files are recommended to average out the lens aberration through rotations
> +    for file in filelist:
> +        Cam.log += '\nCAC processing file'
> +        print("\n Processing file")
> +        # Read the raw RGB values
> +        rgb = file.rgb
> +        image_size = [file.h, file.w]  # Image size, X, Y
> +        # Create a colour copy of the RGB values to use later in the calibration
> +        imout = Image.new(mode="RGB", size=image_size)
> +        rgb_image = np.array(imout)
> +        # The rgb values need reshaping from a 1d array to a 3d array to be worked with easily
> +        rgb.reshape((image_size[0], image_size[1], 3))
> +        rgb_image = rgb
> +
> +        # Pass the RGB image through to the dots locating program
> +        # Returns an array of the dots (colour rectangles around the dots), and an array of their locations
> +        print("Finding dots")
> +        Cam.log += '\nFinding dots'
> +        dots, dots_locations = find_dots_locations(rgb_image)
> +
> +        # Now, analyse each dot. Work out the centroid of each colour channel, and use that to work out
> +        # by how far the chromatic aberration has shifted each channel
> +        Cam.log += '\nDots found: {}'.format(str(len(dots)))
> +        print('Dots found: ' + str(len(dots)))
> +
> +        for dot, dot_location in zip(dots, dots_locations):
> +            if len(dot) > 0:
> +                if (dot_location[0] > 0) and (dot_location[1] > 0):
> +                    ret = analyse_dot(dot, dot_location)
> +                    red_shift.append(ret[0])
> +                    blue_shift.append(ret[1])
> +
> +    # Take our arrays of red shifts and locations, push them through to be interpolated into a 9x9 matrix
> +    # for the CAC block to handle and then store these as a .json file to be added to the camera
> +    # tuning file
> +    print("\nCreating output grid")
> +    Cam.log += '\nCreating output grid'
> +    rx, ry, bx, by = shifts_to_yaml(red_shift, blue_shift, image_size)
> +
> +    print("CAC correction complete!")
> +    Cam.log += '\nCAC correction complete!'
> +
> +    # Give the JSON dict back to the main ctt program
> +    return {"strength": 1.0, "lut_rx": list(rx.round(2).reshape(81)), "lut_ry": list(ry.round(2).reshape(81)), "lut_bx": list(bx.round(2).reshape(81)), "lut_by": list(by.round(2).reshape(81))}
> diff --git a/utils/raspberrypi/ctt/ctt_dots_locator.py b/utils/raspberrypi/ctt/ctt_dots_locator.py
> new file mode 100644
> index 00000000..4945c04b
> --- /dev/null
> +++ b/utils/raspberrypi/ctt/ctt_dots_locator.py
> @@ -0,0 +1,118 @@
> +# SPDX-License-Identifier: BSD-2-Clause
> +#
> +# Copyright (C) 2023, Raspberry Pi Ltd
> +#
> +# find_dots.py - Used by CAC algorithm to convert image to set of dots
> +
> +'''
> +This file takes the black and white version of the image, along with
> +the color version. It then locates the black dots in the image by
> +thresholding dark pixels.
> +In a rather fun way, the algorithm bounces around the thresholded area in a random path.
> +We then use the maximum and minimum of these paths to determine the dot shape and size.
> +This info is then used to return colored dots and locations back to the main file.
> +'''
> +
> +import numpy as np
> +import random
> +from PIL import Image, ImageEnhance, ImageFilter
> +
> +
> +def find_dots_locations(rgb_image, color_threshold=100, dots_edge_avoid=75, image_edge_avoid=10, search_path_length=500, grid_scan_step_size=10, logfile=open("log.txt", "a+")):
> +    # Initialise some starting variables
> +    pixels = Image.fromarray(rgb_image)
> +    pixels = pixels.convert("L")
> +    enhancer = ImageEnhance.Contrast(pixels)
> +    im_output = enhancer.enhance(1.4)
> +    # We smooth it slightly to make it easier for the dot recognition program to locate the dots
> +    im_output = im_output.filter(ImageFilter.GaussianBlur(radius=2))
> +    bw_image = np.array(im_output)
> +
> +    location = [0, 0]
> +    dots = []
> +    dots_location = []
> +    # the program takes away the edges - we don't want a dot that is half a circle, the
> +    # centroids would all be wrong
> +    for x in range(dots_edge_avoid, len(bw_image) - dots_edge_avoid, grid_scan_step_size):
> +        for y in range(dots_edge_avoid, len(bw_image[0]) - dots_edge_avoid, grid_scan_step_size):
> +            location = [x, y]
> +            scrap_dot = False  # A variable used to make sure that this is a valid dot
> +            if (bw_image[location[0], location[1]] < color_threshold) and not (scrap_dot):
> +                heading = "south"  # Define a starting direction to move in
> +                coords = []
> +                for i in range(search_path_length):  # Creates a path of length `search_path_length`. This turns out to always be enough to work out the rough shape of the dot.
> +                    # Now make sure that the thresholded area doesn't come within 10 pixels of the edge of the image, ensures we capture all the CA
> +                    if ((image_edge_avoid < location[0] < len(bw_image) - image_edge_avoid) and (image_edge_avoid < location[1] < len(bw_image[0]) - image_edge_avoid)) and not (scrap_dot):
> +                        if heading == "south":
> +                            if bw_image[location[0] + 1, location[1]] < color_threshold:
> +                                # Here, notice it does not go south, but actually goes southeast
> +                                # This is crucial in ensuring that we make our way around the majority of the dot
> +                                location[0] = location[0] + 1
> +                                location[1] = location[1] + 1
> +                                heading = "south"
> +                            else:
> +                                # This happens when we reach a thresholded edge. We now randomly change direction and keep searching
> +                                dir = random.randint(1, 2)
> +                                if dir == 1:
> +                                    heading = "west"
> +                                if dir == 2:
> +                                    heading = "east"
> +
> +                        if heading == "east":
> +                            if bw_image[location[0], location[1] + 1] < color_threshold:
> +                                location[1] = location[1] + 1
> +                                heading = "east"
> +                            else:
> +                                dir = random.randint(1, 2)
> +                                if dir == 1:
> +                                    heading = "north"
> +                                if dir == 2:
> +                                    heading = "south"
> +
> +                        if heading == "west":
> +                            if bw_image[location[0], location[1] - 1] < color_threshold:
> +                                location[1] = location[1] - 1
> +                                heading = "west"
> +                            else:
> +                                dir = random.randint(1, 2)
> +                                if dir == 1:
> +                                    heading = "north"
> +                                if dir == 2:
> +                                    heading = "south"
> +
> +                        if heading == "north":
> +                            if bw_image[location[0] - 1, location[1]] < color_threshold:
> +                                location[0] = location[0] - 1
> +                                heading = "north"
> +                            else:
> +                                dir = random.randint(1, 2)
> +                                if dir == 1:
> +                                    heading = "west"
> +                                if dir == 2:
> +                                    heading = "east"
> +                        # Log where our particle travels across the dot
> +                        coords.append([location[0], location[1]])
> +                    else:
> +                        scrap_dot = True  # We just don't have enough space around the dot, discard this one, and move on
> +                if not scrap_dot:
> +                    # get the size of the traced path surrounding the dot
> +                    x_coords = np.array(coords)[:, 0]
> +                    y_coords = np.array(coords)[:, 1]
> +                    hsquaresize = max(list(x_coords)) - min(list(x_coords))
> +                    vsquaresize = max(list(y_coords)) - min(list(y_coords))
> +                    # Create the bounding coordinates of the rectangle surrounding the dot
> +                    # Program uses the dotsize + half of the dotsize to ensure we get all that color fringing
> +                    extra_space_factor = 0.45
> +                    top_left_x = (min(list(x_coords)) - int(hsquaresize * extra_space_factor))
> +                    btm_right_x = max(list(x_coords)) + int(hsquaresize * extra_space_factor)
> +                    top_left_y = (min(list(y_coords)) - int(vsquaresize * extra_space_factor))
> +                    btm_right_y = max(list(y_coords)) + int(vsquaresize * extra_space_factor)
> +                    # Overwrite the area of the dot to ensure we don't use it again
> +                    bw_image[top_left_x:btm_right_x, top_left_y:btm_right_y] = 255
> +                    # Add the color version of the dot to the list to send off, along with some coordinates.
> +                    dots.append(rgb_image[top_left_x:btm_right_x, top_left_y:btm_right_y])
> +                    dots_location.append([top_left_x, top_left_y])
> +                else:
> +                    # Dot was too close to the image border to be useable
> +                    pass
> +    return dots, dots_location
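
The random-walk tracer is easiest to sanity-check on synthetic data. A smoke-test sketch, assuming ctt_dots_locator.py is importable (frame and dot sizes are arbitrary):

    import numpy as np
    from ctt_dots_locator import find_dots_locations

    # A white 400x400 RGB frame with one dark dot well clear of the borders
    img = np.full((400, 400, 3), 255, dtype=np.uint8)
    yy, xx = np.ogrid[:400, :400]
    img[(yy - 200) ** 2 + (xx - 200) ** 2 < 15 ** 2] = 0

    dots, locations = find_dots_locations(img)
    # Expect one dot; each location is the top-left corner of the rectangle
    # cut around the dot (the traced extent plus the 45% margin)
    print(len(dots), locations)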
> diff --git a/utils/raspberrypi/ctt/ctt_image_load.py b/utils/raspberrypi/ctt/ctt_image_load.py
> index d76ece73..ea5fa360 100644
> --- a/utils/raspberrypi/ctt/ctt_image_load.py
> +++ b/utils/raspberrypi/ctt/ctt_image_load.py
> @@ -350,6 +350,8 @@ def dng_load_image(Cam, im_str):
>          c2 = np.left_shift(raw_data[1::2, 0::2].astype(np.int64), shift)
>          c3 = np.left_shift(raw_data[1::2, 1::2].astype(np.int64), shift)
>          Img.channels = [c0, c1, c2, c3]
> +        Img.rgb = raw_im.postprocess()
> +        Img.sizes = raw_im.sizes
>  
>      except Exception:
>          print("\nERROR: failed to load DNG file", im_str)
> diff --git a/utils/raspberrypi/ctt/ctt_log.txt b/utils/raspberrypi/ctt/ctt_log.txt
> new file mode 100644
> index 00000000..682e24e4
> --- /dev/null
> +++ b/utils/raspberrypi/ctt/ctt_log.txt
> @@ -0,0 +1,31 @@
> +Log created : Fri Aug 25 17:02:58 2023
> +
> +----------------------------------------------------------------------
> +User Arguments
> +----------------------------------------------------------------------
> +
> +Json file output: output.json
> +Calibration images directory: ../ctt/
> +No configuration file input... using default options
> +No log file path input... using default: ctt_log.txt
> +
> +----------------------------------------------------------------------
> +Image Loading
> +----------------------------------------------------------------------
> +
> +Directory: ../ctt/
> +Files found: 1
> +
> +Image: alsc_3000k_0.dng
> +Identified as an ALSC image
> +Colour temperature: 3000 K
> +
> +Images found:
> +Macbeth : 0
> +ALSC : 1 
> +CAC: 0 
> +
> +Camera metadata
> +ERROR: No usable macbeth chart images found
> +
> +----------------------------------------------------------------------
> diff --git a/utils/raspberrypi/ctt/ctt_pisp.py b/utils/raspberrypi/ctt/ctt_pisp.py
> index f837e062..862587a6 100755
> --- a/utils/raspberrypi/ctt/ctt_pisp.py
> +++ b/utils/raspberrypi/ctt/ctt_pisp.py
> @@ -197,6 +197,8 @@ json_template = {
>      },
>      "rpi.ccm": {
>      },
> +    "rpi.cac": {
> +    },
>      "rpi.sharpen": {
>         "threshold": 0.25,
>         "limit": 1.0,
> diff --git a/utils/raspberrypi/ctt/ctt_pretty_print_json.py b/utils/raspberrypi/ctt/ctt_pretty_print_json.py
> index 5d16b2a6..d3bd7d97 100755
> --- a/utils/raspberrypi/ctt/ctt_pretty_print_json.py
> +++ b/utils/raspberrypi/ctt/ctt_pretty_print_json.py
> @@ -24,6 +24,10 @@ class Encoder(json.JSONEncoder):
>              'luminance_lut': 16,
>              'ct_curve': 3,
>              'ccm': 3,
> +            'lut_rx': 9,
> +            'lut_bx': 9,
> +            'lut_by': 9,
> +            'lut_ry': 9,
>              'gamma_curve': 2,
>              'y_target': 2,
>              'prior': 2
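
These entries give the number of values the pretty printer packs per line for each table, so an 81-entry lut prints as nine rows of nine. The effect, sketched without the Encoder class:

    # 9-per-line wrapping, as applied to each lut_* array from cac()
    table = [round(0.01 * i, 2) for i in range(81)]  # placeholder values
    rows = [table[i:i + 9] for i in range(0, 81, 9)]
    print(',\n'.join(', '.join(str(v) for v in row) for row in rows))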
> diff --git a/utils/raspberrypi/ctt/ctt_run.py b/utils/raspberrypi/ctt/ctt_run.py
> index 0c85d7db..074136a1 100755
> --- a/utils/raspberrypi/ctt/ctt_run.py
> +++ b/utils/raspberrypi/ctt/ctt_run.py
> @@ -9,6 +9,7 @@
>  import os
>  import sys
>  from ctt_image_load import *
> +from ctt_cac import *
>  from ctt_ccm import *
>  from ctt_awb import *
>  from ctt_alsc import *
> @@ -22,9 +23,10 @@ import re
>  
>  """
>  This file houses the camera object, which is used to perform the calibrations.
> -The camera object houses all the calibration images as attributes in two lists:
> +The camera object houses all the calibration images as attributes in three lists:
>      - imgs (macbeth charts)
>      - imgs_alsc (alsc correction images)
> +    - imgs_cac (cac correction images)
>  Various calibrations are methods of the camera object, and the output is stored
>  in a dictionary called self.json.
>  Once all the calibration has been completed, the Camera.json is written into a
> @@ -73,16 +75,15 @@ class Camera:
>              self.path = ''
>          self.imgs = []
>          self.imgs_alsc = []
> +        self.imgs_cac = []
>          self.log = 'Log created : ' + time.asctime(time.localtime(time.time()))
>          self.log_separator = '\n'+'-'*70+'\n'
>          self.jf = jfile
>          """
>          initial json dict populated by uncalibrated values
>          """
> -
>          self.json = json
>  
> -
>      """
>      Perform colour correction calibrations by comparing macbeth patch colours
>      to standard macbeth chart colours.
> @@ -146,6 +147,62 @@ class Camera:
>          self.log += '\nCCM calibration written to json file'
>          print('Finished CCM calibration')
>  
> +    """
> +    Perform chromatic aberration correction using multiple dots images.
> +    """
> +    def cac_cal(self, do_alsc_colour):
> +        if 'rpi.cac' in self.disable:
> +            return 1
> +        print('\nStarting CAC calibration')
> +        self.log_new_sec('CAC')
> +        """
> +        check if cac images have been taken
> +        """
> +        if len(self.imgs_cac) == 0:
> +            print('\nError:\nNo cac calibration images found')
> +            self.log += '\nERROR: No CAC calibration images found!'
> +            self.log += '\nCAC calibration aborted!'
> +            return 1
> +        """
> +        if image is greyscale then CAC makes no sense
> +        """
> +        if self.grey:
> +            print('\nERROR: Can\'t do CAC on greyscale image!')
> +            self.log += '\nERROR: Cannot perform CAC calibration '
> +            self.log += 'on greyscale image!\nCAC aborted!'
> +            del self.json['rpi.cac']
> +            return 0
> +        a = time.time()
> +        """
> +        Check if camera is greyscale or color. If not greyscale, then perform cac
> +        """
> +        if do_alsc_colour:
> +            """
> +            Here we have a color sensor. Perform cac
> +            """
> +            try:
> +                cacs = cac(self)
> +            except ArithmeticError:
> +                print('ERROR: Matrix is singular!\nTake new pictures and try again...')
> +                self.log += '\nERROR: Singular matrix encountered during fit!'
> +                self.log += '\nCAC aborted!'
> +                return 1
> +        else:
> +            """
> +            case where config options suggest greyscale camera. No point in doing CAC
> +            """
> +            self.log += '\nWARNING: Greyscale camera configured.\nCAC calibration '
> +            self.log += 'skipped...'
> +            return 1
> +
> +        """
> +        Write output to json
> +        """
> +        self.json['rpi.cac']['cac'] = cacs
> +        self.log += '\nCAC calibration written to json file'
> +        print('Finished CAC calibration')
> +
> +
>      """
>      Auto white balance calibration produces a colour curve for
>      various colour temperatures, as well as providing a maximum 'wiggle room'
> @@ -516,6 +573,16 @@ class Camera:
>                      self.log += '\nWARNING: Error reading colour temperature'
>                      self.log += '\nImage discarded!'
>                      print('DISCARDED')
> +            elif 'cac' in filename:
> +                Img = load_image(self, address, mac=False)
> +                self.log += '\nIdentified as a CAC image'
> +                Img.name = filename
> +                self.log += '\nColour temperature: {} K'.format(col)
> +                self.imgs_cac.append(Img)
> +                if blacklevel != -1:
> +                    Img.blacklevel_16 = blacklevel
> +                print(img_suc_msg)
> +                continue
>              else:
>                  self.log += '\nIdentified as macbeth chart image'
>                  """
> @@ -561,6 +628,7 @@ class Camera:
>          self.log += '\n\nImages found:'
>          self.log += '\nMacbeth : {}'.format(len(self.imgs))
>          self.log += '\nALSC : {} '.format(len(self.imgs_alsc))
> +        self.log += '\nCAC: {} '.format(len(self.imgs_cac))
>          self.log += '\n\nCamera metadata'
>          """
>          check usable images found
> @@ -569,22 +637,21 @@ class Camera:
>              print('\nERROR: No usable macbeth chart images found')
>              self.log += '\nERROR: No usable macbeth chart images found'
>              return 0
> -        elif len(self.imgs) == 0 and len(self.imgs_alsc) == 0:
> +        elif len(self.imgs) == 0 and len(self.imgs_alsc) == 0 and len(self.imgs_cac) == 0:
>              print('\nERROR: No usable images found')
>              self.log += '\nERROR: No usable images found'
>              return 0
>          """
>          Double check that every image has come from the same camera...
>          """
> -        all_imgs = self.imgs + self.imgs_alsc
> +        all_imgs = self.imgs + self.imgs_alsc + self.imgs_cac
>          camNames = list(set([Img.camName for Img in all_imgs]))
>          patterns = list(set([Img.pattern for Img in all_imgs]))
>          sigbitss = list(set([Img.sigbits for Img in all_imgs]))
>          blacklevels = list(set([Img.blacklevel_16 for Img in all_imgs]))
>          sizes = list(set([(Img.w, Img.h) for Img in all_imgs]))
>  
> -        if len(camNames) == 1 and len(patterns) == 1 and len(sigbitss) == 1 and \
> -           len(blacklevels) == 1 and len(sizes) == 1:
> +        if 1:
>              self.grey = (patterns[0] == 128)
>              self.blacklevel_16 = blacklevels[0]
>              self.log += '\nName: {}'.format(camNames[0])
> @@ -643,6 +710,7 @@ def run_ctt(json_output, directory, config, log_output, json_template, grid_size
>      mac_small = get_config(macbeth_d, "small", 0, 'bool')
>      mac_show = get_config(macbeth_d, "show", 0, 'bool')
>      mac_config = (mac_small, mac_show)
> +    cac_d = get_config(configs, "cac", {}, 'dict')
>  
>      if blacklevel < -1 or blacklevel >= 2**16:
>          print('\nInvalid blacklevel, defaulted to 64')
> @@ -687,7 +755,8 @@ def run_ctt(json_output, directory, config, log_output, json_template, grid_size
>          Cam.geq_cal()
>          Cam.lux_cal()
>          Cam.noise_cal()
> -        Cam.cac_cal(do_alsc_colour)
> +        if "rpi.cac" in json_template:
> +            Cam.cac_cal(do_alsc_colour)
>          Cam.awb_cal(greyworld, do_alsc_colour, grid_size)
>          Cam.ccm_cal(do_alsc_colour, grid_size)
>  
> -- 
> 2.39.2
>
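
Putting it together: whether produced by cac_only.py or by Camera.cac_cal(), the end product is an "rpi.cac" block for the tuning file holding four 81-entry (9x9, row-major) tables. Its shape, with placeholder values rather than real calibration data:

    # Shape of the dict returned by ctt_cac.cac()
    cac_block = {
        "strength": 1.0,
        "lut_rx": [0.0] * 81,
        "lut_ry": [0.0] * 81,
        "lut_bx": [0.0] * 81,
        "lut_by": [0.0] * 81,
    }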

Patch
diff mbox series

diff --git a/utils/raspberrypi/ctt/alsc_pisp.py b/utils/raspberrypi/ctt/alsc_pisp.py
index 499aecd1..d0034ae1 100755
--- a/utils/raspberrypi/ctt/alsc_pisp.py
+++ b/utils/raspberrypi/ctt/alsc_pisp.py
@@ -2,7 +2,7 @@ 
 #
 # SPDX-License-Identifier: BSD-2-Clause
 #
-# Copyright (C) 2022, Raspberry Pi (Trading) Limited
+# Copyright (C) 2022, Raspberry Pi Ltd
 #
 # alsc_only.py - alsc tuning tool
 
diff --git a/utils/raspberrypi/ctt/cac_only.py b/utils/raspberrypi/ctt/cac_only.py
new file mode 100644
index 00000000..2bb11ccc
--- /dev/null
+++ b/utils/raspberrypi/ctt/cac_only.py
@@ -0,0 +1,143 @@ 
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2023, Raspberry Pi (Trading) Limited
+#
+# cac_only.py - cac tuning tool
+
+
+# This file allows you to tune only the chromatic aberration correction
+# Specify any number of files in the command line args, and it shall iterate through
+# and generate an averaged cac table from all the input images, which you can then
+# input into your tuning file.
+
+# Takes .dng files produced by the camera modules of the dots grid and calculates the chromatic abberation of each dot.
+# Then takes each dot, and works out where it was in the image, and uses that to output a tables of the shifts
+# across the whole image.
+
+from PIL import Image
+import numpy as np
+import rawpy
+import sys
+import getopt
+
+from ctt_cac import *
+
+
+def cac(filelist, output_filepath, plot_results=False):
+    np.set_printoptions(precision=3)
+    np.set_printoptions(suppress=True)
+
+    # Create arrays to hold all the dots data and their colour offsets
+    red_shift = []  # Format is: [[Dot Center X, Dot Center Y, x shift, y shift]]
+    blue_shift = []
+    # Iterate through the files
+    # Multiple files is reccomended to average out the lens aberration through rotations
+    for file in filelist:
+        print("\n Processing file " + str(file))
+        # Read the raw RGB values from the .dng file
+        with rawpy.imread(file) as raw:
+            rgb = raw.postprocess()
+            sizes = (raw.sizes)
+
+        image_size = [sizes[2], sizes[3]]  # Image size, X, Y
+        # Create a colour copy of the RGB values to use later in the calibration
+        imout = Image.new(mode="RGB", size=image_size)
+        rgb_image = np.array(imout)
+        # The rgb values need reshaping from a 1d array to a 3d array to be worked with easily
+        rgb.reshape((image_size[0], image_size[1], 3))
+        rgb_image = rgb
+
+        # Pass the RGB image through to the dots locating program
+        # Returns an array of the dots (colour rectangles around the dots), and an array of their locations
+        print("Finding dots")
+        dots, dots_locations = find_dots_locations(rgb_image)
+
+        # Now, analyse each dot. Work out the centroid of each colour channel, and use that to work out
+        # by how far the chromatic aberration has shifted each channel
+        print('Dots found: ' + str(len(dots)))
+
+        for dot, dot_location in zip(dots, dots_locations):
+            if len(dot) > 0:
+                if (dot_location[0] > 0) and (dot_location[1] > 0):
+                    ret = analyse_dot(dot, dot_location)
+                    red_shift.append(ret[0])
+                    blue_shift.append(ret[1])
+
+    # Take our arrays of red shifts and locations, push them through to be interpolated into a 9x9 matrix
+    # for the CAC block to handle and then store these as a .json file to be added to the camera
+    # tuning file
+    print("\nCreating output grid")
+    rx, ry, bx, by = shifts_to_yaml(red_shift, blue_shift, image_size)
+
+    print("CAC correction complete!")
+
+    # The json format that we then paste into the tuning file (manually)
+    sample = '''
+    {
+        "rpi.cac" :
+        {
+            "strength": 1.0,
+            "lut_rx" : [
+                   rx_vals
+            ],
+            "lut_ry" : [
+                   ry_vals
+            ],
+            "lut_bx" : [
+                   bx_vals
+            ],
+            "lut_by" : [
+                   by_vals
+            ]
+        }
+    }
+    '''
+
+    # Below, may look incorrect, however, the PiSP (standard) dimensions are flipped in comparison to
+    # PIL image coordinate directions, hence why xr -> yr. Also, the shifts calculated are colour shifts,
+    # and the PiSP block asks for the values it should shift (hence the * -1, to convert from colour shift to a pixel shift)
+    sample = sample.replace("rx_vals", pprint_array(ry * -1))
+    sample = sample.replace("ry_vals", pprint_array(rx * -1))
+    sample = sample.replace("bx_vals", pprint_array(by * -1))
+    sample = sample.replace("by_vals", pprint_array(bx * -1))
+    print("Successfully converted to YAML")
+    f = open(str(output_filepath), "w+")
+    f.write(sample)
+    f.close()
+    print("Successfully written to yaml file")
+    '''
+    If you wish to see a plot of the colour channel shifts, add the -p or --plots option
+    Can be a quick way of validating if the data/dots you've got are good, or if you need to
+    change some parameters/take some better images
+    '''
+    if plot_results:
+        plot_shifts(red_shift, blue_shift)
+
+
+if __name__ == "__main__":
+    argv = sys.argv
+    # Detect the input and output file paths
+    arg_output = "output.json"
+    arg_help = "{0} -i <input> -o <output> -p <plot results>".format(argv[0])
+    opts, args = getopt.getopt(argv[1:], "hi:o:p", ["help", "input=", "output=", "plot"])
+
+    output_location = 0
+    input_location = 0
+    filelist = []
+    plot_results = False
+    for i in range(len(argv)):
+        if ("-h") in argv[i]:
+            print(arg_help)  # print the help message
+            sys.exit(2)
+        if "-o" in argv[i]:
+            output_location = i
+        if ".dng" in argv[i]:
+            filelist.append(argv[i])
+        if "-p" in argv[i]:
+            plot_results = True
+
+    arg_output = argv[output_location + 1]
+    logfile = open("log.txt", "a+")
+    cac(filelist, arg_output, plot_results, logfile)
diff --git a/utils/raspberrypi/ctt/ctt_cac.py b/utils/raspberrypi/ctt/ctt_cac.py
new file mode 100644
index 00000000..5a4c5101
--- /dev/null
+++ b/utils/raspberrypi/ctt/ctt_cac.py
@@ -0,0 +1,228 @@ 
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2023, Raspberry Pi Ltd
+#
+# ctt_cac.py - CAC (Chromatic Aberration Correction) tuning tool
+
+from PIL import Image
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib import cm
+
+from ctt_dots_locator import find_dots_locations
+
+
+# This is the wrapper file that creates a JSON entry for you to append
+# to your camera tuning file.
+# It calculates the chromatic aberration at different points throughout
+# the image and uses that to produce a martix that can then be used
+# in the camera tuning files to correct this aberration.
+
+
+def pprint_array(array):
+    # Function to print the array in a tidier format
+    array = array
+    output = ""
+    for i in range(len(array)):
+        for j in range(len(array[0])):
+            output += str(round(array[i, j], 2)) + ", "
+        # Add the necessary indentation to the array
+        output += "\n                   "
+    # Cut off the end of the array (nicely formats it)
+    return output[:-22]
+
+
+def plot_shifts(red_shifts, blue_shifts):
+    # If users want, they can pass a command line option to show the shifts on a graph
+    # Can be useful to check that the functions are all working, and that the sample
+    # images are doing the right thing
+    Xs = np.array(red_shifts)[:, 0]
+    Ys = np.array(red_shifts)[:, 1]
+    Zs = np.array(red_shifts)[:, 2]
+    Zs2 = np.array(red_shifts)[:, 3]
+    Zs3 = np.array(blue_shifts)[:, 2]
+    Zs4 = np.array(blue_shifts)[:, 3]
+
+    fig, axs = plt.subplots(2, 2)
+    ax = fig.add_subplot(2, 2, 1, projection='3d')
+    ax.scatter(Xs, Ys, Zs, cmap=cm.jet, linewidth=0)
+    ax.set_title('Red X Shift')
+    ax = fig.add_subplot(2, 2, 2, projection='3d')
+    ax.scatter(Xs, Ys, Zs2, cmap=cm.jet, linewidth=0)
+    ax.set_title('Red Y Shift')
+    ax = fig.add_subplot(2, 2, 3, projection='3d')
+    ax.scatter(Xs, Ys, Zs3, cmap=cm.jet, linewidth=0)
+    ax.set_title('Blue X Shift')
+    ax = fig.add_subplot(2, 2, 4, projection='3d')
+    ax.scatter(Xs, Ys, Zs4, cmap=cm.jet, linewidth=0)
+    ax.set_title('Blue Y Shift')
+    fig.tight_layout()
+    plt.show()
+
+
+def shifts_to_yaml(red_shift, blue_shift, image_dimensions, output_grid_size=9):
+    # Convert the shifts to a numpy array for easier handling and initialise other variables
+    red_shifts = np.array(red_shift)
+    blue_shifts = np.array(blue_shift)
+    # create a grid that's smaller than the output grid, which we then interpolate from to get the output values
+    xrgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
+    xbgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
+    yrgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
+    ybgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
+
+    xrsgrid = []
+    xbsgrid = []
+    yrsgrid = []
+    ybsgrid = []
+    xg = np.zeros((output_grid_size - 1, output_grid_size - 1))
+    yg = np.zeros((output_grid_size - 1, output_grid_size - 1))
+
+    # Format the grids - numpy doesn't work for this, it wants a
+    # nice uniformly spaced grid, which we don't know if we have yet, hence the rather mundane setup
+    for x in range(output_grid_size - 1):
+        xrsgrid.append([])
+        yrsgrid.append([])
+        xbsgrid.append([])
+        ybsgrid.append([])
+        for y in range(output_grid_size - 1):
+            xrsgrid[x].append([])
+            yrsgrid[x].append([])
+            xbsgrid[x].append([])
+            ybsgrid[x].append([])
+
+    image_size = (image_dimensions[0], image_dimensions[1])
+    gridxsize = image_size[0] / (output_grid_size - 1)
+    gridysize = image_size[1] / (output_grid_size - 1)
+
+    # Iterate through each dot, and it's shift values and put these into the correct grid location
+    for red_shift in red_shifts:
+        xgridloc = int(red_shift[0] / gridxsize)
+        ygridloc = int(red_shift[1] / gridysize)
+        xrsgrid[xgridloc][ygridloc].append(red_shift[2])
+        yrsgrid[xgridloc][ygridloc].append(red_shift[3])
+
+    for blue_shift in blue_shifts:
+        xgridloc = int(blue_shift[0] / gridxsize)
+        ygridloc = int(blue_shift[1] / gridysize)
+        xbsgrid[xgridloc][ygridloc].append(blue_shift[2])
+        ybsgrid[xgridloc][ygridloc].append(blue_shift[3])
+
+    # Now calculate the average pixel shift for each square in the grid
+    for x in range(output_grid_size - 1):
+        for y in range(output_grid_size - 1):
+            xrgrid[x, y] = np.mean(xrsgrid[x][y])
+            yrgrid[x, y] = np.mean(yrsgrid[x][y])
+            xbgrid[x, y] = np.mean(xbsgrid[x][y])
+            ybgrid[x, y] = np.mean(ybsgrid[x][y])
+
+    # Next, we start to interpolate the central points of the grid that gets passed to the tuning file
+    input_grids = np.array([xrgrid, yrgrid, xbgrid, ybgrid])
+    output_grids = np.zeros((4, output_grid_size, output_grid_size))
+
+    # Interpolate the centre of the grid
+    output_grids[:, 1:-1, 1:-1] = (input_grids[:, 1:, :-1] + input_grids[:, 1:, 1:] + input_grids[:, :-1, 1:] + input_grids[:, :-1, :-1]) / 4
+
+    # Edge cases:
+    output_grids[:, 1:-1, 0] = ((input_grids[:, :-1, 0] + input_grids[:, 1:, 0]) / 2 - output_grids[:, 1:-1, 1]) * 2 + output_grids[:, 1:-1, 1]
+    output_grids[:, 1:-1, -1] = ((input_grids[:, :-1, 7] + input_grids[:, 1:, 7]) / 2 - output_grids[:, 1:-1, -2]) * 2 + output_grids[:, 1:-1, -2]
+    output_grids[:, 0, 1:-1] = ((input_grids[:, 0, :-1] + input_grids[:, 0, 1:]) / 2 - output_grids[:, 1, 1:-1]) * 2 + output_grids[:, 1, 1:-1]
+    output_grids[:, -1, 1:-1] = ((input_grids[:, 7, :-1] + input_grids[:, 7, 1:]) / 2 - output_grids[:, -2, 1:-1]) * 2 + output_grids[:, -2, 1:-1]
+
+    # Corner Cases:
+    output_grids[:, 0, 0] = (output_grids[:, 0, 1] - output_grids[:, 1, 1]) + (output_grids[:, 1, 0] - output_grids[:, 1, 1]) + output_grids[:, 1, 1]
+    output_grids[:, 0, -1] = (output_grids[:, 0, -2] - output_grids[:, 1, -2]) + (output_grids[:, 1, -1] - output_grids[:, 1, -2]) + output_grids[:, 1, -2]
+    output_grids[:, -1, 0] = (output_grids[:, -1, 1] - output_grids[:, -2, 1]) + (output_grids[:, -2, 0] - output_grids[:, -2, 1]) + output_grids[:, -2, 1]
+    output_grids[:, -1, -1] = (output_grids[:, -2, -1] - output_grids[:, -2, -2]) + (output_grids[:, -1, -2] - output_grids[:, -2, -2]) + output_grids[:, -2, -2]
+
+    # Below, we swap the x and the y coordinates, and also multiply by a factor of -1
+    # This is due to the PiSP (standard) dimensions being flipped in comparison to
+    # PIL image coordinate directions, hence why xr -> yr. Also, the shifts calculated are colour shifts,
+    # and the PiSP block asks for the values it should shift by (hence the * -1, to convert from colour shift to a pixel shift)
+
+    output_grid_yr, output_grid_xr, output_grid_yb, output_grid_xb = output_grids * -1
+    return output_grid_xr, output_grid_yr, output_grid_xb, output_grid_yb
+
+
+def analyse_dot(dot, dot_location=[0, 0]):
+    # Scan through the dot, calculate the centroid of each colour channel by doing:
+    # pixel channel brightness * distance from top left corner
+    # Sum these, and divide by the sum of each channel's brightnesses to get a centroid for each channel
+    red_channel = np.array(dot)[:, :, 0]
+    y_num_pixels = len(red_channel[0])
+    x_num_pixels = len(red_channel)
+    yred_weight = np.sum(np.dot(red_channel, np.arange(y_num_pixels)))
+    xred_weight = np.sum(np.dot(np.arange(x_num_pixels), red_channel))
+    red_sum = np.sum(red_channel)
+
+    green_channel = np.array(dot)[:, :, 1]
+    ygreen_weight = np.sum(np.dot(green_channel, np.arange(y_num_pixels)))
+    xgreen_weight = np.sum(np.dot(np.arange(x_num_pixels), green_channel))
+    green_sum = np.sum(green_channel)
+
+    blue_channel = np.array(dot)[:, :, 2]
+    yblue_weight = np.sum(np.dot(blue_channel, np.arange(y_num_pixels)))
+    xblue_weight = np.sum(np.dot(np.arange(x_num_pixels), blue_channel))
+    blue_sum = np.sum(blue_channel)
+
+    # We return this structure. It contains 2 arrays that contain:
+    # the locations of the dot center, along with the channel shifts in the x and y direction:
+    # [ [red_center_x, red_center_y, red_x_shift, red_y_shift], [blue_center_x, blue_center_y, blue_x_shift, blue_y_shift] ]
+
+    return [[int(dot_location[0]) + int(len(dot) / 2), int(dot_location[1]) + int(len(dot[0]) / 2), xred_weight / red_sum - xgreen_weight / green_sum, yred_weight / red_sum - ygreen_weight / green_sum], [dot_location[0] + int(len(dot) / 2), dot_location[1] + int(len(dot[0]) / 2), xblue_weight / blue_sum - xgreen_weight / green_sum, yblue_weight / blue_sum - ygreen_weight / green_sum]]
+
+
+def cac(Cam):
+    filelist = Cam.imgs_cac
+
+    Cam.log += '\nCAC analysing files: {}'.format(str(filelist))
+    np.set_printoptions(precision=3)
+    np.set_printoptions(suppress=True)
+
+    # Create arrays to hold all the dots data and their colour offsets
+    red_shift = []  # Format is: [[Dot Center X, Dot Center Y, x shift, y shift]]
+    blue_shift = []
+    # Iterate through the files
+    # Multiple files is reccomended to average out the lens aberration through rotations
+    for file in filelist:
+        Cam.log += '\nCAC processing file'
+        print("\n Processing file")
+        # Read the raw RGB values
+        rgb = file.rgb
+        image_size = [file.h, file.w]  # Image size, X, Y
+        # Create a colour copy of the RGB values to use later in the calibration
+        imout = Image.new(mode="RGB", size=image_size)
+        rgb_image = np.array(imout)
+        # The rgb values need reshaping from a 1d array to a 3d array to be worked with easily
+        rgb.reshape((image_size[0], image_size[1], 3))
+        rgb_image = rgb
+
+        # Pass the RGB image through to the dots locating program
+        # Returns an array of the dots (colour rectangles around the dots), and an array of their locations
+        print("Finding dots")
+        Cam.log += '\nFinding dots'
+        dots, dots_locations = find_dots_locations(rgb_image)
+
+        # Now, analyse each dot. Work out the centroid of each colour channel, and use that to work out
+        # by how far the chromatic aberration has shifted each channel
+        Cam.log += '\nDots found: {}'.format(str(len(dots)))
+        print('Dots found: ' + str(len(dots)))
+
+        for dot, dot_location in zip(dots, dots_locations):
+            if len(dot) > 0:
+                if (dot_location[0] > 0) and (dot_location[1] > 0):
+                    ret = analyse_dot(dot, dot_location)
+                    red_shift.append(ret[0])
+                    blue_shift.append(ret[1])
+
+    # Take our arrays of red shifts and locations, push them through to be interpolated into a 9x9 matrix
+    # for the CAC block to handle and then store these as a .json file to be added to the camera
+    # tuning file
+    print("\nCreating output grid")
+    Cam.log += '\nCreating output grid'
+    rx, ry, bx, by = shifts_to_yaml(red_shift, blue_shift, image_size)
+
+    print("CAC correction complete!")
+    Cam.log += '\nCAC correction complete!'
+
+    # Give the JSON dict back to the main ctt program
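+    # Each LUT is a 9x9 grid of shifts, flattened to the 81 values that the
+    # rpi.cac block of the tuning file expects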
+    return {"strength": 1.0, "lut_rx": list(rx.round(2).reshape(81)), "lut_ry": list(ry.round(2).reshape(81)), "lut_bx": list(bx.round(2).reshape(81)), "lut_by": list(by.round(2).reshape(81))}
diff --git a/utils/raspberrypi/ctt/ctt_dots_locator.py b/utils/raspberrypi/ctt/ctt_dots_locator.py
new file mode 100644
index 00000000..4945c04b
--- /dev/null
+++ b/utils/raspberrypi/ctt/ctt_dots_locator.py
@@ -0,0 +1,118 @@ 
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (C) 2023, Raspberry Pi Ltd
+#
+# ctt_dots_locator.py - Used by the CAC algorithm to convert an image to a set of dots
+
+'''
+This file takes the black and white version of the image, along with
+the color version. It then locates the black dots on the image by
+thresholding dark pixels.
+In a rather fun way, the algorithm bounces around the thresholded area in a random path.
+We then use the maximum and minimum of these paths to determine the dot shape and size.
+This info is then used to return colored dots and locations back to the main file.
+'''
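+
+# Outline of the approach:
+#   1. Boost contrast, blur slightly and threshold a greyscale copy of the image
+#   2. Coarse-scan the image for dark pixels, staying away from the borders
+#   3. Random-walk around each thresholded dot to find its extent
+#   4. Crop the colour dot (with a margin for the fringing) from its bounding box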
+
+import numpy as np
+import random
+from PIL import Image, ImageEnhance, ImageFilter
+
+
+def find_dots_locations(rgb_image, color_threshold=100, dots_edge_avoid=75, image_edge_avoid=10, search_path_length=500, grid_scan_step_size=10):
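+    # color_threshold: pixel values below this count as part of a dot
+    # dots_edge_avoid: border margin (in pixels) in which no dot search is started
+    # image_edge_avoid: closest the random walk may come to the image edge
+    # search_path_length: number of random-walk steps used to trace each dot
+    # grid_scan_step_size: stride of the coarse scan that looks for dark pixels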
+    # Initialise some starting variables
+    pixels = Image.fromarray(rgb_image)
+    pixels = pixels.convert("L")
+    enhancer = ImageEnhance.Contrast(pixels)
+    im_output = enhancer.enhance(1.4)
+    # We smooth it slightly to make it easier for the dot recognition program to locate the dots
+    im_output = im_output.filter(ImageFilter.GaussianBlur(radius=2))
+    bw_image = np.array(im_output)
+
+    location = [0, 0]
+    dots = []
+    dots_location = []
+    # The scan stays away from the image edges - we don't want a dot that is
+    # half a circle, as its centroids would all be wrong
+    for x in range(dots_edge_avoid, len(bw_image) - dots_edge_avoid, grid_scan_step_size):
+        for y in range(dots_edge_avoid, len(bw_image[0]) - dots_edge_avoid, grid_scan_step_size):
+            location = [x, y]
+            scrap_dot = False  # A variable used to make sure that this is a valid dot
+            if bw_image[location[0], location[1]] < color_threshold:
+                heading = "south"  # Define a starting direction to move in
+                coords = []
+                # Create a path of length `search_path_length`. This turns out to
+                # always be enough to work out the rough shape of the dot.
+                for i in range(search_path_length):
+                    # Make sure the walk doesn't come within `image_edge_avoid` pixels
+                    # of the edge of the image; this ensures we capture all the CA
+                    if ((image_edge_avoid < location[0] < len(bw_image) - image_edge_avoid) and (image_edge_avoid < location[1] < len(bw_image[0]) - image_edge_avoid)) and not (scrap_dot):
+                        if heading == "south":
+                            if bw_image[location[0] + 1, location[1]] < color_threshold:
+                                # Here, notice it does not go south, but actually goes southeast
+                                # This is crucial in ensuring that we make our way around the majority of the dot
+                                location[0] = location[0] + 1
+                                location[1] = location[1] + 1
+                                heading = "south"
+                            else:
+                                # This happens when we reach a thresholded edge. We now randomly change direction and keep searching
+                                heading = random.choice(("west", "east"))
+
+                        if heading == "east":
+                            if bw_image[location[0], location[1] + 1] < color_threshold:
+                                location[1] = location[1] + 1
+                                heading = "east"
+                            else:
+                                heading = random.choice(("north", "south"))
+
+                        if heading == "west":
+                            if bw_image[location[0], location[1] - 1] < color_threshold:
+                                location[1] = location[1] - 1
+                                heading = "west"
+                            else:
+                                heading = random.choice(("north", "south"))
+
+                        if heading == "north":
+                            if bw_image[location[0] - 1, location[1]] < color_threshold:
+                                location[0] = location[0] - 1
+                                heading = "north"
+                            else:
+                                heading = random.choice(("west", "east"))
+                        # Log where our particle travels across the dot
+                        coords.append([location[0], location[1]])
+                    else:
+                        scrap_dot = True  # We just don't have enough space around the dot, discard this one, and move on
+                if not scrap_dot:
+                    # Get the size of the walked path's bounding box around the dot
+                    x_coords = np.array(coords)[:, 0]
+                    y_coords = np.array(coords)[:, 1]
+                    hsquaresize = x_coords.max() - x_coords.min()
+                    vsquaresize = y_coords.max() - y_coords.min()
+                    # Create the bounding coordinates of the rectangle surrounding the dot,
+                    # padded by 45% of the dot size on each side to capture all the color fringing
+                    extra_space_factor = 0.45
+                    top_left_x = x_coords.min() - int(hsquaresize * extra_space_factor)
+                    btm_right_x = x_coords.max() + int(hsquaresize * extra_space_factor)
+                    top_left_y = y_coords.min() - int(vsquaresize * extra_space_factor)
+                    btm_right_y = y_coords.max() + int(vsquaresize * extra_space_factor)
+                    # Overwrite the area of the dot to ensure we don't use it again
+                    bw_image[top_left_x:btm_right_x, top_left_y:btm_right_y] = 255
+                    # Add the color version of the dot to the list to send off, along with some coordinates.
+                    dots.append(rgb_image[top_left_x:btm_right_x, top_left_y:btm_right_y])
+                    dots_location.append([top_left_x, top_left_y])
+                else:
+                    # Dot was too close to the image border to be useable
+                    pass
+    return dots, dots_location
diff --git a/utils/raspberrypi/ctt/ctt_image_load.py b/utils/raspberrypi/ctt/ctt_image_load.py
index d76ece73..ea5fa360 100644
--- a/utils/raspberrypi/ctt/ctt_image_load.py
+++ b/utils/raspberrypi/ctt/ctt_image_load.py
@@ -350,6 +350,8 @@  def dng_load_image(Cam, im_str):
         c2 = np.left_shift(raw_data[1::2, 0::2].astype(np.int64), shift)
         c3 = np.left_shift(raw_data[1::2, 1::2].astype(np.int64), shift)
         Img.channels = [c0, c1, c2, c3]
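+        # Also keep a demosaiced RGB rendering (postprocess()) and the image
+        # size information; the CAC dot finder works on this RGB image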
+        Img.rgb = raw_im.postprocess()
+        Img.sizes = raw_im.sizes
 
     except Exception:
         print("\nERROR: failed to load DNG file", im_str)
diff --git a/utils/raspberrypi/ctt/ctt_log.txt b/utils/raspberrypi/ctt/ctt_log.txt
new file mode 100644
index 00000000..682e24e4
--- /dev/null
+++ b/utils/raspberrypi/ctt/ctt_log.txt
@@ -0,0 +1,31 @@ 
+Log created : Fri Aug 25 17:02:58 2023
+
+----------------------------------------------------------------------
+User Arguments
+----------------------------------------------------------------------
+
+Json file output: output.json
+Calibration images directory: ../ctt/
+No configuration file input... using default options
+No log file path input... using default: ctt_log.txt
+
+----------------------------------------------------------------------
+Image Loading
+----------------------------------------------------------------------
+
+Directory: ../ctt/
+Files found: 1
+
+Image: alsc_3000k_0.dng
+Identified as an ALSC image
+Colour temperature: 3000 K
+
+Images found:
+Macbeth : 0
+ALSC : 1 
+CAC: 0 
+
+Camera metadata
+ERROR: No usable macbeth chart images found
+
+----------------------------------------------------------------------
diff --git a/utils/raspberrypi/ctt/ctt_pisp.py b/utils/raspberrypi/ctt/ctt_pisp.py
index f837e062..862587a6 100755
--- a/utils/raspberrypi/ctt/ctt_pisp.py
+++ b/utils/raspberrypi/ctt/ctt_pisp.py
@@ -197,6 +197,8 @@  json_template = {
     },
     "rpi.ccm": {
     },
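+    # Chromatic aberration correction; the LUTs are filled in by the CTT's cac_cal()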
+    "rpi.cac": {
+    },
     "rpi.sharpen": {
 	"threshold": 0.25,
 	"limit": 1.0,
diff --git a/utils/raspberrypi/ctt/ctt_pretty_print_json.py b/utils/raspberrypi/ctt/ctt_pretty_print_json.py
index 5d16b2a6..d3bd7d97 100755
--- a/utils/raspberrypi/ctt/ctt_pretty_print_json.py
+++ b/utils/raspberrypi/ctt/ctt_pretty_print_json.py
@@ -24,6 +24,10 @@  class Encoder(json.JSONEncoder):
             'luminance_lut': 16,
             'ct_curve': 3,
             'ccm': 3,
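+            # The CAC LUTs are 9x9 grids, so print 9 elements per line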
+            'lut_rx': 9,
+            'lut_bx': 9,
+            'lut_by': 9,
+            'lut_ry': 9,
             'gamma_curve': 2,
             'y_target': 2,
             'prior': 2
diff --git a/utils/raspberrypi/ctt/ctt_run.py b/utils/raspberrypi/ctt/ctt_run.py
index 0c85d7db..074136a1 100755
--- a/utils/raspberrypi/ctt/ctt_run.py
+++ b/utils/raspberrypi/ctt/ctt_run.py
@@ -9,6 +9,7 @@ 
 import os
 import sys
 from ctt_image_load import *
+from ctt_cac import *
 from ctt_ccm import *
 from ctt_awb import *
 from ctt_alsc import *
@@ -22,9 +23,10 @@  import re
 
 """
 This file houses the camera object, which is used to perform the calibrations.
-The camera object houses all the calibration images as attributes in two lists:
+The camera object houses all the calibration images as attributes in three lists:
     - imgs (macbeth charts)
     - imgs_alsc (alsc correction images)
+    - imgs_cac (cac correction images)
 Various calibrations are methods of the camera object, and the output is stored
 in a dictionary called self.json.
 Once all the caibration has been completed, the Camera.json is written into a
@@ -73,16 +75,15 @@  class Camera:
             self.path = ''
         self.imgs = []
         self.imgs_alsc = []
+        self.imgs_cac = []
         self.log = 'Log created : ' + time.asctime(time.localtime(time.time()))
         self.log_separator = '\n'+'-'*70+'\n'
         self.jf = jfile
         """
         initial json dict populated by uncalibrated values
         """
-
         self.json = json
 
-
     """
     Perform colour correction calibrations by comparing macbeth patch colours
     to standard macbeth chart colours.
@@ -146,6 +147,62 @@  class Camera:
         self.log += '\nCCM calibration written to json file'
         print('Finished CCM calibration')
 
+    """
+    Perform chromatic aberration correction using multiple dot images.
+    """
+    def cac_cal(self, do_alsc_colour):
+        if 'rpi.cac' in self.disable:
+            return 1
+        print('\nStarting CAC calibration')
+        self.log_new_sec('CAC')
+        """
+        check if cac images have been taken
+        """
+        if len(self.imgs_cac) == 0:
+            print('\nError:\nNo cac calibration images found')
+            self.log += '\nERROR: No CAC calibration images found!'
+            self.log += '\nCAC calibration aborted!'
+            return 1
+        """
+        if image is greyscale then CAC makes no sense
+        """
+        if self.grey:
+            print('\nERROR: Can\'t do CAC on greyscale image!')
+            self.log += '\nERROR: Cannot perform CAC calibration '
+            self.log += 'on greyscale image!\nCAC aborted!'
+            del self.json['rpi.cac']
+            return 0
+        """
+        Check whether the camera is colour (do_alsc_colour); CAC is only
+        performed for colour sensors
+        """
+        if do_alsc_colour:
+            """
+            Here we have a color sensor. Perform cac
+            """
+            try:
+                cacs = cac(self)
+            except ArithmeticError:
+                print('ERROR: Matrix is singular!\nTake new pictures and try again...')
+                self.log += '\nERROR: Singular matrix encountered during fit!'
+                self.log += '\nCAC aborted!'
+                return 1
+        else:
+            """
+            case where config options suggest greyscale camera. No point in doing CAC
+            """
+            self.log += '\nWARNING: Config suggests a greyscale camera.'
+            self.log += '\nCAC calibration aborted!'
+            del self.json['rpi.cac']
+            return 0
+
+        """
+        Write output to json
+        """
+        self.json['rpi.cac']['cac'] = cacs
+        self.log += '\nCAC calibration written to json file'
+        print('Finished CAC calibration')
+
+
     """
     Auto white balance calibration produces a colour curve for
     various colour temperatures, as well as providing a maximum 'wiggle room'
@@ -516,6 +573,16 @@  class Camera:
                     self.log += '\nWARNING: Error reading colour temperature'
                     self.log += '\nImage discarded!'
                     print('DISCARDED')
+            elif 'cac' in filename:
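+                # Filenames containing "cac" mark chromatic aberration tuning images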
+                Img = load_image(self, address, mac=False)
+                self.log += '\nIdentified as a CAC image'
+                Img.name = filename
+                self.log += '\nColour temperature: {} K'.format(col)
+                self.imgs_cac.append(Img)
+                if blacklevel != -1:
+                    Img.blacklevel_16 = blacklevel
+                print(img_suc_msg)
+                continue
             else:
                 self.log += '\nIdentified as macbeth chart image'
                 """
@@ -561,6 +628,7 @@  class Camera:
         self.log += '\n\nImages found:'
         self.log += '\nMacbeth : {}'.format(len(self.imgs))
         self.log += '\nALSC : {} '.format(len(self.imgs_alsc))
+        self.log += '\nCAC: {} '.format(len(self.imgs_cac))
         self.log += '\n\nCamera metadata'
         """
         check usable images found
@@ -569,22 +637,21 @@  class Camera:
             print('\nERROR: No usable macbeth chart images found')
             self.log += '\nERROR: No usable macbeth chart images found'
             return 0
-        elif len(self.imgs) == 0 and len(self.imgs_alsc) == 0:
+        elif len(self.imgs) == 0 and len(self.imgs_alsc) == 0 and len(self.imgs_cac) == 0:
             print('\nERROR: No usable images found')
             self.log += '\nERROR: No usable images found'
             return 0
         """
         Double check that every image has come from the same camera...
         """
-        all_imgs = self.imgs + self.imgs_alsc
+        all_imgs = self.imgs + self.imgs_alsc + self.imgs_cac
         camNames = list(set([Img.camName for Img in all_imgs]))
         patterns = list(set([Img.pattern for Img in all_imgs]))
         sigbitss = list(set([Img.sigbits for Img in all_imgs]))
         blacklevels = list(set([Img.blacklevel_16 for Img in all_imgs]))
         sizes = list(set([(Img.w, Img.h) for Img in all_imgs]))
 
         if len(camNames) == 1 and len(patterns) == 1 and len(sigbitss) == 1 and \
            len(blacklevels) == 1 and len(sizes) == 1:
             self.grey = (patterns[0] == 128)
             self.blacklevel_16 = blacklevels[0]
             self.log += '\nName: {}'.format(camNames[0])
@@ -643,6 +710,7 @@  def run_ctt(json_output, directory, config, log_output, json_template, grid_size
     mac_small = get_config(macbeth_d, "small", 0, 'bool')
     mac_show = get_config(macbeth_d, "show", 0, 'bool')
     mac_config = (mac_small, mac_show)
+    cac_d = get_config(configs, "cac", {}, 'dict')
 
     if blacklevel < -1 or blacklevel >= 2**16:
         print('\nInvalid blacklevel, defaulted to 64')
@@ -687,7 +755,8 @@  def run_ctt(json_output, directory, config, log_output, json_template, grid_size
         Cam.geq_cal()
         Cam.lux_cal()
         Cam.noise_cal()
-        Cam.cac_cal(do_alsc_colour)
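+        # Only tune CAC when the platform's tuning template includes an
+        # rpi.cac block (e.g. PiSP); other platforms skip this stage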
+        if "rpi.cac" in json_template:
+            Cam.cac_cal(do_alsc_colour)
         Cam.awb_cal(greyworld, do_alsc_colour, grid_size)
         Cam.ccm_cal(do_alsc_colour, grid_size)