[libcamera-devel,3/3] qcam: viewfinder_gl: Take color space into account for YUV rendering
diff mbox series

Message ID 20220829100414.28404-4-laurent.pinchart@ideasonboard.com
State Accepted
Headers show
Series
  • Misc color space plumbing improvements
Related show

Commit Message

Laurent Pinchart Aug. 29, 2022, 10:04 a.m. UTC
Update the YUV shaders and the viewfinder_gl to correctly take the
Y'CbCr encoding and the quantization range into account when rendering
YUV formats to RGB. Support for the primaries and transfer function will
be added in a subsequent step.

Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
---
 src/qcam/assets/shader/YUV_2_planes.frag | 27 ++++----
 src/qcam/assets/shader/YUV_3_planes.frag | 23 ++++---
 src/qcam/assets/shader/YUV_packed.frag   | 17 ++---
 src/qcam/viewfinder_gl.cpp               | 79 +++++++++++++++++++++++-
 src/qcam/viewfinder_gl.h                 |  2 +
 5 files changed, 115 insertions(+), 33 deletions(-)

Comments

Umang Jain Aug. 30, 2022, 2:13 p.m. UTC | #1
Hi Laurent,

On 8/29/22 3:34 PM, Laurent Pinchart via libcamera-devel wrote:
> Update the YUV shaders and the viewfinder_gl to correctly take the
> Y'CbCr encoding and the quantization range into account when rendering
> YUV formats to RGB. Support for the primaries and transfer function will
> be added in a subsequent step.
>
> Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>

Patch looks good and straightforward for most parts, however a few 
specifics are still a bit unclear to me.
> ---
>   src/qcam/assets/shader/YUV_2_planes.frag | 27 ++++----
>   src/qcam/assets/shader/YUV_3_planes.frag | 23 ++++---
>   src/qcam/assets/shader/YUV_packed.frag   | 17 ++---
>   src/qcam/viewfinder_gl.cpp               | 79 +++++++++++++++++++++++-
>   src/qcam/viewfinder_gl.h                 |  2 +
>   5 files changed, 115 insertions(+), 33 deletions(-)
>
> diff --git a/src/qcam/assets/shader/YUV_2_planes.frag b/src/qcam/assets/shader/YUV_2_planes.frag
> index 254463c05cac..da8dbcc5f801 100644
> --- a/src/qcam/assets/shader/YUV_2_planes.frag
> +++ b/src/qcam/assets/shader/YUV_2_planes.frag
> @@ -13,27 +13,30 @@ varying vec2 textureOut;
>   uniform sampler2D tex_y;
>   uniform sampler2D tex_u;
>   
> +const mat3 yuv2rgb_matrix = mat3(
> +	YUV2RGB_MATRIX
> +);
> +
> +const vec3 yuv2rgb_offset = vec3(
> +	YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0

I understood the YUV2RGB_Y_OFFSET #define but don't understand where 
other values come from (or why they exist :D)

Maybe I should start learning shaders programming ;-)

Reviewed-by: Umang Jain <umang.jain@ideasonboard.com>
> +);
> +
>   void main(void)
>   {
>   	vec3 yuv;
> -	vec3 rgb;
> -	mat3 yuv2rgb_bt601_mat = mat3(
> -		vec3(1.164,  1.164, 1.164),
> -		vec3(0.000, -0.392, 2.017),
> -		vec3(1.596, -0.813, 0.000)
> -	);
>   
> -	yuv.x = texture2D(tex_y, textureOut).r - 0.063;
> +	yuv.x = texture2D(tex_y, textureOut).r;
>   #if defined(YUV_PATTERN_UV)
> -	yuv.y = texture2D(tex_u, textureOut).r - 0.500;
> -	yuv.z = texture2D(tex_u, textureOut).a - 0.500;
> +	yuv.y = texture2D(tex_u, textureOut).r;
> +	yuv.z = texture2D(tex_u, textureOut).a;
>   #elif defined(YUV_PATTERN_VU)
> -	yuv.y = texture2D(tex_u, textureOut).a - 0.500;
> -	yuv.z = texture2D(tex_u, textureOut).r - 0.500;
> +	yuv.y = texture2D(tex_u, textureOut).a;
> +	yuv.z = texture2D(tex_u, textureOut).r;
>   #else
>   #error Invalid pattern
>   #endif
>   
> -	rgb = yuv2rgb_bt601_mat * yuv;
> +	vec3 rgb = yuv2rgb_matrix * (yuv - yuv2rgb_offset);
> +
>   	gl_FragColor = vec4(rgb, 1.0);
>   }
> diff --git a/src/qcam/assets/shader/YUV_3_planes.frag b/src/qcam/assets/shader/YUV_3_planes.frag
> index 2be74b5d2a9d..e754129d74d1 100644
> --- a/src/qcam/assets/shader/YUV_3_planes.frag
> +++ b/src/qcam/assets/shader/YUV_3_planes.frag
> @@ -14,20 +14,23 @@ uniform sampler2D tex_y;
>   uniform sampler2D tex_u;
>   uniform sampler2D tex_v;
>   
> +const mat3 yuv2rgb_matrix = mat3(
> +	YUV2RGB_MATRIX
> +);
> +
> +const vec3 yuv2rgb_offset = vec3(
> +	YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
> +);
> +
>   void main(void)
>   {
>   	vec3 yuv;
> -	vec3 rgb;
> -	mat3 yuv2rgb_bt601_mat = mat3(
> -		vec3(1.164,  1.164, 1.164),
> -		vec3(0.000, -0.392, 2.017),
> -		vec3(1.596, -0.813, 0.000)
> -	);
>   
> -	yuv.x = texture2D(tex_y, textureOut).r - 0.063;
> -	yuv.y = texture2D(tex_u, textureOut).r - 0.500;
> -	yuv.z = texture2D(tex_v, textureOut).r - 0.500;
> +	yuv.x = texture2D(tex_y, textureOut).r;
> +	yuv.y = texture2D(tex_u, textureOut).r;
> +	yuv.z = texture2D(tex_v, textureOut).r;
> +
> +	vec3 rgb = yuv2rgb_matrix * (yuv - yuv2rgb_offset);
>   
> -	rgb = yuv2rgb_bt601_mat * yuv;
>   	gl_FragColor = vec4(rgb, 1.0);
>   }
> diff --git a/src/qcam/assets/shader/YUV_packed.frag b/src/qcam/assets/shader/YUV_packed.frag
> index d6efd4ce92a9..b9ef9d41beae 100644
> --- a/src/qcam/assets/shader/YUV_packed.frag
> +++ b/src/qcam/assets/shader/YUV_packed.frag
> @@ -14,15 +14,16 @@ varying vec2 textureOut;
>   uniform sampler2D tex_y;
>   uniform vec2 tex_step;
>   
> +const mat3 yuv2rgb_matrix = mat3(
> +	YUV2RGB_MATRIX
> +);
> +
> +const vec3 yuv2rgb_offset = vec3(
> +	YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
> +);
> +
>   void main(void)
>   {
> -	mat3 yuv2rgb_bt601_mat = mat3(
> -		vec3(1.164,  1.164, 1.164),
> -		vec3(0.000, -0.392, 2.017),
> -		vec3(1.596, -0.813, 0.000)
> -	);
> -	vec3 yuv2rgb_bt601_offset = vec3(0.063, 0.500, 0.500);
> -
>   	/*
>   	 * The sampler won't interpolate the texture correctly along the X axis,
>   	 * as each RGBA pixel effectively stores two pixels. We thus need to
> @@ -76,7 +77,7 @@ void main(void)
>   
>   	float y = mix(y_left, y_right, step(0.5, f_x));
>   
> -	vec3 rgb = yuv2rgb_bt601_mat * (vec3(y, uv) - yuv2rgb_bt601_offset);
> +	vec3 rgb = yuv2rgb_matrix * (vec3(y, uv) - yuv2rgb_offset);
>   
>   	gl_FragColor = vec4(rgb, 1.0);
>   }
> diff --git a/src/qcam/viewfinder_gl.cpp b/src/qcam/viewfinder_gl.cpp
> index ec295b6de0dd..e2aa24703ff0 100644
> --- a/src/qcam/viewfinder_gl.cpp
> +++ b/src/qcam/viewfinder_gl.cpp
> @@ -7,9 +7,12 @@
>   
>   #include "viewfinder_gl.h"
>   
> +#include <array>
> +
>   #include <QByteArray>
>   #include <QFile>
>   #include <QImage>
> +#include <QStringList>
>   
>   #include <libcamera/formats.h>
>   
> @@ -56,7 +59,8 @@ static const QList<libcamera::PixelFormat> supportedFormats{
>   };
>   
>   ViewFinderGL::ViewFinderGL(QWidget *parent)
> -	: QOpenGLWidget(parent), buffer_(nullptr), image_(nullptr),
> +	: QOpenGLWidget(parent), buffer_(nullptr),
> +	  colorSpace_(libcamera::ColorSpace::Raw), image_(nullptr),
>   	  vertexBuffer_(QOpenGLBuffer::VertexBuffer)
>   {
>   }
> @@ -72,10 +76,10 @@ const QList<libcamera::PixelFormat> &ViewFinderGL::nativeFormats() const
>   }
>   
>   int ViewFinderGL::setFormat(const libcamera::PixelFormat &format, const QSize &size,
> -			    [[maybe_unused]] const libcamera::ColorSpace &colorSpace,
> +			    const libcamera::ColorSpace &colorSpace,
>   			    unsigned int stride)
>   {
> -	if (format != format_) {
> +	if (format != format_ || colorSpace != colorSpace_) {
>   		/*
>   		 * If the fragment already exists, remove it and create a new
>   		 * one for the new format.
> @@ -89,7 +93,10 @@ int ViewFinderGL::setFormat(const libcamera::PixelFormat &format, const QSize &s
>   		if (!selectFormat(format))
>   			return -1;
>   
> +		selectColorSpace(colorSpace);
> +
>   		format_ = format;
> +		colorSpace_ = colorSpace;
>   	}
>   
>   	size_ = size;
> @@ -318,6 +325,72 @@ bool ViewFinderGL::selectFormat(const libcamera::PixelFormat &format)
>   	return ret;
>   }
>   
> +void ViewFinderGL::selectColorSpace(const libcamera::ColorSpace &colorSpace)
> +{
> +	std::array<double, 9> yuv2rgb;
> +
> +	/* OpenGL stores arrays in column-major order. */
> +	switch (colorSpace.ycbcrEncoding) {
> +	case libcamera::ColorSpace::YcbcrEncoding::None:
> +		yuv2rgb = {
> +			1.0000,  0.0000,  0.0000,
> +			0.0000,  1.0000,  0.0000,
> +			0.0000,  0.0000,  1.0000,
> +		};
> +		break;
> +
> +	case libcamera::ColorSpace::YcbcrEncoding::Rec601:
> +		yuv2rgb = {
> +			1.0000,  1.0000,  1.0000,
> +			0.0000, -0.3441,  1.7720,
> +			1.4020, -0.7141,  0.0000,
> +		};
> +		break;
> +
> +	case libcamera::ColorSpace::YcbcrEncoding::Rec709:
> +		yuv2rgb = {
> +			1.0000,  1.0000,  1.0000,
> +			0.0000, -0.1873,  1.8856,
> +			1.5748, -0.4681,  0.0000,
> +		};
> +		break;
> +
> +	case libcamera::ColorSpace::YcbcrEncoding::Rec2020:
> +		yuv2rgb = {
> +			1.0000,  1.0000,  1.0000,
> +			0.0000, -0.1646,  1.8814,
> +			1.4746, -0.5714,  0.0000,
> +		};
> +		break;
> +	}
> +
> +	double offset;
> +
> +	switch (colorSpace.range) {
> +	case libcamera::ColorSpace::Range::Full:
> +		offset = 0.0;
> +		break;
> +
> +	case libcamera::ColorSpace::Range::Limited:
> +		offset = 16.0;
> +
> +		for (unsigned int i = 0; i < 3; ++i)
> +			yuv2rgb[i] *= 255.0 / 219.0;
> +		for (unsigned int i = 4; i < 9; ++i)
> +			yuv2rgb[i] *= 255.0 / 224.0;
> +		break;
> +	}
> +
> +	QStringList matrix;
> +
> +	for (double coeff : yuv2rgb)
> +		matrix.append(QString::number(coeff, 'f'));
> +
> +	fragmentShaderDefines_.append("#define YUV2RGB_MATRIX " + matrix.join(", "));
> +	fragmentShaderDefines_.append(QString("#define YUV2RGB_Y_OFFSET %1")
> +		.arg(offset, 0, 'f', 1));
> +}
> +
>   bool ViewFinderGL::createVertexShader()
>   {
>   	/* Create Vertex Shader */
> diff --git a/src/qcam/viewfinder_gl.h b/src/qcam/viewfinder_gl.h
> index 798830a31cd2..68c2912df12f 100644
> --- a/src/qcam/viewfinder_gl.h
> +++ b/src/qcam/viewfinder_gl.h
> @@ -57,6 +57,7 @@ protected:
>   
>   private:
>   	bool selectFormat(const libcamera::PixelFormat &format);
> +	void selectColorSpace(const libcamera::ColorSpace &colorSpace);
>   
>   	void configureTexture(QOpenGLTexture &texture);
>   	bool createFragmentShader();
> @@ -67,6 +68,7 @@ private:
>   	/* Captured image size, format and buffer */
>   	libcamera::FrameBuffer *buffer_;
>   	libcamera::PixelFormat format_;
> +	libcamera::ColorSpace colorSpace_;
>   	QSize size_;
>   	unsigned int stride_;
>   	Image *image_;
Laurent Pinchart Aug. 30, 2022, 5:27 p.m. UTC | #2
Hi Umang,

On Tue, Aug 30, 2022 at 07:43:12PM +0530, Umang Jain wrote:
> On 8/29/22 3:34 PM, Laurent Pinchart via libcamera-devel wrote:
> > Update the YUV shaders and the viewfinder_gl to correctly take the
> > Y'CbCr encoding and the quantization range into account when rendering
> > YUV formats to RGB. Support for the primaries and transfer function will
> > be added in a subsequent step.
> >
> > Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
> 
> Patch looks good and straight forward for most parts, however few 
> specifics are still a bit unclear to me
> 
> > ---
> >   src/qcam/assets/shader/YUV_2_planes.frag | 27 ++++----
> >   src/qcam/assets/shader/YUV_3_planes.frag | 23 ++++---
> >   src/qcam/assets/shader/YUV_packed.frag   | 17 ++---
> >   src/qcam/viewfinder_gl.cpp               | 79 +++++++++++++++++++++++-
> >   src/qcam/viewfinder_gl.h                 |  2 +
> >   5 files changed, 115 insertions(+), 33 deletions(-)
> >
> > diff --git a/src/qcam/assets/shader/YUV_2_planes.frag b/src/qcam/assets/shader/YUV_2_planes.frag
> > index 254463c05cac..da8dbcc5f801 100644
> > --- a/src/qcam/assets/shader/YUV_2_planes.frag
> > +++ b/src/qcam/assets/shader/YUV_2_planes.frag
> > @@ -13,27 +13,30 @@ varying vec2 textureOut;
> >   uniform sampler2D tex_y;
> >   uniform sampler2D tex_u;
> >   
> > +const mat3 yuv2rgb_matrix = mat3(
> > +	YUV2RGB_MATRIX
> > +);
> > +
> > +const vec3 yuv2rgb_offset = vec3(
> > +	YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
> 
> I understood the YUV2RGB_Y_OFFSET #define but don't understand where 
> other values come from (or why they exist :D)

The quantization of the Cb and Cr values in all relevant color spaces
(ITU-R BT.601, BT.709, BT.2020, ...) add an offset of 128 (for 8-bit
values). For instance, in BT.709, we have

D'Cb = INT[(224*E'Cb + 128)*2^(n-8)]

where D'Cb is the Cb signal after quantization, E'Cb the Cb signal
before quantization (in the [-0.5, 0.5] range), and n the number of
bits. INT[] denotes rounding to the closest integer.

The 224 multiplier creates a limited quantization range, following the
above formula, -0.5 will be quantized to INT[224 * -0.5 + 128] = 16, and
0.5 to INT[224 * 0.5 + 128] = 240. The values are then stored as 8-bit
unsigned integers in memory.

For full range quantization, the same applies, with a multiplier equal
to 255 instead of 224. [-0.5, 0.5] is thus mapped to [0, 255].

We need to apply the reverse quantization on D'Y, D'Cb and D'Cr in order
to get the original E'Y, E'Cb and E'Cr values (in the [0.0, 1.0] and
[-0.5, 0.5] ranges respectively for E'Y and E'C[br]). Starting with full
range, given

D'Cb = INT[(255*E'Cb + 128)] (for 8-bit data)

the inverse is given by

E'Cb = (D'Cb - 128) / 255

or

E'Cb = D'Cb / 255 - 128 / 255

OpenGL, when reading texture data through a floating point texture
sampler (which we do in the shader by calling texture2D on a sampler2D
variable), normalizes the values stored in memory ([0, 255]) to the
[0.0, 1.0] range. This means that the D'Cb value is already divided by
255 by the GPU. We only need to subtract 128 / 255 to get the original
E'Cb value.

In the limited quantization range case, we have

D'Cb = INT[(224*E'Cb + 128)] (for 8-bit data)

the inverse is given by

E'Cb = (D'Cb - 128) / 224

Let's introduce the 255 factor:

E'Cb = (D'Cb - 128) / 255 * 255 / 224

which can also be written as

E'Cb = (D'Cb / 255 - 128 / 255) * 255 / 224

We thus have

E'Cb(lim) = E'Cb(full) * 255 / 224

The shader doesn't include the 255 / 224 multiplier directly, it gets
included by the C++ code in the yuv2rgb matrix, and there's no need for
a different offset between the limited and full range quantization.

I hope this helps clarifying the implementation.

> Maybe I should start learning shaders programming ;-)
> 
> Reviewed-by: Umang Jain <umang.jain@ideasonboard.com>
> 
> > +);
> > +
> >   void main(void)
> >   {
> >   	vec3 yuv;
> > -	vec3 rgb;
> > -	mat3 yuv2rgb_bt601_mat = mat3(
> > -		vec3(1.164,  1.164, 1.164),
> > -		vec3(0.000, -0.392, 2.017),
> > -		vec3(1.596, -0.813, 0.000)
> > -	);
> >   
> > -	yuv.x = texture2D(tex_y, textureOut).r - 0.063;
> > +	yuv.x = texture2D(tex_y, textureOut).r;
> >   #if defined(YUV_PATTERN_UV)
> > -	yuv.y = texture2D(tex_u, textureOut).r - 0.500;
> > -	yuv.z = texture2D(tex_u, textureOut).a - 0.500;
> > +	yuv.y = texture2D(tex_u, textureOut).r;
> > +	yuv.z = texture2D(tex_u, textureOut).a;
> >   #elif defined(YUV_PATTERN_VU)
> > -	yuv.y = texture2D(tex_u, textureOut).a - 0.500;
> > -	yuv.z = texture2D(tex_u, textureOut).r - 0.500;
> > +	yuv.y = texture2D(tex_u, textureOut).a;
> > +	yuv.z = texture2D(tex_u, textureOut).r;
> >   #else
> >   #error Invalid pattern
> >   #endif
> >   
> > -	rgb = yuv2rgb_bt601_mat * yuv;
> > +	vec3 rgb = yuv2rgb_matrix * (vec3(y, uv) - yuv2rgb_offset);
> > +
> >   	gl_FragColor = vec4(rgb, 1.0);
> >   }
> > diff --git a/src/qcam/assets/shader/YUV_3_planes.frag b/src/qcam/assets/shader/YUV_3_planes.frag
> > index 2be74b5d2a9d..e754129d74d1 100644
> > --- a/src/qcam/assets/shader/YUV_3_planes.frag
> > +++ b/src/qcam/assets/shader/YUV_3_planes.frag
> > @@ -14,20 +14,23 @@ uniform sampler2D tex_y;
> >   uniform sampler2D tex_u;
> >   uniform sampler2D tex_v;
> >   
> > +const mat3 yuv2rgb_matrix = mat3(
> > +	YUV2RGB_MATRIX
> > +);
> > +
> > +const vec3 yuv2rgb_offset = vec3(
> > +	YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
> > +);
> > +
> >   void main(void)
> >   {
> >   	vec3 yuv;
> > -	vec3 rgb;
> > -	mat3 yuv2rgb_bt601_mat = mat3(
> > -		vec3(1.164,  1.164, 1.164),
> > -		vec3(0.000, -0.392, 2.017),
> > -		vec3(1.596, -0.813, 0.000)
> > -	);
> >   
> > -	yuv.x = texture2D(tex_y, textureOut).r - 0.063;
> > -	yuv.y = texture2D(tex_u, textureOut).r - 0.500;
> > -	yuv.z = texture2D(tex_v, textureOut).r - 0.500;
> > +	yuv.x = texture2D(tex_y, textureOut).r;
> > +	yuv.y = texture2D(tex_u, textureOut).r;
> > +	yuv.z = texture2D(tex_v, textureOut).r;
> > +
> > +	vec3 rgb = yuv2rgb_matrix * (vec3(y, uv) - yuv2rgb_offset);
> >   
> > -	rgb = yuv2rgb_bt601_mat * yuv;
> >   	gl_FragColor = vec4(rgb, 1.0);
> >   }
> > diff --git a/src/qcam/assets/shader/YUV_packed.frag b/src/qcam/assets/shader/YUV_packed.frag
> > index d6efd4ce92a9..b9ef9d41beae 100644
> > --- a/src/qcam/assets/shader/YUV_packed.frag
> > +++ b/src/qcam/assets/shader/YUV_packed.frag
> > @@ -14,15 +14,16 @@ varying vec2 textureOut;
> >   uniform sampler2D tex_y;
> >   uniform vec2 tex_step;
> >   
> > +const mat3 yuv2rgb_matrix = mat3(
> > +	YUV2RGB_MATRIX
> > +);
> > +
> > +const vec3 yuv2rgb_offset = vec3(
> > +	YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
> > +);
> > +
> >   void main(void)
> >   {
> > -	mat3 yuv2rgb_bt601_mat = mat3(
> > -		vec3(1.164,  1.164, 1.164),
> > -		vec3(0.000, -0.392, 2.017),
> > -		vec3(1.596, -0.813, 0.000)
> > -	);
> > -	vec3 yuv2rgb_bt601_offset = vec3(0.063, 0.500, 0.500);
> > -
> >   	/*
> >   	 * The sampler won't interpolate the texture correctly along the X axis,
> >   	 * as each RGBA pixel effectively stores two pixels. We thus need to
> > @@ -76,7 +77,7 @@ void main(void)
> >   
> >   	float y = mix(y_left, y_right, step(0.5, f_x));
> >   
> > -	vec3 rgb = yuv2rgb_bt601_mat * (vec3(y, uv) - yuv2rgb_bt601_offset);
> > +	vec3 rgb = yuv2rgb_matrix * (vec3(y, uv) - yuv2rgb_offset);
> >   
> >   	gl_FragColor = vec4(rgb, 1.0);
> >   }
> > diff --git a/src/qcam/viewfinder_gl.cpp b/src/qcam/viewfinder_gl.cpp
> > index ec295b6de0dd..e2aa24703ff0 100644
> > --- a/src/qcam/viewfinder_gl.cpp
> > +++ b/src/qcam/viewfinder_gl.cpp
> > @@ -7,9 +7,12 @@
> >   
> >   #include "viewfinder_gl.h"
> >   
> > +#include <array>
> > +
> >   #include <QByteArray>
> >   #include <QFile>
> >   #include <QImage>
> > +#include <QStringList>
> >   
> >   #include <libcamera/formats.h>
> >   
> > @@ -56,7 +59,8 @@ static const QList<libcamera::PixelFormat> supportedFormats{
> >   };
> >   
> >   ViewFinderGL::ViewFinderGL(QWidget *parent)
> > -	: QOpenGLWidget(parent), buffer_(nullptr), image_(nullptr),
> > +	: QOpenGLWidget(parent), buffer_(nullptr),
> > +	  colorSpace_(libcamera::ColorSpace::Raw), image_(nullptr),
> >   	  vertexBuffer_(QOpenGLBuffer::VertexBuffer)
> >   {
> >   }
> > @@ -72,10 +76,10 @@ const QList<libcamera::PixelFormat> &ViewFinderGL::nativeFormats() const
> >   }
> >   
> >   int ViewFinderGL::setFormat(const libcamera::PixelFormat &format, const QSize &size,
> > -			    [[maybe_unused]] const libcamera::ColorSpace &colorSpace,
> > +			    const libcamera::ColorSpace &colorSpace,
> >   			    unsigned int stride)
> >   {
> > -	if (format != format_) {
> > +	if (format != format_ || colorSpace != colorSpace_) {
> >   		/*
> >   		 * If the fragment already exists, remove it and create a new
> >   		 * one for the new format.
> > @@ -89,7 +93,10 @@ int ViewFinderGL::setFormat(const libcamera::PixelFormat &format, const QSize &s
> >   		if (!selectFormat(format))
> >   			return -1;
> >   
> > +		selectColorSpace(colorSpace);
> > +
> >   		format_ = format;
> > +		colorSpace_ = colorSpace;
> >   	}
> >   
> >   	size_ = size;
> > @@ -318,6 +325,72 @@ bool ViewFinderGL::selectFormat(const libcamera::PixelFormat &format)
> >   	return ret;
> >   }
> >   
> > +void ViewFinderGL::selectColorSpace(const libcamera::ColorSpace &colorSpace)
> > +{
> > +	std::array<double, 9> yuv2rgb;
> > +
> > +	/* OpenGL stores arrays in column-major order. */
> > +	switch (colorSpace.ycbcrEncoding) {
> > +	case libcamera::ColorSpace::YcbcrEncoding::None:
> > +		yuv2rgb = {
> > +			1.0000,  0.0000,  0.0000,
> > +			0.0000,  1.0000,  0.0000,
> > +			0.0000,  0.0000,  1.0000,
> > +		};
> > +		break;
> > +
> > +	case libcamera::ColorSpace::YcbcrEncoding::Rec601:
> > +		yuv2rgb = {
> > +			1.0000,  1.0000,  1.0000,
> > +			0.0000, -0.3441,  1.7720,
> > +			1.4020, -0.7141,  0.0000,
> > +		};
> > +		break;
> > +
> > +	case libcamera::ColorSpace::YcbcrEncoding::Rec709:
> > +		yuv2rgb = {
> > +			1.0000,  1.0000,  1.0000,
> > +			0.0000, -0.1873,  1.8856,
> > +			1.5748, -0.4681,  0.0000,
> > +		};
> > +		break;
> > +
> > +	case libcamera::ColorSpace::YcbcrEncoding::Rec2020:
> > +		yuv2rgb = {
> > +			1.0000,  1.0000,  1.0000,
> > +			0.0000, -0.1646,  1.8814,
> > +			1.4746, -0.5714,  0.0000,
> > +		};
> > +		break;
> > +	}
> > +
> > +	double offset;
> > +
> > +	switch (colorSpace.range) {
> > +	case libcamera::ColorSpace::Range::Full:
> > +		offset = 0.0;
> > +		break;
> > +
> > +	case libcamera::ColorSpace::Range::Limited:
> > +		offset = 16.0;
> > +
> > +		for (unsigned int i = 0; i < 3; ++i)
> > +			yuv2rgb[i] *= 255.0 / 219.0;
> > +		for (unsigned int i = 4; i < 9; ++i)
> > +			yuv2rgb[i] *= 255.0 / 224.0;
> > +		break;
> > +	}
> > +
> > +	QStringList matrix;
> > +
> > +	for (double coeff : yuv2rgb)
> > +		matrix.append(QString::number(coeff, 'f'));
> > +
> > +	fragmentShaderDefines_.append("#define YUV2RGB_MATRIX " + matrix.join(", "));
> > +	fragmentShaderDefines_.append(QString("#define YUV2RGB_Y_OFFSET %1")
> > +		.arg(offset, 0, 'f', 1));
> > +}
> > +
> >   bool ViewFinderGL::createVertexShader()
> >   {
> >   	/* Create Vertex Shader */
> > diff --git a/src/qcam/viewfinder_gl.h b/src/qcam/viewfinder_gl.h
> > index 798830a31cd2..68c2912df12f 100644
> > --- a/src/qcam/viewfinder_gl.h
> > +++ b/src/qcam/viewfinder_gl.h
> > @@ -57,6 +57,7 @@ protected:
> >   
> >   private:
> >   	bool selectFormat(const libcamera::PixelFormat &format);
> > +	void selectColorSpace(const libcamera::ColorSpace &colorSpace);
> >   
> >   	void configureTexture(QOpenGLTexture &texture);
> >   	bool createFragmentShader();
> > @@ -67,6 +68,7 @@ private:
> >   	/* Captured image size, format and buffer */
> >   	libcamera::FrameBuffer *buffer_;
> >   	libcamera::PixelFormat format_;
> > +	libcamera::ColorSpace colorSpace_;
> >   	QSize size_;
> >   	unsigned int stride_;
> >   	Image *image_;
Kunal Agarwal Aug. 31, 2022, 10:05 a.m. UTC | #3
Hi Laurent and Umang,

> Hi Umang,
> The quantization of the Cb and Cr values in all relevant color spaces
> (ITU-R BT.601, BT.709, BT.2020, ...) add an offset of 128 (for 8-bit
> values). For instance, in BT.709, we have
>
> D'Cb = INT[(224*E'Cb + 128)*2^(n-8)]
>
> where D'Cb is the Cb signal after quantization, E'Cb the Cb signal
> before quantization (in the [-0.5, 0.5] range), and n the number of
> bits). INT[] denotes rounding to the closest integer.
>
> The 224 multiplier creates a limited quantization range, following the
> above formula, -0.5 will be quantized to INT[224 * -0.5 + 128] = 16, and
> 0.5 to INT[224 * 0.5 + 128] = 240. The values are then stored as 8-bit
> unsigned integers in memory.
>
> For full range quantization, the same applies, with a multiplier equal
> to 255 instead of 224. [-0.5, 0.5] is thus mapped to [0, 255].
>
> We need to apply the reverse quantization on D'Y, D'Cb and D'Cr in order
> to get the original E'Y, E'Cb and E'Cr values (in the [0.0, 1.0] and
> [-0.5, 0.5] ranges respectively for E'Y and E'C[br]. Starting with full
> range, given
>
> D'Cb = INT[(255*E'Cb + 128)] (for 8-bit data)
>
> the inverse is given by
>
> E'Cb = (D'Cb - 128) / 255
>
> or
>
> E'Cb = D'Cb / 255 - 128 / 255
>
> OpenGL, when reading texture data through a floating point texture
> sampler (which we do in the shader by calling texture2D on a sampler2D
> variable), normalizes the values stored in memory ([0, 255]) to the
> [0.0, 1.0] range. This means that the D'Cb value is already divided by
> 255 by the GPU. We only need to subtract 128 / 255 to get the original
> E'Cb value.
>
> In the limited quantization range case, we have
>
> D'Cb = INT[(224*E'Cb + 128)] (for 8-bit data)
>
> the inverse is given by
>
> E'Cb = (D'Cb - 128) / 224
>
> Let's introduce the 255 factor:
>
> E'Cb = (D'Cb - 128) / 255 * 255 / 224
>
> which can also be written as
>
> E'Cb = (D'Cb / 255 - 128 / 255) * 255 / 224
>
> We thus have
>
> E'Cb(lim) = E'Cb(full) * 255 / 224
>
> The shader doesn't include the 255 / 224 multiplier directly, it gets
> included by the C++ code in the yuv2rgb matrix, and there's no need for
> a different offset between the limited and full range quantization.
>
> I hope this helps clarifying the implementation.
> --
> Regards,
>
> Laurent Pinchart

I had gone through this conversion on multiple resources.
The implementation looks correct.

Reviewed-by: Kunal Agarwal <kunalagarwal1072002@gmail.com>

Regards,

Kunal Agarwal


On Tue, Aug 30, 2022 at 10:57 PM Laurent Pinchart via libcamera-devel <
libcamera-devel@lists.libcamera.org> wrote:

> Hi Umang,
>
> On Tue, Aug 30, 2022 at 07:43:12PM +0530, Umang Jain wrote:
> > On 8/29/22 3:34 PM, Laurent Pinchart via libcamera-devel wrote:
> > > Update the YUV shaders and the viewfinder_gl to correctly take the
> > > Y'CbCr encoding and the quantization range into account when rendering
> > > YUV formats to RGB. Support for the primaries and transfer function
> will
> > > be added in a subsequent step.
> > >
> > > Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
> >
> > Patch looks good and straight forward for most parts, however few
> > specifics are still a bit unclear to me
> >
> > > ---
> > >   src/qcam/assets/shader/YUV_2_planes.frag | 27 ++++----
> > >   src/qcam/assets/shader/YUV_3_planes.frag | 23 ++++---
> > >   src/qcam/assets/shader/YUV_packed.frag   | 17 ++---
> > >   src/qcam/viewfinder_gl.cpp               | 79
> +++++++++++++++++++++++-
> > >   src/qcam/viewfinder_gl.h                 |  2 +
> > >   5 files changed, 115 insertions(+), 33 deletions(-)
> > >
> > > diff --git a/src/qcam/assets/shader/YUV_2_planes.frag
> b/src/qcam/assets/shader/YUV_2_planes.frag
> > > index 254463c05cac..da8dbcc5f801 100644
> > > --- a/src/qcam/assets/shader/YUV_2_planes.frag
> > > +++ b/src/qcam/assets/shader/YUV_2_planes.frag
> > > @@ -13,27 +13,30 @@ varying vec2 textureOut;
> > >   uniform sampler2D tex_y;
> > >   uniform sampler2D tex_u;
> > >
> > > +const mat3 yuv2rgb_matrix = mat3(
> > > +   YUV2RGB_MATRIX
> > > +);
> > > +
> > > +const vec3 yuv2rgb_offset = vec3(
> > > +   YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
> >
> > I understood the YUV2RGB_Y_OFFSET #define but don't understand where
> > other values come from (or why they exist :D)
>
> The quantization of the Cb and Cr values in all relevant color spaces
> (ITU-R BT.601, BT.709, BT.2020, ...) add an offset of 128 (for 8-bit
> values). For instance, in BT.709, we have
>
> D'Cb = INT[(224*E'Cb + 128)*2^(n-8)]
>
> where D'Cb is the Cb signal after quantization, E'Cb the Cb signal
> before quantization (in the [-0.5, 0.5] range), and n the number of
> bits). INT[] denotes rounding to the closest integer.
>
> The 224 multiplier creates a limited quantization range, following the
> above formula, -0.5 will be quantized to INT[224 * -0.5 + 128] = 16, and
> 0.5 to INT[224 * 0.5 + 128] = 240. The values are then stored as 8-bit
> unsigned integers in memory.
>
> For full range quantization, the same applies, with a multiplier equal
> to 255 instead of 224. [-0.5, 0.5] is thus mapped to [0, 255].
>
> We need to apply the reverse quantization on D'Y, D'Cb and D'Cr in order
> to get the original E'Y, E'Cb and E'Cr values (in the [0.0, 1.0] and
> [-0.5, 0.5] ranges respectively for E'Y and E'C[br]. Starting with full
> range, given
>
> D'Cb = INT[(255*E'Cb + 128)] (for 8-bit data)
>
> the inverse is given by
>
> E'Cb = (D'Cb - 128) / 255
>
> or
>
> E'Cb = D'Cb / 255 - 128 / 255
>
> OpenGL, when reading texture data through a floating point texture
> sampler (which we do in the shader by calling texture2D on a sampler2D
> variable), normalizes the values stored in memory ([0, 255]) to the
> [0.0, 1.0] range. This means that the D'Cb value is already divided by
> 255 by the GPU. We only need to subtract 128 / 255 to get the original
> E'Cb value.
>
> In the limited quantization range case, we have
>
> D'Cb = INT[(224*E'Cb + 128)] (for 8-bit data)
>
> the inverse is given by
>
> E'Cb = (D'Cb - 128) / 224
>
> Let's introduce the 255 factor:
>
> E'Cb = (D'Cb - 128) / 255 * 255 / 224
>
> which can also be written as
>
> E'Cb = (D'Cb / 255 - 128 / 255) * 255 / 224
>
> We thus have
>
> E'Cb(lim) = E'Cb(full) * 255 / 224
>
> The shader doesn't include the 255 / 224 multiplier directly, it gets
> included by the C++ code in the yuv2rgb matrix, and there's no need for
> a different offset between the limited and full range quantization.
>
> I hope this helps clarifying the implementation.
>
> > Maybe I should start learning shaders programming ;-)
> >
> > Reviewed-by: Umang Jain <umang.jain@ideasonboard.com>
> >
> > > +);
> > > +
> > >   void main(void)
> > >   {
> > >     vec3 yuv;
> > > -   vec3 rgb;
> > > -   mat3 yuv2rgb_bt601_mat = mat3(
> > > -           vec3(1.164,  1.164, 1.164),
> > > -           vec3(0.000, -0.392, 2.017),
> > > -           vec3(1.596, -0.813, 0.000)
> > > -   );
> > >
> > > -   yuv.x = texture2D(tex_y, textureOut).r - 0.063;
> > > +   yuv.x = texture2D(tex_y, textureOut).r;
> > >   #if defined(YUV_PATTERN_UV)
> > > -   yuv.y = texture2D(tex_u, textureOut).r - 0.500;
> > > -   yuv.z = texture2D(tex_u, textureOut).a - 0.500;
> > > +   yuv.y = texture2D(tex_u, textureOut).r;
> > > +   yuv.z = texture2D(tex_u, textureOut).a;
> > >   #elif defined(YUV_PATTERN_VU)
> > > -   yuv.y = texture2D(tex_u, textureOut).a - 0.500;
> > > -   yuv.z = texture2D(tex_u, textureOut).r - 0.500;
> > > +   yuv.y = texture2D(tex_u, textureOut).a;
> > > +   yuv.z = texture2D(tex_u, textureOut).r;
> > >   #else
> > >   #error Invalid pattern
> > >   #endif
> > >
> > > -   rgb = yuv2rgb_bt601_mat * yuv;
> > > +   vec3 rgb = yuv2rgb_matrix * (vec3(y, uv) - yuv2rgb_offset);
> > > +
> > >     gl_FragColor = vec4(rgb, 1.0);
> > >   }
> > > diff --git a/src/qcam/assets/shader/YUV_3_planes.frag
> b/src/qcam/assets/shader/YUV_3_planes.frag
> > > index 2be74b5d2a9d..e754129d74d1 100644
> > > --- a/src/qcam/assets/shader/YUV_3_planes.frag
> > > +++ b/src/qcam/assets/shader/YUV_3_planes.frag
> > > @@ -14,20 +14,23 @@ uniform sampler2D tex_y;
> > >   uniform sampler2D tex_u;
> > >   uniform sampler2D tex_v;
> > >
> > > +const mat3 yuv2rgb_matrix = mat3(
> > > +   YUV2RGB_MATRIX
> > > +);
> > > +
> > > +const vec3 yuv2rgb_offset = vec3(
> > > +   YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
> > > +);
> > > +
> > >   void main(void)
> > >   {
> > >     vec3 yuv;
> > > -   vec3 rgb;
> > > -   mat3 yuv2rgb_bt601_mat = mat3(
> > > -           vec3(1.164,  1.164, 1.164),
> > > -           vec3(0.000, -0.392, 2.017),
> > > -           vec3(1.596, -0.813, 0.000)
> > > -   );
> > >
> > > -   yuv.x = texture2D(tex_y, textureOut).r - 0.063;
> > > -   yuv.y = texture2D(tex_u, textureOut).r - 0.500;
> > > -   yuv.z = texture2D(tex_v, textureOut).r - 0.500;
> > > +   yuv.x = texture2D(tex_y, textureOut).r;
> > > +   yuv.y = texture2D(tex_u, textureOut).r;
> > > +   yuv.z = texture2D(tex_v, textureOut).r;
> > > +
> > > +   vec3 rgb = yuv2rgb_matrix * (vec3(y, uv) - yuv2rgb_offset);
> > >
> > > -   rgb = yuv2rgb_bt601_mat * yuv;
> > >     gl_FragColor = vec4(rgb, 1.0);
> > >   }
> > > diff --git a/src/qcam/assets/shader/YUV_packed.frag
> b/src/qcam/assets/shader/YUV_packed.frag
> > > index d6efd4ce92a9..b9ef9d41beae 100644
> > > --- a/src/qcam/assets/shader/YUV_packed.frag
> > > +++ b/src/qcam/assets/shader/YUV_packed.frag
> > > @@ -14,15 +14,16 @@ varying vec2 textureOut;
> > >   uniform sampler2D tex_y;
> > >   uniform vec2 tex_step;
> > >
> > > +const mat3 yuv2rgb_matrix = mat3(
> > > +   YUV2RGB_MATRIX
> > > +);
> > > +
> > > +const vec3 yuv2rgb_offset = vec3(
> > > +   YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
> > > +);
> > > +
> > >   void main(void)
> > >   {
> > > -   mat3 yuv2rgb_bt601_mat = mat3(
> > > -           vec3(1.164,  1.164, 1.164),
> > > -           vec3(0.000, -0.392, 2.017),
> > > -           vec3(1.596, -0.813, 0.000)
> > > -   );
> > > -   vec3 yuv2rgb_bt601_offset = vec3(0.063, 0.500, 0.500);
> > > -
> > >     /*
> > >      * The sampler won't interpolate the texture correctly along the X
> axis,
> > >      * as each RGBA pixel effectively stores two pixels. We thus need
> to
> > > @@ -76,7 +77,7 @@ void main(void)
> > >
> > >     float y = mix(y_left, y_right, step(0.5, f_x));
> > >
> > > -   vec3 rgb = yuv2rgb_bt601_mat * (vec3(y, uv) -
> yuv2rgb_bt601_offset);
> > > +   vec3 rgb = yuv2rgb_matrix * (vec3(y, uv) - yuv2rgb_offset);
> > >
> > >     gl_FragColor = vec4(rgb, 1.0);
> > >   }
> > > diff --git a/src/qcam/viewfinder_gl.cpp b/src/qcam/viewfinder_gl.cpp
> > > index ec295b6de0dd..e2aa24703ff0 100644
> > > --- a/src/qcam/viewfinder_gl.cpp
> > > +++ b/src/qcam/viewfinder_gl.cpp
> > > @@ -7,9 +7,12 @@
> > >
> > >   #include "viewfinder_gl.h"
> > >
> > > +#include <array>
> > > +
> > >   #include <QByteArray>
> > >   #include <QFile>
> > >   #include <QImage>
> > > +#include <QStringList>
> > >
> > >   #include <libcamera/formats.h>
> > >
> > > @@ -56,7 +59,8 @@ static const QList<libcamera::PixelFormat>
> supportedFormats{
> > >   };
> > >
> > >   ViewFinderGL::ViewFinderGL(QWidget *parent)
> > > -   : QOpenGLWidget(parent), buffer_(nullptr), image_(nullptr),
> > > +   : QOpenGLWidget(parent), buffer_(nullptr),
> > > +     colorSpace_(libcamera::ColorSpace::Raw), image_(nullptr),
> > >       vertexBuffer_(QOpenGLBuffer::VertexBuffer)
> > >   {
> > >   }
> > > @@ -72,10 +76,10 @@ const QList<libcamera::PixelFormat>
> &ViewFinderGL::nativeFormats() const
> > >   }
> > >
> > >   int ViewFinderGL::setFormat(const libcamera::PixelFormat &format,
> const QSize &size,
> > > -                       [[maybe_unused]] const libcamera::ColorSpace
> &colorSpace,
> > > +                       const libcamera::ColorSpace &colorSpace,
> > >                         unsigned int stride)
> > >   {
> > > -   if (format != format_) {
> > > +   if (format != format_ || colorSpace != colorSpace_) {
> > >             /*
> > >              * If the fragment already exists, remove it and create a
> new
> > >              * one for the new format.
> > > @@ -89,7 +93,10 @@ int ViewFinderGL::setFormat(const
> libcamera::PixelFormat &format, const QSize &s
> > >             if (!selectFormat(format))
> > >                     return -1;
> > >
> > > +           selectColorSpace(colorSpace);
> > > +
> > >             format_ = format;
> > > +           colorSpace_ = colorSpace;
> > >     }
> > >
> > >     size_ = size;
> > > @@ -318,6 +325,72 @@ bool ViewFinderGL::selectFormat(const
> libcamera::PixelFormat &format)
> > >     return ret;
> > >   }
> > >
> > > +void ViewFinderGL::selectColorSpace(const libcamera::ColorSpace
> &colorSpace)
> > > +{
> > > +   std::array<double, 9> yuv2rgb;
> > > +
> > > +   /* OpenGL stores arrays in column-major order. */
> > > +   switch (colorSpace.ycbcrEncoding) {
> > > +   case libcamera::ColorSpace::YcbcrEncoding::None:
> > > +           yuv2rgb = {
> > > +                   1.0000,  0.0000,  0.0000,
> > > +                   0.0000,  1.0000,  0.0000,
> > > +                   0.0000,  0.0000,  1.0000,
> > > +           };
> > > +           break;
> > > +
> > > +   case libcamera::ColorSpace::YcbcrEncoding::Rec601:
> > > +           yuv2rgb = {
> > > +                   1.0000,  1.0000,  1.0000,
> > > +                   0.0000, -0.3441,  1.7720,
> > > +                   1.4020, -0.7141,  0.0000,
> > > +           };
> > > +           break;
> > > +
> > > +   case libcamera::ColorSpace::YcbcrEncoding::Rec709:
> > > +           yuv2rgb = {
> > > +                   1.0000,  1.0000,  1.0000,
> > > +                   0.0000, -0.1873,  1.8856,
> > > +                   1.5748, -0.4681,  0.0000,
> > > +           };
> > > +           break;
> > > +
> > > +   case libcamera::ColorSpace::YcbcrEncoding::Rec2020:
> > > +           yuv2rgb = {
> > > +                   1.0000,  1.0000,  1.0000,
> > > +                   0.0000, -0.1646,  1.8814,
> > > +                   1.4746, -0.5714,  0.0000,
> > > +           };
> > > +           break;
> > > +   }
> > > +
> > > +   double offset;
> > > +
> > > +   switch (colorSpace.range) {
> > > +   case libcamera::ColorSpace::Range::Full:
> > > +           offset = 0.0;
> > > +           break;
> > > +
> > > +   case libcamera::ColorSpace::Range::Limited:
> > > +           offset = 16.0;
> > > +
> > > +           for (unsigned int i = 0; i < 3; ++i)
> > > +                   yuv2rgb[i] *= 255.0 / 219.0;
> > > +           for (unsigned int i = 4; i < 9; ++i)
> > > +                   yuv2rgb[i] *= 255.0 / 224.0;
> > > +           break;
> > > +   }
> > > +
> > > +   QStringList matrix;
> > > +
> > > +   for (double coeff : yuv2rgb)
> > > +           matrix.append(QString::number(coeff, 'f'));
> > > +
> > > +   fragmentShaderDefines_.append("#define YUV2RGB_MATRIX " +
> matrix.join(", "));
> > > +   fragmentShaderDefines_.append(QString("#define YUV2RGB_Y_OFFSET
> %1")
> > > +           .arg(offset, 0, 'f', 1));
> > > +}
> > > +
> > >   bool ViewFinderGL::createVertexShader()
> > >   {
> > >     /* Create Vertex Shader */
> > > diff --git a/src/qcam/viewfinder_gl.h b/src/qcam/viewfinder_gl.h
> > > index 798830a31cd2..68c2912df12f 100644
> > > --- a/src/qcam/viewfinder_gl.h
> > > +++ b/src/qcam/viewfinder_gl.h
> > > @@ -57,6 +57,7 @@ protected:
> > >
> > >   private:
> > >     bool selectFormat(const libcamera::PixelFormat &format);
> > > +   void selectColorSpace(const libcamera::ColorSpace &colorSpace);
> > >
> > >     void configureTexture(QOpenGLTexture &texture);
> > >     bool createFragmentShader();
> > > @@ -67,6 +68,7 @@ private:
> > >     /* Captured image size, format and buffer */
> > >     libcamera::FrameBuffer *buffer_;
> > >     libcamera::PixelFormat format_;
> > > +   libcamera::ColorSpace colorSpace_;
> > >     QSize size_;
> > >     unsigned int stride_;
> > >     Image *image_;
>
> --
> Regards,
>
> Laurent Pinchart
>
Umang Jain Sept. 2, 2022, 5:42 a.m. UTC | #4
Hi Laurent,

On 8/30/22 10:57 PM, Laurent Pinchart wrote:
> Hi Umang,
>
> On Tue, Aug 30, 2022 at 07:43:12PM +0530, Umang Jain wrote:
>> On 8/29/22 3:34 PM, Laurent Pinchart via libcamera-devel wrote:
>>> Update the YUV shaders and the viewfinder_gl to correctly take the
>>> Y'CbCr encoding and the quantization range into account when rendering
>>> YUV formats to RGB. Support for the primaries and transfer function will
>>> be added in a subsequent step.
>>>
>>> Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
>> Patch looks good and straight forward for most parts, however few
>> specifics are still a bit unclear to me
>>
>>> ---
>>>    src/qcam/assets/shader/YUV_2_planes.frag | 27 ++++----
>>>    src/qcam/assets/shader/YUV_3_planes.frag | 23 ++++---
>>>    src/qcam/assets/shader/YUV_packed.frag   | 17 ++---
>>>    src/qcam/viewfinder_gl.cpp               | 79 +++++++++++++++++++++++-
>>>    src/qcam/viewfinder_gl.h                 |  2 +
>>>    5 files changed, 115 insertions(+), 33 deletions(-)
>>>
>>> diff --git a/src/qcam/assets/shader/YUV_2_planes.frag b/src/qcam/assets/shader/YUV_2_planes.frag
>>> index 254463c05cac..da8dbcc5f801 100644
>>> --- a/src/qcam/assets/shader/YUV_2_planes.frag
>>> +++ b/src/qcam/assets/shader/YUV_2_planes.frag
>>> @@ -13,27 +13,30 @@ varying vec2 textureOut;
>>>    uniform sampler2D tex_y;
>>>    uniform sampler2D tex_u;
>>>    
>>> +const mat3 yuv2rgb_matrix = mat3(
>>> +	YUV2RGB_MATRIX
>>> +);
>>> +
>>> +const vec3 yuv2rgb_offset = vec3(
>>> +	YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
>> I understood the YUV2RGB_Y_OFFSET #define but don't understand where
>> other values come from (or why they exist :D)
> The quantization of the Cb and Cr values in all relevant color spaces
> (ITU-R BT.601, BT.709, BT.2020, ...) add an offset of 128 (for 8-bit
> values). For instance, in BT.709, we have
>
> D'Cb = INT[(224*E'Cb + 128)*2^(n-8)]
>
> where D'Cb is the Cb signal after quantization, E'Cb the Cb signal
> before quantization (in the [-0.5, 0.5] range), and n the number of
> bits). INT[] denotes rounding to the closest integer.
>
> The 224 multiplier creates a limited quantization range, following the
> above formula, -0.5 will be quantized to INT[224 * -0.5 + 128] = 16, and
> 0.5 to INT[224 * 0.5 + 128] = 240. The values are then stored as 8-bit
> unsigned integers in memory.
>
> For full range quantization, the same applies, with a multiplier equal
> to 255 instead of 224. [-0.5, 0.5] is thus mapped to [0, 255].
>
> We need to apply the reverse quantization on D'Y, D'Cb and D'Cr in order
> to get the original E'Y, E'Cb and E'Cr values (in the [0.0, 1.0] and
> [-0.5, 0.5] ranges respectively for E'Y and E'C[br]. Starting with full
> range, given
>
> D'Cb = INT[(255*E'Cb + 128)] (for 8-bit data)
>
> the inverse is given by
>
> E'Cb = (D'Cb - 128) / 255
>
> or
>
> E'Cb = D'Cb / 255 - 128 / 255
>
> OpenGL, when reading texture data through a floating point texture
> sampler (which we do in the shader by calling texture2D on a sampler2D
> variable), normalizes the values stored in memory ([0, 255]) to the
> [0.0, 1.0] range. This means that the D'Cb value is already divided by
> 255 by the GPU. We only need to subtract 128 / 255 to get the original
> E'Cb value.
>
> In the limited quantization range case, we have
>
> D'Cb = INT[(224*E'Cb + 128)] (for 8-bit data)
>
> the inverse is given by
>
> E'Cb = (D'Cb - 128) / 224
>
> Let's introduce the 255 factor:
>
> E'Cb = (D'Cb - 128) / 255 * 255 / 224
>
> which can also be written as
>
> E'Cb = (D'Cb / 255 - 128 / 255) * 255 / 224
>
> We thus have
>
> E'Cb(lim) = E'Cb(full) * 255 / 224
>
> The shader doesn't include the 255 / 224 multiplier directly, it gets
> included by the C++ code in the yuv2rgb matrix, and there's no need for
> a different offset between the limited and full range quantization.

Ah thanks, I got time to read and understand, thanks for the write-up!
>
> I hope this helps clarifying the implementation.

Yes, it does.

>
>> Maybe I should start learning shaders programming ;-)
>>
>> Reviewed-by: Umang Jain <umang.jain@ideasonboard.com>
>>
>>> +);
>>> +
>>>    void main(void)
>>>    {
>>>    	vec3 yuv;
>>> -	vec3 rgb;
>>> -	mat3 yuv2rgb_bt601_mat = mat3(
>>> -		vec3(1.164,  1.164, 1.164),
>>> -		vec3(0.000, -0.392, 2.017),
>>> -		vec3(1.596, -0.813, 0.000)
>>> -	);
>>>    
>>> -	yuv.x = texture2D(tex_y, textureOut).r - 0.063;
>>> +	yuv.x = texture2D(tex_y, textureOut).r;
>>>    #if defined(YUV_PATTERN_UV)
>>> -	yuv.y = texture2D(tex_u, textureOut).r - 0.500;
>>> -	yuv.z = texture2D(tex_u, textureOut).a - 0.500;
>>> +	yuv.y = texture2D(tex_u, textureOut).r;
>>> +	yuv.z = texture2D(tex_u, textureOut).a;
>>>    #elif defined(YUV_PATTERN_VU)
>>> -	yuv.y = texture2D(tex_u, textureOut).a - 0.500;
>>> -	yuv.z = texture2D(tex_u, textureOut).r - 0.500;
>>> +	yuv.y = texture2D(tex_u, textureOut).a;
>>> +	yuv.z = texture2D(tex_u, textureOut).r;
>>>    #else
>>>    #error Invalid pattern
>>>    #endif
>>>    
>>> -	rgb = yuv2rgb_bt601_mat * yuv;
>>> +	vec3 rgb = yuv2rgb_matrix * (vec3(y, uv) - yuv2rgb_offset);
>>> +
>>>    	gl_FragColor = vec4(rgb, 1.0);
>>>    }
>>> diff --git a/src/qcam/assets/shader/YUV_3_planes.frag b/src/qcam/assets/shader/YUV_3_planes.frag
>>> index 2be74b5d2a9d..e754129d74d1 100644
>>> --- a/src/qcam/assets/shader/YUV_3_planes.frag
>>> +++ b/src/qcam/assets/shader/YUV_3_planes.frag
>>> @@ -14,20 +14,23 @@ uniform sampler2D tex_y;
>>>    uniform sampler2D tex_u;
>>>    uniform sampler2D tex_v;
>>>    
>>> +const mat3 yuv2rgb_matrix = mat3(
>>> +	YUV2RGB_MATRIX
>>> +);
>>> +
>>> +const vec3 yuv2rgb_offset = vec3(
>>> +	YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
>>> +);
>>> +
>>>    void main(void)
>>>    {
>>>    	vec3 yuv;
>>> -	vec3 rgb;
>>> -	mat3 yuv2rgb_bt601_mat = mat3(
>>> -		vec3(1.164,  1.164, 1.164),
>>> -		vec3(0.000, -0.392, 2.017),
>>> -		vec3(1.596, -0.813, 0.000)
>>> -	);
>>>    
>>> -	yuv.x = texture2D(tex_y, textureOut).r - 0.063;
>>> -	yuv.y = texture2D(tex_u, textureOut).r - 0.500;
>>> -	yuv.z = texture2D(tex_v, textureOut).r - 0.500;
>>> +	yuv.x = texture2D(tex_y, textureOut).r;
>>> +	yuv.y = texture2D(tex_u, textureOut).r;
>>> +	yuv.z = texture2D(tex_v, textureOut).r;
>>> +
>>> +	vec3 rgb = yuv2rgb_matrix * (vec3(y, uv) - yuv2rgb_offset);
>>>    
>>> -	rgb = yuv2rgb_bt601_mat * yuv;
>>>    	gl_FragColor = vec4(rgb, 1.0);
>>>    }
>>> diff --git a/src/qcam/assets/shader/YUV_packed.frag b/src/qcam/assets/shader/YUV_packed.frag
>>> index d6efd4ce92a9..b9ef9d41beae 100644
>>> --- a/src/qcam/assets/shader/YUV_packed.frag
>>> +++ b/src/qcam/assets/shader/YUV_packed.frag
>>> @@ -14,15 +14,16 @@ varying vec2 textureOut;
>>>    uniform sampler2D tex_y;
>>>    uniform vec2 tex_step;
>>>    
>>> +const mat3 yuv2rgb_matrix = mat3(
>>> +	YUV2RGB_MATRIX
>>> +);
>>> +
>>> +const vec3 yuv2rgb_offset = vec3(
>>> +	YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
>>> +);
>>> +
>>>    void main(void)
>>>    {
>>> -	mat3 yuv2rgb_bt601_mat = mat3(
>>> -		vec3(1.164,  1.164, 1.164),
>>> -		vec3(0.000, -0.392, 2.017),
>>> -		vec3(1.596, -0.813, 0.000)
>>> -	);
>>> -	vec3 yuv2rgb_bt601_offset = vec3(0.063, 0.500, 0.500);
>>> -
>>>    	/*
>>>    	 * The sampler won't interpolate the texture correctly along the X axis,
>>>    	 * as each RGBA pixel effectively stores two pixels. We thus need to
>>> @@ -76,7 +77,7 @@ void main(void)
>>>    
>>>    	float y = mix(y_left, y_right, step(0.5, f_x));
>>>    
>>> -	vec3 rgb = yuv2rgb_bt601_mat * (vec3(y, uv) - yuv2rgb_bt601_offset);
>>> +	vec3 rgb = yuv2rgb_matrix * (vec3(y, uv) - yuv2rgb_offset);
>>>    
>>>    	gl_FragColor = vec4(rgb, 1.0);
>>>    }
>>> diff --git a/src/qcam/viewfinder_gl.cpp b/src/qcam/viewfinder_gl.cpp
>>> index ec295b6de0dd..e2aa24703ff0 100644
>>> --- a/src/qcam/viewfinder_gl.cpp
>>> +++ b/src/qcam/viewfinder_gl.cpp
>>> @@ -7,9 +7,12 @@
>>>    
>>>    #include "viewfinder_gl.h"
>>>    
>>> +#include <array>
>>> +
>>>    #include <QByteArray>
>>>    #include <QFile>
>>>    #include <QImage>
>>> +#include <QStringList>
>>>    
>>>    #include <libcamera/formats.h>
>>>    
>>> @@ -56,7 +59,8 @@ static const QList<libcamera::PixelFormat> supportedFormats{
>>>    };
>>>    
>>>    ViewFinderGL::ViewFinderGL(QWidget *parent)
>>> -	: QOpenGLWidget(parent), buffer_(nullptr), image_(nullptr),
>>> +	: QOpenGLWidget(parent), buffer_(nullptr),
>>> +	  colorSpace_(libcamera::ColorSpace::Raw), image_(nullptr),
>>>    	  vertexBuffer_(QOpenGLBuffer::VertexBuffer)
>>>    {
>>>    }
>>> @@ -72,10 +76,10 @@ const QList<libcamera::PixelFormat> &ViewFinderGL::nativeFormats() const
>>>    }
>>>    
>>>    int ViewFinderGL::setFormat(const libcamera::PixelFormat &format, const QSize &size,
>>> -			    [[maybe_unused]] const libcamera::ColorSpace &colorSpace,
>>> +			    const libcamera::ColorSpace &colorSpace,
>>>    			    unsigned int stride)
>>>    {
>>> -	if (format != format_) {
>>> +	if (format != format_ || colorSpace != colorSpace_) {
>>>    		/*
>>>    		 * If the fragment already exists, remove it and create a new
>>>    		 * one for the new format.
>>> @@ -89,7 +93,10 @@ int ViewFinderGL::setFormat(const libcamera::PixelFormat &format, const QSize &s
>>>    		if (!selectFormat(format))
>>>    			return -1;
>>>    
>>> +		selectColorSpace(colorSpace);
>>> +
>>>    		format_ = format;
>>> +		colorSpace_ = colorSpace;
>>>    	}
>>>    
>>>    	size_ = size;
>>> @@ -318,6 +325,72 @@ bool ViewFinderGL::selectFormat(const libcamera::PixelFormat &format)
>>>    	return ret;
>>>    }
>>>    
>>> +void ViewFinderGL::selectColorSpace(const libcamera::ColorSpace &colorSpace)
>>> +{
>>> +	std::array<double, 9> yuv2rgb;
>>> +
>>> +	/* OpenGL stores arrays in column-major order. */
>>> +	switch (colorSpace.ycbcrEncoding) {
>>> +	case libcamera::ColorSpace::YcbcrEncoding::None:
>>> +		yuv2rgb = {
>>> +			1.0000,  0.0000,  0.0000,
>>> +			0.0000,  1.0000,  0.0000,
>>> +			0.0000,  0.0000,  1.0000,
>>> +		};
>>> +		break;
>>> +
>>> +	case libcamera::ColorSpace::YcbcrEncoding::Rec601:
>>> +		yuv2rgb = {
>>> +			1.0000,  1.0000,  1.0000,
>>> +			0.0000, -0.3441,  1.7720,
>>> +			1.4020, -0.7141,  0.0000,
>>> +		};
>>> +		break;
>>> +
>>> +	case libcamera::ColorSpace::YcbcrEncoding::Rec709:
>>> +		yuv2rgb = {
>>> +			1.0000,  1.0000,  1.0000,
>>> +			0.0000, -0.1873,  1.8856,
>>> +			1.5748, -0.4681,  0.0000,
>>> +		};
>>> +		break;
>>> +
>>> +	case libcamera::ColorSpace::YcbcrEncoding::Rec2020:
>>> +		yuv2rgb = {
>>> +			1.0000,  1.0000,  1.0000,
>>> +			0.0000, -0.1646,  1.8814,
>>> +			1.4746, -0.5714,  0.0000,
>>> +		};
>>> +		break;
>>> +	}
>>> +
>>> +	double offset;
>>> +
>>> +	switch (colorSpace.range) {
>>> +	case libcamera::ColorSpace::Range::Full:
>>> +		offset = 0.0;
>>> +		break;
>>> +
>>> +	case libcamera::ColorSpace::Range::Limited:
>>> +		offset = 16.0;
>>> +
>>> +		for (unsigned int i = 0; i < 3; ++i)
>>> +			yuv2rgb[i] *= 255.0 / 219.0;
>>> +		for (unsigned int i = 4; i < 9; ++i)
>>> +			yuv2rgb[i] *= 255.0 / 224.0;
>>> +		break;
>>> +	}
>>> +
>>> +	QStringList matrix;
>>> +
>>> +	for (double coeff : yuv2rgb)
>>> +		matrix.append(QString::number(coeff, 'f'));
>>> +
>>> +	fragmentShaderDefines_.append("#define YUV2RGB_MATRIX " + matrix.join(", "));
>>> +	fragmentShaderDefines_.append(QString("#define YUV2RGB_Y_OFFSET %1")
>>> +		.arg(offset, 0, 'f', 1));
>>> +}
>>> +
>>>    bool ViewFinderGL::createVertexShader()
>>>    {
>>>    	/* Create Vertex Shader */
>>> diff --git a/src/qcam/viewfinder_gl.h b/src/qcam/viewfinder_gl.h
>>> index 798830a31cd2..68c2912df12f 100644
>>> --- a/src/qcam/viewfinder_gl.h
>>> +++ b/src/qcam/viewfinder_gl.h
>>> @@ -57,6 +57,7 @@ protected:
>>>    
>>>    private:
>>>    	bool selectFormat(const libcamera::PixelFormat &format);
>>> +	void selectColorSpace(const libcamera::ColorSpace &colorSpace);
>>>    
>>>    	void configureTexture(QOpenGLTexture &texture);
>>>    	bool createFragmentShader();
>>> @@ -67,6 +68,7 @@ private:
>>>    	/* Captured image size, format and buffer */
>>>    	libcamera::FrameBuffer *buffer_;
>>>    	libcamera::PixelFormat format_;
>>> +	libcamera::ColorSpace colorSpace_;
>>>    	QSize size_;
>>>    	unsigned int stride_;
>>>    	Image *image_;

Patch
diff mbox series

diff --git a/src/qcam/assets/shader/YUV_2_planes.frag b/src/qcam/assets/shader/YUV_2_planes.frag
index 254463c05cac..da8dbcc5f801 100644
--- a/src/qcam/assets/shader/YUV_2_planes.frag
+++ b/src/qcam/assets/shader/YUV_2_planes.frag
@@ -13,27 +13,30 @@  varying vec2 textureOut;
 uniform sampler2D tex_y;
 uniform sampler2D tex_u;
 
+const mat3 yuv2rgb_matrix = mat3(
+	YUV2RGB_MATRIX
+);
+
+const vec3 yuv2rgb_offset = vec3(
+	YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
+);
+
 void main(void)
 {
 	vec3 yuv;
-	vec3 rgb;
-	mat3 yuv2rgb_bt601_mat = mat3(
-		vec3(1.164,  1.164, 1.164),
-		vec3(0.000, -0.392, 2.017),
-		vec3(1.596, -0.813, 0.000)
-	);
 
-	yuv.x = texture2D(tex_y, textureOut).r - 0.063;
+	yuv.x = texture2D(tex_y, textureOut).r;
 #if defined(YUV_PATTERN_UV)
-	yuv.y = texture2D(tex_u, textureOut).r - 0.500;
-	yuv.z = texture2D(tex_u, textureOut).a - 0.500;
+	yuv.y = texture2D(tex_u, textureOut).r;
+	yuv.z = texture2D(tex_u, textureOut).a;
 #elif defined(YUV_PATTERN_VU)
-	yuv.y = texture2D(tex_u, textureOut).a - 0.500;
-	yuv.z = texture2D(tex_u, textureOut).r - 0.500;
+	yuv.y = texture2D(tex_u, textureOut).a;
+	yuv.z = texture2D(tex_u, textureOut).r;
 #else
 #error Invalid pattern
 #endif
 
-	rgb = yuv2rgb_bt601_mat * yuv;
+	vec3 rgb = yuv2rgb_matrix * (yuv - yuv2rgb_offset);
+
 	gl_FragColor = vec4(rgb, 1.0);
 }
diff --git a/src/qcam/assets/shader/YUV_3_planes.frag b/src/qcam/assets/shader/YUV_3_planes.frag
index 2be74b5d2a9d..e754129d74d1 100644
--- a/src/qcam/assets/shader/YUV_3_planes.frag
+++ b/src/qcam/assets/shader/YUV_3_planes.frag
@@ -14,20 +14,23 @@  uniform sampler2D tex_y;
 uniform sampler2D tex_u;
 uniform sampler2D tex_v;
 
+const mat3 yuv2rgb_matrix = mat3(
+	YUV2RGB_MATRIX
+);
+
+const vec3 yuv2rgb_offset = vec3(
+	YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
+);
+
 void main(void)
 {
 	vec3 yuv;
-	vec3 rgb;
-	mat3 yuv2rgb_bt601_mat = mat3(
-		vec3(1.164,  1.164, 1.164),
-		vec3(0.000, -0.392, 2.017),
-		vec3(1.596, -0.813, 0.000)
-	);
 
-	yuv.x = texture2D(tex_y, textureOut).r - 0.063;
-	yuv.y = texture2D(tex_u, textureOut).r - 0.500;
-	yuv.z = texture2D(tex_v, textureOut).r - 0.500;
+	yuv.x = texture2D(tex_y, textureOut).r;
+	yuv.y = texture2D(tex_u, textureOut).r;
+	yuv.z = texture2D(tex_v, textureOut).r;
+
+	vec3 rgb = yuv2rgb_matrix * (yuv - yuv2rgb_offset);
 
-	rgb = yuv2rgb_bt601_mat * yuv;
 	gl_FragColor = vec4(rgb, 1.0);
 }
diff --git a/src/qcam/assets/shader/YUV_packed.frag b/src/qcam/assets/shader/YUV_packed.frag
index d6efd4ce92a9..b9ef9d41beae 100644
--- a/src/qcam/assets/shader/YUV_packed.frag
+++ b/src/qcam/assets/shader/YUV_packed.frag
@@ -14,15 +14,16 @@  varying vec2 textureOut;
 uniform sampler2D tex_y;
 uniform vec2 tex_step;
 
+const mat3 yuv2rgb_matrix = mat3(
+	YUV2RGB_MATRIX
+);
+
+const vec3 yuv2rgb_offset = vec3(
+	YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
+);
+
 void main(void)
 {
-	mat3 yuv2rgb_bt601_mat = mat3(
-		vec3(1.164,  1.164, 1.164),
-		vec3(0.000, -0.392, 2.017),
-		vec3(1.596, -0.813, 0.000)
-	);
-	vec3 yuv2rgb_bt601_offset = vec3(0.063, 0.500, 0.500);
-
 	/*
 	 * The sampler won't interpolate the texture correctly along the X axis,
 	 * as each RGBA pixel effectively stores two pixels. We thus need to
@@ -76,7 +77,7 @@  void main(void)
 
 	float y = mix(y_left, y_right, step(0.5, f_x));
 
-	vec3 rgb = yuv2rgb_bt601_mat * (vec3(y, uv) - yuv2rgb_bt601_offset);
+	vec3 rgb = yuv2rgb_matrix * (vec3(y, uv) - yuv2rgb_offset);
 
 	gl_FragColor = vec4(rgb, 1.0);
 }
diff --git a/src/qcam/viewfinder_gl.cpp b/src/qcam/viewfinder_gl.cpp
index ec295b6de0dd..e2aa24703ff0 100644
--- a/src/qcam/viewfinder_gl.cpp
+++ b/src/qcam/viewfinder_gl.cpp
@@ -7,9 +7,12 @@ 
 
 #include "viewfinder_gl.h"
 
+#include <array>
+
 #include <QByteArray>
 #include <QFile>
 #include <QImage>
+#include <QStringList>
 
 #include <libcamera/formats.h>
 
@@ -56,7 +59,8 @@  static const QList<libcamera::PixelFormat> supportedFormats{
 };
 
 ViewFinderGL::ViewFinderGL(QWidget *parent)
-	: QOpenGLWidget(parent), buffer_(nullptr), image_(nullptr),
+	: QOpenGLWidget(parent), buffer_(nullptr),
+	  colorSpace_(libcamera::ColorSpace::Raw), image_(nullptr),
 	  vertexBuffer_(QOpenGLBuffer::VertexBuffer)
 {
 }
@@ -72,10 +76,10 @@  const QList<libcamera::PixelFormat> &ViewFinderGL::nativeFormats() const
 }
 
 int ViewFinderGL::setFormat(const libcamera::PixelFormat &format, const QSize &size,
-			    [[maybe_unused]] const libcamera::ColorSpace &colorSpace,
+			    const libcamera::ColorSpace &colorSpace,
 			    unsigned int stride)
 {
-	if (format != format_) {
+	if (format != format_ || colorSpace != colorSpace_) {
 		/*
 		 * If the fragment already exists, remove it and create a new
 		 * one for the new format.
@@ -89,7 +93,10 @@  int ViewFinderGL::setFormat(const libcamera::PixelFormat &format, const QSize &s
 		if (!selectFormat(format))
 			return -1;
 
+		selectColorSpace(colorSpace);
+
 		format_ = format;
+		colorSpace_ = colorSpace;
 	}
 
 	size_ = size;
@@ -318,6 +325,72 @@  bool ViewFinderGL::selectFormat(const libcamera::PixelFormat &format)
 	return ret;
 }
 
+void ViewFinderGL::selectColorSpace(const libcamera::ColorSpace &colorSpace)
+{
+	std::array<double, 9> yuv2rgb;
+
+	/* OpenGL stores arrays in column-major order. */
+	switch (colorSpace.ycbcrEncoding) {
+	case libcamera::ColorSpace::YcbcrEncoding::None:
+		yuv2rgb = {
+			1.0000,  0.0000,  0.0000,
+			0.0000,  1.0000,  0.0000,
+			0.0000,  0.0000,  1.0000,
+		};
+		break;
+
+	case libcamera::ColorSpace::YcbcrEncoding::Rec601:
+		yuv2rgb = {
+			1.0000,  1.0000,  1.0000,
+			0.0000, -0.3441,  1.7720,
+			1.4020, -0.7141,  0.0000,
+		};
+		break;
+
+	case libcamera::ColorSpace::YcbcrEncoding::Rec709:
+		yuv2rgb = {
+			1.0000,  1.0000,  1.0000,
+			0.0000, -0.1873,  1.8856,
+			1.5748, -0.4681,  0.0000,
+		};
+		break;
+
+	case libcamera::ColorSpace::YcbcrEncoding::Rec2020:
+		yuv2rgb = {
+			1.0000,  1.0000,  1.0000,
+			0.0000, -0.1646,  1.8814,
+			1.4746, -0.5714,  0.0000,
+		};
+		break;
+	}
+
+	double offset;
+
+	switch (colorSpace.range) {
+	case libcamera::ColorSpace::Range::Full:
+		offset = 0.0;
+		break;
+
+	case libcamera::ColorSpace::Range::Limited:
+		offset = 16.0;
+
+		for (unsigned int i = 0; i < 3; ++i)
+			yuv2rgb[i] *= 255.0 / 219.0;
+		for (unsigned int i = 4; i < 9; ++i)
+			yuv2rgb[i] *= 255.0 / 224.0;
+		break;
+	}
+
+	QStringList matrix;
+
+	for (double coeff : yuv2rgb)
+		matrix.append(QString::number(coeff, 'f'));
+
+	fragmentShaderDefines_.append("#define YUV2RGB_MATRIX " + matrix.join(", "));
+	fragmentShaderDefines_.append(QString("#define YUV2RGB_Y_OFFSET %1")
+		.arg(offset, 0, 'f', 1));
+}
+
 bool ViewFinderGL::createVertexShader()
 {
 	/* Create Vertex Shader */
diff --git a/src/qcam/viewfinder_gl.h b/src/qcam/viewfinder_gl.h
index 798830a31cd2..68c2912df12f 100644
--- a/src/qcam/viewfinder_gl.h
+++ b/src/qcam/viewfinder_gl.h
@@ -57,6 +57,7 @@  protected:
 
 private:
 	bool selectFormat(const libcamera::PixelFormat &format);
+	void selectColorSpace(const libcamera::ColorSpace &colorSpace);
 
 	void configureTexture(QOpenGLTexture &texture);
 	bool createFragmentShader();
@@ -67,6 +68,7 @@  private:
 	/* Captured image size, format and buffer */
 	libcamera::FrameBuffer *buffer_;
 	libcamera::PixelFormat format_;
+	libcamera::ColorSpace colorSpace_;
 	QSize size_;
 	unsigned int stride_;
 	Image *image_;