Coverage for colour/appearance/cam16.py: 100%
96 statements
« prev ^ index » next — coverage.py v7.11.0, created at 2025-11-15 19:01 +1300
1"""
2CAM16 Colour Appearance Model
3=============================
5Define the *CAM16* colour appearance model for predicting perceptual colour
6attributes under varying viewing conditions.
8- :class:`colour.appearance.InductionFactors_CAM16`
9- :attr:`colour.VIEWING_CONDITIONS_CAM16`
10- :class:`colour.CAM_Specification_CAM16`
11- :func:`colour.XYZ_to_CAM16`
12- :func:`colour.CAM16_to_XYZ`
14References
15----------
16- :cite:`Li2017` : Li, C., Li, Z., Wang, Z., Xu, Y., Luo, M. R., Cui, G.,
17 Melgosa, M., Brill, M. H., & Pointer, M. (2017). Comprehensive color
18 solutions: CAM16, CAT16, and CAM16-UCS. Color Research & Application,
19 42(6), 703-718. doi:10.1002/col.22131
20"""
22from __future__ import annotations
24from dataclasses import astuple, dataclass, field
26import numpy as np
28from colour.adaptation import CAT_CAT16
29from colour.algebra import spow, vecmul
30from colour.appearance.ciecam02 import (
31 VIEWING_CONDITIONS_CIECAM02,
32 InductionFactors_CIECAM02,
33 P,
34 achromatic_response_forward,
35 achromatic_response_inverse,
36 brightness_correlate,
37 chroma_correlate,
38 colourfulness_correlate,
39 degree_of_adaptation,
40 eccentricity_factor,
41 hue_angle,
42 hue_quadrature,
43 lightness_correlate,
44 matrix_post_adaptation_non_linear_response_compression,
45 opponent_colour_dimensions_forward,
46 opponent_colour_dimensions_inverse,
47 post_adaptation_non_linear_response_compression_forward,
48 post_adaptation_non_linear_response_compression_inverse,
49 saturation_correlate,
50 temporary_magnitude_quantity_inverse,
51 viewing_conditions_dependent_parameters,
52)
53from colour.hints import ( # noqa: TC001
54 Annotated,
55 ArrayLike,
56 Domain100,
57 NDArrayFloat,
58 Range100,
59)
60from colour.utilities import (
61 CanonicalMapping,
62 MixinDataclassArithmetic,
63 MixinDataclassIterable,
64 as_float,
65 as_float_array,
66 from_range_100,
67 from_range_degrees,
68 has_only_nan,
69 ones,
70 to_domain_100,
71 to_domain_degrees,
72 tsplit,
73)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"

# Explicit public API of this module.
__all__ = [
    "MATRIX_16",
    "MATRIX_INVERSE_16",
    "InductionFactors_CAM16",
    "VIEWING_CONDITIONS_CAM16",
    "CAM_Specification_CAM16",
    "XYZ_to_CAM16",
    "CAM16_to_XYZ",
]
# The *CAM16* model adopts the *CAT16* chromatic adaptation transform matrix.
MATRIX_16: NDArrayFloat = CAT_CAT16
"""Adaptation matrix :math:`M_{16}`."""

# Inverted once at import time so the inverse model does not recompute it.
MATRIX_INVERSE_16: NDArrayFloat = np.linalg.inv(MATRIX_16)
"""Inverse adaptation matrix :math:`M^{-1}_{16}`."""
@dataclass(frozen=True)
class InductionFactors_CAM16(MixinDataclassIterable):
    """
    Define the *CAM16* colour appearance model induction factors.

    Parameters
    ----------
    F
        Maximum degree of adaptation :math:`F`.
    c
        Exponential non-linearity :math:`c`.
    N_c
        Chromatic induction factor :math:`N_c`.

    Notes
    -----
    -   The *CAM16* colour appearance model induction factors are identical
        to the *CIECAM02* colour appearance model induction factors.

    References
    ----------
    :cite:`Li2017`
    """

    F: float
    c: float
    N_c: float
# The *CAM16* reference viewing conditions reuse the *CIECAM02* ones: the
# induction factors are identical between the two models per :cite:`Li2017`.
VIEWING_CONDITIONS_CAM16: CanonicalMapping = CanonicalMapping(
    VIEWING_CONDITIONS_CIECAM02
)
VIEWING_CONDITIONS_CAM16.__doc__ = """
Define the reference *CAM16* colour appearance model viewing conditions.

References
----------
:cite:`Li2017`
"""
@dataclass
class CAM_Specification_CAM16(MixinDataclassArithmetic):
    """
    Define the *CAM16* colour appearance model specification.

    Parameters
    ----------
    J
        Correlate of *lightness* :math:`J`.
    C
        Correlate of *chroma* :math:`C`.
    h
        *Hue* angle :math:`h` in degrees.
    s
        Correlate of *saturation* :math:`s`.
    Q
        Correlate of *brightness* :math:`Q`.
    M
        Correlate of *colourfulness* :math:`M`.
    H
        *Hue* :math:`h` quadrature :math:`H`.
    HC
        *Hue* :math:`h` composition :math:`H^C`.

    References
    ----------
    :cite:`Li2017`
    """

    # "None" is immutable, thus a plain default is behaviourally identical to
    # the former "field(default_factory=lambda: None)" construction while
    # avoiding a lambda invocation per field on every instantiation.
    J: float | NDArrayFloat | None = None
    C: float | NDArrayFloat | None = None
    h: float | NDArrayFloat | None = None
    s: float | NDArrayFloat | None = None
    Q: float | NDArrayFloat | None = None
    M: float | NDArrayFloat | None = None
    H: float | NDArrayFloat | None = None
    HC: float | NDArrayFloat | None = None
def XYZ_to_CAM16(
    XYZ: Domain100,
    XYZ_w: Domain100,
    L_A: ArrayLike,
    Y_b: ArrayLike,
    surround: (
        InductionFactors_CIECAM02 | InductionFactors_CAM16
    ) = VIEWING_CONDITIONS_CAM16["Average"],
    discount_illuminant: bool = False,
    compute_H: bool = True,
) -> Annotated[CAM_Specification_CAM16, (100, 100, 360, 100, 100, 100, 400)]:
    """
    Compute the *CAM16* colour appearance model correlates from the specified
    *CIE XYZ* tristimulus values.

    Parameters
    ----------
    XYZ
        *CIE XYZ* tristimulus values of test sample / stimulus.
    XYZ_w
        *CIE XYZ* tristimulus values of reference white.
    L_A
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`, (often
        taken to be 20% of the luminance of a white object in the scene).
    Y_b
        Luminous factor of background :math:`Y_b` such as
        :math:`Y_b = 100 \\times L_b / L_w` where :math:`L_w` is the
        luminance of the light source and :math:`L_b` is the luminance of
        the background. For viewing images, :math:`Y_b` can be the average
        :math:`Y` value for the pixels in the entire image, or frequently,
        a :math:`Y` value of 20, approximating an :math:`L^*` of 50 is
        used.
    surround
        Surround viewing conditions induction factors.
    discount_illuminant
        Truth value indicating if the illuminant should be discounted.
    compute_H
        Whether to compute *Hue* :math:`h` quadrature :math:`H`.
        :math:`H` is rarely used, and expensive to compute; when disabled,
        ``specification.H`` is filled with NaNs.

    Returns
    -------
    :class:`colour.CAM_Specification_CAM16`
        *CAM16* colour appearance model specification.

    Notes
    -----
    +---------------------+-----------------------+---------------+
    | **Domain**          | **Scale - Reference** | **Scale - 1** |
    +=====================+=======================+===============+
    | ``XYZ``             | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``XYZ_w``           | 100                   | 1             |
    +---------------------+-----------------------+---------------+

    +---------------------+-----------------------+---------------+
    | **Range**           | **Scale - Reference** | **Scale - 1** |
    +=====================+=======================+===============+
    | ``specification.J`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.C`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.h`` | 360                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.s`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.Q`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.M`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.H`` | 400                   | 1             |
    +---------------------+-----------------------+---------------+

    References
    ----------
    :cite:`Li2017`

    Examples
    --------
    >>> XYZ = np.array([19.01, 20.00, 21.78])
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> Y_b = 20.0
    >>> surround = VIEWING_CONDITIONS_CAM16["Average"]
    >>> XYZ_to_CAM16(XYZ, XYZ_w, L_A, Y_b, surround)  # doctest: +ELLIPSIS
    CAM_Specification_CAM16(J=41.7312079..., C=0.1033557..., \
h=217.0679597..., s=2.3450150..., Q=195.3717089..., M=0.1074367..., \
H=275.5949861..., HC=None)
    """

    XYZ = to_domain_100(XYZ)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)
    L_A = as_float_array(L_A)
    Y_b = as_float_array(Y_b)

    # Step 0
    # Converting *CIE XYZ* tristimulus values to sharpened *RGB* values.
    RGB_w = vecmul(MATRIX_16, XYZ_w)

    # Computing degree of adaptation :math:`D`; full adaptation (D == 1) is
    # assumed when the illuminant is discounted.
    D = (
        np.clip(degree_of_adaptation(surround.F, L_A), 0, 1)
        if not discount_illuminant
        else ones(L_A.shape)
    )

    n, F_L, N_bb, N_cb, z = viewing_conditions_dependent_parameters(Y_b, Y_w, L_A)

    # Per-channel adaptation factors, broadcast over the trailing *RGB* axis.
    D_RGB = D[..., None] * Y_w[..., None] / RGB_w + 1 - D[..., None]
    RGB_wc = D_RGB * RGB_w

    # Applying forward post-adaptation non-linear response compression.
    RGB_aw = post_adaptation_non_linear_response_compression_forward(RGB_wc, F_L)

    # Computing achromatic responses for the whitepoint.
    A_w = achromatic_response_forward(RGB_aw, N_bb)

    # Step 1
    # Converting *CIE XYZ* tristimulus values to sharpened *RGB* values.
    RGB = vecmul(MATRIX_16, XYZ)

    # Step 2
    RGB_c = D_RGB * RGB

    # Step 3
    # Applying forward post-adaptation non-linear response compression.
    RGB_a = post_adaptation_non_linear_response_compression_forward(RGB_c, F_L)

    # Step 4
    # Converting to preliminary cartesian coordinates.
    a, b = tsplit(opponent_colour_dimensions_forward(RGB_a))

    # Computing the *hue* angle :math:`h`.
    h = hue_angle(a, b)

    # Step 5
    # Computing eccentricity factor *e_t*.
    e_t = eccentricity_factor(h)

    # Computing hue :math:`h` quadrature :math:`H`, or NaNs when skipped.
    H = hue_quadrature(h) if compute_H else np.full(h.shape, np.nan)
    # TODO: Compute hue composition.

    # Step 6
    # Computing achromatic responses for the stimulus.
    A = achromatic_response_forward(RGB_a, N_bb)

    # Step 7
    # Computing the correlate of *Lightness* :math:`J`.
    J = lightness_correlate(A, A_w, surround.c, z)

    # Step 8
    # Computing the correlate of *brightness* :math:`Q`.
    Q = brightness_correlate(surround.c, J, A_w, F_L)

    # Step 9
    # Computing the correlate of *chroma* :math:`C`.
    C = chroma_correlate(J, n, surround.N_c, N_cb, e_t, a, b, RGB_a)

    # Computing the correlate of *colourfulness* :math:`M`.
    M = colourfulness_correlate(C, F_L)

    # Computing the correlate of *saturation* :math:`s`.
    s = saturation_correlate(M, Q)

    return CAM_Specification_CAM16(
        J=as_float(from_range_100(J)),
        C=as_float(from_range_100(C)),
        h=as_float(from_range_degrees(h)),
        s=as_float(from_range_100(s)),
        Q=as_float(from_range_100(Q)),
        M=as_float(from_range_100(M)),
        H=as_float(from_range_degrees(H, 400)),
        HC=None,
    )
def CAM16_to_XYZ(
    specification: Annotated[
        CAM_Specification_CAM16, (100, 100, 360, 100, 100, 100, 400)
    ],
    XYZ_w: Domain100,
    L_A: ArrayLike,
    Y_b: ArrayLike,
    surround: (
        InductionFactors_CIECAM02 | InductionFactors_CAM16
    ) = VIEWING_CONDITIONS_CAM16["Average"],
    discount_illuminant: bool = False,
) -> Range100:
    """
    Convert the *CAM16* colour appearance model specification to *CIE XYZ*
    tristimulus values.

    Parameters
    ----------
    specification
        *CAM16* colour appearance model specification. Correlate of
        *lightness* :math:`J`, correlate of *chroma* :math:`C` or correlate
        of *colourfulness* :math:`M` and *hue* angle :math:`h` in degrees
        must be specified, e.g., :math:`JCh` or :math:`JMh`.
    XYZ_w
        *CIE XYZ* tristimulus values of reference white.
    L_A
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`, (often
        taken to be 20% of the luminance of a white object in the scene).
    Y_b
        Luminous factor of background :math:`Y_b` such as
        :math:`Y_b = 100 \\times L_b / L_w` where :math:`L_w` is the
        luminance of the light source and :math:`L_b` is the luminance of
        the background. For viewing images, :math:`Y_b` can be the average
        :math:`Y` value for the pixels in the entire image, or frequently,
        a :math:`Y` value of 20, approximating an :math:`L^*` of 50 is
        used.
    surround
        Surround viewing conditions induction factors.
    discount_illuminant
        Truth value indicating if the illuminant should be discounted.

    Returns
    -------
    :class:`numpy.ndarray`
        *CIE XYZ* tristimulus values.

    Raises
    ------
    ValueError
        If neither :math:`C` nor :math:`M` correlates have been defined
        in the ``specification`` argument.

    Notes
    -----
    +---------------------+-----------------------+---------------+
    | **Domain**          | **Scale - Reference** | **Scale - 1** |
    +=====================+=======================+===============+
    | ``specification.J`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.C`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.h`` | 360                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.s`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.Q`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.M`` | 100                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``specification.H`` | 400                   | 1             |
    +---------------------+-----------------------+---------------+
    | ``XYZ_w``           | 100                   | 1             |
    +---------------------+-----------------------+---------------+

    +---------------------+-----------------------+---------------+
    | **Range**           | **Scale - Reference** | **Scale - 1** |
    +=====================+=======================+===============+
    | ``XYZ``             | 100                   | 1             |
    +---------------------+-----------------------+---------------+

    References
    ----------
    :cite:`Li2017`

    Examples
    --------
    >>> specification = CAM_Specification_CAM16(
    ...     J=41.731207905126638, C=0.103355738709070, h=217.067959767393010
    ... )
    >>> XYZ_w = np.array([95.05, 100.00, 108.88])
    >>> L_A = 318.31
    >>> Y_b = 20.0
    >>> CAM16_to_XYZ(specification, XYZ_w, L_A, Y_b)  # doctest: +ELLIPSIS
    array([ 19.01...,  20...  ,  21.78...])
    """

    # Unused correlates (*s*, *Q*, *H*, *HC*) are deliberately discarded:
    # the inverse model only needs *J*, *h* and one of *C* or *M*.
    J, C, h, _s, _Q, M, _H, _HC = astuple(specification)

    J = to_domain_100(J)
    C = to_domain_100(C)
    h = to_domain_degrees(h)
    M = to_domain_100(M)
    L_A = as_float_array(L_A)
    XYZ_w = to_domain_100(XYZ_w)
    _X_w, Y_w, _Z_w = tsplit(XYZ_w)

    # Step 0
    # Converting *CIE XYZ* tristimulus values to sharpened *RGB* values.
    RGB_w = vecmul(MATRIX_16, XYZ_w)

    # Computing degree of adaptation :math:`D`; full adaptation (D == 1) is
    # assumed when the illuminant is discounted.
    D = (
        np.clip(degree_of_adaptation(surround.F, L_A), 0, 1)
        if not discount_illuminant
        else ones(L_A.shape)
    )

    n, F_L, N_bb, N_cb, z = viewing_conditions_dependent_parameters(Y_b, Y_w, L_A)

    D_RGB = D[..., None] * Y_w[..., None] / RGB_w + 1 - D[..., None]
    RGB_wc = D_RGB * RGB_w

    # Applying forward post-adaptation non-linear response compression.
    RGB_aw = post_adaptation_non_linear_response_compression_forward(RGB_wc, F_L)

    # Computing achromatic responses for the whitepoint.
    A_w = achromatic_response_forward(RGB_aw, N_bb)

    # Step 1
    # Deriving *C* from *M* when only *M* was given; at least one of the two
    # chromatic correlates is mandatory.
    if has_only_nan(C) and not has_only_nan(M):
        C = M / spow(F_L, 0.25)
    elif has_only_nan(C):
        error = (
            'Either "C" or "M" correlate must be defined in '
            'the "CAM_Specification_CAM16" argument!'
        )

        raise ValueError(error)

    # Step 2
    # Computing temporary magnitude quantity :math:`t`.
    t = temporary_magnitude_quantity_inverse(C, J, n)

    # Computing eccentricity factor *e_t*.
    e_t = eccentricity_factor(h)

    # Computing achromatic response :math:`A` for the stimulus.
    A = achromatic_response_inverse(A_w, J, surround.c, z)

    # Computing *P_1* to *P_3*.
    P_n = P(surround.N_c, N_cb, e_t, t, A, N_bb)
    _P_1, P_2, _P_3 = tsplit(P_n)

    # Step 3
    # Computing opponent colour dimensions :math:`a` and :math:`b`; both are
    # forced to zero for achromatic stimuli, i.e. where :math:`t == 0`.
    ab = opponent_colour_dimensions_inverse(P_n, h)
    a, b = tsplit(ab) * np.where(t == 0, 0, 1)

    # Step 4
    # Applying post-adaptation non-linear response compression matrix.
    RGB_a = matrix_post_adaptation_non_linear_response_compression(P_2, a, b)

    # Step 5
    # Applying inverse post-adaptation non-linear response compression.
    RGB_c = post_adaptation_non_linear_response_compression_inverse(RGB_a, F_L)

    # Step 6
    # Undoing the chromatic adaptation.
    RGB = RGB_c / D_RGB

    # Step 7
    XYZ = vecmul(MATRIX_INVERSE_16, RGB)

    return from_range_100(XYZ)