[mutter/wip/nielsdg/add-yuv-support: 52/56] cogl: Replace G_8 with R_8 pixel format



commit 4614e0caaa1830ead3684895c2a33574234b1f14
Author: Niels De Graef <niels degraef barco com>
Date:   Wed Jun 12 15:14:15 2019 +0200

    cogl: Replace G_8 with R_8 pixel format
    
    Cogl doesn't use `COGL_PIXEL_FORMAT_G_8` anywhere, and it has differing
    implementations depending on the driver (Mesa averages it out over the
    diferent components, while NVIDIA stores it in the R component).
    
    Furthermore, by having this new pixel format, we can use this for
    single-component uploads, which will be useful when dealing with
    non-trivial textures (such as YUV-based textures).

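For context, the point of a well-defined single-component format is that a plane of e.g. an NV12 or I420 buffer can be uploaded directly. A minimal sketch of what such an upload could look like, assuming mutter's experimental cogl_texture_2d_new_from_data() API (the helper name, plane data and dimensions are hypothetical):

#include <cogl/cogl.h>

/* Hypothetical helper: upload the 8-bit luma (Y) plane of a YUV buffer
 * as a single-component texture. With COGL_PIXEL_FORMAT_R_8 the data
 * lands in the R component on every driver, instead of the
 * driver-dependent behaviour of the old luminance format. */
static CoglTexture2D *
upload_y_plane (CoglContext   *ctx,
                const uint8_t *y_plane,   /* rowstride * height bytes */
                int            width,
                int            height,
                int            rowstride,
                GError       **error)
{
  return cogl_texture_2d_new_from_data (ctx,
                                        width, height,
                                        COGL_PIXEL_FORMAT_R_8,
                                        rowstride,
                                        y_plane,
                                        error);
}
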
 clutter/tests/interactive/test-cogl-tex-convert.c |  2 +-
 cogl/cogl/cogl-bitmap-conversion.c                |  2 +-
 cogl/cogl/cogl-bitmap-packing.h                   | 32 ++++++++---------------
 cogl/cogl/cogl-gles2-context.c                    |  4 +--
 cogl/cogl/cogl-pixel-format.c                     |  4 +--
 cogl/cogl/cogl-pixel-format.h                     |  6 ++---
 cogl/cogl/driver/gl/gl/cogl-driver-gl.c           | 17 ++++--------
 cogl/cogl/driver/gl/gles/cogl-driver-gles.c       |  6 ++---
 cogl/tests/conform/test-read-texture-formats.c    |  7 +----
 cogl/tests/conform/test-write-texture-formats.c   |  7 +----
 10 files changed, 30 insertions(+), 57 deletions(-)
---
diff --git a/clutter/tests/interactive/test-cogl-tex-convert.c b/clutter/tests/interactive/test-cogl-tex-convert.c
index 819c02d20..908c1b026 100644
--- a/clutter/tests/interactive/test-cogl-tex-convert.c
+++ b/clutter/tests/interactive/test-cogl-tex-convert.c
@@ -173,7 +173,7 @@ test_coglbox_init (TestCoglbox *self)
   priv->cogl_tex_id[3] =
     cogl_texture_new_from_file (file,
                                 COGL_TEXTURE_NONE,
-                               COGL_PIXEL_FORMAT_G_8,
+                               COGL_PIXEL_FORMAT_R_8,
                                 NULL);
 
   g_free (file);
diff --git a/cogl/cogl/cogl-bitmap-conversion.c b/cogl/cogl/cogl-bitmap-conversion.c
index 447db00b3..b3ebf8773 100644
--- a/cogl/cogl/cogl-bitmap-conversion.c
+++ b/cogl/cogl/cogl-bitmap-conversion.c
@@ -328,7 +328,7 @@ _cogl_bitmap_needs_short_temp_buffer (CoglPixelFormat format)
     case COGL_PIXEL_FORMAT_RGB_565:
     case COGL_PIXEL_FORMAT_RGBA_4444:
     case COGL_PIXEL_FORMAT_RGBA_5551:
-    case COGL_PIXEL_FORMAT_G_8:
+    case COGL_PIXEL_FORMAT_R_8:
     case COGL_PIXEL_FORMAT_RGB_888:
     case COGL_PIXEL_FORMAT_BGR_888:
     case COGL_PIXEL_FORMAT_RGBA_8888:
diff --git a/cogl/cogl/cogl-bitmap-packing.h b/cogl/cogl/cogl-bitmap-packing.h
index 1b8e140ff..3a1646fbf 100644
--- a/cogl/cogl/cogl-bitmap-packing.h
+++ b/cogl/cogl/cogl-bitmap-packing.h
@@ -65,21 +65,16 @@ G_PASTE (_cogl_unpack_a_8_, component_size) (const uint8_t *src,
 }
 
 inline static void
-G_PASTE (_cogl_unpack_g_8_, component_size) (const uint8_t *src,
+G_PASTE (_cogl_unpack_r_8_, component_size) (const uint8_t *src,
                                              component_type *dst,
                                              int width)
 {
-  /* FIXME: I'm not sure if this is right. It looks like Nvidia and
-     Mesa handle luminance textures differently. Maybe we should
-     consider just removing luminance textures for Cogl 2.0 because
-     they have been removed in GL 3.0 */
   while (width-- > 0)
     {
-      component_type v = UNPACK_BYTE (src[0]);
-      dst[0] = v;
-      dst[1] = v;
-      dst[2] = v;
-      dst[3] = UNPACK_BYTE (255);
+      dst[0] = UNPACK_BYTE (*src);
+      dst[1] = 0;
+      dst[2] = 0;
+      dst[3] = 0;
       dst += 4;
       src++;
     }
@@ -341,8 +336,8 @@ G_PASTE (_cogl_unpack_, component_size) (CoglPixelFormat format,
     case COGL_PIXEL_FORMAT_A_8:
       G_PASTE (_cogl_unpack_a_8_, component_size) (src, dst, width);
       break;
-    case COGL_PIXEL_FORMAT_G_8:
-      G_PASTE (_cogl_unpack_g_8_, component_size) (src, dst, width);
+    case COGL_PIXEL_FORMAT_R_8:
+      G_PASTE (_cogl_unpack_r_8_, component_size) (src, dst, width);
       break;
     case COGL_PIXEL_FORMAT_RG_88:
       G_PASTE (_cogl_unpack_rg_88_, component_size) (src, dst, width);
@@ -433,18 +428,13 @@ G_PASTE (_cogl_pack_a_8_, component_size) (const component_type *src,
 }
 
 inline static void
-G_PASTE (_cogl_pack_g_8_, component_size) (const component_type *src,
+G_PASTE (_cogl_pack_r_8_, component_size) (const component_type *src,
                                            uint8_t *dst,
                                            int width)
 {
-  /* FIXME: I'm not sure if this is right. It looks like Nvidia and
-     Mesa handle luminance textures differently. Maybe we should
-     consider just removing luminance textures for Cogl 2.0 because
-     they have been removed in GL 3.0 */
   while (width-- > 0)
     {
-      component_type v = (src[0] + src[1] + src[2]) / 3;
-      *dst = PACK_BYTE (v);
+      *dst = PACK_BYTE (src[0]);
       src += 4;
       dst++;
     }
@@ -702,8 +692,8 @@ G_PASTE (_cogl_pack_, component_size) (CoglPixelFormat format,
     case COGL_PIXEL_FORMAT_A_8:
       G_PASTE (_cogl_pack_a_8_, component_size) (src, dst, width);
       break;
-    case COGL_PIXEL_FORMAT_G_8:
-      G_PASTE (_cogl_pack_g_8_, component_size) (src, dst, width);
+    case COGL_PIXEL_FORMAT_R_8:
+      G_PASTE (_cogl_pack_r_8_, component_size) (src, dst, width);
       break;
     case COGL_PIXEL_FORMAT_RG_88:
       G_PASTE (_cogl_pack_rg_88_, component_size) (src, dst, width);
diff --git a/cogl/cogl/cogl-gles2-context.c b/cogl/cogl/cogl-gles2-context.c
index e51b72276..5624f74bd 100644
--- a/cogl/cogl/cogl-gles2-context.c
+++ b/cogl/cogl/cogl-gles2-context.c
@@ -297,8 +297,8 @@ copy_flipped_texture (CoglGLES2Context *gles2_ctx,
       internal_format = COGL_PIXEL_FORMAT_A_8;
       break;
 
-    case GL_LUMINANCE:
-      internal_format = COGL_PIXEL_FORMAT_G_8;
+    case GL_RED:
+      internal_format = COGL_PIXEL_FORMAT_R_8;
       break;
 
     default:
diff --git a/cogl/cogl/cogl-pixel-format.c b/cogl/cogl/cogl-pixel-format.c
index a8c0857a6..a62388469 100644
--- a/cogl/cogl/cogl-pixel-format.c
+++ b/cogl/cogl/cogl-pixel-format.c
@@ -83,8 +83,8 @@ static const CoglPixelFormatInfo format_info_table[] = {
     .aligned = -1
   },
   {
-    .cogl_format = COGL_PIXEL_FORMAT_G_8,
-    .format_str = "G_8",
+    .cogl_format = COGL_PIXEL_FORMAT_R_8,
+    .format_str = "R_8",
     .bpp = 1,
     .aligned = 1
   },
diff --git a/cogl/cogl/cogl-pixel-format.h b/cogl/cogl/cogl-pixel-format.h
index c2659fee0..6b015f9e2 100644
--- a/cogl/cogl/cogl-pixel-format.h
+++ b/cogl/cogl/cogl-pixel-format.h
@@ -95,7 +95,7 @@ G_BEGIN_DECLS
  * The mapping is the following (see discussion on bug #660188):
  *
  * 0     = undefined
- * 1, 8  = 1 bpp (e.g. A_8, G_8)
+ * 1, 8  = 1 bpp (e.g. A_8, R_8)
  * 2     = 3 bpp, aligned (e.g. 888)
  * 3     = 4 bpp, aligned (e.g. 8888)
  * 4-6   = 2 bpp, not aligned (e.g. 565, 4444, 5551)
@@ -146,7 +146,7 @@ G_BEGIN_DECLS
  * @COGL_PIXEL_FORMAT_RGBA_4444: RGBA, 16 bits
  * @COGL_PIXEL_FORMAT_RGBA_5551: RGBA, 16 bits
  * @COGL_PIXEL_FORMAT_YUV: Not currently supported
- * @COGL_PIXEL_FORMAT_G_8: Single luminance component
+ * @COGL_PIXEL_FORMAT_R_8: Single red component, 8 bits
  * @COGL_PIXEL_FORMAT_RGB_888: RGB, 24 bits
  * @COGL_PIXEL_FORMAT_BGR_888: BGR, 24 bits
  * @COGL_PIXEL_FORMAT_RGBA_8888: RGBA, 32 bits
@@ -198,7 +198,7 @@ typedef enum /*< prefix=COGL_PIXEL_FORMAT >*/
   COGL_PIXEL_FORMAT_RGBA_4444     = 5 | COGL_A_BIT,
   COGL_PIXEL_FORMAT_RGBA_5551     = 6 | COGL_A_BIT,
   COGL_PIXEL_FORMAT_YUV           = 7,
-  COGL_PIXEL_FORMAT_G_8           = 8,
+  COGL_PIXEL_FORMAT_R_8           = 8,
 
   COGL_PIXEL_FORMAT_RG_88         = 9,
 
diff --git a/cogl/cogl/driver/gl/gl/cogl-driver-gl.c b/cogl/cogl/driver/gl/gl/cogl-driver-gl.c
index cc870762a..bb42e7910 100644
--- a/cogl/cogl/driver/gl/gl/cogl-driver-gl.c
+++ b/cogl/cogl/driver/gl/gl/cogl-driver-gl.c
@@ -58,18 +58,11 @@ _cogl_driver_pixel_format_from_gl_internal (CoglContext *context,
     {
     case GL_ALPHA: case GL_ALPHA4: case GL_ALPHA8:
     case GL_ALPHA12: case GL_ALPHA16:
-      /* Cogl only supports one single-component texture so if we have
-       * ended up with a red texture then it is probably being used as
-       * a component-alpha texture */
-    case GL_RED:
-
       *out_format = COGL_PIXEL_FORMAT_A_8;
       return TRUE;
 
-    case GL_LUMINANCE: case GL_LUMINANCE4: case GL_LUMINANCE8:
-    case GL_LUMINANCE12: case GL_LUMINANCE16:
-
-      *out_format = COGL_PIXEL_FORMAT_G_8;
+    case GL_RED:
+      *out_format = COGL_PIXEL_FORMAT_R_8;
       return TRUE;
 
     case GL_RG:
@@ -126,9 +119,9 @@ _cogl_driver_pixel_format_to_gl (CoglContext     *context,
         }
       gltype = GL_UNSIGNED_BYTE;
       break;
-    case COGL_PIXEL_FORMAT_G_8:
-      glintformat = GL_LUMINANCE;
-      glformat = GL_LUMINANCE;
+    case COGL_PIXEL_FORMAT_R_8:
+      glintformat = GL_RED;
+      glformat = GL_RED;
       gltype = GL_UNSIGNED_BYTE;
       break;
 
diff --git a/cogl/cogl/driver/gl/gles/cogl-driver-gles.c b/cogl/cogl/driver/gl/gles/cogl-driver-gles.c
index 1583ad335..829d2a38b 100644
--- a/cogl/cogl/driver/gl/gles/cogl-driver-gles.c
+++ b/cogl/cogl/driver/gl/gles/cogl-driver-gles.c
@@ -86,9 +86,9 @@ _cogl_driver_pixel_format_to_gl (CoglContext     *context,
       glformat = GL_ALPHA;
       gltype = GL_UNSIGNED_BYTE;
       break;
-    case COGL_PIXEL_FORMAT_G_8:
-      glintformat = GL_LUMINANCE;
-      glformat = GL_LUMINANCE;
+    case COGL_PIXEL_FORMAT_R_8:
+      glintformat = GL_RED;
+      glformat = GL_RED;
       gltype = GL_UNSIGNED_BYTE;
       break;
 
diff --git a/cogl/tests/conform/test-read-texture-formats.c b/cogl/tests/conform/test-read-texture-formats.c
index 9add8127d..379fdcfad 100644
--- a/cogl/tests/conform/test-read-texture-formats.c
+++ b/cogl/tests/conform/test-read-texture-formats.c
@@ -173,12 +173,7 @@ test_read_texture_formats (void)
 
   test_read_byte (tex_2d, COGL_PIXEL_FORMAT_A_8, 0x78);
 
-#if 0
-  /* I'm not sure what's the right value to put here because Nvidia
-     and Mesa seem to behave differently so one of them must be
-     wrong. */
-  test_read_byte (tex_2d, COGL_PIXEL_FORMAT_G_8, 0x9c);
-#endif
+  test_read_byte (tex_2d, COGL_PIXEL_FORMAT_R_8, 0x12);
 
   /* We should always be able to read into an RG buffer regardless of
    * whether RG textures are supported because Cogl will do the
diff --git a/cogl/tests/conform/test-write-texture-formats.c b/cogl/tests/conform/test-write-texture-formats.c
index ca8015771..5cb473518 100644
--- a/cogl/tests/conform/test-write-texture-formats.c
+++ b/cogl/tests/conform/test-write-texture-formats.c
@@ -135,12 +135,7 @@ void
 test_write_texture_formats (void)
 {
   test_write_byte (test_ctx, COGL_PIXEL_FORMAT_A_8, 0x34, 0x00000034);
-#if 0
-  /* I'm not sure what's the right value to put here because Nvidia
-     and Mesa seem to behave differently so one of them must be
-     wrong. */
-  test_write_byte (test_ctx, COGL_PIXEL_FORMAT_G_8, 0x34, 0x340000ff);
-#endif
+  test_write_byte (test_ctx, COGL_PIXEL_FORMAT_R_8, 0x34, 0x34000000);
 
   /* We should always be able to read from an RG buffer regardless of
    * whether RG textures are supported because Cogl will do the

