unit pixfmt;

{$include ffmpeg.inc}

interface

(*
 * copyright (c) 2006 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *)

Const
  AVPALETTE_SIZE  = 1024;
  AVPALETTE_COUNT = 256;

(*
 * Pixel format.
 *
 * @note
 * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA
 * color is put together as:
 *  (A << 24) | (R << 16) | (G << 8) | B
 * This is stored as BGRA on little-endian CPU architectures and ARGB on
 * big-endian CPUs.
 *
 * @par
 * When the pixel format is palettized RGB (AV_PIX_FMT_PAL8), the palettized
 * image data is stored in AVFrame.data[0]. The palette is transported in
 * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is
 * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is
 * also endian-specific). Note also that the individual RGB palette
 * components stored in AVFrame.data[1] should be in the range 0..255.
 * This is important as many custom PAL8 video codecs that were designed
 * to run on the IBM VGA graphics adapter use 6-bit palette components.
 *
 * @par
 * For all the 8bit per pixel formats, an RGB32 palette is in data[1] like
 * for pal8. This palette is filled in automatically by the function
 * allocating the picture.
 *
 * @note
 * Make sure that all newly added big-endian formats have (pix_fmt & 1) == 1
 * and that all newly added little-endian formats have (pix_fmt & 1) == 0.
 * This allows simpler detection of big vs little-endian.
 *)
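(*
 * Illustrative sketch (not part of the original FFmpeg header): packing a
 * color the way the AV_PIX_FMT_RGB32 / AV_PIX_FMT_PAL8 notes above describe.
 * The helper name PackARGB and the Palette variable are hypothetical; they
 * only show the intended byte layout.
 *
 *   function PackARGB(A, R, G, B: Byte): Cardinal;
 *   begin
 *     // (A << 24) | (R << 16) | (G << 8) | B; in memory this lands as
 *     // B,G,R,A on little-endian CPUs and as A,R,G,B on big-endian CPUs.
 *     Result := (Cardinal(A) shl 24) or (Cardinal(R) shl 16) or
 *               (Cardinal(G) shl 8) or Cardinal(B);
 *   end;
 *
 *   var
 *     Palette: array [0 .. AVPALETTE_COUNT - 1] of Cardinal; // AVPALETTE_SIZE bytes in total
 *   ...
 *   // For AV_PIX_FMT_PAL8, AVFrame.data[0] holds the 8-bit indices and
 *   // AVFrame.data[1] holds a palette with exactly this layout.
 *   Palette[0] := PackARGB($FF, 0, 0, 0); // index 0 = opaque black
 *)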
Type
  pAVPixelFormat = ^TAVPixelFormat;
  TAVPixelFormat = ( //
    AV_PIX_FMT_NONE = -1, //
    AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
    AV_PIX_FMT_YUYV422, ///< {packed} YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
    AV_PIX_FMT_RGB24, ///< {packed} RGB 8:8:8, 24bpp, RGBRGB...
    AV_PIX_FMT_BGR24, ///< {packed} RGB 8:8:8, 24bpp, BGRBGR...
    AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
    AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
    AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
    AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
    AV_PIX_FMT_GRAY8, ///< Y, 8bpp
    AV_PIX_FMT_MONOWHITE, ///< Y, 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
    AV_PIX_FMT_MONOBLACK, ///< Y, 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
    AV_PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette
    AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range
    AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range
    AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range
{$IFDEF FF_API_XVMC}
    AV_PIX_FMT_XVMC_MPEG2_MC, ///< XVideo Motion Acceleration via common packet passing
    AV_PIX_FMT_XVMC_MPEG2_IDCT,
{$ENDIF} // FF_API_XVMC
    AV_PIX_FMT_UYVY422, ///< {packed} YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
    AV_PIX_FMT_UYYVYY411, ///< {packed} YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
    AV_PIX_FMT_BGR8, ///< {packed} RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
    AV_PIX_FMT_BGR4, ///< {packed} RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
    AV_PIX_FMT_BGR4_BYTE, ///< {packed} RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
    AV_PIX_FMT_RGB8, ///< {packed} RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
    AV_PIX_FMT_RGB4, ///< {packed} RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
    AV_PIX_FMT_RGB4_BYTE, ///< {packed} RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
    AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
    AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped
    AV_PIX_FMT_ARGB, ///< {packed} ARGB 8:8:8:8, 32bpp, ARGBARGB...
    AV_PIX_FMT_RGBA, ///< {packed} RGBA 8:8:8:8, 32bpp, RGBARGBA...
    AV_PIX_FMT_ABGR, ///< {packed} ABGR 8:8:8:8, 32bpp, ABGRABGR...
    AV_PIX_FMT_BGRA, ///< {packed} BGRA 8:8:8:8, 32bpp, BGRABGRA...
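    (*
     * Illustrative sketch (not part of the original header): addressing the two
     * planes of an AV_PIX_FMT_NV12 frame as described above. "Frame" stands for a
     * decoded AVFrame with data[] / linesize[] fields; the byte indexing shown is
     * pseudocode only, and the integer division maps each pixel to its 2x2 chroma block.
     *
     *   // Luma: one byte per pixel in data[0]
     *   Y := Frame.data[0][y * Frame.linesize[0] + x];
     *   // Chroma: one interleaved U,V byte pair per 2x2 block in data[1]
     *   U := Frame.data[1][(y div 2) * Frame.linesize[1] + (x div 2) * 2];
     *   V := Frame.data[1][(y div 2) * Frame.linesize[1] + (x div 2) * 2 + 1];
     *   // AV_PIX_FMT_NV21 is identical except that the U and V bytes are swapped.
     *)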
    AV_PIX_FMT_GRAY16BE, ///< Y, 16bpp, big-endian
    AV_PIX_FMT_GRAY16LE, ///< Y, 16bpp, little-endian
    AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
    AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range
    AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
{$IFDEF FF_API_VDPAU}
    AV_PIX_FMT_VDPAU_H264, ///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    AV_PIX_FMT_VDPAU_MPEG1, ///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    AV_PIX_FMT_VDPAU_MPEG2, ///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    AV_PIX_FMT_VDPAU_WMV3, ///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    AV_PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
{$ENDIF}
    AV_PIX_FMT_RGB48BE, ///< {packed} RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
    AV_PIX_FMT_RGB48LE, ///< {packed} RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
    AV_PIX_FMT_RGB565BE, ///< {packed} RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
    AV_PIX_FMT_RGB565LE, ///< {packed} RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
    AV_PIX_FMT_RGB555BE, ///< {packed} RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0
    AV_PIX_FMT_RGB555LE, ///< {packed} RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0
    AV_PIX_FMT_BGR565BE, ///< {packed} BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
    AV_PIX_FMT_BGR565LE, ///< {packed} BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
    AV_PIX_FMT_BGR555BE, ///< {packed} BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1
    AV_PIX_FMT_BGR555LE, ///< {packed} BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1
    AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
    AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
    AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
{$IFDEF FF_API_VDPAU}
    AV_PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
{$ENDIF}
    AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
    AV_PIX_FMT_RGB444LE, ///< {packed} RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0
    AV_PIX_FMT_RGB444BE, ///< {packed} RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0
    AV_PIX_FMT_BGR444LE, ///< {packed} BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1
    AV_PIX_FMT_BGR444BE, ///< {packed} BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1
    AV_PIX_FMT_GRAY8A, ///< 8bit gray, 8bit alpha
    AV_PIX_FMT_BGR48BE, ///< {packed} RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
    AV_PIX_FMT_BGR48LE, ///< {packed} RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian

    (*
     * The following 12 formats have the disadvantage of needing 1 format for each bit depth.
     * Notice that each 9/10 bits sample is stored in 16 bits with extra padding.
     * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better.
     *)
    AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV420P10BE, ///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P10LE, ///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV422P10BE, ///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P10LE, ///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P10BE, ///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P10LE, ///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_VDA_VLD, ///< hardware decoding through VDA
{$IFDEF AV_PIX_FMT_ABI_GIT_MASTER}
    AV_PIX_FMT_RGBA64BE, ///< {packed} RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_RGBA64LE, ///< {packed} RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
    AV_PIX_FMT_BGRA64BE, ///< {packed} RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_BGRA64LE, ///< {packed} RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
{$ENDIF}
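    (*
     * Illustrative sketch (not part of the original header), expanding on the note
     * above about the 9/10/12/14-bit planar formats: every sample occupies a full
     * 16-bit word, with the value in the low bits and the remaining bits as padding.
     * The Plane/LineSize names below are hypothetical.
     *
     *   // e.g. AV_PIX_FMT_YUV420P10LE: read the luma sample at (x, y) as a Word;
     *   // its value is in the range 0..1023.
     *   Sample := PWord(@Plane[y * LineSize + x * 2])^;
     *)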
    AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp
    AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian
    AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian
    AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian
    AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian
    AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian
    AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian

    (*
     * duplicated pixel formats for compatibility with libav.
     * FFmpeg supports these formats since May 8 2012 and Jan 28 2012 (commits f9ca1ac7 and 143a5c55)
     * Libav added them Oct 12 2012 with incompatible values (commit 6d5600e85)
     *)
    AV_PIX_FMT_YUVA422P_LIBAV, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
    AV_PIX_FMT_YUVA444P_LIBAV, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
    AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
    AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
    AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
    AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian
    AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
    AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
    AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
    AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
    AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
    AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface
    AV_PIX_FMT_XYZ12LE, ///< {packed} XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0
    AV_PIX_FMT_XYZ12BE, ///< {packed} XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0
    AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
    AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
{$IFNDEF AV_PIX_FMT_ABI_GIT_MASTER}
    AV_PIX_FMT_RGBA64BE = $123, ///< {packed} RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_RGBA64LE, ///< {packed} RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
    AV_PIX_FMT_BGRA64BE, ///< {packed} RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
    AV_PIX_FMT_BGRA64LE, ///< {packed} RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
{$ENDIF}
    AV_PIX_FMT_0RGB = $123 + 4, ///< {packed} RGB 8:8:8, 32bpp, 0RGB0RGB...
    AV_PIX_FMT_RGB0, ///< {packed} RGB 8:8:8, 32bpp, RGB0RGB0...
    AV_PIX_FMT_0BGR, ///< {packed} BGR 8:8:8, 32bpp, 0BGR0BGR...
    AV_PIX_FMT_BGR0, ///< {packed} BGR 8:8:8, 32bpp, BGR0BGR0...
    AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
    AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
    AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0, 18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0, 18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0, 21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
    AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0, 21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
    AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2, 24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2, 24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2, 28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2, 28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4, 36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4, 36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4, 42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4, 42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian
    AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian
    AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian
    AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian
    AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp
    AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian
    AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian
    AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of PIX_FMT_YUV411P and setting color_range
    AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples
    AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples
    AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples
    AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples
    AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian
    AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian
    AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian
    AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian
    AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian
    AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian
    AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian
    AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian
    AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
{$IFDEF FF_API_PIX_FMT}
    , {$INCLUDE old_pix_fmts.inc}
{$ENDIF}
  );

const
{$IFDEF AV_HAVE_INCOMPATIBLE_LIBAV_ABI}
  AV_PIX_FMT_YUVA422P = AV_PIX_FMT_YUVA422P_LIBAV;
  AV_PIX_FMT_YUVA444P = AV_PIX_FMT_YUVA444P_LIBAV;
{$ENDIF}
  AV_PIX_FMT_Y400A = AV_PIX_FMT_GRAY8A;
  AV_PIX_FMT_GBR24P = AV_PIX_FMT_GBRP;

{$IFDEF AV_HAVE_BIGENDIAN}
  // #define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be
  AV_PIX_FMT_RGB32 = AV_PIX_FMT_ARGB;
  AV_PIX_FMT_RGB32_1 = AV_PIX_FMT_RGBA;
  AV_PIX_FMT_BGR32 = AV_PIX_FMT_ABGR;
  AV_PIX_FMT_BGR32_1 = AV_PIX_FMT_BGRA;
  AV_PIX_FMT_0RGB32 = AV_PIX_FMT_0RGB;
  AV_PIX_FMT_0BGR32 = AV_PIX_FMT_0BGR;
  //
  AV_PIX_FMT_GRAY16 = AV_PIX_FMT_GRAY16BE;
  AV_PIX_FMT_RGB48 = AV_PIX_FMT_RGB48BE;
  AV_PIX_FMT_RGB565 = AV_PIX_FMT_RGB565BE;
  AV_PIX_FMT_RGB555 = AV_PIX_FMT_RGB555BE;
  AV_PIX_FMT_RGB444 = AV_PIX_FMT_RGB444BE;
  AV_PIX_FMT_BGR48 = AV_PIX_FMT_BGR48BE;
  AV_PIX_FMT_BGR565 = AV_PIX_FMT_BGR565BE;
  AV_PIX_FMT_BGR555 = AV_PIX_FMT_BGR555BE;
  AV_PIX_FMT_BGR444 = AV_PIX_FMT_BGR444BE;
  //
  AV_PIX_FMT_YUV420P9 = AV_PIX_FMT_YUV420P9BE;
  AV_PIX_FMT_YUV422P9 = AV_PIX_FMT_YUV422P9BE;
  AV_PIX_FMT_YUV444P9 = AV_PIX_FMT_YUV444P9BE;
  AV_PIX_FMT_YUV420P10 = AV_PIX_FMT_YUV420P10BE;
  AV_PIX_FMT_YUV422P10 = AV_PIX_FMT_YUV422P10BE;
  AV_PIX_FMT_YUV444P10 = AV_PIX_FMT_YUV444P10BE;
  AV_PIX_FMT_YUV420P12 = AV_PIX_FMT_YUV420P12BE;
  AV_PIX_FMT_YUV422P12 = AV_PIX_FMT_YUV422P12BE;
  AV_PIX_FMT_YUV444P12 = AV_PIX_FMT_YUV444P12BE;
  AV_PIX_FMT_YUV420P14 = AV_PIX_FMT_YUV420P14BE;
  AV_PIX_FMT_YUV422P14 = AV_PIX_FMT_YUV422P14BE;
  AV_PIX_FMT_YUV444P14 = AV_PIX_FMT_YUV444P14BE;
  AV_PIX_FMT_YUV420P16 = AV_PIX_FMT_YUV420P16BE;
  AV_PIX_FMT_YUV422P16 = AV_PIX_FMT_YUV422P16BE;
  AV_PIX_FMT_YUV444P16 = AV_PIX_FMT_YUV444P16BE;
  //
  AV_PIX_FMT_RGBA64 = AV_PIX_FMT_RGBA64BE;
  AV_PIX_FMT_BGRA64 = AV_PIX_FMT_BGRA64BE;
  AV_PIX_FMT_GBRP9 = AV_PIX_FMT_GBRP9BE;
  AV_PIX_FMT_GBRP10 = AV_PIX_FMT_GBRP10BE;
  AV_PIX_FMT_GBRP12 = AV_PIX_FMT_GBRP12BE;
  AV_PIX_FMT_GBRP14 = AV_PIX_FMT_GBRP14BE;
  AV_PIX_FMT_GBRP16 = AV_PIX_FMT_GBRP16BE;
  AV_PIX_FMT_GBRAP16 = AV_PIX_FMT_GBRAP16BE;
  //
  AV_PIX_FMT_BAYER_BGGR16 = AV_PIX_FMT_BAYER_BGGR16BE;
  AV_PIX_FMT_BAYER_RGGB16 = AV_PIX_FMT_BAYER_RGGB16BE;
  AV_PIX_FMT_BAYER_GBRG16 = AV_PIX_FMT_BAYER_GBRG16BE;
  AV_PIX_FMT_BAYER_GRBG16 = AV_PIX_FMT_BAYER_GRBG16BE;
  //
  //
  AV_PIX_FMT_YUVA420P9 = AV_PIX_FMT_YUVA420P9BE;
  AV_PIX_FMT_YUVA422P9 = AV_PIX_FMT_YUVA422P9BE;
  AV_PIX_FMT_YUVA444P9 = AV_PIX_FMT_YUVA444P9BE;
  AV_PIX_FMT_YUVA420P10 = AV_PIX_FMT_YUVA420P10BE;
  AV_PIX_FMT_YUVA422P10 = AV_PIX_FMT_YUVA422P10BE;
  AV_PIX_FMT_YUVA444P10 = AV_PIX_FMT_YUVA444P10BE;
  AV_PIX_FMT_YUVA420P16 = AV_PIX_FMT_YUVA420P16BE;
  AV_PIX_FMT_YUVA422P16 = AV_PIX_FMT_YUVA422P16BE;
  AV_PIX_FMT_YUVA444P16 = AV_PIX_FMT_YUVA444P16BE;
  //
  AV_PIX_FMT_XYZ12 = AV_PIX_FMT_XYZ12BE;
  AV_PIX_FMT_NV20 = AV_PIX_FMT_NV20BE;
{$ELSE}
  // #define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le
  AV_PIX_FMT_RGB32 = AV_PIX_FMT_BGRA;
  AV_PIX_FMT_RGB32_1 = AV_PIX_FMT_ABGR;
  AV_PIX_FMT_BGR32 = AV_PIX_FMT_RGBA;
  AV_PIX_FMT_BGR32_1 = AV_PIX_FMT_ARGB;
  AV_PIX_FMT_0RGB32 = AV_PIX_FMT_BGR0;
  AV_PIX_FMT_0BGR32 = AV_PIX_FMT_RGB0;
  //
  AV_PIX_FMT_GRAY16 = AV_PIX_FMT_GRAY16LE;
  AV_PIX_FMT_RGB48 = AV_PIX_FMT_RGB48LE;
  AV_PIX_FMT_RGB565 = AV_PIX_FMT_RGB565LE;
  AV_PIX_FMT_RGB555 = AV_PIX_FMT_RGB555LE;
  AV_PIX_FMT_RGB444 = AV_PIX_FMT_RGB444LE;
  AV_PIX_FMT_BGR48 = AV_PIX_FMT_BGR48LE;
  AV_PIX_FMT_BGR565 = AV_PIX_FMT_BGR565LE;
  AV_PIX_FMT_BGR555 = AV_PIX_FMT_BGR555LE;
  AV_PIX_FMT_BGR444 = AV_PIX_FMT_BGR444LE;
  //
  AV_PIX_FMT_YUV420P9 = AV_PIX_FMT_YUV420P9LE;
  AV_PIX_FMT_YUV422P9 = AV_PIX_FMT_YUV422P9LE;
  AV_PIX_FMT_YUV444P9 = AV_PIX_FMT_YUV444P9LE;
  AV_PIX_FMT_YUV420P10 = AV_PIX_FMT_YUV420P10LE;
  AV_PIX_FMT_YUV422P10 = AV_PIX_FMT_YUV422P10LE;
  AV_PIX_FMT_YUV444P10 = AV_PIX_FMT_YUV444P10LE;
  AV_PIX_FMT_YUV420P12 = AV_PIX_FMT_YUV420P12LE;
  AV_PIX_FMT_YUV422P12 = AV_PIX_FMT_YUV422P12LE;
  AV_PIX_FMT_YUV444P12 = AV_PIX_FMT_YUV444P12LE;
  AV_PIX_FMT_YUV420P14 = AV_PIX_FMT_YUV420P14LE;
  AV_PIX_FMT_YUV422P14 = AV_PIX_FMT_YUV422P14LE;
  AV_PIX_FMT_YUV444P14 = AV_PIX_FMT_YUV444P14LE;
  AV_PIX_FMT_YUV420P16 = AV_PIX_FMT_YUV420P16LE;
  AV_PIX_FMT_YUV422P16 = AV_PIX_FMT_YUV422P16LE;
  AV_PIX_FMT_YUV444P16 = AV_PIX_FMT_YUV444P16LE;
  //
  AV_PIX_FMT_RGBA64 = AV_PIX_FMT_RGBA64LE;
  AV_PIX_FMT_BGRA64 = AV_PIX_FMT_BGRA64LE;
  AV_PIX_FMT_GBRP9 = AV_PIX_FMT_GBRP9LE;
  AV_PIX_FMT_GBRP10 = AV_PIX_FMT_GBRP10LE;
  AV_PIX_FMT_GBRP12 = AV_PIX_FMT_GBRP12LE;
  AV_PIX_FMT_GBRP14 = AV_PIX_FMT_GBRP14LE;
  AV_PIX_FMT_GBRP16 = AV_PIX_FMT_GBRP16LE;
  AV_PIX_FMT_GBRAP16 = AV_PIX_FMT_GBRAP16LE;
  //
  AV_PIX_FMT_BAYER_BGGR16 = AV_PIX_FMT_BAYER_BGGR16LE;
  AV_PIX_FMT_BAYER_RGGB16 = AV_PIX_FMT_BAYER_RGGB16LE;
  AV_PIX_FMT_BAYER_GBRG16 = AV_PIX_FMT_BAYER_GBRG16LE;
  AV_PIX_FMT_BAYER_GRBG16 = AV_PIX_FMT_BAYER_GRBG16LE;
  //
  //
  AV_PIX_FMT_YUVA420P9 = AV_PIX_FMT_YUVA420P9LE;
  AV_PIX_FMT_YUVA422P9 = AV_PIX_FMT_YUVA422P9LE;
  AV_PIX_FMT_YUVA444P9 = AV_PIX_FMT_YUVA444P9LE;
  AV_PIX_FMT_YUVA420P10 = AV_PIX_FMT_YUVA420P10LE;
  AV_PIX_FMT_YUVA422P10 = AV_PIX_FMT_YUVA422P10LE;
  AV_PIX_FMT_YUVA444P10 = AV_PIX_FMT_YUVA444P10LE;
  AV_PIX_FMT_YUVA420P16 = AV_PIX_FMT_YUVA420P16LE;
  AV_PIX_FMT_YUVA422P16 = AV_PIX_FMT_YUVA422P16LE;
  AV_PIX_FMT_YUVA444P16 = AV_PIX_FMT_YUVA444P16LE;
  //
  AV_PIX_FMT_XYZ12 = AV_PIX_FMT_XYZ12LE;
  AV_PIX_FMT_NV20 = AV_PIX_FMT_NV20LE;
{$ENDIF}

{$IFDEF FF_API_PIX_FMT}
Type
  TPixelFormat = TAVPixelFormat;

Const
  //
  PIX_FMT_Y400A = AV_PIX_FMT_Y400A;
  PIX_FMT_GBR24P = AV_PIX_FMT_GBR24P;
  //
  // PIX_FMT_NE(be, le) AV_PIX_FMT_NE(be, le)
  //
  PIX_FMT_RGB32 = AV_PIX_FMT_RGB32;
  PIX_FMT_RGB32_1 = AV_PIX_FMT_RGB32_1;
  PIX_FMT_BGR32 = AV_PIX_FMT_BGR32;
  PIX_FMT_BGR32_1 = AV_PIX_FMT_BGR32_1;
  PIX_FMT_0RGB32 = AV_PIX_FMT_0RGB32;
  PIX_FMT_0BGR32 = AV_PIX_FMT_0BGR32;
  //
  PIX_FMT_GRAY16 = AV_PIX_FMT_GRAY16;
  PIX_FMT_RGB48 = AV_PIX_FMT_RGB48;
  PIX_FMT_RGB565 = AV_PIX_FMT_RGB565;
  PIX_FMT_RGB555 = AV_PIX_FMT_RGB555;
  PIX_FMT_RGB444 = AV_PIX_FMT_RGB444;
  PIX_FMT_BGR48 = AV_PIX_FMT_BGR48;
  PIX_FMT_BGR565 = AV_PIX_FMT_BGR565;
  PIX_FMT_BGR555 = AV_PIX_FMT_BGR555;
  PIX_FMT_BGR444 = AV_PIX_FMT_BGR444;
  //
  PIX_FMT_YUV420P9 = AV_PIX_FMT_YUV420P9;
  PIX_FMT_YUV422P9 = AV_PIX_FMT_YUV422P9;
  PIX_FMT_YUV444P9 = AV_PIX_FMT_YUV444P9;
  PIX_FMT_YUV420P10 = AV_PIX_FMT_YUV420P10;
  PIX_FMT_YUV422P10 = AV_PIX_FMT_YUV422P10;
  PIX_FMT_YUV444P10 = AV_PIX_FMT_YUV444P10;
  PIX_FMT_YUV420P12 = AV_PIX_FMT_YUV420P12;
  PIX_FMT_YUV422P12 = AV_PIX_FMT_YUV422P12;
  PIX_FMT_YUV444P12 = AV_PIX_FMT_YUV444P12;
  PIX_FMT_YUV420P14 = AV_PIX_FMT_YUV420P14;
  PIX_FMT_YUV422P14 = AV_PIX_FMT_YUV422P14;
  PIX_FMT_YUV444P14 = AV_PIX_FMT_YUV444P14;
  PIX_FMT_YUV420P16 = AV_PIX_FMT_YUV420P16;
  PIX_FMT_YUV422P16 = AV_PIX_FMT_YUV422P16;
  PIX_FMT_YUV444P16 = AV_PIX_FMT_YUV444P16;
  //
  PIX_FMT_RGBA64 = AV_PIX_FMT_RGBA64;
  PIX_FMT_BGRA64 = AV_PIX_FMT_BGRA64;
  PIX_FMT_GBRP9 = AV_PIX_FMT_GBRP9;
  PIX_FMT_GBRP10 = AV_PIX_FMT_GBRP10;
  PIX_FMT_GBRP12 = AV_PIX_FMT_GBRP12;
  PIX_FMT_GBRP14 = AV_PIX_FMT_GBRP14;
  PIX_FMT_GBRP16 = AV_PIX_FMT_GBRP16;
{$ENDIF}

implementation

end.