Code/Resource
Windows Develop
Linux-Unix program
Internet-Socket-Network
Web Server
Browser Client
Ftp Server
Ftp Client
Browser Plugins
Proxy Server
Email Server
Email Client
WEB Mail
Firewall-Security
Telnet Server
Telnet Client
ICQ-IM-Chat
Search Engine
Sniffer Package capture
Remote Control
xml-soap-webservice
P2P
WEB(ASP,PHP,...)
TCP/IP Stack
SNMP
Grid Computing
SilverLight
DNS
Cluster Service
Network Security
Communication-Mobile
Game Program
Editor
Multimedia program
Graph program
Compiler program
Compress-Decompress algorithms
Crypt_Decrypt algorithms
Mathematics-Numerical algorithms
MultiLanguage
Disk/Storage
Java Develop
assembly language
Applications
Other systems
Database system
Embedded-SCM Develop
FlashMX/Flex
source in ebook
Delphi VCL
OS Develop
MiddleWare
MPI
MacOS develop
LabView
ELanguage
Software/Tools
E-Books
Article/Document
yuv2rgb.c
Package: 142_61_thumb_advanced.rar [view]
Upload User: dangjiwu
Upload Date: 2013-07-19
Package Size: 42019k
Code Size: 179k
Category:
Symbian
Development Platform:
Visual C++
- /* ***** BEGIN LICENSE BLOCK *****
- * Source last modified: $Id: yuv2rgb.c,v 1.2.42.2 2004/07/13 19:01:32 bobclark Exp $
- *
- * Portions Copyright (c) 1995-2004 RealNetworks, Inc. All Rights Reserved.
- *
- * The contents of this file, and the files included with this file,
- * are subject to the current version of the RealNetworks Public
- * Source License (the "RPSL") available at
- * http://www.helixcommunity.org/content/rpsl unless you have licensed
- * the file under the current version of the RealNetworks Community
- * Source License (the "RCSL") available at
- * http://www.helixcommunity.org/content/rcsl, in which case the RCSL
- * will apply. You may also obtain the license terms directly from
- * RealNetworks. You may not use this file except in compliance with
- * the RPSL or, if you have a valid RCSL with RealNetworks applicable
- * to this file, the RCSL. Please see the applicable RPSL or RCSL for
- * the rights, obligations and limitations governing use of the
- * contents of the file.
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License Version 2 or later (the
- * "GPL") in which case the provisions of the GPL are applicable
- * instead of those above. If you wish to allow use of your version of
- * this file only under the terms of the GPL, and not to allow others
- * to use your version of this file under the terms of either the RPSL
- * or RCSL, indicate your decision by deleting the provisions above
- * and replace them with the notice and other provisions required by
- * the GPL. If you do not delete the provisions above, a recipient may
- * use your version of this file under the terms of any one of the
- * RPSL, the RCSL or the GPL.
- *
- * This file is part of the Helix DNA Technology. RealNetworks is the
- * developer of the Original Code and owns the copyrights in the
- * portions it created.
- *
- * This file, and the files included with this file, is distributed
- * and made available on an 'AS IS' basis, WITHOUT WARRANTY OF ANY
- * KIND, EITHER EXPRESS OR IMPLIED, AND REALNETWORKS HEREBY DISCLAIMS
- * ALL SUCH WARRANTIES, INCLUDING WITHOUT LIMITATION, ANY WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, QUIET
- * ENJOYMENT OR NON-INFRINGEMENT.
- *
- * Technology Compatibility Kit Test Suite(s) Location:
- * http://www.helixcommunity.org/content/tck
- *
- * Contributor(s):
- *
- * ***** END LICENSE BLOCK ***** */
- /*** #includes: ********************************************/
- #include "env.h"
- #include "rgb.h" /* basic RGB-data definitions & macros */
- #include "yuv.h" /* YUV-to-RGB conversion tables & macros */
- #include "clip.h" /* macros for clipping & dithering */
- #include "scale.h" /* scale algorithms */
- #include "colorlib.h" /* ensure that prototypes get extern C'ed */
- #ifdef _MACINTOSH
- #pragma require_prototypes off
- #endif
- static int YUVtoRGB2 (
- int dest_format,
- unsigned char *dest_ptr, int dest_width, int dest_height,
- int dest_pitch, int dest_x, int dest_y, int dest_dx, int dest_dy,
- unsigned char *pY, unsigned char *pU, unsigned char *pV,
- int src_width, int src_height, int yPitch, int uPitch, int vPitch,
- int src_x, int src_y, int src_dx, int src_dy);
/*** Additional pixel-level macros: ************************/

/*
 * Add dither, clip and assign values to RGB pixels:
 * RGBX_CLIP_X clips one component `v` to the bit depth of component
 * `x` in format `f` and shifts it to that component's bit position.
 */
#define RGBX_CLIP_X(f,rnd,x,v)       (CLIP(rnd,BITS(f,x),v) << START(f,x))
#define RGBX_CLIP_SET(f,rnd,a,r,g,b) \
    a##_rgb = RGBX_CLIP_X(f,rnd,R,r) | RGBX_CLIP_X(f,rnd,G,g) | RGBX_CLIP_X(f,rnd,B,b)

#define RGB32_CLIP_SET(rnd,a,r,g,b)  RGBX_CLIP_SET(RGB32,rnd,a,r,g,b)
#define BGR32_CLIP_SET(rnd,a,r,g,b)  RGBX_CLIP_SET(BGR32,rnd,a,r,g,b)
/* 24-bit pixels are kept as three separate 8-bit components: */
#define RGB24_CLIP_SET(rnd,a,r,g,b) \
    a##_b = CLIP(rnd,8,b), a##_g = CLIP(rnd,8,g), a##_r = CLIP(rnd,8,r)
#define RGB565_CLIP_SET(rnd,a,r,g,b) RGBX_CLIP_SET(RGB565,rnd,a,r,g,b)
#define RGB555_CLIP_SET(rnd,a,r,g,b) RGBX_CLIP_SET(RGB555,rnd,a,r,g,b)
#define RGB444_CLIP_SET(rnd,a,r,g,b) RGBX_CLIP_SET(RGB444,rnd,a,r,g,b)
/* 8-bit pixels are palette indices looked up via pmap[] from a
 * 4:4:4 packed RGB value: */
#define RGB8_CLIP_SET(rnd,a,r,g,b) \
    a##_idx = pmap[(CLIP(rnd,4,r)<<8) | (CLIP(rnd,4,g)<<4) | CLIP(rnd,4,b)]

/*
 * Generic RGB clipping & assignment macro:
 */
#define CLIP_SET(f,rnd,a,r,g,b)      f##_CLIP_SET(rnd,a,r,g,b)
/*
 * YUV 2x1-block load and convert macros:
 * convert one pixel from each of two rows (sy1/sy2) sharing a single
 * chroma sample (su/sv) into pixel variables a1/a2.
 * FAST variant omits the U->red and V->blue cross terms.
 */
#define YUV_LOAD_CONVERT_2x1_FAST(df,a1,a2,sy1,sy2,su,sv) \
{ \
    register int y1, y2, rv, guv, bu; \
    bu  = butab[su[0]]; \
    guv = gutab[su[0]] + gvtab[sv[0]]; \
    rv  = rvtab[sv[0]]; \
    y1  = ytab[sy1[0]]; \
    y2  = ytab[sy2[0]]; \
    CLIP_SET(df,ROUND,a1,y1+rv,y1+guv,y1+bu); \
    CLIP_SET(df,ROUND,a2,y2+rv,y2+guv,y2+bu); \
}

/* with Hue rotation: */
#define YUV_LOAD_CONVERT_2x1_FULL(df,a1,a2,sy1,sy2,su,sv) \
{ \
    register int y1, y2, ruv, guv, buv; \
    buv = butab[su[0]] + bvtab[sv[0]]; \
    guv = gutab[su[0]] + gvtab[sv[0]]; \
    ruv = rutab[su[0]] + rvtab[sv[0]]; \
    y1  = ytab[sy1[0]]; \
    y2  = ytab[sy2[0]]; \
    CLIP_SET(df,ROUND,a1,y1+ruv,y1+guv,y1+buv); \
    CLIP_SET(df,ROUND,a2,y2+ruv,y2+guv,y2+buv); \
}

/*
 * Generic YUV 2x1-block load & convert macro:
 */
#define YUV_LOAD_CONVERT_2x1(cc,df,a1,a2,sy1,sy2,su,sv) \
    YUV_LOAD_CONVERT_2x1_##cc(df,a1,a2,sy1,sy2,su,sv)
/*
 * YUV 2x2-block load and convert macros:
 * (without dithering)
 * convert two pixels from each of two rows (sy1/sy2), all four
 * sharing a single chroma sample, into pixel variables a11..a22.
 */
#define YUV_LOAD_CONVERT_2x2_FAST(df,a11,a12,a21,a22,sy1,sy2,su,sv) \
{ \
    register int y11, y12, y21, y22, rv, guv, bu; \
    bu  = butab[su[0]]; \
    guv = gutab[su[0]] + gvtab[sv[0]]; \
    rv  = rvtab[sv[0]]; \
    y11 = ytab[sy1[0]]; \
    y21 = ytab[sy2[0]]; \
    y12 = ytab[sy1[1]]; \
    y22 = ytab[sy2[1]]; \
    CLIP_SET(df,ROUND,a11,y11+rv,y11+guv,y11+bu); \
    CLIP_SET(df,ROUND,a21,y21+rv,y21+guv,y21+bu); \
    CLIP_SET(df,ROUND,a12,y12+rv,y12+guv,y12+bu); \
    CLIP_SET(df,ROUND,a22,y22+rv,y22+guv,y22+bu); \
}

/* with Hue rotation: */
#define YUV_LOAD_CONVERT_2x2_FULL(df,a11,a12,a21,a22,sy1,sy2,su,sv) \
{ \
    register int y11, y12, y21, y22, ruv, guv, buv; \
    buv = butab[su[0]] + bvtab[sv[0]]; \
    guv = gutab[su[0]] + gvtab[sv[0]]; \
    ruv = rutab[su[0]] + rvtab[sv[0]]; \
    y11 = ytab[sy1[0]]; \
    y21 = ytab[sy2[0]]; \
    y12 = ytab[sy1[1]]; \
    y22 = ytab[sy2[1]]; \
    CLIP_SET(df,ROUND,a11,y11+ruv,y11+guv,y11+buv); \
    CLIP_SET(df,ROUND,a21,y21+ruv,y21+guv,y21+buv); \
    CLIP_SET(df,ROUND,a12,y12+ruv,y12+guv,y12+buv); \
    CLIP_SET(df,ROUND,a22,y22+ruv,y22+guv,y22+buv); \
}

/*
 * Generic YUV 2x2-block load & convert macro:
 */
#define YUV_LOAD_CONVERT_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv) \
    YUV_LOAD_CONVERT_2x2_##cc(df,a11,a12,a21,a22,sy1,sy2,su,sv)
/*
 * YUV 2x2-block load and convert macros:
 * (adds symmetric 2x2 dither noise: HIGH/LOW rounding is applied in a
 * checkerboard pattern over the four output pixels)
 */
#define YUV_LOAD_CONVERT_DITHER_2x2_FAST(df,a11,a12,a21,a22,sy1,sy2,su,sv) \
{ \
    register int y11, y12, y21, y22, rv, guv, bu; \
    bu  = butab[su[0]]; \
    guv = gutab[su[0]] + gvtab[sv[0]]; \
    rv  = rvtab[sv[0]]; \
    y11 = ytab[sy1[0]]; \
    y21 = ytab[sy2[0]]; \
    y12 = ytab[sy1[1]]; \
    y22 = ytab[sy2[1]]; \
    CLIP_SET(df,HIGH,a11,y11+rv,y11+guv,y11+bu); \
    CLIP_SET(df,LOW ,a21,y21+rv,y21+guv,y21+bu); \
    CLIP_SET(df,LOW ,a12,y12+rv,y12+guv,y12+bu); \
    CLIP_SET(df,HIGH,a22,y22+rv,y22+guv,y22+bu); \
}

/* with Hue rotation: */
#define YUV_LOAD_CONVERT_DITHER_2x2_FULL(df,a11,a12,a21,a22,sy1,sy2,su,sv) \
{ \
    register int y11, y12, y21, y22, ruv, guv, buv; \
    buv = butab[su[0]] + bvtab[sv[0]]; \
    guv = gutab[su[0]] + gvtab[sv[0]]; \
    ruv = rutab[su[0]] + rvtab[sv[0]]; \
    y11 = ytab[sy1[0]]; \
    y21 = ytab[sy2[0]]; \
    y12 = ytab[sy1[1]]; \
    y22 = ytab[sy2[1]]; \
    CLIP_SET(df,HIGH,a11,y11+ruv,y11+guv,y11+buv); \
    CLIP_SET(df,LOW ,a21,y21+ruv,y21+guv,y21+buv); \
    CLIP_SET(df,LOW ,a12,y12+ruv,y12+guv,y12+buv); \
    CLIP_SET(df,HIGH,a22,y22+ruv,y22+guv,y22+buv); \
}

/*
 * Generic YUV 2x2-block load & convert macro (dithered):
 */
#define YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv) \
    YUV_LOAD_CONVERT_DITHER_2x2_##cc(df,a11,a12,a21,a22,sy1,sy2,su,sv)
/*
 * Generic YUV load-convert-store macros:
 * convert a block, advance the source pointers, store the results to
 * two destination rows (d1/d2) and advance the destination pointers.
 */
#define YUV_LOAD_CONVERT_STORE_2x1(cc,df,d1,d2,sy1,sy2,su,sv) \
{ \
    PIXEL(df,a1); PIXEL(df,a2); \
    YUV_LOAD_CONVERT_2x1(cc,df,a1,a2,sy1,sy2,su,sv); \
    sy1++; sy2++; su++; sv++; \
    STORE(df,d1,a1); \
    d1+=BPP(df); \
    STORE(df,d2,a2); \
    d2+=BPP(df); \
}

#define YUV_LOAD_CONVERT_STORE_2x2(cc,df,d1,d2,sy1,sy2,su,sv) \
{ \
    PIXEL(df,a11); PIXEL(df,a12); \
    PIXEL(df,a21); PIXEL(df,a22); \
    YUV_LOAD_CONVERT_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); \
    sy1+=2; sy2+=2; su++; sv++; \
    STORE(df,d1,a11); \
    STORE(df,d1+BPP(df),a12); \
    d1+=2*BPP(df); \
    STORE(df,d2,a21); \
    STORE(df,d2+BPP(df),a22); \
    d2+=2*BPP(df); \
}

#define YUV_LOAD_CONVERT_DITHER_STORE_2x2(cc,df,d1,d2,sy1,sy2,su,sv) \
{ \
    PIXEL(df,a11); PIXEL(df,a12); \
    PIXEL(df,a21); PIXEL(df,a22); \
    YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); \
    sy1+=2; sy2+=2; su++; sv++; \
    STORE(df,d1,a11); \
    STORE(df,d1+BPP(df),a12); \
    d1+=2*BPP(df); \
    STORE(df,d2,a21); \
    STORE(df,d2+BPP(df),a22); \
    d2+=2*BPP(df); \
}
/*
 * Generic YUV load-convert-average-store macros:
 *  [d1],[d2] = convert([s1],[s2]);
 *  [d01] = ([d0]+[d1])/2;
 *  [d12] = ([d1]+[d2])/2;
 * i.e. in addition to the two converted rows, two interpolated rows
 * are produced: d01 averages against the previous output row d0, and
 * d12 averages the two freshly converted rows.
 */
#define YUV_LOAD_CONVERT_AVERAGE_STORE_2x1(cc,df,d0,d01,d1,d12,d2,sy1,sy2,su,sv) \
{ \
    PIXEL(df,a1); PIXEL(df,a2); \
    YUV_LOAD_CONVERT_2x1(cc,df,a1,a2,sy1,sy2,su,sv); \
    sy1++; sy2++; su++; sv++; \
    STORE(df,d1,a1); \
    d1+=BPP(df); \
    STORE(df,d2,a2); \
    d2+=BPP(df); \
    AVERAGE(df,a2,a1,a2); \
    LOAD_AVERAGE(df,a1,a1,d0); \
    d0+=BPP(df); \
    STORE(df,d01,a1); \
    d01+=BPP(df); \
    STORE(df,d12,a2); \
    d12+=BPP(df); \
}

#define YUV_LOAD_CONVERT_AVERAGE_STORE_2x2(cc,df,d0,d01,d1,d12,d2,sy1,sy2,su,sv) \
{ \
    PIXEL(df,a11); PIXEL(df,a12); \
    PIXEL(df,a21); PIXEL(df,a22); \
    YUV_LOAD_CONVERT_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); \
    sy1+=2; sy2+=2; su++; sv++; \
    STORE(df,d1,a11); \
    STORE(df,d1+BPP(df),a12); \
    d1+=2*BPP(df); \
    STORE(df,d2,a21); \
    STORE(df,d2+BPP(df),a22); \
    d2+=2*BPP(df); \
    AVERAGE(df,a21,a11,a21); \
    AVERAGE(df,a22,a12,a22); \
    LOAD_AVERAGE(df,a11,a11,d0); \
    LOAD_AVERAGE(df,a12,a12,d0+BPP(df)); \
    d0+=2*BPP(df); \
    STORE(df,d01,a11); \
    STORE(df,d01+BPP(df),a12); \
    d01+=2*BPP(df); \
    STORE(df,d12,a21); \
    STORE(df,d12+BPP(df),a22); \
    d12+=2*BPP(df); \
}

#define YUV_LOAD_CONVERT_AVERAGE_DITHER_STORE_2x2(cc,df,d0,d01,d1,d12,d2,sy1,sy2,su,sv) \
{ \
    PIXEL(df,a11); PIXEL(df,a12); \
    PIXEL(df,a21); PIXEL(df,a22); \
    YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); \
    sy1+=2; sy2+=2; su++; sv++; \
    STORE(df,d1,a11); \
    STORE(df,d1+BPP(df),a12); \
    d1+=2*BPP(df); \
    STORE(df,d2,a21); \
    STORE(df,d2+BPP(df),a22); \
    d2+=2*BPP(df); \
    AVERAGE(df,a21,a11,a21); \
    AVERAGE(df,a22,a12,a22); \
    LOAD_AVERAGE(df,a11,a11,d0); \
    LOAD_AVERAGE(df,a12,a12,d0+BPP(df)); \
    d0+=2*BPP(df); \
    STORE(df,d01,a11); \
    STORE(df,d01+BPP(df),a12); \
    d01+=2*BPP(df); \
    STORE(df,d12,a21); \
    STORE(df,d12+BPP(df),a22); \
    d12+=2*BPP(df); \
}
/*** Generic YUVtoRGB double-row converters: ***************/

/*
 * Generic YUVtoRGB double-row shrinking converter:
 * uses read-ahead optimization to process full 2x2 blocks
 * whenever possible.  Source pixels are skipped with inverted
 * Bresenham stepping (limit/step) to shrink src_dx -> dest_dx.
 */
#define DBLROW_SHRINK(cc,df,d1,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) \
{ \
    /* initialize local variables: */ \
    register int count = dest_dx; \
    register int limit = src_dx >> 1; /* -1 */ \
    register int step = dest_dx; \
    /* check row length: */ \
    if (count) { \
        /* check if we have an odd first block: */ \
        if (src_x & 1) \
            goto start_odd; \
        /* process even pixels: */ \
        do { \
            PIXEL(df,a11); PIXEL(df,a12); \
            PIXEL(df,a21); PIXEL(df,a22); \
            /* make one Bresenham step ahead: */ \
            if ((limit -= step) < 0) { \
                limit += src_dx; \
                /* can we process 2x2 pixels? */ \
                if (!--count) \
                    goto last_pixel; \
                /* process full 2x2 block: */ \
                YUV_LOAD_CONVERT_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); \
                sy1+=2; sy2+=2; su++; sv++; \
                STORE(df,d1,a11); \
                STORE(df,d1+BPP(df),a12); \
                d1+=2*BPP(df); \
                STORE(df,d2,a21); \
                STORE(df,d2+BPP(df),a22); \
                d2+=2*BPP(df); \
            } else { \
                /* proc. first 2x1 block & skip next: */ \
                YUV_LOAD_CONVERT_2x1(cc,df,a11,a21,sy1,sy2,su,sv); \
                sy1+=2; sy2+=2; su++; sv++; \
                STORE(df,d1,a11); \
                d1+=BPP(df); \
                STORE(df,d2,a21); \
                d2+=BPP(df); \
            } \
            /* inverted Bresenham stepping: */ \
            while ((limit -= step) >= 0) { \
                /* skip next even source pixel: */ \
                sy1++; sy2++; \
                if ((limit -= step) < 0) \
                    goto cont_odd; \
                /* skip odd source pixel: */ \
                sy1++; sy2++; \
                su++; sv++; /* next chroma: */ \
            } \
cont_even:  /* continue loop with next even pixel: */ \
            limit += src_dx; \
        } while (--count); \
        goto done; \
last_pixel: /* use this branch to process last pixel:*/ \
        count++; \
start_odd:  /* process odd pixels: */ \
        do { \
            PIXEL(df,a11); PIXEL(df,a21); \
            YUV_LOAD_CONVERT_2x1(cc,df,a11,a21,sy1,sy2,su,sv); \
            STORE(df,d1,a11); \
            d1+=BPP(df); \
            STORE(df,d2,a21); \
            d2+=BPP(df); \
            /* inverted Bresenham stepping: */ \
            do { \
                /* skip odd source pixel: */ \
                sy1++; sy2++; \
                su++; sv++; /* next chroma: */ \
                if ((limit -= step) < 0) \
                    goto cont_even; \
                /* skip even source pixel: */ \
                sy1++; sy2++; \
            } while ((limit -= step) >= 0); \
cont_odd:   limit += src_dx; \
        } while (--count); \
done:   ; \
    } \
}
/*
 * Generic YUVtoRGB double-row copy converter:
 * 1:1 horizontal conversion; odd leading/trailing pixels are handled
 * with 2x1 blocks, the aligned middle with dithered 2x2 blocks.
 */
#define DBLROW_COPY(cc,df,d1,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) \
{ \
    register int count = dest_dx; \
    /* convert first 2x1 block: */ \
    if ((src_x & 1) && count) { \
        YUV_LOAD_CONVERT_STORE_2x1(cc,df,d1,d2,sy1,sy2,su,sv); \
        count--; \
    } \
    /* convert all integral 2x2 blocks: */ \
    while (count >= 2) { \
        YUV_LOAD_CONVERT_DITHER_STORE_2x2(cc,df,d1,d2,sy1,sy2,su,sv); \
        count -= 2; \
    } \
    /* convert last 2x1 block: */ \
    if (count) { \
        YUV_LOAD_CONVERT_STORE_2x1(cc,df,d1,d2,sy1,sy2,su,sv); \
    } \
}
/*
 * Generic YUVtoRGB double row stretching converter:
 * replicates converted pixels with Bresenham stepping to stretch
 * src_dx -> dest_dx (dest_dx >= src_dx).
 */
#define DBLROW_STRETCH(cc,df,d1,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) \
{ \
    /* initialize local variables: */ \
    register int count = dest_dx; \
    register int limit = dest_dx >> 1; /* !!! */ \
    register int step = src_dx; \
    /* # of pixels to be processed separately: */ \
    int remainder = dest_dx - limit; \
    if ((src_x + src_dx) & 1) remainder += dest_dx; \
    remainder /= step; \
    /* check row length: */ \
    if (count) { \
        PIXEL(df,a11); PIXEL(df,a12); \
        PIXEL(df,a21); PIXEL(df,a22); \
        /* update count: */ \
        if ((count -= remainder) <= 0) \
            goto convert_last; \
        /* check if we have an odd first block: */ \
        if (src_x & 1) { \
            /* convert first 2x1 block: */ \
            YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); \
            sy1++; sy2++; su++; sv++; \
            goto rep_odd; \
        } \
        /* the main loop: */ \
        while (1) { \
            /* load & convert next 2x2 pixels: */ \
            YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); \
            sy1+=2; sy2+=2; su++; sv++; \
            /* replicate even pixels: */ \
            do { \
                STORE(df,d1,a11); \
                d1+=BPP(df); \
                STORE(df,d2,a21); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto rep_last; \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
rep_odd:    /* replicate odd pixels: */ \
            do { \
                STORE(df,d1,a12); \
                d1+=BPP(df); \
                STORE(df,d2,a22); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto check_last; \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
        } \
check_last: /* check if we need to convert one more pixel:*/ \
        if ((src_x + src_dx) & 1) { \
convert_last: /* last 2x1 block: */ \
            YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); \
        } \
        /* restore the number of remaining pixels: */ \
rep_last: count += remainder; \
        while (count --) { \
            /* replicate them: */ \
            STORE(df,d1,a12); \
            d1+=BPP(df); \
            STORE(df,d2,a22); \
            d2+=BPP(df); \
        } \
    } \
}
/*
 * Generic row 2x-stretching converter:
 * produces exactly 2*src_dx output pixels per row by interleaving
 * converted pixels with averaged half-pixels.
 */
#define DBLROW_STRETCH2X(cc,df,d1,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) \
{ \
    /* initialize local variables: */ \
    register int count = src_dx; \
    /* check row length: */ \
    if (count) { \
        PIXEL(df,a11); PIXEL(df,a12); \
        PIXEL(df,a21); PIXEL(df,a22); \
        /* check if we have an odd or single pixel: */ \
        if ((src_x & 1) || count < 2) { \
            /* process first 2x1 block: */ \
            YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); \
            sy1++; sy2++; su++; sv++; \
            STORE(df,d1,a12); \
            STORE(df,d2,a22); \
            d1 += BPP(df); \
            d2 += BPP(df); \
            count -= 1; \
        } else { \
            /* process first 2x2 block: */ \
            YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); \
            sy1+=2; sy2+=2; su++; sv++; \
            STORE(df,d1,a11); \
            STORE(df,d2,a21); \
            /* calculate & store half-pixels: */ \
            AVERAGE(df,a11,a11,a12); \
            AVERAGE(df,a21,a21,a22); \
            STORE(df,d1+BPP(df),a11); \
            STORE(df,d1+2*BPP(df),a12); \
            STORE(df,d2+BPP(df),a21); \
            STORE(df,d2+2*BPP(df),a22); \
            d1 += 3*BPP(df); \
            d2 += 3*BPP(df); \
            count -= 2; \
        } \
        /* process all internal 4x2 blocks: */ \
        while (count >= 4) { \
            /* process second 2x2 block: */ \
            PIXEL(df,a13); PIXEL(df,a23); \
            YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a13,a21,a23,sy1,sy2,su,sv); \
            sy1+=2; sy2+=2; su++; sv++; \
            /* calculate & store first half-pixels: */ \
            AVERAGE(df,a12,a12,a11); \
            AVERAGE(df,a22,a22,a21); \
            STORE(df,d1+0*BPP(df),a12); \
            STORE(df,d1+1*BPP(df),a11); \
            STORE(df,d2+0*BPP(df),a22); \
            STORE(df,d2+1*BPP(df),a21); \
            /* calculate & store second half-pixels: */ \
            AVERAGE(df,a11,a11,a13); \
            AVERAGE(df,a21,a21,a23); \
            STORE(df,d1+2*BPP(df),a11); \
            STORE(df,d1+3*BPP(df),a13); \
            STORE(df,d2+2*BPP(df),a21); \
            STORE(df,d2+3*BPP(df),a23); \
            /* process third 2x2 block: */ \
            YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); \
            sy1+=2; sy2+=2; su++; sv++; \
            /* calculate & store third half-pixels: */ \
            AVERAGE(df,a13,a13,a11); \
            AVERAGE(df,a23,a23,a21); \
            STORE(df,d1+4*BPP(df),a13); \
            STORE(df,d1+5*BPP(df),a11); \
            STORE(df,d2+4*BPP(df),a23); \
            STORE(df,d2+5*BPP(df),a21); \
            /* calculate & store fourth half-pixels: */ \
            AVERAGE(df,a11,a11,a12); \
            AVERAGE(df,a21,a21,a22); \
            STORE(df,d1+6*BPP(df),a11); \
            STORE(df,d1+7*BPP(df),a12); \
            STORE(df,d2+6*BPP(df),a21); \
            STORE(df,d2+7*BPP(df),a22); \
            d1 += 8*BPP(df); \
            d2 += 8*BPP(df); \
            count -= 4; \
        } \
        /* check if we have one more 2x2 block: */ \
        if (count >= 2) { \
            /* process last 2x2 block: */ \
            PIXEL(df,a13); PIXEL(df,a23); \
            YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a13,a21,a23,sy1,sy2,su,sv); \
            sy1+=2; sy2+=2; su++; sv++; \
            /* calculate & store first half-pixels: */ \
            AVERAGE(df,a12,a12,a11); \
            AVERAGE(df,a22,a22,a21); \
            STORE(df,d1+0*BPP(df),a12); \
            STORE(df,d1+1*BPP(df),a11); \
            STORE(df,d2+0*BPP(df),a22); \
            STORE(df,d2+1*BPP(df),a21); \
            /* calculate & store second half-pixels: */ \
            AVERAGE(df,a11,a11,a13); \
            AVERAGE(df,a21,a21,a23); \
            STORE(df,d1+2*BPP(df),a11); \
            STORE(df,d1+3*BPP(df),a13); \
            STORE(df,d2+2*BPP(df),a21); \
            STORE(df,d2+3*BPP(df),a23); \
            /* move last converted pixels to a12/22: */ \
            COPY(df,a12,a13); \
            COPY(df,a22,a23); \
            d1 += 4*BPP(df); \
            d2 += 4*BPP(df); \
            count -= 2; \
        } \
        /* check if we have one more 2x1 block: */ \
        if (count >= 1) { \
            /* process last 2x1 block: */ \
            YUV_LOAD_CONVERT_2x1(cc,df,a11,a21,sy1,sy2,su,sv); \
            /* calculate & store last half-pixels: */ \
            AVERAGE(df,a12,a12,a11); \
            AVERAGE(df,a22,a22,a21); \
            STORE(df,d1+0*BPP(df),a12); \
            STORE(df,d1+1*BPP(df),a11); \
            STORE(df,d1+2*BPP(df),a11); \
            STORE(df,d2+0*BPP(df),a22); \
            STORE(df,d2+1*BPP(df),a21); \
            STORE(df,d2+2*BPP(df),a21); \
        } else { \
            /* just replicate last pixels: */ \
            STORE(df,d1,a12); \
            STORE(df,d2,a22); \
        } \
    } \
}
/*
 * Generic row 2x+ stretching converter:
 * "???" comments mean that under normal conditions these jumps
 * should never be executed; nevertheless, I left these checks
 * in place to guarantee the correct termination of the algorithm
 * in all possible scenarios.
 */
#define DBLROW_STRETCH2XPLUS(cc,df,d1,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) \
{ \
    /* initialize local variables: */ \
    register int count = dest_dx; \
    register int limit = dest_dx >> 1; /* !!! */ \
    register int step = src_dx << 1; /* !!! */ \
    /* # of half-pixels to be processed separately: */ \
    int remainder = 3*dest_dx - limit; \
    if ((src_x + src_dx) & 1) remainder += 2*dest_dx; \
    remainder /= step; \
    /* check row length: */ \
    if (count) { \
        PIXEL(df,a11); PIXEL(df,a12); \
        PIXEL(df,a21); PIXEL(df,a22); \
        PIXEL(df,a13); PIXEL(df,a23); \
        /* check if an odd or single 2x1 block: */ \
        if ((src_x & 1) || src_dx < 2) { \
            /* convert first 2x1 block: */ \
            YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); \
            sy1++; sy2++; su++; sv++; \
            /* update count: */ \
            if ((count -= remainder) <= 0) \
                goto rep_last; \
            goto rep_odd; \
        } else { \
            /* convert first 2x2 block: */ \
            YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); \
            sy1+=2; sy2+=2; su++; sv++; \
            /* update count: */ \
            if ((count -= remainder) <= 0) \
                goto rep_last_2; /* ??? */ \
            goto rep_even; \
        } \
        /* the main loop: */ \
        while (1) { \
            /* load & convert second 2x2 block: */ \
            YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a13,a21,a23,sy1,sy2,su,sv); \
            sy1+=2; sy2+=2; su++; sv++; \
            /* calc. & replicate first half-pixels: */ \
            AVERAGE(df,a12,a12,a11); \
            AVERAGE(df,a22,a22,a21); \
            do { \
                STORE(df,d1,a12); \
                d1+=BPP(df); \
                STORE(df,d2,a22); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto rep_last; /* ??? */ \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
            /* replicate second even integral pixels: */ \
            do { \
                STORE(df,d1,a11); \
                d1+=BPP(df); \
                STORE(df,d2,a21); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto rep_last_2; /* ??? */ \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
            /* calc. & replicate second half-pixels: */ \
            AVERAGE(df,a11,a11,a13); \
            AVERAGE(df,a21,a21,a23); \
            do { \
                STORE(df,d1,a11); \
                d1+=BPP(df); \
                STORE(df,d2,a21); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto rep_last_3; /* !!! */ \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
            /* replicate second odd integral pixels: */ \
            do { \
                STORE(df,d1,a13); \
                d1+=BPP(df); \
                STORE(df,d2,a23); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto last_pixel_2; /* !!! */ \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
            /* load & convert third 2x2 block: */ \
            YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); \
            sy1+=2; sy2+=2; su++; sv++; \
            /* calc. & replicate third half-pixels: */ \
            AVERAGE(df,a13,a13,a11); \
            AVERAGE(df,a23,a23,a21); \
            do { \
                STORE(df,d1,a13); \
                d1+=BPP(df); \
                STORE(df,d2,a23); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto rep_last_3; /* ??? */ \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
rep_even:   /* replicate third even integral pixels: */ \
            do { \
                STORE(df,d1,a11); \
                d1+=BPP(df); \
                STORE(df,d2,a21); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto rep_last_2; /* ??? */ \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
            /* calc. & replicate fourth half-pixels: */ \
            AVERAGE(df,a11,a11,a12); \
            AVERAGE(df,a21,a21,a22); \
            do { \
                STORE(df,d1,a11); \
                d1+=BPP(df); \
                STORE(df,d2,a21); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto rep_last; /* !!! */ \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
rep_odd:    /* replicate third odd integral pixels: */ \
            do { \
                STORE(df,d1,a12); \
                d1+=BPP(df); \
                STORE(df,d2,a22); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto last_pixel; /* !!! */ \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
        } \
last_pixel_2:/* store last integral pixels in a11/21: */ \
        COPY(df,a11,a13); \
        COPY(df,a21,a23); \
last_pixel: /* check if we need to convert one more pixel:*/ \
        if ((src_x + src_dx) & 1) { \
            /* update count & remainder: */ \
            register int r2 = remainder >> 1; \
            count += r2; remainder -= r2; \
            if (count <= 0) \
                goto rep_last; \
            /* load & convert last 2x1 block: */ \
            YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); \
            /* calc. & replicate last half-pixels: */ \
            AVERAGE(df,a11,a11,a12); \
            AVERAGE(df,a21,a21,a22); \
            do { \
                STORE(df,d1,a11); \
                d1+=BPP(df); \
                STORE(df,d2,a21); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto rep_last; /* !!! */ \
            } while ((limit -= step) >= 0); \
        } \
        goto rep_last; \
rep_last_3: /* store last converted pixels in a12/22: */ \
        COPY(df,a12,a13); \
        COPY(df,a22,a23); \
        goto rep_last; \
rep_last_2: /* store last converted pixels in a12/22: */ \
        COPY(df,a12,a11); \
        COPY(df,a22,a21); \
        /* restore the number of remaining pixels: */ \
rep_last: count += remainder; \
        while (count --) { \
            /* replicate them: */ \
            STORE(df,d1,a12); \
            d1+=BPP(df); \
            STORE(df,d2,a22); \
            d2+=BPP(df); \
        } \
    } \
}
/*** Generic YUVtoRGB double-row 2x converters: ************/

/*
 * Generic YUVtoRGB double-row shrinking converter:
 * uses read-ahead optimization to process full 2x2 blocks
 * whenever possible.  In addition to the two converted rows
 * (d1/d2) it emits two vertically interpolated rows: d01
 * (average with the previous output row d0) and d12 (average
 * of the two converted rows).
 */
#define DBLROW2X_SHRINK(cc,df,d0,d01,d1,d12,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) \
{ \
    /* initialize local variables: */ \
    register int count = dest_dx; \
    register int limit = src_dx >> 1; /* -1 */ \
    register int step = dest_dx; \
    /* check row length: */ \
    if (count) { \
        /* check if we have an odd first block: */ \
        if (src_x & 1) \
            goto start_odd; \
        /* process even pixels: */ \
        do { \
            PIXEL(df,a11); PIXEL(df,a12); \
            PIXEL(df,a21); PIXEL(df,a22); \
            /* make one Bresenham step ahead: */ \
            if ((limit -= step) < 0) { \
                limit += src_dx; \
                /* can we process 2x2 pixels? */ \
                if (!--count) \
                    goto last_pixel; \
                /* process full 2x2 block: */ \
                YUV_LOAD_CONVERT_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); \
                sy1+=2; sy2+=2; su++; sv++; \
                STORE(df,d1,a11); \
                STORE(df,d1+BPP(df),a12); \
                d1+=2*BPP(df); \
                STORE(df,d2,a21); \
                STORE(df,d2+BPP(df),a22); \
                d2+=2*BPP(df); \
                /* process average pixels: */ \
                AVERAGE(df,a21,a11,a21); \
                AVERAGE(df,a22,a12,a22); \
                LOAD_AVERAGE(df,a11,a11,d0); \
                LOAD_AVERAGE(df,a12,a12,d0+BPP(df)); \
                d0+=2*BPP(df); \
                STORE(df,d01,a11); \
                STORE(df,d01+BPP(df),a12); \
                d01+=2*BPP(df); \
                STORE(df,d12,a21); \
                STORE(df,d12+BPP(df),a22); \
                d12+=2*BPP(df); \
            } else { \
                /* proc. first 2x1 block & skip next: */ \
                YUV_LOAD_CONVERT_2x1(cc,df,a11,a21,sy1,sy2,su,sv); \
                sy1+=2; sy2+=2; su++; sv++; \
                STORE(df,d1,a11); \
                d1+=BPP(df); \
                STORE(df,d2,a21); \
                d2+=BPP(df); \
                /* process average pixels: */ \
                AVERAGE(df,a21,a11,a21); \
                LOAD_AVERAGE(df,a11,a11,d0); \
                d0+=BPP(df); \
                STORE(df,d01,a11); \
                d01+=BPP(df); \
                STORE(df,d12,a21); \
                d12+=BPP(df); \
            } \
            /* inverted Bresenham stepping: */ \
            while ((limit -= step) >= 0) { \
                /* skip next even source pixel: */ \
                sy1++; sy2++; \
                if ((limit -= step) < 0) \
                    goto cont_odd; \
                /* skip odd source pixel: */ \
                sy1++; sy2++; \
                su++; sv++; /* next chroma: */ \
            } \
cont_even:  /* continue loop with next even pixel: */ \
            limit += src_dx; \
        } while (--count); \
        goto done; \
last_pixel: /* use this branch to process last pixel:*/ \
        count++; \
start_odd:  /* process odd pixels: */ \
        do { \
            /* convert 2x1 block: */ \
            PIXEL(df,a11); PIXEL(df,a21); \
            YUV_LOAD_CONVERT_2x1(cc,df,a11,a21,sy1,sy2,su,sv); \
            STORE(df,d1,a11); \
            d1+=BPP(df); \
            STORE(df,d2,a21); \
            d2+=BPP(df); \
            /* process average pixels: */ \
            AVERAGE(df,a21,a11,a21); \
            LOAD_AVERAGE(df,a11,a11,d0); \
            d0+=BPP(df); \
            STORE(df,d01,a11); \
            d01+=BPP(df); \
            STORE(df,d12,a21); \
            d12+=BPP(df); \
            /* inverted Bresenham stepping: */ \
            do { \
                /* skip odd source pixel: */ \
                sy1++; sy2++; \
                su++; sv++; /* next chroma: */ \
                if ((limit -= step) < 0) \
                    goto cont_even; \
                /* skip even source pixel: */ \
                sy1++; sy2++; \
            } while ((limit -= step) >= 0); \
cont_odd:   limit += src_dx; \
        } while (--count); \
done:   ; \
    } \
}
/*
 * Generic YUVtoRGB double-row copy converter (2x vertical version):
 * same as DBLROW_COPY, but also produces the two vertically
 * interpolated rows d01 and d12 via the AVERAGE_STORE macros.
 */
#define DBLROW2X_COPY(cc,df,d0,d01,d1,d12,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) \
{ \
    register int count = dest_dx; \
    /* convert first 2x1 block: */ \
    if ((src_x & 1) && count) { \
        YUV_LOAD_CONVERT_AVERAGE_STORE_2x1(cc,df,d0,d01,d1,d12,d2,sy1,sy2,su,sv); \
        count--; \
    } \
    /* convert all integral 2x2 blocks: */ \
    while (count >= 2) { \
        YUV_LOAD_CONVERT_AVERAGE_DITHER_STORE_2x2(cc,df,d0,d01,d1,d12,d2,sy1,sy2,su,sv); \
        count -= 2; \
    } \
    /* convert last 2x1 block: */ \
    if (count) { \
        YUV_LOAD_CONVERT_AVERAGE_STORE_2x1(cc,df,d0,d01,d1,d12,d2,sy1,sy2,su,sv); \
    } \
}
/*
 * Generic YUVtoRGB double row stretching converter (2x vertical
 * version): replicates converted pixels with Bresenham stepping and
 * also emits the vertically interpolated rows d01 and d12.
 */
#define DBLROW2X_STRETCH(cc,df,d0,d01,d1,d12,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) \
{ \
    /* initialize local variables: */ \
    register int count = dest_dx; \
    register int limit = dest_dx >> 1; /* !!! */ \
    register int step = src_dx; \
    /* # of pixels to be processed separately: */ \
    int remainder = dest_dx - limit; \
    if ((src_x + src_dx) & 1) remainder += dest_dx; \
    remainder /= step; \
    /* check row length: */ \
    if (count) { \
        PIXEL(df,a11); PIXEL(df,a12); \
        PIXEL(df,a21); PIXEL(df,a22); \
        PIXEL(df,a01x);PIXEL(df,a12x); \
        /* update count: */ \
        if ((count -= remainder) <= 0) \
            goto convert_last; \
        /* check if we have an odd first block: */ \
        if (src_x & 1) { \
            /* convert first 2x1 block: */ \
            YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); \
            sy1++; sy2++; su++; sv++; \
            goto rep_odd; \
        } \
        /* the main loop: */ \
        while (1) { \
            /* load & convert next 2x2 pixels: */ \
            YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); \
            sy1+=2; sy2+=2; su++; sv++; \
            /* average and replicate even pixels: */ \
            LOAD_AVERAGE(df,a01x,a11,d0); \
            AVERAGE(df,a12x,a11,a21); \
            do { \
                d0+=BPP(df); \
                STORE(df,d01,a01x); \
                d01+=BPP(df); \
                STORE(df,d1,a11); \
                d1+=BPP(df); \
                STORE(df,d12,a12x); \
                d12+=BPP(df); \
                STORE(df,d2,a21); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto rep_last; \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
rep_odd:    /* average & replicate odd pixels: */ \
            LOAD_AVERAGE(df,a01x,a12,d0); \
            AVERAGE(df,a12x,a12,a22); \
            do { \
                d0+=BPP(df); \
                STORE(df,d01,a01x); \
                d01+=BPP(df); \
                STORE(df,d1,a12); \
                d1+=BPP(df); \
                STORE(df,d12,a12x); \
                d12+=BPP(df); \
                STORE(df,d2,a22); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto check_last; \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
        } \
check_last: /* check if we need to convert one more pixel:*/ \
        if ((src_x + src_dx) & 1) { \
convert_last: /* last 2x1 block: */ \
            YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); \
            /* calc. average pixels: */ \
            LOAD_AVERAGE(df,a01x,a12,d0); \
            AVERAGE(df,a12x,a12,a22); \
        } \
        /* restore the number of remaining pixels: */ \
rep_last: count += remainder; \
        while (count --) { \
            /* replicate them: */ \
            STORE(df,d01,a01x); \
            d01+=BPP(df); \
            STORE(df,d1,a12); \
            d1+=BPP(df); \
            STORE(df,d12,a12x); \
            d12+=BPP(df); \
            STORE(df,d2,a22); \
            d2+=BPP(df); \
        } \
    } \
}
/*
 * Generic row 2x-stretching converter:
 *
 * Converts one source 2x1/2x2 YUV block column-pair at a time while
 * doubling horizontally AND producing interpolated half-rows vertically.
 *
 *  cc          - converter class (FAST/FULL), selects the YUV load/convert macros
 *  df          - destination pixel format tag (RGB32, RGB565, ...)
 *  d0          - previous output row (read-only, used for vertical averaging)
 *  d01, d12    - interpolated half-rows (written)
 *  d1, d2      - integral output rows (written)
 *  sy1,sy2,su,sv - source Y (two rows), U, V plane pointers
 *  src_x,src_dx  - source start/width; odd src_x or width<2 forces a 2x1 head block
 *
 * NOTE(review): backslash line-continuations restored (they were lost in
 * extraction); the macro body logic is byte-for-byte the original.
 */
#define DBLROW2X_STRETCH2X(cc,df,d0,d01,d1,d12,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) \
{ \
    /* initialize local variables: */ \
    register int count = src_dx; \
    /* check row length: */ \
    if (count) { \
        PIXEL(df,a011);PIXEL(df,a012); \
        PIXEL(df,a11); PIXEL(df,a12); \
        PIXEL(df,a121);PIXEL(df,a122); \
        PIXEL(df,a21); PIXEL(df,a22); \
        /* check if we have an odd or single pixel: */ \
        if ((src_x & 1) || count < 2) { \
            /* process first 2x1 block: */ \
            YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); \
            sy1++; sy2++; su++; sv++; \
            STORE(df,d1+0*BPP(df),a12); \
            STORE(df,d2+0*BPP(df),a22); \
            /* process vertical half-pixels: */ \
            LOAD_AVERAGE(df,a012,a12,d0); \
            STORE(df,d01+0*BPP(df),a012); \
            AVERAGE(df,a122,a12,a22); \
            STORE(df,d12+0*BPP(df),a122); \
            /* shift pointers: */ \
            d0 += BPP(df); \
            d01 += BPP(df); \
            d1 += BPP(df); \
            d12 += BPP(df); \
            d2 += BPP(df); \
            count -= 1; \
        } else { \
            /* process first 2x2 block: */ \
            YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); \
            sy1+=2; sy2+=2; su++; sv++; \
            STORE(df,d1+0*BPP(df),a11); \
            STORE(df,d2+0*BPP(df),a21); \
            STORE(df,d1+2*BPP(df),a12); \
            STORE(df,d2+2*BPP(df),a22); \
            /* process vertical half-pixels: */ \
            LOAD_AVERAGE(df,a011,a11,d0); \
            STORE(df,d01+0*BPP(df),a011); \
            AVERAGE(df,a121,a11,a21); \
            STORE(df,d12+0*BPP(df),a121); \
            LOAD_AVERAGE(df,a012,a12,d0+2*BPP(df)); \
            STORE(df,d01+2*BPP(df),a012); \
            AVERAGE(df,a122,a12,a22); \
            STORE(df,d12+2*BPP(df),a122); \
            /* process horizontal half-pixels: */ \
            AVERAGE(df,a011,a011,a012); \
            STORE(df,d01+1*BPP(df),a011); \
            AVERAGE(df,a11,a11,a12); \
            STORE(df,d1+1*BPP(df),a11); \
            AVERAGE(df,a121,a121,a122); \
            STORE(df,d12+1*BPP(df),a121); \
            AVERAGE(df,a21,a21,a22); \
            STORE(df,d2+1*BPP(df),a21); \
            /* shift pointers: */ \
            d0 += 3*BPP(df); \
            d01 += 3*BPP(df); \
            d1 += 3*BPP(df); \
            d12 += 3*BPP(df); \
            d2 += 3*BPP(df); \
            count -= 2; \
        } \
        /* process all internal 4x2 blocks: */ \
        while (count >= 4) { \
            /* process second 2x2 block: */ \
            PIXEL(df,a013); PIXEL(df,a13); \
            PIXEL(df,a123); PIXEL(df,a23); \
            YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a13,a21,a23,sy1,sy2,su,sv); \
            sy1+=2; sy2+=2; su++; sv++; \
            STORE(df,d1+1*BPP(df),a11); \
            STORE(df,d2+1*BPP(df),a21); \
            STORE(df,d1+3*BPP(df),a13); \
            STORE(df,d2+3*BPP(df),a23); \
            /* process vertical half-pixels: */ \
            LOAD_AVERAGE(df,a011,a11,d0+1*BPP(df)); \
            STORE(df,d01+1*BPP(df),a011); \
            AVERAGE(df,a121,a11,a21); \
            STORE(df,d12+1*BPP(df),a121); \
            LOAD_AVERAGE(df,a013,a13,d0+3*BPP(df)); \
            STORE(df,d01+3*BPP(df),a013); \
            AVERAGE(df,a123,a13,a23); \
            STORE(df,d12+3*BPP(df),a123); \
            /* process horizontal half-pixels: */ \
            AVERAGE(df,a012,a012,a011); \
            STORE(df,d01+0*BPP(df),a012); \
            AVERAGE(df,a12,a12,a11); \
            STORE(df,d1+0*BPP(df),a12); \
            AVERAGE(df,a122,a122,a121); \
            STORE(df,d12+0*BPP(df),a122); \
            AVERAGE(df,a22,a22,a21); \
            STORE(df,d2+0*BPP(df),a22); \
            AVERAGE(df,a011,a011,a013); \
            STORE(df,d01+2*BPP(df),a011); /*!!!*/ \
            AVERAGE(df,a11,a11,a13); \
            STORE(df,d1+2*BPP(df),a11); \
            AVERAGE(df,a121,a121,a123); \
            STORE(df,d12+2*BPP(df),a121); /*!!!*/ \
            AVERAGE(df,a21,a21,a23); \
            STORE(df,d2+2*BPP(df),a21); \
            /* process third 2x2 block: */ \
            YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); \
            sy1+=2; sy2+=2; su++; sv++; \
            STORE(df,d1+5*BPP(df),a11); \
            STORE(df,d2+5*BPP(df),a21); \
            STORE(df,d1+7*BPP(df),a12); \
            STORE(df,d2+7*BPP(df),a22); \
            /* process vertical half-pixels: */ \
            LOAD_AVERAGE(df,a011,a11,d0+5*BPP(df)); \
            STORE(df,d01+5*BPP(df),a011); \
            AVERAGE(df,a121,a11,a21); \
            STORE(df,d12+5*BPP(df),a121); \
            LOAD_AVERAGE(df,a012,a12,d0+7*BPP(df)); \
            STORE(df,d01+7*BPP(df),a012); \
            AVERAGE(df,a122,a12,a22); \
            STORE(df,d12+7*BPP(df),a122); \
            /* process horizontal half-pixels: */ \
            AVERAGE(df,a013,a013,a011); \
            STORE(df,d01+4*BPP(df),a013); \
            AVERAGE(df,a13,a13,a11); \
            STORE(df,d1+4*BPP(df),a13); \
            AVERAGE(df,a123,a123,a121); \
            STORE(df,d12+4*BPP(df),a123); \
            AVERAGE(df,a23,a23,a21); \
            STORE(df,d2+4*BPP(df),a23); \
            AVERAGE(df,a011,a011,a012); \
            STORE(df,d01+6*BPP(df),a011); \
            AVERAGE(df,a11,a11,a12); \
            STORE(df,d1+6*BPP(df),a11); \
            AVERAGE(df,a121,a121,a122); \
            STORE(df,d12+6*BPP(df),a121); \
            AVERAGE(df,a21,a21,a22); \
            STORE(df,d2+6*BPP(df),a21); \
            /* shift pointers: */ \
            d0 += 8*BPP(df); \
            d01 += 8*BPP(df); \
            d1 += 8*BPP(df); \
            d12 += 8*BPP(df); \
            d2 += 8*BPP(df); \
            count -= 4; \
        } \
        /* check if we have one more 2x2 block: */ \
        if (count >= 2) { \
            /* process last 2x2 block: */ \
            PIXEL(df,a013); PIXEL(df,a13); \
            PIXEL(df,a123); PIXEL(df,a23); \
            YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a13,a21,a23,sy1,sy2,su,sv); \
            sy1+=2; sy2+=2; su++; sv++; \
            STORE(df,d1+1*BPP(df),a11); \
            STORE(df,d2+1*BPP(df),a21); \
            STORE(df,d1+3*BPP(df),a13); \
            STORE(df,d2+3*BPP(df),a23); \
            /* process vertical half-pixels: */ \
            LOAD_AVERAGE(df,a011,a11,d0+1*BPP(df)); \
            STORE(df,d01+1*BPP(df),a011); \
            AVERAGE(df,a121,a11,a21); \
            STORE(df,d12+1*BPP(df),a121); \
            LOAD_AVERAGE(df,a013,a13,d0+3*BPP(df)); \
            STORE(df,d01+3*BPP(df),a013); \
            AVERAGE(df,a123,a13,a23); \
            STORE(df,d12+3*BPP(df),a123); \
            /* process horizontal half-pixels: */ \
            AVERAGE(df,a012,a012,a011); \
            STORE(df,d01+0*BPP(df),a012); \
            AVERAGE(df,a12,a12,a11); \
            STORE(df,d1+0*BPP(df),a12); \
            AVERAGE(df,a122,a122,a121); \
            STORE(df,d12+0*BPP(df),a122); \
            AVERAGE(df,a22,a22,a21); \
            STORE(df,d2+0*BPP(df),a22); \
            AVERAGE(df,a011,a011,a013); \
            STORE(df,d01+2*BPP(df),a011); /*!!!*/ \
            AVERAGE(df,a11,a11,a13); \
            STORE(df,d1+2*BPP(df),a11); \
            AVERAGE(df,a121,a121,a123); \
            STORE(df,d12+2*BPP(df),a121); /*!!!*/ \
            AVERAGE(df,a21,a21,a23); \
            STORE(df,d2+2*BPP(df),a21); \
            /* move last converted pixels to a12/22: */ \
            COPY(df,a012,a013); \
            COPY(df,a12,a13); \
            COPY(df,a122,a123); \
            COPY(df,a22,a23); \
            /* shift pointers: */ \
            d0 += 4*BPP(df); \
            d01 += 4*BPP(df); \
            d1 += 4*BPP(df); \
            d12 += 4*BPP(df); \
            d2 += 4*BPP(df); \
            count -= 2; \
        } \
        /* check if we have one more 2x1 block: */ \
        if (count >= 1) { \
            /* process last 2x1 block: */ \
            YUV_LOAD_CONVERT_2x1(cc,df,a11,a21,sy1,sy2,su,sv); \
            STORE(df,d1+1*BPP(df),a11); \
            STORE(df,d1+2*BPP(df),a11); \
            STORE(df,d2+1*BPP(df),a21); \
            STORE(df,d2+2*BPP(df),a21); \
            /* process vertical half-pixels: */ \
            LOAD_AVERAGE(df,a011,a11,d0+1*BPP(df)); \
            STORE(df,d01+1*BPP(df),a011); \
            STORE(df,d01+2*BPP(df),a011); \
            AVERAGE(df,a121,a11,a21); \
            STORE(df,d12+1*BPP(df),a121); \
            STORE(df,d12+2*BPP(df),a121); \
            /* process horizontal half-pixels: */ \
            AVERAGE(df,a012,a012,a011); \
            STORE(df,d01+0*BPP(df),a012); \
            AVERAGE(df,a12,a12,a11); \
            STORE(df,d1+0*BPP(df),a12); \
            AVERAGE(df,a122,a122,a121); \
            STORE(df,d12+0*BPP(df),a122); \
            AVERAGE(df,a22,a22,a21); \
            STORE(df,d2+0*BPP(df),a22); \
        } else { \
            /* just replicate last column: */ \
            STORE(df,d01,a012); \
            STORE(df,d1,a12); \
            STORE(df,d12,a122); \
            STORE(df,d2,a22); \
        } \
    } \
}
/*
 * Generic row 2x+ stretching converter:
 *
 * Stretches a source row by a factor greater than 2x while also producing
 * the vertically interpolated half-rows. Uses a Bresenham-style accumulator
 * (limit/step) to decide how many destination pixels each converted source
 * pixel (or half-pixel) is replicated into.
 *
 *  cc          - converter class (FAST/FULL)
 *  df          - destination pixel format tag (RGB32, RGB565, ...)
 *  d0          - previous output row (read for vertical averaging)
 *  d01, d12    - interpolated half-rows (written)
 *  d1, d2      - integral output rows (written)
 *  sy1,sy2,su,sv - source Y (two rows), U, V plane pointers
 *  remainder   - # of trailing destination pixels filled by replicating the
 *                last converted column (see rep_last)
 *
 * "???" comments mean that under normal conditions these jumps
 * should never be executed; nevertheless, I left these checks
 * in place to guarantee the correct termination of the algorithm
 * in all possible scenarios.
 *
 * NOTE(review): backslash line-continuations restored (lost in extraction);
 * the macro body logic is byte-for-byte the original.
 */
#define DBLROW2X_STRETCH2XPLUS(cc,df,d0,d01,d1,d12,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) \
{ \
    /* initialize local variables: */ \
    register int count = dest_dx; \
    register int limit = dest_dx >> 1; /* !!! */ \
    register int step = src_dx << 1;   /* !!! */ \
    /* # of half-pixels to be processed separately: */ \
    int remainder = 3*dest_dx - limit; \
    if ((src_x + src_dx) & 1) remainder += 2*dest_dx; \
    remainder /= step; \
    /* check row length: */ \
    if (count) { \
        PIXEL(df,a11); PIXEL(df,a12); \
        PIXEL(df,a21); PIXEL(df,a22); \
        PIXEL(df,a13); PIXEL(df,a23); \
        PIXEL(df,a01x);PIXEL(df,a12x); \
        /* check if an odd or single 2x1 block: */ \
        if ((src_x & 1) || src_dx < 2) { \
            /* convert first 2x1 block: */ \
            YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); \
            sy1++; sy2++; su++; sv++; \
            /* update count: */ \
            if ((count -= remainder) <= 0) \
                goto rep_last; \
            goto rep_odd; \
        } else { \
            /* convert first 2x2 block: */ \
            YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); \
            sy1+=2; sy2+=2; su++; sv++; \
            /* update count: */ \
            if ((count -= remainder) <= 0) \
                goto rep_last_2; /* ??? */ \
            goto rep_even; \
        } \
        /* the main loop (a11,a12-last conv.pixels): */ \
        while (1) { \
            /* load & convert second 2x2 block: */ \
            YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a13,a21,a23,sy1,sy2,su,sv); \
            sy1+=2; sy2+=2; su++; sv++; \
            /* calc. & replicate first half-pixels: */ \
            AVERAGE(df,a12,a12,a11); \
            LOAD_AVERAGE(df,a01x,a12,d0); \
            AVERAGE(df,a22,a22,a21); \
            AVERAGE(df,a12x,a12,a22); \
            do { \
                d0+=BPP(df); \
                STORE(df,d01,a01x); \
                d01+=BPP(df); \
                STORE(df,d1,a12); \
                d1+=BPP(df); \
                STORE(df,d12,a12x); \
                d12+=BPP(df); \
                STORE(df,d2,a22); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto rep_last; /* ??? */ \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
            /* get vertical half-pixels:*/ \
            LOAD_AVERAGE(df,a01x,a11,d0); \
            AVERAGE(df,a12x,a11,a21); \
            /* replicate second even integral pixels: */ \
            do { \
                d0+=BPP(df); \
                STORE(df,d01,a01x); \
                d01+=BPP(df); \
                STORE(df,d1,a11); \
                d1+=BPP(df); \
                STORE(df,d12,a12x); \
                d12+=BPP(df); \
                STORE(df,d2,a21); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto rep_last_2; /* ??? */ \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
            /* calc. & replicate second half-pixels: */ \
            AVERAGE(df,a11,a11,a13); \
            LOAD_AVERAGE(df,a01x,a11,d0); \
            AVERAGE(df,a21,a21,a23); \
            AVERAGE(df,a12x,a11,a21); \
            do { \
                d0+=BPP(df); \
                STORE(df,d01,a01x); \
                d01+=BPP(df); \
                STORE(df,d1,a11); \
                d1+=BPP(df); \
                STORE(df,d12,a12x); \
                d12+=BPP(df); \
                STORE(df,d2,a21); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto rep_last_3; /* !!! */ \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
            /* get vertical half-pixels:*/ \
            LOAD_AVERAGE(df,a01x,a13,d0); \
            AVERAGE(df,a12x,a13,a23); \
            /* replicate second odd integral pixels: */ \
            do { \
                d0+=BPP(df); \
                STORE(df,d01,a01x); \
                d01+=BPP(df); \
                STORE(df,d1,a13); \
                d1+=BPP(df); \
                STORE(df,d12,a12x); \
                d12+=BPP(df); \
                STORE(df,d2,a23); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto last_pixel_2; /* !!! */ \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
            /* load & convert third 2x2 block: */ \
            YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); \
            sy1+=2; sy2+=2; su++; sv++; \
            /* calc. & replicate third half-pixels: */ \
            AVERAGE(df,a13,a13,a11); \
            LOAD_AVERAGE(df,a01x,a13,d0); \
            AVERAGE(df,a23,a23,a21); \
            AVERAGE(df,a12x,a13,a23); \
            do { \
                d0+=BPP(df); \
                STORE(df,d01,a01x); \
                d01+=BPP(df); \
                STORE(df,d1,a13); \
                d1+=BPP(df); \
                STORE(df,d12,a12x); \
                d12+=BPP(df); \
                STORE(df,d2,a23); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto rep_last_3; /* ??? */ \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
            rep_even: /* get vertical half-pixels:*/ \
            LOAD_AVERAGE(df,a01x,a11,d0); \
            AVERAGE(df,a12x,a11,a21); \
            /* replicate third even integral pixels: */ \
            do { \
                d0+=BPP(df); \
                STORE(df,d01,a01x); \
                d01+=BPP(df); \
                STORE(df,d1,a11); \
                d1+=BPP(df); \
                STORE(df,d12,a12x); \
                d12+=BPP(df); \
                STORE(df,d2,a21); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto rep_last_2; /* ??? */ \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
            /* calc. & replicate fourth half-pixels: */ \
            AVERAGE(df,a11,a11,a12); \
            LOAD_AVERAGE(df,a01x,a11,d0); \
            AVERAGE(df,a21,a21,a22); \
            AVERAGE(df,a12x,a11,a21); \
            do { \
                d0+=BPP(df); \
                STORE(df,d01,a01x); \
                d01+=BPP(df); \
                STORE(df,d1,a11); \
                d1+=BPP(df); \
                STORE(df,d12,a12x); \
                d12+=BPP(df); \
                STORE(df,d2,a21); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto rep_last; /* !!! */ \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
            rep_odd: /* get vertical half-pixels:*/ \
            LOAD_AVERAGE(df,a01x,a12,d0); \
            AVERAGE(df,a12x,a12,a22); \
            /* replicate third odd integral pixels: */ \
            do { \
                d0+=BPP(df); \
                STORE(df,d01,a01x); \
                d01+=BPP(df); \
                STORE(df,d1,a12); \
                d1+=BPP(df); \
                STORE(df,d12,a12x); \
                d12+=BPP(df); \
                STORE(df,d2,a22); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto last_pixel; /* !!! */ \
            } while ((limit -= step) >= 0); \
            limit += dest_dx; \
        } \
        last_pixel_2:/* store last integral pixels in a11/21: */ \
        COPY(df,a11,a13); \
        COPY(df,a21,a23); \
        last_pixel: /* check if we need to convert one more pixel:*/ \
        if ((src_x + src_dx) & 1) { \
            /* update count & remainder: */ \
            register int r2 = remainder >> 1; \
            count += r2; remainder -= r2; \
            if (count <= 0) \
                goto rep_last; \
            /* load & convert last 2x1 block: */ \
            YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); \
            /* calc. & replicate last half-pixels: */ \
            AVERAGE(df,a11,a11,a12); \
            LOAD_AVERAGE(df,a01x,a11,d0); \
            AVERAGE(df,a21,a21,a22); \
            AVERAGE(df,a12x,a11,a21); \
            do { \
                d0+=BPP(df); \
                STORE(df,d01,a01x); \
                d01+=BPP(df); \
                STORE(df,d1,a11); \
                d1+=BPP(df); \
                STORE(df,d12,a12x); \
                d12+=BPP(df); \
                STORE(df,d2,a21); \
                d2+=BPP(df); \
                if (!(--count)) \
                    goto rep_last; /* ??? */ \
            } while ((limit -= step) >= 0); \
            /* get last vertical half-pixels:*/ \
            LOAD_AVERAGE(df,a01x,a12,d0); \
            AVERAGE(df,a12x,a12,a22); \
        } \
        goto rep_last; \
        rep_last_3: /* store last converted pixels in a12/22: */ \
        COPY(df,a12,a13); \
        COPY(df,a22,a23); \
        goto rep_last; \
        rep_last_2: /* store last converted pixels in a12/22: */ \
        COPY(df,a12,a11); \
        COPY(df,a22,a21); \
        /* restore the number of remaining pixels: */ \
        rep_last: count += remainder; \
        /* get vertical half-pixels:*/ \
        LOAD_AVERAGE(df,a01x,a12,d0); \
        AVERAGE(df,a12x,a12,a22); \
        /* replicate them: */ \
        while (count --) { \
            STORE(df,d01,a01x); \
            d01+=BPP(df); \
            STORE(df,d1,a12); \
            d1+=BPP(df); \
            STORE(df,d12,a12x); \
            d12+=BPP(df); \
            STORE(df,d2,a22); \
            d2+=BPP(df); \
        } \
    } \
}
/***********************************************************/
/*
 * Function names:
 *  FN / FN2        - plain converter names (e.g. I420toRGB32, I420toRGB32x)
 *  DBLROW_FN       - double-row converter name: <sf>to<df>_DBLROW_<cc>_<t>
 *  DBLROW2X_FN     - double-row 2x converter name: <sf>to<df>_DBLROW2X_<cc>_<t>
 */
#define FN(df,sf) sf##to##df
#define FN2(df,sf) sf##to##df##x
#define DBLROW_FN(df,sf,cc,t) sf##to##df##_DBLROW_##cc##_##t
#define DBLROW2X_FN(df,sf,cc,t) sf##to##df##_DBLROW2X_##cc##_##t
/*
 * Function replication macros:
 * (dblrow- and dblrow2x- converters)
 *
 * Each expansion defines one static converter function whose body is the
 * corresponding DBLROW_<t>/DBLROW2X_<t> macro instantiated for converter
 * class `cc` and destination format `df`.
 *
 * NOTE(review): backslash line-continuations restored (lost in extraction);
 * the expansions are unchanged.
 */
#define DBLROW_FUNC(df,sf,cc,t) \
    static void DBLROW_FN(df,sf,cc,t) (unsigned char *d1, unsigned char *d2, \
        int dest_x, int dest_dx, unsigned char *sy1, unsigned char *sy2, \
        unsigned char *su, unsigned char *sv, int src_x, int src_dx) \
    DBLROW_##t(cc,df,d1,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx)
#define DBLROW2X_FUNC(df,sf,cc,t) \
    static void DBLROW2X_FN(df,sf,cc,t) (unsigned char *d1, unsigned char *d12, \
        unsigned char *d2, unsigned char *d23, unsigned char *d3, \
        int dest_x, int dest_dx, unsigned char *sy1, unsigned char *sy2, \
        unsigned char *su, unsigned char *sv, int src_x, int src_dx) \
    DBLROW2X_##t(cc,df,d1,d12,d2,d23,d3,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx)
/***********************************************************/
/*
 * Actual double-row functions:
 *
 * Each DBLROW_FUNC invocation below expands (via the replication macro
 * above) into one static converter function named by DBLROW_FN, covering
 * every combination of destination format x scale type for the FAST
 * (table-lookup) converter class.
 */
DBLROW_FUNC(RGB32, I420 ,FAST, SHRINK)
DBLROW_FUNC(RGB32, I420 ,FAST, COPY)
DBLROW_FUNC(RGB32, I420 ,FAST, STRETCH)
DBLROW_FUNC(RGB32, I420 ,FAST, STRETCH2X)
DBLROW_FUNC(RGB32, I420 ,FAST, STRETCH2XPLUS)
DBLROW_FUNC(BGR32, I420 ,FAST, SHRINK)
DBLROW_FUNC(BGR32, I420 ,FAST, COPY)
DBLROW_FUNC(BGR32, I420 ,FAST, STRETCH)
DBLROW_FUNC(BGR32, I420 ,FAST, STRETCH2X)
DBLROW_FUNC(BGR32, I420 ,FAST, STRETCH2XPLUS)
DBLROW_FUNC(RGB24, I420 ,FAST, SHRINK)
DBLROW_FUNC(RGB24, I420 ,FAST, COPY)
DBLROW_FUNC(RGB24, I420 ,FAST, STRETCH)
DBLROW_FUNC(RGB24, I420 ,FAST, STRETCH2X)
DBLROW_FUNC(RGB24, I420 ,FAST, STRETCH2XPLUS)
DBLROW_FUNC(RGB565, I420 ,FAST, SHRINK)
DBLROW_FUNC(RGB565, I420 ,FAST, COPY)
DBLROW_FUNC(RGB565, I420 ,FAST, STRETCH)
DBLROW_FUNC(RGB565, I420 ,FAST, STRETCH2X)
DBLROW_FUNC(RGB565, I420 ,FAST, STRETCH2XPLUS)
DBLROW_FUNC(RGB555, I420 ,FAST, SHRINK)
DBLROW_FUNC(RGB555, I420 ,FAST, COPY)
DBLROW_FUNC(RGB555, I420 ,FAST, STRETCH)
DBLROW_FUNC(RGB555, I420 ,FAST, STRETCH2X)
DBLROW_FUNC(RGB555, I420 ,FAST, STRETCH2XPLUS)
DBLROW_FUNC(RGB444, I420 ,FAST, SHRINK)
DBLROW_FUNC(RGB444, I420 ,FAST, COPY)
DBLROW_FUNC(RGB444, I420 ,FAST, STRETCH)
DBLROW_FUNC(RGB444, I420 ,FAST, STRETCH2X)
DBLROW_FUNC(RGB444, I420 ,FAST, STRETCH2XPLUS)
DBLROW_FUNC(RGB8, I420 ,FAST, SHRINK)
DBLROW_FUNC(RGB8, I420 ,FAST, COPY)
DBLROW_FUNC(RGB8, I420 ,FAST, STRETCH)
DBLROW_FUNC(RGB8, I420 ,FAST, STRETCH2X)
DBLROW_FUNC(RGB8, I420 ,FAST, STRETCH2XPLUS)
/* converters with hue correction: */
/* (same matrix of formats x scale types, FULL converter class) */
DBLROW_FUNC(RGB32, I420 ,FULL, SHRINK)
DBLROW_FUNC(RGB32, I420 ,FULL, COPY)
DBLROW_FUNC(RGB32, I420 ,FULL, STRETCH)
DBLROW_FUNC(RGB32, I420 ,FULL, STRETCH2X)
DBLROW_FUNC(RGB32, I420 ,FULL, STRETCH2XPLUS)
DBLROW_FUNC(BGR32, I420 ,FULL, SHRINK)
DBLROW_FUNC(BGR32, I420 ,FULL, COPY)
DBLROW_FUNC(BGR32, I420 ,FULL, STRETCH)
DBLROW_FUNC(BGR32, I420 ,FULL, STRETCH2X)
DBLROW_FUNC(BGR32, I420 ,FULL, STRETCH2XPLUS)
DBLROW_FUNC(RGB24, I420 ,FULL, SHRINK)
DBLROW_FUNC(RGB24, I420 ,FULL, COPY)
DBLROW_FUNC(RGB24, I420 ,FULL, STRETCH)
DBLROW_FUNC(RGB24, I420 ,FULL, STRETCH2X)
DBLROW_FUNC(RGB24, I420 ,FULL, STRETCH2XPLUS)
DBLROW_FUNC(RGB565, I420 ,FULL, SHRINK)
DBLROW_FUNC(RGB565, I420 ,FULL, COPY)
DBLROW_FUNC(RGB565, I420 ,FULL, STRETCH)
DBLROW_FUNC(RGB565, I420 ,FULL, STRETCH2X)
DBLROW_FUNC(RGB565, I420 ,FULL, STRETCH2XPLUS)
DBLROW_FUNC(RGB555, I420 ,FULL, SHRINK)
DBLROW_FUNC(RGB555, I420 ,FULL, COPY)
DBLROW_FUNC(RGB555, I420 ,FULL, STRETCH)
DBLROW_FUNC(RGB555, I420 ,FULL, STRETCH2X)
DBLROW_FUNC(RGB555, I420 ,FULL, STRETCH2XPLUS)
DBLROW_FUNC(RGB444, I420 ,FULL, SHRINK)
DBLROW_FUNC(RGB444, I420 ,FULL, COPY)
DBLROW_FUNC(RGB444, I420 ,FULL, STRETCH)
DBLROW_FUNC(RGB444, I420 ,FULL, STRETCH2X)
DBLROW_FUNC(RGB444, I420 ,FULL, STRETCH2XPLUS)
DBLROW_FUNC(RGB8, I420 ,FULL, SHRINK)
DBLROW_FUNC(RGB8, I420 ,FULL, COPY)
DBLROW_FUNC(RGB8, I420 ,FULL, STRETCH)
DBLROW_FUNC(RGB8, I420 ,FULL, STRETCH2X)
DBLROW_FUNC(RGB8, I420 ,FULL, STRETCH2XPLUS)
/*
 * Actual double-row 2x functions:
 * (DBLROW2X variants additionally emit interpolated half-rows; same
 *  format x scale-type matrix as above)
 */
DBLROW2X_FUNC(RGB32, I420 ,FAST, SHRINK)
DBLROW2X_FUNC(RGB32, I420 ,FAST, COPY)
DBLROW2X_FUNC(RGB32, I420 ,FAST, STRETCH)
DBLROW2X_FUNC(RGB32, I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(RGB32, I420 ,FAST, STRETCH2XPLUS)
DBLROW2X_FUNC(BGR32, I420 ,FAST, SHRINK)
DBLROW2X_FUNC(BGR32, I420 ,FAST, COPY)
DBLROW2X_FUNC(BGR32, I420 ,FAST, STRETCH)
DBLROW2X_FUNC(BGR32, I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(BGR32, I420 ,FAST, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB24, I420 ,FAST, SHRINK)
DBLROW2X_FUNC(RGB24, I420 ,FAST, COPY)
DBLROW2X_FUNC(RGB24, I420 ,FAST, STRETCH)
DBLROW2X_FUNC(RGB24, I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(RGB24, I420 ,FAST, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB565, I420 ,FAST, SHRINK)
DBLROW2X_FUNC(RGB565, I420 ,FAST, COPY)
DBLROW2X_FUNC(RGB565, I420 ,FAST, STRETCH)
DBLROW2X_FUNC(RGB565, I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(RGB565, I420 ,FAST, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB555, I420 ,FAST, SHRINK)
DBLROW2X_FUNC(RGB555, I420 ,FAST, COPY)
DBLROW2X_FUNC(RGB555, I420 ,FAST, STRETCH)
DBLROW2X_FUNC(RGB555, I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(RGB555, I420 ,FAST, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB444, I420 ,FAST, SHRINK)
DBLROW2X_FUNC(RGB444, I420 ,FAST, COPY)
DBLROW2X_FUNC(RGB444, I420 ,FAST, STRETCH)
DBLROW2X_FUNC(RGB444, I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(RGB444, I420 ,FAST, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB8, I420 ,FAST, SHRINK)
DBLROW2X_FUNC(RGB8, I420 ,FAST, COPY)
DBLROW2X_FUNC(RGB8, I420 ,FAST, STRETCH)
DBLROW2X_FUNC(RGB8, I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(RGB8, I420 ,FAST, STRETCH2XPLUS)
/* converters with hue correction: */
DBLROW2X_FUNC(RGB32, I420 ,FULL, SHRINK)
DBLROW2X_FUNC(RGB32, I420 ,FULL, COPY)
DBLROW2X_FUNC(RGB32, I420 ,FULL, STRETCH)
DBLROW2X_FUNC(RGB32, I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(RGB32, I420 ,FULL, STRETCH2XPLUS)
DBLROW2X_FUNC(BGR32, I420 ,FULL, SHRINK)
DBLROW2X_FUNC(BGR32, I420 ,FULL, COPY)
DBLROW2X_FUNC(BGR32, I420 ,FULL, STRETCH)
DBLROW2X_FUNC(BGR32, I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(BGR32, I420 ,FULL, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB24, I420 ,FULL, SHRINK)
DBLROW2X_FUNC(RGB24, I420 ,FULL, COPY)
DBLROW2X_FUNC(RGB24, I420 ,FULL, STRETCH)
DBLROW2X_FUNC(RGB24, I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(RGB24, I420 ,FULL, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB565, I420 ,FULL, SHRINK)
DBLROW2X_FUNC(RGB565, I420 ,FULL, COPY)
DBLROW2X_FUNC(RGB565, I420 ,FULL, STRETCH)
DBLROW2X_FUNC(RGB565, I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(RGB565, I420 ,FULL, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB555, I420 ,FULL, SHRINK)
DBLROW2X_FUNC(RGB555, I420 ,FULL, COPY)
DBLROW2X_FUNC(RGB555, I420 ,FULL, STRETCH)
DBLROW2X_FUNC(RGB555, I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(RGB555, I420 ,FULL, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB444, I420 ,FULL, SHRINK)
DBLROW2X_FUNC(RGB444, I420 ,FULL, COPY)
DBLROW2X_FUNC(RGB444, I420 ,FULL, STRETCH)
DBLROW2X_FUNC(RGB444, I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(RGB444, I420 ,FULL, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB8, I420 ,FULL, SHRINK)
DBLROW2X_FUNC(RGB8, I420 ,FULL, COPY)
DBLROW2X_FUNC(RGB8, I420 ,FULL, STRETCH)
DBLROW2X_FUNC(RGB8, I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(RGB8, I420 ,FULL, STRETCH2XPLUS)
- /*
- * Double-row scale function selection tables:
- * [conversion type][source format][row scale type]
- */
- static void (* DblRowFuncs [2][RGB_FORMATS][SCALE_FUNCS]) (
- unsigned char *d1, unsigned char *d2, int dest_x, int dest_dx,
- unsigned char *sy1, unsigned char *sy2,
- unsigned char *su, unsigned char *sv, int src_x, int src_dx) =
- {
- { {
- #if defined (HELIX_FEATURE_CC_RGB32out)
- #if defined (HXCOLOR_SHRINK)
- DBLROW_FN(RGB32 ,I420 ,FAST, SHRINK),
- #else
- 0,
- #endif //HXCOLOR_SHRINK
- DBLROW_FN(RGB32 ,I420 ,FAST, COPY),
- #if defined (HXCOLOR_STRETCH)
- DBLROW_FN(RGB32 ,I420 ,FAST, STRETCH),
- #else
- 0,
- #endif //HXCOLOR_STRETCH
- #if defined (HXCOLOR_STRETCH2X)
- DBLROW_FN(RGB32 ,I420 ,FAST, STRETCH2X),
- #else
- 0,
- #endif //HXCOLOR_STRETCH2X
- #if defined (HXCOLOR_STRETCH2XPLUS)
- DBLROW_FN(RGB32 ,I420 ,FAST, STRETCH2XPLUS)
- #else
- 0
- #endif //HXCOLOR_STRETCH2XPLUS
- #else
- 0,
- 0,
- 0,
- 0,
- 0
- #endif //HELIX_FEATURE_CC_RGB32out
- },{
- #if defined (HELIX_FEATURE_CC_BGR32out)
- #if defined (HXCOLOR_SHRINK)
- DBLROW_FN(BGR32 ,I420 ,FAST, SHRINK),
- #else
- 0,
- #endif //HXCOLOR_SHRINK
- DBLROW_FN(BGR32 ,I420 ,FAST, COPY),
- #if defined (HXCOLOR_STRETCH)
- DBLROW_FN(BGR32 ,I420 ,FAST, STRETCH),
- #else
- 0,
- #endif //HXCOLOR_STRETCH
- #if defined (HXCOLOR_STRETCH2X)
- DBLROW_FN(BGR32 ,I420 ,FAST, STRETCH2X),
- #else
- 0,
- #endif //HXCOLOR_STRETCH2X
- #if defined (HXCOLOR_STRETCH2XPLUS)
- DBLROW_FN(BGR32 ,I420 ,FAST, STRETCH2XPLUS)
- #else
- 0
- #endif //HXCOLOR_STRETCH2XPLUS
- #else
- 0,
- 0,
- 0,
- 0,
- 0
- #endif //HELIX_FEATURE_CC_BGR32out
- },{
- #if defined (HELIX_FEATURE_CC_RGB24out)
- #if defined (HXCOLOR_SHRINK)
- DBLROW_FN(RGB24 ,I420 ,FAST, SHRINK),
- #else
- 0,
- #endif //HXCOLOR_SHRINK
- DBLROW_FN(RGB24 ,I420 ,FAST, COPY),
- #if defined (HXCOLOR_STRETCH)
- DBLROW_FN(RGB24 ,I420 ,FAST, STRETCH),
- #else
- 0,
- #endif //HXCOLOR_STRETCH
- #if defined (HXCOLOR_STRETCH2X)
- DBLROW_FN(RGB24 ,I420 ,FAST, STRETCH2X),
- #else
- 0,
- #endif //HXCOLOR_STRETCH2X
- #if defined (HXCOLOR_STRETCH2XPLUS)
- DBLROW_FN(RGB24 ,I420 ,FAST, STRETCH2XPLUS)
- #else
- 0
- #endif //HXCOLOR_STRETCH2XPLUS
- #else
- 0,
- 0,
- 0,
- 0,
- 0
- #endif //HELIX_FEATURE_CC_RGB24out
- },{
- #if defined (HELIX_FEATURE_CC_RGB565out)
- #if defined (HXCOLOR_SHRINK)
- DBLROW_FN(RGB565,I420 ,FAST, SHRINK),
- #else
- 0,
- #endif //HXCOLOR_SHRINK
- DBLROW_FN(RGB565,I420 ,FAST, COPY),
- #if defined (HXCOLOR_STRETCH)
- DBLROW_FN(RGB565,I420 ,FAST, STRETCH),
- #else
- 0,
- #endif //HXCOLOR_STRETCH
- #if defined (HXCOLOR_STRETCH2X)
- DBLROW_FN(RGB565,I420 ,FAST, STRETCH2X),
- #else
- 0,
- #endif //HXCOLOR_STRETCH2X
- #if defined (HXCOLOR_STRETCH2XPLUS)
- DBLROW_FN(RGB565,I420 ,FAST, STRETCH2XPLUS)
- #else
- 0
- #endif //HXCOLOR_STRETCH2XPLUS
- #else
- 0,
- 0,
- 0,
- 0,
- 0
- #endif //HELIX_FEATURE_CC_RGB565out
- },{
- #if defined (HELIX_FEATURE_CC_RGB555out)
- #if defined (HXCOLOR_SHRINK)
- DBLROW_FN(RGB555,I420 ,FAST, SHRINK),
- #else
- 0,
- #endif //HXCOLOR_SHRINK
- DBLROW_FN(RGB555,I420 ,FAST, COPY),
- #if defined (HXCOLOR_STRETCH)
- DBLROW_FN(RGB555,I420 ,FAST, STRETCH),
- #else
- 0,
- #endif //HXCOLOR_STRETCH
- #if defined (HXCOLOR_STRETCH2X)
- DBLROW_FN(RGB555,I420 ,FAST, STRETCH2X),
- #else
- 0,
- #endif //HXCOLOR_STRETCH2X
- #if defined (HXCOLOR_STRETCH2XPLUS)
- DBLROW_FN(RGB555,I420 ,FAST, STRETCH2XPLUS)
- #else
- 0
- #endif //HXCOLOR_STRETCH2XPLUS
- #else
- 0,
- 0,
- 0,
- 0,
- 0
- #endif //HELIX_FEATURE_CC_RGB555out
- },{
- #if defined (HELIX_FEATURE_CC_RGB444out)
- #if defined (HXCOLOR_SHRINK)
- DBLROW_FN(RGB444,I420 ,FAST, SHRINK),
- #else
- 0,
- #endif //HXCOLOR_SHRINK
- DBLROW_FN(RGB444,I420 ,FAST, COPY),
- #if defined (HXCOLOR_STRETCH)
- DBLROW_FN(RGB444,I420 ,FAST, STRETCH),
- #else
- 0,
- #endif //HXCOLOR_STRETCH
- #if defined (HXCOLOR_STRETCH2X)
- DBLROW_FN(RGB444,I420 ,FAST, STRETCH2X),
- #else
- 0,
- #endif //HXCOLOR_STRETCH2X
- #if defined (HXCOLOR_STRETCH2XPLUS)
- DBLROW_FN(RGB444,I420 ,FAST, STRETCH2XPLUS)
- #else
- 0
- #endif //HXCOLOR_STRETCH2XPLUS
- #else
- 0,
- 0,
- 0,
- 0,
- 0
- #endif //HELIX_FEATURE_CC_RGB444out
- },{
- #if defined (HELIX_FEATURE_CC_RGB8out)
- #if defined (HXCOLOR_SHRINK)
- DBLROW_FN(RGB8 ,I420 ,FAST, SHRINK),
- #else
- 0,
- #endif //HXCOLOR_SHRINK
- DBLROW_FN(RGB8 ,I420 ,FAST, COPY),
- #if defined (HXCOLOR_STRETCH)
- DBLROW_FN(RGB8 ,I420 ,FAST, STRETCH),
- #else
- 0,
- #endif //HXCOLOR_STRETCH
- #if defined (HXCOLOR_STRETCH2X)
- DBLROW_FN(RGB8 ,I420 ,FAST, STRETCH2X),
- #else
- 0,
- #endif //HXCOLOR_STRETCH2X
- #if defined (HXCOLOR_STRETCH2XPLUS)
- DBLROW_FN(RGB8 ,I420 ,FAST, STRETCH2XPLUS)
- #else
- 0
- #endif//HXCOLOR_STRETCH2XPLUS
- #else
- 0,
- 0,
- 0,
- 0,
- 0
- #endif //HELIX_FEATURE_CC_RGB8out
- }
- },{ {
- #if defined _PLUS_HXCOLOR && defined (HELIX_FEATURE_CC_RGB32out)
- #if defined (HXCOLOR_SHRINK)
- DBLROW_FN(RGB32 ,I420 ,FULL, SHRINK),
- #else
- 0,
- #endif //HXCOLOR_SHRINK
- DBLROW_FN(RGB32 ,I420 ,FULL, COPY),
- #if defined (HXCOLOR_STRETCH)
- DBLROW_FN(RGB32 ,I420 ,FULL, STRETCH),
- #else
- 0,
- #endif //HXCOLOR_STRETCH
- #if defined (HXCOLOR_STRETCH2X)
- DBLROW_FN(RGB32 ,I420 ,FULL, STRETCH2X),
- #else
- 0,
- #endif //HXCOLOR_STRETCH2X
- #if defined (HXCOLOR_STRETCH2XPLUS)
- DBLROW_FN(RGB32 ,I420 ,FULL, STRETCH2XPLUS)
- #else
- 0
- #endif //HXCOLOR_STRETCH2XPLUS
- #else
- 0,
- 0,
- 0,
- 0,
- 0
- #endif
- },{
- #if defined _PLUS_HXCOLOR && defined (HELIX_FEATURE_CC_BGR32out)
- #if defined (HXCOLOR_SHRINK)
- DBLROW_FN(BGR32 ,I420 ,FULL, SHRINK),
- #else
- 0,
- #endif //HXCOLOR_SHRINK
- DBLROW_FN(BGR32 ,I420 ,FULL, COPY),
- #if defined (HXCOLOR_STRETCH)
- DBLROW_FN(BGR32 ,I420 ,FULL, STRETCH),
- #else
- 0,
- #endif //HXCOLOR_STRETCH
- #if defined (HXCOLOR_STRETCH2X)
- DBLROW_FN(BGR32 ,I420 ,FULL, STRETCH2X),
- #else
- 0,
- #endif // HXCOLOR_STRETCH2X
- #if defined (HXCOLOR_STRETCH2XPLUS)
- DBLROW_FN(BGR32 ,I420 ,FULL, STRETCH2XPLUS)
- #else
- 0
- #endif //HXCOLOR_STRETCH2XPLUS
- #else
- 0,
- 0,
- 0,
- 0,
- 0
- #endif
- },{
- #if defined _PLUS_HXCOLOR && defined (HELIX_FEATURE_CC_RGB24out)
- #if defined (HXCOLOR_SHRINK)
- DBLROW_FN(RGB24 ,I420 ,FULL, SHRINK),
- #else
- 0,
- #endif //HXCOLOR_SHRINK
- DBLROW_FN(RGB24 ,I420 ,FULL, COPY),
- #if defined (HXCOLOR_STRETCH)
- DBLROW_FN(RGB24 ,I420 ,FULL, STRETCH),
- #else
- 0,
- #endif //HXCOLOR_STRETCH
- #if defined (HXCOLOR_STRETCH2X)
- DBLROW_FN(RGB24 ,I420 ,FULL, STRETCH2X),
- #else
- 0,
- #endif //HXCOLOR_STRETCH2X
- #if defined (HXCOLOR_STRETCH2XPLUS)
- DBLROW_FN(RGB24 ,I420 ,FULL, STRETCH2XPLUS)
- #else
- 0
- #endif //HXCOLOR_STRETCH2XPLUS
- #else
- 0,
- 0,
- 0,
- 0,
- 0
- #endif
- },{
- #if defined _PLUS_HXCOLOR && defined (HELIX_FEATURE_CC_RGB565out)
- #if defined (HXCOLOR_SHRINK)
- DBLROW_FN(RGB565,I420 ,FULL, SHRINK),
- #else
- 0,
- #endif //HXCOLOR_SHRINK
- DBLROW_FN(RGB565,I420 ,FULL, COPY),
- #if defined (HXCOLOR_STRETCH)
- DBLROW_FN(RGB565,I420 ,FULL, STRETCH),
- #else
- 0,
- #endif //HXCOLOR_STRETCH