/*
 * yuv2rgb.c
 *
 * NOTE(recovery): this file was recovered from a code-sharing upload.
 *   Upload user: dangjiwu; upload date: 2013-07-19.
 *   Package size: 42019k; code size: 179k.
 *   Category: Symbian; development platform: Visual C++.
 */
  1. /* ***** BEGIN LICENSE BLOCK *****
  2.  * Source last modified: $Id: yuv2rgb.c,v 1.2.42.2 2004/07/13 19:01:32 bobclark Exp $
  3.  * 
  4.  * Portions Copyright (c) 1995-2004 RealNetworks, Inc. All Rights Reserved.
  5.  * 
  6.  * The contents of this file, and the files included with this file,
  7.  * are subject to the current version of the RealNetworks Public
  8.  * Source License (the "RPSL") available at
  9.  * http://www.helixcommunity.org/content/rpsl unless you have licensed
  10.  * the file under the current version of the RealNetworks Community
  11.  * Source License (the "RCSL") available at
  12.  * http://www.helixcommunity.org/content/rcsl, in which case the RCSL
  13.  * will apply. You may also obtain the license terms directly from
  14.  * RealNetworks.  You may not use this file except in compliance with
  15.  * the RPSL or, if you have a valid RCSL with RealNetworks applicable
  16.  * to this file, the RCSL.  Please see the applicable RPSL or RCSL for
  17.  * the rights, obligations and limitations governing use of the
  18.  * contents of the file.
  19.  * 
  20.  * Alternatively, the contents of this file may be used under the
  21.  * terms of the GNU General Public License Version 2 or later (the
  22.  * "GPL") in which case the provisions of the GPL are applicable
  23.  * instead of those above. If you wish to allow use of your version of
  24.  * this file only under the terms of the GPL, and not to allow others
  25.  * to use your version of this file under the terms of either the RPSL
  26.  * or RCSL, indicate your decision by deleting the provisions above
  27.  * and replace them with the notice and other provisions required by
  28.  * the GPL. If you do not delete the provisions above, a recipient may
  29.  * use your version of this file under the terms of any one of the
  30.  * RPSL, the RCSL or the GPL.
  31.  * 
  32.  * This file is part of the Helix DNA Technology. RealNetworks is the
  33.  * developer of the Original Code and owns the copyrights in the
  34.  * portions it created.
  35.  * 
  36.  * This file, and the files included with this file, is distributed
  37.  * and made available on an 'AS IS' basis, WITHOUT WARRANTY OF ANY
  38.  * KIND, EITHER EXPRESS OR IMPLIED, AND REALNETWORKS HEREBY DISCLAIMS
  39.  * ALL SUCH WARRANTIES, INCLUDING WITHOUT LIMITATION, ANY WARRANTIES
  40.  * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, QUIET
  41.  * ENJOYMENT OR NON-INFRINGEMENT.
  42.  * 
  43.  * Technology Compatibility Kit Test Suite(s) Location:
  44.  *    http://www.helixcommunity.org/content/tck
  45.  * 
  46.  * Contributor(s):
  47.  * 
  48.  * ***** END LICENSE BLOCK ***** */
  49. /*** #includes: ********************************************/
  50. #include "env.h"
  51. #include "rgb.h"    /* basic RGB-data definitions & macros */
  52. #include "yuv.h"    /* YUV-to-RGB conversion tables & macros */
  53. #include "clip.h"   /* macros for clipping & dithering */
  54. #include "scale.h"  /* scale algorithms */
  55. #include "colorlib.h" /* ensure that prototypes get extern C'ed */
  56. #ifdef _MACINTOSH
  57. #pragma require_prototypes off
  58. #endif
  59. static int YUVtoRGB2 (
  60.     int dest_format,
  61.     unsigned char *dest_ptr, int dest_width, int dest_height,
  62.     int dest_pitch, int dest_x, int dest_y, int dest_dx, int dest_dy,
  63.     unsigned char *pY, unsigned char *pU, unsigned char *pV,
  64.     int src_width, int src_height, int yPitch, int uPitch, int vPitch,
  65.     int src_x, int src_y, int src_dx, int src_dy);
  66. /*** Additional pixel-level macros: ************************/
  67. /*
  68.  * Add dither, clip and assign values to RGB pixels:
  69.  */
  70. #define RGBX_CLIP_X(f,rnd,x,v)  (CLIP(rnd,BITS(f,x),v) << START(f,x))
  71. #define RGBX_CLIP_SET(f,rnd,a,r,g,b) 
  72.     a##_rgb = RGBX_CLIP_X(f,rnd,R,r) | RGBX_CLIP_X(f,rnd,G,g) | RGBX_CLIP_X(f,rnd,B,b)
  73. #define RGB32_CLIP_SET(rnd,a,r,g,b)  RGBX_CLIP_SET(RGB32,rnd,a,r,g,b)
  74. #define BGR32_CLIP_SET(rnd,a,r,g,b)  RGBX_CLIP_SET(BGR32,rnd,a,r,g,b)
  75. #define RGB24_CLIP_SET(rnd,a,r,g,b)  
  76.     a##_b = CLIP(rnd,8,b), a##_g = CLIP(rnd,8,g), a##_r = CLIP(rnd,8,r)
  77. #define RGB565_CLIP_SET(rnd,a,r,g,b) RGBX_CLIP_SET(RGB565,rnd,a,r,g,b)
  78. #define RGB555_CLIP_SET(rnd,a,r,g,b) RGBX_CLIP_SET(RGB555,rnd,a,r,g,b)
  79. #define RGB444_CLIP_SET(rnd,a,r,g,b) RGBX_CLIP_SET(RGB444,rnd,a,r,g,b)
  80. #define RGB8_CLIP_SET(rnd,a,r,g,b)   
  81.     a##_idx = pmap[(CLIP(rnd,4,r)<<8) | (CLIP(rnd,4,g)<<4) | CLIP(rnd,4,b)]
  82. /*
  83.  * Generic RGB clipping & assignment macro:
  84.  */
  85. #define CLIP_SET(f,rnd,a,r,g,b)      f##_CLIP_SET(rnd,a,r,g,b)
  86. /*
  87.  * YUV 2x1-block load and convert macros:
  88.  */
  89. #define YUV_LOAD_CONVERT_2x1_FAST(df,a1,a2,sy1,sy2,su,sv)   
  90.     {                                                       
  91.         register int y1, y2, rv, guv, bu;                   
  92.         bu = butab[su[0]];                                  
  93.         guv = gutab[su[0]] + gvtab[sv[0]];                  
  94.         rv = rvtab[sv[0]];                                  
  95.         y1 = ytab[sy1[0]];                                  
  96.         y2 = ytab[sy2[0]];                                  
  97.         CLIP_SET(df,ROUND,a1,y1+rv,y1+guv,y1+bu);           
  98.         CLIP_SET(df,ROUND,a2,y2+rv,y2+guv,y2+bu);           
  99.     }
  100. /* with Hue rotation: */
  101. #define YUV_LOAD_CONVERT_2x1_FULL(df,a1,a2,sy1,sy2,su,sv)   
  102.     {                                                       
  103.         register int y1, y2, ruv, guv, buv;                 
  104.         buv = butab[su[0]] + bvtab[sv[0]];                  
  105.         guv = gutab[su[0]] + gvtab[sv[0]];                  
  106.         ruv = rutab[su[0]] + rvtab[sv[0]];                  
  107.         y1 = ytab[sy1[0]];                                  
  108.         y2 = ytab[sy2[0]];                                  
  109.         CLIP_SET(df,ROUND,a1,y1+ruv,y1+guv,y1+buv);         
  110.         CLIP_SET(df,ROUND,a2,y2+ruv,y2+guv,y2+buv);         
  111.     }
  112. /*
  113.  * Generic YUV 2x1-block load & convert macro:
  114.  */
  115. #define YUV_LOAD_CONVERT_2x1(cc,df,a1,a2,sy1,sy2,su,sv)  
  116.     YUV_LOAD_CONVERT_2x1_##cc(df,a1,a2,sy1,sy2,su,sv)
  117. /*
  118.  * YUV 2x2-block load and convert macros:
  119.  * (without dithering)
  120.  */
  121. #define YUV_LOAD_CONVERT_2x2_FAST(df,a11,a12,a21,a22,sy1,sy2,su,sv) 
  122.     {                                                       
  123.         register int y11, y12, y21, y22, rv, guv, bu;       
  124.         bu = butab[su[0]];                                  
  125.         guv = gutab[su[0]] + gvtab[sv[0]];                  
  126.         rv = rvtab[sv[0]];                                  
  127.         y11 = ytab[sy1[0]];                                 
  128.         y21 = ytab[sy2[0]];                                 
  129.         y12 = ytab[sy1[1]];                                 
  130.         y22 = ytab[sy2[1]];                                 
  131.         CLIP_SET(df,ROUND,a11,y11+rv,y11+guv,y11+bu);       
  132.         CLIP_SET(df,ROUND,a21,y21+rv,y21+guv,y21+bu);       
  133.         CLIP_SET(df,ROUND,a12,y12+rv,y12+guv,y12+bu);       
  134.         CLIP_SET(df,ROUND,a22,y22+rv,y22+guv,y22+bu);       
  135.     }
  136. /* with Hue rotation: */
  137. #define YUV_LOAD_CONVERT_2x2_FULL(df,a11,a12,a21,a22,sy1,sy2,su,sv) 
  138.     {                                                       
  139.         register int y11, y12, y21, y22, ruv, guv, buv;     
  140.         buv = butab[su[0]] + bvtab[sv[0]];                  
  141.         guv = gutab[su[0]] + gvtab[sv[0]];                  
  142.         ruv = rutab[su[0]] + rvtab[sv[0]];                  
  143.         y11 = ytab[sy1[0]];                                 
  144.         y21 = ytab[sy2[0]];                                 
  145.         y12 = ytab[sy1[1]];                                 
  146.         y22 = ytab[sy2[1]];                                 
  147.         CLIP_SET(df,ROUND,a11,y11+ruv,y11+guv,y11+buv);     
  148.         CLIP_SET(df,ROUND,a21,y21+ruv,y21+guv,y21+buv);     
  149.         CLIP_SET(df,ROUND,a12,y12+ruv,y12+guv,y12+buv);     
  150.         CLIP_SET(df,ROUND,a22,y22+ruv,y22+guv,y22+buv);     
  151.     }
  152. /*
  153.  * Generic YUV 2x1-block load & convert macro:
  154.  */
  155. #define YUV_LOAD_CONVERT_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv) 
  156.     YUV_LOAD_CONVERT_2x2_##cc(df,a11,a12,a21,a22,sy1,sy2,su,sv)
  157. /*
  158.  * YUV 2x2-block load and convert macros:
  159.  *  (adds symmetric 2x2 dither noise)
  160.  */
  161. #define YUV_LOAD_CONVERT_DITHER_2x2_FAST(df,a11,a12,a21,a22,sy1,sy2,su,sv) 
  162.     {                                                       
  163.         register int y11, y12, y21, y22, rv, guv, bu;       
  164.         bu = butab[su[0]];                                  
  165.         guv = gutab[su[0]] + gvtab[sv[0]];                  
  166.         rv = rvtab[sv[0]];                                  
  167.         y11 = ytab[sy1[0]];                                 
  168.         y21 = ytab[sy2[0]];                                 
  169.         y12 = ytab[sy1[1]];                                 
  170.         y22 = ytab[sy2[1]];                                 
  171.         CLIP_SET(df,HIGH,a11,y11+rv,y11+guv,y11+bu);        
  172.         CLIP_SET(df,LOW ,a21,y21+rv,y21+guv,y21+bu);        
  173.         CLIP_SET(df,LOW ,a12,y12+rv,y12+guv,y12+bu);        
  174.         CLIP_SET(df,HIGH,a22,y22+rv,y22+guv,y22+bu);        
  175.     }
  176. /* with Hue rotation: */
  177. #define YUV_LOAD_CONVERT_DITHER_2x2_FULL(df,a11,a12,a21,a22,sy1,sy2,su,sv) 
  178.     {                                                       
  179.         register int y11, y12, y21, y22, ruv, guv, buv;     
  180.         buv = butab[su[0]] + bvtab[sv[0]];                  
  181.         guv = gutab[su[0]] + gvtab[sv[0]];                  
  182.         ruv = rutab[su[0]] + rvtab[sv[0]];                  
  183.         y11 = ytab[sy1[0]];                                 
  184.         y21 = ytab[sy2[0]];                                 
  185.         y12 = ytab[sy1[1]];                                 
  186.         y22 = ytab[sy2[1]];                                 
  187.         CLIP_SET(df,HIGH,a11,y11+ruv,y11+guv,y11+buv);      
  188.         CLIP_SET(df,LOW ,a21,y21+ruv,y21+guv,y21+buv);      
  189.         CLIP_SET(df,LOW ,a12,y12+ruv,y12+guv,y12+buv);      
  190.         CLIP_SET(df,HIGH,a22,y22+ruv,y22+guv,y22+buv);      
  191.     }
  192. /*
  193.  * Generic YUV 2x1-block load & convert macro:
  194.  */
  195. #define YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv) 
  196.     YUV_LOAD_CONVERT_DITHER_2x2_##cc(df,a11,a12,a21,a22,sy1,sy2,su,sv)
  197. /*
  198.  * Generic YUV load-convert-store macros:
  199.  */
  200. #define YUV_LOAD_CONVERT_STORE_2x1(cc,df,d1,d2,sy1,sy2,su,sv) 
  201.     {                                                       
  202.         PIXEL(df,a1); PIXEL(df,a2);                         
  203.         YUV_LOAD_CONVERT_2x1(cc,df,a1,a2,sy1,sy2,su,sv);    
  204.         sy1++; sy2++; su++; sv++;                           
  205.         STORE(df,d1,a1);                                    
  206.         d1+=BPP(df);                                        
  207.         STORE(df,d2,a2);                                    
  208.         d2+=BPP(df);                                        
  209.     }
  210. #define YUV_LOAD_CONVERT_STORE_2x2(cc,df,d1,d2,sy1,sy2,su,sv) 
  211.     {                                                       
  212.         PIXEL(df,a11); PIXEL(df,a12);                       
  213.         PIXEL(df,a21); PIXEL(df,a22);                       
  214.         YUV_LOAD_CONVERT_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  215.         sy1+=2; sy2+=2; su++; sv++;                         
  216.         STORE(df,d1,a11);                                   
  217.         STORE(df,d1+BPP(df),a12);                           
  218.         d1+=2*BPP(df);                                      
  219.         STORE(df,d2,a21);                                   
  220.         STORE(df,d2+BPP(df),a22);                           
  221.         d2+=2*BPP(df);                                      
  222.     }
  223. #define YUV_LOAD_CONVERT_DITHER_STORE_2x2(cc,df,d1,d2,sy1,sy2,su,sv) 
  224.     {                                                       
  225.         PIXEL(df,a11); PIXEL(df,a12);                       
  226.         PIXEL(df,a21); PIXEL(df,a22);                       
  227.         YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  228.         sy1+=2; sy2+=2; su++; sv++;                         
  229.         STORE(df,d1,a11);                                   
  230.         STORE(df,d1+BPP(df),a12);                           
  231.         d1+=2*BPP(df);                                      
  232.         STORE(df,d2,a21);                                   
  233.         STORE(df,d2+BPP(df),a22);                           
  234.         d2+=2*BPP(df);                                      
  235.     }
  236. /*
  237.  * Generic YUV load-convert-average-store macros:
  238.  *  [d1],[d2] = convert([s1],[s2]);
  239.  *  [d01] = ([d0]+[d1])/2;
  240.  *  [d12] = ([d1]+[d2])/2;
  241.  */
  242. #define YUV_LOAD_CONVERT_AVERAGE_STORE_2x1(cc,df,d0,d01,d1,d12,d2,sy1,sy2,su,sv) 
  243.     {                                                       
  244.         PIXEL(df,a1); PIXEL(df,a2);                         
  245.         YUV_LOAD_CONVERT_2x1(cc,df,a1,a2,sy1,sy2,su,sv);    
  246.         sy1++; sy2++; su++; sv++;                           
  247.         STORE(df,d1,a1);                                    
  248.         d1+=BPP(df);                                        
  249.         STORE(df,d2,a2);                                    
  250.         d2+=BPP(df);                                        
  251.         AVERAGE(df,a2,a1,a2);                               
  252.         LOAD_AVERAGE(df,a1,a1,d0);                          
  253.         d0+=BPP(df);                                        
  254.         STORE(df,d01,a1);                                   
  255.         d01+=BPP(df);                                       
  256.         STORE(df,d12,a2);                                   
  257.         d12+=BPP(df);                                       
  258.     }
  259. #define YUV_LOAD_CONVERT_AVERAGE_STORE_2x2(cc,df,d0,d01,d1,d12,d2,sy1,sy2,su,sv) 
  260.     {                                                       
  261.         PIXEL(df,a11); PIXEL(df,a12);                       
  262.         PIXEL(df,a21); PIXEL(df,a22);                       
  263.         YUV_LOAD_CONVERT_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  264.         sy1+=2; sy2+=2; su++; sv++;                         
  265.         STORE(df,d1,a11);                                   
  266.         STORE(df,d1+BPP(df),a12);                           
  267.         d1+=2*BPP(df);                                      
  268.         STORE(df,d2,a21);                                   
  269.         STORE(df,d2+BPP(df),a22);                           
  270.         d2+=2*BPP(df);                                      
  271.         AVERAGE(df,a21,a11,a21);                            
  272.         AVERAGE(df,a22,a12,a22);                            
  273.         LOAD_AVERAGE(df,a11,a11,d0);                        
  274.         LOAD_AVERAGE(df,a12,a12,d0+BPP(df));                
  275.         d0+=2*BPP(df);                                      
  276.         STORE(df,d01,a11);                                  
  277.         STORE(df,d01+BPP(df),a12);                          
  278.         d01+=2*BPP(df);                                     
  279.         STORE(df,d12,a21);                                  
  280.         STORE(df,d12+BPP(df),a22);                          
  281.         d12+=2*BPP(df);                                     
  282.     }
  283. #define YUV_LOAD_CONVERT_AVERAGE_DITHER_STORE_2x2(cc,df,d0,d01,d1,d12,d2,sy1,sy2,su,sv) 
  284.     {                                                       
  285.         PIXEL(df,a11); PIXEL(df,a12);                       
  286.         PIXEL(df,a21); PIXEL(df,a22);                       
  287.         YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  288.         sy1+=2; sy2+=2; su++; sv++;                         
  289.         STORE(df,d1,a11);                                   
  290.         STORE(df,d1+BPP(df),a12);                           
  291.         d1+=2*BPP(df);                                      
  292.         STORE(df,d2,a21);                                   
  293.         STORE(df,d2+BPP(df),a22);                           
  294.         d2+=2*BPP(df);                                      
  295.         AVERAGE(df,a21,a11,a21);                            
  296.         AVERAGE(df,a22,a12,a22);                            
  297.         LOAD_AVERAGE(df,a11,a11,d0);                        
  298.         LOAD_AVERAGE(df,a12,a12,d0+BPP(df));                
  299.         d0+=2*BPP(df);                                      
  300.         STORE(df,d01,a11);                                  
  301.         STORE(df,d01+BPP(df),a12);                          
  302.         d01+=2*BPP(df);                                     
  303.         STORE(df,d12,a21);                                  
  304.         STORE(df,d12+BPP(df),a22);                          
  305.         d12+=2*BPP(df);                                     
  306.     }
  307. /*** Generic YUVtoRGB double-row converters: ***************/
  308. /*
  309.  * Generic YUVtoRGB double-row shrinking converter:
  310.  *  uses read-ahead optimization to process full 2x2 blocks
  311.  *  whenever possible.
  312.  */
  313. #define DBLROW_SHRINK(cc,df,d1,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  314.     {                                                       
  315.         /* initialize local variables: */                   
  316.         register int count = dest_dx;                       
  317.         register int limit = src_dx >> 1; /* -1 */          
  318.         register int step = dest_dx;                        
  319.         /* check row length: */                             
  320.         if (count) {                                        
  321.             /* check if we have an odd first block: */      
  322.             if (src_x & 1)                                  
  323.                 goto start_odd;                             
  324.             /* process even pixels: */                      
  325.             do {                                            
  326.                 PIXEL(df,a11); PIXEL(df,a12);               
  327.                 PIXEL(df,a21); PIXEL(df,a22);               
  328.                 /* make one Bresenham step ahead: */        
  329.                 if ((limit -= step) < 0) {                  
  330.                     limit += src_dx;                        
  331.                     /* can we process 2x2 pixels? */        
  332.                     if (!--count)                           
  333.                         goto last_pixel;                    
  334.                     /* process full 2x2 block: */           
  335.                     YUV_LOAD_CONVERT_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  336.                     sy1+=2; sy2+=2; su++; sv++;             
  337.                     STORE(df,d1,a11);                       
  338.                     STORE(df,d1+BPP(df),a12);               
  339.                     d1+=2*BPP(df);                          
  340.                     STORE(df,d2,a21);                       
  341.                     STORE(df,d2+BPP(df),a22);               
  342.                     d2+=2*BPP(df);                          
  343.                 } else {                                    
  344.                     /* proc. first 2x1 block & skip next: */
  345.                     YUV_LOAD_CONVERT_2x1(cc,df,a11,a21,sy1,sy2,su,sv); 
  346.                     sy1+=2; sy2+=2; su++; sv++;             
  347.                     STORE(df,d1,a11);                       
  348.                     d1+=BPP(df);                            
  349.                     STORE(df,d2,a21);                       
  350.                     d2+=BPP(df);                            
  351.                 }                                           
  352.                 /* inverted Bresenham stepping: */          
  353.                 while ((limit -= step) >= 0) {              
  354.                     /* skip next even source pixel: */      
  355.                     sy1++; sy2++;                           
  356.                     if ((limit -= step) < 0)                
  357.                         goto cont_odd;                      
  358.                     /* skip odd source pixel: */            
  359.                     sy1++; sy2++;                           
  360.                     su++; sv++; /* next chroma: */          
  361.                 }                                           
  362. cont_even:      /* continue loop with next even pixel: */   
  363.                 limit += src_dx;                            
  364.             } while (--count);                              
  365.             goto done;                                      
  366. last_pixel: /* use this branch to process last pixel:*/     
  367.             count++;                                        
  368. start_odd:  /* process odd pixels: */                       
  369.             do {                                            
  370.                 PIXEL(df,a11); PIXEL(df,a21);               
  371.                 YUV_LOAD_CONVERT_2x1(cc,df,a11,a21,sy1,sy2,su,sv); 
  372.                 STORE(df,d1,a11);                           
  373.                 d1+=BPP(df);                                
  374.                 STORE(df,d2,a21);                           
  375.                 d2+=BPP(df);                                
  376.                 /* inverted Bresenham stepping: */          
  377.                 do {                                        
  378.                     /* skip odd source pixel: */            
  379.                     sy1++; sy2++;                           
  380.                     su++; sv++; /* next chroma: */          
  381.                     if ((limit -= step) < 0)                
  382.                         goto cont_even;                     
  383.                     /* skip even source pixel: */           
  384.                     sy1++; sy2++;                           
  385.                 } while ((limit -= step) >= 0);             
  386. cont_odd:       limit += src_dx;                            
  387.             } while (--count);                              
  388. done:       ;                                               
  389.         }                                                   
  390.     }
  391. /*
  392.  * Generic YUVtoRGB double-row copy converter:
  393.  */
  394. #define DBLROW_COPY(cc,df,d1,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  395.     {                                                       
  396.         register int count = dest_dx;                       
  397.         /* convert first 2x1 block: */                      
  398.         if ((src_x & 1) && count) {                         
  399.             YUV_LOAD_CONVERT_STORE_2x1(cc,df,d1,d2,sy1,sy2,su,sv); 
  400.             count--;                                        
  401.         }                                                   
  402.         /* convert all integral 2x2 blocks: */              
  403.         while (count >= 2) {                                
  404.             YUV_LOAD_CONVERT_DITHER_STORE_2x2(cc,df,d1,d2,sy1,sy2,su,sv); 
  405.             count -= 2;                                     
  406.         }                                                   
  407.         /* convert last 2x1 block: */                       
  408.         if (count) {                                        
  409.             YUV_LOAD_CONVERT_STORE_2x1(cc,df,d1,d2,sy1,sy2,su,sv); 
  410.         }                                                   
  411.     }
  412. /*
  413.  * Generic YUVtoRGB double row stretching converter:
  414.  */
  415. #define DBLROW_STRETCH(cc,df,d1,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  416.     {                                                       
  417.         /* initialize local variables: */                   
  418.         register int count = dest_dx;                       
  419.         register int limit = dest_dx >> 1; /* !!! */        
  420.         register int step = src_dx;                         
  421.         /* # of pixels to be processed separately: */       
  422.         int remainder = dest_dx - limit;                    
  423.         if ((src_x + src_dx) & 1) remainder += dest_dx;     
  424.         remainder /= step;                                  
  425.         /* check row length: */                             
  426.         if (count) {                                        
  427.             PIXEL(df,a11); PIXEL(df,a12);                   
  428.             PIXEL(df,a21); PIXEL(df,a22);                   
  429.             /* update count: */                             
  430.             if ((count -= remainder) <= 0)                  
  431.                 goto convert_last;                          
  432.             /* check if we have an odd first block: */      
  433.             if (src_x & 1) {                                
  434.                 /* convert first 2x1 block: */              
  435.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  436.                 sy1++; sy2++; su++; sv++;                   
  437.                 goto rep_odd;                               
  438.             }                                               
  439.             /* the main loop: */                            
  440.             while (1) {                                     
  441.                 /* load & convert next 2x2 pixels: */       
  442.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  443.                 sy1+=2; sy2+=2; su++; sv++;                 
  444.                 /* replicate even pixels: */                
  445.                 do {                                        
  446.                     STORE(df,d1,a11);                       
  447.                     d1+=BPP(df);                            
  448.                     STORE(df,d2,a21);                       
  449.                     d2+=BPP(df);                            
  450.                     if (!(--count))                         
  451.                         goto rep_last;                      
  452.                 } while ((limit -= step) >= 0);             
  453.                 limit += dest_dx;                           
  454. rep_odd:        /* replicate odd pixels: */                 
  455.                 do {                                        
  456.                     STORE(df,d1,a12);                       
  457.                     d1+=BPP(df);                            
  458.                     STORE(df,d2,a22);                       
  459.                     d2+=BPP(df);                            
  460.                     if (!(--count))                         
  461.                         goto check_last;                    
  462.                 } while ((limit -= step) >= 0);             
  463.                 limit += dest_dx;                           
  464.             }                                               
  465. check_last: /* check if we need to convert one more pixel:*/
  466.             if ((src_x + src_dx) & 1) {                     
  467. convert_last:   /* last 2x1 block: */                       
  468.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  469.             }                                               
  470.             /* restore the number of remaining pixels: */   
  471. rep_last:   count += remainder;                             
  472.             while (count --) {                              
  473.                 /* replicate them: */                       
  474.                 STORE(df,d1,a12);                           
  475.                 d1+=BPP(df);                                
  476.                 STORE(df,d2,a22);                           
  477.                 d2+=BPP(df);                                
  478.             }                                               
  479.         }                                                   
  480.     }
  481. /*
  482.  * Generic row 2x-stretching converter:
  483.  */
  484. #define DBLROW_STRETCH2X(cc,df,d1,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  485.     {                                                       
  486.         /* initialize local variables: */                   
  487.         register int count = src_dx;                        
  488.         /* check row length: */                             
  489.         if (count) {                                        
  490.             PIXEL(df,a11); PIXEL(df,a12);                   
  491.             PIXEL(df,a21); PIXEL(df,a22);                   
  492.             /* check if we have an odd or single pixel: */  
  493.             if ((src_x & 1) || count < 2) {                 
  494.                 /* process first 2x1 block: */              
  495.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  496.                 sy1++; sy2++; su++; sv++;                   
  497.                 STORE(df,d1,a12);                           
  498.                 STORE(df,d2,a22);                           
  499.                 d1 += BPP(df);                              
  500.                 d2 += BPP(df);                              
  501.                 count -= 1;                                 
  502.             } else {                                        
  503.                 /* process first 2x2 block: */              
  504.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  505.                 sy1+=2; sy2+=2; su++; sv++;                 
  506.                 STORE(df,d1,a11);                           
  507.                 STORE(df,d2,a21);                           
  508.                 /* calculate & store half-pixels: */        
  509.                 AVERAGE(df,a11,a11,a12);                    
  510.                 AVERAGE(df,a21,a21,a22);                    
  511.                 STORE(df,d1+BPP(df),a11);                   
  512.                 STORE(df,d1+2*BPP(df),a12);                 
  513.                 STORE(df,d2+BPP(df),a21);                   
  514.                 STORE(df,d2+2*BPP(df),a22);                 
  515.                 d1 += 3*BPP(df);                            
  516.                 d2 += 3*BPP(df);                            
  517.                 count -= 2;                                 
  518.             }                                               
  519.             /* process all internal 4x2 blocks: */          
  520.             while (count >= 4) {                            
  521.                 /* process second 2x2 block: */             
  522.                 PIXEL(df,a13); PIXEL(df,a23);               
  523.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a13,a21,a23,sy1,sy2,su,sv); 
  524.                 sy1+=2; sy2+=2; su++; sv++;                 
  525.                 /* calculate & store first half-pixels: */  
  526.                 AVERAGE(df,a12,a12,a11);                    
  527.                 AVERAGE(df,a22,a22,a21);                    
  528.                 STORE(df,d1+0*BPP(df),a12);                 
  529.                 STORE(df,d1+1*BPP(df),a11);                 
  530.                 STORE(df,d2+0*BPP(df),a22);                 
  531.                 STORE(df,d2+1*BPP(df),a21);                 
  532.                 /* calculate & store second half-pixels: */ 
  533.                 AVERAGE(df,a11,a11,a13);                    
  534.                 AVERAGE(df,a21,a21,a23);                    
  535.                 STORE(df,d1+2*BPP(df),a11);                 
  536.                 STORE(df,d1+3*BPP(df),a13);                 
  537.                 STORE(df,d2+2*BPP(df),a21);                 
  538.                 STORE(df,d2+3*BPP(df),a23);                 
  539.                 /* process third 2x2 block: */              
  540.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  541.                 sy1+=2; sy2+=2; su++; sv++;                 
  542.                 /* calculate & store third half-pixels: */  
  543.                 AVERAGE(df,a13,a13,a11);                    
  544.                 AVERAGE(df,a23,a23,a21);                    
  545.                 STORE(df,d1+4*BPP(df),a13);                 
  546.                 STORE(df,d1+5*BPP(df),a11);                 
  547.                 STORE(df,d2+4*BPP(df),a23);                 
  548.                 STORE(df,d2+5*BPP(df),a21);                 
  549.                 /* calculate & store fourth half-pixels: */ 
  550.                 AVERAGE(df,a11,a11,a12);                    
  551.                 AVERAGE(df,a21,a21,a22);                    
  552.                 STORE(df,d1+6*BPP(df),a11);                 
  553.                 STORE(df,d1+7*BPP(df),a12);                 
  554.                 STORE(df,d2+6*BPP(df),a21);                 
  555.                 STORE(df,d2+7*BPP(df),a22);                 
  556.                 d1 += 8*BPP(df);                            
  557.                 d2 += 8*BPP(df);                            
  558.                 count -= 4;                                 
  559.             }                                               
  560.             /* check if we have one more 2x2 block: */      
  561.             if (count >= 2) {                               
  562.                 /* process last 2x2 block: */               
  563.                 PIXEL(df,a13); PIXEL(df,a23);               
  564.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a13,a21,a23,sy1,sy2,su,sv); 
  565.                 sy1+=2; sy2+=2; su++; sv++;                 
  566.                 /* calculate & store first half-pixels: */  
  567.                 AVERAGE(df,a12,a12,a11);                    
  568.                 AVERAGE(df,a22,a22,a21);                    
  569.                 STORE(df,d1+0*BPP(df),a12);                 
  570.                 STORE(df,d1+1*BPP(df),a11);                 
  571.                 STORE(df,d2+0*BPP(df),a22);                 
  572.                 STORE(df,d2+1*BPP(df),a21);                 
  573.                 /* calculate & store second half-pixels: */ 
  574.                 AVERAGE(df,a11,a11,a13);                    
  575.                 AVERAGE(df,a21,a21,a23);                    
  576.                 STORE(df,d1+2*BPP(df),a11);                 
  577.                 STORE(df,d1+3*BPP(df),a13);                 
  578.                 STORE(df,d2+2*BPP(df),a21);                 
  579.                 STORE(df,d2+3*BPP(df),a23);                 
  580.                 /* move last converted pixels to a12/22: */ 
  581.                 COPY(df,a12,a13);                           
  582.                 COPY(df,a22,a23);                           
  583.                 d1 += 4*BPP(df);                            
  584.                 d2 += 4*BPP(df);                            
  585.                 count -= 2;                                 
  586.             }                                               
  587.             /* check if we have one more 2x1 block: */      
  588.             if (count >= 1) {                               
  589.                 /* process last 2x1 block: */               
  590.                 YUV_LOAD_CONVERT_2x1(cc,df,a11,a21,sy1,sy2,su,sv); 
  591.                 /* calculate & store last half-pixels: */   
  592.                 AVERAGE(df,a12,a12,a11);                    
  593.                 AVERAGE(df,a22,a22,a21);                    
  594.                 STORE(df,d1+0*BPP(df),a12);                 
  595.                 STORE(df,d1+1*BPP(df),a11);                 
  596.                 STORE(df,d1+2*BPP(df),a11);                 
  597.                 STORE(df,d2+0*BPP(df),a22);                 
  598.                 STORE(df,d2+1*BPP(df),a21);                 
  599.                 STORE(df,d2+2*BPP(df),a21);                 
  600.             } else {                                        
  601.                 /* just replicate last pixels: */           
  602.                 STORE(df,d1,a12);                           
  603.                 STORE(df,d2,a22);                           
  604.             }                                               
  605.         }                                                   
  606.     }
  607. /*
  608.  * Generic double-row 2x+ stretching converter:
  609.  *  "???" comments mean that under normal conditions these jumps
  610.  *  should never be executed; nevertheless, I left these checks
  611.  *  in place to guarantee the correct termination of the algorithm
  612.  *  in all possible scenarios.
  613.  */
/*
 * DBLROW_STRETCH2XPLUS: converts one pair of adjacent YUV source rows into
 * two RGB destination rows while stretching horizontally by a factor of 2x
 * or more. Converted pixels and the half-pixels averaged between them are
 * replicated with Bresenham-style error accumulation (limit/step).
 * Parameters:
 *  cc, df         - color-converter / destination-format selectors consumed
 *                   by the helper macros (PIXEL, STORE, BPP, AVERAGE,
 *                   YUV_LOAD_CONVERT_*), all defined elsewhere in this file
 *  d1, d2         - destination pointers for the two output rows; advanced
 *                   by BPP(df) per stored pixel
 *  dest_x,dest_dx - destination x offset / width in pixels (dest_x itself
 *                   is not referenced in this body)
 *  sy1, sy2       - luma pointers for the two source rows
 *  su, sv         - chroma pointers, advanced once per two luma samples
 *                   (presumably 2x-subsampled chroma - confirm with callers)
 *  src_x, src_dx  - source x offset / width; the parity of src_x and of
 *                   (src_x + src_dx) decides whether the first/last blocks
 *                   are chroma-unaligned 2x1 blocks or full 2x2 blocks
 * "remainder" is the number of trailing output pixels emitted by plain
 * replication of the last converted pixel (see the rep_last loop at the
 * bottom). Per the file comment above, the jumps marked "???" should be
 * unreachable under normal conditions but are kept for guaranteed
 * termination.
 */
  614. #define DBLROW_STRETCH2XPLUS(cc,df,d1,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  615.     {                                                       
  616.         /* initialize local variables: */                   
  617.         register int count = dest_dx;                       
  618.         register int limit = dest_dx >> 1; /* !!! */        
  619.         register int step = src_dx << 1;  /* !!! */         
  620.         /* # of half-pixels to be processed separately: */  
  621.         int remainder = 3*dest_dx - limit;                  
  622.         if ((src_x + src_dx) & 1) remainder += 2*dest_dx;   
  623.         remainder /= step;                                  
  624.         /* check row length: */                             
  625.         if (count) {                                        
  626.             PIXEL(df,a11); PIXEL(df,a12);                   
  627.             PIXEL(df,a21); PIXEL(df,a22);                   
  628.             PIXEL(df,a13); PIXEL(df,a23);                   
  629.             /* check if an odd or single 2x1 block: */      
  630.             if ((src_x & 1) || src_dx < 2) {                
  631.                 /* convert first 2x1 block: */              
  632.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  633.                 sy1++; sy2++; su++; sv++;                   
  634.                 /* update count: */                         
  635.                 if ((count -= remainder) <= 0)              
  636.                     goto rep_last;                          
  637.                 goto rep_odd;                               
  638.             } else {                                        
  639.                 /* convert first 2x2 block: */              
  640.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  641.                 sy1+=2; sy2+=2; su++; sv++;                 
  642.                 /* update count: */                         
  643.                 if ((count -= remainder) <= 0)              
  644.                     goto rep_last_2;        /* ??? */       
  645.                 goto rep_even;                              
  646.             }                                               
  647.             /* the main loop: */                            
  648.             while (1) {                                     
  649.                 /* load & convert second 2x2 block: */      
  650.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a13,a21,a23,sy1,sy2,su,sv); 
  651.                 sy1+=2; sy2+=2; su++; sv++;                 
  652.                 /* calc. & replicate first half-pixels: */  
  653.                 AVERAGE(df,a12,a12,a11);                    
  654.                 AVERAGE(df,a22,a22,a21);                    
  655.                 do {                                        
  656.                     STORE(df,d1,a12);                       
  657.                     d1+=BPP(df);                            
  658.                     STORE(df,d2,a22);                       
  659.                     d2+=BPP(df);                            
  660.                     if (!(--count))                         
  661.                         goto rep_last;      /* ??? */       
  662.                 } while ((limit -= step) >= 0);             
  663.                 limit += dest_dx;                           
  664.                 /* replicate second even integral pixels: */
  665.                 do {                                        
  666.                     STORE(df,d1,a11);                       
  667.                     d1+=BPP(df);                            
  668.                     STORE(df,d2,a21);                       
  669.                     d2+=BPP(df);                            
  670.                     if (!(--count))                         
  671.                         goto rep_last_2;    /* ??? */       
  672.                 } while ((limit -= step) >= 0);             
  673.                 limit += dest_dx;                           
  674.                 /* calc. & replicate second half-pixels: */ 
  675.                 AVERAGE(df,a11,a11,a13);                    
  676.                 AVERAGE(df,a21,a21,a23);                    
  677.                 do {                                        
  678.                     STORE(df,d1,a11);                       
  679.                     d1+=BPP(df);                            
  680.                     STORE(df,d2,a21);                       
  681.                     d2+=BPP(df);                            
  682.                     if (!(--count))                         
  683.                         goto rep_last_3;    /* !!! */       
  684.                 } while ((limit -= step) >= 0);             
  685.                 limit += dest_dx;                           
  686.                 /* replicate second odd integral pixels: */ 
  687.                 do {                                        
  688.                     STORE(df,d1,a13);                       
  689.                     d1+=BPP(df);                            
  690.                     STORE(df,d2,a23);                       
  691.                     d2+=BPP(df);                            
  692.                     if (!(--count))                         
  693.                         goto last_pixel_2;  /* !!! */       
  694.                 } while ((limit -= step) >= 0);             
  695.                 limit += dest_dx;                           
  696.                 /* load & convert third 2x2 block: */       
  697.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  698.                 sy1+=2; sy2+=2; su++; sv++;                 
  699.                 /* calc. & replicate third half-pixels: */  
  700.                 AVERAGE(df,a13,a13,a11);                    
  701.                 AVERAGE(df,a23,a23,a21);                    
  702.                 do {                                        
  703.                     STORE(df,d1,a13);                       
  704.                     d1+=BPP(df);                            
  705.                     STORE(df,d2,a23);                       
  706.                     d2+=BPP(df);                            
  707.                     if (!(--count))                         
  708.                         goto rep_last_3;    /* ??? */       
  709.                 } while ((limit -= step) >= 0);             
  710.                 limit += dest_dx;                           
  711. rep_even:       /* replicate third even integral pixels: */ 
  712.                 do {                                        
  713.                     STORE(df,d1,a11);                       
  714.                     d1+=BPP(df);                            
  715.                     STORE(df,d2,a21);                       
  716.                     d2+=BPP(df);                            
  717.                     if (!(--count))                         
  718.                         goto rep_last_2;    /* ??? */       
  719.                 } while ((limit -= step) >= 0);             
  720.                 limit += dest_dx;                           
  721.                 /* calc. & replicate fourth half-pixels: */ 
  722.                 AVERAGE(df,a11,a11,a12);                    
  723.                 AVERAGE(df,a21,a21,a22);                    
  724.                 do {                                        
  725.                     STORE(df,d1,a11);                       
  726.                     d1+=BPP(df);                            
  727.                     STORE(df,d2,a21);                       
  728.                     d2+=BPP(df);                            
  729.                     if (!(--count))                         
  730.                         goto rep_last;      /* !!! */       
  731.                 } while ((limit -= step) >= 0);             
  732.                 limit += dest_dx;                           
  733. rep_odd:        /* replicate third odd integral pixels: */  
  734.                 do {                                        
  735.                     STORE(df,d1,a12);                       
  736.                     d1+=BPP(df);                            
  737.                     STORE(df,d2,a22);                       
  738.                     d2+=BPP(df);                            
  739.                     if (!(--count))                         
  740.                         goto last_pixel;    /* !!! */       
  741.                 } while ((limit -= step) >= 0);             
  742.                 limit += dest_dx;                           
  743.             }                                               
  744. last_pixel_2:/* store last integral pixels in a11/21: */    
  745.             COPY(df,a11,a13);                               
  746.             COPY(df,a21,a23);                               
  747. last_pixel: /* check if we need to convert one more pixel:*/
  748.             if ((src_x + src_dx) & 1) {                     
  749.                 /* update count & remainder: */             
  750.                 register int r2 = remainder >> 1;           
  751.                 count += r2; remainder -= r2;               
  752.                 if (count <= 0)                             
  753.                     goto rep_last;                          
  754.                 /* load & convert last 2x1 block: */        
  755.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  756.                 /* calc. & replicate last half-pixels: */   
  757.                 AVERAGE(df,a11,a11,a12);                    
  758.                 AVERAGE(df,a21,a21,a22);                    
  759.                 do {                                        
  760.                     STORE(df,d1,a11);                       
  761.                     d1+=BPP(df);                            
  762.                     STORE(df,d2,a21);                       
  763.                     d2+=BPP(df);                            
  764.                     if (!(--count))                         
  765.                         goto rep_last;      /* !!! */       
  766.                 } while ((limit -= step) >= 0);             
  767.             }                                               
  768.             goto rep_last;                                  
  769. rep_last_3: /* store last converted pixels in a12/22: */    
  770.             COPY(df,a12,a13);                               
  771.             COPY(df,a22,a23);                               
  772.             goto rep_last;                                  
  773. rep_last_2: /* store last converted pixels in a12/22: */    
  774.             COPY(df,a12,a11);                               
  775.             COPY(df,a22,a21);                               
  776.             /* restore the number of remaining pixels: */   
  777. rep_last:   count += remainder;                             
  778.             while (count --) {                              
  779.                 /* replicate them: */                       
  780.                 STORE(df,d1,a12);                           
  781.                 d1+=BPP(df);                                
  782.                 STORE(df,d2,a22);                           
  783.                 d2+=BPP(df);                                
  784.             }                                               
  785.         }                                                   
  786.     }
  787. /*** Generic YUVtoRGB double-row 2x converters: ************/
  788. /*
  789.  * Generic YUVtoRGB double-row shrinking converter:
  790.  *  uses read-ahead optimization to process full 2x2 blocks
  791.  *  whenever possible.
  792.  */
/*
 * DBLROW2X_SHRINK: horizontal shrinking converter that also emits the
 * vertically-interpolated rows (doubled vertical output). A Bresenham step
 * (limit/step over src_dx/dest_dx) is taken ahead of each conversion so
 * that a full 2x2 source block is converted whenever the next source pixel
 * will also be emitted; otherwise a single 2x1 block is converted and the
 * skipped source pixels are stepped over.
 * Parameters:
 *  cc, df         - color-converter / destination-format selectors for the
 *                   helper macros (defined elsewhere in this file)
 *  d0             - previous output row, read via LOAD_AVERAGE
 *  d01            - output row averaged between d0 and d1
 *  d1, d2         - the two directly-converted output rows
 *  d12            - output row averaged between d1 and d2
 *  dest_x,dest_dx - destination x offset / width (dest_x unused here)
 *  sy1, sy2       - luma pointers for the two source rows
 *  su, sv         - chroma pointers, advanced once per two luma samples
 *  src_x, src_dx  - source x offset / width; src_x parity selects the
 *                   odd-pixel entry point (start_odd)
 * NOTE(review): the even/odd loops cross-jump into each other via
 * cont_even/cont_odd to keep the chroma pointers phase-correct.
 */
  793. #define DBLROW2X_SHRINK(cc,df,d0,d01,d1,d12,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  794.     {                                                       
  795.         /* initialize local variables: */                   
  796.         register int count = dest_dx;                       
  797.         register int limit = src_dx >> 1; /* -1 */          
  798.         register int step = dest_dx;                        
  799.         /* check row length: */                             
  800.         if (count) {                                        
  801.             /* check if we have an odd first block: */      
  802.             if (src_x & 1)                                  
  803.                 goto start_odd;                             
  804.             /* process even pixels: */                      
  805.             do {                                            
  806.                 PIXEL(df,a11); PIXEL(df,a12);               
  807.                 PIXEL(df,a21); PIXEL(df,a22);               
  808.                 /* make one Bresenham step ahead: */        
  809.                 if ((limit -= step) < 0) {                  
  810.                     limit += src_dx;                        
  811.                     /* can we process 2x2 pixels? */        
  812.                     if (!--count)                           
  813.                         goto last_pixel;                    
  814.                     /* process full 2x2 block: */           
  815.                     YUV_LOAD_CONVERT_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  816.                     sy1+=2; sy2+=2; su++; sv++;             
  817.                     STORE(df,d1,a11);                       
  818.                     STORE(df,d1+BPP(df),a12);               
  819.                     d1+=2*BPP(df);                          
  820.                     STORE(df,d2,a21);                       
  821.                     STORE(df,d2+BPP(df),a22);               
  822.                     d2+=2*BPP(df);                          
  823.                     /* process average pixels: */           
  824.                     AVERAGE(df,a21,a11,a21);                
  825.                     AVERAGE(df,a22,a12,a22);                
  826.                     LOAD_AVERAGE(df,a11,a11,d0);            
  827.                     LOAD_AVERAGE(df,a12,a12,d0+BPP(df));    
  828.                     d0+=2*BPP(df);                          
  829.                     STORE(df,d01,a11);                      
  830.                     STORE(df,d01+BPP(df),a12);              
  831.                     d01+=2*BPP(df);                         
  832.                     STORE(df,d12,a21);                      
  833.                     STORE(df,d12+BPP(df),a22);              
  834.                     d12+=2*BPP(df);                         
  835.                 } else {                                    
  836.                     /* proc. first 2x1 block & skip next: */
  837.                     YUV_LOAD_CONVERT_2x1(cc,df,a11,a21,sy1,sy2,su,sv); 
  838.                     sy1+=2; sy2+=2; su++; sv++;             
  839.                     STORE(df,d1,a11);                       
  840.                     d1+=BPP(df);                            
  841.                     STORE(df,d2,a21);                       
  842.                     d2+=BPP(df);                            
  843.                     /* process average pixels: */           
  844.                     AVERAGE(df,a21,a11,a21);                
  845.                     LOAD_AVERAGE(df,a11,a11,d0);            
  846.                     d0+=BPP(df);                            
  847.                     STORE(df,d01,a11);                      
  848.                     d01+=BPP(df);                           
  849.                     STORE(df,d12,a21);                      
  850.                     d12+=BPP(df);                           
  851.                 }                                           
  852.                 /* inverted Bresenham stepping: */          
  853.                 while ((limit -= step) >= 0) {              
  854.                     /* skip next even source pixel: */      
  855.                     sy1++; sy2++;                           
  856.                     if ((limit -= step) < 0)                
  857.                         goto cont_odd;                      
  858.                     /* skip odd source pixel: */            
  859.                     sy1++; sy2++;                           
  860.                     su++; sv++; /* next chroma: */          
  861.                 }                                           
  862. cont_even:      /* continue loop with next even pixel: */   
  863.                 limit += src_dx;                            
  864.             } while (--count);                              
  865.             goto done;                                      
  866. last_pixel: /* use this branch to process last pixel:*/     
  867.             count++;                                        
  868. start_odd:  /* process odd pixels: */                       
  869.             do {                                            
  870.                 /* convert 2x1 block: */                    
  871.                 PIXEL(df,a11); PIXEL(df,a21);               
  872.                 YUV_LOAD_CONVERT_2x1(cc,df,a11,a21,sy1,sy2,su,sv); 
  873.                 STORE(df,d1,a11);                           
  874.                 d1+=BPP(df);                                
  875.                 STORE(df,d2,a21);                           
  876.                 d2+=BPP(df);                                
  877.                 /* process average pixels: */               
  878.                 AVERAGE(df,a21,a11,a21);                    
  879.                 LOAD_AVERAGE(df,a11,a11,d0);                
  880.                 d0+=BPP(df);                                
  881.                 STORE(df,d01,a11);                          
  882.                 d01+=BPP(df);                               
  883.                 STORE(df,d12,a21);                          
  884.                 d12+=BPP(df);                               
  885.                 /* inverted Bresenham stepping: */          
  886.                 do {                                        
  887.                     /* skip odd source pixel: */            
  888.                     sy1++; sy2++;                           
  889.                     su++; sv++; /* next chroma: */          
  890.                     if ((limit -= step) < 0)                
  891.                         goto cont_even;                     
  892.                     /* skip even source pixel: */           
  893.                     sy1++; sy2++;                           
  894.                 } while ((limit -= step) >= 0);             
  895. cont_odd:       limit += src_dx;                            
  896.             } while (--count);                              
  897. done:       ;                                               
  898.         }                                                   
  899.     }
  900. /*
  901.  * Generic YUVtoRGB double-row copy converter:
  902.  */
/*
 * DBLROW2X_COPY: 1:1 horizontal copy converter with doubled vertical output
 * (the interpolated rows d01/d12 are produced alongside d1/d2).
 * Parameters:
 *  cc, df         - color-converter / destination-format selectors for the
 *                   helper macros (defined elsewhere in this file)
 *  d0             - previous output row (used for interpolation into d01)
 *  d01            - output row interpolated between d0 and d1
 *  d1, d2         - the two directly-converted output rows
 *  d12            - output row interpolated between d1 and d2
 *  dest_x,dest_dx - destination x offset / width (dest_x unused here)
 *  sy1, sy2       - luma pointers for the two source rows
 *  su, sv         - chroma pointers
 *  src_x, src_dx  - source x offset / width; an odd src_x forces a leading
 *                   chroma-unaligned 2x1 block before the 2x2 main loop
 * NOTE(review): unlike the neighboring macros, no explicit pointer
 * increments appear here - the YUV_LOAD_CONVERT_AVERAGE_*_STORE_* macros
 * presumably advance all source/destination pointers themselves; confirm
 * against their definitions elsewhere in this file.
 */
  903. #define DBLROW2X_COPY(cc,df,d0,d01,d1,d12,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  904.     {                                                       
  905.         register int count = dest_dx;                       
  906.         /* convert first 2x1 block: */                      
  907.         if ((src_x & 1) && count) {                         
  908.             YUV_LOAD_CONVERT_AVERAGE_STORE_2x1(cc,df,d0,d01,d1,d12,d2,sy1,sy2,su,sv); 
  909.             count--;                                        
  910.         }                                                   
  911.         /* convert all integral 2x2 blocks: */              
  912.         while (count >= 2) {                                
  913.             YUV_LOAD_CONVERT_AVERAGE_DITHER_STORE_2x2(cc,df,d0,d01,d1,d12,d2,sy1,sy2,su,sv); 
  914.             count -= 2;                                     
  915.         }                                                   
  916.         /* convert last 2x1 block: */                       
  917.         if (count) {                                        
  918.             YUV_LOAD_CONVERT_AVERAGE_STORE_2x1(cc,df,d0,d01,d1,d12,d2,sy1,sy2,su,sv); 
  919.         }                                                   
  920.     }
  921. /*
  922.  * Generic YUVtoRGB double row stretching converter:
  923.  */
/*
 * DBLROW2X_STRETCH: horizontal stretching converter with doubled vertical
 * output. Each converted source pixel is replicated via Bresenham stepping
 * (limit/step over dest_dx/src_dx); presumably intended for stretch ratios
 * up to 2x, with larger ratios handled by the *_STRETCH2X* variants -
 * confirm against the dispatch code elsewhere in this file.
 * Parameters:
 *  cc, df         - color-converter / destination-format selectors for the
 *                   helper macros (defined elsewhere in this file)
 *  d0             - previous output row, read via LOAD_AVERAGE
 *  d01            - output row averaged between d0 and the converted row
 *  d1, d2         - the two directly-converted output rows
 *  d12            - output row averaged between d1 and d2
 *  dest_x,dest_dx - destination x offset / width (dest_x unused here)
 *  sy1, sy2       - luma pointers for the two source rows
 *  su, sv         - chroma pointers, advanced once per two luma samples
 *  src_x, src_dx  - source x offset / width; parities select 2x1 vs 2x2
 *                   handling at the row ends
 * Locals: a01x holds the average of the converted top pixel with the pixel
 * loaded from d0; a12x holds the average of the two converted rows.
 * "remainder" counts trailing output pixels emitted by plain replication
 * in the rep_last loop.
 */
  924. #define DBLROW2X_STRETCH(cc,df,d0,d01,d1,d12,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  925.     {                                                       
  926.         /* initialize local variables: */                   
  927.         register int count = dest_dx;                       
  928.         register int limit = dest_dx >> 1; /* !!! */        
  929.         register int step = src_dx;                         
  930.         /* # of pixels to be processed separately: */       
  931.         int remainder = dest_dx - limit;                    
  932.         if ((src_x + src_dx) & 1) remainder += dest_dx;     
  933.         remainder /= step;                                  
  934.         /* check row length: */                             
  935.         if (count) {                                        
  936.             PIXEL(df,a11); PIXEL(df,a12);                   
  937.             PIXEL(df,a21); PIXEL(df,a22);                   
  938.             PIXEL(df,a01x);PIXEL(df,a12x);                  
  939.             /* update count: */                             
  940.             if ((count -= remainder) <= 0)                  
  941.                 goto convert_last;                          
  942.             /* check if we have an odd first block: */      
  943.             if (src_x & 1) {                                
  944.                 /* convert first 2x1 block: */              
  945.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  946.                 sy1++; sy2++; su++; sv++;                   
  947.                 goto rep_odd;                               
  948.             }                                               
  949.             /* the main loop: */                            
  950.             while (1) {                                     
  951.                 /* load & convert next 2x2 pixels: */       
  952.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  953.                 sy1+=2; sy2+=2; su++; sv++;                 
  954.                 /* average and replicate even pixels: */    
  955.                 LOAD_AVERAGE(df,a01x,a11,d0);               
  956.                 AVERAGE(df,a12x,a11,a21);                   
  957.                 do {                                        
  958.                     d0+=BPP(df);                            
  959.                     STORE(df,d01,a01x);                     
  960.                     d01+=BPP(df);                           
  961.                     STORE(df,d1,a11);                       
  962.                     d1+=BPP(df);                            
  963.                     STORE(df,d12,a12x);                     
  964.                     d12+=BPP(df);                           
  965.                     STORE(df,d2,a21);                       
  966.                     d2+=BPP(df);                            
  967.                     if (!(--count))                         
  968.                         goto rep_last;                      
  969.                 } while ((limit -= step) >= 0);             
  970.                 limit += dest_dx;                           
  971. rep_odd:        /* average & replicate odd pixels: */       
  972.                 LOAD_AVERAGE(df,a01x,a12,d0);               
  973.                 AVERAGE(df,a12x,a12,a22);                   
  974.                 do {                                        
  975.                     d0+=BPP(df);                            
  976.                     STORE(df,d01,a01x);                     
  977.                     d01+=BPP(df);                           
  978.                     STORE(df,d1,a12);                       
  979.                     d1+=BPP(df);                            
  980.                     STORE(df,d12,a12x);                     
  981.                     d12+=BPP(df);                           
  982.                     STORE(df,d2,a22);                       
  983.                     d2+=BPP(df);                            
  984.                     if (!(--count))                         
  985.                         goto check_last;                    
  986.                 } while ((limit -= step) >= 0);             
  987.                 limit += dest_dx;                           
  988.             }                                               
  989. check_last: /* check if we need to convert one more pixel:*/
  990.             if ((src_x + src_dx) & 1) {                     
  991. convert_last:   /* last 2x1 block: */                       
  992.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  993.                 /* calc. average pixels: */                 
  994.                 LOAD_AVERAGE(df,a01x,a12,d0);               
  995.                 AVERAGE(df,a12x,a12,a22);                   
  996.             }                                               
  997.             /* restore the number of remaining pixels: */   
  998. rep_last:   count += remainder;                             
  999.             while (count --) {                              
  1000.                 /* replicate them: */                       
  1001.                 STORE(df,d01,a01x);                         
  1002.                 d01+=BPP(df);                               
  1003.                 STORE(df,d1,a12);                           
  1004.                 d1+=BPP(df);                                
  1005.                 STORE(df,d12,a12x);                         
  1006.                 d12+=BPP(df);                               
  1007.                 STORE(df,d2,a22);                           
  1008.                 d2+=BPP(df);                                
  1009.             }                                               
  1010.         }                                                   
  1011.     }
  1012. /*
  1013.  * Generic row 2x-stretching converter:
  1014.  */
  1015. #define DBLROW2X_STRETCH2X(cc,df,d0,d01,d1,d12,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  1016.     {                                                       
  1017.         /* initialize local variables: */                   
  1018.         register int count = src_dx;                        
  1019.         /* check row length: */                             
  1020.         if (count) {                                        
  1021.             PIXEL(df,a011);PIXEL(df,a012);                  
  1022.             PIXEL(df,a11); PIXEL(df,a12);                   
  1023.             PIXEL(df,a121);PIXEL(df,a122);                  
  1024.             PIXEL(df,a21); PIXEL(df,a22);                   
  1025.             /* check if we have an odd or single pixel: */  
  1026.             if ((src_x & 1) || count < 2) {                 
  1027.                 /* process first 2x1 block: */              
  1028.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  1029.                 sy1++; sy2++; su++; sv++;                   
  1030.                 STORE(df,d1+0*BPP(df),a12);                 
  1031.                 STORE(df,d2+0*BPP(df),a22);                 
  1032.                 /* process vertical half-pixels: */         
  1033.                 LOAD_AVERAGE(df,a012,a12,d0);               
  1034.                 STORE(df,d01+0*BPP(df),a012);               
  1035.                 AVERAGE(df,a122,a12,a22);                   
  1036.                 STORE(df,d12+0*BPP(df),a122);               
  1037.                 /* shift pointers: */                       
  1038.                 d0  += BPP(df);                             
  1039.                 d01 += BPP(df);                             
  1040.                 d1  += BPP(df);                             
  1041.                 d12 += BPP(df);                             
  1042.                 d2  += BPP(df);                             
  1043.                 count -= 1;                                 
  1044.             } else {                                         
  1045.                 /* process first 2x2 block: */              
  1046.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  1047.                 sy1+=2; sy2+=2; su++; sv++;                 
  1048.                 STORE(df,d1+0*BPP(df),a11);                 
  1049.                 STORE(df,d2+0*BPP(df),a21);                 
  1050.                 STORE(df,d1+2*BPP(df),a12);                 
  1051.                 STORE(df,d2+2*BPP(df),a22);                 
  1052.                 /* process vertical half-pixels: */         
  1053.                 LOAD_AVERAGE(df,a011,a11,d0);               
  1054.                 STORE(df,d01+0*BPP(df),a011);               
  1055.                 AVERAGE(df,a121,a11,a21);                   
  1056.                 STORE(df,d12+0*BPP(df),a121);               
  1057.                 LOAD_AVERAGE(df,a012,a12,d0+2*BPP(df));     
  1058.                 STORE(df,d01+2*BPP(df),a012);               
  1059.                 AVERAGE(df,a122,a12,a22);                   
  1060.                 STORE(df,d12+2*BPP(df),a122);               
  1061.                 /* process horisontal half-pixels: */       
  1062.                 AVERAGE(df,a011,a011,a012);                 
  1063.                 STORE(df,d01+1*BPP(df),a011);               
  1064.                 AVERAGE(df,a11,a11,a12);                    
  1065.                 STORE(df,d1+1*BPP(df),a11);                 
  1066.                 AVERAGE(df,a121,a121,a122);                 
  1067.                 STORE(df,d12+1*BPP(df),a121);               
  1068.                 AVERAGE(df,a21,a21,a22);                    
  1069.                 STORE(df,d2+1*BPP(df),a21);                 
  1070.                 /* shift pointers: */                       
  1071.                 d0  += 3*BPP(df);                           
  1072.                 d01 += 3*BPP(df);                           
  1073.                 d1  += 3*BPP(df);                           
  1074.                 d12 += 3*BPP(df);                           
  1075.                 d2  += 3*BPP(df);                           
  1076.                 count -= 2;                                 
  1077.             }                                               
  1078.             /* process all internal 4x2 blocks: */          
  1079.             while (count >= 4) {                            
  1080.                 /* process second 2x2 block: */             
  1081.                 PIXEL(df,a013); PIXEL(df,a13);              
  1082.                 PIXEL(df,a123); PIXEL(df,a23);              
  1083.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a13,a21,a23,sy1,sy2,su,sv); 
  1084.                 sy1+=2; sy2+=2; su++; sv++;                 
  1085.                 STORE(df,d1+1*BPP(df),a11);                 
  1086.                 STORE(df,d2+1*BPP(df),a21);                 
  1087.                 STORE(df,d1+3*BPP(df),a13);                 
  1088.                 STORE(df,d2+3*BPP(df),a23);                 
  1089.                 /* process vertical half-pixels: */         
  1090.                 LOAD_AVERAGE(df,a011,a11,d0+1*BPP(df));     
  1091.                 STORE(df,d01+1*BPP(df),a011);               
  1092.                 AVERAGE(df,a121,a11,a21);                   
  1093.                 STORE(df,d12+1*BPP(df),a121);               
  1094.                 LOAD_AVERAGE(df,a013,a13,d0+3*BPP(df));     
  1095.                 STORE(df,d01+3*BPP(df),a013);               
  1096.                 AVERAGE(df,a123,a13,a23);                   
  1097.                 STORE(df,d12+3*BPP(df),a123);               
  1098.                 /* process horisontal half-pixels: */       
  1099.                 AVERAGE(df,a012,a012,a011);                 
  1100.                 STORE(df,d01+0*BPP(df),a012);               
  1101.                 AVERAGE(df,a12,a12,a11);                    
  1102.                 STORE(df,d1+0*BPP(df),a12);                 
  1103.                 AVERAGE(df,a122,a122,a121);                 
  1104.                 STORE(df,d12+0*BPP(df),a122);               
  1105.                 AVERAGE(df,a22,a22,a21);                    
  1106.                 STORE(df,d2+0*BPP(df),a22);                 
  1107.                 AVERAGE(df,a011,a011,a013);                 
  1108.                 STORE(df,d01+2*BPP(df),a011); /*!!!*/       
  1109.                 AVERAGE(df,a11,a11,a13);                    
  1110.                 STORE(df,d1+2*BPP(df),a11);                 
  1111.                 AVERAGE(df,a121,a121,a123);                 
  1112.                 STORE(df,d12+2*BPP(df),a121); /*!!!*/       
  1113.                 AVERAGE(df,a21,a21,a23);                    
  1114.                 STORE(df,d2+2*BPP(df),a21);                 
  1115.                 /* process third 2x2 block: */              
  1116.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  1117.                 sy1+=2; sy2+=2; su++; sv++;                 
  1118.                 STORE(df,d1+5*BPP(df),a11);                 
  1119.                 STORE(df,d2+5*BPP(df),a21);                 
  1120.                 STORE(df,d1+7*BPP(df),a12);                 
  1121.                 STORE(df,d2+7*BPP(df),a22);                 
  1122.                 /* process vertical half-pixels: */         
  1123.                 LOAD_AVERAGE(df,a011,a11,d0+5*BPP(df));     
  1124.                 STORE(df,d01+5*BPP(df),a011);               
  1125.                 AVERAGE(df,a121,a11,a21);                   
  1126.                 STORE(df,d12+5*BPP(df),a121);               
  1127.                 LOAD_AVERAGE(df,a012,a12,d0+7*BPP(df));     
  1128.                 STORE(df,d01+7*BPP(df),a012);               
  1129.                 AVERAGE(df,a122,a12,a22);                   
  1130.                 STORE(df,d12+7*BPP(df),a122);               
  1131.                 /* process horisontal half-pixels: */       
  1132.                 AVERAGE(df,a013,a013,a011);                 
  1133.                 STORE(df,d01+4*BPP(df),a013);               
  1134.                 AVERAGE(df,a13,a13,a11);                    
  1135.                 STORE(df,d1+4*BPP(df),a13);                 
  1136.                 AVERAGE(df,a123,a123,a121);                 
  1137.                 STORE(df,d12+4*BPP(df),a123);               
  1138.                 AVERAGE(df,a23,a23,a21);                    
  1139.                 STORE(df,d2+4*BPP(df),a23);                 
  1140.                 AVERAGE(df,a011,a011,a012);                 
  1141.                 STORE(df,d01+6*BPP(df),a011);               
  1142.                 AVERAGE(df,a11,a11,a12);                    
  1143.                 STORE(df,d1+6*BPP(df),a11);                 
  1144.                 AVERAGE(df,a121,a121,a122);                 
  1145.                 STORE(df,d12+6*BPP(df),a121);               
  1146.                 AVERAGE(df,a21,a21,a22);                    
  1147.                 STORE(df,d2+6*BPP(df),a21);                 
  1148.                 /* shift pointers: */                       
  1149.                 d0  += 8*BPP(df);                           
  1150.                 d01 += 8*BPP(df);                           
  1151.                 d1  += 8*BPP(df);                           
  1152.                 d12 += 8*BPP(df);                           
  1153.                 d2  += 8*BPP(df);                           
  1154.                 count -= 4;                                 
  1155.             }                                               
  1156.             /* check if we have one more 2x2 block: */      
  1157.             if (count >= 2) {                               
  1158.                 /* process last 2x2 block: */               
  1159.                 PIXEL(df,a013); PIXEL(df,a13);              
  1160.                 PIXEL(df,a123); PIXEL(df,a23);              
  1161.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a13,a21,a23,sy1,sy2,su,sv); 
  1162.                 sy1+=2; sy2+=2; su++; sv++;                 
  1163.                 STORE(df,d1+1*BPP(df),a11);                 
  1164.                 STORE(df,d2+1*BPP(df),a21);                 
  1165.                 STORE(df,d1+3*BPP(df),a13);                 
  1166.                 STORE(df,d2+3*BPP(df),a23);                 
  1167.                 /* process vertical half-pixels: */         
  1168.                 LOAD_AVERAGE(df,a011,a11,d0+1*BPP(df));     
  1169.                 STORE(df,d01+1*BPP(df),a011);               
  1170.                 AVERAGE(df,a121,a11,a21);                   
  1171.                 STORE(df,d12+1*BPP(df),a121);               
  1172.                 LOAD_AVERAGE(df,a013,a13,d0+3*BPP(df));     
  1173.                 STORE(df,d01+3*BPP(df),a013);               
  1174.                 AVERAGE(df,a123,a13,a23);                   
  1175.                 STORE(df,d12+3*BPP(df),a123);               
  1176.                 /* process horisontal half-pixels: */       
  1177.                 AVERAGE(df,a012,a012,a011);                 
  1178.                 STORE(df,d01+0*BPP(df),a012);               
  1179.                 AVERAGE(df,a12,a12,a11);                    
  1180.                 STORE(df,d1+0*BPP(df),a12);                 
  1181.                 AVERAGE(df,a122,a122,a121);                 
  1182.                 STORE(df,d12+0*BPP(df),a122);               
  1183.                 AVERAGE(df,a22,a22,a21);                    
  1184.                 STORE(df,d2+0*BPP(df),a22);                 
  1185.                 AVERAGE(df,a011,a011,a013);                 
  1186.                 STORE(df,d01+2*BPP(df),a011); /*!!!*/       
  1187.                 AVERAGE(df,a11,a11,a13);                    
  1188.                 STORE(df,d1+2*BPP(df),a11);                 
  1189.                 AVERAGE(df,a121,a121,a123);                 
  1190.                 STORE(df,d12+2*BPP(df),a121); /*!!!*/       
  1191.                 AVERAGE(df,a21,a21,a23);                    
  1192.                 STORE(df,d2+2*BPP(df),a21);                 
  1193.                 /* move last converted pixels to a12/22: */ 
  1194.                 COPY(df,a012,a013);                         
  1195.                 COPY(df,a12,a13);                           
  1196.                 COPY(df,a122,a123);                         
  1197.                 COPY(df,a22,a23);                           
  1198.                 /* shift pointers: */                       
  1199.                 d0  += 4*BPP(df);                           
  1200.                 d01 += 4*BPP(df);                           
  1201.                 d1  += 4*BPP(df);                           
  1202.                 d12 += 4*BPP(df);                           
  1203.                 d2  += 4*BPP(df);                           
  1204.                 count -= 2;                                 
  1205.             }                                               
  1206.             /* check if we have one more 2x1 block: */      
  1207.             if (count >= 1) {                               
  1208.                 /* process last 2x1 block: */               
  1209.                 YUV_LOAD_CONVERT_2x1(cc,df,a11,a21,sy1,sy2,su,sv); 
  1210.                 STORE(df,d1+1*BPP(df),a11);                 
  1211.                 STORE(df,d1+2*BPP(df),a11);                 
  1212.                 STORE(df,d2+1*BPP(df),a21);                 
  1213.                 STORE(df,d2+2*BPP(df),a21);                 
  1214.                 /* process vertical half-pixels: */         
  1215.                 LOAD_AVERAGE(df,a011,a11,d0+1*BPP(df));     
  1216.                 STORE(df,d01+1*BPP(df),a011);               
  1217.                 STORE(df,d01+2*BPP(df),a011);               
  1218.                 AVERAGE(df,a121,a11,a21);                   
  1219.                 STORE(df,d12+1*BPP(df),a121);               
  1220.                 STORE(df,d12+2*BPP(df),a121);               
  1221.                 /* process horisontal half-pixels: */       
  1222.                 AVERAGE(df,a012,a012,a011);                 
  1223.                 STORE(df,d01+0*BPP(df),a012);               
  1224.                 AVERAGE(df,a12,a12,a11);                    
  1225.                 STORE(df,d1+0*BPP(df),a12);                 
  1226.                 AVERAGE(df,a122,a122,a121);                 
  1227.                 STORE(df,d12+0*BPP(df),a122);               
  1228.                 AVERAGE(df,a22,a22,a21);                    
  1229.                 STORE(df,d2+0*BPP(df),a22);                 
  1230.             } else {                                        
  1231.                 /* just replicate last column: */           
  1232.                 STORE(df,d01,a012);                         
  1233.                 STORE(df,d1,a12);                           
  1234.                 STORE(df,d12,a122);                         
  1235.                 STORE(df,d2,a22);                           
  1236.             }                                               
  1237.         }                                                   
  1238.     }
  1239. /*
  1240.  * Generic row 2x+ stretching converter:
  1241.  *  "???" comments mean that under normal conditions these jumps
  1242.  *  should never be executed; nevertheless, I left these checks
  1243.  *  in place to guarantee the correct termination of the algorithm
  1244.  *  in all possible scenarios.
  1245.  */
  1246. #define DBLROW2X_STRETCH2XPLUS(cc,df,d0,d01,d1,d12,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx) 
  1247.     {                                                       
  1248.         /* initialize local variables: */                   
  1249.         register int count = dest_dx;                       
  1250.         register int limit = dest_dx >> 1; /* !!! */        
  1251.         register int step = src_dx << 1;  /* !!! */         
  1252.         /* # of half-pixels to be processed separately: */  
  1253.         int remainder = 3*dest_dx - limit;                  
  1254.         if ((src_x + src_dx) & 1) remainder += 2*dest_dx;   
  1255.         remainder /= step;                                  
  1256.         /* check row length: */                             
  1257.         if (count) {                                        
  1258.             PIXEL(df,a11); PIXEL(df,a12);                   
  1259.             PIXEL(df,a21); PIXEL(df,a22);                   
  1260.             PIXEL(df,a13); PIXEL(df,a23);                   
  1261.             PIXEL(df,a01x);PIXEL(df,a12x);                  
  1262.             /* check if an odd or single 2x1 block: */      
  1263.             if ((src_x & 1) || src_dx < 2) {                
  1264.                 /* convert first 2x1 block: */              
  1265.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  1266.                 sy1++; sy2++; su++; sv++;                   
  1267.                 /* update count: */                         
  1268.                 if ((count -= remainder) <= 0)              
  1269.                     goto rep_last;                          
  1270.                 goto rep_odd;                               
  1271.             } else {                                        
  1272.                 /* convert first 2x2 block: */              
  1273.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  1274.                 sy1+=2; sy2+=2; su++; sv++;                 
  1275.                 /* update count: */                         
  1276.                 if ((count -= remainder) <= 0)              
  1277.                     goto rep_last_2;        /* ??? */       
  1278.                 goto rep_even;                              
  1279.             }                                               
  1280.             /* the main loop (a11,a12-last conv.pixels): */ 
  1281.             while (1) {                                     
  1282.                 /* load & convert second 2x2 block: */      
  1283.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a13,a21,a23,sy1,sy2,su,sv); 
  1284.                 sy1+=2; sy2+=2; su++; sv++;                 
  1285.                 /* calc. & replicate first half-pixels: */  
  1286.                 AVERAGE(df,a12,a12,a11);                    
  1287.                 LOAD_AVERAGE(df,a01x,a12,d0);               
  1288.                 AVERAGE(df,a22,a22,a21);                    
  1289.                 AVERAGE(df,a12x,a12,a22);                   
  1290.                 do {                                        
  1291.                     d0+=BPP(df);                            
  1292.                     STORE(df,d01,a01x);                     
  1293.                     d01+=BPP(df);                           
  1294.                     STORE(df,d1,a12);                       
  1295.                     d1+=BPP(df);                            
  1296.                     STORE(df,d12,a12x);                     
  1297.                     d12+=BPP(df);                           
  1298.                     STORE(df,d2,a22);                       
  1299.                     d2+=BPP(df);                            
  1300.                     if (!(--count))                         
  1301.                         goto rep_last;      /* ??? */       
  1302.                 } while ((limit -= step) >= 0);             
  1303.                 limit += dest_dx;                           
  1304.                 /* get vertical half-pixels:*/              
  1305.                 LOAD_AVERAGE(df,a01x,a11,d0);               
  1306.                 AVERAGE(df,a12x,a11,a21);                   
  1307.                 /* replicate second even integral pixels: */
  1308.                 do {                                        
  1309.                     d0+=BPP(df);                            
  1310.                     STORE(df,d01,a01x);                     
  1311.                     d01+=BPP(df);                           
  1312.                     STORE(df,d1,a11);                       
  1313.                     d1+=BPP(df);                            
  1314.                     STORE(df,d12,a12x);                     
  1315.                     d12+=BPP(df);                           
  1316.                     STORE(df,d2,a21);                       
  1317.                     d2+=BPP(df);                            
  1318.                     if (!(--count))                         
  1319.                         goto rep_last_2;    /* ??? */       
  1320.                 } while ((limit -= step) >= 0);             
  1321.                 limit += dest_dx;                           
  1322.                 /* calc. & replicate second half-pixels: */ 
  1323.                 AVERAGE(df,a11,a11,a13);                    
  1324.                 LOAD_AVERAGE(df,a01x,a11,d0);               
  1325.                 AVERAGE(df,a21,a21,a23);                    
  1326.                 AVERAGE(df,a12x,a11,a21);                   
  1327.                 do {                                        
  1328.                     d0+=BPP(df);                            
  1329.                     STORE(df,d01,a01x);                     
  1330.                     d01+=BPP(df);                           
  1331.                     STORE(df,d1,a11);                       
  1332.                     d1+=BPP(df);                            
  1333.                     STORE(df,d12,a12x);                     
  1334.                     d12+=BPP(df);                           
  1335.                     STORE(df,d2,a21);                       
  1336.                     d2+=BPP(df);                            
  1337.                     if (!(--count))                         
  1338.                         goto rep_last_3;    /* !!! */       
  1339.                 } while ((limit -= step) >= 0);             
  1340.                 limit += dest_dx;                           
  1341.                 /* get vertical half-pixels:*/              
  1342.                 LOAD_AVERAGE(df,a01x,a13,d0);               
  1343.                 AVERAGE(df,a12x,a13,a23);                   
  1344.                 /* replicate second odd integral pixels: */ 
  1345.                 do {                                        
  1346.                     d0+=BPP(df);                            
  1347.                     STORE(df,d01,a01x);                     
  1348.                     d01+=BPP(df);                           
  1349.                     STORE(df,d1,a13);                       
  1350.                     d1+=BPP(df);                            
  1351.                     STORE(df,d12,a12x);                     
  1352.                     d12+=BPP(df);                           
  1353.                     STORE(df,d2,a23);                       
  1354.                     d2+=BPP(df);                            
  1355.                     if (!(--count))                         
  1356.                         goto last_pixel_2;  /* !!! */       
  1357.                 } while ((limit -= step) >= 0);             
  1358.                 limit += dest_dx;                           
  1359.                 /* load & convert third 2x2 block: */       
  1360.                 YUV_LOAD_CONVERT_DITHER_2x2(cc,df,a11,a12,a21,a22,sy1,sy2,su,sv); 
  1361.                 sy1+=2; sy2+=2; su++; sv++;                 
  1362.                 /* calc. & replicate third half-pixels: */  
  1363.                 AVERAGE(df,a13,a13,a11);                    
  1364.                 LOAD_AVERAGE(df,a01x,a13,d0);               
  1365.                 AVERAGE(df,a23,a23,a21);                    
  1366.                 AVERAGE(df,a12x,a13,a23);                   
  1367.                 do {                                        
  1368.                     d0+=BPP(df);                            
  1369.                     STORE(df,d01,a01x);                     
  1370.                     d01+=BPP(df);                           
  1371.                     STORE(df,d1,a13);                       
  1372.                     d1+=BPP(df);                            
  1373.                     STORE(df,d12,a12x);                     
  1374.                     d12+=BPP(df);                           
  1375.                     STORE(df,d2,a23);                       
  1376.                     d2+=BPP(df);                            
  1377.                     if (!(--count))                         
  1378.                         goto rep_last_3;    /* ??? */       
  1379.                 } while ((limit -= step) >= 0);             
  1380.                 limit += dest_dx;                           
  1381. rep_even:       /* get vertical half-pixels:*/              
  1382.                 LOAD_AVERAGE(df,a01x,a11,d0);               
  1383.                 AVERAGE(df,a12x,a11,a21);                   
  1384.                 /* replicate third even integral pixels: */ 
  1385.                 do {                                        
  1386.                     d0+=BPP(df);                            
  1387.                     STORE(df,d01,a01x);                     
  1388.                     d01+=BPP(df);                           
  1389.                     STORE(df,d1,a11);                       
  1390.                     d1+=BPP(df);                            
  1391.                     STORE(df,d12,a12x);                     
  1392.                     d12+=BPP(df);                           
  1393.                     STORE(df,d2,a21);                       
  1394.                     d2+=BPP(df);                            
  1395.                     if (!(--count))                         
  1396.                         goto rep_last_2;    /* ??? */       
  1397.                 } while ((limit -= step) >= 0);             
  1398.                 limit += dest_dx;                           
  1399.                 /* calc. & replicate fourth half-pixels: */ 
  1400.                 AVERAGE(df,a11,a11,a12);                    
  1401.                 LOAD_AVERAGE(df,a01x,a11,d0);               
  1402.                 AVERAGE(df,a21,a21,a22);                    
  1403.                 AVERAGE(df,a12x,a11,a21);                   
  1404.                 do {                                        
  1405.                     d0+=BPP(df);                            
  1406.                     STORE(df,d01,a01x);                     
  1407.                     d01+=BPP(df);                           
  1408.                     STORE(df,d1,a11);                       
  1409.                     d1+=BPP(df);                            
  1410.                     STORE(df,d12,a12x);                     
  1411.                     d12+=BPP(df);                           
  1412.                     STORE(df,d2,a21);                       
  1413.                     d2+=BPP(df);                            
  1414.                     if (!(--count))                         
  1415.                         goto rep_last;      /* !!! */       
  1416.                 } while ((limit -= step) >= 0);             
  1417.                 limit += dest_dx;                           
  1418. rep_odd:        /* get vertical half-pixels:*/              
  1419.                 LOAD_AVERAGE(df,a01x,a12,d0);               
  1420.                 AVERAGE(df,a12x,a12,a22);                   
  1421.                 /* replicate third odd integral pixels: */  
  1422.                 do {                                        
  1423.                     d0+=BPP(df);                            
  1424.                     STORE(df,d01,a01x);                     
  1425.                     d01+=BPP(df);                           
  1426.                     STORE(df,d1,a12);                       
  1427.                     d1+=BPP(df);                            
  1428.                     STORE(df,d12,a12x);                     
  1429.                     d12+=BPP(df);                           
  1430.                     STORE(df,d2,a22);                       
  1431.                     d2+=BPP(df);                            
  1432.                     if (!(--count))                         
  1433.                         goto last_pixel;    /* !!! */       
  1434.                 } while ((limit -= step) >= 0);             
  1435.                 limit += dest_dx;                           
  1436.             }                                               
  1437. last_pixel_2:/* store last integral pixels in a11/21: */    
  1438.             COPY(df,a11,a13);                               
  1439.             COPY(df,a21,a23);                               
  1440. last_pixel: /* check if we need to convert one more pixel:*/
  1441.             if ((src_x + src_dx) & 1) {                     
  1442.                 /* update count & remainder: */             
  1443.                 register int r2 = remainder >> 1;           
  1444.                 count += r2; remainder -= r2;               
  1445.                 if (count <= 0)                             
  1446.                     goto rep_last;                          
  1447.                 /* load & convert last 2x1 block: */        
  1448.                 YUV_LOAD_CONVERT_2x1(cc,df,a12,a22,sy1,sy2,su,sv); 
  1449.                 /* calc. & replicate last half-pixels: */   
  1450.                 AVERAGE(df,a11,a11,a12);                    
  1451.                 LOAD_AVERAGE(df,a01x,a11,d0);               
  1452.                 AVERAGE(df,a21,a21,a22);                    
  1453.                 AVERAGE(df,a12x,a11,a21);                   
  1454.                 do {                                        
  1455.                     d0+=BPP(df);                            
  1456.                     STORE(df,d01,a01x);                     
  1457.                     d01+=BPP(df);                           
  1458.                     STORE(df,d1,a11);                       
  1459.                     d1+=BPP(df);                            
  1460.                     STORE(df,d12,a12x);                     
  1461.                     d12+=BPP(df);                           
  1462.                     STORE(df,d2,a21);                       
  1463.                     d2+=BPP(df);                            
  1464.                     if (!(--count))                         
  1465.                         goto rep_last;      /* ??? */       
  1466.                 } while ((limit -= step) >= 0);             
  1467.                 /* get last vertical half-pixels:*/         
  1468.                 LOAD_AVERAGE(df,a01x,a12,d0);               
  1469.                 AVERAGE(df,a12x,a12,a22);                   
  1470.             }                                               
  1471.             goto rep_last;                                  
  1472. rep_last_3: /* store last converted pixels in a12/22: */    
  1473.             COPY(df,a12,a13);                               
  1474.             COPY(df,a22,a23);                               
  1475.             goto rep_last;                                  
  1476. rep_last_2: /* store last converted pixels in a12/22: */    
  1477.             COPY(df,a12,a11);                               
  1478.             COPY(df,a22,a21);                               
  1479.             /* restore the number of remaining pixels: */   
  1480. rep_last:   count += remainder;                             
  1481.             /* get vertical half-pixels:*/                  
  1482.             LOAD_AVERAGE(df,a01x,a12,d0);                   
  1483.             AVERAGE(df,a12x,a12,a22);                       
  1484.             /* replicate them: */                           
  1485.             while (count --) {                              
  1486.                 STORE(df,d01,a01x);                         
  1487.                 d01+=BPP(df);                               
  1488.                 STORE(df,d1,a12);                           
  1489.                 d1+=BPP(df);                                
  1490.                 STORE(df,d12,a12x);                         
  1491.                 d12+=BPP(df);                               
  1492.                 STORE(df,d2,a22);                           
  1493.                 d2+=BPP(df);                                
  1494.             }                                               
  1495.         }                                                   
  1496.     }
  1497. /***********************************************************/
  1498. /*
  1499.  * Function names:
  1500.  */
  1501. #define FN(df,sf)               sf##to##df
  1502. #define FN2(df,sf)              sf##to##df##x
  1503. #define DBLROW_FN(df,sf,cc,t)   sf##to##df##_DBLROW_##cc##_##t
  1504. #define DBLROW2X_FN(df,sf,cc,t) sf##to##df##_DBLROW2X_##cc##_##t
  1505. /*
  1506.  * Function replication macros:
  1507.  *  (dblrow- and dblrow2x- converters)
  1508.  */
  1509. #define DBLROW_FUNC(df,sf,cc,t)   
  1510.     static void DBLROW_FN(df,sf,cc,t) (unsigned char *d1, unsigned char *d2,
  1511.         int dest_x, int dest_dx, unsigned char *sy1, unsigned char *sy2,    
  1512.         unsigned char *su, unsigned char *sv, int src_x, int src_dx)        
  1513.         DBLROW_##t(cc,df,d1,d2,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx)
  1514. #define DBLROW2X_FUNC(df,sf,cc,t)   
  1515.     static void DBLROW2X_FN(df,sf,cc,t) (unsigned char *d1, unsigned char *d12,
  1516.         unsigned char *d2, unsigned char *d23, unsigned char *d3,           
  1517.         int dest_x, int dest_dx, unsigned char *sy1, unsigned char *sy2,    
  1518.         unsigned char *su, unsigned char *sv, int src_x, int src_dx)        
  1519.         DBLROW2X_##t(cc,df,d1,d12,d2,d23,d3,dest_x,dest_dx,sy1,sy2,su,sv,src_x,src_dx)
  1520. /***********************************************************/
  1521. /*
  1522.  * Actual double-row functions:
  1523.  */
/* FAST (no hue correction) double-row converters:
 * one function per (output format x horizontal scale mode).
 * Scale modes: SHRINK (dest narrower than source), COPY (1:1),
 * STRETCH (arbitrary widen), STRETCH2X (exact 2x), STRETCH2XPLUS (>2x). */
DBLROW_FUNC(RGB32,  I420 ,FAST, SHRINK)
DBLROW_FUNC(RGB32,  I420 ,FAST, COPY)
DBLROW_FUNC(RGB32,  I420 ,FAST, STRETCH)
DBLROW_FUNC(RGB32,  I420 ,FAST, STRETCH2X)
DBLROW_FUNC(RGB32,  I420 ,FAST, STRETCH2XPLUS)
DBLROW_FUNC(BGR32,  I420 ,FAST, SHRINK)
DBLROW_FUNC(BGR32,  I420 ,FAST, COPY)
DBLROW_FUNC(BGR32,  I420 ,FAST, STRETCH)
DBLROW_FUNC(BGR32,  I420 ,FAST, STRETCH2X)
DBLROW_FUNC(BGR32,  I420 ,FAST, STRETCH2XPLUS)
DBLROW_FUNC(RGB24,  I420 ,FAST, SHRINK)
DBLROW_FUNC(RGB24,  I420 ,FAST, COPY)
DBLROW_FUNC(RGB24,  I420 ,FAST, STRETCH)
DBLROW_FUNC(RGB24,  I420 ,FAST, STRETCH2X)
DBLROW_FUNC(RGB24,  I420 ,FAST, STRETCH2XPLUS)
DBLROW_FUNC(RGB565, I420 ,FAST, SHRINK)
DBLROW_FUNC(RGB565, I420 ,FAST, COPY)
DBLROW_FUNC(RGB565, I420 ,FAST, STRETCH)
DBLROW_FUNC(RGB565, I420 ,FAST, STRETCH2X)
DBLROW_FUNC(RGB565, I420 ,FAST, STRETCH2XPLUS)
DBLROW_FUNC(RGB555, I420 ,FAST, SHRINK)
DBLROW_FUNC(RGB555, I420 ,FAST, COPY)
DBLROW_FUNC(RGB555, I420 ,FAST, STRETCH)
DBLROW_FUNC(RGB555, I420 ,FAST, STRETCH2X)
DBLROW_FUNC(RGB555, I420 ,FAST, STRETCH2XPLUS)
DBLROW_FUNC(RGB444, I420 ,FAST, SHRINK)
DBLROW_FUNC(RGB444, I420 ,FAST, COPY)
DBLROW_FUNC(RGB444, I420 ,FAST, STRETCH)
DBLROW_FUNC(RGB444, I420 ,FAST, STRETCH2X)
DBLROW_FUNC(RGB444, I420 ,FAST, STRETCH2XPLUS)
DBLROW_FUNC(RGB8,   I420 ,FAST, SHRINK)
DBLROW_FUNC(RGB8,   I420 ,FAST, COPY)
DBLROW_FUNC(RGB8,   I420 ,FAST, STRETCH)
DBLROW_FUNC(RGB8,   I420 ,FAST, STRETCH2X)
DBLROW_FUNC(RGB8,   I420 ,FAST, STRETCH2XPLUS)
/* converters with hue correction: */
/* FULL variants: same format/scale matrix as above, but the
 * pixel-conversion core applies hue (chroma) correction. */
DBLROW_FUNC(RGB32,  I420 ,FULL, SHRINK)
DBLROW_FUNC(RGB32,  I420 ,FULL, COPY)
DBLROW_FUNC(RGB32,  I420 ,FULL, STRETCH)
DBLROW_FUNC(RGB32,  I420 ,FULL, STRETCH2X)
DBLROW_FUNC(RGB32,  I420 ,FULL, STRETCH2XPLUS)
DBLROW_FUNC(BGR32,  I420 ,FULL, SHRINK)
DBLROW_FUNC(BGR32,  I420 ,FULL, COPY)
DBLROW_FUNC(BGR32,  I420 ,FULL, STRETCH)
DBLROW_FUNC(BGR32,  I420 ,FULL, STRETCH2X)
DBLROW_FUNC(BGR32,  I420 ,FULL, STRETCH2XPLUS)
DBLROW_FUNC(RGB24,  I420 ,FULL, SHRINK)
DBLROW_FUNC(RGB24,  I420 ,FULL, COPY)
DBLROW_FUNC(RGB24,  I420 ,FULL, STRETCH)
DBLROW_FUNC(RGB24,  I420 ,FULL, STRETCH2X)
DBLROW_FUNC(RGB24,  I420 ,FULL, STRETCH2XPLUS)
DBLROW_FUNC(RGB565, I420 ,FULL, SHRINK)
DBLROW_FUNC(RGB565, I420 ,FULL, COPY)
DBLROW_FUNC(RGB565, I420 ,FULL, STRETCH)
DBLROW_FUNC(RGB565, I420 ,FULL, STRETCH2X)
DBLROW_FUNC(RGB565, I420 ,FULL, STRETCH2XPLUS)
DBLROW_FUNC(RGB555, I420 ,FULL, SHRINK)
DBLROW_FUNC(RGB555, I420 ,FULL, COPY)
DBLROW_FUNC(RGB555, I420 ,FULL, STRETCH)
DBLROW_FUNC(RGB555, I420 ,FULL, STRETCH2X)
DBLROW_FUNC(RGB555, I420 ,FULL, STRETCH2XPLUS)
DBLROW_FUNC(RGB444, I420 ,FULL, SHRINK)
DBLROW_FUNC(RGB444, I420 ,FULL, COPY)
DBLROW_FUNC(RGB444, I420 ,FULL, STRETCH)
DBLROW_FUNC(RGB444, I420 ,FULL, STRETCH2X)
DBLROW_FUNC(RGB444, I420 ,FULL, STRETCH2XPLUS)
DBLROW_FUNC(RGB8,   I420 ,FULL, SHRINK)
DBLROW_FUNC(RGB8,   I420 ,FULL, COPY)
DBLROW_FUNC(RGB8,   I420 ,FULL, STRETCH)
DBLROW_FUNC(RGB8,   I420 ,FULL, STRETCH2X)
DBLROW_FUNC(RGB8,   I420 ,FULL, STRETCH2XPLUS)
  1595. /*
  1596.  * Actual double-row 2x functions:
  1597.  */
/* FAST (no hue correction) 2x double-row converters:
 * these emit two source rows plus interpolated in-between rows
 * (d1/d12/d2/d23/d3), one function per (format x scale mode). */
DBLROW2X_FUNC(RGB32,  I420 ,FAST, SHRINK)
DBLROW2X_FUNC(RGB32,  I420 ,FAST, COPY)
DBLROW2X_FUNC(RGB32,  I420 ,FAST, STRETCH)
DBLROW2X_FUNC(RGB32,  I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(RGB32,  I420 ,FAST, STRETCH2XPLUS)
DBLROW2X_FUNC(BGR32,  I420 ,FAST, SHRINK)
DBLROW2X_FUNC(BGR32,  I420 ,FAST, COPY)
DBLROW2X_FUNC(BGR32,  I420 ,FAST, STRETCH)
DBLROW2X_FUNC(BGR32,  I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(BGR32,  I420 ,FAST, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB24,  I420 ,FAST, SHRINK)
DBLROW2X_FUNC(RGB24,  I420 ,FAST, COPY)
DBLROW2X_FUNC(RGB24,  I420 ,FAST, STRETCH)
DBLROW2X_FUNC(RGB24,  I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(RGB24,  I420 ,FAST, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB565, I420 ,FAST, SHRINK)
DBLROW2X_FUNC(RGB565, I420 ,FAST, COPY)
DBLROW2X_FUNC(RGB565, I420 ,FAST, STRETCH)
DBLROW2X_FUNC(RGB565, I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(RGB565, I420 ,FAST, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB555, I420 ,FAST, SHRINK)
DBLROW2X_FUNC(RGB555, I420 ,FAST, COPY)
DBLROW2X_FUNC(RGB555, I420 ,FAST, STRETCH)
DBLROW2X_FUNC(RGB555, I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(RGB555, I420 ,FAST, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB444, I420 ,FAST, SHRINK)
DBLROW2X_FUNC(RGB444, I420 ,FAST, COPY)
DBLROW2X_FUNC(RGB444, I420 ,FAST, STRETCH)
DBLROW2X_FUNC(RGB444, I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(RGB444, I420 ,FAST, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB8,   I420 ,FAST, SHRINK)
DBLROW2X_FUNC(RGB8,   I420 ,FAST, COPY)
DBLROW2X_FUNC(RGB8,   I420 ,FAST, STRETCH)
DBLROW2X_FUNC(RGB8,   I420 ,FAST, STRETCH2X)
DBLROW2X_FUNC(RGB8,   I420 ,FAST, STRETCH2XPLUS)
/* converters with hue correction: */
/* FULL variants: same matrix, conversion core applies hue correction. */
DBLROW2X_FUNC(RGB32,  I420 ,FULL, SHRINK)
DBLROW2X_FUNC(RGB32,  I420 ,FULL, COPY)
DBLROW2X_FUNC(RGB32,  I420 ,FULL, STRETCH)
DBLROW2X_FUNC(RGB32,  I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(RGB32,  I420 ,FULL, STRETCH2XPLUS)
DBLROW2X_FUNC(BGR32,  I420 ,FULL, SHRINK)
DBLROW2X_FUNC(BGR32,  I420 ,FULL, COPY)
DBLROW2X_FUNC(BGR32,  I420 ,FULL, STRETCH)
DBLROW2X_FUNC(BGR32,  I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(BGR32,  I420 ,FULL, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB24,  I420 ,FULL, SHRINK)
DBLROW2X_FUNC(RGB24,  I420 ,FULL, COPY)
DBLROW2X_FUNC(RGB24,  I420 ,FULL, STRETCH)
DBLROW2X_FUNC(RGB24,  I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(RGB24,  I420 ,FULL, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB565, I420 ,FULL, SHRINK)
DBLROW2X_FUNC(RGB565, I420 ,FULL, COPY)
DBLROW2X_FUNC(RGB565, I420 ,FULL, STRETCH)
DBLROW2X_FUNC(RGB565, I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(RGB565, I420 ,FULL, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB555, I420 ,FULL, SHRINK)
DBLROW2X_FUNC(RGB555, I420 ,FULL, COPY)
DBLROW2X_FUNC(RGB555, I420 ,FULL, STRETCH)
DBLROW2X_FUNC(RGB555, I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(RGB555, I420 ,FULL, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB444, I420 ,FULL, SHRINK)
DBLROW2X_FUNC(RGB444, I420 ,FULL, COPY)
DBLROW2X_FUNC(RGB444, I420 ,FULL, STRETCH)
DBLROW2X_FUNC(RGB444, I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(RGB444, I420 ,FULL, STRETCH2XPLUS)
DBLROW2X_FUNC(RGB8,   I420 ,FULL, SHRINK)
DBLROW2X_FUNC(RGB8,   I420 ,FULL, COPY)
DBLROW2X_FUNC(RGB8,   I420 ,FULL, STRETCH)
DBLROW2X_FUNC(RGB8,   I420 ,FULL, STRETCH2X)
DBLROW2X_FUNC(RGB8,   I420 ,FULL, STRETCH2XPLUS)
  1669. /*
  1670.  * Double-row scale function selection tables:
  1671.  *  [conversion type][source format][row scale type]
  1672.  */
  1673. static void (* DblRowFuncs [2][RGB_FORMATS][SCALE_FUNCS]) (
  1674.     unsigned char *d1, unsigned char *d2, int dest_x, int dest_dx,
  1675.     unsigned char *sy1, unsigned char *sy2,
  1676.     unsigned char *su, unsigned char *sv, int src_x, int src_dx) =
  1677. {
  1678.     {   {        
  1679. #if defined (HELIX_FEATURE_CC_RGB32out)
  1680.     #if defined (HXCOLOR_SHRINK)
  1681.             DBLROW_FN(RGB32 ,I420 ,FAST, SHRINK),
  1682.     #else   
  1683.             0,
  1684.     #endif //HXCOLOR_SHRINK
  1685.             
  1686.             DBLROW_FN(RGB32 ,I420 ,FAST, COPY),
  1687.             
  1688.     #if defined (HXCOLOR_STRETCH)
  1689.             DBLROW_FN(RGB32 ,I420 ,FAST, STRETCH),
  1690.     #else
  1691.             0,
  1692.     #endif //HXCOLOR_STRETCH
  1693.     #if defined (HXCOLOR_STRETCH2X)
  1694.             DBLROW_FN(RGB32 ,I420 ,FAST, STRETCH2X),
  1695.     #else
  1696.             0,
  1697.     #endif //HXCOLOR_STRETCH2X
  1698.     #if defined (HXCOLOR_STRETCH2XPLUS)
  1699.             DBLROW_FN(RGB32 ,I420 ,FAST, STRETCH2XPLUS)
  1700.     #else
  1701.             0
  1702.     #endif //HXCOLOR_STRETCH2XPLUS
  1703. #else
  1704.     0,
  1705.     0,
  1706.     0,
  1707.     0,
  1708.     0
  1709. #endif //HELIX_FEATURE_CC_RGB32out
  1710.         },{
  1711. #if defined (HELIX_FEATURE_CC_BGR32out)
  1712.     #if defined (HXCOLOR_SHRINK)
  1713.             DBLROW_FN(BGR32 ,I420 ,FAST, SHRINK),
  1714.     #else
  1715.             0,
  1716.     #endif //HXCOLOR_SHRINK
  1717.             
  1718.             DBLROW_FN(BGR32 ,I420 ,FAST, COPY),
  1719.     
  1720.     #if defined (HXCOLOR_STRETCH)
  1721.             DBLROW_FN(BGR32 ,I420 ,FAST, STRETCH),
  1722.     #else
  1723.             0,
  1724.     #endif //HXCOLOR_STRETCH
  1725.     #if defined (HXCOLOR_STRETCH2X)
  1726.             DBLROW_FN(BGR32 ,I420 ,FAST, STRETCH2X),
  1727.     #else
  1728.             0,
  1729.     #endif //HXCOLOR_STRETCH2X
  1730.     #if defined (HXCOLOR_STRETCH2XPLUS)
  1731.             DBLROW_FN(BGR32 ,I420 ,FAST, STRETCH2XPLUS)
  1732.     #else
  1733.             0
  1734.     #endif //HXCOLOR_STRETCH2XPLUS
  1735. #else
  1736.     0,
  1737.     0,
  1738.     0,
  1739.     0,
  1740.     0
  1741. #endif //HELIX_FEATURE_CC_BGR32out
  1742.         },{
  1743. #if defined (HELIX_FEATURE_CC_RGB24out)
  1744.     #if defined (HXCOLOR_SHRINK)
  1745.             DBLROW_FN(RGB24 ,I420 ,FAST, SHRINK),
  1746.     #else   
  1747.             0,
  1748.     #endif //HXCOLOR_SHRINK
  1749.             DBLROW_FN(RGB24 ,I420 ,FAST, COPY),
  1750.     #if defined (HXCOLOR_STRETCH)
  1751.             DBLROW_FN(RGB24 ,I420 ,FAST, STRETCH),
  1752.     #else
  1753.             0,
  1754.     #endif //HXCOLOR_STRETCH
  1755.     #if defined (HXCOLOR_STRETCH2X)
  1756.             DBLROW_FN(RGB24 ,I420 ,FAST, STRETCH2X),
  1757.     #else
  1758.             0,
  1759.     #endif //HXCOLOR_STRETCH2X
  1760.     #if defined (HXCOLOR_STRETCH2XPLUS)
  1761.             DBLROW_FN(RGB24 ,I420 ,FAST, STRETCH2XPLUS)
  1762.     #else
  1763.             0
  1764.     #endif  //HXCOLOR_STRETCH2XPLUS
  1765. #else
  1766.     0,
  1767.     0,
  1768.     0,
  1769.     0,
  1770.     0
  1771. #endif //HELIX_FEATURE_CC_RGB24out
  1772.         },{
  1773. #if defined (HELIX_FEATURE_CC_RGB565out)
  1774.     #if defined (HXCOLOR_SHRINK)
  1775.             DBLROW_FN(RGB565,I420 ,FAST, SHRINK),
  1776.     #else
  1777.             0,
  1778.     #endif //HXCOLOR_SHRINK
  1779.             DBLROW_FN(RGB565,I420 ,FAST, COPY),
  1780.     #if defined (HXCOLOR_STRETCH)
  1781.             DBLROW_FN(RGB565,I420 ,FAST, STRETCH),
  1782.     #else
  1783.             0,
  1784.     #endif //HXCOLOR_STRETCH
  1785.     #if defined (HXCOLOR_STRETCH2X)
  1786.             DBLROW_FN(RGB565,I420 ,FAST, STRETCH2X),
  1787.     #else
  1788.             0,
  1789.     #endif //HXCOLOR_STRETCH2X
  1790.     #if defined (HXCOLOR_STRETCH2XPLUS)
  1791.             DBLROW_FN(RGB565,I420 ,FAST, STRETCH2XPLUS)
  1792.     #else
  1793.             0
  1794.     #endif //HXCOLOR_STRETCH2XPLUS
  1795. #else
  1796.     0,
  1797.     0,
  1798.     0,
  1799.     0,
  1800.     0
  1801. #endif //HELIX_FEATURE_CC_RGB565out
  1802.         },{
  1803. #if defined (HELIX_FEATURE_CC_RGB555out)
  1804.     #if defined (HXCOLOR_SHRINK)
  1805.             DBLROW_FN(RGB555,I420 ,FAST, SHRINK),
  1806.     #else
  1807.             0,
  1808.     #endif //HXCOLOR_SHRINK
  1809.             DBLROW_FN(RGB555,I420 ,FAST, COPY),
  1810.     #if defined (HXCOLOR_STRETCH)
  1811.             DBLROW_FN(RGB555,I420 ,FAST, STRETCH),
  1812.     #else
  1813.             0,
  1814.     #endif //HXCOLOR_STRETCH
  1815.     #if defined (HXCOLOR_STRETCH2X)
  1816.             DBLROW_FN(RGB555,I420 ,FAST, STRETCH2X),
  1817.     #else
  1818.             0,
  1819.     #endif //HXCOLOR_STRETCH2X
  1820.     #if defined (HXCOLOR_STRETCH2XPLUS)            
  1821.             DBLROW_FN(RGB555,I420 ,FAST, STRETCH2XPLUS)
  1822.     #else
  1823.             0
  1824.     #endif //HXCOLOR_STRETCH2XPLUS
  1825. #else
  1826.     0,
  1827.     0,
  1828.     0,
  1829.     0,
  1830.     0
  1831. #endif //HELIX_FEATURE_CC_RGB555out
  1832.         },{
  1833. #if defined (HELIX_FEATURE_CC_RGB444out)
  1834.     #if defined (HXCOLOR_SHRINK)
  1835.             DBLROW_FN(RGB444,I420 ,FAST, SHRINK),
  1836.     #else
  1837.             0,
  1838.     #endif //HXCOLOR_SHRINK
  1839.             DBLROW_FN(RGB444,I420 ,FAST, COPY),
  1840.     #if defined (HXCOLOR_STRETCH)
  1841.             DBLROW_FN(RGB444,I420 ,FAST, STRETCH),
  1842.     #else
  1843.             0,
  1844.     #endif //HXCOLOR_STRETCH
  1845.     #if defined (HXCOLOR_STRETCH2X)
  1846.             DBLROW_FN(RGB444,I420 ,FAST, STRETCH2X),
  1847.     #else
  1848.             0,
  1849.     #endif //HXCOLOR_STRETCH2X
  1850.     #if defined (HXCOLOR_STRETCH2XPLUS)            
  1851.             DBLROW_FN(RGB444,I420 ,FAST, STRETCH2XPLUS)
  1852.     #else
  1853.             0
  1854.     #endif //HXCOLOR_STRETCH2XPLUS
  1855. #else
  1856.     0,
  1857.     0,
  1858.     0,
  1859.     0,
  1860.     0
  1861. #endif //HELIX_FEATURE_CC_RGB444out
  1862.         },{
  1863. #if defined (HELIX_FEATURE_CC_RGB8out)
  1864.     #if defined (HXCOLOR_SHRINK)
  1865.             DBLROW_FN(RGB8  ,I420 ,FAST, SHRINK),
  1866.     #else
  1867.             0,
  1868.     #endif //HXCOLOR_SHRINK
  1869.             DBLROW_FN(RGB8  ,I420 ,FAST, COPY),
  1870.     #if defined (HXCOLOR_STRETCH)
  1871.             DBLROW_FN(RGB8  ,I420 ,FAST, STRETCH),
  1872.     #else
  1873.             0,
  1874.     #endif //HXCOLOR_STRETCH
  1875.     #if defined (HXCOLOR_STRETCH2X)
  1876.             DBLROW_FN(RGB8  ,I420 ,FAST, STRETCH2X),
  1877.     #else
  1878.             0,
  1879.     #endif //HXCOLOR_STRETCH2X
  1880.     #if defined (HXCOLOR_STRETCH2XPLUS) 
  1881.             DBLROW_FN(RGB8  ,I420 ,FAST, STRETCH2XPLUS)
  1882.     #else
  1883.             0
  1884.     #endif//HXCOLOR_STRETCH2XPLUS
  1885. #else
  1886.     0,
  1887.     0,
  1888.     0,
  1889.     0,
  1890.     0
  1891. #endif //HELIX_FEATURE_CC_RGB8out
  1892.         }
  1893.     },{ {
  1894. #if defined _PLUS_HXCOLOR && defined (HELIX_FEATURE_CC_RGB32out)
  1895.     #if defined (HXCOLOR_SHRINK)
  1896.             DBLROW_FN(RGB32 ,I420 ,FULL, SHRINK),
  1897.     #else
  1898.             0,
  1899.     #endif //HXCOLOR_SHRINK
  1900.             DBLROW_FN(RGB32 ,I420 ,FULL, COPY),
  1901.     #if defined (HXCOLOR_STRETCH)
  1902.             DBLROW_FN(RGB32 ,I420 ,FULL, STRETCH),
  1903.     #else
  1904.             0,
  1905.     #endif //HXCOLOR_STRETCH
  1906.     #if defined (HXCOLOR_STRETCH2X)
  1907.             DBLROW_FN(RGB32 ,I420 ,FULL, STRETCH2X),
  1908.     #else
  1909.             0,
  1910.     #endif //HXCOLOR_STRETCH2X
  1911.     #if defined (HXCOLOR_STRETCH2XPLUS)
  1912.             DBLROW_FN(RGB32 ,I420 ,FULL, STRETCH2XPLUS)
  1913.     #else
  1914.             0
  1915.     #endif //HXCOLOR_STRETCH2XPLUS
  1916. #else
  1917.     0,
  1918.     0,
  1919.     0,
  1920.     0,
  1921.     0
  1922. #endif
  1923.         },{
  1924. #if defined _PLUS_HXCOLOR && defined (HELIX_FEATURE_CC_BGR32out)
  1925.     #if defined (HXCOLOR_SHRINK)
  1926.             DBLROW_FN(BGR32 ,I420 ,FULL, SHRINK),
  1927.     #else
  1928.             0,
  1929.     #endif //HXCOLOR_SHRINK
  1930.             
  1931.             DBLROW_FN(BGR32 ,I420 ,FULL, COPY),
  1932.             
  1933.     #if defined (HXCOLOR_STRETCH)
  1934.             DBLROW_FN(BGR32 ,I420 ,FULL, STRETCH),
  1935.     #else
  1936.             0,
  1937.     #endif //HXCOLOR_STRETCH
  1938.     #if defined (HXCOLOR_STRETCH2X)
  1939.             DBLROW_FN(BGR32 ,I420 ,FULL, STRETCH2X),
  1940.     #else
  1941.             0,
  1942.     #endif // HXCOLOR_STRETCH2X
  1943.     #if defined (HXCOLOR_STRETCH2XPLUS)            
  1944.             DBLROW_FN(BGR32 ,I420 ,FULL, STRETCH2XPLUS)
  1945.     #else
  1946.             0
  1947.     #endif //HXCOLOR_STRETCH2XPLUS
  1948. #else
  1949.     0,
  1950.     0,
  1951.     0,
  1952.     0,
  1953.     0
  1954. #endif
  1955.         },{
  1956. #if defined _PLUS_HXCOLOR && defined (HELIX_FEATURE_CC_RGB24out)
  1957.     #if defined (HXCOLOR_SHRINK)
  1958.             DBLROW_FN(RGB24 ,I420 ,FULL, SHRINK),
  1959.     #else
  1960.             0,
  1961.     #endif //HXCOLOR_SHRINK
  1962.             DBLROW_FN(RGB24 ,I420 ,FULL, COPY),
  1963.     #if defined (HXCOLOR_STRETCH)
  1964.             DBLROW_FN(RGB24 ,I420 ,FULL, STRETCH),
  1965.     #else
  1966.             0,
  1967.     #endif //HXCOLOR_STRETCH
  1968.     #if defined (HXCOLOR_STRETCH2X)
  1969.             DBLROW_FN(RGB24 ,I420 ,FULL, STRETCH2X),
  1970.     #else
  1971.             0,
  1972.     #endif //HXCOLOR_STRETCH2X
  1973.                  
  1974.     #if defined (HXCOLOR_STRETCH2XPLUS)
  1975.             DBLROW_FN(RGB24 ,I420 ,FULL, STRETCH2XPLUS)
  1976.     #else
  1977.             0
  1978.     #endif //HXCOLOR_STRETCH2XPLUS
  1979.             
  1980. #else
  1981.     0,
  1982.     0,
  1983.     0,
  1984.     0,
  1985.     0
  1986. #endif
  1987.         },{
  1988. #if defined _PLUS_HXCOLOR && defined (HELIX_FEATURE_CC_RGB565out)
  1989.     #if defined (HXCOLOR_SHRINK)
  1990.             DBLROW_FN(RGB565,I420 ,FULL, SHRINK),
  1991.     #else
  1992.             0,
  1993.     #endif //HXCOLOR_SHRINK
  1994.             DBLROW_FN(RGB565,I420 ,FULL, COPY),
  1995.     
  1996.     #if defined (HXCOLOR_STRETCH)
  1997.             DBLROW_FN(RGB565,I420 ,FULL, STRETCH),
  1998.     #else
  1999.             0,
  2000.     #endif //HXCOLOR_STRETCH
  2001.