/*****************************************************************************
 * dct.c: ppc transform and zigzag
 *****************************************************************************
 * Copyright (C) 2003-2018 x264 project
 *
 * Authors: Guillaume Poirier <gpoirier@mplayerhq.hu>
 *          Eric Petit <eric.petit@lapsus.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common/common.h"
#include "ppccommon.h"
#include "dct.h"

#if !HIGH_BIT_DEPTH
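
/* 1-D 4-point forward transform butterfly, applied to four vectors at once.
 * Per element this computes (writing s03 = a0+a3, s12 = a1+a2,
 * d03 = a0-a3, d12 = a1-a2):
 *   b0 = s03 + s12
 *   b1 = 2*d03 + d12
 *   b2 = s03 - s12
 *   b3 = d03 - 2*d12
 */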
#define VEC_DCT(a0,a1,a2,a3,b0,b1,b2,b3) \
    b1 = vec_add( a0, a3 ); \
    b3 = vec_add( a1, a2 ); \
    b0 = vec_add( b1, b3 ); \
    b2 = vec_sub( b1, b3 ); \
    a0 = vec_sub( a0, a3 ); \
    a1 = vec_sub( a1, a2 ); \
    b1 = vec_add( a0, a0 ); \
    b1 = vec_add( b1, a1 ); \
    b3 = vec_sub( a0, a1 ); \
    b3 = vec_sub( b3, a1 )
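
/* Forward 4x4 DCT of the residual pix1 - pix2: one VEC_DCT pass, a 4x4
 * transpose, a second VEC_DCT pass, then permHighv gathers the first four
 * coefficients of each pair of result vectors into dct[16]. */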
void x264_sub4x4_dct_altivec( int16_t dct[16], uint8_t *pix1, uint8_t *pix2 )
{
    PREP_DIFF_8BYTEALIGNED;
    vec_s16_t dct0v, dct1v, dct2v, dct3v;
    vec_s16_t tmp0v, tmp1v, tmp2v, tmp3v;
    vec_u8_t permHighv;

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct0v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct1v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct2v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct3v );

    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_TRANSPOSE_4( tmp0v, tmp1v, tmp2v, tmp3v,
                     dct0v, dct1v, dct2v, dct3v );
    permHighv = (vec_u8_t) CV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17);
    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );

    vec_st( vec_perm( tmp0v, tmp1v, permHighv ),  0, dct );
    vec_st( vec_perm( tmp2v, tmp3v, permHighv ), 16, dct );
}
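
/* Forward DCT of an 8x8 residual, producing the four 4x4 coefficient blocks
 * of dct[4][16]. Each vector holds one 8-wide row of differences; VEC_DCT is
 * applied to the two groups of four rows, the 8x8 block is transposed, and a
 * second VEC_DCT pass follows. permHighv/permLowv then gather the halves of
 * the result vectors into the four 4x4 sub-blocks. */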
void x264_sub8x8_dct_altivec( int16_t dct[4][16], uint8_t *pix1, uint8_t *pix2 )
{
    PREP_DIFF_8BYTEALIGNED;
    vec_s16_t dct0v, dct1v, dct2v, dct3v, dct4v, dct5v, dct6v, dct7v;
    vec_s16_t tmp0v, tmp1v, tmp2v, tmp3v, tmp4v, tmp5v, tmp6v, tmp7v;
    vec_u8_t permHighv, permLowv;

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct0v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct1v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct2v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct3v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct4v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct5v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct6v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct7v );

    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_DCT( dct4v, dct5v, dct6v, dct7v, tmp4v, tmp5v, tmp6v, tmp7v );
    VEC_TRANSPOSE_8( tmp0v, tmp1v, tmp2v, tmp3v,
                     tmp4v, tmp5v, tmp6v, tmp7v,
                     dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v );

    permHighv = (vec_u8_t) CV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17);
    permLowv  = (vec_u8_t) CV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_DCT( dct4v, dct5v, dct6v, dct7v, tmp4v, tmp5v, tmp6v, tmp7v );

    vec_st( vec_perm( tmp0v, tmp1v, permHighv ),   0, *dct );
    vec_st( vec_perm( tmp2v, tmp3v, permHighv ),  16, *dct );
    vec_st( vec_perm( tmp4v, tmp5v, permHighv ),  32, *dct );
    vec_st( vec_perm( tmp6v, tmp7v, permHighv ),  48, *dct );
    vec_st( vec_perm( tmp0v, tmp1v, permLowv ),   64, *dct );
    vec_st( vec_perm( tmp2v, tmp3v, permLowv ),   80, *dct );
    vec_st( vec_perm( tmp4v, tmp5v, permLowv ),   96, *dct );
    vec_st( vec_perm( tmp6v, tmp7v, permLowv ),  112, *dct );
}

void x264_sub16x16_dct_altivec( int16_t dct[16][16], uint8_t *pix1, uint8_t *pix2 )
{
    x264_sub8x8_dct_altivec( &dct[ 0], &pix1[0], &pix2[0] );
    x264_sub8x8_dct_altivec( &dct[ 4], &pix1[8], &pix2[8] );
    x264_sub8x8_dct_altivec( &dct[ 8], &pix1[8*FENC_STRIDE+0], &pix2[8*FDEC_STRIDE+0] );
    x264_sub8x8_dct_altivec( &dct[12], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] );
}

/***************************************************************************
 * 8x8 transform:
 ***************************************************************************/
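
/* Sum four rows of pixel differences (p1 - p2) into diff[i], one 16-bit
 * partial sum per column; used by the DC-only 8x8 transform below. */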
static void pix_diff( uint8_t *p1, uint8_t *p2, vec_s16_t *diff, int i )
{
    vec_s16_t pix1v, pix2v, tmp[4];
    vec_u8_t pix1v8, pix2v8;
    LOAD_ZERO;

    for( int j = 0; j < 4; j++ )
    {
        pix1v8 = vec_vsx_ld( 0, p1 );
        pix2v8 = vec_vsx_ld( 0, p2 );
        pix1v  = vec_u8_to_s16_h( pix1v8 );
        pix2v  = vec_u8_to_s16_h( pix2v8 );
        tmp[j] = vec_sub( pix1v, pix2v );
        p1 += FENC_STRIDE;
        p2 += FDEC_STRIDE;
    }
    diff[i] = vec_add( tmp[0], tmp[1] );
    diff[i] = vec_add( diff[i], tmp[2] );
    diff[i] = vec_add( diff[i], tmp[3] );
}
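
/* DC-only transform of an 8x8 residual: each 4x4 quadrant is reduced to a
 * single DC sum, a 2x2 Hadamard (sums and differences of the four DCs) is
 * applied, and the four results are written to dct[0..3]. The final
 * vec_ld/xxpermdi pair merges the new coefficients with the bytes already
 * stored after dct[4], so the 16-byte store does not clobber them. */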
void x264_sub8x8_dct_dc_altivec( int16_t dct[4], uint8_t *pix1, uint8_t *pix2 )
{
    vec_s16_t diff[2], tmp;
    vec_s32_t sum[2];
    vec_s32_t zero32 = vec_splat_s32(0);
    vec_u8_t mask = { 0x00, 0x01, 0x00, 0x01, 0x04, 0x05, 0x04, 0x05,
                      0x02, 0x03, 0x02, 0x03, 0x06, 0x07, 0x06, 0x07 };

    pix_diff( &pix1[0], &pix2[0], diff, 0 );
    pix_diff( &pix1[4*FENC_STRIDE], &pix2[4*FDEC_STRIDE], diff, 1 );

    sum[0] = vec_sum4s( diff[0], zero32 );
    sum[1] = vec_sum4s( diff[1], zero32 );
    diff[0] = vec_packs( sum[0], sum[1] );
    sum[0] = vec_sum4s( diff[0], zero32 );
    diff[0] = vec_packs( sum[0], zero32 );

    diff[0] = vec_perm( diff[0], diff[0], mask ); // 0 0 2 2 1 1 3 3
    tmp = xxpermdi( diff[0], diff[0], 2 );        // 1 1 3 3 0 0 2 2
    diff[1] = vec_add( diff[0], tmp );            // 0+1 0+1 2+3 2+3
    diff[0] = vec_sub( diff[0], tmp );            // 0-1 0-1 2-3 2-3
    tmp = vec_mergeh( diff[1], diff[0] );         // 0+1 0-1 0+1 0-1 2+3 2-3 2+3 2-3
    diff[0] = xxpermdi( tmp, tmp, 2 );            // 2+3 2-3 2+3 2-3
    diff[1] = vec_add( tmp, diff[0] );            // 0+1+2+3 0-1+2+3
    diff[0] = vec_sub( tmp, diff[0] );            // 0+1-2-3 0-1-2+3
    diff[0] = vec_mergeh( diff[1], diff[0] );

    diff[1] = vec_ld( 0, dct );
    diff[0] = xxpermdi( diff[0], diff[1], 0 );
    vec_st( diff[0], 0, dct );
}

/* DCT8_1D unrolled by 8 in Altivec */
#define DCT8_1D_ALTIVEC( dct0v, dct1v, dct2v, dct3v, dct4v, dct5v, dct6v, dct7v ) \
{ \
    /* int s07 = SRC(0) + SRC(7); */ \
    vec_s16_t s07v = vec_add( dct0v, dct7v ); \
    /* int s16 = SRC(1) + SRC(6); */ \
    vec_s16_t s16v = vec_add( dct1v, dct6v ); \
    /* int s25 = SRC(2) + SRC(5); */ \
    vec_s16_t s25v = vec_add( dct2v, dct5v ); \
    /* int s34 = SRC(3) + SRC(4); */ \
    vec_s16_t s34v = vec_add( dct3v, dct4v ); \
    \
    /* int a0 = s07 + s34; */ \
    vec_s16_t a0v = vec_add(s07v, s34v); \
    /* int a1 = s16 + s25; */ \
    vec_s16_t a1v = vec_add(s16v, s25v); \
    /* int a2 = s07 - s34; */ \
    vec_s16_t a2v = vec_sub(s07v, s34v); \
    /* int a3 = s16 - s25; */ \
    vec_s16_t a3v = vec_sub(s16v, s25v); \
    \
    /* int d07 = SRC(0) - SRC(7); */ \
    vec_s16_t d07v = vec_sub( dct0v, dct7v ); \
    /* int d16 = SRC(1) - SRC(6); */ \
    vec_s16_t d16v = vec_sub( dct1v, dct6v ); \
    /* int d25 = SRC(2) - SRC(5); */ \
    vec_s16_t d25v = vec_sub( dct2v, dct5v ); \
    /* int d34 = SRC(3) - SRC(4); */ \
    vec_s16_t d34v = vec_sub( dct3v, dct4v ); \
    \
    /* int a4 = d16 + d25 + (d07 + (d07>>1)); */ \
    vec_s16_t a4v = vec_add( vec_add(d16v, d25v), vec_add(d07v, vec_sra(d07v, onev)) ); \
    /* int a5 = d07 - d34 - (d25 + (d25>>1)); */ \
    vec_s16_t a5v = vec_sub( vec_sub(d07v, d34v), vec_add(d25v, vec_sra(d25v, onev)) ); \
    /* int a6 = d07 + d34 - (d16 + (d16>>1)); */ \
    vec_s16_t a6v = vec_sub( vec_add(d07v, d34v), vec_add(d16v, vec_sra(d16v, onev)) ); \
    /* int a7 = d16 - d25 + (d34 + (d34>>1)); */ \
    vec_s16_t a7v = vec_add( vec_sub(d16v, d25v), vec_add(d34v, vec_sra(d34v, onev)) ); \
    \
    /* DST(0) = a0 + a1; */ \
    dct0v = vec_add( a0v, a1v ); \
    /* DST(1) = a4 + (a7>>2); */ \
    dct1v = vec_add( a4v, vec_sra(a7v, twov) ); \
    /* DST(2) = a2 + (a3>>1); */ \
    dct2v = vec_add( a2v, vec_sra(a3v, onev) ); \
    /* DST(3) = a5 + (a6>>2); */ \
    dct3v = vec_add( a5v, vec_sra(a6v, twov) ); \
    /* DST(4) = a0 - a1; */ \
    dct4v = vec_sub( a0v, a1v ); \
    /* DST(5) = a6 - (a5>>2); */ \
    dct5v = vec_sub( a6v, vec_sra(a5v, twov) ); \
    /* DST(6) = (a2>>1) - a3; */ \
    dct6v = vec_sub( vec_sra(a2v, onev), a3v ); \
    /* DST(7) = (a4>>2) - a7; */ \
    dct7v = vec_sub( vec_sra(a4v, twov), a7v ); \
}
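
/* Forward 8x8 DCT of the residual: DCT8_1D on the eight rows of differences,
 * an 8x8 transpose, a second DCT8_1D pass, then the eight coefficient rows
 * are stored to dct[64]. */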
void x264_sub8x8_dct8_altivec( int16_t dct[64], uint8_t *pix1, uint8_t *pix2 )
{
    vec_u16_t onev = vec_splat_u16(1);
    vec_u16_t twov = vec_add( onev, onev );

    PREP_DIFF_8BYTEALIGNED;
    vec_s16_t dct0v, dct1v, dct2v, dct3v,
              dct4v, dct5v, dct6v, dct7v;

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct0v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct1v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct2v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct3v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct4v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct5v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct6v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct7v );

    DCT8_1D_ALTIVEC( dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v );

    vec_s16_t dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
              dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v;

    VEC_TRANSPOSE_8( dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v,
                     dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
                     dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v );

    DCT8_1D_ALTIVEC( dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
                     dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v );

    vec_st( dct_tr0v,   0, dct );
    vec_st( dct_tr1v,  16, dct );
    vec_st( dct_tr2v,  32, dct );
    vec_st( dct_tr3v,  48, dct );
    vec_st( dct_tr4v,  64, dct );
    vec_st( dct_tr5v,  80, dct );
    vec_st( dct_tr6v,  96, dct );
    vec_st( dct_tr7v, 112, dct );
}

void x264_sub16x16_dct8_altivec( int16_t dct[4][64], uint8_t *pix1, uint8_t *pix2 )
{
    x264_sub8x8_dct8_altivec( dct[0], &pix1[0],               &pix2[0] );
    x264_sub8x8_dct8_altivec( dct[1], &pix1[8],               &pix2[8] );
    x264_sub8x8_dct8_altivec( dct[2], &pix1[8*FENC_STRIDE+0], &pix2[8*FDEC_STRIDE+0] );
    x264_sub8x8_dct8_altivec( dct[3], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] );
}

/****************************************************************************
 * IDCT transform:
 ****************************************************************************/
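
/* Add a vector of DC values to eight destination pixels with unsigned
 * saturation; the upper eight bytes of the 16-byte line are passed through
 * unchanged. */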
#define ALTIVEC_STORE8_DC_SUM_CLIP(dest, dcv) \
{ \
    /* unaligned load */ \
    vec_u8_t dstv = vec_vsx_ld( 0, dest ); \
    vec_s16_t dcvsum = vec_adds( dcv, vec_u8_to_s16_h( dstv ) ); \
    vec_u8_t dcvsum8 = vec_packsu( dcvsum, vec_u8_to_s16_l( dstv ) ); \
    /* unaligned store */ \
    vec_vsx_st( dcvsum8, 0, dest ); \
}
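
/* Add the four DC coefficients, rounded as (dc + 32) >> 6, to an 8x8
 * destination block: dcv0/dcv1 splat one DC per 4x4 quadrant, and each of
 * the eight rows is loaded, summed with saturation and stored back. */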
void x264_add8x8_idct_dc_altivec( uint8_t *p_dst, int16_t dct[4] )
{
    vec_s16_t dcv0, dcv1;
    vec_s16_t v32 = vec_sl( vec_splat_s16( 8 ), vec_splat_u16( 2 ) );
    vec_u16_t v6 = vec_splat_u16( 6 );
    vec_s16_t dctv = vec_ld( 0, dct );
    vec_u8_t dstv0, dstv1, dstv2, dstv3, dstv4, dstv5, dstv6, dstv7;
    vec_s16_t dcvsum0, dcvsum1, dcvsum2, dcvsum3, dcvsum4, dcvsum5, dcvsum6, dcvsum7;
    vec_u8_t dcvsum8_0, dcvsum8_1, dcvsum8_2, dcvsum8_3, dcvsum8_4, dcvsum8_5, dcvsum8_6, dcvsum8_7;
    LOAD_ZERO;

    dctv = vec_sra( vec_add( dctv, v32 ), v6 );
    dcv1 = (vec_s16_t)vec_mergeh( dctv, dctv );
    dcv0 = (vec_s16_t)vec_mergeh( (vec_s32_t)dcv1, (vec_s32_t)dcv1 );
    dcv1 = (vec_s16_t)vec_mergel( (vec_s32_t)dcv1, (vec_s32_t)dcv1 );

    dstv0 = vec_vsx_ld( 0, p_dst );
    dstv4 = vec_vsx_ld( 0, p_dst + 4*FDEC_STRIDE );
    dstv1 = vec_vsx_ld( 0, p_dst + 1*FDEC_STRIDE );
    dstv5 = vec_vsx_ld( 0, p_dst + 4*FDEC_STRIDE + 1*FDEC_STRIDE );
    dstv2 = vec_vsx_ld( 0, p_dst + 2*FDEC_STRIDE );
    dstv6 = vec_vsx_ld( 0, p_dst + 4*FDEC_STRIDE + 2*FDEC_STRIDE );
    dstv3 = vec_vsx_ld( 0, p_dst + 3*FDEC_STRIDE );
    dstv7 = vec_vsx_ld( 0, p_dst + 4*FDEC_STRIDE + 3*FDEC_STRIDE );

    vec_s16_t s0 = vec_u8_to_s16_h( dstv0 );
    vec_s16_t s1 = vec_u8_to_s16_h( dstv4 );
    vec_s16_t s2 = vec_u8_to_s16_h( dstv1 );
    vec_s16_t s3 = vec_u8_to_s16_h( dstv5 );
    vec_s16_t s4 = vec_u8_to_s16_h( dstv2 );
    vec_s16_t s5 = vec_u8_to_s16_h( dstv6 );
    vec_s16_t s6 = vec_u8_to_s16_h( dstv3 );
    vec_s16_t s7 = vec_u8_to_s16_h( dstv7 );

    dcvsum0 = vec_adds( dcv0, s0 );
    dcvsum4 = vec_adds( dcv1, s1 );
    dcvsum1 = vec_adds( dcv0, s2 );
    dcvsum5 = vec_adds( dcv1, s3 );
    dcvsum2 = vec_adds( dcv0, s4 );
    dcvsum6 = vec_adds( dcv1, s5 );
    dcvsum3 = vec_adds( dcv0, s6 );
    dcvsum7 = vec_adds( dcv1, s7 );

    dcvsum8_0 = vec_packsu( dcvsum0, vec_u8_to_s16_l( dstv0 ) );
    dcvsum8_1 = vec_packsu( dcvsum1, vec_u8_to_s16_l( dstv1 ) );
    dcvsum8_2 = vec_packsu( dcvsum2, vec_u8_to_s16_l( dstv2 ) );
    dcvsum8_3 = vec_packsu( dcvsum3, vec_u8_to_s16_l( dstv3 ) );
    dcvsum8_4 = vec_packsu( dcvsum4, vec_u8_to_s16_l( dstv4 ) );
    dcvsum8_5 = vec_packsu( dcvsum5, vec_u8_to_s16_l( dstv5 ) );
    dcvsum8_6 = vec_packsu( dcvsum6, vec_u8_to_s16_l( dstv6 ) );
    dcvsum8_7 = vec_packsu( dcvsum7, vec_u8_to_s16_l( dstv7 ) );

    vec_vsx_st( dcvsum8_0, 0, p_dst );
    vec_vsx_st( dcvsum8_4, 0, p_dst + 4*FDEC_STRIDE );
    vec_vsx_st( dcvsum8_1, 0, p_dst + 1*FDEC_STRIDE );
    vec_vsx_st( dcvsum8_5, 0, p_dst + 4*FDEC_STRIDE + 1*FDEC_STRIDE );
    vec_vsx_st( dcvsum8_2, 0, p_dst + 2*FDEC_STRIDE );
    vec_vsx_st( dcvsum8_6, 0, p_dst + 4*FDEC_STRIDE + 2*FDEC_STRIDE );
    vec_vsx_st( dcvsum8_3, 0, p_dst + 3*FDEC_STRIDE );
    vec_vsx_st( dcvsum8_7, 0, p_dst + 4*FDEC_STRIDE + 3*FDEC_STRIDE );
}
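
/* Helpers for x264_add16x16_idct_dc_altivec below: load four aligned 16-pixel
 * rows, add the splatted DC values with saturation, and store the rows back. */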
#define LOAD16 \
    dstv0 = vec_ld( 0, p_dst ); \
    dstv1 = vec_ld( 0, p_dst + 1*FDEC_STRIDE ); \
    dstv2 = vec_ld( 0, p_dst + 2*FDEC_STRIDE ); \
    dstv3 = vec_ld( 0, p_dst + 3*FDEC_STRIDE );

#define SUM16 \
    dcvsum0 = vec_adds( dcv0, vec_u8_to_s16_h( dstv0 ) ); \
    dcvsum4 = vec_adds( dcv1, vec_u8_to_s16_l( dstv0 ) ); \
    dcvsum1 = vec_adds( dcv0, vec_u8_to_s16_h( dstv1 ) ); \
    dcvsum5 = vec_adds( dcv1, vec_u8_to_s16_l( dstv1 ) ); \
    dcvsum2 = vec_adds( dcv0, vec_u8_to_s16_h( dstv2 ) ); \
    dcvsum6 = vec_adds( dcv1, vec_u8_to_s16_l( dstv2 ) ); \
    dcvsum3 = vec_adds( dcv0, vec_u8_to_s16_h( dstv3 ) ); \
    dcvsum7 = vec_adds( dcv1, vec_u8_to_s16_l( dstv3 ) ); \
    dcvsum8_0 = vec_packsu( dcvsum0, dcvsum4 ); \
    dcvsum8_1 = vec_packsu( dcvsum1, dcvsum5 ); \
    dcvsum8_2 = vec_packsu( dcvsum2, dcvsum6 ); \
    dcvsum8_3 = vec_packsu( dcvsum3, dcvsum7 );

#define STORE16 \
    vec_st( dcvsum8_0, 0, p_dst ); \
    vec_st( dcvsum8_1, 0, p_dst + 1*FDEC_STRIDE ); \
    vec_st( dcvsum8_2, 0, p_dst + 2*FDEC_STRIDE ); \
    vec_st( dcvsum8_3, 0, p_dst + 3*FDEC_STRIDE );

void x264_add16x16_idct_dc_altivec( uint8_t *p_dst, int16_t dct[16] )
{
    vec_s16_t dcv0, dcv1;
    vec_s16_t v32 = vec_sl( vec_splat_s16( 8 ), vec_splat_u16( 2 ) );
    vec_u16_t v6 = vec_splat_u16( 6 );
    vec_u8_t dstv0, dstv1, dstv2, dstv3;
    vec_s16_t dcvsum0, dcvsum1, dcvsum2, dcvsum3, dcvsum4, dcvsum5, dcvsum6, dcvsum7;
    vec_u8_t dcvsum8_0, dcvsum8_1, dcvsum8_2, dcvsum8_3;
    LOAD_ZERO;

    for( int i = 0; i < 2; i++ )
    {
        vec_s16_t dctv = vec_ld( 0, dct );

        dctv = vec_sra( vec_add( dctv, v32 ), v6 );
        dcv1 = (vec_s16_t)vec_mergeh( dctv, dctv );
        dcv0 = (vec_s16_t)vec_mergeh( (vec_s32_t)dcv1, (vec_s32_t)dcv1 );
        dcv1 = (vec_s16_t)vec_mergel( (vec_s32_t)dcv1, (vec_s32_t)dcv1 );
        LOAD16;
        SUM16;
        STORE16;
        p_dst += 4*FDEC_STRIDE;

        dcv1 = (vec_s16_t)vec_mergel( dctv, dctv );
        dcv0 = (vec_s16_t)vec_mergeh( (vec_s32_t)dcv1, (vec_s32_t)dcv1 );
        dcv1 = (vec_s16_t)vec_mergel( (vec_s32_t)dcv1, (vec_s32_t)dcv1 );
        LOAD16;
        SUM16;
        STORE16;
        dct += 8;
        p_dst += 4*FDEC_STRIDE;
    }
}

#define IDCT_1D_ALTIVEC(s0, s1, s2, s3, d0, d1, d2, d3) \
{ \
    /* a0 = SRC(0) + SRC(2); */ \
    vec_s16_t a0v = vec_add(s0, s2); \
    /* a1 = SRC(0) - SRC(2); */ \
    vec_s16_t a1v = vec_sub(s0, s2); \
    /* a2 = (SRC(1)>>1) - SRC(3); */ \
    vec_s16_t a2v = vec_sub(vec_sra(s1, onev), s3); \
    /* a3 = (SRC(3)>>1) + SRC(1); */ \
    vec_s16_t a3v = vec_add(vec_sra(s3, onev), s1); \
    /* DST(0, a0 + a3); */ \
    d0 = vec_add(a0v, a3v); \
    /* DST(1, a1 + a2); */ \
    d1 = vec_add(a1v, a2v); \
    /* DST(2, a1 - a2); */ \
    d2 = vec_sub(a1v, a2v); \
    /* DST(3, a0 - a3); */ \
    d3 = vec_sub(a0v, a3v); \
}
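
/* Load four destination pixels, add the 16-bit residual in va, clip to
 * 8 bits and store the four bytes back with vec_ste (vdst_mask and element
 * are expected to be set up by the caller). */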
#define VEC_LOAD_U8_ADD_S16_STORE_U8(va) \
    vdst_orig = vec_ld(0, dst); \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask); \
    vdst_ss = (vec_s16_t)vec_mergeh(zero_u8v, vdst); \
    va = vec_add(va, vdst_ss); \
    va_u8 = vec_s16_to_u8(va); \
    va_u32 = vec_splat((vec_u32_t)va_u8, 0); \
    vec_ste(va_u32, element, (uint32_t*)dst);

#define ALTIVEC_STORE4_SUM_CLIP(dest, idctv) \
{ \
    /* unaligned load */ \
    vec_u8_t dstv = vec_vsx_ld(0, dest); \
    vec_s16_t idct_sh6 = vec_sra(idctv, sixv); \
    vec_u16_t dst16 = vec_u8_to_u16_h(dstv); \
    vec_s16_t idstsum = vec_adds(idct_sh6, (vec_s16_t)dst16); \
    vec_u8_t idstsum8 = vec_s16_to_u8(idstsum); \
    /* unaligned store */ \
    vec_u32_t bodyv = vec_splat((vec_u32_t)idstsum8, 0); \
    int element = ((unsigned long)dest & 0xf) >> 2; \
    vec_ste(bodyv, element, (uint32_t *)dest); \
}
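
/* Inverse 4x4 transform and add to dst: the rounding term for the final >>6
 * is folded into dct[0], two IDCT_1D passes are separated by a 4x4 transpose,
 * and each row is shifted, added to dst and clipped by
 * ALTIVEC_STORE4_SUM_CLIP. */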
void x264_add4x4_idct_altivec( uint8_t *dst, int16_t dct[16] )
{
    vec_u16_t onev = vec_splat_u16(1);

    dct[0] += 32; // rounding for the >>6 at the end

    vec_s16_t s0, s1, s2, s3;
    s0 = vec_ld( 0x00, dct );
    s1 = vec_sld( s0, s0, 8 );
    s2 = vec_ld( 0x10, dct );
    s3 = vec_sld( s2, s2, 8 );

    vec_s16_t d0, d1, d2, d3;
    IDCT_1D_ALTIVEC( s0, s1, s2, s3, d0, d1, d2, d3 );

    vec_s16_t tr0, tr1, tr2, tr3;
    VEC_TRANSPOSE_4( d0, d1, d2, d3, tr0, tr1, tr2, tr3 );

    vec_s16_t idct0, idct1, idct2, idct3;
    IDCT_1D_ALTIVEC( tr0, tr1, tr2, tr3, idct0, idct1, idct2, idct3 );

    vec_u16_t sixv = vec_splat_u16(6);
    LOAD_ZERO;

    ALTIVEC_STORE4_SUM_CLIP( &dst[0*FDEC_STRIDE], idct0 );
    ALTIVEC_STORE4_SUM_CLIP( &dst[1*FDEC_STRIDE], idct1 );
    ALTIVEC_STORE4_SUM_CLIP( &dst[2*FDEC_STRIDE], idct2 );
    ALTIVEC_STORE4_SUM_CLIP( &dst[3*FDEC_STRIDE], idct3 );
}

void x264_add8x8_idct_altivec( uint8_t *p_dst, int16_t dct[4][16] )
{
    x264_add4x4_idct_altivec( &p_dst[0],               dct[0] );
    x264_add4x4_idct_altivec( &p_dst[4],               dct[1] );
    x264_add4x4_idct_altivec( &p_dst[4*FDEC_STRIDE+0], dct[2] );
    x264_add4x4_idct_altivec( &p_dst[4*FDEC_STRIDE+4], dct[3] );
}

void x264_add16x16_idct_altivec( uint8_t *p_dst, int16_t dct[16][16] )
{
    x264_add8x8_idct_altivec( &p_dst[0],               &dct[0] );
    x264_add8x8_idct_altivec( &p_dst[8],               &dct[4] );
    x264_add8x8_idct_altivec( &p_dst[8*FDEC_STRIDE+0], &dct[8] );
    x264_add8x8_idct_altivec( &p_dst[8*FDEC_STRIDE+8], &dct[12] );
}

#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) \
{ \
    /* a0 = SRC(0) + SRC(4); */ \
    vec_s16_t a0v = vec_add(s0, s4); \
    /* a2 = SRC(0) - SRC(4); */ \
    vec_s16_t a2v = vec_sub(s0, s4); \
    /* a4 = (SRC(2)>>1) - SRC(6); */ \
    vec_s16_t a4v = vec_sub(vec_sra(s2, onev), s6); \
    /* a6 = (SRC(6)>>1) + SRC(2); */ \
    vec_s16_t a6v = vec_add(vec_sra(s6, onev), s2); \
    /* b0 = a0 + a6; */ \
    vec_s16_t b0v = vec_add(a0v, a6v); \
    /* b2 = a2 + a4; */ \
    vec_s16_t b2v = vec_add(a2v, a4v); \
    /* b4 = a2 - a4; */ \
    vec_s16_t b4v = vec_sub(a2v, a4v); \
    /* b6 = a0 - a6; */ \
    vec_s16_t b6v = vec_sub(a0v, a6v); \
    /* a1 = SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
    /* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */ \
    vec_s16_t a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 = SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
    /* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */ \
    vec_s16_t a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) ); \
    /* a5 = SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
    /* a5 = (SRC(7)-SRC(1)) + SRC(5) + (SRC(5)>>1); */ \
    vec_s16_t a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) ); \
    /* a7 = SRC(5) + SRC(3) + SRC(1) + (SRC(1)>>1); */ \
    vec_s16_t a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) ); \
    /* b1 = (a7>>2) + a1; */ \
    vec_s16_t b1v = vec_add( vec_sra(a7v, twov), a1v ); \
    /* b3 = a3 + (a5>>2); */ \
    vec_s16_t b3v = vec_add( a3v, vec_sra(a5v, twov) ); \
    /* b5 = (a3>>2) - a5; */ \
    vec_s16_t b5v = vec_sub( vec_sra(a3v, twov), a5v ); \
    /* b7 = a7 - (a1>>2); */ \
    vec_s16_t b7v = vec_sub( a7v, vec_sra(a1v, twov) ); \
    /* DST(0, b0 + b7); */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1, b2 + b5); */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2, b4 + b3); */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3, b6 + b1); */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4, b6 - b1); */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5, b4 - b3); */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6, b2 - b5); */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7, b0 - b7); */ \
    d7 = vec_sub(b0v, b7v); \
}

#define ALTIVEC_STORE_SUM_CLIP(dest, idctv) \
{ \
    vec_s16_t idct_sh6 = vec_sra( idctv, sixv ); \
    /* unaligned load */ \
    vec_u8_t dstv = vec_vsx_ld( 0, dest ); \
    vec_s16_t idstsum = vec_adds( idct_sh6, vec_u8_to_s16_h( dstv ) ); \
    vec_u8_t idstsum8 = vec_packsu( idstsum, vec_u8_to_s16_l( dstv ) ); \
    /* unaligned store */ \
    vec_vsx_st( idstsum8, 0, dest ); \
}
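
/* Inverse 8x8 transform and add to dst: rounding for the final >>6 is folded
 * into dct[0], two IDCT8_1D passes are separated by an 8x8 transpose, then
 * each row is shifted, added to dst and clipped by ALTIVEC_STORE_SUM_CLIP. */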
void x264_add8x8_idct8_altivec( uint8_t *dst, int16_t dct[64] )
{
    vec_u16_t onev = vec_splat_u16(1);
    vec_u16_t twov = vec_splat_u16(2);

    dct[0] += 32; // rounding for the >>6 at the end

    vec_s16_t s0, s1, s2, s3, s4, s5, s6, s7;
    s0 = vec_ld(0x00, dct);
    s1 = vec_ld(0x10, dct);
    s2 = vec_ld(0x20, dct);
    s3 = vec_ld(0x30, dct);
    s4 = vec_ld(0x40, dct);
    s5 = vec_ld(0x50, dct);
    s6 = vec_ld(0x60, dct);
    s7 = vec_ld(0x70, dct);

    vec_s16_t d0, d1, d2, d3, d4, d5, d6, d7;
    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7);

    vec_s16_t tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7;
    VEC_TRANSPOSE_8( d0, d1, d2, d3, d4, d5, d6, d7,
                     tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7 );

    vec_s16_t idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;
    IDCT8_1D_ALTIVEC(tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    vec_u16_t sixv = vec_splat_u16(6);
    LOAD_ZERO;

    ALTIVEC_STORE_SUM_CLIP( &dst[0*FDEC_STRIDE], idct0 );
    ALTIVEC_STORE_SUM_CLIP( &dst[1*FDEC_STRIDE], idct1 );
    ALTIVEC_STORE_SUM_CLIP( &dst[2*FDEC_STRIDE], idct2 );
    ALTIVEC_STORE_SUM_CLIP( &dst[3*FDEC_STRIDE], idct3 );
    ALTIVEC_STORE_SUM_CLIP( &dst[4*FDEC_STRIDE], idct4 );
    ALTIVEC_STORE_SUM_CLIP( &dst[5*FDEC_STRIDE], idct5 );
    ALTIVEC_STORE_SUM_CLIP( &dst[6*FDEC_STRIDE], idct6 );
    ALTIVEC_STORE_SUM_CLIP( &dst[7*FDEC_STRIDE], idct7 );
}

void x264_add16x16_idct8_altivec( uint8_t *dst, int16_t dct[4][64] )
{
    x264_add8x8_idct8_altivec( &dst[0],               dct[0] );
    x264_add8x8_idct8_altivec( &dst[8],               dct[1] );
    x264_add8x8_idct8_altivec( &dst[8*FDEC_STRIDE+0], dct[2] );
    x264_add8x8_idct8_altivec( &dst[8*FDEC_STRIDE+8], dct[3] );
}
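
/* 4x4 zigzag scans: the permute masks reorder the 16 coefficients of dct[16]
 * into the frame and field scan orders; the field scan only reshuffles the
 * first eight coefficients and copies the second vector through unchanged. */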
void x264_zigzag_scan_4x4_frame_altivec( int16_t level[16], int16_t dct[16] )
{
    vec_s16_t dct0v, dct1v;
    vec_s16_t tmp0v, tmp1v;

    dct0v = vec_ld(0x00, dct);
    dct1v = vec_ld(0x10, dct);

    const vec_u8_t sel0 = (vec_u8_t) CV(0,1,8,9,2,3,4,5,10,11,16,17,24,25,18,19);
    const vec_u8_t sel1 = (vec_u8_t) CV(12,13,6,7,14,15,20,21,26,27,28,29,22,23,30,31);

    tmp0v = vec_perm( dct0v, dct1v, sel0 );
    tmp1v = vec_perm( dct0v, dct1v, sel1 );

    vec_st( tmp0v, 0x00, level );
    vec_st( tmp1v, 0x10, level );
}

void x264_zigzag_scan_4x4_field_altivec( int16_t level[16], int16_t dct[16] )
{
    vec_s16_t dct0v, dct1v;
    vec_s16_t tmp0v, tmp1v;

    dct0v = vec_ld(0x00, dct);
    dct1v = vec_ld(0x10, dct);

    const vec_u8_t sel0 = (vec_u8_t) CV(0,1,2,3,8,9,4,5,6,7,10,11,12,13,14,15);

    tmp0v = vec_perm( dct0v, dct1v, sel0 );
    tmp1v = dct1v;

    vec_st( tmp0v, 0x00, level );
    vec_st( tmp1v, 0x10, level );
}
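
/* 8x8 frame zigzag: the 64 coefficients are reordered entirely in vector
 * registers with vec_mergeh/vec_mergel and the mask1[] permute patterns,
 * producing one 16-byte line of level[] at a time. */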
void x264_zigzag_scan_8x8_frame_altivec( int16_t level[64], int16_t dct[64] )
{
    vec_s16_t tmpv[6];
    vec_s16_t dct0v = vec_ld( 0*16, dct );
    vec_s16_t dct1v = vec_ld( 1*16, dct );
    vec_s16_t dct2v = vec_ld( 2*16, dct );
    vec_s16_t dct3v = vec_ld( 3*16, dct );
    vec_s16_t dct4v = vec_ld( 4*16, dct );
    vec_s16_t dct5v = vec_ld( 5*16, dct );
    vec_s16_t dct6v = vec_ld( 6*16, dct );
    vec_s16_t dct7v = vec_ld( 7*16, dct );

    const vec_u8_t mask1[14] = {
        { 0x00, 0x01, 0x02, 0x03, 0x12, 0x13, 0x14, 0x15, 0x0A, 0x0B, 0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D },
        { 0x0A, 0x0B, 0x0C, 0x0D, 0x00, 0x00, 0x0E, 0x0F, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x12, 0x13 },
        { 0x00, 0x01, 0x02, 0x03, 0x18, 0x19, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F },
        { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x18, 0x19, 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F },
        { 0x00, 0x00, 0x14, 0x15, 0x18, 0x19, 0x02, 0x03, 0x04, 0x05, 0x08, 0x09, 0x06, 0x07, 0x12, 0x13 },
        { 0x12, 0x13, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F },
        { 0x1A, 0x1B, 0x10, 0x11, 0x08, 0x09, 0x04, 0x05, 0x02, 0x03, 0x0C, 0x0D, 0x14, 0x15, 0x18, 0x19 },
        { 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x0A, 0x0B },
        { 0x00, 0x01, 0x02, 0x03, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x06, 0x07, 0x04, 0x05, 0x08, 0x09 },
        { 0x00, 0x11, 0x16, 0x17, 0x18, 0x19, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x1A, 0x1B },
        { 0x02, 0x03, 0x18, 0x19, 0x16, 0x17, 0x1A, 0x1B, 0x1C, 0x1D, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09 },
        { 0x08, 0x09, 0x0A, 0x0B, 0x06, 0x07, 0x0E, 0x0F, 0x10, 0x11, 0x00, 0x00, 0x12, 0x13, 0x14, 0x15 },
        { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F },
        { 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x08, 0x09, 0x06, 0x07, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F }
    };

    tmpv[0] = vec_mergeh( dct0v, dct1v );
    tmpv[1] = vec_mergeh( dct2v, dct3v );
    tmpv[2] = (vec_s16_t)vec_mergeh( (vec_s32_t)tmpv[0], (vec_s32_t)tmpv[1] );
    tmpv[3] = vec_perm( tmpv[2], dct0v, mask1[0] );
    vec_st( tmpv[3], 0*16, level );

    tmpv[4] = vec_mergeh( dct4v, dct5v );
    tmpv[3] = vec_perm( tmpv[0], tmpv[4], mask1[1] );
    tmpv[3] = vec_perm( tmpv[3], dct0v, mask1[2] );
    tmpv[3] = vec_perm( tmpv[3], tmpv[1], mask1[3] );
    vec_st( tmpv[3], 1*16, level );

    tmpv[3] = vec_mergel( dct0v, dct1v );
    tmpv[1] = vec_mergel( tmpv[1], dct2v );
    tmpv[5] = vec_perm( tmpv[3], tmpv[1], mask1[4] );
    tmpv[5] = vec_perm( tmpv[5], dct4v, mask1[5] );
    vec_st( tmpv[5], 2*16, level );

    tmpv[2] = vec_mergeh( dct5v, dct6v );
    tmpv[5] = vec_mergeh( tmpv[2], dct7v );
    tmpv[4] = vec_mergel( tmpv[4], tmpv[1] );
    tmpv[0] = vec_perm( tmpv[5], tmpv[4], mask1[6] );
    vec_st( tmpv[0], 3*16, level );

    tmpv[1] = vec_mergel( dct2v, dct3v );
    tmpv[0] = vec_mergel( dct4v, dct5v );
    tmpv[4] = vec_perm( tmpv[1], tmpv[0], mask1[7] );
    tmpv[3] = vec_perm( tmpv[4], tmpv[3], mask1[8] );
    vec_st( tmpv[3], 4*16, level );

    tmpv[3] = vec_mergeh( dct6v, dct7v );
    tmpv[2] = vec_mergel( dct3v, dct4v );
    tmpv[2] = vec_perm( tmpv[2], dct5v, mask1[9] );
    tmpv[3] = vec_perm( tmpv[2], tmpv[3], mask1[10] );
    vec_st( tmpv[3], 5*16, level );

    tmpv[1] = vec_mergel( tmpv[1], tmpv[2] );
    tmpv[2] = vec_mergel( dct6v, dct7v );
    tmpv[1] = vec_perm( tmpv[1], tmpv[2], mask1[11] );
    tmpv[1] = vec_perm( tmpv[1], dct7v, mask1[12] );
    vec_st( tmpv[1], 6*16, level );

    tmpv[2] = vec_perm( tmpv[2], tmpv[0], mask1[13] );
    vec_st( tmpv[2], 7*16, level );
}
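
/* Interleave the coefficients of an 8x8 block into four 4x4 blocks for CAVLC
 * (dst[i*16+j] = src[i+4*j]) and set the corresponding non-zero flags
 * nnz[0], nnz[1], nnz[8] and nnz[9] from an OR-reduction of the source. */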
void x264_zigzag_interleave_8x8_cavlc_altivec( int16_t *dst, int16_t *src, uint8_t *nnz )
{
    vec_s16_t tmpv[8];
    vec_s16_t merge[2];
    vec_s16_t permv[3];
    vec_s16_t orv[4];
    vec_s16_t src0v = vec_ld( 0*16, src );
    vec_s16_t src1v = vec_ld( 1*16, src );
    vec_s16_t src2v = vec_ld( 2*16, src );
    vec_s16_t src3v = vec_ld( 3*16, src );
    vec_s16_t src4v = vec_ld( 4*16, src );
    vec_s16_t src5v = vec_ld( 5*16, src );
    vec_s16_t src6v = vec_ld( 6*16, src );
    vec_s16_t src7v = vec_ld( 7*16, src );
    vec_u8_t pack;
    vec_u8_t nnzv = vec_vsx_ld( 0, nnz );
    vec_u8_t shift = vec_splat_u8( 7 );
    LOAD_ZERO;

    const vec_u8_t mask[3] = {
        { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 },
        { 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F },
        { 0x10, 0x11, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x12, 0x13, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F }
    };

    tmpv[0] = vec_mergeh( src0v, src1v );
    tmpv[1] = vec_mergel( src0v, src1v );
    tmpv[2] = vec_mergeh( src2v, src3v );
    tmpv[3] = vec_mergel( src2v, src3v );
    tmpv[4] = vec_mergeh( src4v, src5v );
    tmpv[5] = vec_mergel( src4v, src5v );
    tmpv[6] = vec_mergeh( src6v, src7v );
    tmpv[7] = vec_mergel( src6v, src7v );

    merge[0] = vec_mergeh( tmpv[0], tmpv[1] );
    merge[1] = vec_mergeh( tmpv[2], tmpv[3] );
    permv[0] = vec_perm( merge[0], merge[1], mask[0] );
    permv[1] = vec_perm( merge[0], merge[1], mask[1] );
    vec_st( permv[0], 0*16, dst );

    merge[0] = vec_mergeh( tmpv[4], tmpv[5] );
    merge[1] = vec_mergeh( tmpv[6], tmpv[7] );
    permv[0] = vec_perm( merge[0], merge[1], mask[0] );
    permv[2] = vec_perm( merge[0], merge[1], mask[1] );
    vec_st( permv[0], 1*16, dst );
    vec_st( permv[1], 2*16, dst );
    vec_st( permv[2], 3*16, dst );

    merge[0] = vec_mergel( tmpv[0], tmpv[1] );
    merge[1] = vec_mergel( tmpv[2], tmpv[3] );
    permv[0] = vec_perm( merge[0], merge[1], mask[0] );
    permv[1] = vec_perm( merge[0], merge[1], mask[1] );
    vec_st( permv[0], 4*16, dst );

    merge[0] = vec_mergel( tmpv[4], tmpv[5] );
    merge[1] = vec_mergel( tmpv[6], tmpv[7] );
    permv[0] = vec_perm( merge[0], merge[1], mask[0] );
    permv[2] = vec_perm( merge[0], merge[1], mask[1] );
    vec_st( permv[0], 5*16, dst );
    vec_st( permv[1], 6*16, dst );
    vec_st( permv[2], 7*16, dst );

    orv[0] = vec_or( src0v, src1v );
    orv[1] = vec_or( src2v, src3v );
    orv[2] = vec_or( src4v, src5v );
    orv[3] = vec_or( src6v, src7v );
    permv[0] = vec_or( orv[0], orv[1] );
    permv[1] = vec_or( orv[2], orv[3] );
    permv[0] = vec_or( permv[0], permv[1] );
    permv[1] = vec_perm( permv[0], permv[0], mask[1] );
    permv[0] = vec_or( permv[0], permv[1] );

    pack = (vec_u8_t)vec_packs( permv[0], permv[0] );
    pack = (vec_u8_t)vec_cmpeq( pack, zerov );
    pack = vec_nor( pack, zerov );
    pack = vec_sr( pack, shift );
    nnzv = vec_perm( nnzv, pack, mask[2] );
    vec_st( nnzv, 0, nnz );
}

#endif // !HIGH_BIT_DEPTH