/*****************************************************************************
 * mc.c: ppc motion compensation
 *****************************************************************************
 * Copyright (C) 2003-2018 x264 project
 *
 * Authors: Eric Petit <eric.petit@lapsus.org>
 *          Guillaume Poirier <gpoirier@mplayerhq.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common/common.h"
#include "ppccommon.h"
#include "mc.h"

#if !HIGH_BIT_DEPTH
typedef void (*pf_mc_t)( uint8_t *src, intptr_t i_src,
                         uint8_t *dst, intptr_t i_dst, int i_height );
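
/* pixel_avg2_w*: rounding average of two prediction sources,
 * dst[x] = ( src1[x] + src2[x] + 1 ) >> 1, used to combine two half-pel
 * planes into a quarter-pel prediction. */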
static inline void pixel_avg2_w4_altivec( uint8_t *dst,  intptr_t i_dst,
                                          uint8_t *src1, intptr_t i_src1,
                                          uint8_t *src2, int i_height )
{
    for( int y = 0; y < i_height; y++ )
    {
        for( int x = 0; x < 4; x++ )
            dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}

static inline void pixel_avg2_w8_altivec( uint8_t *dst,  intptr_t i_dst,
                                          uint8_t *src1, intptr_t i_src1,
                                          uint8_t *src2, int i_height )
{
    vec_u8_t src1v, src2v;
    PREP_STORE8;

    for( int y = 0; y < i_height; y++ )
    {
        src1v = vec_vsx_ld( 0, src1 );
        src2v = vec_vsx_ld( 0, src2 );
        src1v = vec_avg( src1v, src2v );

        VEC_STORE8(src1v, dst);

        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}

static inline void pixel_avg2_w16_altivec( uint8_t *dst,  intptr_t i_dst,
                                           uint8_t *src1, intptr_t i_src1,
                                           uint8_t *src2, int i_height )
{
    vec_u8_t src1v, src2v;

    for( int y = 0; y < i_height; y++ )
    {
        src1v = vec_vsx_ld( 0, src1 );
        src2v = vec_vsx_ld( 0, src2 );
        src1v = vec_avg( src1v, src2v );

        vec_st(src1v, 0, dst);

        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}

static inline void pixel_avg2_w20_altivec( uint8_t *dst,  intptr_t i_dst,
                                           uint8_t *src1, intptr_t i_src1,
                                           uint8_t *src2, int i_height )
{
    pixel_avg2_w16_altivec(dst,    i_dst, src1,    i_src1, src2,    i_height);
    pixel_avg2_w4_altivec (dst+16, i_dst, src1+16, i_src1, src2+16, i_height);
}
/* mc_copy: plain c */
#define MC_COPY( name, a )                                  \
static void name( uint8_t *dst, intptr_t i_dst,             \
                  uint8_t *src, intptr_t i_src, int i_height ) \
{                                                           \
    int y;                                                  \
    for( y = 0; y < i_height; y++ )                         \
    {                                                       \
        memcpy( dst, src, a );                              \
        src += i_src;                                       \
        dst += i_dst;                                       \
    }                                                       \
}

MC_COPY( mc_copy_w4_altivec, 4 )
MC_COPY( mc_copy_w8_altivec, 8 )

static void mc_copy_w16_altivec( uint8_t *dst, intptr_t i_dst,
                                 uint8_t *src, intptr_t i_src, int i_height )
{
    vec_u8_t cpyV;

    for( int y = 0; y < i_height; y++ )
    {
        cpyV = vec_vsx_ld( 0, src );
        vec_st(cpyV, 0, dst);

        src += i_src;
        dst += i_dst;
    }
}

static void mc_copy_w16_aligned_altivec( uint8_t *dst, intptr_t i_dst,
                                         uint8_t *src, intptr_t i_src, int i_height )
{
    for( int y = 0; y < i_height; ++y )
    {
        vec_u8_t cpyV = vec_ld( 0, src );
        vec_st(cpyV, 0, dst);

        src += i_src;
        dst += i_dst;
    }
}
#define x264_plane_copy_swap_core_altivec x264_template(plane_copy_swap_core_altivec)
void x264_plane_copy_swap_core_altivec( uint8_t *dst, intptr_t i_dst,
                                        uint8_t *src, intptr_t i_src, int w, int h )
{
    const vec_u8_t mask = { 0x01, 0x00, 0x03, 0x02, 0x05, 0x04, 0x07, 0x06, 0x09, 0x08, 0x0B, 0x0A, 0x0D, 0x0C, 0x0F, 0x0E };

    for( int y = 0; y < h; y++, dst += i_dst, src += i_src )
        for( int x = 0; x < 2 * w; x += 16 )
        {
            vec_u8_t srcv = vec_vsx_ld( x, src );
            vec_u8_t dstv = vec_perm( srcv, srcv, mask );

            vec_vsx_st( dstv, x, dst );
        }
}
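
/* Interleave separate U and V rows into a single UV row: vec_mergeh/vec_mergel
 * produce U0 V0 U1 V1 ... for each pair of 16-byte source blocks. */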
#define x264_plane_copy_interleave_core_altivec x264_template(plane_copy_interleave_core_altivec)
void x264_plane_copy_interleave_core_altivec( uint8_t *dst,  intptr_t i_dst,
                                              uint8_t *srcu, intptr_t i_srcu,
                                              uint8_t *srcv, intptr_t i_srcv, int w, int h )
{
    for( int y = 0; y < h; y++, dst += i_dst, srcu += i_srcu, srcv += i_srcv )
        for( int x = 0; x < w; x += 16 )
        {
            vec_u8_t srcvv = vec_vsx_ld( x, srcv );
            vec_u8_t srcuv = vec_vsx_ld( x, srcu );
            vec_u8_t dstv1 = vec_mergeh( srcuv, srcvv );
            vec_u8_t dstv2 = vec_mergel( srcuv, srcvv );

            vec_vsx_st( dstv1, 2 * x, dst );
            vec_vsx_st( dstv2, 2 * x + 16, dst );
        }
}

void x264_store_interleave_chroma_altivec( uint8_t *dst, intptr_t i_dst,
                                           uint8_t *srcu, uint8_t *srcv, int height )
{
    for( int y = 0; y < height; y++, dst += i_dst, srcu += FDEC_STRIDE, srcv += FDEC_STRIDE )
    {
        vec_u8_t srcvv = vec_vsx_ld( 0, srcv );
        vec_u8_t srcuv = vec_vsx_ld( 0, srcu );
        vec_u8_t dstv  = vec_mergeh( srcuv, srcvv );

        vec_vsx_st(dstv, 0, dst);
    }
}

void x264_plane_copy_deinterleave_altivec( uint8_t *dstu, intptr_t i_dstu,
                                           uint8_t *dstv, intptr_t i_dstv,
                                           uint8_t *src,  intptr_t i_src, int w, int h )
{
    const vec_u8_t mask[2] = {
        { 0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E },
        { 0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F }
    };

    for( int y = 0; y < h; y++, dstu += i_dstu, dstv += i_dstv, src += i_src )
    {
        for( int x = 0; x < w; x += 16 )
        {
            vec_u8_t srcv1 = vec_vsx_ld( 2 * x, src );
            vec_u8_t srcv2 = vec_vsx_ld( 2 * x + 16, src );
            vec_u8_t dstuv = vec_perm( srcv1, srcv2, mask[0] );
            vec_u8_t dstvv = vec_perm( srcv1, srcv2, mask[1] );

            vec_vsx_st( dstuv, x, dstu );
            vec_vsx_st( dstvv, x, dstv );
        }
    }
}

static void load_deinterleave_chroma_fenc_altivec( uint8_t *dst, uint8_t *src, intptr_t i_src, int height )
{
    const vec_u8_t mask = { 0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, 0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F };

    for( int y = 0; y < height; y += 2, dst += 2*FENC_STRIDE, src += 2*i_src )
    {
        vec_u8_t src0 = vec_ld( 0, src );
        vec_u8_t src1 = vec_ld( i_src, src );

        vec_st( vec_perm( src0, src0, mask ), 0*FENC_STRIDE, dst );
        vec_st( vec_perm( src1, src1, mask ), 1*FENC_STRIDE, dst );
    }
}
#if HAVE_VSX
void x264_plane_copy_deinterleave_rgb_altivec( uint8_t *dsta, intptr_t i_dsta,
                                               uint8_t *dstb, intptr_t i_dstb,
                                               uint8_t *dstc, intptr_t i_dstc,
                                               uint8_t *src,  intptr_t i_src,
                                               int pw, int w, int h )
{
    if( pw == 3 )
    {
        const vec_u8_t mask[4] = {
            { 0x00, 0x03, 0x06, 0x09, 0x0C, 0x0F, 0x12, 0x15, 0x01, 0x04, 0x07, 0x0A, 0x0D, 0x10, 0x13, 0x16 },
            { 0x08, 0x0B, 0x0E, 0x11, 0x14, 0x17, 0x1A, 0x1D, 0x09, 0x0C, 0x0F, 0x12, 0x15, 0x18, 0x1B, 0x1E },
            { 0x02, 0x05, 0x08, 0x0B, 0x0E, 0x11, 0x14, 0x17, 0x1A, 0x1D, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
            { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10, 0x13, 0x16, 0x19, 0x1C, 0x1F }
        };

        for( int y = 0; y < h; y++, dsta += i_dsta, dstb += i_dstb, dstc += i_dstc, src += i_src )
        {
            for( int x = 0; x < w; x += 16 )
            {
                vec_u8_t srcv1 = vec_vsx_ld( 3 * x, src );
                vec_u8_t srcv2 = vec_vsx_ld( 3 * x + 16, src );
                vec_u8_t srcv3 = vec_vsx_ld( 3 * x + 32, src );
                vec_u64_t tmp1 = (vec_u64_t)vec_perm( srcv1, srcv2, mask[0] ); // a0 a1 a2 a3 a4 a5 a6 a7 b0 b1 b2 b3 b4 b5 b6 b7
                vec_u64_t tmp2 = (vec_u64_t)vec_perm( srcv2, srcv3, mask[1] ); // a8 a9 a10 a11 a12 a13 a14 a15 b8 b9 b10 b11 b12 b13 b14 b15

                vec_st( (vec_u8_t)vec_mergeh( tmp1, tmp2 ), x, dsta );
                vec_st( (vec_u8_t)vec_mergel( tmp1, tmp2 ), x, dstb );

                srcv1 = vec_perm( srcv1, srcv2, mask[2] ); // c0 c1 c2 c3 c4 c5 c6 c7 c8 c9
                srcv1 = vec_perm( srcv1, srcv3, mask[3] ); // c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15

                vec_st( srcv1, x, dstc );
            }
        }
    }
    else
    {
        const vec_u8_t mask[2] = {
            { 0x00, 0x04, 0x08, 0x0C, 0x10, 0x14, 0x18, 0x1C, 0x01, 0x05, 0x09, 0x0D, 0x11, 0x15, 0x19, 0x1D },
            { 0x02, 0x06, 0x0A, 0x0E, 0x12, 0x16, 0x1A, 0x1E, 0x03, 0x07, 0x0B, 0x0F, 0x13, 0x17, 0x1B, 0x1F }
        };

        for( int y = 0; y < h; y++, dsta += i_dsta, dstb += i_dstb, dstc += i_dstc, src += i_src )
        {
            for( int x = 0; x < w; x += 16 )
            {
                vec_u8_t srcv1 = vec_vsx_ld( 4 * x, src );
                vec_u8_t srcv2 = vec_vsx_ld( 4 * x + 16, src );
                vec_u8_t srcv3 = vec_vsx_ld( 4 * x + 32, src );
                vec_u8_t srcv4 = vec_vsx_ld( 4 * x + 48, src );
                vec_u64_t tmp1 = (vec_u64_t)vec_perm( srcv1, srcv2, mask[0] ); // a0 a1 a2 a3 a4 a5 a6 a7 b0 b1 b2 b3 b4 b5 b6 b7
                vec_u64_t tmp2 = (vec_u64_t)vec_perm( srcv3, srcv4, mask[0] ); // a8 a9 a10 a11 a12 a13 a14 a15 b8 b9 b10 b11 b12 b13 b14 b15

                vec_st( (vec_u8_t)vec_mergeh( tmp1, tmp2 ), x, dsta );
                vec_st( (vec_u8_t)vec_mergel( tmp1, tmp2 ), x, dstb );

                tmp1 = (vec_u64_t)vec_perm( srcv1, srcv2, mask[1] ); // c0 c1 c2 c3 c4 c5 c6 c7
                tmp2 = (vec_u64_t)vec_perm( srcv3, srcv4, mask[1] ); // c8 c9 c10 c11 c12 c13 c14 c15

                vec_st( (vec_u8_t)vec_mergeh( tmp1, tmp2 ), x, dstc );
            }
        }
    }
}
#endif
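
/* mc_luma / get_ref: quarter-pel luma motion compensation.  qpel_idx encodes
 * the fractional part of the motion vector; x264_hpel_ref0/ref1 pick which
 * precomputed half-pel planes to average when quarter-pel interpolation is
 * needed, and weight->weightfn optionally applies weighted prediction. */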
static void mc_luma_altivec( uint8_t *dst,    intptr_t i_dst_stride,
                             uint8_t *src[4], intptr_t i_src_stride,
                             int mvx, int mvy,
                             int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
    uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;

    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);

        switch( i_width )
        {
            case 4:
                pixel_avg2_w4_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 8:
                pixel_avg2_w8_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 16:
            default:
                pixel_avg2_w16_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
        }
        if( weight->weightfn )
            weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );
    }
    else if( weight->weightfn )
        weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );
    else
    {
        switch( i_width )
        {
            case 4:
                mc_copy_w4_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
                break;
            case 8:
                mc_copy_w8_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
                break;
            case 16:
                mc_copy_w16_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
                break;
        }
    }
}
static uint8_t *get_ref_altivec( uint8_t *dst,    intptr_t *i_dst_stride,
                                 uint8_t *src[4], intptr_t i_src_stride,
                                 int mvx, int mvy,
                                 int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
    uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;

    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);

        switch( i_width )
        {
            case 4:
                pixel_avg2_w4_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 8:
                pixel_avg2_w8_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 12:
            case 16:
            default:
                pixel_avg2_w16_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 20:
                pixel_avg2_w20_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
        }
        if( weight->weightfn )
            weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );
        return dst;
    }
    else if( weight->weightfn )
    {
        weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );
        return dst;
    }
    else
    {
        *i_dst_stride = i_src_stride;
        return src1;
    }
}
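
/* Chroma MC is a bilinear filter over an interleaved (UVUV...) chroma plane:
 * each output sample is ( cA*A + cB*B + cC*C + cD*D + 32 ) >> 6, with the
 * weights derived from the 1/8-pel fractional part of the motion vector. */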
static void mc_chroma_2xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
                           uint8_t *src, intptr_t i_src_stride,
                           int mvx, int mvy, int i_height )
{
    uint8_t *srcp;
    int d8x = mvx&0x07;
    int d8y = mvy&0x07;

    int cA = (8-d8x)*(8-d8y);
    int cB = d8x    *(8-d8y);
    int cC = (8-d8x)*d8y;
    int cD = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
    srcp = &src[i_src_stride];

    for( int y = 0; y < i_height; y++ )
    {
        dstu[0] = ( cA*src[0] + cB*src[2] + cC*srcp[0] + cD*srcp[2] + 32 ) >> 6;
        dstv[0] = ( cA*src[1] + cB*src[3] + cC*srcp[1] + cD*srcp[3] + 32 ) >> 6;
        dstu[1] = ( cA*src[2] + cB*src[4] + cC*srcp[2] + cD*srcp[4] + 32 ) >> 6;
        dstv[1] = ( cA*src[3] + cB*src[5] + cC*srcp[3] + cD*srcp[5] + 32 ) >> 6;

        src  += i_src_stride;
        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;
    }
}

#ifdef WORDS_BIGENDIAN
#define VSLD(a,b,n) vec_sld(a,b,n)
#else
#define VSLD(a,b,n) vec_sld(b,a,16-n)
#endif
static void mc_chroma_4xh_altivec( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
                                   uint8_t *src, intptr_t i_src_stride,
                                   int mvx, int mvy, int i_height )
{
    uint8_t *srcp;
    int d8x = mvx & 0x07;
    int d8y = mvy & 0x07;

    ALIGNED_16( uint16_t coeff[4] );
    coeff[0] = (8-d8x)*(8-d8y);
    coeff[1] = d8x    *(8-d8y);
    coeff[2] = (8-d8x)*d8y;
    coeff[3] = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
    srcp = &src[i_src_stride];

    LOAD_ZERO;
    vec_u16_t coeff0v, coeff1v, coeff2v, coeff3v;
    vec_u8_t  src2v_8, dstuv, dstvv;
    vec_u16_t src0v_16, src1v_16, src2v_16, src3v_16, dstv16;
    vec_u16_t shiftv, k32v;

#ifdef WORDS_BIGENDIAN
    static const vec_u8_t perm0v = CV(1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13);
    static const vec_u8_t perm1v = CV(3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15);
#else
    static const vec_u8_t perm0v = CV(0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12);
    static const vec_u8_t perm1v = CV(2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14);
#endif

    coeff0v = vec_ld( 0, coeff );
    coeff3v = vec_splat( coeff0v, 3 );
    coeff2v = vec_splat( coeff0v, 2 );
    coeff1v = vec_splat( coeff0v, 1 );
    coeff0v = vec_splat( coeff0v, 0 );

    k32v   = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
    shiftv = vec_splat_u16( 6 );

    src2v_8  = vec_vsx_ld( 0, src );
    src2v_16 = vec_u8_to_u16( src2v_8 );
    src3v_16 = vec_u8_to_u16( VSLD( src2v_8, src2v_8, 2 ) );

    for( int y = 0; y < i_height; y += 2 )
    {
        src0v_16 = src2v_16;
        src1v_16 = src3v_16;
        src2v_8  = vec_vsx_ld( 0, srcp );
        src2v_16 = vec_u8_to_u16( src2v_8 );
        src3v_16 = vec_u8_to_u16( VSLD( src2v_8, src2v_8, 2 ) );

        dstv16 = vec_mladd( coeff0v, src0v_16, k32v );
        dstv16 = vec_mladd( coeff1v, src1v_16, dstv16 );
        dstv16 = vec_mladd( coeff2v, src2v_16, dstv16 );
        dstv16 = vec_mladd( coeff3v, src3v_16, dstv16 );

        dstv16 = vec_sr( dstv16, shiftv );

        dstuv = (vec_u8_t)vec_perm( dstv16, dstv16, perm0v );
        dstvv = (vec_u8_t)vec_perm( dstv16, dstv16, perm1v );
        vec_ste( (vec_u32_t)dstuv, 0, (uint32_t*) dstu );
        vec_ste( (vec_u32_t)dstvv, 0, (uint32_t*) dstv );

        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;

        src0v_16 = src2v_16;
        src1v_16 = src3v_16;
        src2v_8  = vec_vsx_ld( 0, srcp );
        src2v_16 = vec_u8_to_u16( src2v_8 );
        src3v_16 = vec_u8_to_u16( VSLD( src2v_8, src2v_8, 2 ) );

        dstv16 = vec_mladd( coeff0v, src0v_16, k32v );
        dstv16 = vec_mladd( coeff1v, src1v_16, dstv16 );
        dstv16 = vec_mladd( coeff2v, src2v_16, dstv16 );
        dstv16 = vec_mladd( coeff3v, src3v_16, dstv16 );

        dstv16 = vec_sr( dstv16, shiftv );

        dstuv = (vec_u8_t)vec_perm( dstv16, dstv16, perm0v );
        dstvv = (vec_u8_t)vec_perm( dstv16, dstv16, perm1v );
        vec_ste( (vec_u32_t)dstuv, 0, (uint32_t*) dstu );
        vec_ste( (vec_u32_t)dstvv, 0, (uint32_t*) dstv );

        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;
    }
}
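
/* 8-wide variant of the same bilinear chroma filter: the 16 interleaved UV
 * bytes are widened to high/low 16-bit halves so the multiply-adds keep full
 * precision before the final >> 6 and repack. */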
static void mc_chroma_8xh_altivec( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
                                   uint8_t *src, intptr_t i_src_stride,
                                   int mvx, int mvy, int i_height )
{
    uint8_t *srcp;
    int d8x = mvx & 0x07;
    int d8y = mvy & 0x07;

    ALIGNED_16( uint16_t coeff[4] );
    coeff[0] = (8-d8x)*(8-d8y);
    coeff[1] = d8x    *(8-d8y);
    coeff[2] = (8-d8x)*d8y;
    coeff[3] = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
    srcp = &src[i_src_stride];

    LOAD_ZERO;
    PREP_STORE8;
    vec_u16_t coeff0v, coeff1v, coeff2v, coeff3v;
    vec_u8_t  src0v_8, src1v_8, src2v_8, src3v_8;
    vec_u8_t  dstuv, dstvv;
    vec_u16_t src0v_16h, src1v_16h, src2v_16h, src3v_16h, dstv_16h;
    vec_u16_t src0v_16l, src1v_16l, src2v_16l, src3v_16l, dstv_16l;
    vec_u16_t shiftv, k32v;

    coeff0v = vec_ld( 0, coeff );
    coeff3v = vec_splat( coeff0v, 3 );
    coeff2v = vec_splat( coeff0v, 2 );
    coeff1v = vec_splat( coeff0v, 1 );
    coeff0v = vec_splat( coeff0v, 0 );

    k32v   = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
    shiftv = vec_splat_u16( 6 );

#ifdef WORDS_BIGENDIAN
    static const vec_u8_t perm0v = CV(1,5,9,13,17,21,25,29,0,0,0,0,0,0,0,0);
    static const vec_u8_t perm1v = CV(3,7,11,15,19,23,27,31,0,0,0,0,0,0,0,0);
#else
    static const vec_u8_t perm0v = CV(0,4,8,12,16,20,24,28,1,1,1,1,1,1,1,1);
    static const vec_u8_t perm1v = CV(2,6,10,14,18,22,26,30,1,1,1,1,1,1,1,1);
#endif

    src2v_8 = vec_vsx_ld( 0, src );
    src3v_8 = vec_vsx_ld( 16, src );
    src3v_8 = VSLD( src2v_8, src3v_8, 2 );

    for( int y = 0; y < i_height; y += 2 )
    {
        src0v_8 = src2v_8;
        src1v_8 = src3v_8;
        src2v_8 = vec_vsx_ld( 0, srcp );
        src3v_8 = vec_vsx_ld( 16, srcp );

        src3v_8 = VSLD( src2v_8, src3v_8, 2 );

        src0v_16h = vec_u8_to_u16_h( src0v_8 );
        src0v_16l = vec_u8_to_u16_l( src0v_8 );
        src1v_16h = vec_u8_to_u16_h( src1v_8 );
        src1v_16l = vec_u8_to_u16_l( src1v_8 );
        src2v_16h = vec_u8_to_u16_h( src2v_8 );
        src2v_16l = vec_u8_to_u16_l( src2v_8 );
        src3v_16h = vec_u8_to_u16_h( src3v_8 );
        src3v_16l = vec_u8_to_u16_l( src3v_8 );

        dstv_16h = vec_mladd( coeff0v, src0v_16h, k32v );
        dstv_16l = vec_mladd( coeff0v, src0v_16l, k32v );
        dstv_16h = vec_mladd( coeff1v, src1v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff1v, src1v_16l, dstv_16l );
        dstv_16h = vec_mladd( coeff2v, src2v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff2v, src2v_16l, dstv_16l );
        dstv_16h = vec_mladd( coeff3v, src3v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff3v, src3v_16l, dstv_16l );

        dstv_16h = vec_sr( dstv_16h, shiftv );
        dstv_16l = vec_sr( dstv_16l, shiftv );

        dstuv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm0v );
        dstvv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm1v );

        VEC_STORE8( dstuv, dstu );
        VEC_STORE8( dstvv, dstv );

        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;

        src0v_8 = src2v_8;
        src1v_8 = src3v_8;
        src2v_8 = vec_vsx_ld( 0, srcp );
        src3v_8 = vec_vsx_ld( 16, srcp );

        src3v_8 = VSLD( src2v_8, src3v_8, 2 );

        src0v_16h = vec_u8_to_u16_h( src0v_8 );
        src0v_16l = vec_u8_to_u16_l( src0v_8 );
        src1v_16h = vec_u8_to_u16_h( src1v_8 );
        src1v_16l = vec_u8_to_u16_l( src1v_8 );
        src2v_16h = vec_u8_to_u16_h( src2v_8 );
        src2v_16l = vec_u8_to_u16_l( src2v_8 );
        src3v_16h = vec_u8_to_u16_h( src3v_8 );
        src3v_16l = vec_u8_to_u16_l( src3v_8 );

        dstv_16h = vec_mladd( coeff0v, src0v_16h, k32v );
        dstv_16l = vec_mladd( coeff0v, src0v_16l, k32v );
        dstv_16h = vec_mladd( coeff1v, src1v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff1v, src1v_16l, dstv_16l );
        dstv_16h = vec_mladd( coeff2v, src2v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff2v, src2v_16l, dstv_16l );
        dstv_16h = vec_mladd( coeff3v, src3v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff3v, src3v_16l, dstv_16l );

        dstv_16h = vec_sr( dstv_16h, shiftv );
        dstv_16l = vec_sr( dstv_16l, shiftv );

        dstuv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm0v );
        dstvv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm1v );

        VEC_STORE8( dstuv, dstu );
        VEC_STORE8( dstvv, dstv );

        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;
    }
}
static void mc_chroma_altivec( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
                               uint8_t *src, intptr_t i_src_stride,
                               int mvx, int mvy, int i_width, int i_height )
{
    if( i_width == 8 )
        mc_chroma_8xh_altivec( dstu, dstv, i_dst_stride, src, i_src_stride,
                               mvx, mvy, i_height );
    else if( i_width == 4 )
        mc_chroma_4xh_altivec( dstu, dstv, i_dst_stride, src, i_src_stride,
                               mvx, mvy, i_height );
    else
        mc_chroma_2xh( dstu, dstv, i_dst_stride, src, i_src_stride,
                       mvx, mvy, i_height );
}
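
/* Half-pel luma filter.  HPEL_FILTER_1 evaluates the 6-tap H.264 kernel
 * a - 5*b + 20*c (taps 1,-5,20,20,-5,1) on 16-bit intermediates;
 * HPEL_FILTER_2 computes (a-5*b+20*c)/16 without overflow, as used for the
 * centre (hv) plane built from already-filtered rows. */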
#define HPEL_FILTER_1( t1v, t2v, t3v, t4v, t5v, t6v ) \
{ \
    t1v = vec_add( t1v, t6v ); \
    t2v = vec_add( t2v, t5v ); \
    t3v = vec_add( t3v, t4v ); \
 \
    t1v = vec_sub( t1v, t2v );   /* (a-b) */ \
    t2v = vec_sub( t2v, t3v );   /* (b-c) */ \
    t2v = vec_sl( t2v, twov );   /* (b-c)*4 */ \
    t1v = vec_sub( t1v, t2v );   /* a-5*b+4*c */ \
    t3v = vec_sl( t3v, fourv );  /* 16*c */ \
    t1v = vec_add( t1v, t3v );   /* a-5*b+20*c */ \
}

#define HPEL_FILTER_2( t1v, t2v, t3v, t4v, t5v, t6v ) \
{ \
    t1v = vec_add( t1v, t6v ); \
    t2v = vec_add( t2v, t5v ); \
    t3v = vec_add( t3v, t4v ); \
 \
    t1v = vec_sub( t1v, t2v );  /* (a-b) */ \
    t1v = vec_sra( t1v, twov ); /* (a-b)/4 */ \
    t1v = vec_sub( t1v, t2v );  /* (a-b)/4-b */ \
    t1v = vec_add( t1v, t3v );  /* (a-b)/4-b+c */ \
    t1v = vec_sra( t1v, twov ); /* ((a-b)/4-b+c)/4 */ \
    t1v = vec_add( t1v, t3v );  /* ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16 */ \
}

#define HPEL_FILTER_HORIZONTAL() \
{ \
    src1v = vec_vsx_ld( x- 2+i_stride*y, src ); \
    src6v = vec_vsx_ld( x+14+i_stride*y, src ); \
 \
    src2v = VSLD( src1v, src6v, 1 ); \
    src3v = VSLD( src1v, src6v, 2 ); \
    src4v = VSLD( src1v, src6v, 3 ); \
    src5v = VSLD( src1v, src6v, 4 ); \
    src6v = VSLD( src1v, src6v, 5 ); \
 \
    temp1v = vec_u8_to_s16_h( src1v ); \
    temp2v = vec_u8_to_s16_h( src2v ); \
    temp3v = vec_u8_to_s16_h( src3v ); \
    temp4v = vec_u8_to_s16_h( src4v ); \
    temp5v = vec_u8_to_s16_h( src5v ); \
    temp6v = vec_u8_to_s16_h( src6v ); \
 \
    HPEL_FILTER_1( temp1v, temp2v, temp3v, \
                   temp4v, temp5v, temp6v ); \
 \
    dest1v = vec_add( temp1v, sixteenv ); \
    dest1v = vec_sra( dest1v, fivev ); \
 \
    temp1v = vec_u8_to_s16_l( src1v ); \
    temp2v = vec_u8_to_s16_l( src2v ); \
    temp3v = vec_u8_to_s16_l( src3v ); \
    temp4v = vec_u8_to_s16_l( src4v ); \
    temp5v = vec_u8_to_s16_l( src5v ); \
    temp6v = vec_u8_to_s16_l( src6v ); \
 \
    HPEL_FILTER_1( temp1v, temp2v, temp3v, \
                   temp4v, temp5v, temp6v ); \
 \
    dest2v = vec_add( temp1v, sixteenv ); \
    dest2v = vec_sra( dest2v, fivev ); \
 \
    destv = vec_packsu( dest1v, dest2v ); \
 \
    vec_vsx_st( destv, x+i_stride*y, dsth ); \
}

#define HPEL_FILTER_VERTICAL() \
{ \
    src1v = vec_vsx_ld( x+i_stride*(y-2), src ); \
    src2v = vec_vsx_ld( x+i_stride*(y-1), src ); \
    src3v = vec_vsx_ld( x+i_stride*(y-0), src ); \
    src4v = vec_vsx_ld( x+i_stride*(y+1), src ); \
    src5v = vec_vsx_ld( x+i_stride*(y+2), src ); \
    src6v = vec_vsx_ld( x+i_stride*(y+3), src ); \
 \
    temp1v = vec_u8_to_s16_h( src1v ); \
    temp2v = vec_u8_to_s16_h( src2v ); \
    temp3v = vec_u8_to_s16_h( src3v ); \
    temp4v = vec_u8_to_s16_h( src4v ); \
    temp5v = vec_u8_to_s16_h( src5v ); \
    temp6v = vec_u8_to_s16_h( src6v ); \
 \
    HPEL_FILTER_1( temp1v, temp2v, temp3v, \
                   temp4v, temp5v, temp6v ); \
 \
    dest1v = vec_add( temp1v, sixteenv ); \
    dest1v = vec_sra( dest1v, fivev ); \
 \
    temp4v = vec_u8_to_s16_l( src1v ); \
    temp5v = vec_u8_to_s16_l( src2v ); \
    temp6v = vec_u8_to_s16_l( src3v ); \
    temp7v = vec_u8_to_s16_l( src4v ); \
    temp8v = vec_u8_to_s16_l( src5v ); \
    temp9v = vec_u8_to_s16_l( src6v ); \
 \
    HPEL_FILTER_1( temp4v, temp5v, temp6v, \
                   temp7v, temp8v, temp9v ); \
 \
    dest2v = vec_add( temp4v, sixteenv ); \
    dest2v = vec_sra( dest2v, fivev ); \
 \
    destv = vec_packsu( dest1v, dest2v ); \
 \
    vec_vsx_st( destv, x+i_stride*y, dstv ); \
}

#define HPEL_FILTER_CENTRAL() \
{ \
    temp1v = VSLD( tempav, tempbv, 12 ); \
    temp2v = VSLD( tempav, tempbv, 14 ); \
    temp3v = tempbv; \
    temp4v = VSLD( tempbv, tempcv, 2 ); \
    temp5v = VSLD( tempbv, tempcv, 4 ); \
    temp6v = VSLD( tempbv, tempcv, 6 ); \
 \
    HPEL_FILTER_2( temp1v, temp2v, temp3v, \
                   temp4v, temp5v, temp6v ); \
 \
    dest1v = vec_add( temp1v, thirtytwov ); \
    dest1v = vec_sra( dest1v, sixv ); \
 \
    temp1v = VSLD( tempbv, tempcv, 12 ); \
    temp2v = VSLD( tempbv, tempcv, 14 ); \
    temp3v = tempcv; \
    temp4v = VSLD( tempcv, tempdv, 2 ); \
    temp5v = VSLD( tempcv, tempdv, 4 ); \
    temp6v = VSLD( tempcv, tempdv, 6 ); \
 \
    HPEL_FILTER_2( temp1v, temp2v, temp3v, \
                   temp4v, temp5v, temp6v ); \
 \
    dest2v = vec_add( temp1v, thirtytwov ); \
    dest2v = vec_sra( dest2v, sixv ); \
 \
    destv = vec_packsu( dest1v, dest2v ); \
 \
    vec_vsx_st( destv, x-16+i_stride*y, dstc ); \
}
void x264_hpel_filter_altivec( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
                               intptr_t i_stride, int i_width, int i_height, int16_t *buf )
{
    vec_u8_t destv;
    vec_u8_t src1v, src2v, src3v, src4v, src5v, src6v;
    vec_s16_t dest1v, dest2v;
    vec_s16_t temp1v, temp2v, temp3v, temp4v, temp5v, temp6v, temp7v, temp8v, temp9v;
    vec_s16_t tempav, tempbv, tempcv, tempdv, tempev;

    LOAD_ZERO;

    vec_u16_t twov, fourv, fivev, sixv;
    vec_s16_t sixteenv, thirtytwov;
    vec_u16_u temp_u;

    temp_u.s[0] = 2;
    twov = vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 4;
    fourv = vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 5;
    fivev = vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 6;
    sixv = vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 16;
    sixteenv = (vec_s16_t)vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 32;
    thirtytwov = (vec_s16_t)vec_splat( temp_u.v, 0 );

    for( int y = 0; y < i_height; y++ )
    {
        int x = 0;

        /* horizontal_filter */
        HPEL_FILTER_HORIZONTAL();

        /* vertical_filter */
        HPEL_FILTER_VERTICAL();

        /* central_filter */
        tempav = tempcv;
        tempbv = tempdv;
        tempcv = vec_splat( temp1v, 0 ); /* first only */
        tempdv = temp1v;
        tempev = temp4v;

        for( x = 16; x < i_width; x += 16 )
        {
            /* horizontal_filter */
            HPEL_FILTER_HORIZONTAL();

            /* vertical_filter */
            HPEL_FILTER_VERTICAL();

            /* central_filter */
            tempav = tempcv;
            tempbv = tempdv;
            tempcv = tempev;
            tempdv = temp1v;
            tempev = temp4v;

            HPEL_FILTER_CENTRAL();
        }

        /* Partial vertical filter */
        src1v = vec_vsx_ld( x+i_stride*(y-2), src );
        src2v = vec_vsx_ld( x+i_stride*(y-1), src );
        src3v = vec_vsx_ld( x+i_stride*(y-0), src );
        src4v = vec_vsx_ld( x+i_stride*(y+1), src );
        src5v = vec_vsx_ld( x+i_stride*(y+2), src );
        src6v = vec_vsx_ld( x+i_stride*(y+3), src );

        temp1v = vec_u8_to_s16_h( src1v );
        temp2v = vec_u8_to_s16_h( src2v );
        temp3v = vec_u8_to_s16_h( src3v );
        temp4v = vec_u8_to_s16_h( src4v );
        temp5v = vec_u8_to_s16_h( src5v );
        temp6v = vec_u8_to_s16_h( src6v );

        HPEL_FILTER_1( temp1v, temp2v, temp3v, temp4v, temp5v, temp6v );

        /* central_filter */
        tempav = tempcv;
        tempbv = tempdv;
        tempcv = tempev;
        tempdv = temp1v;
        /* tempev is not used */

        HPEL_FILTER_CENTRAL();
    }
}
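
/* Downscale a frame by 2x in each dimension into four half-pel-offset planes
 * (dst0/dsth/dstv/dstc) using pairwise vec_avg of neighbouring rows and
 * columns; these are the lowres planes used by the lookahead. */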
static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
                                            intptr_t src_stride, intptr_t dst_stride, int width, int height )
{
    int w = width >> 4;
    int end = (width & 15);
    vec_u8_t src0v, src1v, src2v;
    vec_u8_t lv, hv, src1p1v;
    vec_u8_t avg0v, avg1v, avghv, avghp1v, avgleftv, avgrightv;
    static const vec_u8_t inverse_bridge_shuffle = CV(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E );
#ifndef WORDS_BIGENDIAN
    static const vec_u8_t inverse_bridge_shuffle_1 = CV(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F );
#endif

    for( int y = 0; y < height; y++ )
    {
        int x;
        uint8_t *src1 = src0+src_stride;
        uint8_t *src2 = src1+src_stride;

        src0v = vec_ld(0, src0);
        src1v = vec_ld(0, src1);
        src2v = vec_ld(0, src2);

        avg0v = vec_avg(src0v, src1v);
        avg1v = vec_avg(src1v, src2v);

        for( x = 0; x < w; x++ )
        {
            lv = vec_ld(16*(x*2+1), src0);
            src1v = vec_ld(16*(x*2+1), src1);
            avghv = vec_avg(lv, src1v);

            lv = vec_ld(16*(x*2+2), src0);
            src1p1v = vec_ld(16*(x*2+2), src1);
            avghp1v = vec_avg(lv, src1p1v);

            avgleftv = vec_avg(VSLD(avg0v, avghv, 1), avg0v);
            avgrightv = vec_avg(VSLD(avghv, avghp1v, 1), avghv);

            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dst0);
#ifdef WORDS_BIGENDIAN
            vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dsth);
#else
            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle_1), 16*x, dsth);
#endif

            avg0v = avghp1v;

            hv = vec_ld(16*(x*2+1), src2);
            avghv = vec_avg(src1v, hv);

            hv = vec_ld(16*(x*2+2), src2);
            avghp1v = vec_avg(src1p1v, hv);

            avgleftv = vec_avg(VSLD(avg1v, avghv, 1), avg1v);
            avgrightv = vec_avg(VSLD(avghv, avghp1v, 1), avghv);

            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dstv);
#ifdef WORDS_BIGENDIAN
            vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dstc);
#else
            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle_1), 16*x, dstc);
#endif

            avg1v = avghp1v;
        }
        if( end )
        {
            lv = vec_ld(16*(x*2+1), src0);
            src1v = vec_ld(16*(x*2+1), src1);
            avghv = vec_avg(lv, src1v);

            lv = vec_ld(16*(x*2+1), src2);
            avghp1v = vec_avg(src1v, lv);

            avgleftv = vec_avg(VSLD(avg0v, avghv, 1), avg0v);
            avgrightv = vec_avg(VSLD(avg1v, avghp1v, 1), avg1v);

            lv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle);
#ifdef WORDS_BIGENDIAN
            hv = (vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv);
#else
            hv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle_1);
#endif

            vec_ste((vec_u32_t)lv, 16*x,   (uint32_t*)dst0);
            vec_ste((vec_u32_t)lv, 16*x+4, (uint32_t*)dst0);
            vec_ste((vec_u32_t)hv, 16*x,   (uint32_t*)dsth);
            vec_ste((vec_u32_t)hv, 16*x+4, (uint32_t*)dsth);

            lv = vec_sld(lv, lv, 8);
            hv = vec_sld(hv, hv, 8);

            vec_ste((vec_u32_t)lv, 16*x,   (uint32_t*)dstv);
            vec_ste((vec_u32_t)lv, 16*x+4, (uint32_t*)dstv);
            vec_ste((vec_u32_t)hv, 16*x,   (uint32_t*)dstc);
            vec_ste((vec_u32_t)hv, 16*x+4, (uint32_t*)dstc);
        }

        src0 += src_stride*2;
        dst0 += dst_stride;
        dsth += dst_stride;
        dstv += dst_stride;
        dstc += dst_stride;
    }
}
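
/* Weighted prediction, per block width:
 * dst = clip( ((src * i_scale + (1 << (i_denom-1))) >> i_denom) + i_offset ),
 * or simply clip( src * i_scale + i_offset ) when i_denom == 0. */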
static void mc_weight_w2_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                  const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    vec_u8_t srcv;
    vec_s16_t weightv;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            srcv = vec_vsx_ld( 0, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, roundv );
            weightv = vec_sra( weightv, (vec_u16_t)denomv );
            weightv = vec_add( weightv, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            vec_ste( vec_splat( (vec_u16_t)srcv, 0 ), 0, (uint16_t*)dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            srcv = vec_vsx_ld( 0, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            vec_ste( vec_splat( (vec_u16_t)srcv, 0 ), 0, (uint16_t*)dst );
        }
    }
}
static void mc_weight_w4_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                  const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    vec_u8_t srcv;
    vec_s16_t weightv;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            srcv = vec_vsx_ld( 0, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, roundv );
            weightv = vec_sra( weightv, (vec_u16_t)denomv );
            weightv = vec_add( weightv, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            vec_ste( vec_splat( (vec_u32_t)srcv, 0 ), 0, (uint32_t*)dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            srcv = vec_vsx_ld( 0, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            vec_ste( vec_splat( (vec_u32_t)srcv, 0 ), 0, (uint32_t*)dst );
        }
    }
}
static void mc_weight_w8_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                  const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    PREP_STORE8;
    vec_u8_t srcv;
    vec_s16_t weightv;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            srcv = vec_vsx_ld( 0, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, roundv );
            weightv = vec_sra( weightv, (vec_u16_t)denomv );
            weightv = vec_add( weightv, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            VEC_STORE8( srcv, dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            srcv = vec_vsx_ld( 0, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            VEC_STORE8( srcv, dst );
        }
    }
}
static void mc_weight_w16_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                   const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    vec_u8_t srcv;
    vec_s16_t weight_lv, weight_hv;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            srcv = vec_vsx_ld( 0, src );
            weight_hv = vec_u8_to_s16_h( srcv );
            weight_lv = vec_u8_to_s16_l( srcv );

            weight_hv = vec_mladd( weight_hv, scalev, roundv );
            weight_lv = vec_mladd( weight_lv, scalev, roundv );
            weight_hv = vec_sra( weight_hv, (vec_u16_t)denomv );
            weight_lv = vec_sra( weight_lv, (vec_u16_t)denomv );
            weight_hv = vec_add( weight_hv, offsetv );
            weight_lv = vec_add( weight_lv, offsetv );

            srcv = vec_packsu( weight_hv, weight_lv );
            vec_st( srcv, 0, dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            srcv = vec_vsx_ld( 0, src );
            weight_hv = vec_u8_to_s16_h( srcv );
            weight_lv = vec_u8_to_s16_l( srcv );

            weight_hv = vec_mladd( weight_hv, scalev, offsetv );
            weight_lv = vec_mladd( weight_lv, scalev, offsetv );

            srcv = vec_packsu( weight_hv, weight_lv );
            vec_st( srcv, 0, dst );
        }
    }
}
static void mc_weight_w20_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                   const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    vec_u8_t srcv, srcv2;
    vec_s16_t weight_lv, weight_hv, weight_3v;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        int16_t round = 1 << (denom - 1);
        vec_s16_t tab[4] = {
            { weight->i_scale,  weight->i_scale,  weight->i_scale,  weight->i_scale,  1, 1, 1, 1 },
            { weight->i_offset, weight->i_offset, weight->i_offset, weight->i_offset, 0, 0, 0, 0 },
            { denom, denom, denom, denom, 0, 0, 0, 0 },
            { round, round, round, round, 0, 0, 0, 0 },
        };

        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = round;
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            srcv = vec_vsx_ld( 0, src );
            srcv2 = vec_vsx_ld( 16, src );

            weight_hv = vec_u8_to_s16_h( srcv );
            weight_lv = vec_u8_to_s16_l( srcv );
            weight_3v = vec_u8_to_s16_h( srcv2 );

            weight_hv = vec_mladd( weight_hv, scalev, roundv );
            weight_lv = vec_mladd( weight_lv, scalev, roundv );
            weight_3v = vec_mladd( weight_3v, tab[0], tab[3] );

            weight_hv = vec_sra( weight_hv, (vec_u16_t)denomv );
            weight_lv = vec_sra( weight_lv, (vec_u16_t)denomv );
            weight_3v = vec_sra( weight_3v, (vec_u16_t)tab[2] );

            weight_hv = vec_add( weight_hv, offsetv );
            weight_lv = vec_add( weight_lv, offsetv );
            weight_3v = vec_add( weight_3v, tab[1] );

            srcv = vec_packsu( weight_hv, weight_lv );
            srcv2 = vec_packsu( weight_3v, vec_u8_to_s16_l( srcv2 ) );

            vec_vsx_st( srcv, 0, dst );
            vec_vsx_st( srcv2, 16, dst );
        }
    }
    else
    {
        vec_s16_t offset_mask = { weight->i_offset, weight->i_offset, weight->i_offset,
                                  weight->i_offset, 0, 0, 0, 0 };

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            srcv = vec_vsx_ld( 0, src );
            srcv2 = vec_vsx_ld( 16, src );

            weight_hv = vec_u8_to_s16_h( srcv );
            weight_lv = vec_u8_to_s16_l( srcv );
            weight_3v = vec_u8_to_s16_h( srcv2 );

            weight_hv = vec_mladd( weight_hv, scalev, offsetv );
            weight_lv = vec_mladd( weight_lv, scalev, offsetv );
            weight_3v = vec_mladd( weight_3v, scalev, offset_mask );

            srcv = vec_packsu( weight_hv, weight_lv );
            srcv2 = vec_packsu( weight_3v, vec_u8_to_s16_l( srcv2 ) );

            vec_vsx_st( srcv, 0, dst );
            vec_vsx_st( srcv2, 16, dst );
        }
    }
}
static weight_fn_t mc_weight_wtab_altivec[6] =
{
    mc_weight_w2_altivec,
    mc_weight_w4_altivec,
    mc_weight_w8_altivec,
    mc_weight_w16_altivec,
    mc_weight_w16_altivec,
    mc_weight_w20_altivec,
};

PLANE_COPY_SWAP(16, altivec)
PLANE_INTERLEAVE(altivec)
#endif // !HIGH_BIT_DEPTH
#if HIGH_BIT_DEPTH

#define LOAD_SRC( l ) \
{ \
    srcv[l] = vec_vsx_ld( s, src ); \
    s += 16; \
    srcv[l + 1] = vec_vsx_ld( s, src ); \
    s += 16; \
}

#define STORE_8( mask, shift, dst, a, b ) \
{ \
    dstv = (vec_u16_t)vec_perm( srcv[a], srcv[b], mask ); \
    dstv = vec_sr( dstv, shift ); \
    dstv = vec_and( dstv, and_mask ); \
 \
    vec_st( dstv, offset, dst ); \
}
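
/* v210 packs three 10-bit components into each 32-bit word, so a 16-byte
 * vector holds 12 components; each iteration of the width loop below consumes
 * 64 bytes of source (24 luma plus 24 interleaved chroma samples), which is
 * why it advances by 24. */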
// v210 input is only compatible with bit-depth of 10 bits
void x264_plane_copy_deinterleave_v210_altivec( uint16_t *dsty, intptr_t i_dsty,
                                                uint16_t *dstc, intptr_t i_dstc,
                                                uint32_t *src, intptr_t i_src, int w, int h )
{
#ifdef WORDS_BIGENDIAN
    const vec_u8_t masky[3] = {
        { 0x02, 0x01, 0x05, 0x04, 0x07, 0x06, 0x0A, 0x09, 0x0D, 0x0C, 0x0F, 0x0E, 0x12, 0x11, 0x15, 0x14 },
        { 0x07, 0x06, 0x0A, 0x09, 0x0D, 0x0C, 0x0F, 0x0E, 0x12, 0x11, 0x15, 0x14, 0x17, 0x16, 0x1A, 0x19 },
        { 0x0D, 0x0C, 0x0F, 0x0E, 0x12, 0x11, 0x15, 0x14, 0x17, 0x16, 0x1A, 0x19, 0x1D, 0x1C, 0x1F, 0x1E }
    };
    const vec_u8_t maskc[3] = {
        { 0x01, 0x00, 0x03, 0x02, 0x06, 0x05, 0x09, 0x08, 0x0B, 0x0A, 0x0E, 0x0D, 0x11, 0x10, 0x13, 0x12 },
        { 0x06, 0x05, 0x09, 0x08, 0x0B, 0x0A, 0x0E, 0x0D, 0x11, 0x10, 0x13, 0x12, 0x16, 0x15, 0x19, 0x18 },
        { 0x0B, 0x0A, 0x0E, 0x0D, 0x11, 0x10, 0x13, 0x12, 0x16, 0x15, 0x19, 0x18, 0x1B, 0x1A, 0x1E, 0x1D }
    };
#else
    const vec_u8_t masky[3] = {
        { 0x01, 0x02, 0x04, 0x05, 0x06, 0x07, 0x09, 0x0A, 0x0C, 0x0D, 0x0E, 0x0F, 0x11, 0x12, 0x14, 0x15 },
        { 0x06, 0x07, 0x09, 0x0A, 0x0C, 0x0D, 0x0E, 0x0F, 0x11, 0x12, 0x14, 0x15, 0x16, 0x17, 0x19, 0x1A },
        { 0x0C, 0x0D, 0x0E, 0x0F, 0x11, 0x12, 0x14, 0x15, 0x16, 0x17, 0x19, 0x1A, 0x1C, 0x1D, 0x1E, 0x1F }
    };
    const vec_u8_t maskc[3] = {
        { 0x00, 0x01, 0x02, 0x03, 0x05, 0x06, 0x08, 0x09, 0x0A, 0x0B, 0x0D, 0x0E, 0x10, 0x11, 0x12, 0x13 },
        { 0x05, 0x06, 0x08, 0x09, 0x0A, 0x0B, 0x0D, 0x0E, 0x10, 0x11, 0x12, 0x13, 0x15, 0x16, 0x18, 0x19 },
        { 0x0A, 0x0B, 0x0D, 0x0E, 0x10, 0x11, 0x12, 0x13, 0x15, 0x16, 0x18, 0x19, 0x1A, 0x1B, 0x1D, 0x1E }
    };
#endif

    const vec_u16_t shift[3] = {
        { 0, 4, 2, 0, 4, 2, 0, 4 },
        { 2, 0, 4, 2, 0, 4, 2, 0 },
        { 4, 2, 0, 4, 2, 0, 4, 2 }
    };

    vec_u16_t dstv;
    vec_u16_t and_mask = vec_sub( vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 10 ) ), vec_splat_u16( 1 ) );
    vec_u32_t srcv[4];

    for( int i = 0; i < h; i++ )
    {
        int offset = 0;
        int s = 0;

        for( int j = 0; j < w; j += 24 )
        {
            LOAD_SRC( 0 );
            STORE_8( maskc[0], shift[0], dstc, 0, 1 );
            STORE_8( masky[0], shift[1], dsty, 0, 1 );
            offset += 16;

            LOAD_SRC( 2 );
            STORE_8( maskc[1], shift[1], dstc, 1, 2 );
            STORE_8( masky[1], shift[2], dsty, 1, 2 );
            offset += 16;

            STORE_8( maskc[2], shift[2], dstc, 2, 3 );
            STORE_8( masky[2], shift[0], dsty, 2, 3 );
            offset += 16;
        }

        dsty += i_dsty;
        dstc += i_dstc;
        src += i_src;
    }
}
#endif // HIGH_BIT_DEPTH
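
/* Register the AltiVec/VSX implementations in the motion-compensation
 * function table; for high bit depth only the v210 deinterleave is provided. */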
void x264_mc_init_altivec( x264_mc_functions_t *pf )
{
#if HIGH_BIT_DEPTH
    pf->plane_copy_deinterleave_v210 = x264_plane_copy_deinterleave_v210_altivec;
#else // !HIGH_BIT_DEPTH
    pf->mc_luma   = mc_luma_altivec;
    pf->get_ref   = get_ref_altivec;
    pf->mc_chroma = mc_chroma_altivec;

    pf->copy_16x16_unaligned = mc_copy_w16_altivec;
    pf->copy[PIXEL_16x16]    = mc_copy_w16_aligned_altivec;

    pf->hpel_filter = x264_hpel_filter_altivec;
    pf->frame_init_lowres_core = frame_init_lowres_core_altivec;

    pf->weight = mc_weight_wtab_altivec;

    pf->plane_copy_swap = plane_copy_swap_altivec;
    pf->plane_copy_interleave = plane_copy_interleave_altivec;
    pf->store_interleave_chroma = x264_store_interleave_chroma_altivec;
    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_altivec;
    pf->load_deinterleave_chroma_fenc = load_deinterleave_chroma_fenc_altivec;
#if HAVE_VSX
    pf->plane_copy_deinterleave_rgb = x264_plane_copy_deinterleave_rgb_altivec;
#endif // HAVE_VSX
#endif // !HIGH_BIT_DEPTH
}