/*****************************************************************************
 * predict.c: intra prediction
 *****************************************************************************
 * Copyright (C) 2003-2018 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *          Henrik Gramner <henrik@gramner.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

/* predict4x4 are inspired from ffmpeg h264 decoder */

#include "common.h"

#if HAVE_MMX
# include "x86/predict.h"
#endif
#if ARCH_PPC
# include "ppc/predict.h"
#endif
#if ARCH_ARM
# include "arm/predict.h"
#endif
#if ARCH_AARCH64
# include "aarch64/predict.h"
#endif
#if ARCH_MIPS
# include "mips/predict.h"
#endif
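
/* All predictors below write into the frame decode buffer: `src` points at
 * the top-left pixel of the block and rows are FDEC_STRIDE pixels apart, so
 * src[-1 + y*FDEC_STRIDE] is the reconstructed left neighbour of row y and
 * src[x - FDEC_STRIDE] the top neighbour of column x. MPIXEL_X4 reads or
 * writes four pixels as one pixel4, and PIXEL_SPLAT_X4 replicates a single
 * pixel value four times for such stores. */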

/****************************************************************************
 * 16x16 prediction for intra luma block
 ****************************************************************************/

#define PREDICT_16x16_DC(v)\
    for( int i = 0; i < 16; i++ )\
    {\
        MPIXEL_X4( src+ 0 ) = v;\
        MPIXEL_X4( src+ 4 ) = v;\
        MPIXEL_X4( src+ 8 ) = v;\
        MPIXEL_X4( src+12 ) = v;\
        src += FDEC_STRIDE;\
    }
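
/* DC prediction fills the block with the rounded mean of the available
 * neighbours: 32 samples give (sum+16)>>5, the left-only and top-only
 * variants average 16 samples as (sum+8)>>4, and with no neighbours the
 * block is filled with the bit-depth midpoint 1<<(BIT_DEPTH-1). */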
void x264_predict_16x16_dc_c( pixel *src )
{
    int dc = 0;

    for( int i = 0; i < 16; i++ )
    {
        dc += src[-1 + i * FDEC_STRIDE];
        dc += src[i - FDEC_STRIDE];
    }
    pixel4 dcsplat = PIXEL_SPLAT_X4( ( dc + 16 ) >> 5 );

    PREDICT_16x16_DC( dcsplat );
}

static void predict_16x16_dc_left_c( pixel *src )
{
    int dc = 0;

    for( int i = 0; i < 16; i++ )
        dc += src[-1 + i * FDEC_STRIDE];
    pixel4 dcsplat = PIXEL_SPLAT_X4( ( dc + 8 ) >> 4 );

    PREDICT_16x16_DC( dcsplat );
}

static void predict_16x16_dc_top_c( pixel *src )
{
    int dc = 0;

    for( int i = 0; i < 16; i++ )
        dc += src[i - FDEC_STRIDE];
    pixel4 dcsplat = PIXEL_SPLAT_X4( ( dc + 8 ) >> 4 );

    PREDICT_16x16_DC( dcsplat );
}

static void predict_16x16_dc_128_c( pixel *src )
{
    PREDICT_16x16_DC( PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) ) );
}

void x264_predict_16x16_h_c( pixel *src )
{
    for( int i = 0; i < 16; i++ )
    {
        const pixel4 v = PIXEL_SPLAT_X4( src[-1] );

        MPIXEL_X4( src+ 0 ) = v;
        MPIXEL_X4( src+ 4 ) = v;
        MPIXEL_X4( src+ 8 ) = v;
        MPIXEL_X4( src+12 ) = v;
        src += FDEC_STRIDE;
    }
}

void x264_predict_16x16_v_c( pixel *src )
{
    pixel4 v0 = MPIXEL_X4( &src[ 0-FDEC_STRIDE] );
    pixel4 v1 = MPIXEL_X4( &src[ 4-FDEC_STRIDE] );
    pixel4 v2 = MPIXEL_X4( &src[ 8-FDEC_STRIDE] );
    pixel4 v3 = MPIXEL_X4( &src[12-FDEC_STRIDE] );

    for( int i = 0; i < 16; i++ )
    {
        MPIXEL_X4( src+ 0 ) = v0;
        MPIXEL_X4( src+ 4 ) = v1;
        MPIXEL_X4( src+ 8 ) = v2;
        MPIXEL_X4( src+12 ) = v3;
        src += FDEC_STRIDE;
    }
}
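
/* Plane prediction fits a linear ramp to the border samples:
 *     pred[y][x] = clip( ( a + b*(x-7) + c*(y-7) + 16 ) >> 5 )
 * where a is 16 times the sum of the left neighbour of the last row and the
 * top neighbour of the last column, and b, c are scaled horizontal/vertical
 * gradients measured across the borders. The loop evaluates the ramp
 * incrementally: i00 = a - 7*b - 7*c + 16 is the unshifted value at (0,0),
 * each column adds b and each row adds c before the final >>5 and clip. */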
void x264_predict_16x16_p_c( pixel *src )
{
    int H = 0, V = 0;

    /* calculate H and V */
    for( int i = 0; i <= 7; i++ )
    {
        H += ( i + 1 ) * ( src[ 8 + i - FDEC_STRIDE ] - src[6 -i -FDEC_STRIDE] );
        V += ( i + 1 ) * ( src[-1 + (8+i)*FDEC_STRIDE] - src[-1 + (6-i)*FDEC_STRIDE] );
    }

    int a = 16 * ( src[-1 + 15*FDEC_STRIDE] + src[15 - FDEC_STRIDE] );
    int b = ( 5 * H + 32 ) >> 6;
    int c = ( 5 * V + 32 ) >> 6;
    int i00 = a - b * 7 - c * 7 + 16;

    for( int y = 0; y < 16; y++ )
    {
        int pix = i00;
        for( int x = 0; x < 16; x++ )
        {
            src[x] = x264_clip_pixel( pix>>5 );
            pix += b;
        }
        src += FDEC_STRIDE;
        i00 += c;
    }
}

/****************************************************************************
 * 8x8 prediction for intra chroma block (4:2:0)
 ****************************************************************************/
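
/* 4:2:0 chroma DC is computed per 4x4 quadrant: the top-left quadrant
 * averages its adjacent top and left border sums, the top-right and
 * bottom-left quadrants use only their adjacent border, and the
 * bottom-right quadrant averages the top-right and bottom-left border
 * sums (s1 and s3 in the full-DC function below). */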
static void predict_8x8c_dc_128_c( pixel *src )
{
    for( int y = 0; y < 8; y++ )
    {
        MPIXEL_X4( src+0 ) = PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) );
        MPIXEL_X4( src+4 ) = PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) );
        src += FDEC_STRIDE;
    }
}

static void predict_8x8c_dc_left_c( pixel *src )
{
    int dc0 = 0, dc1 = 0;

    for( int y = 0; y < 4; y++ )
    {
        dc0 += src[y * FDEC_STRIDE     - 1];
        dc1 += src[(y+4) * FDEC_STRIDE - 1];
    }
    pixel4 dc0splat = PIXEL_SPLAT_X4( ( dc0 + 2 ) >> 2 );
    pixel4 dc1splat = PIXEL_SPLAT_X4( ( dc1 + 2 ) >> 2 );

    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc0splat;
        MPIXEL_X4( src+4 ) = dc0splat;
        src += FDEC_STRIDE;
    }
    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc1splat;
        MPIXEL_X4( src+4 ) = dc1splat;
        src += FDEC_STRIDE;
    }
}

static void predict_8x8c_dc_top_c( pixel *src )
{
    int dc0 = 0, dc1 = 0;

    for( int x = 0; x < 4; x++ )
    {
        dc0 += src[x     - FDEC_STRIDE];
        dc1 += src[x + 4 - FDEC_STRIDE];
    }
    pixel4 dc0splat = PIXEL_SPLAT_X4( ( dc0 + 2 ) >> 2 );
    pixel4 dc1splat = PIXEL_SPLAT_X4( ( dc1 + 2 ) >> 2 );

    for( int y = 0; y < 8; y++ )
    {
        MPIXEL_X4( src+0 ) = dc0splat;
        MPIXEL_X4( src+4 ) = dc1splat;
        src += FDEC_STRIDE;
    }
}

void x264_predict_8x8c_dc_c( pixel *src )
{
    int s0 = 0, s1 = 0, s2 = 0, s3 = 0;

    /*
          s0 s1
       s2
       s3
    */
    for( int i = 0; i < 4; i++ )
    {
        s0 += src[i - FDEC_STRIDE];
        s1 += src[i + 4 - FDEC_STRIDE];
        s2 += src[-1 + i * FDEC_STRIDE];
        s3 += src[-1 + (i+4)*FDEC_STRIDE];
    }
    /*
       dc0 dc1
       dc2 dc3
    */
    pixel4 dc0 = PIXEL_SPLAT_X4( ( s0 + s2 + 4 ) >> 3 );
    pixel4 dc1 = PIXEL_SPLAT_X4( ( s1 + 2 ) >> 2 );
    pixel4 dc2 = PIXEL_SPLAT_X4( ( s3 + 2 ) >> 2 );
    pixel4 dc3 = PIXEL_SPLAT_X4( ( s1 + s3 + 4 ) >> 3 );

    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc0;
        MPIXEL_X4( src+4 ) = dc1;
        src += FDEC_STRIDE;
    }
    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc2;
        MPIXEL_X4( src+4 ) = dc3;
        src += FDEC_STRIDE;
    }
}

void x264_predict_8x8c_h_c( pixel *src )
{
    for( int i = 0; i < 8; i++ )
    {
        pixel4 v = PIXEL_SPLAT_X4( src[-1] );
        MPIXEL_X4( src+0 ) = v;
        MPIXEL_X4( src+4 ) = v;
        src += FDEC_STRIDE;
    }
}

void x264_predict_8x8c_v_c( pixel *src )
{
    pixel4 v0 = MPIXEL_X4( src+0-FDEC_STRIDE );
    pixel4 v1 = MPIXEL_X4( src+4-FDEC_STRIDE );

    for( int i = 0; i < 8; i++ )
    {
        MPIXEL_X4( src+0 ) = v0;
        MPIXEL_X4( src+4 ) = v1;
        src += FDEC_STRIDE;
    }
}
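
/* Chroma plane prediction uses the same incremental ramp as luma, but the
 * 8-wide block measures only 4 gradient terms per border and uses the
 * chroma scaling b = (17*H + 16) >> 5 (likewise for c), with the ramp
 * centred on (3,3) instead of (7,7): i00 = a - 3*b - 3*c + 16. */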
void x264_predict_8x8c_p_c( pixel *src )
{
    int H = 0, V = 0;

    for( int i = 0; i < 4; i++ )
    {
        H += ( i + 1 ) * ( src[4+i - FDEC_STRIDE] - src[2 - i -FDEC_STRIDE] );
        V += ( i + 1 ) * ( src[-1 +(i+4)*FDEC_STRIDE] - src[-1+(2-i)*FDEC_STRIDE] );
    }

    int a = 16 * ( src[-1+7*FDEC_STRIDE] + src[7 - FDEC_STRIDE] );
    int b = ( 17 * H + 16 ) >> 5;
    int c = ( 17 * V + 16 ) >> 5;
    int i00 = a -3*b -3*c + 16;

    for( int y = 0; y < 8; y++ )
    {
        int pix = i00;
        for( int x = 0; x < 8; x++ )
        {
            src[x] = x264_clip_pixel( pix>>5 );
            pix += b;
        }
        src += FDEC_STRIDE;
        i00 += c;
    }
}

/****************************************************************************
 * 8x16 prediction for intra chroma block (4:2:2)
 ****************************************************************************/

static void predict_8x16c_dc_128_c( pixel *src )
{
    for( int y = 0; y < 16; y++ )
    {
        MPIXEL_X4( src+0 ) = PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) );
        MPIXEL_X4( src+4 ) = PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) );
        src += FDEC_STRIDE;
    }
}

static void predict_8x16c_dc_left_c( pixel *src )
{
    for( int i = 0; i < 4; i++ )
    {
        int dc = 0;

        for( int y = 0; y < 4; y++ )
            dc += src[y*FDEC_STRIDE - 1];

        pixel4 dcsplat = PIXEL_SPLAT_X4( (dc + 2) >> 2 );

        for( int y = 0; y < 4; y++ )
        {
            MPIXEL_X4( src+0 ) = dcsplat;
            MPIXEL_X4( src+4 ) = dcsplat;
            src += FDEC_STRIDE;
        }
    }
}

static void predict_8x16c_dc_top_c( pixel *src )
{
    int dc0 = 0, dc1 = 0;

    for( int x = 0; x < 4; x++ )
    {
        dc0 += src[x     - FDEC_STRIDE];
        dc1 += src[x + 4 - FDEC_STRIDE];
    }
    pixel4 dc0splat = PIXEL_SPLAT_X4( ( dc0 + 2 ) >> 2 );
    pixel4 dc1splat = PIXEL_SPLAT_X4( ( dc1 + 2 ) >> 2 );

    for( int y = 0; y < 16; y++ )
    {
        MPIXEL_X4( src+0 ) = dc0splat;
        MPIXEL_X4( src+4 ) = dc1splat;
        src += FDEC_STRIDE;
    }
}

void x264_predict_8x16c_dc_c( pixel *src )
{
    int s0 = 0, s1 = 0, s2 = 0, s3 = 0, s4 = 0, s5 = 0;

    /*
          s0 s1
       s2
       s3
       s4
       s5
    */
    for( int i = 0; i < 4; i++ )
    {
        s0 += src[i+0 - FDEC_STRIDE];
        s1 += src[i+4 - FDEC_STRIDE];
        s2 += src[-1 + (i+0)  * FDEC_STRIDE];
        s3 += src[-1 + (i+4)  * FDEC_STRIDE];
        s4 += src[-1 + (i+8)  * FDEC_STRIDE];
        s5 += src[-1 + (i+12) * FDEC_STRIDE];
    }
    /*
       dc0 dc1
       dc2 dc3
       dc4 dc5
       dc6 dc7
    */
    pixel4 dc0 = PIXEL_SPLAT_X4( ( s0 + s2 + 4 ) >> 3 );
    pixel4 dc1 = PIXEL_SPLAT_X4( ( s1 + 2 ) >> 2 );
    pixel4 dc2 = PIXEL_SPLAT_X4( ( s3 + 2 ) >> 2 );
    pixel4 dc3 = PIXEL_SPLAT_X4( ( s1 + s3 + 4 ) >> 3 );
    pixel4 dc4 = PIXEL_SPLAT_X4( ( s4 + 2 ) >> 2 );
    pixel4 dc5 = PIXEL_SPLAT_X4( ( s1 + s4 + 4 ) >> 3 );
    pixel4 dc6 = PIXEL_SPLAT_X4( ( s5 + 2 ) >> 2 );
    pixel4 dc7 = PIXEL_SPLAT_X4( ( s1 + s5 + 4 ) >> 3 );

    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc0;
        MPIXEL_X4( src+4 ) = dc1;
        src += FDEC_STRIDE;
    }
    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc2;
        MPIXEL_X4( src+4 ) = dc3;
        src += FDEC_STRIDE;
    }
    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc4;
        MPIXEL_X4( src+4 ) = dc5;
        src += FDEC_STRIDE;
    }
    for( int y = 0; y < 4; y++ )
    {
        MPIXEL_X4( src+0 ) = dc6;
        MPIXEL_X4( src+4 ) = dc7;
        src += FDEC_STRIDE;
    }
}

void x264_predict_8x16c_h_c( pixel *src )
{
    for( int i = 0; i < 16; i++ )
    {
        pixel4 v = PIXEL_SPLAT_X4( src[-1] );
        MPIXEL_X4( src+0 ) = v;
        MPIXEL_X4( src+4 ) = v;
        src += FDEC_STRIDE;
    }
}

void x264_predict_8x16c_v_c( pixel *src )
{
    pixel4 v0 = MPIXEL_X4( src+0-FDEC_STRIDE );
    pixel4 v1 = MPIXEL_X4( src+4-FDEC_STRIDE );

    for( int i = 0; i < 16; i++ )
    {
        MPIXEL_X4( src+0 ) = v0;
        MPIXEL_X4( src+4 ) = v1;
        src += FDEC_STRIDE;
    }
}
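
/* 4:2:2 plane prediction mixes the two scalings above: the 8-wide
 * horizontal ramp keeps the chroma constant b = (17*H + 16) >> 5 over 4
 * terms, while the 16-tall vertical ramp uses the luma-style
 * c = (5*V + 32) >> 6 over 8 terms, centred on (3,7):
 * i00 = a - 3*b - 7*c + 16. */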
void x264_predict_8x16c_p_c( pixel *src )
{
    int H = 0;
    int V = 0;

    for( int i = 0; i < 4; i++ )
        H += ( i + 1 ) * ( src[4 + i - FDEC_STRIDE] - src[2 - i - FDEC_STRIDE] );
    for( int i = 0; i < 8; i++ )
        V += ( i + 1 ) * ( src[-1 + (i+8)*FDEC_STRIDE] - src[-1 + (6-i)*FDEC_STRIDE] );

    int a = 16 * ( src[-1 + 15*FDEC_STRIDE] + src[7 - FDEC_STRIDE] );
    int b = ( 17 * H + 16 ) >> 5;
    int c = ( 5 * V + 32 ) >> 6;
    int i00 = a -3*b -7*c + 16;

    for( int y = 0; y < 16; y++ )
    {
        int pix = i00;
        for( int x = 0; x < 8; x++ )
        {
            src[x] = x264_clip_pixel( pix>>5 );
            pix += b;
        }
        src += FDEC_STRIDE;
        i00 += c;
    }
}

/****************************************************************************
 * 4x4 prediction for intra luma block
 ****************************************************************************/
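
/* SRC addresses pixels relative to the block origin: SRC(x,y) is the pixel
 * at column x, row y, so negative coordinates reach the reconstructed
 * neighbours (SRC(-1,y) is the left column, SRC(x,-1) the top row, and
 * SRC(-1,-1) the top-left corner). */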
#define SRC(x,y) src[(x)+(y)*FDEC_STRIDE]
#define SRC_X4(x,y) MPIXEL_X4( &SRC(x,y) )

#define PREDICT_4x4_DC(v)\
    SRC_X4(0,0) = SRC_X4(0,1) = SRC_X4(0,2) = SRC_X4(0,3) = v;

static void predict_4x4_dc_128_c( pixel *src )
{
    PREDICT_4x4_DC( PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) ) );
}

static void predict_4x4_dc_left_c( pixel *src )
{
    pixel4 dc = PIXEL_SPLAT_X4( (SRC(-1,0) + SRC(-1,1) + SRC(-1,2) + SRC(-1,3) + 2) >> 2 );

    PREDICT_4x4_DC( dc );
}

static void predict_4x4_dc_top_c( pixel *src )
{
    pixel4 dc = PIXEL_SPLAT_X4( (SRC(0,-1) + SRC(1,-1) + SRC(2,-1) + SRC(3,-1) + 2) >> 2 );

    PREDICT_4x4_DC( dc );
}

void x264_predict_4x4_dc_c( pixel *src )
{
    pixel4 dc = PIXEL_SPLAT_X4( (SRC(-1,0) + SRC(-1,1) + SRC(-1,2) + SRC(-1,3) +
                                 SRC(0,-1) + SRC(1,-1) + SRC(2,-1) + SRC(3,-1) + 4) >> 3 );

    PREDICT_4x4_DC( dc );
}

void x264_predict_4x4_h_c( pixel *src )
{
    SRC_X4(0,0) = PIXEL_SPLAT_X4( SRC(-1,0) );
    SRC_X4(0,1) = PIXEL_SPLAT_X4( SRC(-1,1) );
    SRC_X4(0,2) = PIXEL_SPLAT_X4( SRC(-1,2) );
    SRC_X4(0,3) = PIXEL_SPLAT_X4( SRC(-1,3) );
}

void x264_predict_4x4_v_c( pixel *src )
{
    PREDICT_4x4_DC(SRC_X4(0,-1));
}

#define PREDICT_4x4_LOAD_LEFT\
    int l0 = SRC(-1,0);\
    int l1 = SRC(-1,1);\
    int l2 = SRC(-1,2);\
    UNUSED int l3 = SRC(-1,3);

#define PREDICT_4x4_LOAD_TOP\
    int t0 = SRC(0,-1);\
    int t1 = SRC(1,-1);\
    int t2 = SRC(2,-1);\
    UNUSED int t3 = SRC(3,-1);

#define PREDICT_4x4_LOAD_TOP_RIGHT\
    int t4 = SRC(4,-1);\
    int t5 = SRC(5,-1);\
    int t6 = SRC(6,-1);\
    UNUSED int t7 = SRC(7,-1);

#define F1(a,b)   (((a)+(b)+1)>>1)
#define F2(a,b,c) (((a)+2*(b)+(c)+2)>>2)
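
/* All directional modes below interpolate the filtered neighbours:
 * F1 is the rounded 2-tap average (a+b+1)>>1 and F2 the rounded 3-tap
 * [1 2 1]/4 filter. For example, with t0..t2 = 10, 20, 40,
 * F2(t0,t1,t2) = (10 + 2*20 + 40 + 2) >> 2 = 23. Positions that fall on
 * whole samples along the prediction diagonal use F2; half-sample
 * positions (in the vr/hd/vl/hu modes) use F1. */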
static void predict_4x4_ddl_c( pixel *src )
{
    PREDICT_4x4_LOAD_TOP
    PREDICT_4x4_LOAD_TOP_RIGHT

    SRC(0,0)= F2(t0,t1,t2);
    SRC(1,0)=SRC(0,1)= F2(t1,t2,t3);
    SRC(2,0)=SRC(1,1)=SRC(0,2)= F2(t2,t3,t4);
    SRC(3,0)=SRC(2,1)=SRC(1,2)=SRC(0,3)= F2(t3,t4,t5);
    SRC(3,1)=SRC(2,2)=SRC(1,3)= F2(t4,t5,t6);
    SRC(3,2)=SRC(2,3)= F2(t5,t6,t7);
    SRC(3,3)= F2(t6,t7,t7);
}

static void predict_4x4_ddr_c( pixel *src )
{
    int lt = SRC(-1,-1);
    PREDICT_4x4_LOAD_LEFT
    PREDICT_4x4_LOAD_TOP

    SRC(3,0)= F2(t3,t2,t1);
    SRC(2,0)=SRC(3,1)= F2(t2,t1,t0);
    SRC(1,0)=SRC(2,1)=SRC(3,2)= F2(t1,t0,lt);
    SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)= F2(t0,lt,l0);
    SRC(0,1)=SRC(1,2)=SRC(2,3)= F2(lt,l0,l1);
    SRC(0,2)=SRC(1,3)= F2(l0,l1,l2);
    SRC(0,3)= F2(l1,l2,l3);
}

static void predict_4x4_vr_c( pixel *src )
{
    int lt = SRC(-1,-1);
    PREDICT_4x4_LOAD_LEFT
    PREDICT_4x4_LOAD_TOP

    SRC(0,3)= F2(l2,l1,l0);
    SRC(0,2)= F2(l1,l0,lt);
    SRC(0,1)=SRC(1,3)= F2(l0,lt,t0);
    SRC(0,0)=SRC(1,2)= F1(lt,t0);
    SRC(1,1)=SRC(2,3)= F2(lt,t0,t1);
    SRC(1,0)=SRC(2,2)= F1(t0,t1);
    SRC(2,1)=SRC(3,3)= F2(t0,t1,t2);
    SRC(2,0)=SRC(3,2)= F1(t1,t2);
    SRC(3,1)= F2(t1,t2,t3);
    SRC(3,0)= F1(t2,t3);
}

static void predict_4x4_hd_c( pixel *src )
{
    int lt= SRC(-1,-1);
    PREDICT_4x4_LOAD_LEFT
    PREDICT_4x4_LOAD_TOP

    SRC(0,3)= F1(l2,l3);
    SRC(1,3)= F2(l1,l2,l3);
    SRC(0,2)=SRC(2,3)= F1(l1,l2);
    SRC(1,2)=SRC(3,3)= F2(l0,l1,l2);
    SRC(0,1)=SRC(2,2)= F1(l0,l1);
    SRC(1,1)=SRC(3,2)= F2(lt,l0,l1);
    SRC(0,0)=SRC(2,1)= F1(lt,l0);
    SRC(1,0)=SRC(3,1)= F2(t0,lt,l0);
    SRC(2,0)= F2(t1,t0,lt);
    SRC(3,0)= F2(t2,t1,t0);
}

static void predict_4x4_vl_c( pixel *src )
{
    PREDICT_4x4_LOAD_TOP
    PREDICT_4x4_LOAD_TOP_RIGHT

    SRC(0,0)= F1(t0,t1);
    SRC(0,1)= F2(t0,t1,t2);
    SRC(1,0)=SRC(0,2)= F1(t1,t2);
    SRC(1,1)=SRC(0,3)= F2(t1,t2,t3);
    SRC(2,0)=SRC(1,2)= F1(t2,t3);
    SRC(2,1)=SRC(1,3)= F2(t2,t3,t4);
    SRC(3,0)=SRC(2,2)= F1(t3,t4);
    SRC(3,1)=SRC(2,3)= F2(t3,t4,t5);
    SRC(3,2)= F1(t4,t5);
    SRC(3,3)= F2(t4,t5,t6);
}

static void predict_4x4_hu_c( pixel *src )
{
    PREDICT_4x4_LOAD_LEFT

    SRC(0,0)= F1(l0,l1);
    SRC(1,0)= F2(l0,l1,l2);
    SRC(2,0)=SRC(0,1)= F1(l1,l2);
    SRC(3,0)=SRC(1,1)= F2(l1,l2,l3);
    SRC(2,1)=SRC(0,2)= F1(l2,l3);
    SRC(3,1)=SRC(1,2)= F2(l2,l3,l3);
    SRC(3,2)=SRC(1,3)=SRC(0,3)=
    SRC(2,2)=SRC(2,3)=SRC(3,3)= l3;
}

/****************************************************************************
 * 8x8 prediction for intra luma block
 ****************************************************************************/

#define PL(y) \
    edge[14-y] = F2(SRC(-1,y-1), SRC(-1,y), SRC(-1,y+1));
#define PT(x) \
    edge[16+x] = F2(SRC(x-1,-1), SRC(x,-1), SRC(x+1,-1));
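
/* Unlike the 4x4 modes, H.264 8x8 intra prediction low-pass filters the
 * reference samples once up front. The filtered neighbours are gathered
 * into the `edge` array so every 8x8 mode can read them without re-deriving
 * availability; where the top-left or top-right samples are missing, the
 * nearest available pixel is substituted in the filter taps. */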
static void predict_8x8_filter_c( pixel *src, pixel edge[36], int i_neighbor, int i_filters )
{
    /* edge[7..14]  = l7..l0
     * edge[15]     = lt
     * edge[16..31] = t0 .. t15
     * edge[32]     = t15 */
    int have_lt = i_neighbor & MB_TOPLEFT;
    if( i_filters & MB_LEFT )
    {
        edge[15] = (SRC(0,-1) + 2*SRC(-1,-1) + SRC(-1,0) + 2) >> 2;
        edge[14] = ((have_lt ? SRC(-1,-1) : SRC(-1,0))
                    + 2*SRC(-1,0) + SRC(-1,1) + 2) >> 2;
        PL(1) PL(2) PL(3) PL(4) PL(5) PL(6)
        edge[6] =
        edge[7] = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2;
    }

    if( i_filters & MB_TOP )
    {
        int have_tr = i_neighbor & MB_TOPRIGHT;
        edge[16] = ((have_lt ? SRC(-1,-1) : SRC(0,-1))
                    + 2*SRC(0,-1) + SRC(1,-1) + 2) >> 2;
        PT(1) PT(2) PT(3) PT(4) PT(5) PT(6)
        edge[23] = (SRC(6,-1) + 2*SRC(7,-1)
                    + (have_tr ? SRC(8,-1) : SRC(7,-1)) + 2) >> 2;

        if( i_filters & MB_TOPRIGHT )
        {
            if( have_tr )
            {
                PT(8) PT(9) PT(10) PT(11) PT(12) PT(13) PT(14)
                edge[31] =
                edge[32] = (SRC(14,-1) + 3*SRC(15,-1) + 2) >> 2;
            }
            else
            {
                MPIXEL_X4( edge+24 ) = PIXEL_SPLAT_X4( SRC(7,-1) );
                MPIXEL_X4( edge+28 ) = PIXEL_SPLAT_X4( SRC(7,-1) );
                edge[32] = SRC(7,-1);
            }
        }
    }
}

#undef PL
#undef PT
#define PL(y) \
    UNUSED int l##y = edge[14-y];
#define PT(x) \
    UNUSED int t##x = edge[16+x];
#define PREDICT_8x8_LOAD_TOPLEFT \
    int lt = edge[15];
#define PREDICT_8x8_LOAD_LEFT \
    PL(0) PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) PL(7)
#define PREDICT_8x8_LOAD_TOP \
    PT(0) PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) PT(7)
#define PREDICT_8x8_LOAD_TOPRIGHT \
    PT(8) PT(9) PT(10) PT(11) PT(12) PT(13) PT(14) PT(15)

#define PREDICT_8x8_DC(v) \
    for( int y = 0; y < 8; y++ ) { \
        MPIXEL_X4( src+0 ) = v; \
        MPIXEL_X4( src+4 ) = v; \
        src += FDEC_STRIDE; \
    }

static void predict_8x8_dc_128_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_DC( PIXEL_SPLAT_X4( 1 << (BIT_DEPTH-1) ) );
}

static void predict_8x8_dc_left_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_LEFT
    pixel4 dc = PIXEL_SPLAT_X4( (l0+l1+l2+l3+l4+l5+l6+l7+4) >> 3 );
    PREDICT_8x8_DC( dc );
}

static void predict_8x8_dc_top_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_TOP
    pixel4 dc = PIXEL_SPLAT_X4( (t0+t1+t2+t3+t4+t5+t6+t7+4) >> 3 );
    PREDICT_8x8_DC( dc );
}

void x264_predict_8x8_dc_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_LEFT
    PREDICT_8x8_LOAD_TOP
    pixel4 dc = PIXEL_SPLAT_X4( (l0+l1+l2+l3+l4+l5+l6+l7+t0+t1+t2+t3+t4+t5+t6+t7+8) >> 4 );
    PREDICT_8x8_DC( dc );
}

void x264_predict_8x8_h_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_LEFT
#define ROW(y) MPIXEL_X4( src+y*FDEC_STRIDE+0 ) =\
               MPIXEL_X4( src+y*FDEC_STRIDE+4 ) = PIXEL_SPLAT_X4( l##y );
    ROW(0); ROW(1); ROW(2); ROW(3); ROW(4); ROW(5); ROW(6); ROW(7);
#undef ROW
}

void x264_predict_8x8_v_c( pixel *src, pixel edge[36] )
{
    pixel4 top[2] = { MPIXEL_X4( edge+16 ),
                      MPIXEL_X4( edge+20 ) };
    for( int y = 0; y < 8; y++ )
    {
        MPIXEL_X4( src+y*FDEC_STRIDE+0 ) = top[0];
        MPIXEL_X4( src+y*FDEC_STRIDE+4 ) = top[1];
    }
}

static void predict_8x8_ddl_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_TOP
    PREDICT_8x8_LOAD_TOPRIGHT

    SRC(0,0)= F2(t0,t1,t2);
    SRC(0,1)=SRC(1,0)= F2(t1,t2,t3);
    SRC(0,2)=SRC(1,1)=SRC(2,0)= F2(t2,t3,t4);
    SRC(0,3)=SRC(1,2)=SRC(2,1)=SRC(3,0)= F2(t3,t4,t5);
    SRC(0,4)=SRC(1,3)=SRC(2,2)=SRC(3,1)=SRC(4,0)= F2(t4,t5,t6);
    SRC(0,5)=SRC(1,4)=SRC(2,3)=SRC(3,2)=SRC(4,1)=SRC(5,0)= F2(t5,t6,t7);
    SRC(0,6)=SRC(1,5)=SRC(2,4)=SRC(3,3)=SRC(4,2)=SRC(5,1)=SRC(6,0)= F2(t6,t7,t8);
    SRC(0,7)=SRC(1,6)=SRC(2,5)=SRC(3,4)=SRC(4,3)=SRC(5,2)=SRC(6,1)=SRC(7,0)= F2(t7,t8,t9);
    SRC(1,7)=SRC(2,6)=SRC(3,5)=SRC(4,4)=SRC(5,3)=SRC(6,2)=SRC(7,1)= F2(t8,t9,t10);
    SRC(2,7)=SRC(3,6)=SRC(4,5)=SRC(5,4)=SRC(6,3)=SRC(7,2)= F2(t9,t10,t11);
    SRC(3,7)=SRC(4,6)=SRC(5,5)=SRC(6,4)=SRC(7,3)= F2(t10,t11,t12);
    SRC(4,7)=SRC(5,6)=SRC(6,5)=SRC(7,4)= F2(t11,t12,t13);
    SRC(5,7)=SRC(6,6)=SRC(7,5)= F2(t12,t13,t14);
    SRC(6,7)=SRC(7,6)= F2(t13,t14,t15);
    SRC(7,7)= F2(t14,t15,t15);
}

static void predict_8x8_ddr_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_TOP
    PREDICT_8x8_LOAD_LEFT
    PREDICT_8x8_LOAD_TOPLEFT

    SRC(0,7)= F2(l7,l6,l5);
    SRC(0,6)=SRC(1,7)= F2(l6,l5,l4);
    SRC(0,5)=SRC(1,6)=SRC(2,7)= F2(l5,l4,l3);
    SRC(0,4)=SRC(1,5)=SRC(2,6)=SRC(3,7)= F2(l4,l3,l2);
    SRC(0,3)=SRC(1,4)=SRC(2,5)=SRC(3,6)=SRC(4,7)= F2(l3,l2,l1);
    SRC(0,2)=SRC(1,3)=SRC(2,4)=SRC(3,5)=SRC(4,6)=SRC(5,7)= F2(l2,l1,l0);
    SRC(0,1)=SRC(1,2)=SRC(2,3)=SRC(3,4)=SRC(4,5)=SRC(5,6)=SRC(6,7)= F2(l1,l0,lt);
    SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)=SRC(4,4)=SRC(5,5)=SRC(6,6)=SRC(7,7)= F2(l0,lt,t0);
    SRC(1,0)=SRC(2,1)=SRC(3,2)=SRC(4,3)=SRC(5,4)=SRC(6,5)=SRC(7,6)= F2(lt,t0,t1);
    SRC(2,0)=SRC(3,1)=SRC(4,2)=SRC(5,3)=SRC(6,4)=SRC(7,5)= F2(t0,t1,t2);
    SRC(3,0)=SRC(4,1)=SRC(5,2)=SRC(6,3)=SRC(7,4)= F2(t1,t2,t3);
    SRC(4,0)=SRC(5,1)=SRC(6,2)=SRC(7,3)= F2(t2,t3,t4);
    SRC(5,0)=SRC(6,1)=SRC(7,2)= F2(t3,t4,t5);
    SRC(6,0)=SRC(7,1)= F2(t4,t5,t6);
    SRC(7,0)= F2(t5,t6,t7);
}

static void predict_8x8_vr_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_TOP
    PREDICT_8x8_LOAD_LEFT
    PREDICT_8x8_LOAD_TOPLEFT

    SRC(0,6)= F2(l5,l4,l3);
    SRC(0,7)= F2(l6,l5,l4);
    SRC(0,4)=SRC(1,6)= F2(l3,l2,l1);
    SRC(0,5)=SRC(1,7)= F2(l4,l3,l2);
    SRC(0,2)=SRC(1,4)=SRC(2,6)= F2(l1,l0,lt);
    SRC(0,3)=SRC(1,5)=SRC(2,7)= F2(l2,l1,l0);
    SRC(0,1)=SRC(1,3)=SRC(2,5)=SRC(3,7)= F2(l0,lt,t0);
    SRC(0,0)=SRC(1,2)=SRC(2,4)=SRC(3,6)= F1(lt,t0);
    SRC(1,1)=SRC(2,3)=SRC(3,5)=SRC(4,7)= F2(lt,t0,t1);
    SRC(1,0)=SRC(2,2)=SRC(3,4)=SRC(4,6)= F1(t0,t1);
    SRC(2,1)=SRC(3,3)=SRC(4,5)=SRC(5,7)= F2(t0,t1,t2);
    SRC(2,0)=SRC(3,2)=SRC(4,4)=SRC(5,6)= F1(t1,t2);
    SRC(3,1)=SRC(4,3)=SRC(5,5)=SRC(6,7)= F2(t1,t2,t3);
    SRC(3,0)=SRC(4,2)=SRC(5,4)=SRC(6,6)= F1(t2,t3);
    SRC(4,1)=SRC(5,3)=SRC(6,5)=SRC(7,7)= F2(t2,t3,t4);
    SRC(4,0)=SRC(5,2)=SRC(6,4)=SRC(7,6)= F1(t3,t4);
    SRC(5,1)=SRC(6,3)=SRC(7,5)= F2(t3,t4,t5);
    SRC(5,0)=SRC(6,2)=SRC(7,4)= F1(t4,t5);
    SRC(6,1)=SRC(7,3)= F2(t4,t5,t6);
    SRC(6,0)=SRC(7,2)= F1(t5,t6);
    SRC(7,1)= F2(t5,t6,t7);
    SRC(7,0)= F1(t6,t7);
}
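
/* In the hd and hu modes each row is the row below shifted by two pixels,
 * so instead of writing pixels one at a time these functions build 2-pixel
 * pairs with pack_pixel_1to2() and splice overlapping pairs into 4-pixel
 * stores with pack_pixel_2to4(); each SRC_X4 assignment then reuses the
 * packed halves shared between rows. */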
static void predict_8x8_hd_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_TOP
    PREDICT_8x8_LOAD_LEFT
    PREDICT_8x8_LOAD_TOPLEFT

    int p1 = pack_pixel_1to2(F1(l6,l7), F2(l5,l6,l7));
    int p2 = pack_pixel_1to2(F1(l5,l6), F2(l4,l5,l6));
    int p3 = pack_pixel_1to2(F1(l4,l5), F2(l3,l4,l5));
    int p4 = pack_pixel_1to2(F1(l3,l4), F2(l2,l3,l4));
    int p5 = pack_pixel_1to2(F1(l2,l3), F2(l1,l2,l3));
    int p6 = pack_pixel_1to2(F1(l1,l2), F2(l0,l1,l2));
    int p7 = pack_pixel_1to2(F1(l0,l1), F2(lt,l0,l1));
    int p8 = pack_pixel_1to2(F1(lt,l0), F2(l0,lt,t0));
    int p9 = pack_pixel_1to2(F2(t1,t0,lt), F2(t2,t1,t0));
    int p10 = pack_pixel_1to2(F2(t3,t2,t1), F2(t4,t3,t2));
    int p11 = pack_pixel_1to2(F2(t5,t4,t3), F2(t6,t5,t4));
    SRC_X4(0,7)= pack_pixel_2to4(p1,p2);
    SRC_X4(0,6)= pack_pixel_2to4(p2,p3);
    SRC_X4(4,7)=SRC_X4(0,5)= pack_pixel_2to4(p3,p4);
    SRC_X4(4,6)=SRC_X4(0,4)= pack_pixel_2to4(p4,p5);
    SRC_X4(4,5)=SRC_X4(0,3)= pack_pixel_2to4(p5,p6);
    SRC_X4(4,4)=SRC_X4(0,2)= pack_pixel_2to4(p6,p7);
    SRC_X4(4,3)=SRC_X4(0,1)= pack_pixel_2to4(p7,p8);
    SRC_X4(4,2)=SRC_X4(0,0)= pack_pixel_2to4(p8,p9);
    SRC_X4(4,1)= pack_pixel_2to4(p9,p10);
    SRC_X4(4,0)= pack_pixel_2to4(p10,p11);
}

static void predict_8x8_vl_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_TOP
    PREDICT_8x8_LOAD_TOPRIGHT

    SRC(0,0)= F1(t0,t1);
    SRC(0,1)= F2(t0,t1,t2);
    SRC(0,2)=SRC(1,0)= F1(t1,t2);
    SRC(0,3)=SRC(1,1)= F2(t1,t2,t3);
    SRC(0,4)=SRC(1,2)=SRC(2,0)= F1(t2,t3);
    SRC(0,5)=SRC(1,3)=SRC(2,1)= F2(t2,t3,t4);
    SRC(0,6)=SRC(1,4)=SRC(2,2)=SRC(3,0)= F1(t3,t4);
    SRC(0,7)=SRC(1,5)=SRC(2,3)=SRC(3,1)= F2(t3,t4,t5);
    SRC(1,6)=SRC(2,4)=SRC(3,2)=SRC(4,0)= F1(t4,t5);
    SRC(1,7)=SRC(2,5)=SRC(3,3)=SRC(4,1)= F2(t4,t5,t6);
    SRC(2,6)=SRC(3,4)=SRC(4,2)=SRC(5,0)= F1(t5,t6);
    SRC(2,7)=SRC(3,5)=SRC(4,3)=SRC(5,1)= F2(t5,t6,t7);
    SRC(3,6)=SRC(4,4)=SRC(5,2)=SRC(6,0)= F1(t6,t7);
    SRC(3,7)=SRC(4,5)=SRC(5,3)=SRC(6,1)= F2(t6,t7,t8);
    SRC(4,6)=SRC(5,4)=SRC(6,2)=SRC(7,0)= F1(t7,t8);
    SRC(4,7)=SRC(5,5)=SRC(6,3)=SRC(7,1)= F2(t7,t8,t9);
    SRC(5,6)=SRC(6,4)=SRC(7,2)= F1(t8,t9);
    SRC(5,7)=SRC(6,5)=SRC(7,3)= F2(t8,t9,t10);
    SRC(6,6)=SRC(7,4)= F1(t9,t10);
    SRC(6,7)=SRC(7,5)= F2(t9,t10,t11);
    SRC(7,6)= F1(t10,t11);
    SRC(7,7)= F2(t10,t11,t12);
}

static void predict_8x8_hu_c( pixel *src, pixel edge[36] )
{
    PREDICT_8x8_LOAD_LEFT

    int p1 = pack_pixel_1to2(F1(l0,l1), F2(l0,l1,l2));
    int p2 = pack_pixel_1to2(F1(l1,l2), F2(l1,l2,l3));
    int p3 = pack_pixel_1to2(F1(l2,l3), F2(l2,l3,l4));
    int p4 = pack_pixel_1to2(F1(l3,l4), F2(l3,l4,l5));
    int p5 = pack_pixel_1to2(F1(l4,l5), F2(l4,l5,l6));
    int p6 = pack_pixel_1to2(F1(l5,l6), F2(l5,l6,l7));
    int p7 = pack_pixel_1to2(F1(l6,l7), F2(l6,l7,l7));
    int p8 = pack_pixel_1to2(l7,l7);
    SRC_X4(0,0)= pack_pixel_2to4(p1,p2);
    SRC_X4(0,1)= pack_pixel_2to4(p2,p3);
    SRC_X4(4,0)=SRC_X4(0,2)= pack_pixel_2to4(p3,p4);
    SRC_X4(4,1)=SRC_X4(0,3)= pack_pixel_2to4(p4,p5);
    SRC_X4(4,2)=SRC_X4(0,4)= pack_pixel_2to4(p5,p6);
    SRC_X4(4,3)=SRC_X4(0,5)= pack_pixel_2to4(p6,p7);
    SRC_X4(4,4)=SRC_X4(0,6)= pack_pixel_2to4(p7,p8);
    SRC_X4(4,5)=SRC_X4(4,6)= SRC_X4(0,7) = SRC_X4(4,7) = pack_pixel_2to4(p8,p8);
}

/****************************************************************************
 * Exported functions:
 ****************************************************************************/
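
/* Each init function first installs the portable C implementations above,
 * then lets the platform-specific initializers overwrite individual table
 * entries with optimized versions for the detected CPU. Callers index the
 * table by prediction mode; a hypothetical usage sketch (the fdec buffer
 * name and offset are illustrative, not the encoder's actual code):
 *
 *     x264_predict_t predict_16x16[7];
 *     x264_predict_16x16_init( cpu, predict_16x16 );
 *     predict_16x16[I_PRED_16x16_DC]( &fdec[16*FDEC_STRIDE+16] );
 */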
void x264_predict_16x16_init( int cpu, x264_predict_t pf[7] )
{
    pf[I_PRED_16x16_V ]     = x264_predict_16x16_v_c;
    pf[I_PRED_16x16_H ]     = x264_predict_16x16_h_c;
    pf[I_PRED_16x16_DC]     = x264_predict_16x16_dc_c;
    pf[I_PRED_16x16_P ]     = x264_predict_16x16_p_c;
    pf[I_PRED_16x16_DC_LEFT]= predict_16x16_dc_left_c;
    pf[I_PRED_16x16_DC_TOP ]= predict_16x16_dc_top_c;
    pf[I_PRED_16x16_DC_128 ]= predict_16x16_dc_128_c;

#if HAVE_MMX
    x264_predict_16x16_init_mmx( cpu, pf );
#endif

#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
        x264_predict_16x16_init_altivec( pf );
#endif

#if HAVE_ARMV6
    x264_predict_16x16_init_arm( cpu, pf );
#endif

#if ARCH_AARCH64
    x264_predict_16x16_init_aarch64( cpu, pf );
#endif

#if !HIGH_BIT_DEPTH
#if HAVE_MSA
    if( cpu&X264_CPU_MSA )
    {
        pf[I_PRED_16x16_V ]     = x264_intra_predict_vert_16x16_msa;
        pf[I_PRED_16x16_H ]     = x264_intra_predict_hor_16x16_msa;
        pf[I_PRED_16x16_DC]     = x264_intra_predict_dc_16x16_msa;
        pf[I_PRED_16x16_P ]     = x264_intra_predict_plane_16x16_msa;
        pf[I_PRED_16x16_DC_LEFT]= x264_intra_predict_dc_left_16x16_msa;
        pf[I_PRED_16x16_DC_TOP ]= x264_intra_predict_dc_top_16x16_msa;
        pf[I_PRED_16x16_DC_128 ]= x264_intra_predict_dc_128_16x16_msa;
    }
#endif
#endif
}

void x264_predict_8x8c_init( int cpu, x264_predict_t pf[7] )
{
    pf[I_PRED_CHROMA_V ]     = x264_predict_8x8c_v_c;
    pf[I_PRED_CHROMA_H ]     = x264_predict_8x8c_h_c;
    pf[I_PRED_CHROMA_DC]     = x264_predict_8x8c_dc_c;
    pf[I_PRED_CHROMA_P ]     = x264_predict_8x8c_p_c;
    pf[I_PRED_CHROMA_DC_LEFT]= predict_8x8c_dc_left_c;
    pf[I_PRED_CHROMA_DC_TOP ]= predict_8x8c_dc_top_c;
    pf[I_PRED_CHROMA_DC_128 ]= predict_8x8c_dc_128_c;

#if HAVE_MMX
    x264_predict_8x8c_init_mmx( cpu, pf );
#endif

#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
        x264_predict_8x8c_init_altivec( pf );
#endif

#if HAVE_ARMV6
    x264_predict_8x8c_init_arm( cpu, pf );
#endif

#if ARCH_AARCH64
    x264_predict_8x8c_init_aarch64( cpu, pf );
#endif

#if !HIGH_BIT_DEPTH
#if HAVE_MSA
    if( cpu&X264_CPU_MSA )
    {
        pf[I_PRED_CHROMA_P ]     = x264_intra_predict_plane_8x8_msa;
    }
#endif
#endif
}

void x264_predict_8x16c_init( int cpu, x264_predict_t pf[7] )
{
    pf[I_PRED_CHROMA_V ]     = x264_predict_8x16c_v_c;
    pf[I_PRED_CHROMA_H ]     = x264_predict_8x16c_h_c;
    pf[I_PRED_CHROMA_DC]     = x264_predict_8x16c_dc_c;
    pf[I_PRED_CHROMA_P ]     = x264_predict_8x16c_p_c;
    pf[I_PRED_CHROMA_DC_LEFT]= predict_8x16c_dc_left_c;
    pf[I_PRED_CHROMA_DC_TOP ]= predict_8x16c_dc_top_c;
    pf[I_PRED_CHROMA_DC_128 ]= predict_8x16c_dc_128_c;

#if HAVE_MMX
    x264_predict_8x16c_init_mmx( cpu, pf );
#endif

#if HAVE_ARMV6
    x264_predict_8x16c_init_arm( cpu, pf );
#endif

#if ARCH_AARCH64
    x264_predict_8x16c_init_aarch64( cpu, pf );
#endif
}

void x264_predict_8x8_init( int cpu, x264_predict8x8_t pf[12], x264_predict_8x8_filter_t *predict_filter )
{
    pf[I_PRED_8x8_V]      = x264_predict_8x8_v_c;
    pf[I_PRED_8x8_H]      = x264_predict_8x8_h_c;
    pf[I_PRED_8x8_DC]     = x264_predict_8x8_dc_c;
    pf[I_PRED_8x8_DDL]    = predict_8x8_ddl_c;
    pf[I_PRED_8x8_DDR]    = predict_8x8_ddr_c;
    pf[I_PRED_8x8_VR]     = predict_8x8_vr_c;
    pf[I_PRED_8x8_HD]     = predict_8x8_hd_c;
    pf[I_PRED_8x8_VL]     = predict_8x8_vl_c;
    pf[I_PRED_8x8_HU]     = predict_8x8_hu_c;
    pf[I_PRED_8x8_DC_LEFT]= predict_8x8_dc_left_c;
    pf[I_PRED_8x8_DC_TOP] = predict_8x8_dc_top_c;
    pf[I_PRED_8x8_DC_128] = predict_8x8_dc_128_c;
    *predict_filter       = predict_8x8_filter_c;

#if HAVE_MMX
    x264_predict_8x8_init_mmx( cpu, pf, predict_filter );
#endif

#if HAVE_ARMV6
    x264_predict_8x8_init_arm( cpu, pf, predict_filter );
#endif

#if ARCH_AARCH64
    x264_predict_8x8_init_aarch64( cpu, pf, predict_filter );
#endif

#if !HIGH_BIT_DEPTH
#if HAVE_MSA
    if( cpu&X264_CPU_MSA )
    {
        pf[I_PRED_8x8_DDL] = x264_intra_predict_ddl_8x8_msa;
    }
#endif
#endif
}

void x264_predict_4x4_init( int cpu, x264_predict_t pf[12] )
{
    pf[I_PRED_4x4_V]      = x264_predict_4x4_v_c;
    pf[I_PRED_4x4_H]      = x264_predict_4x4_h_c;
    pf[I_PRED_4x4_DC]     = x264_predict_4x4_dc_c;
    pf[I_PRED_4x4_DDL]    = predict_4x4_ddl_c;
    pf[I_PRED_4x4_DDR]    = predict_4x4_ddr_c;
    pf[I_PRED_4x4_VR]     = predict_4x4_vr_c;
    pf[I_PRED_4x4_HD]     = predict_4x4_hd_c;
    pf[I_PRED_4x4_VL]     = predict_4x4_vl_c;
    pf[I_PRED_4x4_HU]     = predict_4x4_hu_c;
    pf[I_PRED_4x4_DC_LEFT]= predict_4x4_dc_left_c;
    pf[I_PRED_4x4_DC_TOP] = predict_4x4_dc_top_c;
    pf[I_PRED_4x4_DC_128] = predict_4x4_dc_128_c;

#if HAVE_MMX
    x264_predict_4x4_init_mmx( cpu, pf );
#endif

#if HAVE_ARMV6
    x264_predict_4x4_init_arm( cpu, pf );
#endif

#if ARCH_AARCH64
    x264_predict_4x4_init_aarch64( cpu, pf );
#endif
}