  1. /*****************************************************************************
  2. * pixel.c: pixel metrics
  3. *****************************************************************************
  4. * Copyright (C) 2003-2018 x264 project
  5. *
  6. * Authors: Loren Merritt <lorenm@u.washington.edu>
  7. * Laurent Aimar <fenrir@via.ecp.fr>
  8. * Fiona Glaser <fiona@x264.com>
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published by
  12. * the Free Software Foundation; either version 2 of the License, or
  13. * (at your option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  18. * GNU General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License
  21. * along with this program; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
  23. *
  24. * This program is also available under a commercial proprietary license.
  25. * For more information, contact us at licensing@x264.com.
  26. *****************************************************************************/
  27. #include "common.h"
  28. #if HAVE_MMX
  29. # include "x86/pixel.h"
  30. # include "x86/predict.h"
  31. #endif
  32. #if ARCH_PPC
  33. # include "ppc/pixel.h"
  34. #endif
  35. #if ARCH_ARM
  36. # include "arm/pixel.h"
  37. # include "arm/predict.h"
  38. #endif
  39. #if ARCH_AARCH64
  40. # include "aarch64/pixel.h"
  41. # include "aarch64/predict.h"
  42. #endif
  43. #if ARCH_MIPS
  44. # include "mips/pixel.h"
  45. #endif
  46. /****************************************************************************
  47. * pixel_sad_WxH
  48. ****************************************************************************/
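/* Reference C implementations: SAD (sum of absolute differences) is the cheapest
 * distortion metric. x264_pixel_init() below installs these as defaults and then
 * overrides them with platform-specific versions where available. */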
  49. #define PIXEL_SAD_C( name, lx, ly ) \
  50. static int name( pixel *pix1, intptr_t i_stride_pix1, \
  51. pixel *pix2, intptr_t i_stride_pix2 ) \
  52. { \
  53. int i_sum = 0; \
  54. for( int y = 0; y < ly; y++ ) \
  55. { \
  56. for( int x = 0; x < lx; x++ ) \
  57. { \
  58. i_sum += abs( pix1[x] - pix2[x] ); \
  59. } \
  60. pix1 += i_stride_pix1; \
  61. pix2 += i_stride_pix2; \
  62. } \
  63. return i_sum; \
  64. }
  65. PIXEL_SAD_C( x264_pixel_sad_16x16, 16, 16 )
  66. PIXEL_SAD_C( x264_pixel_sad_16x8, 16, 8 )
  67. PIXEL_SAD_C( x264_pixel_sad_8x16, 8, 16 )
  68. PIXEL_SAD_C( x264_pixel_sad_8x8, 8, 8 )
  69. PIXEL_SAD_C( x264_pixel_sad_8x4, 8, 4 )
  70. PIXEL_SAD_C( x264_pixel_sad_4x16, 4, 16 )
  71. PIXEL_SAD_C( x264_pixel_sad_4x8, 4, 8 )
  72. PIXEL_SAD_C( x264_pixel_sad_4x4, 4, 4 )
  73. /****************************************************************************
  74. * pixel_ssd_WxH
  75. ****************************************************************************/
  76. #define PIXEL_SSD_C( name, lx, ly ) \
  77. static int name( pixel *pix1, intptr_t i_stride_pix1, \
  78. pixel *pix2, intptr_t i_stride_pix2 ) \
  79. { \
  80. int i_sum = 0; \
  81. for( int y = 0; y < ly; y++ ) \
  82. { \
  83. for( int x = 0; x < lx; x++ ) \
  84. { \
  85. int d = pix1[x] - pix2[x]; \
  86. i_sum += d*d; \
  87. } \
  88. pix1 += i_stride_pix1; \
  89. pix2 += i_stride_pix2; \
  90. } \
  91. return i_sum; \
  92. }
  93. PIXEL_SSD_C( x264_pixel_ssd_16x16, 16, 16 )
  94. PIXEL_SSD_C( x264_pixel_ssd_16x8, 16, 8 )
  95. PIXEL_SSD_C( x264_pixel_ssd_8x16, 8, 16 )
  96. PIXEL_SSD_C( x264_pixel_ssd_8x8, 8, 8 )
  97. PIXEL_SSD_C( x264_pixel_ssd_8x4, 8, 4 )
  98. PIXEL_SSD_C( x264_pixel_ssd_4x16, 4, 16 )
  99. PIXEL_SSD_C( x264_pixel_ssd_4x8, 4, 8 )
  100. PIXEL_SSD_C( x264_pixel_ssd_4x4, 4, 4 )
  101. uint64_t x264_pixel_ssd_wxh( x264_pixel_function_t *pf, pixel *pix1, intptr_t i_pix1,
  102. pixel *pix2, intptr_t i_pix2, int i_width, int i_height )
  103. {
  104. uint64_t i_ssd = 0;
  105. int y;
  106. int align = !(((intptr_t)pix1 | (intptr_t)pix2 | i_pix1 | i_pix2) & 15);
  107. #define SSD(size) i_ssd += pf->ssd[size]( pix1 + y*i_pix1 + x, i_pix1, \
  108. pix2 + y*i_pix2 + x, i_pix2 );
  109. for( y = 0; y < i_height-15; y += 16 )
  110. {
  111. int x = 0;
  112. if( align )
  113. for( ; x < i_width-15; x += 16 )
  114. SSD(PIXEL_16x16);
  115. for( ; x < i_width-7; x += 8 )
  116. SSD(PIXEL_8x16);
  117. }
  118. if( y < i_height-7 )
  119. for( int x = 0; x < i_width-7; x += 8 )
  120. SSD(PIXEL_8x8);
  121. #undef SSD
  122. #define SSD1 { int d = pix1[y*i_pix1+x] - pix2[y*i_pix2+x]; i_ssd += d*d; }
  123. if( i_width & 7 )
  124. {
  125. for( y = 0; y < (i_height & ~7); y++ )
  126. for( int x = i_width & ~7; x < i_width; x++ )
  127. SSD1;
  128. }
  129. if( i_height & 7 )
  130. {
  131. for( y = i_height & ~7; y < i_height; y++ )
  132. for( int x = 0; x < i_width; x++ )
  133. SSD1;
  134. }
  135. #undef SSD1
  136. return i_ssd;
  137. }
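/* NV12 stores chroma interleaved (UVUVUV...): even offsets are U samples and odd
 * offsets are V samples, so the U and V SSDs are accumulated separately below. */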
  138. static void pixel_ssd_nv12_core( pixel *pixuv1, intptr_t stride1, pixel *pixuv2, intptr_t stride2,
  139. int width, int height, uint64_t *ssd_u, uint64_t *ssd_v )
  140. {
  141. *ssd_u = 0, *ssd_v = 0;
  142. for( int y = 0; y < height; y++, pixuv1+=stride1, pixuv2+=stride2 )
  143. for( int x = 0; x < width; x++ )
  144. {
  145. int du = pixuv1[2*x] - pixuv2[2*x];
  146. int dv = pixuv1[2*x+1] - pixuv2[2*x+1];
  147. *ssd_u += du*du;
  148. *ssd_v += dv*dv;
  149. }
  150. }
  151. void x264_pixel_ssd_nv12( x264_pixel_function_t *pf, pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2,
  152. int i_width, int i_height, uint64_t *ssd_u, uint64_t *ssd_v )
  153. {
  154. pf->ssd_nv12_core( pix1, i_pix1, pix2, i_pix2, i_width&~7, i_height, ssd_u, ssd_v );
  155. if( i_width&7 )
  156. {
  157. uint64_t tmp[2];
  158. pixel_ssd_nv12_core( pix1+(i_width&~7), i_pix1, pix2+(i_width&~7), i_pix2, i_width&7, i_height, &tmp[0], &tmp[1] );
  159. *ssd_u += tmp[0];
  160. *ssd_v += tmp[1];
  161. }
  162. }
  163. /****************************************************************************
  164. * pixel_var_wxh
  165. ****************************************************************************/
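/* Returns a packed value: the low 32 bits hold the sum of the pixels and the high
 * 32 bits the sum of their squares, so the caller can derive the block variance. */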
  166. #define PIXEL_VAR_C( name, w, h ) \
  167. static uint64_t name( pixel *pix, intptr_t i_stride ) \
  168. { \
  169. uint32_t sum = 0, sqr = 0; \
  170. for( int y = 0; y < h; y++ ) \
  171. { \
  172. for( int x = 0; x < w; x++ ) \
  173. { \
  174. sum += pix[x]; \
  175. sqr += pix[x] * pix[x]; \
  176. } \
  177. pix += i_stride; \
  178. } \
  179. return sum + ((uint64_t)sqr << 32); \
  180. }
  181. PIXEL_VAR_C( pixel_var_16x16, 16, 16 )
  182. PIXEL_VAR_C( pixel_var_8x16, 8, 16 )
  183. PIXEL_VAR_C( pixel_var_8x8, 8, 8 )
  184. /****************************************************************************
  185. * pixel_var2_wxh
  186. ****************************************************************************/
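/* Operates on the prediction residual of an 8-wide chroma block pair: the U samples
 * sit at x and the V samples FENC_STRIDE/2 (resp. FDEC_STRIDE/2) further right in
 * the fenc/fdec buffers. Per-plane SSDs are written to ssd[2]; the return value is
 * the residual SSD minus the squared-sum (DC) term for both planes, i.e. roughly
 * the AC energy of the residual. */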
  187. #define PIXEL_VAR2_C( name, h, shift ) \
  188. static int name( pixel *fenc, pixel *fdec, int ssd[2] ) \
  189. { \
  190. int sum_u = 0, sum_v = 0, sqr_u = 0, sqr_v = 0; \
  191. for( int y = 0; y < h; y++ ) \
  192. { \
  193. for( int x = 0; x < 8; x++ ) \
  194. { \
  195. int diff_u = fenc[x] - fdec[x]; \
  196. int diff_v = fenc[x+FENC_STRIDE/2] - fdec[x+FDEC_STRIDE/2]; \
  197. sum_u += diff_u; \
  198. sum_v += diff_v; \
  199. sqr_u += diff_u * diff_u; \
  200. sqr_v += diff_v * diff_v; \
  201. } \
  202. fenc += FENC_STRIDE; \
  203. fdec += FDEC_STRIDE; \
  204. } \
  205. ssd[0] = sqr_u; \
  206. ssd[1] = sqr_v; \
  207. return sqr_u - ((int64_t)sum_u * sum_u >> shift) + \
  208. sqr_v - ((int64_t)sum_v * sum_v >> shift); \
  209. }
  210. PIXEL_VAR2_C( pixel_var2_8x16, 16, 7 )
  211. PIXEL_VAR2_C( pixel_var2_8x8, 8, 6 )
  212. #if BIT_DEPTH > 8
  213. typedef uint32_t sum_t;
  214. typedef uint64_t sum2_t;
  215. #else
  216. typedef uint16_t sum_t;
  217. typedef uint32_t sum2_t;
  218. #endif
  219. #define BITS_PER_SUM (8 * sizeof(sum_t))
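/* "Pseudo-SIMD": two sum_t lanes are packed into one sum2_t so that each scalar
 * operation in the transforms below processes two values at once; BITS_PER_SUM is
 * the width of one lane. HADAMARD4 is a 4-point butterfly: d0..d3 receive the
 * unnormalized 4-point Hadamard (Walsh) transform of s0..s3. */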
  220. #define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) {\
  221. sum2_t t0 = s0 + s1;\
  222. sum2_t t1 = s0 - s1;\
  223. sum2_t t2 = s2 + s3;\
  224. sum2_t t3 = s2 - s3;\
  225. d0 = t0 + t2;\
  226. d2 = t0 - t2;\
  227. d1 = t1 + t3;\
  228. d3 = t1 - t3;\
  229. }
  230. // in: a pseudo-simd number of the form x+(y<<16)
  231. // return: abs(x)+(abs(y)<<16)
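// Branchless lane-wise absolute value: for a single value x, with s = -1 when x < 0
// and 0 otherwise, (x+s)^s == |x|; here s is built with the matching all-ones mask
// in each packed lane.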
  232. static ALWAYS_INLINE sum2_t abs2( sum2_t a )
  233. {
  234. sum2_t s = ((a>>(BITS_PER_SUM-1))&(((sum2_t)1<<BITS_PER_SUM)+1))*((sum_t)-1);
  235. return (a+s)^s;
  236. }
  237. /****************************************************************************
  238. * pixel_satd_WxH: sum of 4x4 Hadamard transformed differences
  239. ****************************************************************************/
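/* The row pass packs the sum of each pixel-difference pair in the low lane and their
 * difference in the high lane of one sum2_t; after the column pass the absolute
 * values of both lanes are accumulated. The final >>1 follows the usual convention
 * of reporting half the sum of absolute transformed differences. */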
  240. static NOINLINE int x264_pixel_satd_4x4( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
  241. {
  242. sum2_t tmp[4][2];
  243. sum2_t a0, a1, a2, a3, b0, b1;
  244. sum2_t sum = 0;
  245. for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
  246. {
  247. a0 = pix1[0] - pix2[0];
  248. a1 = pix1[1] - pix2[1];
  249. b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
  250. a2 = pix1[2] - pix2[2];
  251. a3 = pix1[3] - pix2[3];
  252. b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
  253. tmp[i][0] = b0 + b1;
  254. tmp[i][1] = b0 - b1;
  255. }
  256. for( int i = 0; i < 2; i++ )
  257. {
  258. HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
  259. a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
  260. sum += ((sum_t)a0) + (a0>>BITS_PER_SUM);
  261. }
  262. return sum >> 1;
  263. }
  264. static NOINLINE int x264_pixel_satd_8x4( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
  265. {
  266. sum2_t tmp[4][4];
  267. sum2_t a0, a1, a2, a3;
  268. sum2_t sum = 0;
  269. for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
  270. {
  271. a0 = (pix1[0] - pix2[0]) + ((sum2_t)(pix1[4] - pix2[4]) << BITS_PER_SUM);
  272. a1 = (pix1[1] - pix2[1]) + ((sum2_t)(pix1[5] - pix2[5]) << BITS_PER_SUM);
  273. a2 = (pix1[2] - pix2[2]) + ((sum2_t)(pix1[6] - pix2[6]) << BITS_PER_SUM);
  274. a3 = (pix1[3] - pix2[3]) + ((sum2_t)(pix1[7] - pix2[7]) << BITS_PER_SUM);
  275. HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
  276. }
  277. for( int i = 0; i < 4; i++ )
  278. {
  279. HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
  280. sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
  281. }
  282. return (((sum_t)sum) + (sum>>BITS_PER_SUM)) >> 1;
  283. }
  284. #define PIXEL_SATD_C( w, h, sub )\
  285. static int x264_pixel_satd_##w##x##h( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )\
  286. {\
  287. int sum = sub( pix1, i_pix1, pix2, i_pix2 )\
  288. + sub( pix1+4*i_pix1, i_pix1, pix2+4*i_pix2, i_pix2 );\
  289. if( w==16 )\
  290. sum+= sub( pix1+8, i_pix1, pix2+8, i_pix2 )\
  291. + sub( pix1+8+4*i_pix1, i_pix1, pix2+8+4*i_pix2, i_pix2 );\
  292. if( h==16 )\
  293. sum+= sub( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )\
  294. + sub( pix1+12*i_pix1, i_pix1, pix2+12*i_pix2, i_pix2 );\
  295. if( w==16 && h==16 )\
  296. sum+= sub( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 )\
  297. + sub( pix1+8+12*i_pix1, i_pix1, pix2+8+12*i_pix2, i_pix2 );\
  298. return sum;\
  299. }
  300. PIXEL_SATD_C( 16, 16, x264_pixel_satd_8x4 )
  301. PIXEL_SATD_C( 16, 8, x264_pixel_satd_8x4 )
  302. PIXEL_SATD_C( 8, 16, x264_pixel_satd_8x4 )
  303. PIXEL_SATD_C( 8, 8, x264_pixel_satd_8x4 )
  304. PIXEL_SATD_C( 4, 16, x264_pixel_satd_4x4 )
  305. PIXEL_SATD_C( 4, 8, x264_pixel_satd_4x4 )
  306. static NOINLINE int sa8d_8x8( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
  307. {
  308. sum2_t tmp[8][4];
  309. sum2_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
  310. sum2_t sum = 0;
  311. for( int i = 0; i < 8; i++, pix1 += i_pix1, pix2 += i_pix2 )
  312. {
  313. a0 = pix1[0] - pix2[0];
  314. a1 = pix1[1] - pix2[1];
  315. b0 = (a0+a1) + ((a0-a1)<<BITS_PER_SUM);
  316. a2 = pix1[2] - pix2[2];
  317. a3 = pix1[3] - pix2[3];
  318. b1 = (a2+a3) + ((a2-a3)<<BITS_PER_SUM);
  319. a4 = pix1[4] - pix2[4];
  320. a5 = pix1[5] - pix2[5];
  321. b2 = (a4+a5) + ((a4-a5)<<BITS_PER_SUM);
  322. a6 = pix1[6] - pix2[6];
  323. a7 = pix1[7] - pix2[7];
  324. b3 = (a6+a7) + ((a6-a7)<<BITS_PER_SUM);
  325. HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0,b1,b2,b3 );
  326. }
  327. for( int i = 0; i < 4; i++ )
  328. {
  329. HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
  330. HADAMARD4( a4, a5, a6, a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i] );
  331. b0 = abs2(a0+a4) + abs2(a0-a4);
  332. b0 += abs2(a1+a5) + abs2(a1-a5);
  333. b0 += abs2(a2+a6) + abs2(a2-a6);
  334. b0 += abs2(a3+a7) + abs2(a3-a7);
  335. sum += (sum_t)b0 + (b0>>BITS_PER_SUM);
  336. }
  337. return sum;
  338. }
  339. static int x264_pixel_sa8d_8x8( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
  340. {
  341. int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 );
  342. return (sum+2)>>2;
  343. }
  344. static int x264_pixel_sa8d_16x16( pixel *pix1, intptr_t i_pix1, pixel *pix2, intptr_t i_pix2 )
  345. {
  346. int sum = sa8d_8x8( pix1, i_pix1, pix2, i_pix2 )
  347. + sa8d_8x8( pix1+8, i_pix1, pix2+8, i_pix2 )
  348. + sa8d_8x8( pix1+8*i_pix1, i_pix1, pix2+8*i_pix2, i_pix2 )
  349. + sa8d_8x8( pix1+8+8*i_pix1, i_pix1, pix2+8+8*i_pix2, i_pix2 );
  350. return (sum+2)>>2;
  351. }
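/* Sum of absolute Hadamard-transformed coefficients of a single source block (no
 * reference), with the DC term removed: the low 32 bits of the result use the 4x4
 * transform stage and the high 32 bits the full 8x8 stage. The HADAMARD_AC() wrapper
 * below accumulates up to four 8x8 blocks and normalizes the two packed sums
 * (8x8 sum >>2, 4x4 sum >>1); this is used for psychovisual energy estimates. */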
  352. static NOINLINE uint64_t pixel_hadamard_ac( pixel *pix, intptr_t stride )
  353. {
  354. sum2_t tmp[32];
  355. sum2_t a0, a1, a2, a3, dc;
  356. sum2_t sum4 = 0, sum8 = 0;
  357. for( int i = 0; i < 8; i++, pix+=stride )
  358. {
  359. sum2_t *t = tmp + (i&3) + (i&4)*4;
  360. a0 = (pix[0]+pix[1]) + ((sum2_t)(pix[0]-pix[1])<<BITS_PER_SUM);
  361. a1 = (pix[2]+pix[3]) + ((sum2_t)(pix[2]-pix[3])<<BITS_PER_SUM);
  362. t[0] = a0 + a1;
  363. t[4] = a0 - a1;
  364. a2 = (pix[4]+pix[5]) + ((sum2_t)(pix[4]-pix[5])<<BITS_PER_SUM);
  365. a3 = (pix[6]+pix[7]) + ((sum2_t)(pix[6]-pix[7])<<BITS_PER_SUM);
  366. t[8] = a2 + a3;
  367. t[12] = a2 - a3;
  368. }
  369. for( int i = 0; i < 8; i++ )
  370. {
  371. HADAMARD4( a0, a1, a2, a3, tmp[i*4+0], tmp[i*4+1], tmp[i*4+2], tmp[i*4+3] );
  372. tmp[i*4+0] = a0;
  373. tmp[i*4+1] = a1;
  374. tmp[i*4+2] = a2;
  375. tmp[i*4+3] = a3;
  376. sum4 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
  377. }
  378. for( int i = 0; i < 8; i++ )
  379. {
  380. HADAMARD4( a0,a1,a2,a3, tmp[i], tmp[8+i], tmp[16+i], tmp[24+i] );
  381. sum8 += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
  382. }
  383. dc = (sum_t)(tmp[0] + tmp[8] + tmp[16] + tmp[24]);
  384. sum4 = (sum_t)sum4 + (sum4>>BITS_PER_SUM) - dc;
  385. sum8 = (sum_t)sum8 + (sum8>>BITS_PER_SUM) - dc;
  386. return ((uint64_t)sum8<<32) + sum4;
  387. }
  388. #define HADAMARD_AC(w,h) \
  389. static uint64_t x264_pixel_hadamard_ac_##w##x##h( pixel *pix, intptr_t stride )\
  390. {\
  391. uint64_t sum = pixel_hadamard_ac( pix, stride );\
  392. if( w==16 )\
  393. sum += pixel_hadamard_ac( pix+8, stride );\
  394. if( h==16 )\
  395. sum += pixel_hadamard_ac( pix+8*stride, stride );\
  396. if( w==16 && h==16 )\
  397. sum += pixel_hadamard_ac( pix+8*stride+8, stride );\
  398. return ((sum>>34)<<32) + ((uint32_t)sum>>1);\
  399. }
  400. HADAMARD_AC( 16, 16 )
  401. HADAMARD_AC( 16, 8 )
  402. HADAMARD_AC( 8, 16 )
  403. HADAMARD_AC( 8, 8 )
  404. /****************************************************************************
  405. * pixel_sad_x4
  406. ****************************************************************************/
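/* Score one encode block (fenc, FENC_STRIDE) against 3 or 4 candidate reference
 * blocks sharing a stride, writing the results to scores[]. The C versions just call
 * the single-block SAD; SIMD versions can load fenc once and reuse it across all
 * candidates, which is where the speedup comes from. */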
  407. #define SAD_X( size ) \
  408. static void x264_pixel_sad_x3_##size( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2,\
  409. intptr_t i_stride, int scores[3] )\
  410. {\
  411. scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
  412. scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
  413. scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
  414. }\
  415. static void x264_pixel_sad_x4_##size( pixel *fenc, pixel *pix0, pixel *pix1,pixel *pix2, pixel *pix3,\
  416. intptr_t i_stride, int scores[4] )\
  417. {\
  418. scores[0] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix0, i_stride );\
  419. scores[1] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix1, i_stride );\
  420. scores[2] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix2, i_stride );\
  421. scores[3] = x264_pixel_sad_##size( fenc, FENC_STRIDE, pix3, i_stride );\
  422. }
  423. SAD_X( 16x16 )
  424. SAD_X( 16x8 )
  425. SAD_X( 8x16 )
  426. SAD_X( 8x8 )
  427. SAD_X( 8x4 )
  428. SAD_X( 4x8 )
  429. SAD_X( 4x4 )
  430. /****************************************************************************
  431. * pixel_satd_x4
432. * No faster than calling the single-block satd repeatedly, but needed so that satd can be a drop-in replacement for sad
  433. ****************************************************************************/
  434. #define SATD_X( size, cpu ) \
  435. static void x264_pixel_satd_x3_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2,\
  436. intptr_t i_stride, int scores[3] )\
  437. {\
  438. scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
  439. scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
  440. scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
  441. }\
  442. static void x264_pixel_satd_x4_##size##cpu( pixel *fenc, pixel *pix0, pixel *pix1, pixel *pix2, pixel *pix3,\
  443. intptr_t i_stride, int scores[4] )\
  444. {\
  445. scores[0] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix0, i_stride );\
  446. scores[1] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix1, i_stride );\
  447. scores[2] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix2, i_stride );\
  448. scores[3] = x264_pixel_satd_##size##cpu( fenc, FENC_STRIDE, pix3, i_stride );\
  449. }
  450. #define SATD_X_DECL6( cpu )\
  451. SATD_X( 16x16, cpu )\
  452. SATD_X( 16x8, cpu )\
  453. SATD_X( 8x16, cpu )\
  454. SATD_X( 8x8, cpu )\
  455. SATD_X( 8x4, cpu )\
  456. SATD_X( 4x8, cpu )
  457. #define SATD_X_DECL7( cpu )\
  458. SATD_X_DECL6( cpu )\
  459. SATD_X( 4x4, cpu )
  460. SATD_X_DECL7()
  461. #if HAVE_MMX
  462. SATD_X_DECL7( _mmx2 )
  463. #if !HIGH_BIT_DEPTH
  464. SATD_X_DECL6( _sse2 )
  465. SATD_X_DECL7( _ssse3 )
  466. SATD_X_DECL6( _ssse3_atom )
  467. SATD_X_DECL7( _sse4 )
  468. SATD_X_DECL7( _avx )
  469. SATD_X_DECL7( _xop )
  470. SATD_X_DECL7( _avx512 )
  471. #endif // !HIGH_BIT_DEPTH
  472. #endif
  473. #if !HIGH_BIT_DEPTH
  474. #if HAVE_ARMV6 || ARCH_AARCH64
  475. SATD_X_DECL7( _neon )
  476. #endif
  477. #endif // !HIGH_BIT_DEPTH
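/* intra_*_x3 helpers: score the three cheap intra prediction modes of a block by
 * running the vertical, horizontal and DC predictors (order varies by block type)
 * into a temporary decode buffer and comparing each prediction against fenc with
 * the chosen metric (sad, satd or sa8d). */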
  478. #define INTRA_MBCMP_8x8( mbcmp, cpu, cpu2 )\
  479. static void intra_##mbcmp##_x3_8x8##cpu( pixel *fenc, pixel edge[36], int res[3] )\
  480. {\
  481. ALIGNED_ARRAY_16( pixel, pix, [8*FDEC_STRIDE] );\
  482. x264_predict_8x8_v##cpu2( pix, edge );\
  483. res[0] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
  484. x264_predict_8x8_h##cpu2( pix, edge );\
  485. res[1] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
  486. x264_predict_8x8_dc##cpu2( pix, edge );\
  487. res[2] = x264_pixel_##mbcmp##_8x8##cpu( pix, FDEC_STRIDE, fenc, FENC_STRIDE );\
  488. }
  489. INTRA_MBCMP_8x8( sad,, _c )
  490. INTRA_MBCMP_8x8(sa8d,, _c )
  491. #if HIGH_BIT_DEPTH && HAVE_MMX
  492. #define x264_predict_8x8_v_sse2 x264_predict_8x8_v_sse
  493. INTRA_MBCMP_8x8( sad, _mmx2, _c )
  494. INTRA_MBCMP_8x8(sa8d, _sse2, _sse2 )
  495. #endif
  496. #if !HIGH_BIT_DEPTH && (HAVE_ARMV6 || ARCH_AARCH64)
  497. INTRA_MBCMP_8x8( sad, _neon, _neon )
  498. INTRA_MBCMP_8x8(sa8d, _neon, _neon )
  499. #endif
  500. #define INTRA_MBCMP( mbcmp, size, pred1, pred2, pred3, chroma, cpu, cpu2 )\
  501. static void intra_##mbcmp##_x3_##size##chroma##cpu( pixel *fenc, pixel *fdec, int res[3] )\
  502. {\
  503. x264_predict_##size##chroma##_##pred1##cpu2( fdec );\
  504. res[0] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
  505. x264_predict_##size##chroma##_##pred2##cpu2( fdec );\
  506. res[1] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
  507. x264_predict_##size##chroma##_##pred3##cpu2( fdec );\
  508. res[2] = x264_pixel_##mbcmp##_##size##cpu( fdec, FDEC_STRIDE, fenc, FENC_STRIDE );\
  509. }
  510. INTRA_MBCMP( sad, 4x4, v, h, dc, ,, _c )
  511. INTRA_MBCMP(satd, 4x4, v, h, dc, ,, _c )
  512. INTRA_MBCMP( sad, 8x8, dc, h, v, c,, _c )
  513. INTRA_MBCMP(satd, 8x8, dc, h, v, c,, _c )
  514. INTRA_MBCMP( sad, 8x16, dc, h, v, c,, _c )
  515. INTRA_MBCMP(satd, 8x16, dc, h, v, c,, _c )
  516. INTRA_MBCMP( sad, 16x16, v, h, dc, ,, _c )
  517. INTRA_MBCMP(satd, 16x16, v, h, dc, ,, _c )
  518. #if HAVE_MMX
  519. #if HIGH_BIT_DEPTH
  520. #define x264_predict_8x8c_v_mmx2 x264_predict_8x8c_v_mmx
  521. #define x264_predict_8x16c_v_mmx2 x264_predict_8x16c_v_c
  522. #define x264_predict_16x16_dc_mmx2 x264_predict_16x16_dc_c
  523. #define x264_predict_8x8c_v_sse2 x264_predict_8x8c_v_sse
  524. #define x264_predict_8x16c_v_sse2 x264_predict_8x16c_v_sse
  525. #define x264_predict_16x16_v_sse2 x264_predict_16x16_v_sse
  526. INTRA_MBCMP( sad, 4x4, v, h, dc, , _mmx2, _c )
  527. INTRA_MBCMP( sad, 8x8, dc, h, v, c, _mmx2, _mmx2 )
  528. INTRA_MBCMP( sad, 8x16, dc, h, v, c, _mmx2, _mmx2 )
  529. INTRA_MBCMP(satd, 8x16, dc, h, v, c, _mmx2, _mmx2 )
  530. INTRA_MBCMP( sad, 16x16, v, h, dc, , _mmx2, _mmx2 )
  531. INTRA_MBCMP( sad, 8x8, dc, h, v, c, _sse2, _sse2 )
  532. INTRA_MBCMP( sad, 8x16, dc, h, v, c, _sse2, _sse2 )
  533. INTRA_MBCMP(satd, 8x16, dc, h, v, c, _sse2, _sse2 )
  534. INTRA_MBCMP( sad, 16x16, v, h, dc, , _sse2, _sse2 )
  535. INTRA_MBCMP( sad, 8x8, dc, h, v, c, _ssse3, _sse2 )
  536. INTRA_MBCMP( sad, 8x16, dc, h, v, c, _ssse3, _sse2 )
  537. INTRA_MBCMP(satd, 8x16, dc, h, v, c, _ssse3, _sse2 )
  538. INTRA_MBCMP( sad, 16x16, v, h, dc, , _ssse3, _sse2 )
  539. INTRA_MBCMP(satd, 8x16, dc, h, v, c, _sse4, _sse2 )
  540. INTRA_MBCMP(satd, 8x16, dc, h, v, c, _avx, _sse2 )
  541. #else
  542. #define x264_predict_8x16c_v_mmx2 x264_predict_8x16c_v_mmx
  543. INTRA_MBCMP( sad, 8x16, dc, h, v, c, _mmx2, _mmx2 )
  544. INTRA_MBCMP(satd, 8x16, dc, h, v, c, _mmx2, _mmx2 )
  545. INTRA_MBCMP( sad, 8x16, dc, h, v, c, _sse2, _mmx2 )
  546. INTRA_MBCMP(satd, 8x16, dc, h, v, c, _sse2, _mmx2 )
  547. INTRA_MBCMP(satd, 8x16, dc, h, v, c, _ssse3, _mmx2 )
  548. INTRA_MBCMP(satd, 8x16, dc, h, v, c, _sse4, _mmx2 )
  549. INTRA_MBCMP(satd, 8x16, dc, h, v, c, _avx, _mmx2 )
  550. INTRA_MBCMP(satd, 8x16, dc, h, v, c, _xop, _mmx2 )
  551. #endif
  552. #endif
  553. #if !HIGH_BIT_DEPTH && HAVE_ARMV6
  554. INTRA_MBCMP( sad, 4x4, v, h, dc, , _neon, _armv6 )
  555. INTRA_MBCMP(satd, 4x4, v, h, dc, , _neon, _armv6 )
  556. INTRA_MBCMP( sad, 8x8, dc, h, v, c, _neon, _neon )
  557. INTRA_MBCMP(satd, 8x8, dc, h, v, c, _neon, _neon )
  558. INTRA_MBCMP( sad, 8x16, dc, h, v, c, _neon, _c )
  559. INTRA_MBCMP(satd, 8x16, dc, h, v, c, _neon, _c )
  560. INTRA_MBCMP( sad, 16x16, v, h, dc, , _neon, _neon )
  561. INTRA_MBCMP(satd, 16x16, v, h, dc, , _neon, _neon )
  562. #endif
  563. #if !HIGH_BIT_DEPTH && ARCH_AARCH64
  564. INTRA_MBCMP( sad, 4x4, v, h, dc, , _neon, _neon )
  565. INTRA_MBCMP(satd, 4x4, v, h, dc, , _neon, _neon )
  566. INTRA_MBCMP( sad, 8x8, dc, h, v, c, _neon, _neon )
  567. INTRA_MBCMP(satd, 8x8, dc, h, v, c, _neon, _neon )
  568. INTRA_MBCMP( sad, 8x16, dc, h, v, c, _neon, _neon )
  569. INTRA_MBCMP(satd, 8x16, dc, h, v, c, _neon, _neon )
  570. INTRA_MBCMP( sad, 16x16, v, h, dc, , _neon, _neon )
  571. INTRA_MBCMP(satd, 16x16, v, h, dc, , _neon, _neon )
  572. #endif
  573. // No C implementation of intra_satd_x9. See checkasm for its behavior,
  574. // or see mb_analyse_intra for the entirely different algorithm we
  575. // use when lacking an asm implementation of it.
  576. /****************************************************************************
  577. * structural similarity metric
  578. ****************************************************************************/
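/* ssim_4x4x2_core gathers the raw sums (sum a, sum b, sum a^2 + b^2, sum a*b) for
 * two horizontally adjacent 4x4 blocks per call; x264_pixel_ssim_wxh then combines
 * four neighbouring 4x4 sums into each overlapping 8x8 SSIM window. */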
  579. static void ssim_4x4x2_core( const pixel *pix1, intptr_t stride1,
  580. const pixel *pix2, intptr_t stride2,
  581. int sums[2][4] )
  582. {
  583. for( int z = 0; z < 2; z++ )
  584. {
  585. uint32_t s1 = 0, s2 = 0, ss = 0, s12 = 0;
  586. for( int y = 0; y < 4; y++ )
  587. for( int x = 0; x < 4; x++ )
  588. {
  589. int a = pix1[x+y*stride1];
  590. int b = pix2[x+y*stride2];
  591. s1 += a;
  592. s2 += b;
  593. ss += a*a;
  594. ss += b*b;
  595. s12 += a*b;
  596. }
  597. sums[z][0] = s1;
  598. sums[z][1] = s2;
  599. sums[z][2] = ss;
  600. sums[z][3] = s12;
  601. pix1 += 4;
  602. pix2 += 4;
  603. }
  604. }
  605. static float ssim_end1( int s1, int s2, int ss, int s12 )
  606. {
  607. /* Maximum value for 10-bit is: ss*64 = (2^10-1)^2*16*4*64 = 4286582784, which will overflow in some cases.
  608. * s1*s1, s2*s2, and s1*s2 also obtain this value for edge cases: ((2^10-1)*16*4)^2 = 4286582784.
  609. * Maximum value for 9-bit is: ss*64 = (2^9-1)^2*16*4*64 = 1069551616, which will not overflow. */
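/* s1/s2/ss/s12 are the window sums sum(a), sum(b), sum(a^2)+sum(b^2) and sum(a*b)
 * over an 8x8 window (64 pixels). What follows is the standard SSIM formula,
 * (2*mu_a*mu_b + C1)(2*cov + C2) / ((mu_a^2 + mu_b^2 + C1)(var_a + var_b + C2)),
 * evaluated on these integer sums with matching pre-scaled constants. */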
  610. #if BIT_DEPTH > 9
  611. #define type float
  612. static const float ssim_c1 = .01*.01*PIXEL_MAX*PIXEL_MAX*64;
  613. static const float ssim_c2 = .03*.03*PIXEL_MAX*PIXEL_MAX*64*63;
  614. #else
  615. #define type int
  616. static const int ssim_c1 = (int)(.01*.01*PIXEL_MAX*PIXEL_MAX*64 + .5);
  617. static const int ssim_c2 = (int)(.03*.03*PIXEL_MAX*PIXEL_MAX*64*63 + .5);
  618. #endif
  619. type fs1 = s1;
  620. type fs2 = s2;
  621. type fss = ss;
  622. type fs12 = s12;
  623. type vars = fss*64 - fs1*fs1 - fs2*fs2;
  624. type covar = fs12*64 - fs1*fs2;
  625. return (float)(2*fs1*fs2 + ssim_c1) * (float)(2*covar + ssim_c2)
  626. / ((float)(fs1*fs1 + fs2*fs2 + ssim_c1) * (float)(vars + ssim_c2));
  627. #undef type
  628. }
  629. static float ssim_end4( int sum0[5][4], int sum1[5][4], int width )
  630. {
  631. float ssim = 0.0;
  632. for( int i = 0; i < width; i++ )
  633. ssim += ssim_end1( sum0[i][0] + sum0[i+1][0] + sum1[i][0] + sum1[i+1][0],
  634. sum0[i][1] + sum0[i+1][1] + sum1[i][1] + sum1[i+1][1],
  635. sum0[i][2] + sum0[i+1][2] + sum1[i][2] + sum1[i+1][2],
  636. sum0[i][3] + sum0[i+1][3] + sum1[i][3] + sum1[i+1][3] );
  637. return ssim;
  638. }
  639. float x264_pixel_ssim_wxh( x264_pixel_function_t *pf,
  640. pixel *pix1, intptr_t stride1,
  641. pixel *pix2, intptr_t stride2,
  642. int width, int height, void *buf, int *cnt )
  643. {
  644. int z = 0;
  645. float ssim = 0.0;
  646. int (*sum0)[4] = buf;
  647. int (*sum1)[4] = sum0 + (width >> 2) + 3;
  648. width >>= 2;
  649. height >>= 2;
  650. for( int y = 1; y < height; y++ )
  651. {
  652. for( ; z <= y; z++ )
  653. {
  654. XCHG( void*, sum0, sum1 );
  655. for( int x = 0; x < width; x+=2 )
  656. pf->ssim_4x4x2_core( &pix1[4*(x+z*stride1)], stride1, &pix2[4*(x+z*stride2)], stride2, &sum0[x] );
  657. }
  658. for( int x = 0; x < width-1; x += 4 )
  659. ssim += pf->ssim_end4( sum0+x, sum1+x, X264_MIN(4,width-x-1) );
  660. }
  661. *cnt = (height-1) * (width-1);
  662. return ssim;
  663. }
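/* vsad: sum of absolute differences between each row and the row below it over a
 * 16-pixel-wide column. x264_field_vsad compares the frame vsad of a macroblock
 * pair against its field vsad (odd/even rows separately) and returns 1 when field
 * coding looks cheaper. */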
  664. static int pixel_vsad( pixel *src, intptr_t stride, int height )
  665. {
  666. int score = 0;
  667. for( int i = 1; i < height; i++, src += stride )
  668. for( int j = 0; j < 16; j++ )
  669. score += abs(src[j] - src[j+stride]);
  670. return score;
  671. }
  672. int x264_field_vsad( x264_t *h, int mb_x, int mb_y )
  673. {
  674. int score_field, score_frame;
  675. int stride = h->fenc->i_stride[0];
  676. int mb_stride = h->mb.i_mb_stride;
  677. pixel *fenc = h->fenc->plane[0] + 16 * (mb_x + mb_y * stride);
  678. int mb_xy = mb_x + mb_y*mb_stride;
  679. /* We don't want to analyze pixels outside the frame, as it gives inaccurate results. */
  680. int mbpair_height = X264_MIN( h->param.i_height - mb_y * 16, 32 );
  681. score_frame = h->pixf.vsad( fenc, stride, mbpair_height );
  682. score_field = h->pixf.vsad( fenc, stride*2, mbpair_height >> 1 );
  683. score_field += h->pixf.vsad( fenc+stride, stride*2, mbpair_height >> 1 );
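/* Bias the decision toward agreeing with the field/frame choice already made for
 * the left and top neighbouring macroblock pairs. */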
  684. if( mb_x > 0 )
  685. score_field += 512 - h->mb.field[mb_xy -1]*1024;
  686. if( mb_y > 0 )
  687. score_field += 512 - h->mb.field[mb_xy-mb_stride]*1024;
  688. return (score_field < score_frame);
  689. }
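/* asd8: absolute value of the plain (signed) sum of differences over an
 * 8-pixel-wide block, i.e. the magnitude of the DC offset between the two blocks. */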
  690. static int pixel_asd8( pixel *pix1, intptr_t stride1, pixel *pix2, intptr_t stride2, int height )
  691. {
  692. int sum = 0;
  693. for( int y = 0; y < height; y++, pix1 += stride1, pix2 += stride2 )
  694. for( int x = 0; x < 8; x++ )
  695. sum += pix1[x] - pix2[x];
  696. return abs( sum );
  697. }
  698. /****************************************************************************
  699. * successive elimination
  700. ****************************************************************************/
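/* "ads" = absolute differences of sums, a cheap pre-filter for motion search:
 * enc_dc[] holds the pixel sums of the encode block's subblocks, sums[] the
 * precomputed sums at each candidate position (delta = offset to the next row of
 * subblock sums). Candidates whose sum difference plus MV cost stays below thresh
 * are appended to mvs[] for full evaluation. */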
  701. static int x264_pixel_ads4( int enc_dc[4], uint16_t *sums, int delta,
  702. uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
  703. {
  704. int nmv = 0;
  705. for( int i = 0; i < width; i++, sums++ )
  706. {
  707. int ads = abs( enc_dc[0] - sums[0] )
  708. + abs( enc_dc[1] - sums[8] )
  709. + abs( enc_dc[2] - sums[delta] )
  710. + abs( enc_dc[3] - sums[delta+8] )
  711. + cost_mvx[i];
  712. if( ads < thresh )
  713. mvs[nmv++] = i;
  714. }
  715. return nmv;
  716. }
  717. static int x264_pixel_ads2( int enc_dc[2], uint16_t *sums, int delta,
  718. uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
  719. {
  720. int nmv = 0;
  721. for( int i = 0; i < width; i++, sums++ )
  722. {
  723. int ads = abs( enc_dc[0] - sums[0] )
  724. + abs( enc_dc[1] - sums[delta] )
  725. + cost_mvx[i];
  726. if( ads < thresh )
  727. mvs[nmv++] = i;
  728. }
  729. return nmv;
  730. }
  731. static int x264_pixel_ads1( int enc_dc[1], uint16_t *sums, int delta,
  732. uint16_t *cost_mvx, int16_t *mvs, int width, int thresh )
  733. {
  734. int nmv = 0;
  735. for( int i = 0; i<width; i++, sums++ )
  736. {
  737. int ads = abs( enc_dc[0] - sums[0] )
  738. + cost_mvx[i];
  739. if( ads < thresh )
  740. mvs[nmv++] = i;
  741. }
  742. return nmv;
  743. }
  744. /****************************************************************************
  745. * x264_pixel_init:
  746. ****************************************************************************/
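/* Typical use (illustrative sketch; fref and i_stride_ref are placeholder names):
 *
 *     x264_pixel_function_t pixf;
 *     x264_pixel_init( param->cpu, &pixf );
 *     int cost = pixf.sad[PIXEL_16x16]( fenc, FENC_STRIDE, fref, i_stride_ref );
 *
 * The table is filled once per encoder instance; every later assignment in this
 * function overrides the C default with the best available CPU-specific version. */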
  747. void x264_pixel_init( int cpu, x264_pixel_function_t *pixf )
  748. {
  749. memset( pixf, 0, sizeof(*pixf) );
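/* The INIT*() helpers below fill the per-partition-size function pointer tables
 * (INIT2 = the two largest sizes, ..., INIT8 = all eight); later CPU-feature blocks
 * re-run them with a suffix to swap in optimized versions, so more capable CPU
 * flags must be handled after less capable ones. */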
  750. #define INIT2_NAME( name1, name2, cpu ) \
  751. pixf->name1[PIXEL_16x16] = x264_pixel_##name2##_16x16##cpu;\
  752. pixf->name1[PIXEL_16x8] = x264_pixel_##name2##_16x8##cpu;
  753. #define INIT4_NAME( name1, name2, cpu ) \
  754. INIT2_NAME( name1, name2, cpu ) \
  755. pixf->name1[PIXEL_8x16] = x264_pixel_##name2##_8x16##cpu;\
  756. pixf->name1[PIXEL_8x8] = x264_pixel_##name2##_8x8##cpu;
  757. #define INIT5_NAME( name1, name2, cpu ) \
  758. INIT4_NAME( name1, name2, cpu ) \
  759. pixf->name1[PIXEL_8x4] = x264_pixel_##name2##_8x4##cpu;
  760. #define INIT6_NAME( name1, name2, cpu ) \
  761. INIT5_NAME( name1, name2, cpu ) \
  762. pixf->name1[PIXEL_4x8] = x264_pixel_##name2##_4x8##cpu;
  763. #define INIT7_NAME( name1, name2, cpu ) \
  764. INIT6_NAME( name1, name2, cpu ) \
  765. pixf->name1[PIXEL_4x4] = x264_pixel_##name2##_4x4##cpu;
  766. #define INIT8_NAME( name1, name2, cpu ) \
  767. INIT7_NAME( name1, name2, cpu ) \
  768. pixf->name1[PIXEL_4x16] = x264_pixel_##name2##_4x16##cpu;
  769. #define INIT2( name, cpu ) INIT2_NAME( name, name, cpu )
  770. #define INIT4( name, cpu ) INIT4_NAME( name, name, cpu )
  771. #define INIT5( name, cpu ) INIT5_NAME( name, name, cpu )
  772. #define INIT6( name, cpu ) INIT6_NAME( name, name, cpu )
  773. #define INIT7( name, cpu ) INIT7_NAME( name, name, cpu )
  774. #define INIT8( name, cpu ) INIT8_NAME( name, name, cpu )
  775. #define INIT_ADS( cpu ) \
  776. pixf->ads[PIXEL_16x16] = x264_pixel_ads4##cpu;\
  777. pixf->ads[PIXEL_16x8] = x264_pixel_ads2##cpu;\
  778. pixf->ads[PIXEL_8x8] = x264_pixel_ads1##cpu;
  779. INIT8( sad, );
  780. INIT8_NAME( sad_aligned, sad, );
  781. INIT7( sad_x3, );
  782. INIT7( sad_x4, );
  783. INIT8( ssd, );
  784. INIT8( satd, );
  785. INIT7( satd_x3, );
  786. INIT7( satd_x4, );
  787. INIT4( hadamard_ac, );
  788. INIT_ADS( );
  789. pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16;
  790. pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8;
  791. pixf->var[PIXEL_16x16] = pixel_var_16x16;
  792. pixf->var[PIXEL_8x16] = pixel_var_8x16;
  793. pixf->var[PIXEL_8x8] = pixel_var_8x8;
  794. pixf->var2[PIXEL_8x16] = pixel_var2_8x16;
  795. pixf->var2[PIXEL_8x8] = pixel_var2_8x8;
  796. pixf->ssd_nv12_core = pixel_ssd_nv12_core;
  797. pixf->ssim_4x4x2_core = ssim_4x4x2_core;
  798. pixf->ssim_end4 = ssim_end4;
  799. pixf->vsad = pixel_vsad;
  800. pixf->asd8 = pixel_asd8;
  801. pixf->intra_sad_x3_4x4 = intra_sad_x3_4x4;
  802. pixf->intra_satd_x3_4x4 = intra_satd_x3_4x4;
  803. pixf->intra_sad_x3_8x8 = intra_sad_x3_8x8;
  804. pixf->intra_sa8d_x3_8x8 = intra_sa8d_x3_8x8;
  805. pixf->intra_sad_x3_8x8c = intra_sad_x3_8x8c;
  806. pixf->intra_satd_x3_8x8c = intra_satd_x3_8x8c;
  807. pixf->intra_sad_x3_8x16c = intra_sad_x3_8x16c;
  808. pixf->intra_satd_x3_8x16c = intra_satd_x3_8x16c;
  809. pixf->intra_sad_x3_16x16 = intra_sad_x3_16x16;
  810. pixf->intra_satd_x3_16x16 = intra_satd_x3_16x16;
  811. #if HIGH_BIT_DEPTH
  812. #if HAVE_MMX
  813. if( cpu&X264_CPU_MMX2 )
  814. {
  815. INIT7( sad, _mmx2 );
  816. INIT7_NAME( sad_aligned, sad, _mmx2 );
  817. INIT7( sad_x3, _mmx2 );
  818. INIT7( sad_x4, _mmx2 );
  819. INIT8( satd, _mmx2 );
  820. INIT7( satd_x3, _mmx2 );
  821. INIT7( satd_x4, _mmx2 );
  822. INIT4( hadamard_ac, _mmx2 );
  823. INIT8( ssd, _mmx2 );
  824. INIT_ADS( _mmx2 );
  825. pixf->intra_sad_x3_4x4 = intra_sad_x3_4x4_mmx2;
  826. pixf->intra_satd_x3_4x4 = x264_intra_satd_x3_4x4_mmx2;
  827. pixf->intra_sad_x3_8x8 = intra_sad_x3_8x8_mmx2;
  828. pixf->intra_sad_x3_8x8c = intra_sad_x3_8x8c_mmx2;
  829. pixf->intra_satd_x3_8x8c = x264_intra_satd_x3_8x8c_mmx2;
  830. pixf->intra_sad_x3_8x16c = intra_sad_x3_8x16c_mmx2;
  831. pixf->intra_satd_x3_8x16c = intra_satd_x3_8x16c_mmx2;
  832. pixf->intra_sad_x3_16x16 = intra_sad_x3_16x16_mmx2;
  833. pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmx2;
  834. }
  835. if( cpu&X264_CPU_SSE2 )
  836. {
  837. INIT4_NAME( sad_aligned, sad, _sse2_aligned );
  838. INIT5( ssd, _sse2 );
  839. INIT6( satd, _sse2 );
  840. pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_sse2;
  841. pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_sse2;
  842. pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_sse2;
  843. pixf->ssim_end4 = x264_pixel_ssim_end4_sse2;
  844. pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
  845. pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
  846. pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_sse2;
  847. pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_sse2;
  848. pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
  849. pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_sse2;
  850. #if ARCH_X86_64
  851. pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_sse2;
  852. #endif
  853. pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_sse2;
  854. pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_sse2;
  855. pixf->intra_sa8d_x3_8x8 = intra_sa8d_x3_8x8_sse2;
  856. }
  857. if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
  858. {
  859. INIT5( sad, _sse2 );
  860. INIT2( sad_x3, _sse2 );
  861. INIT2( sad_x4, _sse2 );
  862. INIT_ADS( _sse2 );
  863. if( !(cpu&X264_CPU_STACK_MOD4) )
  864. {
  865. INIT4( hadamard_ac, _sse2 );
  866. }
  867. pixf->vsad = x264_pixel_vsad_sse2;
  868. pixf->asd8 = x264_pixel_asd8_sse2;
  869. pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_sse2;
  870. pixf->intra_sad_x3_8x8c = intra_sad_x3_8x8c_sse2;
  871. pixf->intra_sad_x3_8x16c = intra_sad_x3_8x16c_sse2;
  872. pixf->intra_satd_x3_8x16c = intra_satd_x3_8x16c_sse2;
  873. pixf->intra_sad_x3_16x16 = intra_sad_x3_16x16_sse2;
  874. }
  875. if( cpu&X264_CPU_SSE2_IS_FAST )
  876. {
  877. pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
  878. pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
  879. pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_sse2;
  880. pixf->sad_x3[PIXEL_8x4] = x264_pixel_sad_x3_8x4_sse2;
  881. pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
  882. pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_sse2;
  883. pixf->sad_x4[PIXEL_8x4] = x264_pixel_sad_x4_8x4_sse2;
  884. }
  885. if( cpu&X264_CPU_SSSE3 )
  886. {
  887. INIT4_NAME( sad_aligned, sad, _ssse3_aligned );
  888. pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_ssse3;
  889. pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_ssse3;
  890. INIT7( sad, _ssse3 );
  891. INIT7( sad_x3, _ssse3 );
  892. INIT7( sad_x4, _ssse3 );
  893. #if ARCH_X86 || !defined( __MACH__ )
  894. INIT_ADS( _ssse3 );
  895. #endif
  896. INIT6( satd, _ssse3 );
  897. pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_ssse3;
  898. if( !(cpu&X264_CPU_STACK_MOD4) )
  899. {
  900. INIT4( hadamard_ac, _ssse3 );
  901. }
  902. pixf->vsad = x264_pixel_vsad_ssse3;
  903. pixf->asd8 = x264_pixel_asd8_ssse3;
  904. pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
  905. pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_ssse3;
  906. #if ARCH_X86_64
  907. pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_ssse3;
  908. #endif
  909. pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_ssse3;
  910. pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_ssse3;
  911. pixf->intra_sad_x3_8x8c = intra_sad_x3_8x8c_ssse3;
  912. pixf->intra_sad_x3_8x16c = intra_sad_x3_8x16c_ssse3;
  913. pixf->intra_satd_x3_8x16c = intra_satd_x3_8x16c_ssse3;
  914. pixf->intra_sad_x3_16x16 = intra_sad_x3_16x16_ssse3;
  915. }
  916. if( cpu&X264_CPU_SSE4 )
  917. {
  918. INIT6( satd, _sse4 );
  919. pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_sse4;
  920. if( !(cpu&X264_CPU_STACK_MOD4) )
  921. {
  922. INIT4( hadamard_ac, _sse4 );
  923. }
  924. pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
  925. pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_sse4;
  926. #if ARCH_X86_64
  927. pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_sse4;
  928. #endif
  929. pixf->intra_satd_x3_8x16c = intra_satd_x3_8x16c_sse4;
  930. }
  931. if( cpu&X264_CPU_AVX )
  932. {
933. INIT5_NAME( sad_aligned, sad, _ssse3 ); /* AVX-capable CPUs don't benefit from an aligned version */
  934. #if ARCH_X86 || !defined( __MACH__ )
  935. INIT_ADS( _avx );
  936. #endif
  937. INIT6( satd, _avx );
  938. pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_avx;
  939. if( !(cpu&X264_CPU_STACK_MOD4) )
  940. {
  941. INIT4( hadamard_ac, _avx );
  942. }
  943. pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_avx;
  944. pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_avx;
  945. pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_avx;
  946. pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx;
  947. pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_avx;
  948. pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_avx;
  949. pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_avx;
  950. pixf->ssim_end4 = x264_pixel_ssim_end4_avx;
  951. #if ARCH_X86_64
  952. pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_avx;
  953. #endif
  954. pixf->intra_satd_x3_8x16c = intra_satd_x3_8x16c_avx;
  955. }
  956. if( cpu&X264_CPU_XOP )
  957. {
  958. INIT5( sad_x3, _xop );
  959. INIT5( sad_x4, _xop );
  960. pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_xop;
  961. pixf->vsad = x264_pixel_vsad_xop;
  962. pixf->asd8 = x264_pixel_asd8_xop;
  963. #if ARCH_X86_64
  964. pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_xop;
  965. #endif
  966. }
  967. if( cpu&X264_CPU_AVX2 )
  968. {
  969. INIT2( ssd, _avx2 );
  970. INIT2( sad, _avx2 );
  971. INIT2_NAME( sad_aligned, sad, _avx2 );
  972. INIT2( sad_x3, _avx2 );
  973. INIT2( sad_x4, _avx2 );
  974. pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx2;
  975. pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_avx2;
  976. pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_avx2;
  977. pixf->vsad = x264_pixel_vsad_avx2;
  978. pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_avx2;
  979. pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_avx2;
  980. }
  981. if( cpu&X264_CPU_AVX512 )
  982. {
  983. pixf->var[PIXEL_8x16] = x264_pixel_var_8x16_avx512;
  984. pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx512;
  985. pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_avx512;
  986. pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_avx512;
  987. }
  988. #endif // HAVE_MMX
  989. #else // !HIGH_BIT_DEPTH
  990. #if HAVE_MMX
  991. if( cpu&X264_CPU_MMX )
  992. {
  993. INIT8( ssd, _mmx );
  994. }
  995. if( cpu&X264_CPU_MMX2 )
  996. {
  997. INIT8( sad, _mmx2 );
  998. INIT8_NAME( sad_aligned, sad, _mmx2 );
  999. INIT7( sad_x3, _mmx2 );
  1000. INIT7( sad_x4, _mmx2 );
  1001. INIT8( satd, _mmx2 );
  1002. INIT7( satd_x3, _mmx2 );
  1003. INIT7( satd_x4, _mmx2 );
  1004. INIT4( hadamard_ac, _mmx2 );
  1005. INIT_ADS( _mmx2 );
  1006. #if ARCH_X86
  1007. pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_mmx2;
  1008. pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_mmx2;
  1009. pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_mmx2;
  1010. pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_mmx2;
  1011. pixf->vsad = x264_pixel_vsad_mmx2;
  1012. if( cpu&X264_CPU_CACHELINE_32 )
  1013. {
  1014. INIT5( sad, _cache32_mmx2 );
  1015. INIT4( sad_x3, _cache32_mmx2 );
  1016. INIT4( sad_x4, _cache32_mmx2 );
  1017. }
  1018. else if( cpu&X264_CPU_CACHELINE_64 && !(cpu&X264_CPU_SLOW_ATOM) )
  1019. {
  1020. INIT5( sad, _cache64_mmx2 );
  1021. INIT4( sad_x3, _cache64_mmx2 );
  1022. INIT4( sad_x4, _cache64_mmx2 );
  1023. }
  1024. #else
  1025. if( cpu&X264_CPU_CACHELINE_64 && !(cpu&X264_CPU_SLOW_ATOM) )
  1026. {
  1027. pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_cache64_mmx2;
  1028. pixf->sad[PIXEL_8x8] = x264_pixel_sad_8x8_cache64_mmx2;
  1029. pixf->sad[PIXEL_8x4] = x264_pixel_sad_8x4_cache64_mmx2;
  1030. pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_mmx2;
  1031. pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_cache64_mmx2;
  1032. pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_mmx2;
  1033. pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_cache64_mmx2;
  1034. }
  1035. #endif
  1036. pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_mmx2;
  1037. pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_mmx2;
  1038. pixf->intra_satd_x3_8x16c = intra_satd_x3_8x16c_mmx2;
  1039. pixf->intra_sad_x3_8x16c = intra_sad_x3_8x16c_mmx2;
  1040. pixf->intra_satd_x3_8x8c = x264_intra_satd_x3_8x8c_mmx2;
  1041. pixf->intra_sad_x3_8x8c = x264_intra_sad_x3_8x8c_mmx2;
  1042. pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_mmx2;
  1043. pixf->intra_satd_x3_4x4 = x264_intra_satd_x3_4x4_mmx2;
  1044. pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_mmx2;
  1045. }
  1046. if( cpu&X264_CPU_SSE2 )
  1047. {
  1048. INIT5( ssd, _sse2slow );
  1049. INIT2_NAME( sad_aligned, sad, _sse2_aligned );
  1050. pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_sse2;
  1051. pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_sse2;
  1052. pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_sse2;
  1053. pixf->ssim_end4 = x264_pixel_ssim_end4_sse2;
  1054. pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_sse2;
  1055. pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_sse2;
  1056. #if ARCH_X86_64
  1057. pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_sse2;
  1058. pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_sse2;
  1059. #endif
  1060. pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_sse2;
  1061. pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_sse2;
  1062. pixf->vsad = x264_pixel_vsad_sse2;
  1063. pixf->asd8 = x264_pixel_asd8_sse2;
  1064. }
  1065. if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
  1066. {
  1067. INIT2( sad, _sse2 );
  1068. INIT2( sad_x3, _sse2 );
  1069. INIT2( sad_x4, _sse2 );
  1070. INIT6( satd, _sse2 );
  1071. pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_sse2;
  1072. INIT6( satd_x3, _sse2 );
  1073. INIT6( satd_x4, _sse2 );
  1074. INIT4( hadamard_ac, _sse2 );
  1075. INIT_ADS( _sse2 );
  1076. pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_sse2;
  1077. pixf->var[PIXEL_8x16] = x264_pixel_var_8x16_sse2;
  1078. pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_sse2;
  1079. pixf->intra_satd_x3_8x16c = intra_satd_x3_8x16c_sse2;
  1080. pixf->intra_sad_x3_8x16c = intra_sad_x3_8x16c_sse2;
  1081. if( cpu&X264_CPU_CACHELINE_64 )
  1082. {
  1083. INIT2( ssd, _sse2); /* faster for width 16 on p4 */
  1084. #if ARCH_X86
  1085. INIT2( sad, _cache64_sse2 );
  1086. INIT2( sad_x3, _cache64_sse2 );
  1087. INIT2( sad_x4, _cache64_sse2 );
  1088. #endif
  1089. if( cpu&X264_CPU_SSE2_IS_FAST )
  1090. {
  1091. pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_cache64_sse2;
  1092. pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_cache64_sse2;
  1093. }
  1094. }
  1095. }
  1096. if( cpu&X264_CPU_SSE2_IS_FAST && !(cpu&X264_CPU_CACHELINE_64) )
  1097. {
  1098. pixf->sad_aligned[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
  1099. pixf->sad[PIXEL_8x16] = x264_pixel_sad_8x16_sse2;
  1100. pixf->sad_x3[PIXEL_8x16] = x264_pixel_sad_x3_8x16_sse2;
  1101. pixf->sad_x3[PIXEL_8x8] = x264_pixel_sad_x3_8x8_sse2;
  1102. pixf->sad_x3[PIXEL_8x4] = x264_pixel_sad_x3_8x4_sse2;
  1103. pixf->sad_x4[PIXEL_8x16] = x264_pixel_sad_x4_8x16_sse2;
  1104. pixf->sad_x4[PIXEL_8x8] = x264_pixel_sad_x4_8x8_sse2;
  1105. pixf->sad_x4[PIXEL_8x4] = x264_pixel_sad_x4_8x4_sse2;
  1106. }
  1107. if( (cpu&X264_CPU_SSE3) && (cpu&X264_CPU_CACHELINE_64) )
  1108. {
  1109. INIT2( sad, _sse3 );
  1110. INIT2( sad_x3, _sse3 );
  1111. INIT2( sad_x4, _sse3 );
  1112. }
  1113. if( cpu&X264_CPU_SSSE3 )
  1114. {
  1115. INIT4( hadamard_ac, _ssse3 );
  1116. if( !(cpu&X264_CPU_STACK_MOD4) )
  1117. {
  1118. pixf->intra_sad_x9_4x4 = x264_intra_sad_x9_4x4_ssse3;
  1119. pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_ssse3;
  1120. pixf->intra_sad_x9_8x8 = x264_intra_sad_x9_8x8_ssse3;
  1121. #if ARCH_X86_64
  1122. pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_ssse3;
  1123. #endif
  1124. }
  1125. #if ARCH_X86 || !defined( __MACH__ )
  1126. INIT_ADS( _ssse3 );
  1127. #endif
  1128. if( cpu&X264_CPU_SLOW_ATOM )
  1129. {
  1130. pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3_atom;
  1131. pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_ssse3_atom;
  1132. INIT6( satd, _ssse3_atom );
  1133. pixf->satd[PIXEL_4x16] = x264_pixel_satd_4x16_ssse3_atom;
  1134. INIT6( satd_x3, _ssse3_atom );
  1135. INIT6( satd_x4, _ssse3_atom );
  1136. INIT4( hadamard_ac, _ssse3_atom );
  1137. #if ARCH_X86_64
  1138. pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_ssse3_atom;
  1139. #endif
  1140. }
  1141. else
  1142. {
  1143. INIT8( ssd, _ssse3 );
  1144. pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_ssse3;
  1145. pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_ssse3;
  1146. INIT8( satd, _ssse3 );
  1147. INIT7( satd_x3, _ssse3 );
  1148. INIT7( satd_x4, _ssse3 );
  1149. #if ARCH_X86_64
  1150. pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_ssse3;
  1151. #endif
  1152. }
  1153. pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_ssse3;
  1154. if( !(cpu&X264_CPU_SLOW_PSHUFB) )
  1155. pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_ssse3;
  1156. pixf->intra_satd_x3_8x16c = intra_satd_x3_8x16c_ssse3;
  1157. pixf->intra_satd_x3_8x8c = x264_intra_satd_x3_8x8c_ssse3;
  1158. pixf->intra_sad_x3_8x8c = x264_intra_sad_x3_8x8c_ssse3;
  1159. pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_ssse3;
  1160. pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_ssse3;
  1161. pixf->asd8 = x264_pixel_asd8_ssse3;
  1162. if( cpu&X264_CPU_CACHELINE_64 )
  1163. {
  1164. INIT2( sad, _cache64_ssse3 );
  1165. INIT2( sad_x3, _cache64_ssse3 );
  1166. INIT2( sad_x4, _cache64_ssse3 );
  1167. }
  1168. else
  1169. {
  1170. INIT2( sad_x3, _ssse3 );
  1171. INIT5( sad_x4, _ssse3 );
  1172. }
  1173. if( (cpu&X264_CPU_SLOW_ATOM) || (cpu&X264_CPU_SLOW_SHUFFLE) )
  1174. {
  1175. INIT5( ssd, _sse2 ); /* on conroe, sse2 is faster for width8/16 */
  1176. }
  1177. }
  1178. if( cpu&X264_CPU_SSE4 )
  1179. {
  1180. INIT8( satd, _sse4 );
  1181. INIT7( satd_x3, _sse4 );
  1182. INIT7( satd_x4, _sse4 );
  1183. INIT4( hadamard_ac, _sse4 );
  1184. if( !(cpu&X264_CPU_STACK_MOD4) )
  1185. {
  1186. pixf->intra_sad_x9_4x4 = x264_intra_sad_x9_4x4_sse4;
  1187. pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_sse4;
  1188. pixf->intra_sad_x9_8x8 = x264_intra_sad_x9_8x8_sse4;
  1189. #if ARCH_X86_64
  1190. pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_sse4;
  1191. #endif
  1192. }
  1193. pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_sse4;
  1194. pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_sse4;
  1195. pixf->intra_satd_x3_8x16c = intra_satd_x3_8x16c_sse4;
  1196. #if ARCH_X86_64
  1197. pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_sse4;
  1198. #endif
  1199. }
  1200. if( cpu&X264_CPU_AVX )
  1201. {
1202. INIT2_NAME( sad_aligned, sad, _sse2 ); /* AVX-capable CPUs don't benefit from an aligned version */
  1203. INIT2( sad_x3, _avx );
  1204. INIT2( sad_x4, _avx );
  1205. INIT8( satd, _avx );
  1206. INIT7( satd_x3, _avx );
  1207. INIT7( satd_x4, _avx );
  1208. #if ARCH_X86 || !defined( __MACH__ )
  1209. INIT_ADS( _avx );
  1210. #endif
  1211. INIT4( hadamard_ac, _avx );
  1212. if( !(cpu&X264_CPU_STACK_MOD4) )
  1213. {
  1214. pixf->intra_sad_x9_4x4 = x264_intra_sad_x9_4x4_avx;
  1215. pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_avx;
  1216. pixf->intra_sad_x9_8x8 = x264_intra_sad_x9_8x8_avx;
  1217. #if ARCH_X86_64
  1218. pixf->intra_sa8d_x9_8x8 = x264_intra_sa8d_x9_8x8_avx;
  1219. #endif
  1220. }
  1221. INIT5( ssd, _avx );
  1222. pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_avx;
  1223. pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_avx;
  1224. pixf->intra_satd_x3_8x16c = intra_satd_x3_8x16c_avx;
  1225. pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_avx;
  1226. pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx;
  1227. pixf->var[PIXEL_8x16] = x264_pixel_var_8x16_avx;
  1228. pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_avx;
  1229. pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_avx;
  1230. pixf->ssim_end4 = x264_pixel_ssim_end4_avx;
  1231. #if ARCH_X86_64
  1232. pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_avx;
  1233. #endif
  1234. }
  1235. if( cpu&X264_CPU_XOP )
  1236. {
  1237. INIT7( satd, _xop );
  1238. INIT7( satd_x3, _xop );
  1239. INIT7( satd_x4, _xop );
  1240. INIT4( hadamard_ac, _xop );
  1241. if( !(cpu&X264_CPU_STACK_MOD4) )
  1242. {
  1243. pixf->intra_satd_x9_4x4 = x264_intra_satd_x9_4x4_xop;
  1244. }
  1245. INIT5( ssd, _xop );
  1246. pixf->sa8d[PIXEL_16x16]= x264_pixel_sa8d_16x16_xop;
  1247. pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_xop;
  1248. pixf->intra_satd_x3_8x16c = intra_satd_x3_8x16c_xop;
  1249. pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_xop;
  1250. #if ARCH_X86_64
  1251. pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_xop;
  1252. #endif
  1253. }
  1254. if( cpu&X264_CPU_AVX2 )
  1255. {
  1256. INIT2( ssd, _avx2 );
  1257. INIT2( sad_x3, _avx2 );
  1258. INIT2( sad_x4, _avx2 );
  1259. INIT4( satd, _avx2 );
  1260. INIT2( hadamard_ac, _avx2 );
  1261. #if ARCH_X86 || !defined( __MACH__ )
  1262. INIT_ADS( _avx2 );
  1263. #endif
  1264. pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_avx2;
  1265. pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx2;
  1266. pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_avx2;
  1267. pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_avx2;
  1268. pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_avx2;
  1269. pixf->intra_sad_x9_8x8 = x264_intra_sad_x9_8x8_avx2;
  1270. pixf->intra_sad_x3_8x8c = x264_intra_sad_x3_8x8c_avx2;
  1271. pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_avx2;
  1272. #if ARCH_X86_64
  1273. pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_avx2;
  1274. #endif
  1275. }
  1276. if( cpu&X264_CPU_AVX512 )
  1277. {
  1278. INIT8( sad, _avx512 );
  1279. INIT8_NAME( sad_aligned, sad, _avx512 );
  1280. INIT7( sad_x3, _avx512 );
  1281. INIT7( sad_x4, _avx512 );
  1282. INIT8( satd, _avx512 );
  1283. INIT7( satd_x3, _avx512 );
  1284. INIT7( satd_x4, _avx512 );
  1285. pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_avx512;
  1286. pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_avx512;
  1287. pixf->var[PIXEL_8x16] = x264_pixel_var_8x16_avx512;
  1288. pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_avx512;
  1289. pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_avx512;
  1290. pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_avx512;
  1291. }
  1292. #endif //HAVE_MMX
  1293. #if HAVE_ARMV6
  1294. if( cpu&X264_CPU_ARMV6 )
  1295. {
  1296. pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
  1297. pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
  1298. pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_4x8_armv6;
  1299. pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_4x4_armv6;
  1300. }
  1301. if( cpu&X264_CPU_NEON )
  1302. {
  1303. INIT5( sad, _neon );
  1304. INIT5( sad_aligned, _neon );
  1305. INIT7( sad_x3, _neon );
  1306. INIT7( sad_x4, _neon );
  1307. INIT7( ssd, _neon );
  1308. INIT7( satd, _neon );
  1309. INIT7( satd_x3, _neon );
  1310. INIT7( satd_x4, _neon );
  1311. INIT4( hadamard_ac, _neon );
  1312. pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_neon;
  1313. pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_neon;
  1314. pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_neon;
  1315. pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_neon;
  1316. pixf->var[PIXEL_8x16] = x264_pixel_var_8x16_neon;
  1317. pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_neon;
  1318. pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_neon;
  1319. pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_neon;
  1320. pixf->vsad = x264_pixel_vsad_neon;
  1321. pixf->asd8 = x264_pixel_asd8_neon;
  1322. pixf->intra_sad_x3_4x4 = intra_sad_x3_4x4_neon;
  1323. pixf->intra_satd_x3_4x4 = intra_satd_x3_4x4_neon;
  1324. pixf->intra_sad_x3_8x8 = intra_sad_x3_8x8_neon;
  1325. pixf->intra_sa8d_x3_8x8 = intra_sa8d_x3_8x8_neon;
  1326. pixf->intra_sad_x3_8x8c = intra_sad_x3_8x8c_neon;
  1327. pixf->intra_satd_x3_8x8c = intra_satd_x3_8x8c_neon;
  1328. pixf->intra_sad_x3_8x16c = intra_sad_x3_8x16c_neon;
  1329. pixf->intra_satd_x3_8x16c = intra_satd_x3_8x16c_neon;
  1330. pixf->intra_sad_x3_16x16 = intra_sad_x3_16x16_neon;
  1331. pixf->intra_satd_x3_16x16 = intra_satd_x3_16x16_neon;
  1332. pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_neon;
  1333. pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_neon;
  1334. pixf->ssim_end4 = x264_pixel_ssim_end4_neon;
  1335. if( cpu&X264_CPU_FAST_NEON_MRC )
  1336. {
  1337. pixf->sad[PIXEL_4x8] = x264_pixel_sad_4x8_neon;
  1338. pixf->sad[PIXEL_4x4] = x264_pixel_sad_4x4_neon;
  1339. pixf->sad_aligned[PIXEL_4x8] = x264_pixel_sad_aligned_4x8_neon;
  1340. pixf->sad_aligned[PIXEL_4x4] = x264_pixel_sad_aligned_4x4_neon;
  1341. }
  1342. else // really just scheduled for dual issue / A8
  1343. {
  1344. INIT5( sad_aligned, _neon_dual );
  1345. }
  1346. }
  1347. #endif
  1348. #if ARCH_AARCH64
  1349. if( cpu&X264_CPU_NEON )
  1350. {
  1351. INIT8( sad, _neon );
  1352. // AArch64 has no distinct instructions for aligned load/store
  1353. INIT8_NAME( sad_aligned, sad, _neon );
  1354. INIT7( sad_x3, _neon );
  1355. INIT7( sad_x4, _neon );
  1356. INIT8( ssd, _neon );
  1357. INIT8( satd, _neon );
  1358. INIT7( satd_x3, _neon );
  1359. INIT7( satd_x4, _neon );
  1360. INIT4( hadamard_ac, _neon );
  1361. pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_neon;
  1362. pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_neon;
  1363. pixf->sa8d_satd[PIXEL_16x16] = x264_pixel_sa8d_satd_16x16_neon;
  1364. pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_neon;
  1365. pixf->var[PIXEL_8x16] = x264_pixel_var_8x16_neon;
  1366. pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_neon;
  1367. pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_neon;
  1368. pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_neon;
  1369. pixf->vsad = x264_pixel_vsad_neon;
  1370. pixf->asd8 = x264_pixel_asd8_neon;
  1371. pixf->intra_sad_x3_4x4 = intra_sad_x3_4x4_neon;
  1372. pixf->intra_satd_x3_4x4 = intra_satd_x3_4x4_neon;
  1373. pixf->intra_sad_x3_8x8 = intra_sad_x3_8x8_neon;
  1374. pixf->intra_sa8d_x3_8x8 = intra_sa8d_x3_8x8_neon;
  1375. pixf->intra_sad_x3_8x8c = intra_sad_x3_8x8c_neon;
  1376. pixf->intra_satd_x3_8x8c = intra_satd_x3_8x8c_neon;
  1377. pixf->intra_sad_x3_8x16c = intra_sad_x3_8x16c_neon;
  1378. pixf->intra_satd_x3_8x16c = intra_satd_x3_8x16c_neon;
  1379. pixf->intra_sad_x3_16x16 = intra_sad_x3_16x16_neon;
  1380. pixf->intra_satd_x3_16x16 = intra_satd_x3_16x16_neon;
  1381. pixf->ssd_nv12_core = x264_pixel_ssd_nv12_core_neon;
  1382. pixf->ssim_4x4x2_core = x264_pixel_ssim_4x4x2_core_neon;
  1383. pixf->ssim_end4 = x264_pixel_ssim_end4_neon;
  1384. }
  1385. #endif // ARCH_AARCH64
  1386. #if HAVE_MSA
  1387. if( cpu&X264_CPU_MSA )
  1388. {
  1389. INIT8( sad, _msa );
  1390. INIT8_NAME( sad_aligned, sad, _msa );
  1391. INIT8( ssd, _msa );
  1392. INIT7( sad_x3, _msa );
  1393. INIT7( sad_x4, _msa );
  1394. INIT8( satd, _msa );
  1395. INIT4( hadamard_ac, _msa );
  1396. pixf->intra_sad_x3_4x4 = x264_intra_sad_x3_4x4_msa;
  1397. pixf->intra_sad_x3_8x8 = x264_intra_sad_x3_8x8_msa;
  1398. pixf->intra_sad_x3_8x8c = x264_intra_sad_x3_8x8c_msa;
  1399. pixf->intra_sad_x3_16x16 = x264_intra_sad_x3_16x16_msa;
  1400. pixf->intra_satd_x3_4x4 = x264_intra_satd_x3_4x4_msa;
  1401. pixf->intra_satd_x3_16x16 = x264_intra_satd_x3_16x16_msa;
  1402. pixf->intra_satd_x3_8x8c = x264_intra_satd_x3_8x8c_msa;
  1403. pixf->intra_sa8d_x3_8x8 = x264_intra_sa8d_x3_8x8_msa;
  1404. pixf->ssim_4x4x2_core = x264_ssim_4x4x2_core_msa;
  1405. pixf->var[PIXEL_16x16] = x264_pixel_var_16x16_msa;
  1406. pixf->var[PIXEL_8x16] = x264_pixel_var_8x16_msa;
  1407. pixf->var[PIXEL_8x8] = x264_pixel_var_8x8_msa;
  1408. //pixf->var2[PIXEL_8x16] = x264_pixel_var2_8x16_msa;
  1409. //pixf->var2[PIXEL_8x8] = x264_pixel_var2_8x8_msa;
  1410. pixf->sa8d[PIXEL_16x16] = x264_pixel_sa8d_16x16_msa;
  1411. pixf->sa8d[PIXEL_8x8] = x264_pixel_sa8d_8x8_msa;
  1412. }
  1413. #endif // HAVE_MSA
  1414. #endif // HIGH_BIT_DEPTH
  1415. #if HAVE_ALTIVEC
  1416. if( cpu&X264_CPU_ALTIVEC )
  1417. {
  1418. x264_pixel_init_altivec( pixf );
  1419. }
  1420. #endif
  1421. pixf->ads[PIXEL_8x16] =
  1422. pixf->ads[PIXEL_8x4] =
  1423. pixf->ads[PIXEL_4x8] = pixf->ads[PIXEL_16x8];
  1424. pixf->ads[PIXEL_4x4] = pixf->ads[PIXEL_8x8];
  1425. }