/*****************************************************************************
 * dct.c: transform and zigzag
 *****************************************************************************
 * Copyright (C) 2003-2018 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Henrik Gramner <henrik@gramner.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common.h"

#if HAVE_MMX
#   include "x86/dct.h"
#endif
#if ARCH_PPC
#   include "ppc/dct.h"
#endif
#if ARCH_ARM
#   include "arm/dct.h"
#endif
#if ARCH_AARCH64
#   include "aarch64/dct.h"
#endif
#if ARCH_MIPS
#   include "mips/dct.h"
#endif
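
/* 4x4 Hadamard transform of the 16 luma DC coefficients of an Intra-16x16
 * macroblock. Both passes are plain butterflies; the forward version halves
 * the output with rounding ((x+1)>>1), while the inverse omits the
 * normalization, leaving the remaining scale factor to be absorbed by
 * dequantization. */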
static void dct4x4dc( dctcoef d[16] )
{
    dctcoef tmp[16];

    for( int i = 0; i < 4; i++ )
    {
        int s01 = d[i*4+0] + d[i*4+1];
        int d01 = d[i*4+0] - d[i*4+1];
        int s23 = d[i*4+2] + d[i*4+3];
        int d23 = d[i*4+2] - d[i*4+3];

        tmp[0*4+i] = s01 + s23;
        tmp[1*4+i] = s01 - s23;
        tmp[2*4+i] = d01 - d23;
        tmp[3*4+i] = d01 + d23;
    }

    for( int i = 0; i < 4; i++ )
    {
        int s01 = tmp[i*4+0] + tmp[i*4+1];
        int d01 = tmp[i*4+0] - tmp[i*4+1];
        int s23 = tmp[i*4+2] + tmp[i*4+3];
        int d23 = tmp[i*4+2] - tmp[i*4+3];

        d[i*4+0] = ( s01 + s23 + 1 ) >> 1;
        d[i*4+1] = ( s01 - s23 + 1 ) >> 1;
        d[i*4+2] = ( d01 - d23 + 1 ) >> 1;
        d[i*4+3] = ( d01 + d23 + 1 ) >> 1;
    }
}

static void idct4x4dc( dctcoef d[16] )
{
    dctcoef tmp[16];

    for( int i = 0; i < 4; i++ )
    {
        int s01 = d[i*4+0] + d[i*4+1];
        int d01 = d[i*4+0] - d[i*4+1];
        int s23 = d[i*4+2] + d[i*4+3];
        int d23 = d[i*4+2] - d[i*4+3];

        tmp[0*4+i] = s01 + s23;
        tmp[1*4+i] = s01 - s23;
        tmp[2*4+i] = d01 - d23;
        tmp[3*4+i] = d01 + d23;
    }

    for( int i = 0; i < 4; i++ )
    {
        int s01 = tmp[i*4+0] + tmp[i*4+1];
        int d01 = tmp[i*4+0] - tmp[i*4+1];
        int s23 = tmp[i*4+2] + tmp[i*4+3];
        int d23 = tmp[i*4+2] - tmp[i*4+3];

        d[i*4+0] = s01 + s23;
        d[i*4+1] = s01 - s23;
        d[i*4+2] = d01 - d23;
        d[i*4+3] = d01 + d23;
    }
}
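
/* 2x4 Hadamard transform of the chroma DC coefficients of a 4:2:2
 * macroblock: the DC of each of the eight 4x4 sub-blocks is gathered into
 * dct[] and then zeroed in place, so the 4x4 blocks can subsequently be
 * coded as AC-only. */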
static void dct2x4dc( dctcoef dct[8], dctcoef dct4x4[8][16] )
{
    int a0 = dct4x4[0][0] + dct4x4[1][0];
    int a1 = dct4x4[2][0] + dct4x4[3][0];
    int a2 = dct4x4[4][0] + dct4x4[5][0];
    int a3 = dct4x4[6][0] + dct4x4[7][0];
    int a4 = dct4x4[0][0] - dct4x4[1][0];
    int a5 = dct4x4[2][0] - dct4x4[3][0];
    int a6 = dct4x4[4][0] - dct4x4[5][0];
    int a7 = dct4x4[6][0] - dct4x4[7][0];
    int b0 = a0 + a1;
    int b1 = a2 + a3;
    int b2 = a4 + a5;
    int b3 = a6 + a7;
    int b4 = a0 - a1;
    int b5 = a2 - a3;
    int b6 = a4 - a5;
    int b7 = a6 - a7;
    dct[0] = b0 + b1;
    dct[1] = b2 + b3;
    dct[2] = b0 - b1;
    dct[3] = b2 - b3;
    dct[4] = b4 - b5;
    dct[5] = b6 - b7;
    dct[6] = b4 + b5;
    dct[7] = b6 + b7;
    dct4x4[0][0] = 0;
    dct4x4[1][0] = 0;
    dct4x4[2][0] = 0;
    dct4x4[3][0] = 0;
    dct4x4[4][0] = 0;
    dct4x4[5][0] = 0;
    dct4x4[6][0] = 0;
    dct4x4[7][0] = 0;
}

static inline void pixel_sub_wxh( dctcoef *diff, int i_size,
                                  pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
    for( int y = 0; y < i_size; y++ )
    {
        for( int x = 0; x < i_size; x++ )
            diff[x + y*i_size] = pix1[x] - pix2[x];
        pix1 += i_pix1;
        pix2 += i_pix2;
    }
}
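
/* Forward 4x4 transform: the integer DCT approximation from H.264,
 * computed with adds, subtracts and shifts only. Each 1-D pass applies
 *     [ 1  1  1  1 ]
 *     [ 2  1 -1 -2 ]
 *     [ 1 -1 -1  1 ]
 *     [ 1 -2  2 -1 ]
 * to one dimension of the residual produced by pixel_sub_wxh(), then the
 * second loop transforms the other dimension of the transposed result. */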
static void sub4x4_dct( dctcoef dct[16], pixel *pix1, pixel *pix2 )
{
    dctcoef d[16];
    dctcoef tmp[16];

    pixel_sub_wxh( d, 4, pix1, FENC_STRIDE, pix2, FDEC_STRIDE );

    for( int i = 0; i < 4; i++ )
    {
        int s03 = d[i*4+0] + d[i*4+3];
        int s12 = d[i*4+1] + d[i*4+2];
        int d03 = d[i*4+0] - d[i*4+3];
        int d12 = d[i*4+1] - d[i*4+2];

        tmp[0*4+i] =   s03 +   s12;
        tmp[1*4+i] = 2*d03 +   d12;
        tmp[2*4+i] =   s03 -   s12;
        tmp[3*4+i] =   d03 - 2*d12;
    }

    for( int i = 0; i < 4; i++ )
    {
        int s03 = tmp[i*4+0] + tmp[i*4+3];
        int s12 = tmp[i*4+1] + tmp[i*4+2];
        int d03 = tmp[i*4+0] - tmp[i*4+3];
        int d12 = tmp[i*4+1] - tmp[i*4+2];

        dct[i*4+0] =   s03 +   s12;
        dct[i*4+1] = 2*d03 +   d12;
        dct[i*4+2] =   s03 -   s12;
        dct[i*4+3] =   d03 - 2*d12;
    }
}

static void sub8x8_dct( dctcoef dct[4][16], pixel *pix1, pixel *pix2 )
{
    sub4x4_dct( dct[0], &pix1[0], &pix2[0] );
    sub4x4_dct( dct[1], &pix1[4], &pix2[4] );
    sub4x4_dct( dct[2], &pix1[4*FENC_STRIDE+0], &pix2[4*FDEC_STRIDE+0] );
    sub4x4_dct( dct[3], &pix1[4*FENC_STRIDE+4], &pix2[4*FDEC_STRIDE+4] );
}

static void sub16x16_dct( dctcoef dct[16][16], pixel *pix1, pixel *pix2 )
{
    sub8x8_dct( &dct[ 0], &pix1[0], &pix2[0] );
    sub8x8_dct( &dct[ 4], &pix1[8], &pix2[8] );
    sub8x8_dct( &dct[ 8], &pix1[8*FENC_STRIDE+0], &pix2[8*FDEC_STRIDE+0] );
    sub8x8_dct( &dct[12], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] );
}

static int sub4x4_dct_dc( pixel *pix1, pixel *pix2 )
{
    int sum = 0;
    for( int i = 0; i < 4; i++, pix1 += FENC_STRIDE, pix2 += FDEC_STRIDE )
        sum += pix1[0] + pix1[1] + pix1[2] + pix1[3]
             - pix2[0] - pix2[1] - pix2[2] - pix2[3];
    return sum;
}

static void sub8x8_dct_dc( dctcoef dct[4], pixel *pix1, pixel *pix2 )
{
    dct[0] = sub4x4_dct_dc( &pix1[0], &pix2[0] );
    dct[1] = sub4x4_dct_dc( &pix1[4], &pix2[4] );
    dct[2] = sub4x4_dct_dc( &pix1[4*FENC_STRIDE+0], &pix2[4*FDEC_STRIDE+0] );
    dct[3] = sub4x4_dct_dc( &pix1[4*FENC_STRIDE+4], &pix2[4*FDEC_STRIDE+4] );

    /* 2x2 DC transform */
    int d0 = dct[0] + dct[1];
    int d1 = dct[2] + dct[3];
    int d2 = dct[0] - dct[1];
    int d3 = dct[2] - dct[3];
    dct[0] = d0 + d1;
    dct[1] = d0 - d1;
    dct[2] = d2 + d3;
    dct[3] = d2 - d3;
}

static void sub8x16_dct_dc( dctcoef dct[8], pixel *pix1, pixel *pix2 )
{
    int a0 = sub4x4_dct_dc( &pix1[ 0*FENC_STRIDE+0], &pix2[ 0*FDEC_STRIDE+0] );
    int a1 = sub4x4_dct_dc( &pix1[ 0*FENC_STRIDE+4], &pix2[ 0*FDEC_STRIDE+4] );
    int a2 = sub4x4_dct_dc( &pix1[ 4*FENC_STRIDE+0], &pix2[ 4*FDEC_STRIDE+0] );
    int a3 = sub4x4_dct_dc( &pix1[ 4*FENC_STRIDE+4], &pix2[ 4*FDEC_STRIDE+4] );
    int a4 = sub4x4_dct_dc( &pix1[ 8*FENC_STRIDE+0], &pix2[ 8*FDEC_STRIDE+0] );
    int a5 = sub4x4_dct_dc( &pix1[ 8*FENC_STRIDE+4], &pix2[ 8*FDEC_STRIDE+4] );
    int a6 = sub4x4_dct_dc( &pix1[12*FENC_STRIDE+0], &pix2[12*FDEC_STRIDE+0] );
    int a7 = sub4x4_dct_dc( &pix1[12*FENC_STRIDE+4], &pix2[12*FDEC_STRIDE+4] );

    /* 2x4 DC transform */
    int b0 = a0 + a1;
    int b1 = a2 + a3;
    int b2 = a4 + a5;
    int b3 = a6 + a7;
    int b4 = a0 - a1;
    int b5 = a2 - a3;
    int b6 = a4 - a5;
    int b7 = a6 - a7;
    a0 = b0 + b1;
    a1 = b2 + b3;
    a2 = b4 + b5;
    a3 = b6 + b7;
    a4 = b0 - b1;
    a5 = b2 - b3;
    a6 = b4 - b5;
    a7 = b6 - b7;
    dct[0] = a0 + a1;
    dct[1] = a2 + a3;
    dct[2] = a0 - a1;
    dct[3] = a2 - a3;
    dct[4] = a4 - a5;
    dct[5] = a6 - a7;
    dct[6] = a4 + a5;
    dct[7] = a6 + a7;
}
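
/* Inverse 4x4 transform plus reconstruction: the >>1 terms implement the
 * H.264 inverse-transform basis with its 1/2 weights, and the final
 * (x+32)>>6 removes the 2^6 scale left over from the transform and
 * dequantization before the residual is added to the prediction and
 * clipped to the valid pixel range. */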
static void add4x4_idct( pixel *p_dst, dctcoef dct[16] )
{
    dctcoef d[16];
    dctcoef tmp[16];

    for( int i = 0; i < 4; i++ )
    {
        int s02 =  dct[0*4+i]     +  dct[2*4+i];
        int d02 =  dct[0*4+i]     -  dct[2*4+i];
        int s13 =  dct[1*4+i]     + (dct[3*4+i]>>1);
        int d13 = (dct[1*4+i]>>1) -  dct[3*4+i];

        tmp[i*4+0] = s02 + s13;
        tmp[i*4+1] = d02 + d13;
        tmp[i*4+2] = d02 - d13;
        tmp[i*4+3] = s02 - s13;
    }

    for( int i = 0; i < 4; i++ )
    {
        int s02 =  tmp[0*4+i]     +  tmp[2*4+i];
        int d02 =  tmp[0*4+i]     -  tmp[2*4+i];
        int s13 =  tmp[1*4+i]     + (tmp[3*4+i]>>1);
        int d13 = (tmp[1*4+i]>>1) -  tmp[3*4+i];

        d[0*4+i] = ( s02 + s13 + 32 ) >> 6;
        d[1*4+i] = ( d02 + d13 + 32 ) >> 6;
        d[2*4+i] = ( d02 - d13 + 32 ) >> 6;
        d[3*4+i] = ( s02 - s13 + 32 ) >> 6;
    }

    for( int y = 0; y < 4; y++ )
    {
        for( int x = 0; x < 4; x++ )
            p_dst[x] = x264_clip_pixel( p_dst[x] + d[y*4+x] );
        p_dst += FDEC_STRIDE;
    }
}

static void add8x8_idct( pixel *p_dst, dctcoef dct[4][16] )
{
    add4x4_idct( &p_dst[0], dct[0] );
    add4x4_idct( &p_dst[4], dct[1] );
    add4x4_idct( &p_dst[4*FDEC_STRIDE+0], dct[2] );
    add4x4_idct( &p_dst[4*FDEC_STRIDE+4], dct[3] );
}

static void add16x16_idct( pixel *p_dst, dctcoef dct[16][16] )
{
    add8x8_idct( &p_dst[0], &dct[ 0] );
    add8x8_idct( &p_dst[8], &dct[ 4] );
    add8x8_idct( &p_dst[8*FDEC_STRIDE+0], &dct[ 8] );
    add8x8_idct( &p_dst[8*FDEC_STRIDE+8], &dct[12] );
}

/****************************************************************************
 * 8x8 transform:
 ****************************************************************************/
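
/* One 1-D pass of the 8x8 forward transform (High profile). The even-index
 * outputs come from butterflies on the symmetric sums s07..s34; the
 * odd-index outputs from the differences d07..d34, with the 1.5x weights
 * expressed as x + (x>>1) so everything stays in integer adds and shifts.
 * SRC/DST are macros so the same body serves both the column and the row
 * pass. */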
#define DCT8_1D {\
    int s07 = SRC(0) + SRC(7);\
    int s16 = SRC(1) + SRC(6);\
    int s25 = SRC(2) + SRC(5);\
    int s34 = SRC(3) + SRC(4);\
    int a0 = s07 + s34;\
    int a1 = s16 + s25;\
    int a2 = s07 - s34;\
    int a3 = s16 - s25;\
    int d07 = SRC(0) - SRC(7);\
    int d16 = SRC(1) - SRC(6);\
    int d25 = SRC(2) - SRC(5);\
    int d34 = SRC(3) - SRC(4);\
    int a4 = d16 + d25 + (d07 + (d07>>1));\
    int a5 = d07 - d34 - (d25 + (d25>>1));\
    int a6 = d07 + d34 - (d16 + (d16>>1));\
    int a7 = d16 - d25 + (d34 + (d34>>1));\
    DST(0) =  a0 + a1     ;\
    DST(1) =  a4 + (a7>>2);\
    DST(2) =  a2 + (a3>>1);\
    DST(3) =  a5 + (a6>>2);\
    DST(4) =  a0 - a1     ;\
    DST(5) =  a6 - (a5>>2);\
    DST(6) = (a2>>1) - a3 ;\
    DST(7) = (a4>>2) - a7 ;\
}

static void sub8x8_dct8( dctcoef dct[64], pixel *pix1, pixel *pix2 )
{
    dctcoef tmp[64];

    pixel_sub_wxh( tmp, 8, pix1, FENC_STRIDE, pix2, FDEC_STRIDE );

#define SRC(x) tmp[x*8+i]
#define DST(x) tmp[x*8+i]
    for( int i = 0; i < 8; i++ )
        DCT8_1D
#undef SRC
#undef DST

#define SRC(x) tmp[i*8+x]
#define DST(x) dct[x*8+i]
    for( int i = 0; i < 8; i++ )
        DCT8_1D
#undef SRC
#undef DST
}

static void sub16x16_dct8( dctcoef dct[4][64], pixel *pix1, pixel *pix2 )
{
    sub8x8_dct8( dct[0], &pix1[0],               &pix2[0] );
    sub8x8_dct8( dct[1], &pix1[8],               &pix2[8] );
    sub8x8_dct8( dct[2], &pix1[8*FENC_STRIDE+0], &pix2[8*FDEC_STRIDE+0] );
    sub8x8_dct8( dct[3], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] );
}
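
/* Matching 1-D pass of the 8x8 inverse transform: a0..b6 rebuild the even
 * half from coefficients 0/2/4/6 and a1..b7 the odd half from 1/3/5/7,
 * then the final butterflies combine them. DST takes the result expression
 * as an argument so the second pass can clip and store straight to
 * pixels. */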
#define IDCT8_1D {\
    int a0 =  SRC(0) + SRC(4);\
    int a2 =  SRC(0) - SRC(4);\
    int a4 = (SRC(2)>>1) - SRC(6);\
    int a6 = (SRC(6)>>1) + SRC(2);\
    int b0 = a0 + a6;\
    int b2 = a2 + a4;\
    int b4 = a2 - a4;\
    int b6 = a0 - a6;\
    int a1 = -SRC(3) + SRC(5) - SRC(7) - (SRC(7)>>1);\
    int a3 =  SRC(1) + SRC(7) - SRC(3) - (SRC(3)>>1);\
    int a5 = -SRC(1) + SRC(7) + SRC(5) + (SRC(5)>>1);\
    int a7 =  SRC(3) + SRC(5) + SRC(1) + (SRC(1)>>1);\
    int b1 = (a7>>2) + a1;\
    int b3 =  a3 + (a5>>2);\
    int b5 = (a3>>2) - a5;\
    int b7 =  a7 - (a1>>2);\
    DST(0, b0 + b7);\
    DST(1, b2 + b5);\
    DST(2, b4 + b3);\
    DST(3, b6 + b1);\
    DST(4, b6 - b1);\
    DST(5, b4 - b3);\
    DST(6, b2 - b5);\
    DST(7, b0 - b7);\
}

static void add8x8_idct8( pixel *dst, dctcoef dct[64] )
{
    dct[0] += 32; // rounding for the >>6 at the end

#define SRC(x)     dct[x*8+i]
#define DST(x,rhs) dct[x*8+i] = (rhs)
    for( int i = 0; i < 8; i++ )
        IDCT8_1D
#undef SRC
#undef DST

#define SRC(x)     dct[i*8+x]
#define DST(x,rhs) dst[i + x*FDEC_STRIDE] = x264_clip_pixel( dst[i + x*FDEC_STRIDE] + ((rhs) >> 6) );
    for( int i = 0; i < 8; i++ )
        IDCT8_1D
#undef SRC
#undef DST
}

static void add16x16_idct8( pixel *dst, dctcoef dct[4][64] )
{
    add8x8_idct8( &dst[0],               dct[0] );
    add8x8_idct8( &dst[8],               dct[1] );
    add8x8_idct8( &dst[8*FDEC_STRIDE+0], dct[2] );
    add8x8_idct8( &dst[8*FDEC_STRIDE+8], dct[3] );
}
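
/* DC-only fast path: when a 4x4 block contains nothing but a DC
 * coefficient, the full inverse transform collapses to adding the same
 * rounded value, (dc+32)>>6, to all 16 pixels. */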
static inline void add4x4_idct_dc( pixel *p_dst, dctcoef dc )
{
    dc = (dc + 32) >> 6;
    for( int i = 0; i < 4; i++, p_dst += FDEC_STRIDE )
    {
        p_dst[0] = x264_clip_pixel( p_dst[0] + dc );
        p_dst[1] = x264_clip_pixel( p_dst[1] + dc );
        p_dst[2] = x264_clip_pixel( p_dst[2] + dc );
        p_dst[3] = x264_clip_pixel( p_dst[3] + dc );
    }
}

static void add8x8_idct_dc( pixel *p_dst, dctcoef dct[4] )
{
    add4x4_idct_dc( &p_dst[0],               dct[0] );
    add4x4_idct_dc( &p_dst[4],               dct[1] );
    add4x4_idct_dc( &p_dst[4*FDEC_STRIDE+0], dct[2] );
    add4x4_idct_dc( &p_dst[4*FDEC_STRIDE+4], dct[3] );
}

static void add16x16_idct_dc( pixel *p_dst, dctcoef dct[16] )
{
    for( int i = 0; i < 4; i++, dct += 4, p_dst += 4*FDEC_STRIDE )
    {
        add4x4_idct_dc( &p_dst[ 0], dct[0] );
        add4x4_idct_dc( &p_dst[ 4], dct[1] );
        add4x4_idct_dc( &p_dst[ 8], dct[2] );
        add4x4_idct_dc( &p_dst[12], dct[3] );
    }
}
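
/* Runtime dispatch: install the C reference functions above, then
 * overwrite them with platform-specific versions in increasing order of
 * capability, so the best implementation the CPU flags allow wins.
 * Illustrative call sequence (caller-side names are hypothetical, not
 * part of this file):
 *
 *     x264_dct_function_t dctf;
 *     x264_dct_init( cpu_flags, &dctf );
 *     dctf.sub4x4_dct( coefs, fenc_plane, fdec_plane );
 */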
/****************************************************************************
 * x264_dct_init:
 ****************************************************************************/
void x264_dct_init( int cpu, x264_dct_function_t *dctf )
{
    dctf->sub4x4_dct       = sub4x4_dct;
    dctf->add4x4_idct      = add4x4_idct;
    dctf->sub8x8_dct       = sub8x8_dct;
    dctf->sub8x8_dct_dc    = sub8x8_dct_dc;
    dctf->add8x8_idct      = add8x8_idct;
    dctf->add8x8_idct_dc   = add8x8_idct_dc;
    dctf->sub8x16_dct_dc   = sub8x16_dct_dc;
    dctf->sub16x16_dct     = sub16x16_dct;
    dctf->add16x16_idct    = add16x16_idct;
    dctf->add16x16_idct_dc = add16x16_idct_dc;
    dctf->sub8x8_dct8      = sub8x8_dct8;
    dctf->add8x8_idct8     = add8x8_idct8;
    dctf->sub16x16_dct8    = sub16x16_dct8;
    dctf->add16x16_idct8   = add16x16_idct8;
    dctf->dct4x4dc         = dct4x4dc;
    dctf->idct4x4dc        = idct4x4dc;
    dctf->dct2x4dc         = dct2x4dc;

#if HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX )
    {
        dctf->sub4x4_dct   = x264_sub4x4_dct_mmx;
        dctf->sub8x8_dct   = x264_sub8x8_dct_mmx;
        dctf->sub16x16_dct = x264_sub16x16_dct_mmx;
    }
    if( cpu&X264_CPU_SSE2 )
    {
        dctf->add4x4_idct      = x264_add4x4_idct_sse2;
        dctf->dct4x4dc         = x264_dct4x4dc_sse2;
        dctf->idct4x4dc        = x264_idct4x4dc_sse2;
        dctf->dct2x4dc         = x264_dct2x4dc_sse2;
        dctf->sub8x8_dct8      = x264_sub8x8_dct8_sse2;
        dctf->sub16x16_dct8    = x264_sub16x16_dct8_sse2;
        dctf->add8x8_idct      = x264_add8x8_idct_sse2;
        dctf->add16x16_idct    = x264_add16x16_idct_sse2;
        dctf->add8x8_idct8     = x264_add8x8_idct8_sse2;
        dctf->add16x16_idct8   = x264_add16x16_idct8_sse2;
        dctf->sub8x8_dct_dc    = x264_sub8x8_dct_dc_sse2;
        dctf->add8x8_idct_dc   = x264_add8x8_idct_dc_sse2;
        dctf->sub8x16_dct_dc   = x264_sub8x16_dct_dc_sse2;
        dctf->add16x16_idct_dc = x264_add16x16_idct_dc_sse2;
    }
    if( cpu&X264_CPU_SSE4 )
    {
        dctf->sub8x8_dct8   = x264_sub8x8_dct8_sse4;
        dctf->sub16x16_dct8 = x264_sub16x16_dct8_sse4;
    }
    if( cpu&X264_CPU_AVX )
    {
        dctf->add4x4_idct      = x264_add4x4_idct_avx;
        dctf->dct4x4dc         = x264_dct4x4dc_avx;
        dctf->idct4x4dc        = x264_idct4x4dc_avx;
        dctf->dct2x4dc         = x264_dct2x4dc_avx;
        dctf->sub8x8_dct8      = x264_sub8x8_dct8_avx;
        dctf->sub16x16_dct8    = x264_sub16x16_dct8_avx;
        dctf->add8x8_idct      = x264_add8x8_idct_avx;
        dctf->add16x16_idct    = x264_add16x16_idct_avx;
        dctf->add8x8_idct8     = x264_add8x8_idct8_avx;
        dctf->add16x16_idct8   = x264_add16x16_idct8_avx;
        dctf->add8x8_idct_dc   = x264_add8x8_idct_dc_avx;
        dctf->sub8x16_dct_dc   = x264_sub8x16_dct_dc_avx;
        dctf->add16x16_idct_dc = x264_add16x16_idct_dc_avx;
    }
#endif // HAVE_MMX
#else // !HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_MMX )
    {
        dctf->sub4x4_dct    = x264_sub4x4_dct_mmx;
        dctf->add4x4_idct   = x264_add4x4_idct_mmx;
        dctf->idct4x4dc     = x264_idct4x4dc_mmx;
        dctf->sub8x8_dct_dc = x264_sub8x8_dct_dc_mmx2;
#if !ARCH_X86_64
        dctf->sub8x8_dct     = x264_sub8x8_dct_mmx;
        dctf->sub16x16_dct   = x264_sub16x16_dct_mmx;
        dctf->add8x8_idct    = x264_add8x8_idct_mmx;
        dctf->add16x16_idct  = x264_add16x16_idct_mmx;
        dctf->sub8x8_dct8    = x264_sub8x8_dct8_mmx;
        dctf->sub16x16_dct8  = x264_sub16x16_dct8_mmx;
        dctf->add8x8_idct8   = x264_add8x8_idct8_mmx;
        dctf->add16x16_idct8 = x264_add16x16_idct8_mmx;
#endif
    }
    if( cpu&X264_CPU_MMX2 )
    {
        dctf->dct4x4dc         = x264_dct4x4dc_mmx2;
        dctf->dct2x4dc         = x264_dct2x4dc_mmx2;
        dctf->add8x8_idct_dc   = x264_add8x8_idct_dc_mmx2;
        dctf->add16x16_idct_dc = x264_add16x16_idct_dc_mmx2;
    }
    if( cpu&X264_CPU_SSE2 )
    {
        dctf->sub8x8_dct8    = x264_sub8x8_dct8_sse2;
        dctf->sub16x16_dct8  = x264_sub16x16_dct8_sse2;
        dctf->sub8x8_dct_dc  = x264_sub8x8_dct_dc_sse2;
        dctf->sub8x16_dct_dc = x264_sub8x16_dct_dc_sse2;
        dctf->add8x8_idct8   = x264_add8x8_idct8_sse2;
        dctf->add16x16_idct8 = x264_add16x16_idct8_sse2;
        if( !(cpu&X264_CPU_SSE2_IS_SLOW) )
        {
            dctf->sub8x8_dct       = x264_sub8x8_dct_sse2;
            dctf->sub16x16_dct     = x264_sub16x16_dct_sse2;
            dctf->add8x8_idct      = x264_add8x8_idct_sse2;
            dctf->add16x16_idct    = x264_add16x16_idct_sse2;
            dctf->add16x16_idct_dc = x264_add16x16_idct_dc_sse2;
        }
    }
    if( (cpu&X264_CPU_SSSE3) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
    {
        dctf->sub8x16_dct_dc = x264_sub8x16_dct_dc_ssse3;
        if( !(cpu&X264_CPU_SLOW_ATOM) )
        {
            dctf->sub4x4_dct    = x264_sub4x4_dct_ssse3;
            dctf->sub8x8_dct    = x264_sub8x8_dct_ssse3;
            dctf->sub16x16_dct  = x264_sub16x16_dct_ssse3;
            dctf->sub8x8_dct8   = x264_sub8x8_dct8_ssse3;
            dctf->sub16x16_dct8 = x264_sub16x16_dct8_ssse3;
            if( !(cpu&X264_CPU_SLOW_PSHUFB) )
            {
                dctf->add8x8_idct_dc   = x264_add8x8_idct_dc_ssse3;
                dctf->add16x16_idct_dc = x264_add16x16_idct_dc_ssse3;
            }
        }
    }
    if( cpu&X264_CPU_SSE4 )
        dctf->add4x4_idct = x264_add4x4_idct_sse4;
    if( cpu&X264_CPU_AVX )
    {
        dctf->add4x4_idct      = x264_add4x4_idct_avx;
        dctf->add8x8_idct      = x264_add8x8_idct_avx;
        dctf->add16x16_idct    = x264_add16x16_idct_avx;
        dctf->add8x8_idct8     = x264_add8x8_idct8_avx;
        dctf->add16x16_idct8   = x264_add16x16_idct8_avx;
        dctf->add16x16_idct_dc = x264_add16x16_idct_dc_avx;
        dctf->sub8x8_dct       = x264_sub8x8_dct_avx;
        dctf->sub16x16_dct     = x264_sub16x16_dct_avx;
        dctf->sub8x8_dct8      = x264_sub8x8_dct8_avx;
        dctf->sub16x16_dct8    = x264_sub16x16_dct8_avx;
    }
    if( cpu&X264_CPU_XOP )
    {
        dctf->sub8x8_dct   = x264_sub8x8_dct_xop;
        dctf->sub16x16_dct = x264_sub16x16_dct_xop;
    }
    if( cpu&X264_CPU_AVX2 )
    {
        dctf->add8x8_idct      = x264_add8x8_idct_avx2;
        dctf->add16x16_idct    = x264_add16x16_idct_avx2;
        dctf->sub8x8_dct       = x264_sub8x8_dct_avx2;
        dctf->sub16x16_dct     = x264_sub16x16_dct_avx2;
        dctf->add16x16_idct_dc = x264_add16x16_idct_dc_avx2;
#if ARCH_X86_64
        dctf->sub16x16_dct8    = x264_sub16x16_dct8_avx2;
#endif
    }
    if( cpu&X264_CPU_AVX512 )
    {
        dctf->sub4x4_dct     = x264_sub4x4_dct_avx512;
        dctf->sub8x8_dct     = x264_sub8x8_dct_avx512;
        dctf->sub16x16_dct   = x264_sub16x16_dct_avx512;
        dctf->sub8x8_dct_dc  = x264_sub8x8_dct_dc_avx512;
        dctf->sub8x16_dct_dc = x264_sub8x16_dct_dc_avx512;
        dctf->add8x8_idct    = x264_add8x8_idct_avx512;
    }
#endif // HAVE_MMX

#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
    {
        dctf->sub4x4_dct       = x264_sub4x4_dct_altivec;
        dctf->sub8x8_dct       = x264_sub8x8_dct_altivec;
        dctf->sub16x16_dct     = x264_sub16x16_dct_altivec;
        dctf->add8x8_idct_dc   = x264_add8x8_idct_dc_altivec;
        dctf->add16x16_idct_dc = x264_add16x16_idct_dc_altivec;
        dctf->add4x4_idct      = x264_add4x4_idct_altivec;
        dctf->add8x8_idct      = x264_add8x8_idct_altivec;
        dctf->add16x16_idct    = x264_add16x16_idct_altivec;
        dctf->sub8x8_dct_dc    = x264_sub8x8_dct_dc_altivec;
        dctf->sub8x8_dct8      = x264_sub8x8_dct8_altivec;
        dctf->sub16x16_dct8    = x264_sub16x16_dct8_altivec;
        dctf->add8x8_idct8     = x264_add8x8_idct8_altivec;
        dctf->add16x16_idct8   = x264_add16x16_idct8_altivec;
    }
#endif

#if HAVE_ARMV6 || ARCH_AARCH64
    if( cpu&X264_CPU_NEON )
    {
        dctf->sub4x4_dct       = x264_sub4x4_dct_neon;
        dctf->sub8x8_dct       = x264_sub8x8_dct_neon;
        dctf->sub16x16_dct     = x264_sub16x16_dct_neon;
        dctf->add8x8_idct_dc   = x264_add8x8_idct_dc_neon;
        dctf->add16x16_idct_dc = x264_add16x16_idct_dc_neon;
        dctf->sub8x8_dct_dc    = x264_sub8x8_dct_dc_neon;
        dctf->dct4x4dc         = x264_dct4x4dc_neon;
        dctf->idct4x4dc        = x264_idct4x4dc_neon;
        dctf->add4x4_idct      = x264_add4x4_idct_neon;
        dctf->add8x8_idct      = x264_add8x8_idct_neon;
        dctf->add16x16_idct    = x264_add16x16_idct_neon;
        dctf->sub8x8_dct8      = x264_sub8x8_dct8_neon;
        dctf->sub16x16_dct8    = x264_sub16x16_dct8_neon;
        dctf->add8x8_idct8     = x264_add8x8_idct8_neon;
        dctf->add16x16_idct8   = x264_add16x16_idct8_neon;
        dctf->sub8x16_dct_dc   = x264_sub8x16_dct_dc_neon;
    }
#endif

#if HAVE_MSA
    if( cpu&X264_CPU_MSA )
    {
        dctf->sub4x4_dct       = x264_sub4x4_dct_msa;
        dctf->sub8x8_dct       = x264_sub8x8_dct_msa;
        dctf->sub16x16_dct     = x264_sub16x16_dct_msa;
        dctf->sub8x8_dct_dc    = x264_sub8x8_dct_dc_msa;
        dctf->sub8x16_dct_dc   = x264_sub8x16_dct_dc_msa;
        dctf->dct4x4dc         = x264_dct4x4dc_msa;
        dctf->idct4x4dc        = x264_idct4x4dc_msa;
        dctf->add4x4_idct      = x264_add4x4_idct_msa;
        dctf->add8x8_idct      = x264_add8x8_idct_msa;
        dctf->add8x8_idct_dc   = x264_add8x8_idct_dc_msa;
        dctf->add16x16_idct    = x264_add16x16_idct_msa;
        dctf->add16x16_idct_dc = x264_add16x16_idct_dc_msa;
        dctf->add8x8_idct8     = x264_add8x8_idct8_msa;
        dctf->add16x16_idct8   = x264_add16x16_idct8_msa;
    }
#endif
#endif // HIGH_BIT_DEPTH
}
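
/* Zigzag scans. ZIG(i,y,x) copies the coefficient at position (y,x) of the
 * transform block into scan position i of level[]; the ZIGZAG*_FRAME and
 * ZIGZAG*_FIELD macros below unroll the progressive and interlaced scan
 * orders for 8x8 and 4x4 blocks. */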
#define ZIG(i,y,x) level[i] = dct[x*8+y];
#define ZIGZAG8_FRAME\
    ZIG( 0,0,0) ZIG( 1,0,1) ZIG( 2,1,0) ZIG( 3,2,0)\
    ZIG( 4,1,1) ZIG( 5,0,2) ZIG( 6,0,3) ZIG( 7,1,2)\
    ZIG( 8,2,1) ZIG( 9,3,0) ZIG(10,4,0) ZIG(11,3,1)\
    ZIG(12,2,2) ZIG(13,1,3) ZIG(14,0,4) ZIG(15,0,5)\
    ZIG(16,1,4) ZIG(17,2,3) ZIG(18,3,2) ZIG(19,4,1)\
    ZIG(20,5,0) ZIG(21,6,0) ZIG(22,5,1) ZIG(23,4,2)\
    ZIG(24,3,3) ZIG(25,2,4) ZIG(26,1,5) ZIG(27,0,6)\
    ZIG(28,0,7) ZIG(29,1,6) ZIG(30,2,5) ZIG(31,3,4)\
    ZIG(32,4,3) ZIG(33,5,2) ZIG(34,6,1) ZIG(35,7,0)\
    ZIG(36,7,1) ZIG(37,6,2) ZIG(38,5,3) ZIG(39,4,4)\
    ZIG(40,3,5) ZIG(41,2,6) ZIG(42,1,7) ZIG(43,2,7)\
    ZIG(44,3,6) ZIG(45,4,5) ZIG(46,5,4) ZIG(47,6,3)\
    ZIG(48,7,2) ZIG(49,7,3) ZIG(50,6,4) ZIG(51,5,5)\
    ZIG(52,4,6) ZIG(53,3,7) ZIG(54,4,7) ZIG(55,5,6)\
    ZIG(56,6,5) ZIG(57,7,4) ZIG(58,7,5) ZIG(59,6,6)\
    ZIG(60,5,7) ZIG(61,6,7) ZIG(62,7,6) ZIG(63,7,7)

#define ZIGZAG8_FIELD\
    ZIG( 0,0,0) ZIG( 1,1,0) ZIG( 2,2,0) ZIG( 3,0,1)\
    ZIG( 4,1,1) ZIG( 5,3,0) ZIG( 6,4,0) ZIG( 7,2,1)\
    ZIG( 8,0,2) ZIG( 9,3,1) ZIG(10,5,0) ZIG(11,6,0)\
    ZIG(12,7,0) ZIG(13,4,1) ZIG(14,1,2) ZIG(15,0,3)\
    ZIG(16,2,2) ZIG(17,5,1) ZIG(18,6,1) ZIG(19,7,1)\
    ZIG(20,3,2) ZIG(21,1,3) ZIG(22,0,4) ZIG(23,2,3)\
    ZIG(24,4,2) ZIG(25,5,2) ZIG(26,6,2) ZIG(27,7,2)\
    ZIG(28,3,3) ZIG(29,1,4) ZIG(30,0,5) ZIG(31,2,4)\
    ZIG(32,4,3) ZIG(33,5,3) ZIG(34,6,3) ZIG(35,7,3)\
    ZIG(36,3,4) ZIG(37,1,5) ZIG(38,0,6) ZIG(39,2,5)\
    ZIG(40,4,4) ZIG(41,5,4) ZIG(42,6,4) ZIG(43,7,4)\
    ZIG(44,3,5) ZIG(45,1,6) ZIG(46,2,6) ZIG(47,4,5)\
    ZIG(48,5,5) ZIG(49,6,5) ZIG(50,7,5) ZIG(51,3,6)\
    ZIG(52,0,7) ZIG(53,1,7) ZIG(54,4,6) ZIG(55,5,6)\
    ZIG(56,6,6) ZIG(57,7,6) ZIG(58,2,7) ZIG(59,3,7)\
    ZIG(60,4,7) ZIG(61,5,7) ZIG(62,6,7) ZIG(63,7,7)

#define ZIGZAG4_FRAME\
    ZIGDC( 0,0,0) ZIG( 1,0,1) ZIG( 2,1,0) ZIG( 3,2,0)\
    ZIG( 4,1,1) ZIG( 5,0,2) ZIG( 6,0,3) ZIG( 7,1,2)\
    ZIG( 8,2,1) ZIG( 9,3,0) ZIG(10,3,1) ZIG(11,2,2)\
    ZIG(12,1,3) ZIG(13,2,3) ZIG(14,3,2) ZIG(15,3,3)

#define ZIGZAG4_FIELD\
    ZIGDC( 0,0,0) ZIG( 1,1,0) ZIG( 2,0,1) ZIG( 3,2,0)\
    ZIG( 4,3,0) ZIG( 5,1,1) ZIG( 6,2,1) ZIG( 7,3,1)\
    ZIG( 8,0,2) ZIG( 9,1,2) ZIG(10,2,2) ZIG(11,3,2)\
    ZIG(12,0,3) ZIG(13,1,3) ZIG(14,2,3) ZIG(15,3,3)

static void zigzag_scan_8x8_frame( dctcoef level[64], dctcoef dct[64] )
{
    ZIGZAG8_FRAME
}

static void zigzag_scan_8x8_field( dctcoef level[64], dctcoef dct[64] )
{
    ZIGZAG8_FIELD
}

#undef ZIG
#define ZIG(i,y,x) level[i] = dct[x*4+y];
#define ZIGDC(i,y,x) ZIG(i,y,x)

static void zigzag_scan_4x4_frame( dctcoef level[16], dctcoef dct[16] )
{
    ZIGZAG4_FRAME
}

static void zigzag_scan_4x4_field( dctcoef level[16], dctcoef dct[16] )
{
    memcpy( level, dct, 2 * sizeof(dctcoef) );
    ZIG(2,0,1) ZIG(3,2,0) ZIG(4,3,0) ZIG(5,1,1)
    memcpy( level+6, dct+6, 10 * sizeof(dctcoef) );
}
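
/* Fused subtract + zigzag, as used for transform-bypass (lossless) blocks:
 * ZIG is redefined so each scan step computes the residual p_src - p_dst
 * directly while accumulating a nonzero flag in nz, and COPY4x4/COPY8x8
 * then copy the source into the reconstruction buffer (lossless
 * reconstruction equals the source). In the *ac variants, ZIGDC routes the
 * DC residual to *dc and stores 0 in level[0], since chroma DC is coded
 * separately; the DC value does not contribute to nz. */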
#undef ZIG
#define ZIG(i,y,x) {\
    int oe = x+y*FENC_STRIDE;\
    int od = x+y*FDEC_STRIDE;\
    level[i] = p_src[oe] - p_dst[od];\
    nz |= level[i];\
}
#define COPY4x4\
    CPPIXEL_X4( p_dst+0*FDEC_STRIDE, p_src+0*FENC_STRIDE );\
    CPPIXEL_X4( p_dst+1*FDEC_STRIDE, p_src+1*FENC_STRIDE );\
    CPPIXEL_X4( p_dst+2*FDEC_STRIDE, p_src+2*FENC_STRIDE );\
    CPPIXEL_X4( p_dst+3*FDEC_STRIDE, p_src+3*FENC_STRIDE );
#define CPPIXEL_X8(dst,src) ( CPPIXEL_X4(dst,src), CPPIXEL_X4(dst+4,src+4) )
#define COPY8x8\
    CPPIXEL_X8( p_dst+0*FDEC_STRIDE, p_src+0*FENC_STRIDE );\
    CPPIXEL_X8( p_dst+1*FDEC_STRIDE, p_src+1*FENC_STRIDE );\
    CPPIXEL_X8( p_dst+2*FDEC_STRIDE, p_src+2*FENC_STRIDE );\
    CPPIXEL_X8( p_dst+3*FDEC_STRIDE, p_src+3*FENC_STRIDE );\
    CPPIXEL_X8( p_dst+4*FDEC_STRIDE, p_src+4*FENC_STRIDE );\
    CPPIXEL_X8( p_dst+5*FDEC_STRIDE, p_src+5*FENC_STRIDE );\
    CPPIXEL_X8( p_dst+6*FDEC_STRIDE, p_src+6*FENC_STRIDE );\
    CPPIXEL_X8( p_dst+7*FDEC_STRIDE, p_src+7*FENC_STRIDE );

static int zigzag_sub_4x4_frame( dctcoef level[16], const pixel *p_src, pixel *p_dst )
{
    int nz = 0;
    ZIGZAG4_FRAME
    COPY4x4
    return !!nz;
}

static int zigzag_sub_4x4_field( dctcoef level[16], const pixel *p_src, pixel *p_dst )
{
    int nz = 0;
    ZIGZAG4_FIELD
    COPY4x4
    return !!nz;
}

#undef ZIGDC
#define ZIGDC(i,y,x) {\
    int oe = x+y*FENC_STRIDE;\
    int od = x+y*FDEC_STRIDE;\
    *dc = p_src[oe] - p_dst[od];\
    level[0] = 0;\
}

static int zigzag_sub_4x4ac_frame( dctcoef level[16], const pixel *p_src, pixel *p_dst, dctcoef *dc )
{
    int nz = 0;
    ZIGZAG4_FRAME
    COPY4x4
    return !!nz;
}

static int zigzag_sub_4x4ac_field( dctcoef level[16], const pixel *p_src, pixel *p_dst, dctcoef *dc )
{
    int nz = 0;
    ZIGZAG4_FIELD
    COPY4x4
    return !!nz;
}

static int zigzag_sub_8x8_frame( dctcoef level[64], const pixel *p_src, pixel *p_dst )
{
    int nz = 0;
    ZIGZAG8_FRAME
    COPY8x8
    return !!nz;
}

static int zigzag_sub_8x8_field( dctcoef level[64], const pixel *p_src, pixel *p_dst )
{
    int nz = 0;
    ZIGZAG8_FIELD
    COPY8x8
    return !!nz;
}

#undef ZIG
#undef COPY4x4
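
/* CAVLC codes an 8x8 block as four interleaved 4x4 scans: coefficient j of
 * sub-block i lives at src[i+j*4]. Gather each sub-block into a contiguous
 * run of 16 coefficients and record its nonzero flag in the caller's nnz
 * map, whose rows are 8 entries apart (hence (i&1) + (i>>1)*8 for the 2x2
 * arrangement of sub-blocks). */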
static void zigzag_interleave_8x8_cavlc( dctcoef *dst, dctcoef *src, uint8_t *nnz )
{
    for( int i = 0; i < 4; i++ )
    {
        int nz = 0;
        for( int j = 0; j < 16; j++ )
        {
            nz |= src[i+j*4];
            dst[i*16+j] = src[i+j*4];
        }
        nnz[(i&1) + (i>>1)*8] = !!nz;
    }
}
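
/* Same dispatch pattern as x264_dct_init, but two function tables are
 * filled at once: pf_progressive gets the frame scans and pf_interlaced
 * the field scans, so the encoder can pick per macroblock (e.g. under
 * MBAFF) without further branching. */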
void x264_zigzag_init( int cpu, x264_zigzag_function_t *pf_progressive, x264_zigzag_function_t *pf_interlaced )
{
    pf_interlaced->scan_8x8   = zigzag_scan_8x8_field;
    pf_progressive->scan_8x8  = zigzag_scan_8x8_frame;
    pf_interlaced->scan_4x4   = zigzag_scan_4x4_field;
    pf_progressive->scan_4x4  = zigzag_scan_4x4_frame;
    pf_interlaced->sub_8x8    = zigzag_sub_8x8_field;
    pf_progressive->sub_8x8   = zigzag_sub_8x8_frame;
    pf_interlaced->sub_4x4    = zigzag_sub_4x4_field;
    pf_progressive->sub_4x4   = zigzag_sub_4x4_frame;
    pf_interlaced->sub_4x4ac  = zigzag_sub_4x4ac_field;
    pf_progressive->sub_4x4ac = zigzag_sub_4x4ac_frame;

#if HIGH_BIT_DEPTH
#if HAVE_MMX
    if( cpu&X264_CPU_SSE2 )
    {
        pf_interlaced->scan_4x4  = x264_zigzag_scan_4x4_field_sse2;
        pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_sse2;
        pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_sse2;
    }
    if( cpu&X264_CPU_SSE4 )
        pf_interlaced->scan_8x8 = x264_zigzag_scan_8x8_field_sse4;
    if( cpu&X264_CPU_AVX )
        pf_interlaced->scan_8x8 = x264_zigzag_scan_8x8_field_avx;
#if ARCH_X86_64
    if( cpu&X264_CPU_AVX )
    {
        pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_avx;
        pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_avx;
    }
#endif // ARCH_X86_64
    if( cpu&X264_CPU_AVX512 )
    {
        pf_interlaced->scan_4x4  = x264_zigzag_scan_4x4_field_avx512;
        pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_avx512;
        pf_interlaced->scan_8x8  = x264_zigzag_scan_8x8_field_avx512;
        pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_avx512;
    }
#endif // HAVE_MMX
#else
#if HAVE_MMX
    if( cpu&X264_CPU_MMX )
        pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_mmx;
    if( cpu&X264_CPU_MMX2 )
    {
        pf_interlaced->scan_8x8  = x264_zigzag_scan_8x8_field_mmx2;
        pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_mmx2;
    }
    if( cpu&X264_CPU_SSE )
        pf_interlaced->scan_4x4 = x264_zigzag_scan_4x4_field_sse;
    if( cpu&X264_CPU_SSE2_IS_FAST )
        pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_sse2;
    if( cpu&X264_CPU_SSSE3 )
    {
        pf_interlaced->sub_4x4    = x264_zigzag_sub_4x4_field_ssse3;
        pf_progressive->sub_4x4   = x264_zigzag_sub_4x4_frame_ssse3;
        pf_interlaced->sub_4x4ac  = x264_zigzag_sub_4x4ac_field_ssse3;
        pf_progressive->sub_4x4ac = x264_zigzag_sub_4x4ac_frame_ssse3;
        pf_progressive->scan_8x8  = x264_zigzag_scan_8x8_frame_ssse3;
        if( !(cpu&X264_CPU_SLOW_SHUFFLE) )
            pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_ssse3;
    }
    if( cpu&X264_CPU_AVX )
    {
        pf_interlaced->sub_4x4    = x264_zigzag_sub_4x4_field_avx;
        pf_progressive->sub_4x4   = x264_zigzag_sub_4x4_frame_avx;
#if ARCH_X86_64
        pf_interlaced->sub_4x4ac  = x264_zigzag_sub_4x4ac_field_avx;
        pf_progressive->sub_4x4ac = x264_zigzag_sub_4x4ac_frame_avx;
#endif
        pf_progressive->scan_4x4  = x264_zigzag_scan_4x4_frame_avx;
    }
    if( cpu&X264_CPU_XOP )
    {
        pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_xop;
        pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_xop;
        pf_interlaced->scan_8x8  = x264_zigzag_scan_8x8_field_xop;
    }
    if( cpu&X264_CPU_AVX512 )
    {
        pf_interlaced->scan_4x4  = x264_zigzag_scan_4x4_field_avx512;
        pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_avx512;
        pf_interlaced->scan_8x8  = x264_zigzag_scan_8x8_field_avx512;
        pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_avx512;
    }
#endif // HAVE_MMX
#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
    {
        pf_interlaced->scan_4x4  = x264_zigzag_scan_4x4_field_altivec;
        pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_altivec;
        pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_altivec;
    }
#endif
#if HAVE_ARMV6 || ARCH_AARCH64
    if( cpu&X264_CPU_NEON )
    {
        pf_progressive->scan_4x4  = x264_zigzag_scan_4x4_frame_neon;
#if ARCH_AARCH64
        pf_interlaced->scan_4x4   = x264_zigzag_scan_4x4_field_neon;
        pf_interlaced->scan_8x8   = x264_zigzag_scan_8x8_field_neon;
        pf_interlaced->sub_4x4    = x264_zigzag_sub_4x4_field_neon;
        pf_interlaced->sub_4x4ac  = x264_zigzag_sub_4x4ac_field_neon;
        pf_interlaced->sub_8x8    = x264_zigzag_sub_8x8_field_neon;
        pf_progressive->scan_8x8  = x264_zigzag_scan_8x8_frame_neon;
        pf_progressive->sub_4x4   = x264_zigzag_sub_4x4_frame_neon;
        pf_progressive->sub_4x4ac = x264_zigzag_sub_4x4ac_frame_neon;
        pf_progressive->sub_8x8   = x264_zigzag_sub_8x8_frame_neon;
#endif // ARCH_AARCH64
    }
#endif // HAVE_ARMV6 || ARCH_AARCH64
#endif // HIGH_BIT_DEPTH

    pf_interlaced->interleave_8x8_cavlc =
    pf_progressive->interleave_8x8_cavlc = zigzag_interleave_8x8_cavlc;
#if HAVE_MMX
#if HIGH_BIT_DEPTH
    if( cpu&X264_CPU_SSE2 )
    {
        pf_interlaced->interleave_8x8_cavlc =
        pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_sse2;
    }
    if( cpu&X264_CPU_AVX )
    {
        pf_interlaced->interleave_8x8_cavlc =
        pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_avx;
    }
    if( cpu&X264_CPU_AVX512 )
    {
        pf_interlaced->interleave_8x8_cavlc =
        pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_avx512;
    }
#else
    if( cpu&X264_CPU_MMX )
    {
        pf_interlaced->interleave_8x8_cavlc =
        pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_mmx;
    }
    if( (cpu&X264_CPU_SSE2) && !(cpu&(X264_CPU_SLOW_SHUFFLE|X264_CPU_SSE2_IS_SLOW)) )
    {
        pf_interlaced->interleave_8x8_cavlc =
        pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_sse2;
    }
    if( cpu&X264_CPU_AVX )
    {
        pf_interlaced->interleave_8x8_cavlc =
        pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_avx;
    }
    if( cpu&X264_CPU_AVX2 )
    {
        pf_interlaced->interleave_8x8_cavlc =
        pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_avx2;
    }
    if( cpu&X264_CPU_AVX512 )
    {
        pf_interlaced->interleave_8x8_cavlc =
        pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_avx512;
    }
#endif // HIGH_BIT_DEPTH
#endif
#if !HIGH_BIT_DEPTH
#if ARCH_AARCH64
    if( cpu&X264_CPU_NEON )
    {
        pf_interlaced->interleave_8x8_cavlc =
        pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_neon;
    }
#endif // ARCH_AARCH64
#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
    {
        pf_interlaced->interleave_8x8_cavlc =
        pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_altivec;
    }
#endif // HAVE_ALTIVEC
#if HAVE_MSA
    if( cpu&X264_CPU_MSA )
    {
        pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_msa;
    }
#endif
#endif // !HIGH_BIT_DEPTH
}