dct-32.asm
;*****************************************************************************
;* dct-32.asm: x86_32 transform and zigzag
;*****************************************************************************
;* Copyright (C) 2003-2018 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Holger Lubitz <holger@lubitz.org>
;*          Laurent Aimar <fenrir@via.ecp.fr>
;*          Min Chen <chenm001.163.com>
;*          Christian Heine <sennindemokrit@gmx.net>
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
;*
;* This program is also available under a commercial proprietary license.
;* For more information, contact us at licensing@x264.com.
;*****************************************************************************

%include "x86inc.asm"
%include "x86util.asm"

SECTION .text

cextern pd_32
cextern pw_pixel_max
cextern pw_2
cextern pw_m2
cextern pw_32
cextern hsub_mul
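
; SPILL_SHUFFLE / UNSPILL_SHUFFLE copy the listed mmregs to/from the dct
; buffer at [ptr + offset*16]: the register list comes first, the matching
; offset list second, and the two lists are walked in lockstep via %rotate.
; Illustrative expansion: "SPILL_SHUFFLE r0, 1,2, 5,6" stores m1 to
; [r0+0x50] and m2 to [r0+0x60].  SPILL/UNSPILL are the shorthand for
; offsets equal to the register numbers.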
%macro SPILL_SHUFFLE 3-* ; ptr, list of regs, list of memory offsets
    %xdefine %%base %1
    %rep %0/2
    %xdefine %%tmp m%2
    %rotate %0/2
    mova [%%base + %2*16], %%tmp
    %rotate 1-%0/2
    %endrep
%endmacro

%macro UNSPILL_SHUFFLE 3-*
    %xdefine %%base %1
    %rep %0/2
    %xdefine %%tmp m%2
    %rotate %0/2
    mova %%tmp, [%%base + %2*16]
    %rotate 1-%0/2
    %endrep
%endmacro

%macro SPILL 2+ ; assume offsets are the same as reg numbers
    SPILL_SHUFFLE %1, %2, %2
%endmacro

%macro UNSPILL 2+
    UNSPILL_SHUFFLE %1, %2, %2
%endmacro
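
; DCT8_1D is one 1-D pass of the 8x8 forward transform used by H.264 High
; profile: SUMSUB_BA butterflies produce the sums/differences s07..s34 and
; d07..d34, the even half gives dst0/dst2/dst4/dst6, and the shift-and-add
; odd half gives dst1/dst3/dst5/dst7.  %1 selects the element size (w or d).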
; in: size, m0..m7
; out: 0,4,6 in memory at %10,%11,%12, rest in regs
%macro DCT8_1D 12
    SUMSUB_BA %1, %9, %2  ; %9 = s07, %2 = d07
    SUMSUB_BA %1, %8, %3  ; %8 = s16, %3 = d16
    SUMSUB_BA %1, %7, %4  ; %7 = s25, %4 = d25
    SUMSUB_BA %1, %6, %5  ; %6 = s34, %5 = d34
    SUMSUB_BA %1, %6, %9  ; %6 = a0, %9 = a2
    SUMSUB_BA %1, %7, %8  ; %7 = a1, %8 = a3
    SUMSUB_BA %1, %7, %6  ; %7 = dst0, %6 = dst4
    mova   %10, m%7
    mova   %11, m%6
    psra%1 m%7, m%8, 1    ; a3>>1
    padd%1 m%7, m%9       ; a2 + (a3>>1)
    psra%1 m%9, 1         ; a2>>1
    psub%1 m%9, m%8       ; (a2>>1) - a3
    mova   %12, m%9
    psra%1 m%6, m%4, 1
    padd%1 m%6, m%4       ; d25+(d25>>1)
    psub%1 m%8, m%2, m%5  ; a5 = d07-d34-(d25+(d25>>1))
    psub%1 m%8, m%6
    psra%1 m%6, m%3, 1
    padd%1 m%6, m%3       ; d16+(d16>>1)
    padd%1 m%9, m%2, m%5
    psub%1 m%9, m%6       ; a6 = d07+d34-(d16+(d16>>1))
    psra%1 m%6, m%2, 1
    padd%1 m%6, m%2       ; d07+(d07>>1)
    padd%1 m%6, m%3
    padd%1 m%6, m%4       ; a4 = d16+d25+(d07+(d07>>1))
    psra%1 m%2, m%5, 1
    padd%1 m%2, m%5       ; d34+(d34>>1)
    padd%1 m%2, m%3
    psub%1 m%2, m%4       ; a7 = d16-d25+(d34+(d34>>1))
    psra%1 m%5, m%2, 2
    padd%1 m%5, m%6       ; a4 + (a7>>2)
    psra%1 m%4, m%9, 2
    padd%1 m%4, m%8       ; a5 + (a6>>2)
    psra%1 m%6, 2
    psra%1 m%8, 2
    psub%1 m%6, m%2       ; (a4>>2) - a7
    psub%1 m%9, m%8       ; a6 - (a5>>2)
    SWAP %3, %5, %4, %7, %9, %6
%endmacro
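
; IDCT8_1D is the matching 1-D pass of the 8x8 inverse transform: the odd
; half is reconstructed first from the registers, then rows 0 and 4 are
; pulled in from memory (%10, %11) and the final SUMSUB_BA butterflies
; produce the eight outputs in m0..m7.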
; in: size, m[1,2,3,5,6,7], 0,4 in mem at %10,%11
; out: m0..m7
%macro IDCT8_1D 11
    psra%1 m%2, m%4, 1
    psra%1 m%6, m%8, 1
    psub%1 m%2, m%8
    padd%1 m%6, m%4
    psra%1 m%8, m%3, 1
    padd%1 m%8, m%3
    padd%1 m%8, m%5
    padd%1 m%8, m%7
    psra%1 m%4, m%7, 1
    padd%1 m%4, m%7
    padd%1 m%4, m%9
    psub%1 m%4, m%3
    psub%1 m%3, m%5
    psub%1 m%7, m%5
    padd%1 m%3, m%9
    psub%1 m%7, m%9
    psra%1 m%5, 1
    psra%1 m%9, 1
    psub%1 m%3, m%5
    psub%1 m%7, m%9
    psra%1 m%5, m%8, 2
    psra%1 m%9, m%4, 2
    padd%1 m%5, m%7
    padd%1 m%9, m%3
    psra%1 m%7, 2
    psra%1 m%3, 2
    psub%1 m%8, m%7
    psub%1 m%3, m%4
    mova   m%4, %10
    mova   m%7, %11
    SUMSUB_BA %1, %7, %4
    SUMSUB_BA %1, %6, %7
    SUMSUB_BA %1, %2, %4
    SUMSUB_BA %1, %8, %6
    SUMSUB_BA %1, %3, %2
    SUMSUB_BA %1, %9, %4
    SUMSUB_BA %1, %5, %7
    SWAP %2, %4
    SWAP %6, %8
    SWAP %2, %6, %7
    SWAP %4, %9, %8
%endmacro

%if HIGH_BIT_DEPTH
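; In the high-bit-depth build the residual produced by LOAD_DIFF8x4 still
; fits in 16 bits, so the first 1-D pass runs on words; each row is then
; sign-extended to 32 bits with WIDEN_SXWD and the second pass runs on
; dwords, leaving the coefficients in the dct buffer as int32.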
%macro SUB8x8_DCT8 0
cglobal sub8x8_dct8, 3,3,8
cglobal_label .skip_prologue
    LOAD_DIFF8x4 0,1,2,3, none,none, r1, r2
    LOAD_DIFF8x4 4,5,6,7, none,none, r1, r2
    DCT8_1D w, 0,1,2,3,4,5,6,7, [r0],[r0+0x10],[r0+0x50]
    mova m0, [r0]
    mova [r0+0x30], m5
    mova [r0+0x70], m7
    TRANSPOSE4x4W 0,1,2,3,4
    WIDEN_SXWD 0,4
    WIDEN_SXWD 1,5
    WIDEN_SXWD 2,6
    WIDEN_SXWD 3,7
    DCT8_1D d, 0,4,1,5,2,6,3,7, [r0],[r0+0x80],[r0+0xC0]
    mova [r0+0x20], m4
    mova [r0+0x40], m1
    mova [r0+0x60], m5
    mova [r0+0xA0], m6
    mova [r0+0xE0], m7
    mova m4, [r0+0x10]
    mova m5, [r0+0x30]
    mova m6, [r0+0x50]
    mova m7, [r0+0x70]
    TRANSPOSE4x4W 4,5,6,7,0
    WIDEN_SXWD 4,0
    WIDEN_SXWD 5,1
    WIDEN_SXWD 6,2
    WIDEN_SXWD 7,3
    DCT8_1D d, 4,0,5,1,6,2,7,3, [r0+0x10],[r0+0x90],[r0+0xD0]
    mova [r0+0x30], m0
    mova [r0+0x50], m5
    mova [r0+0x70], m1
    mova [r0+0xB0], m2
    mova [r0+0xF0], m3
    ret
%endmacro ; SUB8x8_DCT8

INIT_XMM sse2
SUB8x8_DCT8
INIT_XMM sse4
SUB8x8_DCT8
INIT_XMM avx
SUB8x8_DCT8
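
; add8x8_idct8 (high bit depth): r1 is advanced by 128 so the whole 256-byte
; int32 coefficient block is addressable with small signed offsets.  Each
; 1-D pass handles a 4x8 half of the block (four dwords per xmm register),
; rounding is injected via pd_32 before the second direction, and STORE_DIFF
; adds the result to the destination and clamps it against pw_pixel_max.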
%macro ADD8x8_IDCT8 0
cglobal add8x8_idct8, 2,2
    add r1, 128
cglobal_label .skip_prologue
    UNSPILL_SHUFFLE r1, 1,2,3,5,6,7, -6,-4,-2,2,4,6
    IDCT8_1D d,0,1,2,3,4,5,6,7,[r1-128],[r1+0]
    mova [r1+0], m4
    TRANSPOSE4x4D 0,1,2,3,4
    paddd m0, [pd_32]
    mova m4, [r1+0]
    SPILL_SHUFFLE r1, 0,1,2,3, -8,-6,-4,-2
    TRANSPOSE4x4D 4,5,6,7,3
    paddd m4, [pd_32]
    SPILL_SHUFFLE r1, 4,5,6,7, 0,2,4,6
    UNSPILL_SHUFFLE r1, 1,2,3,5,6,7, -5,-3,-1,3,5,7
    IDCT8_1D d,0,1,2,3,4,5,6,7,[r1-112],[r1+16]
    mova [r1+16], m4
    TRANSPOSE4x4D 0,1,2,3,4
    mova m4, [r1+16]
    mova [r1-112], m0
    TRANSPOSE4x4D 4,5,6,7,0
    SPILL_SHUFFLE r1, 4,5,6,7, 1,3,5,7
    UNSPILL_SHUFFLE r1, 5,6,7, -6,-4,-2
    IDCT8_1D d,4,5,6,7,0,1,2,3,[r1-128],[r1-112]
    SPILL_SHUFFLE r1, 4,5,6,7,0,1,2,3, -8,-7,-6,-5,-4,-3,-2,-1
    UNSPILL_SHUFFLE r1, 1,2,3,5,6,7, 2,4,6,3,5,7
    IDCT8_1D d,0,1,2,3,4,5,6,7,[r1+0],[r1+16]
    SPILL_SHUFFLE r1, 7,6,5, 7,6,5
    mova m7, [pw_pixel_max]
    pxor m6, m6
    mova m5, [r1-128]
    STORE_DIFF m5, m0, m6, m7, [r0+0*FDEC_STRIDEB]
    mova m0, [r1-112]
    STORE_DIFF m0, m1, m6, m7, [r0+1*FDEC_STRIDEB]
    mova m0, [r1-96]
    STORE_DIFF m0, m2, m6, m7, [r0+2*FDEC_STRIDEB]
    mova m0, [r1-80]
    STORE_DIFF m0, m3, m6, m7, [r0+3*FDEC_STRIDEB]
    mova m0, [r1-64]
    STORE_DIFF m0, m4, m6, m7, [r0+4*FDEC_STRIDEB]
    mova m0, [r1-48]
    mova m1, [r1+80]
    STORE_DIFF m0, m1, m6, m7, [r0+5*FDEC_STRIDEB]
    mova m0, [r1-32]
    mova m1, [r1+96]
    STORE_DIFF m0, m1, m6, m7, [r0+6*FDEC_STRIDEB]
    mova m0, [r1-16]
    mova m1, [r1+112]
    STORE_DIFF m0, m1, m6, m7, [r0+7*FDEC_STRIDEB]
    RET
%endmacro ; ADD8x8_IDCT8

INIT_XMM sse2
ADD8x8_IDCT8
INIT_XMM avx
ADD8x8_IDCT8

%else ; !HIGH_BIT_DEPTH

INIT_MMX
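; load_diff_4x8_mmx: loads one 4-pixel-wide, 8-row column of differences
; (pix1 - pix2) into m0..m7, temporarily parking m0 at [r0] because only
; eight mmregs are available.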
ALIGN 16
load_diff_4x8_mmx:
    LOAD_DIFF m0, m7, none, [r1+0*FENC_STRIDE], [r2+0*FDEC_STRIDE]
    LOAD_DIFF m1, m7, none, [r1+1*FENC_STRIDE], [r2+1*FDEC_STRIDE]
    LOAD_DIFF m2, m7, none, [r1+2*FENC_STRIDE], [r2+2*FDEC_STRIDE]
    LOAD_DIFF m3, m7, none, [r1+3*FENC_STRIDE], [r2+3*FDEC_STRIDE]
    LOAD_DIFF m4, m7, none, [r1+4*FENC_STRIDE], [r2+4*FDEC_STRIDE]
    LOAD_DIFF m5, m7, none, [r1+5*FENC_STRIDE], [r2+5*FDEC_STRIDE]
    movq [r0], m0
    LOAD_DIFF m6, m7, none, [r1+6*FENC_STRIDE], [r2+6*FDEC_STRIDE]
    LOAD_DIFF m7, m0, none, [r1+7*FENC_STRIDE], [r2+7*FDEC_STRIDE]
    movq m0, [r0]
    ret
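
; dct8_mmx: one 1-D forward pass over the 4x8 half currently held in m0..m7,
; writing rows 0, 4 and 6 of the result to the dct buffer as described in
; DCT8_1D's in/out comment.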
cglobal dct8_mmx
    DCT8_1D w,0,1,2,3,4,5,6,7,[r0],[r0+0x40],[r0+0x60]
    SAVE_MM_PERMUTATION
    ret

;-----------------------------------------------------------------------------
; void sub8x8_dct8( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
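; MMX registers can only hold a 4x8 half of the block at a time, so the full
; 8x8 transform is built from four calls to dct8_mmx: one per half in each
; direction, with 4x4 transposes and spills to the dct buffer in between.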
cglobal sub8x8_dct8_mmx, 3,3
global sub8x8_dct8_mmx.skip_prologue
.skip_prologue:
    RESET_MM_PERMUTATION
    call load_diff_4x8_mmx
    call dct8_mmx
    UNSPILL r0, 0
    TRANSPOSE4x4W 0,1,2,3,4
    SPILL r0, 0,1,2,3
    UNSPILL r0, 4,6
    TRANSPOSE4x4W 4,5,6,7,0
    SPILL r0, 4,5,6,7
    RESET_MM_PERMUTATION
    add r1, 4
    add r2, 4
    add r0, 8
    call load_diff_4x8_mmx
    sub r1, 4
    sub r2, 4
    call dct8_mmx
    sub r0, 8
    UNSPILL r0+8, 4,6
    TRANSPOSE4x4W 4,5,6,7,0
    SPILL r0+8, 4,5,6,7
    UNSPILL r0+8, 0
    TRANSPOSE4x4W 0,1,2,3,5
    UNSPILL r0, 4,5,6,7
    SPILL_SHUFFLE r0, 0,1,2,3, 4,5,6,7
    movq mm4, m6 ; depends on the permutation to not produce conflicts
    movq mm0, m4
    movq mm1, m5
    movq mm2, mm4
    movq mm3, m7
    RESET_MM_PERMUTATION
    UNSPILL r0+8, 4,5,6,7
    add r0, 8
    call dct8_mmx
    sub r0, 8
    SPILL r0+8, 1,2,3,5,7
    RESET_MM_PERMUTATION
    UNSPILL r0, 0,1,2,3,4,5,6,7
    call dct8_mmx
    SPILL r0, 1,2,3,5,7
    ret

cglobal idct8_mmx
    IDCT8_1D w,0,1,2,3,4,5,6,7,[r1+0],[r1+64]
    SAVE_MM_PERMUTATION
    ret
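
; ADD_STORE_ROW: adds one row of reconstructed residual (two 4-word halves,
; %2 and %3) to the pixel row at r0 + %1*FDEC_STRIDE, repacking with
; unsigned saturation.  The caller must keep m0 zeroed for the unpacks.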
%macro ADD_STORE_ROW 3
    movq m1, [r0+%1*FDEC_STRIDE]
    punpckhbw m2, m1, m0
    punpcklbw m1, m0
    paddw m1, %2
    paddw m2, %3
    packuswb m1, m2
    movq [r0+%1*FDEC_STRIDE], m1
%endmacro

;-----------------------------------------------------------------------------
; void add8x8_idct8( uint8_t *dst, int16_t dct[8][8] )
;-----------------------------------------------------------------------------
cglobal add8x8_idct8_mmx, 2,2
global add8x8_idct8_mmx.skip_prologue
.skip_prologue:
INIT_MMX
    add word [r1], 32
    UNSPILL r1, 1,2,3,5,6,7
    call idct8_mmx
    SPILL r1, 7
    TRANSPOSE4x4W 0,1,2,3,7
    SPILL r1, 0,1,2,3
    UNSPILL r1, 7
    TRANSPOSE4x4W 4,5,6,7,0
    SPILL r1, 4,5,6,7
INIT_MMX
    UNSPILL r1+8, 1,2,3,5,6,7
    add r1, 8
    call idct8_mmx
    sub r1, 8
    SPILL r1+8, 7
    TRANSPOSE4x4W 0,1,2,3,7
    SPILL r1+8, 0,1,2,3
    UNSPILL r1+8, 7
    TRANSPOSE4x4W 4,5,6,7,0
    SPILL r1+8, 4,5,6,7
INIT_MMX
    movq m3, [r1+0x08]
    movq m0, [r1+0x40]
    movq [r1+0x40], m3
    movq [r1+0x08], m0
; memory layout at this time:
; A0------ A1------
; B0------ F0------
; C0------ G0------
; D0------ H0------
; E0------ E1------
; B1------ F1------
; C1------ G1------
; D1------ H1------
    UNSPILL_SHUFFLE r1, 1,2,3, 5,6,7
    UNSPILL r1+8, 5,6,7
    add r1, 8
    call idct8_mmx
    sub r1, 8
    psraw m0, 6
    psraw m1, 6
    psraw m2, 6
    psraw m3, 6
    psraw m4, 6
    psraw m5, 6
    psraw m6, 6
    psraw m7, 6
    movq [r1+0x08], m0 ; mm4
    movq [r1+0x48], m4 ; mm5
    movq [r1+0x58], m5 ; mm0
    movq [r1+0x68], m6 ; mm2
    movq [r1+0x78], m7 ; mm6
    movq mm5, [r1+0x18]
    movq mm6, [r1+0x28]
    movq [r1+0x18], m1 ; mm1
    movq [r1+0x28], m2 ; mm7
    movq mm7, [r1+0x38]
    movq [r1+0x38], m3 ; mm3
    movq mm1, [r1+0x10]
    movq mm2, [r1+0x20]
    movq mm3, [r1+0x30]
    call idct8_mmx
    psraw m0, 6
    psraw m1, 6
    psraw m2, 6
    psraw m3, 6
    psraw m4, 6
    psraw m5, 6
    psraw m6, 6
    psraw m7, 6
    SPILL r1, 0,1,2
    pxor m0, m0
    ADD_STORE_ROW 0, [r1+0x00], [r1+0x08]
    ADD_STORE_ROW 1, [r1+0x10], [r1+0x18]
    ADD_STORE_ROW 2, [r1+0x20], [r1+0x28]
    ADD_STORE_ROW 3, m3, [r1+0x38]
    ADD_STORE_ROW 4, m4, [r1+0x48]
    ADD_STORE_ROW 5, m5, [r1+0x58]
    ADD_STORE_ROW 6, m6, [r1+0x68]
    ADD_STORE_ROW 7, m7, [r1+0x78]
    ret
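
; DCT_SUB8 emits sub8x8_dct (four 4x4 DCTs over the 8x8 difference block)
; and sub8x8_dct8 (one 8x8 DCT) for the SSE2/SSSE3/AVX/XOP variants.  On
; ssse3+ targets the hsub_mul constant (interleaved +1/-1 byte pairs) is
; preloaded into m7 so LOAD_DIFF8x4 can form the differences with a
; pmaddubsw-based path instead of unpacking and subtracting.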
%macro DCT_SUB8 0
cglobal sub8x8_dct, 3,3
    add r2, 4*FDEC_STRIDE
cglobal_label .skip_prologue
%if cpuflag(ssse3)
    mova m7, [hsub_mul]
%endif
    LOAD_DIFF8x4 0, 1, 2, 3, 6, 7, r1, r2-4*FDEC_STRIDE
    SPILL r0, 1,2
    SWAP 2, 7
    LOAD_DIFF8x4 4, 5, 6, 7, 1, 2, r1, r2-4*FDEC_STRIDE
    UNSPILL r0, 1
    SPILL r0, 7
    SWAP 2, 7
    UNSPILL r0, 2
    DCT4_1D 0, 1, 2, 3, 7
    TRANSPOSE2x4x4W 0, 1, 2, 3, 7
    UNSPILL r0, 7
    SPILL r0, 2
    DCT4_1D 4, 5, 6, 7, 2
    TRANSPOSE2x4x4W 4, 5, 6, 7, 2
    UNSPILL r0, 2
    SPILL r0, 6
    DCT4_1D 0, 1, 2, 3, 6
    UNSPILL r0, 6
    STORE_DCT 0, 1, 2, 3, r0, 0
    DCT4_1D 4, 5, 6, 7, 3
    STORE_DCT 4, 5, 6, 7, r0, 64
    ret

;-----------------------------------------------------------------------------
; void sub8x8_dct8( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
cglobal sub8x8_dct8, 3,3
    add r2, 4*FDEC_STRIDE
cglobal_label .skip_prologue
%if cpuflag(ssse3)
    mova m7, [hsub_mul]
    LOAD_DIFF8x4 0, 1, 2, 3, 4, 7, r1, r2-4*FDEC_STRIDE
    SPILL r0, 0,1
    SWAP 1, 7
    LOAD_DIFF8x4 4, 5, 6, 7, 0, 1, r1, r2-4*FDEC_STRIDE
    UNSPILL r0, 0,1
%else
    LOAD_DIFF m0, m7, none, [r1+0*FENC_STRIDE], [r2-4*FDEC_STRIDE]
    LOAD_DIFF m1, m7, none, [r1+1*FENC_STRIDE], [r2-3*FDEC_STRIDE]
    LOAD_DIFF m2, m7, none, [r1+2*FENC_STRIDE], [r2-2*FDEC_STRIDE]
    LOAD_DIFF m3, m7, none, [r1+3*FENC_STRIDE], [r2-1*FDEC_STRIDE]
    LOAD_DIFF m4, m7, none, [r1+4*FENC_STRIDE], [r2+0*FDEC_STRIDE]
    LOAD_DIFF m5, m7, none, [r1+5*FENC_STRIDE], [r2+1*FDEC_STRIDE]
    SPILL r0, 0
    LOAD_DIFF m6, m7, none, [r1+6*FENC_STRIDE], [r2+2*FDEC_STRIDE]
    LOAD_DIFF m7, m0, none, [r1+7*FENC_STRIDE], [r2+3*FDEC_STRIDE]
    UNSPILL r0, 0
%endif
    DCT8_1D w,0,1,2,3,4,5,6,7,[r0],[r0+0x40],[r0+0x60]
    UNSPILL r0, 0,4
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,[r0+0x60],[r0+0x40],1
    UNSPILL r0, 4
    DCT8_1D w,0,1,2,3,4,5,6,7,[r0],[r0+0x40],[r0+0x60]
    SPILL r0, 1,2,3,5,7
    ret
%endmacro

INIT_XMM sse2
%define movdqa movaps
%define punpcklqdq movlhps
DCT_SUB8
%undef movdqa
%undef punpcklqdq
INIT_XMM ssse3
DCT_SUB8
INIT_XMM avx
DCT_SUB8
INIT_XMM xop
DCT_SUB8

;-----------------------------------------------------------------------------
; void add8x8_idct( uint8_t *pix, int16_t dct[4][4][4] )
;-----------------------------------------------------------------------------
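; The four 4x4 coefficient blocks are interleaved with SBUTTERFLY qdq so
; that each xmm register holds matching rows of two blocks; two IDCT4_1D
; passes (with a TRANSPOSE2x4x4W in between) plus the pw_32 bias reconstruct
; the residual, which DIFFx2/STORE_IDCT shift down and add to the pixels.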
%macro ADD8x8 0
cglobal add8x8_idct, 2,2
    add r0, 4*FDEC_STRIDE
cglobal_label .skip_prologue
    UNSPILL_SHUFFLE r1, 0,2,1,3, 0,1,2,3
    SBUTTERFLY qdq, 0, 1, 4
    SBUTTERFLY qdq, 2, 3, 4
    UNSPILL_SHUFFLE r1, 4,6,5,7, 4,5,6,7
    SPILL r1, 0
    SBUTTERFLY qdq, 4, 5, 0
    SBUTTERFLY qdq, 6, 7, 0
    UNSPILL r1, 0
    IDCT4_1D w,0,1,2,3,r1
    SPILL r1, 4
    TRANSPOSE2x4x4W 0,1,2,3,4
    UNSPILL r1, 4
    IDCT4_1D w,4,5,6,7,r1
    SPILL r1, 0
    TRANSPOSE2x4x4W 4,5,6,7,0
    UNSPILL r1, 0
    paddw m0, [pw_32]
    IDCT4_1D w,0,1,2,3,r1
    paddw m4, [pw_32]
    IDCT4_1D w,4,5,6,7,r1
    SPILL r1, 6,7
    pxor m7, m7
    DIFFx2 m0, m1, m6, m7, [r0-4*FDEC_STRIDE], [r0-3*FDEC_STRIDE]; m5
    DIFFx2 m2, m3, m6, m7, [r0-2*FDEC_STRIDE], [r0-1*FDEC_STRIDE]; m5
    UNSPILL_SHUFFLE r1, 0,2, 6,7
    DIFFx2 m4, m5, m6, m7, [r0+0*FDEC_STRIDE], [r0+1*FDEC_STRIDE]; m5
    DIFFx2 m0, m2, m6, m7, [r0+2*FDEC_STRIDE], [r0+3*FDEC_STRIDE]; m5
    STORE_IDCT m1, m3, m5, m2
    ret
%endmacro ; ADD8x8

INIT_XMM sse2
ADD8x8
INIT_XMM avx
ADD8x8

;-----------------------------------------------------------------------------
; void add8x8_idct8( uint8_t *p_dst, int16_t dct[8][8] )
;-----------------------------------------------------------------------------
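; add8x8_idct8 (8-bit): one word-sized IDCT8_1D pass, a full TRANSPOSE8x8W,
; a +32 rounding bias on row 0 (which propagates to every output of the
; second pass), a second IDCT8_1D pass, and finally the shift-and-add to the
; destination pixels in DIFFx2/STORE_IDCT.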
%macro ADD8x8_IDCT8 0
cglobal add8x8_idct8, 2,2
    add r0, 4*FDEC_STRIDE
cglobal_label .skip_prologue
    UNSPILL r1, 1,2,3,5,6,7
    IDCT8_1D w,0,1,2,3,4,5,6,7,[r1+0],[r1+64]
    SPILL r1, 6
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,[r1+0x60],[r1+0x40],1
    paddw m0, [pw_32]
    SPILL r1, 0
    IDCT8_1D w,0,1,2,3,4,5,6,7,[r1+0],[r1+64]
    SPILL r1, 6,7
    pxor m7, m7
    DIFFx2 m0, m1, m6, m7, [r0-4*FDEC_STRIDE], [r0-3*FDEC_STRIDE]; m5
    DIFFx2 m2, m3, m6, m7, [r0-2*FDEC_STRIDE], [r0-1*FDEC_STRIDE]; m5
    UNSPILL_SHUFFLE r1, 0,2, 6,7
    DIFFx2 m4, m5, m6, m7, [r0+0*FDEC_STRIDE], [r0+1*FDEC_STRIDE]; m5
    DIFFx2 m0, m2, m6, m7, [r0+2*FDEC_STRIDE], [r0+3*FDEC_STRIDE]; m5
    STORE_IDCT m1, m3, m5, m2
    ret
%endmacro ; ADD8x8_IDCT8

INIT_XMM sse2
ADD8x8_IDCT8
INIT_XMM avx
ADD8x8_IDCT8

%endif ; !HIGH_BIT_DEPTH