;*****************************************************************************
;* dct-64.asm: x86_64 transform and zigzag
;*****************************************************************************
;* Copyright (C) 2003-2018 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Holger Lubitz <holger@lubitz.org>
;*          Laurent Aimar <fenrir@via.ecp.fr>
;*          Min Chen <chenm001@163.com>
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
;*
;* This program is also available under a commercial proprietary license.
;* For more information, contact us at licensing@x264.com.
;*****************************************************************************

%include "x86inc.asm"
%include "x86util.asm"

SECTION .text

cextern pd_32
cextern pw_pixel_max
cextern pw_2
cextern pw_m2
cextern pw_32
cextern hsub_mul

; in:  size, m0..m7, temp, temp
; out: m0..m7
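;
; The SUMSUB/shift sequence below is the usual H.264 8x8 forward-transform
; butterfly (the s/d/a/b names in the register comments follow that
; derivation). As a rough C-style sketch of one 1-D pass, with src[]/dst[] as
; hypothetical arrays of eight coefficients (a reference sketch only, not code
; taken from this file):
;
;   s07 = src[0]+src[7];  d07 = src[0]-src[7];  (likewise s16/d16, s25/d25, s34/d34)
;   a0 = s07+s34;  a1 = s16+s25;  a2 = s07-s34;  a3 = s16-s25;
;   a4 = d16+d25+(d07+(d07>>1));  a5 = d07-d34-(d25+(d25>>1));
;   a6 = d07+d34-(d16+(d16>>1));  a7 = d16-d25+(d34+(d34>>1));
;   dst[0] = a0+a1;        dst[4] = a0-a1;
;   dst[1] = a4+(a7>>2);   dst[5] = a6-(a5>>2);
;   dst[2] = a2+(a3>>1);   dst[6] = (a2>>1)-a3;
;   dst[3] = a5+(a6>>2);   dst[7] = (a4>>2)-a7;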
%macro DCT8_1D 11
    SUMSUB_BA %1, %6, %5, %11 ; %6=s34, %5=d34
    SUMSUB_BA %1, %7, %4, %11 ; %7=s25, %4=d25
    SUMSUB_BA %1, %8, %3, %11 ; %8=s16, %3=d16
    SUMSUB_BA %1, %9, %2, %11 ; %9=s07, %2=d07
    SUMSUB_BA %1, %7, %8, %11 ; %7=a1, %8=a3
    SUMSUB_BA %1, %6, %9, %11 ; %6=a0, %9=a2
    psra%1    m%10, m%2, 1
    padd%1    m%10, m%2
    padd%1    m%10, m%3
    padd%1    m%10, m%4  ; %10=a4
    psra%1    m%11, m%5, 1
    padd%1    m%11, m%5
    padd%1    m%11, m%3
    psub%1    m%11, m%4  ; %11=a7
    SUMSUB_BA %1, %5, %2
    psub%1    m%2, m%4
    psub%1    m%5, m%3
    psra%1    m%4, 1
    psra%1    m%3, 1
    psub%1    m%2, m%4   ; %2=a5
    psub%1    m%5, m%3   ; %5=a6
    psra%1    m%3, m%11, 2
    padd%1    m%3, m%10  ; %3=b1
    psra%1    m%10, 2
    psub%1    m%10, m%11 ; %10=b7
    SUMSUB_BA %1, %7, %6, %11 ; %7=b0, %6=b4
    psra%1    m%4, m%8, 1
    padd%1    m%4, m%9   ; %4=b2
    psra%1    m%9, 1
    psub%1    m%9, m%8   ; %9=b6
    psra%1    m%8, m%5, 2
    padd%1    m%8, m%2   ; %8=b3
    psra%1    m%2, 2
    psub%1    m%5, m%2   ; %5=b5
    SWAP %2, %7, %5, %8, %9, %10
%endmacro
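
; in:  size, m0..m7, temp, temp
; out: m0..m7
; Inverse of the butterfly above. As a rough C-style sketch of one 1-D pass,
; with w[] (input coefficients) and o[] (output) as hypothetical arrays
; (again a reference sketch only, matching the a/b/c names in the comments):
;
;   a0 = w0+w4;               a2 = w0-w4;
;   a4 = (w2>>1)-w6;          a6 = w2+(w6>>1);
;   a1 = -w3+w5-w7-(w7>>1);   a3 = w1+w7-w3-(w3>>1);
;   a5 = -w1+w7+w5+(w5>>1);   a7 = w3+w5+w1+(w1>>1);
;   b0 = a0+a6;  b2 = a2+a4;  b4 = a2-a4;  b6 = a0-a6;
;   b1 = a1+(a7>>2);  b3 = a3+(a5>>2);  b5 = (a3>>2)-a5;  b7 = a7-(a1>>2);
;   o0 = b0+b7;  o1 = b2+b5;  o2 = b4+b3;  o3 = b6+b1;
;   o4 = b6-b1;  o5 = b4-b3;  o6 = b2-b5;  o7 = b0-b7;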
%macro IDCT8_1D 11
    SUMSUB_BA %1, %6, %2, %10 ; %6=a0, %2=a2
    psra%1    m%10, m%3, 1
    padd%1    m%10, m%3
    padd%1    m%10, m%5
    padd%1    m%10, m%7  ; %10=a7
    psra%1    m%11, m%4, 1
    psub%1    m%11, m%8  ; %11=a4
    psra%1    m%8, 1
    padd%1    m%8, m%4   ; %8=a6
    psra%1    m%4, m%7, 1
    padd%1    m%4, m%7
    padd%1    m%4, m%9
    psub%1    m%4, m%3   ; %4=a5
    psub%1    m%3, m%5
    psub%1    m%7, m%5
    padd%1    m%3, m%9
    psub%1    m%7, m%9
    psra%1    m%5, 1
    psra%1    m%9, 1
    psub%1    m%3, m%5   ; %3=a3
    psub%1    m%7, m%9   ; %7=a1
    psra%1    m%5, m%10, 2
    padd%1    m%5, m%7   ; %5=b1
    psra%1    m%7, 2
    psub%1    m%10, m%7  ; %10=b7
    SUMSUB_BA %1, %8, %6, %7  ; %8=b0, %6=b6
    SUMSUB_BA %1, %11, %2, %7 ; %11=b2, %2=b4
    psra%1    m%9, m%4, 2
    padd%1    m%9, m%3   ; %9=b3
    psra%1    m%3, 2
    psub%1    m%3, m%4   ; %3=b5
    SUMSUB_BA %1, %10, %8, %7 ; %10=c0, %8=c7
    SUMSUB_BA %1, %3, %11, %7 ; %3=c1, %11=c6
    SUMSUB_BA %1, %9, %2, %7  ; %9=c2, %2=c5
    SUMSUB_BA %1, %5, %6, %7  ; %5=c3, %6=c4
    SWAP %11, %4
    SWAP %2, %10, %7
    SWAP %4, %9, %8
%endmacro

%if HIGH_BIT_DEPTH
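;-----------------------------------------------------------------------------
; void sub8x8_dct8( dctcoef dct[8][8], pixel *pix1, pixel *pix2 )
; (prototype stated by analogy with the 8-bit variant below; in the high
;  bit depth build dctcoef/pixel are x264's 32-bit/16-bit types)
;-----------------------------------------------------------------------------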
%macro SUB8x8_DCT8 0
cglobal sub8x8_dct8, 3,3,14
    TAIL_CALL .skip_prologue, 0
cglobal_label .skip_prologue
    LOAD_DIFF8x4 0,1,2,3, none,none, r1, r2
    LOAD_DIFF8x4 4,5,6,7, none,none, r1, r2
    DCT8_1D w, 0,1,2,3,4,5,6,7, 8,9
    TRANSPOSE4x4W 0,1,2,3,8
    WIDEN_SXWD 0,8
    WIDEN_SXWD 1,9
    WIDEN_SXWD 2,10
    WIDEN_SXWD 3,11
    DCT8_1D d, 0,8,1,9,2,10,3,11, 12,13
    mova [r0+0x00], m0
    mova [r0+0x20], m8
    mova [r0+0x40], m1
    mova [r0+0x60], m9
    mova [r0+0x80], m2
    mova [r0+0xA0], m10
    mova [r0+0xC0], m3
    mova [r0+0xE0], m11
    TRANSPOSE4x4W 4,5,6,7,0
    WIDEN_SXWD 4,0
    WIDEN_SXWD 5,1
    WIDEN_SXWD 6,2
    WIDEN_SXWD 7,3
    DCT8_1D d, 4,0,5,1,6,2,7,3, 8,9
    mova [r0+0x10], m4
    mova [r0+0x30], m0
    mova [r0+0x50], m5
    mova [r0+0x70], m1
    mova [r0+0x90], m6
    mova [r0+0xB0], m2
    mova [r0+0xD0], m7
    mova [r0+0xF0], m3
    ret
%endmacro ; SUB8x8_DCT8

INIT_XMM sse2
SUB8x8_DCT8
INIT_XMM sse4
SUB8x8_DCT8
INIT_XMM avx
SUB8x8_DCT8
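
;-----------------------------------------------------------------------------
; void add8x8_idct8( pixel *p_dst, dctcoef dct[8][8] )
; (prototype stated by analogy with the 8-bit variant below)
;-----------------------------------------------------------------------------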
%macro ADD8x8_IDCT8 0
cglobal add8x8_idct8, 2,2,16
    add r1, 128
    TAIL_CALL .skip_prologue, 0
cglobal_label .skip_prologue
    mova m0, [r1-128]
    mova m1, [r1-96]
    mova m2, [r1-64]
    mova m3, [r1-32]
    mova m4, [r1+ 0]
    mova m5, [r1+32]
    mova m6, [r1+64]
    mova m7, [r1+96]
    IDCT8_1D d,0,1,2,3,4,5,6,7,8,9
    TRANSPOSE4x4D 0,1,2,3,8
    TRANSPOSE4x4D 4,5,6,7,8
    paddd m0, [pd_32]
    paddd m4, [pd_32]
    mova [r1+64], m6
    mova [r1+96], m7
    mova m8,  [r1-112]
    mova m9,  [r1-80]
    mova m10, [r1-48]
    mova m11, [r1-16]
    mova m12, [r1+16]
    mova m13, [r1+48]
    mova m14, [r1+80]
    mova m15, [r1+112]
    IDCT8_1D d,8,9,10,11,12,13,14,15,6,7
    TRANSPOSE4x4D 8,9,10,11,6
    TRANSPOSE4x4D 12,13,14,15,6
    IDCT8_1D d,0,1,2,3,8,9,10,11,6,7
    mova [r1-112], m8
    mova [r1-80],  m9
    mova m6, [r1+64]
    mova m7, [r1+96]
    IDCT8_1D d,4,5,6,7,12,13,14,15,8,9
    pxor m8, m8
    mova m9, [pw_pixel_max]
    STORE_DIFF m0,  m4,  m8, m9, [r0+0*FDEC_STRIDEB]
    STORE_DIFF m1,  m5,  m8, m9, [r0+1*FDEC_STRIDEB]
    STORE_DIFF m2,  m6,  m8, m9, [r0+2*FDEC_STRIDEB]
    STORE_DIFF m3,  m7,  m8, m9, [r0+3*FDEC_STRIDEB]
    mova m0, [r1-112]
    mova m1, [r1-80]
    STORE_DIFF m0,  m12, m8, m9, [r0+4*FDEC_STRIDEB]
    STORE_DIFF m1,  m13, m8, m9, [r0+5*FDEC_STRIDEB]
    STORE_DIFF m10, m14, m8, m9, [r0+6*FDEC_STRIDEB]
    STORE_DIFF m11, m15, m8, m9, [r0+7*FDEC_STRIDEB]
    ret
%endmacro ; ADD8x8_IDCT8

INIT_XMM sse2
ADD8x8_IDCT8
INIT_XMM avx
ADD8x8_IDCT8

%else ; !HIGH_BIT_DEPTH

%macro DCT_SUB8 0
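;-----------------------------------------------------------------------------
; void sub8x8_dct( int16_t dct[4][4][4], uint8_t *pix1, uint8_t *pix2 )
; (prototype stated by analogy with sub8x8_dct8 below)
;-----------------------------------------------------------------------------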
cglobal sub8x8_dct, 3,3,10
    add r2, 4*FDEC_STRIDE
%if cpuflag(ssse3)
    mova m7, [hsub_mul]
%endif
    TAIL_CALL .skip_prologue, 0
cglobal_label .skip_prologue
    SWAP 7, 9
    LOAD_DIFF8x4 0, 1, 2, 3, 8, 9, r1, r2-4*FDEC_STRIDE
    LOAD_DIFF8x4 4, 5, 6, 7, 8, 9, r1, r2-4*FDEC_STRIDE
    DCT4_1D 0, 1, 2, 3, 8
    TRANSPOSE2x4x4W 0, 1, 2, 3, 8
    DCT4_1D 4, 5, 6, 7, 8
    TRANSPOSE2x4x4W 4, 5, 6, 7, 8
    DCT4_1D 0, 1, 2, 3, 8
    STORE_DCT 0, 1, 2, 3, r0, 0
    DCT4_1D 4, 5, 6, 7, 8
    STORE_DCT 4, 5, 6, 7, r0, 64
    ret

;-----------------------------------------------------------------------------
; void sub8x8_dct8( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
cglobal sub8x8_dct8, 3,3,11
    add r2, 4*FDEC_STRIDE
%if cpuflag(ssse3)
    mova m7, [hsub_mul]
%endif
    TAIL_CALL .skip_prologue, 0
cglobal_label .skip_prologue
    SWAP 7, 10
    LOAD_DIFF8x4 0, 1, 2, 3, 4, 10, r1, r2-4*FDEC_STRIDE
    LOAD_DIFF8x4 4, 5, 6, 7, 8, 10, r1, r2-4*FDEC_STRIDE
    DCT8_1D w, 0,1,2,3,4,5,6,7,8,9
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,8
    DCT8_1D w, 0,1,2,3,4,5,6,7,8,9
    movdqa [r0+0x00], m0
    movdqa [r0+0x10], m1
    movdqa [r0+0x20], m2
    movdqa [r0+0x30], m3
    movdqa [r0+0x40], m4
    movdqa [r0+0x50], m5
    movdqa [r0+0x60], m6
    movdqa [r0+0x70], m7
    ret
%endmacro

INIT_XMM sse2
%define movdqa movaps
%define punpcklqdq movlhps
DCT_SUB8
%undef movdqa
%undef punpcklqdq
INIT_XMM ssse3
DCT_SUB8
INIT_XMM avx
DCT_SUB8
INIT_XMM xop
DCT_SUB8
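
;-----------------------------------------------------------------------------
; void sub16x16_dct8( int16_t dct[4][8][8], uint8_t *pix1, uint8_t *pix2 )
; (prototype stated by analogy with sub8x8_dct8 above; this version handles
;  the 16x16 block as two 16x8 passes)
;-----------------------------------------------------------------------------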
INIT_YMM avx2
cglobal sub16x16_dct8, 3,3,10
    add r0, 128
    add r2, 4*FDEC_STRIDE
    call .sub16x8_dct8
    add r0, 256
    add r1, FENC_STRIDE*8
    add r2, FDEC_STRIDE*8
    call .sub16x8_dct8
    RET
.sub16x8_dct8:
    LOAD_DIFF16x2_AVX2 0, 1, 2, 3, 0, 1
    LOAD_DIFF16x2_AVX2 2, 3, 4, 5, 2, 3
    LOAD_DIFF16x2_AVX2 4, 5, 6, 7, 4, 5
    LOAD_DIFF16x2_AVX2 6, 7, 8, 9, 6, 7
    DCT8_1D w, 0,1,2,3,4,5,6,7,8,9
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,8
    DCT8_1D w, 0,1,2,3,4,5,6,7,8,9
    mova         [r0-0x80+0x00], xm0
    vextracti128 [r0+0x00], m0, 1
    mova         [r0-0x80+0x10], xm1
    vextracti128 [r0+0x10], m1, 1
    mova         [r0-0x80+0x20], xm2
    vextracti128 [r0+0x20], m2, 1
    mova         [r0-0x80+0x30], xm3
    vextracti128 [r0+0x30], m3, 1
    mova         [r0-0x80+0x40], xm4
    vextracti128 [r0+0x40], m4, 1
    mova         [r0-0x80+0x50], xm5
    vextracti128 [r0+0x50], m5, 1
    mova         [r0-0x80+0x60], xm6
    vextracti128 [r0+0x60], m6, 1
    mova         [r0-0x80+0x70], xm7
    vextracti128 [r0+0x70], m7, 1
    ret

;-----------------------------------------------------------------------------
; void add8x8_idct8( uint8_t *p_dst, int16_t dct[8][8] )
;-----------------------------------------------------------------------------
%macro ADD8x8_IDCT8 0
cglobal add8x8_idct8, 2,2,11
    add r0, 4*FDEC_STRIDE
    pxor m7, m7
    TAIL_CALL .skip_prologue, 0
cglobal_label .skip_prologue
    SWAP 7, 9
    movdqa m0, [r1+0x00]
    movdqa m1, [r1+0x10]
    movdqa m2, [r1+0x20]
    movdqa m3, [r1+0x30]
    movdqa m4, [r1+0x40]
    movdqa m5, [r1+0x50]
    movdqa m6, [r1+0x60]
    movdqa m7, [r1+0x70]
    IDCT8_1D w,0,1,2,3,4,5,6,7,8,10
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,8
    paddw m0, [pw_32] ; rounding for the >>6 at the end
    IDCT8_1D w,0,1,2,3,4,5,6,7,8,10
    DIFFx2 m0, m1, m8, m9, [r0-4*FDEC_STRIDE], [r0-3*FDEC_STRIDE]
    DIFFx2 m2, m3, m8, m9, [r0-2*FDEC_STRIDE], [r0-1*FDEC_STRIDE]
    DIFFx2 m4, m5, m8, m9, [r0+0*FDEC_STRIDE], [r0+1*FDEC_STRIDE]
    DIFFx2 m6, m7, m8, m9, [r0+2*FDEC_STRIDE], [r0+3*FDEC_STRIDE]
    STORE_IDCT m1, m3, m5, m7
    ret
%endmacro ; ADD8x8_IDCT8

INIT_XMM sse2
ADD8x8_IDCT8
INIT_XMM avx
ADD8x8_IDCT8

;-----------------------------------------------------------------------------
; void add8x8_idct( uint8_t *pix, int16_t dct[4][4][4] )
;-----------------------------------------------------------------------------
%macro ADD8x8 0
cglobal add8x8_idct, 2,2,11
    add r0, 4*FDEC_STRIDE
    pxor m7, m7
    TAIL_CALL .skip_prologue, 0
cglobal_label .skip_prologue
    SWAP 7, 9
    mova m0, [r1+ 0]
    mova m2, [r1+16]
    mova m1, [r1+32]
    mova m3, [r1+48]
    SBUTTERFLY qdq, 0, 1, 4
    SBUTTERFLY qdq, 2, 3, 4
    mova m4, [r1+64]
    mova m6, [r1+80]
    mova m5, [r1+96]
    mova m7, [r1+112]
    SBUTTERFLY qdq, 4, 5, 8
    SBUTTERFLY qdq, 6, 7, 8
    IDCT4_1D w,0,1,2,3,8,10
    TRANSPOSE2x4x4W 0,1,2,3,8
    IDCT4_1D w,4,5,6,7,8,10
    TRANSPOSE2x4x4W 4,5,6,7,8
    paddw m0, [pw_32]
    IDCT4_1D w,0,1,2,3,8,10
    paddw m4, [pw_32]
    IDCT4_1D w,4,5,6,7,8,10
    DIFFx2 m0, m1, m8, m9, [r0-4*FDEC_STRIDE], [r0-3*FDEC_STRIDE]
    DIFFx2 m2, m3, m8, m9, [r0-2*FDEC_STRIDE], [r0-1*FDEC_STRIDE]
    DIFFx2 m4, m5, m8, m9, [r0+0*FDEC_STRIDE], [r0+1*FDEC_STRIDE]
    DIFFx2 m6, m7, m8, m9, [r0+2*FDEC_STRIDE], [r0+3*FDEC_STRIDE]
    STORE_IDCT m1, m3, m5, m7
    ret
%endmacro ; ADD8x8

INIT_XMM sse2
ADD8x8
INIT_XMM avx
ADD8x8

%endif ; !HIGH_BIT_DEPTH