;*****************************************************************************
;* sad16-a.asm: x86 high depth sad functions
;*****************************************************************************
;* Copyright (C) 2010-2018 x264 project
;*
;* Authors: Oskar Arvidsson <oskar@irock.se>
;*          Henrik Gramner <henrik@gramner.com>
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
;*
;* This program is also available under a commercial proprietary license.
;* For more information, contact us at licensing@x264.com.
;*****************************************************************************

%include "x86inc.asm"
%include "x86util.asm"

SECTION .text

cextern pw_1
cextern pw_4
cextern pw_8

;=============================================================================
; SAD MMX
;=============================================================================
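; High bit depth pixels are 16-bit, so PSADBW (bytes only) can't be used:
; each row is computed as psubw + ABSW (absolute difference) and accumulated
; in the word lanes of m0. For 16x16 blocks the per-lane MMX sum can exceed
; the signed 16-bit range at 10-bit depth, so those variants finish with
; HADDUW (unsigned horizontal add) instead of HADDW.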
%macro SAD_INC_1x16P_MMX 0
    movu    m1, [r0+ 0]
    movu    m2, [r0+ 8]
    movu    m3, [r0+16]
    movu    m4, [r0+24]
    psubw   m1, [r2+ 0]
    psubw   m2, [r2+ 8]
    psubw   m3, [r2+16]
    psubw   m4, [r2+24]
    ABSW2   m1, m2, m1, m2, m5, m6
    ABSW2   m3, m4, m3, m4, m7, m5
    lea     r0, [r0+2*r1]
    lea     r2, [r2+2*r3]
    paddw   m1, m2
    paddw   m3, m4
    paddw   m0, m1
    paddw   m0, m3
%endmacro

%macro SAD_INC_2x8P_MMX 0
    movu    m1, [r0+0]
    movu    m2, [r0+8]
    movu    m3, [r0+2*r1+0]
    movu    m4, [r0+2*r1+8]
    psubw   m1, [r2+0]
    psubw   m2, [r2+8]
    psubw   m3, [r2+2*r3+0]
    psubw   m4, [r2+2*r3+8]
    ABSW2   m1, m2, m1, m2, m5, m6
    ABSW2   m3, m4, m3, m4, m7, m5
    lea     r0, [r0+4*r1]
    lea     r2, [r2+4*r3]
    paddw   m1, m2
    paddw   m3, m4
    paddw   m0, m1
    paddw   m0, m3
%endmacro

%macro SAD_INC_2x4P_MMX 0
    movu    m1, [r0]
    movu    m2, [r0+2*r1]
    psubw   m1, [r2]
    psubw   m2, [r2+2*r3]
    ABSW2   m1, m2, m1, m2, m3, m4
    lea     r0, [r0+4*r1]
    lea     r2, [r2+4*r3]
    paddw   m0, m1
    paddw   m0, m2
%endmacro

;-----------------------------------------------------------------------------
; int pixel_sad_NxM( uint16_t *, intptr_t, uint16_t *, intptr_t )
;-----------------------------------------------------------------------------
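; Scalar equivalent, as a sketch for orientation (not part of the build);
; the strides are in pixels, hence the 2*r1 / 2*r3 byte offsets below:
;
;     int sad = 0;
;     for( int y = 0; y < M; y++, pix1 += stride1, pix2 += stride2 )
;         for( int x = 0; x < N; x++ )
;             sad += abs( pix1[x] - pix2[x] );
;     return sad;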
%macro SAD_MMX 3
cglobal pixel_sad_%1x%2, 4,5-(%2&4/4)
    pxor    m0, m0
%if %2 == 4
    SAD_INC_%3x%1P_MMX
    SAD_INC_%3x%1P_MMX
%else
    mov    r4d, %2/%3
.loop:
    SAD_INC_%3x%1P_MMX
    dec    r4d
    jg .loop
%endif
%if %1*%2 == 256
    HADDUW  m0, m1
%else
    HADDW   m0, m1
%endif
    movd   eax, m0
    RET
%endmacro

INIT_MMX mmx2
SAD_MMX 16, 16, 1
SAD_MMX 16,  8, 1
SAD_MMX  8, 16, 2
SAD_MMX  8,  8, 2
SAD_MMX  8,  4, 2
SAD_MMX  4,  8, 2
SAD_MMX  4,  4, 2
INIT_MMX ssse3
SAD_MMX  4,  8, 2
SAD_MMX  4,  4, 2

;=============================================================================
; SAD XMM
;=============================================================================
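; SAD_INC_2ROW handles two rows per call. When one row spans two registers
; (2*width > mmsize) it loads four vectors per call; otherwise a single
; register covers each row. Height-4 blocks are fully unrolled and skip the
; r4 loop counter.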
%macro SAD_INC_2ROW 1
%if 2*%1 > mmsize
    movu    m1, [r2+ 0]
    movu    m2, [r2+16]
    movu    m3, [r2+2*r3+ 0]
    movu    m4, [r2+2*r3+16]
    psubw   m1, [r0+ 0]
    psubw   m2, [r0+16]
    psubw   m3, [r0+2*r1+ 0]
    psubw   m4, [r0+2*r1+16]
    ABSW2   m1, m2, m1, m2, m5, m6
    lea     r0, [r0+4*r1]
    lea     r2, [r2+4*r3]
    ABSW2   m3, m4, m3, m4, m7, m5
    paddw   m1, m2
    paddw   m3, m4
    paddw   m0, m1
    paddw   m0, m3
%else
    movu    m1, [r2]
    movu    m2, [r2+2*r3]
    psubw   m1, [r0]
    psubw   m2, [r0+2*r1]
    ABSW2   m1, m2, m1, m2, m3, m4
    lea     r0, [r0+4*r1]
    lea     r2, [r2+4*r3]
    paddw   m0, m1
    paddw   m0, m2
%endif
%endmacro

;-----------------------------------------------------------------------------
; int pixel_sad_NxM( uint16_t *, intptr_t, uint16_t *, intptr_t )
;-----------------------------------------------------------------------------
%macro SAD 2
cglobal pixel_sad_%1x%2, 4,5-(%2&4/4),8*(%1/mmsize)
    pxor    m0, m0
%if %2 == 4
    SAD_INC_2ROW %1
    SAD_INC_2ROW %1
%else
    mov    r4d, %2/2
.loop:
    SAD_INC_2ROW %1
    dec    r4d
    jg .loop
%endif
    HADDW   m0, m1
    movd   eax, xm0
    RET
%endmacro

INIT_XMM sse2
SAD 16, 16
SAD 16,  8
SAD  8, 16
SAD  8,  8
SAD  8,  4
INIT_XMM sse2, aligned
SAD 16, 16
SAD 16,  8
SAD  8, 16
SAD  8,  8
INIT_XMM ssse3
SAD 16, 16
SAD 16,  8
SAD  8, 16
SAD  8,  8
SAD  8,  4
INIT_XMM ssse3, aligned
SAD 16, 16
SAD 16,  8
SAD  8, 16
SAD  8,  8
INIT_YMM avx2
SAD 16, 16
SAD 16,  8

;=============================================================================
; SAD x3/x4
;=============================================================================
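; The x3/x4 variants compare one encoded block (fenc, at the fixed
; FENC_STRIDE) against three or four reference candidates sharing a common
; stride, producing all scores in a single pass so each fenc row is only
; loaded once.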
%macro SAD_X3_INC_P 0
    add     r0, 4*FENC_STRIDE
    lea     r1, [r1+4*r4]
    lea     r2, [r2+4*r4]
    lea     r3, [r3+4*r4]
%endmacro

%macro SAD_X3_ONE_START 0
    mova    m3, [r0]
    movu    m0, [r1]
    movu    m1, [r2]
    movu    m2, [r3]
    psubw   m0, m3
    psubw   m1, m3
    psubw   m2, m3
    ABSW2   m0, m1, m0, m1, m4, m5
    ABSW    m2, m2, m6
%endmacro

%macro SAD_X3_ONE 2
    mova    m6, [r0+%1]
    movu    m3, [r1+%2]
    movu    m4, [r2+%2]
    movu    m5, [r3+%2]
    psubw   m3, m6
    psubw   m4, m6
    psubw   m5, m6
    ABSW2   m3, m4, m3, m4, m7, m6
    ABSW    m5, m5, m6
    paddw   m0, m3
    paddw   m1, m4
    paddw   m2, m5
%endmacro

%macro SAD_X3_END 2
%if mmsize == 8 && %1*%2 == 256
    HADDUW  m0, m3
    HADDUW  m1, m4
    HADDUW  m2, m5
%else
    HADDW   m0, m3
    HADDW   m1, m4
    HADDW   m2, m5
%endif
%if UNIX64
    movd [r5+0], xm0
    movd [r5+4], xm1
    movd [r5+8], xm2
%else
    mov      r0, r5mp
    movd [r0+0], xm0
    movd [r0+4], xm1
    movd [r0+8], xm2
%endif
    RET
%endmacro

%macro SAD_X4_INC_P 0
    add     r0, 4*FENC_STRIDE
    lea     r1, [r1+4*r5]
    lea     r2, [r2+4*r5]
    lea     r3, [r3+4*r5]
    lea     r4, [r4+4*r5]
%endmacro

%macro SAD_X4_ONE_START 0
    mova    m4, [r0]
    movu    m0, [r1]
    movu    m1, [r2]
    movu    m2, [r3]
    movu    m3, [r4]
    psubw   m0, m4
    psubw   m1, m4
    psubw   m2, m4
    psubw   m3, m4
    ABSW2   m0, m1, m0, m1, m5, m6
    ABSW2   m2, m3, m2, m3, m4, m7
%endmacro

%macro SAD_X4_ONE 2
    mova    m4, [r0+%1]
    movu    m5, [r1+%2]
    movu    m6, [r2+%2]
%if num_mmregs > 8
    movu    m7, [r3+%2]
    movu    m8, [r4+%2]
    psubw   m5, m4
    psubw   m6, m4
    psubw   m7, m4
    psubw   m8, m4
    ABSW2   m5, m6, m5, m6, m9, m10
    ABSW2   m7, m8, m7, m8, m9, m10
    paddw   m0, m5
    paddw   m1, m6
    paddw   m2, m7
    paddw   m3, m8
%elif cpuflag(ssse3)
    movu    m7, [r3+%2]
    psubw   m5, m4
    psubw   m6, m4
    psubw   m7, m4
    movu    m4, [r4+%2]
    pabsw   m5, m5
    psubw   m4, [r0+%1]
    pabsw   m6, m6
    pabsw   m7, m7
    pabsw   m4, m4
    paddw   m0, m5
    paddw   m1, m6
    paddw   m2, m7
    paddw   m3, m4
%else ; num_mmregs == 8 && !ssse3
    psubw   m5, m4
    psubw   m6, m4
    ABSW    m5, m5, m7
    ABSW    m6, m6, m7
    paddw   m0, m5
    paddw   m1, m6
    movu    m5, [r3+%2]
    movu    m6, [r4+%2]
    psubw   m5, m4
    psubw   m6, m4
    ABSW2   m5, m6, m5, m6, m7, m4
    paddw   m2, m5
    paddw   m3, m6
%endif
%endmacro

%macro SAD_X4_END 2
%if mmsize == 8 && %1*%2 == 256
    HADDUW  m0, m4
    HADDUW  m1, m5
    HADDUW  m2, m6
    HADDUW  m3, m7
%else
    HADDW   m0, m4
    HADDW   m1, m5
    HADDW   m2, m6
    HADDW   m3, m7
%endif
    mov       r0, r6mp
    movd [r0+ 0], xm0
    movd [r0+ 4], xm1
    movd [r0+ 8], xm2
    movd [r0+12], xm3
    RET
%endmacro
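
; SAD_X_2xNP: process two rows across the full block width.
; %1 = number of references (3 or 4), %2 = reference stride register,
; %3 = first mmsize-sized chunk of the row, %4 = number of chunks per row.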
%macro SAD_X_2xNP 4
    %assign x %3
%rep %4
    SAD_X%1_ONE x*mmsize, x*mmsize
    SAD_X%1_ONE 2*FENC_STRIDE+x*mmsize, 2*%2+x*mmsize
    %assign x x+1
%endrep
%endmacro
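
; pixel_vsad: sum of absolute differences between vertically adjacent rows
; of a 16-pixel-wide block, i.e. SAD( row[y], row[y+1] ) summed over the
; height in r2d. Rows already loaded for one pair are reused as the top
; rows of the next pair.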
%macro PIXEL_VSAD 0
cglobal pixel_vsad, 3,3,8
    mova    m0, [r0]
    mova    m1, [r0+16]
    mova    m2, [r0+2*r1]
    mova    m3, [r0+2*r1+16]
    lea     r0, [r0+4*r1]
    psubw   m0, m2
    psubw   m1, m3
    ABSW2   m0, m1, m0, m1, m4, m5
    paddw   m0, m1
    sub    r2d, 2
    je .end
.loop:
    mova    m4, [r0]
    mova    m5, [r0+16]
    mova    m6, [r0+2*r1]
    mova    m7, [r0+2*r1+16]
    lea     r0, [r0+4*r1]
    psubw   m2, m4
    psubw   m3, m5
    psubw   m4, m6
    psubw   m5, m7
    ABSW    m2, m2, m1
    ABSW    m3, m3, m1
    ABSW    m4, m4, m1
    ABSW    m5, m5, m1
    paddw   m0, m2
    paddw   m0, m3
    paddw   m0, m4
    paddw   m0, m5
    mova    m2, m6
    mova    m3, m7
    sub    r2d, 2
    jg .loop
.end:
%if BIT_DEPTH == 9
    HADDW   m0, m1 ; max sum: 62(pixel diffs)*511(pixel_max)=31682
%else
    HADDUW  m0, m1 ; max sum: 62(pixel diffs)*1023(pixel_max)=63426
%endif
    movd   eax, m0
    RET
%endmacro

INIT_XMM sse2
PIXEL_VSAD
INIT_XMM ssse3
PIXEL_VSAD
INIT_XMM xop
PIXEL_VSAD

INIT_YMM avx2
cglobal pixel_vsad, 3,3
    mova    m0, [r0]
    mova    m1, [r0+2*r1]
    lea     r0, [r0+4*r1]
    psubw   m0, m1
    pabsw   m0, m0
    sub    r2d, 2
    je .end
.loop:
    mova    m2, [r0]
    mova    m3, [r0+2*r1]
    lea     r0, [r0+4*r1]
    psubw   m1, m2
    psubw   m2, m3
    pabsw   m1, m1
    pabsw   m2, m2
    paddw   m0, m1
    paddw   m0, m2
    mova    m1, m3
    sub    r2d, 2
    jg .loop
.end:
%if BIT_DEPTH == 9
    HADDW   m0, m1
%else
    HADDUW  m0, m1
%endif
    movd   eax, xm0
    RET

;-----------------------------------------------------------------------------
; void pixel_sad_xN_WxH( uint16_t *fenc, uint16_t *pix0, uint16_t *pix1,
;                        uint16_t *pix2, intptr_t i_stride, int scores[3] )
;-----------------------------------------------------------------------------
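; Register layout: r0 = fenc, r1..r3 (x3) or r1..r4 (x4) = reference
; pointers, the next register (r4 or r5) = the shared stride, and r6 = the
; row-pair loop counter. The scores pointer is the last argument: for x4
; (and for x3 outside UNIX64) it lives on the stack and is reloaded before
; the final stores.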
%macro SAD_X 3
cglobal pixel_sad_x%1_%2x%3, 6,7,XMM_REGS
    %assign regnum %1+1
    %xdefine STRIDE r %+ regnum
    mov     r6, %3/2-1
    SAD_X%1_ONE_START
    SAD_X%1_ONE 2*FENC_STRIDE, 2*STRIDE
    SAD_X_2xNP %1, STRIDE, 1, %2/(mmsize/2)-1
.loop:
    SAD_X%1_INC_P
    SAD_X_2xNP %1, STRIDE, 0, %2/(mmsize/2)
    dec     r6
    jg .loop
%if %1 == 4
    mov     r6, r6m
%endif
    SAD_X%1_END %2, %3
%endmacro

INIT_MMX mmx2
%define XMM_REGS 0
SAD_X 3, 16, 16
SAD_X 3, 16,  8
SAD_X 3,  8, 16
SAD_X 3,  8,  8
SAD_X 3,  8,  4
SAD_X 3,  4,  8
SAD_X 3,  4,  4
SAD_X 4, 16, 16
SAD_X 4, 16,  8
SAD_X 4,  8, 16
SAD_X 4,  8,  8
SAD_X 4,  8,  4
SAD_X 4,  4,  8
SAD_X 4,  4,  4
INIT_MMX ssse3
SAD_X 3,  4,  8
SAD_X 3,  4,  4
SAD_X 4,  4,  8
SAD_X 4,  4,  4
INIT_XMM ssse3
%define XMM_REGS 7
SAD_X 3, 16, 16
SAD_X 3, 16,  8
SAD_X 3,  8, 16
SAD_X 3,  8,  8
SAD_X 3,  8,  4
%define XMM_REGS 9
SAD_X 4, 16, 16
SAD_X 4, 16,  8
SAD_X 4,  8, 16
SAD_X 4,  8,  8
SAD_X 4,  8,  4
INIT_XMM sse2
%define XMM_REGS 8
SAD_X 3, 16, 16
SAD_X 3, 16,  8
SAD_X 3,  8, 16
SAD_X 3,  8,  8
SAD_X 3,  8,  4
%define XMM_REGS 11
SAD_X 4, 16, 16
SAD_X 4, 16,  8
SAD_X 4,  8, 16
SAD_X 4,  8,  8
SAD_X 4,  8,  4
INIT_XMM xop
%define XMM_REGS 7
SAD_X 3, 16, 16
SAD_X 3, 16,  8
SAD_X 3,  8, 16
SAD_X 3,  8,  8
SAD_X 3,  8,  4
%define XMM_REGS 9
SAD_X 4, 16, 16
SAD_X 4, 16,  8
SAD_X 4,  8, 16
SAD_X 4,  8,  8
SAD_X 4,  8,  4
INIT_YMM avx2
%define XMM_REGS 7
SAD_X 3, 16, 16
SAD_X 3, 16,  8
%define XMM_REGS 9
SAD_X 4, 16, 16
SAD_X 4, 16,  8

;-----------------------------------------------------------------------------
; void intra_sad_x3_4x4( uint16_t *fenc, uint16_t *fdec, int res[3] );
;-----------------------------------------------------------------------------
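; Computes the SAD of the 4x4 fenc block against the vertical, horizontal
; and DC intra predictions in one pass: the top row is replicated for V,
; the left column for H, and DC = ( top + left + 4 ) >> 3 is broadcast to
; all lanes before the comparison. Results: res[0]=V, res[1]=H, res[2]=DC.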
%macro INTRA_SAD_X3_4x4 0
cglobal intra_sad_x3_4x4, 3,3,7
%if cpuflag(ssse3)
    movddup m0, [r1-1*FDEC_STRIDEB]
%else
    movq    m0, [r1-1*FDEC_STRIDEB]
    punpcklqdq m0, m0
%endif
    movq    m1, [r0+0*FENC_STRIDEB]
    movq    m2, [r0+2*FENC_STRIDEB]
    pshuflw m6, m0, q1032
    paddw   m6, m0
    pshuflw m5, m6, q2301
    paddw   m6, m5
    punpcklqdq m6, m6       ; A+B+C+D 8 times
    movhps  m1, [r0+1*FENC_STRIDEB]
    movhps  m2, [r0+3*FENC_STRIDEB]
    psubw   m3, m1, m0
    psubw   m0, m2
    ABSW2   m3, m0, m3, m0, m4, m5
    paddw   m0, m3
    movd    m3, [r1+0*FDEC_STRIDEB-4]
    movd    m4, [r1+2*FDEC_STRIDEB-4]
    movhps  m3, [r1+1*FDEC_STRIDEB-8]
    movhps  m4, [r1+3*FDEC_STRIDEB-8]
    pshufhw m3, m3, q3333
    pshufhw m4, m4, q3333
    pshuflw m3, m3, q1111   ; FF FF EE EE
    pshuflw m4, m4, q1111   ; HH HH GG GG
    paddw   m5, m3, m4
    paddw   m6, [pw_4]
    paddw   m6, m5
    pshufd  m5, m5, q1032
    paddw   m5, m6
    psrlw   m5, 3
    psubw   m6, m5, m2
    psubw   m5, m1
    psubw   m1, m3
    psubw   m2, m4
    ABSW2   m5, m6, m5, m6, m3, m4
    ABSW2   m1, m2, m1, m2, m3, m4
    paddw   m5, m6
    paddw   m1, m2
%if cpuflag(ssse3)
    phaddw  m0, m1
    movhlps m3, m5
    paddw   m5, m3
    phaddw  m0, m5
    pmaddwd m0, [pw_1]
    mova  [r2], m0
%else
    HADDW   m0, m3
    HADDW   m1, m3
    HADDW   m5, m3
    movd   [r2], m0 ; V prediction cost
    movd [r2+4], m1 ; H prediction cost
    movd [r2+8], m5 ; DC prediction cost
%endif
    RET
%endmacro

INIT_XMM sse2
INTRA_SAD_X3_4x4
INIT_XMM ssse3
INTRA_SAD_X3_4x4
INIT_XMM avx
INTRA_SAD_X3_4x4

;-----------------------------------------------------------------------------
; void intra_sad_x3_8x8( pixel *fenc, pixel edge[36], int res[3] );
;-----------------------------------------------------------------------------
;m0 = DC
;m6 = V
;m7 = H
;m1 = DC score
;m2 = V score
;m3 = H score
;m5 = temp
;m4 = pixel row
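; In the edge[] array, edge[7..14] holds the left neighbours (bottom to
; top) and edge[16..23] the row above the block, which is why the code
; loads [r1+7*SIZEOF_PIXEL] for H/DC and [r1+16*SIZEOF_PIXEL] for V.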
%macro INTRA_SAD_HVDC_ITER 2
    mova    m4, [r0+(%1-4)*FENC_STRIDEB]
    psubw   m4, m0
    ABSW    m4, m4, m5
    ACCUM paddw, 1, 4, %1
    mova    m4, [r0+(%1-4)*FENC_STRIDEB]
    psubw   m4, m6
    ABSW    m4, m4, m5
    ACCUM paddw, 2, 4, %1
    pshufd  m5, m7, %2
    psubw   m5, [r0+(%1-4)*FENC_STRIDEB]
    ABSW    m5, m5, m4
    ACCUM paddw, 3, 5, %1
%endmacro
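
; ACCUM paddw, dst, src, iter (from x86util) makes m<dst> take the value of
; m<src> on the first iteration (iter == 0) and accumulates with paddw on
; later ones, so the three score registers never need explicit zeroing.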
%macro INTRA_SAD_X3_8x8 0
cglobal intra_sad_x3_8x8, 3,3,8
    add     r0, 4*FENC_STRIDEB
    movu    m0, [r1+7*SIZEOF_PIXEL]
    mova    m6, [r1+16*SIZEOF_PIXEL] ; V prediction
    mova    m7, m0
    paddw   m0, m6
    punpckhwd m7, m7
    HADDW   m0, m4
    paddw   m0, [pw_8]
    psrlw   m0, 4
    SPLATW  m0, m0
    INTRA_SAD_HVDC_ITER 0, q3333
    INTRA_SAD_HVDC_ITER 1, q2222
    INTRA_SAD_HVDC_ITER 2, q1111
    INTRA_SAD_HVDC_ITER 3, q0000
    movq    m7, [r1+7*SIZEOF_PIXEL]
    punpcklwd m7, m7
    INTRA_SAD_HVDC_ITER 4, q3333
    INTRA_SAD_HVDC_ITER 5, q2222
    INTRA_SAD_HVDC_ITER 6, q1111
    INTRA_SAD_HVDC_ITER 7, q0000
%if cpuflag(ssse3)
    phaddw  m2, m3          ; 2 2 2 2 3 3 3 3
    movhlps m3, m1
    paddw   m1, m3          ; 1 1 1 1 _ _ _ _
    phaddw  m2, m1          ; 2 2 3 3 1 1 _ _
    pmaddwd m2, [pw_1]      ; 2 3 1 _
    mova  [r2], m2
%else
    HADDW   m2, m4
    HADDW   m3, m4
    HADDW   m1, m4
    movd [r2+0], m2
    movd [r2+4], m3
    movd [r2+8], m1
%endif
    RET
%endmacro

INIT_XMM sse2
INTRA_SAD_X3_8x8
INIT_XMM ssse3
INTRA_SAD_X3_8x8
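
; The AVX2 version packs two fenc rows into one ymm register (low and high
; 128-bit lanes), halving the iteration count; the per-lane sums are folded
; together with vextracti128 at the end.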
%macro INTRA_SAD_HVDC_ITER_YMM 2
    mova   xm4, [r0+(%1-4)*FENC_STRIDEB]
    vinserti128 m4, m4, [r0+%1*FENC_STRIDEB], 1
    pshufd  m5, m7, %2
    psubw   m5, m4
    pabsw   m5, m5
    ACCUM paddw, 2, 5, %1 ; H
    psubw   m5, m4, m6
    psubw   m4, m0
    pabsw   m5, m5
    pabsw   m4, m4
    ACCUM paddw, 1, 5, %1 ; V
    ACCUM paddw, 3, 4, %1 ; DC
%endmacro

INIT_YMM avx2
cglobal intra_sad_x3_8x8, 3,3,8
    add     r0, 4*FENC_STRIDEB
    movu   xm0, [r1+7*SIZEOF_PIXEL]
    vbroadcasti128 m6, [r1+16*SIZEOF_PIXEL] ; V prediction
    vpermq  m7, m0, q0011
    paddw  xm0, xm6
    paddw  xm0, [pw_1]      ; equal to +8 after HADDW
    HADDW  xm0, xm4
    psrld  xm0, 4
    vpbroadcastw m0, xm0
    punpcklwd m7, m7
    INTRA_SAD_HVDC_ITER_YMM 0, q3333
    INTRA_SAD_HVDC_ITER_YMM 1, q2222
    INTRA_SAD_HVDC_ITER_YMM 2, q1111
    INTRA_SAD_HVDC_ITER_YMM 3, q0000
    phaddw  m1, m2          ; 1 1 1 1 2 2 2 2 1 1 1 1 2 2 2 2
    punpckhqdq m2, m3, m3
    paddw   m3, m2          ; 3 3 3 3 _ _ _ _ 3 3 3 3 _ _ _ _
    phaddw  m1, m3          ; 1 1 2 2 3 3 _ _ 1 1 2 2 3 3 _ _
    vextracti128 xm2, m1, 1
    paddw  xm1, xm2         ; 1 1 2 2 3 3 _ _
    pmaddwd xm1, [pw_1]     ; 1 2 3 _
    mova  [r2], xm1
    RET