/*****************************************************************************
 * mc.c: motion compensation
 *****************************************************************************
 * Copyright (C) 2003-2018 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/
#include "common.h"

#if HAVE_MMX
#include "x86/mc.h"
#endif
#if ARCH_PPC
#include "ppc/mc.h"
#endif
#if ARCH_ARM
#include "arm/mc.h"
#endif
#if ARCH_AARCH64
#include "aarch64/mc.h"
#endif
#if ARCH_MIPS
#include "mips/mc.h"
#endif

static inline void pixel_avg( pixel *dst,  intptr_t i_dst_stride,
                              pixel *src1, intptr_t i_src1_stride,
                              pixel *src2, intptr_t i_src2_stride, int i_width, int i_height )
{
    for( int y = 0; y < i_height; y++ )
    {
        for( int x = 0; x < i_width; x++ )
            dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
        dst  += i_dst_stride;
        src1 += i_src1_stride;
        src2 += i_src2_stride;
    }
}

static inline void pixel_avg_wxh( pixel *dst,  intptr_t i_dst,
                                  pixel *src1, intptr_t i_src1,
                                  pixel *src2, intptr_t i_src2, int width, int height )
{
    for( int y = 0; y < height; y++ )
    {
        for( int x = 0; x < width; x++ )
            dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
        src1 += i_src1;
        src2 += i_src2;
        dst  += i_dst;
    }
}

/* Implicit weighted bipred only:
 * assumes log2_denom = 5, offset = 0, weight1 + weight2 = 64 */
static inline void pixel_avg_weight_wxh( pixel *dst,  intptr_t i_dst,
                                         pixel *src1, intptr_t i_src1,
                                         pixel *src2, intptr_t i_src2, int width, int height, int i_weight1 )
{
    int i_weight2 = 64 - i_weight1;
    for( int y = 0; y < height; y++, dst += i_dst, src1 += i_src1, src2 += i_src2 )
        for( int x = 0; x < width; x++ )
            dst[x] = x264_clip_pixel( (src1[x]*i_weight1 + src2[x]*i_weight2 + (1<<5)) >> 6 );
}
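/* Worked example with illustrative numbers: i_weight1 = 40 gives
 * i_weight2 = 24, and src1[x] = 100, src2[x] = 200 yields
 * (100*40 + 200*24 + 32) >> 6 = 8832 >> 6 = 138. The +32 term is
 * 1 << log2_denom and the shift is log2_denom+1, as in the H.264
 * bipred formula, so the weight pair effectively divides by 64. */
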
#undef op_scale2

#define PIXEL_AVG_C( name, width, height ) \
static void name( pixel *pix1, intptr_t i_stride_pix1, \
                  pixel *pix2, intptr_t i_stride_pix2, \
                  pixel *pix3, intptr_t i_stride_pix3, int weight ) \
{ \
    if( weight == 32 ) \
        pixel_avg_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, pix3, i_stride_pix3, width, height ); \
    else \
        pixel_avg_weight_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, pix3, i_stride_pix3, width, height, weight ); \
}
PIXEL_AVG_C( pixel_avg_16x16, 16, 16 )
PIXEL_AVG_C( pixel_avg_16x8,  16, 8 )
PIXEL_AVG_C( pixel_avg_8x16,  8, 16 )
PIXEL_AVG_C( pixel_avg_8x8,   8, 8 )
PIXEL_AVG_C( pixel_avg_8x4,   8, 4 )
PIXEL_AVG_C( pixel_avg_4x16,  4, 16 )
PIXEL_AVG_C( pixel_avg_4x8,   4, 8 )
PIXEL_AVG_C( pixel_avg_4x4,   4, 4 )
PIXEL_AVG_C( pixel_avg_4x2,   4, 2 )
PIXEL_AVG_C( pixel_avg_2x8,   2, 8 )
PIXEL_AVG_C( pixel_avg_2x4,   2, 4 )
PIXEL_AVG_C( pixel_avg_2x2,   2, 2 )

static void weight_cache( x264_t *h, x264_weight_t *w )
{
    w->weightfn = h->mc.weight;
}

#define opscale(x)       dst[x] = x264_clip_pixel( ((src[x] * scale + (1<<(denom - 1))) >> denom) + offset )
#define opscale_noden(x) dst[x] = x264_clip_pixel( src[x] * scale + offset )
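/* These macros implement the H.264 explicit weighted-prediction formula
 * clip( ((src * w + 2^(logWD-1)) >> logWD) + offset ), with scale = w and
 * denom = logWD. E.g. scale = 32, denom = 5, offset = 0 is the identity,
 * while scale = 16 with the same denom halves the pixel, which is how
 * fades are modeled. opscale_noden covers denom = 0, where no rounding
 * term or shift applies. */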
static void mc_weight( pixel *dst, intptr_t i_dst_stride, pixel *src, intptr_t i_src_stride,
                       const x264_weight_t *weight, int i_width, int i_height )
{
    int offset = weight->i_offset << (BIT_DEPTH-8);
    int scale = weight->i_scale;
    int denom = weight->i_denom;
    if( denom >= 1 )
    {
        for( int y = 0; y < i_height; y++, dst += i_dst_stride, src += i_src_stride )
            for( int x = 0; x < i_width; x++ )
                opscale( x );
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst_stride, src += i_src_stride )
            for( int x = 0; x < i_width; x++ )
                opscale_noden( x );
    }
}

#define MC_WEIGHT_C( name, width ) \
    static void name( pixel *dst, intptr_t i_dst_stride, pixel *src, intptr_t i_src_stride, const x264_weight_t *weight, int height ) \
{ \
    mc_weight( dst, i_dst_stride, src, i_src_stride, weight, width, height );\
}

MC_WEIGHT_C( mc_weight_w20, 20 )
MC_WEIGHT_C( mc_weight_w16, 16 )
MC_WEIGHT_C( mc_weight_w12, 12 )
MC_WEIGHT_C( mc_weight_w8,   8 )
MC_WEIGHT_C( mc_weight_w4,   4 )
MC_WEIGHT_C( mc_weight_w2,   2 )

static weight_fn_t mc_weight_wtab[6] =
{
    mc_weight_w2,
    mc_weight_w4,
    mc_weight_w8,
    mc_weight_w12,
    mc_weight_w16,
    mc_weight_w20,
};

static void mc_copy( pixel *src, intptr_t i_src_stride, pixel *dst, intptr_t i_dst_stride, int i_width, int i_height )
{
    for( int y = 0; y < i_height; y++ )
    {
        memcpy( dst, src, i_width * sizeof(pixel) );
        src += i_src_stride;
        dst += i_dst_stride;
    }
}
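/* The half-pel interpolator below is the H.264 6-tap filter with
 * coefficients (1, -5, 20, 20, -5, 1): a single pass is normalized by
 * (v + 16) >> 5, the two-pass diagonal result by (v + 512) >> 10. */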
#define TAPFILTER(pix, d) ((pix)[x-2*d] + (pix)[x+3*d] - 5*((pix)[x-d] + (pix)[x+2*d]) + 20*((pix)[x] + (pix)[x+d]))
static void hpel_filter( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,
                         intptr_t stride, int width, int height, int16_t *buf )
{
    const int pad = (BIT_DEPTH > 9) ? (-10 * PIXEL_MAX) : 0;
    for( int y = 0; y < height; y++ )
    {
        for( int x = -2; x < width+3; x++ )
        {
            int v = TAPFILTER(src,stride);
            dstv[x] = x264_clip_pixel( (v + 16) >> 5 );
            /* transform v for storage in a 16-bit integer */
            buf[x+2] = v + pad;
        }
        for( int x = 0; x < width; x++ )
            dstc[x] = x264_clip_pixel( (TAPFILTER(buf+2,1) - 32*pad + 512) >> 10 );
        for( int x = 0; x < width; x++ )
            dsth[x] = x264_clip_pixel( (TAPFILTER(src,1) + 16) >> 5 );
        dsth += stride;
        dstv += stride;
        dstc += stride;
        src  += stride;
    }
}
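/* Quarter-pel luma is reconstructed from the four planes in src[4]
 * (full-pel plus the H/V/center half-pel planes made by hpel_filter).
 * qpel_idx packs the fractional MV as ((mvy&3)<<2) + (mvx&3); positions
 * with an odd fractional component in x or y -- exactly what qpel_idx & 5
 * tests -- are formed by averaging the two nearest half-pel samples,
 * selected via the x264_hpel_ref0/ref1 tables. mc_luma writes the result
 * into dst; get_ref may instead return a pointer straight into a reference
 * plane when no averaging or weighting is needed, saving a copy. */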
static void mc_luma( pixel *dst,    intptr_t i_dst_stride,
                     pixel *src[4], intptr_t i_src_stride,
                     int mvx, int mvy,
                     int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);
    pixel *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;

    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        pixel *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
        pixel_avg( dst, i_dst_stride, src1, i_src_stride,
                   src2, i_src_stride, i_width, i_height );
        if( weight->weightfn )
            mc_weight( dst, i_dst_stride, dst, i_dst_stride, weight, i_width, i_height );
    }
    else if( weight->weightfn )
        mc_weight( dst, i_dst_stride, src1, i_src_stride, weight, i_width, i_height );
    else
        mc_copy( src1, i_src_stride, dst, i_dst_stride, i_width, i_height );
}

static pixel *get_ref( pixel *dst,   intptr_t *i_dst_stride,
                       pixel *src[4], intptr_t i_src_stride,
                       int mvx, int mvy,
                       int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);
    pixel *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;

    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        pixel *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
        pixel_avg( dst, *i_dst_stride, src1, i_src_stride,
                   src2, i_src_stride, i_width, i_height );
        if( weight->weightfn )
            mc_weight( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_width, i_height );
        return dst;
    }
    else if( weight->weightfn )
    {
        mc_weight( dst, *i_dst_stride, src1, i_src_stride, weight, i_width, i_height );
        return dst;
    }
    else
    {
        *i_dst_stride = i_src_stride;
        return src1;
    }
}

/* Full chroma MC (i.e. up to 1/8-pel). */
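/* The chroma plane is stored with Cb and Cr interleaved (hence the 2*x
 * indexing and the combined U/V destination pair). Each output sample is
 * a bilinear blend of its four neighbours with eighth-pel weights:
 * cA+cB+cC+cD = 64, so (sum + 32) >> 6 rounds to nearest. E.g. d8x =
 * d8y = 4, a half-pel position on both axes, makes all four weights 16. */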
static void mc_chroma( pixel *dstu, pixel *dstv, intptr_t i_dst_stride,
                       pixel *src, intptr_t i_src_stride,
                       int mvx, int mvy,
                       int i_width, int i_height )
{
    pixel *srcp;

    int d8x = mvx&0x07;
    int d8y = mvy&0x07;
    int cA = (8-d8x)*(8-d8y);
    int cB = d8x    *(8-d8y);
    int cC = (8-d8x)*d8y;
    int cD = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
    srcp = &src[i_src_stride];

    for( int y = 0; y < i_height; y++ )
    {
        for( int x = 0; x < i_width; x++ )
        {
            dstu[x] = ( cA*src[2*x]  + cB*src[2*x+2] +
                        cC*srcp[2*x] + cD*srcp[2*x+2] + 32 ) >> 6;
            dstv[x] = ( cA*src[2*x+1]  + cB*src[2*x+3] +
                        cC*srcp[2*x+1] + cD*srcp[2*x+3] + 32 ) >> 6;
        }
        dstu += i_dst_stride;
        dstv += i_dst_stride;
        src   = srcp;
        srcp += i_src_stride;
    }
}

#define MC_COPY(W) \
static void mc_copy_w##W( pixel *dst, intptr_t i_dst, pixel *src, intptr_t i_src, int i_height ) \
{ \
    mc_copy( src, i_src, dst, i_dst, W, i_height ); \
}
MC_COPY( 16 )
MC_COPY( 8 )
MC_COPY( 4 )

void x264_plane_copy_c( pixel *dst, intptr_t i_dst,
                        pixel *src, intptr_t i_src, int w, int h )
{
    while( h-- )
    {
        memcpy( dst, src, w * sizeof(pixel) );
        dst += i_dst;
        src += i_src;
    }
}

void x264_plane_copy_swap_c( pixel *dst, intptr_t i_dst,
                             pixel *src, intptr_t i_src, int w, int h )
{
    for( int y = 0; y < h; y++, dst += i_dst, src += i_src )
        for( int x = 0; x < 2*w; x += 2 )
        {
            dst[x]   = src[x+1];
            dst[x+1] = src[x];
        }
}

void x264_plane_copy_interleave_c( pixel *dst,  intptr_t i_dst,
                                   pixel *srcu, intptr_t i_srcu,
                                   pixel *srcv, intptr_t i_srcv, int w, int h )
{
    for( int y = 0; y < h; y++, dst += i_dst, srcu += i_srcu, srcv += i_srcv )
        for( int x = 0; x < w; x++ )
        {
            dst[2*x]   = srcu[x];
            dst[2*x+1] = srcv[x];
        }
}

void x264_plane_copy_deinterleave_c( pixel *dsta, intptr_t i_dsta, pixel *dstb, intptr_t i_dstb,
                                     pixel *src,  intptr_t i_src, int w, int h )
{
    for( int y = 0; y < h; y++, dsta += i_dsta, dstb += i_dstb, src += i_src )
        for( int x = 0; x < w; x++ )
        {
            dsta[x] = src[2*x];
            dstb[x] = src[2*x+1];
        }
}

static void plane_copy_deinterleave_rgb_c( pixel *dsta, intptr_t i_dsta,
                                           pixel *dstb, intptr_t i_dstb,
                                           pixel *dstc, intptr_t i_dstc,
                                           pixel *src,  intptr_t i_src, int pw, int w, int h )
{
    for( int y = 0; y < h; y++, dsta += i_dsta, dstb += i_dstb, dstc += i_dstc, src += i_src )
    {
        for( int x = 0; x < w; x++ )
        {
            dsta[x] = src[x*pw];
            dstb[x] = src[x*pw+1];
            dstc[x] = src[x*pw+2];
        }
    }
}

#if WORDS_BIGENDIAN
static ALWAYS_INLINE uint32_t v210_endian_fix32( uint32_t x )
{
    return (x<<24) + ((x<<8)&0xff0000) + ((x>>8)&0xff00) + (x>>24);
}
#else
#define v210_endian_fix32(x) (x)
#endif
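/* v210 is 10-bit 4:2:2: each little-endian 32-bit word packs three 10-bit
 * components in bits 0-9, 10-19 and 20-29, and four words carry six luma
 * plus three Cb and three Cr samples. Each iteration below consumes two
 * words (Cb Y Cr, then Y Cb|Cr Y), emitting three luma samples and three
 * samples of interleaved chroma. */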
static void plane_copy_deinterleave_v210_c( pixel *dsty, intptr_t i_dsty,
                                            pixel *dstc, intptr_t i_dstc,
                                            uint32_t *src, intptr_t i_src, int w, int h )
{
    for( int l = 0; l < h; l++ )
    {
        pixel *dsty0 = dsty;
        pixel *dstc0 = dstc;
        uint32_t *src0 = src;

        for( int n = 0; n < w; n += 3 )
        {
            uint32_t s = v210_endian_fix32( *src0++ );
            *dstc0++ = s & 0x03FF;
            *dsty0++ = (s >> 10) & 0x03FF;
            *dstc0++ = (s >> 20) & 0x03FF;
            s = v210_endian_fix32( *src0++ );
            *dsty0++ = s & 0x03FF;
            *dstc0++ = (s >> 10) & 0x03FF;
            *dsty0++ = (s >> 20) & 0x03FF;
        }

        dsty += i_dsty;
        dstc += i_dstc;
        src  += i_src;
    }
}

static void store_interleave_chroma( pixel *dst, intptr_t i_dst, pixel *srcu, pixel *srcv, int height )
{
    for( int y = 0; y < height; y++, dst += i_dst, srcu += FDEC_STRIDE, srcv += FDEC_STRIDE )
        for( int x = 0; x < 8; x++ )
        {
            dst[2*x]   = srcu[x];
            dst[2*x+1] = srcv[x];
        }
}

static void load_deinterleave_chroma_fenc( pixel *dst, pixel *src, intptr_t i_src, int height )
{
    x264_plane_copy_deinterleave_c( dst, FENC_STRIDE, dst+FENC_STRIDE/2, FENC_STRIDE, src, i_src, 8, height );
}

static void load_deinterleave_chroma_fdec( pixel *dst, pixel *src, intptr_t i_src, int height )
{
    x264_plane_copy_deinterleave_c( dst, FDEC_STRIDE, dst+FDEC_STRIDE/2, FDEC_STRIDE, src, i_src, 8, height );
}

static void prefetch_fenc_null( pixel *pix_y,  intptr_t stride_y,
                                pixel *pix_uv, intptr_t stride_uv, int mb_x )
{}

static void prefetch_ref_null( pixel *pix, intptr_t stride, int parity )
{}

static void memzero_aligned( void *dst, size_t n )
{
    memset( dst, 0, n );
}
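/* Integral-image helpers for ESA/TESA motion search: integral_init4h/8h
 * add a 4- or 8-wide horizontal sliding sum at each x to the row above,
 * so rows accumulate column-wise running totals; integral_init4v then
 * differences rows 4 apart into 4x4 box sums (stored in sum4) and
 * combines two 4-wide columns 8 rows apart into 8x8 sums in place, while
 * integral_init8v does only the 8x8 differencing. */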
static void integral_init4h( uint16_t *sum, pixel *pix, intptr_t stride )
{
    int v = pix[0]+pix[1]+pix[2]+pix[3];
    for( int x = 0; x < stride-4; x++ )
    {
        sum[x] = v + sum[x-stride];
        v += pix[x+4] - pix[x];
    }
}

static void integral_init8h( uint16_t *sum, pixel *pix, intptr_t stride )
{
    int v = pix[0]+pix[1]+pix[2]+pix[3]+pix[4]+pix[5]+pix[6]+pix[7];
    for( int x = 0; x < stride-8; x++ )
    {
        sum[x] = v + sum[x-stride];
        v += pix[x+8] - pix[x];
    }
}

static void integral_init4v( uint16_t *sum8, uint16_t *sum4, intptr_t stride )
{
    for( int x = 0; x < stride-8; x++ )
        sum4[x] = sum8[x+4*stride] - sum8[x];
    for( int x = 0; x < stride-8; x++ )
        sum8[x] = sum8[x+8*stride] + sum8[x+8*stride+4] - sum8[x] - sum8[x+4];
}

static void integral_init8v( uint16_t *sum8, intptr_t stride )
{
    for( int x = 0; x < stride-8; x++ )
        sum8[x] = sum8[x+8*stride] - sum8[x];
}

void x264_frame_init_lowres( x264_t *h, x264_frame_t *frame )
{
    pixel *src = frame->plane[0];
    int i_stride = frame->i_stride[0];
    int i_height = frame->i_lines[0];
    int i_width  = frame->i_width[0];

    // duplicate last row and column so that their interpolation doesn't have to be special-cased
    for( int y = 0; y < i_height; y++ )
        src[i_width+y*i_stride] = src[i_width-1+y*i_stride];
    memcpy( src+i_stride*i_height, src+i_stride*(i_height-1), (i_width+1) * sizeof(pixel) );
    h->mc.frame_init_lowres_core( src, frame->lowres[0], frame->lowres[1], frame->lowres[2], frame->lowres[3],
                                  i_stride, frame->i_stride_lowres, frame->i_width_lowres, frame->i_lines_lowres );
    x264_frame_expand_border_lowres( frame );

    memset( frame->i_cost_est, -1, sizeof(frame->i_cost_est) );

    for( int y = 0; y < h->param.i_bframe + 2; y++ )
        for( int x = 0; x < h->param.i_bframe + 2; x++ )
            frame->i_row_satds[y][x][0] = -1;

    for( int y = 0; y <= !!h->param.i_bframe; y++ )
        for( int x = 0; x <= h->param.i_bframe; x++ )
            frame->lowres_mvs[y][x][0][0] = 0x7FFF;
}
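/* The four lowres planes are a half-resolution downscale plus its H, V
 * and diagonal half-pel interpolations, so the lookahead can run the same
 * kind of fractional-pel motion search on them that mc_luma performs on
 * the full-size planes. */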
static void frame_init_lowres_core( pixel *src0, pixel *dst0, pixel *dsth, pixel *dstv, pixel *dstc,
                                    intptr_t src_stride, intptr_t dst_stride, int width, int height )
{
    for( int y = 0; y < height; y++ )
    {
        pixel *src1 = src0+src_stride;
        pixel *src2 = src1+src_stride;
        for( int x = 0; x < width; x++ )
        {
            // slower than naive bilinear, but matches asm
#define FILTER(a,b,c,d) ((((a+b+1)>>1)+((c+d+1)>>1)+1)>>1)
            dst0[x] = FILTER(src0[2*x  ], src1[2*x  ], src0[2*x+1], src1[2*x+1]);
            dsth[x] = FILTER(src0[2*x+1], src1[2*x+1], src0[2*x+2], src1[2*x+2]);
            dstv[x] = FILTER(src1[2*x  ], src2[2*x  ], src1[2*x+1], src2[2*x+1]);
            dstc[x] = FILTER(src1[2*x+1], src2[2*x+1], src1[2*x+2], src2[2*x+2]);
#undef FILTER
        }
        src0 += src_stride*2;
        dst0 += dst_stride;
        dsth += dst_stride;
        dstv += dst_stride;
        dstc += dst_stride;
    }
}

/* Estimate the total amount of influence on future quality that could be had if we
 * were to improve the reference samples used to inter predict any given macroblock. */
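/* In formula form, matching the code below:
 *   propagate_amount = propagate_in + intra_cost*inv_qscale*fps_factor
 *   dst = propagate_amount * (intra_cost - inter_cost) / intra_cost
 * i.e. the fraction of a block's information that comes from its
 * references, scaled by how much future cost already flows through it. */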
static void mbtree_propagate_cost( int16_t *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                   uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len )
{
    float fps = *fps_factor;
    for( int i = 0; i < len; i++ )
    {
        int intra_cost = intra_costs[i];
        int inter_cost = X264_MIN(intra_costs[i], inter_costs[i] & LOWRES_COST_MASK);
        float propagate_intra  = intra_cost * inv_qscales[i];
        float propagate_amount = propagate_in[i] + propagate_intra*fps;
        float propagate_num    = intra_cost - inter_cost;
        float propagate_denom  = intra_cost;
        dst[i] = X264_MIN((int)(propagate_amount * propagate_num / propagate_denom + 0.5f), 32767);
    }
}
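/* Scatter each block's propagate_amount onto the reference frame it
 * predicts from. MVs here are lowres quarter-pel, where one macroblock
 * spans 32 units, so mv>>5 selects the target MB and mv&31 the fractional
 * position; the four bilinear weights sum to 32*32 = 1024, hence the
 * (+512) >> 10 normalization. */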
static void mbtree_propagate_list( x264_t *h, uint16_t *ref_costs, int16_t (*mvs)[2],
                                   int16_t *propagate_amount, uint16_t *lowres_costs,
                                   int bipred_weight, int mb_y, int len, int list )
{
    unsigned stride = h->mb.i_mb_stride;
    unsigned width = h->mb.i_mb_width;
    unsigned height = h->mb.i_mb_height;

    for( unsigned i = 0; i < len; i++ )
    {
        int lists_used = lowres_costs[i]>>LOWRES_COST_SHIFT;

        if( !(lists_used & (1 << list)) )
            continue;

        int listamount = propagate_amount[i];
        /* Apply bipred weighting. */
        if( lists_used == 3 )
            listamount = (listamount * bipred_weight + 32) >> 6;

        /* Early termination for simple case of mv0. */
        if( !M32( mvs[i] ) )
        {
            MC_CLIP_ADD( ref_costs[mb_y*stride + i], listamount );
            continue;
        }

        int x = mvs[i][0];
        int y = mvs[i][1];
        unsigned mbx = (x>>5)+i;
        unsigned mby = (y>>5)+mb_y;
        unsigned idx0 = mbx + mby * stride;
        unsigned idx2 = idx0 + stride;
        x &= 31;
        y &= 31;
        int idx0weight = (32-y)*(32-x);
        int idx1weight = (32-y)*x;
        int idx2weight = y*(32-x);
        int idx3weight = y*x;
        idx0weight = (idx0weight * listamount + 512) >> 10;
        idx1weight = (idx1weight * listamount + 512) >> 10;
        idx2weight = (idx2weight * listamount + 512) >> 10;
        idx3weight = (idx3weight * listamount + 512) >> 10;

        if( mbx < width-1 && mby < height-1 )
        {
            MC_CLIP_ADD( ref_costs[idx0+0], idx0weight );
            MC_CLIP_ADD( ref_costs[idx0+1], idx1weight );
            MC_CLIP_ADD( ref_costs[idx2+0], idx2weight );
            MC_CLIP_ADD( ref_costs[idx2+1], idx3weight );
        }
        else
        {
            /* Note: this takes advantage of unsigned representation to
             * catch negative mbx/mby. */
            if( mby < height )
            {
                if( mbx < width )
                    MC_CLIP_ADD( ref_costs[idx0+0], idx0weight );
                if( mbx+1 < width )
                    MC_CLIP_ADD( ref_costs[idx0+1], idx1weight );
            }
            if( mby+1 < height )
            {
                if( mbx < width )
                    MC_CLIP_ADD( ref_costs[idx2+0], idx2weight );
                if( mbx+1 < width )
                    MC_CLIP_ADD( ref_costs[idx2+1], idx3weight );
            }
        }
    }
}

/* Conversion between float and Q8.8 fixed point (big-endian) for storage */
static void mbtree_fix8_pack( uint16_t *dst, float *src, int count )
{
    for( int i = 0; i < count; i++ )
        dst[i] = endian_fix16( (int16_t)(src[i] * 256.0f) );
}

static void mbtree_fix8_unpack( float *dst, uint16_t *src, int count )
{
    for( int i = 0; i < count; i++ )
        dst[i] = (int16_t)endian_fix16( src[i] ) * (1.0f/256.0f);
}
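/* E.g. 1.5f packs to (int16_t)384 = 0x0180, stored big-endian as the
 * bytes 01 80; unpacking reverses both steps. Fixing the stored
 * endianness keeps 2-pass stats files portable across architectures. */
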
void x264_mc_init( int cpu, x264_mc_functions_t *pf, int cpu_independent )
{
    pf->mc_luma   = mc_luma;
    pf->get_ref   = get_ref;
    pf->mc_chroma = mc_chroma;

    pf->avg[PIXEL_16x16]= pixel_avg_16x16;
    pf->avg[PIXEL_16x8] = pixel_avg_16x8;
    pf->avg[PIXEL_8x16] = pixel_avg_8x16;
    pf->avg[PIXEL_8x8]  = pixel_avg_8x8;
    pf->avg[PIXEL_8x4]  = pixel_avg_8x4;
    pf->avg[PIXEL_4x16] = pixel_avg_4x16;
    pf->avg[PIXEL_4x8]  = pixel_avg_4x8;
    pf->avg[PIXEL_4x4]  = pixel_avg_4x4;
    pf->avg[PIXEL_4x2]  = pixel_avg_4x2;
    pf->avg[PIXEL_2x8]  = pixel_avg_2x8;
    pf->avg[PIXEL_2x4]  = pixel_avg_2x4;
    pf->avg[PIXEL_2x2]  = pixel_avg_2x2;

    pf->weight    = mc_weight_wtab;
    pf->offsetadd = mc_weight_wtab;
    pf->offsetsub = mc_weight_wtab;
    pf->weight_cache = weight_cache;

    pf->copy_16x16_unaligned = mc_copy_w16;
    pf->copy[PIXEL_16x16] = mc_copy_w16;
    pf->copy[PIXEL_8x8]   = mc_copy_w8;
    pf->copy[PIXEL_4x4]   = mc_copy_w4;

    pf->store_interleave_chroma       = store_interleave_chroma;
    pf->load_deinterleave_chroma_fenc = load_deinterleave_chroma_fenc;
    pf->load_deinterleave_chroma_fdec = load_deinterleave_chroma_fdec;

    pf->plane_copy = x264_plane_copy_c;
    pf->plane_copy_swap = x264_plane_copy_swap_c;
    pf->plane_copy_interleave = x264_plane_copy_interleave_c;
    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_c;
    pf->plane_copy_deinterleave_yuyv = x264_plane_copy_deinterleave_c;
    pf->plane_copy_deinterleave_rgb = plane_copy_deinterleave_rgb_c;
    pf->plane_copy_deinterleave_v210 = plane_copy_deinterleave_v210_c;

    pf->hpel_filter = hpel_filter;

    pf->prefetch_fenc_400 = prefetch_fenc_null;
    pf->prefetch_fenc_420 = prefetch_fenc_null;
    pf->prefetch_fenc_422 = prefetch_fenc_null;
    pf->prefetch_ref  = prefetch_ref_null;
    pf->memcpy_aligned = memcpy;
    pf->memzero_aligned = memzero_aligned;
    pf->frame_init_lowres_core = frame_init_lowres_core;

    pf->integral_init4h = integral_init4h;
    pf->integral_init8h = integral_init8h;
    pf->integral_init4v = integral_init4v;
    pf->integral_init8v = integral_init8v;

    pf->mbtree_propagate_cost = mbtree_propagate_cost;
    pf->mbtree_propagate_list = mbtree_propagate_list;
    pf->mbtree_fix8_pack      = mbtree_fix8_pack;
    pf->mbtree_fix8_unpack    = mbtree_fix8_unpack;

#if HAVE_MMX
    x264_mc_init_mmx( cpu, pf );
#endif
#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
        x264_mc_init_altivec( pf );
#endif
#if HAVE_ARMV6
    x264_mc_init_arm( cpu, pf );
#endif
#if ARCH_AARCH64
    x264_mc_init_aarch64( cpu, pf );
#endif
#if HAVE_MSA
    if( cpu&X264_CPU_MSA )
        x264_mc_init_mips( cpu, pf );
#endif
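
    /* Keep the float-heavy mbtree functions in C when bit-exact,
     * CPU-independent output is requested, since their SIMD versions are
     * the usual source of cross-architecture rounding differences. */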
    if( cpu_independent )
    {
        pf->mbtree_propagate_cost = mbtree_propagate_cost;
        pf->mbtree_propagate_list = mbtree_propagate_list;
    }
}

void x264_frame_filter( x264_t *h, x264_frame_t *frame, int mb_y, int b_end )
{
    const int b_interlaced = PARAM_INTERLACED;
    int start = mb_y*16 - 8; // buffer = 4 for deblock + 3 for 6tap, rounded to 8
    int height = (b_end ? frame->i_lines[0] + 16*PARAM_INTERLACED : (mb_y+b_interlaced)*16) + 8;

    if( mb_y & b_interlaced )
        return;

    for( int p = 0; p < (CHROMA444 ? 3 : 1); p++ )
    {
        int stride = frame->i_stride[p];
        const int width = frame->i_width[p];
        int offs = start*stride - 8; // buffer = 3 for 6tap, aligned to 8 for simd

        if( !b_interlaced || h->mb.b_adaptive_mbaff )
            h->mc.hpel_filter(
                frame->filtered[p][1] + offs,
                frame->filtered[p][2] + offs,
                frame->filtered[p][3] + offs,
                frame->plane[p] + offs,
                stride, width + 16, height - start,
                h->scratch_buffer );

        if( b_interlaced )
        {
            /* MC must happen between pixels in the same field. */
            stride = frame->i_stride[p] << 1;
            start = (mb_y*16 >> 1) - 8;
            int height_fld = ((b_end ? frame->i_lines[p] : mb_y*16) >> 1) + 8;
            offs = start*stride - 8;
            for( int i = 0; i < 2; i++, offs += frame->i_stride[p] )
            {
                h->mc.hpel_filter(
                    frame->filtered_fld[p][1] + offs,
                    frame->filtered_fld[p][2] + offs,
                    frame->filtered_fld[p][3] + offs,
                    frame->plane_fld[p] + offs,
                    stride, width + 16, height_fld - start,
                    h->scratch_buffer );
            }
        }
    }
    /* generate integral image:
     * frame->integral contains 2 planes. in the upper plane, each element is
     * the sum of an 8x8 pixel region with top-left corner on that point.
     * in the lower plane, 4x4 sums (needed only with --partitions p4x4). */
    if( frame->integral )
    {
        int stride = frame->i_stride[0];
        if( start < 0 )
        {
            memset( frame->integral - PADV * stride - PADH, 0, stride * sizeof(uint16_t) );
            start = -PADV;
        }
        if( b_end )
            height += PADV-9;
        for( int y = start; y < height; y++ )
        {
            pixel    *pix  = frame->plane[0] + y * stride - PADH;
            uint16_t *sum8 = frame->integral + (y+1) * stride - PADH;
            uint16_t *sum4;
            if( h->frames.b_have_sub8x8_esa )
            {
                h->mc.integral_init4h( sum8, pix, stride );
                sum8 -= 8*stride;
                sum4 = sum8 + stride * (frame->i_lines[0] + PADV*2);
                if( y >= 8-PADV )
                    h->mc.integral_init4v( sum8, sum4, stride );
            }
            else
            {
                h->mc.integral_init8h( sum8, pix, stride );
                if( y >= 8-PADV )
                    h->mc.integral_init8v( sum8-8*stride, stride );
            }
        }
    }
}