/*****************************************************************************
 * frame.c: frame handling
 *****************************************************************************
 * Copyright (C) 2003-2018 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common.h"

static int align_stride( int x, int align, int disalign )
{
    x = ALIGN( x, align );
    if( !(x&(disalign-1)) )
        x += align;
    return x;
}

static int align_plane_size( int x, int disalign )
{
    if( !(x&(disalign-1)) )
        x += 128;
    return x;
}
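
/* Illustrative note (not from the original source): align_stride() rounds a
 * stride up to `align` bytes and then nudges it off any multiple of `disalign`;
 * align_plane_size() does the same for whole-plane sizes.  For example, with
 * align = 64 and disalign = 1<<10, a stride that rounds to 1024 pixels hits the
 * mask check (1024 & 1023 == 0) and is bumped to 1088.  The usual motivation is
 * to keep strides and plane sizes away from large powers of two, reducing
 * cache-set and page aliasing between rows and between planes. */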

static int frame_internal_csp( int external_csp )
{
    int csp = external_csp & X264_CSP_MASK;
    if( csp == X264_CSP_I400 )
        return X264_CSP_I400;
    if( csp >= X264_CSP_I420 && csp < X264_CSP_I422 )
        return X264_CSP_NV12;
    if( csp >= X264_CSP_I422 && csp < X264_CSP_I444 )
        return X264_CSP_NV16;
    if( csp >= X264_CSP_I444 && csp <= X264_CSP_RGB )
        return X264_CSP_I444;
    return X264_CSP_NONE;
}
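
/* Illustrative note (not from the original source): assuming the X264_CSP_*
 * ordering in x264.h, the range checks above mean that e.g. I420, YV12, NV12
 * and NV21 inputs are all stored internally as semi-planar NV12, the 4:2:2
 * family (I422, YV16, NV16, YUYV, UYVY, V210) as NV16, and the 4:4:4 and
 * packed RGB/BGR variants as planar I444. */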

static x264_frame_t *frame_new( x264_t *h, int b_fdec )
{
    x264_frame_t *frame;
    int i_csp = frame_internal_csp( h->param.i_csp );
    int i_mb_count = h->mb.i_mb_count;
    int i_stride, i_width, i_lines, luma_plane_count;
    int i_padv = PADV << PARAM_INTERLACED;
    int align = 16;
#if ARCH_X86 || ARCH_X86_64
    if( h->param.cpu&X264_CPU_CACHELINE_64 || h->param.cpu&X264_CPU_AVX512 )
        align = 64;
    else if( h->param.cpu&X264_CPU_CACHELINE_32 || h->param.cpu&X264_CPU_AVX )
        align = 32;
#endif
#if ARCH_PPC
    int disalign = 1<<9;
#else
    int disalign = 1<<10;
#endif

    /* ensure frame alignment after PADH is added */
    int padh_align = X264_MAX( align - PADH * sizeof(pixel), 0 ) / sizeof(pixel);
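
    /* Illustrative note (not from the original source): assuming PADH is 32
     * pixels as defined in common.h, an 8-bit build (sizeof(pixel) == 1) with
     * align = 64 gives padh_align = (64 - 32) / 1 = 32 extra pixels, so that
     * buffer + padh_align + PADH lands back on a 64-byte boundary.  When the
     * PADH border alone already satisfies `align`, no extra pad is intended. */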

    CHECKED_MALLOCZERO( frame, sizeof(x264_frame_t) );
    PREALLOC_INIT

    /* allocate frame data (+64 for extra data for me) */
    i_width  = h->mb.i_mb_width*16;
    i_lines  = h->mb.i_mb_height*16;
    i_stride = align_stride( i_width + 2*PADH, align, disalign );

    if( i_csp == X264_CSP_NV12 || i_csp == X264_CSP_NV16 )
    {
        luma_plane_count = 1;
        frame->i_plane = 2;
        for( int i = 0; i < 2; i++ )
        {
            frame->i_width[i] = i_width >> i;
            frame->i_lines[i] = i_lines >> (i && i_csp == X264_CSP_NV12);
            frame->i_stride[i] = i_stride;
        }
    }
    else if( i_csp == X264_CSP_I444 )
    {
        luma_plane_count = 3;
        frame->i_plane = 3;
        for( int i = 0; i < 3; i++ )
        {
            frame->i_width[i] = i_width;
            frame->i_lines[i] = i_lines;
            frame->i_stride[i] = i_stride;
        }
    }
    else if( i_csp == X264_CSP_I400 )
    {
        luma_plane_count = 1;
        frame->i_plane = 1;
        frame->i_width[0] = i_width;
        frame->i_lines[0] = i_lines;
        frame->i_stride[0] = i_stride;
    }
    else
        goto fail;

    frame->i_csp = i_csp;
    frame->i_width_lowres = frame->i_width[0]/2;
    frame->i_lines_lowres = frame->i_lines[0]/2;
    frame->i_stride_lowres = align_stride( frame->i_width_lowres + 2*PADH, align, disalign<<1 );

    for( int i = 0; i < h->param.i_bframe + 2; i++ )
        for( int j = 0; j < h->param.i_bframe + 2; j++ )
            PREALLOC( frame->i_row_satds[i][j], i_lines/16 * sizeof(int) );

    frame->i_poc = -1;
    frame->i_type = X264_TYPE_AUTO;
    frame->i_qpplus1 = X264_QP_AUTO;
    frame->i_pts = -1;
    frame->i_frame = -1;
    frame->i_frame_num = -1;
    frame->i_lines_completed = -1;
    frame->b_fdec = b_fdec;
    frame->i_pic_struct = PIC_STRUCT_AUTO;
    frame->i_field_cnt = -1;
    frame->i_duration =
    frame->i_cpb_duration =
    frame->i_dpb_output_delay =
    frame->i_cpb_delay = 0;
    frame->i_coded_fields_lookahead =
    frame->i_cpb_delay_lookahead = -1;

    frame->orig = frame;

    if( i_csp == X264_CSP_NV12 || i_csp == X264_CSP_NV16 )
    {
        int chroma_padv = i_padv >> (i_csp == X264_CSP_NV12);
        int chroma_plane_size = (frame->i_stride[1] * (frame->i_lines[1] + 2*chroma_padv));
        PREALLOC( frame->buffer[1], (chroma_plane_size + padh_align) * sizeof(pixel) );
        if( PARAM_INTERLACED )
            PREALLOC( frame->buffer_fld[1], (chroma_plane_size + padh_align) * sizeof(pixel) );
    }

    /* all 4 luma planes allocated together, since the cacheline split code
     * requires them to be in-phase wrt cacheline alignment. */
    for( int p = 0; p < luma_plane_count; p++ )
    {
        int luma_plane_size = align_plane_size( frame->i_stride[p] * (frame->i_lines[p] + 2*i_padv), disalign );
        if( h->param.analyse.i_subpel_refine && b_fdec )
            luma_plane_size *= 4;

        /* FIXME: Don't allocate both buffers in non-adaptive MBAFF. */
        PREALLOC( frame->buffer[p], (luma_plane_size + padh_align) * sizeof(pixel) );
        if( PARAM_INTERLACED )
            PREALLOC( frame->buffer_fld[p], (luma_plane_size + padh_align) * sizeof(pixel) );
    }

    frame->b_duplicate = 0;

    if( b_fdec ) /* fdec frame */
    {
        PREALLOC( frame->mb_type, i_mb_count * sizeof(int8_t) );
        PREALLOC( frame->mb_partition, i_mb_count * sizeof(uint8_t) );
        PREALLOC( frame->mv[0], 2*16 * i_mb_count * sizeof(int16_t) );
        PREALLOC( frame->mv16x16, 2*(i_mb_count+1) * sizeof(int16_t) );
        PREALLOC( frame->ref[0], 4 * i_mb_count * sizeof(int8_t) );
        if( h->param.i_bframe )
        {
            PREALLOC( frame->mv[1], 2*16 * i_mb_count * sizeof(int16_t) );
            PREALLOC( frame->ref[1], 4 * i_mb_count * sizeof(int8_t) );
        }
        else
        {
            frame->mv[1]  = NULL;
            frame->ref[1] = NULL;
        }
        PREALLOC( frame->i_row_bits, i_lines/16 * sizeof(int) );
        PREALLOC( frame->f_row_qp, i_lines/16 * sizeof(float) );
        PREALLOC( frame->f_row_qscale, i_lines/16 * sizeof(float) );
        if( h->param.analyse.i_me_method >= X264_ME_ESA )
            PREALLOC( frame->buffer[3], frame->i_stride[0] * (frame->i_lines[0] + 2*i_padv) * sizeof(uint16_t) << h->frames.b_have_sub8x8_esa );
        if( PARAM_INTERLACED )
            PREALLOC( frame->field, i_mb_count * sizeof(uint8_t) );
        if( h->param.analyse.b_mb_info )
            PREALLOC( frame->effective_qp, i_mb_count * sizeof(uint8_t) );
    }
    else /* fenc frame */
    {
        if( h->frames.b_have_lowres )
        {
            int luma_plane_size = align_plane_size( frame->i_stride_lowres * (frame->i_lines[0]/2 + 2*PADV), disalign );

            PREALLOC( frame->buffer_lowres, (4 * luma_plane_size + padh_align) * sizeof(pixel) );

            for( int j = 0; j <= !!h->param.i_bframe; j++ )
                for( int i = 0; i <= h->param.i_bframe; i++ )
                {
                    PREALLOC( frame->lowres_mvs[j][i], 2*h->mb.i_mb_count*sizeof(int16_t) );
                    PREALLOC( frame->lowres_mv_costs[j][i], h->mb.i_mb_count*sizeof(int) );
                }
            PREALLOC( frame->i_propagate_cost, i_mb_count * sizeof(uint16_t) );
            for( int j = 0; j <= h->param.i_bframe+1; j++ )
                for( int i = 0; i <= h->param.i_bframe+1; i++ )
                    PREALLOC( frame->lowres_costs[j][i], i_mb_count * sizeof(uint16_t) );
            /* mbtree asm can overread the input buffers, make sure we don't read outside of allocated memory. */
            prealloc_size += NATIVE_ALIGN;
        }
        if( h->param.rc.i_aq_mode )
        {
            PREALLOC( frame->f_qp_offset, h->mb.i_mb_count * sizeof(float) );
            PREALLOC( frame->f_qp_offset_aq, h->mb.i_mb_count * sizeof(float) );
            if( h->frames.b_have_lowres )
                PREALLOC( frame->i_inv_qscale_factor, (h->mb.i_mb_count+3) * sizeof(uint16_t) );
        }
    }

    PREALLOC_END( frame->base );

    if( i_csp == X264_CSP_NV12 || i_csp == X264_CSP_NV16 )
    {
        int chroma_padv = i_padv >> (i_csp == X264_CSP_NV12);
        frame->plane[1] = frame->buffer[1] + frame->i_stride[1] * chroma_padv + PADH + padh_align;
        if( PARAM_INTERLACED )
            frame->plane_fld[1] = frame->buffer_fld[1] + frame->i_stride[1] * chroma_padv + PADH + padh_align;
    }

    for( int p = 0; p < luma_plane_count; p++ )
    {
        int luma_plane_size = align_plane_size( frame->i_stride[p] * (frame->i_lines[p] + 2*i_padv), disalign );
        if( h->param.analyse.i_subpel_refine && b_fdec )
        {
            for( int i = 0; i < 4; i++ )
            {
                frame->filtered[p][i] = frame->buffer[p] + i*luma_plane_size + frame->i_stride[p] * i_padv + PADH + padh_align;
                frame->filtered_fld[p][i] = frame->buffer_fld[p] + i*luma_plane_size + frame->i_stride[p] * i_padv + PADH + padh_align;
            }
            frame->plane[p] = frame->filtered[p][0];
            frame->plane_fld[p] = frame->filtered_fld[p][0];
        }
        else
        {
            frame->filtered[p][0] = frame->plane[p] = frame->buffer[p] + frame->i_stride[p] * i_padv + PADH + padh_align;
            frame->filtered_fld[p][0] = frame->plane_fld[p] = frame->buffer_fld[p] + frame->i_stride[p] * i_padv + PADH + padh_align;
        }
    }

    if( b_fdec )
    {
        M32( frame->mv16x16[0] ) = 0;
        frame->mv16x16++;

        if( h->param.analyse.i_me_method >= X264_ME_ESA )
            frame->integral = (uint16_t*)frame->buffer[3] + frame->i_stride[0] * i_padv + PADH;
    }
    else
    {
        if( h->frames.b_have_lowres )
        {
            int luma_plane_size = align_plane_size( frame->i_stride_lowres * (frame->i_lines[0]/2 + 2*PADV), disalign );
            for( int i = 0; i < 4; i++ )
                frame->lowres[i] = frame->buffer_lowres + frame->i_stride_lowres * PADV + PADH + padh_align + i * luma_plane_size;

            for( int j = 0; j <= !!h->param.i_bframe; j++ )
                for( int i = 0; i <= h->param.i_bframe; i++ )
                    memset( frame->lowres_mvs[j][i], 0, 2*h->mb.i_mb_count*sizeof(int16_t) );

            frame->i_intra_cost = frame->lowres_costs[0][0];
            memset( frame->i_intra_cost, -1, (i_mb_count+3) * sizeof(uint16_t) );
            if( h->param.rc.i_aq_mode )
                /* shouldn't really be initialized, just silences a valgrind false-positive in x264_mbtree_propagate_cost_sse2 */
                memset( frame->i_inv_qscale_factor, 0, (h->mb.i_mb_count+3) * sizeof(uint16_t) );
        }
    }

    if( x264_pthread_mutex_init( &frame->mutex, NULL ) )
        goto fail;
    if( x264_pthread_cond_init( &frame->cv, NULL ) )
        goto fail;

#if HAVE_OPENCL
    frame->opencl.ocl = h->opencl.ocl;
#endif

    return frame;

fail:
    x264_free( frame );
    return NULL;
}

void x264_frame_delete( x264_frame_t *frame )
{
    /* Duplicate frames are blank copies of real frames (including pointers),
     * so freeing those pointers would cause a double free later. */
    if( !frame->b_duplicate )
    {
        x264_free( frame->base );

        if( frame->param && frame->param->param_free )
            frame->param->param_free( frame->param );
        if( frame->mb_info_free )
            frame->mb_info_free( frame->mb_info );
        if( frame->extra_sei.sei_free )
        {
            for( int i = 0; i < frame->extra_sei.num_payloads; i++ )
                frame->extra_sei.sei_free( frame->extra_sei.payloads[i].payload );
            frame->extra_sei.sei_free( frame->extra_sei.payloads );
        }
        x264_pthread_mutex_destroy( &frame->mutex );
        x264_pthread_cond_destroy( &frame->cv );
#if HAVE_OPENCL
        x264_opencl_frame_delete( frame );
#endif
    }
    x264_free( frame );
}

static int get_plane_ptr( x264_t *h, x264_picture_t *src, uint8_t **pix, int *stride, int plane, int xshift, int yshift )
{
    int width = h->param.i_width >> xshift;
    int height = h->param.i_height >> yshift;
    *pix = src->img.plane[plane];
    *stride = src->img.i_stride[plane];
    if( src->img.i_csp & X264_CSP_VFLIP )
    {
        *pix += (height-1) * *stride;
        *stride = -*stride;
    }
    if( width > abs(*stride) )
    {
        x264_log( h, X264_LOG_ERROR, "Input picture width (%d) is greater than stride (%d)\n", width, *stride );
        return -1;
    }
    return 0;
}

#define get_plane_ptr(...) do { if( get_plane_ptr(__VA_ARGS__) < 0 ) return -1; } while( 0 )
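
/* Descriptive note (not from the original source): the macro above shadows the
 * function of the same name, so every later use of get_plane_ptr() in this file
 * expands to the call plus an early `return -1` on failure, keeping the error
 * handling in x264_frame_copy_picture() compact. */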

int x264_frame_copy_picture( x264_t *h, x264_frame_t *dst, x264_picture_t *src )
{
    int i_csp = src->img.i_csp & X264_CSP_MASK;
    if( dst->i_csp != frame_internal_csp( i_csp ) )
    {
        x264_log( h, X264_LOG_ERROR, "Invalid input colorspace\n" );
        return -1;
    }

#if HIGH_BIT_DEPTH
    if( !(src->img.i_csp & X264_CSP_HIGH_DEPTH) )
    {
        x264_log( h, X264_LOG_ERROR, "This build of x264 requires high depth input. Rebuild to support 8-bit input.\n" );
        return -1;
    }
#else
    if( src->img.i_csp & X264_CSP_HIGH_DEPTH )
    {
        x264_log( h, X264_LOG_ERROR, "This build of x264 requires 8-bit input. Rebuild to support high depth input.\n" );
        return -1;
    }
#endif

    if( BIT_DEPTH != 10 && i_csp == X264_CSP_V210 )
    {
        x264_log( h, X264_LOG_ERROR, "v210 input is only compatible with bit-depth of 10 bits\n" );
        return -1;
    }

    if( src->i_type < X264_TYPE_AUTO || src->i_type > X264_TYPE_KEYFRAME )
    {
        x264_log( h, X264_LOG_WARNING, "forced frame type (%d) at %d is unknown\n", src->i_type, h->frames.i_input );
        dst->i_forced_type = X264_TYPE_AUTO;
    }
    else
        dst->i_forced_type = src->i_type;

    dst->i_type       = dst->i_forced_type;
    dst->i_qpplus1    = src->i_qpplus1;
    dst->i_pts        = dst->i_reordered_pts = src->i_pts;
    dst->param        = src->param;
    dst->i_pic_struct = src->i_pic_struct;
    dst->extra_sei    = src->extra_sei;
    dst->opaque       = src->opaque;
    dst->mb_info      = h->param.analyse.b_mb_info ? src->prop.mb_info : NULL;
    dst->mb_info_free = h->param.analyse.b_mb_info ? src->prop.mb_info_free : NULL;

    uint8_t *pix[3];
    int stride[3];

    if( i_csp == X264_CSP_YUYV || i_csp == X264_CSP_UYVY )
    {
        int p = i_csp == X264_CSP_UYVY;
        h->mc.plane_copy_deinterleave_yuyv( dst->plane[p], dst->i_stride[p], dst->plane[p^1], dst->i_stride[p^1],
                                            (pixel*)src->img.plane[0], src->img.i_stride[0], h->param.i_width, h->param.i_height );
    }
    else if( i_csp == X264_CSP_V210 )
    {
        stride[0] = src->img.i_stride[0];
        pix[0] = src->img.plane[0];

        h->mc.plane_copy_deinterleave_v210( dst->plane[0], dst->i_stride[0],
                                            dst->plane[1], dst->i_stride[1],
                                            (uint32_t *)pix[0], stride[0]/sizeof(uint32_t), h->param.i_width, h->param.i_height );
    }
    else if( i_csp >= X264_CSP_BGR )
    {
        stride[0] = src->img.i_stride[0];
        pix[0] = src->img.plane[0];
        if( src->img.i_csp & X264_CSP_VFLIP )
        {
            pix[0] += (h->param.i_height-1) * stride[0];
            stride[0] = -stride[0];
        }
        int b = i_csp==X264_CSP_RGB;

        h->mc.plane_copy_deinterleave_rgb( dst->plane[1+b], dst->i_stride[1+b],
                                           dst->plane[0], dst->i_stride[0],
                                           dst->plane[2-b], dst->i_stride[2-b],
                                           (pixel*)pix[0], stride[0]/sizeof(pixel), i_csp==X264_CSP_BGRA ? 4 : 3, h->param.i_width, h->param.i_height );
    }
    else
    {
        int v_shift = CHROMA_V_SHIFT;
        get_plane_ptr( h, src, &pix[0], &stride[0], 0, 0, 0 );
        h->mc.plane_copy( dst->plane[0], dst->i_stride[0], (pixel*)pix[0],
                          stride[0]/sizeof(pixel), h->param.i_width, h->param.i_height );
        if( i_csp == X264_CSP_NV12 || i_csp == X264_CSP_NV16 )
        {
            get_plane_ptr( h, src, &pix[1], &stride[1], 1, 0, v_shift );
            h->mc.plane_copy( dst->plane[1], dst->i_stride[1], (pixel*)pix[1],
                              stride[1]/sizeof(pixel), h->param.i_width, h->param.i_height>>v_shift );
        }
        else if( i_csp == X264_CSP_NV21 )
        {
            get_plane_ptr( h, src, &pix[1], &stride[1], 1, 0, v_shift );
            h->mc.plane_copy_swap( dst->plane[1], dst->i_stride[1], (pixel*)pix[1],
                                   stride[1]/sizeof(pixel), h->param.i_width>>1, h->param.i_height>>v_shift );
        }
        else if( i_csp == X264_CSP_I420 || i_csp == X264_CSP_I422 || i_csp == X264_CSP_YV12 || i_csp == X264_CSP_YV16 )
        {
            int uv_swap = i_csp == X264_CSP_YV12 || i_csp == X264_CSP_YV16;
            get_plane_ptr( h, src, &pix[1], &stride[1], uv_swap ? 2 : 1, 1, v_shift );
            get_plane_ptr( h, src, &pix[2], &stride[2], uv_swap ? 1 : 2, 1, v_shift );
            h->mc.plane_copy_interleave( dst->plane[1], dst->i_stride[1],
                                         (pixel*)pix[1], stride[1]/sizeof(pixel),
                                         (pixel*)pix[2], stride[2]/sizeof(pixel),
                                         h->param.i_width>>1, h->param.i_height>>v_shift );
        }
        else if( i_csp == X264_CSP_I444 || i_csp == X264_CSP_YV24 )
        {
            get_plane_ptr( h, src, &pix[1], &stride[1], i_csp==X264_CSP_I444 ? 1 : 2, 0, 0 );
            get_plane_ptr( h, src, &pix[2], &stride[2], i_csp==X264_CSP_I444 ? 2 : 1, 0, 0 );
            h->mc.plane_copy( dst->plane[1], dst->i_stride[1], (pixel*)pix[1],
                              stride[1]/sizeof(pixel), h->param.i_width, h->param.i_height );
            h->mc.plane_copy( dst->plane[2], dst->i_stride[2], (pixel*)pix[2],
                              stride[2]/sizeof(pixel), h->param.i_width, h->param.i_height );
        }
    }
    return 0;
}

static ALWAYS_INLINE void pixel_memset( pixel *dst, pixel *src, int len, int size )
{
    uint8_t *dstp = (uint8_t*)dst;
    uint32_t v1 = *src;
    uint32_t v2 = size == 1 ? v1 + (v1 <<  8) : M16( src );
    uint32_t v4 = size <= 2 ? v2 + (v2 << 16) : M32( src );
    int i = 0;
    len *= size;

    /* Align the input pointer if it isn't already */
    if( (intptr_t)dstp & (WORD_SIZE - 1) )
    {
        if( size <= 2 && ((intptr_t)dstp & 3) )
        {
            if( size == 1 && ((intptr_t)dstp & 1) )
                dstp[i++] = v1;

            if( (intptr_t)dstp & 2 )
            {
                M16( dstp+i ) = v2;
                i += 2;
            }
        }
        if( WORD_SIZE == 8 && (intptr_t)dstp & 4 )
        {
            M32( dstp+i ) = v4;
            i += 4;
        }
    }

    /* Main copy loop */
    if( WORD_SIZE == 8 )
    {
        uint64_t v8 = v4 + ((uint64_t)v4<<32);
        for( ; i < len - 7; i+=8 )
            M64( dstp+i ) = v8;
    }
    for( ; i < len - 3; i+=4 )
        M32( dstp+i ) = v4;

    /* Finish up the last few bytes */
    if( size <= 2 )
    {
        if( i < len - 1 )
        {
            M16( dstp+i ) = v2;
            i += 2;
        }
        if( size == 1 && i != len )
            dstp[i] = v1;
    }
}
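
/* Illustrative note (not from the original source): pixel_memset() replicates a
 * `size`-byte pattern taken from *src across `len` elements of dst.  For
 * example, in an 8-bit build, pixel_memset( dst, src, 16, 2 ) reads the
 * two-byte U/V pair at src, builds 4- and 8-byte copies of it, and writes 32
 * bytes of that repeating pattern, using word-sized stores wherever the
 * destination alignment allows. */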

static ALWAYS_INLINE void plane_expand_border( pixel *pix, int i_stride, int i_width, int i_height, int i_padh, int i_padv, int b_pad_top, int b_pad_bottom, int b_chroma )
{
#define PPIXEL(x, y) ( pix + (x) + (y)*i_stride )
    for( int y = 0; y < i_height; y++ )
    {
        /* left band */
        pixel_memset( PPIXEL(-i_padh, y), PPIXEL(0, y), i_padh>>b_chroma, sizeof(pixel)<<b_chroma );
        /* right band */
        pixel_memset( PPIXEL(i_width, y), PPIXEL(i_width-1-b_chroma, y), i_padh>>b_chroma, sizeof(pixel)<<b_chroma );
    }
    /* upper band */
    if( b_pad_top )
        for( int y = 0; y < i_padv; y++ )
            memcpy( PPIXEL(-i_padh, -y-1), PPIXEL(-i_padh, 0), (i_width+2*i_padh) * sizeof(pixel) );
    /* lower band */
    if( b_pad_bottom )
        for( int y = 0; y < i_padv; y++ )
            memcpy( PPIXEL(-i_padh, i_height+y), PPIXEL(-i_padh, i_height-1), (i_width+2*i_padh) * sizeof(pixel) );
#undef PPIXEL
}

void x264_frame_expand_border( x264_t *h, x264_frame_t *frame, int mb_y )
{
    int pad_top = mb_y == 0;
    int pad_bot = mb_y == h->mb.i_mb_height - (1 << SLICE_MBAFF);
    int b_start = mb_y == h->i_threadslice_start;
    int b_end   = mb_y == h->i_threadslice_end - (1 << SLICE_MBAFF);
    if( mb_y & SLICE_MBAFF )
        return;
    for( int i = 0; i < frame->i_plane; i++ )
    {
        int h_shift = i && CHROMA_H_SHIFT;
        int v_shift = i && CHROMA_V_SHIFT;
        int stride = frame->i_stride[i];
        int width = 16*h->mb.i_mb_width;
        int height = (pad_bot ? 16*(h->mb.i_mb_height - mb_y) >> SLICE_MBAFF : 16) >> v_shift;
        int padh = PADH;
        int padv = PADV >> v_shift;
        // buffer: 2 chroma, 3 luma (rounded to 4) because deblocking goes beyond the top of the mb
        if( b_end && !b_start )
            height += 4 >> (v_shift + SLICE_MBAFF);
        pixel *pix;
        int starty = 16*mb_y - 4*!b_start;
        if( SLICE_MBAFF )
        {
            // border samples for each field are extended separately
            pix = frame->plane_fld[i] + (starty*stride >> v_shift);
            plane_expand_border( pix, stride*2, width, height, padh, padv, pad_top, pad_bot, h_shift );
            plane_expand_border( pix+stride, stride*2, width, height, padh, padv, pad_top, pad_bot, h_shift );

            height = (pad_bot ? 16*(h->mb.i_mb_height - mb_y) : 32) >> v_shift;
            if( b_end && !b_start )
                height += 4 >> v_shift;
            pix = frame->plane[i] + (starty*stride >> v_shift);
            plane_expand_border( pix, stride, width, height, padh, padv, pad_top, pad_bot, h_shift );
        }
        else
        {
            pix = frame->plane[i] + (starty*stride >> v_shift);
            plane_expand_border( pix, stride, width, height, padh, padv, pad_top, pad_bot, h_shift );
        }
    }
}

void x264_frame_expand_border_filtered( x264_t *h, x264_frame_t *frame, int mb_y, int b_end )
{
    /* during filtering, 8 extra pixels were filtered on each edge,
     * but up to 3 of the horizontal ones may be wrong.
     * we want to expand border from the last filtered pixel */
    int b_start = !mb_y;
    int width = 16*h->mb.i_mb_width + 8;
    int height = b_end ? (16*(h->mb.i_mb_height - mb_y) >> SLICE_MBAFF) + 16 : 16;
    int padh = PADH - 4;
    int padv = PADV - 8;
    for( int p = 0; p < (CHROMA444 ? 3 : 1); p++ )
        for( int i = 1; i < 4; i++ )
        {
            int stride = frame->i_stride[p];
            // buffer: 8 luma, to match the hpel filter
            pixel *pix;
            if( SLICE_MBAFF )
            {
                pix = frame->filtered_fld[p][i] + (16*mb_y - 16) * stride - 4;
                plane_expand_border( pix, stride*2, width, height, padh, padv, b_start, b_end, 0 );
                plane_expand_border( pix+stride, stride*2, width, height, padh, padv, b_start, b_end, 0 );
            }
            pix = frame->filtered[p][i] + (16*mb_y - 8) * stride - 4;
            plane_expand_border( pix, stride, width, height << SLICE_MBAFF, padh, padv, b_start, b_end, 0 );
        }
}

void x264_frame_expand_border_lowres( x264_frame_t *frame )
{
    for( int i = 0; i < 4; i++ )
        plane_expand_border( frame->lowres[i], frame->i_stride_lowres, frame->i_width_lowres, frame->i_lines_lowres, PADH, PADV, 1, 1, 0 );
}

void x264_frame_expand_border_chroma( x264_t *h, x264_frame_t *frame, int plane )
{
    int v_shift = CHROMA_V_SHIFT;
    plane_expand_border( frame->plane[plane], frame->i_stride[plane], 16*h->mb.i_mb_width, 16*h->mb.i_mb_height>>v_shift,
                         PADH, PADV>>v_shift, 1, 1, CHROMA_H_SHIFT );
}

void x264_frame_expand_border_mod16( x264_t *h, x264_frame_t *frame )
{
    for( int i = 0; i < frame->i_plane; i++ )
    {
        int i_width = h->param.i_width;
        int h_shift = i && CHROMA_H_SHIFT;
        int v_shift = i && CHROMA_V_SHIFT;
        int i_height = h->param.i_height >> v_shift;
        int i_padx = (h->mb.i_mb_width * 16 - h->param.i_width);
        int i_pady = (h->mb.i_mb_height * 16 - h->param.i_height) >> v_shift;

        if( i_padx )
        {
            for( int y = 0; y < i_height; y++ )
                pixel_memset( &frame->plane[i][y*frame->i_stride[i] + i_width],
                              &frame->plane[i][y*frame->i_stride[i] + i_width - 1-h_shift],
                              i_padx>>h_shift, sizeof(pixel)<<h_shift );
        }
        if( i_pady )
        {
            for( int y = i_height; y < i_height + i_pady; y++ )
                memcpy( &frame->plane[i][y*frame->i_stride[i]],
                        &frame->plane[i][(i_height-(~y&PARAM_INTERLACED)-1)*frame->i_stride[i]],
                        (i_width + i_padx) * sizeof(pixel) );
        }
    }
}
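
/* Illustrative note (not from the original source): for a 1920x1080 progressive
 * 4:2:0 encode, i_mb_height is 68, so the luma plane is padded from 1080 to
 * 1088 rows (i_pady = 8) by repeating the last real row, and the chroma plane
 * from 540 to 544 rows (i_pady = 4); 1920 is already a multiple of 16, so
 * i_padx = 0 and no column padding is needed. */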

void x264_expand_border_mbpair( x264_t *h, int mb_x, int mb_y )
{
    for( int i = 0; i < h->fenc->i_plane; i++ )
    {
        int v_shift = i && CHROMA_V_SHIFT;
        int stride = h->fenc->i_stride[i];
        int height = h->param.i_height >> v_shift;
        int pady = (h->mb.i_mb_height * 16 - h->param.i_height) >> v_shift;
        pixel *fenc = h->fenc->plane[i] + 16*mb_x;
        for( int y = height; y < height + pady; y++ )
            memcpy( fenc + y*stride, fenc + (height-1)*stride, 16*sizeof(pixel) );
    }
}

/* threading */
void x264_frame_cond_broadcast( x264_frame_t *frame, int i_lines_completed )
{
    x264_pthread_mutex_lock( &frame->mutex );
    frame->i_lines_completed = i_lines_completed;
    x264_pthread_cond_broadcast( &frame->cv );
    x264_pthread_mutex_unlock( &frame->mutex );
}

void x264_frame_cond_wait( x264_frame_t *frame, int i_lines_completed )
{
    x264_pthread_mutex_lock( &frame->mutex );
    while( frame->i_lines_completed < i_lines_completed )
        x264_pthread_cond_wait( &frame->cv, &frame->mutex );
    x264_pthread_mutex_unlock( &frame->mutex );
}

void x264_threadslice_cond_broadcast( x264_t *h, int pass )
{
    x264_pthread_mutex_lock( &h->mutex );
    h->i_threadslice_pass = pass;
    if( pass > 0 )
        x264_pthread_cond_broadcast( &h->cv );
    x264_pthread_mutex_unlock( &h->mutex );
}

void x264_threadslice_cond_wait( x264_t *h, int pass )
{
    x264_pthread_mutex_lock( &h->mutex );
    while( h->i_threadslice_pass < pass )
        x264_pthread_cond_wait( &h->cv, &h->mutex );
    x264_pthread_mutex_unlock( &h->mutex );
}

int x264_frame_new_slice( x264_t *h, x264_frame_t *frame )
{
    if( h->param.i_slice_count_max )
    {
        int slice_count;
        if( h->param.b_sliced_threads )
            slice_count = x264_pthread_fetch_and_add( &frame->i_slice_count, 1, &frame->mutex );
        else
            slice_count = frame->i_slice_count++;
        if( slice_count >= h->param.i_slice_count_max )
            return -1;
    }
    return 0;
}

/* list operators */
void x264_frame_push( x264_frame_t **list, x264_frame_t *frame )
{
    int i = 0;
    while( list[i] ) i++;
    list[i] = frame;
}

x264_frame_t *x264_frame_pop( x264_frame_t **list )
{
    x264_frame_t *frame;
    int i = 0;
    assert( list[0] );
    while( list[i+1] ) i++;
    frame = list[i];
    list[i] = NULL;
    return frame;
}

void x264_frame_unshift( x264_frame_t **list, x264_frame_t *frame )
{
    int i = 0;
    while( list[i] ) i++;
    while( i-- )
        list[i+1] = list[i];
    list[0] = frame;
}

x264_frame_t *x264_frame_shift( x264_frame_t **list )
{
    x264_frame_t *frame = list[0];
    int i;
    for( i = 0; list[i]; i++ )
        list[i] = list[i+1];
    assert(frame);
    return frame;
}
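
/* Descriptive note (not from the original source): these lists are
 * NULL-terminated arrays of frame pointers.  push() appends at the first NULL
 * slot, pop() removes the last non-NULL entry, unshift() shifts everything up
 * and inserts at index 0, and shift() removes the head and closes the gap.
 * None of these helpers take a lock; the thread-safe, bounded variant is the
 * x264_sync_frame_list_* family further below. */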

void x264_frame_push_unused( x264_t *h, x264_frame_t *frame )
{
    assert( frame->i_reference_count > 0 );
    frame->i_reference_count--;
    if( frame->i_reference_count == 0 )
        x264_frame_push( h->frames.unused[frame->b_fdec], frame );
}

x264_frame_t *x264_frame_pop_unused( x264_t *h, int b_fdec )
{
    x264_frame_t *frame;
    if( h->frames.unused[b_fdec][0] )
        frame = x264_frame_pop( h->frames.unused[b_fdec] );
    else
        frame = frame_new( h, b_fdec );
    if( !frame )
        return NULL;
    frame->b_last_minigop_bframe = 0;
    frame->i_reference_count = 1;
    frame->b_intra_calculated = 0;
    frame->b_scenecut = 1;
    frame->b_keyframe = 0;
    frame->b_corrupt = 0;
    frame->i_slice_count = h->param.b_sliced_threads ? h->param.i_threads : 1;

    memset( frame->weight, 0, sizeof(frame->weight) );
    memset( frame->f_weighted_cost_delta, 0, sizeof(frame->f_weighted_cost_delta) );

    return frame;
}

void x264_frame_push_blank_unused( x264_t *h, x264_frame_t *frame )
{
    assert( frame->i_reference_count > 0 );
    frame->i_reference_count--;
    if( frame->i_reference_count == 0 )
        x264_frame_push( h->frames.blank_unused, frame );
}

x264_frame_t *x264_frame_pop_blank_unused( x264_t *h )
{
    x264_frame_t *frame;
    if( h->frames.blank_unused[0] )
        frame = x264_frame_pop( h->frames.blank_unused );
    else
        frame = x264_malloc( sizeof(x264_frame_t) );
    if( !frame )
        return NULL;
    frame->b_duplicate = 1;
    frame->i_reference_count = 1;
    return frame;
}

void x264_weight_scale_plane( x264_t *h, pixel *dst, intptr_t i_dst_stride, pixel *src, intptr_t i_src_stride,
                              int i_width, int i_height, x264_weight_t *w )
{
    /* Weight horizontal strips of height 16. This was found to be the optimal height
     * in terms of the cache loads. */
    while( i_height > 0 )
    {
        int x;
        for( x = 0; x < i_width-8; x += 16 )
            w->weightfn[16>>2]( dst+x, i_dst_stride, src+x, i_src_stride, w, X264_MIN( i_height, 16 ) );
        if( x < i_width )
            w->weightfn[ 8>>2]( dst+x, i_dst_stride, src+x, i_src_stride, w, X264_MIN( i_height, 16 ) );
        i_height -= 16;
        dst += 16 * i_dst_stride;
        src += 16 * i_src_stride;
    }
}
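
/* Illustrative note (not from the original source): the width loop above covers
 * each strip in 16-pixel columns and falls back to one 8-pixel call for any
 * leftover tail.  For i_width = 72, the 16-wide kernel handles columns 0..63
 * and the 8-wide kernel handles 64..71; weightfn[] is indexed by width/4,
 * hence the 16>>2 and 8>>2 subscripts. */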

void x264_frame_delete_list( x264_frame_t **list )
{
    int i = 0;
    if( !list )
        return;
    while( list[i] )
        x264_frame_delete( list[i++] );
    x264_free( list );
}

int x264_sync_frame_list_init( x264_sync_frame_list_t *slist, int max_size )
{
    if( max_size < 0 )
        return -1;
    slist->i_max_size = max_size;
    slist->i_size = 0;
    CHECKED_MALLOCZERO( slist->list, (max_size+1) * sizeof(x264_frame_t*) );
    if( x264_pthread_mutex_init( &slist->mutex, NULL ) ||
        x264_pthread_cond_init( &slist->cv_fill, NULL ) ||
        x264_pthread_cond_init( &slist->cv_empty, NULL ) )
        return -1;
    return 0;
fail:
    return -1;
}

void x264_sync_frame_list_delete( x264_sync_frame_list_t *slist )
{
    x264_pthread_mutex_destroy( &slist->mutex );
    x264_pthread_cond_destroy( &slist->cv_fill );
    x264_pthread_cond_destroy( &slist->cv_empty );
    x264_frame_delete_list( slist->list );
}

void x264_sync_frame_list_push( x264_sync_frame_list_t *slist, x264_frame_t *frame )
{
    x264_pthread_mutex_lock( &slist->mutex );
    while( slist->i_size == slist->i_max_size )
        x264_pthread_cond_wait( &slist->cv_empty, &slist->mutex );
    slist->list[ slist->i_size++ ] = frame;
    x264_pthread_mutex_unlock( &slist->mutex );
    x264_pthread_cond_broadcast( &slist->cv_fill );
}

x264_frame_t *x264_sync_frame_list_pop( x264_sync_frame_list_t *slist )
{
    x264_frame_t *frame;
    x264_pthread_mutex_lock( &slist->mutex );
    while( !slist->i_size )
        x264_pthread_cond_wait( &slist->cv_fill, &slist->mutex );
    frame = slist->list[ --slist->i_size ];
    slist->list[ slist->i_size ] = NULL;
    x264_pthread_cond_broadcast( &slist->cv_empty );
    x264_pthread_mutex_unlock( &slist->mutex );
    return frame;
}