/*****************************************************************************
 * rdo.c: rate-distortion optimization
 *****************************************************************************
 * Copyright (C) 2005-2018 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/
/* duplicate all the writer functions, just calculating bit cost
 * instead of writing the bitstream.
 * TODO: use these for fast 1st pass too. */

#define RDO_SKIP_BS 1

/* Transition and size tables for abs<9 MVD and residual coding */
/* Consist of i_prefix-2 1s, one zero, and a bypass sign bit */
#define x264_cabac_transition_unary x264_template(cabac_transition_unary)
uint8_t x264_cabac_transition_unary[15][128];
#define x264_cabac_size_unary x264_template(cabac_size_unary)
uint16_t x264_cabac_size_unary[15][128];

/* Transition and size tables for abs>9 MVD */
/* Consist of 5 1s and a bypass sign bit */
static uint8_t cabac_transition_5ones[128];
static uint16_t cabac_size_5ones[128];

/* CAVLC: produces exactly the same bit count as a normal encode */
/* this probably still leaves some unnecessary computations */
#define bs_write1(s,v)     ((s)->i_bits_encoded += 1)
#define bs_write(s,n,v)    ((s)->i_bits_encoded += (n))
#define bs_write_ue(s,v)   ((s)->i_bits_encoded += bs_size_ue(v))
#define bs_write_se(s,v)   ((s)->i_bits_encoded += bs_size_se(v))
#define bs_write_te(s,v,l) ((s)->i_bits_encoded += bs_size_te(v,l))
#undef  x264_macroblock_write_cavlc
#define x264_macroblock_write_cavlc  static macroblock_size_cavlc
#include "cavlc.c"

/* CABAC: not exactly the same. x264_cabac_size_decision() keeps track of
 * fractional bits, but only finite precision. */
#undef  x264_cabac_encode_decision
#undef  x264_cabac_encode_decision_noup
#undef  x264_cabac_encode_bypass
#undef  x264_cabac_encode_terminal
#undef  x264_cabac_encode_ue_bypass
#define x264_cabac_encode_decision(c,x,v)      x264_cabac_size_decision(c,x,v)
#define x264_cabac_encode_decision_noup(c,x,v) x264_cabac_size_decision_noup(c,x,v)
#define x264_cabac_encode_terminal(c)          ((c)->f8_bits_encoded += 7)
#define x264_cabac_encode_bypass(c,v)          ((c)->f8_bits_encoded += 256)
#define x264_cabac_encode_ue_bypass(c,e,v)     ((c)->f8_bits_encoded += (bs_size_ue_big(v+(1<<e)-1)-e)<<8)
#undef  x264_macroblock_write_cabac
#define x264_macroblock_write_cabac  static macroblock_size_cabac
#include "cabac.c"
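
/* In these size stubs the counter f8_bits_encoded is kept in 1/256ths of a bit
 * (CABAC_SIZE_BITS below is 8): a bypass bin always costs one full bit (256 units),
 * a terminal bin costs 7 units (a small fraction of a bit), and an exp-golomb bypass
 * suffix costs its whole-bit length shifted into the same fixed-point scale. The RD
 * cost functions below multiply f8_bits_encoded by i_lambda2 and shift the extra
 * precision back out, e.g. "(... * i_lambda2 + 32768) >> 16" in rd_cost_mb versus
 * "(... * i_lambda2 + 128) >> 8" where 8 extra bits of precision are kept. */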

#define COPY_CABAC h->mc.memcpy_aligned( &cabac_tmp.f8_bits_encoded, &h->cabac.f8_bits_encoded, \
        sizeof(int) + (CHROMA444 ? 1024+12 : 460) )
#define COPY_CABAC_PART( pos, size ) memcpy( &cb->state[pos], &h->cabac.state[pos], size )
static ALWAYS_INLINE uint64_t cached_hadamard( x264_t *h, int size, int x, int y )
{
    static const uint8_t hadamard_shift_x[4] = {4, 4, 3, 3};
    static const uint8_t hadamard_shift_y[4] = {4-0, 3-0, 4-1, 3-1};
    static const uint8_t hadamard_offset[4] = {0, 1, 3, 5};
    int cache_index = (x >> hadamard_shift_x[size]) + (y >> hadamard_shift_y[size])
                    + hadamard_offset[size];
    uint64_t res = h->mb.pic.fenc_hadamard_cache[cache_index];
    if( res )
        return res - 1;
    else
    {
        pixel *fenc = h->mb.pic.p_fenc[0] + x + y*FENC_STRIDE;
        res = h->pixf.hadamard_ac[size]( fenc, FENC_STRIDE );
        h->mb.pic.fenc_hadamard_cache[cache_index] = res + 1;
        return res;
    }
}

static ALWAYS_INLINE int cached_satd( x264_t *h, int size, int x, int y )
{
    static const uint8_t satd_shift_x[3] = {3, 2, 2};
    static const uint8_t satd_shift_y[3] = {2-1, 3-2, 2-2};
    static const uint8_t satd_offset[3] = {0, 8, 16};
    int cache_index = (x >> satd_shift_x[size - PIXEL_8x4]) + (y >> satd_shift_y[size - PIXEL_8x4])
                    + satd_offset[size - PIXEL_8x4];
    int res = h->mb.pic.fenc_satd_cache[cache_index];
    if( res )
        return res - 1;
    else
    {
        pixel *fenc = h->mb.pic.p_fenc[0] + x + y*FENC_STRIDE;
        int dc = h->pixf.sad[size]( fenc, FENC_STRIDE, (pixel*)x264_zero, 0 ) >> 1;
        res = h->pixf.satd[size]( fenc, FENC_STRIDE, (pixel*)x264_zero, 0 ) - dc;
        h->mb.pic.fenc_satd_cache[cache_index] = res + 1;
        return res;
    }
}
/* Psy RD distortion metric: SSD plus "Absolute Difference of Complexities" */
/* SATD and SA8D are used to measure block complexity. */
/* Differences in both the SATD and SA8D scores are used, to avoid bias from the DCT size: using SATD */
/* alone, for example, results in overuse of 8x8dct, while the opposite occurs when using only SA8D. */
/* FIXME: Is there a better metric than averaged SATD/SA8D difference for complexity difference? */
/* Hadamard transform is recursive, so a SATD+SA8D can be done faster by taking advantage of this fact. */
/* This optimization can also be used in non-RD transform decision. */
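
/* Concretely, ssd_plane() below charges
 *     ssd + ((|complexity(fdec) - complexity(fenc)| * psy_rd * psy_rd_lambda + 128) >> 8)
 * on the luma plane, where "complexity" is the averaged AC SATD/SA8D for blocks of 8x8
 * and larger and DC-corrected SATD for smaller partitions; the fenc-side complexities
 * come from cached_hadamard()/cached_satd() above, since the source block never changes
 * between the candidate modes being compared. */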
static inline int ssd_plane( x264_t *h, int size, int p, int x, int y )
{
    int satd = 0;
    pixel *fdec = h->mb.pic.p_fdec[p] + x + y*FDEC_STRIDE;
    pixel *fenc = h->mb.pic.p_fenc[p] + x + y*FENC_STRIDE;
    if( p == 0 && h->mb.i_psy_rd )
    {
        /* If the plane is smaller than 8x8, we can't do an SA8D; this probably isn't a big problem. */
        if( size <= PIXEL_8x8 )
        {
            uint64_t fdec_acs = h->pixf.hadamard_ac[size]( fdec, FDEC_STRIDE );
            uint64_t fenc_acs = cached_hadamard( h, size, x, y );
            satd = abs((int32_t)fdec_acs - (int32_t)fenc_acs)
                 + abs((int32_t)(fdec_acs>>32) - (int32_t)(fenc_acs>>32));
            satd >>= 1;
        }
        else
        {
            int dc = h->pixf.sad[size]( fdec, FDEC_STRIDE, (pixel*)x264_zero, 0 ) >> 1;
            satd = abs(h->pixf.satd[size]( fdec, FDEC_STRIDE, (pixel*)x264_zero, 0 ) - dc - cached_satd( h, size, x, y ));
        }
        satd = (satd * h->mb.i_psy_rd * h->mb.i_psy_rd_lambda + 128) >> 8;
    }
    return h->pixf.ssd[size](fenc, FENC_STRIDE, fdec, FDEC_STRIDE) + satd;
}

static inline int ssd_mb( x264_t *h )
{
    int i_ssd = ssd_plane( h, PIXEL_16x16, 0, 0, 0 );
    if( CHROMA_FORMAT )
    {
        int chroma_size = h->luma2chroma_pixel[PIXEL_16x16];
        int chroma_ssd = ssd_plane( h, chroma_size, 1, 0, 0 ) + ssd_plane( h, chroma_size, 2, 0, 0 );
        i_ssd += ((uint64_t)chroma_ssd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
    }
    return i_ssd;
}
static int rd_cost_mb( x264_t *h, int i_lambda2 )
{
    int b_transform_bak = h->mb.b_transform_8x8;
    int i_ssd;
    int i_bits;
    int type_bak = h->mb.i_type;

    x264_macroblock_encode( h );

    if( h->mb.b_deblock_rdo )
        x264_macroblock_deblock( h );

    i_ssd = ssd_mb( h );

    if( IS_SKIP( h->mb.i_type ) )
    {
        i_bits = (1 * i_lambda2 + 128) >> 8;
    }
    else if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        macroblock_size_cabac( h, &cabac_tmp );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 32768 ) >> 16;
    }
    else
    {
        macroblock_size_cavlc( h );
        i_bits = ( (uint64_t)h->out.bs.i_bits_encoded * i_lambda2 + 128 ) >> 8;
    }

    h->mb.b_transform_8x8 = b_transform_bak;
    h->mb.i_type = type_bak;

    return X264_MIN( i_ssd + i_bits, COST_MAX );
}
/* partition RD functions use 8 bits more precision to avoid large rounding errors at low QPs */
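/* i.e. they return (ssd << 8) + lambda2*bits rather than ssd + ((lambda2*bits + 128) >> 8),
 * so candidates whose costs differ by less than one SSD unit can still be ordered correctly
 * when lambda2 is small. */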
static uint64_t rd_cost_subpart( x264_t *h, int i_lambda2, int i4, int i_pixel )
{
    uint64_t i_ssd, i_bits;

    x264_macroblock_encode_p4x4( h, i4 );
    if( i_pixel == PIXEL_8x4 )
        x264_macroblock_encode_p4x4( h, i4+1 );
    if( i_pixel == PIXEL_4x8 )
        x264_macroblock_encode_p4x4( h, i4+2 );

    i_ssd = ssd_plane( h, i_pixel, 0, block_idx_x[i4]*4, block_idx_y[i4]*4 );
    if( CHROMA444 )
    {
        int chromassd = ssd_plane( h, i_pixel, 1, block_idx_x[i4]*4, block_idx_y[i4]*4 )
                      + ssd_plane( h, i_pixel, 2, block_idx_x[i4]*4, block_idx_y[i4]*4 );
        chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
        i_ssd += chromassd;
    }

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        subpartition_size_cabac( h, &cabac_tmp, i4, i_pixel );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = (uint64_t)subpartition_size_cavlc( h, i4, i_pixel ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}
uint64_t x264_rd_cost_part( x264_t *h, int i_lambda2, int i4, int i_pixel )
{
    uint64_t i_ssd, i_bits;
    int i8 = i4 >> 2;

    if( i_pixel == PIXEL_16x16 )
    {
        int i_cost = rd_cost_mb( h, i_lambda2 );
        return i_cost;
    }

    if( i_pixel > PIXEL_8x8 )
        return rd_cost_subpart( h, i_lambda2, i4, i_pixel );

    h->mb.i_cbp_luma = 0;

    x264_macroblock_encode_p8x8( h, i8 );
    if( i_pixel == PIXEL_16x8 )
        x264_macroblock_encode_p8x8( h, i8+1 );
    if( i_pixel == PIXEL_8x16 )
        x264_macroblock_encode_p8x8( h, i8+2 );

    int ssd_x = 8*(i8&1);
    int ssd_y = 8*(i8>>1);
    i_ssd = ssd_plane( h, i_pixel, 0, ssd_x, ssd_y );
    if( CHROMA_FORMAT )
    {
        int chroma_size = h->luma2chroma_pixel[i_pixel];
        int chroma_ssd = ssd_plane( h, chroma_size, 1, ssd_x>>CHROMA_H_SHIFT, ssd_y>>CHROMA_V_SHIFT )
                       + ssd_plane( h, chroma_size, 2, ssd_x>>CHROMA_H_SHIFT, ssd_y>>CHROMA_V_SHIFT );
        i_ssd += ((uint64_t)chroma_ssd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
    }

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        partition_size_cabac( h, &cabac_tmp, i8, i_pixel );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = (uint64_t)partition_size_cavlc( h, i8, i_pixel ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}
static uint64_t rd_cost_i8x8( x264_t *h, int i_lambda2, int i8, int i_mode, pixel edge[4][32] )
{
    uint64_t i_ssd, i_bits;
    int plane_count = CHROMA444 ? 3 : 1;
    int i_qp = h->mb.i_qp;
    h->mb.i_cbp_luma &= ~(1<<i8);
    h->mb.b_transform_8x8 = 1;

    for( int p = 0; p < plane_count; p++ )
    {
        x264_mb_encode_i8x8( h, p, i8, i_qp, i_mode, edge[p], 1 );
        i_qp = h->mb.i_chroma_qp;
    }

    i_ssd = ssd_plane( h, PIXEL_8x8, 0, (i8&1)*8, (i8>>1)*8 );
    if( CHROMA444 )
    {
        int chromassd = ssd_plane( h, PIXEL_8x8, 1, (i8&1)*8, (i8>>1)*8 )
                      + ssd_plane( h, PIXEL_8x8, 2, (i8&1)*8, (i8>>1)*8 );
        chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
        i_ssd += chromassd;
    }

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        partition_i8x8_size_cabac( h, &cabac_tmp, i8, i_mode );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = (uint64_t)partition_i8x8_size_cavlc( h, i8, i_mode ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}
static uint64_t rd_cost_i4x4( x264_t *h, int i_lambda2, int i4, int i_mode )
{
    uint64_t i_ssd, i_bits;
    int plane_count = CHROMA444 ? 3 : 1;
    int i_qp = h->mb.i_qp;

    for( int p = 0; p < plane_count; p++ )
    {
        x264_mb_encode_i4x4( h, p, i4, i_qp, i_mode, 1 );
        i_qp = h->mb.i_chroma_qp;
    }

    i_ssd = ssd_plane( h, PIXEL_4x4, 0, block_idx_x[i4]*4, block_idx_y[i4]*4 );
    if( CHROMA444 )
    {
        int chromassd = ssd_plane( h, PIXEL_4x4, 1, block_idx_x[i4]*4, block_idx_y[i4]*4 )
                      + ssd_plane( h, PIXEL_4x4, 2, block_idx_x[i4]*4, block_idx_y[i4]*4 );
        chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
        i_ssd += chromassd;
    }

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        partition_i4x4_size_cabac( h, &cabac_tmp, i4, i_mode );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = (uint64_t)partition_i4x4_size_cavlc( h, i4, i_mode ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}
static uint64_t rd_cost_chroma( x264_t *h, int i_lambda2, int i_mode, int b_dct )
{
    uint64_t i_ssd, i_bits;

    if( b_dct )
        x264_mb_encode_chroma( h, 0, h->mb.i_chroma_qp );

    int chromapix = h->luma2chroma_pixel[PIXEL_16x16];
    i_ssd = ssd_plane( h, chromapix, 1, 0, 0 )
          + ssd_plane( h, chromapix, 2, 0, 0 );

    h->mb.i_chroma_pred_mode = i_mode;

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        chroma_size_cabac( h, &cabac_tmp );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = (uint64_t)chroma_size_cavlc( h ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}
/****************************************************************************
 * Trellis RD quantization
 ****************************************************************************/

#define TRELLIS_SCORE_MAX  -1LL // negative marks the node as invalid
#define TRELLIS_SCORE_BIAS  1LL<<60; // bias so that all valid scores are positive, even after negative contributions from psy
#define CABAC_SIZE_BITS 8
#define LAMBDA_BITS 4

/* precalculate the cost of coding various combinations of bits in a single context */
void x264_rdo_init( void )
{
    for( int i_prefix = 0; i_prefix < 15; i_prefix++ )
    {
        for( int i_ctx = 0; i_ctx < 128; i_ctx++ )
        {
            int f8_bits = 0;
            uint8_t ctx = i_ctx;

            for( int i = 1; i < i_prefix; i++ )
                f8_bits += x264_cabac_size_decision2( &ctx, 1 );
            if( i_prefix > 0 && i_prefix < 14 )
                f8_bits += x264_cabac_size_decision2( &ctx, 0 );
            f8_bits += 1 << CABAC_SIZE_BITS; //sign

            x264_cabac_size_unary[i_prefix][i_ctx] = f8_bits;
            x264_cabac_transition_unary[i_prefix][i_ctx] = ctx;
        }
    }
    for( int i_ctx = 0; i_ctx < 128; i_ctx++ )
    {
        int f8_bits = 0;
        uint8_t ctx = i_ctx;

        for( int i = 0; i < 5; i++ )
            f8_bits += x264_cabac_size_decision2( &ctx, 1 );
        f8_bits += 1 << CABAC_SIZE_BITS; //sign

        cabac_size_5ones[i_ctx] = f8_bits;
        cabac_transition_5ones[i_ctx] = ctx;
    }
}
typedef struct
{
    uint64_t score;
    int level_idx; // index into level_tree[]
    uint8_t cabac_state[4]; // just contexts 0,4,8,9 of the 10 relevant to coding abs_level_m1
} trellis_node_t;

typedef struct
{
    uint16_t next;
    uint16_t abs_level;
} trellis_level_t;

// TODO:
// save cabac state between blocks?
// use trellis' RD score instead of x264_mb_decimate_score?
// code 8x8 sig/last flags forwards with deadzone and save the contexts at
//   each position?
// change weights when using CQMs?

// possible optimizations:
// make scores fit in 32bit
// save quantized coefs during rd, to avoid a duplicate trellis in the final encode
// if trellissing all MBRD modes, finish SSD calculation so we can skip all of
//   the normal dequant/idct/ssd/cabac

// the unquant_mf here is not the same as dequant_mf:
// in normal operation (dct->quant->dequant->idct) the dct and idct are not
// normalized. quant/dequant absorb those scaling factors.
// in this function, we just do (quant->unquant) and want the output to be
// comparable to the input. so unquant is the direct inverse of quant,
// and uses the dct scaling factors, not the idct ones.
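// e.g. for one non-DC coefficient the trellis evaluates abs_level = q and q-1 with
//     unquant_abs_level = (unquant_mf[zigzag[i]] * abs_level + 128) >> 8
// which is directly comparable to the original transform coefficient, and charges the
// weighted squared difference d*d * coef_weight2[zigzag[i]] as the distortion of that
// rounding choice.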

#define SIGN(x,y) ((x^(y >> 31))-(y >> 31))

#define SET_LEVEL(ndst, nsrc, l) {\
    if( sizeof(trellis_level_t) == sizeof(uint32_t) )\
        M32( &level_tree[levels_used] ) = pack16to32( nsrc.level_idx, l );\
    else\
        level_tree[levels_used] = (trellis_level_t){ nsrc.level_idx, l };\
    ndst.level_idx = levels_used;\
    levels_used++;\
}
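
/* level_tree[] holds, for every surviving node, a singly linked list of the levels chosen
 * so far, most recently processed coefficient first: SET_LEVEL appends a new entry whose
 * .next points at the source node's previous list and whose .abs_level is the level just
 * chosen, then points the destination node at it. Once the best end node is known, the
 * loop at the bottom of quant_trellis_cabac() walks .next to write the levels back into dct[]. */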

// encode all values of the dc coef in a block which is known to have no ac
static NOINLINE
int trellis_dc_shortcut( int sign_coef, int quant_coef, int unquant_mf, int coef_weight, int lambda2, uint8_t *cabac_state, int cost_sig )
{
    uint64_t bscore = TRELLIS_SCORE_MAX;
    int ret = 0;
    int q = abs( quant_coef );
    for( int abs_level = q-1; abs_level <= q; abs_level++ )
    {
        int unquant_abs_level = (unquant_mf * abs_level + 128) >> 8;

        /* Optimize rounding for DC coefficients in DC-only luma 4x4/8x8 blocks. */
        int d = sign_coef - ((SIGN(unquant_abs_level, sign_coef) + 8)&~15);
        uint64_t score = (uint64_t)d*d * coef_weight;

        /* code the proposed level, and count how much entropy it would take */
        if( abs_level )
        {
            unsigned f8_bits = cost_sig;
            int prefix = X264_MIN( abs_level - 1, 14 );
            f8_bits += x264_cabac_size_decision_noup2( cabac_state+1, prefix > 0 );
            f8_bits += x264_cabac_size_unary[prefix][cabac_state[5]];
            if( abs_level >= 15 )
                f8_bits += bs_size_ue_big( abs_level - 15 ) << CABAC_SIZE_BITS;
            score += (uint64_t)f8_bits * lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );
        }

        COPY2_IF_LT( bscore, score, ret, abs_level );
    }
    return SIGN(ret, sign_coef);
}

// encode one value of one coef in one context
static ALWAYS_INLINE
int trellis_coef( int j, int const_level, int abs_level, int prefix, int suffix_cost,
                  int node_ctx, int level1_ctx, int levelgt1_ctx, uint64_t ssd, int cost_siglast[3],
                  trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                  trellis_level_t *level_tree, int levels_used, int lambda2, uint8_t *level_state )
{
    uint64_t score = nodes_prev[j].score + ssd;
    /* code the proposed level, and count how much entropy it would take */
    unsigned f8_bits = cost_siglast[ j ? 1 : 2 ];
    uint8_t level1_state = (j >= 3) ? nodes_prev[j].cabac_state[level1_ctx>>2] : level_state[level1_ctx];
    f8_bits += x264_cabac_entropy[level1_state ^ (const_level > 1)];
    uint8_t levelgt1_state;
    if( const_level > 1 )
    {
        levelgt1_state = j >= 6 ? nodes_prev[j].cabac_state[levelgt1_ctx-6] : level_state[levelgt1_ctx];
        f8_bits += x264_cabac_size_unary[prefix][levelgt1_state] + suffix_cost;
    }
    else
        f8_bits += 1 << CABAC_SIZE_BITS;
    score += (uint64_t)f8_bits * lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );

    /* save the node if it's better than any existing node with the same cabac ctx */
    if( score < nodes_cur[node_ctx].score )
    {
        nodes_cur[node_ctx].score = score;
        if( j == 2 || (j <= 3 && node_ctx == 4) ) // init from input state
            M32(nodes_cur[node_ctx].cabac_state) = M32(level_state+12);
        else if( j >= 3 )
            M32(nodes_cur[node_ctx].cabac_state) = M32(nodes_prev[j].cabac_state);
        if( j >= 3 ) // skip the transition if we're not going to reuse the context
            nodes_cur[node_ctx].cabac_state[level1_ctx>>2] = x264_cabac_transition[level1_state][const_level > 1];
        if( const_level > 1 && node_ctx == 7 )
            nodes_cur[node_ctx].cabac_state[levelgt1_ctx-6] = x264_cabac_transition_unary[prefix][levelgt1_state];
        nodes_cur[node_ctx].level_idx = nodes_prev[j].level_idx;
        SET_LEVEL( nodes_cur[node_ctx], nodes_prev[j], abs_level );
    }
    return levels_used;
}

// encode one value of one coef in all contexts, templated by which value that is.
// in ctx_lo, the set of live nodes is contiguous and starts at ctx0, so return as soon as we've seen one failure.
// in ctx_hi, they're contiguous within each block of 4 ctxs, but not necessarily starting at the beginning,
// so exploiting that would be more complicated.
static NOINLINE
int trellis_coef0_0( uint64_t ssd0, trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used )
{
    nodes_cur[0].score = nodes_prev[0].score + ssd0;
    nodes_cur[0].level_idx = nodes_prev[0].level_idx;
    for( int j = 1; j < 4 && (int64_t)nodes_prev[j].score >= 0; j++ )
    {
        nodes_cur[j].score = nodes_prev[j].score;
        if( j >= 3 )
            M32(nodes_cur[j].cabac_state) = M32(nodes_prev[j].cabac_state);
        SET_LEVEL( nodes_cur[j], nodes_prev[j], 0 );
    }
    return levels_used;
}

static NOINLINE
int trellis_coef0_1( uint64_t ssd0, trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used )
{
    for( int j = 1; j < 8; j++ )
        // this branch only affects speed, not function; there's nothing wrong with updating invalid nodes in coef0.
        if( (int64_t)nodes_prev[j].score >= 0 )
        {
            nodes_cur[j].score = nodes_prev[j].score;
            if( j >= 3 )
                M32(nodes_cur[j].cabac_state) = M32(nodes_prev[j].cabac_state);
            SET_LEVEL( nodes_cur[j], nodes_prev[j], 0 );
        }
    return levels_used;
}

#define COEF(const_level, ctx_hi, j, ...)\
    if( !j || (int64_t)nodes_prev[j].score >= 0 )\
        levels_used = trellis_coef( j, const_level, abs_level, prefix, suffix_cost, __VA_ARGS__,\
                                    j?ssd1:ssd0, cost_siglast, nodes_cur, nodes_prev,\
                                    level_tree, levels_used, lambda2, level_state );\
    else if( !ctx_hi )\
        return levels_used;

static NOINLINE
int trellis_coef1_0( uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
                     trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used, int lambda2,
                     uint8_t *level_state )
{
    int abs_level = 1, prefix = 1, suffix_cost = 0;
    COEF( 1, 0, 0, 1, 1, 0 );
    COEF( 1, 0, 1, 2, 2, 0 );
    COEF( 1, 0, 2, 3, 3, 0 );
    COEF( 1, 0, 3, 3, 4, 0 );
    return levels_used;
}

static NOINLINE
int trellis_coef1_1( uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
                     trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used, int lambda2,
                     uint8_t *level_state )
{
    int abs_level = 1, prefix = 1, suffix_cost = 0;
    COEF( 1, 1, 1, 2, 2, 0 );
    COEF( 1, 1, 2, 3, 3, 0 );
    COEF( 1, 1, 3, 3, 4, 0 );
    COEF( 1, 1, 4, 4, 0, 0 );
    COEF( 1, 1, 5, 5, 0, 0 );
    COEF( 1, 1, 6, 6, 0, 0 );
    COEF( 1, 1, 7, 7, 0, 0 );
    return levels_used;
}

static NOINLINE
int trellis_coefn_0( int abs_level, uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
                     trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used, int lambda2,
                     uint8_t *level_state, int levelgt1_ctx )
{
    int prefix = X264_MIN( abs_level-1, 14 );
    int suffix_cost = abs_level >= 15 ? bs_size_ue_big( abs_level - 15 ) << CABAC_SIZE_BITS : 0;
    COEF( 2, 0, 0, 4, 1, 5 );
    COEF( 2, 0, 1, 4, 2, 5 );
    COEF( 2, 0, 2, 4, 3, 5 );
    COEF( 2, 0, 3, 4, 4, 5 );
    return levels_used;
}

static NOINLINE
int trellis_coefn_1( int abs_level, uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
                     trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used, int lambda2,
                     uint8_t *level_state, int levelgt1_ctx )
{
    int prefix = X264_MIN( abs_level-1, 14 );
    int suffix_cost = abs_level >= 15 ? bs_size_ue_big( abs_level - 15 ) << CABAC_SIZE_BITS : 0;
    COEF( 2, 1, 1, 4, 2, 5 );
    COEF( 2, 1, 2, 4, 3, 5 );
    COEF( 2, 1, 3, 4, 4, 5 );
    COEF( 2, 1, 4, 5, 0, 6 );
    COEF( 2, 1, 5, 6, 0, 7 );
    COEF( 2, 1, 6, 7, 0, 8 );
    COEF( 2, 1, 7, 7, 0, levelgt1_ctx );
    return levels_used;
}

static ALWAYS_INLINE
int quant_trellis_cabac( x264_t *h, dctcoef *dct,
                         udctcoef *quant_mf, udctcoef *quant_bias, const int *unquant_mf,
                         const uint8_t *zigzag, int ctx_block_cat, int lambda2, int b_ac,
                         int b_chroma, int dc, int num_coefs, int idx )
{
    ALIGNED_ARRAY_64( dctcoef, orig_coefs, [64] );
    ALIGNED_ARRAY_64( dctcoef, quant_coefs, [64] );
    const uint32_t *coef_weight1 = num_coefs == 64 ? x264_dct8_weight_tab : x264_dct4_weight_tab;
    const uint32_t *coef_weight2 = num_coefs == 64 ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;
    const int b_interlaced = MB_INTERLACED;
    uint8_t *cabac_state_sig = &h->cabac.state[ x264_significant_coeff_flag_offset[b_interlaced][ctx_block_cat] ];
    uint8_t *cabac_state_last = &h->cabac.state[ x264_last_coeff_flag_offset[b_interlaced][ctx_block_cat] ];
    int levelgt1_ctx = b_chroma && dc ? 8 : 9;

    if( dc )
    {
        if( num_coefs == 16 )
        {
            memcpy( orig_coefs, dct, sizeof(dctcoef)*16 );
            if( !h->quantf.quant_4x4_dc( dct, quant_mf[0] >> 1, quant_bias[0] << 1 ) )
                return 0;
            h->zigzagf.scan_4x4( quant_coefs, dct );
        }
        else
        {
            memcpy( orig_coefs, dct, sizeof(dctcoef)*num_coefs );
            int nz = h->quantf.quant_2x2_dc( &dct[0], quant_mf[0] >> 1, quant_bias[0] << 1 );
            if( num_coefs == 8 )
                nz |= h->quantf.quant_2x2_dc( &dct[4], quant_mf[0] >> 1, quant_bias[0] << 1 );
            if( !nz )
                return 0;
            for( int i = 0; i < num_coefs; i++ )
                quant_coefs[i] = dct[zigzag[i]];
        }
    }
    else
    {
        if( num_coefs == 64 )
        {
            h->mc.memcpy_aligned( orig_coefs, dct, sizeof(dctcoef)*64 );
            if( !h->quantf.quant_8x8( dct, quant_mf, quant_bias ) )
                return 0;
            h->zigzagf.scan_8x8( quant_coefs, dct );
        }
        else //if( num_coefs == 16 )
        {
            memcpy( orig_coefs, dct, sizeof(dctcoef)*16 );
            if( !h->quantf.quant_4x4( dct, quant_mf, quant_bias ) )
                return 0;
            h->zigzagf.scan_4x4( quant_coefs, dct );
        }
    }

    int last_nnz = h->quantf.coeff_last[ctx_block_cat]( quant_coefs+b_ac )+b_ac;
    uint8_t *cabac_state = &h->cabac.state[ x264_coeff_abs_level_m1_offset[ctx_block_cat] ];

    /* shortcut for dc-only blocks.
     * this doesn't affect the output, but saves some unnecessary computation. */
    if( last_nnz == 0 && !dc )
    {
        int cost_sig = x264_cabac_size_decision_noup2( &cabac_state_sig[0], 1 )
                     + x264_cabac_size_decision_noup2( &cabac_state_last[0], 1 );
        dct[0] = trellis_dc_shortcut( orig_coefs[0], quant_coefs[0], unquant_mf[0], coef_weight2[0], lambda2, cabac_state, cost_sig );
        return !!dct[0];
    }

#if HAVE_MMX && ARCH_X86_64 && !defined( __MACH__ )
#define TRELLIS_ARGS unquant_mf, zigzag, lambda2, last_nnz, orig_coefs, quant_coefs, dct,\
                     cabac_state_sig, cabac_state_last, M64(cabac_state), M16(cabac_state+8)
    if( num_coefs == 16 && !dc )
        if( b_chroma || !h->mb.i_psy_trellis )
            return h->quantf.trellis_cabac_4x4( TRELLIS_ARGS, b_ac );
        else
            return h->quantf.trellis_cabac_4x4_psy( TRELLIS_ARGS, b_ac, h->mb.pic.fenc_dct4[idx&15], h->mb.i_psy_trellis );
    else if( num_coefs == 64 && !dc )
        if( b_chroma || !h->mb.i_psy_trellis )
            return h->quantf.trellis_cabac_8x8( TRELLIS_ARGS, b_interlaced );
        else
            return h->quantf.trellis_cabac_8x8_psy( TRELLIS_ARGS, b_interlaced, h->mb.pic.fenc_dct8[idx&3], h->mb.i_psy_trellis);
    else if( num_coefs == 8 && dc )
        return h->quantf.trellis_cabac_chroma_422_dc( TRELLIS_ARGS );
    else if( dc )
        return h->quantf.trellis_cabac_dc( TRELLIS_ARGS, num_coefs-1 );
#endif

    // (# of coefs) * (# of ctx) * (# of levels tried) = 1024
    // we don't need to keep all of those: (# of coefs) * (# of ctx) would be enough,
    // but it takes more time to remove dead states than you gain in reduced memory.
    trellis_level_t level_tree[64*8*2];
    int levels_used = 1;
    /* init trellis */
    trellis_node_t nodes[2][8];
    trellis_node_t *nodes_cur = nodes[0];
    trellis_node_t *nodes_prev = nodes[1];
    trellis_node_t *bnode;
    for( int j = 1; j < 4; j++ )
        nodes_cur[j].score = TRELLIS_SCORE_MAX;
    nodes_cur[0].score = TRELLIS_SCORE_BIAS;
    nodes_cur[0].level_idx = 0;
    level_tree[0].abs_level = 0;
    level_tree[0].next = 0;

    ALIGNED_4( uint8_t level_state[16] );
    memcpy( level_state, cabac_state, 10 );
    level_state[12] = cabac_state[0]; // packed subset for copying into trellis_node_t
    level_state[13] = cabac_state[4];
    level_state[14] = cabac_state[8];
    level_state[15] = cabac_state[9];

    idx &= num_coefs == 64 ? 3 : 15;

    // coefs are processed in reverse order, because that's how the abs value is coded.
    // last_coef and significant_coef flags are normally coded in forward order, but
    // we have to reverse them to match the levels.
    // in 4x4 blocks, last_coef and significant_coef use a separate context for each
    // position, so the order doesn't matter, and we don't even have to update their contexts.
    // in 8x8 blocks, some positions share contexts, so we'll just have to hope that
    // cabac isn't too sensitive.
    int i = last_nnz;
#define TRELLIS_LOOP(ctx_hi)\
    for( ; i >= b_ac; i-- )\
    {\
        /* skip 0s: this doesn't affect the output, but saves some unnecessary computation. */\
        if( !quant_coefs[i] )\
        {\
            /* no need to calculate ssd of 0s: it's the same in all nodes.\
             * no need to modify level_tree for ctx=0: it starts with an infinite loop of 0s.\
             * subtracting from one score is equivalent to adding to the rest. */\
            if( !ctx_hi )\
            {\
                int sigindex = !dc && num_coefs == 64 ? x264_significant_coeff_flag_offset_8x8[b_interlaced][i] :\
                               b_chroma && dc && num_coefs == 8 ? x264_coeff_flag_offset_chroma_422_dc[i] : i;\
                uint64_t cost_sig0 = x264_cabac_size_decision_noup2( &cabac_state_sig[sigindex], 0 )\
                                   * (uint64_t)lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );\
                nodes_cur[0].score -= cost_sig0;\
            }\
            for( int j = 1; j < (ctx_hi?8:4); j++ )\
                SET_LEVEL( nodes_cur[j], nodes_cur[j], 0 );\
            continue;\
        }\
\
        int sign_coef = orig_coefs[zigzag[i]];\
        int abs_coef = abs( sign_coef );\
        int q = abs( quant_coefs[i] );\
        int cost_siglast[3]; /* { zero, nonzero, nonzero-and-last } */\
        XCHG( trellis_node_t*, nodes_cur, nodes_prev );\
        for( int j = ctx_hi; j < 8; j++ )\
            nodes_cur[j].score = TRELLIS_SCORE_MAX;\
\
        if( i < num_coefs-1 || ctx_hi )\
        {\
            int sigindex  = !dc && num_coefs == 64 ? x264_significant_coeff_flag_offset_8x8[b_interlaced][i] :\
                            b_chroma && dc && num_coefs == 8 ? x264_coeff_flag_offset_chroma_422_dc[i] : i;\
            int lastindex = !dc && num_coefs == 64 ? x264_last_coeff_flag_offset_8x8[i] :\
                            b_chroma && dc && num_coefs == 8 ? x264_coeff_flag_offset_chroma_422_dc[i] : i;\
            cost_siglast[0] = x264_cabac_size_decision_noup2( &cabac_state_sig[sigindex], 0 );\
            int cost_sig1   = x264_cabac_size_decision_noup2( &cabac_state_sig[sigindex], 1 );\
            cost_siglast[1] = x264_cabac_size_decision_noup2( &cabac_state_last[lastindex], 0 ) + cost_sig1;\
            if( !ctx_hi )\
                cost_siglast[2] = x264_cabac_size_decision_noup2( &cabac_state_last[lastindex], 1 ) + cost_sig1;\
        }\
        else\
        {\
            cost_siglast[0] = cost_siglast[1] = cost_siglast[2] = 0;\
        }\
\
        /* there are a few cases where increasing the coeff magnitude helps,\
         * but it's only around .003 dB, and skipping them ~doubles the speed of trellis.\
         * could also try q-2: that sometimes helps, but also sometimes decimates blocks\
         * that are better left coded, especially at QP > 40. */\
        uint64_t ssd0[2], ssd1[2];\
        for( int k = 0; k < 2; k++ )\
        {\
            int abs_level = q-1+k;\
            int unquant_abs_level = (((dc?unquant_mf[0]<<1:unquant_mf[zigzag[i]]) * abs_level + 128) >> 8);\
            int d = abs_coef - unquant_abs_level;\
            /* Psy trellis: bias in favor of higher AC coefficients in the reconstructed frame. */\
            if( h->mb.i_psy_trellis && i && !dc && !b_chroma )\
            {\
                int orig_coef = (num_coefs == 64) ? h->mb.pic.fenc_dct8[idx][zigzag[i]] : h->mb.pic.fenc_dct4[idx][zigzag[i]];\
                int predicted_coef = orig_coef - sign_coef;\
                int psy_value = abs(unquant_abs_level + SIGN(predicted_coef, sign_coef));\
                int psy_weight = coef_weight1[zigzag[i]] * h->mb.i_psy_trellis;\
                ssd1[k] = (uint64_t)d*d * coef_weight2[zigzag[i]] - psy_weight * psy_value;\
            }\
            else\
                /* FIXME: for i16x16 dc is this weight optimal? */\
                ssd1[k] = (uint64_t)d*d * (dc?256:coef_weight2[zigzag[i]]);\
            ssd0[k] = ssd1[k];\
            if( !i && !dc && !ctx_hi )\
            {\
                /* Optimize rounding for DC coefficients in DC-only luma 4x4/8x8 blocks. */\
                d = sign_coef - ((SIGN(unquant_abs_level, sign_coef) + 8)&~15);\
                ssd0[k] = (uint64_t)d*d * coef_weight2[zigzag[i]];\
            }\
        }\
\
        /* argument passing imposes some significant overhead here. gcc's interprocedural register allocation isn't up to it. */\
        switch( q )\
        {\
            case 1:\
                ssd1[0] += (uint64_t)cost_siglast[0] * lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );\
                levels_used = trellis_coef0_##ctx_hi( ssd0[0]-ssd1[0], nodes_cur, nodes_prev, level_tree, levels_used );\
                levels_used = trellis_coef1_##ctx_hi( ssd0[1]-ssd1[0], ssd1[1]-ssd1[0], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state );\
                goto next##ctx_hi;\
            case 2:\
                levels_used = trellis_coef1_##ctx_hi( ssd0[0], ssd1[0], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state );\
                levels_used = trellis_coefn_##ctx_hi( q, ssd0[1], ssd1[1], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state, levelgt1_ctx );\
                goto next1;\
            default:\
                levels_used = trellis_coefn_##ctx_hi( q-1, ssd0[0], ssd1[0], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state, levelgt1_ctx );\
                levels_used = trellis_coefn_##ctx_hi( q, ssd0[1], ssd1[1], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state, levelgt1_ctx );\
                goto next1;\
        }\
        next##ctx_hi:;\
    }\
    /* output levels from the best path through the trellis */\
    bnode = &nodes_cur[ctx_hi];\
    for( int j = ctx_hi+1; j < (ctx_hi?8:4); j++ )\
        if( nodes_cur[j].score < bnode->score )\
            bnode = &nodes_cur[j];

    // keep 2 versions of the main quantization loop, depending on which subsets of the node_ctxs are live
    // node_ctx 0..3, i.e. having not yet encountered any coefs that might be quantized to >1
    TRELLIS_LOOP(0);

    if( bnode == &nodes_cur[0] )
    {
        /* We only need to zero an empty 4x4 block. 8x8 can be
           implicitly emptied via zero nnz, as can dc. */
        if( num_coefs == 16 && !dc )
            memset( dct, 0, 16 * sizeof(dctcoef) );
        return 0;
    }

    if( 0 ) // accessible only by goto, not fallthrough
    {
        // node_ctx 1..7 (ctx0 ruled out because we never try both level0 and level2+ on the same coef)
        TRELLIS_LOOP(1);
    }

    int level = bnode->level_idx;
    for( i = b_ac; i <= last_nnz; i++ )
    {
        dct[zigzag[i]] = SIGN(level_tree[level].abs_level, dct[zigzag[i]]);
        level = level_tree[level].next;
    }

    return 1;
}

/* FIXME: This is a gigantic hack. See below.
 *
 * CAVLC is much more difficult to trellis than CABAC.
 *
 * CABAC has only three states to track: significance map, last, and the
 * level state machine.
 * CAVLC, by comparison, has five: coeff_token (trailing + total),
 * total_zeroes, zero_run, and the level state machine.
 *
 * I know of no paper that has managed to design a close-to-optimal trellis
 * that covers all five of these and isn't exponential-time. As a result, this
 * "trellis" isn't: it's just a QNS search. Patches welcome for something better.
 * It's actually surprisingly fast, albeit not quite optimal. It's pretty close
 * though; since CAVLC only has 2^16 possible rounding modes (assuming only two
 * roundings as options), a bruteforce search is feasible. Testing shows
 * that this QNS is reasonably close to optimal in terms of compression.
 *
 * TODO:
 *  Don't bother changing large coefficients when it wouldn't affect bit cost
 *  (e.g. only affecting bypassed suffix bits).
 *  Don't re-run all parts of CAVLC bit cost calculation when not necessary.
 *  e.g. when changing a coefficient from one non-zero value to another in
 *  such a way that trailing ones and suffix length isn't affected. */
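
/* In outline, the search below quantizes every coefficient both ways (nearest and
 * nearest-1), starts from a rounding halfway between nearest and round-down, and then
 * repeatedly flips whichever single coefficient's rounding lowers
 * distortion + lambda2 * cavlc_bits the most, re-counting the whole block's CAVLC cost
 * for each trial flip, until no flip helps. */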
static ALWAYS_INLINE
int quant_trellis_cavlc( x264_t *h, dctcoef *dct,
                         const udctcoef *quant_mf, const int *unquant_mf,
                         const uint8_t *zigzag, int ctx_block_cat, int lambda2, int b_ac,
                         int b_chroma, int dc, int num_coefs, int idx, int b_8x8 )
{
    ALIGNED_ARRAY_16( dctcoef, quant_coefs,[2],[16] );
    ALIGNED_ARRAY_16( dctcoef, coefs,[16] );
    const uint32_t *coef_weight1 = b_8x8 ? x264_dct8_weight_tab : x264_dct4_weight_tab;
    const uint32_t *coef_weight2 = b_8x8 ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;
    int delta_distortion[16];
    int64_t score = 1ULL<<62;
    int i, j;
    const int f = 1<<15;
    int nC = b_chroma && dc ? 3 + (num_coefs>>2)
                            : ct_index[x264_mb_predict_non_zero_code( h, !b_chroma && dc ? (idx - LUMA_DC)*16 : idx )];

    for( i = 0; i < 16; i += 16/sizeof(*coefs) )
        M128( &coefs[i] ) = M128_ZERO;

    /* Code for handling 8x8dct -> 4x4dct CAVLC munging. Input/output use a different
     * step/start/end than internal processing. */
    int step = 1;
    int start = b_ac;
    int end = num_coefs - 1;
    if( b_8x8 )
    {
        start = idx&3;
        end = 60 + start;
        step = 4;
    }
    idx &= 15;

    lambda2 <<= LAMBDA_BITS;

    /* Find last non-zero coefficient. */
    for( i = end; i >= start; i -= step )
        if( (unsigned)(dct[zigzag[i]] * (dc?quant_mf[0]>>1:quant_mf[zigzag[i]]) + f-1) >= 2*f )
            break;

    if( i < start )
        goto zeroblock;

    /* Prepare for QNS search: calculate distortion caused by each DCT coefficient
     * rounding to be searched.
     *
     * We only search two roundings (nearest and nearest-1) like in CABAC trellis,
     * so we just store the difference in distortion between them. */
    int last_nnz = b_8x8 ? i >> 2 : i;
    int coef_mask = 0;
    int round_mask = 0;
    for( i = b_ac, j = start; i <= last_nnz; i++, j += step )
    {
        int coef = dct[zigzag[j]];
        int abs_coef = abs(coef);
        int sign = coef < 0 ? -1 : 1;
        int nearest_quant = ( f + abs_coef * (dc?quant_mf[0]>>1:quant_mf[zigzag[j]]) ) >> 16;
        quant_coefs[1][i] = quant_coefs[0][i] = sign * nearest_quant;
        coefs[i] = quant_coefs[1][i];
        if( nearest_quant )
        {
            /* We initialize the trellis with a deadzone halfway between nearest rounding
             * and always-round-down. This gives much better results than initializing to either
             * extreme.
             * FIXME: should we initialize to the deadzones used by deadzone quant? */
            int deadzone_quant = ( f/2 + abs_coef * (dc?quant_mf[0]>>1:quant_mf[zigzag[j]]) ) >> 16;
            int unquant1 = (((dc?unquant_mf[0]<<1:unquant_mf[zigzag[j]]) * (nearest_quant-0) + 128) >> 8);
            int unquant0 = (((dc?unquant_mf[0]<<1:unquant_mf[zigzag[j]]) * (nearest_quant-1) + 128) >> 8);
            int d1 = abs_coef - unquant1;
            int d0 = abs_coef - unquant0;
            delta_distortion[i] = (d0*d0 - d1*d1) * (dc?256:coef_weight2[zigzag[j]]);

            /* Psy trellis: bias in favor of higher AC coefficients in the reconstructed frame. */
            if( h->mb.i_psy_trellis && j && !dc && !b_chroma )
            {
                int orig_coef = b_8x8 ? h->mb.pic.fenc_dct8[idx>>2][zigzag[j]] : h->mb.pic.fenc_dct4[idx][zigzag[j]];
                int predicted_coef = orig_coef - coef;
                int psy_weight = coef_weight1[zigzag[j]];
                int psy_value0 = h->mb.i_psy_trellis * abs(predicted_coef + unquant0 * sign);
                int psy_value1 = h->mb.i_psy_trellis * abs(predicted_coef + unquant1 * sign);
                delta_distortion[i] += (psy_value0 - psy_value1) * psy_weight;
            }

            quant_coefs[0][i] = sign * (nearest_quant-1);
            if( deadzone_quant != nearest_quant )
                coefs[i] = quant_coefs[0][i];
            else
                round_mask |= 1 << i;
        }
        else
            delta_distortion[i] = 0;
        coef_mask |= (!!coefs[i]) << i;
    }

    /* Calculate the cost of the starting state. */
    h->out.bs.i_bits_encoded = 0;
    if( !coef_mask )
        bs_write_vlc( &h->out.bs, x264_coeff0_token[nC] );
    else
        cavlc_block_residual_internal( h, ctx_block_cat, coefs + b_ac, nC );
    score = (int64_t)h->out.bs.i_bits_encoded * lambda2;

    /* QNS loop: pick the change that improves RD the most, apply it, repeat.
     * coef_mask and round_mask are used to simplify tracking of nonzeroness
     * and rounding modes chosen. */
    while( 1 )
    {
        int64_t iter_score = score;
        int iter_distortion_delta = 0;
        int iter_coef = -1;
        int iter_mask = coef_mask;
        int iter_round = round_mask;
        for( i = b_ac; i <= last_nnz; i++ )
        {
            if( !delta_distortion[i] )
                continue;

            /* Set up all the variables for this iteration. */
            int cur_round = round_mask ^ (1 << i);
            int round_change = (cur_round >> i)&1;
            int old_coef = coefs[i];
            int new_coef = quant_coefs[round_change][i];
            int cur_mask = (coef_mask&~(1 << i))|(!!new_coef << i);
            int cur_distortion_delta = delta_distortion[i] * (round_change ? -1 : 1);
            int64_t cur_score = cur_distortion_delta;
            coefs[i] = new_coef;

            /* Count up bits. */
            h->out.bs.i_bits_encoded = 0;
            if( !cur_mask )
                bs_write_vlc( &h->out.bs, x264_coeff0_token[nC] );
            else
                cavlc_block_residual_internal( h, ctx_block_cat, coefs + b_ac, nC );
            cur_score += (int64_t)h->out.bs.i_bits_encoded * lambda2;

            coefs[i] = old_coef;
            if( cur_score < iter_score )
            {
                iter_score = cur_score;
                iter_coef = i;
                iter_mask = cur_mask;
                iter_round = cur_round;
                iter_distortion_delta = cur_distortion_delta;
            }
        }
        if( iter_coef >= 0 )
        {
            score = iter_score - iter_distortion_delta;
            coef_mask = iter_mask;
            round_mask = iter_round;
            coefs[iter_coef] = quant_coefs[((round_mask >> iter_coef)&1)][iter_coef];
            /* Don't try adjusting coefficients we've already adjusted.
             * Testing suggests this doesn't hurt results -- and sometimes actually helps. */
            delta_distortion[iter_coef] = 0;
        }
        else
            break;
    }

    if( coef_mask )
    {
        for( i = b_ac, j = start; i < num_coefs; i++, j += step )
            dct[zigzag[j]] = coefs[i];
        return 1;
    }

zeroblock:
    if( !dc )
    {
        if( b_8x8 )
            for( i = start; i <= end; i+=step )
                dct[zigzag[i]] = 0;
        else
            memset( dct, 0, 16*sizeof(dctcoef) );
    }
    return 0;
}

int x264_quant_luma_dc_trellis( x264_t *h, dctcoef *dct, int i_quant_cat, int i_qp, int ctx_block_cat, int b_intra, int idx )
{
    if( h->param.b_cabac )
        return quant_trellis_cabac( h, dct,
            h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias0[i_quant_cat][i_qp],
            h->unquant4_mf[i_quant_cat][i_qp], x264_zigzag_scan4[MB_INTERLACED],
            ctx_block_cat, h->mb.i_trellis_lambda2[0][b_intra], 0, 0, 1, 16, idx );

    return quant_trellis_cavlc( h, dct,
        h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp], x264_zigzag_scan4[MB_INTERLACED],
        DCT_LUMA_DC, h->mb.i_trellis_lambda2[0][b_intra], 0, 0, 1, 16, idx, 0 );
}

static const uint8_t zigzag_scan2x2[4] = { 0, 1, 2, 3 };
static const uint8_t zigzag_scan2x4[8] = { 0, 2, 1, 4, 6, 3, 5, 7 };

int x264_quant_chroma_dc_trellis( x264_t *h, dctcoef *dct, int i_qp, int b_intra, int idx )
{
    const uint8_t *zigzag;
    int num_coefs;
    int quant_cat = CQM_4IC+1 - b_intra;

    if( CHROMA_FORMAT == CHROMA_422 )
    {
        zigzag = zigzag_scan2x4;
        num_coefs = 8;
    }
    else
    {
        zigzag = zigzag_scan2x2;
        num_coefs = 4;
    }

    if( h->param.b_cabac )
        return quant_trellis_cabac( h, dct,
            h->quant4_mf[quant_cat][i_qp], h->quant4_bias0[quant_cat][i_qp],
            h->unquant4_mf[quant_cat][i_qp], zigzag,
            DCT_CHROMA_DC, h->mb.i_trellis_lambda2[1][b_intra], 0, 1, 1, num_coefs, idx );

    return quant_trellis_cavlc( h, dct,
        h->quant4_mf[quant_cat][i_qp], h->unquant4_mf[quant_cat][i_qp], zigzag,
        DCT_CHROMA_DC, h->mb.i_trellis_lambda2[1][b_intra], 0, 1, 1, num_coefs, idx, 0 );
}

int x264_quant_4x4_trellis( x264_t *h, dctcoef *dct, int i_quant_cat,
                            int i_qp, int ctx_block_cat, int b_intra, int b_chroma, int idx )
{
    static const uint8_t ctx_ac[14] = {0,1,0,0,1,0,0,1,0,0,0,1,0,0};
    int b_ac = ctx_ac[ctx_block_cat];
    if( h->param.b_cabac )
        return quant_trellis_cabac( h, dct,
            h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias0[i_quant_cat][i_qp],
            h->unquant4_mf[i_quant_cat][i_qp], x264_zigzag_scan4[MB_INTERLACED],
            ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], b_ac, b_chroma, 0, 16, idx );

    return quant_trellis_cavlc( h, dct,
        h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp],
        x264_zigzag_scan4[MB_INTERLACED],
        ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], b_ac, b_chroma, 0, 16, idx, 0 );
}

int x264_quant_8x8_trellis( x264_t *h, dctcoef *dct, int i_quant_cat,
                            int i_qp, int ctx_block_cat, int b_intra, int b_chroma, int idx )
{
    if( h->param.b_cabac )
    {
        return quant_trellis_cabac( h, dct,
            h->quant8_mf[i_quant_cat][i_qp], h->quant8_bias0[i_quant_cat][i_qp],
            h->unquant8_mf[i_quant_cat][i_qp], x264_zigzag_scan8[MB_INTERLACED],
            ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], 0, b_chroma, 0, 64, idx );
    }

    /* 8x8 CAVLC is split into 4 4x4 blocks */
    int nzaccum = 0;
    for( int i = 0; i < 4; i++ )
    {
        int nz = quant_trellis_cavlc( h, dct,
            h->quant8_mf[i_quant_cat][i_qp], h->unquant8_mf[i_quant_cat][i_qp],
            x264_zigzag_scan8[MB_INTERLACED],
            DCT_LUMA_4x4, h->mb.i_trellis_lambda2[b_chroma][b_intra], 0, b_chroma, 0, 16, idx*4+i, 1 );
        /* Set up nonzero count for future calls */
        h->mb.cache.non_zero_count[x264_scan8[idx*4+i]] = nz;
        nzaccum |= nz;
    }
    STORE_8x8_NNZ( 0, idx, 0 );
    return nzaccum;
}