  1. /*****************************************************************************
  2. * ratecontrol.c: ratecontrol
  3. *****************************************************************************
  4. * Copyright (C) 2005-2018 x264 project
  5. *
  6. * Authors: Loren Merritt <lorenm@u.washington.edu>
  7. * Michael Niedermayer <michaelni@gmx.at>
  8. * Gabriel Bouvigne <gabriel.bouvigne@joost.com>
  9. * Fiona Glaser <fiona@x264.com>
  10. * Måns Rullgård <mru@mru.ath.cx>
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License as published by
  14. * the Free Software Foundation; either version 2 of the License, or
  15. * (at your option) any later version.
  16. *
  17. * This program is distributed in the hope that it will be useful,
  18. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  20. * GNU General Public License for more details.
  21. *
  22. * You should have received a copy of the GNU General Public License
  23. * along with this program; if not, write to the Free Software
  24. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
  25. *
  26. * This program is also available under a commercial proprietary license.
  27. * For more information, contact us at licensing@x264.com.
  28. *****************************************************************************/
  29. #undef NDEBUG // always check asserts, the speed effect is far too small to disable them
  30. #include "common/common.h"
  31. #include "ratecontrol.h"
  32. #include "me.h"
  33. typedef struct
  34. {
  35. int pict_type;
  36. int frame_type;
  37. int kept_as_ref;
  38. double qscale;
  39. int mv_bits;
  40. int tex_bits;
  41. int misc_bits;
  42. double expected_bits; /* total expected bits up to the current frame (current one excluded) */
  43. double expected_vbv;
  44. double new_qscale;
  45. float new_qp;
  46. int i_count;
  47. int p_count;
  48. int s_count;
  49. float blurred_complexity;
  50. char direct_mode;
  51. int16_t weight[3][2];
  52. int16_t i_weight_denom[2];
  53. int refcount[16];
  54. int refs;
  55. int64_t i_duration;
  56. int64_t i_cpb_duration;
  57. int out_num;
  58. } ratecontrol_entry_t;
  59. typedef struct
  60. {
  61. float coeff_min;
  62. float coeff;
  63. float count;
  64. float decay;
  65. float offset;
  66. } predictor_t;
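/* Running linear-fit state consumed by predict_size()/update_predictor() (declared below)
 * to estimate frame and row sizes from SATD cost at a given qscale. */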
  67. struct x264_ratecontrol_t
  68. {
  69. /* constants */
  70. int b_abr;
  71. int b_2pass;
  72. int b_vbv;
  73. int b_vbv_min_rate;
  74. double fps;
  75. double bitrate;
  76. double rate_tolerance;
  77. double qcompress;
  78. int nmb; /* number of macroblocks in a frame */
  79. int qp_constant[3];
  80. /* current frame */
  81. ratecontrol_entry_t *rce;
  82. float qpm; /* qp for current macroblock: precise float for AQ */
  83. float qpa_rc; /* average of macroblocks' qp before aq */
  84. float qpa_rc_prev;
  85. int qpa_aq; /* average of macroblocks' qp after aq */
  86. int qpa_aq_prev;
  87. float qp_novbv; /* QP for the current frame if 1-pass VBV was disabled. */
  88. /* VBV stuff */
  89. double buffer_size;
  90. int64_t buffer_fill_final;
  91. int64_t buffer_fill_final_min;
  92. double buffer_fill; /* planned buffer, if all in-progress frames hit their bit budget */
  93. double buffer_rate; /* # of bits added to buffer_fill after each frame */
  94. double vbv_max_rate; /* # of bits added to buffer_fill per second */
  95. predictor_t *pred; /* predict frame size from satd */
  96. int single_frame_vbv;
  97. float rate_factor_max_increment; /* Don't allow RF above (CRF + this value). */
  98. /* ABR stuff */
  99. int last_satd;
  100. double last_rceq;
  101. double cplxr_sum; /* sum of bits*qscale/rceq */
  102. double expected_bits_sum; /* sum of qscale2bits after rceq, ratefactor, and overflow, only includes finished frames */
  103. int64_t filler_bits_sum; /* sum in bits of finished frames' filler data */
  104. double wanted_bits_window; /* target bitrate * window */
  105. double cbr_decay;
  106. double short_term_cplxsum;
  107. double short_term_cplxcount;
  108. double rate_factor_constant;
  109. double ip_offset;
  110. double pb_offset;
  111. /* 2pass stuff */
  112. FILE *p_stat_file_out;
  113. char *psz_stat_file_tmpname;
  114. FILE *p_mbtree_stat_file_out;
  115. char *psz_mbtree_stat_file_tmpname;
  116. char *psz_mbtree_stat_file_name;
  117. FILE *p_mbtree_stat_file_in;
  118. int num_entries; /* number of ratecontrol_entry_ts */
  119. ratecontrol_entry_t *entry; /* FIXME: copy needed data and free this once init is done */
  120. ratecontrol_entry_t **entry_out;
  121. double last_qscale;
  122. double last_qscale_for[3]; /* last qscale for a specific pict type, used for max_diff & ipb factor stuff */
  123. int last_non_b_pict_type;
  124. double accum_p_qp; /* for determining I-frame quant */
  125. double accum_p_norm;
  126. double last_accum_p_norm;
  127. double lmin[3]; /* min qscale by frame type */
  128. double lmax[3];
  129. double lstep; /* max change (multiply) in qscale per frame */
  130. struct
  131. {
  132. uint16_t *qp_buffer[2]; /* Global buffers for converting MB-tree quantizer data. */
  133. int qpbuf_pos; /* In order to handle pyramid reordering, QP buffer acts as a stack.
  134. * This value is the current position (0 or 1). */
  135. int src_mb_count;
  136. /* For rescaling */
  137. int rescale_enabled;
  138. float *scale_buffer[2]; /* Intermediate buffers */
  139. int filtersize[2]; /* filter size (H/V) */
  140. float *coeffs[2];
  141. int *pos[2];
  142. int srcdim[2]; /* Source dimensions (W/H) */
  143. } mbtree;
  144. /* MBRC stuff */
  145. volatile float frame_size_estimated; /* Access to this variable must be atomic: double is
  146. * not atomic on all arches we care about */
  147. double frame_size_maximum; /* Maximum frame size due to MinCR */
  148. double frame_size_planned;
  149. double slice_size_planned;
  150. predictor_t *row_pred;
  151. predictor_t row_preds[3][2];
  152. predictor_t *pred_b_from_p; /* predict B-frame size from P-frame satd */
  153. int bframes; /* # consecutive B-frames before this P-frame */
  154. int bframe_bits; /* total cost of those frames */
  155. int i_zones;
  156. x264_zone_t *zones;
  157. x264_zone_t *prev_zone;
  158. /* hrd stuff */
  159. int initial_cpb_removal_delay;
  160. int initial_cpb_removal_delay_offset;
  161. double nrt_first_access_unit; /* nominal removal time */
  162. double previous_cpb_final_arrival_time;
  163. uint64_t hrd_multiply_denom;
  164. };
  165. static int parse_zones( x264_t *h );
  166. static int init_pass2(x264_t *);
  167. static float rate_estimate_qscale( x264_t *h );
  168. static int update_vbv( x264_t *h, int bits );
  169. static void update_vbv_plan( x264_t *h, int overhead );
  170. static float predict_size( predictor_t *p, float q, float var );
  171. static void update_predictor( predictor_t *p, float q, float var, float bits );
  172. #define CMP_OPT_FIRST_PASS( opt, param_val )\
  173. {\
  174. if( ( p = strstr( opts, opt "=" ) ) && sscanf( p, opt "=%d" , &i ) && param_val != i )\
  175. {\
  176. x264_log( h, X264_LOG_ERROR, "different " opt " setting than first pass (%d vs %d)\n", param_val, i );\
  177. return -1;\
  178. }\
  179. }
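/* Usage sketch: CMP_OPT_FIRST_PASS( "bframes", h->param.i_bframe ) looks for "bframes=<int>"
 * in the first pass's "#options:" line and fails init when the stored value differs from the
 * current parameter; see the option checks in x264_ratecontrol_new() below. */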
  180. /* Terminology:
  181. * qp = h.264's quantizer
  182. * qscale = linearized quantizer = Lagrange multiplier
  183. */
  184. static inline float qp2qscale( float qp )
  185. {
  186. return 0.85f * powf( 2.0f, ( qp - (12.0f + QP_BD_OFFSET) ) / 6.0f );
  187. }
  188. static inline float qscale2qp( float qscale )
  189. {
  190. return (12.0f + QP_BD_OFFSET) + 6.0f * log2f( qscale/0.85f );
  191. }
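/* Concretely: at 8-bit depth QP_BD_OFFSET is 0, so qp2qscale(12) == 0.85 and every +6 QP
 * doubles qscale (qp2qscale(18) == 1.7); qscale2qp() is the exact inverse mapping. */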
/* Texture bitrate is not quite inversely proportional to qscale,
 * probably due to the changing number of SKIP blocks.
 * MV bits level off at about qp<=12, because the lambda used
 * for motion estimation is constant there. */
  196. static inline double qscale2bits( ratecontrol_entry_t *rce, double qscale )
  197. {
  198. if( qscale<0.1 )
  199. qscale = 0.1;
  200. return (rce->tex_bits + .1) * pow( rce->qscale / qscale, 1.1 )
  201. + rce->mv_bits * pow( X264_MAX(rce->qscale, 1) / X264_MAX(qscale, 1), 0.5 )
  202. + rce->misc_bits;
  203. }
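/* The pixf.var[] calls below return a packed uint64_t: pixel sum in the low 32 bits, sum of
 * squared pixels in the high 32 bits. ac_energy_var() unpacks it and returns
 * SSD - sum^2/2^shift (2^shift being the pixel count), i.e. the sum of squared deviations
 * from the block mean ("AC energy"), optionally accumulating per-plane sum/SSD totals on the
 * frame for later use. */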
  204. static ALWAYS_INLINE uint32_t ac_energy_var( uint64_t sum_ssd, int shift, x264_frame_t *frame, int i, int b_store )
  205. {
  206. uint32_t sum = sum_ssd;
  207. uint32_t ssd = sum_ssd >> 32;
  208. if( b_store )
  209. {
  210. frame->i_pixel_sum[i] += sum;
  211. frame->i_pixel_ssd[i] += ssd;
  212. }
  213. return ssd - ((uint64_t)sum * sum >> shift);
  214. }
  215. static ALWAYS_INLINE uint32_t ac_energy_plane( x264_t *h, int mb_x, int mb_y, x264_frame_t *frame, int i, int b_chroma, int b_field, int b_store )
  216. {
  217. int height = b_chroma ? 16>>CHROMA_V_SHIFT : 16;
  218. int stride = frame->i_stride[i];
  219. int offset = b_field
  220. ? 16 * mb_x + height * (mb_y&~1) * stride + (mb_y&1) * stride
  221. : 16 * mb_x + height * mb_y * stride;
  222. stride <<= b_field;
  223. if( b_chroma )
  224. {
  225. ALIGNED_ARRAY_64( pixel, pix,[FENC_STRIDE*16] );
  226. int chromapix = h->luma2chroma_pixel[PIXEL_16x16];
  227. int shift = 7 - CHROMA_V_SHIFT;
  228. h->mc.load_deinterleave_chroma_fenc( pix, frame->plane[1] + offset, stride, height );
  229. return ac_energy_var( h->pixf.var[chromapix]( pix, FENC_STRIDE ), shift, frame, 1, b_store )
  230. + ac_energy_var( h->pixf.var[chromapix]( pix+FENC_STRIDE/2, FENC_STRIDE ), shift, frame, 2, b_store );
  231. }
  232. else
  233. return ac_energy_var( h->pixf.var[PIXEL_16x16]( frame->plane[i] + offset, stride ), 8, frame, i, b_store );
  234. }
  235. // Find the total AC energy of the block in all planes.
  236. static NOINLINE uint32_t ac_energy_mb( x264_t *h, int mb_x, int mb_y, x264_frame_t *frame )
  237. {
    /* This function contains annoying hacks because GCC has a habit of reordering emms
     * and putting it after floating point ops. As a result, we put the emms at the end of the
     * function and make sure that it's always called before the float math. Noinline makes
     * sure no reordering goes on. */
  242. uint32_t var;
  243. x264_prefetch_fenc( h, frame, mb_x, mb_y );
  244. if( h->mb.b_adaptive_mbaff )
  245. {
  246. /* We don't know the super-MB mode we're going to pick yet, so
  247. * simply try both and pick the lower of the two. */
  248. uint32_t var_interlaced, var_progressive;
  249. var_interlaced = ac_energy_plane( h, mb_x, mb_y, frame, 0, 0, 1, 1 );
  250. var_progressive = ac_energy_plane( h, mb_x, mb_y, frame, 0, 0, 0, 0 );
  251. if( CHROMA444 )
  252. {
  253. var_interlaced += ac_energy_plane( h, mb_x, mb_y, frame, 1, 0, 1, 1 );
  254. var_progressive += ac_energy_plane( h, mb_x, mb_y, frame, 1, 0, 0, 0 );
  255. var_interlaced += ac_energy_plane( h, mb_x, mb_y, frame, 2, 0, 1, 1 );
  256. var_progressive += ac_energy_plane( h, mb_x, mb_y, frame, 2, 0, 0, 0 );
  257. }
  258. else if( CHROMA_FORMAT )
  259. {
  260. var_interlaced += ac_energy_plane( h, mb_x, mb_y, frame, 1, 1, 1, 1 );
  261. var_progressive += ac_energy_plane( h, mb_x, mb_y, frame, 1, 1, 0, 0 );
  262. }
  263. var = X264_MIN( var_interlaced, var_progressive );
  264. }
  265. else
  266. {
  267. var = ac_energy_plane( h, mb_x, mb_y, frame, 0, 0, PARAM_INTERLACED, 1 );
  268. if( CHROMA444 )
  269. {
  270. var += ac_energy_plane( h, mb_x, mb_y, frame, 1, 0, PARAM_INTERLACED, 1 );
  271. var += ac_energy_plane( h, mb_x, mb_y, frame, 2, 0, PARAM_INTERLACED, 1 );
  272. }
  273. else if( CHROMA_FORMAT )
  274. var += ac_energy_plane( h, mb_x, mb_y, frame, 1, 1, PARAM_INTERLACED, 1 );
  275. }
  276. x264_emms();
  277. return var;
  278. }
  279. void x264_adaptive_quant_frame( x264_t *h, x264_frame_t *frame, float *quant_offsets )
  280. {
  281. /* Initialize frame stats */
  282. for( int i = 0; i < 3; i++ )
  283. {
  284. frame->i_pixel_sum[i] = 0;
  285. frame->i_pixel_ssd[i] = 0;
  286. }
  287. /* Degenerate cases */
  288. if( h->param.rc.i_aq_mode == X264_AQ_NONE || h->param.rc.f_aq_strength == 0 )
  289. {
  290. /* Need to init it anyways for MB tree */
  291. if( h->param.rc.i_aq_mode && h->param.rc.f_aq_strength == 0 )
  292. {
  293. if( quant_offsets )
  294. {
  295. for( int mb_xy = 0; mb_xy < h->mb.i_mb_count; mb_xy++ )
  296. frame->f_qp_offset[mb_xy] = frame->f_qp_offset_aq[mb_xy] = quant_offsets[mb_xy];
  297. if( h->frames.b_have_lowres )
  298. for( int mb_xy = 0; mb_xy < h->mb.i_mb_count; mb_xy++ )
  299. frame->i_inv_qscale_factor[mb_xy] = x264_exp2fix8( frame->f_qp_offset[mb_xy] );
  300. }
  301. else
  302. {
  303. memset( frame->f_qp_offset, 0, h->mb.i_mb_count * sizeof(float) );
  304. memset( frame->f_qp_offset_aq, 0, h->mb.i_mb_count * sizeof(float) );
  305. if( h->frames.b_have_lowres )
  306. for( int mb_xy = 0; mb_xy < h->mb.i_mb_count; mb_xy++ )
  307. frame->i_inv_qscale_factor[mb_xy] = 256;
  308. }
  309. }
  310. /* Need variance data for weighted prediction */
  311. if( h->param.analyse.i_weighted_pred )
  312. {
  313. for( int mb_y = 0; mb_y < h->mb.i_mb_height; mb_y++ )
  314. for( int mb_x = 0; mb_x < h->mb.i_mb_width; mb_x++ )
  315. ac_energy_mb( h, mb_x, mb_y, frame );
  316. }
  317. else
  318. return;
  319. }
  320. /* Actual adaptive quantization */
  321. else
  322. {
  323. /* constants chosen to result in approximately the same overall bitrate as without AQ.
  324. * FIXME: while they're written in 5 significant digits, they're only tuned to 2. */
  325. float strength;
  326. float avg_adj = 0.f;
  327. float bias_strength = 0.f;
  328. if( h->param.rc.i_aq_mode == X264_AQ_AUTOVARIANCE || h->param.rc.i_aq_mode == X264_AQ_AUTOVARIANCE_BIASED )
  329. {
  330. float bit_depth_correction = 1.f / (1 << (2*(BIT_DEPTH-8)));
  331. float avg_adj_pow2 = 0.f;
  332. for( int mb_y = 0; mb_y < h->mb.i_mb_height; mb_y++ )
  333. for( int mb_x = 0; mb_x < h->mb.i_mb_width; mb_x++ )
  334. {
  335. uint32_t energy = ac_energy_mb( h, mb_x, mb_y, frame );
  336. float qp_adj = powf( energy * bit_depth_correction + 1, 0.125f );
  337. frame->f_qp_offset[mb_x + mb_y*h->mb.i_mb_stride] = qp_adj;
  338. avg_adj += qp_adj;
  339. avg_adj_pow2 += qp_adj * qp_adj;
  340. }
  341. avg_adj /= h->mb.i_mb_count;
  342. avg_adj_pow2 /= h->mb.i_mb_count;
  343. strength = h->param.rc.f_aq_strength * avg_adj;
  344. avg_adj = avg_adj - 0.5f * (avg_adj_pow2 - 14.f) / avg_adj;
  345. bias_strength = h->param.rc.f_aq_strength;
  346. }
  347. else
  348. strength = h->param.rc.f_aq_strength * 1.0397f;
  349. for( int mb_y = 0; mb_y < h->mb.i_mb_height; mb_y++ )
  350. for( int mb_x = 0; mb_x < h->mb.i_mb_width; mb_x++ )
  351. {
  352. float qp_adj;
  353. int mb_xy = mb_x + mb_y*h->mb.i_mb_stride;
  354. if( h->param.rc.i_aq_mode == X264_AQ_AUTOVARIANCE_BIASED )
  355. {
  356. qp_adj = frame->f_qp_offset[mb_xy];
  357. qp_adj = strength * (qp_adj - avg_adj) + bias_strength * (1.f - 14.f / (qp_adj * qp_adj));
  358. }
  359. else if( h->param.rc.i_aq_mode == X264_AQ_AUTOVARIANCE )
  360. {
  361. qp_adj = frame->f_qp_offset[mb_xy];
  362. qp_adj = strength * (qp_adj - avg_adj);
  363. }
  364. else
  365. {
  366. uint32_t energy = ac_energy_mb( h, mb_x, mb_y, frame );
  367. qp_adj = strength * (x264_log2( X264_MAX(energy, 1) ) - (14.427f + 2*(BIT_DEPTH-8)));
  368. }
  369. if( quant_offsets )
  370. qp_adj += quant_offsets[mb_xy];
  371. frame->f_qp_offset[mb_xy] =
  372. frame->f_qp_offset_aq[mb_xy] = qp_adj;
  373. if( h->frames.b_have_lowres )
  374. frame->i_inv_qscale_factor[mb_xy] = x264_exp2fix8(qp_adj);
  375. }
  376. }
  377. /* Remove mean from SSD calculation */
  378. for( int i = 0; i < 3; i++ )
  379. {
  380. uint64_t ssd = frame->i_pixel_ssd[i];
  381. uint64_t sum = frame->i_pixel_sum[i];
  382. int width = 16*h->mb.i_mb_width >> (i && CHROMA_H_SHIFT);
  383. int height = 16*h->mb.i_mb_height >> (i && CHROMA_V_SHIFT);
  384. frame->i_pixel_ssd[i] = ssd - (sum * sum + width * height / 2) / (width * height);
  385. }
  386. }
  387. static int macroblock_tree_rescale_init( x264_t *h, x264_ratecontrol_t *rc )
  388. {
  389. /* Use fractional QP array dimensions to compensate for edge padding */
  390. float srcdim[2] = {rc->mbtree.srcdim[0] / 16.f, rc->mbtree.srcdim[1] / 16.f};
  391. float dstdim[2] = { h->param.i_width / 16.f, h->param.i_height / 16.f};
  392. int srcdimi[2] = {ceil(srcdim[0]), ceil(srcdim[1])};
  393. int dstdimi[2] = {ceil(dstdim[0]), ceil(dstdim[1])};
  394. if( h->param.b_interlaced || h->param.b_fake_interlaced )
  395. {
  396. srcdimi[1] = (srcdimi[1]+1)&~1;
  397. dstdimi[1] = (dstdimi[1]+1)&~1;
  398. }
  399. rc->mbtree.src_mb_count = srcdimi[0] * srcdimi[1];
  400. CHECKED_MALLOC( rc->mbtree.qp_buffer[0], rc->mbtree.src_mb_count * sizeof(uint16_t) );
  401. if( h->param.i_bframe_pyramid && h->param.rc.b_stat_read )
  402. CHECKED_MALLOC( rc->mbtree.qp_buffer[1], rc->mbtree.src_mb_count * sizeof(uint16_t) );
  403. rc->mbtree.qpbuf_pos = -1;
  404. /* No rescaling to do */
  405. if( srcdimi[0] == dstdimi[0] && srcdimi[1] == dstdimi[1] )
  406. return 0;
  407. rc->mbtree.rescale_enabled = 1;
  408. /* Allocate intermediate scaling buffers */
  409. CHECKED_MALLOC( rc->mbtree.scale_buffer[0], srcdimi[0] * srcdimi[1] * sizeof(float) );
  410. CHECKED_MALLOC( rc->mbtree.scale_buffer[1], dstdimi[0] * srcdimi[1] * sizeof(float) );
  411. /* Allocate and calculate resize filter parameters and coefficients */
  412. for( int i = 0; i < 2; i++ )
  413. {
  414. if( srcdim[i] > dstdim[i] ) // downscale
  415. rc->mbtree.filtersize[i] = 1 + (2 * srcdimi[i] + dstdimi[i] - 1) / dstdimi[i];
  416. else // upscale
  417. rc->mbtree.filtersize[i] = 3;
  418. CHECKED_MALLOC( rc->mbtree.coeffs[i], rc->mbtree.filtersize[i] * dstdimi[i] * sizeof(float) );
  419. CHECKED_MALLOC( rc->mbtree.pos[i], dstdimi[i] * sizeof(int) );
  420. /* Initialize filter coefficients */
  421. float inc = srcdim[i] / dstdim[i];
  422. float dmul = inc > 1.f ? dstdim[i] / srcdim[i] : 1.f;
  423. float dstinsrc = 0.5f * inc - 0.5f;
  424. int filtersize = rc->mbtree.filtersize[i];
  425. for( int j = 0; j < dstdimi[i]; j++ )
  426. {
  427. int pos = dstinsrc - (filtersize - 2.f) * 0.5f;
  428. float sum = 0.0;
  429. rc->mbtree.pos[i][j] = pos;
  430. for( int k = 0; k < filtersize; k++ )
  431. {
  432. float d = fabs( pos + k - dstinsrc ) * dmul;
  433. float coeff = X264_MAX( 1.f - d, 0 );
  434. rc->mbtree.coeffs[i][j * filtersize + k] = coeff;
  435. sum += coeff;
  436. }
  437. sum = 1.0f / sum;
  438. for( int k = 0; k < filtersize; k++ )
  439. rc->mbtree.coeffs[i][j * filtersize + k] *= sum;
  440. dstinsrc += inc;
  441. }
  442. }
  443. /* Write back actual qp array dimensions */
  444. rc->mbtree.srcdim[0] = srcdimi[0];
  445. rc->mbtree.srcdim[1] = srcdimi[1];
  446. return 0;
  447. fail:
  448. return -1;
  449. }
  450. static void macroblock_tree_rescale_destroy( x264_ratecontrol_t *rc )
  451. {
  452. for( int i = 0; i < 2; i++ )
  453. {
  454. x264_free( rc->mbtree.qp_buffer[i] );
  455. x264_free( rc->mbtree.scale_buffer[i] );
  456. x264_free( rc->mbtree.coeffs[i] );
  457. x264_free( rc->mbtree.pos[i] );
  458. }
  459. }
  460. static ALWAYS_INLINE float tapfilter( float *src, int pos, int max, int stride, float *coeff, int filtersize )
  461. {
  462. float sum = 0.f;
  463. for( int i = 0; i < filtersize; i++, pos++ )
  464. sum += src[x264_clip3( pos, 0, max-1 )*stride] * coeff[i];
  465. return sum;
  466. }
  467. static void macroblock_tree_rescale( x264_t *h, x264_ratecontrol_t *rc, float *dst )
  468. {
  469. float *input, *output;
  470. int filtersize, stride, height;
  471. /* H scale first */
  472. input = rc->mbtree.scale_buffer[0];
  473. output = rc->mbtree.scale_buffer[1];
  474. filtersize = rc->mbtree.filtersize[0];
  475. stride = rc->mbtree.srcdim[0];
  476. height = rc->mbtree.srcdim[1];
  477. for( int y = 0; y < height; y++, input += stride, output += h->mb.i_mb_width )
  478. {
  479. float *coeff = rc->mbtree.coeffs[0];
  480. for( int x = 0; x < h->mb.i_mb_width; x++, coeff+=filtersize )
  481. output[x] = tapfilter( input, rc->mbtree.pos[0][x], stride, 1, coeff, filtersize );
  482. }
  483. /* V scale next */
  484. input = rc->mbtree.scale_buffer[1];
  485. output = dst;
  486. filtersize = rc->mbtree.filtersize[1];
  487. stride = h->mb.i_mb_width;
  488. height = rc->mbtree.srcdim[1];
  489. for( int x = 0; x < h->mb.i_mb_width; x++, input++, output++ )
  490. {
  491. float *coeff = rc->mbtree.coeffs[1];
  492. for( int y = 0; y < h->mb.i_mb_height; y++, coeff+=filtersize )
  493. output[y*stride] = tapfilter( input, rc->mbtree.pos[1][y], height, stride, coeff, filtersize );
  494. }
  495. }
  496. int x264_macroblock_tree_read( x264_t *h, x264_frame_t *frame, float *quant_offsets )
  497. {
  498. x264_ratecontrol_t *rc = h->rc;
  499. uint8_t i_type_actual = rc->entry[frame->i_frame].pict_type;
  500. if( rc->entry[frame->i_frame].kept_as_ref )
  501. {
  502. uint8_t i_type;
  503. if( rc->mbtree.qpbuf_pos < 0 )
  504. {
  505. do
  506. {
  507. rc->mbtree.qpbuf_pos++;
  508. if( !fread( &i_type, 1, 1, rc->p_mbtree_stat_file_in ) )
  509. goto fail;
  510. if( fread( rc->mbtree.qp_buffer[rc->mbtree.qpbuf_pos], sizeof(uint16_t), rc->mbtree.src_mb_count, rc->p_mbtree_stat_file_in ) != rc->mbtree.src_mb_count )
  511. goto fail;
  512. if( i_type != i_type_actual && rc->mbtree.qpbuf_pos == 1 )
  513. {
  514. x264_log( h, X264_LOG_ERROR, "MB-tree frametype %d doesn't match actual frametype %d.\n", i_type, i_type_actual );
  515. return -1;
  516. }
  517. } while( i_type != i_type_actual );
  518. }
  519. float *dst = rc->mbtree.rescale_enabled ? rc->mbtree.scale_buffer[0] : frame->f_qp_offset;
  520. h->mc.mbtree_fix8_unpack( dst, rc->mbtree.qp_buffer[rc->mbtree.qpbuf_pos], rc->mbtree.src_mb_count );
  521. if( rc->mbtree.rescale_enabled )
  522. macroblock_tree_rescale( h, rc, frame->f_qp_offset );
  523. if( h->frames.b_have_lowres )
  524. for( int i = 0; i < h->mb.i_mb_count; i++ )
  525. frame->i_inv_qscale_factor[i] = x264_exp2fix8( frame->f_qp_offset[i] );
  526. rc->mbtree.qpbuf_pos--;
  527. }
  528. else
  529. x264_adaptive_quant_frame( h, frame, quant_offsets );
  530. return 0;
  531. fail:
  532. x264_log( h, X264_LOG_ERROR, "Incomplete MB-tree stats file.\n" );
  533. return -1;
  534. }
  535. int x264_reference_build_list_optimal( x264_t *h )
  536. {
  537. ratecontrol_entry_t *rce = h->rc->rce;
  538. x264_frame_t *frames[16];
  539. x264_weight_t weights[16][3];
  540. int refcount[16];
  541. if( rce->refs != h->i_ref[0] )
  542. return -1;
  543. memcpy( frames, h->fref[0], sizeof(frames) );
  544. memcpy( refcount, rce->refcount, sizeof(refcount) );
  545. memcpy( weights, h->fenc->weight, sizeof(weights) );
  546. memset( &h->fenc->weight[1][0], 0, sizeof(x264_weight_t[15][3]) );
  547. /* For now don't reorder ref 0; it seems to lower quality
  548. in most cases due to skips. */
  549. for( int ref = 1; ref < h->i_ref[0]; ref++ )
  550. {
  551. int max = -1;
  552. int bestref = 1;
  553. for( int i = 1; i < h->i_ref[0]; i++ )
  554. /* Favor lower POC as a tiebreaker. */
  555. COPY2_IF_GT( max, refcount[i], bestref, i );
        /* FIXME: If there are duplicates from frames other than ref0 then it is possible
         * that the optimal ordering doesn't place every duplicate. */
  558. refcount[bestref] = -1;
  559. h->fref[0][ref] = frames[bestref];
  560. memcpy( h->fenc->weight[ref], weights[bestref], sizeof(weights[bestref]) );
  561. }
  562. return 0;
  563. }
  564. static char *strcat_filename( char *input, char *suffix )
  565. {
  566. char *output = x264_malloc( strlen( input ) + strlen( suffix ) + 1 );
  567. if( !output )
  568. return NULL;
  569. strcpy( output, input );
  570. strcat( output, suffix );
  571. return output;
  572. }
  573. void x264_ratecontrol_init_reconfigurable( x264_t *h, int b_init )
  574. {
  575. x264_ratecontrol_t *rc = h->rc;
  576. if( !b_init && rc->b_2pass )
  577. return;
  578. if( h->param.rc.i_rc_method == X264_RC_CRF )
  579. {
  580. /* Arbitrary rescaling to make CRF somewhat similar to QP.
  581. * Try to compensate for MB-tree's effects as well. */
  582. double base_cplx = h->mb.i_mb_count * (h->param.i_bframe ? 120 : 80);
  583. double mbtree_offset = h->param.rc.b_mb_tree ? (1.0-h->param.rc.f_qcompress)*13.5 : 0;
  584. rc->rate_factor_constant = pow( base_cplx, 1 - rc->qcompress )
  585. / qp2qscale( h->param.rc.f_rf_constant + mbtree_offset + QP_BD_OFFSET );
  586. }
  587. if( h->param.rc.i_vbv_max_bitrate > 0 && h->param.rc.i_vbv_buffer_size > 0 )
  588. {
  589. /* We don't support changing the ABR bitrate right now,
  590. so if the stream starts as CBR, keep it CBR. */
  591. if( rc->b_vbv_min_rate )
  592. h->param.rc.i_vbv_max_bitrate = h->param.rc.i_bitrate;
  593. if( h->param.rc.i_vbv_buffer_size < (int)(h->param.rc.i_vbv_max_bitrate / rc->fps) )
  594. {
  595. h->param.rc.i_vbv_buffer_size = h->param.rc.i_vbv_max_bitrate / rc->fps;
  596. x264_log( h, X264_LOG_WARNING, "VBV buffer size cannot be smaller than one frame, using %d kbit\n",
  597. h->param.rc.i_vbv_buffer_size );
  598. }
  599. int kilobit_size = h->param.i_avcintra_class ? 1024 : 1000;
  600. int vbv_buffer_size = h->param.rc.i_vbv_buffer_size * kilobit_size;
  601. int vbv_max_bitrate = h->param.rc.i_vbv_max_bitrate * kilobit_size;
  602. /* Init HRD */
  603. if( h->param.i_nal_hrd && b_init )
  604. {
  605. h->sps->vui.hrd.i_cpb_cnt = 1;
  606. h->sps->vui.hrd.b_cbr_hrd = h->param.i_nal_hrd == X264_NAL_HRD_CBR;
  607. h->sps->vui.hrd.i_time_offset_length = 0;
  608. #define BR_SHIFT 6
  609. #define CPB_SHIFT 4
  610. // normalize HRD size and rate to the value / scale notation
  611. h->sps->vui.hrd.i_bit_rate_scale = x264_clip3( x264_ctz( vbv_max_bitrate ) - BR_SHIFT, 0, 15 );
  612. h->sps->vui.hrd.i_bit_rate_value = vbv_max_bitrate >> ( h->sps->vui.hrd.i_bit_rate_scale + BR_SHIFT );
  613. h->sps->vui.hrd.i_bit_rate_unscaled = h->sps->vui.hrd.i_bit_rate_value << ( h->sps->vui.hrd.i_bit_rate_scale + BR_SHIFT );
  614. h->sps->vui.hrd.i_cpb_size_scale = x264_clip3( x264_ctz( vbv_buffer_size ) - CPB_SHIFT, 0, 15 );
  615. h->sps->vui.hrd.i_cpb_size_value = vbv_buffer_size >> ( h->sps->vui.hrd.i_cpb_size_scale + CPB_SHIFT );
  616. h->sps->vui.hrd.i_cpb_size_unscaled = h->sps->vui.hrd.i_cpb_size_value << ( h->sps->vui.hrd.i_cpb_size_scale + CPB_SHIFT );
  617. #undef CPB_SHIFT
  618. #undef BR_SHIFT
  619. // arbitrary
  620. #define MAX_DURATION 0.5
  621. int max_cpb_output_delay = X264_MIN( h->param.i_keyint_max * MAX_DURATION * h->sps->vui.i_time_scale / h->sps->vui.i_num_units_in_tick, INT_MAX );
  622. int max_dpb_output_delay = h->sps->vui.i_max_dec_frame_buffering * MAX_DURATION * h->sps->vui.i_time_scale / h->sps->vui.i_num_units_in_tick;
  623. int max_delay = (int)(90000.0 * (double)h->sps->vui.hrd.i_cpb_size_unscaled / h->sps->vui.hrd.i_bit_rate_unscaled + 0.5);
  624. h->sps->vui.hrd.i_initial_cpb_removal_delay_length = 2 + x264_clip3( 32 - x264_clz( max_delay ), 4, 22 );
  625. h->sps->vui.hrd.i_cpb_removal_delay_length = x264_clip3( 32 - x264_clz( max_cpb_output_delay ), 4, 31 );
  626. h->sps->vui.hrd.i_dpb_output_delay_length = x264_clip3( 32 - x264_clz( max_dpb_output_delay ), 4, 31 );
  627. #undef MAX_DURATION
  628. vbv_buffer_size = h->sps->vui.hrd.i_cpb_size_unscaled;
  629. vbv_max_bitrate = h->sps->vui.hrd.i_bit_rate_unscaled;
  630. }
  631. else if( h->param.i_nal_hrd && !b_init )
  632. {
  633. x264_log( h, X264_LOG_WARNING, "VBV parameters cannot be changed when NAL HRD is in use\n" );
  634. return;
  635. }
  636. h->sps->vui.hrd.i_bit_rate_unscaled = vbv_max_bitrate;
  637. h->sps->vui.hrd.i_cpb_size_unscaled = vbv_buffer_size;
  638. if( rc->b_vbv_min_rate )
  639. rc->bitrate = (double)h->param.rc.i_bitrate * kilobit_size;
  640. rc->buffer_rate = vbv_max_bitrate / rc->fps;
  641. rc->vbv_max_rate = vbv_max_bitrate;
  642. rc->buffer_size = vbv_buffer_size;
  643. rc->single_frame_vbv = rc->buffer_rate * 1.1 > rc->buffer_size;
  644. rc->cbr_decay = 1.0 - rc->buffer_rate / rc->buffer_size
  645. * 0.5 * X264_MAX(0, 1.5 - rc->buffer_rate * rc->fps / rc->bitrate);
  646. if( h->param.rc.i_rc_method == X264_RC_CRF && h->param.rc.f_rf_constant_max )
  647. {
  648. rc->rate_factor_max_increment = h->param.rc.f_rf_constant_max - h->param.rc.f_rf_constant;
  649. if( rc->rate_factor_max_increment <= 0 )
  650. {
  651. x264_log( h, X264_LOG_WARNING, "CRF max must be greater than CRF\n" );
  652. rc->rate_factor_max_increment = 0;
  653. }
  654. }
  655. if( b_init )
  656. {
  657. if( h->param.rc.f_vbv_buffer_init > 1. )
  658. h->param.rc.f_vbv_buffer_init = x264_clip3f( h->param.rc.f_vbv_buffer_init / h->param.rc.i_vbv_buffer_size, 0, 1 );
  659. h->param.rc.f_vbv_buffer_init = x264_clip3f( X264_MAX( h->param.rc.f_vbv_buffer_init, rc->buffer_rate / rc->buffer_size ), 0, 1);
  660. rc->buffer_fill_final =
  661. rc->buffer_fill_final_min = rc->buffer_size * h->param.rc.f_vbv_buffer_init * h->sps->vui.i_time_scale;
  662. rc->b_vbv = 1;
  663. rc->b_vbv_min_rate = !rc->b_2pass
  664. && h->param.rc.i_rc_method == X264_RC_ABR
  665. && h->param.rc.i_vbv_max_bitrate <= h->param.rc.i_bitrate;
  666. }
  667. }
  668. }
  669. int x264_ratecontrol_new( x264_t *h )
  670. {
  671. x264_ratecontrol_t *rc;
  672. x264_emms();
  673. CHECKED_MALLOCZERO( h->rc, h->param.i_threads * sizeof(x264_ratecontrol_t) );
  674. rc = h->rc;
  675. rc->b_abr = h->param.rc.i_rc_method != X264_RC_CQP && !h->param.rc.b_stat_read;
  676. rc->b_2pass = h->param.rc.i_rc_method == X264_RC_ABR && h->param.rc.b_stat_read;
  677. /* FIXME: use integers */
  678. if( h->param.i_fps_num > 0 && h->param.i_fps_den > 0 )
  679. rc->fps = (float) h->param.i_fps_num / h->param.i_fps_den;
  680. else
  681. rc->fps = 25.0;
  682. if( h->param.rc.b_mb_tree )
  683. {
  684. h->param.rc.f_pb_factor = 1;
  685. rc->qcompress = 1;
  686. }
  687. else
  688. rc->qcompress = h->param.rc.f_qcompress;
  689. rc->bitrate = h->param.rc.i_bitrate * (h->param.i_avcintra_class ? 1024. : 1000.);
  690. rc->rate_tolerance = h->param.rc.f_rate_tolerance;
  691. rc->nmb = h->mb.i_mb_count;
  692. rc->last_non_b_pict_type = -1;
  693. rc->cbr_decay = 1.0;
  694. if( h->param.rc.i_rc_method != X264_RC_ABR && h->param.rc.b_stat_read )
  695. {
  696. x264_log( h, X264_LOG_ERROR, "CRF/CQP is incompatible with 2pass.\n" );
  697. return -1;
  698. }
  699. x264_ratecontrol_init_reconfigurable( h, 1 );
  700. if( h->param.i_nal_hrd )
  701. {
  702. uint64_t denom = (uint64_t)h->sps->vui.hrd.i_bit_rate_unscaled * h->sps->vui.i_time_scale;
  703. uint64_t num = 90000;
  704. x264_reduce_fraction64( &num, &denom );
  705. rc->hrd_multiply_denom = 90000 / num;
  706. double bits_required = log2( num )
  707. + log2( h->sps->vui.i_time_scale )
  708. + log2( h->sps->vui.hrd.i_cpb_size_unscaled );
  709. if( bits_required >= 63 )
  710. {
  711. x264_log( h, X264_LOG_ERROR, "HRD with very large timescale and bufsize not supported\n" );
  712. return -1;
  713. }
  714. }
  715. if( rc->rate_tolerance < 0.01 )
  716. {
  717. x264_log( h, X264_LOG_WARNING, "bitrate tolerance too small, using .01\n" );
  718. rc->rate_tolerance = 0.01;
  719. }
  720. h->mb.b_variable_qp = rc->b_vbv || h->param.rc.i_aq_mode;
  721. if( rc->b_abr )
  722. {
  723. /* FIXME ABR_INIT_QP is actually used only in CRF */
  724. #define ABR_INIT_QP (( h->param.rc.i_rc_method == X264_RC_CRF ? h->param.rc.f_rf_constant : 24 ) + QP_BD_OFFSET)
  725. rc->accum_p_norm = .01;
  726. rc->accum_p_qp = ABR_INIT_QP * rc->accum_p_norm;
  727. /* estimated ratio that produces a reasonable QP for the first I-frame */
  728. rc->cplxr_sum = .01 * pow( 7.0e5, rc->qcompress ) * pow( h->mb.i_mb_count, 0.5 );
  729. rc->wanted_bits_window = 1.0 * rc->bitrate / rc->fps;
  730. rc->last_non_b_pict_type = SLICE_TYPE_I;
  731. }
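    /* f_ip_factor/f_pb_factor are ratios between quantizer scales; 6*log2() turns them into
     * QP offsets, matching qp2qscale()'s "qscale doubles every 6 QP" relationship. */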
  732. rc->ip_offset = 6.0 * log2f( h->param.rc.f_ip_factor );
  733. rc->pb_offset = 6.0 * log2f( h->param.rc.f_pb_factor );
  734. rc->qp_constant[SLICE_TYPE_P] = h->param.rc.i_qp_constant;
  735. rc->qp_constant[SLICE_TYPE_I] = x264_clip3( h->param.rc.i_qp_constant - rc->ip_offset + 0.5, 0, QP_MAX );
  736. rc->qp_constant[SLICE_TYPE_B] = x264_clip3( h->param.rc.i_qp_constant + rc->pb_offset + 0.5, 0, QP_MAX );
  737. h->mb.ip_offset = rc->ip_offset + 0.5;
  738. rc->lstep = pow( 2, h->param.rc.i_qp_step / 6.0 );
  739. rc->last_qscale = qp2qscale( 26 + QP_BD_OFFSET );
  740. int num_preds = h->param.b_sliced_threads * h->param.i_threads + 1;
  741. CHECKED_MALLOC( rc->pred, 5 * sizeof(predictor_t) * num_preds );
  742. CHECKED_MALLOC( rc->pred_b_from_p, sizeof(predictor_t) );
  743. static const float pred_coeff_table[3] = { 1.0, 1.0, 1.5 };
  744. for( int i = 0; i < 3; i++ )
  745. {
  746. rc->last_qscale_for[i] = qp2qscale( ABR_INIT_QP );
  747. rc->lmin[i] = qp2qscale( h->param.rc.i_qp_min );
  748. rc->lmax[i] = qp2qscale( h->param.rc.i_qp_max );
  749. for( int j = 0; j < num_preds; j++ )
  750. {
  751. rc->pred[i+j*5].coeff_min = pred_coeff_table[i] / 2;
  752. rc->pred[i+j*5].coeff = pred_coeff_table[i];
  753. rc->pred[i+j*5].count = 1.0;
  754. rc->pred[i+j*5].decay = 0.5;
  755. rc->pred[i+j*5].offset = 0.0;
  756. }
  757. for( int j = 0; j < 2; j++ )
  758. {
  759. rc->row_preds[i][j].coeff_min = .25 / 4;
  760. rc->row_preds[i][j].coeff = .25;
  761. rc->row_preds[i][j].count = 1.0;
  762. rc->row_preds[i][j].decay = 0.5;
  763. rc->row_preds[i][j].offset = 0.0;
  764. }
  765. }
  766. rc->pred_b_from_p->coeff_min = 0.5 / 2;
  767. rc->pred_b_from_p->coeff = 0.5;
  768. rc->pred_b_from_p->count = 1.0;
  769. rc->pred_b_from_p->decay = 0.5;
  770. rc->pred_b_from_p->offset = 0.0;
  771. if( parse_zones( h ) < 0 )
  772. {
  773. x264_log( h, X264_LOG_ERROR, "failed to parse zones\n" );
  774. return -1;
  775. }
  776. /* Load stat file and init 2pass algo */
  777. if( h->param.rc.b_stat_read )
  778. {
  779. char *p, *stats_in, *stats_buf;
  780. /* read 1st pass stats */
  781. assert( h->param.rc.psz_stat_in );
  782. stats_buf = stats_in = x264_slurp_file( h->param.rc.psz_stat_in );
  783. if( !stats_buf )
  784. {
  785. x264_log( h, X264_LOG_ERROR, "ratecontrol_init: can't open stats file\n" );
  786. return -1;
  787. }
  788. if( h->param.rc.b_mb_tree )
  789. {
  790. char *mbtree_stats_in = strcat_filename( h->param.rc.psz_stat_in, ".mbtree" );
  791. if( !mbtree_stats_in )
  792. return -1;
  793. rc->p_mbtree_stat_file_in = x264_fopen( mbtree_stats_in, "rb" );
  794. x264_free( mbtree_stats_in );
  795. if( !rc->p_mbtree_stat_file_in )
  796. {
  797. x264_log( h, X264_LOG_ERROR, "ratecontrol_init: can't open mbtree stats file\n" );
  798. return -1;
  799. }
  800. }
  801. /* check whether 1st pass options were compatible with current options */
  802. if( strncmp( stats_buf, "#options:", 9 ) )
  803. {
  804. x264_log( h, X264_LOG_ERROR, "options list in stats file not valid\n" );
  805. return -1;
  806. }
  807. float res_factor, res_factor_bits;
  808. {
  809. int i, j;
  810. uint32_t k, l;
  811. char *opts = stats_buf;
  812. stats_in = strchr( stats_buf, '\n' );
  813. if( !stats_in )
  814. return -1;
  815. *stats_in = '\0';
  816. stats_in++;
  817. if( sscanf( opts, "#options: %dx%d", &i, &j ) != 2 )
  818. {
  819. x264_log( h, X264_LOG_ERROR, "resolution specified in stats file not valid\n" );
  820. return -1;
  821. }
  822. else if( h->param.rc.b_mb_tree )
  823. {
  824. rc->mbtree.srcdim[0] = i;
  825. rc->mbtree.srcdim[1] = j;
  826. }
  827. res_factor = (float)h->param.i_width * h->param.i_height / (i*j);
  828. /* Change in bits relative to resolution isn't quite linear on typical sources,
  829. * so we'll at least try to roughly approximate this effect. */
  830. res_factor_bits = powf( res_factor, 0.7 );
  831. if( !( p = strstr( opts, "timebase=" ) ) || sscanf( p, "timebase=%u/%u", &k, &l ) != 2 )
  832. {
  833. x264_log( h, X264_LOG_ERROR, "timebase specified in stats file not valid\n" );
  834. return -1;
  835. }
  836. if( k != h->param.i_timebase_num || l != h->param.i_timebase_den )
  837. {
  838. x264_log( h, X264_LOG_ERROR, "timebase mismatch with 1st pass (%u/%u vs %u/%u)\n",
  839. h->param.i_timebase_num, h->param.i_timebase_den, k, l );
  840. return -1;
  841. }
  842. CMP_OPT_FIRST_PASS( "bitdepth", BIT_DEPTH );
  843. CMP_OPT_FIRST_PASS( "weightp", X264_MAX( 0, h->param.analyse.i_weighted_pred ) );
  844. CMP_OPT_FIRST_PASS( "bframes", h->param.i_bframe );
  845. CMP_OPT_FIRST_PASS( "b_pyramid", h->param.i_bframe_pyramid );
  846. CMP_OPT_FIRST_PASS( "intra_refresh", h->param.b_intra_refresh );
  847. CMP_OPT_FIRST_PASS( "open_gop", h->param.b_open_gop );
  848. CMP_OPT_FIRST_PASS( "bluray_compat", h->param.b_bluray_compat );
  849. CMP_OPT_FIRST_PASS( "mbtree", h->param.rc.b_mb_tree );
  850. if( (p = strstr( opts, "interlaced=" )) )
  851. {
  852. char *current = h->param.b_interlaced ? h->param.b_tff ? "tff" : "bff" : h->param.b_fake_interlaced ? "fake" : "0";
  853. char buf[5];
  854. sscanf( p, "interlaced=%4s", buf );
  855. if( strcmp( current, buf ) )
  856. {
  857. x264_log( h, X264_LOG_ERROR, "different interlaced setting than first pass (%s vs %s)\n", current, buf );
  858. return -1;
  859. }
  860. }
  861. if( (p = strstr( opts, "keyint=" )) )
  862. {
  863. p += 7;
  864. char buf[13] = "infinite ";
  865. if( h->param.i_keyint_max != X264_KEYINT_MAX_INFINITE )
  866. sprintf( buf, "%d ", h->param.i_keyint_max );
  867. if( strncmp( p, buf, strlen(buf) ) )
  868. {
  869. x264_log( h, X264_LOG_ERROR, "different keyint setting than first pass (%.*s vs %.*s)\n",
  870. strlen(buf)-1, buf, strcspn(p, " "), p );
  871. return -1;
  872. }
  873. }
  874. if( strstr( opts, "qp=0" ) && h->param.rc.i_rc_method == X264_RC_ABR )
  875. x264_log( h, X264_LOG_WARNING, "1st pass was lossless, bitrate prediction will be inaccurate\n" );
  876. if( !strstr( opts, "direct=3" ) && h->param.analyse.i_direct_mv_pred == X264_DIRECT_PRED_AUTO )
  877. {
  878. x264_log( h, X264_LOG_WARNING, "direct=auto not used on the first pass\n" );
  879. h->mb.b_direct_auto_write = 1;
  880. }
  881. if( ( p = strstr( opts, "b_adapt=" ) ) && sscanf( p, "b_adapt=%d", &i ) && i >= X264_B_ADAPT_NONE && i <= X264_B_ADAPT_TRELLIS )
  882. h->param.i_bframe_adaptive = i;
  883. else if( h->param.i_bframe )
  884. {
  885. x264_log( h, X264_LOG_ERROR, "b_adapt method specified in stats file not valid\n" );
  886. return -1;
  887. }
  888. if( (h->param.rc.b_mb_tree || h->param.rc.i_vbv_buffer_size) && ( p = strstr( opts, "rc_lookahead=" ) ) && sscanf( p, "rc_lookahead=%d", &i ) )
  889. h->param.rc.i_lookahead = i;
  890. }
  891. /* find number of pics */
  892. p = stats_in;
  893. int num_entries;
  894. for( num_entries = -1; p; num_entries++ )
  895. p = strchr( p + 1, ';' );
  896. if( !num_entries )
  897. {
  898. x264_log( h, X264_LOG_ERROR, "empty stats file\n" );
  899. return -1;
  900. }
  901. rc->num_entries = num_entries;
  902. if( h->param.i_frame_total < rc->num_entries && h->param.i_frame_total > 0 )
  903. {
  904. x264_log( h, X264_LOG_WARNING, "2nd pass has fewer frames than 1st pass (%d vs %d)\n",
  905. h->param.i_frame_total, rc->num_entries );
  906. }
  907. if( h->param.i_frame_total > rc->num_entries )
  908. {
  909. x264_log( h, X264_LOG_ERROR, "2nd pass has more frames than 1st pass (%d vs %d)\n",
  910. h->param.i_frame_total, rc->num_entries );
  911. return -1;
  912. }
  913. CHECKED_MALLOCZERO( rc->entry, rc->num_entries * sizeof(ratecontrol_entry_t) );
  914. CHECKED_MALLOC( rc->entry_out, rc->num_entries * sizeof(ratecontrol_entry_t*) );
  915. /* init all to skipped p frames */
  916. for( int i = 0; i < rc->num_entries; i++ )
  917. {
  918. ratecontrol_entry_t *rce = &rc->entry[i];
  919. rce->pict_type = SLICE_TYPE_P;
  920. rce->qscale = rce->new_qscale = qp2qscale( 20 + QP_BD_OFFSET );
  921. rce->misc_bits = rc->nmb + 10;
  922. rce->new_qp = 0;
  923. rc->entry_out[i] = rce;
  924. }
  925. /* read stats */
  926. p = stats_in;
  927. double total_qp_aq = 0;
  928. for( int i = 0; i < rc->num_entries; i++ )
  929. {
  930. ratecontrol_entry_t *rce;
  931. int frame_number = 0;
  932. int frame_out_number = 0;
  933. char pict_type = 0;
  934. int e;
  935. char *next;
  936. float qp_rc, qp_aq;
  937. int ref;
  938. next= strchr(p, ';');
  939. if( next )
  940. *next++ = 0; //sscanf is unbelievably slow on long strings
  941. e = sscanf( p, " in:%d out:%d ", &frame_number, &frame_out_number );
  942. if( frame_number < 0 || frame_number >= rc->num_entries )
  943. {
  944. x264_log( h, X264_LOG_ERROR, "bad frame number (%d) at stats line %d\n", frame_number, i );
  945. return -1;
  946. }
  947. if( frame_out_number < 0 || frame_out_number >= rc->num_entries )
  948. {
  949. x264_log( h, X264_LOG_ERROR, "bad frame output number (%d) at stats line %d\n", frame_out_number, i );
  950. return -1;
  951. }
  952. rce = &rc->entry[frame_number];
  953. rc->entry_out[frame_out_number] = rce;
  954. rce->direct_mode = 0;
  955. e += sscanf( p, " in:%*d out:%*d type:%c dur:%"SCNd64" cpbdur:%"SCNd64" q:%f aq:%f tex:%d mv:%d misc:%d imb:%d pmb:%d smb:%d d:%c",
  956. &pict_type, &rce->i_duration, &rce->i_cpb_duration, &qp_rc, &qp_aq, &rce->tex_bits,
  957. &rce->mv_bits, &rce->misc_bits, &rce->i_count, &rce->p_count,
  958. &rce->s_count, &rce->direct_mode );
  959. rce->tex_bits *= res_factor_bits;
  960. rce->mv_bits *= res_factor_bits;
  961. rce->misc_bits *= res_factor_bits;
  962. rce->i_count *= res_factor;
  963. rce->p_count *= res_factor;
  964. rce->s_count *= res_factor;
  965. p = strstr( p, "ref:" );
  966. if( !p )
  967. goto parse_error;
  968. p += 4;
  969. for( ref = 0; ref < 16; ref++ )
  970. {
  971. if( sscanf( p, " %d", &rce->refcount[ref] ) != 1 )
  972. break;
  973. p = strchr( p+1, ' ' );
  974. if( !p )
  975. goto parse_error;
  976. }
  977. rce->refs = ref;
  978. /* find weights */
  979. rce->i_weight_denom[0] = rce->i_weight_denom[1] = -1;
  980. char *w = strchr( p, 'w' );
  981. if( w )
  982. {
  983. int count = sscanf( w, "w:%hd,%hd,%hd,%hd,%hd,%hd,%hd,%hd",
  984. &rce->i_weight_denom[0], &rce->weight[0][0], &rce->weight[0][1],
  985. &rce->i_weight_denom[1], &rce->weight[1][0], &rce->weight[1][1],
  986. &rce->weight[2][0], &rce->weight[2][1] );
  987. if( count == 3 )
  988. rce->i_weight_denom[1] = -1;
  989. else if( count != 8 )
  990. rce->i_weight_denom[0] = rce->i_weight_denom[1] = -1;
  991. }
  992. if( pict_type != 'b' )
  993. rce->kept_as_ref = 1;
  994. switch( pict_type )
  995. {
  996. case 'I':
  997. rce->frame_type = X264_TYPE_IDR;
  998. rce->pict_type = SLICE_TYPE_I;
  999. break;
  1000. case 'i':
  1001. rce->frame_type = X264_TYPE_I;
  1002. rce->pict_type = SLICE_TYPE_I;
  1003. break;
  1004. case 'P':
  1005. rce->frame_type = X264_TYPE_P;
  1006. rce->pict_type = SLICE_TYPE_P;
  1007. break;
  1008. case 'B':
  1009. rce->frame_type = X264_TYPE_BREF;
  1010. rce->pict_type = SLICE_TYPE_B;
  1011. break;
  1012. case 'b':
  1013. rce->frame_type = X264_TYPE_B;
  1014. rce->pict_type = SLICE_TYPE_B;
  1015. break;
  1016. default: e = -1; break;
  1017. }
  1018. if( e < 14 )
  1019. {
  1020. parse_error:
  1021. x264_log( h, X264_LOG_ERROR, "statistics are damaged at line %d, parser out=%d\n", i, e );
  1022. return -1;
  1023. }
  1024. rce->qscale = qp2qscale( qp_rc );
  1025. total_qp_aq += qp_aq;
  1026. p = next;
  1027. }
  1028. if( !h->param.b_stitchable )
  1029. h->pps->i_pic_init_qp = SPEC_QP( (int)(total_qp_aq / rc->num_entries + 0.5) );
  1030. x264_free( stats_buf );
  1031. if( h->param.rc.i_rc_method == X264_RC_ABR )
  1032. {
  1033. if( init_pass2( h ) < 0 )
  1034. return -1;
  1035. } /* else we're using constant quant, so no need to run the bitrate allocation */
  1036. }
  1037. /* Open output file */
  1038. /* If input and output files are the same, output to a temp file
  1039. * and move it to the real name only when it's complete */
  1040. if( h->param.rc.b_stat_write )
  1041. {
  1042. char *p;
  1043. rc->psz_stat_file_tmpname = strcat_filename( h->param.rc.psz_stat_out, ".temp" );
  1044. if( !rc->psz_stat_file_tmpname )
  1045. return -1;
  1046. rc->p_stat_file_out = x264_fopen( rc->psz_stat_file_tmpname, "wb" );
  1047. if( rc->p_stat_file_out == NULL )
  1048. {
  1049. x264_log( h, X264_LOG_ERROR, "ratecontrol_init: can't open stats file\n" );
  1050. return -1;
  1051. }
  1052. p = x264_param2string( &h->param, 1 );
  1053. if( p )
  1054. fprintf( rc->p_stat_file_out, "#options: %s\n", p );
  1055. x264_free( p );
  1056. if( h->param.rc.b_mb_tree && !h->param.rc.b_stat_read )
  1057. {
  1058. rc->psz_mbtree_stat_file_tmpname = strcat_filename( h->param.rc.psz_stat_out, ".mbtree.temp" );
  1059. rc->psz_mbtree_stat_file_name = strcat_filename( h->param.rc.psz_stat_out, ".mbtree" );
  1060. if( !rc->psz_mbtree_stat_file_tmpname || !rc->psz_mbtree_stat_file_name )
  1061. return -1;
  1062. rc->p_mbtree_stat_file_out = x264_fopen( rc->psz_mbtree_stat_file_tmpname, "wb" );
  1063. if( rc->p_mbtree_stat_file_out == NULL )
  1064. {
  1065. x264_log( h, X264_LOG_ERROR, "ratecontrol_init: can't open mbtree stats file\n" );
  1066. return -1;
  1067. }
  1068. }
  1069. }
  1070. if( h->param.rc.b_mb_tree && (h->param.rc.b_stat_read || h->param.rc.b_stat_write) )
  1071. {
  1072. if( !h->param.rc.b_stat_read )
  1073. {
  1074. rc->mbtree.srcdim[0] = h->param.i_width;
  1075. rc->mbtree.srcdim[1] = h->param.i_height;
  1076. }
  1077. if( macroblock_tree_rescale_init( h, rc ) < 0 )
  1078. return -1;
  1079. }
  1080. for( int i = 0; i<h->param.i_threads; i++ )
  1081. {
  1082. h->thread[i]->rc = rc+i;
  1083. if( i )
  1084. {
  1085. rc[i] = rc[0];
  1086. h->thread[i]->param = h->param;
  1087. h->thread[i]->mb.b_variable_qp = h->mb.b_variable_qp;
  1088. h->thread[i]->mb.ip_offset = h->mb.ip_offset;
  1089. }
  1090. }
  1091. return 0;
  1092. fail:
  1093. return -1;
  1094. }
  1095. static int parse_zone( x264_t *h, x264_zone_t *z, char *p )
  1096. {
  1097. int len = 0;
  1098. char *tok, UNUSED *saveptr=NULL;
  1099. z->param = NULL;
  1100. z->f_bitrate_factor = 1;
  1101. if( 3 <= sscanf(p, "%d,%d,q=%d%n", &z->i_start, &z->i_end, &z->i_qp, &len) )
  1102. z->b_force_qp = 1;
  1103. else if( 3 <= sscanf(p, "%d,%d,b=%f%n", &z->i_start, &z->i_end, &z->f_bitrate_factor, &len) )
  1104. z->b_force_qp = 0;
  1105. else if( 2 <= sscanf(p, "%d,%d%n", &z->i_start, &z->i_end, &len) )
  1106. z->b_force_qp = 0;
  1107. else
  1108. {
  1109. x264_log( h, X264_LOG_ERROR, "invalid zone: \"%s\"\n", p );
  1110. return -1;
  1111. }
  1112. p += len;
  1113. if( !*p )
  1114. return 0;
  1115. CHECKED_MALLOC( z->param, sizeof(x264_param_t) );
  1116. memcpy( z->param, &h->param, sizeof(x264_param_t) );
  1117. z->param->param_free = x264_free;
  1118. while( (tok = strtok_r( p, ",", &saveptr )) )
  1119. {
  1120. char *val = strchr( tok, '=' );
  1121. if( val )
  1122. {
  1123. *val = '\0';
  1124. val++;
  1125. }
  1126. if( x264_param_parse( z->param, tok, val ) )
  1127. {
  1128. x264_log( h, X264_LOG_ERROR, "invalid zone param: %s = %s\n", tok, val );
  1129. return -1;
  1130. }
  1131. p = NULL;
  1132. }
  1133. return 0;
  1134. fail:
  1135. return -1;
  1136. }
  1137. static int parse_zones( x264_t *h )
  1138. {
  1139. x264_ratecontrol_t *rc = h->rc;
  1140. if( h->param.rc.psz_zones && !h->param.rc.i_zones )
  1141. {
  1142. char *psz_zones, *p;
  1143. CHECKED_MALLOC( psz_zones, strlen( h->param.rc.psz_zones )+1 );
  1144. strcpy( psz_zones, h->param.rc.psz_zones );
  1145. h->param.rc.i_zones = 1;
  1146. for( p = psz_zones; *p; p++ )
  1147. h->param.rc.i_zones += (*p == '/');
  1148. CHECKED_MALLOC( h->param.rc.zones, h->param.rc.i_zones * sizeof(x264_zone_t) );
  1149. p = psz_zones;
  1150. for( int i = 0; i < h->param.rc.i_zones; i++ )
  1151. {
  1152. int i_tok = strcspn( p, "/" );
  1153. p[i_tok] = 0;
  1154. if( parse_zone( h, &h->param.rc.zones[i], p ) )
  1155. {
  1156. x264_free( psz_zones );
  1157. return -1;
  1158. }
  1159. p += i_tok + 1;
  1160. }
  1161. x264_free( psz_zones );
  1162. }
  1163. if( h->param.rc.i_zones > 0 )
  1164. {
  1165. for( int i = 0; i < h->param.rc.i_zones; i++ )
  1166. {
  1167. x264_zone_t z = h->param.rc.zones[i];
  1168. if( z.i_start < 0 || z.i_start > z.i_end )
  1169. {
  1170. x264_log( h, X264_LOG_ERROR, "invalid zone: start=%d end=%d\n",
  1171. z.i_start, z.i_end );
  1172. return -1;
  1173. }
  1174. else if( !z.b_force_qp && z.f_bitrate_factor <= 0 )
  1175. {
  1176. x264_log( h, X264_LOG_ERROR, "invalid zone: bitrate_factor=%f\n",
  1177. z.f_bitrate_factor );
  1178. return -1;
  1179. }
  1180. }
  1181. rc->i_zones = h->param.rc.i_zones + 1;
  1182. CHECKED_MALLOC( rc->zones, rc->i_zones * sizeof(x264_zone_t) );
  1183. memcpy( rc->zones+1, h->param.rc.zones, (rc->i_zones-1) * sizeof(x264_zone_t) );
  1184. // default zone to fall back to if none of the others match
  1185. rc->zones[0].i_start = 0;
  1186. rc->zones[0].i_end = INT_MAX;
  1187. rc->zones[0].b_force_qp = 0;
  1188. rc->zones[0].f_bitrate_factor = 1;
  1189. CHECKED_MALLOC( rc->zones[0].param, sizeof(x264_param_t) );
  1190. memcpy( rc->zones[0].param, &h->param, sizeof(x264_param_t) );
  1191. for( int i = 1; i < rc->i_zones; i++ )
  1192. {
  1193. if( !rc->zones[i].param )
  1194. rc->zones[i].param = rc->zones[0].param;
  1195. }
  1196. }
  1197. return 0;
  1198. fail:
  1199. return -1;
  1200. }
  1201. static x264_zone_t *get_zone( x264_t *h, int frame_num )
  1202. {
  1203. for( int i = h->rc->i_zones - 1; i >= 0; i-- )
  1204. {
  1205. x264_zone_t *z = &h->rc->zones[i];
  1206. if( frame_num >= z->i_start && frame_num <= z->i_end )
  1207. return z;
  1208. }
  1209. return NULL;
  1210. }
  1211. void x264_ratecontrol_summary( x264_t *h )
  1212. {
  1213. x264_ratecontrol_t *rc = h->rc;
  1214. if( rc->b_abr && h->param.rc.i_rc_method == X264_RC_ABR && rc->cbr_decay > .9999 )
  1215. {
  1216. double base_cplx = h->mb.i_mb_count * (h->param.i_bframe ? 120 : 80);
  1217. double mbtree_offset = h->param.rc.b_mb_tree ? (1.0-h->param.rc.f_qcompress)*13.5 : 0;
  1218. x264_log( h, X264_LOG_INFO, "final ratefactor: %.2f\n",
  1219. qscale2qp( pow( base_cplx, 1 - rc->qcompress )
  1220. * rc->cplxr_sum / rc->wanted_bits_window ) - mbtree_offset - QP_BD_OFFSET );
  1221. }
  1222. }
  1223. void x264_ratecontrol_delete( x264_t *h )
  1224. {
  1225. x264_ratecontrol_t *rc = h->rc;
  1226. int b_regular_file;
  1227. if( rc->p_stat_file_out )
  1228. {
  1229. b_regular_file = x264_is_regular_file( rc->p_stat_file_out );
  1230. fclose( rc->p_stat_file_out );
  1231. if( h->i_frame >= rc->num_entries && b_regular_file )
  1232. if( x264_rename( rc->psz_stat_file_tmpname, h->param.rc.psz_stat_out ) != 0 )
  1233. {
  1234. x264_log( h, X264_LOG_ERROR, "failed to rename \"%s\" to \"%s\"\n",
  1235. rc->psz_stat_file_tmpname, h->param.rc.psz_stat_out );
  1236. }
  1237. x264_free( rc->psz_stat_file_tmpname );
  1238. }
  1239. if( rc->p_mbtree_stat_file_out )
  1240. {
  1241. b_regular_file = x264_is_regular_file( rc->p_mbtree_stat_file_out );
  1242. fclose( rc->p_mbtree_stat_file_out );
  1243. if( h->i_frame >= rc->num_entries && b_regular_file )
  1244. if( x264_rename( rc->psz_mbtree_stat_file_tmpname, rc->psz_mbtree_stat_file_name ) != 0 )
  1245. {
  1246. x264_log( h, X264_LOG_ERROR, "failed to rename \"%s\" to \"%s\"\n",
  1247. rc->psz_mbtree_stat_file_tmpname, rc->psz_mbtree_stat_file_name );
  1248. }
  1249. x264_free( rc->psz_mbtree_stat_file_tmpname );
  1250. x264_free( rc->psz_mbtree_stat_file_name );
  1251. }
  1252. if( rc->p_mbtree_stat_file_in )
  1253. fclose( rc->p_mbtree_stat_file_in );
  1254. x264_free( rc->pred );
  1255. x264_free( rc->pred_b_from_p );
  1256. x264_free( rc->entry );
  1257. x264_free( rc->entry_out );
  1258. macroblock_tree_rescale_destroy( rc );
  1259. if( rc->zones )
  1260. {
  1261. x264_free( rc->zones[0].param );
  1262. for( int i = 1; i < rc->i_zones; i++ )
  1263. if( rc->zones[i].param != rc->zones[0].param && rc->zones[i].param->param_free )
  1264. rc->zones[i].param->param_free( rc->zones[i].param );
  1265. x264_free( rc->zones );
  1266. }
  1267. x264_free( rc );
  1268. }
  1269. static void accum_p_qp_update( x264_t *h, float qp )
  1270. {
  1271. x264_ratecontrol_t *rc = h->rc;
  1272. rc->accum_p_qp *= .95;
  1273. rc->accum_p_norm *= .95;
  1274. rc->accum_p_norm += 1;
  1275. if( h->sh.i_type == SLICE_TYPE_I )
  1276. rc->accum_p_qp += qp + rc->ip_offset;
  1277. else
  1278. rc->accum_p_qp += qp;
  1279. }
  1280. void x264_ratecontrol_zone_init( x264_t *h )
  1281. {
  1282. x264_ratecontrol_t *rc = h->rc;
  1283. x264_zone_t *zone = get_zone( h, h->fenc->i_frame );
  1284. if( zone && (!rc->prev_zone || zone->param != rc->prev_zone->param) )
  1285. x264_encoder_reconfig_apply( h, zone->param );
  1286. rc->prev_zone = zone;
  1287. }
  1288. /* Before encoding a frame, choose a QP for it */
  1289. void x264_ratecontrol_start( x264_t *h, int i_force_qp, int overhead )
  1290. {
  1291. x264_ratecontrol_t *rc = h->rc;
  1292. ratecontrol_entry_t *rce = NULL;
  1293. x264_zone_t *zone = get_zone( h, h->fenc->i_frame );
  1294. float q;
  1295. x264_emms();
  1296. if( h->param.rc.b_stat_read )
  1297. {
  1298. int frame = h->fenc->i_frame;
  1299. assert( frame >= 0 && frame < rc->num_entries );
  1300. rce = h->rc->rce = &h->rc->entry[frame];
  1301. if( h->sh.i_type == SLICE_TYPE_B
  1302. && h->param.analyse.i_direct_mv_pred == X264_DIRECT_PRED_AUTO )
  1303. {
  1304. h->sh.b_direct_spatial_mv_pred = ( rce->direct_mode == 's' );
  1305. h->mb.b_direct_auto_read = ( rce->direct_mode == 's' || rce->direct_mode == 't' );
  1306. }
  1307. }
  1308. if( rc->b_vbv )
  1309. {
  1310. memset( h->fdec->i_row_bits, 0, h->mb.i_mb_height * sizeof(int) );
  1311. memset( h->fdec->f_row_qp, 0, h->mb.i_mb_height * sizeof(float) );
  1312. memset( h->fdec->f_row_qscale, 0, h->mb.i_mb_height * sizeof(float) );
  1313. rc->row_pred = rc->row_preds[h->sh.i_type];
  1314. rc->buffer_rate = h->fenc->i_cpb_duration * rc->vbv_max_rate * h->sps->vui.i_num_units_in_tick / h->sps->vui.i_time_scale;
  1315. update_vbv_plan( h, overhead );
  1316. const x264_level_t *l = x264_levels;
  1317. while( l->level_idc != 0 && l->level_idc != h->param.i_level_idc )
  1318. l++;
  1319. int mincr = l->mincr;
  1320. if( h->param.b_bluray_compat )
  1321. mincr = 4;
  1322. /* Profiles above High don't require minCR, so just set the maximum to a large value. */
  1323. if( h->sps->i_profile_idc > PROFILE_HIGH )
  1324. rc->frame_size_maximum = 1e9;
  1325. else
  1326. {
  1327. /* The spec has a bizarre special case for the first frame. */
  1328. if( h->i_frame == 0 )
  1329. {
  1330. //384 * ( Max( PicSizeInMbs, fR * MaxMBPS ) + MaxMBPS * ( tr( 0 ) - tr,n( 0 ) ) ) / MinCR
  1331. double fr = 1. / (h->param.i_level_idc >= 60 ? 300 : 172);
  1332. int pic_size_in_mbs = h->mb.i_mb_width * h->mb.i_mb_height;
  1333. rc->frame_size_maximum = 384 * BIT_DEPTH * X264_MAX( pic_size_in_mbs, fr*l->mbps ) / mincr;
  1334. }
  1335. else
  1336. {
  1337. //384 * MaxMBPS * ( tr( n ) - tr( n - 1 ) ) / MinCR
  1338. rc->frame_size_maximum = 384 * BIT_DEPTH * ((double)h->fenc->i_cpb_duration * h->sps->vui.i_num_units_in_tick / h->sps->vui.i_time_scale) * l->mbps / mincr;
  1339. }
  1340. }
  1341. }
  1342. if( h->sh.i_type != SLICE_TYPE_B )
  1343. rc->bframes = h->fenc->i_bframes;
  1344. if( rc->b_abr )
  1345. {
  1346. q = qscale2qp( rate_estimate_qscale( h ) );
  1347. }
  1348. else if( rc->b_2pass )
  1349. {
  1350. rce->new_qscale = rate_estimate_qscale( h );
  1351. q = qscale2qp( rce->new_qscale );
  1352. }
  1353. else /* CQP */
  1354. {
  1355. if( h->sh.i_type == SLICE_TYPE_B && h->fdec->b_kept_as_ref )
  1356. q = ( rc->qp_constant[ SLICE_TYPE_B ] + rc->qp_constant[ SLICE_TYPE_P ] ) / 2;
  1357. else
  1358. q = rc->qp_constant[ h->sh.i_type ];
  1359. if( zone )
  1360. {
  1361. if( zone->b_force_qp )
  1362. q += zone->i_qp - rc->qp_constant[SLICE_TYPE_P];
  1363. else
  1364. q -= 6*log2f( zone->f_bitrate_factor );
  1365. }
  1366. }
  1367. if( i_force_qp != X264_QP_AUTO )
  1368. q = i_force_qp - 1;
  1369. q = x264_clip3f( q, h->param.rc.i_qp_min, h->param.rc.i_qp_max );
  1370. rc->qpa_rc = rc->qpa_rc_prev =
  1371. rc->qpa_aq = rc->qpa_aq_prev = 0;
  1372. h->fdec->f_qp_avg_rc =
  1373. h->fdec->f_qp_avg_aq =
  1374. rc->qpm = q;
  1375. if( rce )
  1376. rce->new_qp = q;
  1377. accum_p_qp_update( h, rc->qpm );
  1378. if( h->sh.i_type != SLICE_TYPE_B )
  1379. rc->last_non_b_pict_type = h->sh.i_type;
  1380. }
  1381. static float predict_row_size( x264_t *h, int y, float qscale )
  1382. {
  1383. /* average between two predictors:
  1384. * absolute SATD, and scaled bit cost of the colocated row in the previous frame */
  1385. x264_ratecontrol_t *rc = h->rc;
  1386. float pred_s = predict_size( &rc->row_pred[0], qscale, h->fdec->i_row_satd[y] );
  1387. if( h->sh.i_type == SLICE_TYPE_I || qscale >= h->fref[0][0]->f_row_qscale[y] )
  1388. {
  1389. if( h->sh.i_type == SLICE_TYPE_P
  1390. && h->fref[0][0]->i_type == h->fdec->i_type
  1391. && h->fref[0][0]->f_row_qscale[y] > 0
  1392. && h->fref[0][0]->i_row_satd[y] > 0
  1393. && (abs(h->fref[0][0]->i_row_satd[y] - h->fdec->i_row_satd[y]) < h->fdec->i_row_satd[y]/2))
  1394. {
  1395. float pred_t = h->fref[0][0]->i_row_bits[y] * h->fdec->i_row_satd[y] / h->fref[0][0]->i_row_satd[y]
  1396. * h->fref[0][0]->f_row_qscale[y] / qscale;
  1397. return (pred_s + pred_t) * 0.5f;
  1398. }
  1399. return pred_s;
  1400. }
  1401. /* Our QP is lower than the reference! */
  1402. else
  1403. {
  1404. float pred_intra = predict_size( &rc->row_pred[1], qscale, h->fdec->i_row_satds[0][0][y] );
  1405. /* Sum: better to overestimate than underestimate by using only one of the two predictors. */
  1406. return pred_intra + pred_s;
  1407. }
  1408. }
  1409. static int row_bits_so_far( x264_t *h, int y )
  1410. {
  1411. int bits = 0;
  1412. for( int i = h->i_threadslice_start; i <= y; i++ )
  1413. bits += h->fdec->i_row_bits[i];
  1414. return bits;
  1415. }
  1416. static float predict_row_size_to_end( x264_t *h, int y, float qp )
  1417. {
  1418. float qscale = qp2qscale( qp );
  1419. float bits = 0;
  1420. for( int i = y+1; i < h->i_threadslice_end; i++ )
  1421. bits += predict_row_size( h, i, qscale );
  1422. return bits;
  1423. }
  1424. /* TODO:
  1425. * eliminate all use of qp in row ratecontrol: make it entirely qscale-based.
  1426. * make this function stop being needlessly O(N^2)
  1427. * update more often than once per row? */
  1428. int x264_ratecontrol_mb( x264_t *h, int bits )
  1429. {
  1430. x264_ratecontrol_t *rc = h->rc;
  1431. const int y = h->mb.i_mb_y;
  1432. h->fdec->i_row_bits[y] += bits;
  1433. rc->qpa_aq += h->mb.i_qp;
  1434. if( h->mb.i_mb_x != h->mb.i_mb_width - 1 )
  1435. return 0;
  1436. x264_emms();
  1437. rc->qpa_rc += rc->qpm * h->mb.i_mb_width;
  1438. if( !rc->b_vbv )
  1439. return 0;
  1440. float qscale = qp2qscale( rc->qpm );
  1441. h->fdec->f_row_qp[y] = rc->qpm;
  1442. h->fdec->f_row_qscale[y] = qscale;
  1443. update_predictor( &rc->row_pred[0], qscale, h->fdec->i_row_satd[y], h->fdec->i_row_bits[y] );
  1444. if( h->sh.i_type != SLICE_TYPE_I && rc->qpm < h->fref[0][0]->f_row_qp[y] )
  1445. update_predictor( &rc->row_pred[1], qscale, h->fdec->i_row_satds[0][0][y], h->fdec->i_row_bits[y] );
  1446. /* update ratecontrol per-mbpair in MBAFF */
  1447. if( SLICE_MBAFF && !(y&1) )
  1448. return 0;
  1449. /* FIXME: We don't currently support the case where there's a slice
  1450. * boundary in between. */
  1451. int can_reencode_row = h->sh.i_first_mb <= ((h->mb.i_mb_y - SLICE_MBAFF) * h->mb.i_mb_stride);
  1452. /* tweak quality based on difference from predicted size */
  1453. float prev_row_qp = h->fdec->f_row_qp[y];
  1454. float qp_absolute_max = h->param.rc.i_qp_max;
  1455. if( rc->rate_factor_max_increment )
  1456. qp_absolute_max = X264_MIN( qp_absolute_max, rc->qp_novbv + rc->rate_factor_max_increment );
  1457. float qp_max = X264_MIN( prev_row_qp + h->param.rc.i_qp_step, qp_absolute_max );
  1458. float qp_min = X264_MAX( prev_row_qp - h->param.rc.i_qp_step, h->param.rc.i_qp_min );
  1459. float step_size = 0.5f;
  1460. float slice_size_planned = h->param.b_sliced_threads ? rc->slice_size_planned : rc->frame_size_planned;
  1461. float bits_so_far = row_bits_so_far( h, y );
  1462. float max_frame_error = x264_clip3f( 1.0 / h->mb.i_mb_height, 0.05, 0.25 );
  1463. float max_frame_size = rc->frame_size_maximum - rc->frame_size_maximum * max_frame_error;
  1464. max_frame_size = X264_MIN( max_frame_size, rc->buffer_fill - rc->buffer_rate * max_frame_error );
  1465. float size_of_other_slices = 0;
  1466. if( h->param.b_sliced_threads )
  1467. {
  1468. float size_of_other_slices_planned = 0;
  1469. for( int i = 0; i < h->param.i_threads; i++ )
  1470. if( h != h->thread[i] )
  1471. {
  1472. size_of_other_slices += h->thread[i]->rc->frame_size_estimated;
  1473. size_of_other_slices_planned += h->thread[i]->rc->slice_size_planned;
  1474. }
  1475. float weight = rc->slice_size_planned / rc->frame_size_planned;
  1476. size_of_other_slices = (size_of_other_slices - size_of_other_slices_planned) * weight + size_of_other_slices_planned;
  1477. }
  1478. if( y < h->i_threadslice_end-1 )
  1479. {
  1480. /* B-frames shouldn't use lower QP than their reference frames. */
  1481. if( h->sh.i_type == SLICE_TYPE_B )
  1482. {
  1483. qp_min = X264_MAX( qp_min, X264_MAX( h->fref[0][0]->f_row_qp[y+1], h->fref[1][0]->f_row_qp[y+1] ) );
  1484. rc->qpm = X264_MAX( rc->qpm, qp_min );
  1485. }
  1486. float buffer_left_planned = rc->buffer_fill - rc->frame_size_planned;
  1487. buffer_left_planned = X264_MAX( buffer_left_planned, 0.f );
  1488. /* More threads means we have to be more cautious in letting ratecontrol use up extra bits. */
  1489. float rc_tol = buffer_left_planned / h->param.i_threads * rc->rate_tolerance;
  1490. float b1 = bits_so_far + predict_row_size_to_end( h, y, rc->qpm ) + size_of_other_slices;
  1491. float trust_coeff = x264_clip3f( bits_so_far / slice_size_planned, 0.0, 1.0 );
  1492. /* Don't increase the row QPs until a sufficent amount of the bits of the frame have been processed, in case a flat */
  1493. /* area at the top of the frame was measured inaccurately. */
  1494. if( trust_coeff < 0.05f )
  1495. qp_max = qp_absolute_max = prev_row_qp;
  1496. if( h->sh.i_type != SLICE_TYPE_I )
  1497. rc_tol *= 0.5f;
  1498. if( !rc->b_vbv_min_rate )
  1499. qp_min = X264_MAX( qp_min, rc->qp_novbv );
  1500. while( rc->qpm < qp_max
  1501. && ((b1 > rc->frame_size_planned + rc_tol) ||
  1502. (b1 > rc->frame_size_planned && rc->qpm < rc->qp_novbv) ||
  1503. (b1 > rc->buffer_fill - buffer_left_planned * 0.5f)) )
  1504. {
  1505. rc->qpm += step_size;
  1506. b1 = bits_so_far + predict_row_size_to_end( h, y, rc->qpm ) + size_of_other_slices;
  1507. }
  1508. float b_max = b1 + ((rc->buffer_fill - rc->buffer_size + rc->buffer_rate) * 0.90f - b1) * trust_coeff;
  1509. rc->qpm -= step_size;
  1510. float b2 = bits_so_far + predict_row_size_to_end( h, y, rc->qpm ) + size_of_other_slices;
  1511. while( rc->qpm > qp_min && rc->qpm < prev_row_qp
  1512. && (rc->qpm > h->fdec->f_row_qp[0] || rc->single_frame_vbv)
  1513. && (b2 < max_frame_size)
  1514. && ((b2 < rc->frame_size_planned * 0.8f) || (b2 < b_max)) )
  1515. {
  1516. b1 = b2;
  1517. rc->qpm -= step_size;
  1518. b2 = bits_so_far + predict_row_size_to_end( h, y, rc->qpm ) + size_of_other_slices;
  1519. }
  1520. rc->qpm += step_size;
  1521. /* avoid VBV underflow or MinCR violation */
  1522. while( rc->qpm < qp_absolute_max && (b1 > max_frame_size) )
  1523. {
  1524. rc->qpm += step_size;
  1525. b1 = bits_so_far + predict_row_size_to_end( h, y, rc->qpm ) + size_of_other_slices;
  1526. }
  1527. h->rc->frame_size_estimated = b1 - size_of_other_slices;
  1528. /* If the current row was large enough to cause a large QP jump, try re-encoding it. */
  1529. if( rc->qpm > qp_max && prev_row_qp < qp_max && can_reencode_row )
  1530. {
  1531. /* Bump QP to halfway in between... close enough. */
  1532. rc->qpm = x264_clip3f( (prev_row_qp + rc->qpm)*0.5f, prev_row_qp + 1.0f, qp_max );
  1533. rc->qpa_rc = rc->qpa_rc_prev;
  1534. rc->qpa_aq = rc->qpa_aq_prev;
  1535. h->fdec->i_row_bits[y] = 0;
  1536. h->fdec->i_row_bits[y-SLICE_MBAFF] = 0;
  1537. return -1;
  1538. }
  1539. }
  1540. else
  1541. {
  1542. h->rc->frame_size_estimated = bits_so_far;
  1543. /* Last-ditch attempt: if the last row of the frame underflowed the VBV,
  1544. * try again. */
  1545. if( rc->qpm < qp_max && can_reencode_row
  1546. && (bits_so_far + size_of_other_slices > X264_MIN( rc->frame_size_maximum, rc->buffer_fill )) )
  1547. {
  1548. rc->qpm = qp_max;
  1549. rc->qpa_rc = rc->qpa_rc_prev;
  1550. rc->qpa_aq = rc->qpa_aq_prev;
  1551. h->fdec->i_row_bits[y] = 0;
  1552. h->fdec->i_row_bits[y-SLICE_MBAFF] = 0;
  1553. return -1;
  1554. }
  1555. }
  1556. rc->qpa_rc_prev = rc->qpa_rc;
  1557. rc->qpa_aq_prev = rc->qpa_aq;
  1558. return 0;
  1559. }
  1560. int x264_ratecontrol_qp( x264_t *h )
  1561. {
  1562. x264_emms();
  1563. return x264_clip3( h->rc->qpm + 0.5f, h->param.rc.i_qp_min, h->param.rc.i_qp_max );
  1564. }
  1565. int x264_ratecontrol_mb_qp( x264_t *h )
  1566. {
  1567. x264_emms();
  1568. float qp = h->rc->qpm;
  1569. if( h->param.rc.i_aq_mode )
  1570. {
  1571. /* MB-tree currently doesn't adjust quantizers in unreferenced frames. */
  1572. float qp_offset = h->fdec->b_kept_as_ref ? h->fenc->f_qp_offset[h->mb.i_mb_xy] : h->fenc->f_qp_offset_aq[h->mb.i_mb_xy];
  1573. /* Scale AQ's effect towards zero in emergency mode. */
  1574. if( qp > QP_MAX_SPEC )
  1575. qp_offset *= (QP_MAX - qp) / (QP_MAX - QP_MAX_SPEC);
  1576. qp += qp_offset;
  1577. }
  1578. return x264_clip3( qp + 0.5f, h->param.rc.i_qp_min, h->param.rc.i_qp_max );
  1579. }
  1580. /* In 2pass, force the same frame types as in the 1st pass */
  1581. int x264_ratecontrol_slice_type( x264_t *h, int frame_num )
  1582. {
  1583. x264_ratecontrol_t *rc = h->rc;
  1584. if( h->param.rc.b_stat_read )
  1585. {
  1586. if( frame_num >= rc->num_entries )
  1587. {
  1588. /* We could try to initialize everything required for ABR and
  1589. * adaptive B-frames, but that would be complicated.
  1590. * So just calculate the average QP used so far. */
  1591. h->param.rc.i_qp_constant = (h->stat.i_frame_count[SLICE_TYPE_P] == 0) ? 24 + QP_BD_OFFSET
  1592. : 1 + h->stat.f_frame_qp[SLICE_TYPE_P] / h->stat.i_frame_count[SLICE_TYPE_P];
  1593. rc->qp_constant[SLICE_TYPE_P] = x264_clip3( h->param.rc.i_qp_constant, 0, QP_MAX );
  1594. rc->qp_constant[SLICE_TYPE_I] = x264_clip3( (int)( qscale2qp( qp2qscale( h->param.rc.i_qp_constant ) / fabs( h->param.rc.f_ip_factor )) + 0.5 ), 0, QP_MAX );
  1595. rc->qp_constant[SLICE_TYPE_B] = x264_clip3( (int)( qscale2qp( qp2qscale( h->param.rc.i_qp_constant ) * fabs( h->param.rc.f_pb_factor )) + 0.5 ), 0, QP_MAX );
  1596. x264_log( h, X264_LOG_ERROR, "2nd pass has more frames than 1st pass (%d)\n", rc->num_entries );
  1597. x264_log( h, X264_LOG_ERROR, "continuing anyway, at constant QP=%d\n", h->param.rc.i_qp_constant );
  1598. if( h->param.i_bframe_adaptive )
  1599. x264_log( h, X264_LOG_ERROR, "disabling adaptive B-frames\n" );
  1600. for( int i = 0; i < h->param.i_threads; i++ )
  1601. {
  1602. h->thread[i]->rc->b_abr = 0;
  1603. h->thread[i]->rc->b_2pass = 0;
  1604. h->thread[i]->param.rc.i_rc_method = X264_RC_CQP;
  1605. h->thread[i]->param.rc.b_stat_read = 0;
  1606. h->thread[i]->param.i_bframe_adaptive = 0;
  1607. h->thread[i]->param.i_scenecut_threshold = 0;
  1608. h->thread[i]->param.rc.b_mb_tree = 0;
  1609. if( h->thread[i]->param.i_bframe > 1 )
  1610. h->thread[i]->param.i_bframe = 1;
  1611. }
  1612. return X264_TYPE_AUTO;
  1613. }
  1614. return rc->entry[frame_num].frame_type;
  1615. }
  1616. else
  1617. return X264_TYPE_AUTO;
  1618. }
  1619. void x264_ratecontrol_set_weights( x264_t *h, x264_frame_t *frm )
  1620. {
  1621. ratecontrol_entry_t *rce = &h->rc->entry[frm->i_frame];
  1622. if( h->param.analyse.i_weighted_pred <= 0 )
  1623. return;
  1624. if( rce->i_weight_denom[0] >= 0 )
  1625. SET_WEIGHT( frm->weight[0][0], 1, rce->weight[0][0], rce->i_weight_denom[0], rce->weight[0][1] );
  1626. if( rce->i_weight_denom[1] >= 0 )
  1627. {
  1628. SET_WEIGHT( frm->weight[0][1], 1, rce->weight[1][0], rce->i_weight_denom[1], rce->weight[1][1] );
  1629. SET_WEIGHT( frm->weight[0][2], 1, rce->weight[2][0], rce->i_weight_denom[1], rce->weight[2][1] );
  1630. }
  1631. }
  1632. /* After encoding one frame, save stats and update ratecontrol state */
  1633. int x264_ratecontrol_end( x264_t *h, int bits, int *filler )
  1634. {
  1635. x264_ratecontrol_t *rc = h->rc;
  1636. const int *mbs = h->stat.frame.i_mb_count;
  1637. x264_emms();
  1638. h->stat.frame.i_mb_count_skip = mbs[P_SKIP] + mbs[B_SKIP];
  1639. h->stat.frame.i_mb_count_i = mbs[I_16x16] + mbs[I_8x8] + mbs[I_4x4];
  1640. h->stat.frame.i_mb_count_p = mbs[P_L0] + mbs[P_8x8];
  1641. for( int i = B_DIRECT; i < B_8x8; i++ )
  1642. h->stat.frame.i_mb_count_p += mbs[i];
  1643. h->fdec->f_qp_avg_rc = rc->qpa_rc /= h->mb.i_mb_count;
  1644. h->fdec->f_qp_avg_aq = (float)rc->qpa_aq / h->mb.i_mb_count;
  1645. h->fdec->f_crf_avg = h->param.rc.f_rf_constant + h->fdec->f_qp_avg_rc - rc->qp_novbv;
  1646. if( h->param.rc.b_stat_write )
  1647. {
  1648. char c_type = h->sh.i_type==SLICE_TYPE_I ? (h->fenc->i_poc==0 ? 'I' : 'i')
  1649. : h->sh.i_type==SLICE_TYPE_P ? 'P'
  1650. : h->fenc->b_kept_as_ref ? 'B' : 'b';
  1651. int dir_frame = h->stat.frame.i_direct_score[1] - h->stat.frame.i_direct_score[0];
  1652. int dir_avg = h->stat.i_direct_score[1] - h->stat.i_direct_score[0];
  1653. char c_direct = h->mb.b_direct_auto_write ?
  1654. ( dir_frame>0 ? 's' : dir_frame<0 ? 't' :
  1655. dir_avg>0 ? 's' : dir_avg<0 ? 't' : '-' )
  1656. : '-';
  1657. if( fprintf( rc->p_stat_file_out,
  1658. "in:%d out:%d type:%c dur:%"PRId64" cpbdur:%"PRId64" q:%.2f aq:%.2f tex:%d mv:%d misc:%d imb:%d pmb:%d smb:%d d:%c ref:",
  1659. h->fenc->i_frame, h->i_frame,
  1660. c_type, h->fenc->i_duration,
  1661. h->fenc->i_cpb_duration,
  1662. rc->qpa_rc, h->fdec->f_qp_avg_aq,
  1663. h->stat.frame.i_tex_bits,
  1664. h->stat.frame.i_mv_bits,
  1665. h->stat.frame.i_misc_bits,
  1666. h->stat.frame.i_mb_count_i,
  1667. h->stat.frame.i_mb_count_p,
  1668. h->stat.frame.i_mb_count_skip,
  1669. c_direct) < 0 )
  1670. goto fail;
  1671. /* Only write information for reference reordering once. */
  1672. int use_old_stats = h->param.rc.b_stat_read && rc->rce->refs > 1;
  1673. for( int i = 0; i < (use_old_stats ? rc->rce->refs : h->i_ref[0]); i++ )
  1674. {
  1675. int refcount = use_old_stats ? rc->rce->refcount[i]
  1676. : PARAM_INTERLACED ? h->stat.frame.i_mb_count_ref[0][i*2]
  1677. + h->stat.frame.i_mb_count_ref[0][i*2+1]
  1678. : h->stat.frame.i_mb_count_ref[0][i];
  1679. if( fprintf( rc->p_stat_file_out, "%d ", refcount ) < 0 )
  1680. goto fail;
  1681. }
  1682. if( h->param.analyse.i_weighted_pred >= X264_WEIGHTP_SIMPLE && h->sh.weight[0][0].weightfn )
  1683. {
  1684. if( fprintf( rc->p_stat_file_out, "w:%d,%d,%d",
  1685. h->sh.weight[0][0].i_denom, h->sh.weight[0][0].i_scale, h->sh.weight[0][0].i_offset ) < 0 )
  1686. goto fail;
  1687. if( h->sh.weight[0][1].weightfn || h->sh.weight[0][2].weightfn )
  1688. {
  1689. if( fprintf( rc->p_stat_file_out, ",%d,%d,%d,%d,%d ",
  1690. h->sh.weight[0][1].i_denom, h->sh.weight[0][1].i_scale, h->sh.weight[0][1].i_offset,
  1691. h->sh.weight[0][2].i_scale, h->sh.weight[0][2].i_offset ) < 0 )
  1692. goto fail;
  1693. }
  1694. else if( fprintf( rc->p_stat_file_out, " " ) < 0 )
  1695. goto fail;
  1696. }
  1697. if( fprintf( rc->p_stat_file_out, ";\n") < 0 )
  1698. goto fail;
  1699. /* Don't re-write the data in multi-pass mode. */
  1700. if( h->param.rc.b_mb_tree && h->fenc->b_kept_as_ref && !h->param.rc.b_stat_read )
  1701. {
  1702. uint8_t i_type = h->sh.i_type;
  1703. h->mc.mbtree_fix8_pack( rc->mbtree.qp_buffer[0], h->fenc->f_qp_offset, h->mb.i_mb_count );
  1704. if( fwrite( &i_type, 1, 1, rc->p_mbtree_stat_file_out ) < 1 )
  1705. goto fail;
  1706. if( fwrite( rc->mbtree.qp_buffer[0], sizeof(uint16_t), h->mb.i_mb_count, rc->p_mbtree_stat_file_out ) < h->mb.i_mb_count )
  1707. goto fail;
  1708. }
  1709. }
  1710. if( rc->b_abr )
  1711. {
  1712. if( h->sh.i_type != SLICE_TYPE_B )
  1713. rc->cplxr_sum += bits * qp2qscale( rc->qpa_rc ) / rc->last_rceq;
  1714. else
  1715. {
  1716. /* Depends on the fact that B-frame's QP is an offset from the following P-frame's.
  1717. * Not perfectly accurate with B-refs, but good enough. */
  1718. rc->cplxr_sum += bits * qp2qscale( rc->qpa_rc ) / (rc->last_rceq * fabs( h->param.rc.f_pb_factor ));
  1719. }
  1720. rc->cplxr_sum *= rc->cbr_decay;
  1721. rc->wanted_bits_window += h->fenc->f_duration * rc->bitrate;
  1722. rc->wanted_bits_window *= rc->cbr_decay;
  1723. }
  1724. if( rc->b_2pass )
  1725. rc->expected_bits_sum += qscale2bits( rc->rce, qp2qscale( rc->rce->new_qp ) );
  1726. if( h->mb.b_variable_qp )
  1727. {
  1728. if( h->sh.i_type == SLICE_TYPE_B )
  1729. {
  1730. rc->bframe_bits += bits;
  1731. if( h->fenc->b_last_minigop_bframe )
  1732. {
  1733. update_predictor( rc->pred_b_from_p, qp2qscale( rc->qpa_rc ),
  1734. h->fref[1][h->i_ref[1]-1]->i_satd, rc->bframe_bits / rc->bframes );
  1735. rc->bframe_bits = 0;
  1736. }
  1737. }
  1738. }
  1739. *filler = update_vbv( h, bits );
  1740. rc->filler_bits_sum += *filler * 8;
  1741. if( h->sps->vui.b_nal_hrd_parameters_present )
  1742. {
  1743. if( h->fenc->i_frame == 0 )
  1744. {
  1745. // access unit initialises the HRD
  1746. h->fenc->hrd_timing.cpb_initial_arrival_time = 0;
  1747. rc->initial_cpb_removal_delay = h->initial_cpb_removal_delay;
  1748. rc->initial_cpb_removal_delay_offset = h->initial_cpb_removal_delay_offset;
  1749. h->fenc->hrd_timing.cpb_removal_time = rc->nrt_first_access_unit = (double)rc->initial_cpb_removal_delay / 90000;
  1750. }
  1751. else
  1752. {
  1753. h->fenc->hrd_timing.cpb_removal_time = rc->nrt_first_access_unit + (double)(h->fenc->i_cpb_delay - h->i_cpb_delay_pir_offset) *
  1754. h->sps->vui.i_num_units_in_tick / h->sps->vui.i_time_scale;
  1755. if( h->fenc->b_keyframe )
  1756. {
  1757. rc->nrt_first_access_unit = h->fenc->hrd_timing.cpb_removal_time;
  1758. rc->initial_cpb_removal_delay = h->initial_cpb_removal_delay;
  1759. rc->initial_cpb_removal_delay_offset = h->initial_cpb_removal_delay_offset;
  1760. }
  1761. double cpb_earliest_arrival_time = h->fenc->hrd_timing.cpb_removal_time - (double)rc->initial_cpb_removal_delay / 90000;
  1762. if( !h->fenc->b_keyframe )
  1763. cpb_earliest_arrival_time -= (double)rc->initial_cpb_removal_delay_offset / 90000;
  1764. if( h->sps->vui.hrd.b_cbr_hrd )
  1765. h->fenc->hrd_timing.cpb_initial_arrival_time = rc->previous_cpb_final_arrival_time;
  1766. else
  1767. h->fenc->hrd_timing.cpb_initial_arrival_time = X264_MAX( rc->previous_cpb_final_arrival_time, cpb_earliest_arrival_time );
  1768. }
  1769. int filler_bits = *filler ? X264_MAX( (FILLER_OVERHEAD - h->param.b_annexb), *filler )*8 : 0;
  1770. // Equation C-6
  1771. h->fenc->hrd_timing.cpb_final_arrival_time = rc->previous_cpb_final_arrival_time = h->fenc->hrd_timing.cpb_initial_arrival_time +
  1772. (double)(bits + filler_bits) / h->sps->vui.hrd.i_bit_rate_unscaled;
  1773. h->fenc->hrd_timing.dpb_output_time = (double)h->fenc->i_dpb_output_delay * h->sps->vui.i_num_units_in_tick / h->sps->vui.i_time_scale +
  1774. h->fenc->hrd_timing.cpb_removal_time;
  1775. }
  1776. return 0;
  1777. fail:
  1778. x264_log( h, X264_LOG_ERROR, "ratecontrol_end: stats file could not be written to\n" );
  1779. return -1;
  1780. }
  1781. /****************************************************************************
  1782. * 2 pass functions
  1783. ***************************************************************************/
  1784. /**
  1785. * modify the bitrate curve from pass1 for one frame
  1786. */
  1787. static double get_qscale(x264_t *h, ratecontrol_entry_t *rce, double rate_factor, int frame_num)
  1788. {
  1789. x264_ratecontrol_t *rcc= h->rc;
  1790. x264_zone_t *zone = get_zone( h, frame_num );
  1791. double q;
  1792. if( h->param.rc.b_mb_tree )
  1793. {
  1794. double timescale = (double)h->sps->vui.i_num_units_in_tick / h->sps->vui.i_time_scale;
  1795. q = pow( BASE_FRAME_DURATION / CLIP_DURATION(rce->i_duration * timescale), 1 - h->param.rc.f_qcompress );
  1796. }
  1797. else
  1798. q = pow( rce->blurred_complexity, 1 - rcc->qcompress );
  1799. // avoid NaN's in the rc_eq
  1800. if( !isfinite(q) || rce->tex_bits + rce->mv_bits == 0 )
  1801. q = rcc->last_qscale_for[rce->pict_type];
  1802. else
  1803. {
  1804. rcc->last_rceq = q;
  1805. q /= rate_factor;
  1806. rcc->last_qscale = q;
  1807. }
  1808. if( zone )
  1809. {
  1810. if( zone->b_force_qp )
  1811. q = qp2qscale( zone->i_qp );
  1812. else
  1813. q /= zone->f_bitrate_factor;
  1814. }
  1815. return q;
  1816. }
  1817. static double get_diff_limited_q(x264_t *h, ratecontrol_entry_t *rce, double q, int frame_num)
  1818. {
  1819. x264_ratecontrol_t *rcc = h->rc;
  1820. const int pict_type = rce->pict_type;
  1821. x264_zone_t *zone = get_zone( h, frame_num );
  1822. // force I/B quants as a function of P quants
  1823. const double last_p_q = rcc->last_qscale_for[SLICE_TYPE_P];
  1824. const double last_non_b_q= rcc->last_qscale_for[rcc->last_non_b_pict_type];
  1825. if( pict_type == SLICE_TYPE_I )
  1826. {
  1827. double iq = q;
  1828. double pq = qp2qscale( rcc->accum_p_qp / rcc->accum_p_norm );
  1829. double ip_factor = fabs( h->param.rc.f_ip_factor );
  1830. /* don't apply ip_factor if the following frame is also I */
  1831. if( rcc->accum_p_norm <= 0 )
  1832. q = iq;
  1833. else if( h->param.rc.f_ip_factor < 0 )
  1834. q = iq / ip_factor;
  1835. else if( rcc->accum_p_norm >= 1 )
  1836. q = pq / ip_factor;
  1837. else
  1838. q = rcc->accum_p_norm * pq / ip_factor + (1 - rcc->accum_p_norm) * iq;
  1839. }
  1840. else if( pict_type == SLICE_TYPE_B )
  1841. {
  1842. if( h->param.rc.f_pb_factor > 0 )
  1843. q = last_non_b_q;
  1844. if( !rce->kept_as_ref )
  1845. q *= fabs( h->param.rc.f_pb_factor );
  1846. }
  1847. else if( pict_type == SLICE_TYPE_P
  1848. && rcc->last_non_b_pict_type == SLICE_TYPE_P
  1849. && rce->tex_bits == 0 )
  1850. {
  1851. q = last_p_q;
  1852. }
  1853. /* last qscale / qdiff stuff */
  1854. if( rcc->last_non_b_pict_type == pict_type &&
  1855. (pict_type!=SLICE_TYPE_I || rcc->last_accum_p_norm < 1) )
  1856. {
  1857. double last_q = rcc->last_qscale_for[pict_type];
  1858. double max_qscale = last_q * rcc->lstep;
  1859. double min_qscale = last_q / rcc->lstep;
  1860. if ( q > max_qscale ) q = max_qscale;
  1861. else if( q < min_qscale ) q = min_qscale;
  1862. }
  1863. rcc->last_qscale_for[pict_type] = q;
  1864. if( pict_type != SLICE_TYPE_B )
  1865. rcc->last_non_b_pict_type = pict_type;
  1866. if( pict_type == SLICE_TYPE_I )
  1867. {
  1868. rcc->last_accum_p_norm = rcc->accum_p_norm;
  1869. rcc->accum_p_norm = 0;
  1870. rcc->accum_p_qp = 0;
  1871. }
  1872. if( pict_type == SLICE_TYPE_P )
  1873. {
  1874. float mask = 1 - pow( (float)rce->i_count / rcc->nmb, 2 );
  1875. rcc->accum_p_qp = mask * (qscale2qp( q ) + rcc->accum_p_qp);
  1876. rcc->accum_p_norm = mask * (1 + rcc->accum_p_norm);
  1877. }
  1878. if( zone )
  1879. {
  1880. if( zone->b_force_qp )
  1881. q = qp2qscale( zone->i_qp );
  1882. else
  1883. q /= zone->f_bitrate_factor;
  1884. }
  1885. return q;
  1886. }
  1887. static float predict_size( predictor_t *p, float q, float var )
  1888. {
  1889. return (p->coeff*var + p->offset) / (q*p->count);
  1890. }
  1891. static void update_predictor( predictor_t *p, float q, float var, float bits )
  1892. {
  1893. float range = 1.5;
  1894. if( var < 10 )
  1895. return;
  1896. float old_coeff = p->coeff / p->count;
  1897. float old_offset = p->offset / p->count;
  1898. float new_coeff = X264_MAX( (bits*q - old_offset) / var, p->coeff_min );
  1899. float new_coeff_clipped = x264_clip3f( new_coeff, old_coeff/range, old_coeff*range );
  1900. float new_offset = bits*q - new_coeff_clipped * var;
  1901. if( new_offset >= 0 )
  1902. new_coeff = new_coeff_clipped;
  1903. else
  1904. new_offset = 0;
  1905. p->count *= p->decay;
  1906. p->coeff *= p->decay;
  1907. p->offset *= p->decay;
  1908. p->count ++;
  1909. p->coeff += new_coeff;
  1910. p->offset += new_offset;
  1911. }
  1912. // update VBV after encoding a frame
  1913. static int update_vbv( x264_t *h, int bits )
  1914. {
  1915. int filler = 0;
  1916. int bitrate = h->sps->vui.hrd.i_bit_rate_unscaled;
  1917. x264_ratecontrol_t *rcc = h->rc;
  1918. x264_ratecontrol_t *rct = h->thread[0]->rc;
  1919. int64_t buffer_size = (int64_t)h->sps->vui.hrd.i_cpb_size_unscaled * h->sps->vui.i_time_scale;
  1920. if( rcc->last_satd >= h->mb.i_mb_count )
  1921. update_predictor( &rct->pred[h->sh.i_type], qp2qscale( rcc->qpa_rc ), rcc->last_satd, bits );
  1922. if( !rcc->b_vbv )
  1923. return filler;
  1924. uint64_t buffer_diff = (uint64_t)bits * h->sps->vui.i_time_scale;
  1925. rct->buffer_fill_final -= buffer_diff;
  1926. rct->buffer_fill_final_min -= buffer_diff;
  1927. if( rct->buffer_fill_final_min < 0 )
  1928. {
  1929. double underflow = (double)rct->buffer_fill_final_min / h->sps->vui.i_time_scale;
  1930. if( rcc->rate_factor_max_increment && rcc->qpm >= rcc->qp_novbv + rcc->rate_factor_max_increment )
  1931. x264_log( h, X264_LOG_DEBUG, "VBV underflow due to CRF-max (frame %d, %.0f bits)\n", h->i_frame, underflow );
  1932. else
  1933. x264_log( h, X264_LOG_WARNING, "VBV underflow (frame %d, %.0f bits)\n", h->i_frame, underflow );
  1934. rct->buffer_fill_final =
  1935. rct->buffer_fill_final_min = 0;
  1936. }
  1937. if( h->param.i_avcintra_class )
  1938. buffer_diff = buffer_size;
  1939. else
  1940. buffer_diff = (uint64_t)bitrate * h->sps->vui.i_num_units_in_tick * h->fenc->i_cpb_duration;
  1941. rct->buffer_fill_final += buffer_diff;
  1942. rct->buffer_fill_final_min += buffer_diff;
  1943. if( rct->buffer_fill_final > buffer_size )
  1944. {
  1945. if( h->param.rc.b_filler )
  1946. {
  1947. int64_t scale = (int64_t)h->sps->vui.i_time_scale * 8;
  1948. filler = (rct->buffer_fill_final - buffer_size + scale - 1) / scale;
  1949. bits = h->param.i_avcintra_class ? filler * 8 : X264_MAX( (FILLER_OVERHEAD - h->param.b_annexb), filler ) * 8;
  1950. buffer_diff = (uint64_t)bits * h->sps->vui.i_time_scale;
  1951. rct->buffer_fill_final -= buffer_diff;
  1952. rct->buffer_fill_final_min -= buffer_diff;
  1953. }
  1954. else
  1955. {
  1956. rct->buffer_fill_final = X264_MIN( rct->buffer_fill_final, buffer_size );
  1957. rct->buffer_fill_final_min = X264_MIN( rct->buffer_fill_final_min, buffer_size );
  1958. }
  1959. }
  1960. return filler;
  1961. }
  1962. void x264_hrd_fullness( x264_t *h )
  1963. {
  1964. x264_ratecontrol_t *rct = h->thread[0]->rc;
  1965. uint64_t denom = (uint64_t)h->sps->vui.hrd.i_bit_rate_unscaled * h->sps->vui.i_time_scale / rct->hrd_multiply_denom;
  1966. uint64_t cpb_state = rct->buffer_fill_final;
  1967. uint64_t cpb_size = (uint64_t)h->sps->vui.hrd.i_cpb_size_unscaled * h->sps->vui.i_time_scale;
  1968. uint64_t multiply_factor = 90000 / rct->hrd_multiply_denom;
  1969. if( rct->buffer_fill_final < 0 || rct->buffer_fill_final > (int64_t)cpb_size )
  1970. {
  1971. x264_log( h, X264_LOG_WARNING, "CPB %s: %.0f bits in a %.0f-bit buffer\n",
  1972. rct->buffer_fill_final < 0 ? "underflow" : "overflow",
  1973. (double)rct->buffer_fill_final / h->sps->vui.i_time_scale, (double)cpb_size / h->sps->vui.i_time_scale );
  1974. }
  1975. h->initial_cpb_removal_delay = (multiply_factor * cpb_state) / denom;
  1976. h->initial_cpb_removal_delay_offset = (multiply_factor * cpb_size) / denom - h->initial_cpb_removal_delay;
  1977. int64_t decoder_buffer_fill = h->initial_cpb_removal_delay * denom / multiply_factor;
  1978. rct->buffer_fill_final_min = X264_MIN( rct->buffer_fill_final_min, decoder_buffer_fill );
  1979. }
  1980. // provisionally update VBV according to the planned size of all frames currently in progress
  1981. static void update_vbv_plan( x264_t *h, int overhead )
  1982. {
  1983. x264_ratecontrol_t *rcc = h->rc;
  1984. rcc->buffer_fill = h->thread[0]->rc->buffer_fill_final_min / h->sps->vui.i_time_scale;
  1985. if( h->i_thread_frames > 1 )
  1986. {
  1987. int j = h->rc - h->thread[0]->rc;
  1988. for( int i = 1; i < h->i_thread_frames; i++ )
  1989. {
  1990. x264_t *t = h->thread[ (j+i)%h->i_thread_frames ];
  1991. double bits = t->rc->frame_size_planned;
  1992. if( !t->b_thread_active )
  1993. continue;
  1994. bits = X264_MAX(bits, t->rc->frame_size_estimated);
  1995. rcc->buffer_fill -= bits;
  1996. rcc->buffer_fill = X264_MAX( rcc->buffer_fill, 0 );
  1997. rcc->buffer_fill += t->rc->buffer_rate;
  1998. rcc->buffer_fill = X264_MIN( rcc->buffer_fill, rcc->buffer_size );
  1999. }
  2000. }
  2001. rcc->buffer_fill = X264_MIN( rcc->buffer_fill, rcc->buffer_size );
  2002. rcc->buffer_fill -= overhead;
  2003. }
  2004. // apply VBV constraints and clip qscale to between lmin and lmax
  2005. static double clip_qscale( x264_t *h, int pict_type, double q )
  2006. {
  2007. x264_ratecontrol_t *rcc = h->rc;
  2008. double lmin = rcc->lmin[pict_type];
  2009. double lmax = rcc->lmax[pict_type];
  2010. if( rcc->rate_factor_max_increment )
  2011. lmax = X264_MIN( lmax, qp2qscale( rcc->qp_novbv + rcc->rate_factor_max_increment ) );
  2012. double q0 = q;
  2013. /* B-frames are not directly subject to VBV,
  2014. * since they are controlled by the P-frames' QPs. */
  2015. if( rcc->b_vbv && rcc->last_satd > 0 )
  2016. {
  2017. double fenc_cpb_duration = (double)h->fenc->i_cpb_duration *
  2018. h->sps->vui.i_num_units_in_tick / h->sps->vui.i_time_scale;
  2019. /* Lookahead VBV: raise the quantizer as necessary such that no frames in
  2020. * the lookahead overflow and such that the buffer is in a reasonable state
  2021. * by the end of the lookahead. */
  2022. if( h->param.rc.i_lookahead )
  2023. {
  2024. int terminate = 0;
  2025. /* Avoid an infinite loop. */
  2026. for( int iterations = 0; iterations < 1000 && terminate != 3; iterations++ )
  2027. {
  2028. double frame_q[3];
  2029. double cur_bits = predict_size( &rcc->pred[h->sh.i_type], q, rcc->last_satd );
  2030. double buffer_fill_cur = rcc->buffer_fill - cur_bits;
  2031. double target_fill;
  2032. double total_duration = 0;
  2033. double last_duration = fenc_cpb_duration;
  2034. frame_q[0] = h->sh.i_type == SLICE_TYPE_I ? q * h->param.rc.f_ip_factor : q;
  2035. frame_q[1] = frame_q[0] * h->param.rc.f_pb_factor;
  2036. frame_q[2] = frame_q[0] / h->param.rc.f_ip_factor;
  2037. /* Loop over the planned future frames. */
  2038. for( int j = 0; buffer_fill_cur >= 0 && buffer_fill_cur <= rcc->buffer_size; j++ )
  2039. {
  2040. total_duration += last_duration;
  2041. buffer_fill_cur += rcc->vbv_max_rate * last_duration;
  2042. int i_type = h->fenc->i_planned_type[j];
  2043. int i_satd = h->fenc->i_planned_satd[j];
  2044. if( i_type == X264_TYPE_AUTO )
  2045. break;
  2046. i_type = IS_X264_TYPE_I( i_type ) ? SLICE_TYPE_I : IS_X264_TYPE_B( i_type ) ? SLICE_TYPE_B : SLICE_TYPE_P;
  2047. cur_bits = predict_size( &rcc->pred[i_type], frame_q[i_type], i_satd );
  2048. buffer_fill_cur -= cur_bits;
  2049. last_duration = h->fenc->f_planned_cpb_duration[j];
  2050. }
  2051. /* Try to get to get the buffer at least 50% filled, but don't set an impossible goal. */
  2052. target_fill = X264_MIN( rcc->buffer_fill + total_duration * rcc->vbv_max_rate * 0.5, rcc->buffer_size * 0.5 );
  2053. if( buffer_fill_cur < target_fill )
  2054. {
  2055. q *= 1.01;
  2056. terminate |= 1;
  2057. continue;
  2058. }
  2059. /* Try to get the buffer no more than 80% filled, but don't set an impossible goal. */
  2060. target_fill = x264_clip3f( rcc->buffer_fill - total_duration * rcc->vbv_max_rate * 0.5, rcc->buffer_size * 0.8, rcc->buffer_size );
  2061. if( rcc->b_vbv_min_rate && buffer_fill_cur > target_fill )
  2062. {
  2063. q /= 1.01;
  2064. terminate |= 2;
  2065. continue;
  2066. }
  2067. break;
  2068. }
  2069. }
  2070. /* Fallback to old purely-reactive algorithm: no lookahead. */
  2071. else
  2072. {
  2073. if( ( pict_type == SLICE_TYPE_P ||
  2074. ( pict_type == SLICE_TYPE_I && rcc->last_non_b_pict_type == SLICE_TYPE_I ) ) &&
  2075. rcc->buffer_fill/rcc->buffer_size < 0.5 )
  2076. {
  2077. q /= x264_clip3f( 2.0*rcc->buffer_fill/rcc->buffer_size, 0.5, 1.0 );
  2078. }
  2079. /* Now a hard threshold to make sure the frame fits in VBV.
  2080. * This one is mostly for I-frames. */
  2081. double bits = predict_size( &rcc->pred[h->sh.i_type], q, rcc->last_satd );
  2082. /* For small VBVs, allow the frame to use up the entire VBV. */
  2083. double max_fill_factor = h->param.rc.i_vbv_buffer_size >= 5*h->param.rc.i_vbv_max_bitrate / rcc->fps ? 2 : 1;
  2084. /* For single-frame VBVs, request that the frame use up the entire VBV. */
  2085. double min_fill_factor = rcc->single_frame_vbv ? 1 : 2;
  2086. if( bits > rcc->buffer_fill/max_fill_factor )
  2087. {
  2088. double qf = x264_clip3f( rcc->buffer_fill/(max_fill_factor*bits), 0.2, 1.0 );
  2089. q /= qf;
  2090. bits *= qf;
  2091. }
  2092. if( bits < rcc->buffer_rate/min_fill_factor )
  2093. {
  2094. double qf = x264_clip3f( bits*min_fill_factor/rcc->buffer_rate, 0.001, 1.0 );
  2095. q *= qf;
  2096. }
  2097. q = X264_MAX( q0, q );
  2098. }
  2099. /* Check B-frame complexity, and use up any bits that would
  2100. * overflow before the next P-frame. */
  2101. if( h->sh.i_type == SLICE_TYPE_P && !rcc->single_frame_vbv )
  2102. {
  2103. int nb = rcc->bframes;
  2104. double bits = predict_size( &rcc->pred[h->sh.i_type], q, rcc->last_satd );
  2105. double pbbits = bits;
  2106. double bbits = predict_size( rcc->pred_b_from_p, q * h->param.rc.f_pb_factor, rcc->last_satd );
  2107. double space;
  2108. double bframe_cpb_duration = 0;
  2109. double minigop_cpb_duration;
  2110. for( int i = 0; i < nb; i++ )
  2111. bframe_cpb_duration += h->fenc->f_planned_cpb_duration[i];
  2112. if( bbits * nb > bframe_cpb_duration * rcc->vbv_max_rate )
  2113. nb = 0;
  2114. pbbits += nb * bbits;
  2115. minigop_cpb_duration = bframe_cpb_duration + fenc_cpb_duration;
  2116. space = rcc->buffer_fill + minigop_cpb_duration*rcc->vbv_max_rate - rcc->buffer_size;
  2117. if( pbbits < space )
  2118. {
  2119. q *= X264_MAX( pbbits / space, bits / (0.5 * rcc->buffer_size) );
  2120. }
  2121. q = X264_MAX( q0/2, q );
  2122. }
  2123. /* Apply MinCR and buffer fill restrictions */
  2124. double bits = predict_size( &rcc->pred[h->sh.i_type], q, rcc->last_satd );
  2125. double frame_size_maximum = X264_MIN( rcc->frame_size_maximum, X264_MAX( rcc->buffer_fill, 0.001 ) );
  2126. if( bits > frame_size_maximum )
  2127. q *= bits / frame_size_maximum;
  2128. if( !rcc->b_vbv_min_rate )
  2129. q = X264_MAX( q0, q );
  2130. }
  2131. if( lmin==lmax )
  2132. return lmin;
  2133. else if( rcc->b_2pass )
  2134. {
  2135. double min2 = log( lmin );
  2136. double max2 = log( lmax );
  2137. q = (log(q) - min2)/(max2-min2) - 0.5;
  2138. q = 1.0/(1.0 + exp( -4*q ));
  2139. q = q*(max2-min2) + min2;
  2140. return exp( q );
  2141. }
  2142. else
  2143. return x264_clip3f( q, lmin, lmax );
  2144. }
  2145. // update qscale for 1 frame based on actual bits used so far
  2146. static float rate_estimate_qscale( x264_t *h )
  2147. {
  2148. float q;
  2149. x264_ratecontrol_t *rcc = h->rc;
  2150. ratecontrol_entry_t rce = {0};
  2151. int pict_type = h->sh.i_type;
  2152. int64_t total_bits = 8*(h->stat.i_frame_size[SLICE_TYPE_I]
  2153. + h->stat.i_frame_size[SLICE_TYPE_P]
  2154. + h->stat.i_frame_size[SLICE_TYPE_B])
  2155. - rcc->filler_bits_sum;
  2156. if( rcc->b_2pass )
  2157. {
  2158. rce = *rcc->rce;
  2159. if( pict_type != rce.pict_type )
  2160. {
  2161. x264_log( h, X264_LOG_ERROR, "slice=%c but 2pass stats say %c\n",
  2162. slice_type_to_char[pict_type], slice_type_to_char[rce.pict_type] );
  2163. }
  2164. }
  2165. if( pict_type == SLICE_TYPE_B )
  2166. {
  2167. /* B-frames don't have independent ratecontrol, but rather get the
  2168. * average QP of the two adjacent P-frames + an offset */
  2169. int i0 = IS_X264_TYPE_I(h->fref_nearest[0]->i_type);
  2170. int i1 = IS_X264_TYPE_I(h->fref_nearest[1]->i_type);
  2171. int dt0 = abs(h->fenc->i_poc - h->fref_nearest[0]->i_poc);
  2172. int dt1 = abs(h->fenc->i_poc - h->fref_nearest[1]->i_poc);
  2173. float q0 = h->fref_nearest[0]->f_qp_avg_rc;
  2174. float q1 = h->fref_nearest[1]->f_qp_avg_rc;
  2175. if( h->fref_nearest[0]->i_type == X264_TYPE_BREF )
  2176. q0 -= rcc->pb_offset/2;
  2177. if( h->fref_nearest[1]->i_type == X264_TYPE_BREF )
  2178. q1 -= rcc->pb_offset/2;
  2179. if( i0 && i1 )
  2180. q = (q0 + q1) / 2 + rcc->ip_offset;
  2181. else if( i0 )
  2182. q = q1;
  2183. else if( i1 )
  2184. q = q0;
  2185. else
  2186. q = (q0*dt1 + q1*dt0) / (dt0 + dt1);
  2187. if( h->fenc->b_kept_as_ref )
  2188. q += rcc->pb_offset/2;
  2189. else
  2190. q += rcc->pb_offset;
  2191. rcc->qp_novbv = q;
  2192. q = qp2qscale( q );
  2193. if( rcc->b_2pass )
  2194. rcc->frame_size_planned = qscale2bits( &rce, q );
  2195. else
  2196. rcc->frame_size_planned = predict_size( rcc->pred_b_from_p, q, h->fref[1][h->i_ref[1]-1]->i_satd );
  2197. /* Limit planned size by MinCR */
  2198. if( rcc->b_vbv )
  2199. rcc->frame_size_planned = X264_MIN( rcc->frame_size_planned, rcc->frame_size_maximum );
  2200. h->rc->frame_size_estimated = rcc->frame_size_planned;
  2201. /* For row SATDs */
  2202. if( rcc->b_vbv )
  2203. rcc->last_satd = x264_rc_analyse_slice( h );
  2204. return q;
  2205. }
  2206. else
  2207. {
  2208. double abr_buffer = 2 * rcc->rate_tolerance * rcc->bitrate;
  2209. double predicted_bits = total_bits;
  2210. if( h->i_thread_frames > 1 )
  2211. {
  2212. int j = h->rc - h->thread[0]->rc;
  2213. for( int i = 1; i < h->i_thread_frames; i++ )
  2214. {
  2215. x264_t *t = h->thread[(j+i) % h->i_thread_frames];
  2216. double bits = t->rc->frame_size_planned;
  2217. if( !t->b_thread_active )
  2218. continue;
  2219. bits = X264_MAX(bits, t->rc->frame_size_estimated);
  2220. predicted_bits += bits;
  2221. }
  2222. }
  2223. if( rcc->b_2pass )
  2224. {
  2225. double lmin = rcc->lmin[pict_type];
  2226. double lmax = rcc->lmax[pict_type];
  2227. double diff;
  2228. /* Adjust ABR buffer based on distance to the end of the video. */
  2229. if( rcc->num_entries > h->i_frame )
  2230. {
  2231. double final_bits = rcc->entry_out[rcc->num_entries-1]->expected_bits;
  2232. double video_pos = rce.expected_bits / final_bits;
  2233. double scale_factor = sqrt( (1 - video_pos) * rcc->num_entries );
  2234. abr_buffer *= 0.5 * X264_MAX( scale_factor, 0.5 );
  2235. }
  2236. diff = predicted_bits - rce.expected_bits;
  2237. q = rce.new_qscale;
  2238. q /= x264_clip3f((abr_buffer - diff) / abr_buffer, .5, 2);
  2239. if( h->i_frame >= rcc->fps && rcc->expected_bits_sum >= 1 )
  2240. {
  2241. /* Adjust quant based on the difference between
  2242. * achieved and expected bitrate so far */
  2243. double cur_time = (double)h->i_frame / rcc->num_entries;
  2244. double w = x264_clip3f( cur_time*100, 0.0, 1.0 );
  2245. q *= pow( (double)total_bits / rcc->expected_bits_sum, w );
  2246. }
  2247. rcc->qp_novbv = qscale2qp( q );
  2248. if( rcc->b_vbv )
  2249. {
  2250. /* Do not overflow vbv */
  2251. double expected_size = qscale2bits( &rce, q );
  2252. double expected_vbv = rcc->buffer_fill + rcc->buffer_rate - expected_size;
  2253. double expected_fullness = rce.expected_vbv / rcc->buffer_size;
  2254. double qmax = q*(2 - expected_fullness);
  2255. double size_constraint = 1 + expected_fullness;
  2256. qmax = X264_MAX( qmax, rce.new_qscale );
  2257. if( expected_fullness < .05 )
  2258. qmax = lmax;
  2259. qmax = X264_MIN(qmax, lmax);
  2260. while( ((expected_vbv < rce.expected_vbv/size_constraint) && (q < qmax)) ||
  2261. ((expected_vbv < 0) && (q < lmax)))
  2262. {
  2263. q *= 1.05;
  2264. expected_size = qscale2bits(&rce, q);
  2265. expected_vbv = rcc->buffer_fill + rcc->buffer_rate - expected_size;
  2266. }
  2267. rcc->last_satd = x264_rc_analyse_slice( h );
  2268. }
  2269. q = x264_clip3f( q, lmin, lmax );
  2270. }
  2271. else /* 1pass ABR */
  2272. {
  2273. /* Calculate the quantizer which would have produced the desired
  2274. * average bitrate if it had been applied to all frames so far.
  2275. * Then modulate that quant based on the current frame's complexity
  2276. * relative to the average complexity so far (using the 2pass RCEQ).
  2277. * Then bias the quant up or down if total size so far was far from
  2278. * the target.
  2279. * Result: Depending on the value of rate_tolerance, there is a
  2280. * tradeoff between quality and bitrate precision. But at large
  2281. * tolerances, the bit distribution approaches that of 2pass. */
  2282. double wanted_bits, overflow = 1;
  2283. rcc->last_satd = x264_rc_analyse_slice( h );
  2284. rcc->short_term_cplxsum *= 0.5;
  2285. rcc->short_term_cplxcount *= 0.5;
  2286. rcc->short_term_cplxsum += rcc->last_satd / (CLIP_DURATION(h->fenc->f_duration) / BASE_FRAME_DURATION);
  2287. rcc->short_term_cplxcount ++;
  2288. rce.tex_bits = rcc->last_satd;
  2289. rce.blurred_complexity = rcc->short_term_cplxsum / rcc->short_term_cplxcount;
  2290. rce.mv_bits = 0;
  2291. rce.p_count = rcc->nmb;
  2292. rce.i_count = 0;
  2293. rce.s_count = 0;
  2294. rce.qscale = 1;
  2295. rce.pict_type = pict_type;
  2296. rce.i_duration = h->fenc->i_duration;
  2297. if( h->param.rc.i_rc_method == X264_RC_CRF )
  2298. {
  2299. q = get_qscale( h, &rce, rcc->rate_factor_constant, h->fenc->i_frame );
  2300. }
  2301. else
  2302. {
  2303. q = get_qscale( h, &rce, rcc->wanted_bits_window / rcc->cplxr_sum, h->fenc->i_frame );
  2304. /* ABR code can potentially be counterproductive in CBR, so just don't bother.
  2305. * Don't run it if the frame complexity is zero either. */
  2306. if( !rcc->b_vbv_min_rate && rcc->last_satd )
  2307. {
  2308. // FIXME is it simpler to keep track of wanted_bits in ratecontrol_end?
  2309. int i_frame_done = h->i_frame;
  2310. double time_done = i_frame_done / rcc->fps;
  2311. if( h->param.b_vfr_input && i_frame_done > 0 )
  2312. time_done = ((double)(h->fenc->i_reordered_pts - h->i_reordered_pts_delay)) * h->param.i_timebase_num / h->param.i_timebase_den;
  2313. wanted_bits = time_done * rcc->bitrate;
  2314. if( wanted_bits > 0 )
  2315. {
  2316. abr_buffer *= X264_MAX( 1, sqrt( time_done ) );
  2317. overflow = x264_clip3f( 1.0 + (predicted_bits - wanted_bits) / abr_buffer, .5, 2 );
  2318. q *= overflow;
  2319. }
  2320. }
  2321. }
  2322. if( pict_type == SLICE_TYPE_I && h->param.i_keyint_max > 1
  2323. /* should test _next_ pict type, but that isn't decided yet */
  2324. && rcc->last_non_b_pict_type != SLICE_TYPE_I )
  2325. {
  2326. q = qp2qscale( rcc->accum_p_qp / rcc->accum_p_norm );
  2327. q /= fabs( h->param.rc.f_ip_factor );
  2328. }
  2329. else if( h->i_frame > 0 )
  2330. {
  2331. if( h->param.rc.i_rc_method != X264_RC_CRF )
  2332. {
  2333. /* Asymmetric clipping, because symmetric would prevent
  2334. * overflow control in areas of rapidly oscillating complexity */
  2335. double lmin = rcc->last_qscale_for[pict_type] / rcc->lstep;
  2336. double lmax = rcc->last_qscale_for[pict_type] * rcc->lstep;
  2337. if( overflow > 1.1 && h->i_frame > 3 )
  2338. lmax *= rcc->lstep;
  2339. else if( overflow < 0.9 )
  2340. lmin /= rcc->lstep;
  2341. q = x264_clip3f(q, lmin, lmax);
  2342. }
  2343. }
  2344. else if( h->param.rc.i_rc_method == X264_RC_CRF && rcc->qcompress != 1 )
  2345. {
  2346. q = qp2qscale( ABR_INIT_QP ) / fabs( h->param.rc.f_ip_factor );
  2347. }
  2348. rcc->qp_novbv = qscale2qp( q );
  2349. //FIXME use get_diff_limited_q() ?
  2350. q = clip_qscale( h, pict_type, q );
  2351. }
  2352. rcc->last_qscale_for[pict_type] =
  2353. rcc->last_qscale = q;
  2354. if( !(rcc->b_2pass && !rcc->b_vbv) && h->fenc->i_frame == 0 )
  2355. rcc->last_qscale_for[SLICE_TYPE_P] = q * fabs( h->param.rc.f_ip_factor );
  2356. if( rcc->b_2pass )
  2357. rcc->frame_size_planned = qscale2bits( &rce, q );
  2358. else
  2359. rcc->frame_size_planned = predict_size( &rcc->pred[h->sh.i_type], q, rcc->last_satd );
  2360. /* Always use up the whole VBV in this case. */
  2361. if( rcc->single_frame_vbv )
  2362. rcc->frame_size_planned = rcc->buffer_rate;
  2363. /* Limit planned size by MinCR */
  2364. if( rcc->b_vbv )
  2365. rcc->frame_size_planned = X264_MIN( rcc->frame_size_planned, rcc->frame_size_maximum );
  2366. h->rc->frame_size_estimated = rcc->frame_size_planned;
  2367. return q;
  2368. }
  2369. }
  2370. static void threads_normalize_predictors( x264_t *h )
  2371. {
  2372. double totalsize = 0;
  2373. for( int i = 0; i < h->param.i_threads; i++ )
  2374. totalsize += h->thread[i]->rc->slice_size_planned;
  2375. double factor = h->rc->frame_size_planned / totalsize;
  2376. for( int i = 0; i < h->param.i_threads; i++ )
  2377. h->thread[i]->rc->slice_size_planned *= factor;
  2378. }
void x264_threads_distribute_ratecontrol( x264_t *h )
{
    int row;
    x264_ratecontrol_t *rc = h->rc;
    x264_emms();
    float qscale = qp2qscale( rc->qpm );

    /* Initialize row predictors */
    if( h->i_frame == 0 )
        for( int i = 0; i < h->param.i_threads; i++ )
        {
            x264_t *t = h->thread[i];
            if( t != h )
                memcpy( t->rc->row_preds, rc->row_preds, sizeof(rc->row_preds) );
        }

    for( int i = 0; i < h->param.i_threads; i++ )
    {
        x264_t *t = h->thread[i];
        if( t != h )
            memcpy( t->rc, rc, offsetof(x264_ratecontrol_t, row_pred) );
        t->rc->row_pred = t->rc->row_preds[h->sh.i_type];
        /* Calculate the planned slice size. */
        if( rc->b_vbv && rc->frame_size_planned )
        {
            int size = 0;
            for( row = t->i_threadslice_start; row < t->i_threadslice_end; row++ )
                size += h->fdec->i_row_satd[row];
            t->rc->slice_size_planned = predict_size( &rc->pred[h->sh.i_type + (i+1)*5], qscale, size );
        }
        else
            t->rc->slice_size_planned = 0;
    }
    if( rc->b_vbv && rc->frame_size_planned )
    {
        threads_normalize_predictors( h );

        if( rc->single_frame_vbv )
        {
            /* Compensate for our max frame error threshold: give more bits (proportionally) to smaller slices. */
            for( int i = 0; i < h->param.i_threads; i++ )
            {
                x264_t *t = h->thread[i];
                float max_frame_error = x264_clip3f( 1.0 / (t->i_threadslice_end - t->i_threadslice_start), 0.05, 0.25 );
                t->rc->slice_size_planned += 2 * max_frame_error * rc->frame_size_planned;
            }
            threads_normalize_predictors( h );
        }

        for( int i = 0; i < h->param.i_threads; i++ )
            h->thread[i]->rc->frame_size_estimated = h->thread[i]->rc->slice_size_planned;
    }
}
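
/* After the slice threads finish: refresh each slice's size predictor with the
 * bits it actually produced, and fold the per-thread QP accumulators back into
 * the main context. */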
void x264_threads_merge_ratecontrol( x264_t *h )
{
    x264_ratecontrol_t *rc = h->rc;
    x264_emms();

    for( int i = 0; i < h->param.i_threads; i++ )
    {
        x264_t *t = h->thread[i];
        x264_ratecontrol_t *rct = h->thread[i]->rc;
        if( h->param.rc.i_vbv_buffer_size )
        {
            int size = 0;
            for( int row = t->i_threadslice_start; row < t->i_threadslice_end; row++ )
                size += h->fdec->i_row_satd[row];
            int bits = t->stat.frame.i_mv_bits + t->stat.frame.i_tex_bits + t->stat.frame.i_misc_bits;
            int mb_count = (t->i_threadslice_end - t->i_threadslice_start) * h->mb.i_mb_width;
            update_predictor( &rc->pred[h->sh.i_type+(i+1)*5], qp2qscale( rct->qpa_rc/mb_count ), size, bits );
        }
        if( !i )
            continue;
        rc->qpa_rc += rct->qpa_rc;
        rc->qpa_aq += rct->qpa_aq;
    }
}
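
/* Synchronize rate control state across frame-thread contexts; each group of
 * fields below is copied from the context that last updated it to the context
 * that will need it next. */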
void x264_thread_sync_ratecontrol( x264_t *cur, x264_t *prev, x264_t *next )
{
    if( cur != prev )
    {
#define COPY(var) memcpy(&cur->rc->var, &prev->rc->var, sizeof(cur->rc->var))
        /* these vars are updated in x264_ratecontrol_start()
         * so copy them from the context that most recently started (prev)
         * to the context that's about to start (cur). */
        COPY(accum_p_qp);
        COPY(accum_p_norm);
        COPY(last_satd);
        COPY(last_rceq);
        COPY(last_qscale_for);
        COPY(last_non_b_pict_type);
        COPY(short_term_cplxsum);
        COPY(short_term_cplxcount);
        COPY(bframes);
        COPY(prev_zone);
        COPY(mbtree.qpbuf_pos);
        /* these vars can be updated by x264_ratecontrol_init_reconfigurable */
        COPY(bitrate);
        COPY(buffer_size);
        COPY(buffer_rate);
        COPY(vbv_max_rate);
        COPY(single_frame_vbv);
        COPY(cbr_decay);
        COPY(rate_factor_constant);
        COPY(rate_factor_max_increment);
#undef COPY
    }
    if( cur != next )
    {
#define COPY(var) next->rc->var = cur->rc->var
        /* these vars are updated in x264_ratecontrol_end()
         * so copy them from the context that most recently ended (cur)
         * to the context that's about to end (next) */
        COPY(cplxr_sum);
        COPY(expected_bits_sum);
        COPY(filler_bits_sum);
        COPY(wanted_bits_window);
        COPY(bframe_bits);
        COPY(initial_cpb_removal_delay);
        COPY(initial_cpb_removal_delay_offset);
        COPY(nrt_first_access_unit);
        COPY(previous_cpb_final_arrival_time);
#undef COPY
    }
    //FIXME row_preds[] (not strictly necessary, but would improve prediction)
    /* the rest of the variables are either constant or thread-local */
}
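
/* Scans forward from *t0 simulating the VBV level into fills[]; on success the
 * bounds of the found interval are written back through *t0/*t1 and a nonzero
 * value is returned. */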
static int find_underflow( x264_t *h, double *fills, int *t0, int *t1, int over )
{
    /* find an interval ending on an overflow or underflow (depending on whether
     * we're adding or removing bits), and starting on the earliest frame that
     * can influence the buffer fill of that end frame. */
    x264_ratecontrol_t *rcc = h->rc;
    const double buffer_min = .1 * rcc->buffer_size;
    const double buffer_max = .9 * rcc->buffer_size;
    double fill = fills[*t0-1];
    double parity = over ? 1. : -1.;
    int start = -1, end = -1;
    for( int i = *t0; i < rcc->num_entries; i++ )
    {
        fill += (rcc->entry_out[i]->i_cpb_duration * rcc->vbv_max_rate * h->sps->vui.i_num_units_in_tick / h->sps->vui.i_time_scale -
                 qscale2bits( rcc->entry_out[i], rcc->entry_out[i]->new_qscale )) * parity;
        fill = x264_clip3f(fill, 0, rcc->buffer_size);
        fills[i] = fill;
        if( fill <= buffer_min || i == 0 )
        {
            if( end >= 0 )
                break;
            start = i;
        }
        else if( fill >= buffer_max && start >= 0 )
            end = i;
    }
    *t0 = start;
    *t1 = end;
    return start >= 0 && end >= 0;
}
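
/* Multiply new_qscale by 'adjustment' for every frame in [t0, t1], clamping to
 * [qscale_min, qscale_max]; returns whether any frame's qscale actually changed. */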
static int fix_underflow( x264_t *h, int t0, int t1, double adjustment, double qscale_min, double qscale_max )
{
    x264_ratecontrol_t *rcc = h->rc;
    double qscale_orig, qscale_new;
    int adjusted = 0;
    if( t0 > 0 )
        t0++;
    for( int i = t0; i <= t1; i++ )
    {
        qscale_orig = rcc->entry_out[i]->new_qscale;
        qscale_orig = x264_clip3f( qscale_orig, qscale_min, qscale_max );
        qscale_new = qscale_orig * adjustment;
        qscale_new = x264_clip3f( qscale_new, qscale_min, qscale_max );
        rcc->entry_out[i]->new_qscale = qscale_new;
        adjusted = adjusted || (qscale_new != qscale_orig);
    }
    return adjusted;
}
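
/* Sum the bits every entry is expected to produce at its current new_qscale,
 * recording the running total so far in each entry's expected_bits field. */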
static double count_expected_bits( x264_t *h )
{
    x264_ratecontrol_t *rcc = h->rc;
    double expected_bits = 0;
    for( int i = 0; i < rcc->num_entries; i++ )
    {
        ratecontrol_entry_t *rce = rcc->entry_out[i];
        rce->expected_bits = expected_bits;
        expected_bits += qscale2bits( rce, rce->new_qscale );
    }
    return expected_bits;
}

static int vbv_pass2( x264_t *h, double all_available_bits )
{
    /* for each interval of buffer_full .. underflow, uniformly increase the qp of all
     * frames in the interval until either buffer is full at some intermediate frame or the
     * last frame in the interval no longer underflows. Recompute intervals and repeat.
     * Then do the converse to put bits back into overflow areas until target size is met */
    x264_ratecontrol_t *rcc = h->rc;
    double *fills;
    double expected_bits = 0;
    double adjustment;
    double prev_bits = 0;
    int t0, t1;
    double qscale_min = qp2qscale( h->param.rc.i_qp_min );
    double qscale_max = qp2qscale( h->param.rc.i_qp_max );
    int iterations = 0;
    int adj_min, adj_max;
    CHECKED_MALLOC( fills, (rcc->num_entries+1)*sizeof(double) );

    fills++;

    /* adjust overall stream size */
    do
    {
        iterations++;
        prev_bits = expected_bits;

        if( expected_bits )
        {   /* not first iteration */
            adjustment = X264_MAX(X264_MIN(expected_bits / all_available_bits, 0.999), 0.9);
            fills[-1] = rcc->buffer_size * h->param.rc.f_vbv_buffer_init;
            t0 = 0;
            /* fix overflows */
            adj_min = 1;
            while( adj_min && find_underflow( h, fills, &t0, &t1, 1 ) )
            {
                adj_min = fix_underflow( h, t0, t1, adjustment, qscale_min, qscale_max );
                t0 = t1;
            }
        }

        fills[-1] = rcc->buffer_size * (1. - h->param.rc.f_vbv_buffer_init);
        t0 = 0;
        /* fix underflows -- should be done after overflow, as we'd better undersize target than underflowing VBV */
        adj_max = 1;
        while( adj_max && find_underflow( h, fills, &t0, &t1, 0 ) )
            adj_max = fix_underflow( h, t0, t1, 1.001, qscale_min, qscale_max );
        expected_bits = count_expected_bits( h );
    } while( (expected_bits < .995*all_available_bits) && ((int64_t)(expected_bits+.5) > (int64_t)(prev_bits+.5)) );

    if( !adj_max )
        x264_log( h, X264_LOG_WARNING, "vbv-maxrate issue, qpmax or vbv-maxrate too low\n");

    /* store expected vbv filling values for tracking when encoding */
    for( int i = 0; i < rcc->num_entries; i++ )
        rcc->entry_out[i]->expected_vbv = rcc->buffer_size - fills[i];

    x264_free( fills-1 );
    return 0;
fail:
    return -1;
}
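
/* Second pass initialization: distribute the total bit budget implied by the
 * target bitrate and stream duration across all frames from the first-pass
 * stats, then (if VBV is enabled) rescale the per-frame qscales to respect it. */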
static int init_pass2( x264_t *h )
{
    x264_ratecontrol_t *rcc = h->rc;
    uint64_t all_const_bits = 0;
    double timescale = (double)h->sps->vui.i_num_units_in_tick / h->sps->vui.i_time_scale;
    double duration = 0;
    for( int i = 0; i < rcc->num_entries; i++ )
        duration += rcc->entry[i].i_duration;
    duration *= timescale;
    uint64_t all_available_bits = h->param.rc.i_bitrate * 1000. * duration;
    double rate_factor, step_mult;
    double qblur = h->param.rc.f_qblur;
    double cplxblur = h->param.rc.f_complexity_blur;
    const int filter_size = (int)(qblur*4) | 1;
    double expected_bits;
    double *qscale, *blurred_qscale;
    double base_cplx = h->mb.i_mb_count * (h->param.i_bframe ? 120 : 80);

    /* find total/average complexity & const_bits */
    for( int i = 0; i < rcc->num_entries; i++ )
    {
        ratecontrol_entry_t *rce = &rcc->entry[i];
        all_const_bits += rce->misc_bits;
    }

    if( all_available_bits < all_const_bits)
    {
        x264_log( h, X264_LOG_ERROR, "requested bitrate is too low. estimated minimum is %d kbps\n",
                  (int)(all_const_bits * rcc->fps / (rcc->num_entries * 1000.)) );
        return -1;
    }

    /* Blur complexities, to reduce local fluctuation of QP.
     * We don't blur the QPs directly, because then one very simple frame
     * could drag down the QP of a nearby complex frame and give it more
     * bits than intended. */
    for( int i = 0; i < rcc->num_entries; i++ )
    {
        ratecontrol_entry_t *rce = &rcc->entry[i];
        double weight_sum = 0;
        double cplx_sum = 0;
        double weight = 1.0;
        double gaussian_weight;
        /* weighted average of cplx of future frames */
        for( int j = 1; j < cplxblur*2 && j < rcc->num_entries-i; j++ )
        {
            ratecontrol_entry_t *rcj = &rcc->entry[i+j];
            double frame_duration = CLIP_DURATION(rcj->i_duration * timescale) / BASE_FRAME_DURATION;
            weight *= 1 - pow( (float)rcj->i_count / rcc->nmb, 2 );
            if( weight < .0001 )
                break;
            gaussian_weight = weight * exp( -j*j/200.0 );
            weight_sum += gaussian_weight;
            cplx_sum += gaussian_weight * (qscale2bits( rcj, 1 ) - rcj->misc_bits) / frame_duration;
        }
        /* weighted average of cplx of past frames */
        weight = 1.0;
        for( int j = 0; j <= cplxblur*2 && j <= i; j++ )
        {
            ratecontrol_entry_t *rcj = &rcc->entry[i-j];
            double frame_duration = CLIP_DURATION(rcj->i_duration * timescale) / BASE_FRAME_DURATION;
            gaussian_weight = weight * exp( -j*j/200.0 );
            weight_sum += gaussian_weight;
            cplx_sum += gaussian_weight * (qscale2bits( rcj, 1 ) - rcj->misc_bits) / frame_duration;
            weight *= 1 - pow( (float)rcj->i_count / rcc->nmb, 2 );
            if( weight < .0001 )
                break;
        }
        rce->blurred_complexity = cplx_sum / weight_sum;
    }
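
    /* qscale[] holds the raw per-frame qscales for the current rate_factor guess;
     * blurred_qscale[] holds the temporally smoothed version (aliased to qscale[]
     * when no smoothing is requested). */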
    CHECKED_MALLOC( qscale, sizeof(double)*rcc->num_entries );
    if( filter_size > 1 )
        CHECKED_MALLOC( blurred_qscale, sizeof(double)*rcc->num_entries );
    else
        blurred_qscale = qscale;

    /* Search for a factor which, when multiplied by the RCEQ values from
     * each frame, adds up to the desired total size.
     * There is no exact closed-form solution because of VBV constraints and
     * because qscale2bits is not invertible, but we can start with the simple
     * approximation of scaling the 1st pass by the ratio of bitrates.
     * The search range is probably overkill, but speed doesn't matter here. */
    expected_bits = 1;
    for( int i = 0; i < rcc->num_entries; i++ )
    {
        double q = get_qscale(h, &rcc->entry[i], 1.0, i);
        expected_bits += qscale2bits(&rcc->entry[i], q);
        rcc->last_qscale_for[rcc->entry[i].pict_type] = q;
    }
    step_mult = all_available_bits / expected_bits;

    rate_factor = 0;
    for( double step = 1E4 * step_mult; step > 1E-7 * step_mult; step *= 0.5)
    {
        expected_bits = 0;
        rate_factor += step;

        rcc->last_non_b_pict_type = -1;
        rcc->last_accum_p_norm = 1;
        rcc->accum_p_norm = 0;

        rcc->last_qscale_for[0] =
        rcc->last_qscale_for[1] =
        rcc->last_qscale_for[2] = pow( base_cplx, 1 - rcc->qcompress ) / rate_factor;

        /* find qscale */
        for( int i = 0; i < rcc->num_entries; i++ )
        {
            qscale[i] = get_qscale( h, &rcc->entry[i], rate_factor, -1 );
            rcc->last_qscale_for[rcc->entry[i].pict_type] = qscale[i];
        }

        /* fixed I/B qscale relative to P */
        for( int i = rcc->num_entries-1; i >= 0; i-- )
        {
            qscale[i] = get_diff_limited_q( h, &rcc->entry[i], qscale[i], i );
            assert(qscale[i] >= 0);
        }

        /* smooth curve */
        if( filter_size > 1 )
        {
            assert( filter_size%2 == 1 );
            for( int i = 0; i < rcc->num_entries; i++ )
            {
                ratecontrol_entry_t *rce = &rcc->entry[i];
                double q = 0.0, sum = 0.0;

                for( int j = 0; j < filter_size; j++ )
                {
                    int idx = i+j-filter_size/2;
                    double d = idx-i;
                    double coeff = qblur==0 ? 1.0 : exp( -d*d/(qblur*qblur) );
                    if( idx < 0 || idx >= rcc->num_entries )
                        continue;
                    if( rce->pict_type != rcc->entry[idx].pict_type )
                        continue;
                    q += qscale[idx] * coeff;
                    sum += coeff;
                }
                blurred_qscale[i] = q/sum;
            }
        }

        /* find expected bits */
        for( int i = 0; i < rcc->num_entries; i++ )
        {
            ratecontrol_entry_t *rce = &rcc->entry[i];
            rce->new_qscale = clip_qscale( h, rce->pict_type, blurred_qscale[i] );
            assert(rce->new_qscale >= 0);
            expected_bits += qscale2bits( rce, rce->new_qscale );
        }

        if( expected_bits > all_available_bits )
            rate_factor -= step;
    }

    x264_free( qscale );
    if( filter_size > 1 )
        x264_free( blurred_qscale );

    if( rcc->b_vbv )
        if( vbv_pass2( h, all_available_bits ) )
            return -1;
    expected_bits = count_expected_bits( h );
    if( fabs( expected_bits/all_available_bits - 1.0 ) > 0.01 )
    {
        double avgq = 0;
        for( int i = 0; i < rcc->num_entries; i++ )
            avgq += rcc->entry[i].new_qscale;
        avgq = qscale2qp( avgq / rcc->num_entries );

        if( expected_bits > all_available_bits || !rcc->b_vbv )
            x264_log( h, X264_LOG_WARNING, "Error: 2pass curve failed to converge\n" );
        x264_log( h, X264_LOG_WARNING, "target: %.2f kbit/s, expected: %.2f kbit/s, avg QP: %.4f\n",
                  (float)h->param.rc.i_bitrate,
                  expected_bits * rcc->fps / (rcc->num_entries * 1000.),
                  avgq );
        if( expected_bits < all_available_bits && avgq < h->param.rc.i_qp_min + 2 )
        {
            if( h->param.rc.i_qp_min > 0 )
                x264_log( h, X264_LOG_WARNING, "try reducing target bitrate or reducing qp_min (currently %d)\n", h->param.rc.i_qp_min );
            else
                x264_log( h, X264_LOG_WARNING, "try reducing target bitrate\n" );
        }
        else if( expected_bits > all_available_bits && avgq > h->param.rc.i_qp_max - 2 )
        {
            if( h->param.rc.i_qp_max < QP_MAX )
                x264_log( h, X264_LOG_WARNING, "try increasing target bitrate or increasing qp_max (currently %d)\n", h->param.rc.i_qp_max );
            else
                x264_log( h, X264_LOG_WARNING, "try increasing target bitrate\n");
        }
        else if( !(rcc->b_2pass && rcc->b_vbv) )
            x264_log( h, X264_LOG_WARNING, "internal error\n" );
    }

    return 0;
fail:
    return -1;
}