
x264 Code Analysis (14): The Core Macroblock Encoding Function x264_macroblock_encode()



Copyright notice: this is an original article by the blogger. If you repost it, please cite the source: http://blog.csdn.net/frd2009041510 https://blog.csdn.net/FRD2009041510/article/details/50959404


 

The macroblock encoding function x264_macroblock_encode() is the main entry point for the transform and quantization stage. It simply calls x264_macroblock_encode_internal(), which performs the following tasks:

 

x264_macroblock_encode_skip(): encodes Skip-type macroblocks.

x264_mb_encode_i16x16(): encodes Intra16x16 macroblocks. Besides the DCT, it also applies a Hadamard transform to the DC coefficients of the sixteen 4x4 sub-blocks.

x264_mb_encode_i8x8() / x264_mb_encode_i4x4(): encode Intra8x8 and Intra4x4 macroblocks.

Inter macroblock encoding: this part is written directly in the function body rather than in a sub-function.

x264_mb_encode_chroma(): encodes the chroma blocks.

 

Both x264_macroblock_encode() and x264_macroblock_encode_internal() live in macroblock.c in the encoder folder; their call relationships are shown in the diagram below:

 



1. The x264_macroblock_encode() function

 

x264_macroblock_encode() is located in macroblock.c in the encoder folder and is just a thin wrapper around x264_macroblock_encode_internal(). If the chroma mode is YUV 4:4:4, it passes plane_count=3 and chroma=0; otherwise it passes plane_count=1 and chroma=1.

 

The corresponding code is shown below:

 

/******************************************************************/
/******************************************************************/
/*
======Analysed by RuiDong Fang
======Csdn Blog:http://blog.csdn.net/frd2009041510
======Date:2016.03.22
*/
/******************************************************************/
/******************************************************************/

/************====== Macroblock encoding function x264_macroblock_encode() ======************/
/*
 * Purpose: x264_macroblock_encode() wraps x264_macroblock_encode_internal(),
 * the internal routine that performs the residual DCT transform and quantization.
 */
void x264_macroblock_encode( x264_t *h )
{
    if( CHROMA444 )
        x264_macroblock_encode_internal( h, 3, 0 ); // YUV 4:4:4: all three planes are coded the same way as luma
    else
        x264_macroblock_encode_internal( h, 1, 1 );
}
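
To see why the two parameter sets differ, it helps to recall how each chroma format samples the chroma planes relative to a 16x16 luma macroblock. The small stand-alone program below (illustrative only, not x264 code; the shift values mirror the idea behind the CHROMA_V_SHIFT macro that shows up later in the listing, and its horizontal counterpart) prints the chroma block size per macroblock for each format:

/* Illustrative only, not part of x264: chroma block size per 16x16 macroblock
 * for each chroma format. */
#include <stdio.h>

int main( void )
{
    struct { const char *name; int h_shift, v_shift; } fmt[] = {
        { "4:2:0", 1, 1 },   /* chroma subsampled in both directions */
        { "4:2:2", 1, 0 },   /* chroma subsampled horizontally only  */
        { "4:4:4", 0, 0 },   /* chroma has full luma resolution      */
    };
    for( int i = 0; i < 3; i++ )
        printf( "%s: chroma block in a 16x16 macroblock is %dx%d\n",
                fmt[i].name, 16 >> fmt[i].h_shift, 16 >> fmt[i].v_shift );
    return 0;
}

In 4:4:4 the chroma planes are full 16x16 blocks, so x264 simply runs the luma code path three times (plane_count=3, chroma=0) instead of going through the dedicated chroma routines (plane_count=1, chroma=1).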

 

2. The x264_macroblock_encode_internal() function

 

x264_macroblock_encode_internal() is also located in macroblock.c in the encoder folder. The annotated code is analyzed below:

 

/************====== Macroblock encoding function x264_macroblock_encode_internal() ======************/
/*
 * Purpose: the internal encoding routine (residual DCT transform and quantization).
 */
/*****************************************************************************
 * x264_macroblock_encode:
 *****************************************************************************/
static ALWAYS_INLINE void x264_macroblock_encode_internal( x264_t *h, int plane_count, int chroma )
{
    int i_qp = h->mb.i_qp;
    int b_decimate = h->mb.b_dct_decimate;
    int b_force_no_skip = 0;
    int nz;
    h->mb.i_cbp_luma = 0;
    for( int p = 0; p < plane_count; p++ )
        h->mb.cache.non_zero_count[x264_scan8[LUMA_DC+p]] = 0;

    /*======== PCM ========*/
    if( h->mb.i_type == I_PCM )
    {
        /* if PCM is chosen, we need to store reconstructed frame data */
        for( int p = 0; p < plane_count; p++ )
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[p], FDEC_STRIDE, h->mb.pic.p_fenc[p], FENC_STRIDE, 16 );
        if( chroma )
        {
            int height = 16 >> CHROMA_V_SHIFT;
            h->mc.copy[PIXEL_8x8]  ( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc[1], FENC_STRIDE, height );
            h->mc.copy[PIXEL_8x8]  ( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc[2], FENC_STRIDE, height );
        }
        return;
    }

    if( !h->mb.b_allow_skip )
    {
        b_force_no_skip = 1;
        if( IS_SKIP(h->mb.i_type) )
        {
            if( h->mb.i_type == P_SKIP )
                h->mb.i_type = P_L0;
            else if( h->mb.i_type == B_SKIP )
                h->mb.i_type = B_DIRECT;
        }
    }

    // Encode according to the macroblock type
    /*======== P-skip ========*/
    if( h->mb.i_type == P_SKIP )
    {
        /* don't do pskip motion compensation if it was already done in macroblock_analyse */
        if( !h->mb.b_skip_mc )
        {
            int mvx = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][0],
                                  h->mb.mv_min[0], h->mb.mv_max[0] );
            int mvy = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][1],
                                  h->mb.mv_min[1], h->mb.mv_max[1] );

            for( int p = 0; p < plane_count; p++ )
                h->mc.mc_luma( h->mb.pic.p_fdec[p], FDEC_STRIDE,
                               &h->mb.pic.p_fref[0][0][p*4], h->mb.pic.i_stride[p],
                               mvx, mvy, 16, 16, &h->sh.weight[0][p] );

            if( chroma )
            {
                int v_shift = CHROMA_V_SHIFT;
                int height = 16 >> v_shift;

                /* Special case for mv0, which is (of course) very common in P-skip mode. */
                if( mvx | mvy )
                    h->mc.mc_chroma( h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[2], FDEC_STRIDE,
                                     h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1],
                                     mvx, 2*mvy>>v_shift, 8, height );
                else
                    h->mc.load_deinterleave_chroma_fdec( h->mb.pic.p_fdec[1], h->mb.pic.p_fref[0][0][4],
                                                         h->mb.pic.i_stride[1], height );

                if( h->sh.weight[0][1].weightfn )
                    h->sh.weight[0][1].weightfn[8>>2]( h->mb.pic.p_fdec[1], FDEC_STRIDE,
                                                       h->mb.pic.p_fdec[1], FDEC_STRIDE,
                                                       &h->sh.weight[0][1], height );
                if( h->sh.weight[0][2].weightfn )
                    h->sh.weight[0][2].weightfn[8>>2]( h->mb.pic.p_fdec[2], FDEC_STRIDE,
                                                       h->mb.pic.p_fdec[2], FDEC_STRIDE,
                                                       &h->sh.weight[0][2], height );
            }
        }

        x264_macroblock_encode_skip( h ); // encode the Skip-type macroblock
        return;
    }

    /*======== B-skip ========*/
    if( h->mb.i_type == B_SKIP )
    {
        /* don't do bskip motion compensation if it was already done in macroblock_analyse */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );
        x264_macroblock_encode_skip( h ); // encode the Skip-type macroblock
        return;
    }

    /*======== Intra ========*/
    if( h->mb.i_type == I_16x16 )
    {
        h->mb.b_transform_8x8 = 0;

        for( int p = 0; p < plane_count; p++, i_qp = h->mb.i_chroma_qp )
            x264_mb_encode_i16x16( h, p, i_qp ); // Intra16x16: encode the macroblock with x264_mb_encode_i16x16() (each plane Y, U, V in turn)
    }
    else if( h->mb.i_type == I_8x8 )
    {
        h->mb.b_transform_8x8 = 1;
        /* If we already encoded 3 of the 4 i8x8 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i8x8_fdec_buf, 16, 16 );
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i8x8_nnz_buf[0];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i8x8_nnz_buf[1];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i8x8_nnz_buf[2];
            M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i8x8_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i8x8_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma8x8, h->mb.pic.i8x8_dct_buf, sizeof(h->mb.pic.i8x8_dct_buf) );
        }
        for( int p = 0; p < plane_count; p++, i_qp = h->mb.i_chroma_qp )
        {
            for( int i = (p == 0 && h->mb.i_skip_intra) ? 3 : 0 ; i < 4; i++ )
            {
                int i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[4*i]];
                x264_mb_encode_i8x8( h, p, i, i_qp, i_mode, NULL, 1 ); // Intra8x8: x264_mb_encode_i8x8() is called four times per plane
            }
        }
    }
    else if( h->mb.i_type == I_4x4 )
    {
        h->mb.b_transform_8x8 = 0;
        /* If we already encoded 15 of the 16 i4x4 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i4x4_fdec_buf, 16, 16 );
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i4x4_nnz_buf[0];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i4x4_nnz_buf[1];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i4x4_nnz_buf[2];
            M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i4x4_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i4x4_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma4x4, h->mb.pic.i4x4_dct_buf, sizeof(h->mb.pic.i4x4_dct_buf) );
        }
        for( int p = 0; p < plane_count; p++, i_qp = h->mb.i_chroma_qp )
        {
            for( int i = (p == 0 && h->mb.i_skip_intra) ? 15 : 0 ; i < 16; i++ )
            {
                pixel *p_dst = &h->mb.pic.p_fdec[p][block_idx_xy_fdec[i]];
                int i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[i]];

                if( (h->mb.i_neighbour4[i] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
                    /* emulate missing topright samples */
                    MPIXEL_X4( &p_dst[4-FDEC_STRIDE] ) = PIXEL_SPLAT_X4( p_dst[3-FDEC_STRIDE] );

                x264_mb_encode_i4x4( h, p, i, i_qp, i_mode, 1 ); // Intra4x4: x264_mb_encode_i4x4() is called sixteen times per plane
            }
        }
    }
    /*======== Inter ========*/
    else /* Inter MB */
    {
        int i_decimate_mb = 0;

        /* Don't repeat motion compensation if it was already done in non-RD transform analysis */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );

        if( h->mb.b_lossless ) //=================== lossless path
        {
            if( h->mb.b_transform_8x8 )
                for( int p = 0; p < plane_count; p++ )
                    for( int i8x8 = 0; i8x8 < 4; i8x8++ )
                    {
                        int x = i8x8&1;
                        int y = i8x8>>1;
                        nz = h->zigzagf.sub_8x8( h->dct.luma8x8[p*4+i8x8], h->mb.pic.p_fenc[p] + 8*x + 8*y*FENC_STRIDE,
                                                                           h->mb.pic.p_fdec[p] + 8*x + 8*y*FDEC_STRIDE );
                        STORE_8x8_NNZ( p, i8x8, nz );
                        h->mb.i_cbp_luma |= nz << i8x8;
                    }
            else
                for( int p = 0; p < plane_count; p++ )
                    for( int i4x4 = 0; i4x4 < 16; i4x4++ )
                    {
                        nz = h->zigzagf.sub_4x4( h->dct.luma4x4[p*16+i4x4],
                                                 h->mb.pic.p_fenc[p]+block_idx_xy_fenc[i4x4],
                                                 h->mb.pic.p_fdec[p]+block_idx_xy_fdec[i4x4] );
                        h->mb.cache.non_zero_count[x264_scan8[p*16+i4x4]] = nz;
                        h->mb.i_cbp_luma |= nz << (i4x4>>2);
                    }
        }
        else if( h->mb.b_transform_8x8 ) //=================== 8x8 DCT path
        {
            ALIGNED_ARRAY_N( dctcoef, dct8x8,[4],[64] );
            b_decimate &= !h->mb.b_trellis || !h->param.b_cabac; // 8x8 trellis is inherently optimal decimation for CABAC

            for( int p = 0; p < plane_count; p++, i_qp = h->mb.i_chroma_qp )
            {
                CLEAR_16x16_NNZ( p );
                h->dctf.sub16x16_dct8( dct8x8, h->mb.pic.p_fenc[p], h->mb.pic.p_fdec[p] );
                h->nr_count[1+!!p*2] += h->mb.b_noise_reduction * 4;

                int plane_cbp = 0;
                for( int idx = 0; idx < 4; idx++ )
                {
                    nz = x264_quant_8x8( h, dct8x8[idx], i_qp, ctx_cat_plane[DCT_LUMA_8x8][p], 0, p, idx );

                    if( nz )
                    {
                        h->zigzagf.scan_8x8( h->dct.luma8x8[p*4+idx], dct8x8[idx] );
                        if( b_decimate )
                        {
                            int i_decimate_8x8 = h->quantf.decimate_score64( h->dct.luma8x8[p*4+idx] );
                            i_decimate_mb += i_decimate_8x8;
                            if( i_decimate_8x8 >= 4 )
                                plane_cbp |= 1<<idx;
                        }
                        else
                            plane_cbp |= 1<<idx;
                    }
                }

                if( i_decimate_mb >= 6 || !b_decimate )
                {
                    h->mb.i_cbp_luma |= plane_cbp;
                    FOREACH_BIT( idx, 0, plane_cbp )
                    {
                        h->quantf.dequant_8x8( dct8x8[idx], h->dequant8_mf[p?CQM_8PC:CQM_8PY], i_qp );
                        h->dctf.add8x8_idct8( &h->mb.pic.p_fdec[p][8*(idx&1) + 8*(idx>>1)*FDEC_STRIDE], dct8x8[idx] );
                        STORE_8x8_NNZ( p, idx, 1 );
                    }
                }
            }
        }
        else //=================== the most common path (4x4 DCT)
        {
            // Inter prediction: the 16x16 macroblock is split into 8x8 blocks, and each 8x8 block into 4x4 blocks
            ALIGNED_ARRAY_N( dctcoef, dct4x4,[16],[16] );
            for( int p = 0; p < plane_count; p++, i_qp = h->mb.i_chroma_qp )
            {
                CLEAR_16x16_NNZ( p );
                // 16x16 DCT (actually decomposed into sixteen 4x4 DCTs):
                // compute the residual between the source frame p_fenc and the reconstruction buffer p_fdec, then DCT-transform it
                h->dctf.sub16x16_dct( dct4x4, h->mb.pic.p_fenc[p], h->mb.pic.p_fdec[p] ); // sub16x16_dct() from x264_dct_function_t: subtract ("sub") p_fdec from p_fenc and DCT-transform the residual

                if( h->mb.b_noise_reduction )
                {
                    h->nr_count[0+!!p*2] += 16;
                    for( int idx = 0; idx < 16; idx++ )
                        h->quantf.denoise_dct( dct4x4[idx], h->nr_residual_sum[0+!!p*2], h->nr_offset[0+!!p*2], 16 );
                }

                int plane_cbp = 0;
                // the 16x16 block is processed as four 8x8 blocks
                for( int i8x8 = 0; i8x8 < 4; i8x8++ )
                {
                    int i_decimate_8x8 = b_decimate ? 0 : 6;
                    int nnz8x8 = 0;

                    if( h->mb.b_trellis )
                    {
                        for( int i4x4 = 0; i4x4 < 4; i4x4++ )
                        {
                            int idx = i8x8*4+i4x4;
                            if( x264_quant_4x4_trellis( h, dct4x4[idx], CQM_4PY, i_qp, ctx_cat_plane[DCT_LUMA_4x4][p], 0, !!p, p*16+idx ) )
                            {
                                h->zigzagf.scan_4x4( h->dct.luma4x4[p*16+idx], dct4x4[idx] );
                                h->quantf.dequant_4x4( dct4x4[idx], h->dequant4_mf[p?CQM_4PC:CQM_4PY], i_qp );
                                if( i_decimate_8x8 < 6 )
                                    i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[p*16+idx] );
                                h->mb.cache.non_zero_count[x264_scan8[p*16+idx]] = 1;
                                nnz8x8 = 1;
                            }
                        }
                    }
                    else
                    {
                        // each 8x8 block contains four 4x4 blocks, which are quantized together
                        nnz8x8 = nz = h->quantf.quant_4x4x4( &dct4x4[i8x8*4], h->quant4_mf[CQM_4PY][i_qp], h->quant4_bias[CQM_4PY][i_qp] ); // quant_4x4x4() from x264_quant_function_t: quantize the four 4x4 blocks of this 8x8 block
                        if( nz )
                        {
                            FOREACH_BIT( idx, i8x8*4, nz )
                            {
                                h->zigzagf.scan_4x4( h->dct.luma4x4[p*16+idx], dct4x4[idx] ); // zigzag-scan into h->dct.luma4x4 for entropy coding
                                h->quantf.dequant_4x4( dct4x4[idx], h->dequant4_mf[p?CQM_4PC:CQM_4PY], i_qp ); // dequant_4x4() from x264_quant_function_t: dequantize each nonzero 4x4 block (used to build the reconstructed frame)
                                if( i_decimate_8x8 < 6 )
                                    i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[p*16+idx] );
                                h->mb.cache.non_zero_count[x264_scan8[p*16+idx]] = 1;
                            }
                        }
                    }

                    if( nnz8x8 )
                    {
                        i_decimate_mb += i_decimate_8x8;
                        if( i_decimate_8x8 < 4 )
                            STORE_8x8_NNZ( p, i8x8, 0 );
                        else
                            plane_cbp |= 1<<i8x8;
                    }
                }

                if( i_decimate_mb < 6 )
                {
                    plane_cbp = 0;
                    CLEAR_16x16_NNZ( p );
                }
                else
                {
                    h->mb.i_cbp_luma |= plane_cbp;
                    FOREACH_BIT( i8x8, 0, plane_cbp )
                    {
                        // build the reconstructed frame:
                        // inverse-transform the residual and add it onto the prediction
                        h->dctf.add8x8_idct( &h->mb.pic.p_fdec[p][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] ); // add8x8_idct() from x264_dct_function_t: inverse DCT of the residual, added ("add") onto the prediction
                    }
                }
            }
        }
    }

    /* encode chroma */
    if( chroma )
    {
        if( IS_INTRA( h->mb.i_type ) )
        {
            int i_mode = h->mb.i_chroma_pred_mode;
            if( h->mb.b_lossless )
                x264_predict_lossless_chroma( h, i_mode );
            else
            {
                h->predict_chroma[i_mode]( h->mb.pic.p_fdec[1] );
                h->predict_chroma[i_mode]( h->mb.pic.p_fdec[2] );
            }
        }

        /* encode the 8x8 blocks */
        x264_mb_encode_chroma( h, !IS_INTRA( h->mb.i_type ), h->mb.i_chroma_qp ); // encode the chroma blocks
    }
    else
        h->mb.i_cbp_chroma = 0;

    /* store cbp */
    int cbp = h->mb.i_cbp_chroma << 4 | h->mb.i_cbp_luma;
    if( h->param.b_cabac )
        cbp |= h->mb.cache.non_zero_count[x264_scan8[LUMA_DC    ]] << 8
            |  h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+0]] << 9
            |  h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+1]] << 10;
    h->mb.cbp[h->mb.i_mb_xy] = cbp;

    /* Check for P_SKIP
     * XXX: in the me perhaps we should take x264_mb_predict_mv_pskip into account
     * (if multiple mv give same result)*/
    if( !b_force_no_skip )
    {
        if( h->mb.i_type == P_L0 && h->mb.i_partition == D_16x16 &&
            !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) &&
            M32( h->mb.cache.mv[0][x264_scan8[0]] ) == M32( h->mb.cache.pskip_mv )
            && h->mb.cache.ref[0][x264_scan8[0]] == 0 )
        {
            h->mb.i_type = P_SKIP;
        }

        /* Check for B_SKIP */
        if( h->mb.i_type == B_DIRECT && !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) )
        {
            h->mb.i_type = B_SKIP;
        }
    }
}


As the source code shows, the flow of x264_macroblock_encode_internal() is roughly as follows:


1) If the macroblock is a Skip type, x264_macroblock_encode_skip() is called to encode it.

2) If it is Intra16x16, x264_mb_encode_i16x16() is called to encode it.

3) If it is Intra4x4, x264_mb_encode_i4x4() is called 16 times (similarly, for Intra8x8, x264_mb_encode_i8x8() is called 4 times).

4) If it is an Inter macroblock, no sub-function is called; the encoding is done inline (a minimal sketch of this round trip is given after this list):

        a) For the 16x16 block, the sub16x16_dct() assembly-optimized function from x264_dct_function_t computes the residual ("sub") between the source macroblock data p_fenc and the prediction held in the reconstruction buffer p_fdec, and applies the DCT to that residual.

        b) The block is split into four 8x8 blocks, and each 8x8 block is quantized with the quant_4x4x4() assembly-optimized function from x264_quant_function_t.

        c) The block is split into sixteen 4x4 blocks, and each nonzero 4x4 block is dequantized with the dequant_4x4() assembly-optimized function from x264_quant_function_t (used to build the reconstructed frame).

        d) The block is split into four 8x8 blocks, and for each 8x8 block the add8x8_idct() assembly-optimized function from x264_dct_function_t applies the inverse DCT to the residual and adds ("add") the result onto the prediction (used to build the reconstructed frame).

5) If chroma is to be encoded, x264_mb_encode_chroma() is called.
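
To make steps a) through d) concrete, here is a minimal, self-contained sketch of the same round trip on a single 4x4 residual block. It is illustrative only: x264 uses the butterfly/SIMD routines named above and folds the per-position transform scaling into its quantization and dequantization tables, whereas this sketch uses the raw 4x4 core transform matrix, a flat quantization step and a floating-point inverse.

/* Stand-alone sketch of the transform/quantization round trip performed on
 * each 4x4 residual block (steps a-d above). Illustrative only: the real code
 * uses sub16x16_dct(), quant_4x4x4(), dequant_4x4() and add8x8_idct(), with
 * the per-position transform scaling folded into its MF/dequant tables. */
#include <math.h>
#include <stdio.h>

/* H.264 4x4 forward core transform matrix Cf */
static const int Cf[4][4] = {
    { 1,  1,  1,  1 },
    { 2,  1, -1, -2 },
    { 1, -1, -1,  1 },
    { 1, -2,  2, -1 },
};
/* squared norms of the rows of Cf: Cf * Cf^T = diag(4,10,4,10) */
static const int norm2[4] = { 4, 10, 4, 10 };

/* forward transform of the residual: W = Cf * X * Cf^T */
static void dct4x4( int W[4][4], int X[4][4] )
{
    for( int u = 0; u < 4; u++ )
        for( int v = 0; v < 4; v++ )
        {
            int s = 0;
            for( int i = 0; i < 4; i++ )
                for( int j = 0; j < 4; j++ )
                    s += Cf[u][i] * X[i][j] * Cf[v][j];
            W[u][v] = s;
        }
}

/* exact inverse of the forward pass: X = Cf^T D^-1 * W * D^-1 * Cf, D = diag(norm2) */
static void idct4x4( double X[4][4], int W[4][4] )
{
    for( int i = 0; i < 4; i++ )
        for( int j = 0; j < 4; j++ )
        {
            double s = 0.0;
            for( int u = 0; u < 4; u++ )
                for( int v = 0; v < 4; v++ )
                    s += (double)Cf[u][i] / norm2[u] * W[u][v] * (double)Cf[v][j] / norm2[v];
            X[i][j] = s;
        }
}

int main( void )
{
    /* residual block: source pixels minus predicted pixels */
    int X[4][4] = {
        {  5, -3,  2,  0 },
        {  1,  4, -2, -1 },
        {  0,  2,  3, -4 },
        { -2,  1,  0,  2 },
    };
    int W[4][4], level[4][4], Wq[4][4];
    double Xr[4][4];
    double qstep = 10.0;                      /* illustrative quantization step */

    dct4x4( W, X );                           /* a) residual -> transform coefficients */
    for( int u = 0; u < 4; u++ )
        for( int v = 0; v < 4; v++ )
        {
            int sign = W[u][v] < 0 ? -1 : 1;
            level[u][v] = sign * (int)( fabs( W[u][v] ) / qstep + 0.5 );  /* b) quantize   */
            Wq[u][v]    = (int)( level[u][v] * qstep );                   /* c) dequantize */
        }
    idct4x4( Xr, Wq );                        /* d) inverse transform; x264 then adds this onto the prediction */

    for( int i = 0; i < 4; i++ )
        for( int j = 0; j < 4; j++ )
            printf( "%6.1f%c", Xr[i][j], j == 3 ? '\n' : ' ' );
    return 0;
}

Without step b) the reconstruction is exact; with it, the reconstruction error comes entirely from rounding the transform coefficients to multiples of qstep, which is the lossy part of the codec.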

 

As the Inter macroblock steps show, encoding boils down to the two-step combination of "DCT transform + quantization". The following articles will analyze the transform and quantization code in turn.
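
As a small preview of those articles, x264's shift-based quantization is a table-driven form of the textbook H.264 formula level = sign(c) * ((|c| * MF + f) >> qbits) with qbits = 15 + QP/6. The sketch below uses illustrative constants only; the real quant4_mf / quant4_bias / dequant4_mf tables also absorb the transform scaling and the custom quantization matrices, so their values and shifts differ.

/* Sketch of shift-based quantization of a single coefficient (illustrative
 * constants; x264's real tables are indexed by QP and coefficient position). */
#include <stdio.h>
#include <stdlib.h>

static int quant_one( int c, int mf, int f, int qbits )
{
    int level = ( abs( c ) * mf + f ) >> qbits;   /* multiply-add-shift instead of a division */
    return c < 0 ? -level : level;
}

int main( void )
{
    int qp    = 26;
    int qbits = 15 + qp / 6;
    int mf    = 8192;                 /* placeholder multiplier (assumption)            */
    int f     = ( 1 << qbits ) / 3;   /* rounding offset commonly used for intra blocks */
    printf( "coef -1234 -> level %d\n", quant_one( -1234, mf, f, qbits ) );
    return 0;
}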
