/*
 * H.26L/H.264/AVC/JVT/14496-10/... loop filter
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG4 part10 loop filter.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */
#include "libavutil/intreadwrite.h"
#include "internal.h"
#include "dsputil.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "h264.h"
#include "mathops.h"
#include "rectangle.h"

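/* alpha_table, beta_table and tc0_table hold the values from the H.264
 * specification in their middle 52 entries; the first and last thirds
 * repeat the saturated end values so that indexing with qp plus the slice
 * alpha/beta offsets stays inside the tables without an explicit clamp. */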
/* Deblocking filter (p153) */
static const uint8_t alpha_table[52*3] = {
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  4,  4,  5,  6,
      7,  8,  9, 10, 12, 13, 15, 17, 20, 22,
     25, 28, 32, 36, 40, 45, 50, 56, 63, 71,
     80, 90,101,113,127,144,162,182,203,226,
    255,255,
    255,255,255,255,255,255,255,255,255,255,255,255,255,
    255,255,255,255,255,255,255,255,255,255,255,255,255,
    255,255,255,255,255,255,255,255,255,255,255,255,255,
    255,255,255,255,255,255,255,255,255,255,255,255,255,
};
static const uint8_t beta_table[52*3] = {
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  2,  2,  2,  3,
     3,  3,  3,  4,  4,  4,  6,  6,  7,  7,
     8,  8,  9,  9, 10, 10, 11, 11, 12, 12,
    13, 13, 14, 14, 15, 15, 16, 16, 17, 17,
    18, 18,
    18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
    18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
    18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
    18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
};
static const uint8_t tc0_table[52*3][4] = {
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 1 },
    {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 1, 1 }, {-1, 0, 1, 1 }, {-1, 1, 1, 1 },
    {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 },
    {-1, 1, 1, 2 }, {-1, 1, 2, 3 }, {-1, 1, 2, 3 }, {-1, 2, 2, 3 }, {-1, 2, 2, 4 }, {-1, 2, 3, 4 },
    {-1, 2, 3, 4 }, {-1, 3, 3, 5 }, {-1, 3, 4, 6 }, {-1, 3, 4, 6 }, {-1, 4, 5, 7 }, {-1, 4, 5, 8 },
    {-1, 4, 6, 9 }, {-1, 5, 7,10 }, {-1, 6, 8,11 }, {-1, 6, 8,13 }, {-1, 7,10,14 }, {-1, 8,11,16 },
    {-1, 9,12,18 }, {-1,10,13,20 }, {-1,11,15,23 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
};
/* intra: 0 if this loopfilter call is guaranteed to be inter (bS < 4),
 *        1 if it might be intra (bS == 4) */
static av_always_inline void filter_mb_edgev(uint8_t *pix, int stride,
                                             const int16_t bS[4],
                                             unsigned int qp, int a, int b,
                                             H264Context *h, int intra)
{
    const unsigned int index_a = qp + a;
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];
    if (alpha == 0 || beta == 0) return;

    if( bS[0] < 4 || !intra ) {
        int8_t tc[4];
        tc[0] = tc0_table[index_a][bS[0]];
        tc[1] = tc0_table[index_a][bS[1]];
        tc[2] = tc0_table[index_a][bS[2]];
        tc[3] = tc0_table[index_a][bS[3]];
        h->h264dsp.h264_h_loop_filter_luma(pix, stride, alpha, beta, tc);
    } else {
        h->h264dsp.h264_h_loop_filter_luma_intra(pix, stride, alpha, beta);
    }
}
static av_always_inline void filter_mb_edgecv(uint8_t *pix, int stride,
                                              const int16_t bS[4],
                                              unsigned int qp, int a, int b,
                                              H264Context *h, int intra)
{
    const unsigned int index_a = qp + a;
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];
    if (alpha == 0 || beta == 0) return;

    if( bS[0] < 4 || !intra ) {
        int8_t tc[4];
        tc[0] = tc0_table[index_a][bS[0]]+1;
        tc[1] = tc0_table[index_a][bS[1]]+1;
        tc[2] = tc0_table[index_a][bS[2]]+1;
        tc[3] = tc0_table[index_a][bS[3]]+1;
        h->h264dsp.h264_h_loop_filter_chroma(pix, stride, alpha, beta, tc);
    } else {
        h->h264dsp.h264_h_loop_filter_chroma_intra(pix, stride, alpha, beta);
    }
}
static av_always_inline void filter_mb_mbaff_edgev(H264Context *h, uint8_t *pix,
                                                   int stride,
                                                   const int16_t bS[7], int bsi,
                                                   int qp, int a, int b,
                                                   int intra)
{
    const unsigned int index_a = qp + a;
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];
    if (alpha == 0 || beta == 0) return;

    if( bS[0] < 4 || !intra ) {
        int8_t tc[4];
        tc[0] = tc0_table[index_a][bS[0*bsi]];
        tc[1] = tc0_table[index_a][bS[1*bsi]];
        tc[2] = tc0_table[index_a][bS[2*bsi]];
        tc[3] = tc0_table[index_a][bS[3*bsi]];
        h->h264dsp.h264_h_loop_filter_luma_mbaff(pix, stride, alpha, beta, tc);
    } else {
        h->h264dsp.h264_h_loop_filter_luma_mbaff_intra(pix, stride, alpha, beta);
    }
}
static av_always_inline void filter_mb_mbaff_edgecv(H264Context *h,
                                                    uint8_t *pix, int stride,
                                                    const int16_t bS[7],
                                                    int bsi, int qp, int a,
                                                    int b, int intra)
{
    const unsigned int index_a = qp + a;
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];
    if (alpha == 0 || beta == 0) return;

    if( bS[0] < 4 || !intra ) {
        int8_t tc[4];
        tc[0] = tc0_table[index_a][bS[0*bsi]] + 1;
        tc[1] = tc0_table[index_a][bS[1*bsi]] + 1;
        tc[2] = tc0_table[index_a][bS[2*bsi]] + 1;
        tc[3] = tc0_table[index_a][bS[3*bsi]] + 1;
        h->h264dsp.h264_h_loop_filter_chroma_mbaff(pix, stride, alpha, beta, tc);
    } else {
        h->h264dsp.h264_h_loop_filter_chroma_mbaff_intra(pix, stride, alpha, beta);
    }
}
static av_always_inline void filter_mb_edgeh(uint8_t *pix, int stride,
                                             const int16_t bS[4],
                                             unsigned int qp, int a, int b,
                                             H264Context *h, int intra)
{
    const unsigned int index_a = qp + a;
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];
    if (alpha == 0 || beta == 0) return;

    if( bS[0] < 4 || !intra ) {
        int8_t tc[4];
        tc[0] = tc0_table[index_a][bS[0]];
        tc[1] = tc0_table[index_a][bS[1]];
        tc[2] = tc0_table[index_a][bS[2]];
        tc[3] = tc0_table[index_a][bS[3]];
        h->h264dsp.h264_v_loop_filter_luma(pix, stride, alpha, beta, tc);
    } else {
        h->h264dsp.h264_v_loop_filter_luma_intra(pix, stride, alpha, beta);
    }
}
static av_always_inline void filter_mb_edgech(uint8_t *pix, int stride,
                                              const int16_t bS[4],
                                              unsigned int qp, int a, int b,
                                              H264Context *h, int intra)
{
    const unsigned int index_a = qp + a;
    const int alpha = alpha_table[index_a];
    const int beta  = beta_table[qp + b];
    if (alpha == 0 || beta == 0) return;

    if( bS[0] < 4 || !intra ) {
        int8_t tc[4];
        tc[0] = tc0_table[index_a][bS[0]]+1;
        tc[1] = tc0_table[index_a][bS[1]]+1;
        tc[2] = tc0_table[index_a][bS[2]]+1;
        tc[3] = tc0_table[index_a][bS[3]]+1;
        h->h264dsp.h264_v_loop_filter_chroma(pix, stride, alpha, beta, tc);
    } else {
        h->h264dsp.h264_v_loop_filter_chroma_intra(pix, stride, alpha, beta);
    }
}

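/* The six helpers above differ only in which edge they handle:
 * filter_mb_edgev/edgeh filter vertical/horizontal luma edges (and are
 * reused for 4:4:4 chroma), filter_mb_edgecv/edgech the subsampled chroma
 * edges, and the _mbaff_ variants read bS with a stride of bsi so a single
 * call can serve either field. Chroma filtering uses tc = tc0 + 1, hence
 * the "+1" in the chroma variants. */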
static av_always_inline void h264_filter_mb_fast_internal(H264Context *h,
                                                          int mb_x, int mb_y,
                                                          uint8_t *img_y,
                                                          uint8_t *img_cb,
                                                          uint8_t *img_cr,
                                                          unsigned int linesize,
                                                          unsigned int uvlinesize,
                                                          int pixel_shift)
{
    MpegEncContext * const s = &h->s;
    int chroma = CHROMA && !(CONFIG_GRAY && (s->flags&CODEC_FLAG_GRAY));
    int chroma444 = CHROMA444;
    int chroma422 = CHROMA422;

    int mb_xy = h->mb_xy;
    int left_type = h->left_type[LTOP];
    int top_type  = h->top_type;

    int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
    int a = h->slice_alpha_c0_offset - qp_bd_offset;
    int b = h->slice_beta_offset - qp_bd_offset;

    int mb_type = s->current_picture.f.mb_type[mb_xy];
    int qp      = s->current_picture.f.qscale_table[mb_xy];
    int qp0     = s->current_picture.f.qscale_table[mb_xy - 1];
    int qp1     = s->current_picture.f.qscale_table[h->top_mb_xy];
    int qpc  = get_chroma_qp( h, 0, qp );
    int qpc0 = get_chroma_qp( h, 0, qp0 );
    int qpc1 = get_chroma_qp( h, 0, qp1 );
    qp0  = (qp + qp0 + 1) >> 1;
    qp1  = (qp + qp1 + 1) >> 1;
    qpc0 = (qpc + qpc0 + 1) >> 1;
    qpc1 = (qpc + qpc1 + 1) >> 1;
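    /* qp0/qp1 (and qpc0/qpc1) now hold the rounded average of this
     * macroblock's QP and the left/top neighbour's QP; edges shared with a
     * neighbouring macroblock are filtered with these averaged values. */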
    if( IS_INTRA(mb_type) ) {
        static const int16_t bS4[4] = {4,4,4,4};
        static const int16_t bS3[4] = {3,3,3,3};
        const int16_t *bSH = FIELD_PICTURE ? bS3 : bS4;
        if(left_type)
            filter_mb_edgev( &img_y[4*0<<pixel_shift], linesize, bS4, qp0, a, b, h, 1);
        if( IS_8x8DCT(mb_type) ) {
            filter_mb_edgev( &img_y[4*2<<pixel_shift], linesize, bS3, qp, a, b, h, 0);
            if(top_type){
                filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, a, b, h, 1);
            }
            filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, a, b, h, 0);
        } else {
            filter_mb_edgev( &img_y[4*1<<pixel_shift], linesize, bS3, qp, a, b, h, 0);
            filter_mb_edgev( &img_y[4*2<<pixel_shift], linesize, bS3, qp, a, b, h, 0);
            filter_mb_edgev( &img_y[4*3<<pixel_shift], linesize, bS3, qp, a, b, h, 0);
            if(top_type){
                filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, a, b, h, 1);
            }
            filter_mb_edgeh( &img_y[4*1*linesize], linesize, bS3, qp, a, b, h, 0);
            filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, a, b, h, 0);
            filter_mb_edgeh( &img_y[4*3*linesize], linesize, bS3, qp, a, b, h, 0);
        }
        if(chroma){
            if(chroma444){
                if(left_type){
                    filter_mb_edgev( &img_cb[4*0<<pixel_shift], linesize, bS4, qpc0, a, b, h, 1);
                    filter_mb_edgev( &img_cr[4*0<<pixel_shift], linesize, bS4, qpc0, a, b, h, 1);
                }
                if( IS_8x8DCT(mb_type) ) {
                    filter_mb_edgev( &img_cb[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cr[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    if(top_type){
                        filter_mb_edgeh( &img_cb[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1 );
                        filter_mb_edgeh( &img_cr[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1 );
                    }
                    filter_mb_edgeh( &img_cb[4*2*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cr[4*2*linesize], linesize, bS3, qpc, a, b, h, 0);
                } else {
                    filter_mb_edgev( &img_cb[4*1<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cr[4*1<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cb[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cr[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cb[4*3<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cr[4*3<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    if(top_type){
                        filter_mb_edgeh( &img_cb[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1);
                        filter_mb_edgeh( &img_cr[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1);
                    }
                    filter_mb_edgeh( &img_cb[4*1*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cr[4*1*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cb[4*2*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cr[4*2*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cb[4*3*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cr[4*3*linesize], linesize, bS3, qpc, a, b, h, 0);
                }
            }else if(chroma422){
                if(left_type){
                    filter_mb_edgecv(&img_cb[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);
                    filter_mb_edgecv(&img_cr[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);
                }
                filter_mb_edgecv(&img_cb[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgecv(&img_cr[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0);
                if(top_type){
                    filter_mb_edgech(&img_cb[4*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1);
                    filter_mb_edgech(&img_cr[4*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1);
                }
                filter_mb_edgech(&img_cb[4*1*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cr[4*1*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cb[4*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cr[4*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cb[4*3*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cr[4*3*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
            }else{
                if(left_type){
                    filter_mb_edgecv( &img_cb[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);
                    filter_mb_edgecv( &img_cr[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);
                }
                filter_mb_edgecv( &img_cb[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgecv( &img_cr[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0);
                if(top_type){
                    filter_mb_edgech( &img_cb[2*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1);
                    filter_mb_edgech( &img_cr[2*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1);
                }
                filter_mb_edgech( &img_cb[2*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech( &img_cr[2*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
            }
        }
        return;
    } else {
        LOCAL_ALIGNED_8(int16_t, bS, [2], [4][4]);
        int edges;
        if( IS_8x8DCT(mb_type) && (h->cbp&7) == 7 && !chroma444 ) {
            edges = 4;
            AV_WN64A(bS[0][0], 0x0002000200020002ULL);
            AV_WN64A(bS[0][2], 0x0002000200020002ULL);
            AV_WN64A(bS[1][0], 0x0002000200020002ULL);
            AV_WN64A(bS[1][2], 0x0002000200020002ULL);
        } else {
            int mask_edge1 = (3*(((5*mb_type)>>5)&1)) | (mb_type>>4); //(mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : (mb_type & MB_TYPE_16x8) ? 1 : 0;
            int mask_edge0 = 3*((mask_edge1>>1) & ((5*left_type)>>5)&1); // (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) && (h->left_type[LTOP] & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : 0;
            int step =  1+(mb_type>>24); //IS_8x8DCT(mb_type) ? 2 : 1;
            edges = 4 - 3*((mb_type>>3) & !(h->cbp & 15)); //(mb_type & MB_TYPE_16x16) && !(h->cbp & 15) ? 1 : 4;
            h->h264dsp.h264_loop_filter_strength( bS, h->non_zero_count_cache, h->ref_cache, h->mv_cache,
                                                  h->list_count==2, edges, step, mask_edge0, mask_edge1, FIELD_PICTURE);
        }
        if( IS_INTRA(left_type) )
            AV_WN64A(bS[0][0], 0x0004000400040004ULL);
        if( IS_INTRA(top_type) )
            AV_WN64A(bS[1][0], FIELD_PICTURE ? 0x0003000300030003ULL : 0x0004000400040004ULL);

#define FILTER(hv,dir,edge,intra)\
        if(AV_RN64A(bS[dir][edge])) {                                   \
            filter_mb_edge##hv( &img_y[4*edge*(dir?linesize:1<<pixel_shift)], linesize, bS[dir][edge], edge ? qp : qp##dir, a, b, h, intra );\
            if(chroma){\
                if(chroma444){\
                    filter_mb_edge##hv( &img_cb[4*edge*(dir?linesize:1<<pixel_shift)], linesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\
                    filter_mb_edge##hv( &img_cr[4*edge*(dir?linesize:1<<pixel_shift)], linesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\
                } else if(!(edge&1)) {\
                    filter_mb_edgec##hv( &img_cb[2*edge*(dir?uvlinesize:1<<pixel_shift)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\
                    filter_mb_edgec##hv( &img_cr[2*edge*(dir?uvlinesize:1<<pixel_shift)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\
                }\
            }\
        }
        if(left_type)
            FILTER(v,0,0,1);
        if( edges == 1 ) {
            if(top_type)
                FILTER(h,1,0,1);
        } else if( IS_8x8DCT(mb_type) ) {
            FILTER(v,0,2,0);
            if(top_type)
                FILTER(h,1,0,1);
            FILTER(h,1,2,0);
        } else {
            FILTER(v,0,1,0);
            FILTER(v,0,2,0);
            FILTER(v,0,3,0);
            if(top_type)
                FILTER(h,1,0,1);
            FILTER(h,1,1,0);
            FILTER(h,1,2,0);
            FILTER(h,1,3,0);
        }
#undef FILTER
    }
}

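/* Fast per-macroblock deblocking entry point for non-MBAFF content. It
 * falls back to the full ff_h264_filter_mb() when no optimized
 * loop-filter-strength DSP routine is available or when the Cb and Cr
 * planes use different QP offsets. */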
void ff_h264_filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
    av_assert2(!FRAME_MBAFF);
    if(!h->h264dsp.h264_loop_filter_strength || h->pps.chroma_qp_diff) {
        ff_h264_filter_mb(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize);
        return;
    }

#if CONFIG_SMALL
    h264_filter_mb_fast_internal(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, h->pixel_shift);
#else
    if(h->pixel_shift){
        h264_filter_mb_fast_internal(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, 1);
    }else{
        h264_filter_mb_fast_internal(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, 0);
    }
#endif
}

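/* Returns nonzero if the motion vectors or reference pictures on the two
 * sides of an edge differ enough to force bS = 1: different references, a
 * horizontal MV difference of 4 or more quarter-pel units (the unsigned
 * "+ 3 >= 7U" trick), or a vertical difference of at least mvy_limit. */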
static int check_mv(H264Context *h, long b_idx, long bn_idx, int mvy_limit){
    int v;

    v = h->ref_cache[0][b_idx] != h->ref_cache[0][bn_idx];
    if(!v && h->ref_cache[0][b_idx]!=-1)
        v = h->mv_cache[0][b_idx][0] - h->mv_cache[0][bn_idx][0] + 3 >= 7U |
            FFABS( h->mv_cache[0][b_idx][1] - h->mv_cache[0][bn_idx][1] ) >= mvy_limit;

    if(h->list_count==2){
        if(!v)
            v = h->ref_cache[1][b_idx] != h->ref_cache[1][bn_idx] |
                h->mv_cache[1][b_idx][0] - h->mv_cache[1][bn_idx][0] + 3 >= 7U |
                FFABS( h->mv_cache[1][b_idx][1] - h->mv_cache[1][bn_idx][1] ) >= mvy_limit;

        if(v){
            if(h->ref_cache[0][b_idx] != h->ref_cache[1][bn_idx] |
               h->ref_cache[1][b_idx] != h->ref_cache[0][bn_idx])
                return 1;
            return
                h->mv_cache[0][b_idx][0] - h->mv_cache[1][bn_idx][0] + 3 >= 7U |
                FFABS( h->mv_cache[0][b_idx][1] - h->mv_cache[1][bn_idx][1] ) >= mvy_limit |
                h->mv_cache[1][b_idx][0] - h->mv_cache[0][bn_idx][0] + 3 >= 7U |
                FFABS( h->mv_cache[1][b_idx][1] - h->mv_cache[0][bn_idx][1] ) >= mvy_limit;
        }
    }

    return v;
}

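/* filter_mb_dir() computes the boundary strengths for one filtering
 * direction (dir 0 = vertical edges, dir 1 = horizontal edges) of a
 * macroblock and applies the corresponding edge filters, first on the edge
 * shared with the left/top neighbour, then on the internal edges. */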
static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize, int mb_xy, int mb_type, int mvy_limit, int first_vertical_edge_done, int a, int b, int chroma, int dir) {
    MpegEncContext * const s = &h->s;
    int edge;
    int chroma_qp_avg[2];
    int chroma444 = CHROMA444;
    int chroma422 = CHROMA422;
    const int mbm_xy = dir == 0 ? mb_xy - 1 : h->top_mb_xy;
    const int mbm_type = dir == 0 ? h->left_type[LTOP] : h->top_type;

    // how often to recheck mv-based bS when iterating between edges
    static const uint8_t mask_edge_tab[2][8]={{0,3,3,3,1,1,1,1},
                                              {0,3,1,1,3,3,3,3}};
    const int mask_edge = mask_edge_tab[dir][(mb_type>>3)&7];
    const int edges = mask_edge== 3 && !(h->cbp&15) ? 1 : 4;

    // how often to recheck mv-based bS when iterating along each edge
    const int mask_par0 = mb_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir));

    if(mbm_type && !first_vertical_edge_done){

        if (FRAME_MBAFF && (dir == 1) && ((mb_y&1) == 0)
            && IS_INTERLACED(mbm_type&~mb_type)
           ) {
            // This is a special case in the norm where the filtering must
            // be done twice (once for each field) even if we are in a
            // frame macroblock.
            //
            unsigned int tmp_linesize   = 2 *   linesize;
            unsigned int tmp_uvlinesize = 2 * uvlinesize;
            int mbn_xy = mb_xy - 2 * s->mb_stride;
            int j;

            for(j=0; j<2; j++, mbn_xy += s->mb_stride){
                DECLARE_ALIGNED(8, int16_t, bS)[4];
                int qp;
                if (IS_INTRA(mb_type | s->current_picture.f.mb_type[mbn_xy])) {
                    AV_WN64A(bS, 0x0003000300030003ULL);
                } else {
                    if (!CABAC && IS_8x8DCT(s->current_picture.f.mb_type[mbn_xy])) {
                        bS[0]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+0]);
                        bS[1]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+1]);
                        bS[2]= 1+((h->cbp_table[mbn_xy] & 0x8000)||h->non_zero_count_cache[scan8[0]+2]);
                        bS[3]= 1+((h->cbp_table[mbn_xy] & 0x8000)||h->non_zero_count_cache[scan8[0]+3]);
                    }else{
                        const uint8_t *mbn_nnz = h->non_zero_count[mbn_xy] + 3*4;
                        int i;
                        for( i = 0; i < 4; i++ ) {
                            bS[i] = 1 + !!(h->non_zero_count_cache[scan8[0]+i] | mbn_nnz[i]);
                        }
                    }
                }
                // Do not use s->qscale as luma quantizer because it does not
                // have the same value in IPCM macroblocks.
                qp = (s->current_picture.f.qscale_table[mb_xy] + s->current_picture.f.qscale_table[mbn_xy] + 1) >> 1;
                tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);
                { int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
                filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, a, b, h, 0 );
                chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mbn_xy]) + 1) >> 1;
                chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mbn_xy]) + 1) >> 1;
                if (chroma) {
                    if (chroma444) {
                        filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0);
                        filter_mb_edgeh (&img_cr[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[1], a, b, h, 0);
                    } else {
                        filter_mb_edgech(&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0);
                        filter_mb_edgech(&img_cr[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[1], a, b, h, 0);
                    }
                }
            }
        }else{
            DECLARE_ALIGNED(8, int16_t, bS)[4];
            int qp;

            if( IS_INTRA(mb_type|mbm_type)) {
                AV_WN64A(bS, 0x0003000300030003ULL);
                if (   (!IS_INTERLACED(mb_type|mbm_type))
                    || ((FRAME_MBAFF || (s->picture_structure != PICT_FRAME)) && (dir == 0))
                )
                    AV_WN64A(bS, 0x0004000400040004ULL);
            } else {
                int i;
                int mv_done;

                if( dir && FRAME_MBAFF && IS_INTERLACED(mb_type ^ mbm_type)) {
                    AV_WN64A(bS, 0x0001000100010001ULL);
                    mv_done = 1;
                }
                else if( mask_par0 && ((mbm_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)))) ) {
                    int b_idx= 8 + 4;
                    int bn_idx= b_idx - (dir ? 8:1);

                    bS[0] = bS[1] = bS[2] = bS[3] = check_mv(h, 8 + 4, bn_idx, mvy_limit);
                    mv_done = 1;
                }
                else
                    mv_done = 0;

                for( i = 0; i < 4; i++ ) {
                    int x = dir == 0 ? 0 : i;
                    int y = dir == 0 ? i : 0;
                    int b_idx= 8 + 4 + x + 8*y;
                    int bn_idx= b_idx - (dir ? 8:1);

                    if( h->non_zero_count_cache[b_idx] |
                        h->non_zero_count_cache[bn_idx] ) {
                        bS[i] = 2;
                    }
                    else if(!mv_done)
                    {
                        bS[i] = check_mv(h, b_idx, bn_idx, mvy_limit);
                    }
                }
            }
            /* Filter edge */
            // Do not use s->qscale as luma quantizer because it does not
            // have the same value in IPCM macroblocks.
            if(bS[0]+bS[1]+bS[2]+bS[3]){
                qp = (s->current_picture.f.qscale_table[mb_xy] + s->current_picture.f.qscale_table[mbm_xy] + 1) >> 1;
                //tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]);
                tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
                //{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
                chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mbm_xy]) + 1) >> 1;
                chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mbm_xy]) + 1) >> 1;
                if( dir == 0 ) {
                    filter_mb_edgev( &img_y[0], linesize, bS, qp, a, b, h, 1 );
                    if (chroma) {
                        if (chroma444) {
                            filter_mb_edgev ( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                            filter_mb_edgev ( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);
                        } else {
                            filter_mb_edgecv( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                            filter_mb_edgecv( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);
                        }
                    }
                } else {
                    filter_mb_edgeh( &img_y[0], linesize, bS, qp, a, b, h, 1 );
                    if (chroma) {
                        if (chroma444) {
                            filter_mb_edgeh ( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                            filter_mb_edgeh ( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);
                        } else {
                            filter_mb_edgech( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                            filter_mb_edgech( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);
                        }
                    }
                }
            }
        }
    }

    /* Calculate bS */
    for( edge = 1; edge < edges; edge++ ) {
        DECLARE_ALIGNED(8, int16_t, bS)[4];
        int qp;
        const int deblock_edge = !IS_8x8DCT(mb_type & (edge<<24)); // (edge&1) && IS_8x8DCT(mb_type)

        if (!deblock_edge && (!chroma422 || dir == 0))
            continue;

        if( IS_INTRA(mb_type)) {
            AV_WN64A(bS, 0x0003000300030003ULL);
        } else {
            int i;
            int mv_done;

            if( edge & mask_edge ) {
                AV_ZERO64(bS);
                mv_done = 1;
            }
            else if( mask_par0 ) {
                int b_idx= 8 + 4 + edge * (dir ? 8:1);
                int bn_idx= b_idx - (dir ? 8:1);

                bS[0] = bS[1] = bS[2] = bS[3] = check_mv(h, b_idx, bn_idx, mvy_limit);
                mv_done = 1;
            }
            else
                mv_done = 0;

            for( i = 0; i < 4; i++ ) {
                int x = dir == 0 ? edge : i;
                int y = dir == 0 ? i    : edge;
                int b_idx= 8 + 4 + x + 8*y;
                int bn_idx= b_idx - (dir ? 8:1);

                if( h->non_zero_count_cache[b_idx] |
                    h->non_zero_count_cache[bn_idx] ) {
                    bS[i] = 2;
                }
                else if(!mv_done)
                {
                    bS[i] = check_mv(h, b_idx, bn_idx, mvy_limit);
                }
            }

            if(bS[0]+bS[1]+bS[2]+bS[3] == 0)
                continue;
        }
        /* Filter edge */
        // Do not use s->qscale as luma quantizer because it does not
        // have the same value in IPCM macroblocks.
        qp = s->current_picture.f.qscale_table[mb_xy];
        //tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]);
        tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
        //{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
        if( dir == 0 ) {
            filter_mb_edgev( &img_y[4*edge << h->pixel_shift], linesize, bS, qp, a, b, h, 0 );
            if (chroma) {
                if (chroma444) {
                    filter_mb_edgev ( &img_cb[4*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[0], a, b, h, 0);
                    filter_mb_edgev ( &img_cr[4*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[1], a, b, h, 0);
                } else if( (edge&1) == 0 ) {
                    filter_mb_edgecv( &img_cb[2*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[0], a, b, h, 0);
                    filter_mb_edgecv( &img_cr[2*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[1], a, b, h, 0);
                }
            }
        } else {
            if (chroma422) {
                if (deblock_edge)
                    filter_mb_edgeh(&img_y[4*edge*linesize], linesize, bS, qp, a, b, h, 0);
                if (chroma) {
                    filter_mb_edgech(&img_cb[4*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[0], a, b, h, 0);
                    filter_mb_edgech(&img_cr[4*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[1], a, b, h, 0);
                }
            } else {
                filter_mb_edgeh(&img_y[4*edge*linesize], linesize, bS, qp, a, b, h, 0);
                if (chroma) {
                    if (chroma444) {
                        filter_mb_edgeh (&img_cb[4*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[0], a, b, h, 0);
                        filter_mb_edgeh (&img_cr[4*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[1], a, b, h, 0);
                    } else if ((edge&1) == 0) {
                        filter_mb_edgech(&img_cb[2*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[0], a, b, h, 0);
                        filter_mb_edgech(&img_cr[2*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[1], a, b, h, 0);
                    }
                }
            }
        }
    }
}

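/* Full (non-fast) per-macroblock deblocking. It first handles the MBAFF
 * special case where the left neighbour pair is coded with a different
 * field/frame type than the current macroblock (8 bS values and 2 QPs for
 * the first vertical edge), then filters the vertical (dir 0) and
 * horizontal (dir 1) edges via filter_mb_dir(). */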
void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
    MpegEncContext * const s = &h->s;
    const int mb_xy= mb_x + mb_y*s->mb_stride;
    const int mb_type = s->current_picture.f.mb_type[mb_xy];
    const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
    int first_vertical_edge_done = 0;
    av_unused int dir;
    int chroma = CHROMA && !(CONFIG_GRAY && (s->flags&CODEC_FLAG_GRAY));
    int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
    int a = h->slice_alpha_c0_offset - qp_bd_offset;
    int b = h->slice_beta_offset - qp_bd_offset;
    if (FRAME_MBAFF
        // and current and left pair do not have the same interlaced type
        && IS_INTERLACED(mb_type^h->left_type[LTOP])
        // and left mb is available to us
        && h->left_type[LTOP]) {
        /* First vertical edge is different in MBAFF frames
         * There are 8 different bS to compute and 2 different Qp
         */
        DECLARE_ALIGNED(8, int16_t, bS)[8];
        int qp[2];
        int bqp[2];
        int rqp[2];
        int mb_qp, mbn0_qp, mbn1_qp;
        int i;
        first_vertical_edge_done = 1;

        if( IS_INTRA(mb_type) ) {
            AV_WN64A(&bS[0], 0x0004000400040004ULL);
            AV_WN64A(&bS[4], 0x0004000400040004ULL);
        } else {
            static const uint8_t offset[2][2][8]={
                {
                    {3+4*0, 3+4*0, 3+4*0, 3+4*0, 3+4*1, 3+4*1, 3+4*1, 3+4*1},
                    {3+4*2, 3+4*2, 3+4*2, 3+4*2, 3+4*3, 3+4*3, 3+4*3, 3+4*3},
                },{
                    {3+4*0, 3+4*1, 3+4*2, 3+4*3, 3+4*0, 3+4*1, 3+4*2, 3+4*3},
                    {3+4*0, 3+4*1, 3+4*2, 3+4*3, 3+4*0, 3+4*1, 3+4*2, 3+4*3},
                }
            };
            const uint8_t *off= offset[MB_FIELD][mb_y&1];
            for( i = 0; i < 8; i++ ) {
                int j= MB_FIELD ? i>>2 : i&1;
                int mbn_xy  = h->left_mb_xy[LEFT(j)];
                int mbn_type= h->left_type[LEFT(j)];

                if( IS_INTRA( mbn_type ) )
                    bS[i] = 4;
                else{
                    bS[i] = 1 + !!(h->non_zero_count_cache[12+8*(i>>1)] |
                                   ((!h->pps.cabac && IS_8x8DCT(mbn_type)) ?
                                    (h->cbp_table[mbn_xy] & (((MB_FIELD ? (i&2) : (mb_y&1)) ? 8 : 2) << 12))
                                    :
                                    h->non_zero_count[mbn_xy][ off[i] ]));
                }
            }
        }

        mb_qp   = s->current_picture.f.qscale_table[mb_xy];
        mbn0_qp = s->current_picture.f.qscale_table[h->left_mb_xy[0]];
        mbn1_qp = s->current_picture.f.qscale_table[h->left_mb_xy[1]];
        qp[0]  = ( mb_qp + mbn0_qp + 1 ) >> 1;
        bqp[0] = ( get_chroma_qp( h, 0, mb_qp ) +
                   get_chroma_qp( h, 0, mbn0_qp ) + 1 ) >> 1;
        rqp[0] = ( get_chroma_qp( h, 1, mb_qp ) +
                   get_chroma_qp( h, 1, mbn0_qp ) + 1 ) >> 1;
        qp[1]  = ( mb_qp + mbn1_qp + 1 ) >> 1;
        bqp[1] = ( get_chroma_qp( h, 0, mb_qp ) +
                   get_chroma_qp( h, 0, mbn1_qp ) + 1 ) >> 1;
        rqp[1] = ( get_chroma_qp( h, 1, mb_qp ) +
                   get_chroma_qp( h, 1, mbn1_qp ) + 1 ) >> 1;

        /* Filter edge */
        tprintf(s->avctx, "filter mb:%d/%d MBAFF, QPy:%d/%d, QPb:%d/%d QPr:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], bqp[0], bqp[1], rqp[0], rqp[1], linesize, uvlinesize);
        { int i; for (i = 0; i < 8; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
        if(MB_FIELD){
            filter_mb_mbaff_edgev ( h, img_y,                 linesize, bS  , 1, qp [0], a, b, 1 );
            filter_mb_mbaff_edgev ( h, img_y  + 8*  linesize, linesize, bS+4, 1, qp [1], a, b, 1 );
            if (chroma){
                if (CHROMA444) {
                    filter_mb_mbaff_edgev ( h, img_cb,                uvlinesize, bS  , 1, bqp[0], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cb + 8*uvlinesize, uvlinesize, bS+4, 1, bqp[1], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cr,                uvlinesize, bS  , 1, rqp[0], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cr + 8*uvlinesize, uvlinesize, bS+4, 1, rqp[1], a, b, 1 );
                } else if (CHROMA422) {
                    filter_mb_mbaff_edgecv(h, img_cb,                uvlinesize, bS  , 1, bqp[0], a, b, 1);
                    filter_mb_mbaff_edgecv(h, img_cb + 8*uvlinesize, uvlinesize, bS+4, 1, bqp[1], a, b, 1);
                    filter_mb_mbaff_edgecv(h, img_cr,                uvlinesize, bS  , 1, rqp[0], a, b, 1);
                    filter_mb_mbaff_edgecv(h, img_cr + 8*uvlinesize, uvlinesize, bS+4, 1, rqp[1], a, b, 1);
                }else{
                    filter_mb_mbaff_edgecv( h, img_cb,                uvlinesize, bS  , 1, bqp[0], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cb + 4*uvlinesize, uvlinesize, bS+4, 1, bqp[1], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cr,                uvlinesize, bS  , 1, rqp[0], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cr + 4*uvlinesize, uvlinesize, bS+4, 1, rqp[1], a, b, 1 );
                }
            }
        }else{
            filter_mb_mbaff_edgev ( h, img_y,             2*  linesize, bS  , 2, qp [0], a, b, 1 );
            filter_mb_mbaff_edgev ( h, img_y  + linesize, 2*  linesize, bS+1, 2, qp [1], a, b, 1 );
            if (chroma){
                if (CHROMA444) {
                    filter_mb_mbaff_edgev ( h, img_cb,              2*uvlinesize, bS  , 2, bqp[0], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cb + uvlinesize, 2*uvlinesize, bS+1, 2, bqp[1], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cr,              2*uvlinesize, bS  , 2, rqp[0], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cr + uvlinesize, 2*uvlinesize, bS+1, 2, rqp[1], a, b, 1 );
                }else{
                    filter_mb_mbaff_edgecv( h, img_cb,              2*uvlinesize, bS  , 2, bqp[0], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cb + uvlinesize, 2*uvlinesize, bS+1, 2, bqp[1], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cr,              2*uvlinesize, bS  , 2, rqp[0], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cr + uvlinesize, 2*uvlinesize, bS+1, 2, rqp[1], a, b, 1 );
                }
            }
        }
    }

#if CONFIG_SMALL
    for( dir = 0; dir < 2; dir++ )
        filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, dir ? 0 : first_vertical_edge_done, a, b, chroma, dir);
#else
    filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, first_vertical_edge_done, a, b, chroma, 0);
    filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, 0, a, b, chroma, 1);
#endif
}