You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

851 lines
42KB

  1. /*
  2. * H.26L/H.264/AVC/JVT/14496-10/... loop filter
  3. * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * H.264 / AVC / MPEG-4 part10 loop filter.
  24. * @author Michael Niedermayer <michaelni@gmx.at>
  25. */
  26. #include "libavutil/internal.h"
  27. #include "libavutil/intreadwrite.h"
  28. #include "libavutil/mem_internal.h"
  29. #include "internal.h"
  30. #include "avcodec.h"
  31. #include "h264dec.h"
  32. #include "h264_ps.h"
  33. #include "mathops.h"
  34. #include "mpegutils.h"
  35. #include "rectangle.h"
  36. /* Deblocking filter (p153) */
/* alpha (edge-activity) threshold indexed by qp + slice alpha offset.
 * Layout is 3*52 entries: 52 leading zeros and 52 trailing saturated (255)
 * entries pad the 52 real per-QP values in the middle, so lookups with a
 * QP biased by the slice offset (see index_a = qp + a in the filters below)
 * never index out of bounds. An alpha of 0 disables filtering entirely. */
static const uint8_t alpha_table[52*3] = {
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
      0,  0,  0,  0,  0,  0,  4,  4,  5,  6,
      7,  8,  9, 10, 12, 13, 15, 17, 20, 22,
     25, 28, 32, 36, 40, 45, 50, 56, 63, 71,
     80, 90,101,113,127,144,162,182,203,226,
    255,255,
    255,255,255,255,255,255,255,255,255,255,255,255,255,
    255,255,255,255,255,255,255,255,255,255,255,255,255,
    255,255,255,255,255,255,255,255,255,255,255,255,255,
    255,255,255,255,255,255,255,255,255,255,255,255,255,
};
/* beta (gradient) threshold indexed by qp + slice beta offset.
 * Same padded 3*52 layout as alpha_table: 52 zeros below, the 52 real
 * per-QP values in the middle, and 52 saturated (18) entries above.
 * A beta of 0 disables filtering entirely. */
static const uint8_t beta_table[52*3] = {
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     0,  0,  0,  0,  0,  0,  2,  2,  2,  3,
     3,  3,  3,  4,  4,  4,  6,  6,  7,  7,
     8,  8,  9,  9, 10, 10, 11, 11, 12, 12,
    13, 13, 14, 14, 15, 15, 16, 16, 17, 17,
    18, 18,
    18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
    18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
    18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
    18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
};
/* tc0 clipping values indexed by [qp + slice alpha offset][bS].
 * Same padded 3*52 outer layout as alpha_table/beta_table. The inner index
 * is the boundary strength bS (1..3 for the normal filter); element [0] is
 * -1 so that a bS of 0 yields tc = -1 (and tc+1 = 0 for chroma), which the
 * DSP loop-filter routines treat as "leave these samples untouched". */
static const uint8_t tc0_table[52*3][4] = {
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 1 },
    {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 1, 1 }, {-1, 0, 1, 1 }, {-1, 1, 1, 1 },
    {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 },
    {-1, 1, 1, 2 }, {-1, 1, 2, 3 }, {-1, 1, 2, 3 }, {-1, 2, 2, 3 }, {-1, 2, 2, 4 }, {-1, 2, 3, 4 },
    {-1, 2, 3, 4 }, {-1, 3, 3, 5 }, {-1, 3, 4, 6 }, {-1, 3, 4, 6 }, {-1, 4, 5, 7 }, {-1, 4, 5, 8 },
    {-1, 4, 6, 9 }, {-1, 5, 7,10 }, {-1, 6, 8,11 }, {-1, 6, 8,13 }, {-1, 7,10,14 }, {-1, 8,11,16 },
    {-1, 9,12,18 }, {-1,10,13,20 }, {-1,11,15,23 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
};
  98. /* intra: 0 if this loopfilter call is guaranteed to be inter (bS < 4), 1 if it might be intra (bS == 4) */
  99. static av_always_inline void filter_mb_edgev(uint8_t *pix, int stride,
  100. const int16_t bS[4],
  101. unsigned int qp, int a, int b,
  102. const H264Context *h, int intra)
  103. {
  104. const unsigned int index_a = qp + a;
  105. const int alpha = alpha_table[index_a];
  106. const int beta = beta_table[qp + b];
  107. if (alpha ==0 || beta == 0) return;
  108. if( bS[0] < 4 || !intra ) {
  109. int8_t tc[4];
  110. tc[0] = tc0_table[index_a][bS[0]];
  111. tc[1] = tc0_table[index_a][bS[1]];
  112. tc[2] = tc0_table[index_a][bS[2]];
  113. tc[3] = tc0_table[index_a][bS[3]];
  114. h->h264dsp.h264_h_loop_filter_luma(pix, stride, alpha, beta, tc);
  115. } else {
  116. h->h264dsp.h264_h_loop_filter_luma_intra(pix, stride, alpha, beta);
  117. }
  118. }
  119. static av_always_inline void filter_mb_edgecv(uint8_t *pix, int stride,
  120. const int16_t bS[4],
  121. unsigned int qp, int a, int b,
  122. const H264Context *h, int intra)
  123. {
  124. const unsigned int index_a = qp + a;
  125. const int alpha = alpha_table[index_a];
  126. const int beta = beta_table[qp + b];
  127. if (alpha ==0 || beta == 0) return;
  128. if( bS[0] < 4 || !intra ) {
  129. int8_t tc[4];
  130. tc[0] = tc0_table[index_a][bS[0]]+1;
  131. tc[1] = tc0_table[index_a][bS[1]]+1;
  132. tc[2] = tc0_table[index_a][bS[2]]+1;
  133. tc[3] = tc0_table[index_a][bS[3]]+1;
  134. h->h264dsp.h264_h_loop_filter_chroma(pix, stride, alpha, beta, tc);
  135. } else {
  136. h->h264dsp.h264_h_loop_filter_chroma_intra(pix, stride, alpha, beta);
  137. }
  138. }
  139. static av_always_inline void filter_mb_mbaff_edgev(const H264Context *h, uint8_t *pix,
  140. int stride,
  141. const int16_t bS[7], int bsi,
  142. int qp, int a, int b,
  143. int intra)
  144. {
  145. const unsigned int index_a = qp + a;
  146. const int alpha = alpha_table[index_a];
  147. const int beta = beta_table[qp + b];
  148. if (alpha ==0 || beta == 0) return;
  149. if( bS[0] < 4 || !intra ) {
  150. int8_t tc[4];
  151. tc[0] = tc0_table[index_a][bS[0*bsi]];
  152. tc[1] = tc0_table[index_a][bS[1*bsi]];
  153. tc[2] = tc0_table[index_a][bS[2*bsi]];
  154. tc[3] = tc0_table[index_a][bS[3*bsi]];
  155. h->h264dsp.h264_h_loop_filter_luma_mbaff(pix, stride, alpha, beta, tc);
  156. } else {
  157. h->h264dsp.h264_h_loop_filter_luma_mbaff_intra(pix, stride, alpha, beta);
  158. }
  159. }
  160. static av_always_inline void filter_mb_mbaff_edgecv(const H264Context *h,
  161. uint8_t *pix, int stride,
  162. const int16_t bS[7],
  163. int bsi, int qp, int a,
  164. int b, int intra)
  165. {
  166. const unsigned int index_a = qp + a;
  167. const int alpha = alpha_table[index_a];
  168. const int beta = beta_table[qp + b];
  169. if (alpha ==0 || beta == 0) return;
  170. if( bS[0] < 4 || !intra ) {
  171. int8_t tc[4];
  172. tc[0] = tc0_table[index_a][bS[0*bsi]] + 1;
  173. tc[1] = tc0_table[index_a][bS[1*bsi]] + 1;
  174. tc[2] = tc0_table[index_a][bS[2*bsi]] + 1;
  175. tc[3] = tc0_table[index_a][bS[3*bsi]] + 1;
  176. h->h264dsp.h264_h_loop_filter_chroma_mbaff(pix, stride, alpha, beta, tc);
  177. } else {
  178. h->h264dsp.h264_h_loop_filter_chroma_mbaff_intra(pix, stride, alpha, beta);
  179. }
  180. }
  181. static av_always_inline void filter_mb_edgeh(uint8_t *pix, int stride,
  182. const int16_t bS[4],
  183. unsigned int qp, int a, int b,
  184. const H264Context *h, int intra)
  185. {
  186. const unsigned int index_a = qp + a;
  187. const int alpha = alpha_table[index_a];
  188. const int beta = beta_table[qp + b];
  189. if (alpha ==0 || beta == 0) return;
  190. if( bS[0] < 4 || !intra ) {
  191. int8_t tc[4];
  192. tc[0] = tc0_table[index_a][bS[0]];
  193. tc[1] = tc0_table[index_a][bS[1]];
  194. tc[2] = tc0_table[index_a][bS[2]];
  195. tc[3] = tc0_table[index_a][bS[3]];
  196. h->h264dsp.h264_v_loop_filter_luma(pix, stride, alpha, beta, tc);
  197. } else {
  198. h->h264dsp.h264_v_loop_filter_luma_intra(pix, stride, alpha, beta);
  199. }
  200. }
  201. static av_always_inline void filter_mb_edgech(uint8_t *pix, int stride,
  202. const int16_t bS[4],
  203. unsigned int qp, int a, int b,
  204. const H264Context *h, int intra)
  205. {
  206. const unsigned int index_a = qp + a;
  207. const int alpha = alpha_table[index_a];
  208. const int beta = beta_table[qp + b];
  209. if (alpha ==0 || beta == 0) return;
  210. if( bS[0] < 4 || !intra ) {
  211. int8_t tc[4];
  212. tc[0] = tc0_table[index_a][bS[0]]+1;
  213. tc[1] = tc0_table[index_a][bS[1]]+1;
  214. tc[2] = tc0_table[index_a][bS[2]]+1;
  215. tc[3] = tc0_table[index_a][bS[3]]+1;
  216. h->h264dsp.h264_v_loop_filter_chroma(pix, stride, alpha, beta, tc);
  217. } else {
  218. h->h264dsp.h264_v_loop_filter_chroma_intra(pix, stride, alpha, beta);
  219. }
  220. }
/**
 * Fast-path deblocking of one macroblock (non-MBAFF).
 *
 * Intra macroblocks use fixed boundary strengths (4 on MB edges, 3 inside,
 * 3 on the top edge of field pictures); inter macroblocks get their
 * strengths from the h264_loop_filter_strength DSP helper plus bit tricks
 * on mb_type. Only chroma QP table 0 is used, so the caller must guarantee
 * pps->chroma_qp_diff == 0 (see ff_h264_filter_mb_fast()).
 *
 * pixel_shift scales byte offsets per sample (the caller passes 1 for
 * formats with h->pixel_shift set, 0 otherwise).
 */
static av_always_inline void h264_filter_mb_fast_internal(const H264Context *h,
                                                          H264SliceContext *sl,
                                                          int mb_x, int mb_y,
                                                          uint8_t *img_y,
                                                          uint8_t *img_cb,
                                                          uint8_t *img_cr,
                                                          unsigned int linesize,
                                                          unsigned int uvlinesize,
                                                          int pixel_shift)
{
    int chroma = CHROMA(h) && !(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
    int chroma444 = CHROMA444(h);
    int chroma422 = CHROMA422(h);

    int mb_xy = sl->mb_xy;
    int left_type = sl->left_type[LTOP];
    int top_type = sl->top_type;

    /* bias table indices so high-bit-depth QP offsets land in the padded
     * regions of alpha_table/beta_table/tc0_table */
    int qp_bd_offset = 6 * (h->ps.sps->bit_depth_luma - 8);
    int a = 52 + sl->slice_alpha_c0_offset - qp_bd_offset;
    int b = 52 + sl->slice_beta_offset - qp_bd_offset;

    int mb_type = h->cur_pic.mb_type[mb_xy];
    int qp      = h->cur_pic.qscale_table[mb_xy];
    int qp0     = h->cur_pic.qscale_table[mb_xy - 1];      /* left neighbour */
    int qp1     = h->cur_pic.qscale_table[sl->top_mb_xy];  /* top neighbour */
    int qpc  = get_chroma_qp(h->ps.pps, 0, qp);
    int qpc0 = get_chroma_qp(h->ps.pps, 0, qp0);
    int qpc1 = get_chroma_qp(h->ps.pps, 0, qp1);
    /* MB-boundary edges are filtered with the rounded average of the two
     * macroblocks' QPs */
    qp0  = (qp  + qp0  + 1) >> 1;
    qp1  = (qp  + qp1  + 1) >> 1;
    qpc0 = (qpc + qpc0 + 1) >> 1;
    qpc1 = (qpc + qpc1 + 1) >> 1;

    if( IS_INTRA(mb_type) ) {
        static const int16_t bS4[4] = {4,4,4,4};
        static const int16_t bS3[4] = {3,3,3,3};
        /* top edge strength is capped at 3 in field pictures */
        const int16_t *bSH = FIELD_PICTURE(h) ? bS3 : bS4;
        if(left_type)
            filter_mb_edgev( &img_y[4*0<<pixel_shift], linesize, bS4, qp0, a, b, h, 1);
        if( IS_8x8DCT(mb_type) ) {
            /* 8x8 transform: only the centre internal edge exists */
            filter_mb_edgev( &img_y[4*2<<pixel_shift], linesize, bS3, qp, a, b, h, 0);
            if(top_type){
                filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, a, b, h, 1);
            }
            filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, a, b, h, 0);
        } else {
            /* 4x4 transform: three internal edges per direction */
            filter_mb_edgev( &img_y[4*1<<pixel_shift], linesize, bS3, qp, a, b, h, 0);
            filter_mb_edgev( &img_y[4*2<<pixel_shift], linesize, bS3, qp, a, b, h, 0);
            filter_mb_edgev( &img_y[4*3<<pixel_shift], linesize, bS3, qp, a, b, h, 0);
            if(top_type){
                filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, a, b, h, 1);
            }
            filter_mb_edgeh( &img_y[4*1*linesize], linesize, bS3, qp, a, b, h, 0);
            filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, a, b, h, 0);
            filter_mb_edgeh( &img_y[4*3*linesize], linesize, bS3, qp, a, b, h, 0);
        }
        if(chroma){
            if(chroma444){
                /* 4:4:4 chroma planes are deblocked like luma */
                if(left_type){
                    filter_mb_edgev( &img_cb[4*0<<pixel_shift], linesize, bS4, qpc0, a, b, h, 1);
                    filter_mb_edgev( &img_cr[4*0<<pixel_shift], linesize, bS4, qpc0, a, b, h, 1);
                }
                if( IS_8x8DCT(mb_type) ) {
                    filter_mb_edgev( &img_cb[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cr[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    if(top_type){
                        filter_mb_edgeh( &img_cb[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1 );
                        filter_mb_edgeh( &img_cr[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1 );
                    }
                    filter_mb_edgeh( &img_cb[4*2*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cr[4*2*linesize], linesize, bS3, qpc, a, b, h, 0);
                } else {
                    filter_mb_edgev( &img_cb[4*1<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cr[4*1<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cb[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cr[4*2<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cb[4*3<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgev( &img_cr[4*3<<pixel_shift], linesize, bS3, qpc, a, b, h, 0);
                    if(top_type){
                        filter_mb_edgeh( &img_cb[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1);
                        filter_mb_edgeh( &img_cr[4*0*linesize], linesize, bSH, qpc1, a, b, h, 1);
                    }
                    filter_mb_edgeh( &img_cb[4*1*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cr[4*1*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cb[4*2*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cr[4*2*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cb[4*3*linesize], linesize, bS3, qpc, a, b, h, 0);
                    filter_mb_edgeh( &img_cr[4*3*linesize], linesize, bS3, qpc, a, b, h, 0);
                }
            }else if(chroma422){
                if(left_type){
                    filter_mb_edgecv(&img_cb[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);
                    filter_mb_edgecv(&img_cr[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);
                }
                filter_mb_edgecv(&img_cb[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgecv(&img_cr[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0);
                if(top_type){
                    filter_mb_edgech(&img_cb[4*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1);
                    filter_mb_edgech(&img_cr[4*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1);
                }
                filter_mb_edgech(&img_cb[4*1*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cr[4*1*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cb[4*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cr[4*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cb[4*3*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech(&img_cr[4*3*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
            }else{
                /* 4:2:0: one internal edge per direction in each chroma plane */
                if(left_type){
                    filter_mb_edgecv( &img_cb[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);
                    filter_mb_edgecv( &img_cr[2*0<<pixel_shift], uvlinesize, bS4, qpc0, a, b, h, 1);
                }
                filter_mb_edgecv( &img_cb[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgecv( &img_cr[2*2<<pixel_shift], uvlinesize, bS3, qpc, a, b, h, 0);
                if(top_type){
                    filter_mb_edgech( &img_cb[2*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1);
                    filter_mb_edgech( &img_cr[2*0*uvlinesize], uvlinesize, bSH, qpc1, a, b, h, 1);
                }
                filter_mb_edgech( &img_cb[2*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
                filter_mb_edgech( &img_cr[2*2*uvlinesize], uvlinesize, bS3, qpc, a, b, h, 0);
            }
        }
        return;
    } else {
        /* inter macroblock: compute per-edge strengths, then filter */
        LOCAL_ALIGNED(8, int16_t, bS, [2], [4][4]);
        int edges;
        if( IS_8x8DCT(mb_type) && (sl->cbp&7) == 7 && !chroma444 ) {
            /* all 8x8 luma blocks coded: every remaining edge has bS 2 */
            edges = 4;
            AV_WN64A(bS[0][0], 0x0002000200020002ULL);
            AV_WN64A(bS[0][2], 0x0002000200020002ULL);
            AV_WN64A(bS[1][0], 0x0002000200020002ULL);
            AV_WN64A(bS[1][2], 0x0002000200020002ULL);
        } else {
            int mask_edge1 = (3*(((5*mb_type)>>5)&1)) | (mb_type>>4); //(mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : (mb_type & MB_TYPE_16x8) ? 1 : 0;
            int mask_edge0 = 3*((mask_edge1>>1) & ((5*left_type)>>5)&1); // (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) && (h->left_type[LTOP] & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : 0;
            int step =  1+(mb_type>>24); //IS_8x8DCT(mb_type) ? 2 : 1;
            edges = 4 - 3*((mb_type>>3) & !(sl->cbp & 15)); //(mb_type & MB_TYPE_16x16) && !(h->cbp & 15) ? 1 : 4;
            h->h264dsp.h264_loop_filter_strength(bS, sl->non_zero_count_cache, sl->ref_cache, sl->mv_cache,
                                                 sl->list_count==2, edges, step, mask_edge0, mask_edge1, FIELD_PICTURE(h));
        }
        /* MB-boundary edges to intra neighbours are forced to maximum strength */
        if( IS_INTRA(left_type) )
            AV_WN64A(bS[0][0], 0x0004000400040004ULL);
        if( IS_INTRA(top_type) )
            AV_WN64A(bS[1][0], FIELD_PICTURE(h) ? 0x0003000300030003ULL : 0x0004000400040004ULL);

/* Apply one edge (dir 0 = vertical, 1 = horizontal); skips edges whose four
 * strengths are all zero, and skips odd chroma edges for subsampled formats. */
#define FILTER(hv,dir,edge,intra)\
        if(AV_RN64A(bS[dir][edge])) {                                   \
            filter_mb_edge##hv( &img_y[4*edge*(dir?linesize:1<<pixel_shift)], linesize, bS[dir][edge], edge ? qp : qp##dir, a, b, h, intra );\
            if(chroma){\
                if(chroma444){\
                    filter_mb_edge##hv( &img_cb[4*edge*(dir?linesize:1<<pixel_shift)], linesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\
                    filter_mb_edge##hv( &img_cr[4*edge*(dir?linesize:1<<pixel_shift)], linesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\
                } else if(!(edge&1)) {\
                    filter_mb_edgec##hv( &img_cb[2*edge*(dir?uvlinesize:1<<pixel_shift)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\
                    filter_mb_edgec##hv( &img_cr[2*edge*(dir?uvlinesize:1<<pixel_shift)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, a, b, h, intra );\
                }\
            }\
        }

        if(left_type)
            FILTER(v,0,0,1);
        if( edges == 1 ) {
            if(top_type)
                FILTER(h,1,0,1);
        } else if( IS_8x8DCT(mb_type) ) {
            FILTER(v,0,2,0);
            if(top_type)
                FILTER(h,1,0,1);
            FILTER(h,1,2,0);
        } else {
            FILTER(v,0,1,0);
            FILTER(v,0,2,0);
            FILTER(v,0,3,0);
            if(top_type)
                FILTER(h,1,0,1);
            FILTER(h,1,1,0);
            FILTER(h,1,2,0);
            FILTER(h,1,3,0);
        }
#undef FILTER
    }
}
/**
 * Deblock one macroblock via the fast path; non-MBAFF only.
 *
 * Falls back to the generic ff_h264_filter_mb() when the DSP
 * boundary-strength helper is unavailable or when the PPS uses different
 * QP offsets for Cb and Cr (the fast path only evaluates chroma QP
 * table 0 — see h264_filter_mb_fast_internal()).
 */
void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl,
                            int mb_x, int mb_y, uint8_t *img_y,
                            uint8_t *img_cb, uint8_t *img_cr,
                            unsigned int linesize, unsigned int uvlinesize)
{
    av_assert2(!FRAME_MBAFF(h));
    if(!h->h264dsp.h264_loop_filter_strength || h->ps.pps->chroma_qp_diff) {
        ff_h264_filter_mb(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize);
        return;
    }

#if CONFIG_SMALL
    h264_filter_mb_fast_internal(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, h->pixel_shift);
#else
    /* Non-small builds instantiate the always-inline worker once per
     * pixel_shift value so the shift is a compile-time constant. */
    if(h->pixel_shift){
        h264_filter_mb_fast_internal(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, 1);
    }else{
        h264_filter_mb_fast_internal(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, 0);
    }
#endif
}
/**
 * Decide whether two neighbouring 4x4 blocks differ enough in references
 * or motion vectors to require filtering (boundary strength 1).
 *
 * @param b_idx     scan8-style index of the current block in the caches
 * @param bn_idx    scan8-style index of the neighbouring block
 * @param mvy_limit vertical MV difference threshold
 * @return nonzero if the edge between the blocks must be filtered
 *
 * Note: comparisons are combined with bitwise '|' instead of '||'; every
 * term is 0/1 so the result is identical but short-circuit branches are
 * avoided. 'x + 3 >= 7U' relies on unsigned wraparound and is equivalent
 * to FFABS(x) >= 4 (i.e. a horizontal MV difference of a full pel).
 */
static int check_mv(H264SliceContext *sl, long b_idx, long bn_idx, int mvy_limit)
{
    int v;

    /* list 0: different references, or (for real references) MVs too far apart */
    v = sl->ref_cache[0][b_idx] != sl->ref_cache[0][bn_idx];
    if (!v && sl->ref_cache[0][b_idx] != -1)
        v = sl->mv_cache[0][b_idx][0] - sl->mv_cache[0][bn_idx][0] + 3 >= 7U |
            FFABS(sl->mv_cache[0][b_idx][1] - sl->mv_cache[0][bn_idx][1]) >= mvy_limit;

    if (sl->list_count == 2) {
        /* same test on list 1 */
        if(!v)
            v = sl->ref_cache[1][b_idx] != sl->ref_cache[1][bn_idx] |
                sl->mv_cache[1][b_idx][0] - sl->mv_cache[1][bn_idx][0] + 3 >= 7U |
                FFABS(sl->mv_cache[1][b_idx][1] - sl->mv_cache[1][bn_idx][1]) >= mvy_limit;

        if(v){
            /* The per-list test failed, but the blocks may still match with
             * the lists swapped; if even the swapped references differ, the
             * edge must be filtered. */
            if (sl->ref_cache[0][b_idx] != sl->ref_cache[1][bn_idx] |
                sl->ref_cache[1][b_idx] != sl->ref_cache[0][bn_idx])
                return 1;
            /* references match crosswise: compare MVs crosswise too */
            return
                sl->mv_cache[0][b_idx][0] - sl->mv_cache[1][bn_idx][0] + 3 >= 7U |
                FFABS(sl->mv_cache[0][b_idx][1] - sl->mv_cache[1][bn_idx][1]) >= mvy_limit |
                sl->mv_cache[1][b_idx][0] - sl->mv_cache[0][bn_idx][0] + 3 >= 7U |
                FFABS(sl->mv_cache[1][b_idx][1] - sl->mv_cache[0][bn_idx][1]) >= mvy_limit;
        }
    }

    return v;
}
/**
 * Deblock all edges of one macroblock in a single direction
 * (dir == 0: vertical edges, dir == 1: horizontal edges).
 *
 * Handles the macroblock-boundary edge against the left/top neighbour
 * (unless first_vertical_edge_done, used by the MBAFF special case in the
 * caller) and then the internal edges. Boundary strengths come from intra
 * status, coded-residual flags and check_mv().
 */
static av_always_inline void filter_mb_dir(const H264Context *h, H264SliceContext *sl,
                                           int mb_x, int mb_y,
                                           uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
                                           unsigned int linesize, unsigned int uvlinesize,
                                           int mb_xy, int mb_type, int mvy_limit,
                                           int first_vertical_edge_done, int a, int b,
                                           int chroma, int dir)
{
    int edge;
    int chroma_qp_avg[2];
    int chroma444 = CHROMA444(h);
    int chroma422 = CHROMA422(h);

    /* neighbour macroblock across the MB-boundary edge for this direction */
    const int mbm_xy = dir == 0 ? mb_xy -1 : sl->top_mb_xy;
    const int mbm_type = dir == 0 ? sl->left_type[LTOP] : sl->top_type;

    // how often to recheck mv-based bS when iterating between edges
    static const uint8_t mask_edge_tab[2][8]={{0,3,3,3,1,1,1,1},
                                              {0,3,1,1,3,3,3,3}};
    const int mask_edge = mask_edge_tab[dir][(mb_type>>3)&7];
    const int edges = mask_edge== 3 && !(sl->cbp&15) ? 1 : 4;

    // how often to recheck mv-based bS when iterating along each edge
    const int mask_par0 = mb_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir));

    if(mbm_type && !first_vertical_edge_done){
        if (FRAME_MBAFF(h) && (dir == 1) && ((mb_y&1) == 0)
            && IS_INTERLACED(mbm_type&~mb_type)
           ) {
            // This is a special case in the norm where the filtering must
            // be done twice (one each of the field) even if we are in a
            // frame macroblock.
            //
            unsigned int tmp_linesize   = 2 *   linesize;
            unsigned int tmp_uvlinesize = 2 * uvlinesize;
            int mbn_xy = mb_xy - 2 * h->mb_stride;
            int j;

            /* one pass per field of the interlaced neighbour pair */
            for(j=0; j<2; j++, mbn_xy += h->mb_stride){
                LOCAL_ALIGNED(8, int16_t, bS, [4]);
                int qp;
                if (IS_INTRA(mb_type | h->cur_pic.mb_type[mbn_xy])) {
                    AV_WN64A(bS, 0x0003000300030003ULL);
                } else {
                    if (!CABAC(h) && IS_8x8DCT(h->cur_pic.mb_type[mbn_xy])) {
                        /* CAVLC + 8x8 transform: use the cbp_table bits for
                         * the neighbour's coded 8x8 blocks */
                        bS[0]= 1+((h->cbp_table[mbn_xy] & 0x4000) || sl->non_zero_count_cache[scan8[0]+0]);
                        bS[1]= 1+((h->cbp_table[mbn_xy] & 0x4000) || sl->non_zero_count_cache[scan8[0]+1]);
                        bS[2]= 1+((h->cbp_table[mbn_xy] & 0x8000) || sl->non_zero_count_cache[scan8[0]+2]);
                        bS[3]= 1+((h->cbp_table[mbn_xy] & 0x8000) || sl->non_zero_count_cache[scan8[0]+3]);
                    }else{
                        const uint8_t *mbn_nnz = h->non_zero_count[mbn_xy] + 3*4;
                        int i;
                        for( i = 0; i < 4; i++ ) {
                            /* bS 2 when either side has coded residual, else 1 */
                            bS[i] = 1 + !!(sl->non_zero_count_cache[scan8[0]+i] | mbn_nnz[i]);
                        }
                    }
                }
                // Do not use s->qscale as luma quantizer because it has not the same
                // value in IPCM macroblocks.
                qp = (h->cur_pic.qscale_table[mb_xy] + h->cur_pic.qscale_table[mbn_xy] + 1) >> 1;
                ff_tlog(h->avctx, "filter mb:%d/%d dir:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, qp, tmp_linesize, tmp_uvlinesize);
                { int i; for (i = 0; i < 4; i++) ff_tlog(h->avctx, " bS[%d]:%d", i, bS[i]); ff_tlog(h->avctx, "\n"); }
                filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, a, b, h, 0 );
                chroma_qp_avg[0] = (sl->chroma_qp[0] + get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
                chroma_qp_avg[1] = (sl->chroma_qp[1] + get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
                if (chroma) {
                    if (chroma444) {
                        filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0);
                        filter_mb_edgeh (&img_cr[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[1], a, b, h, 0);
                    } else {
                        filter_mb_edgech(&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0);
                        filter_mb_edgech(&img_cr[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[1], a, b, h, 0);
                    }
                }
            }
        }else{
            /* regular MB-boundary edge */
            LOCAL_ALIGNED(8, int16_t, bS, [4]);
            int qp;

            if( IS_INTRA(mb_type|mbm_type)) {
                AV_WN64A(bS, 0x0003000300030003ULL);
                /* strength 4 unless it is a horizontal edge between
                 * field-coded data */
                if (   (!IS_INTERLACED(mb_type|mbm_type))
                    || ((FRAME_MBAFF(h) || (h->picture_structure != PICT_FRAME)) && (dir == 0))
                )
                    AV_WN64A(bS, 0x0004000400040004ULL);
            } else {
                int i;
                int mv_done;

                if( dir && FRAME_MBAFF(h) && IS_INTERLACED(mb_type ^ mbm_type)) {
                    /* mixed field/frame pair: fixed strength 1 */
                    AV_WN64A(bS, 0x0001000100010001ULL);
                    mv_done = 1;
                }
                else if( mask_par0 && ((mbm_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)))) ) {
                    /* both sides are a single partition across this edge:
                     * one MV check covers all four sub-edges */
                    int b_idx= 8 + 4;
                    int bn_idx= b_idx - (dir ? 8:1);

                    bS[0] = bS[1] = bS[2] = bS[3] = check_mv(sl, 8 + 4, bn_idx, mvy_limit);
                    mv_done = 1;
                }
                else
                    mv_done = 0;

                for( i = 0; i < 4; i++ ) {
                    int x = dir == 0 ? 0 : i;
                    int y = dir == 0 ? i : 0;
                    int b_idx= 8 + 4 + x + 8*y;
                    int bn_idx= b_idx - (dir ? 8:1);

                    /* coded residual on either side forces strength 2 */
                    if (sl->non_zero_count_cache[b_idx] |
                        sl->non_zero_count_cache[bn_idx]) {
                        bS[i] = 2;
                    }
                    else if(!mv_done)
                    {
                        bS[i] = check_mv(sl, b_idx, bn_idx, mvy_limit);
                    }
                }
            }

            /* Filter edge */
            // Do not use s->qscale as luma quantizer because it has not the same
            // value in IPCM macroblocks.
            if(bS[0]+bS[1]+bS[2]+bS[3]){
                qp = (h->cur_pic.qscale_table[mb_xy] + h->cur_pic.qscale_table[mbm_xy] + 1) >> 1;
                //ff_tlog(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], h->cur_pic.qscale_table[mbn_xy]);
                ff_tlog(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
                //{ int i; for (i = 0; i < 4; i++) ff_tlog(h->avctx, " bS[%d]:%d", i, bS[i]); ff_tlog(h->avctx, "\n"); }
                chroma_qp_avg[0] = (sl->chroma_qp[0] + get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
                chroma_qp_avg[1] = (sl->chroma_qp[1] + get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
                if( dir == 0 ) {
                    filter_mb_edgev( &img_y[0], linesize, bS, qp, a, b, h, 1 );
                    if (chroma) {
                        if (chroma444) {
                            filter_mb_edgev ( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                            filter_mb_edgev ( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);
                        } else {
                            filter_mb_edgecv( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                            filter_mb_edgecv( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);
                        }
                    }
                } else {
                    filter_mb_edgeh( &img_y[0], linesize, bS, qp, a, b, h, 1 );
                    if (chroma) {
                        if (chroma444) {
                            filter_mb_edgeh ( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                            filter_mb_edgeh ( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);
                        } else {
                            filter_mb_edgech( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], a, b, h, 1);
                            filter_mb_edgech( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], a, b, h, 1);
                        }
                    }
                }
            }
        }
    }

    /* Calculate bS */
    for( edge = 1; edge < edges; edge++ ) {
        LOCAL_ALIGNED(8, int16_t, bS, [4]);
        int qp;
        const int deblock_edge = !IS_8x8DCT(mb_type & (edge<<24)); // (edge&1) && IS_8x8DCT(mb_type)

        /* odd 4x4 edges do not exist with an 8x8 transform; for 4:2:2 the
         * chroma planes still need the horizontal pass below */
        if (!deblock_edge && (!chroma422 || dir == 0))
            continue;

        if( IS_INTRA(mb_type)) {
            AV_WN64A(bS, 0x0003000300030003ULL);
        } else {
            int i;
            int mv_done;

            if( edge & mask_edge ) {
                /* inside a single partition: no filtering needed */
                AV_ZERO64(bS);
                mv_done = 1;
            }
            else if( mask_par0 ) {
                int b_idx= 8 + 4 + edge * (dir ? 8:1);
                int bn_idx= b_idx - (dir ? 8:1);

                bS[0] = bS[1] = bS[2] = bS[3] = check_mv(sl, b_idx, bn_idx, mvy_limit);
                mv_done = 1;
            }
            else
                mv_done = 0;

            for( i = 0; i < 4; i++ ) {
                int x = dir == 0 ? edge : i;
                int y = dir == 0 ? i    : edge;
                int b_idx= 8 + 4 + x + 8*y;
                int bn_idx= b_idx - (dir ? 8:1);

                if (sl->non_zero_count_cache[b_idx] |
                    sl->non_zero_count_cache[bn_idx]) {
                    bS[i] = 2;
                }
                else if(!mv_done)
                {
                    bS[i] = check_mv(sl, b_idx, bn_idx, mvy_limit);
                }
            }

            if(bS[0]+bS[1]+bS[2]+bS[3] == 0)
                continue;
        }

        /* Filter edge */
        // Do not use s->qscale as luma quantizer because it has not the same
        // value in IPCM macroblocks.
        qp = h->cur_pic.qscale_table[mb_xy];
        //ff_tlog(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], h->cur_pic.qscale_table[mbn_xy]);
        ff_tlog(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
        //{ int i; for (i = 0; i < 4; i++) ff_tlog(h->avctx, " bS[%d]:%d", i, bS[i]); ff_tlog(h->avctx, "\n"); }
        if( dir == 0 ) {
            filter_mb_edgev( &img_y[4*edge << h->pixel_shift], linesize, bS, qp, a, b, h, 0 );
            if (chroma) {
                if (chroma444) {
                    filter_mb_edgev ( &img_cb[4*edge << h->pixel_shift], uvlinesize, bS, sl->chroma_qp[0], a, b, h, 0);
                    filter_mb_edgev ( &img_cr[4*edge << h->pixel_shift], uvlinesize, bS, sl->chroma_qp[1], a, b, h, 0);
                } else if( (edge&1) == 0 ) {
                    /* subsampled chroma: only even luma edges map to chroma */
                    filter_mb_edgecv( &img_cb[2*edge << h->pixel_shift], uvlinesize, bS, sl->chroma_qp[0], a, b, h, 0);
                    filter_mb_edgecv( &img_cr[2*edge << h->pixel_shift], uvlinesize, bS, sl->chroma_qp[1], a, b, h, 0);
                }
            }
        } else {
            if (chroma422) {
                if (deblock_edge)
                    filter_mb_edgeh(&img_y[4*edge*linesize], linesize, bS, qp, a, b, h, 0);
                if (chroma) {
                    filter_mb_edgech(&img_cb[4*edge*uvlinesize], uvlinesize, bS, sl->chroma_qp[0], a, b, h, 0);
                    filter_mb_edgech(&img_cr[4*edge*uvlinesize], uvlinesize, bS, sl->chroma_qp[1], a, b, h, 0);
                }
            } else {
                filter_mb_edgeh(&img_y[4*edge*linesize], linesize, bS, qp, a, b, h, 0);
                if (chroma) {
                    if (chroma444) {
                        filter_mb_edgeh (&img_cb[4*edge*uvlinesize], uvlinesize, bS, sl->chroma_qp[0], a, b, h, 0);
                        filter_mb_edgeh (&img_cr[4*edge*uvlinesize], uvlinesize, bS, sl->chroma_qp[1], a, b, h, 0);
                    } else if ((edge&1) == 0) {
                        filter_mb_edgech(&img_cb[2*edge*uvlinesize], uvlinesize, bS, sl->chroma_qp[0], a, b, h, 0);
                        filter_mb_edgech(&img_cr[2*edge*uvlinesize], uvlinesize, bS, sl->chroma_qp[1], a, b, h, 0);
                    }
                }
            }
        }
    }
}
/**
 * Apply the in-loop deblocking filter to one macroblock.
 *
 * Handles the special MBAFF case where the current and left macroblock
 * pairs have different interlacing, which requires filtering the first
 * vertical edge here with 8 boundary strengths and per-pair QPs; all
 * remaining edges (and the first vertical edge in the common case) are
 * delegated to filter_mb_dir() for both directions.
 *
 * @param h          decoder context (read-only here)
 * @param sl         per-slice state (left neighbour info, QP offsets, ...)
 * @param mb_x,mb_y  macroblock coordinates
 * @param img_y      pointer to the top-left luma sample of this MB
 * @param img_cb     pointer to the top-left Cb sample of this MB
 * @param img_cr     pointer to the top-left Cr sample of this MB
 * @param linesize   luma stride in bytes
 * @param uvlinesize chroma stride in bytes
 */
void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl,
                       int mb_x, int mb_y,
                       uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
                       unsigned int linesize, unsigned int uvlinesize)
{
    const int mb_xy= mb_x + mb_y*h->mb_stride;
    const int mb_type = h->cur_pic.mb_type[mb_xy];
    // Vertical MV threshold for bS derivation: field MBs use half the
    // frame value (MVs are in frame units; fields have half the height).
    const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
    int first_vertical_edge_done = 0;
    // Skip chroma filtering entirely for monochrome streams or when the
    // user requested gray-only decoding.
    int chroma = CHROMA(h) && !(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
    // QP tables are indexed for 8-bit; shift alpha/beta offsets for higher
    // bit depths (6 QP steps per extra bit).
    int qp_bd_offset = 6 * (h->ps.sps->bit_depth_luma - 8);
    int a = 52 + sl->slice_alpha_c0_offset - qp_bd_offset;
    int b = 52 + sl->slice_beta_offset - qp_bd_offset;

    if (FRAME_MBAFF(h)
        // and current and left pair do not have the same interlaced type
        && IS_INTERLACED(mb_type ^ sl->left_type[LTOP])
        // and left mb is in available to us
        && sl->left_type[LTOP]) {
        /* First vertical edge is different in MBAFF frames
         * There are 8 different bS to compute and 2 different Qp
         */
        LOCAL_ALIGNED(8, int16_t, bS, [8]);
        int qp[2];
        int bqp[2];   // Cb QP per left-neighbour pair member
        int rqp[2];   // Cr QP per left-neighbour pair member
        int mb_qp, mbn0_qp, mbn1_qp;
        int i;
        first_vertical_edge_done = 1;

        if( IS_INTRA(mb_type) ) {
            // Intra MB on an MB edge: all 8 boundary strengths are 4
            // (written as two 64-bit stores of 4x int16_t 0x0004).
            AV_WN64A(&bS[0], 0x0004000400040004ULL);
            AV_WN64A(&bS[4], 0x0004000400040004ULL);
        } else {
            // Index into the neighbour's non_zero_count[] for each of the
            // 8 sub-edges, selected by [MB_FIELD][mb_y&1].
            static const uint8_t offset[2][2][8]={
                {
                    {3+4*0, 3+4*0, 3+4*0, 3+4*0, 3+4*1, 3+4*1, 3+4*1, 3+4*1},
                    {3+4*2, 3+4*2, 3+4*2, 3+4*2, 3+4*3, 3+4*3, 3+4*3, 3+4*3},
                },{
                    {3+4*0, 3+4*1, 3+4*2, 3+4*3, 3+4*0, 3+4*1, 3+4*2, 3+4*3},
                    {3+4*0, 3+4*1, 3+4*2, 3+4*3, 3+4*0, 3+4*1, 3+4*2, 3+4*3},
                }
            };
            const uint8_t *off= offset[MB_FIELD(sl)][mb_y&1];
            for( i = 0; i < 8; i++ ) {
                // j selects which of the two left-pair MBs this sub-edge
                // borders: top/bottom halves in field mode, alternating
                // rows in frame mode.
                int j= MB_FIELD(sl) ? i>>2 : i&1;
                int mbn_xy = sl->left_mb_xy[LEFT(j)];
                int mbn_type = sl->left_type[LEFT(j)];

                if( IS_INTRA( mbn_type ) )
                    bS[i] = 4;
                else{
                    // bS = 2 if either side has non-zero coefficients,
                    // else 1. For CAVLC 8x8-transform neighbours the
                    // per-4x4 nnz is not available, so test the 8x8 CBP
                    // bits in cbp_table instead.
                    bS[i] = 1 + !!(sl->non_zero_count_cache[12+8*(i>>1)] |
                         ((!h->ps.pps->cabac && IS_8x8DCT(mbn_type)) ?
                            (h->cbp_table[mbn_xy] & (((MB_FIELD(sl) ? (i&2) : (mb_y&1)) ? 8 : 2) << 12))
                                                                       :
                            h->non_zero_count[mbn_xy][ off[i] ]));
                }
            }
        }

        // Edge QP is the rounded average of the QPs on both sides
        // (computed separately against each left-pair member, and per
        // chroma plane since Cb/Cr can have different QP offsets).
        mb_qp   = h->cur_pic.qscale_table[mb_xy];
        mbn0_qp = h->cur_pic.qscale_table[sl->left_mb_xy[0]];
        mbn1_qp = h->cur_pic.qscale_table[sl->left_mb_xy[1]];
        qp[0]  = ( mb_qp + mbn0_qp + 1 ) >> 1;
        bqp[0] = (get_chroma_qp(h->ps.pps, 0, mb_qp) +
                  get_chroma_qp(h->ps.pps, 0, mbn0_qp) + 1) >> 1;
        rqp[0] = (get_chroma_qp(h->ps.pps, 1, mb_qp) +
                  get_chroma_qp(h->ps.pps, 1, mbn0_qp) + 1) >> 1;
        qp[1]  = ( mb_qp + mbn1_qp + 1 ) >> 1;
        bqp[1] = (get_chroma_qp(h->ps.pps, 0, mb_qp) +
                  get_chroma_qp(h->ps.pps, 0, mbn1_qp) + 1 ) >> 1;
        rqp[1] = (get_chroma_qp(h->ps.pps, 1, mb_qp) +
                  get_chroma_qp(h->ps.pps, 1, mbn1_qp) + 1 ) >> 1;

        /* Filter edge */
        ff_tlog(h->avctx, "filter mb:%d/%d MBAFF, QPy:%d/%d, QPb:%d/%d QPr:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], bqp[0], bqp[1], rqp[0], rqp[1], linesize, uvlinesize);
        { int i; for (i = 0; i < 8; i++) ff_tlog(h->avctx, " bS[%d]:%d", i, bS[i]); ff_tlog(h->avctx, "\n"); }
        if (MB_FIELD(sl)) {
            // Field MB: filter the top and bottom 8-line halves
            // separately, each against its own left-pair member.
            filter_mb_mbaff_edgev ( h, img_y                ,   linesize, bS  , 1, qp [0], a, b, 1 );
            filter_mb_mbaff_edgev ( h, img_y  + 8*  linesize,   linesize, bS+4, 1, qp [1], a, b, 1 );
            if (chroma){
                if (CHROMA444(h)) {
                    filter_mb_mbaff_edgev ( h, img_cb,                uvlinesize, bS  , 1, bqp[0], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cb + 8*uvlinesize, uvlinesize, bS+4, 1, bqp[1], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cr,                uvlinesize, bS  , 1, rqp[0], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cr + 8*uvlinesize, uvlinesize, bS+4, 1, rqp[1], a, b, 1 );
                } else if (CHROMA422(h)) {
                    filter_mb_mbaff_edgecv(h, img_cb,                uvlinesize, bS  , 1, bqp[0], a, b, 1);
                    filter_mb_mbaff_edgecv(h, img_cb + 8*uvlinesize, uvlinesize, bS+4, 1, bqp[1], a, b, 1);
                    filter_mb_mbaff_edgecv(h, img_cr,                uvlinesize, bS  , 1, rqp[0], a, b, 1);
                    filter_mb_mbaff_edgecv(h, img_cr + 8*uvlinesize, uvlinesize, bS+4, 1, rqp[1], a, b, 1);
                }else{
                    // 4:2:0 chroma is 8 lines tall, so the halves split at 4.
                    filter_mb_mbaff_edgecv( h, img_cb,                uvlinesize, bS  , 1, bqp[0], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cb + 4*uvlinesize, uvlinesize, bS+4, 1, bqp[1], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cr,                uvlinesize, bS  , 1, rqp[0], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cr + 4*uvlinesize, uvlinesize, bS+4, 1, rqp[1], a, b, 1 );
                }
            }
        }else{
            // Frame MB with field neighbours: filter even and odd rows as
            // interleaved fields (stride doubled, alternating bS entries).
            filter_mb_mbaff_edgev ( h, img_y              , 2*  linesize, bS  , 2, qp [0], a, b, 1 );
            filter_mb_mbaff_edgev ( h, img_y  +   linesize, 2*  linesize, bS+1, 2, qp [1], a, b, 1 );
            if (chroma){
                if (CHROMA444(h)) {
                    filter_mb_mbaff_edgev ( h, img_cb,              2*uvlinesize, bS  , 2, bqp[0], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cb + uvlinesize, 2*uvlinesize, bS+1, 2, bqp[1], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cr,              2*uvlinesize, bS  , 2, rqp[0], a, b, 1 );
                    filter_mb_mbaff_edgev ( h, img_cr + uvlinesize, 2*uvlinesize, bS+1, 2, rqp[1], a, b, 1 );
                }else{
                    filter_mb_mbaff_edgecv( h, img_cb,              2*uvlinesize, bS  , 2, bqp[0], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cb + uvlinesize, 2*uvlinesize, bS+1, 2, bqp[1], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cr,              2*uvlinesize, bS  , 2, rqp[0], a, b, 1 );
                    filter_mb_mbaff_edgecv( h, img_cr + uvlinesize, 2*uvlinesize, bS+1, 2, rqp[1], a, b, 1 );
                }
            }
        }
    }

    // Remaining edges: vertical (dir 0) then horizontal (dir 1). The
    // first vertical edge is skipped if it was handled by the MBAFF path
    // above. CONFIG_SMALL trades the unrolled pair of calls for a loop.
#if CONFIG_SMALL
    {
        int dir;
        for (dir = 0; dir < 2; dir++)
            filter_mb_dir(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize,
                          uvlinesize, mb_xy, mb_type, mvy_limit,
                          dir ? 0 : first_vertical_edge_done, a, b,
                          chroma, dir);
    }
#else
    filter_mb_dir(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, first_vertical_edge_done, a, b, chroma, 0);
    filter_mb_dir(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, 0, a, b, chroma, 1);
#endif
}