You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

755 lines
36KB

  1. /*
  2. * H.26L/H.264/AVC/JVT/14496-10/... loop filter
  3. * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
  4. *
  5. * This file is part of Libav.
  6. *
  7. * Libav is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * Libav is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with Libav; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * H.264 / AVC / MPEG4 part10 loop filter.
  24. * @author Michael Niedermayer <michaelni@gmx.at>
  25. */
  26. #include "libavutil/intreadwrite.h"
  27. #include "internal.h"
  28. #include "dsputil.h"
  29. #include "avcodec.h"
  30. #include "mpegvideo.h"
  31. #include "h264.h"
  32. #include "mathops.h"
  33. #include "rectangle.h"
  34. //#undef NDEBUG
  35. #include <assert.h>
  36. /* Deblocking filter (p153) */
/* alpha threshold per QP (spec table 8-16).  The table is 52*3 entries wide:
 * 52 zeros below the valid QP range, the 52 real alpha values, then 52
 * saturated (255) entries above.  The padding lets callers index with
 * qp + slice_alpha_c0_offset directly; the slice offset appears to carry a
 * +52 bias (see the "15+52 - h->slice_alpha_c0_offset" qp_thresh computation
 * in ff_h264_filter_mb_fast), keeping the index inside the table. */
static const uint8_t alpha_table[52*3] = {
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
  0,  0,  0,  0,  0,  0,  4,  4,  5,  6,
  7,  8,  9, 10, 12, 13, 15, 17, 20, 22,
 25, 28, 32, 36, 40, 45, 50, 56, 63, 71,
 80, 90,101,113,127,144,162,182,203,226,
255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255,255,255,255,255,255,255,
};
/* beta threshold per QP; padded to 52*3 entries exactly like alpha_table
 * (52 zeros below, the 52 real values, 52 saturated entries above) so the
 * same biased index can be used without range checks. */
static const uint8_t beta_table[52*3] = {
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
  0,  0,  0,  0,  0,  0,  2,  2,  2,  3,
  3,  3,  3,  4,  4,  4,  6,  6,  7,  7,
  8,  8,  9,  9, 10, 10, 11, 11, 12, 12,
 13, 13, 14, 14, 15, 15, 16, 16, 17, 17,
 18, 18,
 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
};
/* tc0 clipping values, indexed [biased QP index][bS].  Rows are padded the
 * same way as alpha_table/beta_table (52 low rows, 52 real rows, 52 rows
 * clamped at the maximum).  Column 0 (bS == 0) holds -1, which presumably
 * tells the DSP loop-filter routines to leave those pixels untouched --
 * verify against the h264dsp implementations.  Note the declared element
 * type is uint8_t, so the -1 entries are stored as 255 and become -1 again
 * when read back through the callers' int8_t tc[] arrays. */
static const uint8_t tc0_table[52*3][4] = {
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 },
    {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 0 }, {-1, 0, 0, 1 },
    {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 0, 1 }, {-1, 0, 1, 1 }, {-1, 0, 1, 1 }, {-1, 1, 1, 1 },
    {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 1 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 }, {-1, 1, 1, 2 },
    {-1, 1, 1, 2 }, {-1, 1, 2, 3 }, {-1, 1, 2, 3 }, {-1, 2, 2, 3 }, {-1, 2, 2, 4 }, {-1, 2, 3, 4 },
    {-1, 2, 3, 4 }, {-1, 3, 3, 5 }, {-1, 3, 4, 6 }, {-1, 3, 4, 6 }, {-1, 4, 5, 7 }, {-1, 4, 5, 8 },
    {-1, 4, 6, 9 }, {-1, 5, 7,10 }, {-1, 6, 8,11 }, {-1, 6, 8,13 }, {-1, 7,10,14 }, {-1, 8,11,16 },
    {-1, 9,12,18 }, {-1,10,13,20 }, {-1,11,15,23 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
    {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 }, {-1,13,17,25 },
};
  98. static void av_always_inline filter_mb_edgev( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, H264Context *h) {
  99. const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
  100. const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
  101. const int alpha = alpha_table[index_a];
  102. const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset];
  103. if (alpha ==0 || beta == 0) return;
  104. if( bS[0] < 4 ) {
  105. int8_t tc[4];
  106. tc[0] = tc0_table[index_a][bS[0]];
  107. tc[1] = tc0_table[index_a][bS[1]];
  108. tc[2] = tc0_table[index_a][bS[2]];
  109. tc[3] = tc0_table[index_a][bS[3]];
  110. h->h264dsp.h264_h_loop_filter_luma(pix, stride, alpha, beta, tc);
  111. } else {
  112. h->h264dsp.h264_h_loop_filter_luma_intra(pix, stride, alpha, beta);
  113. }
  114. }
  115. static void av_always_inline filter_mb_edgecv( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, H264Context *h ) {
  116. const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
  117. const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
  118. const int alpha = alpha_table[index_a];
  119. const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset];
  120. if (alpha ==0 || beta == 0) return;
  121. if( bS[0] < 4 ) {
  122. int8_t tc[4];
  123. tc[0] = tc0_table[index_a][bS[0]]+1;
  124. tc[1] = tc0_table[index_a][bS[1]]+1;
  125. tc[2] = tc0_table[index_a][bS[2]]+1;
  126. tc[3] = tc0_table[index_a][bS[3]]+1;
  127. h->h264dsp.h264_h_loop_filter_chroma(pix, stride, alpha, beta, tc);
  128. } else {
  129. h->h264dsp.h264_h_loop_filter_chroma_intra(pix, stride, alpha, beta);
  130. }
  131. }
  132. static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, const int16_t bS[7], int bsi, int qp ) {
  133. const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
  134. int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
  135. int alpha = alpha_table[index_a];
  136. int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset];
  137. if (alpha ==0 || beta == 0) return;
  138. if( bS[0] < 4 ) {
  139. int8_t tc[4];
  140. tc[0] = tc0_table[index_a][bS[0*bsi]];
  141. tc[1] = tc0_table[index_a][bS[1*bsi]];
  142. tc[2] = tc0_table[index_a][bS[2*bsi]];
  143. tc[3] = tc0_table[index_a][bS[3*bsi]];
  144. h->h264dsp.h264_h_loop_filter_luma_mbaff(pix, stride, alpha, beta, tc);
  145. } else {
  146. h->h264dsp.h264_h_loop_filter_luma_mbaff_intra(pix, stride, alpha, beta);
  147. }
  148. }
  149. static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, const int16_t bS[7], int bsi, int qp ) {
  150. const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
  151. int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
  152. int alpha = alpha_table[index_a];
  153. int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset];
  154. if (alpha ==0 || beta == 0) return;
  155. if( bS[0] < 4 ) {
  156. int8_t tc[4];
  157. tc[0] = tc0_table[index_a][bS[0*bsi]] + 1;
  158. tc[1] = tc0_table[index_a][bS[1*bsi]] + 1;
  159. tc[2] = tc0_table[index_a][bS[2*bsi]] + 1;
  160. tc[3] = tc0_table[index_a][bS[3*bsi]] + 1;
  161. h->h264dsp.h264_h_loop_filter_chroma_mbaff(pix, stride, alpha, beta, tc);
  162. } else {
  163. h->h264dsp.h264_h_loop_filter_chroma_mbaff_intra(pix, stride, alpha, beta);
  164. }
  165. }
  166. static void av_always_inline filter_mb_edgeh( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, H264Context *h ) {
  167. const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
  168. const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
  169. const int alpha = alpha_table[index_a];
  170. const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset];
  171. if (alpha ==0 || beta == 0) return;
  172. if( bS[0] < 4 ) {
  173. int8_t tc[4];
  174. tc[0] = tc0_table[index_a][bS[0]];
  175. tc[1] = tc0_table[index_a][bS[1]];
  176. tc[2] = tc0_table[index_a][bS[2]];
  177. tc[3] = tc0_table[index_a][bS[3]];
  178. h->h264dsp.h264_v_loop_filter_luma(pix, stride, alpha, beta, tc);
  179. } else {
  180. h->h264dsp.h264_v_loop_filter_luma_intra(pix, stride, alpha, beta);
  181. }
  182. }
  183. static void av_always_inline filter_mb_edgech( uint8_t *pix, int stride, const int16_t bS[4], unsigned int qp, H264Context *h ) {
  184. const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
  185. const unsigned int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset;
  186. const int alpha = alpha_table[index_a];
  187. const int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset];
  188. if (alpha ==0 || beta == 0) return;
  189. if( bS[0] < 4 ) {
  190. int8_t tc[4];
  191. tc[0] = tc0_table[index_a][bS[0]]+1;
  192. tc[1] = tc0_table[index_a][bS[1]]+1;
  193. tc[2] = tc0_table[index_a][bS[2]]+1;
  194. tc[3] = tc0_table[index_a][bS[3]]+1;
  195. h->h264dsp.h264_v_loop_filter_chroma(pix, stride, alpha, beta, tc);
  196. } else {
  197. h->h264dsp.h264_v_loop_filter_chroma_intra(pix, stride, alpha, beta);
  198. }
  199. }
/**
 * Fast deblocking path for progressive (non-MBAFF) macroblocks.
 * Uses the accelerated h264_loop_filter_strength DSP routine to compute all
 * boundary strengths at once; falls back to ff_h264_filter_mb when that
 * routine is unavailable or when cb/cr have different QP offsets.
 */
void ff_h264_filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
    MpegEncContext * const s = &h->s;
    int mb_xy;
    int mb_type, left_type, top_type;
    int qp, qp0, qp1, qpc, qpc0, qpc1, qp_thresh;
    int chroma = !(CONFIG_GRAY && (s->flags&CODEC_FLAG_GRAY));
    int chroma444 = CHROMA444;
    mb_xy = h->mb_xy;
    /* The fast path needs the DSP strength routine and a single chroma QP
     * shared by both chroma planes. */
    if(!h->h264dsp.h264_loop_filter_strength || h->pps.chroma_qp_diff) {
        ff_h264_filter_mb(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize);
        return;
    }
    assert(!FRAME_MBAFF);
    left_type= h->left_type[LTOP];
    top_type= h->top_type;
    mb_type = s->current_picture.mb_type[mb_xy];
    qp = s->current_picture.qscale_table[mb_xy];
    qp0 = s->current_picture.qscale_table[mb_xy-1];      /* left neighbour QP */
    qp1 = s->current_picture.qscale_table[h->top_mb_xy]; /* top neighbour QP */
    qpc = get_chroma_qp( h, 0, qp );
    qpc0 = get_chroma_qp( h, 0, qp0 );
    qpc1 = get_chroma_qp( h, 0, qp1 );
    /* Cross-MB edges use the rounded average of the two adjacent QPs. */
    qp0 = (qp + qp0 + 1) >> 1;
    qp1 = (qp + qp1 + 1) >> 1;
    qpc0 = (qpc + qpc0 + 1) >> 1;
    qpc1 = (qpc + qpc1 + 1) >> 1;
    /* The +52 matches the bias apparently carried by slice_alpha_c0_offset;
     * at or below this threshold every alpha/beta lookup yields 0, so the
     * whole macroblock can be skipped. */
    qp_thresh = 15+52 - h->slice_alpha_c0_offset;
    if(qp <= qp_thresh && qp0 <= qp_thresh && qp1 <= qp_thresh &&
       qpc <= qp_thresh && qpc0 <= qp_thresh && qpc1 <= qp_thresh)
        return;
    if( IS_INTRA(mb_type) ) {
        /* Intra MBs: constant strengths -- 4 on MB borders (3 on horizontal
         * borders of field pictures), 3 on internal edges. */
        static const int16_t bS4[4] = {4,4,4,4};
        static const int16_t bS3[4] = {3,3,3,3};
        const int16_t *bSH = FIELD_PICTURE ? bS3 : bS4;
        if(left_type)
            filter_mb_edgev( &img_y[4*0], linesize, bS4, qp0, h);
        if( IS_8x8DCT(mb_type) ) {
            /* 8x8 transform: only every second internal edge exists. */
            filter_mb_edgev( &img_y[4*2], linesize, bS3, qp, h);
            if(top_type){
                filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, h);
            }
            filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, h);
        } else {
            filter_mb_edgev( &img_y[4*1], linesize, bS3, qp, h);
            filter_mb_edgev( &img_y[4*2], linesize, bS3, qp, h);
            filter_mb_edgev( &img_y[4*3], linesize, bS3, qp, h);
            if(top_type){
                filter_mb_edgeh( &img_y[4*0*linesize], linesize, bSH, qp1, h);
            }
            filter_mb_edgeh( &img_y[4*1*linesize], linesize, bS3, qp, h);
            filter_mb_edgeh( &img_y[4*2*linesize], linesize, bS3, qp, h);
            filter_mb_edgeh( &img_y[4*3*linesize], linesize, bS3, qp, h);
        }
        if(chroma){
            if(chroma444){
                /* 4:4:4 chroma has full resolution: use the luma edge
                 * helpers with the chroma QPs. */
                if(left_type){
                    filter_mb_edgev( &img_cb[4*0], linesize, bS4, qpc0, h);
                    filter_mb_edgev( &img_cr[4*0], linesize, bS4, qpc0, h);
                }
                if( IS_8x8DCT(mb_type) ) {
                    filter_mb_edgev( &img_cb[4*2], linesize, bS3, qpc, h);
                    filter_mb_edgev( &img_cr[4*2], linesize, bS3, qpc, h);
                    if(top_type){
                        filter_mb_edgeh( &img_cb[4*0*linesize], linesize, bSH, qpc1, h);
                        filter_mb_edgeh( &img_cr[4*0*linesize], linesize, bSH, qpc1, h);
                    }
                    filter_mb_edgeh( &img_cb[4*2*linesize], linesize, bS3, qpc, h);
                    filter_mb_edgeh( &img_cr[4*2*linesize], linesize, bS3, qpc, h);
                } else {
                    filter_mb_edgev( &img_cb[4*1], linesize, bS3, qpc, h);
                    filter_mb_edgev( &img_cr[4*1], linesize, bS3, qpc, h);
                    filter_mb_edgev( &img_cb[4*2], linesize, bS3, qpc, h);
                    filter_mb_edgev( &img_cr[4*2], linesize, bS3, qpc, h);
                    filter_mb_edgev( &img_cb[4*3], linesize, bS3, qpc, h);
                    filter_mb_edgev( &img_cr[4*3], linesize, bS3, qpc, h);
                    if(top_type){
                        filter_mb_edgeh( &img_cb[4*0*linesize], linesize, bSH, qpc1, h);
                        filter_mb_edgeh( &img_cr[4*0*linesize], linesize, bSH, qpc1, h);
                    }
                    filter_mb_edgeh( &img_cb[4*1*linesize], linesize, bS3, qpc, h);
                    filter_mb_edgeh( &img_cr[4*1*linesize], linesize, bS3, qpc, h);
                    filter_mb_edgeh( &img_cb[4*2*linesize], linesize, bS3, qpc, h);
                    filter_mb_edgeh( &img_cr[4*2*linesize], linesize, bS3, qpc, h);
                    filter_mb_edgeh( &img_cb[4*3*linesize], linesize, bS3, qpc, h);
                    filter_mb_edgeh( &img_cr[4*3*linesize], linesize, bS3, qpc, h);
                }
            }else{
                /* Subsampled chroma: only the MB border and the middle edge
                 * exist at chroma resolution. */
                if(left_type){
                    filter_mb_edgecv( &img_cb[2*0], uvlinesize, bS4, qpc0, h);
                    filter_mb_edgecv( &img_cr[2*0], uvlinesize, bS4, qpc0, h);
                }
                filter_mb_edgecv( &img_cb[2*2], uvlinesize, bS3, qpc, h);
                filter_mb_edgecv( &img_cr[2*2], uvlinesize, bS3, qpc, h);
                if(top_type){
                    filter_mb_edgech( &img_cb[2*0*uvlinesize], uvlinesize, bSH, qpc1, h);
                    filter_mb_edgech( &img_cr[2*0*uvlinesize], uvlinesize, bSH, qpc1, h);
                }
                filter_mb_edgech( &img_cb[2*2*uvlinesize], uvlinesize, bS3, qpc, h);
                filter_mb_edgech( &img_cr[2*2*uvlinesize], uvlinesize, bS3, qpc, h);
            }
        }
        return;
    } else {
        /* Inter MB: bS[dir][edge][0..3] computed by the DSP routine (or set
         * directly for the all-2 8x8-DCT case), then patched to 4/3 against
         * intra neighbours. */
        LOCAL_ALIGNED_8(int16_t, bS, [2], [4][4]);
        int edges;
        if( IS_8x8DCT(mb_type) && (h->cbp&7) == 7 ) {
            /* All luma 8x8 blocks coded: every existing edge gets bS 2. */
            edges = 4;
            AV_WN64A(bS[0][0], 0x0002000200020002ULL);
            AV_WN64A(bS[0][2], 0x0002000200020002ULL);
            AV_WN64A(bS[1][0], 0x0002000200020002ULL);
            AV_WN64A(bS[1][2], 0x0002000200020002ULL);
        } else {
            /* Bit tricks decoding partition info straight from the mb_type
             * flag bits; the trailing comments give the readable form. */
            int mask_edge1 = (3*(((5*mb_type)>>5)&1)) | (mb_type>>4); //(mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : (mb_type & MB_TYPE_16x8) ? 1 : 0;
            int mask_edge0 = 3*((mask_edge1>>1) & ((5*left_type)>>5)&1); // (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) && (h->left_type[LTOP] & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 : 0;
            int step = 1+(mb_type>>24); //IS_8x8DCT(mb_type) ? 2 : 1;
            edges = 4 - 3*((mb_type>>3) & !(h->cbp & 15)); //(mb_type & MB_TYPE_16x16) && !(h->cbp & 15) ? 1 : 4;
            h->h264dsp.h264_loop_filter_strength( bS, h->non_zero_count_cache, h->ref_cache, h->mv_cache,
                                                  h->list_count==2, edges, step, mask_edge0, mask_edge1, FIELD_PICTURE);
        }
        if( IS_INTRA(left_type) )
            AV_WN64A(bS[0][0], 0x0004000400040004ULL);
        if( IS_INTRA(top_type) )
            AV_WN64A(bS[1][0], FIELD_PICTURE ? 0x0003000300030003ULL : 0x0004000400040004ULL);
/* Apply the filter on one edge if any of its four strengths is nonzero:
 * dir 0 = vertical edges, dir 1 = horizontal; edge 0 uses the averaged
 * cross-MB QP, internal edges use this MB's QP.  Subsampled chroma is only
 * filtered on even edges. */
#define FILTER(hv,dir,edge)\
        if(AV_RN64A(bS[dir][edge])) { \
            filter_mb_edge##hv( &img_y[4*edge*(dir?linesize:1)], linesize, bS[dir][edge], edge ? qp : qp##dir, h );\
            if(chroma){\
                if(chroma444){\
                    filter_mb_edge##hv( &img_cb[4*edge*(dir?linesize:1)], linesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\
                    filter_mb_edge##hv( &img_cr[4*edge*(dir?linesize:1)], linesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\
                } else if(!(edge&1)) {\
                    filter_mb_edgec##hv( &img_cb[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\
                    filter_mb_edgec##hv( &img_cr[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir, h );\
                }\
            }\
        }
        if(left_type)
            FILTER(v,0,0);
        if( edges == 1 ) {
            if(top_type)
                FILTER(h,1,0);
        } else if( IS_8x8DCT(mb_type) ) {
            FILTER(v,0,2);
            if(top_type)
                FILTER(h,1,0);
            FILTER(h,1,2);
        } else {
            FILTER(v,0,1);
            FILTER(v,0,2);
            FILTER(v,0,3);
            if(top_type)
                FILTER(h,1,0);
            FILTER(h,1,1);
            FILTER(h,1,2);
            FILTER(h,1,3);
        }
#undef FILTER
    }
}
/* Return nonzero if the references/motion vectors on the two sides of an
 * edge differ enough to require boundary strength 1.
 * "mvd + 3 >= 7U" is a branchless test for |mvd| > 3, i.e. a horizontal
 * difference of at least a full pel in quarter-pel units; bitwise '|' is
 * used instead of '||' to avoid short-circuit branches (all operands are
 * cheap comparisons).  mvy_limit is the vertical threshold (2 for
 * interlaced MBs, 4 otherwise, per the caller). */
static int check_mv(H264Context *h, long b_idx, long bn_idx, int mvy_limit){
    int v;
    /* different L0 reference picture => filter */
    v= h->ref_cache[0][b_idx] != h->ref_cache[0][bn_idx];
    if(!v && h->ref_cache[0][b_idx]!=-1)
        v= h->mv_cache[0][b_idx][0] - h->mv_cache[0][bn_idx][0] + 3 >= 7U |
           FFABS( h->mv_cache[0][b_idx][1] - h->mv_cache[0][bn_idx][1] ) >= mvy_limit;
    if(h->list_count==2){
        if(!v)
            v = h->ref_cache[1][b_idx] != h->ref_cache[1][bn_idx] |
                h->mv_cache[1][b_idx][0] - h->mv_cache[1][bn_idx][0] + 3 >= 7U |
                FFABS( h->mv_cache[1][b_idx][1] - h->mv_cache[1][bn_idx][1] ) >= mvy_limit;
        if(v){
            /* Bi-predicted: the (L0,L1) pair may match the neighbour's
             * (L1,L0) pair instead; re-check cross-paired refs and MVs
             * before declaring the edge different. */
            if(h->ref_cache[0][b_idx] != h->ref_cache[1][bn_idx] |
               h->ref_cache[1][b_idx] != h->ref_cache[0][bn_idx])
                return 1;
            return
                h->mv_cache[0][b_idx][0] - h->mv_cache[1][bn_idx][0] + 3 >= 7U |
                FFABS( h->mv_cache[0][b_idx][1] - h->mv_cache[1][bn_idx][1] ) >= mvy_limit |
                h->mv_cache[1][b_idx][0] - h->mv_cache[0][bn_idx][0] + 3 >= 7U |
                FFABS( h->mv_cache[1][b_idx][1] - h->mv_cache[0][bn_idx][1] ) >= mvy_limit;
        }
    }
    return v;
}
/**
 * Filter all edges of one macroblock in one direction (dir 0 = vertical
 * edges, dir 1 = horizontal edges): first the edge shared with the
 * left/top neighbour (unless first_vertical_edge_done), then the up-to-3
 * internal edges, computing boundary strengths per edge.
 */
static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize, int mb_xy, int mb_type, int mvy_limit, int first_vertical_edge_done, int chroma, int chroma444, int dir) {
    MpegEncContext * const s = &h->s;
    int edge;
    int chroma_qp_avg[2];
    const int mbm_xy = dir == 0 ? mb_xy -1 : h->top_mb_xy;               /* neighbour MB index */
    const int mbm_type = dir == 0 ? h->left_type[LTOP] : h->top_type;    /* neighbour MB type */
    // how often to recheck mv-based bS when iterating between edges
    static const uint8_t mask_edge_tab[2][8]={{0,3,3,3,1,1,1,1},
                                              {0,3,1,1,3,3,3,3}};
    const int mask_edge = mask_edge_tab[dir][(mb_type>>3)&7];
    /* 16x16 MB with no coded luma residual: only the MB-border edge needs
     * filtering. */
    const int edges = mask_edge== 3 && !(h->cbp&15) ? 1 : 4;
    // how often to recheck mv-based bS when iterating along each edge
    const int mask_par0 = mb_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir));
    if(mbm_type && !first_vertical_edge_done){
        if (FRAME_MBAFF && (dir == 1) && ((mb_y&1) == 0)
            && IS_INTERLACED(mbm_type&~mb_type)
           ) {
            // This is a special case in the norm where the filtering must
            // be done twice (one each of the field) even if we are in a
            // frame macroblock.
            //
            unsigned int tmp_linesize = 2 * linesize;
            unsigned int tmp_uvlinesize = 2 * uvlinesize;
            int mbn_xy = mb_xy - 2 * s->mb_stride;
            int j;
            for(j=0; j<2; j++, mbn_xy += s->mb_stride){
                DECLARE_ALIGNED(8, int16_t, bS)[4];
                int qp;
                if( IS_INTRA(mb_type|s->current_picture.mb_type[mbn_xy]) ) {
                    /* field edge against intra: strength 3 everywhere */
                    AV_WN64A(bS, 0x0003000300030003ULL);
                } else {
                    if(!CABAC && IS_8x8DCT(s->current_picture.mb_type[mbn_xy])){
                        /* CAVLC 8x8-DCT neighbour: its per-4x4 nnz is not
                         * stored, read codedness from cbp_table instead. */
                        bS[0]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+0]);
                        bS[1]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+1]);
                        bS[2]= 1+((h->cbp_table[mbn_xy] & 0x8000)||h->non_zero_count_cache[scan8[0]+2]);
                        bS[3]= 1+((h->cbp_table[mbn_xy] & 0x8000)||h->non_zero_count_cache[scan8[0]+3]);
                    }else{
                        const uint8_t *mbn_nnz = h->non_zero_count[mbn_xy] + 3*4;
                        int i;
                        for( i = 0; i < 4; i++ ) {
                            /* bS 2 if either side has coefficients, else 1 */
                            bS[i] = 1 + !!(h->non_zero_count_cache[scan8[0]+i] | mbn_nnz[i]);
                        }
                    }
                }
                // Do not use s->qscale as luma quantizer because it has not the same
                // value in IPCM macroblocks.
                qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1;
                tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);
                { int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
                filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, h );
                chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1;
                chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1;
                if (chroma) {
                    if (chroma444) {
                        filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], h);
                        filter_mb_edgeh (&img_cr[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[1], h);
                    } else {
                        filter_mb_edgech(&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], h);
                        filter_mb_edgech(&img_cr[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[1], h);
                    }
                }
            }
        }else{
            /* Regular MB-border edge (edge 0). */
            DECLARE_ALIGNED(8, int16_t, bS)[4];
            int qp;
            if( IS_INTRA(mb_type|mbm_type)) {
                /* intra on either side: bS 4 on frame vertical edges and on
                 * field/MBAFF vertical edges, 3 otherwise */
                AV_WN64A(bS, 0x0003000300030003ULL);
                if (   (!IS_INTERLACED(mb_type|mbm_type))
                    || ((FRAME_MBAFF || (s->picture_structure != PICT_FRAME)) && (dir == 0))
                )
                    AV_WN64A(bS, 0x0004000400040004ULL);
            } else {
                int i;
                int mv_done;
                if( dir && FRAME_MBAFF && IS_INTERLACED(mb_type ^ mbm_type)) {
                    /* mixed frame/field pair: MVs are not comparable,
                     * strength is fixed at 1 */
                    AV_WN64A(bS, 0x0001000100010001ULL);
                    mv_done = 1;
                }
                else if( mask_par0 && ((mbm_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)))) ) {
                    /* both sides have one partition across this edge: one
                     * mv check covers all four positions */
                    int b_idx= 8 + 4;
                    int bn_idx= b_idx - (dir ? 8:1);
                    bS[0] = bS[1] = bS[2] = bS[3] = check_mv(h, 8 + 4, bn_idx, mvy_limit);
                    mv_done = 1;
                }
                else
                    mv_done = 0;
                for( i = 0; i < 4; i++ ) {
                    int x = dir == 0 ? 0 : i;
                    int y = dir == 0 ? i : 0;
                    int b_idx= 8 + 4 + x + 8*y;
                    int bn_idx= b_idx - (dir ? 8:1);
                    if( h->non_zero_count_cache[b_idx] |
                        h->non_zero_count_cache[bn_idx] ) {
                        /* coefficients on either side: bS 2 */
                        bS[i] = 2;
                    }
                    else if(!mv_done)
                    {
                        bS[i] = check_mv(h, b_idx, bn_idx, mvy_limit);
                    }
                }
            }
            /* Filter edge */
            // Do not use s->qscale as luma quantizer because it has not the same
            // value in IPCM macroblocks.
            if(bS[0]+bS[1]+bS[2]+bS[3]){
                qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbm_xy] + 1 ) >> 1;
                //tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]);
                tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
                //{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
                chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1;
                chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1;
                if( dir == 0 ) {
                    filter_mb_edgev( &img_y[0], linesize, bS, qp, h );
                    if (chroma) {
                        if (chroma444) {
                            filter_mb_edgev ( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], h);
                            filter_mb_edgev ( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], h);
                        } else {
                            filter_mb_edgecv( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], h);
                            filter_mb_edgecv( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], h);
                        }
                    }
                } else {
                    filter_mb_edgeh( &img_y[0], linesize, bS, qp, h );
                    if (chroma) {
                        if (chroma444) {
                            filter_mb_edgeh ( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], h);
                            filter_mb_edgeh ( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], h);
                        } else {
                            filter_mb_edgech( &img_cb[0], uvlinesize, bS, chroma_qp_avg[0], h);
                            filter_mb_edgech( &img_cr[0], uvlinesize, bS, chroma_qp_avg[1], h);
                        }
                    }
                }
            }
        }
    }
    /* Calculate bS */
    for( edge = 1; edge < edges; edge++ ) {
        DECLARE_ALIGNED(8, int16_t, bS)[4];
        int qp;
        if( IS_8x8DCT(mb_type & (edge<<24)) ) // (edge&1) && IS_8x8DCT(mb_type)
            continue;
        if( IS_INTRA(mb_type)) {
            /* internal edge of an intra MB: strength 3 */
            AV_WN64A(bS, 0x0003000300030003ULL);
        } else {
            int i;
            int mv_done;
            if( edge & mask_edge ) {
                /* edge lies inside a partition: no mv-based filtering */
                AV_ZERO64(bS);
                mv_done = 1;
            }
            else if( mask_par0 ) {
                int b_idx= 8 + 4 + edge * (dir ? 8:1);
                int bn_idx= b_idx - (dir ? 8:1);
                bS[0] = bS[1] = bS[2] = bS[3] = check_mv(h, b_idx, bn_idx, mvy_limit);
                mv_done = 1;
            }
            else
                mv_done = 0;
            for( i = 0; i < 4; i++ ) {
                int x = dir == 0 ? edge : i;
                int y = dir == 0 ? i : edge;
                int b_idx= 8 + 4 + x + 8*y;
                int bn_idx= b_idx - (dir ? 8:1);
                if( h->non_zero_count_cache[b_idx] |
                    h->non_zero_count_cache[bn_idx] ) {
                    bS[i] = 2;
                }
                else if(!mv_done)
                {
                    bS[i] = check_mv(h, b_idx, bn_idx, mvy_limit);
                }
            }
            if(bS[0]+bS[1]+bS[2]+bS[3] == 0)
                continue;
        }
        /* Filter edge */
        // Do not use s->qscale as luma quantizer because it has not the same
        // value in IPCM macroblocks.
        qp = s->current_picture.qscale_table[mb_xy];
        //tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]);
        tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
        //{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
        if( dir == 0 ) {
            filter_mb_edgev( &img_y[4*edge << h->pixel_shift], linesize, bS, qp, h );
            if (chroma) {
                if (chroma444) {
                    filter_mb_edgev ( &img_cb[4*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[0], h);
                    filter_mb_edgev ( &img_cr[4*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[1], h);
                } else if( (edge&1) == 0 ) {
                    /* subsampled chroma only has even edges */
                    filter_mb_edgecv( &img_cb[2*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[0], h);
                    filter_mb_edgecv( &img_cr[2*edge << h->pixel_shift], uvlinesize, bS, h->chroma_qp[1], h);
                }
            }
        } else {
            filter_mb_edgeh( &img_y[4*edge*linesize], linesize, bS, qp, h );
            if (chroma) {
                if (chroma444) {
                    filter_mb_edgeh ( &img_cb[4*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[0], h);
                    filter_mb_edgeh ( &img_cr[4*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[1], h);
                } else if( (edge&1) == 0 ) {
                    filter_mb_edgech( &img_cb[2*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[0], h);
                    filter_mb_edgech( &img_cr[2*edge*uvlinesize], uvlinesize, bS, h->chroma_qp[1], h);
                }
            }
        }
    }
}
/**
 * Full (non-fast-path) deblocking of one macroblock: handles the MBAFF
 * left-edge special case (8 strengths, two QP pairs) itself, then delegates
 * the remaining edges of both directions to filter_mb_dir.
 */
void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
    MpegEncContext * const s = &h->s;
    const int mb_xy= mb_x + mb_y*s->mb_stride;
    const int mb_type = s->current_picture.mb_type[mb_xy];
    const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4; /* vertical mv threshold for check_mv */
    int first_vertical_edge_done = 0;
    av_unused int dir;
    int chroma = !(CONFIG_GRAY && (s->flags&CODEC_FLAG_GRAY));
    if (FRAME_MBAFF
            // and current and left pair do not have the same interlaced type
            && IS_INTERLACED(mb_type^h->left_type[LTOP])
            // and left mb is in available to us
            && h->left_type[LTOP]) {
        /* First vertical edge is different in MBAFF frames
         * There are 8 different bS to compute and 2 different Qp
         */
        DECLARE_ALIGNED(8, int16_t, bS)[8];
        int qp[2];
        int bqp[2];  /* cb edge QPs */
        int rqp[2];  /* cr edge QPs */
        int mb_qp, mbn0_qp, mbn1_qp;
        int i;
        first_vertical_edge_done = 1;
        if( IS_INTRA(mb_type) ) {
            AV_WN64A(&bS[0], 0x0004000400040004ULL);
            AV_WN64A(&bS[4], 0x0004000400040004ULL);
        } else {
            /* Index into the left neighbour's non_zero_count for each of
             * the 8 edge positions, chosen by field/frame mode and MB-pair
             * parity. */
            static const uint8_t offset[2][2][8]={
                {
                    {3+4*0, 3+4*0, 3+4*0, 3+4*0, 3+4*1, 3+4*1, 3+4*1, 3+4*1},
                    {3+4*2, 3+4*2, 3+4*2, 3+4*2, 3+4*3, 3+4*3, 3+4*3, 3+4*3},
                },{
                    {3+4*0, 3+4*1, 3+4*2, 3+4*3, 3+4*0, 3+4*1, 3+4*2, 3+4*3},
                    {3+4*0, 3+4*1, 3+4*2, 3+4*3, 3+4*0, 3+4*1, 3+4*2, 3+4*3},
                }
            };
            const uint8_t *off= offset[MB_FIELD][mb_y&1];
            for( i = 0; i < 8; i++ ) {
                int j= MB_FIELD ? i>>2 : i&1; /* which left MB of the pair */
                int mbn_xy = h->left_mb_xy[LEFT(j)];
                int mbn_type= h->left_type[LEFT(j)];
                if( IS_INTRA( mbn_type ) )
                    bS[i] = 4;
                else{
                    /* bS 2 if either side has coefficients (CAVLC 8x8-DCT
                     * neighbours read codedness from cbp_table), else 1 */
                    bS[i] = 1 + !!(h->non_zero_count_cache[12+8*(i>>1)] |
                         ((!h->pps.cabac && IS_8x8DCT(mbn_type)) ?
                            (h->cbp_table[mbn_xy] & (((MB_FIELD ? (i&2) : (mb_y&1)) ? 8 : 2) << 12))
                                                                       :
                            h->non_zero_count[mbn_xy][ off[i] ]));
                }
            }
        }
        /* Edge QPs: rounded average of this MB and each left neighbour. */
        mb_qp = s->current_picture.qscale_table[mb_xy];
        mbn0_qp = s->current_picture.qscale_table[h->left_mb_xy[0]];
        mbn1_qp = s->current_picture.qscale_table[h->left_mb_xy[1]];
        qp[0] = ( mb_qp + mbn0_qp + 1 ) >> 1;
        bqp[0] = ( get_chroma_qp( h, 0, mb_qp ) +
                   get_chroma_qp( h, 0, mbn0_qp ) + 1 ) >> 1;
        rqp[0] = ( get_chroma_qp( h, 1, mb_qp ) +
                   get_chroma_qp( h, 1, mbn0_qp ) + 1 ) >> 1;
        qp[1] = ( mb_qp + mbn1_qp + 1 ) >> 1;
        bqp[1] = ( get_chroma_qp( h, 0, mb_qp ) +
                   get_chroma_qp( h, 0, mbn1_qp ) + 1 ) >> 1;
        rqp[1] = ( get_chroma_qp( h, 1, mb_qp ) +
                   get_chroma_qp( h, 1, mbn1_qp ) + 1 ) >> 1;
        /* Filter edge */
        tprintf(s->avctx, "filter mb:%d/%d MBAFF, QPy:%d/%d, QPb:%d/%d QPr:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], bqp[0], bqp[1], rqp[0], rqp[1], linesize, uvlinesize);
        { int i; for (i = 0; i < 8; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
        if(MB_FIELD){
            /* Field MB: top and bottom halves use different neighbour QPs;
             * bsi 1 takes strengths consecutively. */
            filter_mb_mbaff_edgev ( h, img_y                ,   linesize, bS  , 1, qp [0] );
            filter_mb_mbaff_edgev ( h, img_y  + 8*  linesize,   linesize, bS+4, 1, qp [1] );
            if (chroma){
                if (CHROMA444) {
                    filter_mb_mbaff_edgev ( h, img_cb,                uvlinesize, bS  , 1, bqp[0] );
                    filter_mb_mbaff_edgev ( h, img_cb + 8*uvlinesize, uvlinesize, bS+4, 1, bqp[1] );
                    filter_mb_mbaff_edgev ( h, img_cr,                uvlinesize, bS  , 1, rqp[0] );
                    filter_mb_mbaff_edgev ( h, img_cr + 8*uvlinesize, uvlinesize, bS+4, 1, rqp[1] );
                }else{
                    filter_mb_mbaff_edgecv( h, img_cb,                uvlinesize, bS  , 1, bqp[0] );
                    filter_mb_mbaff_edgecv( h, img_cb + 4*uvlinesize, uvlinesize, bS+4, 1, bqp[1] );
                    filter_mb_mbaff_edgecv( h, img_cr,                uvlinesize, bS  , 1, rqp[0] );
                    filter_mb_mbaff_edgecv( h, img_cr + 4*uvlinesize, uvlinesize, bS+4, 1, rqp[1] );
                }
            }
        }else{
            /* Frame MB against field neighbours: filter the two fields on
             * interleaved rows (doubled stride); bsi 2 picks every second
             * strength. */
            filter_mb_mbaff_edgev ( h, img_y            , 2*  linesize, bS  , 2, qp [0] );
            filter_mb_mbaff_edgev ( h, img_y  +   linesize, 2*  linesize, bS+1, 2, qp [1] );
            if (chroma){
                if (CHROMA444) {
                    filter_mb_mbaff_edgev ( h, img_cb,              2*uvlinesize, bS  , 2, bqp[0] );
                    filter_mb_mbaff_edgev ( h, img_cb + uvlinesize, 2*uvlinesize, bS+1, 2, bqp[1] );
                    filter_mb_mbaff_edgev ( h, img_cr,              2*uvlinesize, bS  , 2, rqp[0] );
                    filter_mb_mbaff_edgev ( h, img_cr + uvlinesize, 2*uvlinesize, bS+1, 2, rqp[1] );
                }else{
                    filter_mb_mbaff_edgecv( h, img_cb,              2*uvlinesize, bS  , 2, bqp[0] );
                    filter_mb_mbaff_edgecv( h, img_cb + uvlinesize, 2*uvlinesize, bS+1, 2, bqp[1] );
                    filter_mb_mbaff_edgecv( h, img_cr,              2*uvlinesize, bS  , 2, rqp[0] );
                    filter_mb_mbaff_edgecv( h, img_cr + uvlinesize, 2*uvlinesize, bS+1, 2, rqp[1] );
                }
            }
        }
    }
#if CONFIG_SMALL
    for( dir = 0; dir < 2; dir++ )
        filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, dir ? 0 : first_vertical_edge_done, chroma, CHROMA444, dir);
#else
    filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, first_vertical_edge_done, chroma, CHROMA444, 0);
    filter_mb_dir(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize, mb_xy, mb_type, mvy_limit, 0, chroma, CHROMA444, 1);
#endif
}