/*
 * H.26L/H.264/AVC/JVT/14496-10/... direct mb/block decoding
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG4 part10 direct mb/block decoding.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "internal.h"
#include "dsputil.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "h264.h"
#include "rectangle.h"
#include "thread.h"

//#undef NDEBUG
#include <assert.h>
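
/* Computes the temporal direct-mode distance scale factor for list0
 * reference i: with td = POC(list1 ref) - POC(list0 ref) and
 * tb = POC(current) - POC(list0 ref), the result is
 * clip(-1024, 1023, (tb*tx + 32) >> 6) with tx = (16384 + |td|/2) / td,
 * i.e. roughly 256 * tb / td. Long-term references and td == 0 yield 256. */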
static int get_scale_factor(H264Context * const h, int poc, int poc1, int i){
    int poc0 = h->ref_list[0][i].poc;
    int td = av_clip(poc1 - poc0, -128, 127);
    if(td == 0 || h->ref_list[0][i].long_ref){
        return 256;
    }else{
        int tb = av_clip(poc - poc0, -128, 127);
        int tx = (16384 + (FFABS(td) >> 1)) / td;
        return av_clip((tb*tx + 32) >> 6, -1024, 1023);
    }
}
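
/* Fills h->dist_scale_factor[] (and the per-field variants used for MBAFF)
 * with the scale factor of every list0 reference relative to the first
 * list1 reference, for use by temporal direct prediction. */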
void ff_h264_direct_dist_scale_factor(H264Context * const h){
    MpegEncContext * const s = &h->s;
    const int poc = h->s.current_picture_ptr->field_poc[ s->picture_structure == PICT_BOTTOM_FIELD ];
    const int poc1 = h->ref_list[1][0].poc;
    int i, field;

    for(field=0; field<2; field++){
        const int poc  = h->s.current_picture_ptr->field_poc[field];
        const int poc1 = h->ref_list[1][0].field_poc[field];
        for(i=0; i < 2*h->ref_count[0]; i++)
            h->dist_scale_factor_field[field][i^field] = get_scale_factor(h, poc, poc1, i+16);
    }

    for(i=0; i<h->ref_count[0]; i++){
        h->dist_scale_factor[i] = get_scale_factor(h, poc, poc1, i);
    }
}
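
/* Builds a map from the reference indices used by the co-located list1
 * picture to the current slice's list0 indices, matching pictures on
 * 4*frame_num plus field parity. map[list][old_ref] gives the remapped
 * index; entries from index 16 on hold the per-field variants used when
 * the co-located picture is MBAFF. */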
static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field, int colfield, int mbafi){
    MpegEncContext * const s = &h->s;
    Picture * const ref1 = &h->ref_list[1][0];
    int j, old_ref, rfield;
    int start  = mbafi ? 16                   : 0;
    int end    = mbafi ? 16+2*h->ref_count[0] : h->ref_count[0];
    int interl = mbafi || s->picture_structure != PICT_FRAME;

    /* bogus; fills in for missing frames */
    memset(map[list], 0, sizeof(map[list]));

    for(rfield=0; rfield<2; rfield++){
        for(old_ref=0; old_ref<ref1->ref_count[colfield][list]; old_ref++){
            int poc = ref1->ref_poc[colfield][list][old_ref];

            if (!interl)
                poc |= 3;
            else if( interl && (poc&3) == 3) //FIXME store all MBAFF references so this isn't needed
                poc= (poc&~3) + rfield + 1;

            for(j=start; j<end; j++){
                if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].f.reference & 3) == poc) {
                    int cur_ref= mbafi ? (j-16)^field : j;
                    if(ref1->mbaff)
                        map[list][2*old_ref + (rfield^field) + 16] = cur_ref;
                    if(rfield == field || !interl)
                        map[list][old_ref] = cur_ref;
                    break;
                }
            }
        }
    }
}
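
/* Per-slice setup for direct prediction: records the current picture's
 * reference counts and POCs, picks the co-located field parity and field
 * offset, and (for temporal direct in B slices) builds the col-to-list0
 * reference maps via fill_colmap(). */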
void ff_h264_direct_ref_list_init(H264Context * const h){
    MpegEncContext * const s = &h->s;
    Picture * const ref1 = &h->ref_list[1][0];
    Picture * const cur = s->current_picture_ptr;
    int list, j, field;
    int sidx= (s->picture_structure&1)^1;
    int ref1sidx = (ref1->f.reference&1)^1;

    for(list=0; list<2; list++){
        cur->ref_count[sidx][list] = h->ref_count[list];
        for(j=0; j<h->ref_count[list]; j++)
            cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num + (h->ref_list[list][j].f.reference & 3);
    }

    if(s->picture_structure == PICT_FRAME){
        memcpy(cur->ref_count[1], cur->ref_count[0], sizeof(cur->ref_count[0]));
        memcpy(cur->ref_poc [1], cur->ref_poc [0], sizeof(cur->ref_poc [0]));
    }

    cur->mbaff= FRAME_MBAFF;

    h->col_fieldoff= 0;
    if(s->picture_structure == PICT_FRAME){
        int cur_poc = s->current_picture_ptr->poc;
        int *col_poc = h->ref_list[1]->field_poc;
        h->col_parity= (FFABS(col_poc[0] - cur_poc) >= FFABS(col_poc[1] - cur_poc));
        ref1sidx=sidx= h->col_parity;
    } else if (!(s->picture_structure & h->ref_list[1][0].f.reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity
        h->col_fieldoff = 2 * h->ref_list[1][0].f.reference - 3;
    }

    if (cur->f.pict_type != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
        return;

    for(list=0; list<2; list++){
        fill_colmap(h, h->map_col_to_list0, list, sidx, ref1sidx, 0);
        if(FRAME_MBAFF)
            for(field=0; field<2; field++)
                fill_colmap(h, h->map_col_to_list0_field[field], list, field, field, 1);
    }
}
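
/* With frame threading, waits until the reference picture has been decoded
 * at least up to the row containing mb_y before its motion data is read.
 * No-op unless FF_THREAD_FRAME is active. */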
static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y)
{
    int ref_field = ref->f.reference - 1;
    int ref_field_picture = ref->field_picture;
    int ref_height = 16*h->s.mb_height >> ref_field_picture;

    if(!HAVE_THREADS || !(h->s.avctx->active_thread_type&FF_THREAD_FRAME))
        return;

    //FIXME it can be safe to access mb stuff
    //even if pixels aren't deblocked yet
    ff_thread_await_progress(&ref->f,
                             FFMIN(16 * mb_y >> ref_field_picture, ref_height - 1),
                             ref_field_picture && ref_field);
}
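
/* Spatial direct mode: the reference index for each list is the minimum of
 * the neighbouring A/B/C references and the motion vector comes from median
 * prediction; individual (sub)blocks are then forced to zero motion when the
 * co-located list1 block is static (col_zero_flag). */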
static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
    MpegEncContext * const s = &h->s;
    int b8_stride = 2;
    int b4_stride = h->b_stride;
    int mb_xy = h->mb_xy, mb_y = s->mb_y;
    int mb_type_col[2];
    const int16_t (*l1mv0)[2], (*l1mv1)[2];
    const int8_t *l1ref0, *l1ref1;
    const int is_b8x8 = IS_8X8(*mb_type);
    unsigned int sub_mb_type= MB_TYPE_L0L1;
    int i8, i4;
    int ref[2];
    int mv[2];
    int list;

    assert(h->ref_list[1][0].f.reference & 3);

    await_reference_mb_row(h, &h->ref_list[1][0], s->mb_y + !!IS_INTERLACED(*mb_type));

#define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16|MB_TYPE_INTRA4x4|MB_TYPE_INTRA16x16|MB_TYPE_INTRA_PCM)
    /* ref = min(neighbors) */
    for(list=0; list<2; list++){
        int left_ref = h->ref_cache[list][scan8[0] - 1];
        int top_ref  = h->ref_cache[list][scan8[0] - 8];
        int refc     = h->ref_cache[list][scan8[0] - 8 + 4];
        const int16_t *C= h->mv_cache[list][ scan8[0] - 8 + 4];
        if(refc == PART_NOT_AVAILABLE){
            refc = h->ref_cache[list][scan8[0] - 8 - 1];
            C    = h-> mv_cache[list][scan8[0] - 8 - 1];
        }
        ref[list] = FFMIN3((unsigned)left_ref, (unsigned)top_ref, (unsigned)refc);
        if(ref[list] >= 0){
            //this is just pred_motion() but with the cases removed that cannot happen for direct blocks
            const int16_t * const A= h->mv_cache[list][ scan8[0] - 1 ];
            const int16_t * const B= h->mv_cache[list][ scan8[0] - 8 ];

            int match_count= (left_ref==ref[list]) + (top_ref==ref[list]) + (refc==ref[list]);
            if(match_count > 1){ //most common
                mv[list]= pack16to32(mid_pred(A[0], B[0], C[0]),
                                     mid_pred(A[1], B[1], C[1]) );
            }else {
                assert(match_count==1);
                if(left_ref==ref[list]){
                    mv[list]= AV_RN32A(A);
                }else if(top_ref==ref[list]){
                    mv[list]= AV_RN32A(B);
                }else{
                    mv[list]= AV_RN32A(C);
                }
            }
        }else{
            int mask= ~(MB_TYPE_L0 << (2*list));
            mv[list] = 0;
            ref[list] = -1;
            if(!is_b8x8)
                *mb_type &= mask;
            sub_mb_type &= mask;
        }
    }
    if(ref[0] < 0 && ref[1] < 0){
        ref[0] = ref[1] = 0;
        if(!is_b8x8)
            *mb_type |= MB_TYPE_L0L1;
        sub_mb_type |= MB_TYPE_L0L1;
    }

    if(!(is_b8x8|mv[0]|mv[1])){
        fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
        fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
        fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
        fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, 0, 4);
        *mb_type= (*mb_type & ~(MB_TYPE_8x8|MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_P1L0|MB_TYPE_P1L1))|MB_TYPE_16x16|MB_TYPE_DIRECT2;
        return;
    }
    if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
        if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
            mb_y = (s->mb_y&~1) + h->col_parity;
            mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride;
            b8_stride = 0;
        }else{
            mb_y  += h->col_fieldoff;
            mb_xy += s->mb_stride*h->col_fieldoff; // non zero for FL -> FL & differ parity
        }
        goto single_col;
    }else{ // AFL/AFR/FR/FL -> AFR/FR
        if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
            mb_y = s->mb_y&~1;
            mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride;
            mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
            mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
            b8_stride = 2+4*s->mb_stride;
            b4_stride *= 6;
            if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
                mb_type_col[0] &= ~MB_TYPE_INTERLACED;
                mb_type_col[1] &= ~MB_TYPE_INTERLACED;
            }

            sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if( (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)
                && (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA)
                && !is_b8x8){
                *mb_type |= MB_TYPE_16x8 |MB_TYPE_DIRECT2; /* B_16x8 */
            }else{
                *mb_type |= MB_TYPE_8x8;
            }
        }else{ // AFR/FR -> AFR/FR
single_col:
            mb_type_col[0] =
            mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];

            sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
                *mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_16x16 */
            }else if(!is_b8x8 && (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16))){
                *mb_type |= MB_TYPE_DIRECT2 | (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16));
            }else{
                if(!h->sps.direct_8x8_inference_flag){
                    /* FIXME save sub mb types from previous frames (or derive from MVs)
                     * so we know exactly what block size to use */
                    sub_mb_type += (MB_TYPE_8x8-MB_TYPE_16x16); /* B_SUB_4x4 */
                }
                *mb_type |= MB_TYPE_8x8;
            }
        }
    }
    await_reference_mb_row(h, &h->ref_list[1][0], mb_y);

    l1mv0  = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
    l1mv1  = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
    l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
    l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
    if(!b8_stride){
        if(s->mb_y&1){
            l1ref0 += 2;
            l1ref1 += 2;
            l1mv0  += 2*b4_stride;
            l1mv1  += 2*b4_stride;
        }
    }
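
    /* The three branches below handle a field/frame mismatch between the
     * current MB and the co-located MB, a plain 16x16 direct MB, and the
     * general per-8x8 (or per-4x4) case; each applies the col_zero_flag
     * test to decide whether to zero the predicted motion. */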
    if(IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])){
        int n=0;
        for(i8=0; i8<4; i8++){
            int x8 = i8&1;
            int y8 = i8>>1;
            int xy8 = x8+y8*b8_stride;
            int xy4 = 3*x8+y8*b4_stride;
            int a,b;

            if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                continue;
            h->sub_mb_type[i8] = sub_mb_type;

            fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[0], 1);
            fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[1], 1);
            if(!IS_INTRA(mb_type_col[y8]) && !h->ref_list[1][0].long_ref
               && (   (l1ref0[xy8] == 0 && FFABS(l1mv0[xy4][0]) <= 1 && FFABS(l1mv0[xy4][1]) <= 1)
                   || (l1ref0[xy8]  < 0 && l1ref1[xy8] == 0 && FFABS(l1mv1[xy4][0]) <= 1 && FFABS(l1mv1[xy4][1]) <= 1))){
                a=b=0;
                if(ref[0] > 0)
                    a= mv[0];
                if(ref[1] > 0)
                    b= mv[1];
                n++;
            }else{
                a= mv[0];
                b= mv[1];
            }
            fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, a, 4);
            fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, b, 4);
        }
        if(!is_b8x8 && !(n&3))
            *mb_type= (*mb_type & ~(MB_TYPE_8x8|MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_P1L0|MB_TYPE_P1L1))|MB_TYPE_16x16|MB_TYPE_DIRECT2;
    }else if(IS_16X16(*mb_type)){
        int a,b;

        fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
        fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
        if(!IS_INTRA(mb_type_col[0]) && !h->ref_list[1][0].long_ref
           && (   (l1ref0[0] == 0 && FFABS(l1mv0[0][0]) <= 1 && FFABS(l1mv0[0][1]) <= 1)
               || (l1ref0[0]  < 0 && l1ref1[0] == 0 && FFABS(l1mv1[0][0]) <= 1 && FFABS(l1mv1[0][1]) <= 1
                   && h->x264_build>33U))){
            a=b=0;
            if(ref[0] > 0)
                a= mv[0];
            if(ref[1] > 0)
                b= mv[1];
        }else{
            a= mv[0];
            b= mv[1];
        }
        fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, a, 4);
        fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, b, 4);
    }else{
        int n=0;
        for(i8=0; i8<4; i8++){
            const int x8 = i8&1;
            const int y8 = i8>>1;

            if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                continue;
            h->sub_mb_type[i8] = sub_mb_type;

            fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, mv[0], 4);
            fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, mv[1], 4);
            fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[0], 1);
            fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[1], 1);

            assert(b8_stride==2);
            /* col_zero_flag */
            if(!IS_INTRA(mb_type_col[0]) && !h->ref_list[1][0].long_ref
               && (   l1ref0[i8] == 0
                   || (l1ref0[i8] < 0 && l1ref1[i8] == 0
                       && h->x264_build>33U))){
                const int16_t (*l1mv)[2]= l1ref0[i8] == 0 ? l1mv0 : l1mv1;
                if(IS_SUB_8X8(sub_mb_type)){
                    const int16_t *mv_col = l1mv[x8*3 + y8*3*b4_stride];
                    if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
                        if(ref[0] == 0)
                            fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
                        if(ref[1] == 0)
                            fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
                        n+=4;
                    }
                }else{
                    int m=0;
                    for(i4=0; i4<4; i4++){
                        const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*b4_stride];
                        if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
                            if(ref[0] == 0)
                                AV_ZERO32(h->mv_cache[0][scan8[i8*4+i4]]);
                            if(ref[1] == 0)
                                AV_ZERO32(h->mv_cache[1][scan8[i8*4+i4]]);
                            m++;
                        }
                    }
                    if(!(m&3))
                        h->sub_mb_type[i8]+= MB_TYPE_16x16 - MB_TYPE_8x8;
                    n+=m;
                }
            }
        }
        if(!is_b8x8 && !(n&15))
            *mb_type= (*mb_type & ~(MB_TYPE_8x8|MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_P1L0|MB_TYPE_P1L1))|MB_TYPE_16x16|MB_TYPE_DIRECT2;
    }
}
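
/* Temporal direct mode: the list0 reference and motion are taken from the
 * co-located list1 block (remapped via map_col_to_list0) and scaled by
 * dist_scale_factor; the list1 vector is the difference mvL0 - mvCol. */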
static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
    MpegEncContext * const s = &h->s;
    int b8_stride = 2;
    int b4_stride = h->b_stride;
    int mb_xy = h->mb_xy, mb_y = s->mb_y;
    int mb_type_col[2];
    const int16_t (*l1mv0)[2], (*l1mv1)[2];
    const int8_t *l1ref0, *l1ref1;
    const int is_b8x8 = IS_8X8(*mb_type);
    unsigned int sub_mb_type;
    int i8, i4;

    assert(h->ref_list[1][0].f.reference & 3);

    await_reference_mb_row(h, &h->ref_list[1][0], s->mb_y + !!IS_INTERLACED(*mb_type));

    if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
        if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
            mb_y = (s->mb_y&~1) + h->col_parity;
            mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride;
            b8_stride = 0;
        }else{
            mb_y  += h->col_fieldoff;
            mb_xy += s->mb_stride*h->col_fieldoff; // non zero for FL -> FL & differ parity
        }
        goto single_col;
    }else{ // AFL/AFR/FR/FL -> AFR/FR
        if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
            mb_y = s->mb_y&~1;
            mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride;
            mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
            mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
            b8_stride = 2+4*s->mb_stride;
            b4_stride *= 6;
            if (IS_INTERLACED(mb_type_col[0]) != IS_INTERLACED(mb_type_col[1])) {
                mb_type_col[0] &= ~MB_TYPE_INTERLACED;
                mb_type_col[1] &= ~MB_TYPE_INTERLACED;
            }

            sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if( (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)
                && (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA)
                && !is_b8x8){
                *mb_type |= MB_TYPE_16x8 |MB_TYPE_L0L1|MB_TYPE_DIRECT2; /* B_16x8 */
            }else{
                *mb_type |= MB_TYPE_8x8|MB_TYPE_L0L1;
            }
        }else{ // AFR/FR -> AFR/FR
single_col:
            mb_type_col[0] =
            mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];

            sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
                *mb_type |= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_16x16 */
            }else if(!is_b8x8 && (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16))){
                *mb_type |= MB_TYPE_L0L1|MB_TYPE_DIRECT2 | (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16));
            }else{
                if(!h->sps.direct_8x8_inference_flag){
                    /* FIXME save sub mb types from previous frames (or derive from MVs)
                     * so we know exactly what block size to use */
                    sub_mb_type = MB_TYPE_8x8|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_4x4 */
                }
                *mb_type |= MB_TYPE_8x8|MB_TYPE_L0L1;
            }
        }
    }
    await_reference_mb_row(h, &h->ref_list[1][0], mb_y);

    l1mv0  = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
    l1mv1  = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
    l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
    l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
    if(!b8_stride){
        if(s->mb_y&1){
            l1ref0 += 2;
            l1ref1 += 2;
            l1mv0  += 2*b4_stride;
            l1mv1  += 2*b4_stride;
        }
    }
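
    /* The block below remaps the co-located reference index to list0 and
     * applies the temporal scaling, mvL0 = (DistScaleFactor * mvCol + 128) >> 8
     * and mvL1 = mvL0 - mvCol, with a special path when the current and
     * co-located MBs differ in field/frame coding. */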
    {
        const int *map_col_to_list0[2] = {h->map_col_to_list0[0], h->map_col_to_list0[1]};
        const int *dist_scale_factor = h->dist_scale_factor;
        int ref_offset;

        if(FRAME_MBAFF && IS_INTERLACED(*mb_type)){
            map_col_to_list0[0] = h->map_col_to_list0_field[s->mb_y&1][0];
            map_col_to_list0[1] = h->map_col_to_list0_field[s->mb_y&1][1];
            dist_scale_factor   = h->dist_scale_factor_field[s->mb_y&1];
        }
        ref_offset = (h->ref_list[1][0].mbaff<<4) & (mb_type_col[0]>>3); //if(h->ref_list[1][0].mbaff && IS_INTERLACED(mb_type_col[0])) ref_offset=16 else 0
        if(IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])){
            int y_shift = 2*!IS_INTERLACED(*mb_type);
            assert(h->sps.direct_8x8_inference_flag);

            for(i8=0; i8<4; i8++){
                const int x8 = i8&1;
                const int y8 = i8>>1;
                int ref0, scale;
                const int16_t (*l1mv)[2]= l1mv0;

                if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                    continue;
                h->sub_mb_type[i8] = sub_mb_type;

                fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
                if(IS_INTRA(mb_type_col[y8])){
                    fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
                    fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
                    fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
                    continue;
                }

                ref0 = l1ref0[x8 + y8*b8_stride];
                if(ref0 >= 0)
                    ref0 = map_col_to_list0[0][ref0 + ref_offset];
                else{
                    ref0 = map_col_to_list0[1][l1ref1[x8 + y8*b8_stride] + ref_offset];
                    l1mv= l1mv1;
                }
                scale = dist_scale_factor[ref0];
                fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);

                {
                    const int16_t *mv_col = l1mv[x8*3 + y8*b4_stride];
                    int my_col = (mv_col[1]<<y_shift)/2;
                    int mx = (scale * mv_col[0] + 128) >> 8;
                    int my = (scale * my_col + 128) >> 8;
                    fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4);
                    fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-my_col), 4);
                }
            }
            return;
        }
        /* one-to-one mv scaling */

        if(IS_16X16(*mb_type)){
            int ref, mv0, mv1;

            fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
            if(IS_INTRA(mb_type_col[0])){
                ref=mv0=mv1=0;
            }else{
                const int ref0 = l1ref0[0] >= 0 ? map_col_to_list0[0][l1ref0[0] + ref_offset]
                                                : map_col_to_list0[1][l1ref1[0] + ref_offset];
                const int scale = dist_scale_factor[ref0];
                const int16_t *mv_col = l1ref0[0] >= 0 ? l1mv0[0] : l1mv1[0];
                int mv_l0[2];
                mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                ref= ref0;
                mv0= pack16to32(mv_l0[0],mv_l0[1]);
                mv1= pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]);
            }
            fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
            fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, mv0, 4);
            fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, mv1, 4);
        }else{
            for(i8=0; i8<4; i8++){
                const int x8 = i8&1;
                const int y8 = i8>>1;
                int ref0, scale;
                const int16_t (*l1mv)[2]= l1mv0;

                if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                    continue;
                h->sub_mb_type[i8] = sub_mb_type;
                fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
                if(IS_INTRA(mb_type_col[0])){
                    fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
                    fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
                    fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
                    continue;
                }

                assert(b8_stride == 2);
                ref0 = l1ref0[i8];
                if(ref0 >= 0)
                    ref0 = map_col_to_list0[0][ref0 + ref_offset];
                else{
                    ref0 = map_col_to_list0[1][l1ref1[i8] + ref_offset];
                    l1mv= l1mv1;
                }
                scale = dist_scale_factor[ref0];

                fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);
                if(IS_SUB_8X8(sub_mb_type)){
                    const int16_t *mv_col = l1mv[x8*3 + y8*3*b4_stride];
                    int mx = (scale * mv_col[0] + 128) >> 8;
                    int my = (scale * mv_col[1] + 128) >> 8;
                    fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4);
                    fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-mv_col[1]), 4);
                }else
                    for(i4=0; i4<4; i4++){
                        const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*b4_stride];
                        int16_t *mv_l0 = h->mv_cache[0][scan8[i8*4+i4]];
                        mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                        mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                        AV_WN32A(h->mv_cache[1][scan8[i8*4+i4]],
                                 pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]));
                    }
            }
        }
    }
}
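
/* Entry point: dispatches to spatial or temporal direct prediction according
 * to the slice's direct_spatial_mv_pred flag. */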
void ff_h264_pred_direct_motion(H264Context * const h, int *mb_type){
    if(h->direct_spatial_mv_pred){
        pred_spatial_direct_motion(h, mb_type);
    }else{
        pred_temp_direct_motion(h, mb_type);
    }
}