You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

768 lines
26KB

  1. /*
  2. * HEVC video decoder
  3. *
  4. * Copyright (C) 2012 - 2013 Guillaume Martres
  5. * Copyright (C) 2013 Anand Meher Kotra
  6. *
  7. * This file is part of FFmpeg.
  8. *
  9. * FFmpeg is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * FFmpeg is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with FFmpeg; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. #include "hevc.h"
/* Order in which pairs of already-derived merge candidates are combined
 * when building bi-predictive merge candidates: row k gives the indices
 * into the merge list used for the L0 and the L1 half of combination k. */
static const uint8_t l0_l1_cand_idx[12][2] = {
    { 0, 1, },
    { 1, 0, },
    { 0, 2, },
    { 2, 0, },
    { 1, 2, },
    { 2, 1, },
    { 0, 3, },
    { 3, 0, },
    { 1, 3, },
    { 3, 1, },
    { 2, 3, },
    { 3, 2, },
};
  38. void ff_hevc_set_neighbour_available(HEVCContext *s, int x0, int y0,
  39. int nPbW, int nPbH)
  40. {
  41. HEVCLocalContext *lc = s->HEVClc;
  42. int x0b = x0 & ((1 << s->sps->log2_ctb_size) - 1);
  43. int y0b = y0 & ((1 << s->sps->log2_ctb_size) - 1);
  44. lc->na.cand_up = (lc->ctb_up_flag || y0b);
  45. lc->na.cand_left = (lc->ctb_left_flag || x0b);
  46. lc->na.cand_up_left = (!x0b && !y0b) ? lc->ctb_up_left_flag : lc->na.cand_left && lc->na.cand_up;
  47. lc->na.cand_up_right_sap =
  48. ((x0b + nPbW) == (1 << s->sps->log2_ctb_size)) ?
  49. lc->ctb_up_right_flag && !y0b : lc->na.cand_up;
  50. lc->na.cand_up_right =
  51. lc->na.cand_up_right_sap
  52. && (x0 + nPbW) < lc->end_of_tiles_x;
  53. lc->na.cand_bottom_left = ((y0 + nPbH) >= lc->end_of_tiles_y) ? 0 : lc->na.cand_left;
  54. }
  55. /*
  56. * 6.4.1 Derivation process for z-scan order block availability
  57. */
  58. static av_always_inline int z_scan_block_avail(HEVCContext *s, int xCurr, int yCurr,
  59. int xN, int yN)
  60. {
  61. #define MIN_TB_ADDR_ZS(x, y) \
  62. s->pps->min_tb_addr_zs[(y) * (s->sps->tb_mask+2) + (x)]
  63. int xCurr_ctb = xCurr >> s->sps->log2_ctb_size;
  64. int yCurr_ctb = yCurr >> s->sps->log2_ctb_size;
  65. int xN_ctb = xN >> s->sps->log2_ctb_size;
  66. int yN_ctb = yN >> s->sps->log2_ctb_size;
  67. if( yN_ctb < yCurr_ctb || xN_ctb < xCurr_ctb )
  68. return 1;
  69. else {
  70. int Curr = MIN_TB_ADDR_ZS((xCurr >> s->sps->log2_min_tb_size) & s->sps->tb_mask,
  71. (yCurr >> s->sps->log2_min_tb_size) & s->sps->tb_mask);
  72. int N = MIN_TB_ADDR_ZS((xN >> s->sps->log2_min_tb_size) & s->sps->tb_mask,
  73. (yN >> s->sps->log2_min_tb_size) & s->sps->tb_mask);
  74. return N <= Curr;
  75. }
  76. }
  77. //check if the two luma locations belong to the same mostion estimation region
  78. static av_always_inline int is_diff_mer(HEVCContext *s, int xN, int yN, int xP, int yP)
  79. {
  80. uint8_t plevel = s->pps->log2_parallel_merge_level;
  81. return xN >> plevel == xP >> plevel &&
  82. yN >> plevel == yP >> plevel;
  83. }
  84. #define MATCH_MV(x) (AV_RN32A(&A.x) == AV_RN32A(&B.x))
  85. #define MATCH(x) (A.x == B.x)
  86. // check if the mv's and refidx are the same between A and B
  87. static av_always_inline int compare_mv_ref_idx(struct MvField A, struct MvField B)
  88. {
  89. int a_pf = A.pred_flag;
  90. int b_pf = B.pred_flag;
  91. if (a_pf == b_pf) {
  92. if (a_pf == PF_BI) {
  93. return MATCH(ref_idx[0]) && MATCH_MV(mv[0]) &&
  94. MATCH(ref_idx[1]) && MATCH_MV(mv[1]);
  95. } else if (a_pf == PF_L0) {
  96. return MATCH(ref_idx[0]) && MATCH_MV(mv[0]);
  97. } else if (a_pf == PF_L1) {
  98. return MATCH(ref_idx[1]) && MATCH_MV(mv[1]);
  99. }
  100. }
  101. return 0;
  102. }
/*
 * Scale the motion vector src by the ratio of the POC distances tb/td and
 * store the result in dst (fixed-point computation from 8.5.3.1.8).
 * td must be nonzero — both callers (check_mvset(), dist_scale()) ensure
 * this before calling.
 */
static av_always_inline void mv_scale(Mv *dst, Mv *src, int td, int tb)
{
    int tx, scale_factor;

    /* distances are clipped to signed 8 bits before scaling */
    td = av_clip_int8(td);
    tb = av_clip_int8(tb);
    /* tx ~= 2^14 / td, rounded */
    tx = (0x4000 + abs(td / 2)) / td;
    scale_factor = av_clip((tb * tx + 32) >> 6, -4096, 4095);
    /* round away from zero, then clip to the signed 16-bit MV range */
    dst->x = av_clip_int16((scale_factor * src->x + 127 +
                            (scale_factor * src->x < 0)) >> 8);
    dst->y = av_clip_int16((scale_factor * src->y + 127 +
                            (scale_factor * src->y < 0)) >> 8);
}
/*
 * Map a collocated motion vector mvCol onto the current picture:
 * reject the candidate when the long-term flags of the two references
 * differ, copy the vector unscaled for long-term references or matching
 * POC distances, otherwise POC-scale it with mv_scale().
 * Returns 1 when *mvLXCol holds a valid vector, 0 otherwise.
 */
static int check_mvset(Mv *mvLXCol, Mv *mvCol,
                       int colPic, int poc,
                       RefPicList *refPicList, int X, int refIdxLx,
                       RefPicList *refPicList_col, int listCol, int refidxCol)
{
    int cur_lt = refPicList[X].isLongTerm[refIdxLx];
    int col_lt = refPicList_col[listCol].isLongTerm[refidxCol];
    int col_poc_diff, cur_poc_diff;

    /* a long-term and a short-term reference cannot be paired */
    if (cur_lt != col_lt) {
        mvLXCol->x = 0;
        mvLXCol->y = 0;
        return 0;
    }

    col_poc_diff = colPic - refPicList_col[listCol].list[refidxCol];
    cur_poc_diff = poc - refPicList[X].list[refIdxLx];

    if (cur_lt || col_poc_diff == cur_poc_diff || !col_poc_diff) {
        /* no scaling needed (also avoids a zero td in mv_scale()) */
        mvLXCol->x = mvCol->x;
        mvLXCol->y = mvCol->y;
    } else {
        mv_scale(mvLXCol, mvCol, col_poc_diff, cur_poc_diff);
    }
    return 1;
}
/* Try to derive mvLXCol from list l of the collocated MvField temp_col. */
#define CHECK_MVSET(l)                          \
    check_mvset(mvLXCol, temp_col.mv + l,       \
                colPic, s->poc,                 \
                refPicList, X, refIdxLx,        \
                refPicList_col, L ## l, temp_col.ref_idx[l])

// derive the motion vectors section 8.5.3.1.8
static int derive_temporal_colocated_mvs(HEVCContext *s, MvField temp_col,
                                         int refIdxLx, Mv *mvLXCol, int X,
                                         int colPic, RefPicList *refPicList_col)
{
    RefPicList *refPicList = s->ref->refPicList;

    if (temp_col.pred_flag == PF_INTRA)
        return 0;

    if (!(temp_col.pred_flag & PF_L0))
        /* collocated block predicts only from L1 */
        return CHECK_MVSET(1);
    else if (temp_col.pred_flag == PF_L0)
        /* collocated block predicts only from L0 */
        return CHECK_MVSET(0);
    else if (temp_col.pred_flag == PF_BI) {
        /* check whether any reference in either list lies after the
         * current picture in output order */
        int check_diffpicount = 0;
        int i, j;
        for (j = 0; j < 2; j++) {
            for (i = 0; i < refPicList[j].nb_refs; i++) {
                if (refPicList[j].list[i] > s->poc) {
                    check_diffpicount++;
                    break;
                }
            }
        }
        if (!check_diffpicount) {
            /* all references precede the current picture: use the list
             * matching the requested X */
            if (X==0)
                return CHECK_MVSET(0);
            else
                return CHECK_MVSET(1);
        } else {
            /* otherwise use the list opposite to collocated_list */
            if (s->sh.collocated_list == L1)
                return CHECK_MVSET(0);
            else
                return CHECK_MVSET(1);
        }
    }
    return 0;
}
/* Access the MvField of the minimum PU at PU coordinates (x, y);
 * requires tab_mvf and min_pu_width in scope. */
#define TAB_MVF(x, y) \
    tab_mvf[(y) * min_pu_width + x]

/* Same, addressed with the luma position of candidate v (xA0/yA0, ...). */
#define TAB_MVF_PU(v) \
    TAB_MVF(((x ## v) >> s->sps->log2_min_pu_size), \
            ((y ## v) >> s->sps->log2_min_pu_size))

#define DERIVE_TEMPORAL_COLOCATED_MVS                           \
    derive_temporal_colocated_mvs(s, temp_col,                  \
                                  refIdxLx, mvLXCol, X, colPic, \
                                  ff_hevc_get_ref_list(s, ref, x, y))

/*
 * 8.5.3.1.7 temporal luma motion vector prediction
 *
 * Derive mvLXCol for list X from the collocated reference picture:
 * first try the bottom-right collocated position, then the center one.
 * Returns nonzero when a vector was derived.
 */
static int temporal_luma_motion_vector(HEVCContext *s, int x0, int y0,
                                       int nPbW, int nPbH, int refIdxLx,
                                       Mv *mvLXCol, int X)
{
    MvField *tab_mvf;
    MvField temp_col;
    int x, y, x_pu, y_pu;
    int min_pu_width = s->sps->min_pu_width;
    int availableFlagLXCol = 0;
    int colPic;

    HEVCFrame *ref = s->ref->collocated_ref;

    if (!ref)
        return 0;

    tab_mvf = ref->tab_mvf;
    colPic  = ref->poc;

    //bottom right collocated motion vector
    x = x0 + nPbW;
    y = y0 + nPbH;

    /* only usable when it stays inside the picture and in the same CTB
     * row as the current block */
    if (tab_mvf &&
        (y0 >> s->sps->log2_ctb_size) == (y >> s->sps->log2_ctb_size) &&
        y < s->sps->height &&
        x < s->sps->width) {
        /* round down to 16x16 granularity */
        x &= ~15;
        y &= ~15;
        if (s->threads_type == FF_THREAD_FRAME)
            /* wait until the reference frame is decoded up to row y */
            ff_thread_await_progress(&ref->tf, y, 0);
        x_pu = x >> s->sps->log2_min_pu_size;
        y_pu = y >> s->sps->log2_min_pu_size;
        temp_col = TAB_MVF(x_pu, y_pu);
        availableFlagLXCol = DERIVE_TEMPORAL_COLOCATED_MVS;
    }

    // derive center collocated motion vector
    if (tab_mvf && !availableFlagLXCol) {
        x = x0 + (nPbW >> 1);
        y = y0 + (nPbH >> 1);
        x &= ~15;
        y &= ~15;
        if (s->threads_type == FF_THREAD_FRAME)
            ff_thread_await_progress(&ref->tf, y, 0);
        x_pu = x >> s->sps->log2_min_pu_size;
        y_pu = y >> s->sps->log2_min_pu_size;
        temp_col = TAB_MVF(x_pu, y_pu);
        availableFlagLXCol = DERIVE_TEMPORAL_COLOCATED_MVS;
    }
    return availableFlagLXCol;
}
/* Candidate v is usable when its availability flag is set and the
 * corresponding PU is not intra-coded. */
#define AVAILABLE(cand, v) \
    (cand && !(TAB_MVF_PU(v).pred_flag == PF_INTRA))

/* Candidate position v must precede (x0, y0) in z-scan order. */
#define PRED_BLOCK_AVAILABLE(v) \
    z_scan_block_avail(s, x0, y0, x ## v, y ## v)

#define COMPARE_MV_REFIDX(a, b) \
    compare_mv_ref_idx(TAB_MVF_PU(a), TAB_MVF_PU(b))

/*
 * 8.5.3.1.2 Derivation process for spatial merging candidates
 *
 * Fill mergecandlist in the order A1, B1, B0, A0, B2, temporal candidate,
 * combined bi-predictive candidates, zero-MV candidates. The function
 * returns as soon as the entry selected by merge_idx has been written,
 * so entries past merge_idx may be left undefined.
 */
static void derive_spatial_merge_candidates(HEVCContext *s, int x0, int y0,
                                            int nPbW, int nPbH,
                                            int log2_cb_size,
                                            int singleMCLFlag, int part_idx,
                                            int merge_idx,
                                            struct MvField mergecandlist[])
{
    HEVCLocalContext *lc = s->HEVClc;
    RefPicList *refPicList = s->ref->refPicList;
    MvField *tab_mvf = s->ref->tab_mvf;

    const int min_pu_width = s->sps->min_pu_width;

    const int cand_bottom_left = lc->na.cand_bottom_left;
    const int cand_left = lc->na.cand_left;
    const int cand_up_left = lc->na.cand_up_left;
    const int cand_up = lc->na.cand_up;
    const int cand_up_right = lc->na.cand_up_right_sap;

    /* luma positions of the five spatial neighbours */
    const int xA1 = x0 - 1;
    const int yA1 = y0 + nPbH - 1;
    const int xB1 = x0 + nPbW - 1;
    const int yB1 = y0 - 1;
    const int xB0 = x0 + nPbW;
    const int yB0 = y0 - 1;
    const int xA0 = x0 - 1;
    const int yA0 = y0 + nPbH;
    const int xB2 = x0 - 1;
    const int yB2 = y0 - 1;

    const int nb_refs = (s->sh.slice_type == P_SLICE) ?
                        s->sh.nb_refs[0] : FFMIN(s->sh.nb_refs[0], s->sh.nb_refs[1]);

    int zero_idx = 0;
    int nb_merge_cand = 0;
    int nb_orig_merge_cand = 0;

    int is_available_a0;
    int is_available_a1;
    int is_available_b0;
    int is_available_b1;
    int is_available_b2;

    /* left spatial merge candidate A1; skipped for the second PU of a
     * vertically split CU (it would duplicate the first PU's motion) or
     * when A1 lies in the same motion estimation region */
    if (!singleMCLFlag && part_idx == 1 &&
        (lc->cu.part_mode == PART_Nx2N ||
         lc->cu.part_mode == PART_nLx2N ||
         lc->cu.part_mode == PART_nRx2N) ||
        is_diff_mer(s, xA1, yA1, x0, y0)) {
        is_available_a1 = 0;
    } else {
        is_available_a1 = AVAILABLE(cand_left, A1);
        if (is_available_a1) {
            mergecandlist[nb_merge_cand] = TAB_MVF_PU(A1);
            if (merge_idx == 0) return;
            nb_merge_cand++;
        }
    }

    /* above spatial merge candidate B1; analogous restriction for the
     * second PU of a horizontally split CU */
    if (!singleMCLFlag && part_idx == 1 &&
        (lc->cu.part_mode == PART_2NxN ||
         lc->cu.part_mode == PART_2NxnU ||
         lc->cu.part_mode == PART_2NxnD) ||
        is_diff_mer(s, xB1, yB1, x0, y0)) {
        is_available_b1 = 0;
    } else {
        is_available_b1 = AVAILABLE(cand_up, B1);
        if (is_available_b1 &&
            !(is_available_a1 && COMPARE_MV_REFIDX(B1, A1))) {
            mergecandlist[nb_merge_cand] = TAB_MVF_PU(B1);
            if (merge_idx == nb_merge_cand) return;
            nb_merge_cand++;
        }
    }

    // above right spatial merge candidate
    is_available_b0 = AVAILABLE(cand_up_right, B0) &&
                      xB0 < s->sps->width &&
                      PRED_BLOCK_AVAILABLE(B0) &&
                      !is_diff_mer(s, xB0, yB0, x0, y0);

    if (is_available_b0 &&
        !(is_available_b1 && COMPARE_MV_REFIDX(B0, B1))) {
        mergecandlist[nb_merge_cand] = TAB_MVF_PU(B0);
        if (merge_idx == nb_merge_cand) return;
        nb_merge_cand++;
    }

    // left bottom spatial merge candidate
    is_available_a0 = AVAILABLE(cand_bottom_left, A0) &&
                      yA0 < s->sps->height &&
                      PRED_BLOCK_AVAILABLE(A0) &&
                      !is_diff_mer(s, xA0, yA0, x0, y0);

    if (is_available_a0 &&
        !(is_available_a1 && COMPARE_MV_REFIDX(A0, A1))) {
        mergecandlist[nb_merge_cand] = TAB_MVF_PU(A0);
        if (merge_idx == nb_merge_cand) return;
        nb_merge_cand++;
    }

    // above left spatial merge candidate; only used if fewer than four
    // spatial candidates were found so far
    is_available_b2 = AVAILABLE(cand_up_left, B2) &&
                      !is_diff_mer(s, xB2, yB2, x0, y0);

    if (is_available_b2 &&
        !(is_available_a1 && COMPARE_MV_REFIDX(B2, A1)) &&
        !(is_available_b1 && COMPARE_MV_REFIDX(B2, B1)) &&
        nb_merge_cand != 4) {
        mergecandlist[nb_merge_cand] = TAB_MVF_PU(B2);
        if (merge_idx == nb_merge_cand) return;
        nb_merge_cand++;
    }

    // temporal motion vector candidate
    if (s->sh.slice_temporal_mvp_enabled_flag &&
        nb_merge_cand < s->sh.max_num_merge_cand) {
        Mv mv_l0_col, mv_l1_col;
        int available_l0 = temporal_luma_motion_vector(s, x0, y0, nPbW, nPbH,
                                                       0, &mv_l0_col, 0);
        int available_l1 = (s->sh.slice_type == B_SLICE) ?
                           temporal_luma_motion_vector(s, x0, y0, nPbW, nPbH,
                                                       0, &mv_l1_col, 1) : 0;

        if (available_l0 || available_l1) {
            /* PF_L0 = bit 0, PF_L1 = bit 1 */
            mergecandlist[nb_merge_cand].pred_flag = available_l0 + (available_l1 << 1);
            if (available_l0) {
                mergecandlist[nb_merge_cand].mv[0] = mv_l0_col;
                mergecandlist[nb_merge_cand].ref_idx[0] = 0;
            }
            if (available_l1) {
                mergecandlist[nb_merge_cand].mv[1] = mv_l1_col;
                mergecandlist[nb_merge_cand].ref_idx[1] = 0;
            }
            if (merge_idx == nb_merge_cand) return;
            nb_merge_cand++;
        }
    }

    nb_orig_merge_cand = nb_merge_cand;

    // combined bi-predictive merge candidates (applies for B slices)
    if (s->sh.slice_type == B_SLICE && nb_orig_merge_cand > 1 &&
        nb_orig_merge_cand < s->sh.max_num_merge_cand) {
        int comb_idx = 0;

        for (comb_idx = 0; nb_merge_cand < s->sh.max_num_merge_cand &&
                           comb_idx < nb_orig_merge_cand * (nb_orig_merge_cand - 1); comb_idx++) {
            int l0_cand_idx = l0_l1_cand_idx[comb_idx][0];
            int l1_cand_idx = l0_l1_cand_idx[comb_idx][1];
            MvField l0_cand = mergecandlist[l0_cand_idx];
            MvField l1_cand = mergecandlist[l1_cand_idx];

            /* combine only when the two halves do not describe the same
             * prediction (different reference or different vector) */
            if ((l0_cand.pred_flag & PF_L0) && (l1_cand.pred_flag & PF_L1) &&
                (refPicList[0].list[l0_cand.ref_idx[0]] !=
                 refPicList[1].list[l1_cand.ref_idx[1]] ||
                 AV_RN32A(&l0_cand.mv[0]) != AV_RN32A(&l1_cand.mv[1]))) {
                mergecandlist[nb_merge_cand].ref_idx[0] = l0_cand.ref_idx[0];
                mergecandlist[nb_merge_cand].ref_idx[1] = l1_cand.ref_idx[1];
                mergecandlist[nb_merge_cand].pred_flag = PF_BI;
                AV_COPY32(&mergecandlist[nb_merge_cand].mv[0], &l0_cand.mv[0]);
                AV_COPY32(&mergecandlist[nb_merge_cand].mv[1], &l1_cand.mv[1]);
                if (merge_idx == nb_merge_cand) return;
                nb_merge_cand++;
            }
        }
    }

    // append Zero motion vector candidates
    while (nb_merge_cand < s->sh.max_num_merge_cand) {
        mergecandlist[nb_merge_cand].pred_flag = PF_L0 + ((s->sh.slice_type == B_SLICE) << 1);
        AV_ZERO32(mergecandlist[nb_merge_cand].mv+0);
        AV_ZERO32(mergecandlist[nb_merge_cand].mv+1);
        mergecandlist[nb_merge_cand].ref_idx[0] = zero_idx < nb_refs ? zero_idx : 0;
        mergecandlist[nb_merge_cand].ref_idx[1] = zero_idx < nb_refs ? zero_idx : 0;
        if (merge_idx == nb_merge_cand) return;
        nb_merge_cand++;
        zero_idx++;
    }
}
/*
 * 8.5.3.1.1 Derivation process of luma Mvs for merge mode
 *
 * Derive the merge candidate list for the PU at (x0, y0) and store the
 * candidate selected by merge_idx in *mv.
 */
void ff_hevc_luma_mv_merge_mode(HEVCContext *s, int x0, int y0, int nPbW,
                                int nPbH, int log2_cb_size, int part_idx,
                                int merge_idx, MvField *mv)
{
    int singleMCLFlag = 0;
    int nCS = 1 << log2_cb_size;
    LOCAL_ALIGNED(4, MvField, mergecand_list, [MRG_MAX_NUM_CANDS]);
    /* original PU dimensions, kept for the 4+8/8+4 check below */
    int nPbW2 = nPbW;
    int nPbH2 = nPbH;
    HEVCLocalContext *lc = s->HEVClc;

    /* with a parallel merge level above 2, all PUs of an 8x8 CU share
     * the merge list of the whole CU */
    if (s->pps->log2_parallel_merge_level > 2 && nCS == 8) {
        singleMCLFlag = 1;
        x0 = lc->cu.x;
        y0 = lc->cu.y;
        nPbW = nCS;
        nPbH = nCS;
        part_idx = 0;
    }

    ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH);
    derive_spatial_merge_candidates(s, x0, y0, nPbW, nPbH, log2_cb_size,
                                    singleMCLFlag, part_idx,
                                    merge_idx, mergecand_list);

    /* 8x4 and 4x8 PUs (W+H == 12) may not use bi-prediction:
     * demote the selected candidate to L0-only */
    if (mergecand_list[merge_idx].pred_flag == PF_BI &&
        (nPbW2 + nPbH2) == 12) {
        mergecand_list[merge_idx].pred_flag = PF_L0;
    }

    *mv = mergecand_list[merge_idx];
}
/*
 * Scale *mv (taken from list elist of the PU at PU coordinates (x, y))
 * when the reference it points to differs from the current one
 * (ref_idx_curr/ref_idx), using the ratio of POC distances.
 */
static av_always_inline void dist_scale(HEVCContext *s, Mv *mv,
                                        int min_pu_width, int x, int y,
                                        int elist, int ref_idx_curr, int ref_idx)
{
    RefPicList *refPicList = s->ref->refPicList;
    MvField *tab_mvf = s->ref->tab_mvf;
    int ref_pic_elist = refPicList[elist].list[TAB_MVF(x, y).ref_idx[elist]];
    int ref_pic_curr = refPicList[ref_idx_curr].list[ref_idx];

    if (ref_pic_elist != ref_pic_curr) {
        int poc_diff = s->poc - ref_pic_elist;
        if (!poc_diff)
            poc_diff = 1; /* avoid a zero divisor in mv_scale() */
        mv_scale(mv, mv, poc_diff, s->poc - ref_pic_curr);
    }
}
  451. static int mv_mp_mode_mx(HEVCContext *s, int x, int y, int pred_flag_index,
  452. Mv *mv, int ref_idx_curr, int ref_idx)
  453. {
  454. MvField *tab_mvf = s->ref->tab_mvf;
  455. int min_pu_width = s->sps->min_pu_width;
  456. RefPicList *refPicList = s->ref->refPicList;
  457. if (((TAB_MVF(x, y).pred_flag) & (1 << pred_flag_index)) &&
  458. refPicList[pred_flag_index].list[TAB_MVF(x, y).ref_idx[pred_flag_index]] == refPicList[ref_idx_curr].list[ref_idx]) {
  459. *mv = TAB_MVF(x, y).mv[pred_flag_index];
  460. return 1;
  461. }
  462. return 0;
  463. }
/*
 * MVP spatial candidate, scaling variant: like mv_mp_mode_mx() but also
 * accepts a candidate pointing to a different reference picture, scaling
 * it by POC distance via dist_scale(). Long-term and short-term
 * references must not be mixed, and long-term vectors are never scaled.
 * Returns 1 and fills *mv on success, 0 otherwise.
 */
static int mv_mp_mode_mx_lt(HEVCContext *s, int x, int y, int pred_flag_index,
                            Mv *mv, int ref_idx_curr, int ref_idx)
{
    MvField *tab_mvf = s->ref->tab_mvf;
    int min_pu_width = s->sps->min_pu_width;

    RefPicList *refPicList = s->ref->refPicList;

    if ((TAB_MVF(x, y).pred_flag) & (1 << pred_flag_index)) {
        int currIsLongTerm = refPicList[ref_idx_curr].isLongTerm[ref_idx];

        int colIsLongTerm =
            refPicList[pred_flag_index].isLongTerm[(TAB_MVF(x, y).ref_idx[pred_flag_index])];

        if (colIsLongTerm == currIsLongTerm) {
            *mv = TAB_MVF(x, y).mv[pred_flag_index];
            if (!currIsLongTerm)
                dist_scale(s, mv, min_pu_width, x, y,
                           pred_flag_index, ref_idx_curr, ref_idx);
            return 1;
        }
    }
    return 0;
}
/* Try the unscaled MVP candidate at position v for list pred. */
#define MP_MX(v, pred, mx)                                      \
    mv_mp_mode_mx(s,                                            \
                  (x ## v) >> s->sps->log2_min_pu_size,         \
                  (y ## v) >> s->sps->log2_min_pu_size,         \
                  pred, &mx, ref_idx_curr, ref_idx)

/* Try the (possibly scaled) MVP candidate at position v for list pred. */
#define MP_MX_LT(v, pred, mx)                                   \
    mv_mp_mode_mx_lt(s,                                         \
                     (x ## v) >> s->sps->log2_min_pu_size,      \
                     (y ## v) >> s->sps->log2_min_pu_size,      \
                     pred, &mx, ref_idx_curr, ref_idx)

/*
 * AMVP: derivation of the motion vector predictor candidates.
 *
 * Build a two-entry predictor list from the left candidates (A0, A1),
 * the above candidates (B0, B1, B2) and, if still short, the temporal
 * candidate; then store the predictor selected by mvp_lx_flag into
 * mv->mv[LX].
 */
void ff_hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
                              int nPbH, int log2_cb_size, int part_idx,
                              int merge_idx, MvField *mv,
                              int mvp_lx_flag, int LX)
{
    HEVCLocalContext *lc = s->HEVClc;
    MvField *tab_mvf = s->ref->tab_mvf;
    int isScaledFlag_L0 = 0;
    /* both flags start at 1; the fall-through paths below clear them */
    int availableFlagLXA0 = 1;
    int availableFlagLXB0 = 1;
    int numMVPCandLX = 0;
    int min_pu_width = s->sps->min_pu_width;

    int xA0, yA0;
    int is_available_a0;
    int xA1, yA1;
    int is_available_a1;
    int xB0, yB0;
    int is_available_b0;
    int xB1, yB1;
    int is_available_b1;
    int xB2, yB2;
    int is_available_b2;

    Mv mvpcand_list[2] = { { 0 } };
    Mv mxA;
    Mv mxB;
    int ref_idx_curr = 0;
    int ref_idx = 0;
    int pred_flag_index_l0;
    int pred_flag_index_l1;

    const int cand_bottom_left = lc->na.cand_bottom_left;
    const int cand_left = lc->na.cand_left;
    const int cand_up_left = lc->na.cand_up_left;
    const int cand_up = lc->na.cand_up;
    const int cand_up_right = lc->na.cand_up_right_sap;

    ref_idx_curr = LX;
    ref_idx = mv->ref_idx[LX];
    /* first try the candidate's list LX, then the opposite list */
    pred_flag_index_l0 = LX;
    pred_flag_index_l1 = !LX;

    // left bottom spatial candidate
    xA0 = x0 - 1;
    yA0 = y0 + nPbH;

    is_available_a0 = AVAILABLE(cand_bottom_left, A0) &&
                      yA0 < s->sps->height &&
                      PRED_BLOCK_AVAILABLE(A0);

    //left spatial merge candidate
    xA1 = x0 - 1;
    yA1 = y0 + nPbH - 1;

    is_available_a1 = AVAILABLE(cand_left, A1);
    if (is_available_a0 || is_available_a1)
        isScaledFlag_L0 = 1;

    /* A candidates: first pass without scaling (A0 then A1), second pass
     * with scaling; the first hit provides mxA */
    if (is_available_a0) {
        if (MP_MX(A0, pred_flag_index_l0, mxA)) {
            goto b_candidates;
        }
        if (MP_MX(A0, pred_flag_index_l1, mxA)) {
            goto b_candidates;
        }
    }

    if (is_available_a1) {
        if (MP_MX(A1, pred_flag_index_l0, mxA)) {
            goto b_candidates;
        }
        if (MP_MX(A1, pred_flag_index_l1, mxA)) {
            goto b_candidates;
        }
    }

    if (is_available_a0) {
        if (MP_MX_LT(A0, pred_flag_index_l0, mxA)) {
            goto b_candidates;
        }
        if (MP_MX_LT(A0, pred_flag_index_l1, mxA)) {
            goto b_candidates;
        }
    }

    if (is_available_a1) {
        if (MP_MX_LT(A1, pred_flag_index_l0, mxA)) {
            goto b_candidates;
        }
        if (MP_MX_LT(A1, pred_flag_index_l1, mxA)) {
            goto b_candidates;
        }
    }
    availableFlagLXA0 = 0;

b_candidates:
    // B candidates
    // above right spatial merge candidate
    xB0 = x0 + nPbW;
    yB0 = y0 - 1;

    is_available_b0 =  AVAILABLE(cand_up_right, B0) &&
                       xB0 < s->sps->width &&
                       PRED_BLOCK_AVAILABLE(B0);

    // above spatial merge candidate
    xB1 = x0 + nPbW - 1;
    yB1 = y0 - 1;
    is_available_b1 = AVAILABLE(cand_up, B1);

    // above left spatial merge candidate
    xB2 = x0 - 1;
    yB2 = y0 - 1;
    is_available_b2 = AVAILABLE(cand_up_left, B2);

    /* B candidates, unscaled pass (B0, B1, B2): first hit provides mxB */
    // above right spatial merge candidate
    if (is_available_b0) {
        if (MP_MX(B0, pred_flag_index_l0, mxB)) {
            goto scalef;
        }
        if (MP_MX(B0, pred_flag_index_l1, mxB)) {
            goto scalef;
        }
    }

    // above spatial merge candidate
    if (is_available_b1) {
        if (MP_MX(B1, pred_flag_index_l0, mxB)) {
            goto scalef;
        }
        if (MP_MX(B1, pred_flag_index_l1, mxB)) {
            goto scalef;
        }
    }

    // above left spatial merge candidate
    if (is_available_b2) {
        if (MP_MX(B2, pred_flag_index_l0, mxB)) {
            goto scalef;
        }
        if (MP_MX(B2, pred_flag_index_l1, mxB)) {
            goto scalef;
        }
    }
    availableFlagLXB0 = 0;

scalef:
    /* when no left candidate existed at all, promote the unscaled B
     * result to the A slot and retry the B candidates with scaling */
    if (!isScaledFlag_L0) {
        if (availableFlagLXB0) {
            availableFlagLXA0 = 1;
            mxA = mxB;
        }
        availableFlagLXB0 = 0;

        // XB0 and L1
        if (is_available_b0) {
            availableFlagLXB0 = MP_MX_LT(B0, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX_LT(B0, pred_flag_index_l1, mxB);
        }

        if (is_available_b1 && !availableFlagLXB0) {
            availableFlagLXB0 = MP_MX_LT(B1, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX_LT(B1, pred_flag_index_l1, mxB);
        }

        if (is_available_b2 && !availableFlagLXB0) {
            availableFlagLXB0 = MP_MX_LT(B2, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX_LT(B2, pred_flag_index_l1, mxB);
        }
    }

    if (availableFlagLXA0)
        mvpcand_list[numMVPCandLX++] = mxA;

    /* drop mxB when it duplicates mxA */
    if (availableFlagLXB0 && (!availableFlagLXA0 || mxA.x != mxB.x || mxA.y != mxB.y))
        mvpcand_list[numMVPCandLX++] = mxB;

    //temporal motion vector prediction candidate
    if (numMVPCandLX < 2 && s->sh.slice_temporal_mvp_enabled_flag &&
        mvp_lx_flag == numMVPCandLX) {
        Mv mv_col;
        int available_col = temporal_luma_motion_vector(s, x0, y0, nPbW,
                                                        nPbH, ref_idx,
                                                        &mv_col, LX);
        if (available_col)
            mvpcand_list[numMVPCandLX++] = mv_col;
    }

    /* note: mvpcand_list is zero-initialized, so a missing candidate at
     * mvp_lx_flag yields a zero vector */
    mv->mv[LX] = mvpcand_list[mvp_lx_flag];
}