You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

833 lines
28KB

  1. /*
  2. * HEVC video decoder
  3. *
  4. * Copyright (C) 2012 - 2013 Guillaume Martres
  5. * Copyright (C) 2013 Anand Meher Kotra
  6. *
  7. * This file is part of FFmpeg.
  8. *
  9. * FFmpeg is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * FFmpeg is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with FFmpeg; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. #include "hevc.h"
/* Pairs of merge-list indices used to build combined bi-predictive merge
 * candidates (H.265 Table 8-8): entry k gives the candidate index whose L0
 * half and the candidate index whose L1 half are combined for combination k. */
static const uint8_t l0_l1_cand_idx[12][2] = {
    { 0, 1, },
    { 1, 0, },
    { 0, 2, },
    { 2, 0, },
    { 1, 2, },
    { 2, 1, },
    { 0, 3, },
    { 3, 0, },
    { 1, 3, },
    { 3, 1, },
    { 2, 3, },
    { 3, 2, },
};
/*
 * Compute which spatial neighbours (left, up, up-left, up-right,
 * bottom-left) of the nPbW x nPbH prediction block at (x0, y0) are
 * available, and store the flags in lc->na.
 */
void ff_hevc_set_neighbour_available(HEVCContext *s, int x0, int y0,
                                     int nPbW, int nPbH)
{
    HEVCLocalContext *lc = s->HEVClc;
    /* block position relative to its CTB */
    int x0b = x0 & ((1 << s->sps->log2_ctb_size) - 1);
    int y0b = y0 & ((1 << s->sps->log2_ctb_size) - 1);

    /* a neighbour strictly inside the current CTB (non-zero offset) is
     * always available; on the CTB edge the precomputed ctb_* flags decide */
    lc->na.cand_up      = (lc->ctb_up_flag || y0b);
    lc->na.cand_left    = (lc->ctb_left_flag || x0b);
    lc->na.cand_up_left = (!x0b && !y0b) ? lc->ctb_up_left_flag : lc->na.cand_left && lc->na.cand_up;
    /* "_sap" variant does not apply the tile right-edge clamp; it is the
     * one consumed by the spatial merge-candidate derivation */
    lc->na.cand_up_right_sap =
        ((x0b + nPbW) == (1 << s->sps->log2_ctb_size)) ?
            lc->ctb_up_right_flag && !y0b : lc->na.cand_up;
    lc->na.cand_up_right =
        ((x0b + nPbW) == (1 << s->sps->log2_ctb_size) ?
            lc->ctb_up_right_flag && !y0b : lc->na.cand_up)
        && (x0 + nPbW) < lc->end_of_tiles_x;
    lc->na.cand_bottom_left = ((y0 + nPbH) >= lc->end_of_tiles_y) ? 0 : lc->na.cand_left;
}
/*
 * 6.4.1 Derivation process for z-scan order block availability
 *
 * Returns non-zero when the block containing (xN, yN) has already been
 * decoded (in z-scan order) relative to the block containing (xCurr, yCurr).
 */
static int z_scan_block_avail(HEVCContext *s, int xCurr, int yCurr,
                              int xN, int yN)
{
/* z-scan address of the minimum transform block at grid position (x, y) */
#define MIN_TB_ADDR_ZS(x, y) \
    s->pps->min_tb_addr_zs[(y) * (s->sps->tb_mask + 2) + (x)]
    int xCurr_ctb = xCurr >> s->sps->log2_ctb_size;
    int yCurr_ctb = yCurr >> s->sps->log2_ctb_size;
    int xN_ctb    = xN >> s->sps->log2_ctb_size;
    int yN_ctb    = yN >> s->sps->log2_ctb_size;

    /* neighbours outside the picture are never available */
    if (xN < 0 || yN < 0 ||
        xN >= s->sps->width ||
        yN >= s->sps->height)
        return 0;

    /* a CTB strictly above or to the left has already been decoded */
    if (yN_ctb < yCurr_ctb || xN_ctb < xCurr_ctb)
        return 1;
    else {
        /* same CTB: compare z-scan addresses of the min transform blocks */
        int Curr = MIN_TB_ADDR_ZS((xCurr >> s->sps->log2_min_tb_size) & s->sps->tb_mask,
                                  (yCurr >> s->sps->log2_min_tb_size) & s->sps->tb_mask);
        int N    = MIN_TB_ADDR_ZS((xN >> s->sps->log2_min_tb_size) & s->sps->tb_mask,
                                  (yN >> s->sps->log2_min_tb_size) & s->sps->tb_mask);
        return N <= Curr;
    }
}
  82. static int same_prediction_block(HEVCLocalContext *lc, int log2_cb_size,
  83. int x0, int y0, int nPbW, int nPbH,
  84. int xA1, int yA1, int partIdx)
  85. {
  86. return !(nPbW << 1 == 1 << log2_cb_size &&
  87. nPbH << 1 == 1 << log2_cb_size && partIdx == 1 &&
  88. lc->cu.x + nPbW > xA1 &&
  89. lc->cu.y + nPbH <= yA1);
  90. }
  91. /*
  92. * 6.4.2 Derivation process for prediction block availability
  93. */
  94. static int check_prediction_block_available(HEVCContext *s, int log2_cb_size,
  95. int x0, int y0, int nPbW, int nPbH,
  96. int xA1, int yA1, int partIdx)
  97. {
  98. HEVCLocalContext *lc = s->HEVClc;
  99. if (lc->cu.x < xA1 && lc->cu.y < yA1 &&
  100. (lc->cu.x + (1 << log2_cb_size)) > xA1 &&
  101. (lc->cu.y + (1 << log2_cb_size)) > yA1)
  102. return same_prediction_block(lc, log2_cb_size, x0, y0,
  103. nPbW, nPbH, xA1, yA1, partIdx);
  104. else
  105. return z_scan_block_avail(s, x0, y0, xA1, yA1);
  106. }
  107. //check if the two luma locations belong to the same mostion estimation region
  108. static int isDiffMER(HEVCContext *s, int xN, int yN, int xP, int yP)
  109. {
  110. uint8_t plevel = s->pps->log2_parallel_merge_level;
  111. return xN >> plevel == xP >> plevel &&
  112. yN >> plevel == yP >> plevel;
  113. }
  114. #define MATCH_MV(x) (AV_RN32A(&A.x) == AV_RN32A(&B.x))
  115. #define MATCH(x) (A.x == B.x)
  116. // check if the mv's and refidx are the same between A and B
  117. static int compareMVrefidx(struct MvField A, struct MvField B)
  118. {
  119. int a_pf = A.pred_flag;
  120. int b_pf = B.pred_flag;
  121. if (a_pf == b_pf) {
  122. if (a_pf == PF_BI) {
  123. return MATCH(ref_idx[0]) && MATCH_MV(mv[0]) &&
  124. MATCH(ref_idx[1]) && MATCH_MV(mv[1]);
  125. } else if (a_pf == PF_L0) {
  126. return MATCH(ref_idx[0]) && MATCH_MV(mv[0]);
  127. } else if (a_pf == PF_L1) {
  128. return MATCH(ref_idx[1]) && MATCH_MV(mv[1]);
  129. }
  130. }
  131. return 0;
  132. }
  133. static av_always_inline void mv_scale(Mv *dst, Mv *src, int td, int tb)
  134. {
  135. int tx, scale_factor;
  136. td = av_clip_int8(td);
  137. tb = av_clip_int8(tb);
  138. tx = (0x4000 + abs(td / 2)) / td;
  139. scale_factor = av_clip((tb * tx + 32) >> 6, -4096, 4095);
  140. dst->x = av_clip_int16((scale_factor * src->x + 127 +
  141. (scale_factor * src->x < 0)) >> 8);
  142. dst->y = av_clip_int16((scale_factor * src->y + 127 +
  143. (scale_factor * src->y < 0)) >> 8);
  144. }
/*
 * Derive *mvLXCol from the collocated motion vector mvCol, scaling it by
 * the ratio of POC distances when necessary (part of 8.5.3.1.8).
 * Returns 1 when the candidate is usable, 0 when the long-term flags of
 * the current and collocated references disagree.
 */
static int check_mvset(Mv *mvLXCol, Mv *mvCol,
                       int colPic, int poc,
                       RefPicList *refPicList, int X, int refIdxLx,
                       RefPicList *refPicList_col, int listCol, int refidxCol)
{
    int cur_lt = refPicList[X].isLongTerm[refIdxLx];
    int col_lt = refPicList_col[listCol].isLongTerm[refidxCol];
    int col_poc_diff, cur_poc_diff;

    /* one long-term and one short-term reference: candidate unusable */
    if (cur_lt != col_lt) {
        mvLXCol->x = 0;
        mvLXCol->y = 0;
        return 0;
    }

    col_poc_diff = colPic - refPicList_col[listCol].list[refidxCol];
    cur_poc_diff = poc - refPicList[X].list[refIdxLx];

    /* long-term refs, equal distances, or zero collocated distance:
     * take the MV unscaled; otherwise scale by the POC-distance ratio */
    if (cur_lt || col_poc_diff == cur_poc_diff || !col_poc_diff) {
        mvLXCol->x = mvCol->x;
        mvLXCol->y = mvCol->y;
    } else {
        mv_scale(mvLXCol, mvCol, col_poc_diff, cur_poc_diff);
    }
    return 1;
}
/* Invoke check_mvset() for collocated list l (0 or 1), forwarding the
 * caller's local colPic / refIdxLx / refPicList* context.  Only usable
 * inside derive_temporal_colocated_mvs(). */
#define CHECK_MVSET(l)                          \
    check_mvset(mvLXCol, temp_col.mv + l,       \
                colPic, s->poc,                 \
                refPicList, X, refIdxLx,        \
                refPicList_col, L ## l, temp_col.ref_idx[l])
// derive the motion vectors section 8.5.3.1.8
static int derive_temporal_colocated_mvs(HEVCContext *s, MvField temp_col,
                                         int refIdxLx, Mv *mvLXCol, int X,
                                         int colPic, RefPicList *refPicList_col)
{
    RefPicList *refPicList = s->ref->refPicList;

    /* intra collocated block: no temporal candidate */
    if (temp_col.pred_flag == PF_INTRA)
        return 0;

    if (!(temp_col.pred_flag & PF_L0))
        return CHECK_MVSET(1);      /* collocated block uses only L1 */
    else if (temp_col.pred_flag == PF_L0)
        return CHECK_MVSET(0);      /* collocated block uses only L0 */
    else if (temp_col.pred_flag == PF_BI) {
        /* bi-predicted collocated block: decide which list to take from */
        int check_diffpicount = 0;
        int i, j;
        /* does either reference list contain a picture in the future
         * (POC greater than the current picture's)? */
        for (j = 0; j < 2; j++) {
            for (i = 0; i < refPicList[j].nb_refs; i++) {
                if (refPicList[j].list[i] > s->poc) {
                    check_diffpicount++;
                    break;
                }
            }
        }
        if (!check_diffpicount) {
            /* all references in the past: pick the list matching X */
            if (X == 0)
                return CHECK_MVSET(0);
            else
                return CHECK_MVSET(1);
        } else {
            /* otherwise pick the list opposite to collocated_list */
            if (s->sh.collocated_list == L1)
                return CHECK_MVSET(0);
            else
                return CHECK_MVSET(1);
        }
    }
    return 0;
}
/* MvField stored for the min-PU grid cell (x, y); requires locals
 * `tab_mvf' and `min_pu_width' in the expanding scope. */
#define TAB_MVF(x, y) \
    tab_mvf[(y) * min_pu_width + x]

/* TAB_MVF() addressed through the precomputed x<v>_pu / y<v>_pu
 * coordinates of spatial candidate v (A0, A1, B0, B1, B2). */
#define TAB_MVF_PU(v) \
    TAB_MVF(x ## v ## _pu, y ## v ## _pu)

/* Shorthand forwarding temporal_luma_motion_vector()'s local context. */
#define DERIVE_TEMPORAL_COLOCATED_MVS                           \
    derive_temporal_colocated_mvs(s, temp_col,                  \
                                  refIdxLx, mvLXCol, X, colPic, \
                                  ff_hevc_get_ref_list(s, ref, x, y))
/*
 * 8.5.3.1.7 temporal luma motion vector prediction
 *
 * Try the bottom-right then the center collocated position of the
 * collocated reference picture; on success store the (possibly scaled)
 * MV in *mvLXCol and return 1, otherwise return 0.
 */
static int temporal_luma_motion_vector(HEVCContext *s, int x0, int y0,
                                       int nPbW, int nPbH, int refIdxLx,
                                       Mv *mvLXCol, int X)
{
    MvField *tab_mvf;
    MvField temp_col;
    int x, y, x_pu, y_pu;
    int min_pu_width = s->sps->min_pu_width;
    int availableFlagLXCol = 0;
    int colPic;

    HEVCFrame *ref = s->ref->collocated_ref;

    if (!ref)
        return 0;

    tab_mvf = ref->tab_mvf;
    colPic  = ref->poc;

    // bottom right collocated motion vector
    x = x0 + nPbW;
    y = y0 + nPbH;

    /* with frame threading, wait until the reference has decoded row y */
    if (s->threads_type == FF_THREAD_FRAME)
        ff_thread_await_progress(&ref->tf, y, 0);
    /* bottom-right is only used inside the picture and the same CTB row */
    if (tab_mvf &&
        (y0 >> s->sps->log2_ctb_size) == (y >> s->sps->log2_ctb_size) &&
        y < s->sps->height &&
        x < s->sps->width) {
        /* round position down to the 16x16 motion-storage grid */
        x &= -16;
        y &= -16;
        x_pu = x >> s->sps->log2_min_pu_size;
        y_pu = y >> s->sps->log2_min_pu_size;
        temp_col = TAB_MVF(x_pu, y_pu);
        availableFlagLXCol = DERIVE_TEMPORAL_COLOCATED_MVS;
    }

    // derive center collocated motion vector
    if (tab_mvf && !availableFlagLXCol) {
        x = x0 + (nPbW >> 1);
        y = y0 + (nPbH >> 1);
        x &= -16;
        y &= -16;
        x_pu = x >> s->sps->log2_min_pu_size;
        y_pu = y >> s->sps->log2_min_pu_size;
        temp_col = TAB_MVF(x_pu, y_pu);
        availableFlagLXCol = DERIVE_TEMPORAL_COLOCATED_MVS;
    }
    return availableFlagLXCol;
}
/* Candidate v is usable when its neighbour flag is set and the stored
 * field is not intra. */
#define AVAILABLE(cand, v) \
    (cand && !(TAB_MVF_PU(v).pred_flag == PF_INTRA))

/* z-scan / same-PB availability test for spatial candidate v; requires
 * locals x0, y0, nPbW, nPbH, log2_cb_size, part_idx. */
#define PRED_BLOCK_AVAILABLE(v)                          \
    check_prediction_block_available(s, log2_cb_size,    \
                                     x0, y0, nPbW, nPbH, \
                                     x ## v, y ## v, part_idx)

/* Compare the stored MvFields of candidates a and b. */
#define COMPARE_MV_REFIDX(a, b) \
    compareMVrefidx(TAB_MVF_PU(a), TAB_MVF_PU(b))
/*
 * 8.5.3.1.2 Derivation process for spatial merging candidates
 *
 * Fill mergecandlist[] in spec order: A1, B1, B0, A0, B2, temporal,
 * combined bi-predictive (B slices), then zero-MV padding.  Returns early
 * as soon as the candidate selected by merge_idx has been written.
 */
static void derive_spatial_merge_candidates(HEVCContext *s, int x0, int y0,
                                            int nPbW, int nPbH,
                                            int log2_cb_size,
                                            int singleMCLFlag, int part_idx,
                                            int merge_idx,
                                            struct MvField mergecandlist[])
{
    HEVCLocalContext *lc   = s->HEVClc;
    RefPicList *refPicList = s->ref->refPicList;
    MvField *tab_mvf       = s->ref->tab_mvf;

    const int min_pu_width = s->sps->min_pu_width;

    const int cand_bottom_left = lc->na.cand_bottom_left;
    const int cand_left        = lc->na.cand_left;
    const int cand_up_left     = lc->na.cand_up_left;
    const int cand_up          = lc->na.cand_up;
    const int cand_up_right    = lc->na.cand_up_right_sap;

    /* positions of the five spatial candidates (6.4.2 / Fig. 8-3) and
     * their min-PU grid coordinates */
    const int xA1 = x0 - 1;
    const int yA1 = y0 + nPbH - 1;
    const int xA1_pu = xA1 >> s->sps->log2_min_pu_size;
    const int yA1_pu = yA1 >> s->sps->log2_min_pu_size;

    const int xB1 = x0 + nPbW - 1;
    const int yB1 = y0 - 1;
    const int xB1_pu = xB1 >> s->sps->log2_min_pu_size;
    const int yB1_pu = yB1 >> s->sps->log2_min_pu_size;

    const int xB0 = x0 + nPbW;
    const int yB0 = y0 - 1;
    const int xB0_pu = xB0 >> s->sps->log2_min_pu_size;
    const int yB0_pu = yB0 >> s->sps->log2_min_pu_size;

    const int xA0 = x0 - 1;
    const int yA0 = y0 + nPbH;
    const int xA0_pu = xA0 >> s->sps->log2_min_pu_size;
    const int yA0_pu = yA0 >> s->sps->log2_min_pu_size;

    const int xB2 = x0 - 1;
    const int yB2 = y0 - 1;
    const int xB2_pu = xB2 >> s->sps->log2_min_pu_size;
    const int yB2_pu = yB2 >> s->sps->log2_min_pu_size;

    /* number of references usable by the zero-MV padding candidates */
    const int nb_refs = (s->sh.slice_type == P_SLICE) ?
                        s->sh.nb_refs[0] : FFMIN(s->sh.nb_refs[0], s->sh.nb_refs[1]);

    int check_MER   = 1;
    int check_MER_1 = 1;

    int zero_idx = 0;

    int nb_merge_cand = 0;
    int nb_orig_merge_cand = 0;

    int is_available_a0;
    int is_available_a1;
    int is_available_b0;
    int is_available_b1;
    int is_available_b2;

    int check_B0;
    int check_A0;

    // first left spatial merge candidate
    is_available_a1 = AVAILABLE(cand_left, A1);
    /* A1 is disabled for the second partition of vertical splits (it would
     * lie in partition 0 of the same CU) or when it shares the MER */
    if (!singleMCLFlag && part_idx == 1 &&
        (lc->cu.part_mode == PART_Nx2N ||
         lc->cu.part_mode == PART_nLx2N ||
         lc->cu.part_mode == PART_nRx2N) ||
        isDiffMER(s, xA1, yA1, x0, y0)) {
        is_available_a1 = 0;
    }

    if (is_available_a1) {
        mergecandlist[0] = TAB_MVF_PU(A1);
        if (merge_idx == 0)
            return;
        nb_merge_cand++;
    }

    // above spatial merge candidate
    is_available_b1 = AVAILABLE(cand_up, B1);
    /* B1 is disabled for the second partition of horizontal splits */
    if (!singleMCLFlag && part_idx == 1 &&
        (lc->cu.part_mode == PART_2NxN ||
         lc->cu.part_mode == PART_2NxnU ||
         lc->cu.part_mode == PART_2NxnD) ||
        isDiffMER(s, xB1, yB1, x0, y0)) {
        is_available_b1 = 0;
    }

    /* prune B1 if it duplicates A1 */
    if (is_available_a1 && is_available_b1)
        check_MER = !COMPARE_MV_REFIDX(B1, A1);

    if (is_available_b1 && check_MER)
        mergecandlist[nb_merge_cand++] = TAB_MVF_PU(B1);

    // above right spatial merge candidate
    check_MER = 1;
    check_B0 = PRED_BLOCK_AVAILABLE(B0);

    is_available_b0 = check_B0 && AVAILABLE(cand_up_right, B0);
    if (isDiffMER(s, xB0, yB0, x0, y0))
        is_available_b0 = 0;

    /* prune B0 if it duplicates B1 */
    if (is_available_b1 && is_available_b0)
        check_MER = !COMPARE_MV_REFIDX(B0, B1);

    if (is_available_b0 && check_MER) {
        mergecandlist[nb_merge_cand] = TAB_MVF_PU(B0);
        if (merge_idx == nb_merge_cand)
            return;
        nb_merge_cand++;
    }

    // left bottom spatial merge candidate
    check_MER = 1;
    check_A0 = PRED_BLOCK_AVAILABLE(A0);

    is_available_a0 = check_A0 && AVAILABLE(cand_bottom_left, A0);
    if (isDiffMER(s, xA0, yA0, x0, y0))
        is_available_a0 = 0;

    /* prune A0 if it duplicates A1 */
    if (is_available_a1 && is_available_a0)
        check_MER = !COMPARE_MV_REFIDX(A0, A1);

    if (is_available_a0 && check_MER) {
        mergecandlist[nb_merge_cand] = TAB_MVF_PU(A0);
        if (merge_idx == nb_merge_cand)
            return;
        nb_merge_cand++;
    }

    // above left spatial merge candidate
    check_MER = 1;

    is_available_b2 = AVAILABLE(cand_up_left, B2);
    if (isDiffMER(s, xB2, yB2, x0, y0))
        is_available_b2 = 0;

    /* prune B2 if it duplicates A1 or B1; B2 is also skipped when four
     * spatial candidates were already found */
    if (is_available_a1 && is_available_b2)
        check_MER = !COMPARE_MV_REFIDX(B2, A1);

    if (is_available_b1 && is_available_b2)
        check_MER_1 = !COMPARE_MV_REFIDX(B2, B1);

    if (is_available_b2 && check_MER && check_MER_1 && nb_merge_cand != 4) {
        mergecandlist[nb_merge_cand] = TAB_MVF_PU(B2);
        if (merge_idx == nb_merge_cand)
            return;
        nb_merge_cand++;
    }

    // temporal motion vector candidate
    if (s->sh.slice_temporal_mvp_enabled_flag &&
        nb_merge_cand < s->sh.max_num_merge_cand) {
        Mv mv_l0_col, mv_l1_col;
        int available_l0 = temporal_luma_motion_vector(s, x0, y0, nPbW, nPbH,
                                                       0, &mv_l0_col, 0);
        int available_l1 = (s->sh.slice_type == B_SLICE) ?
                           temporal_luma_motion_vector(s, x0, y0, nPbW, nPbH,
                                                       0, &mv_l1_col, 1) : 0;

        if (available_l0 || available_l1) {
            /* temporal candidate always uses ref_idx 0 on each used list */
            mergecandlist[nb_merge_cand].pred_flag = available_l0 + (available_l1 << 1);
            if (available_l0) {
                mergecandlist[nb_merge_cand].mv[0]      = mv_l0_col;
                mergecandlist[nb_merge_cand].ref_idx[0] = 0;
            }
            if (available_l1) {
                mergecandlist[nb_merge_cand].mv[1]      = mv_l1_col;
                mergecandlist[nb_merge_cand].ref_idx[1] = 0;
            }
            if (merge_idx == nb_merge_cand)
                return;
            nb_merge_cand++;
        }
    }

    nb_orig_merge_cand = nb_merge_cand;

    // combined bi-predictive merge candidates (applies for B slices)
    if (s->sh.slice_type == B_SLICE && nb_orig_merge_cand > 1 &&
        nb_orig_merge_cand < s->sh.max_num_merge_cand) {
        int comb_idx = 0;

        for (comb_idx = 0; nb_merge_cand < s->sh.max_num_merge_cand &&
                           comb_idx < nb_orig_merge_cand * (nb_orig_merge_cand - 1); comb_idx++) {
            int l0_cand_idx = l0_l1_cand_idx[comb_idx][0];
            int l1_cand_idx = l0_l1_cand_idx[comb_idx][1];
            MvField l0_cand = mergecandlist[l0_cand_idx];
            MvField l1_cand = mergecandlist[l1_cand_idx];

            /* only combine when the two halves are not identical */
            if ((l0_cand.pred_flag & PF_L0) && (l1_cand.pred_flag & PF_L1) &&
                (refPicList[0].list[l0_cand.ref_idx[0]] !=
                 refPicList[1].list[l1_cand.ref_idx[1]] ||
                 AV_RN32A(&l0_cand.mv[0]) != AV_RN32A(&l1_cand.mv[1]))) {
                mergecandlist[nb_merge_cand].ref_idx[0] = l0_cand.ref_idx[0];
                mergecandlist[nb_merge_cand].ref_idx[1] = l1_cand.ref_idx[1];
                mergecandlist[nb_merge_cand].pred_flag  = PF_BI;
                AV_COPY32(&mergecandlist[nb_merge_cand].mv[0], &l0_cand.mv[0]);
                AV_COPY32(&mergecandlist[nb_merge_cand].mv[1], &l1_cand.mv[1]);
                if (merge_idx == nb_merge_cand)
                    return;
                nb_merge_cand++;
            }
        }
    }

    // append Zero motion vector candidates
    while (nb_merge_cand < s->sh.max_num_merge_cand) {
        mergecandlist[nb_merge_cand].pred_flag = PF_L0 + ((s->sh.slice_type == B_SLICE) << 1);
        AV_ZERO32(mergecandlist[nb_merge_cand].mv + 0);
        AV_ZERO32(mergecandlist[nb_merge_cand].mv + 1);
        /* cycle through the usable reference indices */
        mergecandlist[nb_merge_cand].ref_idx[0] = zero_idx < nb_refs ? zero_idx : 0;
        mergecandlist[nb_merge_cand].ref_idx[1] = zero_idx < nb_refs ? zero_idx : 0;
        if (merge_idx == nb_merge_cand)
            return;
        nb_merge_cand++;
        zero_idx++;
    }
}
  453. /*
  454. * 8.5.3.1.1 Derivation process of luma Mvs for merge mode
  455. */
  456. void ff_hevc_luma_mv_merge_mode(HEVCContext *s, int x0, int y0, int nPbW,
  457. int nPbH, int log2_cb_size, int part_idx,
  458. int merge_idx, MvField *mv)
  459. {
  460. int singleMCLFlag = 0;
  461. int nCS = 1 << log2_cb_size;
  462. LOCAL_ALIGNED(4, MvField, mergecand_list, [MRG_MAX_NUM_CANDS]);
  463. int nPbW2 = nPbW;
  464. int nPbH2 = nPbH;
  465. HEVCLocalContext *lc = s->HEVClc;
  466. if (s->pps->log2_parallel_merge_level > 2 && nCS == 8) {
  467. singleMCLFlag = 1;
  468. x0 = lc->cu.x;
  469. y0 = lc->cu.y;
  470. nPbW = nCS;
  471. nPbH = nCS;
  472. part_idx = 0;
  473. }
  474. ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH);
  475. derive_spatial_merge_candidates(s, x0, y0, nPbW, nPbH, log2_cb_size,
  476. singleMCLFlag, part_idx,
  477. merge_idx, mergecand_list);
  478. if (mergecand_list[merge_idx].pred_flag == PF_BI &&
  479. (nPbW2 + nPbH2) == 12) {
  480. mergecand_list[merge_idx].pred_flag = PF_L0;
  481. }
  482. *mv = mergecand_list[merge_idx];
  483. }
  484. static av_always_inline void dist_scale(HEVCContext *s, Mv *mv,
  485. int min_pu_width, int x, int y,
  486. int elist, int ref_idx_curr, int ref_idx)
  487. {
  488. RefPicList *refPicList = s->ref->refPicList;
  489. MvField *tab_mvf = s->ref->tab_mvf;
  490. int ref_pic_elist = refPicList[elist].list[TAB_MVF(x, y).ref_idx[elist]];
  491. int ref_pic_curr = refPicList[ref_idx_curr].list[ref_idx];
  492. if (ref_pic_elist != ref_pic_curr) {
  493. int poc_diff = s->poc - ref_pic_elist;
  494. if (!poc_diff)
  495. poc_diff = 1;
  496. mv_scale(mv, mv, poc_diff, s->poc - ref_pic_curr);
  497. }
  498. }
  499. static int mv_mp_mode_mx(HEVCContext *s, int x, int y, int pred_flag_index,
  500. Mv *mv, int ref_idx_curr, int ref_idx)
  501. {
  502. MvField *tab_mvf = s->ref->tab_mvf;
  503. int min_pu_width = s->sps->min_pu_width;
  504. RefPicList *refPicList = s->ref->refPicList;
  505. if (((TAB_MVF(x, y).pred_flag) & (1 << pred_flag_index)) &&
  506. refPicList[pred_flag_index].list[TAB_MVF(x, y).ref_idx[pred_flag_index]] == refPicList[ref_idx_curr].list[ref_idx]) {
  507. *mv = TAB_MVF(x, y).mv[pred_flag_index];
  508. return 1;
  509. }
  510. return 0;
  511. }
  512. static int mv_mp_mode_mx_lt(HEVCContext *s, int x, int y, int pred_flag_index,
  513. Mv *mv, int ref_idx_curr, int ref_idx)
  514. {
  515. MvField *tab_mvf = s->ref->tab_mvf;
  516. int min_pu_width = s->sps->min_pu_width;
  517. RefPicList *refPicList = s->ref->refPicList;
  518. if ((TAB_MVF(x, y).pred_flag) & (1 << pred_flag_index)) {
  519. int currIsLongTerm = refPicList[ref_idx_curr].isLongTerm[ref_idx];
  520. int colIsLongTerm =
  521. refPicList[pred_flag_index].isLongTerm[(TAB_MVF(x, y).ref_idx[pred_flag_index])];
  522. if (colIsLongTerm == currIsLongTerm) {
  523. *mv = TAB_MVF(x, y).mv[pred_flag_index];
  524. if (!currIsLongTerm)
  525. dist_scale(s, mv, min_pu_width, x, y,
  526. pred_flag_index, ref_idx_curr, ref_idx);
  527. return 1;
  528. }
  529. }
  530. return 0;
  531. }
/* Try spatial candidate v (via its x<v>_pu / y<v>_pu coordinates) as an
 * AMVP predictor on list `pred', storing the result in mx. */
#define MP_MX(v, pred, mx)                                  \
    mv_mp_mode_mx(s, x ## v ## _pu, y ## v ## _pu, pred,    \
                  &mx, ref_idx_curr, ref_idx)

/* Long-term-aware variant of MP_MX (may scale the MV by POC distance). */
#define MP_MX_LT(v, pred, mx)                               \
    mv_mp_mode_mx_lt(s, x ## v ## _pu, y ## v ## _pu, pred, \
                     &mx, ref_idx_curr, ref_idx)
/*
 * AMVP: derive the motion vector predictor list for reference list LX
 * (8.5.3.1.5/8.5.3.1.6) and write the predictor selected by mvp_lx_flag
 * into mv->mv[LX].  Candidates are tried in spec order: A0/A1 (left
 * column), B0/B1/B2 (top row), then the temporal predictor.
 */
void ff_hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
                              int nPbH, int log2_cb_size, int part_idx,
                              int merge_idx, MvField *mv,
                              int mvp_lx_flag, int LX)
{
    HEVCLocalContext *lc = s->HEVClc;
    MvField *tab_mvf = s->ref->tab_mvf;
    int isScaledFlag_L0 = 0;
    int availableFlagLXA0 = 0;
    int availableFlagLXB0 = 0;
    int numMVPCandLX = 0;
    int min_pu_width = s->sps->min_pu_width;

    int xA0, yA0;
    int xA0_pu, yA0_pu;
    int is_available_a0;

    int xA1, yA1;
    int xA1_pu, yA1_pu;
    int is_available_a1;

    int xB0, yB0;
    int xB0_pu, yB0_pu;
    int is_available_b0;

    int xB1, yB1;
    int xB1_pu = 0, yB1_pu = 0;
    int is_available_b1 = 0;

    int xB2, yB2;
    int xB2_pu = 0, yB2_pu = 0;
    int is_available_b2 = 0;

    Mv mvpcand_list[2] = { { 0 } };
    Mv mxA;
    Mv mxB;
    int ref_idx_curr = 0;
    int ref_idx = 0;
    int pred_flag_index_l0;
    int pred_flag_index_l1;

    /* neighbour availability, same scheme as
     * ff_hevc_set_neighbour_available() */
    int x0b = x0 & ((1 << s->sps->log2_ctb_size) - 1);
    int y0b = y0 & ((1 << s->sps->log2_ctb_size) - 1);

    int cand_up = (lc->ctb_up_flag || y0b);
    int cand_left = (lc->ctb_left_flag || x0b);
    int cand_up_left =
        (!x0b && !y0b) ? lc->ctb_up_left_flag : cand_left && cand_up;
    int cand_up_right =
        (x0b + nPbW == (1 << s->sps->log2_ctb_size) ||
         x0 + nPbW >= lc->end_of_tiles_x) ? lc->ctb_up_right_flag && !y0b
                                          : cand_up;
    int cand_bottom_left = (y0 + nPbH >= lc->end_of_tiles_y) ? 0 : cand_left;

    ref_idx_curr = LX;
    ref_idx = mv->ref_idx[LX];
    /* l0 is "same list as LX", l1 the opposite list */
    pred_flag_index_l0 = LX;
    pred_flag_index_l1 = !LX;

    // left bottom spatial candidate
    xA0 = x0 - 1;
    yA0 = y0 + nPbH;
    xA0_pu = xA0 >> s->sps->log2_min_pu_size;
    yA0_pu = yA0 >> s->sps->log2_min_pu_size;

    is_available_a0 = PRED_BLOCK_AVAILABLE(A0) && AVAILABLE(cand_bottom_left, A0);

    // left spatial merge candidate
    xA1 = x0 - 1;
    yA1 = y0 + nPbH - 1;
    xA1_pu = xA1 >> s->sps->log2_min_pu_size;
    yA1_pu = yA1 >> s->sps->log2_min_pu_size;

    is_available_a1 = AVAILABLE(cand_left, A1);
    if (is_available_a0 || is_available_a1)
        isScaledFlag_L0 = 1;

    /* try A0 then A1, first unscaled (same reference), then with
     * long-term/POC scaling */
    if (is_available_a0) {
        availableFlagLXA0 = MP_MX(A0, pred_flag_index_l0, mxA);
        if (!availableFlagLXA0)
            availableFlagLXA0 = MP_MX(A0, pred_flag_index_l1, mxA);
    }

    if (is_available_a1 && !availableFlagLXA0) {
        availableFlagLXA0 = MP_MX(A1, pred_flag_index_l0, mxA);
        if (!availableFlagLXA0)
            availableFlagLXA0 = MP_MX(A1, pred_flag_index_l1, mxA);
    }

    if (is_available_a0 && !availableFlagLXA0) {
        availableFlagLXA0 = MP_MX_LT(A0, pred_flag_index_l0, mxA);
        if (!availableFlagLXA0)
            availableFlagLXA0 = MP_MX_LT(A0, pred_flag_index_l1, mxA);
    }

    if (is_available_a1 && !availableFlagLXA0) {
        availableFlagLXA0 = MP_MX_LT(A1, pred_flag_index_l0, mxA);
        if (!availableFlagLXA0)
            availableFlagLXA0 = MP_MX_LT(A1, pred_flag_index_l1, mxA);
    }

    /* early out: the first predictor was requested and the A candidate
     * supplies it */
    if (availableFlagLXA0 && !mvp_lx_flag) {
        mv->mv[LX] = mxA;
        return;
    }

    // B candidates
    // above right spatial merge candidate
    xB0 = x0 + nPbW;
    yB0 = y0 - 1;
    xB0_pu = xB0 >> s->sps->log2_min_pu_size;
    yB0_pu = yB0 >> s->sps->log2_min_pu_size;

    is_available_b0 = PRED_BLOCK_AVAILABLE(B0) && AVAILABLE(cand_up_right, B0);

    if (is_available_b0) {
        availableFlagLXB0 = MP_MX(B0, pred_flag_index_l0, mxB);
        if (!availableFlagLXB0)
            availableFlagLXB0 = MP_MX(B0, pred_flag_index_l1, mxB);
    }

    if (!availableFlagLXB0) {
        // above spatial merge candidate
        xB1 = x0 + nPbW - 1;
        yB1 = y0 - 1;
        xB1_pu = xB1 >> s->sps->log2_min_pu_size;
        yB1_pu = yB1 >> s->sps->log2_min_pu_size;

        is_available_b1 = AVAILABLE(cand_up, B1);

        if (is_available_b1) {
            availableFlagLXB0 = MP_MX(B1, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX(B1, pred_flag_index_l1, mxB);
        }
    }

    if (!availableFlagLXB0) {
        // above left spatial merge candidate
        xB2 = x0 - 1;
        yB2 = y0 - 1;
        xB2_pu = xB2 >> s->sps->log2_min_pu_size;
        yB2_pu = yB2 >> s->sps->log2_min_pu_size;
        is_available_b2 = AVAILABLE(cand_up_left, B2);

        if (is_available_b2) {
            availableFlagLXB0 = MP_MX(B2, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX(B2, pred_flag_index_l1, mxB);
        }
    }

    /* no left candidate existed at all: promote the unscaled B candidate
     * to the A slot and retry the B positions with scaling allowed */
    if (isScaledFlag_L0 == 0) {
        if (availableFlagLXB0) {
            availableFlagLXA0 = 1;
            mxA = mxB;
        }
        availableFlagLXB0 = 0;

        // XB0 and L1
        if (is_available_b0) {
            availableFlagLXB0 = MP_MX_LT(B0, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX_LT(B0, pred_flag_index_l1, mxB);
        }

        if (is_available_b1 && !availableFlagLXB0) {
            availableFlagLXB0 = MP_MX_LT(B1, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX_LT(B1, pred_flag_index_l1, mxB);
        }

        if (is_available_b2 && !availableFlagLXB0) {
            availableFlagLXB0 = MP_MX_LT(B2, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX_LT(B2, pred_flag_index_l1, mxB);
        }
    }

    if (availableFlagLXA0)
        mvpcand_list[numMVPCandLX++] = mxA;

    /* B candidate only enters the list when distinct from A */
    if (availableFlagLXB0 && (!availableFlagLXA0 || mxA.x != mxB.x || mxA.y != mxB.y))
        mvpcand_list[numMVPCandLX++] = mxB;

    // temporal motion vector prediction candidate
    if (numMVPCandLX < 2 && s->sh.slice_temporal_mvp_enabled_flag) {
        Mv mv_col;
        int available_col = temporal_luma_motion_vector(s, x0, y0, nPbW,
                                                        nPbH, ref_idx,
                                                        &mv_col, LX);
        if (available_col)
            mvpcand_list[numMVPCandLX++] = mv_col;
    }

    /* NOTE(review): unfilled slots are zero-initialized, so an
     * out-of-range mvp_lx_flag yields the zero MV */
    mv->mv[LX] = mvpcand_list[mvp_lx_flag];
}