/*
 * HEVC video decoder
 *
 * Copyright (C) 2012 - 2013 Guillaume Martres
 * Copyright (C) 2013 Anand Meher Kotra
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "hevc.h"

static const uint8_t l0_l1_cand_idx[12][2] = {
    { 0, 1, },
    { 1, 0, },
    { 0, 2, },
    { 2, 0, },
    { 1, 2, },
    { 2, 1, },
    { 0, 3, },
    { 3, 0, },
    { 1, 3, },
    { 3, 1, },
    { 2, 3, },
    { 3, 2, },
};

void ff_hevc_set_neighbour_available(HEVCContext *s, int x0, int y0,
                                     int nPbW, int nPbH)
{
    HEVCLocalContext *lc = &s->HEVClc;
    int x0b = x0 & ((1 << s->sps->log2_ctb_size) - 1);
    int y0b = y0 & ((1 << s->sps->log2_ctb_size) - 1);

    lc->na.cand_up      = (lc->ctb_up_flag || y0b);
    lc->na.cand_left    = (lc->ctb_left_flag || x0b);
    lc->na.cand_up_left = (!x0b && !y0b) ? lc->ctb_up_left_flag : lc->na.cand_left && lc->na.cand_up;
    lc->na.cand_up_right_sap =
            ((x0b + nPbW) == (1 << s->sps->log2_ctb_size)) ?
                    lc->ctb_up_right_flag && !y0b : lc->na.cand_up;
    lc->na.cand_up_right =
            ((x0b + nPbW) == (1 << s->sps->log2_ctb_size) ?
                    lc->ctb_up_right_flag && !y0b : lc->na.cand_up)
            && (x0 + nPbW) < lc->end_of_tiles_x;
    lc->na.cand_bottom_left = ((y0 + nPbH) >= lc->end_of_tiles_y) ? 0 : lc->na.cand_left;
}
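
/*
 * Informative note: cand_up_right_sap differs from cand_up_right only in
 * that it skips the (x0 + nPbW) < end_of_tiles_x check, so it still reports
 * the above-right neighbour as usable when that position lies past the end
 * of the current tile row.
 */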

/*
 * 6.4.1 Derivation process for z-scan order block availability
 */
static int z_scan_block_avail(HEVCContext *s, int xCurr, int yCurr,
                              int xN, int yN)
{
#define MIN_TB_ADDR_ZS(x, y) \
    s->pps->min_tb_addr_zs[(y) * s->sps->min_tb_width + (x)]
    int Curr = MIN_TB_ADDR_ZS(xCurr >> s->sps->log2_min_tb_size,
                              yCurr >> s->sps->log2_min_tb_size);
    int N;

    if (xN < 0 || yN < 0 ||
        xN >= s->sps->width ||
        yN >= s->sps->height)
        return 0;

    N = MIN_TB_ADDR_ZS(xN >> s->sps->log2_min_tb_size,
                       yN >> s->sps->log2_min_tb_size);

    return N <= Curr;
}

static int same_prediction_block(HEVCLocalContext *lc, int log2_cb_size,
                                 int x0, int y0, int nPbW, int nPbH,
                                 int xA1, int yA1, int partIdx)
{
    return !(nPbW << 1 == 1 << log2_cb_size &&
             nPbH << 1 == 1 << log2_cb_size && partIdx == 1 &&
             lc->cu.x + nPbW > xA1 &&
             lc->cu.y + nPbH <= yA1);
}

/*
 * 6.4.2 Derivation process for prediction block availability
 */
static int check_prediction_block_available(HEVCContext *s, int log2_cb_size,
                                            int x0, int y0, int nPbW, int nPbH,
                                            int xA1, int yA1, int partIdx)
{
    HEVCLocalContext *lc = &s->HEVClc;

    if (lc->cu.x < xA1 && lc->cu.y < yA1 &&
        (lc->cu.x + (1 << log2_cb_size)) > xA1 &&
        (lc->cu.y + (1 << log2_cb_size)) > yA1)
        return same_prediction_block(lc, log2_cb_size, x0, y0,
                                     nPbW, nPbH, xA1, yA1, partIdx);
    else
        return z_scan_block_avail(s, x0, y0, xA1, yA1);
}

// check if the two luma locations belong to the same motion estimation region
// (despite its name, this returns 1 when (xN, yN) and (xP, yP) fall in the same MER)
static int isDiffMER(HEVCContext *s, int xN, int yN, int xP, int yP)
{
    uint8_t plevel = s->pps->log2_parallel_merge_level;

    return xN >> plevel == xP >> plevel &&
           yN >> plevel == yP >> plevel;
}

#define MATCH(x) (A.x == B.x)

// check if the mv's and refidx are the same between A and B
static int compareMVrefidx(struct MvField A, struct MvField B)
{
    if (A.pred_flag[0] && A.pred_flag[1] && B.pred_flag[0] && B.pred_flag[1])
        return MATCH(ref_idx[0]) && MATCH(mv[0].x) && MATCH(mv[0].y) &&
               MATCH(ref_idx[1]) && MATCH(mv[1].x) && MATCH(mv[1].y);

    if (A.pred_flag[0] && !A.pred_flag[1] && B.pred_flag[0] && !B.pred_flag[1])
        return MATCH(ref_idx[0]) && MATCH(mv[0].x) && MATCH(mv[0].y);

    if (!A.pred_flag[0] && A.pred_flag[1] && !B.pred_flag[0] && B.pred_flag[1])
        return MATCH(ref_idx[1]) && MATCH(mv[1].x) && MATCH(mv[1].y);

    return 0;
}

static av_always_inline void mv_scale(Mv *dst, Mv *src, int td, int tb)
{
    int tx, scale_factor;

    td           = av_clip_int8_c(td);
    tb           = av_clip_int8_c(tb);
    tx           = (0x4000 + abs(td / 2)) / td;
    scale_factor = av_clip_c((tb * tx + 32) >> 6, -4096, 4095);
    dst->x       = av_clip_int16_c((scale_factor * src->x + 127 +
                                    (scale_factor * src->x < 0)) >> 8);
    dst->y       = av_clip_int16_c((scale_factor * src->y + 127 +
                                    (scale_factor * src->y < 0)) >> 8);
}
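
/*
 * Informative worked example of the scaling above: with td = 4 (collocated
 * POC distance) and tb = 2 (current POC distance),
 * tx = (0x4000 + 2) / 4 = 4096 and
 * scale_factor = ((2 * 4096 + 32) >> 6) = 128, i.e. 0.5 in Q8 fixed point.
 * A collocated vector component of 10 then becomes
 * ((128 * 10 + 127) >> 8) = 5, halving the vector to match the shorter
 * temporal distance.
 */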

static int check_mvset(Mv *mvLXCol, Mv *mvCol,
                       int colPic, int poc,
                       RefPicList *refPicList, int X, int refIdxLx,
                       RefPicList *refPicList_col, int listCol, int refidxCol)
{
    int cur_lt = refPicList[X].isLongTerm[refIdxLx];
    int col_lt = refPicList_col[listCol].isLongTerm[refidxCol];
    int col_poc_diff, cur_poc_diff;

    if (cur_lt != col_lt) {
        mvLXCol->x = 0;
        mvLXCol->y = 0;
        return 0;
    }

    col_poc_diff = colPic - refPicList_col[listCol].list[refidxCol];
    cur_poc_diff = poc    - refPicList[X].list[refIdxLx];

    if (!col_poc_diff)
        col_poc_diff = 1;  // error resilience

    if (cur_lt || col_poc_diff == cur_poc_diff) {
        mvLXCol->x = mvCol->x;
        mvLXCol->y = mvCol->y;
    } else {
        mv_scale(mvLXCol, mvCol, col_poc_diff, cur_poc_diff);
    }
    return 1;
}

#define CHECK_MVSET(l)                                          \
    check_mvset(mvLXCol, temp_col.mv + l,                       \
                colPic, s->poc,                                 \
                refPicList, X, refIdxLx,                        \
                refPicList_col, L ## l, temp_col.ref_idx[l])

// derive the motion vectors section 8.5.3.1.8
static int derive_temporal_colocated_mvs(HEVCContext *s, MvField temp_col,
                                         int refIdxLx, Mv *mvLXCol, int X,
                                         int colPic, RefPicList *refPicList_col)
{
    RefPicList *refPicList = s->ref->refPicList;

    if (temp_col.is_intra) {
        mvLXCol->x = 0;
        mvLXCol->y = 0;
        return 0;
    }

    if (temp_col.pred_flag[0] == 0)
        return CHECK_MVSET(1);
    else if (temp_col.pred_flag[0] == 1 && temp_col.pred_flag[1] == 0)
        return CHECK_MVSET(0);
    else if (temp_col.pred_flag[0] == 1 && temp_col.pred_flag[1] == 1) {
        int check_diffpicount = 0;
        int i = 0;
        for (i = 0; i < refPicList[0].nb_refs; i++) {
            if (refPicList[0].list[i] > s->poc)
                check_diffpicount++;
        }
        for (i = 0; i < refPicList[1].nb_refs; i++) {
            if (refPicList[1].list[i] > s->poc)
                check_diffpicount++;
        }
        if (check_diffpicount == 0 && X == 0)
            return CHECK_MVSET(0);
        else if (check_diffpicount == 0 && X == 1)
            return CHECK_MVSET(1);
        else {
            if (s->sh.collocated_list == L1)
                return CHECK_MVSET(0);
            else
                return CHECK_MVSET(1);
        }
    }
    return 0;
}
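
/*
 * Informative note on the bi-predicted branch above: when no reference
 * picture in either list has a POC greater than the current picture
 * (check_diffpicount == 0, the low-delay case), the collocated MV is taken
 * from the collocated block's list X; otherwise the list is chosen from
 * s->sh.collocated_list, i.e. from the collocated_from_l0_flag signalled in
 * the slice header.
 */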

#define TAB_MVF(x, y) \
    tab_mvf[(y) * min_pu_width + x]

#define TAB_MVF_PU(v) \
    TAB_MVF(x ## v ## _pu, y ## v ## _pu)

#define DERIVE_TEMPORAL_COLOCATED_MVS                           \
    derive_temporal_colocated_mvs(s, temp_col,                  \
                                  refIdxLx, mvLXCol, X, colPic, \
                                  ff_hevc_get_ref_list(s, ref, x, y))

/*
 * 8.5.3.1.7 temporal luma motion vector prediction
 */
static int temporal_luma_motion_vector(HEVCContext *s, int x0, int y0,
                                       int nPbW, int nPbH, int refIdxLx,
                                       Mv *mvLXCol, int X)
{
    MvField *tab_mvf;
    MvField temp_col;
    int x, y, x_pu, y_pu;
    int min_pu_width = s->sps->min_pu_width;
    int availableFlagLXCol = 0;
    int colPic;

    HEVCFrame *ref = s->ref->collocated_ref;

    if (!ref)
        return 0;

    tab_mvf = ref->tab_mvf;
    colPic  = ref->poc;

    // bottom right collocated motion vector
    x = x0 + nPbW;
    y = y0 + nPbH;

    ff_thread_await_progress(&ref->tf, y, 0);

    if (tab_mvf &&
        (y0 >> s->sps->log2_ctb_size) == (y >> s->sps->log2_ctb_size) &&
        y < s->sps->height &&
        x < s->sps->width) {
        x                  = ((x >> 4) << 4);
        y                  = ((y >> 4) << 4);
        x_pu               = x >> s->sps->log2_min_pu_size;
        y_pu               = y >> s->sps->log2_min_pu_size;
        temp_col           = TAB_MVF(x_pu, y_pu);
        availableFlagLXCol = DERIVE_TEMPORAL_COLOCATED_MVS;
    }

    // derive center collocated motion vector
    if (tab_mvf && !availableFlagLXCol) {
        x                  = x0 + (nPbW >> 1);
        y                  = y0 + (nPbH >> 1);
        x                  = ((x >> 4) << 4);
        y                  = ((y >> 4) << 4);
        x_pu               = x >> s->sps->log2_min_pu_size;
        y_pu               = y >> s->sps->log2_min_pu_size;
        temp_col           = TAB_MVF(x_pu, y_pu);
        availableFlagLXCol = DERIVE_TEMPORAL_COLOCATED_MVS;
    }
    return availableFlagLXCol;
}
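
/*
 * Informative note: the bottom-right collocated position is tried first and
 * is skipped when it falls outside the current CTB row or the picture; the
 * PU centre is used as a fallback. The (x >> 4) << 4 rounding matches the
 * 16x16 granularity at which collocated motion data is kept for temporal
 * prediction.
 */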

#define AVAILABLE(cand, v) \
    (cand && !TAB_MVF_PU(v).is_intra)

#define PRED_BLOCK_AVAILABLE(v)                                 \
    check_prediction_block_available(s, log2_cb_size,           \
                                     x0, y0, nPbW, nPbH,        \
                                     x ## v, y ## v, part_idx)

#define COMPARE_MV_REFIDX(a, b) \
    compareMVrefidx(TAB_MVF_PU(a), TAB_MVF_PU(b))

/*
 * 8.5.3.1.2 Derivation process for spatial merging candidates
 */
static void derive_spatial_merge_candidates(HEVCContext *s, int x0, int y0,
                                            int nPbW, int nPbH,
                                            int log2_cb_size,
                                            int singleMCLFlag, int part_idx,
                                            struct MvField mergecandlist[])
{
    HEVCLocalContext *lc   = &s->HEVClc;
    RefPicList *refPicList = s->ref->refPicList;
    MvField *tab_mvf       = s->ref->tab_mvf;

    const int min_pu_width = s->sps->min_pu_width;

    const int cand_bottom_left = lc->na.cand_bottom_left;
    const int cand_left        = lc->na.cand_left;
    const int cand_up_left     = lc->na.cand_up_left;
    const int cand_up          = lc->na.cand_up;
    const int cand_up_right    = lc->na.cand_up_right_sap;

    const int xA1    = x0 - 1;
    const int yA1    = y0 + nPbH - 1;
    const int xA1_pu = xA1 >> s->sps->log2_min_pu_size;
    const int yA1_pu = yA1 >> s->sps->log2_min_pu_size;

    const int xB1    = x0 + nPbW - 1;
    const int yB1    = y0 - 1;
    const int xB1_pu = xB1 >> s->sps->log2_min_pu_size;
    const int yB1_pu = yB1 >> s->sps->log2_min_pu_size;

    const int xB0    = x0 + nPbW;
    const int yB0    = y0 - 1;
    const int xB0_pu = xB0 >> s->sps->log2_min_pu_size;
    const int yB0_pu = yB0 >> s->sps->log2_min_pu_size;

    const int xA0    = x0 - 1;
    const int yA0    = y0 + nPbH;
    const int xA0_pu = xA0 >> s->sps->log2_min_pu_size;
    const int yA0_pu = yA0 >> s->sps->log2_min_pu_size;

    const int xB2    = x0 - 1;
    const int yB2    = y0 - 1;
    const int xB2_pu = xB2 >> s->sps->log2_min_pu_size;
    const int yB2_pu = yB2 >> s->sps->log2_min_pu_size;

    const int nb_refs = (s->sh.slice_type == P_SLICE) ?
                        s->sh.nb_refs[0] : FFMIN(s->sh.nb_refs[0], s->sh.nb_refs[1]);
    int check_MER   = 1;
    int check_MER_1 = 1;

    int zero_idx = 0;

    int nb_merge_cand      = 0;
    int nb_orig_merge_cand = 0;

    int is_available_a0;
    int is_available_a1;
    int is_available_b0;
    int is_available_b1;
    int is_available_b2;
    int check_B0;
    int check_A0;

    // first left spatial merge candidate
    is_available_a1 = AVAILABLE(cand_left, A1);

    if (!singleMCLFlag && part_idx == 1 &&
        (lc->cu.part_mode == PART_Nx2N ||
         lc->cu.part_mode == PART_nLx2N ||
         lc->cu.part_mode == PART_nRx2N) ||
        isDiffMER(s, xA1, yA1, x0, y0)) {
        is_available_a1 = 0;
    }

    if (is_available_a1)
        mergecandlist[nb_merge_cand++] = TAB_MVF_PU(A1);

    // above spatial merge candidate
    is_available_b1 = AVAILABLE(cand_up, B1);

    if (!singleMCLFlag && part_idx == 1 &&
        (lc->cu.part_mode == PART_2NxN ||
         lc->cu.part_mode == PART_2NxnU ||
         lc->cu.part_mode == PART_2NxnD) ||
        isDiffMER(s, xB1, yB1, x0, y0)) {
        is_available_b1 = 0;
    }

    if (is_available_a1 && is_available_b1)
        check_MER = !COMPARE_MV_REFIDX(B1, A1);

    if (is_available_b1 && check_MER)
        mergecandlist[nb_merge_cand++] = TAB_MVF_PU(B1);

    // above right spatial merge candidate
    check_MER = 1;
    check_B0  = PRED_BLOCK_AVAILABLE(B0);

    is_available_b0 = check_B0 && AVAILABLE(cand_up_right, B0);

    if (isDiffMER(s, xB0, yB0, x0, y0))
        is_available_b0 = 0;

    if (is_available_b1 && is_available_b0)
        check_MER = !COMPARE_MV_REFIDX(B0, B1);

    if (is_available_b0 && check_MER)
        mergecandlist[nb_merge_cand++] = TAB_MVF_PU(B0);

    // left bottom spatial merge candidate
    check_MER = 1;
    check_A0  = PRED_BLOCK_AVAILABLE(A0);

    is_available_a0 = check_A0 && AVAILABLE(cand_bottom_left, A0);

    if (isDiffMER(s, xA0, yA0, x0, y0))
        is_available_a0 = 0;

    if (is_available_a1 && is_available_a0)
        check_MER = !COMPARE_MV_REFIDX(A0, A1);

    if (is_available_a0 && check_MER)
        mergecandlist[nb_merge_cand++] = TAB_MVF_PU(A0);

    // above left spatial merge candidate
    check_MER = 1;

    is_available_b2 = AVAILABLE(cand_up_left, B2);

    if (isDiffMER(s, xB2, yB2, x0, y0))
        is_available_b2 = 0;

    if (is_available_a1 && is_available_b2)
        check_MER = !COMPARE_MV_REFIDX(B2, A1);

    if (is_available_b1 && is_available_b2)
        check_MER_1 = !COMPARE_MV_REFIDX(B2, B1);

    if (is_available_b2 && check_MER && check_MER_1 && nb_merge_cand != 4)
        mergecandlist[nb_merge_cand++] = TAB_MVF_PU(B2);

    // temporal motion vector candidate
    if (s->sh.slice_temporal_mvp_enabled_flag &&
        nb_merge_cand < s->sh.max_num_merge_cand) {
        Mv mv_l0_col, mv_l1_col;
        int available_l0 = temporal_luma_motion_vector(s, x0, y0, nPbW, nPbH,
                                                       0, &mv_l0_col, 0);
        int available_l1 = (s->sh.slice_type == B_SLICE) ?
                           temporal_luma_motion_vector(s, x0, y0, nPbW, nPbH,
                                                       0, &mv_l1_col, 1) : 0;

        if (available_l0 || available_l1) {
            mergecandlist[nb_merge_cand].is_intra     = 0;
            mergecandlist[nb_merge_cand].pred_flag[0] = available_l0;
            mergecandlist[nb_merge_cand].pred_flag[1] = available_l1;
            if (available_l0) {
                mergecandlist[nb_merge_cand].mv[0]      = mv_l0_col;
                mergecandlist[nb_merge_cand].ref_idx[0] = 0;
            }
            if (available_l1) {
                mergecandlist[nb_merge_cand].mv[1]      = mv_l1_col;
                mergecandlist[nb_merge_cand].ref_idx[1] = 0;
            }
            nb_merge_cand++;
        }
    }

    nb_orig_merge_cand = nb_merge_cand;

    // combined bi-predictive merge candidates (applies for B slices)
    if (s->sh.slice_type == B_SLICE && nb_orig_merge_cand > 1 &&
        nb_orig_merge_cand < s->sh.max_num_merge_cand) {
        int comb_idx;

        for (comb_idx = 0; nb_merge_cand < s->sh.max_num_merge_cand &&
                           comb_idx < nb_orig_merge_cand * (nb_orig_merge_cand - 1); comb_idx++) {
            int l0_cand_idx = l0_l1_cand_idx[comb_idx][0];
            int l1_cand_idx = l0_l1_cand_idx[comb_idx][1];
            MvField l0_cand = mergecandlist[l0_cand_idx];
            MvField l1_cand = mergecandlist[l1_cand_idx];

            if (l0_cand.pred_flag[0] && l1_cand.pred_flag[1] &&
                (refPicList[0].list[l0_cand.ref_idx[0]] !=
                 refPicList[1].list[l1_cand.ref_idx[1]] ||
                 l0_cand.mv[0].x != l1_cand.mv[1].x ||
                 l0_cand.mv[0].y != l1_cand.mv[1].y)) {
                mergecandlist[nb_merge_cand].ref_idx[0]   = l0_cand.ref_idx[0];
                mergecandlist[nb_merge_cand].ref_idx[1]   = l1_cand.ref_idx[1];
                mergecandlist[nb_merge_cand].pred_flag[0] = 1;
                mergecandlist[nb_merge_cand].pred_flag[1] = 1;
                mergecandlist[nb_merge_cand].mv[0].x      = l0_cand.mv[0].x;
                mergecandlist[nb_merge_cand].mv[0].y      = l0_cand.mv[0].y;
                mergecandlist[nb_merge_cand].mv[1].x      = l1_cand.mv[1].x;
                mergecandlist[nb_merge_cand].mv[1].y      = l1_cand.mv[1].y;
                mergecandlist[nb_merge_cand].is_intra     = 0;
                nb_merge_cand++;
            }
        }
    }

    // append zero motion vector candidates
    while (nb_merge_cand < s->sh.max_num_merge_cand) {
        mergecandlist[nb_merge_cand].pred_flag[0] = 1;
        mergecandlist[nb_merge_cand].pred_flag[1] = s->sh.slice_type == B_SLICE;
        mergecandlist[nb_merge_cand].mv[0].x      = 0;
        mergecandlist[nb_merge_cand].mv[0].y      = 0;
        mergecandlist[nb_merge_cand].mv[1].x      = 0;
        mergecandlist[nb_merge_cand].mv[1].y      = 0;
        mergecandlist[nb_merge_cand].is_intra     = 0;
        mergecandlist[nb_merge_cand].ref_idx[0]   = zero_idx < nb_refs ? zero_idx : 0;
        mergecandlist[nb_merge_cand].ref_idx[1]   = zero_idx < nb_refs ? zero_idx : 0;

        nb_merge_cand++;
        zero_idx++;
    }
}
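
/*
 * Informative summary of the list built above: spatial candidates are added
 * in the order A1, B1, B0, A0 and, while fewer than four spatial candidates
 * are present, B2; then the temporal candidate, combined bi-predictive
 * candidates on B slices, and finally zero-motion candidates until
 * max_num_merge_cand entries are filled.
 */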

/*
 * 8.5.3.1.1 Derivation process of luma Mvs for merge mode
 */
void ff_hevc_luma_mv_merge_mode(HEVCContext *s, int x0, int y0, int nPbW,
                                int nPbH, int log2_cb_size, int part_idx,
                                int merge_idx, MvField *mv)
{
    int singleMCLFlag = 0;
    int nCS = 1 << log2_cb_size;
    struct MvField mergecand_list[MRG_MAX_NUM_CANDS] = { { { { 0 } } } };
    int nPbW2 = nPbW;
    int nPbH2 = nPbH;
    HEVCLocalContext *lc = &s->HEVClc;

    if (s->pps->log2_parallel_merge_level > 2 && nCS == 8) {
        singleMCLFlag = 1;
        x0            = lc->cu.x;
        y0            = lc->cu.y;
        nPbW          = nCS;
        nPbH          = nCS;
        part_idx      = 0;
    }

    ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH);
    derive_spatial_merge_candidates(s, x0, y0, nPbW, nPbH, log2_cb_size,
                                    singleMCLFlag, part_idx, mergecand_list);

    if (mergecand_list[merge_idx].pred_flag[0] == 1 &&
        mergecand_list[merge_idx].pred_flag[1] == 1 &&
        (nPbW2 + nPbH2) == 12) {
        mergecand_list[merge_idx].ref_idx[1]   = -1;
        mergecand_list[merge_idx].pred_flag[1] = 0;
    }

    *mv = mergecand_list[merge_idx];
}
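
/*
 * Informative note: when log2_parallel_merge_level > 2 and the CU is 8x8,
 * every PU in the CU shares one merge list derived as if the CU were a
 * single 2Nx2N partition (singleMCLFlag). The (nPbW2 + nPbH2) == 12 check
 * catches 8x4 and 4x8 PUs, for which bi-prediction is not allowed, so such
 * a candidate is demoted to list 0 only.
 */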

static av_always_inline void dist_scale(HEVCContext *s, Mv *mv,
                                        int min_pu_width, int x, int y,
                                        int elist, int ref_idx_curr, int ref_idx)
{
    RefPicList *refPicList = s->ref->refPicList;
    MvField *tab_mvf       = s->ref->tab_mvf;
    int ref_pic_elist      = refPicList[elist].list[TAB_MVF(x, y).ref_idx[elist]];
    int ref_pic_curr       = refPicList[ref_idx_curr].list[ref_idx];

    if (ref_pic_elist != ref_pic_curr) {
        int poc_diff = s->poc - ref_pic_elist;
        if (!poc_diff)
            poc_diff = 1;
        mv_scale(mv, mv, poc_diff, s->poc - ref_pic_curr);
    }
}

static int mv_mp_mode_mx(HEVCContext *s, int x, int y, int pred_flag_index,
                         Mv *mv, int ref_idx_curr, int ref_idx)
{
    MvField *tab_mvf = s->ref->tab_mvf;
    int min_pu_width = s->sps->min_pu_width;

    RefPicList *refPicList = s->ref->refPicList;

    if (TAB_MVF(x, y).pred_flag[pred_flag_index] == 1 &&
        refPicList[pred_flag_index].list[TAB_MVF(x, y).ref_idx[pred_flag_index]] == refPicList[ref_idx_curr].list[ref_idx]) {
        *mv = TAB_MVF(x, y).mv[pred_flag_index];
        return 1;
    }
    return 0;
}

static int mv_mp_mode_mx_lt(HEVCContext *s, int x, int y, int pred_flag_index,
                            Mv *mv, int ref_idx_curr, int ref_idx)
{
    MvField *tab_mvf = s->ref->tab_mvf;
    int min_pu_width = s->sps->min_pu_width;

    RefPicList *refPicList = s->ref->refPicList;
    int currIsLongTerm     = refPicList[ref_idx_curr].isLongTerm[ref_idx];

    int colIsLongTerm =
        refPicList[pred_flag_index].isLongTerm[(TAB_MVF(x, y).ref_idx[pred_flag_index])];

    if (TAB_MVF(x, y).pred_flag[pred_flag_index] &&
        colIsLongTerm == currIsLongTerm) {
        *mv = TAB_MVF(x, y).mv[pred_flag_index];
        if (!currIsLongTerm)
            dist_scale(s, mv, min_pu_width, x, y,
                       pred_flag_index, ref_idx_curr, ref_idx);
        return 1;
    }
    return 0;
}

#define MP_MX(v, pred, mx)                                      \
    mv_mp_mode_mx(s, x ## v ## _pu, y ## v ## _pu, pred,        \
                  &mx, ref_idx_curr, ref_idx)

#define MP_MX_LT(v, pred, mx)                                   \
    mv_mp_mode_mx_lt(s, x ## v ## _pu, y ## v ## _pu, pred,     \
                     &mx, ref_idx_curr, ref_idx)
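
/*
 * Informative note: mv_mp_mode_mx() only accepts a neighbour whose reference
 * picture equals the one currently searched for, while mv_mp_mode_mx_lt()
 * also accepts neighbours using a different short-term reference and rescales
 * their vectors with dist_scale(); long-term references are taken unscaled.
 */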

void ff_hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
                              int nPbH, int log2_cb_size, int part_idx,
                              int merge_idx, MvField *mv,
                              int mvp_lx_flag, int LX)
{
    HEVCLocalContext *lc = &s->HEVClc;
    MvField *tab_mvf = s->ref->tab_mvf;
    int isScaledFlag_L0 = 0;
    int availableFlagLXA0 = 0;
    int availableFlagLXB0 = 0;
    int numMVPCandLX = 0;
    int min_pu_width = s->sps->min_pu_width;

    int xA0, yA0;
    int xA0_pu, yA0_pu;
    int is_available_a0;

    int xA1, yA1;
    int xA1_pu, yA1_pu;
    int is_available_a1;

    int xB0, yB0;
    int xB0_pu, yB0_pu;
    int is_available_b0;

    int xB1, yB1;
    int xB1_pu = 0, yB1_pu = 0;
    int is_available_b1 = 0;

    int xB2, yB2;
    int xB2_pu = 0, yB2_pu = 0;
    int is_available_b2 = 0;
    Mv mvpcand_list[2] = { { 0 } };
    Mv mxA = { 0 };
    Mv mxB = { 0 };
    int ref_idx_curr = 0;
    int ref_idx = 0;
    int pred_flag_index_l0;
    int pred_flag_index_l1;
    int x0b = x0 & ((1 << s->sps->log2_ctb_size) - 1);
    int y0b = y0 & ((1 << s->sps->log2_ctb_size) - 1);

    int cand_up      = (lc->ctb_up_flag || y0b);
    int cand_left    = (lc->ctb_left_flag || x0b);
    int cand_up_left =
            (!x0b && !y0b) ? lc->ctb_up_left_flag : cand_left && cand_up;
    int cand_up_right =
            (x0b + nPbW == (1 << s->sps->log2_ctb_size) ||
             x0 + nPbW >= lc->end_of_tiles_x) ? lc->ctb_up_right_flag && !y0b
                                              : cand_up;
    int cand_bottom_left = (y0 + nPbH >= lc->end_of_tiles_y) ? 0 : cand_left;

    ref_idx_curr       = LX;
    ref_idx            = mv->ref_idx[LX];
    pred_flag_index_l0 = LX;
    pred_flag_index_l1 = !LX;

    // left bottom spatial candidate
    xA0    = x0 - 1;
    yA0    = y0 + nPbH;
    xA0_pu = xA0 >> s->sps->log2_min_pu_size;
    yA0_pu = yA0 >> s->sps->log2_min_pu_size;

    is_available_a0 = PRED_BLOCK_AVAILABLE(A0) && AVAILABLE(cand_bottom_left, A0);

    // left spatial merge candidate
    xA1    = x0 - 1;
    yA1    = y0 + nPbH - 1;
    xA1_pu = xA1 >> s->sps->log2_min_pu_size;
    yA1_pu = yA1 >> s->sps->log2_min_pu_size;

    is_available_a1 = AVAILABLE(cand_left, A1);
    if (is_available_a0 || is_available_a1)
        isScaledFlag_L0 = 1;

    if (is_available_a0) {
        availableFlagLXA0 = MP_MX(A0, pred_flag_index_l0, mxA);
        if (!availableFlagLXA0)
            availableFlagLXA0 = MP_MX(A0, pred_flag_index_l1, mxA);
    }

    if (is_available_a1 && !availableFlagLXA0) {
        availableFlagLXA0 = MP_MX(A1, pred_flag_index_l0, mxA);
        if (!availableFlagLXA0)
            availableFlagLXA0 = MP_MX(A1, pred_flag_index_l1, mxA);
    }

    if (is_available_a0 && !availableFlagLXA0) {
        availableFlagLXA0 = MP_MX_LT(A0, pred_flag_index_l0, mxA);
        if (!availableFlagLXA0)
            availableFlagLXA0 = MP_MX_LT(A0, pred_flag_index_l1, mxA);
    }

    if (is_available_a1 && !availableFlagLXA0) {
        availableFlagLXA0 = MP_MX_LT(A1, pred_flag_index_l0, mxA);
        if (!availableFlagLXA0)
            availableFlagLXA0 = MP_MX_LT(A1, pred_flag_index_l1, mxA);
    }

    // B candidates
    // above right spatial merge candidate
    xB0    = x0 + nPbW;
    yB0    = y0 - 1;
    xB0_pu = xB0 >> s->sps->log2_min_pu_size;
    yB0_pu = yB0 >> s->sps->log2_min_pu_size;

    is_available_b0 = PRED_BLOCK_AVAILABLE(B0) && AVAILABLE(cand_up_right, B0);

    if (is_available_b0) {
        availableFlagLXB0 = MP_MX(B0, pred_flag_index_l0, mxB);
        if (!availableFlagLXB0)
            availableFlagLXB0 = MP_MX(B0, pred_flag_index_l1, mxB);
    }

    if (!availableFlagLXB0) {
        // above spatial merge candidate
        xB1    = x0 + nPbW - 1;
        yB1    = y0 - 1;
        xB1_pu = xB1 >> s->sps->log2_min_pu_size;
        yB1_pu = yB1 >> s->sps->log2_min_pu_size;

        is_available_b1 = AVAILABLE(cand_up, B1);

        if (is_available_b1) {
            availableFlagLXB0 = MP_MX(B1, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX(B1, pred_flag_index_l1, mxB);
        }
    }

    if (!availableFlagLXB0) {
        // above left spatial merge candidate
        xB2    = x0 - 1;
        yB2    = y0 - 1;
        xB2_pu = xB2 >> s->sps->log2_min_pu_size;
        yB2_pu = yB2 >> s->sps->log2_min_pu_size;
        is_available_b2 = AVAILABLE(cand_up_left, B2);

        if (is_available_b2) {
            availableFlagLXB0 = MP_MX(B2, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX(B2, pred_flag_index_l1, mxB);
        }
    }

    if (isScaledFlag_L0 == 0) {
        if (availableFlagLXB0) {
            availableFlagLXA0 = 1;
            mxA = mxB;
        }
        availableFlagLXB0 = 0;

        // XB0 and L1
        if (is_available_b0) {
            availableFlagLXB0 = MP_MX_LT(B0, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX_LT(B0, pred_flag_index_l1, mxB);
        }

        if (is_available_b1 && !availableFlagLXB0) {
            availableFlagLXB0 = MP_MX_LT(B1, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX_LT(B1, pred_flag_index_l1, mxB);
        }

        if (is_available_b2 && !availableFlagLXB0) {
            availableFlagLXB0 = MP_MX_LT(B2, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX_LT(B2, pred_flag_index_l1, mxB);
        }
    }

    if (availableFlagLXA0)
        mvpcand_list[numMVPCandLX++] = mxA;

    if (availableFlagLXB0 && (!availableFlagLXA0 || mxA.x != mxB.x || mxA.y != mxB.y))
        mvpcand_list[numMVPCandLX++] = mxB;

    // temporal motion vector prediction candidate
    if (numMVPCandLX < 2 && s->sh.slice_temporal_mvp_enabled_flag) {
        Mv mv_col;
        int available_col = temporal_luma_motion_vector(s, x0, y0, nPbW,
                                                        nPbH, ref_idx,
                                                        &mv_col, LX);
        if (available_col)
            mvpcand_list[numMVPCandLX++] = mv_col;
    }

    // insert zero motion vectors when the number of available candidates is less than 2
    while (numMVPCandLX < 2)
        mvpcand_list[numMVPCandLX++] = (Mv){ 0, 0 };

    mv->mv[LX].x = mvpcand_list[mvp_lx_flag].x;
    mv->mv[LX].y = mvpcand_list[mvp_lx_flag].y;
}
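
/*
 * Informative summary: the AMVP list built above contains at most two
 * candidates: one from the left neighbours (A0, A1), one from the above
 * neighbours (B0, B1, B2) if it differs from the left one, then the temporal
 * candidate, and zero vectors as padding. mvp_lx_flag selects which of the
 * two entries becomes the motion vector predictor for list LX.
 */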