/*
 * HEVC video decoder
 *
 * Copyright (C) 2012 - 2013 Guillaume Martres
 * Copyright (C) 2013 Seppo Tomperi
 * Copyright (C) 2013 Wassim Hamidouche
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/common.h"
#include "libavutil/internal.h"

#include "cabac_functions.h"
#include "golomb.h"
#include "hevc.h"

#include "bit_depth_template.c"

#define LUMA 0
#define CB 1
#define CR 2

static const uint8_t tctable[54] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, // QP  0...18
    1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, // QP 19...37
    5, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16, 18, 20, 22, 24  // QP 38...53
};
static const uint8_t betatable[52] = {
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  6,  7,  8, // QP 0...18
     9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, // QP 19...37
    38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64                      // QP 38...51
};
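
/* Derive the deblocking tc threshold for a chroma plane: apply the PPS
 * Cb/Cr QP offset, map the averaged luma QP through the 4:2:0 chroma QP
 * table, then look up tctable with the intra and slice tc offsets added. */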
static int chroma_tc(HEVCContext *s, int qp_y, int c_idx, int tc_offset)
{
    static const int qp_c[] = {
        29, 30, 31, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37
    };
    int qp, qp_i, offset, idxt;

    // slice qp offset is not used for deblocking
    if (c_idx == 1)
        offset = s->ps.pps->cb_qp_offset;
    else
        offset = s->ps.pps->cr_qp_offset;

    qp_i = av_clip(qp_y + offset, 0, 57);
    if (s->ps.sps->chroma_format_idc == 1) {
        if (qp_i < 30)
            qp = qp_i;
        else if (qp_i > 43)
            qp = qp_i - 6;
        else
            qp = qp_c[qp_i - 30];
    } else {
        qp = av_clip(qp_i, 0, 51);
    }

    idxt = av_clip(qp + DEFAULT_INTRA_TC_OFFSET + tc_offset, 0, 53);
    return tctable[idxt];
}
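
/* Predict the luma QP of the current quantization group from the QPs of the
 * coding blocks to its left and above, falling back to the previous QP (or
 * the slice QP for the first group) when a neighbour is unavailable. */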
static int get_qPy_pred(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
{
    HEVCLocalContext *lc     = s->HEVClc;
    int ctb_size_mask        = (1 << s->ps.sps->log2_ctb_size) - 1;
    int MinCuQpDeltaSizeMask = (1 << (s->ps.sps->log2_ctb_size -
                                      s->ps.pps->diff_cu_qp_delta_depth)) - 1;
    int xQgBase              = xBase - (xBase & MinCuQpDeltaSizeMask);
    int yQgBase              = yBase - (yBase & MinCuQpDeltaSizeMask);
    int min_cb_width         = s->ps.sps->min_cb_width;
    int x_cb                 = xQgBase >> s->ps.sps->log2_min_cb_size;
    int y_cb                 = yQgBase >> s->ps.sps->log2_min_cb_size;
    int availableA           = (xBase & ctb_size_mask) &&
                               (xQgBase & ctb_size_mask);
    int availableB           = (yBase & ctb_size_mask) &&
                               (yQgBase & ctb_size_mask);
    int qPy_pred, qPy_a, qPy_b;

    // qPy_pred
    if (lc->first_qp_group || (!xQgBase && !yQgBase)) {
        lc->first_qp_group = !lc->tu.is_cu_qp_delta_coded;
        qPy_pred = s->sh.slice_qp;
    } else {
        qPy_pred = lc->qPy_pred;
    }

    // qPy_a
    if (availableA == 0)
        qPy_a = qPy_pred;
    else
        qPy_a = s->qp_y_tab[(x_cb - 1) + y_cb * min_cb_width];

    // qPy_b
    if (availableB == 0)
        qPy_b = qPy_pred;
    else
        qPy_b = s->qp_y_tab[x_cb + (y_cb - 1) * min_cb_width];

    av_assert2(qPy_a >= -s->ps.sps->qp_bd_offset && qPy_a < 52);
    av_assert2(qPy_b >= -s->ps.sps->qp_bd_offset && qPy_b < 52);

    return (qPy_a + qPy_b + 1) >> 1;
}
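
/* Set the final luma QP of the current coding block: the predicted QP plus
 * the signalled cu_qp_delta, wrapped into the valid QP range. */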
void ff_hevc_set_qPy(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
{
    int qp_y = get_qPy_pred(s, xBase, yBase, log2_cb_size);

    if (s->HEVClc->tu.cu_qp_delta != 0) {
        int off = s->ps.sps->qp_bd_offset;
        s->HEVClc->qp_y = FFUMOD(qp_y + s->HEVClc->tu.cu_qp_delta + 52 + 2 * off,
                                 52 + off) - off;
    } else
        s->HEVClc->qp_y = qp_y;
}
static int get_qPy(HEVCContext *s, int xC, int yC)
{
    int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
    int x                = xC >> log2_min_cb_size;
    int y                = yC >> log2_min_cb_size;
    return s->qp_y_tab[x + y * s->ps.sps->min_cb_width];
}
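
/* Copy a rectangle of pixels, using aligned 16-byte copies when both buffers
 * and strides are 16-byte aligned and unaligned 8-byte copies otherwise. */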
static void copy_CTB(uint8_t *dst, const uint8_t *src, int width, int height,
                     intptr_t stride_dst, intptr_t stride_src)
{
    int i, j;

    if (((intptr_t)dst | (intptr_t)src | stride_dst | stride_src) & 15) {
        for (i = 0; i < height; i++) {
            for (j = 0; j < width; j += 8)
                AV_COPY64U(dst + j, src + j);
            dst += stride_dst;
            src += stride_src;
        }
    } else {
        for (i = 0; i < height; i++) {
            for (j = 0; j < width; j += 16)
                AV_COPY128(dst + j, src + j);
            dst += stride_dst;
            src += stride_src;
        }
    }
}
static void copy_pixel(uint8_t *dst, const uint8_t *src, int pixel_shift)
{
    if (pixel_shift)
        *(uint16_t *)dst = *(uint16_t *)src;
    else
        *dst = *src;
}
static void copy_vert(uint8_t *dst, const uint8_t *src,
                      int pixel_shift, int height,
                      int stride_dst, int stride_src)
{
    int i;
    if (pixel_shift == 0) {
        for (i = 0; i < height; i++) {
            *dst = *src;
            dst += stride_dst;
            src += stride_src;
        }
    } else {
        for (i = 0; i < height; i++) {
            *(uint16_t *)dst = *(uint16_t *)src;
            dst += stride_dst;
            src += stride_src;
        }
    }
}
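
/* Save the top/bottom rows and left/right columns of the CTB into the SAO
 * pixel buffers, so that SAO edge filtering of neighbouring CTBs can still
 * read the border samples as they were before this CTB is filtered. */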
static void copy_CTB_to_hv(HEVCContext *s, const uint8_t *src,
                           int stride_src, int x, int y, int width, int height,
                           int c_idx, int x_ctb, int y_ctb)
{
    int sh = s->ps.sps->pixel_shift;
    int w  = s->ps.sps->width >> s->ps.sps->hshift[c_idx];
    int h  = s->ps.sps->height >> s->ps.sps->vshift[c_idx];

    /* copy horizontal edges */
    memcpy(s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb) * w + x) << sh),
           src, width << sh);
    memcpy(s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb + 1) * w + x) << sh),
           src + stride_src * (height - 1), width << sh);

    /* copy vertical edges */
    copy_vert(s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb) * h + y) << sh), src, sh, height, 1 << sh, stride_src);

    copy_vert(s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb + 1) * h + y) << sh), src + ((width - 1) << sh), sh, height, 1 << sh, stride_src);
}
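
/* After SAO has been applied, copy the saved unfiltered samples of blocks
 * marked in is_pcm (PCM or transquant-bypass) back into the frame, since
 * those blocks must not be modified by the loop filters. */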
static void restore_tqb_pixels(HEVCContext *s,
                               uint8_t *src1, const uint8_t *dst1,
                               ptrdiff_t stride_src, ptrdiff_t stride_dst,
                               int x0, int y0, int width, int height, int c_idx)
{
    if (s->ps.pps->transquant_bypass_enable_flag ||
        (s->ps.sps->pcm.loop_filter_disable_flag && s->ps.sps->pcm_enabled_flag)) {
        int x, y;
        int min_pu_size = 1 << s->ps.sps->log2_min_pu_size;
        int hshift      = s->ps.sps->hshift[c_idx];
        int vshift      = s->ps.sps->vshift[c_idx];
        int x_min       = ((x0         ) >> s->ps.sps->log2_min_pu_size);
        int y_min       = ((y0         ) >> s->ps.sps->log2_min_pu_size);
        int x_max       = ((x0 + width ) >> s->ps.sps->log2_min_pu_size);
        int y_max       = ((y0 + height) >> s->ps.sps->log2_min_pu_size);
        int len         = (min_pu_size >> hshift) << s->ps.sps->pixel_shift;

        for (y = y_min; y < y_max; y++) {
            for (x = x_min; x < x_max; x++) {
                if (s->is_pcm[y * s->ps.sps->min_pu_width + x]) {
                    int n;
                    uint8_t *src = src1 + (((y << s->ps.sps->log2_min_pu_size) - y0) >> vshift) * stride_src + ((((x << s->ps.sps->log2_min_pu_size) - x0) >> hshift) << s->ps.sps->pixel_shift);
                    const uint8_t *dst = dst1 + (((y << s->ps.sps->log2_min_pu_size) - y0) >> vshift) * stride_dst + ((((x << s->ps.sps->log2_min_pu_size) - x0) >> hshift) << s->ps.sps->pixel_shift);

                    for (n = 0; n < (min_pu_size >> vshift); n++) {
                        memcpy(src, dst, len);
                        src += stride_src;
                        dst += stride_dst;
                    }
                }
            }
        }
    }
}
#define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
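
/* Apply SAO (band or edge offset) to one CTB, plane by plane. Edge offset
 * needs a one-pixel border of pre-SAO neighbours, assembled in a temporary
 * buffer from the saved horizontal/vertical edge buffers; filtering is
 * suppressed across slice and tile boundaries when the corresponding
 * loop-filter flags disable it. */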
static void sao_filter_CTB(HEVCContext *s, int x, int y)
{
    static const uint8_t sao_tab[8] = { 0, 1, 2, 2, 3, 3, 4, 4 };
    HEVCLocalContext *lc = s->HEVClc;
    int c_idx;
    int edges[4];  // 0 left 1 top 2 right 3 bottom
    int x_ctb                = x >> s->ps.sps->log2_ctb_size;
    int y_ctb                = y >> s->ps.sps->log2_ctb_size;
    int ctb_addr_rs          = y_ctb * s->ps.sps->ctb_width + x_ctb;
    int ctb_addr_ts          = s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
    SAOParams *sao           = &CTB(s->sao, x_ctb, y_ctb);
    // flags indicating unfilterable edges
    uint8_t vert_edge[]      = { 0, 0 };
    uint8_t horiz_edge[]     = { 0, 0 };
    uint8_t diag_edge[]      = { 0, 0, 0, 0 };
    uint8_t lfase            = CTB(s->filter_slice_edges, x_ctb, y_ctb);
    uint8_t no_tile_filter   = s->ps.pps->tiles_enabled_flag &&
                               !s->ps.pps->loop_filter_across_tiles_enabled_flag;
    uint8_t restore          = no_tile_filter || !lfase;
    uint8_t left_tile_edge   = 0;
    uint8_t right_tile_edge  = 0;
    uint8_t up_tile_edge     = 0;
    uint8_t bottom_tile_edge = 0;

    edges[0] = x_ctb == 0;
    edges[1] = y_ctb == 0;
    edges[2] = x_ctb == s->ps.sps->ctb_width  - 1;
    edges[3] = y_ctb == s->ps.sps->ctb_height - 1;

    if (restore) {
        if (!edges[0]) {
            left_tile_edge = no_tile_filter && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1]];
            vert_edge[0]   = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb - 1, y_ctb)) || left_tile_edge;
        }
        if (!edges[2]) {
            right_tile_edge = no_tile_filter && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1]];
            vert_edge[1]    = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb + 1, y_ctb)) || right_tile_edge;
        }
        if (!edges[1]) {
            up_tile_edge  = no_tile_filter && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]];
            horiz_edge[0] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb, y_ctb - 1)) || up_tile_edge;
        }
        if (!edges[3]) {
            bottom_tile_edge = no_tile_filter && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs + s->ps.sps->ctb_width]];
            horiz_edge[1]    = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb, y_ctb + 1)) || bottom_tile_edge;
        }
        if (!edges[0] && !edges[1]) {
            diag_edge[0] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb - 1, y_ctb - 1)) || left_tile_edge || up_tile_edge;
        }
        if (!edges[1] && !edges[2]) {
            diag_edge[1] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb + 1, y_ctb - 1)) || right_tile_edge || up_tile_edge;
        }
        if (!edges[2] && !edges[3]) {
            diag_edge[2] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb + 1, y_ctb + 1)) || right_tile_edge || bottom_tile_edge;
        }
        if (!edges[0] && !edges[3]) {
            diag_edge[3] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb - 1, y_ctb + 1)) || left_tile_edge || bottom_tile_edge;
        }
    }

    for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
        int x0         = x >> s->ps.sps->hshift[c_idx];
        int y0         = y >> s->ps.sps->vshift[c_idx];
        int stride_src = s->frame->linesize[c_idx];
        int ctb_size_h = (1 << (s->ps.sps->log2_ctb_size)) >> s->ps.sps->hshift[c_idx];
        int ctb_size_v = (1 << (s->ps.sps->log2_ctb_size)) >> s->ps.sps->vshift[c_idx];
        int width      = FFMIN(ctb_size_h, (s->ps.sps->width  >> s->ps.sps->hshift[c_idx]) - x0);
        int height     = FFMIN(ctb_size_v, (s->ps.sps->height >> s->ps.sps->vshift[c_idx]) - y0);
        int tab        = sao_tab[(FFALIGN(width, 8) >> 3) - 1];
        uint8_t *src   = &s->frame->data[c_idx][y0 * stride_src + (x0 << s->ps.sps->pixel_shift)];
        int stride_dst;
        uint8_t *dst;

        switch (sao->type_idx[c_idx]) {
        case SAO_BAND:
            copy_CTB_to_hv(s, src, stride_src, x0, y0, width, height, c_idx,
                           x_ctb, y_ctb);
            if (s->ps.pps->transquant_bypass_enable_flag ||
                (s->ps.sps->pcm.loop_filter_disable_flag && s->ps.sps->pcm_enabled_flag)) {
                dst = lc->edge_emu_buffer;
                stride_dst = 2*MAX_PB_SIZE;
                copy_CTB(dst, src, width << s->ps.sps->pixel_shift, height, stride_dst, stride_src);
                s->hevcdsp.sao_band_filter[tab](src, dst, stride_src, stride_dst,
                                                sao->offset_val[c_idx], sao->band_position[c_idx],
                                                width, height);
                restore_tqb_pixels(s, src, dst, stride_src, stride_dst,
                                   x, y, width, height, c_idx);
            } else {
                s->hevcdsp.sao_band_filter[tab](src, src, stride_src, stride_src,
                                                sao->offset_val[c_idx], sao->band_position[c_idx],
                                                width, height);
            }
            sao->type_idx[c_idx] = SAO_APPLIED;
            break;
        case SAO_EDGE:
        {
            int w = s->ps.sps->width >> s->ps.sps->hshift[c_idx];
            int h = s->ps.sps->height >> s->ps.sps->vshift[c_idx];
            int left_edge   = edges[0];
            int top_edge    = edges[1];
            int right_edge  = edges[2];
            int bottom_edge = edges[3];
            int sh = s->ps.sps->pixel_shift;
            int left_pixels, right_pixels;

            stride_dst = 2*MAX_PB_SIZE + AV_INPUT_BUFFER_PADDING_SIZE;
            dst = lc->edge_emu_buffer + stride_dst + AV_INPUT_BUFFER_PADDING_SIZE;

            if (!top_edge) {
                int left  = 1 - left_edge;
                int right = 1 - right_edge;
                const uint8_t *src1[2];
                uint8_t *dst1;
                int src_idx, pos;

                dst1    = dst - stride_dst - (left << sh);
                src1[0] = src - stride_src - (left << sh);
                src1[1] = s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb - 1) * w + x0 - left) << sh);
                pos = 0;
                if (left) {
                    src_idx = (CTB(s->sao, x_ctb-1, y_ctb-1).type_idx[c_idx] ==
                               SAO_APPLIED);
                    copy_pixel(dst1, src1[src_idx], sh);
                    pos += (1 << sh);
                }
                src_idx = (CTB(s->sao, x_ctb, y_ctb-1).type_idx[c_idx] ==
                           SAO_APPLIED);
                memcpy(dst1 + pos, src1[src_idx] + pos, width << sh);
                if (right) {
                    pos += width << sh;
                    src_idx = (CTB(s->sao, x_ctb+1, y_ctb-1).type_idx[c_idx] ==
                               SAO_APPLIED);
                    copy_pixel(dst1 + pos, src1[src_idx] + pos, sh);
                }
            }
            if (!bottom_edge) {
                int left  = 1 - left_edge;
                int right = 1 - right_edge;
                const uint8_t *src1[2];
                uint8_t *dst1;
                int src_idx, pos;

                dst1    = dst + height * stride_dst - (left << sh);
                src1[0] = src + height * stride_src - (left << sh);
                src1[1] = s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb + 2) * w + x0 - left) << sh);
                pos = 0;
                if (left) {
                    src_idx = (CTB(s->sao, x_ctb-1, y_ctb+1).type_idx[c_idx] ==
                               SAO_APPLIED);
                    copy_pixel(dst1, src1[src_idx], sh);
                    pos += (1 << sh);
                }
                src_idx = (CTB(s->sao, x_ctb, y_ctb+1).type_idx[c_idx] ==
                           SAO_APPLIED);
                memcpy(dst1 + pos, src1[src_idx] + pos, width << sh);
                if (right) {
                    pos += width << sh;
                    src_idx = (CTB(s->sao, x_ctb+1, y_ctb+1).type_idx[c_idx] ==
                               SAO_APPLIED);
                    copy_pixel(dst1 + pos, src1[src_idx] + pos, sh);
                }
            }
            left_pixels = 0;
            if (!left_edge) {
                if (CTB(s->sao, x_ctb-1, y_ctb).type_idx[c_idx] == SAO_APPLIED) {
                    copy_vert(dst - (1 << sh),
                              s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb - 1) * h + y0) << sh),
                              sh, height, stride_dst, 1 << sh);
                } else {
                    left_pixels = 1;
                }
            }
            right_pixels = 0;
            if (!right_edge) {
                if (CTB(s->sao, x_ctb+1, y_ctb).type_idx[c_idx] == SAO_APPLIED) {
                    copy_vert(dst + (width << sh),
                              s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb + 2) * h + y0) << sh),
                              sh, height, stride_dst, 1 << sh);
                } else {
                    right_pixels = 1;
                }
            }

            copy_CTB(dst - (left_pixels << sh),
                     src - (left_pixels << sh),
                     (width + left_pixels + right_pixels) << sh,
                     height, stride_dst, stride_src);

            copy_CTB_to_hv(s, src, stride_src, x0, y0, width, height, c_idx,
                           x_ctb, y_ctb);
            s->hevcdsp.sao_edge_filter[tab](src, dst, stride_src, sao->offset_val[c_idx],
                                            sao->eo_class[c_idx], width, height);
            s->hevcdsp.sao_edge_restore[restore](src, dst,
                                                 stride_src, stride_dst,
                                                 sao,
                                                 edges, width,
                                                 height, c_idx,
                                                 vert_edge,
                                                 horiz_edge,
                                                 diag_edge);
            restore_tqb_pixels(s, src, dst, stride_src, stride_dst,
                               x, y, width, height, c_idx);
            sao->type_idx[c_idx] = SAO_APPLIED;
            break;
        }
        }
    }
}
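
/* Return the is_pcm flag of the minimum PU containing (x, y); positions
 * outside the picture are reported as 2. */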
static int get_pcm(HEVCContext *s, int x, int y)
{
    int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
    int x_pu, y_pu;

    if (x < 0 || y < 0)
        return 2;

    x_pu = x >> log2_min_pu_size;
    y_pu = y >> log2_min_pu_size;

    if (x_pu >= s->ps.sps->min_pu_width || y_pu >= s->ps.sps->min_pu_height)
        return 2;
    return s->is_pcm[y_pu * s->ps.sps->min_pu_width + x_pu];
}
#define TC_CALC(qp, bs)                                                 \
    tctable[av_clip((qp) + DEFAULT_INTRA_TC_OFFSET * ((bs) - 1) +       \
                    (tc_offset >> 1 << 1),                              \
                    0, MAX_QP + DEFAULT_INTRA_TC_OFFSET)]
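
/* Deblock one CTB: filter vertical then horizontal luma edges on the 8x8
 * deblocking grid, then the chroma edges (only where the boundary strength
 * is 2), skipping samples that belong to unfiltered PCM or lossless blocks. */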
static void deblocking_filter_CTB(HEVCContext *s, int x0, int y0)
{
    uint8_t *src;
    int x, y;
    int chroma, beta;
    int32_t c_tc[2], tc[2];
    uint8_t no_p[2] = { 0 };
    uint8_t no_q[2] = { 0 };

    int log2_ctb_size   = s->ps.sps->log2_ctb_size;
    int x_end, x_end2, y_end;
    int ctb_size        = 1 << log2_ctb_size;
    int ctb             = (x0 >> log2_ctb_size) +
                          (y0 >> log2_ctb_size) * s->ps.sps->ctb_width;
    int cur_tc_offset   = s->deblock[ctb].tc_offset;
    int cur_beta_offset = s->deblock[ctb].beta_offset;
    int left_tc_offset, left_beta_offset;
    int tc_offset, beta_offset;
    int pcmf = (s->ps.sps->pcm_enabled_flag &&
                s->ps.sps->pcm.loop_filter_disable_flag) ||
               s->ps.pps->transquant_bypass_enable_flag;

    if (x0) {
        left_tc_offset   = s->deblock[ctb - 1].tc_offset;
        left_beta_offset = s->deblock[ctb - 1].beta_offset;
    } else {
        left_tc_offset   = 0;
        left_beta_offset = 0;
    }

    x_end = x0 + ctb_size;
    if (x_end > s->ps.sps->width)
        x_end = s->ps.sps->width;
    y_end = y0 + ctb_size;
    if (y_end > s->ps.sps->height)
        y_end = s->ps.sps->height;

    tc_offset   = cur_tc_offset;
    beta_offset = cur_beta_offset;

    x_end2 = x_end;
    if (x_end2 != s->ps.sps->width)
        x_end2 -= 8;
    for (y = y0; y < y_end; y += 8) {
        // vertical filtering luma
        for (x = x0 ? x0 : 8; x < x_end; x += 8) {
            const int bs0 = s->vertical_bs[(x + y * s->bs_width) >> 2];
            const int bs1 = s->vertical_bs[(x + (y + 4) * s->bs_width) >> 2];
            if (bs0 || bs1) {
                const int qp = (get_qPy(s, x - 1, y) + get_qPy(s, x, y) + 1) >> 1;

                beta = betatable[av_clip(qp + beta_offset, 0, MAX_QP)];

                tc[0] = bs0 ? TC_CALC(qp, bs0) : 0;
                tc[1] = bs1 ? TC_CALC(qp, bs1) : 0;
                src = &s->frame->data[LUMA][y * s->frame->linesize[LUMA] + (x << s->ps.sps->pixel_shift)];
                if (pcmf) {
                    no_p[0] = get_pcm(s, x - 1, y);
                    no_p[1] = get_pcm(s, x - 1, y + 4);
                    no_q[0] = get_pcm(s, x, y);
                    no_q[1] = get_pcm(s, x, y + 4);
                    s->hevcdsp.hevc_v_loop_filter_luma_c(src,
                                                         s->frame->linesize[LUMA],
                                                         beta, tc, no_p, no_q);
                } else
                    s->hevcdsp.hevc_v_loop_filter_luma(src,
                                                       s->frame->linesize[LUMA],
                                                       beta, tc, no_p, no_q);
            }
        }

        if (!y)
            continue;

        // horizontal filtering luma
        for (x = x0 ? x0 - 8 : 0; x < x_end2; x += 8) {
            const int bs0 = s->horizontal_bs[( x      + y * s->bs_width) >> 2];
            const int bs1 = s->horizontal_bs[((x + 4) + y * s->bs_width) >> 2];
            if (bs0 || bs1) {
                const int qp = (get_qPy(s, x, y - 1) + get_qPy(s, x, y) + 1) >> 1;

                tc_offset   = x >= x0 ? cur_tc_offset : left_tc_offset;
                beta_offset = x >= x0 ? cur_beta_offset : left_beta_offset;

                beta  = betatable[av_clip(qp + beta_offset, 0, MAX_QP)];
                tc[0] = bs0 ? TC_CALC(qp, bs0) : 0;
                tc[1] = bs1 ? TC_CALC(qp, bs1) : 0;
                src = &s->frame->data[LUMA][y * s->frame->linesize[LUMA] + (x << s->ps.sps->pixel_shift)];
                if (pcmf) {
                    no_p[0] = get_pcm(s, x, y - 1);
                    no_p[1] = get_pcm(s, x + 4, y - 1);
                    no_q[0] = get_pcm(s, x, y);
                    no_q[1] = get_pcm(s, x + 4, y);
                    s->hevcdsp.hevc_h_loop_filter_luma_c(src,
                                                         s->frame->linesize[LUMA],
                                                         beta, tc, no_p, no_q);
                } else
                    s->hevcdsp.hevc_h_loop_filter_luma(src,
                                                       s->frame->linesize[LUMA],
                                                       beta, tc, no_p, no_q);
            }
        }
    }

    if (s->ps.sps->chroma_format_idc) {
        for (chroma = 1; chroma <= 2; chroma++) {
            int h = 1 << s->ps.sps->hshift[chroma];
            int v = 1 << s->ps.sps->vshift[chroma];

            // vertical filtering chroma
            for (y = y0; y < y_end; y += (8 * v)) {
                for (x = x0 ? x0 : 8 * h; x < x_end; x += (8 * h)) {
                    const int bs0 = s->vertical_bs[(x + y * s->bs_width) >> 2];
                    const int bs1 = s->vertical_bs[(x + (y + (4 * v)) * s->bs_width) >> 2];

                    if ((bs0 == 2) || (bs1 == 2)) {
                        const int qp0 = (get_qPy(s, x - 1, y)           + get_qPy(s, x, y)           + 1) >> 1;
                        const int qp1 = (get_qPy(s, x - 1, y + (4 * v)) + get_qPy(s, x, y + (4 * v)) + 1) >> 1;

                        c_tc[0] = (bs0 == 2) ? chroma_tc(s, qp0, chroma, tc_offset) : 0;
                        c_tc[1] = (bs1 == 2) ? chroma_tc(s, qp1, chroma, tc_offset) : 0;
                        src = &s->frame->data[chroma][(y >> s->ps.sps->vshift[chroma]) * s->frame->linesize[chroma] + ((x >> s->ps.sps->hshift[chroma]) << s->ps.sps->pixel_shift)];
                        if (pcmf) {
                            no_p[0] = get_pcm(s, x - 1, y);
                            no_p[1] = get_pcm(s, x - 1, y + (4 * v));
                            no_q[0] = get_pcm(s, x, y);
                            no_q[1] = get_pcm(s, x, y + (4 * v));
                            s->hevcdsp.hevc_v_loop_filter_chroma_c(src,
                                                                   s->frame->linesize[chroma],
                                                                   c_tc, no_p, no_q);
                        } else
                            s->hevcdsp.hevc_v_loop_filter_chroma(src,
                                                                 s->frame->linesize[chroma],
                                                                 c_tc, no_p, no_q);
                    }
                }

                if (!y)
                    continue;

                // horizontal filtering chroma
                tc_offset = x0 ? left_tc_offset : cur_tc_offset;
                x_end2 = x_end;
                if (x_end != s->ps.sps->width)
                    x_end2 = x_end - 8 * h;
                for (x = x0 ? x0 - 8 * h : 0; x < x_end2; x += (8 * h)) {
                    const int bs0 = s->horizontal_bs[( x          + y * s->bs_width) >> 2];
                    const int bs1 = s->horizontal_bs[((x + 4 * h) + y * s->bs_width) >> 2];
                    if ((bs0 == 2) || (bs1 == 2)) {
                        const int qp0 = bs0 == 2 ? (get_qPy(s, x,           y - 1) + get_qPy(s, x,           y) + 1) >> 1 : 0;
                        const int qp1 = bs1 == 2 ? (get_qPy(s, x + (4 * h), y - 1) + get_qPy(s, x + (4 * h), y) + 1) >> 1 : 0;

                        c_tc[0] = bs0 == 2 ? chroma_tc(s, qp0, chroma, tc_offset)     : 0;
                        c_tc[1] = bs1 == 2 ? chroma_tc(s, qp1, chroma, cur_tc_offset) : 0;
                        src = &s->frame->data[chroma][(y >> s->ps.sps->vshift[1]) * s->frame->linesize[chroma] + ((x >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
                        if (pcmf) {
                            no_p[0] = get_pcm(s, x, y - 1);
                            no_p[1] = get_pcm(s, x + (4 * h), y - 1);
                            no_q[0] = get_pcm(s, x, y);
                            no_q[1] = get_pcm(s, x + (4 * h), y);
                            s->hevcdsp.hevc_h_loop_filter_chroma_c(src,
                                                                   s->frame->linesize[chroma],
                                                                   c_tc, no_p, no_q);
                        } else
                            s->hevcdsp.hevc_h_loop_filter_chroma(src,
                                                                 s->frame->linesize[chroma],
                                                                 c_tc, no_p, no_q);
                    }
                }
            }
        }
    }
}
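
/* Compute the boundary strength (0 or 1) between two inter-predicted blocks,
 * based on whether they use the same reference pictures and whether any
 * motion vector component differs by at least one integer sample (4 in
 * quarter-pel units). The intra and coded-residual cases (bs = 2 or 1) are
 * handled by the caller. */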
static int boundary_strength(HEVCContext *s, MvField *curr, MvField *neigh,
                             RefPicList *neigh_refPicList)
{
    if (curr->pred_flag == PF_BI && neigh->pred_flag == PF_BI) {
        // same L0 and L1
        if (s->ref->refPicList[0].list[curr->ref_idx[0]] == neigh_refPicList[0].list[neigh->ref_idx[0]]  &&
            s->ref->refPicList[0].list[curr->ref_idx[0]] == s->ref->refPicList[1].list[curr->ref_idx[1]] &&
            neigh_refPicList[0].list[neigh->ref_idx[0]] == neigh_refPicList[1].list[neigh->ref_idx[1]]) {
            if ((FFABS(neigh->mv[0].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[0].y) >= 4 ||
                 FFABS(neigh->mv[1].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[1].y) >= 4) &&
                (FFABS(neigh->mv[1].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[0].y) >= 4 ||
                 FFABS(neigh->mv[0].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[1].y) >= 4))
                return 1;
            else
                return 0;
        } else if (neigh_refPicList[0].list[neigh->ref_idx[0]] == s->ref->refPicList[0].list[curr->ref_idx[0]] &&
                   neigh_refPicList[1].list[neigh->ref_idx[1]] == s->ref->refPicList[1].list[curr->ref_idx[1]]) {
            if (FFABS(neigh->mv[0].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[0].y) >= 4 ||
                FFABS(neigh->mv[1].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[1].y) >= 4)
                return 1;
            else
                return 0;
        } else if (neigh_refPicList[1].list[neigh->ref_idx[1]] == s->ref->refPicList[0].list[curr->ref_idx[0]] &&
                   neigh_refPicList[0].list[neigh->ref_idx[0]] == s->ref->refPicList[1].list[curr->ref_idx[1]]) {
            if (FFABS(neigh->mv[1].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[0].y) >= 4 ||
                FFABS(neigh->mv[0].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[1].y) >= 4)
                return 1;
            else
                return 0;
        } else {
            return 1;
        }
    } else if ((curr->pred_flag != PF_BI) && (neigh->pred_flag != PF_BI)) { // 1 MV
        Mv A, B;
        int ref_A, ref_B;

        if (curr->pred_flag & 1) {
            A     = curr->mv[0];
            ref_A = s->ref->refPicList[0].list[curr->ref_idx[0]];
        } else {
            A     = curr->mv[1];
            ref_A = s->ref->refPicList[1].list[curr->ref_idx[1]];
        }

        if (neigh->pred_flag & 1) {
            B     = neigh->mv[0];
            ref_B = neigh_refPicList[0].list[neigh->ref_idx[0]];
        } else {
            B     = neigh->mv[1];
            ref_B = neigh_refPicList[1].list[neigh->ref_idx[1]];
        }

        if (ref_A == ref_B) {
            if (FFABS(A.x - B.x) >= 4 || FFABS(A.y - B.y) >= 4)
                return 1;
            else
                return 0;
        } else
            return 1;
    }

    return 1;
}
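
/* Compute and store the deblocking boundary strengths for the top and left
 * edges of a transform block, and for the PU boundaries inside it when the
 * block is inter-coded and larger than the minimum PU size. */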
void ff_hevc_deblocking_boundary_strengths(HEVCContext *s, int x0, int y0,
                                           int log2_trafo_size)
{
    HEVCLocalContext *lc = s->HEVClc;
    MvField *tab_mvf     = s->ref->tab_mvf;
    int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
    int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
    int min_pu_width     = s->ps.sps->min_pu_width;
    int min_tu_width     = s->ps.sps->min_tb_width;
    int is_intra = tab_mvf[(y0 >> log2_min_pu_size) * min_pu_width +
                           (x0 >> log2_min_pu_size)].pred_flag == PF_INTRA;
    int boundary_upper, boundary_left;
    int i, j, bs;

    boundary_upper = y0 > 0 && !(y0 & 7);
    if (boundary_upper &&
        ((!s->sh.slice_loop_filter_across_slices_enabled_flag &&
          lc->boundary_flags & BOUNDARY_UPPER_SLICE &&
          (y0 % (1 << s->ps.sps->log2_ctb_size)) == 0) ||
         (!s->ps.pps->loop_filter_across_tiles_enabled_flag &&
          lc->boundary_flags & BOUNDARY_UPPER_TILE &&
          (y0 % (1 << s->ps.sps->log2_ctb_size)) == 0)))
        boundary_upper = 0;

    if (boundary_upper) {
        RefPicList *rpl_top = (lc->boundary_flags & BOUNDARY_UPPER_SLICE) ?
                              ff_hevc_get_ref_list(s, s->ref, x0, y0 - 1) :
                              s->ref->refPicList;
        int yp_pu = (y0 - 1) >> log2_min_pu_size;
        int yq_pu =  y0      >> log2_min_pu_size;
        int yp_tu = (y0 - 1) >> log2_min_tu_size;
        int yq_tu =  y0      >> log2_min_tu_size;

        for (i = 0; i < (1 << log2_trafo_size); i += 4) {
            int x_pu = (x0 + i) >> log2_min_pu_size;
            int x_tu = (x0 + i) >> log2_min_tu_size;
            MvField *top  = &tab_mvf[yp_pu * min_pu_width + x_pu];
            MvField *curr = &tab_mvf[yq_pu * min_pu_width + x_pu];
            uint8_t top_cbf_luma  = s->cbf_luma[yp_tu * min_tu_width + x_tu];
            uint8_t curr_cbf_luma = s->cbf_luma[yq_tu * min_tu_width + x_tu];

            if (curr->pred_flag == PF_INTRA || top->pred_flag == PF_INTRA)
                bs = 2;
            else if (curr_cbf_luma || top_cbf_luma)
                bs = 1;
            else
                bs = boundary_strength(s, curr, top, rpl_top);
            s->horizontal_bs[((x0 + i) + y0 * s->bs_width) >> 2] = bs;
        }
    }

    // bs for vertical TU boundaries
    boundary_left = x0 > 0 && !(x0 & 7);
    if (boundary_left &&
        ((!s->sh.slice_loop_filter_across_slices_enabled_flag &&
          lc->boundary_flags & BOUNDARY_LEFT_SLICE &&
          (x0 % (1 << s->ps.sps->log2_ctb_size)) == 0) ||
         (!s->ps.pps->loop_filter_across_tiles_enabled_flag &&
          lc->boundary_flags & BOUNDARY_LEFT_TILE &&
          (x0 % (1 << s->ps.sps->log2_ctb_size)) == 0)))
        boundary_left = 0;

    if (boundary_left) {
        RefPicList *rpl_left = (lc->boundary_flags & BOUNDARY_LEFT_SLICE) ?
                               ff_hevc_get_ref_list(s, s->ref, x0 - 1, y0) :
                               s->ref->refPicList;
        int xp_pu = (x0 - 1) >> log2_min_pu_size;
        int xq_pu =  x0      >> log2_min_pu_size;
        int xp_tu = (x0 - 1) >> log2_min_tu_size;
        int xq_tu =  x0      >> log2_min_tu_size;

        for (i = 0; i < (1 << log2_trafo_size); i += 4) {
            int y_pu = (y0 + i) >> log2_min_pu_size;
            int y_tu = (y0 + i) >> log2_min_tu_size;
            MvField *left = &tab_mvf[y_pu * min_pu_width + xp_pu];
            MvField *curr = &tab_mvf[y_pu * min_pu_width + xq_pu];
            uint8_t left_cbf_luma = s->cbf_luma[y_tu * min_tu_width + xp_tu];
            uint8_t curr_cbf_luma = s->cbf_luma[y_tu * min_tu_width + xq_tu];

            if (curr->pred_flag == PF_INTRA || left->pred_flag == PF_INTRA)
                bs = 2;
            else if (curr_cbf_luma || left_cbf_luma)
                bs = 1;
            else
                bs = boundary_strength(s, curr, left, rpl_left);
            s->vertical_bs[(x0 + (y0 + i) * s->bs_width) >> 2] = bs;
        }
    }

    if (log2_trafo_size > log2_min_pu_size && !is_intra) {
        RefPicList *rpl = s->ref->refPicList;

        // bs for TU internal horizontal PU boundaries
        for (j = 8; j < (1 << log2_trafo_size); j += 8) {
            int yp_pu = (y0 + j - 1) >> log2_min_pu_size;
            int yq_pu = (y0 + j)     >> log2_min_pu_size;

            for (i = 0; i < (1 << log2_trafo_size); i += 4) {
                int x_pu = (x0 + i) >> log2_min_pu_size;
                MvField *top  = &tab_mvf[yp_pu * min_pu_width + x_pu];
                MvField *curr = &tab_mvf[yq_pu * min_pu_width + x_pu];

                bs = boundary_strength(s, curr, top, rpl);
                s->horizontal_bs[((x0 + i) + (y0 + j) * s->bs_width) >> 2] = bs;
            }
        }

        // bs for TU internal vertical PU boundaries
        for (j = 0; j < (1 << log2_trafo_size); j += 4) {
            int y_pu = (y0 + j) >> log2_min_pu_size;

            for (i = 8; i < (1 << log2_trafo_size); i += 8) {
                int xp_pu = (x0 + i - 1) >> log2_min_pu_size;
                int xq_pu = (x0 + i)     >> log2_min_pu_size;
                MvField *left = &tab_mvf[y_pu * min_pu_width + xp_pu];
                MvField *curr = &tab_mvf[y_pu * min_pu_width + xq_pu];

                bs = boundary_strength(s, curr, left, rpl);
                s->vertical_bs[((x0 + i) + (y0 + j) * s->bs_width) >> 2] = bs;
            }
        }
    }
}
#undef LUMA
#undef CB
#undef CR
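
/* Run the loop filters for the CTB at (x, y): deblock it (unless loop
 * filtering is discarded via skip_loop_filter), apply SAO to the CTBs whose
 * surrounding samples are now final, and report row progress to frame
 * threads. */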
void ff_hevc_hls_filter(HEVCContext *s, int x, int y, int ctb_size)
{
    int x_end = x >= s->ps.sps->width - ctb_size;

    if (s->avctx->skip_loop_filter < AVDISCARD_ALL)
        deblocking_filter_CTB(s, x, y);
    if (s->ps.sps->sao_enabled) {
        int y_end = y >= s->ps.sps->height - ctb_size;
        if (y && x)
            sao_filter_CTB(s, x - ctb_size, y - ctb_size);
        if (x && y_end)
            sao_filter_CTB(s, x - ctb_size, y);
        if (y && x_end) {
            sao_filter_CTB(s, x, y - ctb_size);
            if (s->threads_type & FF_THREAD_FRAME)
                ff_thread_report_progress(&s->ref->tf, y, 0);
        }
        if (x_end && y_end) {
            sao_filter_CTB(s, x, y);
            if (s->threads_type & FF_THREAD_FRAME)
                ff_thread_report_progress(&s->ref->tf, y + ctb_size, 0);
        }
    } else if (s->threads_type & FF_THREAD_FRAME && x_end)
        ff_thread_report_progress(&s->ref->tf, y + ctb_size - 4, 0);
}
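
/* Run the filters for the already reconstructed CTBs to the left of and
 * above the current CTB position. */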
void ff_hevc_hls_filters(HEVCContext *s, int x_ctb, int y_ctb, int ctb_size)
{
    int x_end = x_ctb >= s->ps.sps->width  - ctb_size;
    int y_end = y_ctb >= s->ps.sps->height - ctb_size;
    if (y_ctb && x_ctb)
        ff_hevc_hls_filter(s, x_ctb - ctb_size, y_ctb - ctb_size, ctb_size);
    if (y_ctb && x_end)
        ff_hevc_hls_filter(s, x_ctb, y_ctb - ctb_size, ctb_size);
    if (x_ctb && y_end)
        ff_hevc_hls_filter(s, x_ctb - ctb_size, y_ctb, ctb_size);
}