You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

878 lines
38KB

  1. /*
  2. * HEVC video decoder
  3. *
  4. * Copyright (C) 2012 - 2013 Guillaume Martres
  5. * Copyright (C) 2013 Seppo Tomperi
  6. * Copyright (C) 2013 Wassim Hamidouche
  7. *
  8. * This file is part of FFmpeg.
  9. *
  10. * FFmpeg is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU Lesser General Public
  12. * License as published by the Free Software Foundation; either
  13. * version 2.1 of the License, or (at your option) any later version.
  14. *
  15. * FFmpeg is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * Lesser General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU Lesser General Public
  21. * License along with FFmpeg; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  23. */
  24. #include "libavutil/common.h"
  25. #include "libavutil/internal.h"
  26. #include "cabac_functions.h"
  27. #include "hevc.h"
  28. #include "bit_depth_template.c"
  29. #define LUMA 0
  30. #define CB 1
  31. #define CR 2
/* Deblocking tc' lookup table, indexed by the clipped QP-derived index
 * computed in TC_CALC() / chroma_tc() (range 0..53). */
static const uint8_t tctable[54] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,               // QP 0...18
    1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,               // QP 19...37
    5, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16, 18, 20, 22, 24                // QP 38...53
};

/* Deblocking beta' lookup table, indexed by av_clip(qp + beta_offset, 0, MAX_QP)
 * in deblocking_filter_CTB() (range 0..51). */
static const uint8_t betatable[52] = {
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  6,  7,  8, // QP 0...18
     9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, // QP 19...37
    38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64                      // QP 38...51
};
  42. static int chroma_tc(HEVCContext *s, int qp_y, int c_idx, int tc_offset)
  43. {
  44. static const int qp_c[] = {
  45. 29, 30, 31, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37
  46. };
  47. int qp, qp_i, offset, idxt;
  48. // slice qp offset is not used for deblocking
  49. if (c_idx == 1)
  50. offset = s->ps.pps->cb_qp_offset;
  51. else
  52. offset = s->ps.pps->cr_qp_offset;
  53. qp_i = av_clip(qp_y + offset, 0, 57);
  54. if (s->ps.sps->chroma_format_idc == 1) {
  55. if (qp_i < 30)
  56. qp = qp_i;
  57. else if (qp_i > 43)
  58. qp = qp_i - 6;
  59. else
  60. qp = qp_c[qp_i - 30];
  61. } else {
  62. qp = av_clip(qp_i, 0, 51);
  63. }
  64. idxt = av_clip(qp + DEFAULT_INTRA_TC_OFFSET + tc_offset, 0, 53);
  65. return tctable[idxt];
  66. }
/* Predict the luma QP for the quantization group containing (xBase, yBase):
 * average of the QPs of the left (A) and above (B) neighbouring minimum CBs,
 * where a neighbour outside the current CTB is replaced by the previous QP
 * in decoding order (or the slice QP for the first quantization group). */
static int get_qPy_pred(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
{
    HEVCLocalContext *lc     = s->HEVClc;
    int ctb_size_mask        = (1 << s->ps.sps->log2_ctb_size) - 1;
    int MinCuQpDeltaSizeMask = (1 << (s->ps.sps->log2_ctb_size -
                                      s->ps.pps->diff_cu_qp_delta_depth)) - 1;
    /* top-left corner of the quantization group containing (xBase, yBase) */
    int xQgBase              = xBase - (xBase & MinCuQpDeltaSizeMask);
    int yQgBase              = yBase - (yBase & MinCuQpDeltaSizeMask);
    int min_cb_width         = s->ps.sps->min_cb_width;
    int x_cb                 = xQgBase >> s->ps.sps->log2_min_cb_size;
    int y_cb                 = yQgBase >> s->ps.sps->log2_min_cb_size;
    /* neighbour is usable only when it lies inside the same CTB
     * (both the base position and the QG corner must not sit on the
     * CTB boundary in the relevant direction) */
    int availableA           = (xBase   & ctb_size_mask) &&
                               (xQgBase & ctb_size_mask);
    int availableB           = (yBase   & ctb_size_mask) &&
                               (yQgBase & ctb_size_mask);
    int qPy_pred, qPy_a, qPy_b;

    // qPy_pred: first QP group uses the slice QP; note the side effect on
    // first_qp_group, cleared once a cu_qp_delta has been coded
    if (lc->first_qp_group || (!xQgBase && !yQgBase)) {
        lc->first_qp_group = !lc->tu.is_cu_qp_delta_coded;
        qPy_pred = s->sh.slice_qp;
    } else {
        qPy_pred = lc->qPy_pred;
    }

    // qPy_a: left neighbour, falling back to the predictor
    if (availableA == 0)
        qPy_a = qPy_pred;
    else
        qPy_a = s->qp_y_tab[(x_cb - 1) + y_cb * min_cb_width];

    // qPy_b: above neighbour, falling back to the predictor
    if (availableB == 0)
        qPy_b = qPy_pred;
    else
        qPy_b = s->qp_y_tab[x_cb + (y_cb - 1) * min_cb_width];

    av_assert2(qPy_a >= -s->ps.sps->qp_bd_offset && qPy_a < 52);
    av_assert2(qPy_b >= -s->ps.sps->qp_bd_offset && qPy_b < 52);

    /* rounded average of the two neighbours */
    return (qPy_a + qPy_b + 1) >> 1;
}
  104. void ff_hevc_set_qPy(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
  105. {
  106. int qp_y = get_qPy_pred(s, xBase, yBase, log2_cb_size);
  107. if (s->HEVClc->tu.cu_qp_delta != 0) {
  108. int off = s->ps.sps->qp_bd_offset;
  109. s->HEVClc->qp_y = FFUMOD(qp_y + s->HEVClc->tu.cu_qp_delta + 52 + 2 * off,
  110. 52 + off) - off;
  111. } else
  112. s->HEVClc->qp_y = qp_y;
  113. }
  114. static int get_qPy(HEVCContext *s, int xC, int yC)
  115. {
  116. int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
  117. int x = xC >> log2_min_cb_size;
  118. int y = yC >> log2_min_cb_size;
  119. return s->qp_y_tab[x + y * s->ps.sps->min_cb_width];
  120. }
  121. static void copy_CTB(uint8_t *dst, const uint8_t *src, int width, int height,
  122. intptr_t stride_dst, intptr_t stride_src)
  123. {
  124. int i, j;
  125. if (((intptr_t)dst | (intptr_t)src | stride_dst | stride_src) & 15) {
  126. for (i = 0; i < height; i++) {
  127. for (j = 0; j < width; j+=8)
  128. AV_COPY64U(dst+j, src+j);
  129. dst += stride_dst;
  130. src += stride_src;
  131. }
  132. } else {
  133. for (i = 0; i < height; i++) {
  134. for (j = 0; j < width; j+=16)
  135. AV_COPY128(dst+j, src+j);
  136. dst += stride_dst;
  137. src += stride_src;
  138. }
  139. }
  140. }
  141. static void copy_pixel(uint8_t *dst, const uint8_t *src, int pixel_shift)
  142. {
  143. if (pixel_shift)
  144. *(uint16_t *)dst = *(uint16_t *)src;
  145. else
  146. *dst = *src;
  147. }
  148. static void copy_vert(uint8_t *dst, const uint8_t *src,
  149. int pixel_shift, int height,
  150. int stride_dst, int stride_src)
  151. {
  152. int i;
  153. if (pixel_shift == 0) {
  154. for (i = 0; i < height; i++) {
  155. *dst = *src;
  156. dst += stride_dst;
  157. src += stride_src;
  158. }
  159. } else {
  160. for (i = 0; i < height; i++) {
  161. *(uint16_t *)dst = *(uint16_t *)src;
  162. dst += stride_dst;
  163. src += stride_src;
  164. }
  165. }
  166. }
  167. static void copy_CTB_to_hv(HEVCContext *s, const uint8_t *src,
  168. int stride_src, int x, int y, int width, int height,
  169. int c_idx, int x_ctb, int y_ctb)
  170. {
  171. int sh = s->ps.sps->pixel_shift;
  172. int w = s->ps.sps->width >> s->ps.sps->hshift[c_idx];
  173. int h = s->ps.sps->height >> s->ps.sps->vshift[c_idx];
  174. /* copy horizontal edges */
  175. memcpy(s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb) * w + x) << sh),
  176. src, width << sh);
  177. memcpy(s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb + 1) * w + x) << sh),
  178. src + stride_src * (height - 1), width << sh);
  179. /* copy vertical edges */
  180. copy_vert(s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb) * h + y) << sh), src, sh, height, 1 << sh, stride_src);
  181. copy_vert(s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb + 1) * h + y) << sh), src + ((width - 1) << sh), sh, height, 1 << sh, stride_src);
  182. }
/* Undo filtering on blocks that must be left untouched: for every minimum PU
 * flagged in s->is_pcm (PCM with loop filtering disabled, or
 * transquant-bypass/lossless), copy the unfiltered samples from dst1 back
 * into the filtered buffer src1. (x0, y0) / width / height describe the
 * processed rectangle in luma coordinates; c_idx selects the plane. */
static void restore_tqb_pixels(HEVCContext *s,
                               uint8_t *src1, const uint8_t *dst1,
                               ptrdiff_t stride_src, ptrdiff_t stride_dst,
                               int x0, int y0, int width, int height, int c_idx)
{
    if (s->ps.pps->transquant_bypass_enable_flag ||
        (s->ps.sps->pcm.loop_filter_disable_flag && s->ps.sps->pcm_enabled_flag)) {
        int x, y;
        int min_pu_size = 1 << s->ps.sps->log2_min_pu_size;
        int hshift = s->ps.sps->hshift[c_idx];
        int vshift = s->ps.sps->vshift[c_idx];
        /* rectangle bounds in minimum-PU units */
        int x_min = ((x0         ) >> s->ps.sps->log2_min_pu_size);
        int y_min = ((y0         ) >> s->ps.sps->log2_min_pu_size);
        int x_max = ((x0 + width ) >> s->ps.sps->log2_min_pu_size);
        int y_max = ((y0 + height) >> s->ps.sps->log2_min_pu_size);
        /* bytes per copied row: one min PU width in this plane */
        int len = (min_pu_size >> hshift) << s->ps.sps->pixel_shift;

        for (y = y_min; y < y_max; y++) {
            for (x = x_min; x < x_max; x++) {
                if (s->is_pcm[y * s->ps.sps->min_pu_width + x]) {
                    int n;
                    /* offset of this PU inside the (x0, y0)-based buffers,
                     * converted to plane resolution and byte units */
                    uint8_t *src = src1 + (((y << s->ps.sps->log2_min_pu_size) - y0) >> vshift) * stride_src + ((((x << s->ps.sps->log2_min_pu_size) - x0) >> hshift) << s->ps.sps->pixel_shift);
                    const uint8_t *dst = dst1 + (((y << s->ps.sps->log2_min_pu_size) - y0) >> vshift) * stride_dst + ((((x << s->ps.sps->log2_min_pu_size) - x0) >> hshift) << s->ps.sps->pixel_shift);
                    for (n = 0; n < (min_pu_size >> vshift); n++) {
                        memcpy(src, dst, len);
                        src += stride_src;
                        dst += stride_dst;
                    }
                }
            }
        }
    }
}
/* Index a per-CTB table at CTB coordinates (x, y). */
#define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])

/* Apply SAO (sample adaptive offset) to the CTB whose top-left pixel is
 * (x, y). Band offset works in place (or through a temporary when PCM/
 * lossless samples must be restored); edge offset assembles a padded copy
 * of the CTB plus a one-pixel border taken either from the frame or from
 * the saved pre-SAO line buffers, depending on whether each neighbouring
 * CTB has already been SAO-filtered (SAO_APPLIED). */
static void sao_filter_CTB(HEVCContext *s, int x, int y)
{
    /* maps CTB width in units of 8 samples to the dsp function index */
    static const uint8_t sao_tab[8] = { 0, 1, 2, 2, 3, 3, 4, 4 };
    HEVCLocalContext *lc = s->HEVClc;
    int c_idx;
    int edges[4];  // 0 left 1 top 2 right 3 bottom (CTB is on picture border)
    int x_ctb       = x >> s->ps.sps->log2_ctb_size;
    int y_ctb       = y >> s->ps.sps->log2_ctb_size;
    int ctb_addr_rs = y_ctb * s->ps.sps->ctb_width + x_ctb;
    int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
    SAOParams *sao  = &CTB(s->sao, x_ctb, y_ctb);
    // flags indicating unfilterable edges
    uint8_t vert_edge[]  = { 0, 0 };
    uint8_t horiz_edge[] = { 0, 0 };
    uint8_t diag_edge[]  = { 0, 0, 0, 0 };
    uint8_t lfase = CTB(s->filter_slice_edges, x_ctb, y_ctb);
    uint8_t no_tile_filter = s->ps.pps->tiles_enabled_flag &&
                             !s->ps.pps->loop_filter_across_tiles_enabled_flag;
    uint8_t restore = no_tile_filter || !lfase;
    uint8_t left_tile_edge   = 0;
    uint8_t right_tile_edge  = 0;
    uint8_t up_tile_edge     = 0;
    uint8_t bottom_tile_edge = 0;

    edges[0] = x_ctb == 0;
    edges[1] = y_ctb == 0;
    edges[2] = x_ctb == s->ps.sps->ctb_width  - 1;
    edges[3] = y_ctb == s->ps.sps->ctb_height - 1;

    if (restore) {
        /* mark edges shared with another tile or slice (when filtering
         * across them is disabled) so the dsp restore code can undo
         * filtering on those borders */
        if (!edges[0]) {
            left_tile_edge  = no_tile_filter && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1]];
            vert_edge[0]    = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb - 1, y_ctb)) || left_tile_edge;
        }
        if (!edges[2]) {
            right_tile_edge = no_tile_filter && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1]];
            vert_edge[1]    = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb + 1, y_ctb)) || right_tile_edge;
        }
        if (!edges[1]) {
            up_tile_edge     = no_tile_filter && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]];
            horiz_edge[0]    = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb, y_ctb - 1)) || up_tile_edge;
        }
        if (!edges[3]) {
            bottom_tile_edge = no_tile_filter && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs + s->ps.sps->ctb_width]];
            horiz_edge[1]    = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb, y_ctb + 1)) || bottom_tile_edge;
        }
        /* corner (diagonal) neighbours */
        if (!edges[0] && !edges[1]) {
            diag_edge[0] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb - 1, y_ctb - 1)) || left_tile_edge || up_tile_edge;
        }
        if (!edges[1] && !edges[2]) {
            diag_edge[1] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb + 1, y_ctb - 1)) || right_tile_edge || up_tile_edge;
        }
        if (!edges[2] && !edges[3]) {
            diag_edge[2] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb + 1, y_ctb + 1)) || right_tile_edge || bottom_tile_edge;
        }
        if (!edges[0] && !edges[3]) {
            diag_edge[3] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb - 1, y_ctb + 1)) || left_tile_edge || bottom_tile_edge;
        }
    }

    for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
        int x0 = x >> s->ps.sps->hshift[c_idx];
        int y0 = y >> s->ps.sps->vshift[c_idx];
        int stride_src = s->frame->linesize[c_idx];
        int ctb_size_h = (1 << (s->ps.sps->log2_ctb_size)) >> s->ps.sps->hshift[c_idx];
        int ctb_size_v = (1 << (s->ps.sps->log2_ctb_size)) >> s->ps.sps->vshift[c_idx];
        /* clamp the CTB extent at the right/bottom picture border */
        int width  = FFMIN(ctb_size_h, (s->ps.sps->width  >> s->ps.sps->hshift[c_idx]) - x0);
        int height = FFMIN(ctb_size_v, (s->ps.sps->height >> s->ps.sps->vshift[c_idx]) - y0);
        int tab = sao_tab[(FFALIGN(width, 8) >> 3) - 1];
        uint8_t *src = &s->frame->data[c_idx][y0 * stride_src + (x0 << s->ps.sps->pixel_shift)];
        int stride_dst;
        uint8_t *dst;

        switch (sao->type_idx[c_idx]) {
        case SAO_BAND:
            /* save pre-SAO borders for neighbouring CTBs first */
            copy_CTB_to_hv(s, src, stride_src, x0, y0, width, height, c_idx,
                           x_ctb, y_ctb);
            if (s->ps.pps->transquant_bypass_enable_flag ||
                (s->ps.sps->pcm.loop_filter_disable_flag && s->ps.sps->pcm_enabled_flag)) {
                /* filter through a temporary so unfilterable PUs can be
                 * restored afterwards */
                dst = lc->edge_emu_buffer;
                stride_dst = 2*MAX_PB_SIZE;
                copy_CTB(dst, src, width << s->ps.sps->pixel_shift, height, stride_dst, stride_src);
                s->hevcdsp.sao_band_filter[tab](src, dst, stride_src, stride_dst,
                                                sao->offset_val[c_idx], sao->band_position[c_idx],
                                                width, height);
                restore_tqb_pixels(s, src, dst, stride_src, stride_dst,
                                   x, y, width, height, c_idx);
            } else {
                /* in-place band filtering */
                s->hevcdsp.sao_band_filter[tab](src, src, stride_src, stride_src,
                                                sao->offset_val[c_idx], sao->band_position[c_idx],
                                                width, height);
            }
            sao->type_idx[c_idx] = SAO_APPLIED;
            break;
        case SAO_EDGE:
        {
            int w = s->ps.sps->width  >> s->ps.sps->hshift[c_idx];
            int h = s->ps.sps->height >> s->ps.sps->vshift[c_idx];
            int left_edge   = edges[0];
            int top_edge    = edges[1];
            int right_edge  = edges[2];
            int bottom_edge = edges[3];
            int sh = s->ps.sps->pixel_shift;
            int left_pixels, right_pixels;

            stride_dst = 2*MAX_PB_SIZE + AV_INPUT_BUFFER_PADDING_SIZE;
            dst = lc->edge_emu_buffer + stride_dst + AV_INPUT_BUFFER_PADDING_SIZE;

            if (!top_edge) {
                /* build the row above the CTB; each neighbour contributes
                 * either its current (already SAO-filtered) samples or the
                 * saved pre-SAO samples, depending on SAO_APPLIED */
                int left  = 1 - left_edge;
                int right = 1 - right_edge;
                const uint8_t *src1[2];
                uint8_t *dst1;
                int src_idx, pos;

                dst1 = dst - stride_dst - (left << sh);
                src1[0] = src - stride_src - (left << sh);              /* from the frame */
                src1[1] = s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb - 1) * w + x0 - left) << sh); /* pre-SAO */
                pos = 0;
                if (left) {
                    src_idx = (CTB(s->sao, x_ctb-1, y_ctb-1).type_idx[c_idx] ==
                               SAO_APPLIED);
                    copy_pixel(dst1, src1[src_idx], sh);
                    pos += (1 << sh);
                }
                src_idx = (CTB(s->sao, x_ctb, y_ctb-1).type_idx[c_idx] ==
                           SAO_APPLIED);
                memcpy(dst1 + pos, src1[src_idx] + pos, width << sh);
                if (right) {
                    pos += width << sh;
                    src_idx = (CTB(s->sao, x_ctb+1, y_ctb-1).type_idx[c_idx] ==
                               SAO_APPLIED);
                    copy_pixel(dst1 + pos, src1[src_idx] + pos, sh);
                }
            }
            if (!bottom_edge) {
                /* same construction for the row below the CTB */
                int left  = 1 - left_edge;
                int right = 1 - right_edge;
                const uint8_t *src1[2];
                uint8_t *dst1;
                int src_idx, pos;

                dst1 = dst + height * stride_dst - (left << sh);
                src1[0] = src + height * stride_src - (left << sh);
                src1[1] = s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb + 2) * w + x0 - left) << sh);
                pos = 0;
                if (left) {
                    src_idx = (CTB(s->sao, x_ctb-1, y_ctb+1).type_idx[c_idx] ==
                               SAO_APPLIED);
                    copy_pixel(dst1, src1[src_idx], sh);
                    pos += (1 << sh);
                }
                src_idx = (CTB(s->sao, x_ctb, y_ctb+1).type_idx[c_idx] ==
                           SAO_APPLIED);
                memcpy(dst1 + pos, src1[src_idx] + pos, width << sh);
                if (right) {
                    pos += width << sh;
                    src_idx = (CTB(s->sao, x_ctb+1, y_ctb+1).type_idx[c_idx] ==
                               SAO_APPLIED);
                    copy_pixel(dst1 + pos, src1[src_idx] + pos, sh);
                }
            }
            /* left column: pre-SAO saved samples when the neighbour was
             * already filtered, otherwise copied below with the CTB body */
            left_pixels = 0;
            if (!left_edge) {
                if (CTB(s->sao, x_ctb-1, y_ctb).type_idx[c_idx] == SAO_APPLIED) {
                    copy_vert(dst - (1 << sh),
                              s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb - 1) * h + y0) << sh),
                              sh, height, stride_dst, 1 << sh);
                } else {
                    left_pixels = 1;
                }
            }
            right_pixels = 0;
            if (!right_edge) {
                if (CTB(s->sao, x_ctb+1, y_ctb).type_idx[c_idx] == SAO_APPLIED) {
                    copy_vert(dst + (width << sh),
                              s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb + 2) * h + y0) << sh),
                              sh, height, stride_dst, 1 << sh);
                } else {
                    right_pixels = 1;
                }
            }
            /* copy the CTB body (plus any unfiltered side columns) */
            copy_CTB(dst - (left_pixels << sh),
                     src - (left_pixels << sh),
                     (width + left_pixels + right_pixels) << sh,
                     height, stride_dst, stride_src);

            /* save this CTB's pre-SAO borders before filtering it */
            copy_CTB_to_hv(s, src, stride_src, x0, y0, width, height, c_idx,
                           x_ctb, y_ctb);
            s->hevcdsp.sao_edge_filter[tab](src, dst, stride_src, sao->offset_val[c_idx],
                                            sao->eo_class[c_idx], width, height);
            s->hevcdsp.sao_edge_restore[restore](src, dst,
                                                 stride_src, stride_dst,
                                                 sao,
                                                 edges, width,
                                                 height, c_idx,
                                                 vert_edge,
                                                 horiz_edge,
                                                 diag_edge);
            restore_tqb_pixels(s, src, dst, stride_src, stride_dst,
                               x, y, width, height, c_idx);
            sao->type_idx[c_idx] = SAO_APPLIED;
            break;
        }
        }
    }
}
  414. static int get_pcm(HEVCContext *s, int x, int y)
  415. {
  416. int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
  417. int x_pu, y_pu;
  418. if (x < 0 || y < 0)
  419. return 2;
  420. x_pu = x >> log2_min_pu_size;
  421. y_pu = y >> log2_min_pu_size;
  422. if (x_pu >= s->ps.sps->min_pu_width || y_pu >= s->ps.sps->min_pu_height)
  423. return 2;
  424. return s->is_pcm[y_pu * s->ps.sps->min_pu_width + x_pu];
  425. }
/* Luma deblocking tc lookup: biases the QP index by DEFAULT_INTRA_TC_OFFSET
 * when bs == 2 (intra boundary) and adds the tc offset rounded down to an
 * even value; expects a local `tc_offset` to be in scope at the use site. */
#define TC_CALC(qp, bs)                                                 \
    tctable[av_clip((qp) + DEFAULT_INTRA_TC_OFFSET * ((bs) - 1) +       \
                    (tc_offset >> 1 << 1),                              \
                    0, MAX_QP + DEFAULT_INTRA_TC_OFFSET)]
/* Deblock the CTB at (x0, y0): vertical luma edges, then horizontal luma
 * edges (which may reach 8 pixels into the left CTB, hence the left
 * tc/beta offsets), then the same for each chroma plane where only
 * boundary strength 2 (intra) edges are filtered. Edges lie on an 8x8
 * luma grid; boundary strengths were precomputed per 4-sample segment in
 * s->vertical_bs / s->horizontal_bs. */
static void deblocking_filter_CTB(HEVCContext *s, int x0, int y0)
{
    uint8_t *src;
    int x, y;
    int chroma, beta;
    int32_t c_tc[2], tc[2];
    uint8_t no_p[2] = { 0 };
    uint8_t no_q[2] = { 0 };

    int log2_ctb_size = s->ps.sps->log2_ctb_size;
    int x_end, x_end2, y_end;
    int ctb_size = 1 << log2_ctb_size;
    int ctb      = (x0 >> log2_ctb_size) +
                   (y0 >> log2_ctb_size) * s->ps.sps->ctb_width;
    int cur_tc_offset   = s->deblock[ctb].tc_offset;
    int cur_beta_offset = s->deblock[ctb].beta_offset;
    int left_tc_offset, left_beta_offset;
    int tc_offset, beta_offset;
    /* pcmf: PCM or transquant-bypass samples may need to skip filtering */
    int pcmf = (s->ps.sps->pcm_enabled_flag &&
                s->ps.sps->pcm.loop_filter_disable_flag) ||
               s->ps.pps->transquant_bypass_enable_flag;

    if (x0) {
        left_tc_offset   = s->deblock[ctb - 1].tc_offset;
        left_beta_offset = s->deblock[ctb - 1].beta_offset;
    } else {
        left_tc_offset   = 0;
        left_beta_offset = 0;
    }

    x_end = x0 + ctb_size;
    if (x_end > s->ps.sps->width)
        x_end = s->ps.sps->width;
    y_end = y0 + ctb_size;
    if (y_end > s->ps.sps->height)
        y_end = s->ps.sps->height;

    tc_offset   = cur_tc_offset;
    beta_offset = cur_beta_offset;

    /* horizontal edges stop 8 pixels short of the CTB's right border
     * (handled when the next CTB is deblocked), except at the picture edge */
    x_end2 = x_end;
    if (x_end2 != s->ps.sps->width)
        x_end2 -= 8;

    for (y = y0; y < y_end; y += 8) {
        // vertical filtering luma
        for (x = x0 ? x0 : 8; x < x_end; x += 8) {
            const int bs0 = s->vertical_bs[(x + y * s->bs_width) >> 2];
            const int bs1 = s->vertical_bs[(x + (y + 4) * s->bs_width) >> 2];
            if (bs0 || bs1) {
                /* QP for threshold derivation: average across the edge */
                const int qp = (get_qPy(s, x - 1, y) + get_qPy(s, x, y) + 1) >> 1;

                beta = betatable[av_clip(qp + beta_offset, 0, MAX_QP)];

                tc[0] = bs0 ? TC_CALC(qp, bs0) : 0;
                tc[1] = bs1 ? TC_CALC(qp, bs1) : 0;
                src = &s->frame->data[LUMA][y * s->frame->linesize[LUMA] + (x << s->ps.sps->pixel_shift)];
                if (pcmf) {
                    /* use the C version so per-side no_p/no_q flags apply */
                    no_p[0] = get_pcm(s, x - 1, y);
                    no_p[1] = get_pcm(s, x - 1, y + 4);
                    no_q[0] = get_pcm(s, x, y);
                    no_q[1] = get_pcm(s, x, y + 4);
                    s->hevcdsp.hevc_v_loop_filter_luma_c(src,
                                                         s->frame->linesize[LUMA],
                                                         beta, tc, no_p, no_q);
                } else
                    s->hevcdsp.hevc_v_loop_filter_luma(src,
                                                       s->frame->linesize[LUMA],
                                                       beta, tc, no_p, no_q);
            }
        }

        if (!y)
            continue;

        // horizontal filtering luma
        for (x = x0 ? x0 - 8 : 0; x < x_end2; x += 8) {
            const int bs0 = s->horizontal_bs[( x      + y * s->bs_width) >> 2];
            const int bs1 = s->horizontal_bs[((x + 4) + y * s->bs_width) >> 2];
            if (bs0 || bs1) {
                const int qp = (get_qPy(s, x, y - 1) + get_qPy(s, x, y) + 1) >> 1;

                /* segments left of x0 belong to the left CTB's offsets */
                tc_offset   = x >= x0 ? cur_tc_offset : left_tc_offset;
                beta_offset = x >= x0 ? cur_beta_offset : left_beta_offset;

                beta = betatable[av_clip(qp + beta_offset, 0, MAX_QP)];
                tc[0] = bs0 ? TC_CALC(qp, bs0) : 0;
                tc[1] = bs1 ? TC_CALC(qp, bs1) : 0;
                src = &s->frame->data[LUMA][y * s->frame->linesize[LUMA] + (x << s->ps.sps->pixel_shift)];
                if (pcmf) {
                    no_p[0] = get_pcm(s, x, y - 1);
                    no_p[1] = get_pcm(s, x + 4, y - 1);
                    no_q[0] = get_pcm(s, x, y);
                    no_q[1] = get_pcm(s, x + 4, y);
                    s->hevcdsp.hevc_h_loop_filter_luma_c(src,
                                                         s->frame->linesize[LUMA],
                                                         beta, tc, no_p, no_q);
                } else
                    s->hevcdsp.hevc_h_loop_filter_luma(src,
                                                       s->frame->linesize[LUMA],
                                                       beta, tc, no_p, no_q);
            }
        }
    }

    if (s->ps.sps->chroma_format_idc) {
        for (chroma = 1; chroma <= 2; chroma++) {
            /* chroma edges lie on an (8*h) x (8*v) luma grid */
            int h = 1 << s->ps.sps->hshift[chroma];
            int v = 1 << s->ps.sps->vshift[chroma];

            // vertical filtering chroma
            for (y = y0; y < y_end; y += (8 * v)) {
                for (x = x0 ? x0 : 8 * h; x < x_end; x += (8 * h)) {
                    const int bs0 = s->vertical_bs[(x + y * s->bs_width) >> 2];
                    const int bs1 = s->vertical_bs[(x + (y + (4 * v)) * s->bs_width) >> 2];

                    /* only intra (bs == 2) edges are filtered for chroma */
                    if ((bs0 == 2) || (bs1 == 2)) {
                        const int qp0 = (get_qPy(s, x - 1, y)           + get_qPy(s, x, y)           + 1) >> 1;
                        const int qp1 = (get_qPy(s, x - 1, y + (4 * v)) + get_qPy(s, x, y + (4 * v)) + 1) >> 1;

                        c_tc[0] = (bs0 == 2) ? chroma_tc(s, qp0, chroma, tc_offset) : 0;
                        c_tc[1] = (bs1 == 2) ? chroma_tc(s, qp1, chroma, tc_offset) : 0;
                        src = &s->frame->data[chroma][(y >> s->ps.sps->vshift[chroma]) * s->frame->linesize[chroma] + ((x >> s->ps.sps->hshift[chroma]) << s->ps.sps->pixel_shift)];
                        if (pcmf) {
                            no_p[0] = get_pcm(s, x - 1, y);
                            no_p[1] = get_pcm(s, x - 1, y + (4 * v));
                            no_q[0] = get_pcm(s, x, y);
                            no_q[1] = get_pcm(s, x, y + (4 * v));
                            s->hevcdsp.hevc_v_loop_filter_chroma_c(src,
                                                                   s->frame->linesize[chroma],
                                                                   c_tc, no_p, no_q);
                        } else
                            s->hevcdsp.hevc_v_loop_filter_chroma(src,
                                                                 s->frame->linesize[chroma],
                                                                 c_tc, no_p, no_q);
                    }
                }

                if (!y)
                    continue;

                // horizontal filtering chroma
                tc_offset = x0 ? left_tc_offset : cur_tc_offset;
                x_end2 = x_end;
                if (x_end != s->ps.sps->width)
                    x_end2 = x_end - 8 * h;
                for (x = x0 ? x0 - 8 * h : 0; x < x_end2; x += (8 * h)) {
                    const int bs0 = s->horizontal_bs[( x          + y * s->bs_width) >> 2];
                    const int bs1 = s->horizontal_bs[((x + 4 * h) + y * s->bs_width) >> 2];
                    if ((bs0 == 2) || (bs1 == 2)) {
                        const int qp0 = bs0 == 2 ? (get_qPy(s, x,           y - 1) + get_qPy(s, x,           y) + 1) >> 1 : 0;
                        const int qp1 = bs1 == 2 ? (get_qPy(s, x + (4 * h), y - 1) + get_qPy(s, x + (4 * h), y) + 1) >> 1 : 0;

                        /* first segment may belong to the left CTB
                         * (tc_offset), second is inside this CTB */
                        c_tc[0] = bs0 == 2 ? chroma_tc(s, qp0, chroma, tc_offset)     : 0;
                        c_tc[1] = bs1 == 2 ? chroma_tc(s, qp1, chroma, cur_tc_offset) : 0;
                        src = &s->frame->data[chroma][(y >> s->ps.sps->vshift[1]) * s->frame->linesize[chroma] + ((x >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
                        if (pcmf) {
                            no_p[0] = get_pcm(s, x, y - 1);
                            no_p[1] = get_pcm(s, x + (4 * h), y - 1);
                            no_q[0] = get_pcm(s, x, y);
                            no_q[1] = get_pcm(s, x + (4 * h), y);
                            s->hevcdsp.hevc_h_loop_filter_chroma_c(src,
                                                                   s->frame->linesize[chroma],
                                                                   c_tc, no_p, no_q);
                        } else
                            s->hevcdsp.hevc_h_loop_filter_chroma(src,
                                                                 s->frame->linesize[chroma],
                                                                 c_tc, no_p, no_q);
                    }
                }
            }
        }
    }
}
/* Derive the boundary strength (0 or 1) for an inter/inter edge between
 * MvFields curr and neigh: 1 when they use different reference pictures
 * or any relevant motion vector component differs by 4 or more (one luma
 * sample, assuming quarter-pel MV units — consistent with the >= 4 checks),
 * 0 otherwise. neigh_refPicList is the reference list valid for the
 * neighbouring block (it may come from a different slice). */
static int boundary_strength(HEVCContext *s, MvField *curr, MvField *neigh,
                             RefPicList *neigh_refPicList)
{
    if (curr->pred_flag == PF_BI && neigh->pred_flag == PF_BI) {
        // same L0 and L1
        if (s->ref->refPicList[0].list[curr->ref_idx[0]] == neigh_refPicList[0].list[neigh->ref_idx[0]]  &&
            s->ref->refPicList[0].list[curr->ref_idx[0]] == s->ref->refPicList[1].list[curr->ref_idx[1]] &&
            neigh_refPicList[0].list[neigh->ref_idx[0]] == neigh_refPicList[1].list[neigh->ref_idx[1]]) {
            /* both orderings of the MV pairs must exceed the threshold */
            if ((FFABS(neigh->mv[0].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[0].y) >= 4 ||
                 FFABS(neigh->mv[1].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[1].y) >= 4) &&
                (FFABS(neigh->mv[1].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[0].y) >= 4 ||
                 FFABS(neigh->mv[0].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[1].y) >= 4))
                return 1;
            else
                return 0;
        } else if (neigh_refPicList[0].list[neigh->ref_idx[0]] == s->ref->refPicList[0].list[curr->ref_idx[0]] &&
                   neigh_refPicList[1].list[neigh->ref_idx[1]] == s->ref->refPicList[1].list[curr->ref_idx[1]]) {
            /* references match list-for-list: compare MVs directly */
            if (FFABS(neigh->mv[0].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[0].y) >= 4 ||
                FFABS(neigh->mv[1].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[1].y) >= 4)
                return 1;
            else
                return 0;
        } else if (neigh_refPicList[1].list[neigh->ref_idx[1]] == s->ref->refPicList[0].list[curr->ref_idx[0]] &&
                   neigh_refPicList[0].list[neigh->ref_idx[0]] == s->ref->refPicList[1].list[curr->ref_idx[1]]) {
            /* references match with L0/L1 swapped: compare crosswise */
            if (FFABS(neigh->mv[1].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[0].y) >= 4 ||
                FFABS(neigh->mv[0].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[1].y) >= 4)
                return 1;
            else
                return 0;
        } else {
            /* different reference pictures */
            return 1;
        }
    } else if ((curr->pred_flag != PF_BI) && (neigh->pred_flag != PF_BI)){ // 1 MV
        Mv A, B;
        int ref_A, ref_B;

        /* pick the single MV/ref each block actually uses (L0 or L1) */
        if (curr->pred_flag & 1) {
            A     = curr->mv[0];
            ref_A = s->ref->refPicList[0].list[curr->ref_idx[0]];
        } else {
            A     = curr->mv[1];
            ref_A = s->ref->refPicList[1].list[curr->ref_idx[1]];
        }

        if (neigh->pred_flag & 1) {
            B     = neigh->mv[0];
            ref_B = neigh_refPicList[0].list[neigh->ref_idx[0]];
        } else {
            B     = neigh->mv[1];
            ref_B = neigh_refPicList[1].list[neigh->ref_idx[1]];
        }

        if (ref_A == ref_B) {
            if (FFABS(A.x - B.x) >= 4 || FFABS(A.y - B.y) >= 4)
                return 1;
            else
                return 0;
        } else
            return 1;
    }

    /* one block bi-predicted, the other not */
    return 1;
}
/* Compute and store deblocking boundary strengths for the transform unit at
 * (x0, y0): its top and left outer edges (respecting slice/tile loop-filter
 * restrictions), then PU boundaries internal to the TU. Strengths are
 * written per 4-sample segment into s->horizontal_bs / s->vertical_bs:
 * 2 for intra boundaries, 1 when either side has coded luma residual,
 * otherwise the motion-based value from boundary_strength(). */
void ff_hevc_deblocking_boundary_strengths(HEVCContext *s, int x0, int y0,
                                           int log2_trafo_size)
{
    HEVCLocalContext *lc = s->HEVClc;
    MvField *tab_mvf     = s->ref->tab_mvf;
    int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
    int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
    int min_pu_width     = s->ps.sps->min_pu_width;
    int min_tu_width     = s->ps.sps->min_tb_width;
    int is_intra = tab_mvf[(y0 >> log2_min_pu_size) * min_pu_width +
                           (x0 >> log2_min_pu_size)].pred_flag == PF_INTRA;
    int boundary_upper, boundary_left;
    int i, j, bs;

    /* top edge only matters on the 8-pixel deblocking grid */
    boundary_upper = y0 > 0 && !(y0 & 7);
    /* suppress it when it coincides with a slice/tile boundary across
     * which loop filtering is disabled */
    if (boundary_upper &&
        ((!s->sh.slice_loop_filter_across_slices_enabled_flag &&
          lc->boundary_flags & BOUNDARY_UPPER_SLICE &&
          (y0 % (1 << s->ps.sps->log2_ctb_size)) == 0) ||
         (!s->ps.pps->loop_filter_across_tiles_enabled_flag &&
          lc->boundary_flags & BOUNDARY_UPPER_TILE &&
          (y0 % (1 << s->ps.sps->log2_ctb_size)) == 0)))
        boundary_upper = 0;

    if (boundary_upper) {
        /* the upper neighbour may live in another slice with its own lists */
        RefPicList *rpl_top = (lc->boundary_flags & BOUNDARY_UPPER_SLICE) ?
                              ff_hevc_get_ref_list(s, s->ref, x0, y0 - 1) :
                              s->ref->refPicList;
        int yp_pu = (y0 - 1) >> log2_min_pu_size;
        int yq_pu =  y0      >> log2_min_pu_size;
        int yp_tu = (y0 - 1) >> log2_min_tu_size;
        int yq_tu =  y0      >> log2_min_tu_size;

        for (i = 0; i < (1 << log2_trafo_size); i += 4) {
            int x_pu = (x0 + i) >> log2_min_pu_size;
            int x_tu = (x0 + i) >> log2_min_tu_size;
            MvField *top  = &tab_mvf[yp_pu * min_pu_width + x_pu];
            MvField *curr = &tab_mvf[yq_pu * min_pu_width + x_pu];
            uint8_t top_cbf_luma  = s->cbf_luma[yp_tu * min_tu_width + x_tu];
            uint8_t curr_cbf_luma = s->cbf_luma[yq_tu * min_tu_width + x_tu];

            if (curr->pred_flag == PF_INTRA || top->pred_flag == PF_INTRA)
                bs = 2;
            else if (curr_cbf_luma || top_cbf_luma)
                bs = 1;
            else
                bs = boundary_strength(s, curr, top, rpl_top);
            s->horizontal_bs[((x0 + i) + y0 * s->bs_width) >> 2] = bs;
        }
    }

    // bs for vertical TU boundaries
    boundary_left = x0 > 0 && !(x0 & 7);
    if (boundary_left &&
        ((!s->sh.slice_loop_filter_across_slices_enabled_flag &&
          lc->boundary_flags & BOUNDARY_LEFT_SLICE &&
          (x0 % (1 << s->ps.sps->log2_ctb_size)) == 0) ||
         (!s->ps.pps->loop_filter_across_tiles_enabled_flag &&
          lc->boundary_flags & BOUNDARY_LEFT_TILE &&
          (x0 % (1 << s->ps.sps->log2_ctb_size)) == 0)))
        boundary_left = 0;

    if (boundary_left) {
        RefPicList *rpl_left = (lc->boundary_flags & BOUNDARY_LEFT_SLICE) ?
                               ff_hevc_get_ref_list(s, s->ref, x0 - 1, y0) :
                               s->ref->refPicList;
        int xp_pu = (x0 - 1) >> log2_min_pu_size;
        int xq_pu =  x0      >> log2_min_pu_size;
        int xp_tu = (x0 - 1) >> log2_min_tu_size;
        int xq_tu =  x0      >> log2_min_tu_size;

        for (i = 0; i < (1 << log2_trafo_size); i += 4) {
            int y_pu = (y0 + i) >> log2_min_pu_size;
            int y_tu = (y0 + i) >> log2_min_tu_size;
            MvField *left = &tab_mvf[y_pu * min_pu_width + xp_pu];
            MvField *curr = &tab_mvf[y_pu * min_pu_width + xq_pu];
            uint8_t left_cbf_luma = s->cbf_luma[y_tu * min_tu_width + xp_tu];
            uint8_t curr_cbf_luma = s->cbf_luma[y_tu * min_tu_width + xq_tu];

            if (curr->pred_flag == PF_INTRA || left->pred_flag == PF_INTRA)
                bs = 2;
            else if (curr_cbf_luma || left_cbf_luma)
                bs = 1;
            else
                bs = boundary_strength(s, curr, left, rpl_left);
            s->vertical_bs[(x0 + (y0 + i) * s->bs_width) >> 2] = bs;
        }
    }

    /* internal PU boundaries exist only for inter TUs larger than a PU */
    if (log2_trafo_size > log2_min_pu_size && !is_intra) {
        RefPicList *rpl = s->ref->refPicList;

        // bs for TU internal horizontal PU boundaries
        for (j = 8; j < (1 << log2_trafo_size); j += 8) {
            int yp_pu = (y0 + j - 1) >> log2_min_pu_size;
            int yq_pu = (y0 + j)     >> log2_min_pu_size;

            for (i = 0; i < (1 << log2_trafo_size); i += 4) {
                int x_pu = (x0 + i) >> log2_min_pu_size;
                MvField *top  = &tab_mvf[yp_pu * min_pu_width + x_pu];
                MvField *curr = &tab_mvf[yq_pu * min_pu_width + x_pu];

                bs = boundary_strength(s, curr, top, rpl);
                s->horizontal_bs[((x0 + i) + (y0 + j) * s->bs_width) >> 2] = bs;
            }
        }

        // bs for TU internal vertical PU boundaries
        for (j = 0; j < (1 << log2_trafo_size); j += 4) {
            int y_pu = (y0 + j) >> log2_min_pu_size;

            for (i = 8; i < (1 << log2_trafo_size); i += 8) {
                int xp_pu = (x0 + i - 1) >> log2_min_pu_size;
                int xq_pu = (x0 + i)     >> log2_min_pu_size;
                MvField *left = &tab_mvf[y_pu * min_pu_width + xp_pu];
                MvField *curr = &tab_mvf[y_pu * min_pu_width + xq_pu];

                bs = boundary_strength(s, curr, left, rpl);
                s->vertical_bs[((x0 + i) + (y0 + j) * s->bs_width) >> 2] = bs;
            }
        }
    }
}
  752. #undef LUMA
  753. #undef CB
  754. #undef CR
/* Run the in-loop filters around CTB (x, y): deblock this CTB, then apply
 * SAO to neighbouring CTBs that are now fully deblocked — SAO lags one CTB
 * behind deblocking because sao_filter_CTB() reads samples from all four
 * neighbours. Also reports row progress for frame-threaded decoding. */
void ff_hevc_hls_filter(HEVCContext *s, int x, int y, int ctb_size)
{
    int x_end = x >= s->ps.sps->width - ctb_size;

    if (s->avctx->skip_loop_filter < AVDISCARD_ALL)
        deblocking_filter_CTB(s, x, y);
    if (s->ps.sps->sao_enabled) {
        int y_end = y >= s->ps.sps->height - ctb_size;
        /* upper-left neighbour is always safe to SAO now */
        if (y && x)
            sao_filter_CTB(s, x - ctb_size, y - ctb_size);
        /* on the last row, the left neighbour is also complete */
        if (x && y_end)
            sao_filter_CTB(s, x - ctb_size, y);
        /* in the last column, the upper neighbour is complete: finish the
         * row above and report its progress */
        if (y && x_end) {
            sao_filter_CTB(s, x, y - ctb_size);
            if (s->threads_type & FF_THREAD_FRAME )
                ff_thread_report_progress(&s->ref->tf, y, 0);
        }
        /* bottom-right corner: this CTB itself is the last one */
        if (x_end && y_end) {
            sao_filter_CTB(s, x , y);
            if (s->threads_type & FF_THREAD_FRAME )
                ff_thread_report_progress(&s->ref->tf, y + ctb_size, 0);
        }
    } else if (s->threads_type & FF_THREAD_FRAME && x_end)
        /* without SAO, deblocking of this row may still touch the last 4
         * rows of the row above, hence the -4 on the reported position */
        ff_thread_report_progress(&s->ref->tf, y + ctb_size - 4, 0);
}
  779. void ff_hevc_hls_filters(HEVCContext *s, int x_ctb, int y_ctb, int ctb_size)
  780. {
  781. int x_end = x_ctb >= s->ps.sps->width - ctb_size;
  782. int y_end = y_ctb >= s->ps.sps->height - ctb_size;
  783. if (y_ctb && x_ctb)
  784. ff_hevc_hls_filter(s, x_ctb - ctb_size, y_ctb - ctb_size, ctb_size);
  785. if (y_ctb && x_end)
  786. ff_hevc_hls_filter(s, x_ctb, y_ctb - ctb_size, ctb_size);
  787. if (x_ctb && y_end)
  788. ff_hevc_hls_filter(s, x_ctb - ctb_size, y_ctb, ctb_size);
  789. }