/*
 * HEVC video decoder
 *
 * Copyright (C) 2012 - 2013 Guillaume Martres
 * Copyright (C) 2013 Seppo Tomperi
 * Copyright (C) 2013 Wassim Hamidouche
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/common.h"
#include "libavutil/internal.h"

#include "cabac_functions.h"
#include "golomb.h"
#include "hevc.h"

#include "bit_depth_template.c"

#define LUMA 0
#define CB 1
#define CR 2

static const uint8_t tctable[54] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, // QP  0...18
    1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, // QP 19...37
    5, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16, 18, 20, 22, 24  // QP 38...53
};

static const uint8_t betatable[52] = {
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  6,  7,  8, // QP  0...18
     9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, // QP 19...37
    38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64                      // QP 38...51
};
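
/*
 * Derive the tc threshold for a chroma edge: map the averaged luma QP of the
 * two sides to a chroma QP (for 4:2:0 the qp_c table compresses the 30..43
 * range), add DEFAULT_INTRA_TC_OFFSET plus the signalled tc offset, and look
 * the result up in tctable. Only the PPS cb/cr QP offsets are applied here;
 * as noted below, the slice-level QP offsets are not used for deblocking.
 */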
static int chroma_tc(HEVCContext *s, int qp_y, int c_idx, int tc_offset)
{
    static const int qp_c[] = {
        29, 30, 31, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37
    };
    int qp, qp_i, offset, idxt;

    // slice qp offset is not used for deblocking
    if (c_idx == 1)
        offset = s->pps->cb_qp_offset;
    else
        offset = s->pps->cr_qp_offset;

    qp_i = av_clip(qp_y + offset, 0, 57);
    if (s->sps->chroma_format_idc == 1) {
        if (qp_i < 30)
            qp = qp_i;
        else if (qp_i > 43)
            qp = qp_i - 6;
        else
            qp = qp_c[qp_i - 30];
    } else {
        qp = av_clip(qp_i, 0, 51);
    }

    idxt = av_clip(qp + DEFAULT_INTRA_TC_OFFSET + tc_offset, 0, 53);
    return tctable[idxt];
}
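
/*
 * Predict the luma QP of the current quantization group as the average of the
 * QPs of the left and above neighbours. A neighbour that lies outside the
 * current CTB is replaced by the fallback prediction: the slice QP for the
 * first quantization group of a slice, otherwise the previously stored
 * lc->qPy_pred.
 */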
static int get_qPy_pred(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
{
    HEVCLocalContext *lc     = s->HEVClc;
    int ctb_size_mask        = (1 << s->sps->log2_ctb_size) - 1;
    int MinCuQpDeltaSizeMask = (1 << (s->sps->log2_ctb_size -
                                      s->pps->diff_cu_qp_delta_depth)) - 1;
    int xQgBase              = xBase - (xBase & MinCuQpDeltaSizeMask);
    int yQgBase              = yBase - (yBase & MinCuQpDeltaSizeMask);
    int min_cb_width         = s->sps->min_cb_width;
    int x_cb                 = xQgBase >> s->sps->log2_min_cb_size;
    int y_cb                 = yQgBase >> s->sps->log2_min_cb_size;
    int availableA           = (xBase   & ctb_size_mask) &&
                               (xQgBase & ctb_size_mask);
    int availableB           = (yBase   & ctb_size_mask) &&
                               (yQgBase & ctb_size_mask);
    int qPy_pred, qPy_a, qPy_b;

    // qPy_pred
    if (lc->first_qp_group || (!xQgBase && !yQgBase)) {
        lc->first_qp_group = !lc->tu.is_cu_qp_delta_coded;
        qPy_pred = s->sh.slice_qp;
    } else {
        qPy_pred = lc->qPy_pred;
    }

    // qPy_a
    if (availableA == 0)
        qPy_a = qPy_pred;
    else
        qPy_a = s->qp_y_tab[(x_cb - 1) + y_cb * min_cb_width];

    // qPy_b
    if (availableB == 0)
        qPy_b = qPy_pred;
    else
        qPy_b = s->qp_y_tab[x_cb + (y_cb - 1) * min_cb_width];

    av_assert2(qPy_a >= -s->sps->qp_bd_offset && qPy_a < 52);
    av_assert2(qPy_b >= -s->sps->qp_bd_offset && qPy_b < 52);

    return (qPy_a + qPy_b + 1) >> 1;
}
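
/*
 * Set the luma QP of the current coding unit: apply the coded cu_qp_delta to
 * the predicted QP and wrap the result into the valid range
 * [-qp_bd_offset, 51] using FFUMOD.
 */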
void ff_hevc_set_qPy(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
{
    int qp_y = get_qPy_pred(s, xBase, yBase, log2_cb_size);

    if (s->HEVClc->tu.cu_qp_delta != 0) {
        int off = s->sps->qp_bd_offset;
        s->HEVClc->qp_y = FFUMOD(qp_y + s->HEVClc->tu.cu_qp_delta + 52 + 2 * off,
                                 52 + off) - off;
    } else
        s->HEVClc->qp_y = qp_y;
}

static int get_qPy(HEVCContext *s, int xC, int yC)
{
    int log2_min_cb_size = s->sps->log2_min_cb_size;
    int x                = xC >> log2_min_cb_size;
    int y                = yC >> log2_min_cb_size;
    return s->qp_y_tab[x + y * s->sps->min_cb_width];
}
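
/*
 * Copy a CTB-sized block of samples between the frame and the SAO scratch
 * buffer. When the pointers and strides are all 16-byte aligned the copy is
 * done in 128-bit chunks, otherwise it falls back to 64-bit chunks.
 */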
static void copy_CTB(uint8_t *dst, const uint8_t *src, int width, int height,
                     intptr_t stride_dst, intptr_t stride_src)
{
    int i, j;

    if (((intptr_t)dst | (intptr_t)src | stride_dst | stride_src) & 15) {
        for (i = 0; i < height; i++) {
            for (j = 0; j < width; j += 8)
                AV_COPY64(dst + j, src + j);
            dst += stride_dst;
            src += stride_src;
        }
    } else {
        for (i = 0; i < height; i++) {
            for (j = 0; j < width; j += 16)
                AV_COPY128(dst + j, src + j);
            dst += stride_dst;
            src += stride_src;
        }
    }
}

static void copy_pixel(uint8_t *dst, const uint8_t *src, int pixel_shift)
{
    if (pixel_shift)
        *(uint16_t *)dst = *(uint16_t *)src;
    else
        *dst = *src;
}

static void copy_vert(uint8_t *dst, const uint8_t *src,
                      int pixel_shift, int height,
                      int stride_dst, int stride_src)
{
    int i;
    if (pixel_shift == 0) {
        for (i = 0; i < height; i++) {
            *dst = *src;
            dst += stride_dst;
            src += stride_src;
        }
    } else {
        for (i = 0; i < height; i++) {
            *(uint16_t *)dst = *(uint16_t *)src;
            dst += stride_dst;
            src += stride_src;
        }
    }
}
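
/*
 * Save the border samples of a CTB before SAO modifies it: the top and bottom
 * rows go into sao_pixel_buffer_h and the left and right columns into
 * sao_pixel_buffer_v, so that neighbouring CTBs filtered later can still read
 * the pre-SAO samples along the shared edges.
 */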
static void copy_CTB_to_hv(HEVCContext *s, const uint8_t *src,
                           int stride_src, int x, int y, int width, int height,
                           int c_idx, int x_ctb, int y_ctb)
{
    int sh = s->sps->pixel_shift;
    int w  = s->sps->width  >> s->sps->hshift[c_idx];
    int h  = s->sps->height >> s->sps->vshift[c_idx];

    /* copy horizontal edges */
    memcpy(s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb) * w + x) << sh),
           src, width << sh);
    memcpy(s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb + 1) * w + x) << sh),
           src + stride_src * (height - 1), width << sh);

    /* copy vertical edges */
    copy_vert(s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb) * h + y) << sh), src, sh, height, 1 << sh, stride_src);

    copy_vert(s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb + 1) * h + y) << sh), src + ((width - 1) << sh), sh, height, 1 << sh, stride_src);
}
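
/*
 * Undo in-loop filtering for blocks that must stay untouched: when transquant
 * bypass or PCM with loop filtering disabled may be present, copy the
 * unfiltered samples (from dst1) back over the filter output (src1) for every
 * minimum PU flagged in s->is_pcm.
 */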
static void restore_tqb_pixels(HEVCContext *s,
                               uint8_t *src1, const uint8_t *dst1,
                               ptrdiff_t stride_src, ptrdiff_t stride_dst,
                               int x0, int y0, int width, int height, int c_idx)
{
    if (s->pps->transquant_bypass_enable_flag ||
        (s->sps->pcm.loop_filter_disable_flag && s->sps->pcm_enabled_flag)) {
        int x, y;
        int min_pu_size = 1 << s->sps->log2_min_pu_size;
        int hshift      = s->sps->hshift[c_idx];
        int vshift      = s->sps->vshift[c_idx];
        int x_min       = ((x0         ) >> s->sps->log2_min_pu_size);
        int y_min       = ((y0         ) >> s->sps->log2_min_pu_size);
        int x_max       = ((x0 + width ) >> s->sps->log2_min_pu_size);
        int y_max       = ((y0 + height) >> s->sps->log2_min_pu_size);
        int len         = (min_pu_size >> hshift) << s->sps->pixel_shift;
        for (y = y_min; y < y_max; y++) {
            for (x = x_min; x < x_max; x++) {
                if (s->is_pcm[y * s->sps->min_pu_width + x]) {
                    int n;
                    uint8_t *src = src1 + (((y << s->sps->log2_min_pu_size) - y0) >> vshift) * stride_src + ((((x << s->sps->log2_min_pu_size) - x0) >> hshift) << s->sps->pixel_shift);
                    const uint8_t *dst = dst1 + (((y << s->sps->log2_min_pu_size) - y0) >> vshift) * stride_dst + ((((x << s->sps->log2_min_pu_size) - x0) >> hshift) << s->sps->pixel_shift);
                    for (n = 0; n < (min_pu_size >> vshift); n++) {
                        memcpy(src, dst, len);
                        src += stride_src;
                        dst += stride_dst;
                    }
                }
            }
        }
    }
}

#define CTB(tab, x, y) ((tab)[(y) * s->sps->ctb_width + (x)])
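
/*
 * Apply SAO to one CTB. For edge offset, a copy of the CTB plus a one-pixel
 * border is assembled in edge_emu_buffer: border samples come from the frame
 * when the neighbouring CTB has not been SAO-filtered yet, or from the saved
 * sao_pixel_buffer_h/v rows and columns when it has (type SAO_APPLIED).
 * Edges that cross slice or tile boundaries with cross-boundary filtering
 * disabled are flagged in vert_edge/horiz_edge/diag_edge and handled by
 * sao_edge_restore; lossless/PCM blocks are restored afterwards.
 */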
static void sao_filter_CTB(HEVCContext *s, int x, int y)
{
    static const uint8_t band_tab[8] = { 0, 1, 2, 2, 3, 3, 4, 4 };
    HEVCLocalContext *lc = s->HEVClc;
    int c_idx;
    int edges[4];  // 0 left 1 top 2 right 3 bottom
    int x_ctb                = x >> s->sps->log2_ctb_size;
    int y_ctb                = y >> s->sps->log2_ctb_size;
    int ctb_addr_rs          = y_ctb * s->sps->ctb_width + x_ctb;
    int ctb_addr_ts          = s->pps->ctb_addr_rs_to_ts[ctb_addr_rs];
    SAOParams *sao           = &CTB(s->sao, x_ctb, y_ctb);
    // flags indicating unfilterable edges
    uint8_t vert_edge[]      = { 0, 0 };
    uint8_t horiz_edge[]     = { 0, 0 };
    uint8_t diag_edge[]      = { 0, 0, 0, 0 };
    uint8_t lfase            = CTB(s->filter_slice_edges, x_ctb, y_ctb);
    uint8_t no_tile_filter   = s->pps->tiles_enabled_flag &&
                               !s->pps->loop_filter_across_tiles_enabled_flag;
    uint8_t restore          = no_tile_filter || !lfase;
    uint8_t left_tile_edge   = 0;
    uint8_t right_tile_edge  = 0;
    uint8_t up_tile_edge     = 0;
    uint8_t bottom_tile_edge = 0;

    edges[0] = x_ctb == 0;
    edges[1] = y_ctb == 0;
    edges[2] = x_ctb == s->sps->ctb_width  - 1;
    edges[3] = y_ctb == s->sps->ctb_height - 1;

    if (restore) {
        if (!edges[0]) {
            left_tile_edge  = no_tile_filter && s->pps->tile_id[ctb_addr_ts] != s->pps->tile_id[s->pps->ctb_addr_rs_to_ts[ctb_addr_rs-1]];
            vert_edge[0]    = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb - 1, y_ctb)) || left_tile_edge;
        }
        if (!edges[2]) {
            right_tile_edge = no_tile_filter && s->pps->tile_id[ctb_addr_ts] != s->pps->tile_id[s->pps->ctb_addr_rs_to_ts[ctb_addr_rs+1]];
            vert_edge[1]    = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb + 1, y_ctb)) || right_tile_edge;
        }
        if (!edges[1]) {
            up_tile_edge     = no_tile_filter && s->pps->tile_id[ctb_addr_ts] != s->pps->tile_id[s->pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->sps->ctb_width]];
            horiz_edge[0]    = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb, y_ctb - 1)) || up_tile_edge;
        }
        if (!edges[3]) {
            bottom_tile_edge = no_tile_filter && s->pps->tile_id[ctb_addr_ts] != s->pps->tile_id[s->pps->ctb_addr_rs_to_ts[ctb_addr_rs + s->sps->ctb_width]];
            horiz_edge[1]    = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb, y_ctb + 1)) || bottom_tile_edge;
        }
        if (!edges[0] && !edges[1]) {
            diag_edge[0] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb - 1, y_ctb - 1)) || left_tile_edge || up_tile_edge;
        }
        if (!edges[1] && !edges[2]) {
            diag_edge[1] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb + 1, y_ctb - 1)) || right_tile_edge || up_tile_edge;
        }
        if (!edges[2] && !edges[3]) {
            diag_edge[2] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb + 1, y_ctb + 1)) || right_tile_edge || bottom_tile_edge;
        }
        if (!edges[0] && !edges[3]) {
            diag_edge[3] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb - 1, y_ctb + 1)) || left_tile_edge || bottom_tile_edge;
        }
    }

    for (c_idx = 0; c_idx < (s->sps->chroma_format_idc ? 3 : 1); c_idx++) {
        int x0         = x >> s->sps->hshift[c_idx];
        int y0         = y >> s->sps->vshift[c_idx];
        int stride_src = s->frame->linesize[c_idx];
        int ctb_size_h = (1 << (s->sps->log2_ctb_size)) >> s->sps->hshift[c_idx];
        int ctb_size_v = (1 << (s->sps->log2_ctb_size)) >> s->sps->vshift[c_idx];
        int width      = FFMIN(ctb_size_h, (s->sps->width  >> s->sps->hshift[c_idx]) - x0);
        int height     = FFMIN(ctb_size_v, (s->sps->height >> s->sps->vshift[c_idx]) - y0);
        int tab        = band_tab[(FFALIGN(width, 8) >> 3) - 1];
        uint8_t *src   = &s->frame->data[c_idx][y0 * stride_src + (x0 << s->sps->pixel_shift)];
        int stride_dst;
        uint8_t *dst;

        switch (sao->type_idx[c_idx]) {
        case SAO_BAND:
            dst = lc->edge_emu_buffer;
            stride_dst = 2*MAX_PB_SIZE;
            copy_CTB(dst, src, width << s->sps->pixel_shift, height, stride_dst, stride_src);
            copy_CTB_to_hv(s, src, stride_src, x0, y0, width, height, c_idx,
                           x_ctb, y_ctb);
            s->hevcdsp.sao_band_filter[tab](src, dst, stride_src, stride_dst,
                                            sao->offset_val[c_idx], sao->band_position[c_idx],
                                            width, height);
            restore_tqb_pixels(s, src, dst, stride_src, stride_dst,
                               x, y, width, height, c_idx);
            sao->type_idx[c_idx] = SAO_APPLIED;
            break;
        case SAO_EDGE:
        {
            int w = s->sps->width >> s->sps->hshift[c_idx];
            int h = s->sps->height >> s->sps->vshift[c_idx];
            int left_edge = edges[0];
            int top_edge = edges[1];
            int right_edge = edges[2];
            int bottom_edge = edges[3];
            int sh = s->sps->pixel_shift;
            int left_pixels, right_pixels;

            stride_dst = 2*MAX_PB_SIZE + FF_INPUT_BUFFER_PADDING_SIZE;
            dst = lc->edge_emu_buffer + stride_dst + FF_INPUT_BUFFER_PADDING_SIZE;

            if (!top_edge) {
                int left = 1 - left_edge;
                int right = 1 - right_edge;
                const uint8_t *src1[2];
                uint8_t *dst1;
                int src_idx, pos;

                dst1 = dst - stride_dst - (left << sh);
                src1[0] = src - stride_src - (left << sh);
                src1[1] = s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb - 1) * w + x0 - left) << sh);
                pos = 0;
                if (left) {
                    src_idx = (CTB(s->sao, x_ctb-1, y_ctb-1).type_idx[c_idx] ==
                               SAO_APPLIED);
                    copy_pixel(dst1, src1[src_idx], sh);
                    pos += (1 << sh);
                }
                src_idx = (CTB(s->sao, x_ctb, y_ctb-1).type_idx[c_idx] ==
                           SAO_APPLIED);
                memcpy(dst1 + pos, src1[src_idx] + pos, width << sh);
                if (right) {
                    pos += width << sh;
                    src_idx = (CTB(s->sao, x_ctb+1, y_ctb-1).type_idx[c_idx] ==
                               SAO_APPLIED);
                    copy_pixel(dst1 + pos, src1[src_idx] + pos, sh);
                }
            }
            if (!bottom_edge) {
                int left = 1 - left_edge;
                int right = 1 - right_edge;
                const uint8_t *src1[2];
                uint8_t *dst1;
                int src_idx, pos;

                dst1 = dst + height * stride_dst - (left << sh);
                src1[0] = src + height * stride_src - (left << sh);
                src1[1] = s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb + 2) * w + x0 - left) << sh);
                pos = 0;
                if (left) {
                    src_idx = (CTB(s->sao, x_ctb-1, y_ctb+1).type_idx[c_idx] ==
                               SAO_APPLIED);
                    copy_pixel(dst1, src1[src_idx], sh);
                    pos += (1 << sh);
                }
                src_idx = (CTB(s->sao, x_ctb, y_ctb+1).type_idx[c_idx] ==
                           SAO_APPLIED);
                memcpy(dst1 + pos, src1[src_idx] + pos, width << sh);
                if (right) {
                    pos += width << sh;
                    src_idx = (CTB(s->sao, x_ctb+1, y_ctb+1).type_idx[c_idx] ==
                               SAO_APPLIED);
                    copy_pixel(dst1 + pos, src1[src_idx] + pos, sh);
                }
            }
            left_pixels = 0;
            if (!left_edge) {
                if (CTB(s->sao, x_ctb-1, y_ctb).type_idx[c_idx] == SAO_APPLIED) {
                    copy_vert(dst - (1 << sh),
                              s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb - 1) * h + y0) << sh),
                              sh, height, stride_dst, 1 << sh);
                } else {
                    left_pixels = 1;
                }
            }
            right_pixels = 0;
            if (!right_edge) {
                if (CTB(s->sao, x_ctb+1, y_ctb).type_idx[c_idx] == SAO_APPLIED) {
                    copy_vert(dst + (width << sh),
                              s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb + 2) * h + y0) << sh),
                              sh, height, stride_dst, 1 << sh);
                } else {
                    right_pixels = 1;
                }
            }

            copy_CTB(dst - (left_pixels << sh),
                     src - (left_pixels << sh),
                     (width + left_pixels + right_pixels) << sh,
                     height, stride_dst, stride_src);

            copy_CTB_to_hv(s, src, stride_src, x0, y0, width, height, c_idx,
                           x_ctb, y_ctb);
            s->hevcdsp.sao_edge_filter(src, dst, stride_src, stride_dst,
                                       sao->offset_val[c_idx], sao->eo_class[c_idx],
                                       width, height);
            s->hevcdsp.sao_edge_restore[restore](src, dst,
                                                 stride_src, stride_dst,
                                                 sao,
                                                 edges, width,
                                                 height, c_idx,
                                                 vert_edge,
                                                 horiz_edge,
                                                 diag_edge);
            restore_tqb_pixels(s, src, dst, stride_src, stride_dst,
                               x, y, width, height, c_idx);
            sao->type_idx[c_idx] = SAO_APPLIED;
            break;
        }
        }
    }
}
static int get_pcm(HEVCContext *s, int x, int y)
{
    int log2_min_pu_size = s->sps->log2_min_pu_size;
    int x_pu, y_pu;

    if (x < 0 || y < 0)
        return 2;

    x_pu = x >> log2_min_pu_size;
    y_pu = y >> log2_min_pu_size;

    if (x_pu >= s->sps->min_pu_width || y_pu >= s->sps->min_pu_height)
        return 2;
    return s->is_pcm[y_pu * s->sps->min_pu_width + x_pu];
}

#define TC_CALC(qp, bs)                                                 \
    tctable[av_clip((qp) + DEFAULT_INTRA_TC_OFFSET * ((bs) - 1) +       \
                    (tc_offset >> 1 << 1),                              \
                    0, MAX_QP + DEFAULT_INTRA_TC_OFFSET)]
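
/*
 * Deblock one CTB. Luma edges lie on an 8x8 grid and are filtered vertical
 * edges first, then horizontal edges; chroma edges are filtered only where
 * the boundary strength is 2 (an intra block on either side) and, for
 * subsampled formats, on a correspondingly coarser grid. beta and tc are
 * looked up from the QP averaged across the edge plus the per-CTB offsets,
 * and PCM/transquant-bypass neighbours are signalled to the DSP routines via
 * no_p/no_q so their samples are left untouched.
 */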
static void deblocking_filter_CTB(HEVCContext *s, int x0, int y0)
{
    uint8_t *src;
    int x, y;
    int chroma, beta;
    int32_t c_tc[2], tc[2];
    uint8_t no_p[2] = { 0 };
    uint8_t no_q[2] = { 0 };

    int log2_ctb_size = s->sps->log2_ctb_size;
    int x_end, x_end2, y_end;
    int ctb_size        = 1 << log2_ctb_size;
    int ctb             = (x0 >> log2_ctb_size) +
                          (y0 >> log2_ctb_size) * s->sps->ctb_width;
    int cur_tc_offset   = s->deblock[ctb].tc_offset;
    int cur_beta_offset = s->deblock[ctb].beta_offset;
    int left_tc_offset, left_beta_offset;
    int tc_offset, beta_offset;
    int pcmf = (s->sps->pcm_enabled_flag &&
                s->sps->pcm.loop_filter_disable_flag) ||
               s->pps->transquant_bypass_enable_flag;

    if (x0) {
        left_tc_offset   = s->deblock[ctb - 1].tc_offset;
        left_beta_offset = s->deblock[ctb - 1].beta_offset;
    } else {
        left_tc_offset   = 0;
        left_beta_offset = 0;
    }

    x_end = x0 + ctb_size;
    if (x_end > s->sps->width)
        x_end = s->sps->width;
    y_end = y0 + ctb_size;
    if (y_end > s->sps->height)
        y_end = s->sps->height;

    tc_offset   = cur_tc_offset;
    beta_offset = cur_beta_offset;

    x_end2 = x_end;
    if (x_end2 != s->sps->width)
        x_end2 -= 8;
    for (y = y0; y < y_end; y += 8) {
        // vertical filtering luma
        for (x = x0 ? x0 : 8; x < x_end; x += 8) {
            const int bs0 = s->vertical_bs[(x + y * s->bs_width) >> 2];
            const int bs1 = s->vertical_bs[(x + (y + 4) * s->bs_width) >> 2];
            if (bs0 || bs1) {
                const int qp = (get_qPy(s, x - 1, y) + get_qPy(s, x, y) + 1) >> 1;

                beta = betatable[av_clip(qp + beta_offset, 0, MAX_QP)];

                tc[0] = bs0 ? TC_CALC(qp, bs0) : 0;
                tc[1] = bs1 ? TC_CALC(qp, bs1) : 0;
                src   = &s->frame->data[LUMA][y * s->frame->linesize[LUMA] + (x << s->sps->pixel_shift)];
                if (pcmf) {
                    no_p[0] = get_pcm(s, x - 1, y);
                    no_p[1] = get_pcm(s, x - 1, y + 4);
                    no_q[0] = get_pcm(s, x, y);
                    no_q[1] = get_pcm(s, x, y + 4);
                    s->hevcdsp.hevc_v_loop_filter_luma_c(src,
                                                         s->frame->linesize[LUMA],
                                                         beta, tc, no_p, no_q);
                } else
                    s->hevcdsp.hevc_v_loop_filter_luma(src,
                                                       s->frame->linesize[LUMA],
                                                       beta, tc, no_p, no_q);
            }
        }

        if (!y)
            continue;

        // horizontal filtering luma
        for (x = x0 ? x0 - 8 : 0; x < x_end2; x += 8) {
            const int bs0 = s->horizontal_bs[( x      + y * s->bs_width) >> 2];
            const int bs1 = s->horizontal_bs[((x + 4) + y * s->bs_width) >> 2];
            if (bs0 || bs1) {
                const int qp = (get_qPy(s, x, y - 1) + get_qPy(s, x, y) + 1) >> 1;

                tc_offset   = x >= x0 ? cur_tc_offset : left_tc_offset;
                beta_offset = x >= x0 ? cur_beta_offset : left_beta_offset;

                beta  = betatable[av_clip(qp + beta_offset, 0, MAX_QP)];
                tc[0] = bs0 ? TC_CALC(qp, bs0) : 0;
                tc[1] = bs1 ? TC_CALC(qp, bs1) : 0;
                src   = &s->frame->data[LUMA][y * s->frame->linesize[LUMA] + (x << s->sps->pixel_shift)];
                if (pcmf) {
                    no_p[0] = get_pcm(s, x, y - 1);
                    no_p[1] = get_pcm(s, x + 4, y - 1);
                    no_q[0] = get_pcm(s, x, y);
                    no_q[1] = get_pcm(s, x + 4, y);
                    s->hevcdsp.hevc_h_loop_filter_luma_c(src,
                                                         s->frame->linesize[LUMA],
                                                         beta, tc, no_p, no_q);
                } else
                    s->hevcdsp.hevc_h_loop_filter_luma(src,
                                                       s->frame->linesize[LUMA],
                                                       beta, tc, no_p, no_q);
            }
        }
    }

    if (s->sps->chroma_format_idc) {
        for (chroma = 1; chroma <= 2; chroma++) {
            int h = 1 << s->sps->hshift[chroma];
            int v = 1 << s->sps->vshift[chroma];

            // vertical filtering chroma
            for (y = y0; y < y_end; y += (8 * v)) {
                for (x = x0 ? x0 : 8 * h; x < x_end; x += (8 * h)) {
                    const int bs0 = s->vertical_bs[(x + y * s->bs_width) >> 2];
                    const int bs1 = s->vertical_bs[(x + (y + (4 * v)) * s->bs_width) >> 2];

                    if ((bs0 == 2) || (bs1 == 2)) {
                        const int qp0 = (get_qPy(s, x - 1, y)           + get_qPy(s, x, y)           + 1) >> 1;
                        const int qp1 = (get_qPy(s, x - 1, y + (4 * v)) + get_qPy(s, x, y + (4 * v)) + 1) >> 1;

                        c_tc[0] = (bs0 == 2) ? chroma_tc(s, qp0, chroma, tc_offset) : 0;
                        c_tc[1] = (bs1 == 2) ? chroma_tc(s, qp1, chroma, tc_offset) : 0;
                        src     = &s->frame->data[chroma][(y >> s->sps->vshift[chroma]) * s->frame->linesize[chroma] + ((x >> s->sps->hshift[chroma]) << s->sps->pixel_shift)];
                        if (pcmf) {
                            no_p[0] = get_pcm(s, x - 1, y);
                            no_p[1] = get_pcm(s, x - 1, y + (4 * v));
                            no_q[0] = get_pcm(s, x, y);
                            no_q[1] = get_pcm(s, x, y + (4 * v));
                            s->hevcdsp.hevc_v_loop_filter_chroma_c(src,
                                                                   s->frame->linesize[chroma],
                                                                   c_tc, no_p, no_q);
                        } else
                            s->hevcdsp.hevc_v_loop_filter_chroma(src,
                                                                 s->frame->linesize[chroma],
                                                                 c_tc, no_p, no_q);
                    }
                }

                if (!y)
                    continue;

                // horizontal filtering chroma
                tc_offset = x0 ? left_tc_offset : cur_tc_offset;
                x_end2 = x_end;
                if (x_end != s->sps->width)
                    x_end2 = x_end - 8 * h;
                for (x = x0 ? x0 - 8 * h : 0; x < x_end2; x += (8 * h)) {
                    const int bs0 = s->horizontal_bs[( x          + y * s->bs_width) >> 2];
                    const int bs1 = s->horizontal_bs[((x + 4 * h) + y * s->bs_width) >> 2];
                    if ((bs0 == 2) || (bs1 == 2)) {
                        const int qp0 = bs0 == 2 ? (get_qPy(s, x,           y - 1) + get_qPy(s, x,           y) + 1) >> 1 : 0;
                        const int qp1 = bs1 == 2 ? (get_qPy(s, x + (4 * h), y - 1) + get_qPy(s, x + (4 * h), y) + 1) >> 1 : 0;

                        c_tc[0] = bs0 == 2 ? chroma_tc(s, qp0, chroma, tc_offset)     : 0;
                        c_tc[1] = bs1 == 2 ? chroma_tc(s, qp1, chroma, cur_tc_offset) : 0;
                        src     = &s->frame->data[chroma][(y >> s->sps->vshift[1]) * s->frame->linesize[chroma] + ((x >> s->sps->hshift[1]) << s->sps->pixel_shift)];
                        if (pcmf) {
                            no_p[0] = get_pcm(s, x,           y - 1);
                            no_p[1] = get_pcm(s, x + (4 * h), y - 1);
                            no_q[0] = get_pcm(s, x,           y);
                            no_q[1] = get_pcm(s, x + (4 * h), y);
                            s->hevcdsp.hevc_h_loop_filter_chroma_c(src,
                                                                   s->frame->linesize[chroma],
                                                                   c_tc, no_p, no_q);
                        } else
                            s->hevcdsp.hevc_h_loop_filter_chroma(src,
                                                                 s->frame->linesize[chroma],
                                                                 c_tc, no_p, no_q);
                    }
                }
            }
        }
    }
}
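
/*
 * Boundary strength for an edge between two inter-coded blocks (the intra and
 * coded-residual cases are handled by the caller): returns 0 only when both
 * sides use the same number of motion vectors and the same reference
 * pictures, and their motion vectors differ by less than 4 quarter-sample
 * units (one luma sample) under some pairing of the vectors; otherwise 1.
 */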
static int boundary_strength(HEVCContext *s, MvField *curr, MvField *neigh,
                             RefPicList *neigh_refPicList)
{
    if (curr->pred_flag == PF_BI && neigh->pred_flag == PF_BI) {
        // same L0 and L1
        if (s->ref->refPicList[0].list[curr->ref_idx[0]] == neigh_refPicList[0].list[neigh->ref_idx[0]]  &&
            s->ref->refPicList[0].list[curr->ref_idx[0]] == s->ref->refPicList[1].list[curr->ref_idx[1]] &&
            neigh_refPicList[0].list[neigh->ref_idx[0]] == neigh_refPicList[1].list[neigh->ref_idx[1]]) {
            if ((FFABS(neigh->mv[0].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[0].y) >= 4 ||
                 FFABS(neigh->mv[1].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[1].y) >= 4) &&
                (FFABS(neigh->mv[1].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[0].y) >= 4 ||
                 FFABS(neigh->mv[0].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[1].y) >= 4))
                return 1;
            else
                return 0;
        } else if (neigh_refPicList[0].list[neigh->ref_idx[0]] == s->ref->refPicList[0].list[curr->ref_idx[0]] &&
                   neigh_refPicList[1].list[neigh->ref_idx[1]] == s->ref->refPicList[1].list[curr->ref_idx[1]]) {
            if (FFABS(neigh->mv[0].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[0].y) >= 4 ||
                FFABS(neigh->mv[1].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[1].y) >= 4)
                return 1;
            else
                return 0;
        } else if (neigh_refPicList[1].list[neigh->ref_idx[1]] == s->ref->refPicList[0].list[curr->ref_idx[0]] &&
                   neigh_refPicList[0].list[neigh->ref_idx[0]] == s->ref->refPicList[1].list[curr->ref_idx[1]]) {
            if (FFABS(neigh->mv[1].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[0].y) >= 4 ||
                FFABS(neigh->mv[0].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[1].y) >= 4)
                return 1;
            else
                return 0;
        } else {
            return 1;
        }
    } else if ((curr->pred_flag != PF_BI) && (neigh->pred_flag != PF_BI)) { // 1 MV
        Mv A, B;
        int ref_A, ref_B;

        if (curr->pred_flag & 1) {
            A     = curr->mv[0];
            ref_A = s->ref->refPicList[0].list[curr->ref_idx[0]];
        } else {
            A     = curr->mv[1];
            ref_A = s->ref->refPicList[1].list[curr->ref_idx[1]];
        }

        if (neigh->pred_flag & 1) {
            B     = neigh->mv[0];
            ref_B = neigh_refPicList[0].list[neigh->ref_idx[0]];
        } else {
            B     = neigh->mv[1];
            ref_B = neigh_refPicList[1].list[neigh->ref_idx[1]];
        }

        if (ref_A == ref_B) {
            if (FFABS(A.x - B.x) >= 4 || FFABS(A.y - B.y) >= 4)
                return 1;
            else
                return 0;
        } else
            return 1;
    }

    return 1;
}
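
/*
 * Fill the horizontal_bs/vertical_bs maps for one transform unit: the top and
 * left TU boundaries get bs = 2 if either side is intra, bs = 1 if either
 * side has a coded luma residual, and otherwise the motion-based strength
 * from boundary_strength(); boundaries coinciding with slice or tile edges
 * where cross-boundary filtering is disabled are skipped. If the TU is larger
 * than the minimum PU size and the block is not intra, internal PU boundaries
 * on an 8-sample grid are processed as well.
 */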
void ff_hevc_deblocking_boundary_strengths(HEVCContext *s, int x0, int y0,
                                           int log2_trafo_size)
{
    HEVCLocalContext *lc = s->HEVClc;
    MvField *tab_mvf     = s->ref->tab_mvf;
    int log2_min_pu_size = s->sps->log2_min_pu_size;
    int log2_min_tu_size = s->sps->log2_min_tb_size;
    int min_pu_width     = s->sps->min_pu_width;
    int min_tu_width     = s->sps->min_tb_width;
    int is_intra = tab_mvf[(y0 >> log2_min_pu_size) * min_pu_width +
                           (x0 >> log2_min_pu_size)].pred_flag == PF_INTRA;
    int boundary_upper, boundary_left;
    int i, j, bs;

    boundary_upper = y0 > 0 && !(y0 & 7);
    if (boundary_upper &&
        ((!s->sh.slice_loop_filter_across_slices_enabled_flag &&
          lc->boundary_flags & BOUNDARY_UPPER_SLICE &&
          (y0 % (1 << s->sps->log2_ctb_size)) == 0) ||
         (!s->pps->loop_filter_across_tiles_enabled_flag &&
          lc->boundary_flags & BOUNDARY_UPPER_TILE &&
          (y0 % (1 << s->sps->log2_ctb_size)) == 0)))
        boundary_upper = 0;

    if (boundary_upper) {
        RefPicList *rpl_top = (lc->boundary_flags & BOUNDARY_UPPER_SLICE) ?
                              ff_hevc_get_ref_list(s, s->ref, x0, y0 - 1) :
                              s->ref->refPicList;
        int yp_pu = (y0 - 1) >> log2_min_pu_size;
        int yq_pu =  y0      >> log2_min_pu_size;
        int yp_tu = (y0 - 1) >> log2_min_tu_size;
        int yq_tu =  y0      >> log2_min_tu_size;

        for (i = 0; i < (1 << log2_trafo_size); i += 4) {
            int x_pu = (x0 + i) >> log2_min_pu_size;
            int x_tu = (x0 + i) >> log2_min_tu_size;
            MvField *top  = &tab_mvf[yp_pu * min_pu_width + x_pu];
            MvField *curr = &tab_mvf[yq_pu * min_pu_width + x_pu];
            uint8_t top_cbf_luma  = s->cbf_luma[yp_tu * min_tu_width + x_tu];
            uint8_t curr_cbf_luma = s->cbf_luma[yq_tu * min_tu_width + x_tu];

            if (curr->pred_flag == PF_INTRA || top->pred_flag == PF_INTRA)
                bs = 2;
            else if (curr_cbf_luma || top_cbf_luma)
                bs = 1;
            else
                bs = boundary_strength(s, curr, top, rpl_top);
            s->horizontal_bs[((x0 + i) + y0 * s->bs_width) >> 2] = bs;
        }
    }

    // bs for vertical TU boundaries
    boundary_left = x0 > 0 && !(x0 & 7);
    if (boundary_left &&
        ((!s->sh.slice_loop_filter_across_slices_enabled_flag &&
          lc->boundary_flags & BOUNDARY_LEFT_SLICE &&
          (x0 % (1 << s->sps->log2_ctb_size)) == 0) ||
         (!s->pps->loop_filter_across_tiles_enabled_flag &&
          lc->boundary_flags & BOUNDARY_LEFT_TILE &&
          (x0 % (1 << s->sps->log2_ctb_size)) == 0)))
        boundary_left = 0;

    if (boundary_left) {
        RefPicList *rpl_left = (lc->boundary_flags & BOUNDARY_LEFT_SLICE) ?
                               ff_hevc_get_ref_list(s, s->ref, x0 - 1, y0) :
                               s->ref->refPicList;
        int xp_pu = (x0 - 1) >> log2_min_pu_size;
        int xq_pu =  x0      >> log2_min_pu_size;
        int xp_tu = (x0 - 1) >> log2_min_tu_size;
        int xq_tu =  x0      >> log2_min_tu_size;

        for (i = 0; i < (1 << log2_trafo_size); i += 4) {
            int y_pu = (y0 + i) >> log2_min_pu_size;
            int y_tu = (y0 + i) >> log2_min_tu_size;
            MvField *left = &tab_mvf[y_pu * min_pu_width + xp_pu];
            MvField *curr = &tab_mvf[y_pu * min_pu_width + xq_pu];
            uint8_t left_cbf_luma = s->cbf_luma[y_tu * min_tu_width + xp_tu];
            uint8_t curr_cbf_luma = s->cbf_luma[y_tu * min_tu_width + xq_tu];

            if (curr->pred_flag == PF_INTRA || left->pred_flag == PF_INTRA)
                bs = 2;
            else if (curr_cbf_luma || left_cbf_luma)
                bs = 1;
            else
                bs = boundary_strength(s, curr, left, rpl_left);
            s->vertical_bs[(x0 + (y0 + i) * s->bs_width) >> 2] = bs;
        }
    }

    if (log2_trafo_size > log2_min_pu_size && !is_intra) {
        RefPicList *rpl = s->ref->refPicList;

        // bs for TU internal horizontal PU boundaries
        for (j = 8; j < (1 << log2_trafo_size); j += 8) {
            int yp_pu = (y0 + j - 1) >> log2_min_pu_size;
            int yq_pu = (y0 + j)     >> log2_min_pu_size;

            for (i = 0; i < (1 << log2_trafo_size); i += 4) {
                int x_pu = (x0 + i) >> log2_min_pu_size;
                MvField *top  = &tab_mvf[yp_pu * min_pu_width + x_pu];
                MvField *curr = &tab_mvf[yq_pu * min_pu_width + x_pu];

                bs = boundary_strength(s, curr, top, rpl);
                s->horizontal_bs[((x0 + i) + (y0 + j) * s->bs_width) >> 2] = bs;
            }
        }

        // bs for TU internal vertical PU boundaries
        for (j = 0; j < (1 << log2_trafo_size); j += 4) {
            int y_pu = (y0 + j) >> log2_min_pu_size;

            for (i = 8; i < (1 << log2_trafo_size); i += 8) {
                int xp_pu = (x0 + i - 1) >> log2_min_pu_size;
                int xq_pu = (x0 + i)     >> log2_min_pu_size;
                MvField *left = &tab_mvf[y_pu * min_pu_width + xp_pu];
                MvField *curr = &tab_mvf[y_pu * min_pu_width + xq_pu];

                bs = boundary_strength(s, curr, left, rpl);
                s->vertical_bs[((x0 + i) + (y0 + j) * s->bs_width) >> 2] = bs;
            }
        }
    }
}

#undef LUMA
#undef CB
#undef CR
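
/*
 * Run the in-loop filters for the CTB at (x, y): deblocking is applied to the
 * current CTB immediately, while SAO is applied to CTBs one step behind in x
 * and y, whose neighbouring samples have all been deblocked by then. Progress
 * is reported to frame-threading consumers as rows of CTBs are completed.
 */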
void ff_hevc_hls_filter(HEVCContext *s, int x, int y, int ctb_size)
{
    int x_end = x >= s->sps->width - ctb_size;

    deblocking_filter_CTB(s, x, y);
    if (s->sps->sao_enabled) {
        int y_end = y >= s->sps->height - ctb_size;
        if (y && x)
            sao_filter_CTB(s, x - ctb_size, y - ctb_size);
        if (x && y_end)
            sao_filter_CTB(s, x - ctb_size, y);
        if (y && x_end) {
            sao_filter_CTB(s, x, y - ctb_size);
            if (s->threads_type & FF_THREAD_FRAME)
                ff_thread_report_progress(&s->ref->tf, y, 0);
        }
        if (x_end && y_end) {
            sao_filter_CTB(s, x, y);
            if (s->threads_type & FF_THREAD_FRAME)
                ff_thread_report_progress(&s->ref->tf, y + ctb_size, 0);
        }
    } else if (s->threads_type & FF_THREAD_FRAME && x_end)
        ff_thread_report_progress(&s->ref->tf, y + ctb_size - 4, 0);
}

void ff_hevc_hls_filters(HEVCContext *s, int x_ctb, int y_ctb, int ctb_size)
{
    int x_end = x_ctb >= s->sps->width  - ctb_size;
    int y_end = y_ctb >= s->sps->height - ctb_size;
    if (y_ctb && x_ctb)
        ff_hevc_hls_filter(s, x_ctb - ctb_size, y_ctb - ctb_size, ctb_size);
    if (y_ctb && x_end)
        ff_hevc_hls_filter(s, x_ctb, y_ctb - ctb_size, ctb_size);
    if (x_ctb && y_end)
        ff_hevc_hls_filter(s, x_ctb - ctb_size, y_ctb, ctb_size);
}