/*
 * Copyright (c) 2015-2016 Kieran Kunhya <kieran@kunhya.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Cineform HD video decoder
 */

#include "libavutil/attributes.h"
#include "libavutil/buffer.h"
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"

#include "avcodec.h"
#include "bytestream.h"
#include "get_bits.h"
#include "internal.h"
#include "thread.h"
#include "cfhd.h"

#define ALPHA_COMPAND_DC_OFFSET 256
#define ALPHA_COMPAND_GAIN 9400

enum CFHDParam {
    ChannelCount     =  12,
    SubbandCount     =  14,
    ImageWidth       =  20,
    ImageHeight      =  21,
    LowpassPrecision =  35,
    SubbandNumber    =  48,
    Quantization     =  53,
    ChannelNumber    =  62,
    SampleFlags      =  68,
    BitsPerComponent = 101,
    ChannelWidth     = 104,
    ChannelHeight    = 105,
    PrescaleShift    = 109,
};

static av_cold int cfhd_init(AVCodecContext *avctx)
{
    CFHDContext *s = avctx->priv_data;

    avctx->bits_per_raw_sample = 10;
    s->avctx                   = avctx;

    return ff_cfhd_init_vlcs(s);
}

static void init_plane_defaults(CFHDContext *s)
{
    s->subband_num        = 0;
    s->level              = 0;
    s->subband_num_actual = 0;
}

static void init_peak_table_defaults(CFHDContext *s)
{
    s->peak.level  = 0;
    s->peak.offset = 0;
    memset(&s->peak.base, 0, sizeof(s->peak.base));
}

static void init_frame_defaults(CFHDContext *s)
{
    s->coded_width       = 0;
    s->coded_height      = 0;
    s->cropped_height    = 0;
    s->bpc               = 10;
    s->channel_cnt       = 4;
    s->subband_cnt       = SUBBAND_COUNT;
    s->channel_num       = 0;
    s->lowpass_precision = 16;
    s->quantisation      = 1;
    s->wavelet_depth     = 3;
    s->pshift            = 1;
    s->codebook          = 0;
    s->difference_coding = 0;
    s->progressive       = 0;
    init_plane_defaults(s);
    init_peak_table_defaults(s);
}

/* TODO: merge with VLC tables or use LUT */
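/* Levels decoded with codebooks 0 and 1 are stored companded: small
 * magnitudes keep full resolution while larger ones were compressed along a
 * cubic curve. The expression below undoes that companding (cubic expansion
 * scaled by 768/255^3, sign restored with FFSIGN) and applies the
 * quantisation factor in one step. */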
static inline int dequant_and_decompand(int level, int quantisation, int codebook)
{
    if (codebook == 0 || codebook == 1) {
        int64_t abslevel = abs(level);
        if (level < 264)
            return (abslevel + ((768 * abslevel * abslevel * abslevel) / (255 * 255 * 255))) *
                   FFSIGN(level) * quantisation;
        else
            return level * quantisation;
    } else
        return level * quantisation;
}

static inline void difference_coding(int16_t *band, int width, int height)
{
    int i, j;

    for (i = 0; i < height; i++) {
        for (j = 1; j < width; j++) {
            band[j] += band[j-1];
        }
        band += width;
    }
}

static inline void peak_table(int16_t *band, Peak *peak, int length)
{
    int i;
    for (i = 0; i < length; i++)
        if (abs(band[i]) > peak->level)
            band[i] = bytestream2_get_le16(&peak->base);
}
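
/* The alpha plane is stored with a DC offset and a companded range; the loop
 * below removes ALPHA_COMPAND_DC_OFFSET, applies the fixed-point gain
 * ((x << 3) * ALPHA_COMPAND_GAIN >> 16) and clips the result to 12 bits. */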
static inline void process_alpha(int16_t *alpha, int width)
{
    int i, channel;
    for (i = 0; i < width; i++) {
        channel   = alpha[i];
        channel  -= ALPHA_COMPAND_DC_OFFSET;
        channel <<= 3;
        channel  *= ALPHA_COMPAND_GAIN;
        channel >>= 16;
        channel   = av_clip_uintp2(channel, 12);
        alpha[i]  = channel;
    }
}
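
/* Bayer frames are reconstructed in a green / red-difference / blue-difference
 * / green-delta representation. Convert each 2x2 quad back to plain RGGB:
 * the red and blue differences are re-centred around mid (2048), the two
 * greens are recovered from the green average and the green delta, and all
 * values are scaled up to 16 bits. */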
static inline void process_bayer(AVFrame *frame)
{
    const int linesize = frame->linesize[0];
    uint16_t *r  = (uint16_t *)frame->data[0];
    uint16_t *g1 = (uint16_t *)(frame->data[0] + 2);
    uint16_t *g2 = (uint16_t *)(frame->data[0] + frame->linesize[0]);
    uint16_t *b  = (uint16_t *)(frame->data[0] + frame->linesize[0] + 2);
    const int mid = 2048;

    for (int y = 0; y < frame->height >> 1; y++) {
        for (int x = 0; x < frame->width; x += 2) {
            int R, G1, G2, B;
            int g, rg, bg, gd;

            g  = r[x];
            rg = g1[x];
            bg = g2[x];
            gd = b[x];
            gd -= mid;

            R  = (rg - mid) * 2 + g;
            G1 = g + gd;
            G2 = g - gd;
            B  = (bg - mid) * 2 + g;

            R  = av_clip_uintp2(R  * 16, 16);
            G1 = av_clip_uintp2(G1 * 16, 16);
            G2 = av_clip_uintp2(G2 * 16, 16);
            B  = av_clip_uintp2(B  * 16, 16);

            r[x]  = R;
            g1[x] = G1;
            g2[x] = G2;
            b[x]  = B;
        }

        r  += linesize;
        g1 += linesize;
        g2 += linesize;
        b  += linesize;
    }
}
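
/* One step of the inverse wavelet reconstruction used by every horizontal and
 * vertical pass below: a lowpass and a highpass line are combined into twice
 * as many output samples. The first and last positions use asymmetric
 * (11, -4, 1)/8 and (5, 4, -1)/8 boundary taps, interior positions use the
 * symmetric (low[i-1] - low[i+1])/8 update, and results are optionally
 * clipped to 'clip' bits when writing directly into the output frame. */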
static inline void filter(int16_t *output, ptrdiff_t out_stride,
                          int16_t *low, ptrdiff_t low_stride,
                          int16_t *high, ptrdiff_t high_stride,
                          int len, int clip)
{
    int16_t tmp;
    int i;

    for (i = 0; i < len; i++) {
        if (i == 0) {
            tmp = (11*low[0*low_stride] - 4*low[1*low_stride] + low[2*low_stride] + 4) >> 3;
            output[(2*i+0)*out_stride] = (tmp + high[0*high_stride]) >> 1;
            if (clip)
                output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip);

            tmp = ( 5*low[0*low_stride] + 4*low[1*low_stride] - low[2*low_stride] + 4) >> 3;
            output[(2*i+1)*out_stride] = (tmp - high[0*high_stride]) >> 1;
            if (clip)
                output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip);
        } else if (i == len-1) {
            tmp = ( 5*low[i*low_stride] + 4*low[(i-1)*low_stride] - low[(i-2)*low_stride] + 4) >> 3;
            output[(2*i+0)*out_stride] = (tmp + high[i*high_stride]) >> 1;
            if (clip)
                output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip);

            tmp = (11*low[i*low_stride] - 4*low[(i-1)*low_stride] + low[(i-2)*low_stride] + 4) >> 3;
            output[(2*i+1)*out_stride] = (tmp - high[i*high_stride]) >> 1;
            if (clip)
                output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip);
        } else {
            tmp = (low[(i-1)*low_stride] - low[(i+1)*low_stride] + 4) >> 3;
            output[(2*i+0)*out_stride] = (tmp + low[i*low_stride] + high[i*high_stride]) >> 1;
            if (clip)
                output[(2*i+0)*out_stride] = av_clip_uintp2_c(output[(2*i+0)*out_stride], clip);

            tmp = (low[(i+1)*low_stride] - low[(i-1)*low_stride] + 4) >> 3;
            output[(2*i+1)*out_stride] = (tmp + low[i*low_stride] - high[i*high_stride]) >> 1;
            if (clip)
                output[(2*i+1)*out_stride] = av_clip_uintp2_c(output[(2*i+1)*out_stride], clip);
        }
    }
}

static inline void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high,
                                              int width, int linesize, int plane)
{
    int i;
    int16_t even, odd;
    for (i = 0; i < width; i++) {
        even = (low[i] - high[i]) / 2;
        odd  = (low[i] + high[i]) / 2;
        output[i]            = av_clip_uintp2(even, 10);
        output[i + linesize] = av_clip_uintp2(odd, 10);
    }
}

static void horiz_filter(int16_t *output, int16_t *low, int16_t *high,
                         int width)
{
    filter(output, 1, low, 1, high, 1, width, 0);
}

static void horiz_filter_clip(int16_t *output, int16_t *low, int16_t *high,
                              int width, int clip)
{
    filter(output, 1, low, 1, high, 1, width, clip);
}

static void horiz_filter_clip_bayer(int16_t *output, int16_t *low, int16_t *high,
                                    int width, int clip)
{
    filter(output, 2, low, 1, high, 1, width, clip);
}

static void vert_filter(int16_t *output, ptrdiff_t out_stride,
                        int16_t *low, ptrdiff_t low_stride,
                        int16_t *high, ptrdiff_t high_stride, int len)
{
    filter(output, out_stride, low, low_stride, high, high_stride, len, 0);
}

static void free_buffers(CFHDContext *s)
{
    int i, j;

    for (i = 0; i < FF_ARRAY_ELEMS(s->plane); i++) {
        av_freep(&s->plane[i].idwt_buf);
        av_freep(&s->plane[i].idwt_tmp);

        for (j = 0; j < 9; j++)
            s->plane[i].subband[j] = NULL;

        for (j = 0; j < 8; j++)
            s->plane[i].l_h[j] = NULL;
    }
    s->a_height = 0;
    s->a_width  = 0;
}
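
/* Per-plane working memory: idwt_buf holds the ten subbands (one lowpass band
 * plus three highpass bands per decomposition level), laid out so that each
 * level can be reconstructed in place on top of its lowpass band; idwt_tmp
 * holds the intermediate lines produced by the vertical filter passes. */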
static int alloc_buffers(AVCodecContext *avctx)
{
    CFHDContext *s = avctx->priv_data;
    int i, j, ret, planes;
    int chroma_x_shift, chroma_y_shift;
    unsigned k;

    if (s->coded_format == AV_PIX_FMT_BAYER_RGGB16) {
        s->coded_width  *= 2;
        s->coded_height *= 2;
    }

    if ((ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height)) < 0)
        return ret;
    avctx->pix_fmt = s->coded_format;

    if ((ret = av_pix_fmt_get_chroma_sub_sample(s->coded_format,
                                                &chroma_x_shift,
                                                &chroma_y_shift)) < 0)
        return ret;
    planes = av_pix_fmt_count_planes(s->coded_format);
    if (s->coded_format == AV_PIX_FMT_BAYER_RGGB16) {
        planes = 4;
        chroma_x_shift = 1;
        chroma_y_shift = 1;
    }

    for (i = 0; i < planes; i++) {
        int w8, h8, w4, h4, w2, h2;
        int width  = i ? avctx->width  >> chroma_x_shift : avctx->width;
        int height = i ? avctx->height >> chroma_y_shift : avctx->height;
        ptrdiff_t stride = FFALIGN(width / 8, 8) * 8;
        if (chroma_y_shift)
            height = FFALIGN(height / 8, 2) * 8;
        s->plane[i].width  = width;
        s->plane[i].height = height;
        s->plane[i].stride = stride;

        w8 = FFALIGN(s->plane[i].width / 8, 8);
        h8 = height / 8;
        w4 = w8 * 2;
        h4 = h8 * 2;
        w2 = w4 * 2;
        h2 = h4 * 2;

        s->plane[i].idwt_buf =
            av_mallocz_array(height * stride, sizeof(*s->plane[i].idwt_buf));
        s->plane[i].idwt_tmp =
            av_malloc_array(height * stride, sizeof(*s->plane[i].idwt_tmp));
        if (!s->plane[i].idwt_buf || !s->plane[i].idwt_tmp)
            return AVERROR(ENOMEM);

        s->plane[i].subband[0] = s->plane[i].idwt_buf;
        s->plane[i].subband[1] = s->plane[i].idwt_buf + 2 * w8 * h8;
        s->plane[i].subband[2] = s->plane[i].idwt_buf + 1 * w8 * h8;
        s->plane[i].subband[3] = s->plane[i].idwt_buf + 3 * w8 * h8;
        s->plane[i].subband[4] = s->plane[i].idwt_buf + 2 * w4 * h4;
        s->plane[i].subband[5] = s->plane[i].idwt_buf + 1 * w4 * h4;
        s->plane[i].subband[6] = s->plane[i].idwt_buf + 3 * w4 * h4;
        s->plane[i].subband[7] = s->plane[i].idwt_buf + 2 * w2 * h2;
        s->plane[i].subband[8] = s->plane[i].idwt_buf + 1 * w2 * h2;
        s->plane[i].subband[9] = s->plane[i].idwt_buf + 3 * w2 * h2;

        for (j = 0; j < DWT_LEVELS; j++) {
            for (k = 0; k < FF_ARRAY_ELEMS(s->plane[i].band[j]); k++) {
                s->plane[i].band[j][k].a_width  = w8 << j;
                s->plane[i].band[j][k].a_height = h8 << j;
            }
        }

        /* ll2 and ll1 commented out because they are done in-place */
        s->plane[i].l_h[0] = s->plane[i].idwt_tmp;
        s->plane[i].l_h[1] = s->plane[i].idwt_tmp + 2 * w8 * h8;
        // s->plane[i].l_h[2] = ll2;
        s->plane[i].l_h[3] = s->plane[i].idwt_tmp;
        s->plane[i].l_h[4] = s->plane[i].idwt_tmp + 2 * w4 * h4;
        // s->plane[i].l_h[5] = ll1;
        s->plane[i].l_h[6] = s->plane[i].idwt_tmp;
        s->plane[i].l_h[7] = s->plane[i].idwt_tmp + 2 * w2 * h2;
    }

    s->a_height = s->coded_height;
    s->a_width  = s->coded_width;
    s->a_format = s->coded_format;

    return 0;
}
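
/* Frame decoding proceeds in two stages: a tag/value parsing loop that fills
 * in the header fields and reads the lowpass and run-length coded highpass
 * coefficients into the subband buffers, followed by a per-plane three-level
 * inverse wavelet transform that writes into the output frame. */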
static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
                       AVPacket *avpkt)
{
    CFHDContext *s = avctx->priv_data;
    GetByteContext gb;
    ThreadFrame frame = { .f = data };
    AVFrame *pic = data;
    int ret = 0, i, j, planes, plane, got_buffer = 0;
    int16_t *coeff_data;

    s->coded_format = AV_PIX_FMT_YUV422P10;
    init_frame_defaults(s);
    planes = av_pix_fmt_count_planes(s->coded_format);

    bytestream2_init(&gb, avpkt->data, avpkt->size);

    while (bytestream2_get_bytes_left(&gb) > 4) {
        /* Bit weird but implement the tag parsing as the spec says */
        uint16_t tagu   = bytestream2_get_be16(&gb);
        int16_t tag     = (int16_t)tagu;
        int8_t tag8     = (int8_t)(tagu >> 8);
        uint16_t abstag = abs(tag);
        int8_t abs_tag8 = abs(tag8);
        uint16_t data   = bytestream2_get_be16(&gb);
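
        /* Tags are signed 16-bit identifiers followed by a 16-bit value;
         * the sign distinguishes required from optional fields, and tags
         * whose high byte falls in 0x60..0x6f carry a 24-bit payload length
         * (low byte of the tag combined with the value) rather than a plain
         * value. */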
        if (abs_tag8 >= 0x60 && abs_tag8 <= 0x6f) {
            av_log(avctx, AV_LOG_DEBUG, "large len %x\n", ((tagu & 0xff) << 16) | data);
        } else if (tag == SampleFlags) {
            av_log(avctx, AV_LOG_DEBUG, "Progressive?%"PRIu16"\n", data);
            s->progressive = data & 0x0001;
        } else if (tag == ImageWidth) {
            av_log(avctx, AV_LOG_DEBUG, "Width %"PRIu16"\n", data);
            s->coded_width = data;
        } else if (tag == ImageHeight) {
            av_log(avctx, AV_LOG_DEBUG, "Height %"PRIu16"\n", data);
            s->coded_height = data;
        } else if (tag == 101) {
            av_log(avctx, AV_LOG_DEBUG, "Bits per component: %"PRIu16"\n", data);
            if (data < 1 || data > 31) {
                av_log(avctx, AV_LOG_ERROR, "Bits per component %d is invalid\n", data);
                ret = AVERROR(EINVAL);
                break;
            }
            s->bpc = data;
        } else if (tag == ChannelCount) {
            av_log(avctx, AV_LOG_DEBUG, "Channel Count: %"PRIu16"\n", data);
            s->channel_cnt = data;
            if (data > 4) {
                av_log(avctx, AV_LOG_ERROR, "Channel Count of %"PRIu16" is unsupported\n", data);
                ret = AVERROR_PATCHWELCOME;
                break;
            }
        } else if (tag == SubbandCount) {
            av_log(avctx, AV_LOG_DEBUG, "Subband Count: %"PRIu16"\n", data);
            if (data != SUBBAND_COUNT) {
                av_log(avctx, AV_LOG_ERROR, "Subband Count of %"PRIu16" is unsupported\n", data);
                ret = AVERROR_PATCHWELCOME;
                break;
            }
        } else if (tag == ChannelNumber) {
            s->channel_num = data;
            av_log(avctx, AV_LOG_DEBUG, "Channel number %"PRIu16"\n", data);
            if (s->channel_num >= planes) {
                av_log(avctx, AV_LOG_ERROR, "Invalid channel number\n");
                ret = AVERROR(EINVAL);
                break;
            }
            init_plane_defaults(s);
        } else if (tag == SubbandNumber) {
            if (s->subband_num != 0 && data == 1)  // hack
                s->level++;
            av_log(avctx, AV_LOG_DEBUG, "Subband number %"PRIu16"\n", data);
            s->subband_num = data;
            if (s->level >= DWT_LEVELS) {
                av_log(avctx, AV_LOG_ERROR, "Invalid level\n");
                ret = AVERROR(EINVAL);
                break;
            }
            if (s->subband_num > 3) {
                av_log(avctx, AV_LOG_ERROR, "Invalid subband number\n");
                ret = AVERROR(EINVAL);
                break;
            }
        } else if (tag == 51) {
            av_log(avctx, AV_LOG_DEBUG, "Subband number actual %"PRIu16"\n", data);
            s->subband_num_actual = data;
            if (s->subband_num_actual >= 10) {
                av_log(avctx, AV_LOG_ERROR, "Invalid subband number actual\n");
                ret = AVERROR(EINVAL);
                break;
            }
        } else if (tag == LowpassPrecision)
            av_log(avctx, AV_LOG_DEBUG, "Lowpass precision bits: %"PRIu16"\n", data);
        else if (tag == Quantization) {
            s->quantisation = data;
            av_log(avctx, AV_LOG_DEBUG, "Quantisation: %"PRIu16"\n", data);
        } else if (tag == PrescaleShift) {
            s->prescale_shift[0] = (data >> 0) & 0x7;
            s->prescale_shift[1] = (data >> 3) & 0x7;
            s->prescale_shift[2] = (data >> 6) & 0x7;
            av_log(avctx, AV_LOG_DEBUG, "Prescale shift (VC-5): %x\n", data);
        } else if (tag == 27) {
            av_log(avctx, AV_LOG_DEBUG, "Lowpass width %"PRIu16"\n", data);
            if (data < 3 || data > s->plane[s->channel_num].band[0][0].a_width) {
                av_log(avctx, AV_LOG_ERROR, "Invalid lowpass width\n");
                ret = AVERROR(EINVAL);
                break;
            }
            s->plane[s->channel_num].band[0][0].width  = data;
            s->plane[s->channel_num].band[0][0].stride = data;
        } else if (tag == 28) {
            av_log(avctx, AV_LOG_DEBUG, "Lowpass height %"PRIu16"\n", data);
            if (data < 3 || data > s->plane[s->channel_num].band[0][0].a_height) {
                av_log(avctx, AV_LOG_ERROR, "Invalid lowpass height\n");
                ret = AVERROR(EINVAL);
                break;
            }
            s->plane[s->channel_num].band[0][0].height = data;
        } else if (tag == 1)
            av_log(avctx, AV_LOG_DEBUG, "Sample type? %"PRIu16"\n", data);
        else if (tag == 10) {
            if (data != 0) {
                avpriv_report_missing_feature(avctx, "Transform type of %"PRIu16, data);
                ret = AVERROR_PATCHWELCOME;
                break;
            } else if (data == 1) {
                av_log(avctx, AV_LOG_ERROR, "unsupported transform type\n");
                ret = AVERROR_PATCHWELCOME;
                break;
            }
            av_log(avctx, AV_LOG_DEBUG, "Transform-type? %"PRIu16"\n", data);
        } else if (abstag >= 0x4000 && abstag <= 0x40ff) {
            if (abstag == 0x4001)
                s->peak.level = 0;
            av_log(avctx, AV_LOG_DEBUG, "Small chunk length %d %s\n", data * 4, tag < 0 ? "optional" : "required");
            bytestream2_skipu(&gb, data * 4);
        } else if (tag == 23) {
            av_log(avctx, AV_LOG_DEBUG, "Skip frame\n");
            avpriv_report_missing_feature(avctx, "Skip frame");
            ret = AVERROR_PATCHWELCOME;
            break;
        } else if (tag == 2) {
            av_log(avctx, AV_LOG_DEBUG, "tag=2 header - skipping %i tag/value pairs\n", data);
            if (data > bytestream2_get_bytes_left(&gb) / 4) {
                av_log(avctx, AV_LOG_ERROR, "too many tag/value pairs (%d)\n", data);
                ret = AVERROR_INVALIDDATA;
                break;
            }
            for (i = 0; i < data; i++) {
                uint16_t tag2 = bytestream2_get_be16(&gb);
                uint16_t val2 = bytestream2_get_be16(&gb);
                av_log(avctx, AV_LOG_DEBUG, "Tag/Value = %x %x\n", tag2, val2);
            }
        } else if (tag == 41) {
            av_log(avctx, AV_LOG_DEBUG, "Highpass width %i channel %i level %i subband %i\n", data, s->channel_num, s->level, s->subband_num);
            if (data < 3) {
                av_log(avctx, AV_LOG_ERROR, "Invalid highpass width\n");
                ret = AVERROR(EINVAL);
                break;
            }
            s->plane[s->channel_num].band[s->level][s->subband_num].width  = data;
            s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
        } else if (tag == 42) {
            av_log(avctx, AV_LOG_DEBUG, "Highpass height %i\n", data);
            if (data < 3) {
                av_log(avctx, AV_LOG_ERROR, "Invalid highpass height\n");
                ret = AVERROR(EINVAL);
                break;
            }
            s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
        } else if (tag == 49) {
            av_log(avctx, AV_LOG_DEBUG, "Highpass width2 %i\n", data);
            if (data < 3) {
                av_log(avctx, AV_LOG_ERROR, "Invalid highpass width2\n");
                ret = AVERROR(EINVAL);
                break;
            }
            s->plane[s->channel_num].band[s->level][s->subband_num].width  = data;
            s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8);
        } else if (tag == 50) {
            av_log(avctx, AV_LOG_DEBUG, "Highpass height2 %i\n", data);
            if (data < 3) {
                av_log(avctx, AV_LOG_ERROR, "Invalid highpass height2\n");
                ret = AVERROR(EINVAL);
                break;
            }
            s->plane[s->channel_num].band[s->level][s->subband_num].height = data;
        } else if (tag == 71) {
            s->codebook = data;
            av_log(avctx, AV_LOG_DEBUG, "Codebook %i\n", s->codebook);
        } else if (tag == 72) {
            s->codebook = data & 0xf;
            s->difference_coding = (data >> 4) & 1;
            av_log(avctx, AV_LOG_DEBUG, "Other codebook? %i\n", s->codebook);
        } else if (tag == 70) {
            av_log(avctx, AV_LOG_DEBUG, "Subsampling or bit-depth flag? %i\n", data);
            if (!(data == 10 || data == 12)) {
                av_log(avctx, AV_LOG_ERROR, "Invalid bits per channel\n");
                ret = AVERROR(EINVAL);
                break;
            }
            s->bpc = data;
        } else if (tag == 84) {
            av_log(avctx, AV_LOG_DEBUG, "Sample format? %i\n", data);
            if (data == 1) {
                s->coded_format = AV_PIX_FMT_YUV422P10;
            } else if (data == 2) {
                s->coded_format = AV_PIX_FMT_BAYER_RGGB16;
            } else if (data == 3) {
                s->coded_format = AV_PIX_FMT_GBRP12;
            } else if (data == 4) {
                s->coded_format = AV_PIX_FMT_GBRAP12;
            } else {
                avpriv_report_missing_feature(avctx, "Sample format of %"PRIu16, data);
                ret = AVERROR_PATCHWELCOME;
                break;
            }
            planes = data == 2 ? 4 : av_pix_fmt_count_planes(s->coded_format);
        } else if (tag == -85) {
            av_log(avctx, AV_LOG_DEBUG, "Cropped height %"PRIu16"\n", data);
            s->cropped_height = data;
        } else if (tag == -75) {
            s->peak.offset &= ~0xffff;
            s->peak.offset |= (data & 0xffff);
            s->peak.base    = gb;
            s->peak.level   = 0;
        } else if (tag == -76) {
            s->peak.offset &= 0xffff;
            s->peak.offset |= (data & 0xffffU) << 16;
            s->peak.base    = gb;
            s->peak.level   = 0;
        } else if (tag == -74 && s->peak.offset) {
            s->peak.level = data;
            bytestream2_seek(&s->peak.base, s->peak.offset - 4, SEEK_CUR);
        } else
            av_log(avctx, AV_LOG_DEBUG, "Unknown tag %i data %x\n", tag, data);

        /* Some kind of end of header tag */
        if (tag == 4 && data == 0x1a4a && s->coded_width && s->coded_height &&
            s->coded_format != AV_PIX_FMT_NONE) {
            if (s->a_width != s->coded_width || s->a_height != s->coded_height ||
                s->a_format != s->coded_format) {
                free_buffers(s);
                if ((ret = alloc_buffers(avctx)) < 0) {
                    free_buffers(s);
                    return ret;
                }
            }
            ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height);
            if (ret < 0)
                return ret;
            if (s->cropped_height) {
                unsigned height = s->cropped_height << (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16);
                if (avctx->height < height)
                    return AVERROR_INVALIDDATA;
                avctx->height = height;
            }
            frame.f->width =
            frame.f->height = 0;

            if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
                return ret;

            s->coded_width  = 0;
            s->coded_height = 0;
            s->coded_format = AV_PIX_FMT_NONE;
            got_buffer = 1;
        }
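
        /* Once the end-of-header tag has been seen and the frame buffer
         * allocated, the remaining chunks carry coefficient payloads;
         * coeff_data below points at the destination subband buffer for the
         * channel and subband currently selected by the header fields. */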
        coeff_data = s->plane[s->channel_num].subband[s->subband_num_actual];

        /* Lowpass coefficients */
        if (tag == 4 && data == 0xf0f && s->a_width && s->a_height) {
            int lowpass_height   = s->plane[s->channel_num].band[0][0].height;
            int lowpass_width    = s->plane[s->channel_num].band[0][0].width;
            int lowpass_a_height = s->plane[s->channel_num].band[0][0].a_height;
            int lowpass_a_width  = s->plane[s->channel_num].band[0][0].a_width;

            if (!got_buffer) {
                av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
                ret = AVERROR(EINVAL);
                goto end;
            }

            if (lowpass_height > lowpass_a_height || lowpass_width > lowpass_a_width ||
                lowpass_a_width * lowpass_a_height * sizeof(int16_t) > bytestream2_get_bytes_left(&gb)) {
                av_log(avctx, AV_LOG_ERROR, "Too many lowpass coefficients\n");
                ret = AVERROR(EINVAL);
                goto end;
            }

            av_log(avctx, AV_LOG_DEBUG, "Start of lowpass coeffs component %d height:%d, width:%d\n", s->channel_num, lowpass_height, lowpass_width);
            for (i = 0; i < lowpass_height; i++) {
                for (j = 0; j < lowpass_width; j++)
                    coeff_data[j] = bytestream2_get_be16u(&gb);

                coeff_data += lowpass_width;
            }

            /* Align to mod-4 position to continue reading tags */
            bytestream2_seek(&gb, bytestream2_tell(&gb) & 3, SEEK_CUR);

            /* Copy last line of coefficients if odd height */
            if (lowpass_height & 1) {
                memcpy(&coeff_data[lowpass_height * lowpass_width],
                       &coeff_data[(lowpass_height - 1) * lowpass_width],
                       lowpass_width * sizeof(*coeff_data));
            }

            av_log(avctx, AV_LOG_DEBUG, "Lowpass coefficients %d\n", lowpass_width * lowpass_height);
        }
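
        /* Highpass coefficients: run-length/VLC coded with one of two
         * codebooks and terminated by an escape codeword. Each decoded level
         * is dequantised and decompanded before being stored; the optional
         * peak-table and difference-coding post-passes are applied after the
         * whole band has been read. */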
        if (tag == 55 && s->subband_num_actual != 255 && s->a_width && s->a_height) {
            int highpass_height   = s->plane[s->channel_num].band[s->level][s->subband_num].height;
            int highpass_width    = s->plane[s->channel_num].band[s->level][s->subband_num].width;
            int highpass_a_width  = s->plane[s->channel_num].band[s->level][s->subband_num].a_width;
            int highpass_a_height = s->plane[s->channel_num].band[s->level][s->subband_num].a_height;
            int highpass_stride   = s->plane[s->channel_num].band[s->level][s->subband_num].stride;
            int expected;
            int a_expected = highpass_a_height * highpass_a_width;
            int level, run, coeff;
            int count = 0, bytes;

            if (!got_buffer) {
                av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
                ret = AVERROR(EINVAL);
                goto end;
            }

            if (highpass_height > highpass_a_height || highpass_width > highpass_a_width || a_expected < highpass_height * (uint64_t)highpass_stride) {
                av_log(avctx, AV_LOG_ERROR, "Too many highpass coefficients\n");
                ret = AVERROR(EINVAL);
                goto end;
            }
            expected = highpass_height * highpass_stride;

            av_log(avctx, AV_LOG_DEBUG, "Start subband coeffs plane %i level %i codebook %i expected %i\n", s->channel_num, s->level, s->codebook, expected);

            init_get_bits(&s->gb, gb.buffer, bytestream2_get_bytes_left(&gb) * 8);
            {
                OPEN_READER(re, &s->gb);
                if (!s->codebook) {
                    while (1) {
                        UPDATE_CACHE(re, &s->gb);
                        GET_RL_VLC(level, run, re, &s->gb, s->table_9_rl_vlc,
                                   VLC_BITS, 3, 1);

                        /* escape */
                        if (level == 64)
                            break;

                        count += run;

                        if (count > expected)
                            break;

                        coeff = dequant_and_decompand(level, s->quantisation, 0);
                        for (i = 0; i < run; i++)
                            *coeff_data++ = coeff;
                    }
                } else {
                    while (1) {
                        UPDATE_CACHE(re, &s->gb);
                        GET_RL_VLC(level, run, re, &s->gb, s->table_18_rl_vlc,
                                   VLC_BITS, 3, 1);

                        /* escape */
                        if (level == 255 && run == 2)
                            break;

                        count += run;

                        if (count > expected)
                            break;

                        coeff = dequant_and_decompand(level, s->quantisation, s->codebook);
                        for (i = 0; i < run; i++)
                            *coeff_data++ = coeff;
                    }
                }
                CLOSE_READER(re, &s->gb);
            }

            if (count > expected) {
                av_log(avctx, AV_LOG_ERROR, "Escape codeword not found, probably corrupt data\n");
                ret = AVERROR(EINVAL);
                goto end;
            }
            if (s->peak.level)
                peak_table(coeff_data - count, &s->peak, count);
            if (s->difference_coding)
                difference_coding(s->plane[s->channel_num].subband[s->subband_num_actual], highpass_width, highpass_height);

            bytes = FFALIGN(AV_CEIL_RSHIFT(get_bits_count(&s->gb), 3), 4);
            if (bytes > bytestream2_get_bytes_left(&gb)) {
                av_log(avctx, AV_LOG_ERROR, "Bitstream overread error\n");
                ret = AVERROR(EINVAL);
                goto end;
            } else
                bytestream2_seek(&gb, bytes, SEEK_CUR);

            av_log(avctx, AV_LOG_DEBUG, "End subband coeffs %i extra %i\n", count, count - expected);
            s->codebook = 0;

            /* Copy last line of coefficients if odd height */
            if (highpass_height & 1) {
                memcpy(&coeff_data[highpass_height * highpass_stride],
                       &coeff_data[(highpass_height - 1) * highpass_stride],
                       highpass_stride * sizeof(*coeff_data));
            }
        }
    }

    if (!s->a_width || !s->a_height || s->a_format == AV_PIX_FMT_NONE ||
        s->coded_width || s->coded_height || s->coded_format != AV_PIX_FMT_NONE) {
        av_log(avctx, AV_LOG_ERROR, "Invalid dimensions\n");
        ret = AVERROR(EINVAL);
        goto end;
    }

    if (!got_buffer) {
        av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n");
        ret = AVERROR(EINVAL);
        goto end;
    }

    planes = av_pix_fmt_count_planes(avctx->pix_fmt);
    if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
        if (!s->progressive)
            return AVERROR_INVALIDDATA;
        planes = 4;
    }
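
    /* Inverse wavelet transform, one plane at a time. For each plane the
     * three decomposition levels are undone from coarsest to finest: a pair
     * of vertical filter passes into the l_h scratch lines followed by a
     * horizontal pass, with the result of each level overwriting the lowpass
     * band used by the next. The final level writes directly into the output
     * frame. */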
    for (plane = 0; plane < planes && !ret; plane++) {
        /* level 1 */
        int lowpass_height  = s->plane[plane].band[0][0].height;
        int lowpass_width   = s->plane[plane].band[0][0].width;
        int highpass_stride = s->plane[plane].band[0][1].stride;
        int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
        ptrdiff_t dst_linesize;
        int16_t *low, *high, *output, *dst;

        if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
            act_plane = 0;
            dst_linesize = pic->linesize[act_plane];
        } else {
            dst_linesize = pic->linesize[act_plane] / 2;
        }

        if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width ||
            !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width) {
            av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
            ret = AVERROR(EINVAL);
            goto end;
        }

        av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);

        low    = s->plane[plane].subband[0];
        high   = s->plane[plane].subband[2];
        output = s->plane[plane].l_h[0];
        for (i = 0; i < lowpass_width; i++) {
            vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height);
            low++;
            high++;
            output++;
        }

        low    = s->plane[plane].subband[1];
        high   = s->plane[plane].subband[3];
        output = s->plane[plane].l_h[1];
        for (i = 0; i < lowpass_width; i++) {
            // note the stride of "low" is highpass_stride
            vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height);
            low++;
            high++;
            output++;
        }

        low    = s->plane[plane].l_h[0];
        high   = s->plane[plane].l_h[1];
        output = s->plane[plane].subband[0];
        for (i = 0; i < lowpass_height * 2; i++) {
            horiz_filter(output, low, high, lowpass_width);
            low    += lowpass_width;
            high   += lowpass_width;
            output += lowpass_width * 2;
        }
        if (s->bpc == 12) {
            output = s->plane[plane].subband[0];
            for (i = 0; i < lowpass_height * 2; i++) {
                for (j = 0; j < lowpass_width * 2; j++)
                    output[j] *= 4;

                output += lowpass_width * 2;
            }
        }

        /* level 2 */
        lowpass_height  = s->plane[plane].band[1][1].height;
        lowpass_width   = s->plane[plane].band[1][1].width;
        highpass_stride = s->plane[plane].band[1][1].stride;

        if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width ||
            !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width) {
            av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
            ret = AVERROR(EINVAL);
            goto end;
        }

        av_log(avctx, AV_LOG_DEBUG, "Level 2 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);

        low    = s->plane[plane].subband[0];
        high   = s->plane[plane].subband[5];
        output = s->plane[plane].l_h[3];
        for (i = 0; i < lowpass_width; i++) {
            vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height);
            low++;
            high++;
            output++;
        }

        low    = s->plane[plane].subband[4];
        high   = s->plane[plane].subband[6];
        output = s->plane[plane].l_h[4];
        for (i = 0; i < lowpass_width; i++) {
            vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height);
            low++;
            high++;
            output++;
        }

        low    = s->plane[plane].l_h[3];
        high   = s->plane[plane].l_h[4];
        output = s->plane[plane].subband[0];
        for (i = 0; i < lowpass_height * 2; i++) {
            horiz_filter(output, low, high, lowpass_width);
            low    += lowpass_width;
            high   += lowpass_width;
            output += lowpass_width * 2;
        }

        output = s->plane[plane].subband[0];
        for (i = 0; i < lowpass_height * 2; i++) {
            for (j = 0; j < lowpass_width * 2; j++)
                output[j] *= 4;

            output += lowpass_width * 2;
        }

        /* level 3 */
        lowpass_height  = s->plane[plane].band[2][1].height;
        lowpass_width   = s->plane[plane].band[2][1].width;
        highpass_stride = s->plane[plane].band[2][1].stride;

        if (lowpass_height > s->plane[plane].band[2][1].a_height || lowpass_width > s->plane[plane].band[2][1].a_width ||
            !highpass_stride || s->plane[plane].band[2][1].width > s->plane[plane].band[2][1].a_width) {
            av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n");
            ret = AVERROR(EINVAL);
            goto end;
        }

        av_log(avctx, AV_LOG_DEBUG, "Level 3 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride);
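
        /* At the last level, progressive frames do vertical then horizontal
         * reconstruction straight into the output picture (Bayer frames are
         * written at double stride so the four channels interleave), while
         * interlaced frames do the horizontal passes first and then combine
         * the two fields with a simple (low +/- high)/2 vertical step. */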
        if (s->progressive) {
            low    = s->plane[plane].subband[0];
            high   = s->plane[plane].subband[8];
            output = s->plane[plane].l_h[6];
            for (i = 0; i < lowpass_width; i++) {
                vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height);
                low++;
                high++;
                output++;
            }

            low    = s->plane[plane].subband[7];
            high   = s->plane[plane].subband[9];
            output = s->plane[plane].l_h[7];
            for (i = 0; i < lowpass_width; i++) {
                vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height);
                low++;
                high++;
                output++;
            }

            dst = (int16_t *)pic->data[act_plane];
            if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16) {
                if (plane & 1)
                    dst++;
                if (plane > 1)
                    dst += pic->linesize[act_plane] >> 1;
            }
            low  = s->plane[plane].l_h[6];
            high = s->plane[plane].l_h[7];

            if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16 &&
                (lowpass_height * 2 > avctx->coded_height / 2 ||
                 lowpass_width  * 2 > avctx->coded_width  / 2 )
                ) {
                ret = AVERROR_INVALIDDATA;
                goto end;
            }

            for (i = 0; i < lowpass_height * 2; i++) {
                if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16)
                    horiz_filter_clip_bayer(dst, low, high, lowpass_width, s->bpc);
                else
                    horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
                if (avctx->pix_fmt == AV_PIX_FMT_GBRAP12 && act_plane == 3)
                    process_alpha(dst, lowpass_width * 2);
                low  += lowpass_width;
                high += lowpass_width;
                dst  += dst_linesize;
            }
        } else {
            av_log(avctx, AV_LOG_DEBUG, "interlaced frame ? %d", pic->interlaced_frame);
            pic->interlaced_frame = 1;
            low    = s->plane[plane].subband[0];
            high   = s->plane[plane].subband[7];
            output = s->plane[plane].l_h[6];
            for (i = 0; i < lowpass_height; i++) {
                horiz_filter(output, low, high, lowpass_width);
                low    += lowpass_width;
                high   += lowpass_width;
                output += lowpass_width * 2;
            }

            low    = s->plane[plane].subband[8];
            high   = s->plane[plane].subband[9];
            output = s->plane[plane].l_h[7];
            for (i = 0; i < lowpass_height; i++) {
                horiz_filter(output, low, high, lowpass_width);
                low    += lowpass_width;
                high   += lowpass_width;
                output += lowpass_width * 2;
            }

            dst  = (int16_t *)pic->data[act_plane];
            low  = s->plane[plane].l_h[6];
            high = s->plane[plane].l_h[7];
            for (i = 0; i < lowpass_height; i++) {
                interlaced_vertical_filter(dst, low, high, lowpass_width * 2, pic->linesize[act_plane]/2, act_plane);
                low  += lowpass_width * 2;
                high += lowpass_width * 2;
                dst  += pic->linesize[act_plane];
            }
        }
    }
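
    /* Bayer output was reconstructed as green / red-difference /
     * blue-difference / green-delta channels; convert it back to plain RGGB
     * values before returning the frame. */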
    if (avctx->pix_fmt == AV_PIX_FMT_BAYER_RGGB16)
        process_bayer(pic);
end:
    if (ret < 0)
        return ret;

    *got_frame = 1;
    return avpkt->size;
}

static av_cold int cfhd_close(AVCodecContext *avctx)
{
    CFHDContext *s = avctx->priv_data;

    free_buffers(s);

    ff_free_vlc(&s->vlc_9);
    ff_free_vlc(&s->vlc_18);

    return 0;
}

AVCodec ff_cfhd_decoder = {
    .name           = "cfhd",
    .long_name      = NULL_IF_CONFIG_SMALL("Cineform HD"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_CFHD,
    .priv_data_size = sizeof(CFHDContext),
    .init           = cfhd_init,
    .close          = cfhd_close,
    .decode         = cfhd_decode,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};