/*
 * DV encoder
 * Copyright (c) 2003 Roman Shaposhnik
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * quant_deadzone code and fixes sponsored by NOA GmbH
 */

/**
 * @file
 * DV encoder
 */

#include "config.h"

#include "libavutil/attributes.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avcodec.h"
#include "dv.h"
#include "dv_profile_internal.h"
#include "dv_tablegen.h"
#include "fdctdsp.h"
#include "internal.h"
#include "mathops.h"
#include "me_cmp.h"
#include "pixblockdsp.h"
#include "put_bits.h"

static av_cold int dvvideo_encode_init(AVCodecContext *avctx)
{
    DVVideoContext *s = avctx->priv_data;
    FDCTDSPContext fdsp;
    MECmpContext mecc;
    PixblockDSPContext pdsp;
    int ret;

    s->sys = av_dv_codec_profile2(avctx->width, avctx->height, avctx->pix_fmt, avctx->time_base);
    if (!s->sys) {
        av_log(avctx, AV_LOG_ERROR, "Found no DV profile for %ix%i %s video. "
                                    "Valid DV profiles are:\n",
               avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
        ff_dv_print_profiles(avctx, AV_LOG_ERROR);
        return AVERROR(EINVAL);
    }
    if (avctx->height > 576) {
        av_log(avctx, AV_LOG_ERROR, "DVCPRO HD encoding is not supported.\n");
        return AVERROR_PATCHWELCOME;
    }
    ret = ff_dv_init_dynamic_tables(s, s->sys);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing work tables.\n");
        return ret;
    }

    dv_vlc_map_tableinit();

    memset(&fdsp, 0, sizeof(fdsp));
    memset(&mecc, 0, sizeof(mecc));
    memset(&pdsp, 0, sizeof(pdsp));
    ff_fdctdsp_init(&fdsp, avctx);
    ff_me_cmp_init(&mecc, avctx);
    ff_pixblockdsp_init(&pdsp, avctx);
    ff_set_cmp(&mecc, mecc.ildct_cmp, avctx->ildct_cmp);

    s->get_pixels = pdsp.get_pixels;
    s->ildct_cmp  = mecc.ildct_cmp[5];

    s->fdct[0] = fdsp.fdct;
    s->fdct[1] = fdsp.fdct248;

    return ff_dvvideo_init(avctx);
}
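
/* The AC bit budget below follows from the DIF layout: each compressed
 * macroblock carries four 14-byte (112-bit) luma blocks and two 10-byte
 * (80-bit) chroma blocks, and every block spends 9 bits on DC, 1 on dct_mode
 * and 2 on the class number, leaving 100 and 68 AC bits respectively; a video
 * segment groups five such macroblocks. mb_area_start[] gives the zigzag
 * indices that delimit the four coefficient areas which can be quantized
 * independently. */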

/* bit budget for AC only in 5 MBs */
static const int vs_total_ac_bits = (100 * 4 + 68 * 2) * 5;
static const int mb_area_start[5] = { 1, 6, 21, 43, 64 };

#if CONFIG_SMALL
/* Convert run and level (where level != 0) pair into VLC, returning bit size */
static av_always_inline int dv_rl2vlc(int run, int level, int sign,
                                      uint32_t *vlc)
{
    int size;
    if (run < DV_VLC_MAP_RUN_SIZE && level < DV_VLC_MAP_LEV_SIZE) {
        *vlc = dv_vlc_map[run][level].vlc | sign;
        size = dv_vlc_map[run][level].size;
    } else {
        if (level < DV_VLC_MAP_LEV_SIZE) {
            *vlc = dv_vlc_map[0][level].vlc | sign;
            size = dv_vlc_map[0][level].size;
        } else {
            *vlc = 0xfe00 | (level << 1) | sign;
            size = 16;
        }
        if (run) {
            *vlc |= ((run < 16) ? dv_vlc_map[run - 1][0].vlc :
                                  (0x1f80 | (run - 1))) << size;
            size += (run < 16) ? dv_vlc_map[run - 1][0].size : 13;
        }
    }

    return size;
}

static av_always_inline int dv_rl2vlc_size(int run, int level)
{
    int size;

    if (run < DV_VLC_MAP_RUN_SIZE && level < DV_VLC_MAP_LEV_SIZE) {
        size = dv_vlc_map[run][level].size;
    } else {
        size = (level < DV_VLC_MAP_LEV_SIZE) ? dv_vlc_map[0][level].size : 16;
        if (run)
            size += (run < 16) ? dv_vlc_map[run - 1][0].size : 13;
    }
    return size;
}
#else
static av_always_inline int dv_rl2vlc(int run, int l, int sign, uint32_t *vlc)
{
    *vlc = dv_vlc_map[run][l].vlc | sign;
    return dv_vlc_map[run][l].size;
}

static av_always_inline int dv_rl2vlc_size(int run, int l)
{
    return dv_vlc_map[run][l].size;
}
#endif
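
/* Per-block encoding state. mb[] holds the DC value and the weighted AC
 * magnitudes in zigzag order, with the AC signs kept in sign[]; next[] chains
 * the positions of the nonzero coefficients so they can be rescanned cheaply
 * when a coarser quantizer is tried, and prev[] remembers the predecessor of
 * each coefficient area's first entry in that chain. area_q[] and bit_size[]
 * track the current quantization shift and bit cost per area, while
 * partial_bit_count/partial_bit_buffer hold a VLC that did not fit in the
 * block's own space and still has to be spilled elsewhere. */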

typedef struct EncBlockInfo {
    int      area_q[4];
    int      bit_size[4];
    int      prev[5];
    int      cur_ac;
    int      cno;
    int      dct_mode;
    int16_t  mb[64];
    uint8_t  next[64];
    uint8_t  sign[64];
    uint8_t  partial_bit_count;
    uint32_t partial_bit_buffer; /* we can't use uint16_t here */
} EncBlockInfo;
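
/* dv_encode_ac() drains a block's AC coefficients into the PutBitContext pool,
 * advancing from pb_pool towards pb_end whenever the current context runs out
 * of room. If even the last context cannot take the pending VLC, the leftover
 * bits are parked in partial_bit_count/partial_bit_buffer so a later pass can
 * place them elsewhere in the macroblock or segment. */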

static av_always_inline PutBitContext *dv_encode_ac(EncBlockInfo *bi,
                                                    PutBitContext *pb_pool,
                                                    PutBitContext *pb_end)
{
    int prev, bits_left;
    PutBitContext *pb = pb_pool;
    int size = bi->partial_bit_count;
    uint32_t vlc = bi->partial_bit_buffer;

    bi->partial_bit_count  =
    bi->partial_bit_buffer = 0;
    for (;;) {
        /* Find suitable storage space */
        for (; size > (bits_left = put_bits_left(pb)); pb++) {
            if (bits_left) {
                size -= bits_left;
                put_bits(pb, bits_left, vlc >> size);
                vlc = av_mod_uintp2(vlc, size);
            }
            if (pb + 1 >= pb_end) {
                bi->partial_bit_count  = size;
                bi->partial_bit_buffer = vlc;
                return pb;
            }
        }

        /* Store VLC */
        put_bits(pb, size, vlc);

        if (bi->cur_ac >= 64)
            break;

        /* Construct the next VLC */
        prev       = bi->cur_ac;
        bi->cur_ac = bi->next[prev];
        if (bi->cur_ac < 64) {
            size = dv_rl2vlc(bi->cur_ac - prev - 1, bi->mb[bi->cur_ac],
                             bi->sign[bi->cur_ac], &vlc);
        } else {
            size = 4;
            vlc  = 6; /* End Of Block stamp */
        }
    }
    return pb;
}

static av_always_inline int dv_guess_dct_mode(DVVideoContext *s, uint8_t *data,
                                              ptrdiff_t linesize)
{
    if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
        int ps = s->ildct_cmp(NULL, data, NULL, linesize, 8) - 400;
        if (ps > 0) {
            int is = s->ildct_cmp(NULL, data, NULL, linesize << 1, 4) +
                     s->ildct_cmp(NULL, data + linesize, NULL, linesize << 1, 4);
            return ps > is;
        }
    }
    return 0;
}
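
/* Per-coefficient weight tables in dv_weight_bits fixed-point precision,
 * indexed in zigzag order: dv_weight_88 is used for 8-8 (frame) DCT blocks
 * and dv_weight_248 for 2-4-8 (interlaced) blocks. The scaling is undone in
 * dv_init_enc_block(); see the rounding/shift comment there. */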

static const int dv_weight_bits = 18;
static const int dv_weight_88[64] = {
    131072, 257107, 257107, 242189, 252167, 242189, 235923, 237536,
    237536, 235923, 229376, 231390, 223754, 231390, 229376, 222935,
    224969, 217965, 217965, 224969, 222935, 200636, 218652, 211916,
    212325, 211916, 218652, 200636, 188995, 196781, 205965, 206433,
    206433, 205965, 196781, 188995, 185364, 185364, 200636, 200704,
    200636, 185364, 185364, 174609, 180568, 195068, 195068, 180568,
    174609, 170091, 175557, 189591, 175557, 170091, 165371, 170627,
    170627, 165371, 160727, 153560, 160727, 144651, 144651, 136258,
};
static const int dv_weight_248[64] = {
    131072, 262144, 257107, 257107, 242189, 242189, 242189, 242189,
    237536, 237536, 229376, 229376, 200636, 200636, 224973, 224973,
    223754, 223754, 235923, 235923, 229376, 229376, 217965, 217965,
    211916, 211916, 196781, 196781, 185364, 185364, 206433, 206433,
    211916, 211916, 222935, 222935, 200636, 200636, 205964, 205964,
    200704, 200704, 180568, 180568, 175557, 175557, 195068, 195068,
    185364, 185364, 188995, 188995, 174606, 174606, 175557, 175557,
    170627, 170627, 153560, 153560, 165371, 165371, 144651, 144651,
};
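
/* dv_init_enc_block() runs the forward DCT on one 8x8 block (or zeroes it for
 * the dummy blocks of the 4:2:2 layout), applies the weight table with a dead
 * zone, picks the class number from the largest AC level (plus the chroma
 * bias), links the nonzero coefficients together, and returns the number of
 * AC bits the block needs before any requantization. */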

static av_always_inline int dv_init_enc_block(EncBlockInfo *bi, uint8_t *data,
                                              ptrdiff_t linesize,
                                              DVVideoContext *s, int bias)
{
    const int *weight;
    const uint8_t *zigzag_scan;
    LOCAL_ALIGNED_16(int16_t, blk, [64]);
    int i, area;
    /* We offer two different methods for class number assignment: the
     * method suggested in SMPTE 314M Table 22, and an improved
     * method. The SMPTE method is very conservative; it assigns class
     * 3 (i.e. severe quantization) to any block where the largest AC
     * component is greater than 36. FFmpeg's DV encoder tracks AC bit
     * consumption precisely, so there is no need to bias most blocks
     * towards strongly lossy compression. Instead, we assign class 2
     * to most blocks, and use class 3 only when strictly necessary
     * (for blocks whose largest AC component exceeds 255). */
#if 0 /* SMPTE spec method */
    static const int classes[] = { 12, 24, 36, 0xffff };
#else /* improved FFmpeg method */
    static const int classes[] = { -1, -1, 255, 0xffff };
#endif
    int max  = classes[0];
    int prev = 0;
    const unsigned deadzone = s->quant_deadzone;
    const unsigned threshold = 2 * deadzone;

    av_assert2((((int) blk) & 15) == 0);

    bi->area_q[0]          =
    bi->area_q[1]          =
    bi->area_q[2]          =
    bi->area_q[3]          = 0;
    bi->partial_bit_count  = 0;
    bi->partial_bit_buffer = 0;
    bi->cur_ac             = 0;
    if (data) {
        bi->dct_mode = dv_guess_dct_mode(s, data, linesize);
        s->get_pixels(blk, data, linesize);
        s->fdct[bi->dct_mode](blk);
    } else {
        /* We rely on the fact that encoding all zeros leads to an immediate
         * EOB, which is precisely what the spec calls for in the "dummy"
         * blocks. */
        memset(blk, 0, 64 * sizeof(*blk));
        bi->dct_mode = 0;
    }
    bi->mb[0] = blk[0];

    zigzag_scan = bi->dct_mode ? ff_dv_zigzag248_direct : ff_zigzag_direct;
    weight      = bi->dct_mode ? dv_weight_248 : dv_weight_88;

    for (area = 0; area < 4; area++) {
        bi->prev[area]     = prev;
        bi->bit_size[area] = 1; // 4 areas 4 bits for EOB :)
        for (i = mb_area_start[area]; i < mb_area_start[area + 1]; i++) {
            int level = blk[zigzag_scan[i]];

            if (level + deadzone > threshold) {
                bi->sign[i] = (level >> 31) & 1;

                /* Weight it and shift down into range, adding for rounding.
                 * The extra division by a factor of 2^4 reverses the 8x
                 * expansion of the DCT AND the 2x doubling of the weights. */
                level = (FFABS(level) * weight[i] + (1 << (dv_weight_bits + 3))) >>
                        (dv_weight_bits + 4);
                if (!level)
                    continue;
                bi->mb[i] = level;
                if (level > max)
                    max = level;
                bi->bit_size[area] += dv_rl2vlc_size(i - prev - 1, level);
                bi->next[prev]      = i;
                prev                = i;
            }
        }
    }
    bi->next[prev] = i;
    for (bi->cno = 0; max > classes[bi->cno]; bi->cno++)
        ;

    bi->cno += bias;

    if (bi->cno >= 3) {
        bi->cno = 3;
        prev    = 0;
        i       = bi->next[prev];
        for (area = 0; area < 4; area++) {
            bi->prev[area]     = prev;
            bi->bit_size[area] = 1; // 4 areas 4 bits for EOB :)
            for (; i < mb_area_start[area + 1]; i = bi->next[i]) {
                bi->mb[i] >>= 1;

                if (bi->mb[i]) {
                    bi->bit_size[area] += dv_rl2vlc_size(i - prev - 1, bi->mb[i]);
                    bi->next[prev]      = i;
                    prev                = i;
                }
            }
        }
        bi->next[prev] = i;
    }

    return bi->bit_size[0] + bi->bit_size[1] +
           bi->bit_size[2] + bi->bit_size[3];
}
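
/* dv_guess_qnos() is the per-segment rate control: starting from quantizer 15
 * (no requantization) it repeatedly steps each macroblock's qno down, halving
 * the coefficients of every area whose quantization shift changes and
 * updating the bit counts, until the five macroblocks fit in
 * vs_total_ac_bits. If the segment still does not fit at qno 0, the smallest
 * remaining coefficients are dropped altogether. */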

static inline void dv_guess_qnos(EncBlockInfo *blks, int *qnos)
{
    int size[5];
    int i, j, k, a, prev, a2;
    EncBlockInfo *b;

    size[0] =
    size[1] =
    size[2] =
    size[3] =
    size[4] = 1 << 24;
    do {
        b = blks;
        for (i = 0; i < 5; i++) {
            if (!qnos[i])
                continue;

            qnos[i]--;
            size[i] = 0;
            for (j = 0; j < 6; j++, b++) {
                for (a = 0; a < 4; a++) {
                    if (b->area_q[a] != ff_dv_quant_shifts[qnos[i] + ff_dv_quant_offset[b->cno]][a]) {
                        b->bit_size[a] = 1; // 4 areas 4 bits for EOB :)
                        b->area_q[a]++;
                        prev = b->prev[a];
                        av_assert2(b->next[prev] >= mb_area_start[a + 1] || b->mb[prev]);
                        for (k = b->next[prev]; k < mb_area_start[a + 1]; k = b->next[k]) {
                            b->mb[k] >>= 1;
                            if (b->mb[k]) {
                                b->bit_size[a] += dv_rl2vlc_size(k - prev - 1, b->mb[k]);
                                prev            = k;
                            } else {
                                if (b->next[k] >= mb_area_start[a + 1] && b->next[k] < 64) {
                                    for (a2 = a + 1; b->next[k] >= mb_area_start[a2 + 1]; a2++)
                                        b->prev[a2] = prev;
                                    av_assert2(a2 < 4);
                                    av_assert2(b->mb[b->next[k]]);
                                    b->bit_size[a2] += dv_rl2vlc_size(b->next[k] - prev - 1, b->mb[b->next[k]]) -
                                                       dv_rl2vlc_size(b->next[k] - k - 1, b->mb[b->next[k]]);
                                    av_assert2(b->prev[a2] == k && (a2 + 1 >= 4 || b->prev[a2 + 1] != k));
                                    b->prev[a2] = prev;
                                }
                                b->next[prev] = b->next[k];
                            }
                        }
                        b->prev[a + 1] = prev;
                    }
                    size[i] += b->bit_size[a];
                }
            }
            if (vs_total_ac_bits >= size[0] + size[1] + size[2] + size[3] + size[4])
                return;
        }
    } while (qnos[0] | qnos[1] | qnos[2] | qnos[3] | qnos[4]);

    for (a = 2; a == 2 || vs_total_ac_bits < size[0]; a += a) {
        b       = blks;
        size[0] = 5 * 6 * 4; // EOB
        for (j = 0; j < 6 * 5; j++, b++) {
            prev = b->prev[0];
            for (k = b->next[prev]; k < 64; k = b->next[k]) {
                if (b->mb[k] < a && b->mb[k] > -a) {
                    b->next[prev] = b->next[k];
                } else {
                    size[0] += dv_rl2vlc_size(k - prev - 1, b->mb[k]);
                    prev      = k;
                }
            }
        }
    }
}
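
/* dv_encode_video_segment() compresses one video segment (five macroblocks).
 * All blocks are transformed and sized first, dv_guess_qnos() trims the
 * segment if it would overflow, and the AC data is then written in three
 * passes: into each block's own space, then into free space within the same
 * macroblock, and finally into free space anywhere in the segment. */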

static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
{
    DVVideoContext *s = avctx->priv_data;
    DVwork_chunk *work_chunk = arg;
    int mb_index, i, j;
    int mb_x, mb_y, c_offset;
    ptrdiff_t linesize, y_stride;
    uint8_t *y_ptr;
    uint8_t *dif;
    LOCAL_ALIGNED_8(uint8_t, scratch, [128]);
    EncBlockInfo enc_blks[5 * DV_MAX_BPM];
    PutBitContext pbs[5 * DV_MAX_BPM];
    PutBitContext *pb;
    EncBlockInfo *enc_blk;
    int vs_bit_size = 0;
    int qnos[5] = { 15, 15, 15, 15, 15 }; /* No quantization */
    int *qnosp = &qnos[0];

    dif = &s->buf[work_chunk->buf_offset * 80];
    enc_blk = &enc_blks[0];
    for (mb_index = 0; mb_index < 5; mb_index++) {
        dv_calculate_mb_xy(s, work_chunk, mb_index, &mb_x, &mb_y);

        /* initializing luminance blocks */
        if ((s->sys->pix_fmt == AV_PIX_FMT_YUV420P)                      ||
            (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) ||
            (s->sys->height >= 720 && mb_y != 134)) {
            y_stride = s->frame->linesize[0] << 3;
        } else {
            y_stride = 16;
        }
        y_ptr    = s->frame->data[0] +
                   ((mb_y * s->frame->linesize[0] + mb_x) << 3);
        linesize = s->frame->linesize[0];

        if (s->sys->video_stype == 4) { /* SD 422 */
            vs_bit_size +=
                dv_init_enc_block(enc_blk + 0, y_ptr, linesize, s, 0) +
                dv_init_enc_block(enc_blk + 1, NULL, linesize, s, 0) +
                dv_init_enc_block(enc_blk + 2, y_ptr + 8, linesize, s, 0) +
                dv_init_enc_block(enc_blk + 3, NULL, linesize, s, 0);
        } else {
            vs_bit_size +=
                dv_init_enc_block(enc_blk + 0, y_ptr, linesize, s, 0) +
                dv_init_enc_block(enc_blk + 1, y_ptr + 8, linesize, s, 0) +
                dv_init_enc_block(enc_blk + 2, y_ptr + y_stride, linesize, s, 0) +
                dv_init_enc_block(enc_blk + 3, y_ptr + 8 + y_stride, linesize, s, 0);
        }
        enc_blk += 4;

        /* initializing chrominance blocks */
        c_offset = (((mb_y >> (s->sys->pix_fmt == AV_PIX_FMT_YUV420P)) * s->frame->linesize[1] +
                     (mb_x >> ((s->sys->pix_fmt == AV_PIX_FMT_YUV411P) ? 2 : 1))) << 3);
        for (j = 2; j; j--) {
            uint8_t *c_ptr = s->frame->data[j] + c_offset;
            linesize = s->frame->linesize[j];
            y_stride = (mb_y == 134) ? 8 : (s->frame->linesize[j] << 3);
            if (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) {
                uint8_t *d;
                uint8_t *b = scratch;
                for (i = 0; i < 8; i++) {
                    d      = c_ptr + (linesize << 3);
                    b[0]   = c_ptr[0];
                    b[1]   = c_ptr[1];
                    b[2]   = c_ptr[2];
                    b[3]   = c_ptr[3];
                    b[4]   = d[0];
                    b[5]   = d[1];
                    b[6]   = d[2];
                    b[7]   = d[3];
                    c_ptr += linesize;
                    b     += 16;
                }
                c_ptr    = scratch;
                linesize = 16;
            }

            vs_bit_size += dv_init_enc_block(enc_blk++, c_ptr, linesize, s, 1);
            if (s->sys->bpm == 8)
                vs_bit_size += dv_init_enc_block(enc_blk++, c_ptr + y_stride,
                                                 linesize, s, 1);
        }
    }

    if (vs_total_ac_bits < vs_bit_size)
        dv_guess_qnos(&enc_blks[0], qnosp);

    /* DIF encoding process */
    for (j = 0; j < 5 * s->sys->bpm;) {
        int start_mb = j;

        dif[3] = *qnosp++;
        dif   += 4;

        /* First pass over individual cells only */
        for (i = 0; i < s->sys->bpm; i++, j++) {
            int sz = s->sys->block_sizes[i] >> 3;

            init_put_bits(&pbs[j], dif, sz);
            put_sbits(&pbs[j], 9, ((enc_blks[j].mb[0] >> 3) - 1024 + 2) >> 2);
            put_bits(&pbs[j], 1, enc_blks[j].dct_mode);
            put_bits(&pbs[j], 2, enc_blks[j].cno);

            dv_encode_ac(&enc_blks[j], &pbs[j], &pbs[j + 1]);
            dif += sz;
        }

        /* Second pass over each MB space */
        pb = &pbs[start_mb];
        for (i = 0; i < s->sys->bpm; i++)
            if (enc_blks[start_mb + i].partial_bit_count)
                pb = dv_encode_ac(&enc_blks[start_mb + i], pb,
                                  &pbs[start_mb + s->sys->bpm]);
    }

    /* Third and final pass over the whole video segment space */
    pb = &pbs[0];
    for (j = 0; j < 5 * s->sys->bpm; j++) {
        if (enc_blks[j].partial_bit_count)
            pb = dv_encode_ac(&enc_blks[j], pb, &pbs[s->sys->bpm * 5]);
        if (enc_blks[j].partial_bit_count)
            av_log(avctx, AV_LOG_ERROR, "ac bitstream overflow\n");
    }

    for (j = 0; j < 5 * s->sys->bpm; j++) {
        int pos;
        int size = pbs[j].size_in_bits >> 3;
        flush_put_bits(&pbs[j]);
        pos = put_bits_count(&pbs[j]) >> 3;
        if (pos > size) {
            av_log(avctx, AV_LOG_ERROR,
                   "bitstream written beyond buffer size\n");
            return -1;
        }
        memset(pbs[j].buf + pos, 0xff, size - pos);
    }

    return 0;
}

static inline int dv_write_pack(enum dv_pack_type pack_id, DVVideoContext *c,
                                uint8_t *buf)
{
    /*
     * Here's what SMPTE314M says about these two:
     *    (page 6) APTn, AP1n, AP2n, AP3n: These data shall be identical
     *             as track application IDs (APTn = 001, AP1n =
     *             001, AP2n = 001, AP3n = 001), if the source signal
     *             comes from a digital VCR. If the signal source is
     *             unknown, all bits for these data shall be set to 1.
     *    (page 12) STYPE: STYPE defines a signal type of video signal
     *                     00000b = 4:1:1 compression
     *                     00100b = 4:2:2 compression
     *                     XXXXXX = Reserved
     * Now, I've got two problems with these statements:
     *   1. it looks like APT == 111b should be a safe bet, but it isn't.
     *      It seems that for PAL as defined in IEC 61834 we have to set
     *      APT to 000 and for SMPTE314M to 001.
     *   2. It is not at all clear what STYPE is used for 4:2:0 PAL
     *      compression scheme (if any).
     */
    int apt = (c->sys->pix_fmt == AV_PIX_FMT_YUV420P ? 0 : 1);
    int fs  = c->frame->top_field_first ? 0x00 : 0x40;
    uint8_t aspect = 0;

    if ((int) (av_q2d(c->avctx->sample_aspect_ratio) *
               c->avctx->width / c->avctx->height * 10) >= 17) /* 16:9 */
        aspect = 0x02;

    buf[0] = (uint8_t) pack_id;
    switch (pack_id) {
    case dv_header525: /* I can't imagine why these two weren't defined as real */
    case dv_header625: /* packs in SMPTE314M -- they definitely look like ones */
        buf[1] =  0xf8       | /* reserved -- always 1 */
                 (apt & 0x07); /* APT: Track application ID */
        buf[2] = (0    << 7) | /* TF1: audio data is 0 - valid; 1 - invalid */
                 (0x0f << 3) | /* reserved -- always 1 */
                 (apt & 0x07); /* AP1: Audio application ID */
        buf[3] = (0    << 7) | /* TF2: video data is 0 - valid; 1 - invalid */
                 (0x0f << 3) | /* reserved -- always 1 */
                 (apt & 0x07); /* AP2: Video application ID */
        buf[4] = (0    << 7) | /* TF3: subcode(SSYB) is 0 - valid; 1 - invalid */
                 (0x0f << 3) | /* reserved -- always 1 */
                 (apt & 0x07); /* AP3: Subcode application ID */
        break;
    case dv_video_source:
        buf[1] = 0xff;      /* reserved -- always 1 */
        buf[2] = (1 << 7) | /* B/W: 0 - b/w, 1 - color */
                 (1 << 6) | /* following CLF is valid - 0, invalid - 1 */
                 (3 << 4) | /* CLF: color frames ID (see ITU-R BT.470-4) */
                 0xf;       /* reserved -- always 1 */
        buf[3] = (3 << 6)           | /* reserved -- always 1 */
                 (c->sys->dsf << 5) | /* system: 60fields/50fields */
                 c->sys->video_stype; /* signal type video compression */
        buf[4] = 0xff;      /* VISC: 0xff -- no information */
        break;
    case dv_video_control:
        buf[1] = (0 << 6) | /* Copy generation management (CGMS) 0 -- free */
                 0x3f;      /* reserved -- always 1 */
        buf[2] = 0xc8 |     /* reserved -- always b11001xxx */
                 aspect;
        buf[3] = (1 << 7) | /* frame/field flag 1 -- frame, 0 -- field */
                 fs       | /* first/second field flag 0 -- field 2, 1 -- field 1 */
                 (1 << 5) | /* frame change flag 0 -- same picture as before, 1 -- different */
                 (1 << 4) | /* 1 - interlaced, 0 - noninterlaced */
                 0xc;       /* reserved -- always b1100 */
        buf[4] = 0xff;      /* reserved -- always 1 */
        break;
    default:
        buf[1] =
        buf[2] =
        buf[3] =
        buf[4] = 0xff;
    }
    return 5;
}

static inline int dv_write_dif_id(enum dv_section_type t, uint8_t chan_num,
                                  uint8_t seq_num, uint8_t dif_num,
                                  uint8_t *buf)
{
    buf[0] = (uint8_t) t;      /* Section type */
    buf[1] = (seq_num  << 4) | /* DIF seq number 0-9 for 525/60; 0-11 for 625/50 */
             (chan_num << 3) | /* FSC: for 50Mb/s 0 - first channel; 1 - second */
             7;                /* reserved -- always 1 */
    buf[2] = dif_num;          /* DIF block number Video: 0-134, Audio: 0-8 */
    return 3;
}

static inline int dv_write_ssyb_id(uint8_t syb_num, uint8_t fr, uint8_t *buf)
{
    if (syb_num == 0 || syb_num == 6) {
        buf[0] = (fr << 7) | /* FR ID 1 - first half of each channel; 0 - second */
                 (0  << 4) | /* AP3 (Subcode application ID) */
                 0x0f;       /* reserved -- always 1 */
    } else if (syb_num == 11) {
        buf[0] = (fr << 7) | /* FR ID 1 - first half of each channel; 0 - second */
                 0x7f;       /* reserved -- always 1 */
    } else {
        buf[0] = (fr << 7) | /* FR ID 1 - first half of each channel; 0 - second */
                 (0  << 4) | /* APT (Track application ID) */
                 0x0f;       /* reserved -- always 1 */
    }
    buf[1] = 0xf0 |            /* reserved -- always 1 */
             (syb_num & 0x0f); /* SSYB number 0 - 11 */
    buf[2] = 0xff;             /* reserved -- always 1 */
    return 3;
}

static void dv_format_frame(DVVideoContext *c, uint8_t *buf)
{
    int chan, i, j, k;

    for (chan = 0; chan < c->sys->n_difchan; chan++) {
        for (i = 0; i < c->sys->difseg_size; i++) {
            memset(buf, 0xff, 80 * 6); /* first 6 DIF blocks are for control data */

            /* DV header: 1DIF */
            buf += dv_write_dif_id(dv_sect_header, chan, i, 0, buf);
            buf += dv_write_pack((c->sys->dsf ? dv_header625 : dv_header525),
                                 c, buf);
            buf += 72; /* unused bytes */

            /* DV subcode: 2DIFs */
            for (j = 0; j < 2; j++) {
                buf += dv_write_dif_id(dv_sect_subcode, chan, i, j, buf);
                for (k = 0; k < 6; k++)
                    buf += dv_write_ssyb_id(k, (i < c->sys->difseg_size / 2), buf) + 5;
                buf += 29; /* unused bytes */
            }

            /* DV VAUX: 3DIFS */
            for (j = 0; j < 3; j++) {
                buf += dv_write_dif_id(dv_sect_vaux, chan, i, j, buf);
                buf += dv_write_pack(dv_video_source,  c, buf);
                buf += dv_write_pack(dv_video_control, c, buf);
                buf += 7 * 5;
                buf += dv_write_pack(dv_video_source,  c, buf);
                buf += dv_write_pack(dv_video_control, c, buf);
                buf += 4 * 5 + 2; /* unused bytes */
            }

            /* DV Audio/Video: 135 Video DIFs + 9 Audio DIFs */
            for (j = 0; j < 135; j++) {
                if (j % 15 == 0) {
                    memset(buf, 0xff, 80);
                    buf += dv_write_dif_id(dv_sect_audio, chan, i, j / 15, buf);
                    buf += 77; /* audio control & shuffled PCM audio */
                }
                buf += dv_write_dif_id(dv_sect_video, chan, i, j, buf);
                buf += 77; /* 1 video macroblock: 1 byte control
                            *                     4 * 14 bytes Y 8x8 data
                            *                     10 bytes Cr 8x8 data
                            *                     10 bytes Cb 8x8 data */
            }
        }
    }
}

static int dvvideo_encode_frame(AVCodecContext *c, AVPacket *pkt,
                                const AVFrame *frame, int *got_packet)
{
    DVVideoContext *s = c->priv_data;
    int ret;

    if ((ret = ff_alloc_packet2(c, pkt, s->sys->frame_size, 0)) < 0)
        return ret;

    c->pix_fmt = s->sys->pix_fmt;
    s->frame   = frame;
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    c->coded_frame->key_frame = 1;
    c->coded_frame->pict_type = AV_PICTURE_TYPE_I;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    s->buf = pkt->data;
    c->execute(c, dv_encode_video_segment, s->work_chunks, NULL,
               dv_work_pool_size(s->sys), sizeof(DVwork_chunk));

    emms_c();

    dv_format_frame(s, pkt->data);

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}

#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
#define OFFSET(x) offsetof(DVVideoContext, x)
static const AVOption dv_options[] = {
    { "quant_deadzone", "Quantizer dead zone", OFFSET(quant_deadzone), AV_OPT_TYPE_INT, { .i64 = 7 }, 0, 1024, VE },
    { NULL },
};

static const AVClass dvvideo_encode_class = {
    .class_name = "dvvideo encoder",
    .item_name  = av_default_item_name,
    .option     = dv_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_dvvideo_encoder = {
    .name           = "dvvideo",
    .long_name      = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_DVVIDEO,
    .priv_data_size = sizeof(DVVideoContext),
    .init           = dvvideo_encode_init,
    .encode2        = dvvideo_encode_frame,
    .capabilities   = AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
    .pix_fmts       = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
    },
    .priv_class     = &dvvideo_encode_class,
};