/*
 * DV decoder
 * Copyright (c) 2002 Fabrice Bellard.
 * Copyright (c) 2004 Roman Shaposhnik.
 *
 * DV encoder
 * Copyright (c) 2003 Roman Shaposhnik.
 *
 * 50 Mbps (DVCPRO50) support
 * Copyright (c) 2006 Daniel Maas <dmaas@maasdigital.com>
 *
 * 100 Mbps (DVCPRO HD) support
 * Initial code by Daniel Maas <dmaas@maasdigital.com> (funded by BBC R&D)
 * Final code by Roman Shaposhnik
 *
 * Many thanks to Dan Dennedy <dan@dennedy.org> for providing a wealth
 * of DV technical info.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file dv.c
 * DV codec.
 */
#define ALT_BITSTREAM_READER
#include "avcodec.h"
#include "dsputil.h"
#include "bitstream.h"
#include "simple_idct.h"
#include "dvdata.h"

//#undef NDEBUG
//#include <assert.h>
typedef struct DVVideoContext {
    const DVprofile* sys;
    AVFrame picture;
    AVCodecContext *avctx;
    uint8_t *buf;

    uint8_t dv_zigzag[2][64];
    uint32_t dv_idct_factor[2][2][22][64];
    uint32_t dv100_idct_factor[4][4][16][64];

    void (*get_pixels)(DCTELEM *block, const uint8_t *pixels, int line_size);
    void (*fdct[2])(DCTELEM *block);
    void (*idct_put[2])(uint8_t *dest, int line_size, DCTELEM *block);
} DVVideoContext;
/* MultiThreading - dv_anchor applies to the entire DV codec, not just the avcontext */
/* one element is needed for each video segment in a DV frame */
/* at most there are 4 DIF channels * 12 DIF sequences * 27 video segments (1080i50) */
#define DV_ANCHOR_SIZE (4*12*27)
static void* dv_anchor[DV_ANCHOR_SIZE];
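/* Note: DV_ANCHOR_SIZE works out to 4*12*27 = 1296 entries. Each slot simply
   stores its own index as a void* (see dvvideo_init()); avctx->execute() then
   hands that pointer to dv_decode_mt()/dv_encode_mt(), which cast it back to a
   segment number. In other words, dv_anchor appears to exist only to pass an
   integer slice id through the void* job argument. */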
#define TEX_VLC_BITS 9

#ifdef DV_CODEC_TINY_TARGET
#define DV_VLC_MAP_RUN_SIZE 15
#define DV_VLC_MAP_LEV_SIZE 23
#else
#define DV_VLC_MAP_RUN_SIZE 64
#define DV_VLC_MAP_LEV_SIZE 512 //FIXME sign was removed so this should be /2 but needs check
#endif

/* XXX: also include quantization */
static RL_VLC_ELEM dv_rl_vlc[1184];

/* VLC encoding lookup table */
static struct dv_vlc_pair {
    uint32_t vlc;
    uint8_t  size;
} dv_vlc_map[DV_VLC_MAP_RUN_SIZE][DV_VLC_MAP_LEV_SIZE];
static void dv_build_unquantize_tables(DVVideoContext *s, uint8_t* perm)
{
    int i, q, a;

    /* NOTE: max left shift is 6 */
    for(q = 0; q < 22; q++) {
        /* 88DCT */
        i = 1;
        for(a = 0; a < 4; a++) {
            for(; i < dv_quant_areas[a]; i++) {
                /* 88 table */
                s->dv_idct_factor[0][0][q][i] = dv_iweight_88[i]<<(dv_quant_shifts[q][a] + 1);
                s->dv_idct_factor[1][0][q][i] = s->dv_idct_factor[0][0][q][i]<<1;

                /* 248 table */
                s->dv_idct_factor[0][1][q][i] = dv_iweight_248[i]<<(dv_quant_shifts[q][a] + 1);
                s->dv_idct_factor[1][1][q][i] = s->dv_idct_factor[0][1][q][i]<<1;
            }
        }
    }

    for(a = 0; a < 4; a++) {
        for(q = 0; q < 16; q++) {
            for(i = 1; i < 64; i++) {
                s->dv100_idct_factor[0][a][q][i] = (dv100_qstep[q]<<(a+9))*dv_iweight_1080_y[i];
                s->dv100_idct_factor[1][a][q][i] = (dv100_qstep[q]<<(a+9))*dv_iweight_1080_c[i];
                s->dv100_idct_factor[2][a][q][i] = (dv100_qstep[q]<<(a+9))*dv_iweight_720_y[i];
                s->dv100_idct_factor[3][a][q][i] = (dv100_qstep[q]<<(a+9))*dv_iweight_720_c[i];
            }
        }
    }
}
static av_cold int dvvideo_init(AVCodecContext *avctx)
{
    DVVideoContext *s = avctx->priv_data;
    DSPContext dsp;
    static int done=0;
    int i, j;

    if (!done) {
        VLC dv_vlc;
        uint16_t new_dv_vlc_bits[NB_DV_VLC*2];
        uint8_t  new_dv_vlc_len[NB_DV_VLC*2];
        uint8_t  new_dv_vlc_run[NB_DV_VLC*2];
        int16_t  new_dv_vlc_level[NB_DV_VLC*2];

        done = 1;

        /* dv_anchor lets each thread know its Id */
        for (i=0; i<DV_ANCHOR_SIZE; i++)
            dv_anchor[i] = (void*)(size_t)i;

        /* it's faster to include sign bit in a generic VLC parsing scheme */
        for (i=0, j=0; i<NB_DV_VLC; i++, j++) {
            new_dv_vlc_bits[j]  = dv_vlc_bits[i];
            new_dv_vlc_len[j]   = dv_vlc_len[i];
            new_dv_vlc_run[j]   = dv_vlc_run[i];
            new_dv_vlc_level[j] = dv_vlc_level[i];

            if (dv_vlc_level[i]) {
                new_dv_vlc_bits[j] <<= 1;
                new_dv_vlc_len[j]++;

                j++;
                new_dv_vlc_bits[j]  = (dv_vlc_bits[i] << 1) | 1;
                new_dv_vlc_len[j]   = dv_vlc_len[i] + 1;
                new_dv_vlc_run[j]   = dv_vlc_run[i];
                new_dv_vlc_level[j] = -dv_vlc_level[i];
            }
        }
        /* NOTE: as a trick, we use the fact that no codes are unused
           to accelerate the parsing of partial codes */
        init_vlc(&dv_vlc, TEX_VLC_BITS, j,
                 new_dv_vlc_len, 1, 1, new_dv_vlc_bits, 2, 2, 0);
        assert(dv_vlc.table_size == 1184);

        for(i = 0; i < dv_vlc.table_size; i++){
            int code= dv_vlc.table[i][0];
            int len = dv_vlc.table[i][1];
            int level, run;

            if(len<0){ //more bits needed
                run= 0;
                level= code;
            } else {
                run= new_dv_vlc_run[code] + 1;
                level= new_dv_vlc_level[code];
            }
            dv_rl_vlc[i].len   = len;
            dv_rl_vlc[i].level = level;
            dv_rl_vlc[i].run   = run;
        }
        free_vlc(&dv_vlc);

        for (i = 0; i < NB_DV_VLC - 1; i++) {
            if (dv_vlc_run[i] >= DV_VLC_MAP_RUN_SIZE)
                continue;
#ifdef DV_CODEC_TINY_TARGET
            if (dv_vlc_level[i] >= DV_VLC_MAP_LEV_SIZE)
                continue;
#endif

            if (dv_vlc_map[dv_vlc_run[i]][dv_vlc_level[i]].size != 0)
                continue;

            dv_vlc_map[dv_vlc_run[i]][dv_vlc_level[i]].vlc  = dv_vlc_bits[i] <<
                                                              (!!dv_vlc_level[i]);
            dv_vlc_map[dv_vlc_run[i]][dv_vlc_level[i]].size = dv_vlc_len[i] +
                                                              (!!dv_vlc_level[i]);
        }
        for (i = 0; i < DV_VLC_MAP_RUN_SIZE; i++) {
#ifdef DV_CODEC_TINY_TARGET
            for (j = 1; j < DV_VLC_MAP_LEV_SIZE; j++) {
                if (dv_vlc_map[i][j].size == 0) {
                    dv_vlc_map[i][j].vlc = dv_vlc_map[0][j].vlc |
                                           (dv_vlc_map[i-1][0].vlc << (dv_vlc_map[0][j].size));
                    dv_vlc_map[i][j].size = dv_vlc_map[i-1][0].size +
                                            dv_vlc_map[0][j].size;
                }
            }
#else
            for (j = 1; j < DV_VLC_MAP_LEV_SIZE/2; j++) {
                if (dv_vlc_map[i][j].size == 0) {
                    dv_vlc_map[i][j].vlc = dv_vlc_map[0][j].vlc |
                                           (dv_vlc_map[i-1][0].vlc << (dv_vlc_map[0][j].size));
                    dv_vlc_map[i][j].size = dv_vlc_map[i-1][0].size +
                                            dv_vlc_map[0][j].size;
                }
                dv_vlc_map[i][((uint16_t)(-j))&0x1ff].vlc =
                                            dv_vlc_map[i][j].vlc | 1;
                dv_vlc_map[i][((uint16_t)(-j))&0x1ff].size =
                                            dv_vlc_map[i][j].size;
            }
#endif
        }
    }

    /* Generic DSP setup */
    dsputil_init(&dsp, avctx);
    s->get_pixels = dsp.get_pixels;

    /* 88DCT setup */
    s->fdct[0] = dsp.fdct;
    s->idct_put[0] = dsp.idct_put;
    for (i=0; i<64; i++)
        s->dv_zigzag[0][i] = dsp.idct_permutation[ff_zigzag_direct[i]];

    /* 248DCT setup */
    s->fdct[1] = dsp.fdct248;
    s->idct_put[1] = ff_simple_idct248_put; // FIXME: need to add it to DSP
    if(avctx->lowres){
        for (i=0; i<64; i++){
            int j= ff_zigzag248_direct[i];
            s->dv_zigzag[1][i] = dsp.idct_permutation[(j&7) + (j&8)*4 + (j&48)/2];
        }
    }else
        memcpy(s->dv_zigzag[1], ff_zigzag248_direct, 64);

    /* XXX: do it only for constant case */
    dv_build_unquantize_tables(s, dsp.idct_permutation);

    avctx->coded_frame = &s->picture;
    s->avctx= avctx;

    return 0;
}
// #define VLC_DEBUG
// #define printf(...) av_log(NULL, AV_LOG_ERROR, __VA_ARGS__)

typedef struct BlockInfo {
    const uint32_t *factor_table;
    const uint8_t *scan_table;
    uint8_t pos; /* position in block */
    void (*idct_put)(uint8_t *dest, int line_size, DCTELEM *block);
    uint8_t partial_bit_count;
    uint16_t partial_bit_buffer;
    int shift_offset;
} BlockInfo;

/* bit budget for AC only in 5 MBs */
static const int vs_total_ac_bits = (100 * 4 + 68*2) * 5;
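/* Note: (100*4 + 68*2)*5 = 2680 bits. Per macroblock this is presumably the AC
   budget of the 4 luma blocks (14 bytes = 112 bits each, minus the 9+1+2 bit
   DC/DCT-mode/class header = 100 bits) plus the 2 chroma blocks (10 bytes = 80
   bits, minus the same 12-bit header = 68 bits), summed over the 5 macroblocks
   of a video segment. */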
/* see dv_88_areas and dv_248_areas for details */
static const int mb_area_start[5] = { 1, 6, 21, 43, 64 };

static inline int get_bits_left(GetBitContext *s)
{
    return s->size_in_bits - get_bits_count(s);
}

static inline int put_bits_left(PutBitContext* s)
{
    return (s->buf_end - s->buf) * 8 - put_bits_count(s);
}
/* decode ac coefs */
static void dv_decode_ac(GetBitContext *gb, BlockInfo *mb, DCTELEM *block)
{
    int last_index = gb->size_in_bits;
    const uint8_t *scan_table = mb->scan_table;
    const uint32_t *factor_table = mb->factor_table;
    int pos = mb->pos;
    int partial_bit_count = mb->partial_bit_count;
    int level, run, vlc_len, index;

    OPEN_READER(re, gb);
    UPDATE_CACHE(re, gb);

    /* if we must parse a partial vlc, we do it here */
    if (partial_bit_count > 0) {
        re_cache = ((unsigned)re_cache >> partial_bit_count) |
                   (mb->partial_bit_buffer << (sizeof(re_cache)*8 - partial_bit_count));
        re_index -= partial_bit_count;
        mb->partial_bit_count = 0;
    }

    /* get the AC coefficients until last_index is reached */
    for(;;) {
#ifdef VLC_DEBUG
        printf("%2d: bits=%04x index=%d\n", pos, SHOW_UBITS(re, gb, 16), re_index);
#endif
        /* our own optimized GET_RL_VLC */
        index = NEG_USR32(re_cache, TEX_VLC_BITS);
        vlc_len = dv_rl_vlc[index].len;
        if (vlc_len < 0) {
            index = NEG_USR32((unsigned)re_cache << TEX_VLC_BITS, -vlc_len) + dv_rl_vlc[index].level;
            vlc_len = TEX_VLC_BITS - vlc_len;
        }
        level = dv_rl_vlc[index].level;
        run   = dv_rl_vlc[index].run;

        /* gotta check if we're still within gb boundaries */
        if (re_index + vlc_len > last_index) {
            /* should be < 16 bits otherwise a codeword could have been parsed */
            mb->partial_bit_count = last_index - re_index;
            mb->partial_bit_buffer = NEG_USR32(re_cache, mb->partial_bit_count);
            re_index = last_index;
            break;
        }
        re_index += vlc_len;

#ifdef VLC_DEBUG
        printf("run=%d level=%d\n", run, level);
#endif
        pos += run;
        if (pos >= 64)
            break;

        level = (level*factor_table[pos] + (1 << (dv_iweight_bits-1))) >> dv_iweight_bits;
        block[scan_table[pos]] = level;

        UPDATE_CACHE(re, gb);
    }
    CLOSE_READER(re, gb);
    mb->pos = pos;
}
static inline void bit_copy(PutBitContext *pb, GetBitContext *gb)
{
    int bits_left = get_bits_left(gb);

    while (bits_left >= MIN_CACHE_BITS) {
        put_bits(pb, MIN_CACHE_BITS, get_bits(gb, MIN_CACHE_BITS));
        bits_left -= MIN_CACHE_BITS;
    }
    if (bits_left > 0) {
        put_bits(pb, bits_left, get_bits(gb, bits_left));
    }
}
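/* Note on the decoding passes below: AC bits that a block does not use are not
   wasted. Pass 1 decodes each block from its own fixed-size area and copies any
   leftover bits into a per-macroblock buffer (mb_bit_buffer); pass 2 lets
   unfinished blocks of the same macroblock continue from that buffer; whatever
   is still unused is pooled per video segment (vs_bit_buffer) and consumed in a
   final pass by any blocks that are still unfinished. */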
/* mb_x and mb_y are in units of 8 pixels */
static inline void dv_decode_video_segment(DVVideoContext *s,
                                           const uint8_t *buf_ptr1,
                                           const uint16_t *mb_pos_ptr)
{
    int quant, dc, dct_mode, class1, j;
    int mb_index, mb_x, mb_y, v, last_index;
    int y_stride, i;
    DCTELEM *block, *block1;
    int c_offset;
    uint8_t *y_ptr;
    const uint8_t *buf_ptr;
    PutBitContext pb, vs_pb;
    GetBitContext gb;
    BlockInfo mb_data[5 * DV_MAX_BPM], *mb, *mb1;
    DECLARE_ALIGNED_16(DCTELEM, sblock[5*DV_MAX_BPM][64]);
    DECLARE_ALIGNED_8(uint8_t, mb_bit_buffer[80 + 4]); /* allow some slack */
    DECLARE_ALIGNED_8(uint8_t, vs_bit_buffer[5 * 80 + 4]); /* allow some slack */
    const int log2_blocksize= 3-s->avctx->lowres;
    int is_field_mode[5];

    assert((((int)mb_bit_buffer)&7)==0);
    assert((((int)vs_bit_buffer)&7)==0);

    memset(sblock, 0, sizeof(sblock));

    /* pass 1 : read DC and AC coefficients in blocks */
    buf_ptr = buf_ptr1;
    block1 = &sblock[0][0];
    mb1 = mb_data;
    init_put_bits(&vs_pb, vs_bit_buffer, 5 * 80);
    for(mb_index = 0; mb_index < 5; mb_index++, mb1 += s->sys->bpm, block1 += s->sys->bpm * 64) {
        /* skip header */
        quant = buf_ptr[3] & 0x0f;
        buf_ptr += 4;
        init_put_bits(&pb, mb_bit_buffer, 80);
        mb = mb1;
        block = block1;
        is_field_mode[mb_index] = 0;
        for(j = 0;j < s->sys->bpm; j++) {
            last_index = s->sys->block_sizes[j];
            init_get_bits(&gb, buf_ptr, last_index);

            /* get the dc */
            dc = get_sbits(&gb, 9);
            dct_mode = get_bits1(&gb);
            class1 = get_bits(&gb, 2);
            if (DV_PROFILE_IS_HD(s->sys)) {
                mb->idct_put = s->idct_put[0];
                mb->scan_table = s->dv_zigzag[0];
                mb->factor_table = s->dv100_idct_factor[((s->sys->height == 720)<<1)&(j < 4)][class1][quant];
                is_field_mode[mb_index] |= !j && dct_mode;
            } else {
                mb->idct_put = s->idct_put[dct_mode && log2_blocksize==3];
                mb->scan_table = s->dv_zigzag[dct_mode];
                mb->factor_table = s->dv_idct_factor[class1 == 3][dct_mode]
                                                    [quant + dv_quant_offset[class1]];
            }
            dc = dc << 2;
            /* convert to unsigned because 128 is not added in the
               standard IDCT */
            dc += 1024;
            block[0] = dc;
            buf_ptr += last_index >> 3;
            mb->pos               = 0;
            mb->partial_bit_count = 0;

#ifdef VLC_DEBUG
            printf("MB block: %d, %d ", mb_index, j);
#endif
            dv_decode_ac(&gb, mb, block);

            /* write the remaining bits in a new buffer only if the
               block is finished */
            if (mb->pos >= 64)
                bit_copy(&pb, &gb);

            block += 64;
            mb++;
        }

        /* pass 2 : we can do it just after */
#ifdef VLC_DEBUG
        printf("***pass 2 size=%d MB#=%d\n", put_bits_count(&pb), mb_index);
#endif
        block = block1;
        mb = mb1;
        init_get_bits(&gb, mb_bit_buffer, put_bits_count(&pb));
        flush_put_bits(&pb);
        for(j = 0;j < s->sys->bpm; j++, block += 64, mb++) {
            if (mb->pos < 64 && get_bits_left(&gb) > 0) {
                dv_decode_ac(&gb, mb, block);
                /* if still not finished, no need to parse other blocks */
                if (mb->pos < 64)
                    break;
            }
        }
        /* all blocks are finished, so the extra bytes can be used at
           the video segment level */
        if (j >= s->sys->bpm)
            bit_copy(&vs_pb, &gb);
    }
    /* we need a pass over the whole video segment */
#ifdef VLC_DEBUG
    printf("***pass 3 size=%d\n", put_bits_count(&vs_pb));
#endif
    block = &sblock[0][0];
    mb = mb_data;
    init_get_bits(&gb, vs_bit_buffer, put_bits_count(&vs_pb));
    flush_put_bits(&vs_pb);
    for(mb_index = 0; mb_index < 5; mb_index++) {
        for(j = 0;j < s->sys->bpm; j++) {
            if (mb->pos < 64) {
#ifdef VLC_DEBUG
                printf("start %d:%d\n", mb_index, j);
#endif
                dv_decode_ac(&gb, mb, block);
            }
            if (mb->pos >= 64 && mb->pos < 127)
                av_log(NULL, AV_LOG_ERROR, "AC EOB marker is absent pos=%d\n", mb->pos);
            block += 64;
            mb++;
        }
    }

    /* compute idct and place blocks */
    block = &sblock[0][0];
    mb = mb_data;
    for(mb_index = 0; mb_index < 5; mb_index++) {
        v = *mb_pos_ptr++;
        mb_x = v & 0xff;
        mb_y = v >> 8;
        /* We work with 720p frames split in half. The odd half-frame (chan==2,3) is displaced :-( */
        if (s->sys->height == 720 && ((s->buf[1]>>2)&0x3) == 0) {
            mb_y -= (mb_y>17)?18:-72; /* shifting the Y coordinate down by 72/2 macro blocks */
        }

        /* idct_put'ting luminance */
        if ((s->sys->pix_fmt == PIX_FMT_YUV420P) ||
            (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) ||
            (s->sys->height >= 720 && mb_y != 134)) {
            y_stride = (s->picture.linesize[0]<<((!is_field_mode[mb_index])*log2_blocksize)) - (2<<log2_blocksize);
        } else {
            y_stride = 0;
        }
        y_ptr = s->picture.data[0] + ((mb_y * s->picture.linesize[0] + mb_x)<<log2_blocksize);
        for(j = 0; j < 2; j++, y_ptr += y_stride) {
            for (i=0; i<2; i++, block += 64, mb++, y_ptr += (1<<log2_blocksize))
                if (s->sys->pix_fmt == PIX_FMT_YUV422P && s->sys->width == 720 && i)
                    y_ptr -= (1<<log2_blocksize);
                else
                    mb->idct_put(y_ptr, s->picture.linesize[0]<<is_field_mode[mb_index], block);
        }

        /* idct_put'ting chrominance */
        c_offset = (((mb_y>>(s->sys->pix_fmt == PIX_FMT_YUV420P)) * s->picture.linesize[1] +
                     (mb_x>>((s->sys->pix_fmt == PIX_FMT_YUV411P)?2:1)))<<log2_blocksize);
        for(j=2; j; j--) {
            uint8_t *c_ptr = s->picture.data[j] + c_offset;
            if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) {
                uint64_t aligned_pixels[64/8];
                uint8_t *pixels = (uint8_t*)aligned_pixels;
                uint8_t *c_ptr1, *ptr1;
                int x, y;
                mb->idct_put(pixels, 8, block);
                for(y = 0; y < (1<<log2_blocksize); y++, c_ptr += s->picture.linesize[j], pixels += 8) {
                    ptr1= pixels + (1<<(log2_blocksize-1));
                    c_ptr1 = c_ptr + (s->picture.linesize[j]<<log2_blocksize);
                    for(x=0; x < (1<<(log2_blocksize-1)); x++) {
                        c_ptr[x]= pixels[x];
                        c_ptr1[x]= ptr1[x];
                    }
                }
                block += 64; mb++;
            } else {
                y_stride = (mb_y == 134) ? (1<<log2_blocksize) :
                           s->picture.linesize[j]<<((!is_field_mode[mb_index])*log2_blocksize);
                for (i=0; i<(1<<(s->sys->bpm==8)); i++, block += 64, mb++, c_ptr += y_stride)
                    mb->idct_put(c_ptr, s->picture.linesize[j]<<is_field_mode[mb_index], block);
            }
        }
    }
}
#ifdef DV_CODEC_TINY_TARGET
/* Converts run and level (where level != 0) pair into vlc, returning bit size */
static av_always_inline int dv_rl2vlc(int run, int level, int sign, uint32_t* vlc)
{
    int size;
    if (run < DV_VLC_MAP_RUN_SIZE && level < DV_VLC_MAP_LEV_SIZE) {
        *vlc = dv_vlc_map[run][level].vlc | sign;
        size = dv_vlc_map[run][level].size;
    }
    else {
        if (level < DV_VLC_MAP_LEV_SIZE) {
            *vlc = dv_vlc_map[0][level].vlc | sign;
            size = dv_vlc_map[0][level].size;
        } else {
            *vlc = 0xfe00 | (level << 1) | sign;
            size = 16;
        }
        if (run) {
            *vlc |= ((run < 16) ? dv_vlc_map[run-1][0].vlc :
                                  (0x1f80 | (run - 1))) << size;
            size += (run < 16) ? dv_vlc_map[run-1][0].size : 13;
        }
    }

    return size;
}

static av_always_inline int dv_rl2vlc_size(int run, int level)
{
    int size;

    if (run < DV_VLC_MAP_RUN_SIZE && level < DV_VLC_MAP_LEV_SIZE) {
        size = dv_vlc_map[run][level].size;
    }
    else {
        size = (level < DV_VLC_MAP_LEV_SIZE) ? dv_vlc_map[0][level].size : 16;
        if (run) {
            size += (run < 16) ? dv_vlc_map[run-1][0].size : 13;
        }
    }
    return size;
}
#else
static av_always_inline int dv_rl2vlc(int run, int l, int sign, uint32_t* vlc)
{
    *vlc = dv_vlc_map[run][l].vlc | sign;
    return dv_vlc_map[run][l].size;
}

static av_always_inline int dv_rl2vlc_size(int run, int l)
{
    return dv_vlc_map[run][l].size;
}
#endif
typedef struct EncBlockInfo {
    int area_q[4];
    int bit_size[4];
    int prev[5];
    int cur_ac;
    int cno;
    int dct_mode;
    DCTELEM mb[64];
    uint8_t next[64];
    uint8_t sign[64];
    uint8_t partial_bit_count;
    uint32_t partial_bit_buffer; /* we can't use uint16_t here */
} EncBlockInfo;

static av_always_inline PutBitContext* dv_encode_ac(EncBlockInfo* bi, PutBitContext* pb_pool,
                                                    PutBitContext* pb_end)
{
    int prev;
    int bits_left;
    PutBitContext* pb = pb_pool;
    int size = bi->partial_bit_count;
    uint32_t vlc = bi->partial_bit_buffer;

    bi->partial_bit_count = bi->partial_bit_buffer = 0;
    for(;;){
        /* Find suitable storage space */
        for (; size > (bits_left = put_bits_left(pb)); pb++) {
            if (bits_left) {
                size -= bits_left;
                put_bits(pb, bits_left, vlc >> size);
                vlc = vlc & ((1<<size)-1);
            }
            if (pb + 1 >= pb_end) {
                bi->partial_bit_count  = size;
                bi->partial_bit_buffer = vlc;
                return pb;
            }
        }

        /* Store VLC */
        put_bits(pb, size, vlc);

        if(bi->cur_ac>=64)
            break;

        /* Construct the next VLC */
        prev = bi->cur_ac;
        bi->cur_ac = bi->next[prev];
        if(bi->cur_ac < 64){
            size = dv_rl2vlc(bi->cur_ac - prev - 1, bi->mb[bi->cur_ac], bi->sign[bi->cur_ac], &vlc);
        } else {
            size = 4; vlc = 6; /* End Of Block stamp */
        }
    }
    return pb;
}
static av_always_inline void dv_set_class_number(DCTELEM* blk, EncBlockInfo* bi,
                                                 const uint8_t* zigzag_scan, const int *weight, int bias)
{
    int i, area;
    /* We offer two different methods for class number assignment: the
       method suggested in SMPTE 314M Table 22, and an improved
       method. The SMPTE method is very conservative; it assigns class
       3 (i.e. severe quantization) to any block where the largest AC
       component is greater than 36. ffmpeg's DV encoder tracks AC bit
       consumption precisely, so there is no need to bias most blocks
       towards strongly lossy compression. Instead, we assign class 2
       to most blocks, and use class 3 only when strictly necessary
       (for blocks whose largest AC component exceeds 255). */
#if 0 /* SMPTE spec method */
    static const int classes[] = {12, 24, 36, 0xffff};
#else /* improved ffmpeg method */
    static const int classes[] = {-1, -1, 255, 0xffff};
#endif
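    /* Note: with the table above, the loop below
       ("for(bi->cno = 0; max > classes[bi->cno]; bi->cno++);") maps the largest
       weighted AC amplitude to a class. For example, max = 40 is already class 3
       under the SMPTE table (40 > 36), while here it stays in class 2 and any bit
       trimming is left to dv_guess_qnos() should the segment overflow its budget. */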
    int max=classes[0];
    int prev=0;

    bi->mb[0] = blk[0];

    for (area = 0; area < 4; area++) {
        bi->prev[area] = prev;
        bi->bit_size[area] = 1; // 4 areas 4 bits for EOB :)
        for (i=mb_area_start[area]; i<mb_area_start[area+1]; i++) {
            int level = blk[zigzag_scan[i]];

            if (level+15 > 30U) {
                bi->sign[i] = (level>>31)&1;
                /* weigh it and shift down into range, adding for rounding */
                /* the extra division by a factor of 2^4 reverses the 8x expansion of the DCT
                   AND the 2x doubling of the weights */
                level = (FFABS(level) * weight[i] + (1<<(dv_weight_bits+3))) >> (dv_weight_bits+4);
                bi->mb[i] = level;
                if(level>max) max= level;
                bi->bit_size[area] += dv_rl2vlc_size(i - prev - 1, level);
                bi->next[prev]= i;
                prev= i;
            }
        }
    }
    bi->next[prev]= i;

    for(bi->cno = 0; max > classes[bi->cno]; bi->cno++);

    bi->cno += bias;

    if (bi->cno >= 3) {
        bi->cno = 3;
        prev=0;
        i= bi->next[prev];
        for (area = 0; area < 4; area++) {
            bi->prev[area] = prev;
            bi->bit_size[area] = 1; // 4 areas 4 bits for EOB :)
            for (; i<mb_area_start[area+1]; i= bi->next[i]) {
                bi->mb[i] >>=1;

                if (bi->mb[i]) {
                    bi->bit_size[area] += dv_rl2vlc_size(i - prev - 1, bi->mb[i]);
                    bi->next[prev]= i;
                    prev= i;
                }
            }
        }
        bi->next[prev]= i;
    }
}
//FIXME replace this by dsputil
#define SC(x, y) ((s[x] - s[y]) ^ ((s[x] - s[y]) >> 7))
static av_always_inline int dv_guess_dct_mode(DCTELEM *blk) {
    DCTELEM *s;
    int score88 = 0;
    int score248 = 0;
    int i;

    /* Compute 8-8 score (small values give a better chance for 8-8 DCT) */
    s = blk;
    for(i=0; i<7; i++) {
        score88 += SC(0,  8) + SC(1,  9) + SC(2, 10) + SC(3, 11) +
                   SC(4, 12) + SC(5, 13) + SC(6, 14) + SC(7, 15);
        s += 8;
    }
    /* Compute 2-4-8 score (small values give a better chance for 2-4-8 DCT) */
    s = blk;
    for(i=0; i<6; i++) {
        score248 += SC(0, 16) + SC(1, 17) + SC(2, 18) + SC(3, 19) +
                    SC(4, 20) + SC(5, 21) + SC(6, 22) + SC(7, 23);
        s += 8;
    }

    return (score88 - score248 > -10);
}
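/* Note: SC(x, y) appears to be a cheap approximation of |s[x] - s[y]|. score88
   sums differences between vertically adjacent lines, score248 between lines two
   apart (i.e. within the same field), so a clearly larger score88 suggests
   interlaced content and selects the 2-4-8 DCT. The -10 threshold appears to be
   an empirically chosen slight bias towards the 2-4-8 mode. */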
static inline void dv_guess_qnos(EncBlockInfo* blks, int* qnos)
{
    int size[5];
    int i, j, k, a, prev, a2;
    EncBlockInfo* b;

    size[0] = size[1] = size[2] = size[3] = size[4] = 1<<24;
    do {
        b = blks;
        for (i=0; i<5; i++) {
            if (!qnos[i])
                continue;

            qnos[i]--;
            size[i] = 0;
            for (j=0; j<6; j++, b++) {
                for (a=0; a<4; a++) {
                    if (b->area_q[a] != dv_quant_shifts[qnos[i] + dv_quant_offset[b->cno]][a]) {
                        b->bit_size[a] = 1; // 4 areas 4 bits for EOB :)
                        b->area_q[a]++;
                        prev= b->prev[a];
                        assert(b->next[prev] >= mb_area_start[a+1] || b->mb[prev]);
                        for (k= b->next[prev] ; k<mb_area_start[a+1]; k= b->next[k]) {
                            b->mb[k] >>= 1;
                            if (b->mb[k]) {
                                b->bit_size[a] += dv_rl2vlc_size(k - prev - 1, b->mb[k]);
                                prev= k;
                            } else {
                                if(b->next[k] >= mb_area_start[a+1] && b->next[k]<64){
                                    for(a2=a+1; b->next[k] >= mb_area_start[a2+1]; a2++)
                                        b->prev[a2] = prev;
                                    assert(a2<4);
                                    assert(b->mb[b->next[k]]);
                                    b->bit_size[a2] += dv_rl2vlc_size(b->next[k] - prev - 1, b->mb[b->next[k]])
                                                      -dv_rl2vlc_size(b->next[k] - k - 1, b->mb[b->next[k]]);
                                    assert(b->prev[a2]==k && (a2+1 >= 4 || b->prev[a2+1]!=k));
                                    b->prev[a2] = prev;
                                }
                                b->next[prev] = b->next[k];
                            }
                        }
                        b->prev[a+1]= prev;
                    }
                    size[i] += b->bit_size[a];
                }
            }
            if(vs_total_ac_bits >= size[0] + size[1] + size[2] + size[3] + size[4])
                return;
        }
    } while (qnos[0]|qnos[1]|qnos[2]|qnos[3]|qnos[4]);

    for(a=2; a==2 || vs_total_ac_bits < size[0]; a+=a){
        b = blks;
        size[0] = 5*6*4; //EOB
        for (j=0; j<6*5; j++, b++) {
            prev= b->prev[0];
            for (k= b->next[prev]; k<64; k= b->next[k]) {
                if(b->mb[k] < a && b->mb[k] > -a){
                    b->next[prev] = b->next[k];
                }else{
                    size[0] += dv_rl2vlc_size(k - prev - 1, b->mb[k]);
                    prev= k;
                }
            }
        }
    }
}
static inline void dv_encode_video_segment(DVVideoContext *s,
                                           uint8_t *dif,
                                           const uint16_t *mb_pos_ptr)
{
    int mb_index, i, j, v;
    int mb_x, mb_y, c_offset, linesize;
    uint8_t* y_ptr;
    uint8_t* data;
    uint8_t* ptr;
    int do_edge_wrap;
    DECLARE_ALIGNED_16(DCTELEM, block[64]);
    EncBlockInfo enc_blks[5*6];
    PutBitContext pbs[5*6];
    PutBitContext* pb;
    EncBlockInfo* enc_blk;
    int vs_bit_size = 0;
    int qnos[5];

    assert((((int)block) & 15) == 0);

    enc_blk = &enc_blks[0];
    pb = &pbs[0];
    for(mb_index = 0; mb_index < 5; mb_index++) {
        v = *mb_pos_ptr++;
        mb_x = v & 0xff;
        mb_y = v >> 8;
        y_ptr = s->picture.data[0] + ((mb_y * s->picture.linesize[0] + mb_x)<<3);
        c_offset = (((mb_y>>(s->sys->pix_fmt == PIX_FMT_YUV420P)) * s->picture.linesize[1] +
                     (mb_x>>((s->sys->pix_fmt == PIX_FMT_YUV411P)?2:1)))<<3);
        do_edge_wrap = 0;
        qnos[mb_index] = 15; /* No quantization */
        ptr = dif + mb_index*80 + 4;
        for(j = 0;j < 6; j++) {
            int dummy = 0;
            if (s->sys->pix_fmt == PIX_FMT_YUV422P) { /* 4:2:2 */
                if (j == 0 || j == 2) {
                    /* Y0 Y1 */
                    data = y_ptr + ((j>>1) * 8);
                    linesize = s->picture.linesize[0];
                } else if (j > 3) {
                    /* Cr Cb */
                    data = s->picture.data[6 - j] + c_offset;
                    linesize = s->picture.linesize[6 - j];
                } else {
                    /* j=1 and j=3 are "dummy" blocks, used for AC data only */
                    data = 0;
                    linesize = 0;
                    dummy = 1;
                }
            } else { /* 4:1:1 or 4:2:0 */
                if (j < 4) {  /* Four Y blocks */
                    /* NOTE: at end of line, the macroblock is handled as 420 */
                    if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x < (704 / 8)) {
                        data = y_ptr + (j * 8);
                    } else {
                        data = y_ptr + ((j & 1) * 8) + ((j >> 1) * 8 * s->picture.linesize[0]);
                    }
                    linesize = s->picture.linesize[0];
                } else {      /* Cr and Cb blocks */
                    /* don't ask Fabrice why they inverted Cb and Cr ! */
                    data = s->picture.data[6 - j] + c_offset;
                    linesize = s->picture.linesize[6 - j];
                    if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8))
                        do_edge_wrap = 1;
                }
            }

            /* Everything is set up -- now just copy data -> DCT block */
            if (do_edge_wrap) {  /* Edge wrap copy: 4x16 -> 8x8 */
                uint8_t* d;
                DCTELEM *b = block;
                for (i=0;i<8;i++) {
                    d = data + 8 * linesize;
                    b[0] = data[0]; b[1] = data[1]; b[2] = data[2]; b[3] = data[3];
                    b[4] = d[0];    b[5] = d[1];    b[6] = d[2];    b[7] = d[3];
                    data += linesize;
                    b += 8;
                }
            } else {             /* Simple copy: 8x8 -> 8x8 */
                if (!dummy)
                    s->get_pixels(block, data, linesize);
            }

            if(s->avctx->flags & CODEC_FLAG_INTERLACED_DCT)
                enc_blk->dct_mode = dv_guess_dct_mode(block);
            else
                enc_blk->dct_mode = 0;
            enc_blk->area_q[0] = enc_blk->area_q[1] = enc_blk->area_q[2] = enc_blk->area_q[3] = 0;
            enc_blk->partial_bit_count = 0;
            enc_blk->partial_bit_buffer = 0;
            enc_blk->cur_ac = 0;

            if (dummy) {
                /* We rely on the fact that encoding all zeros leads to an immediate EOB,
                   which is precisely what the spec calls for in the "dummy" blocks. */
                memset(block, 0, sizeof(block));
            } else {
                s->fdct[enc_blk->dct_mode](block);
            }

            dv_set_class_number(block, enc_blk,
                                enc_blk->dct_mode ? ff_zigzag248_direct : ff_zigzag_direct,
                                enc_blk->dct_mode ? dv_weight_248 : dv_weight_88,
                                j/4);

            init_put_bits(pb, ptr, s->sys->block_sizes[j]/8);
            put_bits(pb, 9, (uint16_t)(((enc_blk->mb[0] >> 3) - 1024 + 2) >> 2));
            put_bits(pb, 1, enc_blk->dct_mode);
            put_bits(pb, 2, enc_blk->cno);

            vs_bit_size += enc_blk->bit_size[0] + enc_blk->bit_size[1] +
                           enc_blk->bit_size[2] + enc_blk->bit_size[3];
            ++enc_blk;
            ++pb;
            ptr += s->sys->block_sizes[j]/8;
        }
    }

    if (vs_total_ac_bits < vs_bit_size)
        dv_guess_qnos(&enc_blks[0], &qnos[0]);

    for (i=0; i<5; i++) {
        dif[i*80 + 3] = qnos[i];
    }

    /* First pass over individual cells only */
    for (j=0; j<5*6; j++)
        dv_encode_ac(&enc_blks[j], &pbs[j], &pbs[j+1]);

    /* Second pass over each MB space */
    for (j=0; j<5*6; j+=6) {
        pb= &pbs[j];
        for (i=0; i<6; i++) {
            if (enc_blks[i+j].partial_bit_count)
                pb=dv_encode_ac(&enc_blks[i+j], pb, &pbs[j+6]);
        }
    }
    /* Third and final pass over the whole video segment space */
    pb= &pbs[0];
    for (j=0; j<5*6; j++) {
        if (enc_blks[j].partial_bit_count)
            pb=dv_encode_ac(&enc_blks[j], pb, &pbs[6*5]);
        if (enc_blks[j].partial_bit_count)
            av_log(NULL, AV_LOG_ERROR, "ac bitstream overflow\n");
    }

    for (j=0; j<5*6; j++)
        flush_put_bits(&pbs[j]);
}

static int dv_decode_mt(AVCodecContext *avctx, void* sl)
{
    DVVideoContext *s = avctx->priv_data;
    int slice = (size_t)sl;

    /* which DIF channel is this? */
    int chan = slice / (s->sys->difseg_size * 27);

    /* slice within the DIF channel */
    int chan_slice = slice % (s->sys->difseg_size * 27);

    /* byte offset of this channel's data */
    int chan_offset = chan * s->sys->difseg_size * 150 * 80;

    /* DIF sequence */
    int seq = chan_slice / 27;

    /* in 1080i50 and 720p50 some seq are unused */
    if ((DV_PROFILE_IS_1080i50(s->sys) && chan != 0 && seq == 11) ||
        (DV_PROFILE_IS_720p50(s->sys) && seq > 9))
        return 0;
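    /* Note: the buffer offset below appears to locate the first DIF block of this
       video segment within the channel: each DIF sequence is 150 blocks of 80
       bytes, the first 6 of which are header/subcode/VAUX, and one audio block
       precedes every 15 video blocks (see dv_format_frame()). Expanding the terms
       gives 150*seq + 7 + 5*(chan_slice%27) + (5*(chan_slice%27))/15 blocks. */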
    dv_decode_video_segment(s, &s->buf[(seq*6+(chan_slice/3)+chan_slice*5+7)*80 + chan_offset],
                            &s->sys->video_place[slice*5]);
    return 0;
}
#ifdef CONFIG_DVVIDEO_ENCODER
static int dv_encode_mt(AVCodecContext *avctx, void* sl)
{
    DVVideoContext *s = avctx->priv_data;
    int slice = (size_t)sl;

    /* which DIF channel is this? */
    int chan = slice / (s->sys->difseg_size * 27);

    /* slice within the DIF channel */
    int chan_slice = slice % (s->sys->difseg_size * 27);

    /* byte offset of this channel's data */
    int chan_offset = chan * s->sys->difseg_size * 150 * 80;

    dv_encode_video_segment(s, &s->buf[((chan_slice/27)*6+(chan_slice/3)+chan_slice*5+7)*80 + chan_offset],
                            &s->sys->video_place[slice*5]);
    return 0;
}
#endif
#ifdef CONFIG_DECODERS
/* NOTE: exactly one frame must be given (120000 bytes for NTSC,
   144000 bytes for PAL - or twice those for 50Mbps) */
static int dvvideo_decode_frame(AVCodecContext *avctx,
                                void *data, int *data_size,
                                const uint8_t *buf, int buf_size)
{
    DVVideoContext *s = avctx->priv_data;

    s->sys = dv_frame_profile(buf);
    if (!s->sys || buf_size < s->sys->frame_size)
        return -1; /* NOTE: we only accept complete frames */
    if(s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    s->picture.reference = 0;
    s->picture.key_frame = 1;
    s->picture.pict_type = FF_I_TYPE;
    avctx->pix_fmt = s->sys->pix_fmt;
    avctx->time_base = (AVRational){s->sys->frame_rate_base, s->sys->frame_rate};
    avcodec_set_dimensions(avctx, s->sys->width, s->sys->height);
    if(avctx->get_buffer(avctx, &s->picture) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    s->picture.interlaced_frame = 1;
    s->picture.top_field_first = 0;

    s->buf = buf;
    avctx->execute(avctx, dv_decode_mt, (void**)&dv_anchor[0], NULL,
                   s->sys->n_difchan * s->sys->difseg_size * 27);

    emms_c();

    /* return image */
    *data_size = sizeof(AVFrame);
    *(AVFrame*)data= s->picture;

    return s->sys->frame_size;
}
#endif
static inline int dv_write_pack(enum dv_pack_type pack_id, DVVideoContext *c, uint8_t* buf)
{
    /*
     * Here's what SMPTE314M says about these two:
     *    (page 6) APTn, AP1n, AP2n, AP3n: These data shall be identical
     *             as track application IDs (APTn = 001, AP1n =
     *             001, AP2n = 001, AP3n = 001), if the source signal
     *             comes from a digital VCR. If the signal source is
     *             unknown, all bits for these data shall be set to 1.
     *    (page 12) STYPE: STYPE defines a signal type of video signal
     *                     00000b = 4:1:1 compression
     *                     00100b = 4:2:2 compression
     *                     XXXXXX = Reserved
     * Now, I've got two problems with these statements:
     *   1. it looks like APT == 111b should be a safe bet, but it isn't.
     *      It seems that for PAL as defined in IEC 61834 we have to set
     *      APT to 000 and for SMPTE314M to 001.
     *   2. It is not at all clear what STYPE is used for 4:2:0 PAL
     *      compression scheme (if any).
     */
    int apt   = (c->sys->pix_fmt == PIX_FMT_YUV420P ? 0 : 1);
    int stype = (c->sys->pix_fmt == PIX_FMT_YUV422P ? 4 : 0);
    uint8_t aspect = 0;
    if((int)(av_q2d(c->avctx->sample_aspect_ratio) * c->avctx->width / c->avctx->height * 10) == 17) /* 16:9 */
        aspect = 0x02;

    buf[0] = (uint8_t)pack_id;
    switch (pack_id) {
    case dv_header525: /* I can't imagine why these two weren't defined as real */
    case dv_header625: /* packs in SMPTE314M -- they definitely look like ones */
        buf[1] = 0xf8 |        /* reserved -- always 1 */
                 (apt & 0x07); /* APT: Track application ID */
        buf[2] = (0 << 7)    | /* TF1: audio data is 0 - valid; 1 - invalid */
                 (0x0f << 3) | /* reserved -- always 1 */
                 (apt & 0x07); /* AP1: Audio application ID */
        buf[3] = (0 << 7)    | /* TF2: video data is 0 - valid; 1 - invalid */
                 (0x0f << 3) | /* reserved -- always 1 */
                 (apt & 0x07); /* AP2: Video application ID */
        buf[4] = (0 << 7)    | /* TF3: subcode(SSYB) is 0 - valid; 1 - invalid */
                 (0x0f << 3) | /* reserved -- always 1 */
                 (apt & 0x07); /* AP3: Subcode application ID */
        break;
    case dv_video_source:
        buf[1] = 0xff;      /* reserved -- always 1 */
        buf[2] = (1 << 7) | /* B/W: 0 - b/w, 1 - color */
                 (1 << 6) | /* following CLF is valid - 0, invalid - 1 */
                 (3 << 4) | /* CLF: color frames id (see ITU-R BT.470-4) */
                 0xf;       /* reserved -- always 1 */
        buf[3] = (3 << 6) | /* reserved -- always 1 */
                 (c->sys->dsf << 5) | /* system: 60fields/50fields */
                 stype;     /* signal type video compression */
        buf[4] = 0xff;      /* VISC: 0xff -- no information */
        break;
    case dv_video_control:
        buf[1] = (0 << 6) | /* Copy generation management (CGMS) 0 -- free */
                 0x3f;      /* reserved -- always 1 */
        buf[2] = 0xc8 |     /* reserved -- always b11001xxx */
                 aspect;
        buf[3] = (1 << 7) | /* Frame/field flag 1 -- frame, 0 -- field */
                 (1 << 6) | /* First/second field flag 0 -- field 2, 1 -- field 1 */
                 (1 << 5) | /* Frame change flag 0 -- same picture as before, 1 -- different */
                 (1 << 4) | /* 1 - interlaced, 0 - noninterlaced */
                 0xc;       /* reserved -- always b1100 */
        buf[4] = 0xff;      /* reserved -- always 1 */
        break;
    default:
        buf[1] = buf[2] = buf[3] = buf[4] = 0xff;
    }
    return 5;
}
#ifdef CONFIG_DVVIDEO_ENCODER
static void dv_format_frame(DVVideoContext* c, uint8_t* buf)
{
    int chan, i, j, k;

    for (chan = 0; chan < c->sys->n_difchan; chan++) {
        for (i = 0; i < c->sys->difseg_size; i++) {
            memset(buf, 0xff, 80 * 6); /* First 6 DIF blocks are for control data */

            /* DV header: 1DIF */
            buf += dv_write_dif_id(dv_sect_header, chan, i, 0, buf);
            buf += dv_write_pack((c->sys->dsf ? dv_header625 : dv_header525), c, buf);
            buf += 72; /* unused bytes */

            /* DV subcode: 2DIFs */
            for (j = 0; j < 2; j++) {
                buf += dv_write_dif_id(dv_sect_subcode, chan, i, j, buf);
                for (k = 0; k < 6; k++)
                    buf += dv_write_ssyb_id(k, (i < c->sys->difseg_size/2), buf) + 5;
                buf += 29; /* unused bytes */
            }

            /* DV VAUX: 3DIFS */
            for (j = 0; j < 3; j++) {
                buf += dv_write_dif_id(dv_sect_vaux, chan, i, j, buf);
                buf += dv_write_pack(dv_video_source, c, buf);
                buf += dv_write_pack(dv_video_control, c, buf);
                buf += 7*5;
                buf += dv_write_pack(dv_video_source, c, buf);
                buf += dv_write_pack(dv_video_control, c, buf);
                buf += 4*5 + 2; /* unused bytes */
            }

            /* DV Audio/Video: 135 Video DIFs + 9 Audio DIFs */
            for (j = 0; j < 135; j++) {
                if (j%15 == 0) {
                    memset(buf, 0xff, 80);
                    buf += dv_write_dif_id(dv_sect_audio, chan, i, j/15, buf);
                    buf += 77; /* audio control & shuffled PCM audio */
                }
                buf += dv_write_dif_id(dv_sect_video, chan, i, j, buf);
                buf += 77; /* 1 video macroblock: 1 byte control
                              4 * 14 bytes Y 8x8 data
                                  10 bytes Cr 8x8 data
                                  10 bytes Cb 8x8 data */
            }
        }
    }
}
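/* Note: per DIF sequence the layout written above is 1 header + 2 subcode +
   3 VAUX + 9 audio + 135 video DIF blocks = 150 blocks of 80 bytes = 12000
   bytes, which is where the "difseg_size * 150 * 80" channel offset used by
   dv_decode_mt()/dv_encode_mt() comes from. */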
static int dvvideo_encode_frame(AVCodecContext *c, uint8_t *buf, int buf_size,
                                void *data)
{
    DVVideoContext *s = c->priv_data;

    s->sys = dv_codec_profile(c);
    if (!s->sys)
        return -1;
    if(buf_size < s->sys->frame_size)
        return -1;

    c->pix_fmt = s->sys->pix_fmt;
    s->picture = *((AVFrame *)data);
    s->picture.key_frame = 1;
    s->picture.pict_type = FF_I_TYPE;

    s->buf = buf;
    c->execute(c, dv_encode_mt, (void**)&dv_anchor[0], NULL,
               s->sys->n_difchan * s->sys->difseg_size * 27);

    emms_c();

    dv_format_frame(s, buf);

    return s->sys->frame_size;
}
#endif

static int dvvideo_close(AVCodecContext *c)
{
    DVVideoContext *s = c->priv_data;

    if(s->picture.data[0])
        c->release_buffer(c, &s->picture);

    return 0;
}

#ifdef CONFIG_DVVIDEO_ENCODER
AVCodec dvvideo_encoder = {
    "dvvideo",
    CODEC_TYPE_VIDEO,
    CODEC_ID_DVVIDEO,
    sizeof(DVVideoContext),
    dvvideo_init,
    dvvideo_encode_frame,
    .pix_fmts = (enum PixelFormat[]) {PIX_FMT_YUV411P, PIX_FMT_YUV422P, PIX_FMT_YUV420P, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
};
#endif // CONFIG_DVVIDEO_ENCODER

#ifdef CONFIG_DVVIDEO_DECODER
AVCodec dvvideo_decoder = {
    "dvvideo",
    CODEC_TYPE_VIDEO,
    CODEC_ID_DVVIDEO,
    sizeof(DVVideoContext),
    dvvideo_init,
    NULL,
    dvvideo_close,
    dvvideo_decode_frame,
    CODEC_CAP_DR1,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("DV (Digital Video)"),
};
#endif