decoding. All muxing/demuxing functionality is now available in libavformat/dv.[ch].
* dv1394.c and avidec.c were hooked up to the general DV demuxer.
* DVAUDIO is dead! Long live pcm_s16le!
* DV audio is now always recognized -- which means we can now hear all those ducks quacking in pond.dv.

Originally committed as revision 2319 to svn://svn.ffmpeg.org/ffmpeg/trunk
@@ -103,7 +103,6 @@ void avcodec_register_all(void)
    register_avcodec(&mpeg_xvmc_decoder);
#endif
    register_avcodec(&dvvideo_decoder);
    register_avcodec(&dvaudio_decoder);
    register_avcodec(&mjpeg_decoder);
    register_avcodec(&mjpegb_decoder);
    register_avcodec(&mp2_decoder);
@@ -1388,7 +1388,6 @@ extern AVCodec rv10_decoder;
extern AVCodec svq1_decoder;
extern AVCodec svq3_decoder;
extern AVCodec dvvideo_decoder;
extern AVCodec dvaudio_decoder;
extern AVCodec wmav1_decoder;
extern AVCodec wmav2_decoder;
extern AVCodec mjpeg_decoder;
@@ -25,34 +25,29 @@ | |||||
#include "dsputil.h" | #include "dsputil.h" | ||||
#include "mpegvideo.h" | #include "mpegvideo.h" | ||||
#include "simple_idct.h" | #include "simple_idct.h" | ||||
#define NTSC_FRAME_SIZE 120000 | |||||
#define PAL_FRAME_SIZE 144000 | |||||
#define TEX_VLC_BITS 9 | |||||
#include "dvdata.h" | |||||
typedef struct DVVideoDecodeContext { | typedef struct DVVideoDecodeContext { | ||||
AVCodecContext *avctx; | |||||
const DVprofile* sys; | |||||
GetBitContext gb; | GetBitContext gb; | ||||
VLC *vlc; | |||||
int sampling_411; /* 0 = 420, 1 = 411 */ | |||||
int width, height; | |||||
uint8_t *current_picture[3]; /* picture structure */ | |||||
AVFrame picture; | AVFrame picture; | ||||
int linesize[3]; | |||||
DCTELEM block[5*6][64] __align8; | DCTELEM block[5*6][64] __align8; | ||||
/* FIXME: the following is extracted from DSP */ | |||||
uint8_t dv_zigzag[2][64]; | uint8_t dv_zigzag[2][64]; | ||||
uint8_t idct_permutation[64]; | uint8_t idct_permutation[64]; | ||||
void (*get_pixels)(DCTELEM *block, const uint8_t *pixels, int line_size); | |||||
void (*fdct)(DCTELEM *block); | |||||
/* XXX: move it to static storage ? */ | /* XXX: move it to static storage ? */ | ||||
uint8_t dv_shift[2][22][64]; | uint8_t dv_shift[2][22][64]; | ||||
void (*idct_put[2])(uint8_t *dest, int line_size, DCTELEM *block); | void (*idct_put[2])(uint8_t *dest, int line_size, DCTELEM *block); | ||||
} DVVideoDecodeContext; | } DVVideoDecodeContext; | ||||
#include "dvdata.h" | |||||
static VLC dv_vlc; | |||||
#define TEX_VLC_BITS 9 | |||||
/* XXX: also include quantization */ | /* XXX: also include quantization */ | ||||
static RL_VLC_ELEM *dv_rl_vlc[1]; | static RL_VLC_ELEM *dv_rl_vlc[1]; | ||||
static VLC_TYPE dv_vlc_codes[15][23]; | |||||
static void dv_build_unquantize_tables(DVVideoDecodeContext *s) | static void dv_build_unquantize_tables(DVVideoDecodeContext *s) | ||||
{ | { | ||||
@@ -85,6 +80,7 @@ static int dvvideo_decode_init(AVCodecContext *avctx) | |||||
if (!done) { | if (!done) { | ||||
int i; | int i; | ||||
VLC dv_vlc; | |||||
done = 1; | done = 1; | ||||
@@ -114,6 +110,12 @@ static int dvvideo_decode_init(AVCodecContext *avctx) | |||||
dv_rl_vlc[0][i].level = level; | dv_rl_vlc[0][i].level = level; | ||||
dv_rl_vlc[0][i].run = run; | dv_rl_vlc[0][i].run = run; | ||||
} | } | ||||
memset(dv_vlc_codes, 0xff, sizeof(dv_vlc_codes)); | |||||
for (i = 0; i < NB_DV_VLC - 1; i++) { | |||||
if (dv_vlc_run[i] < 15 && dv_vlc_level[i] < 23 && dv_vlc_len[i] < 15) | |||||
dv_vlc_codes[dv_vlc_run[i]][dv_vlc_level[i]] = i; | |||||
} | |||||
} | } | ||||
/* ugly way to get the idct & scantable */ | /* ugly way to get the idct & scantable */ | ||||
@@ -124,6 +126,9 @@ static int dvvideo_decode_init(AVCodecContext *avctx) | |||||
if (DCT_common_init(&s2) < 0) | if (DCT_common_init(&s2) < 0) | ||||
return -1; | return -1; | ||||
s->get_pixels = s2.dsp.get_pixels; | |||||
s->fdct = s2.dsp.fdct; | |||||
s->idct_put[0] = s2.dsp.idct_put; | s->idct_put[0] = s2.dsp.idct_put; | ||||
memcpy(s->idct_permutation, s2.dsp.idct_permutation, 64); | memcpy(s->idct_permutation, s2.dsp.idct_permutation, 64); | ||||
memcpy(s->dv_zigzag[0], s2.intra_scantable.permutated, 64); | memcpy(s->dv_zigzag[0], s2.intra_scantable.permutated, 64); | ||||
@@ -134,11 +139,11 @@ static int dvvideo_decode_init(AVCodecContext *avctx) | |||||
/* XXX: do it only for constant case */ | /* XXX: do it only for constant case */ | ||||
dv_build_unquantize_tables(s); | dv_build_unquantize_tables(s); | ||||
return 0; | return 0; | ||||
} | } | ||||
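The memset/for loop added to dvvideo_decode_init() builds dv_vlc_codes[] as a reverse (run, level) -> VLC-index map, presumably groundwork for a DV encoder: entries are preset to -1 via memset(0xff), and only codes with run < 15, level < 23 and length < 15 get a fast slot. A minimal, self-contained sketch of the same technique; the toy_* names are made up for illustration, not the dvdata.h tables:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Illustrative sketch, not part of the patch: toy stand-ins for the
   dv_vlc_run[]/dv_vlc_level[] tables. */
static const uint8_t toy_run[]   = { 0, 0, 1, 2, 0 };
static const uint8_t toy_level[] = { 1, 2, 1, 1, 3 };
#define TOY_NB (sizeof(toy_run)/sizeof(toy_run[0]))

static int16_t toy_codes[15][23];   /* reverse map: (run, level) -> table index */

int main(void)
{
    unsigned i;
    int run = 1, level = 1, idx;

    memset(toy_codes, 0xff, sizeof(toy_codes));   /* 0xffff == -1 marks "no code" */
    for (i = 0; i < TOY_NB; i++)
        if (toy_run[i] < 15 && toy_level[i] < 23)
            toy_codes[toy_run[i]][toy_level[i]] = i;

    idx = toy_codes[run][level];
    if (idx == -1)
        printf("no short code for run=%d level=%d\n", run, level);
    else
        printf("run=%d level=%d -> VLC table entry %d\n", run, level, idx);
    return 0;
}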
//#define VLC_DEBUG | |||||
// #define VLC_DEBUG | |||||
typedef struct BlockInfo { | typedef struct BlockInfo { | ||||
const uint8_t *shift_table; | const uint8_t *shift_table; | ||||
@@ -450,29 +455,29 @@ static inline void dv_decode_video_segment(DVVideoDecodeContext *s, | |||||
v = *mb_pos_ptr++; | v = *mb_pos_ptr++; | ||||
mb_x = v & 0xff; | mb_x = v & 0xff; | ||||
mb_y = v >> 8; | mb_y = v >> 8; | ||||
y_ptr = s->current_picture[0] + (mb_y * s->linesize[0] * 8) + (mb_x * 8); | |||||
if (s->sampling_411) | |||||
c_offset = (mb_y * s->linesize[1] * 8) + ((mb_x >> 2) * 8); | |||||
y_ptr = s->picture.data[0] + (mb_y * s->picture.linesize[0] * 8) + (mb_x * 8); | |||||
if (s->sys->pix_fmt == PIX_FMT_YUV411P) | |||||
c_offset = (mb_y * s->picture.linesize[1] * 8) + ((mb_x >> 2) * 8); | |||||
else | else | ||||
c_offset = ((mb_y >> 1) * s->linesize[1] * 8) + ((mb_x >> 1) * 8); | |||||
c_offset = ((mb_y >> 1) * s->picture.linesize[1] * 8) + ((mb_x >> 1) * 8); | |||||
for(j = 0;j < 6; j++) { | for(j = 0;j < 6; j++) { | ||||
idct_put = s->idct_put[mb->dct_mode]; | idct_put = s->idct_put[mb->dct_mode]; | ||||
if (j < 4) { | if (j < 4) { | ||||
if (s->sampling_411 && mb_x < (704 / 8)) { | |||||
if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x < (704 / 8)) { | |||||
/* NOTE: at end of line, the macroblock is handled as 420 */ | /* NOTE: at end of line, the macroblock is handled as 420 */ | ||||
idct_put(y_ptr + (j * 8), s->linesize[0], block); | |||||
idct_put(y_ptr + (j * 8), s->picture.linesize[0], block); | |||||
} else { | } else { | ||||
idct_put(y_ptr + ((j & 1) * 8) + ((j >> 1) * 8 * s->linesize[0]), | |||||
s->linesize[0], block); | |||||
idct_put(y_ptr + ((j & 1) * 8) + ((j >> 1) * 8 * s->picture.linesize[0]), | |||||
s->picture.linesize[0], block); | |||||
} | } | ||||
} else { | } else { | ||||
if (s->sampling_411 && mb_x >= (704 / 8)) { | |||||
if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) { | |||||
uint8_t pixels[64], *c_ptr, *c_ptr1, *ptr; | uint8_t pixels[64], *c_ptr, *c_ptr1, *ptr; | ||||
int y, linesize; | int y, linesize; | ||||
/* NOTE: at end of line, the macroblock is handled as 420 */ | /* NOTE: at end of line, the macroblock is handled as 420 */ | ||||
idct_put(pixels, 8, block); | idct_put(pixels, 8, block); | ||||
linesize = s->linesize[6 - j]; | |||||
c_ptr = s->current_picture[6 - j] + c_offset; | |||||
linesize = s->picture.linesize[6 - j]; | |||||
c_ptr = s->picture.data[6 - j] + c_offset; | |||||
ptr = pixels; | ptr = pixels; | ||||
for(y = 0;y < 8; y++) { | for(y = 0;y < 8; y++) { | ||||
/* convert to 411P */ | /* convert to 411P */ | ||||
@@ -486,8 +491,8 @@ static inline void dv_decode_video_segment(DVVideoDecodeContext *s, | |||||
} | } | ||||
} else { | } else { | ||||
/* don't ask me why they inverted Cb and Cr ! */ | /* don't ask me why they inverted Cb and Cr ! */ | ||||
idct_put(s->current_picture[6 - j] + c_offset, | |||||
s->linesize[6 - j], block); | |||||
idct_put(s->picture.data[6 - j] + c_offset, | |||||
s->picture.linesize[6 - j], block); | |||||
} | } | ||||
} | } | ||||
block += 64; | block += 64; | ||||
@@ -496,7 +501,6 @@ static inline void dv_decode_video_segment(DVVideoDecodeContext *s, | |||||
} | } | ||||
} | } | ||||
/* NOTE: exactly one frame must be given (120000 bytes for NTSC, | /* NOTE: exactly one frame must be given (120000 bytes for NTSC, | ||||
144000 bytes for PAL) */ | 144000 bytes for PAL) */ | ||||
static int dvvideo_decode_frame(AVCodecContext *avctx, | static int dvvideo_decode_frame(AVCodecContext *avctx, | ||||
@@ -504,115 +508,35 @@ static int dvvideo_decode_frame(AVCodecContext *avctx, | |||||
uint8_t *buf, int buf_size) | uint8_t *buf, int buf_size) | ||||
{ | { | ||||
DVVideoDecodeContext *s = avctx->priv_data; | DVVideoDecodeContext *s = avctx->priv_data; | ||||
int sct, dsf, apt, ds, nb_dif_segs, vs, width, height, i, packet_size; | |||||
uint8_t *buf_ptr; | |||||
int ds, vs; | |||||
const uint16_t *mb_pos_ptr; | const uint16_t *mb_pos_ptr; | ||||
/* parse id */ | |||||
init_get_bits(&s->gb, buf, buf_size*8); | |||||
sct = get_bits(&s->gb, 3); | |||||
if (sct != 0) | |||||
return -1; | |||||
skip_bits(&s->gb, 5); | |||||
get_bits(&s->gb, 4); /* dsn (sequence number) */
get_bits(&s->gb, 1); /* fsc (channel number) */ | |||||
skip_bits(&s->gb, 3); | |||||
get_bits(&s->gb, 8); /* dbn (diff block number 0-134) */ | |||||
dsf = get_bits(&s->gb, 1); /* 0 = NTSC 1 = PAL */ | |||||
if (get_bits(&s->gb, 1) != 0) | |||||
return -1; | |||||
skip_bits(&s->gb, 11); | |||||
apt = get_bits(&s->gb, 3); /* apt */ | |||||
get_bits(&s->gb, 1); /* tf1 */ | |||||
skip_bits(&s->gb, 4); | |||||
get_bits(&s->gb, 3); /* ap1 */ | |||||
get_bits(&s->gb, 1); /* tf2 */ | |||||
skip_bits(&s->gb, 4); | |||||
get_bits(&s->gb, 3); /* ap2 */ | |||||
get_bits(&s->gb, 1); /* tf3 */ | |||||
skip_bits(&s->gb, 4); | |||||
get_bits(&s->gb, 3); /* ap3 */ | |||||
/* init size */ | |||||
width = 720; | |||||
if (dsf) { | |||||
avctx->frame_rate = 25; | |||||
avctx->frame_rate_base = 1; | |||||
packet_size = PAL_FRAME_SIZE; | |||||
height = 576; | |||||
nb_dif_segs = 12; | |||||
} else { | |||||
avctx->frame_rate = 30000; | |||||
avctx->frame_rate_base = 1001; | |||||
packet_size = NTSC_FRAME_SIZE; | |||||
height = 480; | |||||
nb_dif_segs = 10; | |||||
} | |||||
/* NOTE: we only accept several full frames */ | |||||
if (buf_size < packet_size) | |||||
return -1; | |||||
/* NTSC[dsf == 0] is always 720x480, 4:1:1
 * PAL[dsf == 1] is always 720x576, 4:2:0 for IEC 61834[apt == 0],
 * but for SMPTE 314M[apt == 1] it is 720x576, 4:1:1
 */
s->sampling_411 = !dsf || apt; | |||||
if (s->sampling_411) { | |||||
mb_pos_ptr = dsf ? dv_place_411P : dv_place_411; | |||||
avctx->pix_fmt = PIX_FMT_YUV411P; | |||||
} else { | |||||
mb_pos_ptr = dv_place_420; | |||||
avctx->pix_fmt = PIX_FMT_YUV420P; | |||||
} | |||||
avctx->width = width; | |||||
avctx->height = height; | |||||
/* Once again, this is pretty complicated by the fact that the same
 * field is used differently by IEC 61834[apt == 0] and
 * SMPTE 314M[apt == 1].
 */
if (buf[VAUX_TC61_OFFSET] == 0x61 && | |||||
((apt == 0 && (buf[VAUX_TC61_OFFSET + 2] & 0x07) == 0x07) || | |||||
(apt == 1 && (buf[VAUX_TC61_OFFSET + 2] & 0x07) == 0x02))) | |||||
avctx->aspect_ratio = 16.0 / 9.0; | |||||
else | |||||
avctx->aspect_ratio = 4.0 / 3.0; | |||||
s->sys = dv_frame_profile(buf); | |||||
if (!s->sys || buf_size < s->sys->frame_size) | |||||
return -1; /* NOTE: we only accept several full frames */ | |||||
if(s->picture.data[0]) | if(s->picture.data[0]) | ||||
avctx->release_buffer(avctx, &s->picture); | avctx->release_buffer(avctx, &s->picture); | ||||
s->picture.reference= 0; | |||||
s->picture.reference = 0; | |||||
avctx->pix_fmt = s->sys->pix_fmt; | |||||
if(avctx->get_buffer(avctx, &s->picture) < 0) { | if(avctx->get_buffer(avctx, &s->picture) < 0) { | ||||
fprintf(stderr, "get_buffer() failed\n"); | fprintf(stderr, "get_buffer() failed\n"); | ||||
return -1; | return -1; | ||||
} | } | ||||
for(i=0;i<3;i++) { | |||||
s->current_picture[i] = s->picture.data[i]; | |||||
s->linesize[i] = s->picture.linesize[i]; | |||||
if (!s->current_picture[i]) | |||||
return -1; | |||||
} | |||||
s->width = width; | |||||
s->height = height; | |||||
/* for each DIF segment */ | /* for each DIF segment */ | ||||
buf_ptr = buf; | |||||
for (ds = 0; ds < nb_dif_segs; ds++) { | |||||
buf_ptr += 6 * 80; /* skip DIF segment header */ | |||||
mb_pos_ptr = s->sys->video_place; | |||||
for (ds = 0; ds < s->sys->difseg_size; ds++) { | |||||
buf += 6 * 80; /* skip DIF segment header */ | |||||
for(vs = 0; vs < 27; vs++) { | for(vs = 0; vs < 27; vs++) { | ||||
if ((vs % 3) == 0) { | |||||
/* skip audio block */ | |||||
buf_ptr += 80; | |||||
} | |||||
dv_decode_video_segment(s, buf_ptr, mb_pos_ptr); | |||||
buf_ptr += 5 * 80; | |||||
if ((vs % 3) == 0) | |||||
buf += 80; /* skip audio block */ | |||||
dv_decode_video_segment(s, buf, mb_pos_ptr); | |||||
buf += 5 * 80; | |||||
mb_pos_ptr += 5; | mb_pos_ptr += 5; | ||||
} | } | ||||
} | } | ||||
@@ -623,7 +547,7 @@ static int dvvideo_decode_frame(AVCodecContext *avctx, | |||||
*data_size = sizeof(AVFrame); | *data_size = sizeof(AVFrame); | ||||
*(AVFrame*)data= s->picture; | *(AVFrame*)data= s->picture; | ||||
return packet_size; | |||||
return s->sys->frame_size; | |||||
} | } | ||||
static int dvvideo_decode_end(AVCodecContext *avctx) | static int dvvideo_decode_end(AVCodecContext *avctx) | ||||
@@ -645,158 +569,3 @@ AVCodec dvvideo_decoder = { | |||||
CODEC_CAP_DR1, | CODEC_CAP_DR1, | ||||
NULL | NULL | ||||
}; | }; | ||||
typedef struct DVAudioDecodeContext { | |||||
AVCodecContext *avctx; | |||||
GetBitContext gb; | |||||
} DVAudioDecodeContext; | |||||
static int dvaudio_decode_init(AVCodecContext *avctx) | |||||
{ | |||||
// DVAudioDecodeContext *s = avctx->priv_data; | |||||
return 0; | |||||
} | |||||
static uint16_t dv_audio_12to16(uint16_t sample) | |||||
{ | |||||
uint16_t shift, result; | |||||
sample = (sample < 0x800) ? sample : sample | 0xf000; | |||||
shift = (sample & 0xf00) >> 8; | |||||
if (shift < 0x2 || shift > 0xd) { | |||||
result = sample; | |||||
} else if (shift < 0x8) { | |||||
shift--; | |||||
result = (sample - (256 * shift)) << shift; | |||||
} else { | |||||
shift = 0xe - shift; | |||||
result = ((sample + ((256 * shift) + 1)) << shift) - 1; | |||||
} | |||||
return result; | |||||
} | |||||
/* NOTE: exactly one frame must be given (120000 bytes for NTSC,
   144000 bytes for PAL)
   There are a couple of assumptions being made here:
   1. By default we silence erroneous (0x8000/16bit, 0x800/12bit)
      audio samples. We can pass them upwards once ffmpeg is ready
      to deal with them.
   2. We don't do software emphasis.
   3. Audio is always returned as 16bit linear samples: 12bit
      nonlinear samples are converted into 16bit linear ones.
*/
static int dvaudio_decode_frame(AVCodecContext *avctx, | |||||
void *data, int *data_size, | |||||
uint8_t *buf, int buf_size) | |||||
{ | |||||
DVVideoDecodeContext *s = avctx->priv_data; | |||||
const uint16_t (*unshuffle)[9]; | |||||
int smpls, freq, quant, sys, stride, difseg, ad, dp, nb_dif_segs, i; | |||||
uint16_t lc, rc; | |||||
uint8_t *buf_ptr; | |||||
/* parse id */ | |||||
init_get_bits(&s->gb, &buf[AAUX_AS_OFFSET], 5*8); | |||||
i = get_bits(&s->gb, 8); | |||||
if (i != 0x50) { /* No audio ? */ | |||||
*data_size = 0; | |||||
return buf_size; | |||||
} | |||||
get_bits(&s->gb, 1); /* 0 - locked audio, 1 - unlocked audio */ | |||||
skip_bits(&s->gb, 1); | |||||
smpls = get_bits(&s->gb, 6); /* samples in this frame - min. samples */ | |||||
skip_bits(&s->gb, 8); | |||||
skip_bits(&s->gb, 2); | |||||
sys = get_bits(&s->gb, 1); /* 0 - 60 fields, 1 = 50 fields */ | |||||
skip_bits(&s->gb, 5); | |||||
get_bits(&s->gb, 1); /* 0 - emphasis on, 1 - emphasis off */ | |||||
get_bits(&s->gb, 1); /* 0 - reserved, 1 - emphasis time constant 50/15us */ | |||||
freq = get_bits(&s->gb, 3); /* 0 - 48KHz, 1 - 44,1kHz, 2 - 32 kHz */ | |||||
quant = get_bits(&s->gb, 3); /* 0 - 16bit linear, 1 - 12bit nonlinear */ | |||||
if (quant > 1) | |||||
return -1; /* Unsupported quantization */ | |||||
avctx->sample_rate = dv_audio_frequency[freq]; | |||||
avctx->channels = 2; | |||||
avctx->bit_rate = avctx->channels * avctx->sample_rate * 16; | |||||
// What about: | |||||
// avctx->frame_size = | |||||
*data_size = (dv_audio_min_samples[sys][freq] + smpls) * | |||||
avctx->channels * 2; | |||||
if (sys) { | |||||
nb_dif_segs = 12; | |||||
stride = 108; | |||||
unshuffle = dv_place_audio50; | |||||
} else { | |||||
nb_dif_segs = 10; | |||||
stride = 90; | |||||
unshuffle = dv_place_audio60; | |||||
} | |||||
/* for each DIF segment */ | |||||
buf_ptr = buf; | |||||
for (difseg = 0; difseg < nb_dif_segs; difseg++) { | |||||
buf_ptr += 6 * 80; /* skip DIF segment header */ | |||||
for (ad = 0; ad < 9; ad++) { | |||||
for (dp = 8; dp < 80; dp+=2) { | |||||
if (quant == 0) { /* 16bit quantization */ | |||||
i = unshuffle[difseg][ad] + (dp - 8)/2 * stride; | |||||
((short *)data)[i] = (buf_ptr[dp] << 8) | buf_ptr[dp+1]; | |||||
if (((unsigned short *)data)[i] == 0x8000) | |||||
((short *)data)[i] = 0; | |||||
} else { /* 12bit quantization */ | |||||
if (difseg >= nb_dif_segs/2) | |||||
goto out; /* We're not doing 4ch at this time */ | |||||
lc = ((uint16_t)buf_ptr[dp] << 4) | | |||||
((uint16_t)buf_ptr[dp+2] >> 4); | |||||
rc = ((uint16_t)buf_ptr[dp+1] << 4) | | |||||
((uint16_t)buf_ptr[dp+2] & 0x0f); | |||||
lc = (lc == 0x800 ? 0 : dv_audio_12to16(lc)); | |||||
rc = (rc == 0x800 ? 0 : dv_audio_12to16(rc)); | |||||
i = unshuffle[difseg][ad] + (dp - 8)/3 * stride; | |||||
((short *)data)[i] = lc; | |||||
i = unshuffle[difseg+nb_dif_segs/2][ad] + (dp - 8)/3 * stride; | |||||
((short *)data)[i] = rc; | |||||
++dp; | |||||
} | |||||
} | |||||
buf_ptr += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */ | |||||
} | |||||
} | |||||
out: | |||||
return buf_size; | |||||
} | |||||
static int dvaudio_decode_end(AVCodecContext *avctx) | |||||
{ | |||||
// DVAudioDecodeContext *s = avctx->priv_data; | |||||
return 0; | |||||
} | |||||
AVCodec dvaudio_decoder = { | |||||
"dvaudio", | |||||
CODEC_TYPE_AUDIO, | |||||
CODEC_ID_DVAUDIO, | |||||
sizeof(DVAudioDecodeContext), | |||||
dvaudio_decode_init, | |||||
NULL, | |||||
dvaudio_decode_end, | |||||
dvaudio_decode_frame, | |||||
0, | |||||
NULL | |||||
}; |
@@ -21,11 +21,34 @@ | |||||
* @file dvdata.h | * @file dvdata.h | ||||
* Constants for DV codec. | * Constants for DV codec. | ||||
*/ | */ | ||||
/*
 * DVprofile is used to express the differences between various
 * DV flavors. For now it's primarily used for differentiating
 * 525/60 and 625/50, but the plan is to use it for various
 * DV specs as well (e.g. SMPTE 314M vs. IEC 61834).
 */
typedef struct DVprofile {
    int dsf;                   /* value of the dsf in the DV header */
    int frame_size;            /* total size of one frame in bytes */
    int difseg_size;           /* number of DIF segments */
    int frame_rate;
    int frame_rate_base;
    int ltc_divisor;           /* FPS from the LTC standpoint */
    int height;                /* picture height in pixels */
    int width;                 /* picture width in pixels */
    const uint16_t *video_place; /* positions of all DV macroblocks */
    enum PixelFormat pix_fmt;  /* picture pixel format */
    int audio_stride;          /* size of audio_shuffle table */
    int audio_min_samples[3];  /* min amount of audio samples */
                               /* for 48kHz, 44.1kHz and 32kHz */
    int audio_samples_dist[5]; /* how many samples are supposed to be */
                               /* in each frame of a 5-frame window */
    const uint16_t (*audio_shuffle)[9]; /* PCM shuffling table */
} DVprofile;
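The audio_samples_dist[] field above holds the per-frame sample counts over a five-frame window; the dv_profiles table below fills it with 1602/1601 for 525/60 and a constant 1920 for 625/50. A quick self-contained check of where those numbers come from (a sketch for illustration, not part of the patch):

#include <stdio.h>

/* At 48 kHz, 525/60 video runs at 30000/1001 fps, so one frame carries
 * 48000*1001/30000 = 1601.6 samples on average -- hence the repeating
 * 1602,1601,1602,1601,1602 pattern.  625/50 is exactly 48000/25 = 1920. */
int main(void)
{
    const int dist525[5] = { 1602, 1601, 1602, 1601, 1602 };
    int i, sum = 0;

    for (i = 0; i < 5; i++)
        sum += dist525[i];
    printf("525/60: %d samples per 5 frames (avg %.1f), exact 48000*1001*5/30000 = %d\n",
           sum, sum / 5.0, 48000 * 1001 * 5 / 30000);
    printf("625/50: 48000/25 = %d samples per frame\n", 48000 / 25);
    return 0;
}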
#define NB_DV_VLC 409 | #define NB_DV_VLC 409 | ||||
#define AAUX_AS_OFFSET (80*6 + 80*16*3 + 3) | |||||
#define AAUX_ASC_OFFSET (80*6 + 80*16*4 + 3) | |||||
#define VAUX_TC61_OFFSET (80*5 + 48 + 5) | |||||
static const uint16_t dv_vlc_bits[409] = { | static const uint16_t dv_vlc_bits[409] = { | ||||
0x0000, 0x0002, 0x0007, 0x0008, 0x0009, 0x0014, 0x0015, 0x0016, | 0x0000, 0x0002, 0x0007, 0x0008, 0x0009, 0x0014, 0x0015, 0x0016, | ||||
@@ -283,7 +306,7 @@ static const uint8_t dv_248_areas[64] = { | |||||
1,2,2,3,3,3,3,3, | 1,2,2,3,3,3,3,3, | ||||
}; | }; | ||||
static uint8_t dv_quant_shifts[22][4] = { | |||||
static const uint8_t dv_quant_shifts[22][4] = { | |||||
{ 3,3,4,4 }, | { 3,3,4,4 }, | ||||
{ 3,3,4,4 }, | { 3,3,4,4 }, | ||||
{ 2,3,3,4 }, | { 2,3,3,4 }, | ||||
@@ -1240,7 +1263,7 @@ static const uint16_t dv_place_411[1350] = { | |||||
0x0834, 0x2320, 0x2f44, 0x3810, 0x1658, | 0x0834, 0x2320, 0x2f44, 0x3810, 0x1658, | ||||
}; | }; | ||||
static const uint16_t dv_place_audio60[10][9] = { | |||||
static const uint16_t dv_audio_shuffle525[10][9] = { | |||||
{ 0, 30, 60, 20, 50, 80, 10, 40, 70 }, /* 1st channel */ | { 0, 30, 60, 20, 50, 80, 10, 40, 70 }, /* 1st channel */ | ||||
{ 6, 36, 66, 26, 56, 86, 16, 46, 76 }, | { 6, 36, 66, 26, 56, 86, 16, 46, 76 }, | ||||
{ 12, 42, 72, 2, 32, 62, 22, 52, 82 }, | { 12, 42, 72, 2, 32, 62, 22, 52, 82 }, | ||||
@@ -1254,7 +1277,7 @@ static const uint16_t dv_place_audio60[10][9] = { | |||||
{ 25, 55, 85, 15, 45, 75, 5, 35, 65 }, | { 25, 55, 85, 15, 45, 75, 5, 35, 65 }, | ||||
}; | }; | ||||
static const uint16_t dv_place_audio50[12][9] = { | |||||
static const uint16_t dv_audio_shuffle625[12][9] = { | |||||
{ 0, 36, 72, 26, 62, 98, 16, 52, 88}, /* 1st channel */ | { 0, 36, 72, 26, 62, 98, 16, 52, 88}, /* 1st channel */ | ||||
{ 6, 42, 78, 32, 68, 104, 22, 58, 94}, | { 6, 42, 78, 32, 68, 104, 22, 58, 94}, | ||||
{ 12, 48, 84, 2, 38, 74, 28, 64, 100}, | { 12, 48, 84, 2, 38, 74, 28, 64, 100}, | ||||
@@ -1271,10 +1294,77 @@ static const uint16_t dv_place_audio50[12][9] = { | |||||
}; | }; | ||||
static const int dv_audio_frequency[3] = {
    48000, 44100, 32000,
};
static const int dv_audio_min_samples[2][3] = {
    { 1580, 1452, 1053 }, /* 60 fields */
    { 1896, 1742, 1264 }, /* 50 fields */
static const DVprofile dv_profiles[] = { | |||||
{ .dsf = 0, | |||||
.frame_size = 120000, /* IEC 61834, SMPTE-314M - 525/60 (NTSC) */ | |||||
.difseg_size = 10, | |||||
.frame_rate = 30000, | |||||
.ltc_divisor = 30, | |||||
.frame_rate_base = 1001, | |||||
.height = 480, | |||||
.width = 720, | |||||
.video_place = dv_place_411, | |||||
.pix_fmt = PIX_FMT_YUV411P, | |||||
.audio_stride = 90, | |||||
.audio_min_samples = { 1580, 1452, 1053 }, /* for 48, 44.1 and 32Khz */ | |||||
.audio_samples_dist = { 1602, 1601, 1602, 1601, 1602 }, | |||||
.audio_shuffle = dv_audio_shuffle525, | |||||
}, | |||||
{ .dsf = 1, | |||||
.frame_size = 144000, /* IEC 61834 - 625/50 (PAL) */ | |||||
.difseg_size = 12, | |||||
.frame_rate = 25, | |||||
.frame_rate_base = 1, | |||||
.ltc_divisor = 25, | |||||
.height = 576, | |||||
.width = 720, | |||||
.video_place = dv_place_420, | |||||
.pix_fmt = PIX_FMT_YUV420P, | |||||
.audio_stride = 108, | |||||
.audio_min_samples = { 1896, 1742, 1264 }, /* for 48, 44.1 and 32Khz */ | |||||
.audio_samples_dist = { 1920, 1920, 1920, 1920, 1920 }, | |||||
.audio_shuffle = dv_audio_shuffle625, | |||||
}, | |||||
{ .dsf = 1, | |||||
.frame_size = 144000, /* SMPTE-314M - 625/50 (PAL) */ | |||||
.difseg_size = 12, | |||||
.frame_rate = 25, | |||||
.frame_rate_base = 1, | |||||
.ltc_divisor = 25, | |||||
.height = 576, | |||||
.width = 720, | |||||
.video_place = dv_place_411P, | |||||
.pix_fmt = PIX_FMT_YUV411P, | |||||
.audio_stride = 108, | |||||
.audio_min_samples = { 1896, 1742, 1264 }, /* for 48, 44.1 and 32Khz */ | |||||
.audio_samples_dist = { 1920, 1920, 1920, 1920, 1920 }, | |||||
.audio_shuffle = dv_audio_shuffle625, | |||||
} | |||||
}; | }; | ||||
static inline const DVprofile* dv_frame_profile(uint8_t* frame) | |||||
{ | |||||
if ((frame[3] & 0x80) == 0) { /* DSF flag */ | |||||
return &dv_profiles[0]; | |||||
} | |||||
else if ((frame[5] & 0x07) == 0) { /* APT flag */ | |||||
return &dv_profiles[1]; | |||||
} | |||||
else | |||||
return &dv_profiles[2]; | |||||
} | |||||
static inline const DVprofile* dv_codec_profile(AVCodecContext* codec) | |||||
{ | |||||
if (codec->width != 720) { | |||||
return NULL; | |||||
} | |||||
else if (codec->height == 480) { | |||||
return &dv_profiles[0]; | |||||
} | |||||
else | |||||
return &dv_profiles[1]; | |||||
} |
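dv_frame_profile() above keys off just two header fields of the first DIF block: the DSF bit in byte 3 and the APT field in byte 5. A self-contained sketch of the same detection on a raw .dv file; only the bit tests come from the code above, the file handling and strings are illustrative:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the DSF/APT test in dv_frame_profile(); everything else in this
   sketch is scaffolding, not part of the patch. */
static const char *dv_guess_profile(const uint8_t *frame)
{
    if ((frame[3] & 0x80) == 0)        /* DSF == 0 */
        return "525/60 (NTSC): 120000 bytes/frame, 4:1:1";
    else if ((frame[5] & 0x07) == 0)   /* APT == 0 */
        return "625/50 (IEC 61834): 144000 bytes/frame, 4:2:0";
    else
        return "625/50 (SMPTE 314M): 144000 bytes/frame, 4:1:1";
}

int main(int argc, char **argv)
{
    uint8_t hdr[80];                   /* first (header) DIF block of a frame */
    FILE *f;

    if (argc < 2 || !(f = fopen(argv[1], "rb"))) {
        fprintf(stderr, "usage: dvprobe file.dv\n");
        return 1;
    }
    if (fread(hdr, 1, sizeof(hdr), f) != sizeof(hdr)) {
        fclose(f);
        return 1;
    }
    printf("%s\n", dv_guess_profile(hdr));
    fclose(f);
    return 0;
}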
@@ -13,7 +13,7 @@ PPOBJS= | |||||
# mux and demuxes | # mux and demuxes | ||||
OBJS+=mpeg.o mpegts.o mpegtsenc.o ffm.o crc.o img.o raw.o rm.o \ | OBJS+=mpeg.o mpegts.o mpegtsenc.o ffm.o crc.o img.o raw.o rm.o \ | ||||
avienc.o avidec.o wav.o swf.o au.o gif.o mov.o mpjpeg.o dvcore.o dv.o \ | |||||
avienc.o avidec.o wav.o swf.o au.o gif.o mov.o mpjpeg.o dv.o \ | |||||
yuv4mpeg.o 4xm.o flvenc.o flvdec.o movenc.o psxstr.o idroq.o ipmovie.o \ | yuv4mpeg.o 4xm.o flvenc.o flvdec.o movenc.o psxstr.o idroq.o ipmovie.o \ | ||||
nut.o wc3movie.o mp3.o | nut.o wc3movie.o mp3.o | ||||
@@ -18,21 +18,10 @@ | |||||
*/ | */ | ||||
#include "avformat.h" | #include "avformat.h" | ||||
#include "avi.h" | #include "avi.h" | ||||
#include "dv.h" | |||||
//#define DEBUG | //#define DEBUG | ||||
static const struct AVI1Handler { | |||||
enum CodecID vcid; | |||||
enum CodecID acid; | |||||
uint32_t tag; | |||||
} AVI1Handlers[] = { | |||||
{ CODEC_ID_DVVIDEO, CODEC_ID_DVAUDIO, MKTAG('d', 'v', 's', 'd') }, | |||||
{ CODEC_ID_DVVIDEO, CODEC_ID_DVAUDIO, MKTAG('d', 'v', 'h', 'd') }, | |||||
{ CODEC_ID_DVVIDEO, CODEC_ID_DVAUDIO, MKTAG('d', 'v', 's', 'l') }, | |||||
/* This is supposed to be the last one */ | |||||
{ CODEC_ID_NONE, CODEC_ID_NONE, 0 }, | |||||
}; | |||||
typedef struct AVIIndex { | typedef struct AVIIndex { | ||||
unsigned char tag[4]; | unsigned char tag[4]; | ||||
unsigned int flags, pos, len; | unsigned int flags, pos, len; | ||||
@@ -40,14 +29,11 @@ typedef struct AVIIndex { | |||||
} AVIIndex; | } AVIIndex; | ||||
typedef struct { | typedef struct { | ||||
int64_t riff_end; | |||||
int64_t movi_end; | |||||
int type; | |||||
uint8_t *buf; | |||||
int buf_size; | |||||
int stream_index; | |||||
int64_t riff_end; | |||||
int64_t movi_end; | |||||
offset_t movi_list; | offset_t movi_list; | ||||
AVIIndex *first, *last; | AVIIndex *first, *last; | ||||
void* dv_demux; | |||||
} AVIContext; | } AVIContext; | ||||
#ifdef DEBUG | #ifdef DEBUG | ||||
@@ -97,11 +83,6 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap) | |||||
stream_index = -1; | stream_index = -1; | ||||
codec_type = -1; | codec_type = -1; | ||||
frame_period = 0; | frame_period = 0; | ||||
avi->type = 2; | |||||
avi->buf = av_malloc(1); | |||||
if (!avi->buf) | |||||
return -1; | |||||
avi->buf_size = 1; | |||||
for(;;) { | for(;;) { | ||||
if (url_feof(pb)) | if (url_feof(pb)) | ||||
goto fail; | goto fail; | ||||
@@ -134,7 +115,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap) | |||||
url_fskip(pb, 4 * 4); | url_fskip(pb, 4 * 4); | ||||
n = get_le32(pb); | n = get_le32(pb); | ||||
for(i=0;i<n;i++) { | for(i=0;i<n;i++) { | ||||
st = av_new_stream(s, 0); | |||||
st = av_new_stream(s, i); | |||||
if (!st) | if (!st) | ||||
goto fail; | goto fail; | ||||
} | } | ||||
@@ -144,24 +125,36 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap) | |||||
/* stream header */ | /* stream header */ | ||||
stream_index++; | stream_index++; | ||||
tag1 = get_le32(pb); | tag1 = get_le32(pb); | ||||
handler = get_le32(pb); /* codec tag */ | |||||
switch(tag1) { | switch(tag1) { | ||||
case MKTAG('i', 'a', 'v', 's'): | case MKTAG('i', 'a', 'v', 's'): | ||||
case MKTAG('i', 'v', 'a', 's'): | case MKTAG('i', 'v', 'a', 's'): | ||||
/* | |||||
* After some consideration -- I don't think we | |||||
* have to support anything but DV in a type1 AVIs. | |||||
*/ | |||||
if (s->nb_streams != 1) | if (s->nb_streams != 1) | ||||
goto fail; | goto fail; | ||||
avi->type = 1; | |||||
avi->stream_index = 0; | |||||
if (handler != MKTAG('d', 'v', 's', 'd') && | |||||
handler != MKTAG('d', 'v', 'h', 'd') && | |||||
handler != MKTAG('d', 'v', 's', 'l')) | |||||
goto fail; | |||||
avi->dv_demux = dv_init_demux(s, stream_index, stream_index + 1); | |||||
if (!avi->dv_demux) | |||||
goto fail; | |||||
stream_index++; | |||||
case MKTAG('v', 'i', 'd', 's'): | case MKTAG('v', 'i', 'd', 's'): | ||||
codec_type = CODEC_TYPE_VIDEO; | codec_type = CODEC_TYPE_VIDEO; | ||||
if (stream_index >= s->nb_streams) { | if (stream_index >= s->nb_streams) { | ||||
url_fskip(pb, size - 4); | |||||
url_fskip(pb, size - 8); | |||||
break; | break; | ||||
} | } | ||||
st = s->streams[stream_index]; | st = s->streams[stream_index]; | ||||
handler = get_le32(pb); /* codec tag */ | |||||
get_le32(pb); /* flags */ | get_le32(pb); /* flags */ | ||||
get_le16(pb); /* priority */ | get_le16(pb); /* priority */ | ||||
get_le16(pb); /* language */ | get_le16(pb); /* language */ | ||||
@@ -186,29 +179,6 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap) | |||||
st->codec.frame_rate_base * AV_TIME_BASE / | st->codec.frame_rate_base * AV_TIME_BASE / | ||||
st->codec.frame_rate; | st->codec.frame_rate; | ||||
if (avi->type == 1) { | |||||
AVStream *st; | |||||
st = av_new_stream(s, 0); | |||||
if (!st) | |||||
goto fail; | |||||
stream_index++; | |||||
for (i=0; AVI1Handlers[i].tag != 0; ++i) | |||||
if (AVI1Handlers[i].tag == handler) | |||||
break; | |||||
if (AVI1Handlers[i].tag != 0) { | |||||
s->streams[0]->codec.codec_type = CODEC_TYPE_VIDEO; | |||||
s->streams[0]->codec.codec_id = AVI1Handlers[i].vcid; | |||||
s->streams[1]->codec.codec_type = CODEC_TYPE_AUDIO; | |||||
s->streams[1]->codec.codec_id = AVI1Handlers[i].acid; | |||||
} else { | |||||
goto fail; | |||||
} | |||||
} | |||||
url_fskip(pb, size - 9 * 4); | url_fskip(pb, size - 9 * 4); | ||||
break; | break; | ||||
case MKTAG('a', 'u', 'd', 's'): | case MKTAG('a', 'u', 'd', 's'): | ||||
@@ -218,12 +188,11 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap) | |||||
codec_type = CODEC_TYPE_AUDIO; | codec_type = CODEC_TYPE_AUDIO; | ||||
if (stream_index >= s->nb_streams) { | if (stream_index >= s->nb_streams) { | ||||
url_fskip(pb, size - 4); | |||||
url_fskip(pb, size - 8); | |||||
break; | break; | ||||
} | } | ||||
st = s->streams[stream_index]; | st = s->streams[stream_index]; | ||||
get_le32(pb); /* tag */ | |||||
get_le32(pb); /* flags */ | get_le32(pb); /* flags */ | ||||
get_le16(pb); /* priority */ | get_le16(pb); /* priority */ | ||||
get_le16(pb); /* language */ | get_le16(pb); /* language */ | ||||
@@ -244,7 +213,7 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap) | |||||
break; | break; | ||||
case MKTAG('s', 't', 'r', 'f'): | case MKTAG('s', 't', 'r', 'f'): | ||||
/* stream header */ | /* stream header */ | ||||
if (stream_index >= s->nb_streams || avi->type == 1) { | |||||
if (stream_index >= s->nb_streams || avi->dv_demux) { | |||||
url_fskip(pb, size); | url_fskip(pb, size); | ||||
} else { | } else { | ||||
st = s->streams[stream_index]; | st = s->streams[stream_index]; | ||||
@@ -305,7 +274,6 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap) | |||||
/* check stream number */ | /* check stream number */ | ||||
if (stream_index != s->nb_streams - 1) { | if (stream_index != s->nb_streams - 1) { | ||||
fail: | fail: | ||||
av_free(avi->buf); | |||||
for(i=0;i<s->nb_streams;i++) { | for(i=0;i<s->nb_streams;i++) { | ||||
av_freep(&s->streams[i]->codec.extradata); | av_freep(&s->streams[i]->codec.extradata); | ||||
av_freep(&s->streams[i]); | av_freep(&s->streams[i]); | ||||
@@ -316,31 +284,21 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap) | |||||
return 0; | return 0; | ||||
} | } | ||||
static void __destruct_pkt(struct AVPacket *pkt) | |||||
{ | |||||
pkt->data = NULL; pkt->size = 0; | |||||
return; | |||||
} | |||||
static int avi_read_packet(AVFormatContext *s, AVPacket *pkt) | static int avi_read_packet(AVFormatContext *s, AVPacket *pkt) | ||||
{ | { | ||||
AVIContext *avi = s->priv_data; | AVIContext *avi = s->priv_data; | ||||
ByteIOContext *pb = &s->pb; | ByteIOContext *pb = &s->pb; | ||||
int n, d[8], size, i; | int n, d[8], size, i; | ||||
void* dstr; | |||||
memset(d, -1, sizeof(int)*8); | memset(d, -1, sizeof(int)*8); | ||||
if (avi->type == 1 && avi->stream_index) { | |||||
/* duplicate DV packet */ | |||||
av_init_packet(pkt); | |||||
pkt->data = avi->buf; | |||||
pkt->size = avi->buf_size; | |||||
pkt->destruct = __destruct_pkt; | |||||
pkt->stream_index = avi->stream_index; | |||||
avi->stream_index = !avi->stream_index; | |||||
return 0; | |||||
if (avi->dv_demux) { | |||||
size = dv_get_packet(avi->dv_demux, pkt); | |||||
if (size >= 0) | |||||
return size; | |||||
} | } | ||||
for(i=url_ftell(pb); !url_feof(pb); i++) { | for(i=url_ftell(pb); !url_feof(pb); i++) { | ||||
int j; | int j; | ||||
@@ -387,26 +345,24 @@ static int avi_read_packet(AVFormatContext *s, AVPacket *pkt) | |||||
&& n < s->nb_streams | && n < s->nb_streams | ||||
&& i + size <= avi->movi_end) { | && i + size <= avi->movi_end) { | ||||
if (avi->type == 1) { | |||||
uint8_t *tbuf = av_realloc(avi->buf, size + FF_INPUT_BUFFER_PADDING_SIZE); | |||||
if (!tbuf) | |||||
return -1; | |||||
avi->buf = tbuf; | |||||
avi->buf_size = size; | |||||
av_init_packet(pkt); | |||||
pkt->data = avi->buf; | |||||
pkt->size = avi->buf_size; | |||||
pkt->destruct = __destruct_pkt; | |||||
avi->stream_index = n; | |||||
} else { | |||||
av_new_packet(pkt, size); | |||||
} | |||||
av_new_packet(pkt, size); | |||||
get_buffer(pb, pkt->data, size); | get_buffer(pb, pkt->data, size); | ||||
if (size & 1) | |||||
if (size & 1) { | |||||
get_byte(pb); | get_byte(pb); | ||||
pkt->stream_index = n; | |||||
pkt->flags |= PKT_FLAG_KEY; // FIXME: We really should read index for that | |||||
return 0; | |||||
size++; | |||||
} | |||||
if (avi->dv_demux) { | |||||
dstr = pkt->destruct; | |||||
size = dv_produce_packet(avi->dv_demux, pkt, | |||||
pkt->data, pkt->size); | |||||
pkt->destruct = dstr; | |||||
} else { | |||||
pkt->stream_index = n; | |||||
pkt->flags |= PKT_FLAG_KEY; // FIXME: We really should read | |||||
// index for that | |||||
} | |||||
return size; | |||||
} | } | ||||
} | } | ||||
return -1; | return -1; | ||||
@@ -416,7 +372,6 @@ static int avi_read_close(AVFormatContext *s) | |||||
{ | { | ||||
int i; | int i; | ||||
AVIContext *avi = s->priv_data; | AVIContext *avi = s->priv_data; | ||||
av_free(avi->buf); | |||||
for(i=0;i<s->nb_streams;i++) { | for(i=0;i<s->nb_streams;i++) { | ||||
AVStream *st = s->streams[i]; | AVStream *st = s->streams[i]; | ||||
@@ -424,6 +379,9 @@ static int avi_read_close(AVFormatContext *s) | |||||
av_free(st->codec.extradata); | av_free(st->codec.extradata); | ||||
} | } | ||||
if (avi->dv_demux) | |||||
av_free(avi->dv_demux); | |||||
return 0; | return 0; | ||||
} | } | ||||
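With the type-1 handling moved into libavformat/dv.c, avi_read_packet() now just drains any packet the DV demuxer already has queued (dv_get_packet) and otherwise hands the raw 'iavs'/'ivas' chunk to dv_produce_packet(), which splits it into video and audio packets. The header path accepts only the DV handler FourCCs dvsd/dvhd/dvsl; a self-contained sketch of that FourCC gate, with MKTAG reproduced inline and the rest purely illustrative:

#include <stdio.h>
#include <stdint.h>

/* Illustrative sketch, not part of the patch. */
#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))

/* Accept the same handler FourCCs the type-1 AVI branch above checks for. */
static int is_type1_dv_handler(uint32_t handler)
{
    return handler == MKTAG('d','v','s','d') ||
           handler == MKTAG('d','v','h','d') ||
           handler == MKTAG('d','v','s','l');
}

int main(void)
{
    printf("dvsd accepted: %d\n", is_type1_dv_handler(MKTAG('d','v','s','d')));
    printf("cvid accepted: %d\n", is_type1_dv_handler(MKTAG('c','v','i','d')));
    return 0;
}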
@@ -1,4 +1,10 @@
/*
 * General DV muxer/demuxer
 * Copyright (c) 2003 Roman Shaposhnick
 *
 * Many thanks to Dan Dennedy <dan@dennedy.org> for providing a wealth
 * of DV technical info.
 *
 * Raw DV format
 * Copyright (c) 2002 Fabrice Bellard.
 *
@@ -16,38 +22,696 @@ | |||||
* License along with this library; if not, write to the Free Software | * License along with this library; if not, write to the Free Software | ||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||||
*/ | */ | ||||
#include <time.h> | |||||
#include "avformat.h" | #include "avformat.h" | ||||
#include "dvcore.h" | |||||
#include "dvdata.h" | |||||
typedef struct DVDemuxContext { | typedef struct DVDemuxContext { | ||||
int is_audio; | |||||
uint8_t buf[144000]; | |||||
int size; | |||||
AVPacket audio_pkt; | |||||
AVStream *vst; | |||||
AVStream *ast; | |||||
} DVDemuxContext; | } DVDemuxContext; | ||||
/* raw input */ | |||||
static int dv_read_header(AVFormatContext *s, | |||||
AVFormatParameters *ap) | |||||
typedef struct DVMuxContext {
    const DVprofile* sys;      /* current DV profile, e.g. 525/60 or 625/50 */
    uint8_t frame_buf[144000]; /* frame under construction */
    FifoBuffer audio_data;     /* FIFO for storing excess amounts of PCM */
    int frames;                /* number of the current frame */
    time_t start_time;         /* start time of recording */
    uint8_t aspect;            /* aspect ID: 0 - 4:3, 7 - 16:9 */
    int has_audio;             /* frame under construction has audio */
    int has_video;             /* frame under construction has video */
} DVMuxContext;
enum dv_section_type { | |||||
dv_sect_header = 0x1f, | |||||
dv_sect_subcode = 0x3f, | |||||
dv_sect_vaux = 0x56, | |||||
dv_sect_audio = 0x76, | |||||
dv_sect_video = 0x96, | |||||
}; | |||||
enum dv_pack_type { | |||||
dv_header525 = 0x3f, /* see dv_write_pack for important details on */ | |||||
dv_header625 = 0xbf, /* these two packs */ | |||||
dv_timecode = 0x13, | |||||
dv_audio_source = 0x50, | |||||
dv_audio_control = 0x51, | |||||
dv_audio_recdate = 0x52, | |||||
dv_audio_rectime = 0x53, | |||||
dv_video_source = 0x60, | |||||
dv_video_control = 0x61, | |||||
dv_viedo_recdate = 0x62, | |||||
dv_video_rectime = 0x63, | |||||
dv_unknown_pack = 0xff, | |||||
}; | |||||
/*
 * The reason the following three big, ugly-looking tables are
 * here is my lack of the IEC 61834 DV spec. The tables were basically
 * constructed to keep the code that places packs in SSYB, VAUX and
 * AAUX blocks simple and table-driven. They conform to
 * SMPTE 314M and to the output of my personal DV camcorder, neither
 * of which is sufficient for producing a reliable DV stream. Thus,
 * while the code is still in development, I'll be gathering input from
 * people with different DV equipment and modifying the tables to
 * accommodate all the quirks. Later on, if possible, some of them
 * will be folded into smaller tables and/or switch-if logic. For
 * now, my only excuse is -- they don't eat up that much space.
 */
static const int dv_ssyb_packs_dist[12][6] = { | |||||
{ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 }, | |||||
{ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 }, | |||||
{ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 }, | |||||
{ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 }, | |||||
{ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 }, | |||||
{ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 }, | |||||
{ 0x13, 0x62, 0x63, 0x13, 0x62, 0x63 }, | |||||
{ 0x13, 0x62, 0x63, 0x13, 0x62, 0x63 }, | |||||
{ 0x13, 0x62, 0x63, 0x13, 0x62, 0x63 }, | |||||
{ 0x13, 0x62, 0x63, 0x13, 0x62, 0x63 }, | |||||
{ 0x13, 0x62, 0x63, 0x13, 0x62, 0x63 }, | |||||
{ 0x13, 0x62, 0x63, 0x13, 0x62, 0x63 }, | |||||
}; | |||||
static const int dv_vaux_packs_dist[12][15] = { | |||||
{ 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff, | |||||
0x60, 0x61, 0x62, 0x63, 0xff, 0xff }, | |||||
{ 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff, | |||||
0x60, 0x61, 0x62, 0x63, 0xff, 0xff }, | |||||
{ 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff, | |||||
0x60, 0x61, 0x62, 0x63, 0xff, 0xff }, | |||||
{ 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff, | |||||
0x60, 0x61, 0x62, 0x63, 0xff, 0xff }, | |||||
{ 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff, | |||||
0x60, 0x61, 0x62, 0x63, 0xff, 0xff }, | |||||
{ 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff, | |||||
0x60, 0x61, 0x62, 0x63, 0xff, 0xff }, | |||||
{ 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff, | |||||
0x60, 0x61, 0x62, 0x63, 0xff, 0xff }, | |||||
{ 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff, | |||||
0x60, 0x61, 0x62, 0x63, 0xff, 0xff }, | |||||
{ 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff, | |||||
0x60, 0x61, 0x62, 0x63, 0xff, 0xff }, | |||||
{ 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff, | |||||
0x60, 0x61, 0x62, 0x63, 0xff, 0xff }, | |||||
{ 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff, | |||||
0x60, 0x61, 0x62, 0x63, 0xff, 0xff }, | |||||
{ 0x60, 0x61, 0x62, 0x63, 0xff, 0xff, 0xff, 0xff, 0xff, | |||||
0x60, 0x61, 0x62, 0x63, 0xff, 0xff }, | |||||
}; | |||||
static const int dv_aaux_packs_dist[12][9] = { | |||||
{ 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff }, | |||||
{ 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff }, | |||||
{ 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff }, | |||||
{ 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff }, | |||||
{ 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff }, | |||||
{ 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff }, | |||||
{ 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff }, | |||||
{ 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff }, | |||||
{ 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff }, | |||||
{ 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff }, | |||||
{ 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff }, | |||||
{ 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff }, | |||||
}; | |||||
static inline uint16_t dv_audio_12to16(uint16_t sample) | |||||
{ | |||||
uint16_t shift, result; | |||||
sample = (sample < 0x800) ? sample : sample | 0xf000; | |||||
shift = (sample & 0xf00) >> 8; | |||||
if (shift < 0x2 || shift > 0xd) { | |||||
result = sample; | |||||
} else if (shift < 0x8) { | |||||
shift--; | |||||
result = (sample - (256 * shift)) << shift; | |||||
} else { | |||||
shift = 0xe - shift; | |||||
result = ((sample + ((256 * shift) + 1)) << shift) - 1; | |||||
} | |||||
return result; | |||||
} | |||||
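dv_audio_12to16() above undoes the DV 12-bit nonlinear companding: samples whose top hex digit falls between 0x2 and 0xd are expanded back onto a wider linear range, everything else passes through after sign extension. A self-contained sketch that copies the function verbatim and prints a few mappings to make the segments visible (the probe values are chosen arbitrarily):

#include <stdio.h>
#include <stdint.h>

/* Copy of dv_audio_12to16() above, reproduced so this sketch builds on its own. */
static uint16_t dv_audio_12to16(uint16_t sample)
{
    uint16_t shift, result;

    sample = (sample < 0x800) ? sample : sample | 0xf000;     /* sign-extend 12 bits */
    shift = (sample & 0xf00) >> 8;

    if (shift < 0x2 || shift > 0xd) {
        result = sample;                                      /* small values stay linear */
    } else if (shift < 0x8) {
        shift--;
        result = (sample - (256 * shift)) << shift;           /* positive segments */
    } else {
        shift = 0xe - shift;
        result = ((sample + ((256 * shift) + 1)) << shift) - 1; /* negative segments */
    }
    return result;
}

int main(void)
{
    /* arbitrary probe points across the 12-bit range; illustrative only */
    static const uint16_t probes[] = { 0x000, 0x0ff, 0x2aa, 0x500, 0x7ff,
                                       0x800, 0x900, 0xd00, 0xfff };
    unsigned i;

    for (i = 0; i < sizeof(probes)/sizeof(probes[0]); i++)
        printf("12-bit 0x%03x -> 16-bit %6d\n",
               probes[i], (int16_t)dv_audio_12to16(probes[i]));
    return 0;
}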
static int dv_audio_frame_size(const DVprofile* sys, int frame) | |||||
{ | { | ||||
AVStream *vst, *ast; | |||||
DVDemuxContext *c = s->priv_data; | |||||
return sys->audio_samples_dist[frame % (sizeof(sys->audio_samples_dist)/ | |||||
sizeof(sys->audio_samples_dist[0]))]; | |||||
} | |||||
vst = av_new_stream(s, 0); | |||||
if (!vst) | |||||
return AVERROR_NOMEM; | |||||
vst->codec.codec_type = CODEC_TYPE_VIDEO; | |||||
vst->codec.codec_id = CODEC_ID_DVVIDEO; | |||||
vst->codec.bit_rate = 25000000; | |||||
static int dv_write_pack(enum dv_pack_type pack_id, DVMuxContext *c, uint8_t* buf) | |||||
{ | |||||
struct tm tc; | |||||
time_t ct; | |||||
int ltc_frame; | |||||
ast = av_new_stream(s, 1); | |||||
if (!ast) | |||||
return AVERROR_NOMEM; | |||||
buf[0] = (uint8_t)pack_id; | |||||
switch (pack_id) { | |||||
case dv_header525: /* I can't imagine why these two weren't defined as real */ | |||||
case dv_header625: /* packs in SMPTE314M -- they definitely look like ones */ | |||||
buf[1] = 0xf8 | /* reserved -- always 1 */ | |||||
(0 & 0x07); /* APT: Track application ID */ | |||||
buf[2] = (0 << 7) | /* TF1: audio data is 0 - valid; 1 - invalid */ | |||||
(0x0f << 3) | /* reserved -- always 1 */ | |||||
(0 & 0x07); /* AP1: Audio application ID */ | |||||
buf[3] = (0 << 7) | /* TF2: video data is 0 - valid; 1 - invalid */ | |||||
(0x0f << 3) | /* reserved -- always 1 */ | |||||
(0 & 0x07); /* AP2: Video application ID */ | |||||
buf[4] = (0 << 7) | /* TF3: subcode(SSYB) is 0 - valid; 1 - invalid */ | |||||
(0x0f << 3) | /* reserved -- always 1 */ | |||||
(0 & 0x07); /* AP3: Subcode application ID */ | |||||
break; | |||||
case dv_timecode: | |||||
ct = (time_t)(c->frames / ((float)c->sys->frame_rate / | |||||
(float)c->sys->frame_rate_base)); | |||||
localtime_r(&ct, &tc); | |||||
/*
 * The LTC drop-frame counter drops two frame numbers (0 and 1) at the
 * start of every minute, unless the minute is exactly divisible by 10.
 */
ltc_frame = (c->frames + 2*ct/60 - 2*ct/600) % c->sys->ltc_divisor;
buf[1] = (0 << 7) | /* Color frame: 0 - unsync; 1 - sync mode */
(1 << 6) | /* Drop frame timecode: 0 - nondrop; 1 - drop */ | |||||
((ltc_frame / 10) << 4) | /* Tens of frames */ | |||||
(ltc_frame % 10); /* Units of frames */ | |||||
buf[2] = (1 << 7) | /* Biphase mark polarity correction: 0 - even; 1 - odd */ | |||||
((tc.tm_sec / 10) << 4) | /* Tens of seconds */ | |||||
(tc.tm_sec % 10); /* Units of seconds */ | |||||
buf[3] = (1 << 7) | /* Binary group flag BGF0 */ | |||||
((tc.tm_min / 10) << 4) | /* Tens of minutes */ | |||||
(tc.tm_min % 10); /* Units of minutes */ | |||||
buf[4] = (1 << 7) | /* Binary group flag BGF2 */ | |||||
(1 << 6) | /* Binary group flag BGF1 */ | |||||
((tc.tm_hour / 10) << 4) | /* Tens of hours */ | |||||
(tc.tm_hour % 10); /* Units of hours */ | |||||
break; | |||||
case dv_audio_source: /* AAUX source pack */ | |||||
buf[1] = (0 << 7) | /* locked mode */ | |||||
(1 << 6) | /* reserved -- always 1 */ | |||||
(dv_audio_frame_size(c->sys, c->frames) - | |||||
c->sys->audio_min_samples[0]); | |||||
/* # of samples */ | |||||
buf[2] = (0 << 7) | /* multi-stereo */ | |||||
(0 << 5) | /* #of audio channels per block: 0 -- 1 channel */ | |||||
(0 << 4) | /* pair bit: 0 -- one pair of channels */ | |||||
0; /* audio mode */ | |||||
buf[3] = (1 << 7) | /* res */ | |||||
(1 << 6) | /* multi-language flag */ | |||||
(c->sys->dsf << 5) | /* system: 60fields/50fields */ | |||||
0; /* definition: 0 -- SD (525/625) */ | |||||
buf[4] = (1 << 7) | /* emphasis: 1 -- off */ | |||||
(0 << 6) | /* emphasis time constant: 0 -- reserved */ | |||||
(0 << 3) | /* frequency: 0 -- 48Khz, 1 -- 44,1Khz, 2 -- 32Khz */ | |||||
0; /* quantization: 0 -- 16bit linear, 1 -- 12bit nonlinear */ | |||||
break; | |||||
case dv_audio_control: | |||||
buf[1] = (0 << 6) | /* copy protection: 0 -- unrestricted */ | |||||
(1 << 4) | /* input source: 1 -- digital input */ | |||||
(3 << 2) | /* compression: 3 -- no information */ | |||||
0; /* misc. info/SMPTE emphasis off */ | |||||
buf[2] = (1 << 7) | /* recording start point: 1 -- no */ | |||||
(1 << 6) | /* recording end point: 1 -- no */ | |||||
(1 << 3) | /* recording mode: 1 -- original */ | |||||
7; | |||||
buf[3] = (1 << 7) | /* direction: 1 -- forward */ | |||||
0x20; /* speed */ | |||||
buf[4] = (1 << 7) | /* reserved -- always 1 */ | |||||
0x7f; /* genre category */ | |||||
break; | |||||
case dv_audio_recdate: | |||||
case dv_viedo_recdate: /* VAUX recording date */ | |||||
ct = c->start_time + (time_t)(c->frames / | |||||
((float)c->sys->frame_rate / (float)c->sys->frame_rate_base)); | |||||
localtime_r(&ct, &tc); | |||||
buf[1] = 0xff; /* ds, tm, tens of time zone, units of time zone */ | |||||
/* 0xff is very likely to be "unknown" */ | |||||
buf[2] = (3 << 6) | /* reserved -- always 1 */ | |||||
((tc.tm_mday / 10) << 4) | /* Tens of day */ | |||||
(tc.tm_mday % 10); /* Units of day */ | |||||
buf[3] = /* we set high 4 bits to 0, shouldn't we set them to week? */ | |||||
(tc.tm_mon % 10); /* Units of month */ | |||||
buf[4] = (((tc.tm_year % 100) / 10) << 4) | /* Tens of year */ | |||||
(tc.tm_year % 10); /* Units of year */ | |||||
break; | |||||
case dv_audio_rectime: /* AAUX recording time */ | |||||
case dv_video_rectime: /* VAUX recording time */ | |||||
ct = c->start_time + (time_t)(c->frames / | |||||
((float)c->sys->frame_rate / (float)c->sys->frame_rate_base)); | |||||
localtime_r(&ct, &tc); | |||||
buf[1] = (3 << 6) | /* reserved -- always 1 */ | |||||
0x3f; /* tens of frame, units of frame: 0x3f - "unknown" ? */ | |||||
buf[2] = (1 << 7) | /* reserved -- always 1 */ | |||||
((tc.tm_sec / 10) << 4) | /* Tens of seconds */ | |||||
(tc.tm_sec % 10); /* Units of seconds */ | |||||
buf[3] = (1 << 7) | /* reserved -- always 1 */ | |||||
((tc.tm_min / 10) << 4) | /* Tens of minutes */ | |||||
(tc.tm_min % 10); /* Units of minutes */ | |||||
buf[4] = (3 << 6) | /* reserved -- always 1 */ | |||||
((tc.tm_hour / 10) << 4) | /* Tens of hours */ | |||||
(tc.tm_hour % 10); /* Units of hours */ | |||||
break; | |||||
case dv_video_source: | |||||
buf[1] = 0xff; /* reserved -- always 1 */ | |||||
buf[2] = (1 << 7) | /* B/W: 0 - b/w, 1 - color */ | |||||
(1 << 6) | /* following CLF is valid - 0, invalid - 1 */ | |||||
(3 << 4) | /* CLF: color frames id (see ITU-R BT.470-4) */ | |||||
0xf; /* reserved -- always 1 */ | |||||
buf[3] = (3 << 6) | /* reserved -- always 1 */ | |||||
(c->sys->dsf << 5) | /* system: 60fields/50fields */ | |||||
0; /* signal type video compression */ | |||||
buf[4] = 0xff; /* VISC: 0xff -- no information */ | |||||
break; | |||||
case dv_video_control: | |||||
buf[1] = (0 << 6) | /* Copy generation management (CGMS) 0 -- free */ | |||||
0x3f; /* reserved -- always 1 */ | |||||
buf[2] = 0xc8 | /* reserved -- always b11001xxx */ | |||||
c->aspect; | |||||
buf[3] = (1 << 7) | /* Frame/field flag 1 -- frame, 0 -- field */ | |||||
(1 << 6) | /* First/second field flag 0 -- field 2, 1 -- field 1 */ | |||||
(1 << 5) | /* Frame change flag 0 -- same picture as before, 1 -- different */ | |||||
(1 << 4) | /* 1 - interlaced, 0 - noninterlaced */ | |||||
0xc; /* reserved -- always b1100 */ | |||||
buf[4] = 0xff; /* reserved -- always 1 */ | |||||
break; | |||||
default: | |||||
buf[1] = buf[2] = buf[3] = buf[4] = 0xff; | |||||
} | |||||
return 5; | |||||
} | |||||
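The dv_timecode case in dv_write_pack() above implements SMPTE drop-frame counting: frame numbers 0 and 1 are skipped at the start of every minute except minutes divisible by 10, which keeps 30000/1001 fps timecode in step with wall-clock time. A standalone sketch of the same frame-count -> HH:MM:SS;FF conversion, independent of the DVMuxContext bookkeeping above:

#include <stdio.h>

/* Illustrative sketch, not part of the patch: convert a running frame count
   at 30000/1001 fps to drop-frame timecode, using the same rule as the
   dv_timecode pack (drop frames 0 and 1 of every minute except every 10th). */
static void drop_frame_tc(long frame, int *hh, int *mm, int *ss, int *ff)
{
    const long fpm  = 60 * 30 - 2;        /* frames in a dropped minute      */
    const long fp10 = 10 * fpm + 2;       /* frames in a 10-minute block     */
    long ten_min = frame / fp10;
    long rest    = frame % fp10;
    long minutes;

    /* the first minute of each block keeps its 2 extra frame numbers */
    if (rest < fpm + 2)
        minutes = ten_min * 10;
    else
        minutes = ten_min * 10 + 1 + (rest - (fpm + 2)) / fpm;

    /* re-insert the dropped frame numbers, then split at nominal 30 fps */
    frame += 2 * minutes - 2 * (minutes / 10);
    *ff = frame % 30;
    *ss = (frame / 30) % 60;
    *mm = (frame / (30 * 60)) % 60;
    *hh =  frame / (30 * 60 * 60);
}

int main(void)
{
    long probes[] = { 0, 1799, 1800, 17982, 107892 };  /* arbitrary frame counts */
    int hh, mm, ss, ff;
    unsigned i;

    for (i = 0; i < sizeof(probes)/sizeof(probes[0]); i++) {
        drop_frame_tc(probes[i], &hh, &mm, &ss, &ff);
        printf("frame %6ld -> %02d:%02d:%02d;%02d\n", probes[i], hh, mm, ss, ff);
    }
    return 0;
}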
ast->codec.codec_type = CODEC_TYPE_AUDIO; | |||||
ast->codec.codec_id = CODEC_ID_DVAUDIO; | |||||
c->is_audio = 0; | |||||
static inline int dv_write_dif_id(enum dv_section_type t, uint8_t seq_num, | |||||
uint8_t dif_num, uint8_t* buf) | |||||
{ | |||||
buf[0] = (uint8_t)t; /* Section type */ | |||||
buf[1] = (seq_num<<4) | /* DIF seq number 0-9 for 525/60; 0-11 for 625/50 */ | |||||
(0 << 3) | /* FSC: for 50Mb/s 0 - first channel; 1 - second */ | |||||
7; /* reserved -- always 1 */ | |||||
buf[2] = dif_num; /* DIF block number Video: 0-134, Audio: 0-8 */ | |||||
return 3; | |||||
} | |||||
return 0; | |||||
static inline int dv_write_ssyb_id(uint8_t syb_num, uint8_t fr, uint8_t* buf) | |||||
{ | |||||
if (syb_num == 0 || syb_num == 6) { | |||||
buf[0] = (fr<<7) | /* FR ID 1 - first half of each channel; 0 - second */ | |||||
(0<<4) | /* AP3 (Subcode application ID) */ | |||||
0x0f; /* reserved -- always 1 */ | |||||
} | |||||
else if (syb_num == 11) { | |||||
buf[0] = (fr<<7) | /* FR ID 1 - first half of each channel; 0 - second */ | |||||
0x7f; /* reserved -- always 1 */ | |||||
} | |||||
else { | |||||
buf[0] = (fr<<7) | /* FR ID 1 - first half of each channel; 0 - second */ | |||||
(0<<4) | /* APT (Track application ID) */ | |||||
0x0f; /* reserved -- always 1 */ | |||||
} | |||||
buf[1] = 0xf0 | /* reserved -- always 1 */ | |||||
(syb_num & 0x0f); /* SSYB number 0 - 11 */ | |||||
buf[2] = 0xff; /* reserved -- always 1 */ | |||||
return 3; | |||||
} | |||||
static void dv_format_frame(DVMuxContext *c, uint8_t* buf) | |||||
{ | |||||
int i, j, k; | |||||
for (i = 0; i < c->sys->difseg_size; i++) { | |||||
memset(buf, 0xff, 80 * 6); /* First 6 DIF blocks are for control data */ | |||||
/* DV header: 1DIF */ | |||||
buf += dv_write_dif_id(dv_sect_header, i, 0, buf); | |||||
buf += dv_write_pack((c->sys->dsf ? dv_header625 : dv_header525), c, buf); | |||||
buf += 72; /* unused bytes */ | |||||
/* DV subcode: 2DIFs */ | |||||
for (j = 0; j < 2; j++) { | |||||
buf += dv_write_dif_id( dv_sect_subcode, i, j, buf); | |||||
for (k = 0; k < 6; k++) { | |||||
buf += dv_write_ssyb_id(k, (i < c->sys->difseg_size/2), buf); | |||||
buf += dv_write_pack(dv_ssyb_packs_dist[i][k], c, buf); | |||||
} | |||||
buf += 29; /* unused bytes */ | |||||
} | |||||
/* DV VAUX: 3DIFs */ | |||||
for (j = 0; j < 3; j++) { | |||||
buf += dv_write_dif_id(dv_sect_vaux, i, j, buf); | |||||
for (k = 0; k < 15 ; k++) | |||||
buf += dv_write_pack(dv_vaux_packs_dist[i][k], c, buf); | |||||
buf += 2; /* unused bytes */ | |||||
} | |||||
/* DV Audio/Video: 135 Video DIFs + 9 Audio DIFs */ | |||||
for (j = 0; j < 135; j++) { | |||||
if (j%15 == 0) { | |||||
buf += dv_write_dif_id(dv_sect_audio, i, j/15, buf); | |||||
buf += dv_write_pack(dv_aaux_packs_dist[i][j/15], c, buf); | |||||
buf += 72; /* shuffled PCM audio */ | |||||
} | |||||
buf += dv_write_dif_id(dv_sect_video, i, j, buf); | |||||
buf += 77; /* 1 video macroblock: 1 byte control,
              4 * 14 bytes Y 8x8 data,
              10 bytes Cr 8x8 data,
              10 bytes Cb 8x8 data */
} | |||||
} | |||||
} | |||||
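dv_format_frame() above lays out each DIF sequence as 1 header + 2 subcode + 3 VAUX + 9 audio + 135 video blocks of 80 bytes, and multiplying that out is exactly where the 120000/144000 byte frame sizes in dv_profiles come from. A self-contained check of the arithmetic (a sketch, not part of the patch):

#include <stdio.h>

int main(void)
{
    /* per DIF sequence: 1 header + 2 subcode + 3 VAUX + 9 audio + 135 video blocks */
    int blocks_per_seq = 1 + 2 + 3 + 9 + 135;
    int bytes_per_seq  = blocks_per_seq * 80;

    printf("blocks per DIF sequence: %d (%d bytes)\n", blocks_per_seq, bytes_per_seq);
    printf("525/60: 10 sequences * %d = %d bytes per frame\n",
           bytes_per_seq, 10 * bytes_per_seq);
    printf("625/50: 12 sequences * %d = %d bytes per frame\n",
           bytes_per_seq, 12 * bytes_per_seq);
    return 0;
}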
static void dv_inject_audio(DVMuxContext *c, const uint8_t* pcm, uint8_t* frame_ptr) | |||||
{ | |||||
int i, j, d, of; | |||||
for (i = 0; i < c->sys->difseg_size; i++) { | |||||
frame_ptr += 6 * 80; /* skip DIF segment header */ | |||||
for (j = 0; j < 9; j++) { | |||||
for (d = 8; d < 80; d+=2) { | |||||
of = c->sys->audio_shuffle[i][j] + (d - 8)/2 * c->sys->audio_stride; | |||||
frame_ptr[d]   = pcm[of*2+1]; // FIXME: maybe we have to admit
frame_ptr[d+1] = pcm[of*2];   // that DV is big-endian PCM
} | |||||
frame_ptr += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */ | |||||
} | |||||
} | |||||
} | |||||
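dv_inject_audio() above addresses the interleaved stereo PCM through of = audio_shuffle[seq][dif] + (byte - 8)/2 * audio_stride, which spreads consecutive samples across DIF sequences. Assuming the shuffle-table entries span 0..audio_stride-1 as in the dvdata.h tables, a self-contained sanity check that this addressing covers the frame's audio capacity (1620 stereo samples for 525/60, 1944 for 625/50 at 16 bits):

#include <stdio.h>

/* Illustrative sketch, not part of the patch. */
int main(void)
{
    /* Each audio DIF block carries 72 bytes of sample data (bytes 8..79),
       i.e. 36 16-bit words, and there are 9 audio blocks per DIF sequence. */
    int words_per_dif = (80 - 8) / 2;
    int sys;

    for (sys = 0; sys < 2; sys++) {
        int difseg = sys ? 12 : 10;            /* 525/60 vs 625/50            */
        int stride = difseg * 9;               /* matches audio_stride above  */
        int words  = difseg * 9 * words_per_dif;

        printf("%s: %d 16-bit words = %d bytes = %d stereo samples "
               "(max offset %d, stride %d)\n",
               sys ? "625/50" : "525/60",
               words, words * 2, words / 2,
               (stride - 1) + (words_per_dif - 1) * stride, stride);
    }
    return 0;
}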
static void dv_inject_video(DVMuxContext *c, const uint8_t* video_data, uint8_t* frame_ptr) | |||||
{ | |||||
int i, j; | |||||
int ptr = 0; | |||||
for (i = 0; i < c->sys->difseg_size; i++) { | |||||
ptr += 6 * 80; /* skip DIF segment header */ | |||||
for (j = 0; j < 135; j++) { | |||||
if (j%15 == 0) | |||||
ptr += 80; /* skip Audio DIF */ | |||||
ptr += 3; | |||||
memcpy(frame_ptr + ptr, video_data + ptr, 77); | |||||
ptr += 77; | |||||
} | |||||
} | |||||
} | |||||
/*
 * This is the dumbest implementation of all -- it simply looks at
 * a fixed offset, and if the pack isn't there, it fails. We might want
 * to have a fallback mechanism that does a complete search for missing packs.
 */
static const uint8_t* dv_extract_pack(uint8_t* frame, enum dv_pack_type t) | |||||
{ | |||||
int offs; | |||||
switch (t) { | |||||
case dv_audio_source: | |||||
offs = (80*6 + 80*16*3 + 3); | |||||
break; | |||||
case dv_audio_control: | |||||
offs = (80*6 + 80*16*4 + 3); | |||||
break; | |||||
case dv_video_control: | |||||
offs = (80*5 + 48 + 5); | |||||
break; | |||||
default: | |||||
return NULL; | |||||
} | |||||
return (frame[offs] == t ? &frame[offs] : NULL); | |||||
} | |||||
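The fixed offsets in dv_extract_pack() above fall straight out of the first DIF sequence's layout: 6 * 80 bytes of header/subcode/VAUX blocks, then repeating groups of 1 audio + 15 video blocks, with the AAUX source and control packs at byte 3 of the 4th and 5th audio blocks, and the VAUX video-control pack at byte 48 + 5 into the third VAUX block (the slot dv_vaux_packs_dist fills with 0x61). A small check of that arithmetic, as a sketch rather than part of the patch:

#include <stdio.h>

int main(void)
{
    /* First DIF sequence layout: blocks 0-5 are header/subcode/VAUX,
       then 9 groups of (1 audio block + 15 video blocks), 80 bytes each. */
    int aaux_source  = 80*6 + 80*16*3 + 3;  /* byte 3 of the 4th audio block   */
    int aaux_control = 80*6 + 80*16*4 + 3;  /* byte 3 of the 5th audio block   */
    int vaux_control = 80*5 + 48 + 5;       /* 11th pack slot, 3rd VAUX block  */

    printf("AAUX source  pack at byte %d\n", aaux_source);
    printf("AAUX control pack at byte %d\n", aaux_control);
    printf("VAUX video-control pack at byte %d\n", vaux_control);
    return 0;
}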
/*
 * There are a couple of assumptions being made here:
 * 1. By default we silence erroneous (0x8000/16bit, 0x800/12bit) audio samples.
 *    We can pass them upwards once ffmpeg is ready to deal with them.
 * 2. We don't do software emphasis.
 * 3. Audio is always returned as 16bit linear samples: 12bit nonlinear samples
 *    are converted into 16bit linear ones.
 */
static int dv_extract_audio(uint8_t* frame, uint8_t* pcm) | |||||
{ | |||||
int size, i, j, d, of, smpls, freq, quant; | |||||
uint16_t lc, rc; | |||||
const DVprofile* sys; | |||||
const uint8_t* as_pack; | |||||
as_pack = dv_extract_pack(frame, dv_audio_source); | |||||
if (!as_pack) /* No audio ? */ | |||||
return 0; | |||||
sys = dv_frame_profile(frame); | |||||
smpls = as_pack[1] & 0x3f; /* samples in this frame - min. samples */ | |||||
freq = (as_pack[4] >> 3) & 0x07; /* 0 - 48KHz, 1 - 44,1kHz, 2 - 32 kHz */ | |||||
quant = as_pack[4] & 0x07; /* 0 - 16bit linear, 1 - 12bit nonlinear */ | |||||
if (quant > 1) | |||||
return -1; /* Unsupported quantization */ | |||||
size = (sys->audio_min_samples[freq] + smpls) * 4; /* 2ch, 2bytes */ | |||||
/* for each DIF segment */ | |||||
for (i = 0; i < sys->difseg_size; i++) { | |||||
frame += 6 * 80; /* skip DIF segment header */ | |||||
for (j = 0; j < 9; j++) { | |||||
for (d = 8; d < 80; d += 2) { | |||||
if (quant == 0) { /* 16bit quantization */ | |||||
of = sys->audio_shuffle[i][j] + (d - 8)/2 * sys->audio_stride; | |||||
if (of*2 >= size) | |||||
continue; | |||||
pcm[of*2]   = frame[d+1]; // FIXME: maybe we have to admit
pcm[of*2+1] = frame[d];   // that DV is big-endian PCM
if (pcm[of*2+1] == 0x80 && pcm[of*2] == 0x00) | |||||
pcm[of*2+1] = 0; | |||||
} else { /* 12bit quantization */ | |||||
if (i >= sys->difseg_size/2) | |||||
goto out; /* We're not doing 4ch at this time */ | |||||
lc = ((uint16_t)frame[d] << 4) | | |||||
((uint16_t)frame[d+2] >> 4); | |||||
rc = ((uint16_t)frame[d+1] << 4) | | |||||
((uint16_t)frame[d+2] & 0x0f); | |||||
lc = (lc == 0x800 ? 0 : dv_audio_12to16(lc)); | |||||
rc = (rc == 0x800 ? 0 : dv_audio_12to16(rc)); | |||||
of = sys->audio_shuffle[i][j] + (d - 8)/3 * sys->audio_stride; | |||||
if (of*2 >= size) | |||||
continue; | |||||
pcm[of*2]   = lc & 0xff; // FIXME: maybe we have to admit
pcm[of*2+1] = lc >> 8;   // that DV is big-endian PCM
of = sys->audio_shuffle[i+sys->difseg_size/2][j] + | |||||
(d - 8)/3 * sys->audio_stride; | |||||
pcm[of*2]   = rc & 0xff; // FIXME: maybe we have to admit
pcm[of*2+1] = rc >> 8;   // that DV is big-endian PCM
++d; | |||||
} | |||||
} | |||||
frame += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */ | |||||
} | |||||
} | |||||
out: | |||||
return size; | |||||
} | |||||
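/*
 * A minimal sketch (hypothetical helper, for illustration only) of the
 * inverse of the 12-bit unpacking above: two 12-bit samples -- left and
 * right -- share three bytes in the audio DIF block. Bytes 0 and 1 carry
 * the 8 MSBs of each sample, byte 2 carries the two 4-bit LSB nibbles.
 */
static void dv_pack_12bit_pair(uint16_t lc, uint16_t rc, uint8_t out[3])
{
    out[0] = (lc >> 4) & 0xff;                 /* left sample, bits 11..4  */
    out[1] = (rc >> 4) & 0xff;                 /* right sample, bits 11..4 */
    out[2] = ((lc & 0x0f) << 4) | (rc & 0x0f); /* LSB nibbles of both      */
}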
static int dv_extract_audio_info(uint8_t* frame, AVCodecContext* avctx) | |||||
{ | |||||
const uint8_t* as_pack; | |||||
const DVprofile* sys; | |||||
int freq, smpls; | |||||
sys = dv_frame_profile(frame); | |||||
as_pack = dv_extract_pack(frame, dv_audio_source); | |||||
if (!as_pack || !sys) /* No audio ? */ | |||||
return -1; | |||||
smpls = as_pack[1] & 0x3f; /* samples in this frame - min. samples */ | |||||
freq = (as_pack[4] >> 3) & 0x07; /* 0 - 48KHz, 1 - 44,1kHz, 2 - 32 kHz */ | |||||
avctx->sample_rate = dv_audio_frequency[freq]; | |||||
avctx->channels = 2; | |||||
avctx->bit_rate = avctx->channels * avctx->sample_rate * 16; | |||||
return (sys->audio_min_samples[freq] + smpls) * 4; /* 2 channels, 2 bytes each */
} | |||||
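/*
 * Worked example (nominal figures, for illustration): a PAL frame at 48 kHz
 * nominally carries 48000/25 = 1920 samples per channel, so the PCM packet
 * handed to the pcm_s16le decoder is about 1920 * 2 channels * 2 bytes =
 * 7680 bytes, and the advertised bit_rate is 2 * 48000 * 16 = 1536000 bit/s.
 */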
static int dv_extract_video_info(uint8_t* frame, AVCodecContext* avctx) | |||||
{ | |||||
const DVprofile* sys; | |||||
const uint8_t* vsc_pack; | |||||
int apt; | |||||
int size = 0; | |||||
sys = dv_frame_profile(frame); | |||||
if (sys) { | |||||
apt = frame[4] & 0x07; | |||||
avctx->frame_rate = sys->frame_rate; | |||||
avctx->frame_rate_base = sys->frame_rate_base; | |||||
avctx->width = sys->width; | |||||
avctx->height = sys->height; | |||||
avctx->pix_fmt = sys->pix_fmt; | |||||
vsc_pack = dv_extract_pack(frame, dv_video_control); | |||||
if (vsc_pack && (vsc_pack[2] & 0x07) == (apt?0x02:0x07)) | |||||
avctx->aspect_ratio = 16.0 / 9.0; | |||||
else | |||||
avctx->aspect_ratio = 4.0 / 3.0; | |||||
size = sys->frame_size; | |||||
} | |||||
return size; | |||||
} | |||||
/* | |||||
* The following 6 functions constitute our interface to the world | |||||
*/ | |||||
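/*
 * Buffering strategy of dv_assemble_frame() below, in short: video data is
 * injected into the frame buffer immediately, while audio PCM is queued in
 * c->audio_data and injected only once a full frame's worth
 * (4 * dv_audio_frame_size() bytes) has accumulated. A non-zero return
 * value means a complete DV frame is ready in *frame.
 */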
int dv_assemble_frame(DVMuxContext *c, AVStream* st, | |||||
const uint8_t* data, int data_size, uint8_t** frame) | |||||
{ | |||||
uint8_t pcm[8192]; | |||||
int fsize, reqasize; | |||||
*frame = &c->frame_buf[0]; | |||||
if (c->has_audio && c->has_video) { /* must be a stale frame */ | |||||
dv_format_frame(c, *frame); | |||||
c->frames++; | |||||
c->has_audio = c->has_video = 0; | |||||
} | |||||
if (st->codec.codec_type == CODEC_TYPE_VIDEO) { | |||||
/* FIXME: we have to have a more sensible approach than this one */
if (c->has_video) | |||||
fprintf(stderr, "Can't process DV frame #%d. Insufficient audio data or severe sync problem.\n", c->frames); | |||||
dv_inject_video(c, data, *frame); | |||||
c->has_video = 1; | |||||
data_size = 0; | |||||
} | |||||
reqasize = 4 * dv_audio_frame_size(c->sys, c->frames); | |||||
fsize = fifo_size(&c->audio_data, c->audio_data.rptr); | |||||
if (st->codec.codec_type == CODEC_TYPE_AUDIO || (c->has_video && fsize >= reqasize)) { | |||||
if (fsize + data_size >= reqasize && !c->has_audio) { | |||||
if (fsize >= reqasize) { | |||||
fifo_read(&c->audio_data, &pcm[0], reqasize, &c->audio_data.rptr); | |||||
} else { | |||||
fifo_read(&c->audio_data, &pcm[0], fsize, &c->audio_data.rptr); | |||||
memcpy(&pcm[fsize], &data[0], reqasize - fsize); | |||||
data += reqasize - fsize; | |||||
data_size -= reqasize - fsize; | |||||
} | |||||
dv_inject_audio(c, &pcm[0], *frame); | |||||
c->has_audio = 1; | |||||
} | |||||
/* FIXME: we have to have a more sensible approach than this one */
if (fifo_size(&c->audio_data, c->audio_data.rptr) + data_size >= 100*AVCODEC_MAX_AUDIO_FRAME_SIZE) | |||||
fprintf(stderr, "Can't process DV frame #%d. Insufficient video data or severe sync problem.\n", c->frames); | |||||
fifo_write(&c->audio_data, (uint8_t *)data, data_size, &c->audio_data.wptr); | |||||
} | |||||
return (c->has_audio && c->has_video) ? c->sys->frame_size : 0; | |||||
} | |||||
DVMuxContext* dv_init_mux(AVFormatContext* s) | |||||
{ | |||||
DVMuxContext *c; | |||||
AVStream *vst; | |||||
AVStream *ast; | |||||
c = av_mallocz(sizeof(DVMuxContext)); | |||||
if (!c) | |||||
return NULL; | |||||
if (s->nb_streams != 2) | |||||
goto bail_out; | |||||
/* We have to sort out which of the streams is audio and which is video */
if (s->streams[0]->codec.codec_type == CODEC_TYPE_VIDEO && | |||||
s->streams[1]->codec.codec_type == CODEC_TYPE_AUDIO) { | |||||
vst = s->streams[0]; | |||||
ast = s->streams[1]; | |||||
} | |||||
else if (s->streams[1]->codec.codec_type == CODEC_TYPE_VIDEO && | |||||
s->streams[0]->codec.codec_type == CODEC_TYPE_AUDIO) { | |||||
vst = s->streams[1]; | |||||
ast = s->streams[0]; | |||||
} else | |||||
goto bail_out; | |||||
/* Some checks -- DV format is very picky about its incoming streams */ | |||||
if (vst->codec.codec_id != CODEC_ID_DVVIDEO || | |||||
ast->codec.codec_id != CODEC_ID_PCM_S16LE) | |||||
goto bail_out; | |||||
if (ast->codec.sample_rate != 48000 || | |||||
ast->codec.channels != 2) | |||||
goto bail_out; | |||||
c->sys = dv_codec_profile(&vst->codec); | |||||
if (!c->sys) | |||||
goto bail_out; | |||||
/* Ok, everything seems to be in working order */ | |||||
c->frames = 0; | |||||
c->has_audio = c->has_video = 0; | |||||
c->start_time = time(NULL); | |||||
c->aspect = 0; /* 4:3 is the default */ | |||||
if (vst->codec.aspect_ratio == 16.0 / 9.0) | |||||
c->aspect = 0x07; | |||||
if (fifo_init(&c->audio_data, 100*AVCODEC_MAX_AUDIO_FRAME_SIZE) < 0) | |||||
goto bail_out; | |||||
dv_format_frame(c, &c->frame_buf[0]); | |||||
return c; | |||||
bail_out: | |||||
av_free(c); | |||||
return NULL; | |||||
} | |||||
void dv_delete_mux(DVMuxContext *c) | |||||
{ | |||||
fifo_free(&c->audio_data); | |||||
av_free(c); | |||||
} | |||||
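/*
 * A minimal usage sketch of the muxing half of the interface (this is the
 * pattern the raw DV muxer at the bottom of this file follows; error
 * handling omitted, only identifiers from this file are used):
 *
 *     DVMuxContext *c = dv_init_mux(s);
 *     ...
 *     uint8_t *frame;
 *     int fsize = dv_assemble_frame(c, s->streams[stream_index],
 *                                   buf, size, &frame);
 *     if (fsize > 0)
 *         put_buffer(&s->pb, frame, fsize);  // one complete DV frame ready
 *     ...
 *     dv_delete_mux(c);
 */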
DVDemuxContext* dv_init_demux(AVFormatContext *s, int vid, int aid) | |||||
{ | |||||
DVDemuxContext *c; | |||||
c = av_mallocz(sizeof(DVDemuxContext)); | |||||
if (!c) | |||||
return NULL; | |||||
c->vst = (vid < s->nb_streams) ? s->streams[vid] : av_new_stream(s, vid); | |||||
c->ast = (aid < s->nb_streams) ? s->streams[aid] : av_new_stream(s, aid); | |||||
if (!c->vst || !c->ast) | |||||
goto fail; | |||||
c->vst->codec.codec_type = CODEC_TYPE_VIDEO; | |||||
c->vst->codec.codec_id = CODEC_ID_DVVIDEO; | |||||
c->vst->codec.bit_rate = 25000000; | |||||
c->ast->codec.codec_type = CODEC_TYPE_AUDIO; | |||||
c->ast->codec.codec_id = CODEC_ID_PCM_S16LE; | |||||
c->audio_pkt.size = 0; | |||||
return c; | |||||
fail: | |||||
if (c->vst) | |||||
av_free(c->vst); | |||||
if (c->ast) | |||||
av_free(c->ast); | |||||
av_free(c); | |||||
return NULL; | |||||
} | } | ||||
static void __destruct_pkt(struct AVPacket *pkt) | static void __destruct_pkt(struct AVPacket *pkt) | ||||
@@ -56,43 +720,99 @@ static void __destruct_pkt(struct AVPacket *pkt) | |||||
return; | return; | ||||
} | } | ||||
static int dv_read_packet(AVFormatContext *s, AVPacket *pkt) | |||||
int dv_get_packet(DVDemuxContext *c, AVPacket *pkt) | |||||
{ | { | ||||
int ret; | |||||
DVDemuxContext *c = s->priv_data; | |||||
if (!c->is_audio) { | |||||
ret = get_buffer(&s->pb, c->buf, 4); | |||||
if (ret <= 0) | |||||
return -EIO; | |||||
c->size = dv_frame_profile(&c->buf[0])->frame_size; | |||||
ret = get_buffer(&s->pb, c->buf + 4, c->size - 4); | |||||
if (ret <= 0) | |||||
return -EIO; | |||||
int size = -1; | |||||
if (c->audio_pkt.size) { | |||||
*pkt = c->audio_pkt; | |||||
c->audio_pkt.size = 0; | |||||
size = pkt->size; | |||||
} | } | ||||
return size; | |||||
} | |||||
int dv_produce_packet(DVDemuxContext *c, AVPacket *pkt, | |||||
uint8_t* buf, int buf_size) | |||||
{ | |||||
int size; | |||||
if (buf_size < 4 || buf_size < dv_frame_profile(buf)->frame_size) | |||||
return -1; /* Broken frame, or not enough data */ | |||||
/* Queue the audio packet */
/* FIXME: in case of missing or bad audio we have to do something */
size = dv_extract_audio_info(buf, &c->ast->codec); | |||||
if (av_new_packet(&c->audio_pkt, size) < 0) | |||||
return AVERROR_NOMEM; | |||||
dv_extract_audio(buf, c->audio_pkt.data); | |||||
c->audio_pkt.size = size; | |||||
c->audio_pkt.stream_index = c->ast->id; | |||||
/* Now it's time to return the video packet */
size = dv_extract_video_info(buf, &c->vst->codec); | |||||
av_init_packet(pkt); | av_init_packet(pkt); | ||||
pkt->destruct = __destruct_pkt; | pkt->destruct = __destruct_pkt; | ||||
pkt->data = c->buf; | |||||
pkt->size = c->size; | |||||
pkt->stream_index = c->is_audio; | |||||
pkt->data = buf; | |||||
pkt->size = size; | |||||
pkt->flags |= PKT_FLAG_KEY; | pkt->flags |= PKT_FLAG_KEY; | ||||
pkt->stream_index = c->vst->id; | |||||
c->is_audio = !c->is_audio; | |||||
return c->size; | |||||
return size; | |||||
} | |||||
/************************************************************ | |||||
* Implementation of the easiest DV storage of all -- raw DV. | |||||
************************************************************/ | |||||
typedef struct RawDVContext { | |||||
void* dv_demux; | |||||
uint8_t buf[144000]; | |||||
} RawDVContext; | |||||
static int dv_read_header(AVFormatContext *s, | |||||
AVFormatParameters *ap) | |||||
{ | |||||
RawDVContext *c = s->priv_data; | |||||
c->dv_demux = dv_init_demux(s, 0, 1); | |||||
return c->dv_demux ? 0 : -1; | |||||
} | |||||
static int dv_read_packet(AVFormatContext *s, AVPacket *pkt) | |||||
{ | |||||
int size; | |||||
RawDVContext *c = s->priv_data; | |||||
size = dv_get_packet(c->dv_demux, pkt); | |||||
if (size < 0) { | |||||
if (get_buffer(&s->pb, c->buf, 4) <= 0) | |||||
return -EIO; | |||||
size = dv_frame_profile(c->buf)->frame_size; | |||||
if (get_buffer(&s->pb, c->buf + 4, size - 4) <= 0) | |||||
return -EIO; | |||||
size = dv_produce_packet(c->dv_demux, pkt, c->buf, size); | |||||
} | |||||
return size; | |||||
} | } | ||||
static int dv_read_close(AVFormatContext *s) | static int dv_read_close(AVFormatContext *s) | ||||
{ | { | ||||
RawDVContext *c = s->priv_data; | |||||
av_free(c->dv_demux); | |||||
return 0; | return 0; | ||||
} | } | ||||
int dv_write_header(struct AVFormatContext *s) | |||||
static int dv_write_header(AVFormatContext *s) | |||||
{ | { | ||||
DVMuxContext *c = s->priv_data; | |||||
if (s->nb_streams != 2 || dv_core_init(c, s->streams) != 0) { | |||||
s->priv_data = dv_init_mux(s); | |||||
if (!s->priv_data) { | |||||
fprintf(stderr, "Can't initialize DV format!\n" | fprintf(stderr, "Can't initialize DV format!\n" | ||||
"Make sure that you supply exactly two streams:\n" | "Make sure that you supply exactly two streams:\n" | ||||
" video: 25fps or 29.97fps, audio: 2ch/48Khz/PCM\n"); | " video: 25fps or 29.97fps, audio: 2ch/48Khz/PCM\n"); | ||||
@@ -101,23 +821,20 @@ int dv_write_header(struct AVFormatContext *s) | |||||
return 0; | return 0; | ||||
} | } | ||||
int dv_write_packet(struct AVFormatContext *s, | |||||
int stream_index, | |||||
const uint8_t *buf, int size, int64_t pts) | |||||
static int dv_write_packet(struct AVFormatContext *s, | |||||
int stream_index, | |||||
const uint8_t *buf, int size, int64_t pts) | |||||
{ | { | ||||
DVMuxContext *c = s->priv_data; | |||||
uint8_t* frame; | |||||
int fsize; | |||||
if (stream_index == c->vst) | |||||
dv_assemble_frame(c, buf, NULL, 0); | |||||
else | |||||
dv_assemble_frame(c, NULL, buf, size); | |||||
if (c->has_audio && c->has_video) { | |||||
put_buffer(&s->pb, &c->frame_buf[0], c->sys->frame_size); | |||||
fsize = dv_assemble_frame((DVMuxContext *)s->priv_data, s->streams[stream_index], | |||||
buf, size, &frame); | |||||
if (fsize > 0) { | |||||
put_buffer(&s->pb, frame, fsize); | |||||
put_flush_packet(&s->pb); | put_flush_packet(&s->pb); | ||||
} | |||||
return 0; | |||||
} | |||||
return fsize; | |||||
} | } | ||||
/* | /* | ||||
@@ -126,16 +843,16 @@ int dv_write_packet(struct AVFormatContext *s, | |||||
* Currently we simply drop the last frame. I don't know whether this | * Currently we simply drop the last frame. I don't know whether this | ||||
* is the best strategy of all | * is the best strategy of all | ||||
*/ | */ | ||||
int dv_write_trailer(struct AVFormatContext *s) | |||||
static int dv_write_trailer(struct AVFormatContext *s) | |||||
{ | { | ||||
dv_core_delete((DVMuxContext *)s->priv_data); | |||||
dv_delete_mux((DVMuxContext *)s->priv_data); | |||||
return 0; | return 0; | ||||
} | } | ||||
static AVInputFormat dv_iformat = { | static AVInputFormat dv_iformat = { | ||||
"dv", | "dv", | ||||
"DV video format", | "DV video format", | ||||
sizeof(DVDemuxContext), | |||||
sizeof(RawDVContext), | |||||
NULL, | NULL, | ||||
dv_read_header, | dv_read_header, | ||||
dv_read_packet, | dv_read_packet, | ||||
@@ -143,7 +860,7 @@ static AVInputFormat dv_iformat = { | |||||
.extensions = "dv", | .extensions = "dv", | ||||
}; | }; | ||||
AVOutputFormat dv_oformat = { | |||||
static AVOutputFormat dv_oformat = { | |||||
"dv", | "dv", | ||||
"DV video format", | "DV video format", | ||||
NULL, | NULL, | ||||
@@ -0,0 +1,32 @@ | |||||
/* | |||||
* General DV muxer/demuxer | |||||
* Copyright (c) 2003 Roman Shaposhnick | |||||
* | |||||
* Many thanks to Dan Dennedy <dan@dennedy.org> for providing a wealth
* of DV technical info.
* | |||||
* Raw DV format | |||||
* Copyright (c) 2002 Fabrice Bellard. | |||||
* | |||||
* This library is free software; you can redistribute it and/or | |||||
* modify it under the terms of the GNU Lesser General Public | |||||
* License as published by the Free Software Foundation; either | |||||
* version 2 of the License, or (at your option) any later version. | |||||
* | |||||
* This library is distributed in the hope that it will be useful, | |||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of | |||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |||||
* Lesser General Public License for more details. | |||||
* | |||||
* You should have received a copy of the GNU Lesser General Public | |||||
* License along with this library; if not, write to the Free Software | |||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |||||
*/ | |||||
void* dv_init_demux(AVFormatContext *s, int vid, int aid); | |||||
int dv_get_packet(void*, AVPacket *); | |||||
int dv_produce_packet(void*, AVPacket*, uint8_t*, int); | |||||
void* dv_init_mux(AVFormatContext* s); | |||||
int dv_assemble_frame(void *c, AVStream*, const uint8_t*, int, uint8_t**); | |||||
void dv_delete_mux(void*); |
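/*
 * A minimal usage sketch of the demuxing half of this interface (the raw DV
 * and dv1394 demuxers in this patch follow this pattern; error handling
 * omitted):
 *
 *     void *c = dv_init_demux(s, 0, 1);    // creates video and audio streams
 *     ...
 *     if (dv_get_packet(c, pkt) < 0) {     // no queued audio packet pending
 *         // read one complete DV frame into buf, then split it:
 *         dv_produce_packet(c, pkt, buf, frame_size);
 *     }
 *     ...
 *     av_free(c);
 */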
@@ -31,13 +31,11 @@ | |||||
#undef DV1394_DEBUG | #undef DV1394_DEBUG | ||||
#include "dv1394.h" | #include "dv1394.h" | ||||
#include "dv.h" | |||||
struct dv1394_data { | struct dv1394_data { | ||||
int fd; | int fd; | ||||
int channel; | int channel; | ||||
int width, height; | |||||
int frame_rate; | |||||
int frame_size; | |||||
int format; | int format; | ||||
void *ring; /* Ring buffer */ | void *ring; /* Ring buffer */ | ||||
@@ -45,9 +43,9 @@ struct dv1394_data { | |||||
int avail; /* Number of frames available for reading */ | int avail; /* Number of frames available for reading */ | ||||
int done; /* Number of completed frames */ | int done; /* Number of completed frames */ | ||||
int stream; /* Current stream. 0 - video, 1 - audio */ | |||||
int64_t pts; /* Current timestamp */ | int64_t pts; /* Current timestamp */ | ||||
AVStream *vst, *ast; | |||||
void* dv_demux; /* Generic DV muxing/demuxing context */ | |||||
}; | }; | ||||
/* | /* | ||||
@@ -69,7 +67,6 @@ static int dv1394_reset(struct dv1394_data *dv) | |||||
return -1; | return -1; | ||||
dv->avail = dv->done = 0; | dv->avail = dv->done = 0; | ||||
dv->stream = 0; | |||||
return 0; | return 0; | ||||
} | } | ||||
@@ -88,14 +85,9 @@ static int dv1394_read_header(AVFormatContext * context, AVFormatParameters * ap | |||||
struct dv1394_data *dv = context->priv_data; | struct dv1394_data *dv = context->priv_data; | ||||
const char *video_device; | const char *video_device; | ||||
dv->vst = av_new_stream(context, 0); | |||||
if (!dv->vst) | |||||
return -ENOMEM; | |||||
dv->ast = av_new_stream(context, 1); | |||||
if (!dv->ast) { | |||||
av_free(dv->vst); | |||||
return -ENOMEM; | |||||
} | |||||
dv->dv_demux = dv_init_demux(context, 0, 1); | |||||
if (!dv->dv_demux) | |||||
goto failed; | |||||
if (ap->standard && !strcasecmp(ap->standard, "pal")) | if (ap->standard && !strcasecmp(ap->standard, "pal")) | ||||
dv->format = DV1394_PAL; | dv->format = DV1394_PAL; | ||||
@@ -107,17 +99,6 @@ static int dv1394_read_header(AVFormatContext * context, AVFormatParameters * ap | |||||
else | else | ||||
dv->channel = DV1394_DEFAULT_CHANNEL; | dv->channel = DV1394_DEFAULT_CHANNEL; | ||||
dv->width = DV1394_WIDTH; | |||||
if (dv->format == DV1394_NTSC) { | |||||
dv->height = DV1394_NTSC_HEIGHT; | |||||
dv->frame_size = DV1394_NTSC_FRAME_SIZE; | |||||
dv->frame_rate = 30; | |||||
} else { | |||||
dv->height = DV1394_PAL_HEIGHT; | |||||
dv->frame_size = DV1394_PAL_FRAME_SIZE; | |||||
dv->frame_rate = 25; | |||||
} | |||||
/* Open and initialize DV1394 device */ | /* Open and initialize DV1394 device */ | ||||
video_device = ap->device; | video_device = ap->device; | ||||
if (!video_device) | if (!video_device) | ||||
@@ -140,21 +121,6 @@ static int dv1394_read_header(AVFormatContext * context, AVFormatParameters * ap | |||||
goto failed; | goto failed; | ||||
} | } | ||||
dv->stream = 0; | |||||
dv->vst->codec.codec_type = CODEC_TYPE_VIDEO; | |||||
dv->vst->codec.codec_id = CODEC_ID_DVVIDEO; | |||||
dv->vst->codec.width = dv->width; | |||||
dv->vst->codec.height = dv->height; | |||||
dv->vst->codec.frame_rate = dv->frame_rate; | |||||
dv->vst->codec.frame_rate_base = 1; | |||||
dv->vst->codec.bit_rate = 25000000; /* Consumer DV is 25Mbps */ | |||||
dv->ast->codec.codec_type = CODEC_TYPE_AUDIO; | |||||
dv->ast->codec.codec_id = CODEC_ID_DVAUDIO; | |||||
dv->ast->codec.channels = 2; | |||||
dv->ast->codec.sample_rate= 48000; | |||||
av_set_pts_info(context, 48, 1, 1000000); | av_set_pts_info(context, 48, 1, 1000000); | ||||
if (dv1394_start(dv) < 0) | if (dv1394_start(dv) < 0) | ||||
@@ -164,55 +130,17 @@ static int dv1394_read_header(AVFormatContext * context, AVFormatParameters * ap | |||||
failed: | failed: | ||||
close(dv->fd); | close(dv->fd); | ||||
av_free(dv->vst); | |||||
av_free(dv->ast); | |||||
return -EIO; | return -EIO; | ||||
} | } | ||||
static void __destruct_pkt(struct AVPacket *pkt) | |||||
{ | |||||
pkt->data = NULL; pkt->size = 0; | |||||
return; | |||||
} | |||||
static inline int __get_frame(struct dv1394_data *dv, AVPacket *pkt) | |||||
{ | |||||
char *ptr = dv->ring + (dv->index * DV1394_PAL_FRAME_SIZE); | |||||
if (dv->stream) { | |||||
dv->index = (dv->index + 1) % DV1394_RING_FRAMES; | |||||
dv->done++; dv->avail--; | |||||
} else { | |||||
dv->pts = av_gettime() & ((1LL << 48) - 1); | |||||
} | |||||
dv->format = ((ptr[3] & 0x80) == 0) ? DV1394_NTSC : DV1394_PAL; | |||||
if (dv->format == DV1394_NTSC) { | |||||
dv->frame_size = DV1394_NTSC_FRAME_SIZE; | |||||
dv->vst->codec.height = dv->height = DV1394_NTSC_HEIGHT; | |||||
dv->vst->codec.frame_rate = dv->frame_rate = 30; | |||||
} else { | |||||
dv->frame_size = DV1394_PAL_FRAME_SIZE; | |||||
dv->vst->codec.height = dv->height = DV1394_PAL_HEIGHT; | |||||
dv->vst->codec.frame_rate = dv->frame_rate = 25; | |||||
} | |||||
av_init_packet(pkt); | |||||
pkt->destruct = __destruct_pkt; | |||||
pkt->data = ptr; | |||||
pkt->size = dv->frame_size; | |||||
pkt->pts = dv->pts; | |||||
pkt->stream_index = dv->stream; | |||||
pkt->flags |= PKT_FLAG_KEY; | |||||
dv->stream ^= 1; | |||||
return dv->frame_size; | |||||
} | |||||
static int dv1394_read_packet(AVFormatContext *context, AVPacket *pkt) | static int dv1394_read_packet(AVFormatContext *context, AVPacket *pkt) | ||||
{ | { | ||||
struct dv1394_data *dv = context->priv_data; | struct dv1394_data *dv = context->priv_data; | ||||
int size; | |||||
size = dv_get_packet(dv->dv_demux, pkt); | |||||
if (size > 0) | |||||
goto out; | |||||
if (!dv->avail) { | if (!dv->avail) { | ||||
struct dv1394_status s; | struct dv1394_status s; | ||||
@@ -276,7 +204,16 @@ restart_poll: | |||||
dv->done); | dv->done); | ||||
#endif | #endif | ||||
return __get_frame(dv, pkt); | |||||
size = dv_produce_packet(dv->dv_demux, pkt, | |||||
dv->ring + (dv->index * DV1394_PAL_FRAME_SIZE), | |||||
DV1394_PAL_FRAME_SIZE); | |||||
dv->index = (dv->index + 1) % DV1394_RING_FRAMES; | |||||
dv->done++; dv->avail--; | |||||
dv->pts = av_gettime() & ((1LL << 48) - 1); | |||||
out: | |||||
pkt->pts = dv->pts; | |||||
return size; | |||||
} | } | ||||
static int dv1394_close(AVFormatContext * context) | static int dv1394_close(AVFormatContext * context) | ||||
@@ -292,6 +229,7 @@ static int dv1394_close(AVFormatContext * context) | |||||
perror("Failed to munmap DV1394 ring buffer"); | perror("Failed to munmap DV1394 ring buffer"); | ||||
close(dv->fd); | close(dv->fd); | ||||
av_free(dv->dv_demux); | |||||
return 0; | return 0; | ||||
} | } | ||||