And rename it to retimeinterleave, use the pcm_rechunk bitstream filter for rechunking. By separating the two functions we hopefully get cleaner code. Signed-off-by: Marton Balint <cus@passwd.hu> — tags/n4.3
@@ -2722,6 +2722,7 @@ fraps_decoder_select="bswapdsp huffman" | |||
g2m_decoder_deps="zlib" | |||
g2m_decoder_select="blockdsp idctdsp jpegtables" | |||
g729_decoder_select="audiodsp" | |||
gxf_encoder_select="pcm_rechunk_bsf" | |||
h261_decoder_select="mpegvideo" | |||
h261_encoder_select="mpegvideoenc" | |||
h263_decoder_select="h263_parser h263dsp mpegvideo qpeldsp" | |||
@@ -2794,6 +2795,7 @@ mv30_decoder_select="aandcttables blockdsp" | |||
mvha_decoder_deps="zlib" | |||
mvha_decoder_select="llviddsp" | |||
mwsc_decoder_deps="zlib" | |||
mxf_encoder_select="pcm_rechunk_bsf" | |||
mxpeg_decoder_select="mjpeg_decoder" | |||
nellymoser_decoder_select="mdct sinewin" | |||
nellymoser_encoder_select="audio_frame_queue mdct sinewin" | |||
@@ -205,7 +205,7 @@ OBJS-$(CONFIG_GIF_DEMUXER) += gifdec.o | |||
OBJS-$(CONFIG_GSM_DEMUXER) += gsmdec.o | |||
OBJS-$(CONFIG_GSM_MUXER) += rawenc.o | |||
OBJS-$(CONFIG_GXF_DEMUXER) += gxf.o | |||
OBJS-$(CONFIG_GXF_MUXER) += gxfenc.o audiointerleave.o | |||
OBJS-$(CONFIG_GXF_MUXER) += gxfenc.o retimeinterleave.o | |||
OBJS-$(CONFIG_G722_DEMUXER) += g722.o rawdec.o | |||
OBJS-$(CONFIG_G722_MUXER) += rawenc.o | |||
OBJS-$(CONFIG_G723_1_DEMUXER) += g723_1.o | |||
@@ -347,7 +347,7 @@ OBJS-$(CONFIG_MUSX_DEMUXER) += musx.o | |||
OBJS-$(CONFIG_MV_DEMUXER) += mvdec.o | |||
OBJS-$(CONFIG_MVI_DEMUXER) += mvi.o | |||
OBJS-$(CONFIG_MXF_DEMUXER) += mxfdec.o mxf.o | |||
OBJS-$(CONFIG_MXF_MUXER) += mxfenc.o mxf.o audiointerleave.o avc.o | |||
OBJS-$(CONFIG_MXF_MUXER) += mxfenc.o mxf.o retimeinterleave.o avc.o | |||
OBJS-$(CONFIG_MXG_DEMUXER) += mxg.o | |||
OBJS-$(CONFIG_NC_DEMUXER) += ncdec.o | |||
OBJS-$(CONFIG_NISTSPHERE_DEMUXER) += nistspheredec.o pcm.o | |||
@@ -1,148 +0,0 @@ | |||
/* | |||
* Audio Interleaving functions | |||
* | |||
* Copyright (c) 2009 Baptiste Coudurier <baptiste dot coudurier at gmail dot com> | |||
* | |||
* This file is part of FFmpeg. | |||
* | |||
* FFmpeg is free software; you can redistribute it and/or | |||
* modify it under the terms of the GNU Lesser General Public | |||
* License as published by the Free Software Foundation; either | |||
* version 2.1 of the License, or (at your option) any later version. | |||
* | |||
* FFmpeg is distributed in the hope that it will be useful, | |||
* but WITHOUT ANY WARRANTY; without even the implied warranty of | |||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |||
* Lesser General Public License for more details. | |||
* | |||
* You should have received a copy of the GNU Lesser General Public | |||
* License along with FFmpeg; if not, write to the Free Software | |||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |||
*/ | |||
#include "libavutil/fifo.h" | |||
#include "libavutil/mathematics.h" | |||
#include "avformat.h" | |||
#include "audiointerleave.h" | |||
#include "internal.h" | |||
void ff_audio_interleave_close(AVFormatContext *s) | |||
{ | |||
int i; | |||
for (i = 0; i < s->nb_streams; i++) { | |||
AVStream *st = s->streams[i]; | |||
AudioInterleaveContext *aic = st->priv_data; | |||
if (aic && st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) | |||
av_fifo_freep(&aic->fifo); | |||
} | |||
} | |||
int ff_audio_interleave_init(AVFormatContext *s, | |||
const int samples_per_frame, | |||
AVRational time_base) | |||
{ | |||
int i; | |||
if (!time_base.num) { | |||
av_log(s, AV_LOG_ERROR, "timebase not set for audio interleave\n"); | |||
return AVERROR(EINVAL); | |||
} | |||
for (i = 0; i < s->nb_streams; i++) { | |||
AVStream *st = s->streams[i]; | |||
AudioInterleaveContext *aic = st->priv_data; | |||
if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { | |||
int max_samples = samples_per_frame ? samples_per_frame : | |||
av_rescale_rnd(st->codecpar->sample_rate, time_base.num, time_base.den, AV_ROUND_UP); | |||
aic->sample_size = (st->codecpar->channels * | |||
av_get_bits_per_sample(st->codecpar->codec_id)) / 8; | |||
if (!aic->sample_size) { | |||
av_log(s, AV_LOG_ERROR, "could not compute sample size\n"); | |||
return AVERROR(EINVAL); | |||
} | |||
aic->samples_per_frame = samples_per_frame; | |||
aic->time_base = time_base; | |||
if (!(aic->fifo = av_fifo_alloc_array(100, max_samples))) | |||
return AVERROR(ENOMEM); | |||
aic->fifo_size = 100 * max_samples; | |||
} | |||
} | |||
return 0; | |||
} | |||
static int interleave_new_audio_packet(AVFormatContext *s, AVPacket *pkt, | |||
int stream_index, int flush) | |||
{ | |||
AVStream *st = s->streams[stream_index]; | |||
AudioInterleaveContext *aic = st->priv_data; | |||
int ret; | |||
int nb_samples = aic->samples_per_frame ? aic->samples_per_frame : | |||
(av_rescale_q(aic->n + 1, av_make_q(st->codecpar->sample_rate, 1), av_inv_q(aic->time_base)) - aic->nb_samples); | |||
int frame_size = nb_samples * aic->sample_size; | |||
int size = FFMIN(av_fifo_size(aic->fifo), frame_size); | |||
if (!size || (!flush && size == av_fifo_size(aic->fifo))) | |||
return 0; | |||
ret = av_new_packet(pkt, frame_size); | |||
if (ret < 0) | |||
return ret; | |||
av_fifo_generic_read(aic->fifo, pkt->data, size, NULL); | |||
if (size < pkt->size) | |||
memset(pkt->data + size, 0, pkt->size - size); | |||
pkt->dts = pkt->pts = aic->dts; | |||
pkt->duration = av_rescale_q(nb_samples, st->time_base, aic->time_base); | |||
pkt->stream_index = stream_index; | |||
aic->dts += pkt->duration; | |||
aic->nb_samples += nb_samples; | |||
aic->n++; | |||
return pkt->size; | |||
} | |||
int ff_audio_rechunk_interleave(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush, | |||
int (*get_packet)(AVFormatContext *, AVPacket *, AVPacket *, int), | |||
int (*compare_ts)(AVFormatContext *, const AVPacket *, const AVPacket *)) | |||
{ | |||
int i, ret; | |||
if (pkt) { | |||
AVStream *st = s->streams[pkt->stream_index]; | |||
AudioInterleaveContext *aic = st->priv_data; | |||
if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { | |||
unsigned new_size = av_fifo_size(aic->fifo) + pkt->size; | |||
if (new_size > aic->fifo_size) { | |||
if (av_fifo_realloc2(aic->fifo, new_size) < 0) | |||
return AVERROR(ENOMEM); | |||
aic->fifo_size = new_size; | |||
} | |||
av_fifo_generic_write(aic->fifo, pkt->data, pkt->size, NULL); | |||
} else { | |||
// rewrite pts and dts to be decoded time line position | |||
pkt->pts = pkt->dts = aic->dts; | |||
aic->dts += pkt->duration; | |||
if ((ret = ff_interleave_add_packet(s, pkt, compare_ts)) < 0) | |||
return ret; | |||
} | |||
pkt = NULL; | |||
} | |||
for (i = 0; i < s->nb_streams; i++) { | |||
AVStream *st = s->streams[i]; | |||
if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { | |||
AVPacket new_pkt; | |||
while ((ret = interleave_new_audio_packet(s, &new_pkt, i, flush)) > 0) { | |||
if ((ret = ff_interleave_add_packet(s, &new_pkt, compare_ts)) < 0) | |||
return ret; | |||
} | |||
if (ret < 0) | |||
return ret; | |||
} | |||
} | |||
return get_packet(s, out, NULL, flush); | |||
} |
@@ -27,8 +27,9 @@ | |||
#include "avformat.h" | |||
#include "internal.h" | |||
#include "gxf.h" | |||
#include "audiointerleave.h" | |||
#include "retimeinterleave.h" | |||
#define GXF_SAMPLES_PER_FRAME 32768 | |||
#define GXF_AUDIO_PACKET_SIZE 65536 | |||
#define GXF_TIMECODE(c, d, h, m, s, f) \ | |||
@@ -44,7 +45,7 @@ typedef struct GXFTimecode{ | |||
} GXFTimecode; | |||
typedef struct GXFStreamContext { | |||
AudioInterleaveContext aic; | |||
RetimeInterleaveContext aic; | |||
uint32_t track_type; | |||
uint32_t sample_size; | |||
uint32_t sample_rate; | |||
@@ -663,8 +664,6 @@ static int gxf_write_umf_packet(AVFormatContext *s) | |||
return updatePacketSize(pb, pos); | |||
} | |||
static const int GXF_samples_per_frame = 32768; | |||
static void gxf_init_timecode_track(GXFStreamContext *sc, GXFStreamContext *vsc) | |||
{ | |||
if (!vsc) | |||
@@ -736,6 +735,9 @@ static int gxf_write_header(AVFormatContext *s) | |||
av_log(s, AV_LOG_ERROR, "only mono tracks are allowed\n"); | |||
return -1; | |||
} | |||
ret = ff_stream_add_bitstream_filter(st, "pcm_rechunk", "n="AV_STRINGIFY(GXF_SAMPLES_PER_FRAME)); | |||
if (ret < 0) | |||
return ret; | |||
sc->track_type = 2; | |||
sc->sample_rate = st->codecpar->sample_rate; | |||
avpriv_set_pts_info(st, 64, 1, sc->sample_rate); | |||
@@ -813,14 +815,12 @@ static int gxf_write_header(AVFormatContext *s) | |||
return -1; | |||
} | |||
} | |||
ff_retime_interleave_init(&sc->aic, st->time_base); | |||
/* FIXME first 10 audio tracks are 0 to 9 next 22 are A to V */ | |||
sc->media_info = media_info<<8 | ('0'+tracks[media_info]++); | |||
sc->order = s->nb_streams - st->index; | |||
} | |||
if (ff_audio_interleave_init(s, GXF_samples_per_frame, (AVRational){ 1, 48000 }) < 0) | |||
return -1; | |||
if (tcr && vsc) | |||
gxf_init_timecode(s, &gxf->tc, tcr->value, vsc->fields); | |||
@@ -877,8 +877,6 @@ static void gxf_deinit(AVFormatContext *s) | |||
{ | |||
GXFContext *gxf = s->priv_data; | |||
ff_audio_interleave_close(s); | |||
av_freep(&gxf->flt_entries); | |||
av_freep(&gxf->map_offsets); | |||
} | |||
@@ -1016,8 +1014,8 @@ static int gxf_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *pk | |||
{ | |||
if (pkt && s->streams[pkt->stream_index]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) | |||
pkt->duration = 2; // enforce 2 fields | |||
return ff_audio_rechunk_interleave(s, out, pkt, flush, | |||
ff_interleave_packet_per_dts, gxf_compare_field_nb); | |||
return ff_retime_interleave(s, out, pkt, flush, | |||
ff_interleave_packet_per_dts, gxf_compare_field_nb); | |||
} | |||
AVOutputFormat ff_gxf_muxer = { | |||
@@ -52,7 +52,7 @@ | |||
#include "libavcodec/h264_ps.h" | |||
#include "libavcodec/golomb.h" | |||
#include "libavcodec/internal.h" | |||
#include "audiointerleave.h" | |||
#include "retimeinterleave.h" | |||
#include "avformat.h" | |||
#include "avio_internal.h" | |||
#include "internal.h" | |||
@@ -79,7 +79,7 @@ typedef struct MXFIndexEntry { | |||
} MXFIndexEntry; | |||
typedef struct MXFStreamContext { | |||
AudioInterleaveContext aic; | |||
RetimeInterleaveContext aic; | |||
UID track_essence_element_key; | |||
int index; ///< index in mxf_essence_container_uls table | |||
const UID *codec_ul; | |||
@@ -2538,6 +2538,7 @@ static int mxf_write_header(AVFormatContext *s) | |||
if (mxf->signal_standard >= 0) | |||
sc->signal_standard = mxf->signal_standard; | |||
} else if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { | |||
char bsf_arg[32]; | |||
if (st->codecpar->sample_rate != 48000) { | |||
av_log(s, AV_LOG_ERROR, "only 48khz is implemented\n"); | |||
return -1; | |||
@@ -2580,6 +2581,10 @@ static int mxf_write_header(AVFormatContext *s) | |||
av_rescale_rnd(st->codecpar->sample_rate, mxf->time_base.num, mxf->time_base.den, AV_ROUND_UP) * | |||
av_get_bits_per_sample(st->codecpar->codec_id) / 8; | |||
} | |||
snprintf(bsf_arg, sizeof(bsf_arg), "r=%d/%d", mxf->tc.rate.num, mxf->tc.rate.den); | |||
ret = ff_stream_add_bitstream_filter(st, "pcm_rechunk", bsf_arg); | |||
if (ret < 0) | |||
return ret; | |||
} else if (st->codecpar->codec_type == AVMEDIA_TYPE_DATA) { | |||
AVDictionaryEntry *e = av_dict_get(st->metadata, "data_type", NULL, 0); | |||
if (e && !strcmp(e->value, "vbi_vanc_smpte_436M")) { | |||
@@ -2593,6 +2598,7 @@ static int mxf_write_header(AVFormatContext *s) | |||
return -1; | |||
} | |||
} | |||
ff_retime_interleave_init(&sc->aic, av_inv_q(mxf->tc.rate)); | |||
if (sc->index == -1) { | |||
sc->index = mxf_get_essence_container_ul_index(st->codecpar->codec_id); | |||
@@ -2646,9 +2652,6 @@ static int mxf_write_header(AVFormatContext *s) | |||
return AVERROR(ENOMEM); | |||
mxf->timecode_track->index = -1; | |||
if (ff_audio_interleave_init(s, 0, av_inv_q(mxf->tc.rate)) < 0) | |||
return -1; | |||
return 0; | |||
} | |||
@@ -3010,8 +3013,6 @@ static void mxf_deinit(AVFormatContext *s) | |||
{ | |||
MXFContext *mxf = s->priv_data; | |||
ff_audio_interleave_close(s); | |||
av_freep(&mxf->index_entries); | |||
av_freep(&mxf->body_partition_offset); | |||
if (mxf->timecode_track) { | |||
@@ -3086,8 +3087,8 @@ static int mxf_compare_timestamps(AVFormatContext *s, const AVPacket *next, | |||
static int mxf_interleave(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush) | |||
{ | |||
return ff_audio_rechunk_interleave(s, out, pkt, flush, | |||
mxf_interleave_get_packet, mxf_compare_timestamps); | |||
return ff_retime_interleave(s, out, pkt, flush, | |||
mxf_interleave_get_packet, mxf_compare_timestamps); | |||
} | |||
#define MXF_COMMON_OPTIONS \ | |||
@@ -0,0 +1,51 @@ | |||
/* | |||
* Retime Interleaving functions | |||
* | |||
* Copyright (c) 2009 Baptiste Coudurier <baptiste dot coudurier at gmail dot com> | |||
* | |||
* This file is part of FFmpeg. | |||
* | |||
* FFmpeg is free software; you can redistribute it and/or | |||
* modify it under the terms of the GNU Lesser General Public | |||
* License as published by the Free Software Foundation; either | |||
* version 2.1 of the License, or (at your option) any later version. | |||
* | |||
* FFmpeg is distributed in the hope that it will be useful, | |||
* but WITHOUT ANY WARRANTY; without even the implied warranty of | |||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |||
* Lesser General Public License for more details. | |||
* | |||
* You should have received a copy of the GNU Lesser General Public | |||
* License along with FFmpeg; if not, write to the Free Software | |||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |||
*/ | |||
#include "libavutil/mathematics.h" | |||
#include "avformat.h" | |||
#include "retimeinterleave.h" | |||
#include "internal.h" | |||
void ff_retime_interleave_init(RetimeInterleaveContext *aic, AVRational time_base) | |||
{ | |||
aic->time_base = time_base; | |||
} | |||
int ff_retime_interleave(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush, | |||
int (*get_packet)(AVFormatContext *, AVPacket *, AVPacket *, int), | |||
int (*compare_ts)(AVFormatContext *, const AVPacket *, const AVPacket *)) | |||
{ | |||
int ret; | |||
if (pkt) { | |||
AVStream *st = s->streams[pkt->stream_index]; | |||
RetimeInterleaveContext *aic = st->priv_data; | |||
pkt->duration = av_rescale_q(pkt->duration, st->time_base, aic->time_base); | |||
// rewrite pts and dts to be decoded time line position | |||
pkt->pts = pkt->dts = aic->dts; | |||
aic->dts += pkt->duration; | |||
if ((ret = ff_interleave_add_packet(s, pkt, compare_ts)) < 0) | |||
return ret; | |||
} | |||
return get_packet(s, out, NULL, flush); | |||
} |
@@ -20,36 +20,31 @@ | |||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |||
*/ | |||
#ifndef AVFORMAT_AUDIOINTERLEAVE_H | |||
#define AVFORMAT_AUDIOINTERLEAVE_H | |||
#ifndef AVFORMAT_RETIMEINTERLEAVE_H | |||
#define AVFORMAT_RETIMEINTERLEAVE_H | |||
#include "libavutil/fifo.h" | |||
#include "avformat.h" | |||
typedef struct AudioInterleaveContext { | |||
AVFifoBuffer *fifo; | |||
unsigned fifo_size; ///< size of currently allocated FIFO | |||
int64_t n; ///< number of generated packets | |||
int64_t nb_samples; ///< number of generated samples | |||
typedef struct RetimeInterleaveContext { | |||
uint64_t dts; ///< current dts | |||
int sample_size; ///< size of one sample all channels included | |||
int samples_per_frame; ///< samples per frame if fixed, 0 otherwise | |||
AVRational time_base; ///< time base of output audio packets | |||
} AudioInterleaveContext; | |||
AVRational time_base; ///< time base of output packets | |||
} RetimeInterleaveContext; | |||
int ff_audio_interleave_init(AVFormatContext *s, const int samples_per_frame, AVRational time_base); | |||
void ff_audio_interleave_close(AVFormatContext *s); | |||
/** | |||
* Init the retime interleave context | |||
*/ | |||
void ff_retime_interleave_init(RetimeInterleaveContext *aic, AVRational time_base); | |||
/** | |||
* Rechunk audio PCM packets per AudioInterleaveContext->samples_per_frame | |||
* and interleave them correctly. | |||
* The first element of AVStream->priv_data must be AudioInterleaveContext | |||
* Retime packets per RetimeInterleaveContext->time_base and interleave them | |||
* correctly. | |||
* The first element of AVStream->priv_data must be RetimeInterleaveContext | |||
* when using this function. | |||
* | |||
* @param get_packet function will output a packet when streams are correctly interleaved. | |||
* @param compare_ts function will compare AVPackets and decide interleaving order. | |||
*/ | |||
int ff_audio_rechunk_interleave(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush, | |||
int ff_retime_interleave(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush, | |||
int (*get_packet)(AVFormatContext *, AVPacket *, AVPacket *, int), | |||
int (*compare_ts)(AVFormatContext *, const AVPacket *, const AVPacket *)); | |||