* qatar/master: (25 commits)
  Replace custom DEBUG preprocessor trickery by the standard one.
  vorbis: Remove non-compiling debug statement.
  vorbis: Remove pointless DEBUG #ifdef around debug output macros.
  cook: Remove non-compiling debug output.
  Remove pointless #ifdefs around function declarations in a header.
  Replace #ifdef + av_log() combinations by av_dlog().
  Replace custom debug output functions by av_dlog().
  cook: Remove unused debug functions.
  Remove stray extra arguments from av_dlog() invocations.
  targa: fix big-endian build
  v4l2: remove one forgotten use of AVFormatParameters.pix_fmt.
  vfwcap: add a framerate private option.
  v4l2: add a framerate private option.
  libdc1394: add a framerate private option.
  fbdev: add a framerate private option.
  bktr: add a framerate private option.
  oma: check avio_read() return value
  nutdec: remove unused variable
  Remove unused variables
  swscale: allocate larger buffer to handle altivec overreads.
  ...

Conflicts:
    ffmpeg.c
    libavcodec/dca.c
    libavcodec/dirac.c
    libavcodec/error_resilience.c
    libavcodec/h264.c
    libavcodec/mpeg12.c
    libavcodec/mpeg4videodec.c
    libavcodec/mpegvideo.c
    libavcodec/mpegvideo_enc.c
    libavcodec/pthread.c
    libavcodec/rv10.c
    libavcodec/s302m.c
    libavcodec/shorten.c
    libavcodec/truemotion2.c
    libavcodec/utils.c
    libavdevice/dv1394.c
    libavdevice/fbdev.c
    libavdevice/libdc1394.c
    libavdevice/v4l2.c
    libavformat/4xm.c
    libavformat/apetag.c
    libavformat/asfdec.c
    libavformat/avidec.c
    libavformat/mmf.c
    libavformat/mpeg.c
    libavformat/mpegenc.c
    libavformat/mpegts.c
    libavformat/oggdec.c
    libavformat/oggparseogm.c
    libavformat/rl2.c
    libavformat/rmdec.c
    libavformat/rpl.c
    libavformat/rtpdec_latm.c
    libavformat/sauce.c
    libavformat/sol.c
    libswscale/utils.c
    tests/ref/vsynth1/error
    tests/ref/vsynth2/error

Merged-by: Michael Niedermayer <michaelni@gmx.at>
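
Several of the merged commits replace "#ifdef DEBUG" + av_log() pairs and ad-hoc dump helpers with av_dlog(). A minimal sketch of that pattern, assuming the av_dlog() macro from libavutil/log.h of this period (it logs at AV_LOG_DEBUG when DEBUG is defined and compiles to nothing otherwise); the function and variable names below are illustrative only, not part of the patch:

    #include "libavutil/log.h"

    static void dump_size(int width, int height)
    {
        /* old pattern, removed throughout this merge:
         *   #ifdef DEBUG
         *       av_log(NULL, AV_LOG_DEBUG, "size %dx%d\n", width, height);
         *   #endif
         */

        /* new pattern: av_dlog() already compiles away unless DEBUG is defined,
         * so no #ifdef is needed around the call site */
        av_dlog(NULL, "size %dx%d\n", width, height);
    }
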
@@ -1077,6 +1077,8 @@ HAVE_LIST="
isatty
kbhit
ldbrx
libdc1394_1
libdc1394_2
llrint
llrintf
local_aligned_16
@@ -2914,7 +2916,6 @@ check_mathfunc truncf
enabled avisynth && require2 vfw32 "windows.h vfw.h" AVIFileInit -lavifil32
enabled libcelt && require libcelt celt/celt.h celt_decode -lcelt0
enabled frei0r && { check_header frei0r.h || die "ERROR: frei0r.h header not found"; }
enabled libdc1394 && require_pkg_config libdc1394-2 dc1394/dc1394.h dc1394_new
enabled libdirac && require_pkg_config dirac \
"libdirac_decoder/dirac_parser.h libdirac_encoder/dirac_encoder.h" \
"dirac_decoder_init dirac_encoder_init"
@@ -2946,6 +2947,15 @@ enabled libxavs && require libxavs xavs.h xavs_encoder_encode -lxavs
enabled libxvid && require libxvid xvid.h xvid_global -lxvidcore
enabled mlib && require mediaLib mlib_types.h mlib_VectorSub_S16_U8_Mod -lmlib
# libdc1394 check
if enabled libdc1394; then
{ check_lib dc1394/dc1394.h dc1394_new -ldc1394 -lraw1394 &&
enable libdc1394_2; } ||
{ check_lib libdc1394/dc1394_control.h dc1394_create_handle -ldc1394_control -lraw1394 &&
enable libdc1394_1; } ||
die "ERROR: No version of libdc1394 found "
fi
SDL_CONFIG="${cross_prefix}sdl-config"
if check_pkg_config sdl SDL_version.h SDL_Linked_Version; then
check_cpp_condition SDL.h "(SDL_MAJOR_VERSION<<16 | SDL_MINOR_VERSION<<8 | SDL_PATCHLEVEL) >= 0x010201" $sdl_cflags &&
@@ -1242,7 +1242,7 @@ static void do_video_out(AVFormatContext *s,
}
}
sws_scale(ost->img_resample_ctx, formatted_picture->data, formatted_picture->linesize,
0, ost->resample_height, ost->resample_frame.data, ost->resample_frame.linesize);
0, ost->resample_height, final_picture->data, final_picture->linesize);
}
#endif
@@ -3723,7 +3723,6 @@ static void new_audio_stream(AVFormatContext *oc, int file_idx)
static void new_data_stream(AVFormatContext *oc, int file_idx)
{
AVStream *st;
AVOutputStream *ost av_unused;
AVCodec *codec=NULL;
AVCodecContext *data_enc;
@@ -3732,7 +3731,7 @@ static void new_data_stream(AVFormatContext *oc, int file_idx)
fprintf(stderr, "Could not alloc stream\n");
ffmpeg_exit(1);
}
ost = new_output_stream(oc, file_idx);
new_output_stream(oc, file_idx);
data_enc = st->codec;
output_codecs = grow_array(output_codecs, sizeof(*output_codecs), &nb_output_codecs, nb_output_codecs + 1);
if (!data_stream_copy) {
@@ -1760,7 +1760,7 @@ static int http_parse_request(HTTPContext *c)
}
}
#ifdef DEBUG_WMP
#ifdef DEBUG
http_log("\nGot request:\n%s\n", c->buffer);
#endif
@@ -1790,7 +1790,7 @@ static int http_parse_request(HTTPContext *c)
return 0;
}
#ifdef DEBUG_WMP
#ifdef DEBUG
if (strcmp(stream->filename + strlen(stream->filename) - 4, ".asf") == 0)
http_log("\nGot request:\n%s\n", c->buffer);
#endif
@@ -312,7 +312,7 @@ static void encode_window_bands_info(AACEncContext *s, SingleChannelElement *sce
int win, int group_len, const float lambda)
{
BandCodingPath path[120][12];
int w, swb, cb, start, start2, size;
int w, swb, cb, start, size;
int i, j;
const int max_sfb = sce->ics.max_sfb;
const int run_bits = sce->ics.num_windows == 1 ? 5 : 3;
@@ -330,7 +330,6 @@ static void encode_window_bands_info(AACEncContext *s, SingleChannelElement *sce
path[0][cb].run = 0;
}
for (swb = 0; swb < max_sfb; swb++) {
start2 = start;
size = sce->ics.swb_sizes[swb];
if (sce->zeroes[win*16 + swb]) {
for (cb = 0; cb < 12; cb++) {
@@ -414,7 +413,7 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
int win, int group_len, const float lambda)
{
BandCodingPath path[120][12];
int w, swb, cb, start, start2, size;
int w, swb, cb, start, size;
int i, j;
const int max_sfb = sce->ics.max_sfb;
const int run_bits = sce->ics.num_windows == 1 ? 5 : 3;
@@ -432,7 +431,6 @@ static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce,
path[0][cb].run = 0;
}
for (swb = 0; swb < max_sfb; swb++) {
start2 = start;
size = sce->ics.swb_sizes[swb];
if (sce->zeroes[win*16 + swb]) {
for (cb = 0; cb < 12; cb++) {
@@ -1007,12 +1005,11 @@ static void search_for_quantizers_fast(AVCodecContext *avctx, AACEncContext *s,
SingleChannelElement *sce,
const float lambda)
{
int start = 0, i, w, w2, g;
int i, w, w2, g;
int minq = 255;
memset(sce->sf_idx, 0, sizeof(sce->sf_idx));
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
start = w*128;
for (g = 0; g < sce->ics.num_swb; g++) {
for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(w+w2)*16+g];
@@ -30,6 +30,7 @@
* add temporal noise shaping
***********************************/
#include "libavutil/opt.h"
#include "avcodec.h"
#include "put_bits.h"
#include "dsputil.h"
@@ -489,7 +490,7 @@ static int aac_encode_frame(AVCodecContext *avctx,
AACEncContext *s = avctx->priv_data;
int16_t *samples = s->samples, *samples2, *la;
ChannelElement *cpe;
int i, ch, w, chans, tag, start_ch;
int i, ch, w, g, chans, tag, start_ch;
const uint8_t *chan_map = aac_chan_configs[avctx->channels-1];
int chan_el_counter[4];
FFPsyWindowInfo windows[AAC_MAX_CHANNELS];
@@ -587,8 +588,16 @@ static int aac_encode_frame(AVCodecContext *avctx,
}
}
s->cur_channel = start_ch;
if (cpe->common_window && s->coder->search_for_ms)
s->coder->search_for_ms(s, cpe, s->lambda);
if (s->options.stereo_mode && cpe->common_window) {
if (s->options.stereo_mode > 0) {
IndividualChannelStream *ics = &cpe->ch[0].ics;
for (w = 0; w < ics->num_windows; w += ics->group_len[w])
for (g = 0; g < ics->num_swb; g++)
cpe->ms_mask[w*16+g] = 1;
} else if (s->coder->search_for_ms) {
s->coder->search_for_ms(s, cpe, s->lambda);
}
}
adjust_frame_information(s, cpe, chans);
if (chans == 2) {
put_bits(&s->pb, 1, cpe->common_window);
@@ -645,6 +654,22 @@ static av_cold int aac_encode_end(AVCodecContext *avctx)
return 0;
}
#define AACENC_FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM
static const AVOption aacenc_options[] = {
{"stereo_mode", "Stereo coding method", offsetof(AACEncContext, options.stereo_mode), FF_OPT_TYPE_INT, {.dbl = 0}, -1, 1, AACENC_FLAGS, "stereo_mode"},
{"auto", "Selected by the Encoder", 0, FF_OPT_TYPE_CONST, {.dbl = -1 }, INT_MIN, INT_MAX, AACENC_FLAGS, "stereo_mode"},
{"ms_off", "Disable Mid/Side coding", 0, FF_OPT_TYPE_CONST, {.dbl = 0 }, INT_MIN, INT_MAX, AACENC_FLAGS, "stereo_mode"},
{"ms_force", "Force Mid/Side for the whole frame if possible", 0, FF_OPT_TYPE_CONST, {.dbl = 1 }, INT_MIN, INT_MAX, AACENC_FLAGS, "stereo_mode"},
{NULL}
};
static const AVClass aacenc_class = {
"AAC encoder",
av_default_item_name,
aacenc_options,
LIBAVUTIL_VERSION_INT,
};
AVCodec ff_aac_encoder = {
"aac",
AVMEDIA_TYPE_AUDIO,
@@ -656,4 +681,5 @@ AVCodec ff_aac_encoder = {
.capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL,
.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
.priv_class = &aacenc_class,
};
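
The aacenc hunks above wire a private "stereo_mode" option into the encoder through AVOptions: an option table, an AVClass describing it, and a .priv_class pointer on the AVCodec, with the AVClass pointer as the first field of the private context. A condensed sketch of that wiring, using the FF_OPT_TYPE_* names of this period; MyEncContext, myenc_options and myenc_class are placeholder names, not part of the patch:

    #include <limits.h>
    #include <stddef.h>
    #include "libavutil/log.h"
    #include "libavutil/opt.h"

    typedef struct MyEncContext {
        AVClass *av_class;   /* must be the first field so av_log()/AVOptions can find the class */
        int stereo_mode;     /* backed by the "stereo_mode" option below */
    } MyEncContext;

    #define FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM

    static const AVOption myenc_options[] = {
        /* the named constants share the option's unit string ("stereo_mode"),
         * so callers can say ms_off/ms_force instead of raw numbers */
        {"stereo_mode", "Stereo coding method", offsetof(MyEncContext, stereo_mode),
         FF_OPT_TYPE_INT, {.dbl = 0}, -1, 1, FLAGS, "stereo_mode"},
        {"auto",     "Selected by the encoder", 0, FF_OPT_TYPE_CONST, {.dbl = -1}, INT_MIN, INT_MAX, FLAGS, "stereo_mode"},
        {"ms_off",   "Disable Mid/Side coding", 0, FF_OPT_TYPE_CONST, {.dbl =  0}, INT_MIN, INT_MAX, FLAGS, "stereo_mode"},
        {"ms_force", "Force Mid/Side coding",   0, FF_OPT_TYPE_CONST, {.dbl =  1}, INT_MIN, INT_MAX, FLAGS, "stereo_mode"},
        {NULL}
    };

    static const AVClass myenc_class = {
        "my encoder", av_default_item_name, myenc_options, LIBAVUTIL_VERSION_INT,
    };

Pointing the codec's .priv_class at such an AVClass, as the hunk above does for ff_aac_encoder, is what lets the generic option code reach the private field.
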
@@ -30,6 +30,10 @@
#include "psymodel.h"
typedef struct AACEncOptions {
int stereo_mode;
} AACEncOptions;
struct AACEncContext;
typedef struct AACCoefficientsEncoder {
@@ -48,6 +52,8 @@ extern AACCoefficientsEncoder ff_aac_coders[];
* AAC encoder context
*/
typedef struct AACEncContext {
AVClass *av_class;
AACEncOptions options; ///< encoding options
PutBitContext pb;
FFTContext mdct1024; ///< long (1024 samples) frame transform context
FFTContext mdct128; ///< short (128 samples) frame transform context
@@ -66,7 +66,6 @@
#define SUBBAND_SIZE 20
#define MAX_SUBPACKETS 5
//#define COOKDEBUG
typedef struct {
int *now;
@@ -166,38 +165,6 @@ typedef struct cook {
static float pow2tab[127];
static float rootpow2tab[127];
/* debug functions */
#ifdef COOKDEBUG
static void dump_float_table(float* table, int size, int delimiter) {
int i=0;
av_log(NULL,AV_LOG_ERROR,"\n[%d]: ",i);
for (i=0 ; i<size ; i++) {
av_log(NULL, AV_LOG_ERROR, "%5.1f, ", table[i]);
if ((i+1)%delimiter == 0) av_log(NULL,AV_LOG_ERROR,"\n[%d]: ",i+1);
}
}
static void dump_int_table(int* table, int size, int delimiter) {
int i=0;
av_log(NULL,AV_LOG_ERROR,"\n[%d]: ",i);
for (i=0 ; i<size ; i++) {
av_log(NULL, AV_LOG_ERROR, "%d, ", table[i]);
if ((i+1)%delimiter == 0) av_log(NULL,AV_LOG_ERROR,"\n[%d]: ",i+1);
}
}
static void dump_short_table(short* table, int size, int delimiter) {
int i=0;
av_log(NULL,AV_LOG_ERROR,"\n[%d]: ",i);
for (i=0 ; i<size ; i++) {
av_log(NULL, AV_LOG_ERROR, "%d, ", table[i]);
if ((i+1)%delimiter == 0) av_log(NULL,AV_LOG_ERROR,"\n[%d]: ",i+1);
}
}
#endif
/*************** init functions ***************/
/* table generator */
@@ -1037,7 +1004,7 @@ static int cook_decode_frame(AVCodecContext *avctx,
return avctx->block_align;
}
#ifdef COOKDEBUG
#ifdef DEBUG
static void dump_cook_context(COOKContext *q)
{
//int i=0;
@@ -1055,7 +1022,6 @@ static void dump_cook_context(COOKContext *q)
PRINT("samples_per_channel",q->subpacket[0].samples_per_channel);
PRINT("samples_per_frame",q->subpacket[0].samples_per_frame);
PRINT("subbands",q->subpacket[0].subbands);
PRINT("random_state",q->random_state);
PRINT("js_subband_start",q->subpacket[0].js_subband_start);
PRINT("log2_numvector_size",q->subpacket[0].log2_numvector_size);
PRINT("numvector_size",q->subpacket[0].numvector_size);
@@ -1280,7 +1246,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
else
avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
#ifdef COOKDEBUG
#ifdef DEBUG
dump_cook_context(q);
#endif
return 0;
@@ -1535,8 +1535,6 @@ static void dca_exss_parse_header(DCAContext *s)
{
int ss_index;
int blownup;
int header_size av_unused;
int hd_size av_unused;
int num_audiop = 1;
int num_assets = 1;
int active_ss_mask[8];
@@ -1549,8 +1547,8 @@ static void dca_exss_parse_header(DCAContext *s)
ss_index = get_bits(&s->gb, 2);
blownup = get_bits1(&s->gb);
header_size = get_bits(&s->gb, 8 + 4 * blownup) + 1;
hd_size = get_bits_long(&s->gb, 16 + 4 * blownup) + 1;
skip_bits(&s->gb, 8 + 4 * blownup); // header_size
skip_bits(&s->gb, 16 + 4 * blownup); // hd_size
s->static_fields = get_bits1(&s->gb);
if (s->static_fields) {
@@ -245,11 +245,11 @@ static int parse_source_parameters(AVCodecContext *avctx, GetBitContext *gb,
int ff_dirac_parse_sequence_header(AVCodecContext *avctx, GetBitContext *gb,
dirac_source_params *source)
{
unsigned version_major, version_minor av_unused;
unsigned version_major;
unsigned video_format, picture_coding_mode;
version_major = svq3_get_ue_golomb(gb);
version_minor = svq3_get_ue_golomb(gb);
svq3_get_ue_golomb(gb); /* version_minor */
avctx->profile = svq3_get_ue_golomb(gb);
avctx->level = svq3_get_ue_golomb(gb);
video_format = svq3_get_ue_golomb(gb);
@@ -32,6 +32,7 @@
#include "mpegvideo.h"
#include "h264.h"
#include "rectangle.h"
#include "thread.h"
/*
* H264 redefines mb_intra so it is not mistakely used (its uninitialized in h264)
@@ -436,8 +437,7 @@ int score_sum=0;
int best_score=256*256*256*64;
int best_pred=0;
const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
int prev_x= s->current_picture.motion_val[0][mot_index][0];
int prev_y= s->current_picture.motion_val[0][mot_index][1];
int prev_x, prev_y, prev_ref;
if((mb_x^mb_y^pass)&1) continue;
@@ -535,10 +535,26 @@ skip_mean_and_median:
/* zero MV */
pred_count++;
if (!fixed[mb_xy] && 0) {
if (s->avctx->codec_id == CODEC_ID_H264) {
// FIXME
} else {
ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
mb_y, 0);
}
prev_x = s->last_picture.motion_val[0][mot_index][0];
prev_y = s->last_picture.motion_val[0][mot_index][1];
prev_ref = s->last_picture.ref_index[0][4*mb_xy];
} else {
prev_x = s->current_picture.motion_val[0][mot_index][0];
prev_y = s->current_picture.motion_val[0][mot_index][1];
prev_ref = s->current_picture.ref_index[0][4*mb_xy];
}
/* last MV */
mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index][0];
mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index][1];
ref [pred_count] = s->current_picture.ref_index[0][4*mb_xy];
mv_predictor[pred_count][0]= prev_x;
mv_predictor[pred_count][1]= prev_y;
ref [pred_count] = prev_ref;
pred_count++;
s->mv_dir = MV_DIR_FORWARD;
@@ -670,6 +686,12 @@ static int is_intra_more_likely(MpegEncContext *s){
uint8_t *mb_ptr = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
uint8_t *last_mb_ptr= s->last_picture.data [0] + mb_x*16 + mb_y*16*s->linesize;
if (s->avctx->codec_id == CODEC_ID_H264) {
// FIXME
} else {
ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
mb_y, 0);
}
is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr , s->linesize, 16);
// FIXME need await_progress() here
is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize, 16);
@@ -690,6 +712,7 @@ void ff_er_frame_start(MpegEncContext *s){
memset(s->error_status_table, MV_ERROR|AC_ERROR|DC_ERROR|VP_START|AC_END|DC_END|MV_END, s->mb_stride*s->mb_height*sizeof(uint8_t));
s->error_count= 3*s->mb_num;
s->error_occurred = 0;
}
/**
@@ -729,7 +752,10 @@ void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int en
s->error_count -= end_i - start_i + 1;
}
if(status & (AC_ERROR|DC_ERROR|MV_ERROR)) s->error_count= INT_MAX;
if(status & (AC_ERROR|DC_ERROR|MV_ERROR)) {
s->error_occurred = 1;
s->error_count= INT_MAX;
}
if(mask == ~0x7F){
memset(&s->error_status_table[start_xy], 0, (end_xy - start_xy) * sizeof(uint8_t));
@@ -1009,7 +1035,12 @@ void ff_er_frame_end(MpegEncContext *s){
int time_pp= s->pp_time;
int time_pb= s->pb_time;
// FIXME await_progress here
if (s->avctx->codec_id == CODEC_ID_H264) {
//FIXME
} else {
ff_thread_await_progress((AVFrame *) s->next_picture_ptr,
mb_y, 0);
}
s->mv[0][0][0] = s->next_picture.motion_val[0][xy][0]*time_pb/time_pp;
s->mv[0][0][1] = s->next_picture.motion_val[0][xy][1]*time_pb/time_pp;
s->mv[1][0][0] = s->next_picture.motion_val[0][xy][0]*(time_pb - time_pp)/time_pp;
@@ -656,8 +656,11 @@ retry:
if(s->slice_height==0 || s->mb_x!=0 || (s->mb_y%s->slice_height)!=0 || get_bits_count(&s->gb) > s->gb.size_in_bits)
break;
}else{
int prev_x=s->mb_x, prev_y=s->mb_y;
if(ff_h263_resync(s)<0)
break;
if (prev_y * s->mb_width + prev_x < s->mb_y * s->mb_width + s->mb_x)
s->error_occurred = 1;
}
if(s->msmpeg4_version<4 && s->h263_pred)
@@ -246,6 +246,141 @@ static int ff_h264_decode_rbsp_trailing(H264Context *h, const uint8_t *src){
return 0;
}
static inline int get_lowest_part_list_y(H264Context *h, Picture *pic, int n, int height,
int y_offset, int list){
int raw_my= h->mv_cache[list][ scan8[n] ][1];
int filter_height= (raw_my&3) ? 2 : 0;
int full_my= (raw_my>>2) + y_offset;
int top = full_my - filter_height, bottom = full_my + height + filter_height;
return FFMAX(abs(top), bottom);
}
static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n, int height,
int y_offset, int list0, int list1, int *nrefs){
MpegEncContext * const s = &h->s;
int my;
y_offset += 16*(s->mb_y >> MB_FIELD);
if(list0){
int ref_n = h->ref_cache[0][ scan8[n] ];
Picture *ref= &h->ref_list[0][ref_n];
// Error resilience puts the current picture in the ref list.
// Don't try to wait on these as it will cause a deadlock.
// Fields can wait on each other, though.
if(ref->thread_opaque != s->current_picture.thread_opaque ||
(ref->reference&3) != s->picture_structure) {
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0);
if (refs[0][ref_n] < 0) nrefs[0] += 1;
refs[0][ref_n] = FFMAX(refs[0][ref_n], my);
}
}
if(list1){
int ref_n = h->ref_cache[1][ scan8[n] ];
Picture *ref= &h->ref_list[1][ref_n];
if(ref->thread_opaque != s->current_picture.thread_opaque ||
(ref->reference&3) != s->picture_structure) {
my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1);
if (refs[1][ref_n] < 0) nrefs[1] += 1;
refs[1][ref_n] = FFMAX(refs[1][ref_n], my);
}
}
}
/**
* Wait until all reference frames are available for MC operations.
*
* @param h the H264 context
*/
static void await_references(H264Context *h){
MpegEncContext * const s = &h->s;
const int mb_xy= h->mb_xy;
const int mb_type= s->current_picture.mb_type[mb_xy];
int refs[2][48];
int nrefs[2] = {0};
int ref, list;
memset(refs, -1, sizeof(refs));
if(IS_16X16(mb_type)){
get_lowest_part_y(h, refs, 0, 16, 0,
IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
}else if(IS_16X8(mb_type)){
get_lowest_part_y(h, refs, 0, 8, 0,
IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
get_lowest_part_y(h, refs, 8, 8, 8,
IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
}else if(IS_8X16(mb_type)){
get_lowest_part_y(h, refs, 0, 16, 0,
IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
get_lowest_part_y(h, refs, 4, 16, 0,
IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
}else{
int i;
assert(IS_8X8(mb_type));
for(i=0; i<4; i++){
const int sub_mb_type= h->sub_mb_type[i];
const int n= 4*i;
int y_offset= (i&2)<<2;
if(IS_SUB_8X8(sub_mb_type)){
get_lowest_part_y(h, refs, n , 8, y_offset,
IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1), nrefs);
}else if(IS_SUB_8X4(sub_mb_type)){
get_lowest_part_y(h, refs, n , 4, y_offset,
IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1), nrefs);
get_lowest_part_y(h, refs, n+2, 4, y_offset+4,
IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1), nrefs);
}else if(IS_SUB_4X8(sub_mb_type)){
get_lowest_part_y(h, refs, n , 8, y_offset,
IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1), nrefs);
get_lowest_part_y(h, refs, n+1, 8, y_offset,
IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1), nrefs);
}else{
int j;
assert(IS_SUB_4X4(sub_mb_type));
for(j=0; j<4; j++){
int sub_y_offset= y_offset + 2*(j&2);
get_lowest_part_y(h, refs, n+j, 4, sub_y_offset,
IS_DIR(sub_mb_type, 0, 0), IS_DIR(sub_mb_type, 0, 1), nrefs);
}
}
}
}
for(list=h->list_count-1; list>=0; list--){
for(ref=0; ref<48 && nrefs[list]; ref++){
int row = refs[list][ref];
if(row >= 0){
Picture *ref_pic = &h->ref_list[list][ref];
int ref_field = ref_pic->reference - 1;
int ref_field_picture = ref_pic->field_picture;
int pic_height = 16*s->mb_height >> ref_field_picture;
row <<= MB_MBAFF;
nrefs[list]--;
if(!FIELD_PICTURE && ref_field_picture){ // frame referencing two fields
ff_thread_await_progress((AVFrame*)ref_pic, FFMIN((row >> 1) - !(row&1), pic_height-1), 1);
ff_thread_await_progress((AVFrame*)ref_pic, FFMIN((row >> 1) , pic_height-1), 0);
}else if(FIELD_PICTURE && !ref_field_picture){ // field referencing one field of a frame
ff_thread_await_progress((AVFrame*)ref_pic, FFMIN(row*2 + ref_field , pic_height-1), 0);
}else if(FIELD_PICTURE){
ff_thread_await_progress((AVFrame*)ref_pic, FFMIN(row, pic_height-1), ref_field);
}else{
ff_thread_await_progress((AVFrame*)ref_pic, FFMIN(row, pic_height-1), 0);
}
}
}
}
}
#if 0
/**
* DCT transforms the 16 dc values.
@@ -315,6 +450,7 @@ static void chroma_dc_dct_c(DCTELEM *block){
static void free_tables(H264Context *h, int free_rbsp){
int i;
H264Context *hx;
av_freep(&h->intra4x4_pred_mode);
av_freep(&h->chroma_pred_mode_table);
av_freep(&h->cbp_table);
@@ -611,6 +747,7 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx){
return 0;
}
#define IN_RANGE(a, b, size) (((a) >= (b)) && ((a) < ((b)+(size))))
static void copy_picture_range(Picture **to, Picture **from, int count, MpegEncContext *new_base, MpegEncContext *old_base)
{
@@ -711,7 +848,8 @@ static int decode_update_thread_context(AVCodecContext *dst, const AVCodecContex
copy_fields(h, h1, poc_lsb, redundant_pic_count);
//reference lists
copy_fields(h, h1, ref_count, intra_gb);
copy_fields(h, h1, ref_count, list_count);
copy_fields(h, h1, ref_list, intra_gb);
copy_fields(h, h1, short_ref, cabac_init_idc);
copy_picture_range(h->short_ref, h1->short_ref, 32, s, s1);
@@ -738,6 +876,7 @@ int ff_h264_frame_start(H264Context *h){
MpegEncContext * const s = &h->s;
int i;
const int pixel_shift = h->pixel_shift;
int thread_count = (s->avctx->active_thread_type & FF_THREAD_SLICE) ? s->avctx->thread_count : 1;
if(MPV_frame_start(s, s->avctx) < 0)
return -1;
@@ -766,7 +905,7 @@ int ff_h264_frame_start(H264Context *h){
/* can't be in alloc_tables because linesize isn't known there.
* FIXME: redo bipred weight to not require extra buffer? */
for(i = 0; i < s->avctx->thread_count; i++)
for(i = 0; i < thread_count; i++)
if(h->thread_context[i] && !h->thread_context[i]->s.obmc_scratchpad)
h->thread_context[i]->s.obmc_scratchpad = av_malloc(16*2*s->linesize + 8*2*s->uvlinesize);
@@ -2910,12 +3049,6 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
int nal_index;
h->max_contexts = (HAVE_THREADS && (s->avctx->active_thread_type&FF_THREAD_SLICE)) ? avctx->thread_count : 1;
#if 0
int i;
for(i=0; i<50; i++){
av_log(NULL, AV_LOG_ERROR,"%02X ", buf[i]);
}
#endif
if(!(s->flags2 & CODEC_FLAG2_CHUNKS)){
h->current_slice = 0;
if (!s->first_field)
@@ -3491,8 +3624,7 @@ AVCodec ff_h264_decoder = {
ff_h264_decode_end,
decode_frame,
/*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 | CODEC_CAP_DELAY |
CODEC_CAP_FRAME_THREADS |
CODEC_CAP_SLICE_THREADS,
CODEC_CAP_SLICE_THREADS | CODEC_CAP_FRAME_THREADS,
.flush= flush_dpb,
.long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
.init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
@@ -629,7 +629,7 @@ static int decode_band(IVI5DecContext *ctx, int plane_num,
FFSWAP(int16_t, band->rv_map->valtab[idx1], band->rv_map->valtab[idx2]);
}
#if IVI_DEBUG
#ifdef DEBUG
if (band->checksum_present) {
uint16_t chksum = ivi_calc_band_checksum(band);
if (chksum != band->checksum) {
@@ -46,14 +46,6 @@
#define PALETTE_COUNT 256
/* debugging support */
#define DEBUG_INTERPLAY 0
#if DEBUG_INTERPLAY
#define debug_interplay(x,...) av_log(NULL, AV_LOG_DEBUG, x, __VA_ARGS__)
#else
static inline void debug_interplay(const char *format, ...) { }
#endif
typedef struct IpvideoContext {
AVCodecContext *avctx;
@@ -141,7 +133,7 @@ static int ipvideo_decode_block_opcode_0x2(IpvideoContext *s)
y = 8 + ((B - 56) / 29);
}
debug_interplay (" motion byte = %d, (x, y) = (%d, %d)\n", B, x, y);
av_dlog(NULL, " motion byte = %d, (x, y) = (%d, %d)\n", B, x, y);
return copy_from(s, &s->second_last_frame, x, y);
}
@@ -169,7 +161,7 @@ static int ipvideo_decode_block_opcode_0x3(IpvideoContext *s)
y = -( 8 + ((B - 56) / 29));
}
debug_interplay (" motion byte = %d, (x, y) = (%d, %d)\n", B, x, y);
av_dlog(NULL, " motion byte = %d, (x, y) = (%d, %d)\n", B, x, y);
return copy_from(s, &s->current_frame, x, y);
}
@@ -192,7 +184,7 @@ static int ipvideo_decode_block_opcode_0x4(IpvideoContext *s)
x = -8 + BL;
y = -8 + BH;
debug_interplay (" motion byte = %d, (x, y) = (%d, %d)\n", B, x, y);
av_dlog(NULL, " motion byte = %d, (x, y) = (%d, %d)\n", B, x, y);
return copy_from(s, &s->last_frame, x, y);
}
@@ -207,7 +199,7 @@ static int ipvideo_decode_block_opcode_0x5(IpvideoContext *s)
x = *s->stream_ptr++;
y = *s->stream_ptr++;
debug_interplay (" motion bytes = %d, %d\n", x, y);
av_dlog(NULL, " motion bytes = %d, %d\n", x, y);
return copy_from(s, &s->last_frame, x, y);
}
@@ -588,7 +580,7 @@ static int ipvideo_decode_block_opcode_0x6_16(IpvideoContext *s)
x = *s->stream_ptr++;
y = *s->stream_ptr++;
debug_interplay (" motion bytes = %d, %d\n", x, y);
av_dlog(NULL, " motion bytes = %d, %d\n", x, y);
return copy_from(s, &s->second_last_frame, x, y);
}
@@ -965,7 +957,7 @@ static void ipvideo_decode_opcodes(IpvideoContext *s)
static int frame = 0;
GetBitContext gb;
debug_interplay("------------------ frame %d\n", frame);
av_dlog(NULL, "------------------ frame %d\n", frame);
frame++;
if (!s->is_16bpp) {
@@ -991,8 +983,8 @@ static void ipvideo_decode_opcodes(IpvideoContext *s)
for (x = 0; x < s->avctx->width; x += 8) {
opcode = get_bits(&gb, 4);
debug_interplay(" block @ (%3d, %3d): encoding 0x%X, data ptr @ %p\n",
x, y, opcode, s->stream_ptr);
av_dlog(NULL, " block @ (%3d, %3d): encoding 0x%X, data ptr @ %p\n",
x, y, opcode, s->stream_ptr);
if (!s->is_16bpp) {
s->pixel_ptr = s->current_frame.data[0] + x
@@ -152,7 +152,7 @@ int ff_h263_decode_mba(MpegEncContext *s)
*/
static int h263_decode_gob_header(MpegEncContext *s)
{
unsigned int val, gfid, gob_number;
unsigned int val, gob_number;
int left;
/* Check for GOB Start Code */
@@ -183,12 +183,12 @@ static int h263_decode_gob_header(MpegEncContext *s)
s->qscale = get_bits(&s->gb, 5); /* SQUANT */
if(get_bits1(&s->gb)==0)
return -1;
gfid = get_bits(&s->gb, 2); /* GFID */
skip_bits(&s->gb, 2); /* GFID */
}else{
gob_number = get_bits(&s->gb, 5); /* GN */
s->mb_x= 0;
s->mb_y= s->gob_index* gob_number;
gfid = get_bits(&s->gb, 2); /* GFID */
skip_bits(&s->gb, 2); /* GFID */
s->qscale = get_bits(&s->gb, 5); /* GQUANT */
}
@@ -418,8 +418,8 @@ int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile)
break;
pos = band->scan[scan_pos];
if (IVI_DEBUG && !val)
av_log(NULL, AV_LOG_ERROR, "Val = 0 encountered!\n");
if (!val)
av_dlog(NULL, "Val = 0 encountered!\n");
q = (base_tab[pos] * quant) >> 9;
if (q > 1)
@@ -563,7 +563,7 @@ void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
}
#if IVI_DEBUG
#ifdef DEBUG
uint16_t ivi_calc_band_checksum (IVIBandDesc *band)
{
int x, y;
@@ -33,8 +33,6 @@
#include "get_bits.h"
#include <stdint.h>
#define IVI_DEBUG 0
#define IVI_VLC_BITS 13 ///< max number of bits of the ivi's huffman codes
/**
@@ -340,7 +338,6 @@ void ff_ivi_process_empty_tile(AVCodecContext *avctx, IVIBandDesc *band,
*/
void ff_ivi_output_plane(IVIPlaneDesc *plane, uint8_t *dst, int dst_pitch);
#if IVI_DEBUG
/**
* Calculate band checksum from band data.
*/
@@ -350,6 +347,5 @@ uint16_t ivi_calc_band_checksum (IVIBandDesc *band);
* Verify that band data lies in range.
*/
int ivi_check_band (IVIBandDesc *band, const uint8_t *ref, int pitch);
#endif
#endif /* AVCODEC_IVI_COMMON_H */
@@ -1670,7 +1670,7 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
*s->current_picture_ptr->pan_scan= s1->pan_scan;
if (HAVE_PTHREADS && avctx->active_thread_type&FF_THREAD_FRAME)
if (HAVE_PTHREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
ff_thread_finish_setup(avctx);
}else{ //second field
int i;
@@ -2004,7 +2004,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
*pict= *(AVFrame*)s->current_picture_ptr;
ff_print_debug_info(s, pict);
} else {
if (avctx->active_thread_type&FF_THREAD_FRAME)
if (avctx->active_thread_type & FF_THREAD_FRAME)
s->picture_number++;
/* latency of 1 frame for I- and P-frames */
/* XXX: use another variable than picture_number */
@@ -2179,14 +2179,13 @@ static void mpeg_decode_gop(AVCodecContext *avctx,
Mpeg1Context *s1 = avctx->priv_data;
MpegEncContext *s = &s1->mpeg_enc_ctx;
int drop_frame_flag;
int time_code_hours, time_code_minutes;
int time_code_seconds, time_code_pictures;
int broken_link;
init_get_bits(&s->gb, buf, buf_size*8);
drop_frame_flag = get_bits1(&s->gb);
skip_bits1(&s->gb); /* drop_frame_flag */
time_code_hours=get_bits(&s->gb,5);
time_code_minutes = get_bits(&s->gb,6);
@@ -2340,7 +2339,7 @@ static int decode_chunks(AVCodecContext *avctx,
buf_ptr = ff_find_start_code(buf_ptr,buf_end, &start_code);
if (start_code > 0x1ff){
if(s2->pict_type != AV_PICTURE_TYPE_B || avctx->skip_frame <= AVDISCARD_DEFAULT){
if(HAVE_THREADS && avctx->active_thread_type&FF_THREAD_SLICE){
if(HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE)){
int i;
assert(avctx->thread_count > 1);
@@ -2509,7 +2508,7 @@ static int decode_chunks(AVCodecContext *avctx,
break;
}
if(HAVE_THREADS && avctx->active_thread_type&FF_THREAD_SLICE){
if(HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE)){
int threshold= (s2->mb_height*s->slice_count + avctx->thread_count/2) / avctx->thread_count;
assert(avctx->thread_count > 1);
if(threshold <= mb_y){
@@ -761,10 +761,9 @@ void ff_mpeg1_encode_init(MpegEncContext *s)
if(mv==0) len= ff_mpeg12_mbMotionVectorTable[0][1];
else{
int val, bit_size, range, code;
int val, bit_size, code;
bit_size = f_code - 1;
range = 1 << bit_size;
val=mv;
if (val < 0)
@@ -397,14 +397,13 @@ int mpeg4_decode_video_packet_header(MpegEncContext *s)
header_extension= get_bits1(&s->gb);
}
if(header_extension){
int time_increment;
int time_incr=0;
while (get_bits1(&s->gb) != 0)
time_incr++;
check_marker(&s->gb, "before time_increment in video packed header");
time_increment= get_bits(&s->gb, s->time_increment_bits);
skip_bits(&s->gb, s->time_increment_bits); /* time_increment */
check_marker(&s->gb, "before vop_coding_type in video packed header");
skip_bits(&s->gb, 2); /* vop coding type */
@@ -1805,16 +1804,14 @@ no_cplx_est:
if (s->scalability) {
GetBitContext bak= *gb;
int ref_layer_id;
int ref_layer_sampling_dir;
int h_sampling_factor_n;
int h_sampling_factor_m;
int v_sampling_factor_n;
int v_sampling_factor_m;
s->hierachy_type= get_bits1(gb);
ref_layer_id= get_bits(gb, 4);
ref_layer_sampling_dir= get_bits1(gb);
skip_bits(gb, 4); /* ref_layer_id */
skip_bits1(gb); /* ref_layer_sampling_dir */
h_sampling_factor_n= get_bits(gb, 5);
h_sampling_factor_m= get_bits(gb, 5);
v_sampling_factor_n= get_bits(gb, 5);
@@ -1993,15 +1990,13 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
if (s->shape != RECT_SHAPE) {
if (s->vol_sprite_usage != 1 || s->pict_type != AV_PICTURE_TYPE_I) {
int width, height, hor_spat_ref, ver_spat_ref;
width = get_bits(gb, 13);
skip_bits(gb, 13); /* width */
skip_bits1(gb); /* marker */
height = get_bits(gb, 13);
skip_bits(gb, 13); /* height */
skip_bits1(gb); /* marker */
hor_spat_ref = get_bits(gb, 13); /* hor_spat_ref */
skip_bits(gb, 13); /* hor_spat_ref */
skip_bits1(gb); /* marker */
ver_spat_ref = get_bits(gb, 13); /* ver_spat_ref */
skip_bits(gb, 13); /* ver_spat_ref */
}
skip_bits1(gb); /* change_CR_disable */
@@ -527,7 +527,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src
s->last_pict_type= s1->pict_type;
if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->quality;
if(s1->pict_type!=AV_PICTURE_TYPE_B){
if(s1->pict_type!=FF_B_TYPE){
s->last_non_b_pict_type= s1->pict_type;
}
}
@@ -586,7 +586,8 @@ av_cold int MPV_common_init(MpegEncContext *s)
return -1;
}
if(s->avctx->active_thread_type&FF_THREAD_SLICE &&
if((s->avctx->active_thread_type & FF_THREAD_SLICE) &&
(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
return -1;
@@ -763,6 +764,7 @@ av_cold int MPV_common_init(MpegEncContext *s)
if(init_duplicate_context(s, s) < 0) goto fail;
s->start_mb_y = 0;
s->end_mb_y = s->mb_height;
}
return 0;
@@ -2634,6 +2636,6 @@ void ff_set_qscale(MpegEncContext * s, int qscale)
void MPV_report_decode_progress(MpegEncContext *s)
{
if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame)
if (s->pict_type != FF_B_TYPE && !s->partitioned_frame && !s->error_occurred)
ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
}
@@ -474,7 +474,7 @@ typedef struct MpegEncContext {
int last_bits; ///< temp var used for calculating the above vars
/* error concealment / resync */
int error_count;
int error_count, error_occurred;
uint8_t *error_status_table; ///< table of the error status of each MB
#define VP_START 1 ///< current MB is the first after a resync marker
#define AC_ERROR 2
@@ -2759,7 +2759,7 @@ static int encode_picture(MpegEncContext *s, int picture_number)
{
int i;
int bits;
int context_count = s->avctx->active_thread_type&FF_THREAD_SLICE ? s->avctx->thread_count : 1;
int context_count = (s->avctx->active_thread_type & FF_THREAD_SLICE) ? s->avctx->thread_count : 1;
s->picture_number = picture_number;
@@ -32,7 +32,7 @@ static void mpegvideo_extract_headers(AVCodecParserContext *s,
uint32_t start_code;
int frame_rate_index, ext_type, bytes_left;
int frame_rate_ext_n, frame_rate_ext_d;
int picture_structure, top_field_first, repeat_first_field, progressive_frame;
int top_field_first, repeat_first_field, progressive_frame;
int horiz_size_ext, vert_size_ext, bit_rate_ext;
int did_set_size=0;
//FIXME replace the crap with get_bits()
@@ -91,7 +91,6 @@ static void mpegvideo_extract_headers(AVCodecParserContext *s,
break;
case 0x8: /* picture coding extension */
if (bytes_left >= 5) {
picture_structure = buf[2]&3;
top_field_first = buf[3] & (1 << 7);
repeat_first_field = buf[3] & (1 << 1);
progressive_frame = buf[4] & (1 << 7);
@@ -1528,9 +1528,7 @@ int msmpeg4_decode_ext_header(MpegEncContext * s, int buf_size)
/* the alt_bitstream reader could read over the end so we need to check it */
if(left>=length && left<length+8)
{
int fps;
fps= get_bits(&s->gb, 5);
skip_bits(&s->gb, 5); /* fps */
s->bit_rate= get_bits(&s->gb, 11)*1024;
if(s->msmpeg4_version>=3)
s->flipflop_rounding= get_bits1(&s->gb);
@@ -379,7 +379,7 @@ static int decode_frame(AVCodecContext *avctx,
 AVFrame *p;
 uint8_t *crow_buf_base = NULL;
 uint32_t tag, length;
-int ret, crc;
+int ret;
 FFSWAP(AVFrame *, s->current_picture, s->last_picture);
 avctx->coded_frame= s->current_picture;
@@ -433,7 +433,7 @@ static int decode_frame(AVCodecContext *avctx,
 s->compression_type = *s->bytestream++;
 s->filter_type = *s->bytestream++;
 s->interlace_type = *s->bytestream++;
-crc = bytestream_get_be32(&s->bytestream);
+s->bytestream += 4; /* crc */
 s->state |= PNG_IHDR;
 av_dlog(avctx, "width=%d height=%d depth=%d color_type=%d compression_type=%d filter_type=%d interlace_type=%d\n",
 s->width, s->height, s->bit_depth, s->color_type,
@@ -528,8 +528,7 @@ static int decode_frame(AVCodecContext *avctx,
 s->state |= PNG_IDAT;
 if (png_decode_idat(s, length) < 0)
 goto fail;
-/* skip crc */
-crc = bytestream_get_be32(&s->bytestream);
+s->bytestream += 4; /* crc */
 break;
 case MKTAG('P', 'L', 'T', 'E'):
 {
@@ -549,7 +548,7 @@ static int decode_frame(AVCodecContext *avctx,
 s->palette[i] = (0xff << 24);
 }
 s->state |= PNG_PLTE;
-crc = bytestream_get_be32(&s->bytestream);
+s->bytestream += 4; /* crc */
 }
 break;
 case MKTAG('t', 'R', 'N', 'S'):
@@ -565,13 +564,13 @@ static int decode_frame(AVCodecContext *avctx,
 v = *s->bytestream++;
 s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
 }
-crc = bytestream_get_be32(&s->bytestream);
+s->bytestream += 4; /* crc */
 }
 break;
 case MKTAG('I', 'E', 'N', 'D'):
 if (!(s->state & PNG_ALLIMAGE))
 goto fail;
-crc = bytestream_get_be32(&s->bytestream);
+s->bytestream += 4; /* crc */
 goto exit_loop;
 default:
 /* skip tag */
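The pngdec hunks above all touch the same spot in the chunk loop: a PNG chunk is a 4-byte big-endian length, a 4-byte type, the payload, and a trailing 4-byte CRC over type and payload, and this decoder does not verify that CRC. A sketch of the idea under that assumption (skip_png_crc() is a hypothetical helper, not part of the patch):

    #include <stdint.h>

    /* Step over the 4-byte CRC that terminates every PNG chunk. */
    static const uint8_t *skip_png_crc(const uint8_t *p)
    {
        return p + 4;
    }

Advancing the pointer is equivalent to the old bytestream_get_be32() call, minus the unused return value.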
@@ -55,7 +55,7 @@ typedef struct ThreadContext {
 } ThreadContext;
 /// Max number of frame buffers that can be allocated when using frame threads.
-#define MAX_BUFFERS 33
+#define MAX_BUFFERS (32+1)
 /**
  * Context used by codec threads and stored in their AVCodecContext thread_opaque.
@@ -235,7 +235,7 @@ int rv_decode_dc(MpegEncContext *s, int n)
 /* read RV 1.0 compatible frame header */
 static int rv10_decode_picture_header(MpegEncContext *s)
 {
-int mb_count, pb_frame, marker, unk av_unused, mb_xy;
+int mb_count, pb_frame, marker, mb_xy;
 marker = get_bits1(&s->gb);
@@ -282,7 +282,7 @@ static int rv10_decode_picture_header(MpegEncContext *s)
 s->mb_y = 0;
 mb_count = s->mb_width * s->mb_height;
 }
-unk= get_bits(&s->gb, 3); /* ignored */
+skip_bits(&s->gb, 3); /* ignored */
 s->f_code = 1;
 s->unrestricted_mv = 1;
@@ -29,7 +29,7 @@ static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
 int buf_size)
 {
 uint32_t h;
-int frame_size, channels, id av_unused, bits;
+int frame_size, channels, bits;
 if (buf_size <= AES3_HEADER_LEN) {
 av_log(avctx, AV_LOG_ERROR, "frame is too short\n");
@@ -48,7 +48,6 @@ static int s302m_parse_frame_header(AVCodecContext *avctx, const uint8_t *buf,
 h = AV_RB32(buf);
 frame_size = (h >> 16) & 0xffff;
 channels = ((h >> 14) & 0x0003) * 2 + 2;
-id = (h >> 6) & 0x00ff;
 bits = ((h >> 4) & 0x0003) * 4 + 16;
 if (AES3_HEADER_LEN + frame_size != buf_size || bits > 24) {
@@ -196,7 +196,6 @@ static int decode_wave_header(AVCodecContext *avctx, uint8_t *header, int header_size)
 {
 GetBitContext hb;
 int len;
-int chunk_size av_unused;
 short wave_format;
 init_get_bits(&hb, header, header_size*8);
@@ -205,7 +204,7 @@ static int decode_wave_header(AVCodecContext *avctx, uint8_t *header, int header_size)
 return -1;
 }
-chunk_size = get_le32(&hb);
+skip_bits_long(&hb, 32); /* chunk_size */
 if (get_le32(&hb) != MKTAG('W','A','V','E')) {
 av_log(avctx, AV_LOG_ERROR, "missing WAVE tag\n");
@@ -33,7 +33,6 @@
 */
-//#define DEBUG_SVQ1
 #include "avcodec.h"
 #include "dsputil.h"
 #include "mpegvideo.h"
@@ -238,9 +237,9 @@ static int svq1_decode_block_intra (GetBitContext *bitbuf, uint8_t *pixels, int pitch)
 }
 if ((stages > 0) && (level >= 4)) {
-#ifdef DEBUG_SVQ1
-av_log(s->avctx, AV_LOG_INFO, "Error (svq1_decode_block_intra): invalid vector: stages=%i level=%i\n",stages,level);
-#endif
+av_dlog(NULL,
+"Error (svq1_decode_block_intra): invalid vector: stages=%i level=%i\n",
+stages, level);
 return -1; /* invalid vector */
 }
@@ -288,9 +287,9 @@ static int svq1_decode_block_non_intra (GetBitContext *bitbuf, uint8_t *pixels,
 if (stages == -1) continue; /* skip vector */
 if ((stages > 0) && (level >= 4)) {
-#ifdef DEBUG_SVQ1
-av_log(s->avctx, AV_LOG_INFO, "Error (svq1_decode_block_non_intra): invalid vector: stages=%i level=%i\n",stages,level);
-#endif
+av_dlog(NULL,
+"Error (svq1_decode_block_non_intra): invalid vector: stages=%i level=%i\n",
+stages, level);
 return -1; /* invalid vector */
 }
@@ -499,9 +498,7 @@ static int svq1_decode_delta_block (MpegEncContext *s, GetBitContext *bitbuf,
 if (result != 0)
 {
-#ifdef DEBUG_SVQ1
-av_log(s->avctx, AV_LOG_INFO, "Error in svq1_motion_inter_block %i\n",result);
-#endif
+av_dlog(s->avctx, "Error in svq1_motion_inter_block %i\n", result);
 break;
 }
 result = svq1_decode_block_non_intra (bitbuf, current, pitch);
@@ -512,9 +509,7 @@ static int svq1_decode_delta_block (MpegEncContext *s, GetBitContext *bitbuf,
 if (result != 0)
 {
-#ifdef DEBUG_SVQ1
-av_log(s->avctx, AV_LOG_INFO, "Error in svq1_motion_inter_4v_block %i\n",result);
-#endif
+av_dlog(s->avctx, "Error in svq1_motion_inter_4v_block %i\n", result);
 break;
 }
 result = svq1_decode_block_non_intra (bitbuf, current, pitch);
@@ -554,9 +549,8 @@ static void svq1_parse_string (GetBitContext *bitbuf, uint8_t *out) {
 static int svq1_decode_frame_header (GetBitContext *bitbuf,MpegEncContext *s) {
 int frame_size_code;
-int temporal_reference;
-temporal_reference = get_bits (bitbuf, 8);
+skip_bits(bitbuf, 8); /* temporal_reference */
 /* frame type */
 s->pict_type= get_bits (bitbuf, 2)+1;
@@ -661,9 +655,7 @@ static int svq1_decode_frame(AVCodecContext *avctx,
 if (result != 0)
 {
-#ifdef DEBUG_SVQ1
-av_log(s->avctx, AV_LOG_INFO, "Error in svq1_decode_frame_header %i\n",result);
-#endif
+av_dlog(s->avctx, "Error in svq1_decode_frame_header %i\n",result);
 return result;
 }
@@ -712,9 +704,7 @@ static int svq1_decode_frame(AVCodecContext *avctx,
 result = svq1_decode_block_intra (&s->gb, &current[x], linesize);
 if (result != 0)
 {
-//#ifdef DEBUG_SVQ1
 av_log(s->avctx, AV_LOG_INFO, "Error in svq1_decode_block %i (keyframe)\n",result);
-//#endif
 goto err;
 }
 }
@@ -730,9 +720,7 @@ static int svq1_decode_frame(AVCodecContext *avctx,
 linesize, pmv, x, y);
 if (result != 0)
 {
-#ifdef DEBUG_SVQ1
-av_log(s->avctx, AV_LOG_INFO, "Error in svq1_decode_delta_block %i\n",result);
-#endif
+av_dlog(s->avctx, "Error in svq1_decode_delta_block %i\n",result);
 goto err;
 }
 }
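The SVQ1 hunks replace hand-rolled DEBUG_SVQ1 conditionals with av_dlog(). At the time of this merge, av_dlog() in libavutil/log.h was essentially a DEBUG-guarded wrapper, roughly:

    /* Sketch of the av_dlog() definition this merge relies on; the exact
     * form lives in libavutil/log.h and may differ in detail. */
    #ifdef DEBUG
    #    define av_dlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__)
    #else
    #    define av_dlog(pctx, ...)
    #endif

so a single call site replaces each #ifdef/av_log/#endif triple, and the message is compiled out entirely in non-DEBUG builds.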
@@ -108,18 +108,18 @@ static int decode_frame(AVCodecContext *avctx,
 AVFrame * const p= (AVFrame*)&s->picture;
 uint8_t *dst;
 int stride;
-int idlen, pal, compr, x, y, w, h, bpp, flags;
+int idlen, compr, y, w, h, bpp, flags;
 int first_clr, colors, csize;
 /* parse image header */
 CHECK_BUFFER_SIZE(buf, buf_end, 18, "header");
 idlen = *buf++;
-pal = *buf++;
+buf++; /* pal */
 compr = *buf++;
 first_clr = AV_RL16(buf); buf += 2;
 colors = AV_RL16(buf); buf += 2;
 csize = *buf++;
-x = AV_RL16(buf); buf += 2;
+buf += 2; /* x */
 y = AV_RL16(buf); buf += 2;
 w = AV_RL16(buf); buf += 2;
 h = AV_RL16(buf); buf += 2;
@@ -210,6 +210,7 @@ static int decode_frame(AVCodecContext *avctx,
 CHECK_BUFFER_SIZE(buf, buf_end, img_size, "image data");
 for(y = 0; y < s->height; y++){
 #if HAVE_BIGENDIAN
+int x;
 if((s->bpp + 1) >> 3 == 2){
 uint16_t *dst16 = (uint16_t*)dst;
 for(x = 0; x < s->width; x++)
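For reference, the fields being stepped over in the targa hunk map onto the fixed 18-byte TGA header; a sketch of that layout (field names are descriptive, not the decoder's own, and the struct illustrates field order only — it is not meant to be overlaid on the byte stream):

    #include <stdint.h>

    /* Classic Targa header, all multi-byte fields little-endian. */
    typedef struct TgaHeaderSketch {
        uint8_t  id_length;       /* idlen                */
        uint8_t  colormap_type;   /* pal   - now skipped  */
        uint8_t  image_type;      /* compr                */
        uint16_t first_clr;
        uint16_t colors;
        uint8_t  csize;
        uint16_t x_origin;        /* x     - now skipped  */
        uint16_t y_origin;        /* y                    */
        uint16_t width;
        uint16_t height;
        uint8_t  bpp;
        uint8_t  descriptor;      /* flags                */
    } TgaHeaderSketch;

Only the values the decoder actually consumes keep local variables after this change.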
@@ -211,19 +211,6 @@ static inline int tm2_read_header(TM2Context *ctx, const uint8_t *buf)
 /* av_log (ctx->avctx, AV_LOG_ERROR, "TM2 old header: not implemented (yet)\n"); */
 return 40;
 } else if(magic == 0x00000101) { /* new header */
-av_unused int w, h, size, flags, xr, yr, length;
-length = AV_RL32(buf);
-buf += 4;
-init_get_bits(&ctx->gb, buf, 32 * 8);
-size = get_bits_long(&ctx->gb, 31);
-h = get_bits(&ctx->gb, 15);
-w = get_bits(&ctx->gb, 15);
-flags = get_bits_long(&ctx->gb, 31);
-yr = get_bits(&ctx->gb, 9);
-xr = get_bits(&ctx->gb, 9);
 return 40;
 } else {
 av_log (ctx->avctx, AV_LOG_ERROR, "Not a TM2 header: 0x%08X\n", magic);
@@ -75,7 +75,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
 int buf_size = avpkt->size;
 CamtasiaContext * const c = avctx->priv_data;
 const unsigned char *encoded = buf;
-unsigned char *outptr;
 int zret; // Zlib return code
 int len = buf_size;
@@ -89,8 +88,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
 return -1;
 }
-outptr = c->pic.data[0]; // Output image pointer
 zret = inflateReset(&(c->zstream));
 if (zret != Z_OK) {
 av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zret);
@@ -115,7 +115,7 @@ typedef struct InternalBuffer{
 enum PixelFormat pix_fmt;
 }InternalBuffer;
-#define INTERNAL_BUFFER_SIZE 33
+#define INTERNAL_BUFFER_SIZE (32+1)
 void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[4]){
 int w_align= 1;
@@ -20,9 +20,6 @@
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
-#undef V_DEBUG
-//#define V_DEBUG
 #define ALT_BITSTREAM_READER_LE
 #include "avcodec.h"
 #include "get_bits.h"
@@ -57,7 +54,7 @@ int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, unsigned num)
 unsigned i, j, p, code;
-#ifdef V_DEBUG
+#ifdef DEBUG
 GetBitContext gb;
 #endif
@@ -74,7 +71,7 @@ int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, unsigned num)
 for (i = 0; i < bits[p]; ++i)
 exit_at_level[i+1] = 1 << i;
-#ifdef V_DEBUG
+#ifdef DEBUG
 av_log(NULL, AV_LOG_INFO, " %u. of %u code len %d code %d - ", p, num, bits[p], codes[p]);
 init_get_bits(&gb, (uint8_t *)&codes[p], bits[p]);
 for (i = 0; i < bits[p]; ++i)
@@ -102,7 +99,7 @@ int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, unsigned num)
 exit_at_level[j] = code + (1 << (j - 1));
 codes[p] = code;
-#ifdef V_DEBUG
+#ifdef DEBUG
 av_log(NULL, AV_LOG_INFO, " %d. code len %d code %d - ", p, bits[p], codes[p]);
 init_get_bits(&gb, (uint8_t *)&codes[p], bits[p]);
 for (i = 0; i < bits[p]; ++i)
@@ -20,10 +20,6 @@
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
-#undef V_DEBUG
-//#define V_DEBUG
-//#define AV_DEBUG(...) av_log(NULL, AV_LOG_INFO, __VA_ARGS__)
 #include <math.h>
 #define ALT_BITSTREAM_READER_LE
@@ -41,10 +37,6 @@
 #define V_MAX_VLCS (1 << 16)
 #define V_MAX_PARTITIONS (1 << 20)
-#ifndef V_DEBUG
-#define AV_DEBUG(...)
-#endif
 #undef NDEBUG
 #include <assert.h>
@@ -245,7 +237,7 @@ static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc)
 vc->codebook_count = get_bits(gb, 8) + 1;
-AV_DEBUG(" Codebooks: %d \n", vc->codebook_count);
+av_dlog(NULL, " Codebooks: %d \n", vc->codebook_count);
 vc->codebooks = av_mallocz(vc->codebook_count * sizeof(*vc->codebooks));
 tmp_vlc_bits = av_mallocz(V_MAX_VLCS * sizeof(*tmp_vlc_bits));
@@ -256,7 +248,7 @@ static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc)
 vorbis_codebook *codebook_setup = &vc->codebooks[cb];
 unsigned ordered, t, entries, used_entries = 0;
-AV_DEBUG(" %u. Codebook\n", cb);
+av_dlog(NULL, " %u. Codebook\n", cb);
 if (get_bits(gb, 24) != 0x564342) {
 av_log(vc->avccontext, AV_LOG_ERROR,
@@ -281,17 +273,17 @@ static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc)
 ordered = get_bits1(gb);
-AV_DEBUG(" codebook_dimensions %d, codebook_entries %u\n",
-codebook_setup->dimensions, entries);
+av_dlog(NULL, " codebook_dimensions %d, codebook_entries %u\n",
+codebook_setup->dimensions, entries);
 if (!ordered) {
 unsigned ce, flag;
 unsigned sparse = get_bits1(gb);
-AV_DEBUG(" not ordered \n");
+av_dlog(NULL, " not ordered \n");
 if (sparse) {
-AV_DEBUG(" sparse \n");
+av_dlog(NULL, " sparse \n");
 used_entries = 0;
 for (ce = 0; ce < entries; ++ce) {
@@ -303,7 +295,7 @@ static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc)
 tmp_vlc_bits[ce] = 0;
 }
 } else {
-AV_DEBUG(" not sparse \n");
+av_dlog(NULL, " not sparse \n");
 used_entries = entries;
 for (ce = 0; ce < entries; ++ce)
@@ -313,17 +305,17 @@ static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc)
 unsigned current_entry = 0;
 unsigned current_length = get_bits(gb, 5) + 1;
-AV_DEBUG(" ordered, current length: %u\n", current_length); //FIXME
+av_dlog(NULL, " ordered, current length: %u\n", current_length); //FIXME
 used_entries = entries;
 for (; current_entry < used_entries && current_length <= 32; ++current_length) {
 unsigned i, number;
-AV_DEBUG(" number bits: %u ", ilog(entries - current_entry));
+av_dlog(NULL, " number bits: %u ", ilog(entries - current_entry));
 number = get_bits(gb, ilog(entries - current_entry));
-AV_DEBUG(" number: %u\n", number);
+av_dlog(NULL, " number: %u\n", number);
 for (i = current_entry; i < number+current_entry; ++i)
 if (i < used_entries)
@@ -339,7 +331,8 @@ static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc)
 codebook_setup->lookup_type = get_bits(gb, 4);
-AV_DEBUG(" lookup type: %d : %s \n", codebook_setup->lookup_type, codebook_setup->lookup_type ? "vq" : "no lookup");
+av_dlog(NULL, " lookup type: %d : %s \n", codebook_setup->lookup_type,
+codebook_setup->lookup_type ? "vq" : "no lookup");
 // If the codebook is used for (inverse) VQ, calculate codevectors.
@@ -352,14 +345,17 @@ static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc)
 unsigned codebook_value_bits = get_bits(gb, 4) + 1;
 unsigned codebook_sequence_p = get_bits1(gb);
-AV_DEBUG(" We expect %d numbers for building the codevectors. \n", codebook_lookup_values);
-AV_DEBUG(" delta %f minmum %f \n", codebook_delta_value, codebook_minimum_value);
+av_dlog(NULL, " We expect %d numbers for building the codevectors. \n",
+codebook_lookup_values);
+av_dlog(NULL, " delta %f minmum %f \n",
+codebook_delta_value, codebook_minimum_value);
 for (i = 0; i < codebook_lookup_values; ++i) {
 codebook_multiplicands[i] = get_bits(gb, codebook_value_bits);
-AV_DEBUG(" multiplicands*delta+minmum : %e \n", (float)codebook_multiplicands[i]*codebook_delta_value+codebook_minimum_value);
-AV_DEBUG(" multiplicand %u\n", codebook_multiplicands[i]);
+av_dlog(NULL, " multiplicands*delta+minmum : %e \n",
+(float)codebook_multiplicands[i] * codebook_delta_value + codebook_minimum_value);
+av_dlog(NULL, " multiplicand %u\n", codebook_multiplicands[i]);
 }
 // Weed out unused vlcs and build codevector vector
@@ -374,9 +370,7 @@ static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc)
 float last = 0.0;
 unsigned lookup_offset = i;
-#ifdef V_DEBUG
-av_log(vc->avccontext, AV_LOG_INFO, "Lookup offset %u ,", i);
-#endif
+av_dlog(vc->avccontext, "Lookup offset %u ,", i);
 for (k = 0; k < dim; ++k) {
 unsigned multiplicand_offset = lookup_offset % codebook_lookup_values;
@@ -387,12 +381,11 @@ static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc)
 }
 tmp_vlc_bits[j] = tmp_vlc_bits[i];
-#ifdef V_DEBUG
-av_log(vc->avccontext, AV_LOG_INFO, "real lookup offset %u, vector: ", j);
+av_dlog(vc->avccontext, "real lookup offset %u, vector: ", j);
 for (k = 0; k < dim; ++k)
-av_log(vc->avccontext, AV_LOG_INFO, " %f ", codebook_setup->codevectors[j * dim + k]);
-av_log(vc->avccontext, AV_LOG_INFO, "\n");
-#endif
+av_dlog(vc->avccontext, " %f ",
+codebook_setup->codevectors[j * dim + k]);
+av_dlog(vc->avccontext, "\n");
 ++j;
 }
@@ -453,8 +446,8 @@ static int vorbis_parse_setup_hdr_tdtransforms(vorbis_context *vc)
 for (i = 0; i < vorbis_time_count; ++i) {
 unsigned vorbis_tdtransform = get_bits(gb, 16);
-AV_DEBUG(" Vorbis time domain transform %u: %u\n",
-vorbis_time_count, vorbis_tdtransform);
+av_dlog(NULL, " Vorbis time domain transform %u: %u\n",
+vorbis_time_count, vorbis_tdtransform);
 if (vorbis_tdtransform) {
 av_log(vc->avccontext, AV_LOG_ERROR, "Vorbis time domain transform data nonzero. \n");
@@ -485,7 +478,7 @@ static int vorbis_parse_setup_hdr_floors(vorbis_context *vc)
 floor_setup->floor_type = get_bits(gb, 16);
-AV_DEBUG(" %d. floor type %d \n", i, floor_setup->floor_type);
+av_dlog(NULL, " %d. floor type %d \n", i, floor_setup->floor_type);
 if (floor_setup->floor_type == 1) {
 int maximum_class = -1;
@@ -495,29 +488,33 @@ static int vorbis_parse_setup_hdr_floors(vorbis_context *vc)
 floor_setup->data.t1.partitions = get_bits(gb, 5);
-AV_DEBUG(" %d.floor: %d partitions \n", i, floor_setup->data.t1.partitions);
+av_dlog(NULL, " %d.floor: %d partitions \n",
+i, floor_setup->data.t1.partitions);
 for (j = 0; j < floor_setup->data.t1.partitions; ++j) {
 floor_setup->data.t1.partition_class[j] = get_bits(gb, 4);
 if (floor_setup->data.t1.partition_class[j] > maximum_class)
 maximum_class = floor_setup->data.t1.partition_class[j];
-AV_DEBUG(" %d. floor %d partition class %d \n", i, j, floor_setup->data.t1.partition_class[j]);
+av_dlog(NULL, " %d. floor %d partition class %d \n",
+i, j, floor_setup->data.t1.partition_class[j]);
 }
-AV_DEBUG(" maximum class %d \n", maximum_class);
+av_dlog(NULL, " maximum class %d \n", maximum_class);
 for (j = 0; j <= maximum_class; ++j) {
 floor_setup->data.t1.class_dimensions[j] = get_bits(gb, 3) + 1;
 floor_setup->data.t1.class_subclasses[j] = get_bits(gb, 2);
-AV_DEBUG(" %d floor %d class dim: %d subclasses %d \n", i, j, floor_setup->data.t1.class_dimensions[j], floor_setup->data.t1.class_subclasses[j]);
+av_dlog(NULL, " %d floor %d class dim: %d subclasses %d \n", i, j,
+floor_setup->data.t1.class_dimensions[j],
+floor_setup->data.t1.class_subclasses[j]);
 if (floor_setup->data.t1.class_subclasses[j]) {
 GET_VALIDATED_INDEX(floor_setup->data.t1.class_masterbook[j], 8, vc->codebook_count)
-AV_DEBUG(" masterbook: %d \n", floor_setup->data.t1.class_masterbook[j]);
+av_dlog(NULL, " masterbook: %d \n", floor_setup->data.t1.class_masterbook[j]);
 }
 for (k = 0; k < (1 << floor_setup->data.t1.class_subclasses[j]); ++k) {
@@ -526,7 +523,7 @@ static int vorbis_parse_setup_hdr_floors(vorbis_context *vc)
 VALIDATE_INDEX(bits, vc->codebook_count)
 floor_setup->data.t1.subclass_books[j][k] = bits;
-AV_DEBUG(" book %d. : %d \n", k, floor_setup->data.t1.subclass_books[j][k]);
+av_dlog(NULL, " book %d. : %d \n", k, floor_setup->data.t1.subclass_books[j][k]);
 }
 }
@@ -555,8 +552,8 @@ static int vorbis_parse_setup_hdr_floors(vorbis_context *vc)
 for (k = 0; k < floor_setup->data.t1.class_dimensions[floor_setup->data.t1.partition_class[j]]; ++k, ++floor1_values) {
 floor_setup->data.t1.list[floor1_values].x = get_bits(gb, rangebits);
-AV_DEBUG(" %u. floor1 Y coord. %d\n", floor1_values,
-floor_setup->data.t1.list[floor1_values].x);
+av_dlog(NULL, " %u. floor1 Y coord. %d\n", floor1_values,
+floor_setup->data.t1.list[floor1_values].x);
 }
 }
@@ -608,28 +605,26 @@ static int vorbis_parse_setup_hdr_floors(vorbis_context *vc)
 if (!floor_setup->data.t0.lsp)
 return -1;
-#ifdef V_DEBUG /* debug output parsed headers */
-AV_DEBUG("floor0 order: %u\n", floor_setup->data.t0.order);
-AV_DEBUG("floor0 rate: %u\n", floor_setup->data.t0.rate);
-AV_DEBUG("floor0 bark map size: %u\n",
-floor_setup->data.t0.bark_map_size);
-AV_DEBUG("floor0 amplitude bits: %u\n",
-floor_setup->data.t0.amplitude_bits);
-AV_DEBUG("floor0 amplitude offset: %u\n",
-floor_setup->data.t0.amplitude_offset);
-AV_DEBUG("floor0 number of books: %u\n",
-floor_setup->data.t0.num_books);
-AV_DEBUG("floor0 book list pointer: %p\n",
-floor_setup->data.t0.book_list);
+/* debug output parsed headers */
+av_dlog(NULL, "floor0 order: %u\n", floor_setup->data.t0.order);
+av_dlog(NULL, "floor0 rate: %u\n", floor_setup->data.t0.rate);
+av_dlog(NULL, "floor0 bark map size: %u\n",
+floor_setup->data.t0.bark_map_size);
+av_dlog(NULL, "floor0 amplitude bits: %u\n",
+floor_setup->data.t0.amplitude_bits);
+av_dlog(NULL, "floor0 amplitude offset: %u\n",
+floor_setup->data.t0.amplitude_offset);
+av_dlog(NULL, "floor0 number of books: %u\n",
+floor_setup->data.t0.num_books);
+av_dlog(NULL, "floor0 book list pointer: %p\n",
+floor_setup->data.t0.book_list);
 {
 int idx;
 for (idx = 0; idx < floor_setup->data.t0.num_books; ++idx) {
-AV_DEBUG(" Book %d: %u\n",
-idx+1,
-floor_setup->data.t0.book_list[idx]);
+av_dlog(NULL, " Book %d: %u\n", idx + 1,
+floor_setup->data.t0.book_list[idx]);
 }
 }
-#endif
 } else {
 av_log(vc->avccontext, AV_LOG_ERROR, "Invalid floor type!\n");
 return -1;
@@ -648,7 +643,7 @@ static int vorbis_parse_setup_hdr_residues(vorbis_context *vc)
 vc->residue_count = get_bits(gb, 6)+1;
 vc->residues = av_mallocz(vc->residue_count * sizeof(*vc->residues));
-AV_DEBUG(" There are %d residues. \n", vc->residue_count);
+av_dlog(NULL, " There are %d residues. \n", vc->residue_count);
 for (i = 0; i < vc->residue_count; ++i) {
 vorbis_residue *res_setup = &vc->residues[i];
@@ -657,7 +652,7 @@ static int vorbis_parse_setup_hdr_residues(vorbis_context *vc)
 res_setup->type = get_bits(gb, 16);
-AV_DEBUG(" %u. residue type %d\n", i, res_setup->type);
+av_dlog(NULL, " %u. residue type %d\n", i, res_setup->type);
 res_setup->begin = get_bits(gb, 24);
 res_setup->end = get_bits(gb, 24);
@@ -684,8 +679,9 @@ static int vorbis_parse_setup_hdr_residues(vorbis_context *vc)
 if (!res_setup->classifs)
 return AVERROR(ENOMEM);
-AV_DEBUG(" begin %d end %d part.size %d classif.s %d classbook %d \n", res_setup->begin, res_setup->end, res_setup->partition_size,
-res_setup->classifications, res_setup->classbook);
+av_dlog(NULL, " begin %d end %d part.size %d classif.s %d classbook %d \n",
+res_setup->begin, res_setup->end, res_setup->partition_size,
+res_setup->classifications, res_setup->classbook);
 for (j = 0; j < res_setup->classifications; ++j) {
 high_bits = 0;
@@ -694,7 +690,7 @@ static int vorbis_parse_setup_hdr_residues(vorbis_context *vc)
 high_bits = get_bits(gb, 5);
 cascade[j] = (high_bits << 3) + low_bits;
-AV_DEBUG(" %u class cascade depth: %d\n", j, ilog(cascade[j]));
+av_dlog(NULL, " %u class cascade depth: %d\n", j, ilog(cascade[j]));
 }
 res_setup->maxpass = 0;
@@ -703,8 +699,8 @@ static int vorbis_parse_setup_hdr_residues(vorbis_context *vc)
 if (cascade[j]&(1 << k)) {
 GET_VALIDATED_INDEX(res_setup->books[j][k], 8, vc->codebook_count)
-AV_DEBUG(" %u class cascade depth %u book: %d\n",
-j, k, res_setup->books[j][k]);
+av_dlog(NULL, " %u class cascade depth %u book: %d\n",
+j, k, res_setup->books[j][k]);
 if (k>res_setup->maxpass)
 res_setup->maxpass = k;
@@ -727,7 +723,7 @@ static int vorbis_parse_setup_hdr_mappings(vorbis_context *vc)
 vc->mapping_count = get_bits(gb, 6)+1;
 vc->mappings = av_mallocz(vc->mapping_count * sizeof(*vc->mappings));
-AV_DEBUG(" There are %d mappings. \n", vc->mapping_count);
+av_dlog(NULL, " There are %d mappings. \n", vc->mapping_count);
 for (i = 0; i < vc->mapping_count; ++i) {
 vorbis_mapping *mapping_setup = &vc->mappings[i];
@@ -756,8 +752,8 @@ static int vorbis_parse_setup_hdr_mappings(vorbis_context *vc)
 mapping_setup->coupling_steps = 0;
 }
-AV_DEBUG(" %u mapping coupling steps: %d\n",
-i, mapping_setup->coupling_steps);
+av_dlog(NULL, " %u mapping coupling steps: %d\n",
+i, mapping_setup->coupling_steps);
 if (get_bits(gb, 2)) {
 av_log(vc->avccontext, AV_LOG_ERROR, "%u. mapping setup data invalid.\n", i);
@@ -776,10 +772,9 @@ static int vorbis_parse_setup_hdr_mappings(vorbis_context *vc)
 GET_VALIDATED_INDEX(mapping_setup->submap_floor[j], 8, vc->floor_count)
 GET_VALIDATED_INDEX(mapping_setup->submap_residue[j], 8, vc->residue_count)
-AV_DEBUG(" %u mapping %u submap : floor %d, residue %d\n",
-i, j,
-mapping_setup->submap_floor[j],
-mapping_setup->submap_residue[j]);
+av_dlog(NULL, " %u mapping %u submap : floor %d, residue %d\n", i, j,
+mapping_setup->submap_floor[j],
+mapping_setup->submap_residue[j]);
 }
 }
 return 0;
@@ -814,12 +809,9 @@ static void create_map(vorbis_context *vc, unsigned floor_number)
 vf->map_size[blockflag] = n;
 }
-#ifdef V_DEBUG
 for (idx = 0; idx <= n; ++idx) {
-AV_DEBUG("floor0 map: map at pos %d is %d\n",
-idx, map[idx]);
+av_dlog(NULL, "floor0 map: map at pos %d is %d\n", idx, map[idx]);
 }
-#endif
 }
 static int vorbis_parse_setup_hdr_modes(vorbis_context *vc)
@@ -830,7 +822,7 @@ static int vorbis_parse_setup_hdr_modes(vorbis_context *vc)
 vc->mode_count = get_bits(gb, 6) + 1;
 vc->modes = av_mallocz(vc->mode_count * sizeof(*vc->modes));
-AV_DEBUG(" There are %d modes.\n", vc->mode_count);
+av_dlog(NULL, " There are %d modes.\n", vc->mode_count);
 for (i = 0; i < vc->mode_count; ++i) {
 vorbis_mode *mode_setup = &vc->modes[i];
@@ -840,9 +832,9 @@ static int vorbis_parse_setup_hdr_modes(vorbis_context *vc)
 mode_setup->transformtype = get_bits(gb, 16); //FIXME check
 GET_VALIDATED_INDEX(mode_setup->mapping, 8, vc->mapping_count);
-AV_DEBUG(" %u mode: blockflag %d, windowtype %d, transformtype %d, mapping %d\n",
-i, mode_setup->blockflag, mode_setup->windowtype,
-mode_setup->transformtype, mode_setup->mapping);
+av_dlog(NULL, " %u mode: blockflag %d, windowtype %d, transformtype %d, mapping %d\n",
+i, mode_setup->blockflag, mode_setup->windowtype,
+mode_setup->transformtype, mode_setup->mapping);
 }
 return 0;
 }
@@ -950,7 +942,7 @@ static int vorbis_parse_id_hdr(vorbis_context *vc)
 ff_mdct_init(&vc->mdct[0], bl0, 1, -vc->scale_bias);
 ff_mdct_init(&vc->mdct[1], bl1, 1, -vc->scale_bias);
-AV_DEBUG(" vorbis version %d \n audio_channels %d \n audio_samplerate %d \n bitrate_max %d \n bitrate_nom %d \n bitrate_min %d \n blk_0 %d blk_1 %d \n ",
+av_dlog(NULL, " vorbis version %d \n audio_channels %d \n audio_samplerate %d \n bitrate_max %d \n bitrate_nom %d \n bitrate_min %d \n blk_0 %d blk_1 %d \n ",
 vc->version, vc->audio_channels, vc->audio_samplerate, vc->bitrate_maximum, vc->bitrate_nominal, vc->bitrate_minimum, vc->blocksize[0], vc->blocksize[1]);
 /*
@@ -1058,7 +1050,7 @@ static int vorbis_floor0_decode(vorbis_context *vc,
 "floor0 dec: booknumber too high!\n");
 book_idx = 0;
 }
-AV_DEBUG("floor0 dec: booknumber: %u\n", book_idx);
+av_dlog(NULL, "floor0 dec: booknumber: %u\n", book_idx);
 codebook = vc->codebooks[vf->book_list[book_idx]];
 /* Invalid codebook! */
 if (!codebook.codevectors)
@@ -1067,13 +1059,13 @@ static int vorbis_floor0_decode(vorbis_context *vc,
 while (lsp_len<vf->order) {
 int vec_off;
-AV_DEBUG("floor0 dec: book dimension: %d\n", codebook.dimensions);
-AV_DEBUG("floor0 dec: maximum depth: %d\n", codebook.maxdepth);
+av_dlog(NULL, "floor0 dec: book dimension: %d\n", codebook.dimensions);
+av_dlog(NULL, "floor0 dec: maximum depth: %d\n", codebook.maxdepth);
 /* read temp vector */
 vec_off = get_vlc2(&vc->gb, codebook.vlc.table,
 codebook.nb_bits, codebook.maxdepth)
 * codebook.dimensions;
-AV_DEBUG("floor0 dec: vector offset: %d\n", vec_off);
+av_dlog(NULL, "floor0 dec: vector offset: %d\n", vec_off);
 /* copy each vector component and add last to it */
 for (idx = 0; idx < codebook.dimensions; ++idx)
 lsp[lsp_len+idx] = codebook.codevectors[vec_off+idx] + last;
@@ -1081,14 +1073,12 @@ static int vorbis_floor0_decode(vorbis_context *vc,
 lsp_len += codebook.dimensions;
 }
-#ifdef V_DEBUG
 /* DEBUG: output lsp coeffs */
 {
 int idx;
 for (idx = 0; idx < lsp_len; ++idx)
-AV_DEBUG("floor0 dec: coeff at %d is %f\n", idx, lsp[idx]);
+av_dlog(NULL, "floor0 dec: coeff at %d is %f\n", idx, lsp[idx]);
 }
-#endif
 /* synthesize floor output vector */
 {
@@ -1099,9 +1089,6 @@ static int vorbis_floor0_decode(vorbis_context *vc,
 for (i = 0; i < order; i++)
 lsp[i] = 2.0f * cos(lsp[i]);
-AV_DEBUG("floor0 synth: map_size = %d; m = %d; wstep = %f\n",
-vf->map_size, order, wstep);
 i = 0;
 while (i < vf->map_size[blockflag]) {
 int j, iter_cond = vf->map[blockflag][i];
@@ -1141,7 +1128,7 @@ static int vorbis_floor0_decode(vorbis_context *vc,
 return 1;
 }
-AV_DEBUG(" Floor0 decoded\n");
+av_dlog(NULL, " Floor0 decoded\n");
 return 0;
 }
@@ -1168,7 +1155,7 @@ static int vorbis_floor1_decode(vorbis_context *vc,
 floor1_Y[0] = get_bits(gb, ilog(range - 1));
 floor1_Y[1] = get_bits(gb, ilog(range - 1));
-AV_DEBUG("floor 0 Y %d floor 1 Y %d \n", floor1_Y[0], floor1_Y[1]);
+av_dlog(NULL, "floor 0 Y %d floor 1 Y %d \n", floor1_Y[0], floor1_Y[1]);
 offset = 2;
 for (i = 0; i < vf->partitions; ++i) {
@@ -1178,7 +1165,7 @@ static int vorbis_floor1_decode(vorbis_context *vc,
 csub = (1 << cbits) - 1;
 cval = 0;
-AV_DEBUG("Cbits %u\n", cbits);
+av_dlog(NULL, "Cbits %u\n", cbits);
 if (cbits) // this reads all subclasses for this partition's class
 cval = get_vlc2(gb, vc->codebooks[vf->class_masterbook[partition_class]].vlc.table,
@@ -1187,8 +1174,8 @@ static int vorbis_floor1_decode(vorbis_context *vc,
 for (j = 0; j < cdim; ++j) {
 book = vf->subclass_books[partition_class][cval & csub];
-AV_DEBUG("book %d Cbits %u cval %u bits:%d\n",
-book, cbits, cval, get_bits_count(gb));
+av_dlog(NULL, "book %d Cbits %u cval %u bits:%d\n",
+book, cbits, cval, get_bits_count(gb));
 cval = cval >> cbits;
 if (book > -1) {
@@ -1198,7 +1185,8 @@ static int vorbis_floor1_decode(vorbis_context *vc,
 floor1_Y[offset+j] = 0;
 }
-AV_DEBUG(" floor(%d) = %d \n", vf->list[offset+j].x, floor1_Y[offset+j]);
+av_dlog(NULL, " floor(%d) = %d \n",
+vf->list[offset+j].x, floor1_Y[offset+j]);
 }
 offset+=cdim;
 }
@@ -1256,15 +1244,15 @@ static int vorbis_floor1_decode(vorbis_context *vc,
 floor1_Y_final[i] = predicted;
 }
-AV_DEBUG(" Decoded floor(%d) = %u / val %u\n",
-vf->list[i].x, floor1_Y_final[i], val);
+av_dlog(NULL, " Decoded floor(%d) = %u / val %u\n",
+vf->list[i].x, floor1_Y_final[i], val);
 }
 // Curve synth - connect the calculated dots and convert from dB scale FIXME optimize ?
 ff_vorbis_floor1_render_list(vf->list, vf->x_list_dim, floor1_Y_final, floor1_flag, vf->multiplier, vec, vf->list[1].x);
-AV_DEBUG(" Floor decoded\n");
+av_dlog(NULL, " Floor decoded\n");
 return 0;
 }
@@ -1295,7 +1283,7 @@ static av_always_inline int vorbis_residue_decode_internal(vorbis_context *vc,
 ch_used = ch;
 }
-AV_DEBUG(" residue type 0/1/2 decode begin, ch: %d cpc %d \n", ch, c_p_c);
+av_dlog(NULL, " residue type 0/1/2 decode begin, ch: %d cpc %d \n", ch, c_p_c);
 for (pass = 0; pass <= vr->maxpass; ++pass) { // FIXME OPTIMIZE?
 uint16_t voffset, partition_count, j_times_ptns_to_read;
@@ -1309,7 +1297,7 @@ static av_always_inline int vorbis_residue_decode_internal(vorbis_context *vc,
 unsigned temp = get_vlc2(gb, vc->codebooks[vr->classbook].vlc.table,
 vc->codebooks[vr->classbook].nb_bits, 3);
-AV_DEBUG("Classword: %u\n", temp);
+av_dlog(NULL, "Classword: %u\n", temp);
 assert(vr->classifications > 1 && temp <= 65536); //needed for inverse[]
 for (i = 0; i < c_p_c; ++i) {
@@ -1354,7 +1342,8 @@ static av_always_inline int vorbis_residue_decode_internal(vorbis_context *vc,
 for (l = 0; l < dim; ++l, ++voffs) {
 vec[voffs]+=codebook.codevectors[coffs+l]; // FPMATH
-AV_DEBUG(" pass %d offs: %d curr: %f change: %f cv offs.: %d \n", pass, voffs, vec[voffs], codebook.codevectors[coffs+l], coffs);
+av_dlog(NULL, " pass %d offs: %d curr: %f change: %f cv offs.: %d \n",
+pass, voffs, vec[voffs], codebook.codevectors[coffs+l], coffs);
 }
 }
 } else if (vr_type == 2 && ch == 2 && (voffset & 1) == 0 && (dim & 1) == 0) { // most frequent case optimized
@@ -1381,7 +1370,10 @@ static av_always_inline int vorbis_residue_decode_internal(vorbis_context *vc,
 vec[voffs ] += codebook.codevectors[coffs + l ]; // FPMATH
 vec[voffs + vlen] += codebook.codevectors[coffs + l + 1]; // FPMATH
-AV_DEBUG(" pass %d offs: %d curr: %f change: %f cv offs.: %d+%d \n", pass, voffset / ch + (voffs % ch) * vlen, vec[voffset / ch + (voffs % ch) * vlen], codebook.codevectors[coffs + l], coffs, l);
+av_dlog(NULL, " pass %d offs: %d curr: %f change: %f cv offs.: %d+%d \n",
+pass, voffset / ch + (voffs % ch) * vlen,
+vec[voffset / ch + (voffs % ch) * vlen],
+codebook.codevectors[coffs + l], coffs, l);
 }
 }
@@ -1393,7 +1385,10 @@ static av_always_inline int vorbis_residue_decode_internal(vorbis_context *vc,
 for (l = 0; l < dim; ++l, ++voffs) {
 vec[voffs / ch + (voffs % ch) * vlen] += codebook.codevectors[coffs + l]; // FPMATH FIXME use if and counter instead of / and %
-AV_DEBUG(" pass %d offs: %d curr: %f change: %f cv offs.: %d+%d \n", pass, voffset / ch + (voffs % ch) * vlen, vec[voffset / ch + (voffs % ch) * vlen], codebook.codevectors[coffs + l], coffs, l);
+av_dlog(NULL, " pass %d offs: %d curr: %f change: %f cv offs.: %d+%d \n",
+pass, voffset / ch + (voffs % ch) * vlen,
+vec[voffset / ch + (voffs % ch) * vlen],
+codebook.codevectors[coffs + l], coffs, l);
 }
 }
 }
@@ -1481,8 +1476,8 @@ static int vorbis_parse_audio_packet(vorbis_context *vc)
 vc->mode_number = mode_number;
 mapping = &vc->mappings[vc->modes[mode_number].mapping];
-AV_DEBUG(" Mode number: %u , mapping: %d , blocktype %d\n", mode_number,
-vc->modes[mode_number].mapping, vc->modes[mode_number].blockflag);
+av_dlog(NULL, " Mode number: %u , mapping: %d , blocktype %d\n", mode_number,
+vc->modes[mode_number].mapping, vc->modes[mode_number].blockflag);
 blockflag = vc->modes[mode_number].blockflag;
 blocksize = vc->blocksize[blockflag];
@@ -1611,7 +1606,7 @@ static int vorbis_decode_frame(AVCodecContext *avccontext,
 if (!buf_size)
 return 0;
-AV_DEBUG("packet length %d \n", buf_size);
+av_dlog(NULL, "packet length %d \n", buf_size);
 init_get_bits(gb, buf, buf_size*8);
@@ -1628,7 +1623,8 @@ static int vorbis_decode_frame(AVCodecContext *avccontext,
 return buf_size ;
 }
-AV_DEBUG("parsed %d bytes %d bits, returned %d samples (*ch*bits) \n", get_bits_count(gb)/8, get_bits_count(gb)%8, len);
+av_dlog(NULL, "parsed %d bytes %d bits, returned %d samples (*ch*bits) \n",
+get_bits_count(gb) / 8, get_bits_count(gb) % 8, len);
 if (vc->audio_channels > 8) {
 for (i = 0; i < vc->audio_channels; i++)
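The last vorbis hunk keeps the byte/bit accounting in the converted log line; both numbers come from get_bits_count(), which reports how many bits have been consumed from the GetBitContext. A small illustrative helper (not part of the patch) showing the same split:

    #include "get_bits.h"
    #include "libavutil/log.h"

    static void log_consumed_sketch(void *log_ctx, GetBitContext *gb)
    {
        int bits = get_bits_count(gb);
        av_dlog(log_ctx, "consumed %d bytes %d bits\n", bits / 8, bits % 8);
    }

Passing NULL as the log context, as most converted call sites here do, logs against the default context rather than a specific codec instance.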
@@ -90,14 +90,6 @@
 #define CPLZ_TAG MKBETAG('C', 'P', 'L', 'Z')
 #define VPTZ_TAG MKBETAG('V', 'P', 'T', 'Z')
-#define VQA_DEBUG 0
-#if VQA_DEBUG
-#define vqa_debug printf
-#else
-static inline void vqa_debug(const char *format, ...) { }
-#endif
 typedef struct VqaContext {
 AVCodecContext *avctx;
@@ -213,7 +205,7 @@ static void decode_format80(const unsigned char *src, int src_size,
 while (src_index < src_size) {
-vqa_debug(" opcode %02X: ", src[src_index]);
+av_dlog(NULL, " opcode %02X: ", src[src_index]);
 /* 0x80 means that frame is finished */
 if (src[src_index] == 0x80)
@@ -232,7 +224,7 @@ static void decode_format80(const unsigned char *src, int src_size,
 src_index += 2;
 src_pos = AV_RL16(&src[src_index]);
 src_index += 2;
-vqa_debug("(1) copy %X bytes from absolute pos %X\n", count, src_pos);
+av_dlog(NULL, "(1) copy %X bytes from absolute pos %X\n", count, src_pos);
 CHECK_COUNT();
 for (i = 0; i < count; i++)
 dest[dest_index + i] = dest[src_pos + i];
@@ -244,7 +236,7 @@ static void decode_format80(const unsigned char *src, int src_size,
 count = AV_RL16(&src[src_index]);
 src_index += 2;
 color = src[src_index++];
-vqa_debug("(2) set %X bytes to %02X\n", count, color);
+av_dlog(NULL, "(2) set %X bytes to %02X\n", count, color);
 CHECK_COUNT();
 memset(&dest[dest_index], color, count);
 dest_index += count;
@@ -254,7 +246,7 @@ static void decode_format80(const unsigned char *src, int src_size,
 count = (src[src_index++] & 0x3F) + 3;
 src_pos = AV_RL16(&src[src_index]);
 src_index += 2;
-vqa_debug("(3) copy %X bytes from absolute pos %X\n", count, src_pos);
+av_dlog(NULL, "(3) copy %X bytes from absolute pos %X\n", count, src_pos);
 CHECK_COUNT();
 for (i = 0; i < count; i++)
 dest[dest_index + i] = dest[src_pos + i];
@@ -263,7 +255,7 @@ static void decode_format80(const unsigned char *src, int src_size,
 } else if (src[src_index] > 0x80) {
 count = src[src_index++] & 0x3F;
-vqa_debug("(4) copy %X bytes from source to dest\n", count);
+av_dlog(NULL, "(4) copy %X bytes from source to dest\n", count);
 CHECK_COUNT();
 memcpy(&dest[dest_index], &src[src_index], count);
 src_index += count;
@@ -274,7 +266,7 @@ static void decode_format80(const unsigned char *src, int src_size,
 count = ((src[src_index] & 0x70) >> 4) + 3;
 src_pos = AV_RB16(&src[src_index]) & 0x0FFF;
 src_index += 2;
-vqa_debug("(5) copy %X bytes from relpos %X\n", count, src_pos);
+av_dlog(NULL, "(5) copy %X bytes from relpos %X\n", count, src_pos);
 CHECK_COUNT();
 for (i = 0; i < count; i++)
 dest[dest_index + i] = dest[dest_index - src_pos + i];
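Taken together, the retained debug strings outline the opcode space that decode_format80() walks; a rough summary, reconstructed from the messages and the count/offset extraction visible above (only cases (4) and (5) show their trigger bytes in these hunks, so the rest are labelled by their debug numbers rather than exact byte values):

    /*
     * format80 opcode sketch:
     *   0x80        end of compressed stream
     *   (1)         long copy: 16-bit count from a 16-bit absolute position
     *   (2)         long fill: 16-bit count of a single colour byte
     *   (3)         short copy: (opcode & 0x3F) + 3 bytes from an absolute position
     *   (4) > 0x80  literal run: copy (opcode & 0x3F) bytes straight from the source
     *   (5) < 0x80  short copy: ((opcode & 0x70) >> 4) + 3 bytes from a 12-bit relative position
     */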
@@ -133,7 +133,7 @@ ADD4x4IDCT avx
 %macro ADD16_OP 3
 cmp byte [r4+%3], 0
 jz .skipblock%2
-mov r5d, dword [r1+%2*4]
+mov r5d, [r1+%2*4]
 call add4x4_idct_%1
 .skipblock%2:
 %if %2<15
@@ -159,7 +159,7 @@ cglobal h264_idct_add16_10_%1, 5,6
 ADD16_OP %1, 13, 7+3*8
 ADD16_OP %1, 14, 6+4*8
 ADD16_OP %1, 15, 7+4*8
-RET
+REP_RET
 %endmacro
 INIT_XMM
@@ -201,7 +201,7 @@ IDCT_ADD16_10 avx
 INIT_MMX
 cglobal h264_idct_dc_add_10_mmx2,3,3
-movd m0, dword [r1]
+movd m0, [r1]
 paddd m0, [pd_32]
 psrad m0, 6
 lea r1, [r2*3]
@@ -215,7 +215,7 @@ cglobal h264_idct_dc_add_10_mmx2,3,3
 ;-----------------------------------------------------------------------------
 %macro IDCT8_DC_ADD 1
 cglobal h264_idct8_dc_add_10_%1,3,3,7
-mov r1d, dword [r1]
+mov r1d, [r1]
 add r1, 32
 sar r1, 6
 movd m0, r1d
@@ -240,26 +240,27 @@ IDCT8_DC_ADD avx
 ;-----------------------------------------------------------------------------
 %macro AC 2
 .ac%2
-mov r5d, dword [r1+(%2+0)*4]
+mov r5d, [r1+(%2+0)*4]
 call add4x4_idct_%1
-mov r5d, dword [r1+(%2+1)*4]
+mov r5d, [r1+(%2+1)*4]
 add r2, 64
 call add4x4_idct_%1
 add r2, 64
 jmp .skipadd%2
 %endmacro
+%assign last_block 16
 %macro ADD16_OP_INTRA 3
-cmp word [r4+%3], 0
+cmp word [r4+%3], 0
 jnz .ac%2
-mov r6d, dword [r2+ 0]
-or r6d, dword [r2+64]
+mov r5d, [r2+ 0]
+or r5d, [r2+64]
 jz .skipblock%2
-mov r5d, dword [r1+(%2+0)*4]
+mov r5d, [r1+(%2+0)*4]
 call idct_dc_add_%1
 .skipblock%2:
-%if %2<15
-add r2, 128
+%if %2<last_block-2
+add r2, 128
 %endif
 .skipadd%2:
 %endmacro
@@ -287,12 +288,15 @@ cglobal h264_idct_add16intra_10_%1,5,7,8
 ADD16_OP_INTRA %1, 10, 4+4*8
 ADD16_OP_INTRA %1, 12, 6+3*8
 ADD16_OP_INTRA %1, 14, 6+4*8
-RET
-%assign i 14
-%rep 8
-AC %1, i
-%assign i i-2
-%endrep
+REP_RET
+AC %1, 8
+AC %1, 10
+AC %1, 12
+AC %1, 14
+AC %1, 0
+AC %1, 2
+AC %1, 4
+AC %1, 6
 %endmacro
 INIT_XMM
@@ -302,47 +306,33 @@ INIT_AVX | |||||
IDCT_ADD16INTRA_10 avx | IDCT_ADD16INTRA_10 avx | ||||
%endif | %endif | ||||
%assign last_block 24 | |||||
;----------------------------------------------------------------------------- | ;----------------------------------------------------------------------------- | ||||
; h264_idct_add8(pixel **dst, const int *block_offset, dctcoef *block, int stride, const uint8_t nnzc[6*8]) | ; h264_idct_add8(pixel **dst, const int *block_offset, dctcoef *block, int stride, const uint8_t nnzc[6*8]) | ||||
;----------------------------------------------------------------------------- | ;----------------------------------------------------------------------------- | ||||
%macro IDCT_ADD8 1 | %macro IDCT_ADD8 1 | ||||
cglobal h264_idct_add8_10_%1,5,7 | cglobal h264_idct_add8_10_%1,5,7 | ||||
mov r5, 16 | |||||
add r2, 1024 | |||||
%ifdef PIC | |||||
lea r11, [scan8_mem] | |||||
%endif | |||||
%ifdef ARCH_X86_64 | |||||
mov r10, r0 | |||||
%endif | |||||
.nextblock: | |||||
movzx r6, byte [scan8+r5] | |||||
movzx r6, byte [r4+r6] | |||||
or r6d, dword [r2] | |||||
test r6, r6 | |||||
jz .skipblock | |||||
%ifdef ARCH_X86_64 | %ifdef ARCH_X86_64 | ||||
mov r0d, dword [r1+r5*4] | |||||
add r0, [r10] | |||||
%else | |||||
mov r0, r0m | |||||
mov r0, [r0] | |||||
add r0, dword [r1+r5*4] | |||||
mov r10, r0 | |||||
%endif | %endif | ||||
IDCT4_ADD_10 r0, r2, r3 | |||||
.skipblock: | |||||
inc r5 | |||||
add r2, 64 | |||||
test r5, 3 | |||||
jnz .nextblock | |||||
add r2, 1024 | |||||
mov r0, [r0] | |||||
ADD16_OP_INTRA %1, 16, 1+1*8 | |||||
ADD16_OP_INTRA %1, 18, 1+2*8 | |||||
%ifdef ARCH_X86_64 | %ifdef ARCH_X86_64 | ||||
add r10, gprsize | |||||
mov r0, [r10+gprsize] | |||||
%else | %else | ||||
add r0mp, gprsize | |||||
mov r0, r0m | |||||
mov r0, [r0+gprsize] | |||||
%endif | %endif | ||||
test r5, 4 | |||||
jnz .nextblock | |||||
ADD16_OP_INTRA %1, 20, 1+4*8 | |||||
ADD16_OP_INTRA %1, 22, 1+5*8 | |||||
REP_RET | REP_RET | ||||
AC %1, 16 | |||||
AC %1, 18 | |||||
AC %1, 20 | |||||
AC %1, 22 | |||||
%endmacro ; IDCT_ADD8 | %endmacro ; IDCT_ADD8 | ||||
INIT_XMM | INIT_XMM | ||||
@@ -356,51 +346,51 @@ IDCT_ADD8 avx | |||||
; void h264_idct8_add(pixel *dst, dctcoef *block, int stride) | ; void h264_idct8_add(pixel *dst, dctcoef *block, int stride) | ||||
;----------------------------------------------------------------------------- | ;----------------------------------------------------------------------------- | ||||
%macro IDCT8_1D 2 | %macro IDCT8_1D 2 | ||||
SWAP 0, 1 | |||||
psrad m4, m5, 1 | |||||
psrad m1, m0, 1 | |||||
paddd m4, m5 | |||||
paddd m1, m0 | |||||
paddd m4, m7 | |||||
paddd m1, m5 | |||||
psubd m4, m0 | |||||
paddd m1, m3 | |||||
psubd m0, m3 | |||||
psubd m5, m3 | |||||
paddd m0, m7 | |||||
psubd m5, m7 | |||||
psrad m3, 1 | |||||
psrad m7, 1 | |||||
psubd m0, m3 | |||||
psubd m5, m7 | |||||
SWAP 1, 7 | |||||
psrad m1, m7, 2 | |||||
psrad m3, m4, 2 | |||||
paddd m3, m0 | |||||
psrad m0, 2 | |||||
paddd m1, m5 | |||||
psrad m5, 2 | |||||
psubd m0, m4 | |||||
psubd m7, m5 | |||||
SWAP 5, 6 | |||||
psrad m4, m2, 1 | |||||
psrad m6, m5, 1 | |||||
psubd m4, m5 | |||||
paddd m6, m2 | |||||
mova m2, %1 | |||||
mova m5, %2 | |||||
SUMSUB_BA d, 5, 2 | |||||
SUMSUB_BA d, 6, 5 | |||||
SUMSUB_BA d, 4, 2 | |||||
SUMSUB_BA d, 7, 6 | |||||
SUMSUB_BA d, 0, 4 | |||||
SUMSUB_BA d, 3, 2 | |||||
SUMSUB_BA d, 1, 5 | |||||
SWAP 7, 6, 4, 5, 2, 3, 1, 0 ; 70315246 -> 01234567 | |||||
SWAP 0, 1 | |||||
psrad m4, m5, 1 | |||||
psrad m1, m0, 1 | |||||
paddd m4, m5 | |||||
paddd m1, m0 | |||||
paddd m4, m7 | |||||
paddd m1, m5 | |||||
psubd m4, m0 | |||||
paddd m1, m3 | |||||
psubd m0, m3 | |||||
psubd m5, m3 | |||||
paddd m0, m7 | |||||
psubd m5, m7 | |||||
psrad m3, 1 | |||||
psrad m7, 1 | |||||
psubd m0, m3 | |||||
psubd m5, m7 | |||||
SWAP 1, 7 | |||||
psrad m1, m7, 2 | |||||
psrad m3, m4, 2 | |||||
paddd m3, m0 | |||||
psrad m0, 2 | |||||
paddd m1, m5 | |||||
psrad m5, 2 | |||||
psubd m0, m4 | |||||
psubd m7, m5 | |||||
SWAP 5, 6 | |||||
psrad m4, m2, 1 | |||||
psrad m6, m5, 1 | |||||
psubd m4, m5 | |||||
paddd m6, m2 | |||||
mova m2, %1 | |||||
mova m5, %2 | |||||
SUMSUB_BA d, 5, 2 | |||||
SUMSUB_BA d, 6, 5 | |||||
SUMSUB_BA d, 4, 2 | |||||
SUMSUB_BA d, 7, 6 | |||||
SUMSUB_BA d, 0, 4 | |||||
SUMSUB_BA d, 3, 2 | |||||
SUMSUB_BA d, 1, 5 | |||||
SWAP 7, 6, 4, 5, 2, 3, 1, 0 ; 70315246 -> 01234567 | |||||
%endmacro | %endmacro | ||||
%macro IDCT8_1D_FULL 1 | %macro IDCT8_1D_FULL 1 | ||||
@@ -536,7 +526,7 @@ IDCT8_ADD avx | |||||
%macro IDCT8_ADD4_OP 3 | %macro IDCT8_ADD4_OP 3 | ||||
cmp byte [r4+%3], 0 | cmp byte [r4+%3], 0 | ||||
jz .skipblock%2 | jz .skipblock%2 | ||||
mov r0d, dword [r6+%2*4] | |||||
mov r0d, [r6+%2*4] | |||||
add r0, r5 | add r0, r5 | ||||
call h264_idct8_add1_10_%1 | call h264_idct8_add1_10_%1 | ||||
.skipblock%2: | .skipblock%2: | ||||
@@ -836,7 +836,6 @@ PRED8x8_H ssse3 | |||||
;----------------------------------------------------------------------------- | ;----------------------------------------------------------------------------- | ||||
; void pred8x8_top_dc_mmxext(uint8_t *src, int stride) | ; void pred8x8_top_dc_mmxext(uint8_t *src, int stride) | ||||
;----------------------------------------------------------------------------- | ;----------------------------------------------------------------------------- | ||||
%ifdef CONFIG_GPL | |||||
cglobal pred8x8_top_dc_mmxext, 2,5 | cglobal pred8x8_top_dc_mmxext, 2,5 | ||||
sub r0, r1 | sub r0, r1 | ||||
movq mm0, [r0] | movq mm0, [r0] | ||||
@@ -927,7 +926,6 @@ cglobal pred8x8_dc_mmxext, 2,5 | |||||
movq [r4+r1*1], m1 | movq [r4+r1*1], m1 | ||||
movq [r4+r1*2], m1 | movq [r4+r1*2], m1 | ||||
RET | RET | ||||
%endif | |||||
;----------------------------------------------------------------------------- | ;----------------------------------------------------------------------------- | ||||
; void pred8x8_dc_rv40(uint8_t *src, int stride) | ; void pred8x8_dc_rv40(uint8_t *src, int stride) | ||||
@@ -1083,7 +1081,6 @@ cglobal pred8x8_tm_vp8_ssse3, 2,3,6 | |||||
;----------------------------------------------------------------------------- | ;----------------------------------------------------------------------------- | ||||
; void pred8x8l_top_dc(uint8_t *src, int has_topleft, int has_topright, int stride) | ; void pred8x8l_top_dc(uint8_t *src, int has_topleft, int has_topright, int stride) | ||||
;----------------------------------------------------------------------------- | ;----------------------------------------------------------------------------- | ||||
%ifdef CONFIG_GPL | |||||
%macro PRED8x8L_TOP_DC 1 | %macro PRED8x8L_TOP_DC 1 | ||||
cglobal pred8x8l_top_dc_%1, 4,4 | cglobal pred8x8l_top_dc_%1, 4,4 | ||||
sub r0, r3 | sub r0, r3 | ||||
@@ -2476,7 +2473,6 @@ PRED8x8L_HORIZONTAL_DOWN sse2 | |||||
INIT_MMX | INIT_MMX | ||||
%define PALIGNR PALIGNR_SSSE3 | %define PALIGNR PALIGNR_SSSE3 | ||||
PRED8x8L_HORIZONTAL_DOWN ssse3 | PRED8x8L_HORIZONTAL_DOWN ssse3 | ||||
%endif | |||||
;----------------------------------------------------------------------------- | ;----------------------------------------------------------------------------- | ||||
; void pred4x4_dc_mmxext(uint8_t *src, const uint8_t *topright, int stride) | ; void pred4x4_dc_mmxext(uint8_t *src, const uint8_t *topright, int stride) | ||||
@@ -2608,7 +2604,6 @@ cglobal pred4x4_vertical_vp8_mmxext, 3,3 | |||||
;----------------------------------------------------------------------------- | ;----------------------------------------------------------------------------- | ||||
; void pred4x4_down_left_mmxext(uint8_t *src, const uint8_t *topright, int stride) | ; void pred4x4_down_left_mmxext(uint8_t *src, const uint8_t *topright, int stride) | ||||
;----------------------------------------------------------------------------- | ;----------------------------------------------------------------------------- | ||||
%ifdef CONFIG_GPL
INIT_MMX
cglobal pred4x4_down_left_mmxext, 3,3
sub r0, r2
@@ -2786,4 +2781,3 @@ cglobal pred4x4_down_right_mmxext, 3,3
psrlq m0, 8
movh [r0+r2*1], m0
RET
%endif
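With the CONFIG_GPL guards dropped, these MMX/SSSE3 intra predictors are assembled whenever the usual yasm check passes. For orientation, the prediction that pred8x8_top_dc_mmxext accelerates does roughly the following; this is a sketch of the algorithm, not the exact libavcodec reference code:

    #include <stdint.h>
    #include <stddef.h>

    /* Each 4-pixel-wide half of the 8x8 chroma block is filled with the
     * rounded average of the four samples directly above it. */
    static void pred8x8_top_dc_sketch(uint8_t *src, ptrdiff_t stride)
    {
        for (int half = 0; half < 2; half++) {
            int dc = 2;                               /* rounding term */
            for (int x = 0; x < 4; x++)
                dc += src[4 * half + x - stride];
            dc >>= 2;
            for (int y = 0; y < 8; y++)
                for (int x = 0; x < 4; x++)
                    src[y * stride + 4 * half + x] = dc;
        }
    }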
@@ -129,7 +129,6 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth | |||||
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_mmxext; | h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_mmxext; | ||||
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_mmxext; | h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_mmxext; | ||||
h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_mmxext; | h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_mmxext; | ||||
#if CONFIG_GPL | |||||
h->pred8x8l [TOP_DC_PRED ] = ff_pred8x8l_top_dc_mmxext; | h->pred8x8l [TOP_DC_PRED ] = ff_pred8x8l_top_dc_mmxext; | ||||
h->pred8x8l [DC_PRED ] = ff_pred8x8l_dc_mmxext; | h->pred8x8l [DC_PRED ] = ff_pred8x8l_dc_mmxext; | ||||
h->pred8x8l [HOR_PRED ] = ff_pred8x8l_horizontal_mmxext; | h->pred8x8l [HOR_PRED ] = ff_pred8x8l_horizontal_mmxext; | ||||
@@ -142,9 +141,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth | |||||
h->pred4x4 [DIAG_DOWN_RIGHT_PRED] = ff_pred4x4_down_right_mmxext; | h->pred4x4 [DIAG_DOWN_RIGHT_PRED] = ff_pred4x4_down_right_mmxext; | ||||
h->pred4x4 [VERT_RIGHT_PRED ] = ff_pred4x4_vertical_right_mmxext; | h->pred4x4 [VERT_RIGHT_PRED ] = ff_pred4x4_vertical_right_mmxext; | ||||
h->pred4x4 [HOR_DOWN_PRED ] = ff_pred4x4_horizontal_down_mmxext; | h->pred4x4 [HOR_DOWN_PRED ] = ff_pred4x4_horizontal_down_mmxext; | ||||
#endif | |||||
h->pred4x4 [DC_PRED ] = ff_pred4x4_dc_mmxext; | h->pred4x4 [DC_PRED ] = ff_pred4x4_dc_mmxext; | ||||
#if CONFIG_GPL | |||||
if (codec_id == CODEC_ID_VP8 || codec_id == CODEC_ID_H264) | if (codec_id == CODEC_ID_VP8 || codec_id == CODEC_ID_H264) | ||||
h->pred4x4 [DIAG_DOWN_LEFT_PRED ] = ff_pred4x4_down_left_mmxext; | h->pred4x4 [DIAG_DOWN_LEFT_PRED ] = ff_pred4x4_down_left_mmxext; | ||||
if (codec_id == CODEC_ID_SVQ3 || codec_id == CODEC_ID_H264) | if (codec_id == CODEC_ID_SVQ3 || codec_id == CODEC_ID_H264) | ||||
@@ -156,7 +153,6 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth | |||||
h->pred8x8 [TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_mmxext; | h->pred8x8 [TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_mmxext; | ||||
h->pred8x8 [DC_PRED8x8 ] = ff_pred8x8_dc_mmxext; | h->pred8x8 [DC_PRED8x8 ] = ff_pred8x8_dc_mmxext; | ||||
} | } | ||||
#endif | |||||
if (codec_id == CODEC_ID_VP8) { | if (codec_id == CODEC_ID_VP8) { | ||||
h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_tm_vp8_mmxext; | h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_tm_vp8_mmxext; | ||||
h->pred8x8 [DC_PRED8x8 ] = ff_pred8x8_dc_rv40_mmxext; | h->pred8x8 [DC_PRED8x8 ] = ff_pred8x8_dc_rv40_mmxext; | ||||
@@ -181,13 +177,11 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth | |||||
if (mm_flags & AV_CPU_FLAG_SSE2) { | if (mm_flags & AV_CPU_FLAG_SSE2) { | ||||
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_sse2; | h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_sse2; | ||||
#if CONFIG_GPL | |||||
h->pred8x8l [DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_sse2; | h->pred8x8l [DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_sse2; | ||||
h->pred8x8l [DIAG_DOWN_RIGHT_PRED] = ff_pred8x8l_down_right_sse2; | h->pred8x8l [DIAG_DOWN_RIGHT_PRED] = ff_pred8x8l_down_right_sse2; | ||||
h->pred8x8l [VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_sse2; | h->pred8x8l [VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_sse2; | ||||
h->pred8x8l [VERT_LEFT_PRED ] = ff_pred8x8l_vertical_left_sse2; | h->pred8x8l [VERT_LEFT_PRED ] = ff_pred8x8l_vertical_left_sse2; | ||||
h->pred8x8l [HOR_DOWN_PRED ] = ff_pred8x8l_horizontal_down_sse2; | h->pred8x8l [HOR_DOWN_PRED ] = ff_pred8x8l_horizontal_down_sse2; | ||||
#endif | |||||
if (codec_id == CODEC_ID_VP8) { | if (codec_id == CODEC_ID_VP8) { | ||||
h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_tm_vp8_sse2; | h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_tm_vp8_sse2; | ||||
h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_tm_vp8_sse2; | h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_tm_vp8_sse2; | ||||
@@ -207,7 +201,6 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth | |||||
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_ssse3; | h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_ssse3; | ||||
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_ssse3; | h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_ssse3; | ||||
h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_ssse3; | h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_ssse3; | ||||
#if CONFIG_GPL | |||||
h->pred8x8l [TOP_DC_PRED ] = ff_pred8x8l_top_dc_ssse3; | h->pred8x8l [TOP_DC_PRED ] = ff_pred8x8l_top_dc_ssse3; | ||||
h->pred8x8l [DC_PRED ] = ff_pred8x8l_dc_ssse3; | h->pred8x8l [DC_PRED ] = ff_pred8x8l_dc_ssse3; | ||||
h->pred8x8l [HOR_PRED ] = ff_pred8x8l_horizontal_ssse3; | h->pred8x8l [HOR_PRED ] = ff_pred8x8l_horizontal_ssse3; | ||||
@@ -218,7 +211,6 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
h->pred8x8l [VERT_LEFT_PRED ] = ff_pred8x8l_vertical_left_ssse3;
h->pred8x8l [HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_ssse3;
h->pred8x8l [HOR_DOWN_PRED ] = ff_pred8x8l_horizontal_down_ssse3;
#endif
if (codec_id == CODEC_ID_VP8) {
h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_tm_vp8_ssse3;
h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_ssse3;
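What these hunks un-gate is the standard CPU-feature dispatch: the prediction table starts with C fallbacks and is overwritten with the asm versions when the matching CPU flag is set, now without a license check in between. A minimal self-contained sketch of that idea, using stub names rather than the real H264PredContext:

    #include <stdint.h>
    #include <stddef.h>

    typedef void (*pred4x4_fn)(uint8_t *src, const uint8_t *topright, ptrdiff_t stride);

    static void pred4x4_dc_c_stub(uint8_t *s, const uint8_t *t, ptrdiff_t st)      { (void)s; (void)t; (void)st; }
    static void pred4x4_dc_mmxext_stub(uint8_t *s, const uint8_t *t, ptrdiff_t st) { (void)s; (void)t; (void)st; }

    enum { DC_PRED4x4 = 0, NB_PRED4x4 = 16 };

    static void pred_init_sketch(pred4x4_fn tab[NB_PRED4x4], int have_mmxext)
    {
        tab[DC_PRED4x4] = pred4x4_dc_c_stub;          /* portable fallback        */
        if (have_mmxext)
            tab[DC_PRED4x4] = pred4x4_dc_mmxext_stub; /* no CONFIG_GPL gate now   */
    }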
@@ -54,11 +54,10 @@ typedef struct { | |||||
int video_fd; | int video_fd; | ||||
int tuner_fd; | int tuner_fd; | ||||
int width, height; | int width, height; | ||||
int frame_rate; | |||||
int frame_rate_base; | |||||
uint64_t per_frame; | uint64_t per_frame; | ||||
int standard; | int standard; | ||||
char *video_size; /**< String describing video size, set by a private option. */ | char *video_size; /**< String describing video size, set by a private option. */ | ||||
char *framerate; /**< Set by a private option. */ | |||||
} VideoData; | } VideoData; | ||||
@@ -249,8 +248,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) | |||||
VideoData *s = s1->priv_data; | VideoData *s = s1->priv_data; | ||||
AVStream *st; | AVStream *st; | ||||
int width, height; | int width, height; | ||||
int frame_rate; | |||||
int frame_rate_base; | |||||
AVRational fps; | |||||
int ret = 0; | int ret = 0; | ||||
if (ap->time_base.den <= 0) { | if (ap->time_base.den <= 0) { | ||||
@@ -262,14 +260,18 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) | |||||
av_log(s1, AV_LOG_ERROR, "Couldn't parse video size.\n"); | av_log(s1, AV_LOG_ERROR, "Couldn't parse video size.\n"); | ||||
goto out; | goto out; | ||||
} | } | ||||
if ((ret = av_parse_video_rate(&fps, s->framerate)) < 0) { | |||||
av_log(s1, AV_LOG_ERROR, "Couldn't parse framerate.\n"); | |||||
goto out; | |||||
} | |||||
#if FF_API_FORMAT_PARAMETERS | #if FF_API_FORMAT_PARAMETERS | ||||
if (ap->width > 0) | if (ap->width > 0) | ||||
width = ap->width; | width = ap->width; | ||||
if (ap->height > 0) | if (ap->height > 0) | ||||
height = ap->height; | height = ap->height; | ||||
if (ap->time_base.num) | |||||
fps = (AVRational){ap->time_base.den, ap->time_base.num}; | |||||
#endif | #endif | ||||
frame_rate = ap->time_base.den; | |||||
frame_rate_base = ap->time_base.num; | |||||
st = av_new_stream(s1, 0); | st = av_new_stream(s1, 0); | ||||
if (!st) { | if (!st) { | ||||
@@ -280,17 +282,15 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) | |||||
s->width = width; | s->width = width; | ||||
s->height = height; | s->height = height; | ||||
s->frame_rate = frame_rate; | |||||
s->frame_rate_base = frame_rate_base; | |||||
s->per_frame = ((uint64_t)1000000 * s->frame_rate_base) / s->frame_rate; | |||||
s->per_frame = ((uint64_t)1000000 * fps.den) / fps.num; | |||||
st->codec->codec_type = AVMEDIA_TYPE_VIDEO; | st->codec->codec_type = AVMEDIA_TYPE_VIDEO; | ||||
st->codec->pix_fmt = PIX_FMT_YUV420P; | st->codec->pix_fmt = PIX_FMT_YUV420P; | ||||
st->codec->codec_id = CODEC_ID_RAWVIDEO; | st->codec->codec_id = CODEC_ID_RAWVIDEO; | ||||
st->codec->width = width; | st->codec->width = width; | ||||
st->codec->height = height; | st->codec->height = height; | ||||
st->codec->time_base.den = frame_rate; | |||||
st->codec->time_base.num = frame_rate_base; | |||||
st->codec->time_base.den = fps.num; | |||||
st->codec->time_base.num = fps.den; | |||||
#if FF_API_FORMAT_PARAMETERS | #if FF_API_FORMAT_PARAMETERS | ||||
if (ap->standard) { | if (ap->standard) { | ||||
@@ -314,6 +314,7 @@ static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) | |||||
out: | out: | ||||
av_freep(&s->video_size); | av_freep(&s->video_size); | ||||
av_freep(&s->framerate); | |||||
return ret; | return ret; | ||||
} | } | ||||
@@ -346,6 +347,7 @@ static const AVOption options[] = {
{ "PALM", "", 0, FF_OPT_TYPE_CONST, {.dbl = PALM}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "NTSCJ", "", 0, FF_OPT_TYPE_CONST, {.dbl = NTSCJ}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), FF_OPT_TYPE_STRING, {.str = "vga"}, 0, 0, DEC },
{ "framerate", "", OFFSET(framerate), FF_OPT_TYPE_STRING, {.str = "ntsc"}, 0, 0, DEC },
{ NULL },
};
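The new "framerate" option is a plain string; all the bktr header code does with it is a single av_parse_video_rate() call, which accepts numeric forms as well as the standard abbreviations. A small standalone illustration, assuming libavutil headers are installed system-wide:

    #include <stdio.h>
    #include <libavutil/parseutils.h>
    #include <libavutil/rational.h>

    int main(void)
    {
        AVRational fps;
        /* "ntsc", "25" and "30000/1001" are all accepted forms */
        if (av_parse_video_rate(&fps, "ntsc") < 0)
            return 1;
        printf("%d/%d\n", fps.num, fps.den);   /* prints 30000/1001 */
        return 0;
    }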
@@ -33,9 +33,6 @@ | |||||
#include "libavutil/log.h" | #include "libavutil/log.h" | ||||
#include "libavutil/opt.h" | #include "libavutil/opt.h" | ||||
#include "avdevice.h" | #include "avdevice.h" | ||||
#undef DV1394_DEBUG | |||||
#include "libavformat/dv.h" | #include "libavformat/dv.h" | ||||
#include "dv1394.h" | #include "dv1394.h" | ||||
@@ -177,15 +174,13 @@ restart_poll: | |||||
av_log(context, AV_LOG_ERROR, "Failed to get status: %s\n", strerror(errno)); | av_log(context, AV_LOG_ERROR, "Failed to get status: %s\n", strerror(errno)); | ||||
return AVERROR(EIO); | return AVERROR(EIO); | ||||
} | } | ||||
#ifdef DV1394_DEBUG | |||||
av_log(context, AV_LOG_DEBUG, "DV1394: status\n" | |||||
av_dlog(context, "DV1394: status\n" | |||||
"\tactive_frame\t%d\n" | "\tactive_frame\t%d\n" | ||||
"\tfirst_clear_frame\t%d\n" | "\tfirst_clear_frame\t%d\n" | ||||
"\tn_clear_frames\t%d\n" | "\tn_clear_frames\t%d\n" | ||||
"\tdropped_frames\t%d\n", | "\tdropped_frames\t%d\n", | ||||
s.active_frame, s.first_clear_frame, | s.active_frame, s.first_clear_frame, | ||||
s.n_clear_frames, s.dropped_frames); | s.n_clear_frames, s.dropped_frames); | ||||
#endif | |||||
dv->avail = s.n_clear_frames; | dv->avail = s.n_clear_frames; | ||||
dv->index = s.first_clear_frame; | dv->index = s.first_clear_frame; | ||||
@@ -200,10 +195,8 @@ restart_poll: | |||||
} | } | ||||
} | } | ||||
#ifdef DV1394_DEBUG
av_log(context, AV_LOG_DEBUG, "index %d, avail %d, done %d\n", dv->index, dv->avail,
av_dlog(context, "index %d, avail %d, done %d\n", dv->index, dv->avail,
dv->done);
#endif
size = dv_produce_packet(dv->dv_demux, pkt,
dv->ring + (dv->index * DV1394_PAL_FRAME_SIZE),
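These conversions rely on av_dlog() compiling to nothing unless DEBUG is defined at build time, which is why the per-call #ifdef fences can go. The macro in libavutil/log.h of this era amounts to roughly the following:

    #ifdef DEBUG
    #    define av_dlog(ctx, ...) av_log(ctx, AV_LOG_DEBUG, __VA_ARGS__)
    #else
    #    define av_dlog(ctx, ...) do { } while (0)
    #endif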
@@ -37,7 +37,10 @@ | |||||
#include <time.h> | #include <time.h> | ||||
#include <linux/fb.h> | #include <linux/fb.h> | ||||
#include "libavutil/log.h" | |||||
#include "libavutil/mem.h" | #include "libavutil/mem.h" | ||||
#include "libavutil/opt.h" | |||||
#include "libavutil/parseutils.h" | |||||
#include "libavutil/pixdesc.h" | #include "libavutil/pixdesc.h" | ||||
#include "avdevice.h" | #include "avdevice.h" | ||||
@@ -74,8 +77,10 @@ static enum PixelFormat get_pixfmt_from_fb_varinfo(struct fb_var_screeninfo *var | |||||
} | } | ||||
typedef struct { | typedef struct { | ||||
AVClass *class; ///< class for private options | |||||
int frame_size; ///< size in bytes of a grabbed frame | int frame_size; ///< size in bytes of a grabbed frame | ||||
AVRational time_base; ///< time base | |||||
AVRational fps; ///< framerate | |||||
char *framerate; ///< framerate string set by a private option | |||||
int64_t time_frame; ///< time for the next frame to output (in 1/1000000 units) | int64_t time_frame; ///< time for the next frame to output (in 1/1000000 units) | ||||
int fd; ///< framebuffer device file descriptor | int fd; ///< framebuffer device file descriptor | ||||
@@ -97,16 +102,21 @@ av_cold static int fbdev_read_header(AVFormatContext *avctx, | |||||
enum PixelFormat pix_fmt; | enum PixelFormat pix_fmt; | ||||
int ret, flags = O_RDONLY; | int ret, flags = O_RDONLY; | ||||
ret = av_parse_video_rate(&fbdev->fps, fbdev->framerate); | |||||
av_freep(&fbdev->framerate); | |||||
if (ret < 0) { | |||||
av_log(avctx, AV_LOG_ERROR, "Couldn't parse framerate.\n"); | |||||
return ret; | |||||
} | |||||
#if FF_API_FORMAT_PARAMETERS | |||||
if (ap->time_base.num) | |||||
fbdev->fps = (AVRational){ap->time_base.den, ap->time_base.num}; | |||||
#endif | |||||
if (!(st = av_new_stream(avctx, 0))) | if (!(st = av_new_stream(avctx, 0))) | ||||
return AVERROR(ENOMEM); | return AVERROR(ENOMEM); | ||||
av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in microseconds */ | av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in microseconds */ | ||||
if (ap->time_base.den <= 0) { | |||||
av_log(avctx, AV_LOG_ERROR, "Invalid time base %d/%d\n", | |||||
ap->time_base.num, ap->time_base.den); | |||||
return AVERROR(EINVAL); | |||||
} | |||||
/* NONBLOCK is ignored by the fbdev driver, only set for consistency */ | /* NONBLOCK is ignored by the fbdev driver, only set for consistency */ | ||||
if (avctx->flags & AVFMT_FLAG_NONBLOCK) | if (avctx->flags & AVFMT_FLAG_NONBLOCK) | ||||
flags |= O_NONBLOCK; | flags |= O_NONBLOCK; | ||||
@@ -146,7 +156,6 @@ av_cold static int fbdev_read_header(AVFormatContext *avctx, | |||||
fbdev->bytes_per_pixel = (fbdev->varinfo.bits_per_pixel + 7) >> 3; | fbdev->bytes_per_pixel = (fbdev->varinfo.bits_per_pixel + 7) >> 3; | ||||
fbdev->frame_linesize = fbdev->width * fbdev->bytes_per_pixel; | fbdev->frame_linesize = fbdev->width * fbdev->bytes_per_pixel; | ||||
fbdev->frame_size = fbdev->frame_linesize * fbdev->heigth; | fbdev->frame_size = fbdev->frame_linesize * fbdev->heigth; | ||||
fbdev->time_base = ap->time_base; | |||||
fbdev->time_frame = AV_NOPTS_VALUE; | fbdev->time_frame = AV_NOPTS_VALUE; | ||||
fbdev->data = mmap(NULL, fbdev->fixinfo.smem_len, PROT_READ, MAP_SHARED, fbdev->fd, 0); | fbdev->data = mmap(NULL, fbdev->fixinfo.smem_len, PROT_READ, MAP_SHARED, fbdev->fd, 0); | ||||
if (fbdev->data == MAP_FAILED) { | if (fbdev->data == MAP_FAILED) { | ||||
@@ -162,13 +171,13 @@ av_cold static int fbdev_read_header(AVFormatContext *avctx, | |||||
st->codec->pix_fmt = pix_fmt; | st->codec->pix_fmt = pix_fmt; | ||||
st->codec->time_base = ap->time_base; | st->codec->time_base = ap->time_base; | ||||
st->codec->bit_rate = | st->codec->bit_rate = | ||||
fbdev->width * fbdev->heigth * fbdev->bytes_per_pixel / av_q2d(ap->time_base) * 8; | |||||
fbdev->width * fbdev->heigth * fbdev->bytes_per_pixel * av_q2d(fbdev->fps) * 8; | |||||
av_log(avctx, AV_LOG_INFO, | av_log(avctx, AV_LOG_INFO, | ||||
"w:%d h:%d bpp:%d pixfmt:%s tb:%d/%d bit_rate:%d\n", | |||||
"w:%d h:%d bpp:%d pixfmt:%s fps:%d/%d bit_rate:%d\n", | |||||
fbdev->width, fbdev->heigth, fbdev->varinfo.bits_per_pixel, | fbdev->width, fbdev->heigth, fbdev->varinfo.bits_per_pixel, | ||||
av_pix_fmt_descriptors[pix_fmt].name, | av_pix_fmt_descriptors[pix_fmt].name, | ||||
ap->time_base.num, ap->time_base.den, | |||||
fbdev->fps.num, fbdev->fps.den, | |||||
st->codec->bit_rate); | st->codec->bit_rate); | ||||
return 0; | return 0; | ||||
@@ -196,7 +205,7 @@ static int fbdev_read_packet(AVFormatContext *avctx, AVPacket *pkt) | |||||
"time_frame:%"PRId64" curtime:%"PRId64" delay:%"PRId64"\n", | "time_frame:%"PRId64" curtime:%"PRId64" delay:%"PRId64"\n", | ||||
fbdev->time_frame, curtime, delay); | fbdev->time_frame, curtime, delay); | ||||
if (delay <= 0) { | if (delay <= 0) { | ||||
fbdev->time_frame += INT64_C(1000000) * av_q2d(fbdev->time_base); | |||||
fbdev->time_frame += INT64_C(1000000) / av_q2d(fbdev->fps); | |||||
break; | break; | ||||
} | } | ||||
if (avctx->flags & AVFMT_FLAG_NONBLOCK) | if (avctx->flags & AVFMT_FLAG_NONBLOCK) | ||||
@@ -240,6 +249,20 @@ av_cold static int fbdev_read_close(AVFormatContext *avctx) | |||||
return 0; | return 0; | ||||
} | } | ||||
#define OFFSET(x) offsetof(FBDevContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
{ "framerate","", OFFSET(framerate), FF_OPT_TYPE_STRING, {.str = "25"}, 0, 0, DEC },
{ NULL },
};
static const AVClass fbdev_class = {
.class_name = "fbdev indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
AVInputFormat ff_fbdev_demuxer = {
.name = "fbdev",
.long_name = NULL_IF_CONFIG_SMALL("Linux framebuffer"),
@@ -248,4 +271,5 @@ AVInputFormat ff_fbdev_demuxer = {
.read_packet = fbdev_read_packet,
.read_close = fbdev_read_close,
.flags = AVFMT_NOFILE,
.priv_class = &fbdev_class,
};
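With priv_class and the option table wired into ff_fbdev_demuxer, the framerate can be supplied per open instead of through AVFormatParameters. A hedged usage sketch against the 2011-era API; names come from the table above, error handling is trimmed:

    #include <libavformat/avformat.h>
    #include <libavutil/dict.h>

    static int open_fbdev(AVFormatContext **ctx)
    {
        AVInputFormat *fmt = av_find_input_format("fbdev"); /* after av_register_all() */
        AVDictionary *opts = NULL;
        int ret;

        av_dict_set(&opts, "framerate", "30", 0);  /* consumed by fbdev_read_header() */
        ret = avformat_open_input(ctx, "/dev/fb0", fmt, &opts);
        av_dict_free(&opts);                       /* leftovers would be unknown options */
        return ret;
    }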
@@ -2,7 +2,6 @@ | |||||
* IIDC1394 grab interface (uses libdc1394 and libraw1394) | * IIDC1394 grab interface (uses libdc1394 and libraw1394) | ||||
* Copyright (c) 2004 Roman Shaposhnik | * Copyright (c) 2004 Roman Shaposhnik | ||||
* Copyright (c) 2008 Alessandro Sappia | * Copyright (c) 2008 Alessandro Sappia | ||||
* Copyright (c) 2011 Martin Lambers | |||||
* | * | ||||
* This file is part of FFmpeg. | * This file is part of FFmpeg. | ||||
* | * | ||||
@@ -22,47 +21,63 @@ | |||||
*/ | */ | ||||
#include "config.h" | #include "config.h" | ||||
#include "libavformat/avformat.h" | |||||
#include "libavutil/log.h" | #include "libavutil/log.h" | ||||
#include "libavutil/opt.h" | #include "libavutil/opt.h" | ||||
#include "avdevice.h" | |||||
#include <stdlib.h> | |||||
#include <string.h> | |||||
#include "libavutil/parseutils.h" | #include "libavutil/parseutils.h" | ||||
#include "libavutil/pixdesc.h" | #include "libavutil/pixdesc.h" | ||||
#if HAVE_LIBDC1394_2
#include <dc1394/dc1394.h>
#elif HAVE_LIBDC1394_1
#include <libraw1394/raw1394.h>
#include <libdc1394/dc1394_control.h>
#define DC1394_VIDEO_MODE_320x240_YUV422 MODE_320x240_YUV422
#define DC1394_VIDEO_MODE_640x480_YUV411 MODE_640x480_YUV411
#define DC1394_VIDEO_MODE_640x480_YUV422 MODE_640x480_YUV422
#define DC1394_FRAMERATE_1_875 FRAMERATE_1_875
#define DC1394_FRAMERATE_3_75 FRAMERATE_3_75
#define DC1394_FRAMERATE_7_5 FRAMERATE_7_5
#define DC1394_FRAMERATE_15 FRAMERATE_15
#define DC1394_FRAMERATE_30 FRAMERATE_30
#define DC1394_FRAMERATE_60 FRAMERATE_60
#define DC1394_FRAMERATE_120 FRAMERATE_120
#define DC1394_FRAMERATE_240 FRAMERATE_240
#endif
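HAVE_LIBDC1394_1 and HAVE_LIBDC1394_2 are ordinary configure-generated feature flags, so exactly one of the include branches above survives preprocessing; the compatibility defines map the v.2 names onto the old v.1 enums. On a system where only the version-2 library is detected, the generated config.h would carry something like the following (illustrative values, they depend on the local configure run):

    /* excerpt of a generated config.h */
    #define HAVE_LIBDC1394_1 0
    #define HAVE_LIBDC1394_2 1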
#undef free | #undef free | ||||
typedef struct dc1394_data { | typedef struct dc1394_data { | ||||
AVClass *class; | AVClass *class; | ||||
#if HAVE_LIBDC1394_1 | |||||
raw1394handle_t handle; | |||||
dc1394_cameracapture camera; | |||||
int channel; | |||||
#elif HAVE_LIBDC1394_2 | |||||
dc1394_t *d; | dc1394_t *d; | ||||
dc1394camera_t *camera; | dc1394camera_t *camera; | ||||
dc1394video_frame_t *frame; | dc1394video_frame_t *frame; | ||||
#endif | |||||
int current_frame; | int current_frame; | ||||
int fps; | |||||
int frame_rate; /**< frames per 1000 seconds (fps * 1000) */ | |||||
char *video_size; /**< String describing video size, set by a private option. */ | char *video_size; /**< String describing video size, set by a private option. */ | ||||
char *pixel_format; /**< Set by a private option. */ | char *pixel_format; /**< Set by a private option. */ | ||||
char *framerate; /**< Set by a private option. */ | |||||
AVPacket packet; | AVPacket packet; | ||||
} dc1394_data; | } dc1394_data; | ||||
/* The list of color codings that we support. | |||||
* We assume big endian for the dc1394 16bit modes: libdc1394 never sets the | |||||
* flag little_endian in dc1394video_frame_t. */ | |||||
struct dc1394_color_coding { | |||||
int pix_fmt; | |||||
int score; | |||||
uint32_t coding; | |||||
} dc1394_color_codings[] = { | |||||
{ PIX_FMT_GRAY16BE, 1000, DC1394_COLOR_CODING_MONO16 }, | |||||
{ PIX_FMT_RGB48BE, 1100, DC1394_COLOR_CODING_RGB16 }, | |||||
{ PIX_FMT_GRAY8, 1200, DC1394_COLOR_CODING_MONO8 }, | |||||
{ PIX_FMT_RGB24, 1300, DC1394_COLOR_CODING_RGB8 }, | |||||
{ PIX_FMT_UYYVYY411, 1400, DC1394_COLOR_CODING_YUV411 }, | |||||
{ PIX_FMT_UYVY422, 1500, DC1394_COLOR_CODING_YUV422 }, | |||||
{ PIX_FMT_NONE, 0, 0 } /* gotta be the last one */ | |||||
struct dc1394_frame_format { | |||||
int width; | |||||
int height; | |||||
enum PixelFormat pix_fmt; | |||||
int frame_size_id; | |||||
} dc1394_frame_formats[] = { | |||||
{ 320, 240, PIX_FMT_UYVY422, DC1394_VIDEO_MODE_320x240_YUV422 }, | |||||
{ 640, 480, PIX_FMT_UYYVYY411, DC1394_VIDEO_MODE_640x480_YUV411 }, | |||||
{ 640, 480, PIX_FMT_UYVY422, DC1394_VIDEO_MODE_640x480_YUV422 }, | |||||
{ 0, 0, 0, 0 } /* gotta be the last one */ | |||||
}; | }; | ||||
struct dc1394_frame_rate { | struct dc1394_frame_rate { | ||||
@@ -83,8 +98,12 @@ struct dc1394_frame_rate { | |||||
#define OFFSET(x) offsetof(dc1394_data, x) | #define OFFSET(x) offsetof(dc1394_data, x) | ||||
#define DEC AV_OPT_FLAG_DECODING_PARAM | #define DEC AV_OPT_FLAG_DECODING_PARAM | ||||
static const AVOption options[] = { | static const AVOption options[] = { | ||||
#if HAVE_LIBDC1394_1 | |||||
{ "channel", "", offsetof(dc1394_data, channel), FF_OPT_TYPE_INT, {.dbl = 0}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM }, | |||||
#endif | |||||
{ "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), FF_OPT_TYPE_STRING, {.str = "qvga"}, 0, 0, DEC }, | { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), FF_OPT_TYPE_STRING, {.str = "qvga"}, 0, 0, DEC }, | ||||
{ "pixel_format", "", OFFSET(pixel_format), FF_OPT_TYPE_STRING, {.str = "uyvy422"}, 0, 0, DEC }, | { "pixel_format", "", OFFSET(pixel_format), FF_OPT_TYPE_STRING, {.str = "uyvy422"}, 0, 0, DEC }, | ||||
{ "framerate", "", OFFSET(framerate), FF_OPT_TYPE_STRING, {.str = "ntsc"}, 0, 0, DEC }, | |||||
{ NULL }, | { NULL }, | ||||
}; | }; | ||||
@@ -95,183 +114,220 @@ static const AVClass libdc1394_class = { | |||||
.version = LIBAVUTIL_VERSION_INT, | .version = LIBAVUTIL_VERSION_INT, | ||||
}; | }; | ||||
static int dc1394_read_header(AVFormatContext *c, AVFormatParameters * ap) | |||||
static inline int dc1394_read_common(AVFormatContext *c, AVFormatParameters *ap, | |||||
struct dc1394_frame_format **select_fmt, struct dc1394_frame_rate **select_fps) | |||||
{ | { | ||||
dc1394_data* dc1394 = c->priv_data; | dc1394_data* dc1394 = c->priv_data; | ||||
AVStream *vst; | |||||
const struct dc1394_color_coding *cc; | |||||
const struct dc1394_frame_rate *fr; | |||||
dc1394camera_list_t *list; | |||||
dc1394video_modes_t video_modes; | |||||
dc1394video_mode_t video_mode; | |||||
dc1394framerates_t frame_rates; | |||||
dc1394framerate_t frame_rate; | |||||
uint32_t dc1394_width, dc1394_height, dc1394_color_coding; | |||||
int rate, best_rate; | |||||
int score, max_score; | |||||
int final_width, final_height, final_pix_fmt, final_frame_rate; | |||||
int res, i, j; | |||||
int ret=-1; | |||||
/* Now let us prep the hardware. */ | |||||
dc1394->d = dc1394_new(); | |||||
dc1394_camera_enumerate (dc1394->d, &list); | |||||
if ( !list || list->num == 0) { | |||||
av_log(c, AV_LOG_ERROR, "Unable to look for an IIDC camera\n\n"); | |||||
AVStream* vst; | |||||
struct dc1394_frame_format *fmt; | |||||
struct dc1394_frame_rate *fps; | |||||
enum PixelFormat pix_fmt; | |||||
int width, height; | |||||
AVRational framerate; | |||||
int ret = 0; | |||||
if ((pix_fmt = av_get_pix_fmt(dc1394->pixel_format)) == PIX_FMT_NONE) { | |||||
av_log(c, AV_LOG_ERROR, "No such pixel format: %s.\n", dc1394->pixel_format); | |||||
ret = AVERROR(EINVAL); | |||||
goto out; | goto out; | ||||
} | } | ||||
/* FIXME: To select a specific camera I need to search in list its guid */ | |||||
dc1394->camera = dc1394_camera_new (dc1394->d, list->ids[0].guid); | |||||
if (list->num > 1) { | |||||
av_log(c, AV_LOG_INFO, "Working with the first camera found\n"); | |||||
} | |||||
/* Freeing list of cameras */ | |||||
dc1394_camera_free_list (list); | |||||
/* Get the list of video modes supported by the camera. */ | |||||
res = dc1394_video_get_supported_modes (dc1394->camera, &video_modes); | |||||
if (res != DC1394_SUCCESS) { | |||||
av_log(c, AV_LOG_ERROR, "Could not get video formats.\n"); | |||||
goto out_camera; | |||||
} | |||||
if (dc1394->pixel_format) { | |||||
if ((ap->pix_fmt = av_get_pix_fmt(dc1394->pixel_format)) == PIX_FMT_NONE) { | |||||
av_log(c, AV_LOG_ERROR, "No such pixel format: %s.\n", dc1394->pixel_format); | |||||
ret = AVERROR(EINVAL); | |||||
goto out; | |||||
} | |||||
} | |||||
if (dc1394->video_size) { | |||||
if ((ret = av_parse_video_size(&ap->width, &ap->height, dc1394->video_size)) < 0) { | |||||
av_log(c, AV_LOG_ERROR, "Couldn't parse video size.\n"); | |||||
goto out; | |||||
} | |||||
} | |||||
/* Choose the best mode. */ | |||||
rate = (ap->time_base.num ? av_rescale(1000, ap->time_base.den, ap->time_base.num) : -1); | |||||
max_score = -1; | |||||
for (i = 0; i < video_modes.num; i++) { | |||||
if (video_modes.modes[i] == DC1394_VIDEO_MODE_EXIF | |||||
|| (video_modes.modes[i] >= DC1394_VIDEO_MODE_FORMAT7_MIN | |||||
&& video_modes.modes[i] <= DC1394_VIDEO_MODE_FORMAT7_MAX)) { | |||||
/* These modes are currently not supported as they would require | |||||
* much more work. For the remaining modes, the functions | |||||
* dc1394_get_image_size_from_video_mode and | |||||
* dc1394_get_color_coding_from_video_mode do not need to query the | |||||
* camera, and thus cannot fail. */ | |||||
continue; | |||||
} | |||||
dc1394_get_color_coding_from_video_mode (NULL, video_modes.modes[i], | |||||
&dc1394_color_coding); | |||||
for (cc = dc1394_color_codings; cc->pix_fmt != PIX_FMT_NONE; cc++) | |||||
if (cc->coding == dc1394_color_coding) | |||||
break; | |||||
if (cc->pix_fmt == PIX_FMT_NONE) { | |||||
/* We currently cannot handle this color coding. */ | |||||
continue; | |||||
} | |||||
/* Here we know that the mode is supported. Get its frame size and the list | |||||
* of frame rates supported by the camera for this mode. This list is sorted | |||||
* in ascending order according to libdc1394 example programs. */ | |||||
dc1394_get_image_size_from_video_mode (NULL, video_modes.modes[i], | |||||
&dc1394_width, &dc1394_height); | |||||
res = dc1394_video_get_supported_framerates (dc1394->camera, video_modes.modes[i], | |||||
&frame_rates); | |||||
if (res != DC1394_SUCCESS || frame_rates.num == 0) { | |||||
av_log(c, AV_LOG_ERROR, "Cannot get frame rates for video mode.\n"); | |||||
goto out_camera; | |||||
} | |||||
/* Choose the best frame rate. */ | |||||
best_rate = -1; | |||||
for (j = 0; j < frame_rates.num; j++) { | |||||
for (fr = dc1394_frame_rates; fr->frame_rate; fr++) { | |||||
if (fr->frame_rate_id == frame_rates.framerates[j]) { | |||||
break; | |||||
} | |||||
} | |||||
if (!fr->frame_rate) { | |||||
/* This frame rate is not supported. */ | |||||
continue; | |||||
} | |||||
best_rate = fr->frame_rate; | |||||
frame_rate = fr->frame_rate_id; | |||||
if (ap->time_base.num && rate == fr->frame_rate) { | |||||
/* This is the requested frame rate. */ | |||||
break; | |||||
} | |||||
} | |||||
if (best_rate == -1) { | |||||
/* No supported rate found. */ | |||||
continue; | |||||
} | |||||
/* Here we know that both the mode and the rate are supported. Compute score. */ | |||||
if (ap->width && ap->height | |||||
&& (dc1394_width == ap->width && dc1394_height == ap->height)) { | |||||
score = 110000; | |||||
} else { | |||||
score = dc1394_width * 10; // 1600 - 16000 | |||||
} | |||||
if (ap->pix_fmt == cc->pix_fmt) { | |||||
score += 90000; | |||||
} else { | |||||
score += cc->score; // 1000 - 1500 | |||||
} | |||||
if (ap->time_base.num && rate == best_rate) { | |||||
score += 70000; | |||||
} else { | |||||
score += best_rate / 1000; // 1 - 240 | |||||
} | |||||
if (score > max_score) { | |||||
video_mode = video_modes.modes[i]; | |||||
final_width = dc1394_width; | |||||
final_height = dc1394_height; | |||||
final_pix_fmt = cc->pix_fmt; | |||||
final_frame_rate = best_rate; | |||||
max_score = score; | |||||
} | |||||
} | |||||
if (max_score == -1) { | |||||
av_log(c, AV_LOG_ERROR, "No suitable video mode / frame rate available.\n"); | |||||
goto out_camera; | |||||
} | |||||
if (ap->width && ap->height && !(ap->width == final_width && ap->height == final_height)) { | |||||
av_log(c, AV_LOG_WARNING, "Requested frame size is not available, using fallback.\n"); | |||||
if ((ret = av_parse_video_size(&width, &height, dc1394->video_size)) < 0) { | |||||
av_log(c, AV_LOG_ERROR, "Couldn't parse video size.\n"); | |||||
goto out; | |||||
} | } | ||||
if (ap->pix_fmt != PIX_FMT_NONE && ap->pix_fmt != final_pix_fmt) { | |||||
av_log(c, AV_LOG_WARNING, "Requested pixel format is not supported, using fallback.\n"); | |||||
if ((ret = av_parse_video_rate(&framerate, dc1394->framerate)) < 0) { | |||||
av_log(c, AV_LOG_ERROR, "Couldn't parse framerate.\n"); | |||||
goto out; | |||||
} | } | ||||
if (ap->time_base.num && rate != final_frame_rate) { | |||||
av_log(c, AV_LOG_WARNING, "Requested frame rate is not available, using fallback.\n"); | |||||
#if FF_API_FORMAT_PARAMETERS | |||||
if (ap->width > 0) | |||||
width = ap->width; | |||||
if (ap->height > 0) | |||||
height = ap->height; | |||||
if (ap->pix_fmt) | |||||
pix_fmt = ap->pix_fmt; | |||||
if (ap->time_base.num) | |||||
framerate = (AVRational){ap->time_base.den, ap->time_base.num}; | |||||
#endif | |||||
dc1394->frame_rate = av_rescale(1000, framerate.num, framerate.den); | |||||
for (fmt = dc1394_frame_formats; fmt->width; fmt++) | |||||
if (fmt->pix_fmt == pix_fmt && fmt->width == width && fmt->height == height) | |||||
break; | |||||
for (fps = dc1394_frame_rates; fps->frame_rate; fps++) | |||||
if (fps->frame_rate == dc1394->frame_rate) | |||||
break; | |||||
if (!fps->frame_rate || !fmt->width) { | |||||
av_log(c, AV_LOG_ERROR, "Can't find matching camera format for %s, %dx%d@%d:1000fps\n", avcodec_get_pix_fmt_name(pix_fmt), | |||||
width, height, dc1394->frame_rate); | |||||
ret = AVERROR(EINVAL); | |||||
goto out; | |||||
} | } | ||||
/* create a video stream */ | /* create a video stream */ | ||||
vst = av_new_stream(c, 0); | vst = av_new_stream(c, 0); | ||||
if (!vst) | |||||
goto out_camera; | |||||
if (!vst) { | |||||
ret = AVERROR(ENOMEM); | |||||
goto out; | |||||
} | |||||
av_set_pts_info(vst, 64, 1, 1000); | av_set_pts_info(vst, 64, 1, 1000); | ||||
vst->codec->codec_type = AVMEDIA_TYPE_VIDEO; | vst->codec->codec_type = AVMEDIA_TYPE_VIDEO; | ||||
vst->codec->codec_id = CODEC_ID_RAWVIDEO; | vst->codec->codec_id = CODEC_ID_RAWVIDEO; | ||||
vst->codec->time_base.den = final_frame_rate; | |||||
vst->codec->time_base.num = 1000; | |||||
vst->codec->width = final_width; | |||||
vst->codec->height = final_height; | |||||
vst->codec->pix_fmt = final_pix_fmt; | |||||
vst->codec->time_base.den = framerate.num; | |||||
vst->codec->time_base.num = framerate.den; | |||||
vst->codec->width = fmt->width; | |||||
vst->codec->height = fmt->height; | |||||
vst->codec->pix_fmt = fmt->pix_fmt; | |||||
/* packet init */ | /* packet init */ | ||||
av_init_packet(&dc1394->packet); | av_init_packet(&dc1394->packet); | ||||
dc1394->packet.size = avpicture_get_size(final_pix_fmt, final_width, final_height); | |||||
dc1394->packet.size = avpicture_get_size(fmt->pix_fmt, fmt->width, fmt->height); | |||||
dc1394->packet.stream_index = vst->index; | dc1394->packet.stream_index = vst->index; | ||||
dc1394->packet.flags |= AV_PKT_FLAG_KEY; | dc1394->packet.flags |= AV_PKT_FLAG_KEY; | ||||
dc1394->current_frame = 0; | dc1394->current_frame = 0; | ||||
dc1394->fps = final_frame_rate; | |||||
vst->codec->bit_rate = av_rescale(dc1394->packet.size * 8, final_frame_rate, 1000); | |||||
vst->codec->bit_rate = av_rescale(dc1394->packet.size * 8, fps->frame_rate, 1000); | |||||
*select_fps = fps; | |||||
*select_fmt = fmt; | |||||
out: | |||||
av_freep(&dc1394->video_size); | |||||
av_freep(&dc1394->pixel_format); | |||||
av_freep(&dc1394->framerate); | |||||
return ret; | |||||
} | |||||
#if HAVE_LIBDC1394_1 | |||||
static int dc1394_v1_read_header(AVFormatContext *c, AVFormatParameters * ap) | |||||
{ | |||||
dc1394_data* dc1394 = c->priv_data; | |||||
AVStream* vst; | |||||
nodeid_t* camera_nodes; | |||||
int res; | |||||
struct dc1394_frame_format *fmt = NULL; | |||||
struct dc1394_frame_rate *fps = NULL; | |||||
if (dc1394_read_common(c,ap,&fmt,&fps) != 0) | |||||
return -1; | |||||
#if FF_API_FORMAT_PARAMETERS | |||||
if (ap->channel) | |||||
dc1394->channel = ap->channel; | |||||
#endif | |||||
/* Now let us prep the hardware. */ | |||||
dc1394->handle = dc1394_create_handle(0); /* FIXME: gotta have ap->port */ | |||||
if (!dc1394->handle) { | |||||
av_log(c, AV_LOG_ERROR, "Can't acquire dc1394 handle on port %d\n", 0 /* ap->port */); | |||||
goto out; | |||||
} | |||||
camera_nodes = dc1394_get_camera_nodes(dc1394->handle, &res, 1); | |||||
if (!camera_nodes || camera_nodes[dc1394->channel] == DC1394_NO_CAMERA) { | |||||
av_log(c, AV_LOG_ERROR, "There's no IIDC camera on the channel %d\n", dc1394->channel); | |||||
goto out_handle; | |||||
} | |||||
res = dc1394_dma_setup_capture(dc1394->handle, camera_nodes[dc1394->channel], | |||||
0, | |||||
FORMAT_VGA_NONCOMPRESSED, | |||||
fmt->frame_size_id, | |||||
SPEED_400, | |||||
fps->frame_rate_id, 8, 1, | |||||
c->filename, | |||||
&dc1394->camera); | |||||
dc1394_free_camera_nodes(camera_nodes); | |||||
if (res != DC1394_SUCCESS) { | |||||
av_log(c, AV_LOG_ERROR, "Can't prepare camera for the DMA capture\n"); | |||||
goto out_handle; | |||||
} | |||||
res = dc1394_start_iso_transmission(dc1394->handle, dc1394->camera.node); | |||||
if (res != DC1394_SUCCESS) { | |||||
av_log(c, AV_LOG_ERROR, "Can't start isochronous transmission\n"); | |||||
goto out_handle_dma; | |||||
} | |||||
return 0; | |||||
out_handle_dma: | |||||
dc1394_dma_unlisten(dc1394->handle, &dc1394->camera); | |||||
dc1394_dma_release_camera(dc1394->handle, &dc1394->camera); | |||||
out_handle: | |||||
dc1394_destroy_handle(dc1394->handle); | |||||
out: | |||||
return -1; | |||||
} | |||||
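dc1394_v1_read_header() follows the usual goto-unwind convention: each acquisition gets a matching label, so a failure tears down only what was already set up. Schematically, with stub names standing in for the real libdc1394 calls:

    /* Sketch of the unwind ladder; acquire_handle/setup_dma/start_iso are
     * hypothetical stand-ins, not libdc1394 functions. */
    static void *acquire_handle(void)    { return (void *)1; }
    static int   setup_dma(void *h)      { (void)h; return 0; }
    static int   start_iso(void *h)      { (void)h; return 0; }
    static void  teardown_dma(void *h)   { (void)h; }
    static void  release_handle(void *h) { (void)h; }

    static int init_sketch(void)
    {
        void *h;
        if (!(h = acquire_handle()))
            goto fail;
        if (setup_dma(h) < 0)
            goto fail_handle;
        if (start_iso(h) < 0)
            goto fail_dma;
        return 0;
    fail_dma:
        teardown_dma(h);
    fail_handle:
        release_handle(h);
    fail:
        return -1;
    }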
static int dc1394_v1_read_packet(AVFormatContext *c, AVPacket *pkt) | |||||
{ | |||||
struct dc1394_data *dc1394 = c->priv_data; | |||||
int res; | |||||
/* discard stale frame */ | |||||
if (dc1394->current_frame++) { | |||||
if (dc1394_dma_done_with_buffer(&dc1394->camera) != DC1394_SUCCESS) | |||||
av_log(c, AV_LOG_ERROR, "failed to release %d frame\n", dc1394->current_frame); | |||||
} | |||||
res = dc1394_dma_single_capture(&dc1394->camera); | |||||
if (res == DC1394_SUCCESS) { | |||||
dc1394->packet.data = (uint8_t *)(dc1394->camera.capture_buffer); | |||||
dc1394->packet.pts = (dc1394->current_frame * 1000000) / dc1394->frame_rate; | |||||
res = dc1394->packet.size; | |||||
} else { | |||||
av_log(c, AV_LOG_ERROR, "DMA capture failed\n"); | |||||
dc1394->packet.data = NULL; | |||||
res = -1; | |||||
} | |||||
*pkt = dc1394->packet; | |||||
return res; | |||||
} | |||||
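The frame_rate field is kept as frames per 1000 seconds (29970 for 29.97 fps, per the comment in the struct), and the stream time base was set to 1/1000 s, so the pts expression above reduces to a frame index in milliseconds:

    #include <stdint.h>

    /* n * 1000000 / (fps * 1000) == n * 1000 / fps -> milliseconds,
     * matching av_set_pts_info(vst, 64, 1, 1000). */
    static int64_t frame_pts_ms(int64_t n, int frame_rate /* fps * 1000 */)
    {
        return n * 1000000 / frame_rate;
    }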
static int dc1394_v1_close(AVFormatContext * context) | |||||
{ | |||||
struct dc1394_data *dc1394 = context->priv_data; | |||||
dc1394_stop_iso_transmission(dc1394->handle, dc1394->camera.node); | |||||
dc1394_dma_unlisten(dc1394->handle, &dc1394->camera); | |||||
dc1394_dma_release_camera(dc1394->handle, &dc1394->camera); | |||||
dc1394_destroy_handle(dc1394->handle); | |||||
return 0; | |||||
} | |||||
#elif HAVE_LIBDC1394_2 | |||||
static int dc1394_v2_read_header(AVFormatContext *c, AVFormatParameters * ap) | |||||
{ | |||||
dc1394_data* dc1394 = c->priv_data; | |||||
dc1394camera_list_t *list; | |||||
int res, i; | |||||
struct dc1394_frame_format *fmt = NULL; | |||||
struct dc1394_frame_rate *fps = NULL; | |||||
if (dc1394_read_common(c,ap,&fmt,&fps) != 0) | |||||
return -1; | |||||
/* Now let us prep the hardware. */ | |||||
dc1394->d = dc1394_new(); | |||||
dc1394_camera_enumerate (dc1394->d, &list); | |||||
if ( !list || list->num == 0) { | |||||
av_log(c, AV_LOG_ERROR, "Unable to look for an IIDC camera\n\n"); | |||||
goto out; | |||||
} | |||||
/* FIXME: To select a specific camera I need to search in list its guid */ | |||||
dc1394->camera = dc1394_camera_new (dc1394->d, list->ids[0].guid); | |||||
if (list->num > 1) { | |||||
av_log(c, AV_LOG_INFO, "Working with the first camera found\n"); | |||||
} | |||||
/* Freeing list of cameras */ | |||||
dc1394_camera_free_list (list); | |||||
/* Select MAX Speed possible from the cam */ | /* Select MAX Speed possible from the cam */ | ||||
if (dc1394->camera->bmode_capable>0) { | if (dc1394->camera->bmode_capable>0) { | ||||
@@ -289,13 +345,13 @@ static int dc1394_read_header(AVFormatContext *c, AVFormatParameters * ap) | |||||
goto out_camera; | goto out_camera; | ||||
} | } | ||||
if (dc1394_video_set_mode(dc1394->camera, video_mode) != DC1394_SUCCESS) { | |||||
if (dc1394_video_set_mode(dc1394->camera, fmt->frame_size_id) != DC1394_SUCCESS) { | |||||
av_log(c, AV_LOG_ERROR, "Couldn't set video format\n"); | av_log(c, AV_LOG_ERROR, "Couldn't set video format\n"); | ||||
goto out_camera; | goto out_camera; | ||||
} | } | ||||
if (dc1394_video_set_framerate(dc1394->camera, frame_rate) != DC1394_SUCCESS) { | |||||
av_log(c, AV_LOG_ERROR, "Could not set framerate %d.\n", final_frame_rate); | |||||
if (dc1394_video_set_framerate(dc1394->camera,fps->frame_rate_id) != DC1394_SUCCESS) { | |||||
av_log(c, AV_LOG_ERROR, "Couldn't set framerate %d \n",fps->frame_rate); | |||||
goto out_camera; | goto out_camera; | ||||
} | } | ||||
if (dc1394_capture_setup(dc1394->camera, 10, DC1394_CAPTURE_FLAGS_DEFAULT)!=DC1394_SUCCESS) { | if (dc1394_capture_setup(dc1394->camera, 10, DC1394_CAPTURE_FLAGS_DEFAULT)!=DC1394_SUCCESS) { | ||||
@@ -314,13 +370,11 @@ out_camera: | |||||
dc1394_video_set_transmission(dc1394->camera, DC1394_OFF); | dc1394_video_set_transmission(dc1394->camera, DC1394_OFF); | ||||
dc1394_camera_free (dc1394->camera); | dc1394_camera_free (dc1394->camera); | ||||
out: | out: | ||||
av_freep(&dc1394->video_size); | |||||
av_freep(&dc1394->pixel_format); | |||||
dc1394_free(dc1394->d); | dc1394_free(dc1394->d); | ||||
return ret; | |||||
return -1; | |||||
} | } | ||||
static int dc1394_read_packet(AVFormatContext *c, AVPacket *pkt) | |||||
static int dc1394_v2_read_packet(AVFormatContext *c, AVPacket *pkt) | |||||
{ | { | ||||
struct dc1394_data *dc1394 = c->priv_data; | struct dc1394_data *dc1394 = c->priv_data; | ||||
int res; | int res; | ||||
@@ -334,7 +388,7 @@ static int dc1394_read_packet(AVFormatContext *c, AVPacket *pkt) | |||||
res = dc1394_capture_dequeue(dc1394->camera, DC1394_CAPTURE_POLICY_WAIT, &dc1394->frame); | res = dc1394_capture_dequeue(dc1394->camera, DC1394_CAPTURE_POLICY_WAIT, &dc1394->frame); | ||||
if (res == DC1394_SUCCESS) { | if (res == DC1394_SUCCESS) { | ||||
dc1394->packet.data = (uint8_t *)(dc1394->frame->image); | dc1394->packet.data = (uint8_t *)(dc1394->frame->image); | ||||
dc1394->packet.pts = (dc1394->current_frame * 1000000) / (dc1394->fps); | |||||
dc1394->packet.pts = (dc1394->current_frame * 1000000) / (dc1394->frame_rate); | |||||
res = dc1394->frame->image_bytes; | res = dc1394->frame->image_bytes; | ||||
} else { | } else { | ||||
av_log(c, AV_LOG_ERROR, "DMA capture failed\n"); | av_log(c, AV_LOG_ERROR, "DMA capture failed\n"); | ||||
@@ -346,7 +400,7 @@ static int dc1394_read_packet(AVFormatContext *c, AVPacket *pkt) | |||||
return res; | return res; | ||||
} | } | ||||
static int dc1394_close(AVFormatContext * context) | |||||
static int dc1394_v2_close(AVFormatContext * context) | |||||
{ | { | ||||
struct dc1394_data *dc1394 = context->priv_data; | struct dc1394_data *dc1394 = context->priv_data; | ||||
@@ -360,11 +414,25 @@ static int dc1394_close(AVFormatContext * context) | |||||
AVInputFormat ff_libdc1394_demuxer = { | AVInputFormat ff_libdc1394_demuxer = { | ||||
.name = "libdc1394", | .name = "libdc1394", | ||||
.long_name = NULL_IF_CONFIG_SMALL("dc1394 A/V grab"), | |||||
.long_name = NULL_IF_CONFIG_SMALL("dc1394 v.2 A/V grab"), | |||||
.priv_data_size = sizeof(struct dc1394_data), | |||||
.read_header = dc1394_v2_read_header, | |||||
.read_packet = dc1394_v2_read_packet, | |||||
.read_close = dc1394_v2_close, | |||||
.flags = AVFMT_NOFILE, | |||||
.priv_class = &libdc1394_class, | |||||
}; | |||||
#endif | |||||
#if HAVE_LIBDC1394_1
AVInputFormat ff_libdc1394_demuxer = {
.name = "libdc1394",
.long_name = NULL_IF_CONFIG_SMALL("dc1394 v.1 A/V grab"),
.priv_data_size = sizeof(struct dc1394_data),
.read_header = dc1394_read_header,
.read_packet = dc1394_read_packet,
.read_close = dc1394_close,
.read_header = dc1394_v1_read_header,
.read_packet = dc1394_v1_read_packet,
.read_close = dc1394_v1_close,
.flags = AVFMT_NOFILE,
.priv_class = &libdc1394_class,
};
#endif
@@ -76,6 +76,7 @@ struct video_data { | |||||
int channel; | int channel; | ||||
char *video_size; /**< String describing video size, set by a private option. */ | char *video_size; /**< String describing video size, set by a private option. */ | ||||
char *pixel_format; /**< Set by a private option. */ | char *pixel_format; /**< Set by a private option. */ | ||||
char *framerate; /**< Set by a private option. */ | |||||
}; | }; | ||||
struct buff_data { | struct buff_data { | ||||
@@ -438,12 +439,19 @@ static int v4l2_set_parameters(AVFormatContext *s1, AVFormatParameters *ap) | |||||
struct v4l2_streamparm streamparm = {0}; | struct v4l2_streamparm streamparm = {0}; | ||||
struct v4l2_fract *tpf = &streamparm.parm.capture.timeperframe; | struct v4l2_fract *tpf = &streamparm.parm.capture.timeperframe; | ||||
int i, ret; | int i, ret; | ||||
AVRational fps; | |||||
streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; | streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; | ||||
if (s->framerate && (ret = av_parse_video_rate(&fps, s->framerate)) < 0) { | |||||
av_log(s1, AV_LOG_ERROR, "Couldn't parse framerate.\n"); | |||||
return ret; | |||||
} | |||||
#if FF_API_FORMAT_PARAMETERS | #if FF_API_FORMAT_PARAMETERS | ||||
if (ap->channel > 0) | if (ap->channel > 0) | ||||
s->channel = ap->channel; | s->channel = ap->channel; | ||||
if (ap->time_base.num) | |||||
fps = (AVRational){ap->time_base.den, ap->time_base.num}; | |||||
#endif | #endif | ||||
/* set tv video input */ | /* set tv video input */ | ||||
@@ -492,34 +500,32 @@ static int v4l2_set_parameters(AVFormatContext *s1, AVFormatParameters *ap) | |||||
} | } | ||||
} | } | ||||
if (ap->time_base.num && ap->time_base.den) { | |||||
if (fps.num && fps.den) { | |||||
av_log(s1, AV_LOG_DEBUG, "Setting time per frame to %d/%d\n", | av_log(s1, AV_LOG_DEBUG, "Setting time per frame to %d/%d\n", | ||||
ap->time_base.num, ap->time_base.den); | |||||
tpf->numerator = ap->time_base.num; | |||||
tpf->denominator = ap->time_base.den; | |||||
fps.den, fps.num); | |||||
tpf->numerator = fps.den; | |||||
tpf->denominator = fps.num; | |||||
if (ioctl(s->fd, VIDIOC_S_PARM, &streamparm) != 0) { | if (ioctl(s->fd, VIDIOC_S_PARM, &streamparm) != 0) { | ||||
av_log(s1, AV_LOG_ERROR, | av_log(s1, AV_LOG_ERROR, | ||||
"ioctl set time per frame(%d/%d) failed\n", | "ioctl set time per frame(%d/%d) failed\n", | ||||
ap->time_base.num, ap->time_base.den); | |||||
fps.den, fps.num); | |||||
return AVERROR(EIO); | return AVERROR(EIO); | ||||
} | } | ||||
if (ap->time_base.den != tpf->denominator || | |||||
ap->time_base.num != tpf->numerator) { | |||||
if (fps.num != tpf->denominator || | |||||
fps.den != tpf->numerator) { | |||||
av_log(s1, AV_LOG_INFO, | av_log(s1, AV_LOG_INFO, | ||||
"The driver changed the time per frame from %d/%d to %d/%d\n", | "The driver changed the time per frame from %d/%d to %d/%d\n", | ||||
ap->time_base.num, ap->time_base.den, | |||||
fps.den, fps.num, | |||||
tpf->numerator, tpf->denominator); | tpf->numerator, tpf->denominator); | ||||
} | } | ||||
} else { | } else { | ||||
/* if timebase value is not set in ap, read the timebase value from the driver */ | |||||
/* if timebase value is not set, read the timebase value from the driver */ | |||||
if (ioctl(s->fd, VIDIOC_G_PARM, &streamparm) != 0) { | if (ioctl(s->fd, VIDIOC_G_PARM, &streamparm) != 0) { | ||||
av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_PARM): %s\n", strerror(errno)); | av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_PARM): %s\n", strerror(errno)); | ||||
return AVERROR(errno); | return AVERROR(errno); | ||||
} | } | ||||
} | } | ||||
ap->time_base.num = tpf->numerator; | |||||
ap->time_base.den = tpf->denominator; | |||||
return 0; | return 0; | ||||
} | } | ||||
@@ -616,7 +622,7 @@ static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap) | |||||
desired_format = device_try_init(s1, pix_fmt, &s->width, &s->height, &codec_id); | desired_format = device_try_init(s1, pix_fmt, &s->width, &s->height, &codec_id); | ||||
if (desired_format == 0) { | if (desired_format == 0) { | ||||
av_log(s1, AV_LOG_ERROR, "Cannot find a proper format for " | av_log(s1, AV_LOG_ERROR, "Cannot find a proper format for " | ||||
"codec_id %d, pix_fmt %d.\n", s1->video_codec_id, ap->pix_fmt); | |||||
"codec_id %d, pix_fmt %d.\n", s1->video_codec_id, pix_fmt); | |||||
close(s->fd); | close(s->fd); | ||||
res = AVERROR(EIO); | res = AVERROR(EIO); | ||||
@@ -660,6 +666,7 @@ out: | |||||
av_freep(&s->video_size); | av_freep(&s->video_size); | ||||
av_freep(&s->pixel_format); | av_freep(&s->pixel_format); | ||||
av_freep(&s->standard); | av_freep(&s->standard); | ||||
av_freep(&s->framerate); | |||||
return res; | return res; | ||||
} | } | ||||
@@ -711,6 +718,7 @@ static const AVOption options[] = {
{ "channel", "", OFFSET(channel), FF_OPT_TYPE_INT, {.dbl = 0 }, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), FF_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
{ "pixel_format", "", OFFSET(pixel_format), FF_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
{ "framerate", "", OFFSET(framerate), FF_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
{ NULL },
};
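On the V4L2 side, the parsed rate ends up as a time-per-frame request, i.e. the reciprocal of the framerate, as the hunks above show. A trimmed-down sketch of that ioctl sequence, with no error handling and assuming an already-opened capture fd:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int set_fps(int fd, int num, int den)   /* e.g. 30000/1001 */
    {
        struct v4l2_streamparm parm;
        memset(&parm, 0, sizeof(parm));
        parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        parm.parm.capture.timeperframe.numerator   = den;  /* time per frame = den/num s */
        parm.parm.capture.timeperframe.denominator = num;
        return ioctl(fd, VIDIOC_S_PARM, &parm);
    }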
@@ -26,8 +26,6 @@
#include <vfw.h>
#include "avdevice.h"
//#define DEBUG_VFW
/* Defines for VFW missing from MinGW.
* Remove this when MinGW incorporates them. */
#define HWND_MESSAGE ((HWND)-3)
@@ -43,6 +41,7 @@ struct vfw_ctx {
unsigned int curbufsize;
unsigned int frame_num;
char *video_size; /**< A string describing video size, set by a private option. */
char *framerate; /**< Set by a private option. */
};
static enum PixelFormat vfw_pixfmt(DWORD biCompression, WORD biBitCount)
@@ -119,7 +118,7 @@ static void dump_captureparms(AVFormatContext *s, CAPTUREPARMS *cparms)
static void dump_videohdr(AVFormatContext *s, VIDEOHDR *vhdr)
{
#ifdef DEBUG_VFW
#ifdef DEBUG
av_log(s, AV_LOG_DEBUG, "VIDEOHDR\n");
dstruct(s, vhdr, lpData, "p");
dstruct(s, vhdr, dwBufferLength, "lu");
@@ -234,6 +233,7 @@ static int vfw_read_close(AVFormatContext *s)
}
av_freep(&ctx->video_size);
av_freep(&ctx->framerate);
return 0;
}
@@ -250,6 +250,7 @@ static int vfw_read_header(AVFormatContext *s, AVFormatParameters *ap)
DWORD biCompression;
WORD biBitCount;
int ret;
AVRational fps;
if (!strcmp(s->filename, "list")) {
for (devnum = 0; devnum <= 9; devnum++) {
@@ -267,10 +268,10 @@ static int vfw_read_header(AVFormatContext *s, AVFormatParameters *ap)
return AVERROR(EIO);
}
if(!ap->time_base.den) {
av_log(s, AV_LOG_ERROR, "A time base must be specified.\n");
return AVERROR(EIO);
}
#if FF_API_FORMAT_PARAMETERS
if (ap->time_base.num)
fps = (AVRational){ap->time_base.den, ap->time_base.num};
#endif
ctx->hwnd = capCreateCaptureWindow(NULL, 0, 0, 0, 0, 0, HWND_MESSAGE, 0);
if(!ctx->hwnd) {
@@ -369,7 +370,7 @@ static int vfw_read_header(AVFormatContext *s, AVFormatParameters *ap)
cparms.fYield = 1; // Spawn a background thread
cparms.dwRequestMicroSecPerFrame =
(ap->time_base.num*1000000) / ap->time_base.den;
(fps.den*1000000) / fps.num;
cparms.fAbortLeftMouse = 0;
cparms.fAbortRightMouse = 0;
cparms.fCaptureAudio = 0;
@@ -381,7 +382,7 @@ static int vfw_read_header(AVFormatContext *s, AVFormatParameters *ap)
goto fail_io;
codec = st->codec;
codec->time_base = ap->time_base;
codec->time_base = (AVRational){fps.den, fps.num};
codec->codec_type = AVMEDIA_TYPE_VIDEO;
codec->width = bi->bmiHeader.biWidth;
codec->height = bi->bmiHeader.biHeight;
@@ -469,6 +470,7 @@ static int vfw_read_packet(AVFormatContext *s, AVPacket *pkt)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
{ "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), FF_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
{ "framerate", "", OFFSET(framerate), FF_OPT_TYPE_STRING, {.str = "ntsc"}, 0, 0, DEC },
{ NULL },
};
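Note that vfwcap gives the new option a default of "ntsc" (30000/1001, about 29.97 fps), so a frame rate no longer has to be supplied through AVFormatParameters; the FF_API_FORMAT_PARAMETERS block above only keeps the old ap->time_base path alive until that API is dropped. A typical invocation would be along the lines of "ffmpeg -f vfwcap -framerate ntsc -i 0 out.avi", assuming the capture device is selected by its index as the device-listing code above suggests.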
@@ -246,7 +246,7 @@ static int fourxm_read_packet(AVFormatContext *s,
FourxmDemuxContext *fourxm = s->priv_data;
AVIOContext *pb = s->pb;
unsigned int fourcc_tag;
unsigned int size, out_size av_unused;
unsigned int size;
int ret = 0;
unsigned int track_number;
int packet_read = 0;
@@ -295,7 +295,7 @@ static int fourxm_read_packet(AVFormatContext *s,
case snd__TAG:
track_number = avio_rl32(pb);
out_size= avio_rl32(pb);
avio_skip(pb, 4);
size-=8;
if (track_number < fourxm->track_count && fourxm->tracks[track_number].channels>0) {
@@ -26,8 +26,6 @@
#include "avformat.h"
#include "apetag.h"
#define ENABLE_DEBUG 0
/* The earliest and latest file formats supported by this library */
#define APE_MIN_VERSION 3950
#define APE_MAX_VERSION 3990
@@ -96,7 +94,7 @@ static int ape_probe(AVProbeData * p)
static void ape_dumpinfo(AVFormatContext * s, APEContext * ape_ctx)
{
#if ENABLE_DEBUG
#ifdef DEBUG
int i;
av_log(s, AV_LOG_DEBUG, "Descriptor Block:\n\n");
@@ -24,8 +24,6 @@
#include "avformat.h"
#include "apetag.h"
#define ENABLE_DEBUG 0
#define APE_TAG_VERSION 2000
#define APE_TAG_FOOTER_BYTES 32
#define APE_TAG_FLAG_CONTAINS_HEADER (1 << 31)
@@ -35,11 +33,11 @@ static int ape_tag_read_field(AVFormatContext *s)
{
AVIOContext *pb = s->pb;
uint8_t key[1024], *value;
uint32_t size, flags av_unused;
uint32_t size;
int i, c;
size = avio_rl32(pb); /* field size */
flags = avio_rl32(pb); /* field flags */
avio_skip(pb, 4); /* field flags */
for (i = 0; i < sizeof(key) - 1; i++) {
c = avio_r8(pb);
if (c < 0x20 || c > 0x7E)
@@ -209,7 +209,6 @@ static int asf_read_stream_properties(AVFormatContext *s, int64_t size)
ff_asf_guid g;
enum AVMediaType type;
int type_specific_size, sizeX;
uint64_t total_size av_unused;
unsigned int tag1;
int64_t pos1, pos2, start_time;
int test_for_ext_stream_audio, is_dvr_ms_audio=0;
@@ -256,7 +255,7 @@ static int asf_read_stream_properties(AVFormatContext *s, int64_t size)
return -1;
}
ff_get_guid(pb, &g);
total_size = avio_rl64(pb);
avio_skip(pb, 8); /* total_size */
type_specific_size = avio_rl32(pb);
avio_rl32(pb);
st->id = avio_rl16(pb) & 0x7f; /* stream id */
@@ -393,7 +392,7 @@ static int asf_read_ext_stream_properties(AVFormatContext *s, int64_t size)
AVIOContext *pb = s->pb;
ff_asf_guid g;
int ext_len, payload_ext_ct, stream_ct, i;
uint32_t ext_d av_unused, leak_rate, stream_num;
uint32_t leak_rate, stream_num;
unsigned int stream_languageid_index;
avio_rl64(pb); // starttime
@@ -427,7 +426,7 @@ static int asf_read_ext_stream_properties(AVFormatContext *s, int64_t size)
for (i=0; i<payload_ext_ct; i++){
ff_get_guid(pb, &g);
ext_d=avio_rl16(pb);
avio_skip(pb, 2);
ext_len=avio_rl32(pb);
avio_skip(pb, ext_len);
}
@@ -511,7 +510,7 @@ static int asf_read_metadata(AVFormatContext *s, int64_t size)
{
AVIOContext *pb = s->pb;
ASFContext *asf = s->priv_data;
int n, stream_num, name_len, value_len, value_type av_unused, value_num;
int n, stream_num, name_len, value_len, value_num;
int ret, i;
n = avio_rl16(pb);
@@ -521,7 +520,7 @@ static int asf_read_metadata(AVFormatContext *s, int64_t size)
avio_rl16(pb); //lang_list_index
stream_num= avio_rl16(pb);
name_len= avio_rl16(pb);
value_type= avio_rl16(pb);
avio_skip(pb, 2); /* value_type */
value_len= avio_rl32(pb);
if ((ret = avio_get_str16le(pb, name_len, name, sizeof(name))) < name_len)
@@ -626,10 +625,8 @@ static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
// if so the next iteration will pick it up
continue;
} else if (!ff_guidcmp(&g, &ff_asf_head1_guid)) {
av_unused int v1, v2;
ff_get_guid(pb, &g);
v1 = avio_rl32(pb);
v2 = avio_rl16(pb);
avio_skip(pb, 6);
continue;
} else if (!ff_guidcmp(&g, &ff_asf_marker_header)) {
asf_read_marker(s, gsize);
@@ -799,7 +796,7 @@ static int asf_read_frame_header(AVFormatContext *s, AVIOContext *pb){
ASFContext *asf = s->priv_data;
int rsize = 1;
int num = avio_r8(pb);
int64_t ts0, ts1 av_unused;
int64_t ts0;
asf->packet_segments--;
asf->packet_key_frame = num >> 7;
@@ -822,7 +819,7 @@ static int asf_read_frame_header(AVFormatContext *s, AVIOContext *pb){
// av_log(s, AV_LOG_DEBUG, "\n");
avio_skip(pb, 10);
ts0= avio_rl64(pb);
ts1= avio_rl64(pb);
avio_skip(pb, 8);
avio_skip(pb, 12);
avio_rl32(pb);
avio_skip(pb, asf->packet_replic_size - 8 - 38 - 4);
@@ -295,9 +295,7 @@ static int gif_write_video(AVFormatContext *s,
AVCodecContext *enc, const uint8_t *buf, int size)
{
AVIOContext *pb = s->pb;
GIFContext *gif = s->priv_data;
int jiffies;
int64_t delay;
/* graphic control extension block */
avio_w8(pb, 0x21);
@@ -307,8 +305,6 @@ static int gif_write_video(AVFormatContext *s,
/* 1 jiffy is 1/70 s */
/* the delay_time field indicates the number of jiffies - 1 */
delay = gif->file_time - gif->time;
/* XXX: should use delay, in order to be more accurate */
/* instead of using the same rounded value each time */
/* XXX: don't even remember if I really use it for now */
@@ -35,17 +35,6 @@
#include "libavutil/intreadwrite.h"
#include "avformat.h"
/* debugging support: #define DEBUG_IPMOVIE as non-zero to see extremely
* verbose information about the demux process */
#define DEBUG_IPMOVIE 0
#if DEBUG_IPMOVIE
#undef printf
#define debug_ipmovie printf
#else
static inline void debug_ipmovie(const char *format, ...) { }
#endif
#define CHUNK_PREAMBLE_SIZE 4
#define OPCODE_PREAMBLE_SIZE 4
@@ -150,8 +139,8 @@ static int load_ipmovie_packet(IPMVEContext *s, AVIOContext *pb,
s->audio_frame_count +=
(s->audio_chunk_size - 6) / s->audio_channels;
debug_ipmovie("sending audio frame with pts %"PRId64" (%d audio frames)\n",
pkt->pts, s->audio_frame_count);
av_dlog(NULL, "sending audio frame with pts %"PRId64" (%d audio frames)\n",
pkt->pts, s->audio_frame_count);
chunk_type = CHUNK_VIDEO;
@@ -195,8 +184,7 @@ static int load_ipmovie_packet(IPMVEContext *s, AVIOContext *pb,
pkt->stream_index = s->video_stream_index;
pkt->pts = s->video_pts;
debug_ipmovie("sending video frame with pts %"PRId64"\n",
pkt->pts);
av_dlog(NULL, "sending video frame with pts %"PRId64"\n", pkt->pts);
s->video_pts += s->frame_pts_inc;
@@ -244,36 +232,36 @@ static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb,
chunk_size = AV_RL16(&chunk_preamble[0]);
chunk_type = AV_RL16(&chunk_preamble[2]);
debug_ipmovie("chunk type 0x%04X, 0x%04X bytes: ", chunk_type, chunk_size);
av_dlog(NULL, "chunk type 0x%04X, 0x%04X bytes: ", chunk_type, chunk_size);
switch (chunk_type) {
case CHUNK_INIT_AUDIO:
debug_ipmovie("initialize audio\n");
av_dlog(NULL, "initialize audio\n");
break;
case CHUNK_AUDIO_ONLY:
debug_ipmovie("audio only\n");
av_dlog(NULL, "audio only\n");
break;
case CHUNK_INIT_VIDEO:
debug_ipmovie("initialize video\n");
av_dlog(NULL, "initialize video\n");
break;
case CHUNK_VIDEO:
debug_ipmovie("video (and audio)\n");
av_dlog(NULL, "video (and audio)\n");
break;
case CHUNK_SHUTDOWN:
debug_ipmovie("shutdown\n");
av_dlog(NULL, "shutdown\n");
break;
case CHUNK_END:
debug_ipmovie("end\n");
av_dlog(NULL, "end\n");
break;
default:
debug_ipmovie("invalid chunk\n");
av_dlog(NULL, "invalid chunk\n");
chunk_type = CHUNK_BAD;
break;
@@ -299,29 +287,29 @@ static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb,
chunk_size -= OPCODE_PREAMBLE_SIZE;
chunk_size -= opcode_size;
if (chunk_size < 0) {
debug_ipmovie("chunk_size countdown just went negative\n");
av_dlog(NULL, "chunk_size countdown just went negative\n");
chunk_type = CHUNK_BAD;
break;
}
debug_ipmovie(" opcode type %02X, version %d, 0x%04X bytes: ",
opcode_type, opcode_version, opcode_size);
av_dlog(NULL, " opcode type %02X, version %d, 0x%04X bytes: ",
opcode_type, opcode_version, opcode_size);
switch (opcode_type) {
case OPCODE_END_OF_STREAM:
debug_ipmovie("end of stream\n");
av_dlog(NULL, "end of stream\n");
avio_skip(pb, opcode_size);
break;
case OPCODE_END_OF_CHUNK:
debug_ipmovie("end of chunk\n");
av_dlog(NULL, "end of chunk\n");
avio_skip(pb, opcode_size);
break;
case OPCODE_CREATE_TIMER:
debug_ipmovie("create timer\n");
av_dlog(NULL, "create timer\n");
if ((opcode_version > 0) || (opcode_size > 6)) {
debug_ipmovie("bad create_timer opcode\n");
av_dlog(NULL, "bad create_timer opcode\n");
chunk_type = CHUNK_BAD;
break;
}
@@ -331,14 +319,15 @@ static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb,
break;
}
s->frame_pts_inc = ((uint64_t)AV_RL32(&scratch[0])) * AV_RL16(&scratch[4]);
debug_ipmovie(" %.2f frames/second (timer div = %d, subdiv = %d)\n",
1000000.0/s->frame_pts_inc, AV_RL32(&scratch[0]), AV_RL16(&scratch[4]));
av_dlog(NULL, " %.2f frames/second (timer div = %d, subdiv = %d)\n",
1000000.0 / s->frame_pts_inc, AV_RL32(&scratch[0]),
AV_RL16(&scratch[4]));
break;
case OPCODE_INIT_AUDIO_BUFFERS:
debug_ipmovie("initialize audio buffers\n");
av_dlog(NULL, "initialize audio buffers\n");
if ((opcode_version > 1) || (opcode_size > 10)) {
debug_ipmovie("bad init_audio_buffers opcode\n");
av_dlog(NULL, "bad init_audio_buffers opcode\n");
chunk_type = CHUNK_BAD;
break;
}
@@ -360,23 +349,22 @@ static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb,
s->audio_type = CODEC_ID_PCM_S16LE;
else
s->audio_type = CODEC_ID_PCM_U8;
debug_ipmovie("audio: %d bits, %d Hz, %s, %s format\n",
s->audio_bits,
s->audio_sample_rate,
(s->audio_channels == 2) ? "stereo" : "mono",
(s->audio_type == CODEC_ID_INTERPLAY_DPCM) ?
"Interplay audio" : "PCM");
av_dlog(NULL, "audio: %d bits, %d Hz, %s, %s format\n",
s->audio_bits, s->audio_sample_rate,
(s->audio_channels == 2) ? "stereo" : "mono",
(s->audio_type == CODEC_ID_INTERPLAY_DPCM) ?
"Interplay audio" : "PCM");
break;
case OPCODE_START_STOP_AUDIO:
debug_ipmovie("start/stop audio\n");
av_dlog(NULL, "start/stop audio\n");
avio_skip(pb, opcode_size);
break;
case OPCODE_INIT_VIDEO_BUFFERS:
debug_ipmovie("initialize video buffers\n");
av_dlog(NULL, "initialize video buffers\n");
if ((opcode_version > 2) || (opcode_size > 8)) {
debug_ipmovie("bad init_video_buffers opcode\n");
av_dlog(NULL, "bad init_video_buffers opcode\n");
chunk_type = CHUNK_BAD;
break;
}
@@ -392,8 +380,8 @@ static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb,
} else {
s->video_bpp = 16;
}
debug_ipmovie("video resolution: %d x %d\n",
s->video_width, s->video_height);
av_dlog(NULL, "video resolution: %d x %d\n",
s->video_width, s->video_height);
break;
case OPCODE_UNKNOWN_06:
@@ -403,17 +391,17 @@ static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb,
case OPCODE_UNKNOWN_13:
case OPCODE_UNKNOWN_14:
case OPCODE_UNKNOWN_15:
debug_ipmovie("unknown (but documented) opcode %02X\n", opcode_type);
av_dlog(NULL, "unknown (but documented) opcode %02X\n", opcode_type);
avio_skip(pb, opcode_size);
break;
case OPCODE_SEND_BUFFER:
debug_ipmovie("send buffer\n");
av_dlog(NULL, "send buffer\n");
avio_skip(pb, opcode_size);
break;
case OPCODE_AUDIO_FRAME:
debug_ipmovie("audio frame\n");
av_dlog(NULL, "audio frame\n");
/* log position and move on for now */
s->audio_chunk_offset = avio_tell(pb);
@@ -422,26 +410,26 @@ static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb,
break;
case OPCODE_SILENCE_FRAME:
debug_ipmovie("silence frame\n");
av_dlog(NULL, "silence frame\n");
avio_skip(pb, opcode_size);
break;
case OPCODE_INIT_VIDEO_MODE:
debug_ipmovie("initialize video mode\n");
av_dlog(NULL, "initialize video mode\n");
avio_skip(pb, opcode_size);
break;
case OPCODE_CREATE_GRADIENT:
debug_ipmovie("create gradient\n");
av_dlog(NULL, "create gradient\n");
avio_skip(pb, opcode_size);
break;
case OPCODE_SET_PALETTE:
debug_ipmovie("set palette\n");
av_dlog(NULL, "set palette\n");
/* check for the logical maximum palette size
* (3 * 256 + 4 bytes) */
if (opcode_size > 0x304) {
debug_ipmovie("demux_ipmovie: set_palette opcode too large\n");
av_dlog(NULL, "demux_ipmovie: set_palette opcode too large\n");
chunk_type = CHUNK_BAD;
break;
}
@@ -455,7 +443,7 @@ static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb,
last_color = first_color + AV_RL16(&scratch[2]) - 1;
/* sanity check (since they are 16 bit values) */
if ((first_color > 0xFF) || (last_color > 0xFF)) {
debug_ipmovie("demux_ipmovie: set_palette indexes out of range (%d -> %d)\n",
av_dlog(NULL, "demux_ipmovie: set_palette indexes out of range (%d -> %d)\n",
first_color, last_color);
chunk_type = CHUNK_BAD;
break;
@@ -473,12 +461,12 @@ static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb,
break;
case OPCODE_SET_PALETTE_COMPRESSED:
debug_ipmovie("set palette compressed\n");
av_dlog(NULL, "set palette compressed\n");
avio_skip(pb, opcode_size);
break;
case OPCODE_SET_DECODING_MAP:
debug_ipmovie("set decoding map\n");
av_dlog(NULL, "set decoding map\n");
/* log position and move on for now */
s->decode_map_chunk_offset = avio_tell(pb);
@@ -487,7 +475,7 @@ static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb,
break;
case OPCODE_VIDEO_DATA:
debug_ipmovie("set video data\n");
av_dlog(NULL, "set video data\n");
/* log position and move on for now */
s->video_chunk_offset = avio_tell(pb);
@@ -496,7 +484,7 @@ static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb,
break;
default:
debug_ipmovie("*** unknown opcode type\n");
av_dlog(NULL, "*** unknown opcode type\n");
chunk_type = CHUNK_BAD;
break;
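All of the debug_ipmovie()/printf plumbing above collapses into av_dlog(), which only produces output in builds where DEBUG is defined. Roughly (the exact definition lives in libavutil/log.h and may differ slightly):

    #ifdef DEBUG
    #    define av_dlog(ctx, ...) av_log(ctx, AV_LOG_DEBUG, __VA_ARGS__)
    #else
    #    define av_dlog(ctx, ...) do { } while (0)
    #endif

That is also why the per-file ENABLE_DEBUG / DEBUG_IPMOVIE style switches removed elsewhere in this merge are no longer needed.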
@@ -186,13 +186,13 @@ static int mmf_read_header(AVFormatContext *s,
unsigned int tag;
AVIOContext *pb = s->pb;
AVStream *st;
int64_t file_size av_unused, size;
int64_t size;
int rate, params;
tag = avio_rl32(pb);
if (tag != MKTAG('M', 'M', 'M', 'D'))
return -1;
file_size = avio_rb32(pb);
avio_skip(pb, 4); /* file_size */
/* Skip some unused chunks that may or may not be present */
for(;; avio_skip(pb, size)) {
@@ -572,10 +572,9 @@ static int mpegps_read_packet(AVFormatContext *s,
pkt->dts = dts;
pkt->pos = dummy_pos;
pkt->stream_index = st->index;
#if 0
av_log(s, AV_LOG_DEBUG, "%d: pts=%0.3f dts=%0.3f size=%d\n",
pkt->stream_index, pkt->pts / 90000.0, pkt->dts / 90000.0, pkt->size);
#endif
av_dlog(s, "%d: pts=%0.3f dts=%0.3f size=%d\n",
pkt->stream_index, pkt->pts / 90000.0, pkt->dts / 90000.0,
pkt->size);
return 0;
}
@@ -1078,10 +1078,8 @@ retry:
best_dts= pkt_desc->dts;
}
#if 0
av_log(ctx, AV_LOG_DEBUG, "bumping scr, scr:%f, dts:%f\n",
scr/90000.0, best_dts/90000.0);
#endif
av_dlog(ctx, "bumping scr, scr:%f, dts:%f\n",
scr / 90000.0, best_dts / 90000.0);
if(best_dts == INT64_MAX)
return 0;
@@ -1578,10 +1578,8 @@ static int mpegts_read_header(AVFormatContext *s,
s->bit_rate = (TS_PACKET_SIZE * 8) * 27e6 / ts->pcr_incr;
st->codec->bit_rate = s->bit_rate;
st->start_time = ts->cur_pcr;
#if 0
av_log(ts->stream, AV_LOG_DEBUG, "start=%0.3f pcr=%0.3f incr=%d\n",
st->start_time / 1000000.0, pcrs[0] / 27e6, ts->pcr_incr);
#endif
av_dlog(ts->stream, "start=%0.3f pcr=%0.3f incr=%d\n",
st->start_time / 1000000.0, pcrs[0] / 27e6, ts->pcr_incr);
}
avio_seek(pb, pos, SEEK_SET);
@@ -190,7 +190,6 @@ static int decode_main_header(NUTContext *nut){
uint64_t tmp, end;
unsigned int stream_count;
int i, j, tmp_stream, tmp_mul, tmp_pts, tmp_size, count, tmp_res, tmp_head_idx;
int64_t tmp_match;
end= get_packetheader(nut, bc, 1, MAIN_STARTCODE);
end += avio_tell(bc);
@@ -218,7 +217,6 @@ static int decode_main_header(NUTContext *nut){
tmp_pts=0;
tmp_mul=1;
tmp_stream=0;
tmp_match= 1-(1LL<<62);
tmp_head_idx= 0;
for(i=0; i<256;){
int tmp_flags = ffio_read_varlen(bc);
@@ -232,7 +230,7 @@ static int decode_main_header(NUTContext *nut){
else tmp_res = 0;
if(tmp_fields>5) count = ffio_read_varlen(bc);
else count = tmp_mul - tmp_size;
if(tmp_fields>6) tmp_match = get_s(bc);
if(tmp_fields>6) get_s(bc);
if(tmp_fields>7) tmp_head_idx= ffio_read_varlen(bc);
while(tmp_fields-- > 8)
@@ -197,8 +197,6 @@ static int ogg_read_page(AVFormatContext *s, int *str)
int flags, nsegs;
uint64_t gp;
uint32_t serial;
uint32_t seq av_unused;
uint32_t crc av_unused;
int size, idx;
uint8_t sync[4];
int sp = 0;
@@ -232,8 +230,7 @@ static int ogg_read_page(AVFormatContext *s, int *str)
flags = avio_r8(bc);
gp = avio_rl64 (bc);
serial = avio_rl32 (bc);
seq = avio_rl32 (bc);
crc = avio_rl32 (bc);
avio_skip(bc, 8); /* seq, crc */
nsegs = avio_r8(bc);
idx = ogg_find_stream (ogg, serial);
@@ -364,8 +361,6 @@ static int ogg_packet(AVFormatContext *s, int *str, int *dstart, int *dsize,
}
}while (!complete);
av_dlog(s, "ogg_packet: idx %i, frame size %i, start %i\n",
idx, os->psize, os->pstart);
if (os->granule == -1)
av_log(s, AV_LOG_WARNING, "Page at %"PRId64" is missing granule\n", os->page_pos);
@@ -39,7 +39,6 @@ ogm_header(AVFormatContext *s, int idx)
const uint8_t *p = os->buf + os->pstart;
uint64_t time_unit;
uint64_t spu;
uint32_t default_len av_unused;
if(!(*p & 1))
return 0;
@@ -74,8 +73,7 @@ ogm_header(AVFormatContext *s, int idx)
time_unit = bytestream_get_le64(&p);
spu = bytestream_get_le64(&p);
default_len = bytestream_get_le32(&p);
p += 4; /* default_len */
p += 8; /* buffersize + bits_per_sample */
if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
@@ -80,6 +80,8 @@ static int oma_read_header(AVFormatContext *s,
ff_id3v2_read(s, ID3v2_EA3_MAGIC);
ret = avio_read(s->pb, buf, EA3_HEADER_SIZE);
if (ret < EA3_HEADER_SIZE)
return -1;
if (memcmp(buf, ((const uint8_t[]){'E', 'A', '3'}),3) || buf[4] != 0 || buf[5] != EA3_HEADER_SIZE) {
av_log(s, AV_LOG_ERROR, "Couldn't find the EA3 header !\n");
@@ -80,8 +80,6 @@ static av_cold int rl2_read_header(AVFormatContext *s,
unsigned int audio_frame_counter = 0;
unsigned int video_frame_counter = 0;
unsigned int back_size;
int data_size av_unused;
unsigned short encoding_method av_unused;
unsigned short sound_rate;
unsigned short rate;
unsigned short channels;
@@ -98,14 +96,14 @@ static av_cold int rl2_read_header(AVFormatContext *s,
avio_skip(pb,4); /* skip FORM tag */
back_size = avio_rl32(pb); /**< get size of the background frame */
signature = avio_rb32(pb);
data_size = avio_rb32(pb);
avio_skip(pb, 4); /* data size */
frame_count = avio_rl32(pb);
/* disallow back_sizes and frame_counts that may lead to overflows later */
if(back_size > INT_MAX/2 || frame_count > INT_MAX / sizeof(uint32_t))
return AVERROR_INVALIDDATA;
encoding_method = avio_rl16(pb);
avio_skip(pb, 2); /* encoding method */
sound_rate = avio_rl16(pb);
rate = avio_rl16(pb);
channels = avio_rl16(pb);
@@ -280,7 +280,7 @@ ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVIOContext *pb,
if (rm_read_audio_stream_info(s, pb, st, rst, 0))
return -1;
} else {
int fps, fps2 av_unused;
int fps;
if (avio_rl32(pb) != MKTAG('V', 'I', 'D', 'O')) {
fail1:
av_log(st->codec, AV_LOG_ERROR, "Unsupported video codec\n");
@@ -298,7 +298,7 @@ ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVIOContext *pb,
fps= avio_rb16(pb);
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
avio_rb32(pb);
fps2= avio_rb16(pb);
avio_skip(pb, 2);
avio_rb16(pb);
if ((ret = rm_read_extradata(pb, st->codec, codec_data_size - (avio_tell(pb) - codec_pos))) < 0)
@@ -299,9 +299,9 @@ static int rpl_read_packet(AVFormatContext *s, AVPacket *pkt)
stream->codec->codec_tag == 124) {
// We have to split Escape 124 frames because there are
// multiple frames per chunk in Escape 124 samples.
uint32_t frame_size, frame_flags av_unused;
uint32_t frame_size;
frame_flags = avio_rl32(pb);
avio_skip(pb, 4); /* flags */
frame_size = avio_rl32(pb);
if (avio_seek(pb, -8, SEEK_CUR) < 0)
return AVERROR(EIO);
@@ -108,8 +108,7 @@ static int parse_fmtp_config(AVStream *st, char *value)
int len = ff_hex_to_data(NULL, value), i, ret = 0;
GetBitContext gb;
uint8_t *config;
int audio_mux_version, same_time_framing, num_sub_frames av_unused,
num_programs, num_layers;
int audio_mux_version, same_time_framing, num_programs, num_layers;
/* Pad this buffer, too, to avoid out of bounds reads with get_bits below */
config = av_mallocz(len + FF_INPUT_BUFFER_PADDING_SIZE);
@@ -119,7 +118,7 @@ static int parse_fmtp_config(AVStream *st, char *value)
init_get_bits(&gb, config, len*8);
audio_mux_version = get_bits(&gb, 1);
same_time_framing = get_bits(&gb, 1);
num_sub_frames = get_bits(&gb, 6);
skip_bits(&gb, 6); /* num_sub_frames */
num_programs = get_bits(&gb, 4);
num_layers = get_bits(&gb, 3);
if (audio_mux_version != 0 || same_time_framing != 1 || num_programs != 0 ||
@@ -45,7 +45,6 @@
#include "url.h"
//#define DEBUG
//#define DEBUG_RTP_TCP
/* Timeout values for socket poll, in ms,
* and read_packet(), in seconds */
@@ -860,9 +859,7 @@ int ff_rtsp_read_reply(AVFormatContext *s, RTSPMessageHeader *reply,
q = buf;
for (;;) {
ret = ffurl_read_complete(rt->rtsp_hd, &ch, 1);
#ifdef DEBUG_RTP_TCP
av_dlog(s, "ret=%d c=%02x [%c]\n", ret, ch, ch);
#endif
if (ret != 1)
return AVERROR_EOF;
if (ch == '\n')
@@ -32,7 +32,7 @@ int ff_sauce_read(AVFormatContext *avctx, uint64_t *fsize, int *got_width, int g
{
AVIOContext *pb = avctx->pb;
char buf[36];
int datatype, filetype, t1, t2, nb_comments, flags av_unused;
int datatype, filetype, t1, t2, nb_comments;
uint64_t start_pos = avio_size(pb) - 128;
avio_seek(pb, start_pos, SEEK_SET);
@@ -57,7 +57,7 @@ int ff_sauce_read(AVFormatContext *avctx, uint64_t *fsize, int *got_width, int g
t1 = avio_rl16(pb);
t2 = avio_rl16(pb);
nb_comments = avio_r8(pb);
flags = avio_r8(pb);
avio_skip(pb, 1); /* flags */
avio_skip(pb, 4);
GET_SAUCE_META("encoder", 22);
@@ -85,7 +85,6 @@ static int sol_channels(int magic, int type)
static int sol_read_header(AVFormatContext *s,
AVFormatParameters *ap)
{
int size av_unused;
unsigned int magic,tag;
AVIOContext *pb = s->pb;
unsigned int id, channels, rate, type;
@@ -99,7 +98,7 @@ static int sol_read_header(AVFormatContext *s,
return -1;
rate = avio_rl16(pb);
type = avio_r8(pb);
size = avio_rl32(pb);
avio_skip(pb, 4); /* size */
if (magic != 0x0B8D)
avio_r8(pb); /* newer SOLs contain padding byte */
@@ -179,23 +179,23 @@ FATE_H264 := $(FATE_H264:%=fate-h264-conformance-%) \ | |||||
FATE_TESTS += $(FATE_H264) | FATE_TESTS += $(FATE_H264) | ||||
fate-h264: $(FATE_H264) | fate-h264: $(FATE_H264) | ||||
fate-h264-conformance-aud_mw_e: CMD = framecrc -i $(SAMPLES)/h264-conformance/AUD_MW_E.264 | |||||
fate-h264-conformance-ba1_ft_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/BA1_FT_C.264 | |||||
fate-h264-conformance-ba1_sony_d: CMD = framecrc -i $(SAMPLES)/h264-conformance/BA1_Sony_D.jsv | |||||
fate-h264-conformance-ba2_sony_f: CMD = framecrc -i $(SAMPLES)/h264-conformance/BA2_Sony_F.jsv | |||||
fate-h264-conformance-aud_mw_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/AUD_MW_E.264 | |||||
fate-h264-conformance-ba1_ft_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/BA1_FT_C.264 | |||||
fate-h264-conformance-ba1_sony_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/BA1_Sony_D.jsv | |||||
fate-h264-conformance-ba2_sony_f: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/BA2_Sony_F.jsv | |||||
fate-h264-conformance-ba3_sva_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/BA3_SVA_C.264 | fate-h264-conformance-ba3_sva_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/BA3_SVA_C.264 | ||||
fate-h264-conformance-ba_mw_d: CMD = framecrc -i $(SAMPLES)/h264-conformance/BA_MW_D.264 | |||||
fate-h264-conformance-bamq1_jvc_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/BAMQ1_JVC_C.264 | |||||
fate-h264-conformance-bamq2_jvc_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/BAMQ2_JVC_C.264 | |||||
fate-h264-conformance-banm_mw_d: CMD = framecrc -i $(SAMPLES)/h264-conformance/BANM_MW_D.264 | |||||
fate-h264-conformance-basqp1_sony_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/BASQP1_Sony_C.jsv | |||||
fate-h264-conformance-caba1_sony_d: CMD = framecrc -i $(SAMPLES)/h264-conformance/CABA1_Sony_D.jsv | |||||
fate-h264-conformance-caba1_sva_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/CABA1_SVA_B.264 | |||||
fate-h264-conformance-caba2_sony_e: CMD = framecrc -i $(SAMPLES)/h264-conformance/CABA2_Sony_E.jsv | |||||
fate-h264-conformance-caba2_sva_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/CABA2_SVA_B.264 | |||||
fate-h264-conformance-ba_mw_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/BA_MW_D.264 | |||||
fate-h264-conformance-bamq1_jvc_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/BAMQ1_JVC_C.264 | |||||
fate-h264-conformance-bamq2_jvc_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/BAMQ2_JVC_C.264 | |||||
fate-h264-conformance-banm_mw_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/BANM_MW_D.264 | |||||
fate-h264-conformance-basqp1_sony_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/BASQP1_Sony_C.jsv | |||||
fate-h264-conformance-caba1_sony_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CABA1_Sony_D.jsv | |||||
fate-h264-conformance-caba1_sva_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CABA1_SVA_B.264 | |||||
fate-h264-conformance-caba2_sony_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CABA2_Sony_E.jsv | |||||
fate-h264-conformance-caba2_sva_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CABA2_SVA_B.264 | |||||
fate-h264-conformance-caba3_sony_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CABA3_Sony_C.jsv | fate-h264-conformance-caba3_sony_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CABA3_Sony_C.jsv | ||||
fate-h264-conformance-caba3_sva_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CABA3_SVA_B.264 | fate-h264-conformance-caba3_sva_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CABA3_SVA_B.264 | ||||
fate-h264-conformance-caba3_toshiba_e: CMD = framecrc -i $(SAMPLES)/h264-conformance/CABA3_TOSHIBA_E.264 | |||||
fate-h264-conformance-caba3_toshiba_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CABA3_TOSHIBA_E.264 | |||||
fate-h264-conformance-cabac_mot_fld0_full: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/camp_mot_fld0_full.26l | fate-h264-conformance-cabac_mot_fld0_full: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/camp_mot_fld0_full.26l | ||||
fate-h264-conformance-cabac_mot_frm0_full: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/camp_mot_frm0_full.26l | fate-h264-conformance-cabac_mot_frm0_full: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/camp_mot_frm0_full.26l | ||||
fate-h264-conformance-cabac_mot_mbaff0_full: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/camp_mot_mbaff0_full.26l | fate-h264-conformance-cabac_mot_mbaff0_full: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/camp_mot_mbaff0_full.26l | ||||
@@ -206,7 +206,7 @@ fate-h264-conformance-cabastbr3_sony_b: CMD = framecrc -vsync 0 -strict 1 -i $( | |||||
fate-h264-conformance-cabref3_sand_d: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CABREF3_Sand_D.264 | fate-h264-conformance-cabref3_sand_d: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CABREF3_Sand_D.264 | ||||
fate-h264-conformance-cacqp3_sony_d: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CACQP3_Sony_D.jsv | fate-h264-conformance-cacqp3_sony_d: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CACQP3_Sony_D.jsv | ||||
fate-h264-conformance-cafi1_sva_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAFI1_SVA_C.264 | fate-h264-conformance-cafi1_sva_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAFI1_SVA_C.264 | ||||
fate-h264-conformance-cama1_sony_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/CAMA1_Sony_C.jsv | |||||
fate-h264-conformance-cama1_sony_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAMA1_Sony_C.jsv | |||||
fate-h264-conformance-cama1_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAMA1_TOSHIBA_B.264 | fate-h264-conformance-cama1_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAMA1_TOSHIBA_B.264 | ||||
fate-h264-conformance-cama1_vtc_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/cama1_vtc_c.avc | fate-h264-conformance-cama1_vtc_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/cama1_vtc_c.avc | ||||
fate-h264-conformance-cama2_vtc_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/cama2_vtc_b.avc | fate-h264-conformance-cama2_vtc_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/cama2_vtc_b.avc | ||||
@@ -219,37 +219,37 @@ fate-h264-conformance-camanl3_sand_e: CMD = framecrc -vsync 0 -strict 1 -i $(SA | |||||
fate-h264-conformance-camasl3_sony_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAMASL3_Sony_B.jsv | fate-h264-conformance-camasl3_sony_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAMASL3_Sony_B.jsv | ||||
fate-h264-conformance-camp_mot_mbaff_l30: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAMP_MOT_MBAFF_L30.26l | fate-h264-conformance-camp_mot_mbaff_l30: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAMP_MOT_MBAFF_L30.26l | ||||
fate-h264-conformance-camp_mot_mbaff_l31: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAMP_MOT_MBAFF_L31.26l | fate-h264-conformance-camp_mot_mbaff_l31: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAMP_MOT_MBAFF_L31.26l | ||||
fate-h264-conformance-canl1_sony_e: CMD = framecrc -i $(SAMPLES)/h264-conformance/CANL1_Sony_E.jsv | |||||
fate-h264-conformance-canl1_sva_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/CANL1_SVA_B.264 | |||||
fate-h264-conformance-canl1_toshiba_g: CMD = framecrc -i $(SAMPLES)/h264-conformance/CANL1_TOSHIBA_G.264 | |||||
fate-h264-conformance-canl2_sony_e: CMD = framecrc -i $(SAMPLES)/h264-conformance/CANL2_Sony_E.jsv | |||||
fate-h264-conformance-canl2_sva_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/CANL2_SVA_B.264 | |||||
fate-h264-conformance-canl1_sony_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANL1_Sony_E.jsv | |||||
fate-h264-conformance-canl1_sva_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANL1_SVA_B.264 | |||||
fate-h264-conformance-canl1_toshiba_g: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANL1_TOSHIBA_G.264 | |||||
fate-h264-conformance-canl2_sony_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANL2_Sony_E.jsv | |||||
fate-h264-conformance-canl2_sva_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANL2_SVA_B.264 | |||||
fate-h264-conformance-canl3_sony_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CANL3_Sony_C.jsv | fate-h264-conformance-canl3_sony_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CANL3_Sony_C.jsv | ||||
fate-h264-conformance-canl3_sva_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/CANL3_SVA_B.264 | |||||
fate-h264-conformance-canl4_sva_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/CANL4_SVA_B.264 | |||||
fate-h264-conformance-canlma2_sony_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/CANLMA2_Sony_C.jsv | |||||
fate-h264-conformance-canlma3_sony_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/CANLMA3_Sony_C.jsv | |||||
fate-h264-conformance-canl3_sva_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANL3_SVA_B.264 | |||||
fate-h264-conformance-canl4_sva_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANL4_SVA_B.264 | |||||
fate-h264-conformance-canlma2_sony_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANLMA2_Sony_C.jsv | |||||
fate-h264-conformance-canlma3_sony_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CANLMA3_Sony_C.jsv | |||||
fate-h264-conformance-capa1_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAPA1_TOSHIBA_B.264 | fate-h264-conformance-capa1_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAPA1_TOSHIBA_B.264 | ||||
fate-h264-conformance-capama3_sand_f: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAPAMA3_Sand_F.264 | fate-h264-conformance-capama3_sand_f: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAPAMA3_Sand_F.264 | ||||
fate-h264-conformance-capcm1_sand_e: CMD = framecrc -i $(SAMPLES)/h264-conformance/CAPCM1_Sand_E.264 | |||||
fate-h264-conformance-capcmnl1_sand_e: CMD = framecrc -i $(SAMPLES)/h264-conformance/CAPCMNL1_Sand_E.264 | |||||
fate-h264-conformance-capcm1_sand_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAPCM1_Sand_E.264 | |||||
fate-h264-conformance-capcmnl1_sand_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAPCMNL1_Sand_E.264 | |||||
fate-h264-conformance-capm3_sony_d: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAPM3_Sony_D.jsv | fate-h264-conformance-capm3_sony_d: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAPM3_Sony_D.jsv | ||||
fate-h264-conformance-caqp1_sony_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/CAQP1_Sony_B.jsv | |||||
fate-h264-conformance-caqp1_sony_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAQP1_Sony_B.jsv | |||||
fate-h264-conformance-cavlc_mot_fld0_full_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/cvmp_mot_fld0_full_B.26l | fate-h264-conformance-cavlc_mot_fld0_full_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/cvmp_mot_fld0_full_B.26l | ||||
fate-h264-conformance-cavlc_mot_frm0_full_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/cvmp_mot_frm0_full_B.26l | fate-h264-conformance-cavlc_mot_frm0_full_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/cvmp_mot_frm0_full_B.26l | ||||
fate-h264-conformance-cavlc_mot_mbaff0_full_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/cvmp_mot_mbaff0_full_B.26l | fate-h264-conformance-cavlc_mot_mbaff0_full_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/cvmp_mot_mbaff0_full_B.26l | ||||
fate-h264-conformance-cavlc_mot_picaff0_full_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/cvmp_mot_picaff0_full_B.26l | fate-h264-conformance-cavlc_mot_picaff0_full_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/cvmp_mot_picaff0_full_B.26l | ||||
fate-h264-conformance-cawp1_toshiba_e: CMD = framecrc -i $(SAMPLES)/h264-conformance/CAWP1_TOSHIBA_E.264 | |||||
fate-h264-conformance-cawp1_toshiba_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CAWP1_TOSHIBA_E.264 | |||||
fate-h264-conformance-cawp5_toshiba_e: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAWP5_TOSHIBA_E.264 | fate-h264-conformance-cawp5_toshiba_e: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CAWP5_TOSHIBA_E.264 | ||||
fate-h264-conformance-ci1_ft_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/CI1_FT_B.264 | |||||
fate-h264-conformance-ci_mw_d: CMD = framecrc -i $(SAMPLES)/h264-conformance/CI_MW_D.264 | |||||
fate-h264-conformance-ci1_ft_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CI1_FT_B.264 | |||||
fate-h264-conformance-ci_mw_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CI_MW_D.264 | |||||
fate-h264-conformance-cvbs3_sony_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVBS3_Sony_C.jsv | fate-h264-conformance-cvbs3_sony_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVBS3_Sony_C.jsv | ||||
fate-h264-conformance-cvcanlma2_sony_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/CVCANLMA2_Sony_C.jsv | |||||
fate-h264-conformance-cvcanlma2_sony_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVCANLMA2_Sony_C.jsv | |||||
fate-h264-conformance-cvfi1_sony_d: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVFI1_Sony_D.jsv | fate-h264-conformance-cvfi1_sony_d: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVFI1_Sony_D.jsv | ||||
fate-h264-conformance-cvfi1_sva_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVFI1_SVA_C.264 | fate-h264-conformance-cvfi1_sva_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVFI1_SVA_C.264 | ||||
fate-h264-conformance-cvfi2_sony_h: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVFI2_Sony_H.jsv | fate-h264-conformance-cvfi2_sony_h: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVFI2_Sony_H.jsv | ||||
fate-h264-conformance-cvfi2_sva_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVFI2_SVA_C.264 | fate-h264-conformance-cvfi2_sva_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVFI2_SVA_C.264 | ||||
fate-h264-conformance-cvma1_sony_d: CMD = framecrc -i $(SAMPLES)/h264-conformance/CVMA1_Sony_D.jsv | |||||
fate-h264-conformance-cvma1_sony_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVMA1_Sony_D.jsv | |||||
fate-h264-conformance-cvma1_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVMA1_TOSHIBA_B.264 | fate-h264-conformance-cvma1_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVMA1_TOSHIBA_B.264 | ||||
fate-h264-conformance-cvmanl1_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVMANL1_TOSHIBA_B.264 | fate-h264-conformance-cvmanl1_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVMANL1_TOSHIBA_B.264 | ||||
fate-h264-conformance-cvmanl2_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVMANL2_TOSHIBA_B.264 | fate-h264-conformance-cvmanl2_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVMANL2_TOSHIBA_B.264 | ||||
@@ -261,68 +261,68 @@ fate-h264-conformance-cvmp_mot_frm_l31_b: CMD = framecrc -vsync 0 -strict 1 -i | |||||
fate-h264-conformance-cvnlfi1_sony_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVNLFI1_Sony_C.jsv
fate-h264-conformance-cvnlfi2_sony_h: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVNLFI2_Sony_H.jsv
fate-h264-conformance-cvpa1_toshiba_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVPA1_TOSHIBA_B.264
fate-h264-conformance-cvpcmnl1_sva_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/CVPCMNL1_SVA_C.264
fate-h264-conformance-cvpcmnl2_sva_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/CVPCMNL2_SVA_C.264
fate-h264-conformance-cvwp1_toshiba_e: CMD = framecrc -i $(SAMPLES)/h264-conformance/CVWP1_TOSHIBA_E.264
fate-h264-conformance-cvpcmnl1_sva_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVPCMNL1_SVA_C.264
fate-h264-conformance-cvpcmnl2_sva_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVPCMNL2_SVA_C.264
fate-h264-conformance-cvwp1_toshiba_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/CVWP1_TOSHIBA_E.264
fate-h264-conformance-cvwp2_toshiba_e: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVWP2_TOSHIBA_E.264
fate-h264-conformance-cvwp3_toshiba_e: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVWP3_TOSHIBA_E.264
fate-h264-conformance-cvwp5_toshiba_e: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/CVWP5_TOSHIBA_E.264
fate-h264-conformance-fi1_sony_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FI1_Sony_E.jsv
fate-h264-conformance-frext-alphaconformanceg: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/test8b43.264
fate-h264-conformance-frext-bcrm_freh10: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/freh10.264 -vsync 0
fate-h264-conformance-frext-brcm_freh11: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/freh11.264 -vsync 0
fate-h264-conformance-frext-brcm_freh3: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/freh3.264
fate-h264-conformance-frext-brcm_freh4: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/freh4.264 -vsync 0
fate-h264-conformance-frext-brcm_freh5: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/freh5.264
fate-h264-conformance-frext-brcm_freh8: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/freh8.264
fate-h264-conformance-frext-brcm_freh9: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/freh9.264
fate-h264-conformance-frext-freh12_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/Freh12_B.264
fate-h264-conformance-frext-freh1_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/Freh1_B.264
fate-h264-conformance-frext-freh2_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/Freh2_B.264
fate-h264-conformance-frext-freh6: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/freh6.264 -vsync 0
fate-h264-conformance-frext-freh7_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/Freh7_B.264 -vsync 0
fate-h264-conformance-frext-frext01_jvc_d: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/FREXT01_JVC_D.264
fate-h264-conformance-frext-frext02_jvc_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/FREXT02_JVC_C.264
fate-h264-conformance-frext-frext1_panasonic_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/FRExt1_Panasonic.avc
fate-h264-conformance-frext-frext2_panasonic_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/FRExt2_Panasonic.avc -vsync 0
fate-h264-conformance-frext-frext3_panasonic_d: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/FRExt3_Panasonic.avc
fate-h264-conformance-frext-frext4_panasonic_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/FRExt4_Panasonic.avc
fate-h264-conformance-frext-alphaconformanceg: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/test8b43.264
fate-h264-conformance-frext-bcrm_freh10: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/freh10.264 -vsync 0
fate-h264-conformance-frext-brcm_freh11: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/freh11.264 -vsync 0
fate-h264-conformance-frext-brcm_freh3: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/freh3.264
fate-h264-conformance-frext-brcm_freh4: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/freh4.264 -vsync 0
fate-h264-conformance-frext-brcm_freh5: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/freh5.264
fate-h264-conformance-frext-brcm_freh8: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/freh8.264
fate-h264-conformance-frext-brcm_freh9: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/freh9.264
fate-h264-conformance-frext-freh12_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/Freh12_B.264
fate-h264-conformance-frext-freh1_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/Freh1_B.264
fate-h264-conformance-frext-freh2_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/Freh2_B.264
fate-h264-conformance-frext-freh6: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/freh6.264 -vsync 0
fate-h264-conformance-frext-freh7_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/Freh7_B.264 -vsync 0
fate-h264-conformance-frext-frext01_jvc_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/FREXT01_JVC_D.264
fate-h264-conformance-frext-frext02_jvc_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/FREXT02_JVC_C.264
fate-h264-conformance-frext-frext1_panasonic_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/FRExt1_Panasonic.avc
fate-h264-conformance-frext-frext2_panasonic_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/FRExt2_Panasonic.avc -vsync 0
fate-h264-conformance-frext-frext3_panasonic_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/FRExt3_Panasonic.avc
fate-h264-conformance-frext-frext4_panasonic_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/FRExt4_Panasonic.avc
fate-h264-conformance-frext-frext_mmco4_sony_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/FRExt_MMCO4_Sony_B.264
fate-h264-conformance-frext-hcaff1_hhi_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HCAFF1_HHI.264
fate-h264-conformance-frext-hcafr1_hhi_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HCAFR1_HHI.264
fate-h264-conformance-frext-hcafr2_hhi_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HCAFR2_HHI.264
fate-h264-conformance-frext-hcafr3_hhi_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HCAFR3_HHI.264
fate-h264-conformance-frext-hcafr4_hhi_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HCAFR4_HHI.264
fate-h264-conformance-frext-hcamff1_hhi_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HCAMFF1_HHI.264
fate-h264-conformance-frext-hpca_brcm_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HPCA_BRCM_C.264
fate-h264-conformance-frext-hpcadq_brcm_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HPCADQ_BRCM_B.264
fate-h264-conformance-frext-hpcafl_bcrm_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HPCAFL_BRCM_C.264 -vsync 0
fate-h264-conformance-frext-hpcaflnl_bcrm_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HPCAFLNL_BRCM_C.264 -vsync 0
fate-h264-conformance-frext-hpcalq_brcm_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HPCALQ_BRCM_B.264
fate-h264-conformance-frext-hpcamapalq_bcrm_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HPCAMAPALQ_BRCM_B.264 -vsync 0
fate-h264-conformance-frext-hpcamolq_brcm_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HPCAMOLQ_BRCM_B.264
fate-h264-conformance-frext-hpcanl_brcm_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HPCANL_BRCM_C.264
fate-h264-conformance-frext-hpcaq2lq_brcm_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HPCAQ2LQ_BRCM_B.264
fate-h264-conformance-frext-hpcv_brcm_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HPCV_BRCM_A.264
fate-h264-conformance-frext-hpcvfl_bcrm_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HPCVFL_BRCM_A.264 -vsync 0
fate-h264-conformance-frext-hpcvflnl_bcrm_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HPCVFLNL_BRCM_A.264 -vsync 0
fate-h264-conformance-frext-hpcvmolq_brcm_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HPCVMOLQ_BRCM_B.264
fate-h264-conformance-frext-hpcvnl_brcm_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/HPCVNL_BRCM_A.264
fate-h264-conformance-frext-pph10i1_panasonic_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/PPH10I1_Panasonic_A.264 -pix_fmt yuv420p10le
fate-h264-conformance-frext-pph10i2_panasonic_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/PPH10I2_Panasonic_A.264 -pix_fmt yuv420p10le
fate-h264-conformance-frext-pph10i3_panasonic_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/PPH10I3_Panasonic_A.264 -pix_fmt yuv420p10le
fate-h264-conformance-frext-pph10i4_panasonic_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/PPH10I4_Panasonic_A.264 -pix_fmt yuv420p10le
fate-h264-conformance-frext-pph10i5_panasonic_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/PPH10I5_Panasonic_A.264 -pix_fmt yuv420p10le
fate-h264-conformance-frext-pph10i6_panasonic_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/PPH10I6_Panasonic_A.264 -pix_fmt yuv420p10le
fate-h264-conformance-frext-pph10i7_panasonic_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/FRext/PPH10I7_Panasonic_A.264 -pix_fmt yuv420p10le
fate-h264-conformance-frext-hcaff1_hhi_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HCAFF1_HHI.264
fate-h264-conformance-frext-hcafr1_hhi_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HCAFR1_HHI.264
fate-h264-conformance-frext-hcafr2_hhi_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HCAFR2_HHI.264
fate-h264-conformance-frext-hcafr3_hhi_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HCAFR3_HHI.264
fate-h264-conformance-frext-hcafr4_hhi_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HCAFR4_HHI.264
fate-h264-conformance-frext-hcamff1_hhi_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HCAMFF1_HHI.264
fate-h264-conformance-frext-hpca_brcm_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HPCA_BRCM_C.264
fate-h264-conformance-frext-hpcadq_brcm_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HPCADQ_BRCM_B.264
fate-h264-conformance-frext-hpcafl_bcrm_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HPCAFL_BRCM_C.264 -vsync 0
fate-h264-conformance-frext-hpcaflnl_bcrm_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HPCAFLNL_BRCM_C.264 -vsync 0
fate-h264-conformance-frext-hpcalq_brcm_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HPCALQ_BRCM_B.264
fate-h264-conformance-frext-hpcamapalq_bcrm_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HPCAMAPALQ_BRCM_B.264 -vsync 0
fate-h264-conformance-frext-hpcamolq_brcm_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HPCAMOLQ_BRCM_B.264
fate-h264-conformance-frext-hpcanl_brcm_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HPCANL_BRCM_C.264
fate-h264-conformance-frext-hpcaq2lq_brcm_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HPCAQ2LQ_BRCM_B.264
fate-h264-conformance-frext-hpcv_brcm_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HPCV_BRCM_A.264
fate-h264-conformance-frext-hpcvfl_bcrm_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HPCVFL_BRCM_A.264 -vsync 0
fate-h264-conformance-frext-hpcvflnl_bcrm_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HPCVFLNL_BRCM_A.264 -vsync 0
fate-h264-conformance-frext-hpcvmolq_brcm_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HPCVMOLQ_BRCM_B.264
fate-h264-conformance-frext-hpcvnl_brcm_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/HPCVNL_BRCM_A.264
fate-h264-conformance-frext-pph10i1_panasonic_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/PPH10I1_Panasonic_A.264 -pix_fmt yuv420p10le
fate-h264-conformance-frext-pph10i2_panasonic_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/PPH10I2_Panasonic_A.264 -pix_fmt yuv420p10le
fate-h264-conformance-frext-pph10i3_panasonic_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/PPH10I3_Panasonic_A.264 -pix_fmt yuv420p10le
fate-h264-conformance-frext-pph10i4_panasonic_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/PPH10I4_Panasonic_A.264 -pix_fmt yuv420p10le
fate-h264-conformance-frext-pph10i5_panasonic_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/PPH10I5_Panasonic_A.264 -pix_fmt yuv420p10le
fate-h264-conformance-frext-pph10i6_panasonic_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/PPH10I6_Panasonic_A.264 -pix_fmt yuv420p10le
fate-h264-conformance-frext-pph10i7_panasonic_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/FRext/PPH10I7_Panasonic_A.264 -pix_fmt yuv420p10le
fate-h264-conformance-hcbp2_hhi_a: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/HCBP2_HHI_A.264
fate-h264-conformance-hcmp1_hhi_a: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/HCMP1_HHI_A.264
fate-h264-conformance-ls_sva_d: CMD = framecrc -i $(SAMPLES)/h264-conformance/LS_SVA_D.264
fate-h264-conformance-midr_mw_d: CMD = framecrc -i $(SAMPLES)/h264-conformance/MIDR_MW_D.264
fate-h264-conformance-mps_mw_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/MPS_MW_A.264
fate-h264-conformance-mr1_bt_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/MR1_BT_A.h264
fate-h264-conformance-mr1_mw_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/MR1_MW_A.264
fate-h264-conformance-mr2_mw_a: CMD = framecrc -i $(SAMPLES)/h264-conformance/MR2_MW_A.264
fate-h264-conformance-ls_sva_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/LS_SVA_D.264
fate-h264-conformance-midr_mw_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/MIDR_MW_D.264
fate-h264-conformance-mps_mw_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/MPS_MW_A.264
fate-h264-conformance-mr1_bt_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/MR1_BT_A.h264
fate-h264-conformance-mr1_mw_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/MR1_MW_A.264
fate-h264-conformance-mr2_mw_a: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/MR2_MW_A.264
fate-h264-conformance-mr2_tandberg_e: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/MR2_TANDBERG_E.264
fate-h264-conformance-mr3_tandberg_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/MR3_TANDBERG_B.264
fate-h264-conformance-mr4_tandberg_c: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/MR4_TANDBERG_C.264
@@ -332,26 +332,26 @@ fate-h264-conformance-mr7_bt_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)
fate-h264-conformance-mr8_bt_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/MR8_BT_B.h264
fate-h264-conformance-mr9_bt_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/MR9_BT_B.h264
fate-h264-conformance-mv1_brcm_d: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/src19td.IBP.264
fate-h264-conformance-nl1_sony_d: CMD = framecrc -i $(SAMPLES)/h264-conformance/NL1_Sony_D.jsv
fate-h264-conformance-nl2_sony_h: CMD = framecrc -i $(SAMPLES)/h264-conformance/NL2_Sony_H.jsv
fate-h264-conformance-nl3_sva_e: CMD = framecrc -i $(SAMPLES)/h264-conformance/NL3_SVA_E.264
fate-h264-conformance-nlmq1_jvc_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/NLMQ1_JVC_C.264
fate-h264-conformance-nlmq2_jvc_c: CMD = framecrc -i $(SAMPLES)/h264-conformance/NLMQ2_JVC_C.264
fate-h264-conformance-nrf_mw_e: CMD = framecrc -i $(SAMPLES)/h264-conformance/NRF_MW_E.264
fate-h264-conformance-nl1_sony_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/NL1_Sony_D.jsv
fate-h264-conformance-nl2_sony_h: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/NL2_Sony_H.jsv
fate-h264-conformance-nl3_sva_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/NL3_SVA_E.264
fate-h264-conformance-nlmq1_jvc_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/NLMQ1_JVC_C.264
fate-h264-conformance-nlmq2_jvc_c: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/NLMQ2_JVC_C.264
fate-h264-conformance-nrf_mw_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/NRF_MW_E.264
fate-h264-conformance-sharp_mp_field_1_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/Sharp_MP_Field_1_B.jvt
fate-h264-conformance-sharp_mp_field_2_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/Sharp_MP_Field_2_B.jvt
fate-h264-conformance-sharp_mp_field_3_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/Sharp_MP_Field_3_B.jvt
fate-h264-conformance-sharp_mp_paff_1r2: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/Sharp_MP_PAFF_1r2.jvt
fate-h264-conformance-sharp_mp_paff_2r: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/Sharp_MP_PAFF_2.jvt
fate-h264-conformance-sl1_sva_b: CMD = framecrc -vsync 0 -strict 1 -i $(SAMPLES)/h264-conformance/SL1_SVA_B.264
fate-h264-conformance-sva_ba1_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/SVA_BA1_B.264
fate-h264-conformance-sva_ba2_d: CMD = framecrc -i $(SAMPLES)/h264-conformance/SVA_BA2_D.264
fate-h264-conformance-sva_base_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/SVA_Base_B.264
fate-h264-conformance-sva_cl1_e: CMD = framecrc -i $(SAMPLES)/h264-conformance/SVA_CL1_E.264
fate-h264-conformance-sva_fm1_e: CMD = framecrc -i $(SAMPLES)/h264-conformance/SVA_FM1_E.264
fate-h264-conformance-sva_nl1_b: CMD = framecrc -i $(SAMPLES)/h264-conformance/SVA_NL1_B.264
fate-h264-conformance-sva_nl2_e: CMD = framecrc -i $(SAMPLES)/h264-conformance/SVA_NL2_E.264
fate-h264-conformance-sva_ba1_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/SVA_BA1_B.264
fate-h264-conformance-sva_ba2_d: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/SVA_BA2_D.264
fate-h264-conformance-sva_base_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/SVA_Base_B.264
fate-h264-conformance-sva_cl1_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/SVA_CL1_E.264
fate-h264-conformance-sva_fm1_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/SVA_FM1_E.264
fate-h264-conformance-sva_nl1_b: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/SVA_NL1_B.264
fate-h264-conformance-sva_nl2_e: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264-conformance/SVA_NL2_E.264
fate-h264-interlace-crop: CMD = framecrc -vframes 3 -i $(SAMPLES)/h264/interlaced_crop.mp4
fate-h264-lossless: CMD = framecrc -i $(SAMPLES)/h264/lossless.h264
fate-h264-interlace-crop: CMD = framecrc -vsync 0 -vframes 3 -i $(SAMPLES)/h264/interlaced_crop.mp4
fate-h264-lossless: CMD = framecrc -vsync 0 -i $(SAMPLES)/h264/lossless.h264
fate-h264-extreme-plane-pred: CMD = framemd5 -strict 1 -vsync 0 -i $(SAMPLES)/h264/extreme-plane-pred.h264
@@ -102,7 +102,7 @@ do_ffmpeg_crc()
do_video_decoding()
{
do_ffmpeg $raw_dst $DEC_OPTS $1 -i $target_path/$file -f rawvideo $ENC_OPTS $2
do_ffmpeg $raw_dst $DEC_OPTS $1 -i $target_path/$file -f rawvideo $ENC_OPTS -vsync 0 $2
}
do_video_encoding()