Originally committed as revision 19652 to svn://svn.ffmpeg.org/ffmpeg/trunk
@@ -45,18 +45,17 @@ static const FfmpegDiracSchroVideoFormatInfo ff_dirac_schro_video_format_info[]
{ 4096, 2160, 24, 1 },
};
unsigned int ff_dirac_schro_get_video_format_idx (AVCodecContext *avccontext)
unsigned int ff_dirac_schro_get_video_format_idx(AVCodecContext *avccontext)
{
unsigned int ret_idx = 0;
unsigned int idx;
unsigned int num_formats = sizeof(ff_dirac_schro_video_format_info) /
sizeof(ff_dirac_schro_video_format_info[0]);
for (idx = 1 ; idx < num_formats; ++idx ) {
const FfmpegDiracSchroVideoFormatInfo *vf =
&ff_dirac_schro_video_format_info[idx];
for (idx = 1; idx < num_formats; ++idx) {
const FfmpegDiracSchroVideoFormatInfo *vf = &ff_dirac_schro_video_format_info[idx];
if (avccontext->width == vf->width &&
avccontext->height == vf->height){
avccontext->height == vf->height) {
ret_idx = idx;
if (avccontext->time_base.den == vf->frame_rate_num &&
avccontext->time_base.num == vf->frame_rate_denom)
@@ -66,23 +65,22 @@ unsigned int ff_dirac_schro_get_video_format_idx (AVCodecContext *avccontext)
return ret_idx;
}
void ff_dirac_schro_queue_init (FfmpegDiracSchroQueue *queue)
void ff_dirac_schro_queue_init(FfmpegDiracSchroQueue *queue)
{
queue->p_head = queue->p_tail = NULL;
queue->size = 0;
}
void ff_dirac_schro_queue_free (FfmpegDiracSchroQueue *queue,
void (*free_func)(void *))
void ff_dirac_schro_queue_free(FfmpegDiracSchroQueue *queue,
void (*free_func)(void *))
{
while (queue->p_head)
free_func( ff_dirac_schro_queue_pop(queue) );
free_func(ff_dirac_schro_queue_pop(queue));
}
int ff_dirac_schro_queue_push_back (FfmpegDiracSchroQueue *queue, void *p_data)
int ff_dirac_schro_queue_push_back(FfmpegDiracSchroQueue *queue, void *p_data)
{
FfmpegDiracSchroQueueElement *p_new =
av_mallocz(sizeof(FfmpegDiracSchroQueueElement));
FfmpegDiracSchroQueueElement *p_new = av_mallocz(sizeof(FfmpegDiracSchroQueueElement));
if (!p_new)
return -1;
@@ -99,7 +97,7 @@ int ff_dirac_schro_queue_push_back (FfmpegDiracSchroQueue *queue, void *p_data)
return 0;
}
void *ff_dirac_schro_queue_pop (FfmpegDiracSchroQueue *queue)
void *ff_dirac_schro_queue_pop(FfmpegDiracSchroQueue *queue)
{
FfmpegDiracSchroQueueElement *top = queue->p_head;
@@ -107,7 +105,7 @@ void *ff_dirac_schro_queue_pop (FfmpegDiracSchroQueue *queue)
void *data = top->data;
queue->p_head = queue->p_head->next;
--queue->size;
av_freep (&top);
av_freep(&top);
return data;
}
@@ -28,8 +28,7 @@
#include "avcodec.h"
typedef struct
{
typedef struct {
uint16_t width;
uint16_t height;
uint16_t frame_rate_num;
@@ -39,13 +38,12 @@ typedef struct
/**
* Returns the index into the Dirac Schro common video format info table
*/
unsigned int ff_dirac_schro_get_video_format_idx (AVCodecContext *avccontext);
unsigned int ff_dirac_schro_get_video_format_idx(AVCodecContext *avccontext);
/**
* contains a single encoded frame returned from Dirac or Schroedinger
*/
typedef struct FfmpegDiracSchroEncodedFrame
{
typedef struct FfmpegDiracSchroEncodedFrame {
/** encoded frame data */
uint8_t *p_encbuf;
@@ -62,8 +60,7 @@ typedef struct FfmpegDiracSchroEncodedFrame
/**
* queue element
*/
typedef struct FfmpegDiracSchroQueueElement
{
typedef struct FfmpegDiracSchroQueueElement {
/** Data to be stored in queue*/
void *data;
/** Pointer to next element queue */
@@ -74,8 +71,7 @@ typedef struct FfmpegDiracSchroQueueElement
/**
* A simple queue implementation used in libdirac and libschroedinger
*/
typedef struct FfmpegDiracSchroQueue
{
typedef struct FfmpegDiracSchroQueue {
/** Pointer to head of queue */
FfmpegDiracSchroQueueElement *p_head;
/** Pointer to tail of queue */
@@ -92,12 +88,12 @@ void ff_dirac_schro_queue_init(FfmpegDiracSchroQueue *queue);
/**
* Add an element to the end of the queue
*/
int ff_dirac_schro_queue_push_back (FfmpegDiracSchroQueue *queue, void *p_data);
int ff_dirac_schro_queue_push_back(FfmpegDiracSchroQueue *queue, void *p_data);
/**
* Return the first element in the queue
*/
void *ff_dirac_schro_queue_pop (FfmpegDiracSchroQueue *queue);
void *ff_dirac_schro_queue_pop(FfmpegDiracSchroQueue *queue);
/**
* Free the queue resources. free_func is a function supplied by the caller to
@@ -36,8 +36,7 @@
#include <libdirac_decoder/dirac_parser.h>
/** contains a single frame returned from Dirac */
typedef struct FfmpegDiracDecoderParams
{
typedef struct FfmpegDiracDecoderParams {
/** decoder handle */
dirac_decoder_t* p_decoder;
@@ -64,13 +63,13 @@ static enum PixelFormat GetFfmpegChromaFormat(dirac_chroma_t dirac_pix_fmt)
static av_cold int libdirac_decode_init(AVCodecContext *avccontext)
{
FfmpegDiracDecoderParams *p_dirac_params = avccontext->priv_data ;
FfmpegDiracDecoderParams *p_dirac_params = avccontext->priv_data;
p_dirac_params->p_decoder = dirac_decoder_init(avccontext->debug);
if (!p_dirac_params->p_decoder)
return -1;
return 0 ;
return 0;
}
static int libdirac_decode_frame(AVCodecContext *avccontext,
@@ -88,25 +87,23 @@ static int libdirac_decode_frame(AVCodecContext *avccontext,
*data_size = 0;
if (buf_size>0) {
if (buf_size > 0) {
/* set data to decode into buffer */
dirac_buffer (p_dirac_params->p_decoder, buf, buf+buf_size);
if ((buf[4] &0x08) == 0x08 && (buf[4] & 0x03))
dirac_buffer(p_dirac_params->p_decoder, buf, buf + buf_size);
if ((buf[4] & 0x08) == 0x08 && (buf[4] & 0x03))
avccontext->has_b_frames = 1;
}
while (1) {
/* parse data and process result */
DecoderState state = dirac_parse (p_dirac_params->p_decoder);
switch (state)
{
DecoderState state = dirac_parse(p_dirac_params->p_decoder);
switch (state) {
case STATE_BUFFER:
return buf_size;
case STATE_SEQUENCE:
{
/* tell FFmpeg about sequence details */
dirac_sourceparams_t *src_params =
&p_dirac_params->p_decoder->src_params;
dirac_sourceparams_t *src_params = &p_dirac_params->p_decoder->src_params;
if (avcodec_check_dimensions(avccontext, src_params->width,
src_params->height) < 0) {
@@ -121,9 +118,9 @@ static int libdirac_decode_frame(AVCodecContext *avccontext,
avccontext->pix_fmt = GetFfmpegChromaFormat(src_params->chroma);
if (avccontext->pix_fmt == PIX_FMT_NONE) {
av_log (avccontext, AV_LOG_ERROR,
"Dirac chroma format %d not supported currently\n",
src_params->chroma);
av_log(avccontext, AV_LOG_ERROR,
"Dirac chroma format %d not supported currently\n",
src_params->chroma);
return -1;
}
@@ -140,7 +137,7 @@ static int libdirac_decode_frame(AVCodecContext *avccontext,
/* allocate output buffer */
if (!p_dirac_params->p_out_frame_buf)
p_dirac_params->p_out_frame_buf = av_malloc (pict_size);
p_dirac_params->p_out_frame_buf = av_malloc(pict_size);
buffer[0] = p_dirac_params->p_out_frame_buf;
buffer[1] = p_dirac_params->p_out_frame_buf +
pic.linesize[0] * avccontext->height;
@@ -177,20 +174,20 @@ static int libdirac_decode_frame(AVCodecContext *avccontext,
static av_cold int libdirac_decode_close(AVCodecContext *avccontext)
{
FfmpegDiracDecoderParams *p_dirac_params = avccontext->priv_data;
dirac_decoder_close (p_dirac_params->p_decoder);
dirac_decoder_close(p_dirac_params->p_decoder);
av_freep(&p_dirac_params->p_out_frame_buf);
return 0 ;
return 0;
}
static void libdirac_flush (AVCodecContext *avccontext)
static void libdirac_flush(AVCodecContext *avccontext)
{
/* Got a seek request. We will need free memory held in the private
* context and free the current Dirac decoder handle and then open
* a new decoder handle. */
libdirac_decode_close (avccontext);
libdirac_decode_init (avccontext);
libdirac_decode_close(avccontext);
libdirac_decode_init(avccontext);
return;
}
@@ -208,4 +205,4 @@ AVCodec libdirac_decoder = {
CODEC_CAP_DELAY,
.flush = libdirac_flush,
.long_name = NULL_IF_CONFIG_SMALL("libdirac Dirac 2.2"),
} ;
};
@@ -38,8 +38,7 @@
#include <libdirac_encoder/dirac_encoder.h>
/** Dirac encoder private data */
typedef struct FfmpegDiracEncoderParams
{
typedef struct FfmpegDiracEncoderParams {
/** Dirac encoder context */
dirac_encoder_context_t enc_ctx;
@@ -114,12 +113,12 @@ static const VideoFormat ff_dirac_video_formats[]={
* Returns the video format preset matching the input video dimensions and
* time base.
*/
static VideoFormat GetDiracVideoFormatPreset (AVCodecContext *avccontext)
static VideoFormat GetDiracVideoFormatPreset(AVCodecContext *avccontext)
{
unsigned int num_formats = sizeof(ff_dirac_video_formats) /
sizeof(ff_dirac_video_formats[0]);
unsigned int idx = ff_dirac_schro_get_video_format_idx (avccontext);
unsigned int idx = ff_dirac_schro_get_video_format_idx(avccontext);
return (idx < num_formats) ?
ff_dirac_video_formats[idx] : VIDEO_FORMAT_CUSTOM;
@@ -130,30 +129,27 @@ static av_cold int libdirac_encode_init(AVCodecContext *avccontext)
FfmpegDiracEncoderParams* p_dirac_params = avccontext->priv_data;
int no_local = 1;
int verbose = avccontext->debug;
int verbose = avccontext->debug;
VideoFormat preset;
/* get Dirac preset */
preset = GetDiracVideoFormatPreset(avccontext);
/* initialize the encoder context */
dirac_encoder_context_init (&(p_dirac_params->enc_ctx), preset);
dirac_encoder_context_init(&(p_dirac_params->enc_ctx), preset);
p_dirac_params->enc_ctx.src_params.chroma =
GetDiracChromaFormat(avccontext->pix_fmt);
p_dirac_params->enc_ctx.src_params.chroma = GetDiracChromaFormat(avccontext->pix_fmt);
if (p_dirac_params->enc_ctx.src_params.chroma == formatNK) {
av_log (avccontext, AV_LOG_ERROR,
"Unsupported pixel format %d. This codec supports only "
"Planar YUV formats (yuv420p, yuv422p, yuv444p\n",
avccontext->pix_fmt);
av_log(avccontext, AV_LOG_ERROR,
"Unsupported pixel format %d. This codec supports only "
"Planar YUV formats (yuv420p, yuv422p, yuv444p\n",
avccontext->pix_fmt);
return -1;
}
p_dirac_params->enc_ctx.src_params.frame_rate.numerator =
avccontext->time_base.den;
p_dirac_params->enc_ctx.src_params.frame_rate.denominator =
avccontext->time_base.num;
p_dirac_params->enc_ctx.src_params.frame_rate.numerator = avccontext->time_base.den;
p_dirac_params->enc_ctx.src_params.frame_rate.denominator = avccontext->time_base.num;
p_dirac_params->enc_ctx.src_params.width = avccontext->width;
p_dirac_params->enc_ctx.src_params.height = avccontext->height;
@@ -182,20 +178,20 @@ static av_cold int libdirac_encode_init(AVCodecContext *avccontext)
if (avccontext->flags & CODEC_FLAG_QSCALE) {
if (avccontext->global_quality) {
p_dirac_params->enc_ctx.enc_params.qf =
avccontext->global_quality / (FF_QP2LAMBDA*10.0);
p_dirac_params->enc_ctx.enc_params.qf = avccontext->global_quality
/ (FF_QP2LAMBDA * 10.0);
/* if it is not default bitrate then send target rate. */
if (avccontext->bit_rate >= 1000 &&
avccontext->bit_rate != 200000)
p_dirac_params->enc_ctx.enc_params.trate =
avccontext->bit_rate / 1000;
p_dirac_params->enc_ctx.enc_params.trate = avccontext->bit_rate
/ 1000;
} else
p_dirac_params->enc_ctx.enc_params.lossless = 1;
} else if (avccontext->bit_rate >= 1000)
p_dirac_params->enc_ctx.enc_params.trate = avccontext->bit_rate / 1000;
if ((preset > VIDEO_FORMAT_QCIF || preset < VIDEO_FORMAT_QSIF525) &&
avccontext->bit_rate == 200000)
avccontext->bit_rate == 200000)
p_dirac_params->enc_ctx.enc_params.trate = 0;
if (avccontext->flags & CODEC_FLAG_INTERLACED_ME)
@@ -203,8 +199,8 @@ static av_cold int libdirac_encode_init(AVCodecContext *avccontext)
* irrespective of the type of source material */
p_dirac_params->enc_ctx.enc_params.picture_coding_mode = 1;
p_dirac_params->p_encoder = dirac_encoder_init (&(p_dirac_params->enc_ctx),
verbose );
p_dirac_params->p_encoder = dirac_encoder_init(&(p_dirac_params->enc_ctx),
verbose);
if (!p_dirac_params->p_encoder) {
av_log(avccontext, AV_LOG_ERROR,
@@ -218,14 +214,14 @@ static av_cold int libdirac_encode_init(AVCodecContext *avccontext)
/* initialize the encoded frame queue */
ff_dirac_schro_queue_init(&p_dirac_params->enc_frame_queue);
return 0 ;
return 0;
}
static void DiracFreeFrame (void *data)
static void DiracFreeFrame(void *data)
{
FfmpegDiracSchroEncodedFrame *enc_frame = data;
av_freep (&(enc_frame->p_encbuf));
av_freep(&(enc_frame->p_encbuf));
av_free(enc_frame);
}
@@ -236,7 +232,7 @@ static int libdirac_encode_frame(AVCodecContext *avccontext,
int enc_size = 0;
dirac_encoder_state_t state;
FfmpegDiracEncoderParams* p_dirac_params = avccontext->priv_data;
FfmpegDiracSchroEncodedFrame* p_frame_output = NULL;
FfmpegDiracSchroEncodedFrame* p_frame_output = NULL;
FfmpegDiracSchroEncodedFrame* p_next_output_frame = NULL;
int go = 1;
int last_frame_in_sequence = 0;
@@ -244,7 +240,7 @@ static int libdirac_encode_frame(AVCodecContext *avccontext,
if (!data) {
/* push end of sequence if not already signalled */
if (!p_dirac_params->eos_signalled) {
dirac_encoder_end_sequence( p_dirac_params->p_encoder );
dirac_encoder_end_sequence(p_dirac_params->p_encoder);
p_dirac_params->eos_signalled = 1;
}
} else {
@@ -253,15 +249,15 @@ static int libdirac_encode_frame(AVCodecContext *avccontext,
* Input line size may differ from what the codec supports,
* especially when transcoding from one format to another.
* So use avpicture_layout to copy the frame. */
avpicture_layout ((AVPicture *)data, avccontext->pix_fmt,
avccontext->width, avccontext->height,
p_dirac_params->p_in_frame_buf,
p_dirac_params->frame_size);
avpicture_layout((AVPicture *)data, avccontext->pix_fmt,
avccontext->width, avccontext->height,
p_dirac_params->p_in_frame_buf,
p_dirac_params->frame_size);
/* load next frame */
if (dirac_encoder_load (p_dirac_params->p_encoder,
p_dirac_params->p_in_frame_buf,
p_dirac_params->frame_size ) < 0) {
if (dirac_encoder_load(p_dirac_params->p_encoder,
p_dirac_params->p_in_frame_buf,
p_dirac_params->frame_size) < 0) {
av_log(avccontext, AV_LOG_ERROR, "Unrecoverable Encoder Error."
" dirac_encoder_load failed...\n");
return -1;
@@ -271,34 +267,30 @@ static int libdirac_encode_frame(AVCodecContext *avccontext,
if (p_dirac_params->eos_pulled)
go = 0;
while(go) {
while (go) {
p_dirac_params->p_encoder->enc_buf.buffer = frame;
p_dirac_params->p_encoder->enc_buf.size = buf_size;
/* process frame */
state = dirac_encoder_output ( p_dirac_params->p_encoder );
state = dirac_encoder_output(p_dirac_params->p_encoder);
switch (state)
{
switch (state) {
case ENC_STATE_AVAIL:
case ENC_STATE_EOS:
assert (p_dirac_params->p_encoder->enc_buf.size > 0);
assert(p_dirac_params->p_encoder->enc_buf.size > 0);
/* All non-frame data is prepended to actual frame data to
* be able to set the pts correctly. So we don't write data
* to the frame output queue until we actually have a frame
*/
p_dirac_params->enc_buf = av_realloc (
p_dirac_params->enc_buf,
p_dirac_params->enc_buf_size +
p_dirac_params->p_encoder->enc_buf.size
);
p_dirac_params->enc_buf = av_realloc(p_dirac_params->enc_buf,
p_dirac_params->enc_buf_size +
p_dirac_params->p_encoder->enc_buf.size);
memcpy(p_dirac_params->enc_buf + p_dirac_params->enc_buf_size,
p_dirac_params->p_encoder->enc_buf.buffer,
p_dirac_params->p_encoder->enc_buf.size);
p_dirac_params->enc_buf_size +=
p_dirac_params->p_encoder->enc_buf.size;
p_dirac_params->enc_buf_size += p_dirac_params->p_encoder->enc_buf.size;
if (state == ENC_STATE_EOS) {
p_dirac_params->eos_pulled = 1;
@@ -313,17 +305,16 @@ static int libdirac_encode_frame(AVCodecContext *avccontext,
/* create output frame */
p_frame_output = av_mallocz(sizeof(FfmpegDiracSchroEncodedFrame));
/* set output data */
p_frame_output->size = p_dirac_params->enc_buf_size;
p_frame_output->p_encbuf = p_dirac_params->enc_buf;
p_frame_output->frame_num =
p_dirac_params->p_encoder->enc_pparams.pnum;
p_frame_output->size = p_dirac_params->enc_buf_size;
p_frame_output->p_encbuf = p_dirac_params->enc_buf;
p_frame_output->frame_num = p_dirac_params->p_encoder->enc_pparams.pnum;
if (p_dirac_params->p_encoder->enc_pparams.ptype == INTRA_PICTURE &&
p_dirac_params->p_encoder->enc_pparams.rtype == REFERENCE_PICTURE)
p_frame_output->key_frame = 1;
ff_dirac_schro_queue_push_back (&p_dirac_params->enc_frame_queue,
p_frame_output);
ff_dirac_schro_queue_push_back(&p_dirac_params->enc_frame_queue,
p_frame_output);
p_dirac_params->enc_buf_size = 0;
p_dirac_params->enc_buf = NULL;
@@ -346,12 +337,10 @@ static int libdirac_encode_frame(AVCodecContext *avccontext,
/* copy 'next' frame in queue */
if (p_dirac_params->enc_frame_queue.size == 1 &&
p_dirac_params->eos_pulled)
if (p_dirac_params->enc_frame_queue.size == 1 && p_dirac_params->eos_pulled)
last_frame_in_sequence = 1;
p_next_output_frame =
ff_dirac_schro_queue_pop(&p_dirac_params->enc_frame_queue);
p_next_output_frame = ff_dirac_schro_queue_pop(&p_dirac_params->enc_frame_queue);
if (!p_next_output_frame)
return 0;
@@ -366,12 +355,11 @@ static int libdirac_encode_frame(AVCodecContext *avccontext,
/* Append the end of sequence information to the last frame in the
* sequence. */
if (last_frame_in_sequence && p_dirac_params->enc_buf_size > 0)
{
memcpy (frame + enc_size, p_dirac_params->enc_buf,
p_dirac_params->enc_buf_size);
if (last_frame_in_sequence && p_dirac_params->enc_buf_size > 0) {
memcpy(frame + enc_size, p_dirac_params->enc_buf,
p_dirac_params->enc_buf_size);
enc_size += p_dirac_params->enc_buf_size;
av_freep (&p_dirac_params->enc_buf);
av_freep(&p_dirac_params->enc_buf);
p_dirac_params->enc_buf_size = 0;
}
@@ -386,7 +374,7 @@ static av_cold int libdirac_encode_close(AVCodecContext *avccontext)
FfmpegDiracEncoderParams* p_dirac_params = avccontext->priv_data;
/* close the encoder */
dirac_encoder_close(p_dirac_params->p_encoder );
dirac_encoder_close(p_dirac_params->p_encoder);
/* free data in the output frame queue */
ff_dirac_schro_queue_free(&p_dirac_params->enc_frame_queue,
@@ -399,7 +387,7 @@ static av_cold int libdirac_encode_close(AVCodecContext *avccontext)
/* free the input frame buffer */
av_freep(&p_dirac_params->p_in_frame_buf);
return 0 ;
return 0;
}
@@ -411,7 +399,7 @@ AVCodec libdirac_encoder = {
libdirac_encode_init,
libdirac_encode_frame,
libdirac_encode_close,
.capabilities= CODEC_CAP_DELAY,
.pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("libdirac Dirac 2.2"),
} ;
.capabilities = CODEC_CAP_DELAY,
.pix_fmts = (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("libdirac Dirac 2.2"),
};
@@ -57,8 +57,8 @@ SchroVideoFormatEnum ff_get_schro_video_format_preset(AVCodecContext *avccontext
unsigned int idx = ff_dirac_schro_get_video_format_idx (avccontext);
return (idx < num_formats) ?
ff_schro_video_formats[idx] : SCHRO_VIDEO_FORMAT_CUSTOM;
return (idx < num_formats) ? ff_schro_video_formats[idx] :
SCHRO_VIDEO_FORMAT_CUSTOM;
}
int ff_get_schro_frame_format (SchroChromaFormat schro_pix_fmt,
@@ -71,8 +71,7 @@ int ff_get_schro_frame_format (SchroChromaFormat schro_pix_fmt,
for (idx = 0; idx < num_formats; ++idx) {
if (ffmpeg_schro_pixel_format_map[idx].schro_pix_fmt == schro_pix_fmt) {
*schro_frame_fmt =
ffmpeg_schro_pixel_format_map[idx].schro_frame_fmt;
*schro_frame_fmt = ffmpeg_schro_pixel_format_map[idx].schro_frame_fmt;
return 0;
}
}
@@ -50,7 +50,7 @@ SchroVideoFormatEnum ff_get_schro_video_format_preset (AVCodecContext *avccontex
* Sets the Schroedinger frame format corresponding to the Schro chroma format
* passed. Returns 0 on success, -1 on failure.
*/
int ff_get_schro_frame_format (SchroChromaFormat schro_chroma_fmt,
SchroFrameFormat *schro_frame_fmt);
int ff_get_schro_frame_format(SchroChromaFormat schro_chroma_fmt,
SchroFrameFormat *schro_frame_fmt);
#endif /* AVCODEC_LIBSCHROEDINGER_H */
@@ -40,8 +40,7 @@
#include <schroedinger/schrovideoformat.h>
/** libschroedinger decoder private data */
typedef struct FfmpegSchroDecoderParams
{
typedef struct FfmpegSchroDecoderParams {
/** Schroedinger video format */
SchroVideoFormat *format;
@@ -64,24 +63,23 @@ typedef struct FfmpegSchroDecoderParams
AVPicture dec_pic;
} FfmpegSchroDecoderParams;
typedef struct FfmpegSchroParseUnitContext
{
typedef struct FfmpegSchroParseUnitContext {
const uint8_t *buf;
int buf_size;
} FfmpegSchroParseUnitContext;
static void libschroedinger_decode_buffer_free (SchroBuffer *schro_buf,
void *priv);
static void libschroedinger_decode_buffer_free(SchroBuffer *schro_buf,
void *priv);
static void FfmpegSchroParseContextInit (FfmpegSchroParseUnitContext *parse_ctx,
const uint8_t *buf, int buf_size)
static void FfmpegSchroParseContextInit(FfmpegSchroParseUnitContext *parse_ctx,
const uint8_t *buf, int buf_size)
{
parse_ctx->buf = buf;
parse_ctx->buf_size = buf_size;
}
static SchroBuffer* FfmpegFindNextSchroParseUnit (FfmpegSchroParseUnitContext *parse_ctx)
static SchroBuffer* FfmpegFindNextSchroParseUnit(FfmpegSchroParseUnitContext *parse_ctx)
{
SchroBuffer *enc_buf = NULL;
int next_pu_offset = 0;
@@ -107,12 +105,12 @@ static SchroBuffer* FfmpegFindNextSchroParseUnit (FfmpegSchroParseUnitContext *p
return NULL;
in_buf = av_malloc(next_pu_offset);
memcpy (in_buf, parse_ctx->buf, next_pu_offset);
enc_buf = schro_buffer_new_with_data (in_buf, next_pu_offset);
memcpy(in_buf, parse_ctx->buf, next_pu_offset);
enc_buf = schro_buffer_new_with_data(in_buf, next_pu_offset);
enc_buf->free = libschroedinger_decode_buffer_free;
enc_buf->priv = in_buf;
parse_ctx->buf += next_pu_offset;
parse_ctx->buf += next_pu_offset;
parse_ctx->buf_size -= next_pu_offset;
return enc_buf;
@@ -136,29 +134,29 @@ static enum PixelFormat GetFfmpegChromaFormat(SchroChromaFormat schro_pix_fmt)
static av_cold int libschroedinger_decode_init(AVCodecContext *avccontext)
{
FfmpegSchroDecoderParams *p_schro_params = avccontext->priv_data ;
FfmpegSchroDecoderParams *p_schro_params = avccontext->priv_data;
/* First of all, initialize our supporting libraries. */
schro_init();
schro_debug_set_level(avccontext->debug);
p_schro_params->decoder = schro_decoder_new();
p_schro_params->decoder = schro_decoder_new();
schro_decoder_set_skip_ratio(p_schro_params->decoder, 1);
if (!p_schro_params->decoder)
return -1;
/* Initialize the decoded frame queue. */
ff_dirac_schro_queue_init (&p_schro_params->dec_frame_queue);
return 0 ;
ff_dirac_schro_queue_init(&p_schro_params->dec_frame_queue);
return 0;
}
static void libschroedinger_decode_buffer_free (SchroBuffer *schro_buf,
void *priv)
static void libschroedinger_decode_buffer_free(SchroBuffer *schro_buf,
void *priv)
{
av_freep(&priv);
}
static void libschroedinger_decode_frame_free (void *frame)
static void libschroedinger_decode_frame_free(void *frame)
{
schro_frame_unref(frame);
}
@@ -168,11 +166,11 @@ static void libschroedinger_handle_first_access_unit(AVCodecContext *avccontext)
FfmpegSchroDecoderParams *p_schro_params = avccontext->priv_data;
SchroDecoder *decoder = p_schro_params->decoder;
p_schro_params->format = schro_decoder_get_video_format (decoder);
p_schro_params->format = schro_decoder_get_video_format(decoder);
/* Tell FFmpeg about sequence details. */
if(avcodec_check_dimensions(avccontext, p_schro_params->format->width,
p_schro_params->format->height) < 0) {
if (avcodec_check_dimensions(avccontext, p_schro_params->format->width,
p_schro_params->format->height) < 0) {
av_log(avccontext, AV_LOG_ERROR, "invalid dimensions (%dx%d)\n",
p_schro_params->format->width, p_schro_params->format->height);
avccontext->height = avccontext->width = 0;
@@ -180,14 +178,13 @@ static void libschroedinger_handle_first_access_unit(AVCodecContext *avccontext)
}
avccontext->height = p_schro_params->format->height;
avccontext->width = p_schro_params->format->width;
avccontext->pix_fmt =
GetFfmpegChromaFormat(p_schro_params->format->chroma_format);
if (ff_get_schro_frame_format( p_schro_params->format->chroma_format,
&p_schro_params->frame_format) == -1) {
av_log (avccontext, AV_LOG_ERROR,
"This codec currently only supports planar YUV 4:2:0, 4:2:2 "
"and 4:4:4 formats.\n");
avccontext->pix_fmt = GetFfmpegChromaFormat(p_schro_params->format->chroma_format);
if (ff_get_schro_frame_format(p_schro_params->format->chroma_format,
&p_schro_params->frame_format) == -1) {
av_log(avccontext, AV_LOG_ERROR,
"This codec currently only supports planar YUV 4:2:0, 4:2:2 "
"and 4:4:4 formats.\n");
return;
}
@@ -221,7 +218,7 @@ static int libschroedinger_decode_frame(AVCodecContext *avccontext,
*data_size = 0;
FfmpegSchroParseContextInit (&parse_ctx, buf, buf_size);
FfmpegSchroParseContextInit(&parse_ctx, buf, buf_size);
if (!buf_size) {
if (!p_schro_params->eos_signalled) {
state = schro_decoder_push_end_of_stream(decoder);
@@ -236,77 +233,74 @@ static int libschroedinger_decode_frame(AVCodecContext *avccontext,
if (SCHRO_PARSE_CODE_IS_PICTURE(enc_buf->data[4]) &&
SCHRO_PARSE_CODE_NUM_REFS(enc_buf->data[4]) > 0)
avccontext->has_b_frames = 1;
state = schro_decoder_push (decoder, enc_buf);
state = schro_decoder_push(decoder, enc_buf);
if (state == SCHRO_DECODER_FIRST_ACCESS_UNIT)
libschroedinger_handle_first_access_unit(avccontext);
libschroedinger_handle_first_access_unit(avccontext);
go = 1;
}
else
outer = 0;
format = p_schro_params->format;
while (go) {
/* Parse data and process result. */
state = schro_decoder_wait (decoder);
switch (state)
{
case SCHRO_DECODER_FIRST_ACCESS_UNIT:
libschroedinger_handle_first_access_unit (avccontext);
break;
case SCHRO_DECODER_NEED_BITS:
/* Need more input data - stop iterating over what we have. */
go = 0;
break;
case SCHRO_DECODER_NEED_FRAME:
/* Decoder needs a frame - create one and push it in. */
frame = schro_frame_new_and_alloc(NULL,
p_schro_params->frame_format,
format->width,
format->height);
schro_decoder_add_output_picture (decoder, frame);
break;
case SCHRO_DECODER_OK:
/* Pull a frame out of the decoder. */
frame = schro_decoder_pull (decoder);
if (frame)
ff_dirac_schro_queue_push_back(
&p_schro_params->dec_frame_queue,
frame);
break;
case SCHRO_DECODER_EOS:
go = 0;
p_schro_params->eos_pulled = 1;
schro_decoder_reset (decoder);
} else
outer = 0;
break;
case SCHRO_DECODER_ERROR:
return -1;
break;
format = p_schro_params->format;
while (go) {
/* Parse data and process result. */
state = schro_decoder_wait(decoder);
switch (state) {
case SCHRO_DECODER_FIRST_ACCESS_UNIT:
libschroedinger_handle_first_access_unit(avccontext);
break;
case SCHRO_DECODER_NEED_BITS:
/* Need more input data - stop iterating over what we have. */
go = 0;
break;
case SCHRO_DECODER_NEED_FRAME:
/* Decoder needs a frame - create one and push it in. */
frame = schro_frame_new_and_alloc(NULL,
p_schro_params->frame_format,
format->width,
format->height);
schro_decoder_add_output_picture(decoder, frame);
break;
case SCHRO_DECODER_OK:
/* Pull a frame out of the decoder. */
frame = schro_decoder_pull(decoder);
if (frame)
ff_dirac_schro_queue_push_back(&p_schro_params->dec_frame_queue,
frame);
break;
case SCHRO_DECODER_EOS:
go = 0;
p_schro_params->eos_pulled = 1;
schro_decoder_reset(decoder);
outer = 0;
break;
case SCHRO_DECODER_ERROR:
return -1;
break;
}
}
}
} while(outer);
} while (outer);
/* Grab next frame to be returned from the top of the queue. */
frame = ff_dirac_schro_queue_pop(&p_schro_params->dec_frame_queue);
if (frame) {
memcpy (p_schro_params->dec_pic.data[0],
frame->components[0].data,
frame->components[0].length);
memcpy(p_schro_params->dec_pic.data[0],
frame->components[0].data,
frame->components[0].length);
memcpy (p_schro_params->dec_pic.data[1],
frame->components[1].data,
frame->components[1].length);
memcpy(p_schro_params->dec_pic.data[1],
frame->components[1].data,
frame->components[1].length);
memcpy (p_schro_params->dec_pic.data[2],
frame->components[2].data,
frame->components[2].length);
memcpy(p_schro_params->dec_pic.data[2],
frame->components[2].data,
frame->components[2].length);
/* Fill picture with current buffer data from Schroedinger. */
avpicture_fill(picture, p_schro_params->dec_pic.data[0],
@@ -316,7 +310,7 @@ static int libschroedinger_decode_frame(AVCodecContext *avccontext,
*data_size = sizeof(AVPicture);
/* Now free the frame resources. */
libschroedinger_decode_frame_free (frame);
libschroedinger_decode_frame_free(frame);
}
return buf_size;
}
@@ -326,36 +320,36 @@ static av_cold int libschroedinger_decode_close(AVCodecContext *avccontext)
{
FfmpegSchroDecoderParams *p_schro_params = avccontext->priv_data;
/* Free the decoder. */
schro_decoder_free (p_schro_params->decoder);
schro_decoder_free(p_schro_params->decoder);
av_freep(&p_schro_params->format);
avpicture_free (&p_schro_params->dec_pic);
avpicture_free(&p_schro_params->dec_pic);
/* Free data in the output frame queue. */
ff_dirac_schro_queue_free (&p_schro_params->dec_frame_queue,
libschroedinger_decode_frame_free);
ff_dirac_schro_queue_free(&p_schro_params->dec_frame_queue,
libschroedinger_decode_frame_free);
return 0 ;
return 0;
}
static void libschroedinger_flush (AVCodecContext *avccontext)
static void libschroedinger_flush(AVCodecContext *avccontext)
{
/* Got a seek request. Free the decoded frames queue and then reset
* the decoder */
FfmpegSchroDecoderParams *p_schro_params = avccontext->priv_data;
/* Free data in the output frame queue. */
ff_dirac_schro_queue_free (&p_schro_params->dec_frame_queue,
libschroedinger_decode_frame_free);
ff_dirac_schro_queue_free(&p_schro_params->dec_frame_queue,
libschroedinger_decode_frame_free);
ff_dirac_schro_queue_init (&p_schro_params->dec_frame_queue);
ff_dirac_schro_queue_init(&p_schro_params->dec_frame_queue);
schro_decoder_reset(p_schro_params->decoder);
p_schro_params->eos_pulled = 0;
p_schro_params->eos_signalled = 0;
}
AVCodec libschroedinger_decoder = {
"libschroedinger",
"libschroedinger",
CODEC_TYPE_VIDEO,
CODEC_ID_DIRAC,
sizeof(FfmpegSchroDecoderParams),
@@ -40,8 +40,7 @@
/** libschroedinger encoder private data */
typedef struct FfmpegSchroEncoderParams
{
typedef struct FfmpegSchroEncoderParams {
/** Schroedinger video format */
SchroVideoFormat *format;
@@ -86,16 +85,16 @@ static int SetSchroChromaFormat(AVCodecContext *avccontext)
for (idx = 0; idx < num_formats; ++idx) {
if (ffmpeg_schro_pixel_format_map[idx].ff_pix_fmt ==
avccontext->pix_fmt) {
avccontext->pix_fmt) {
p_schro_params->format->chroma_format =
ffmpeg_schro_pixel_format_map[idx].schro_pix_fmt;
return 0;
}
}
av_log (avccontext, AV_LOG_ERROR,
"This codec currently only supports planar YUV 4:2:0, 4:2:2"
" and 4:4:4 formats.\n");
av_log(avccontext, AV_LOG_ERROR,
"This codec currently only supports planar YUV 4:2:0, 4:2:2"
" and 4:4:4 formats.\n");
return -1;
}
@@ -121,8 +120,8 @@ static int libschroedinger_encode_init(AVCodecContext *avccontext)
preset = ff_get_schro_video_format_preset(avccontext);
p_schro_params->format =
schro_encoder_get_video_format(p_schro_params->encoder);
schro_video_format_set_std_video_format (p_schro_params->format, preset);
p_schro_params->format->width = avccontext->width;
schro_video_format_set_std_video_format(p_schro_params->format, preset);
p_schro_params->format->width = avccontext->width;
p_schro_params->format->height = avccontext->height;
if (SetSchroChromaFormat(avccontext) == -1)
@@ -130,9 +129,9 @@ static int libschroedinger_encode_init(AVCodecContext *avccontext)
if (ff_get_schro_frame_format(p_schro_params->format->chroma_format,
&p_schro_params->frame_format) == -1) {
av_log (avccontext, AV_LOG_ERROR,
"This codec currently supports only planar YUV 4:2:0, 4:2:2"
" and 4:4:4 formats.\n");
av_log(avccontext, AV_LOG_ERROR,
"This codec currently supports only planar YUV 4:2:0, 4:2:2"
" and 4:4:4 formats.\n");
return -1;
}
@@ -146,18 +145,17 @@ static int libschroedinger_encode_init(AVCodecContext *avccontext)
avccontext->coded_frame = &p_schro_params->picture;
if (!avccontext->gop_size) {
schro_encoder_setting_set_double (p_schro_params->encoder,
"gop_structure",
SCHRO_ENCODER_GOP_INTRA_ONLY);
schro_encoder_setting_set_double(p_schro_params->encoder,
"gop_structure",
SCHRO_ENCODER_GOP_INTRA_ONLY);
if (avccontext->coder_type == FF_CODER_TYPE_VLC)
schro_encoder_setting_set_double (p_schro_params->encoder,
"enable_noarith", 1);
}
else {
schro_encoder_setting_set_double (p_schro_params->encoder,
"gop_structure",
SCHRO_ENCODER_GOP_BIREF);
schro_encoder_setting_set_double(p_schro_params->encoder,
"enable_noarith", 1);
} else {
schro_encoder_setting_set_double(p_schro_params->encoder,
"gop_structure",
SCHRO_ENCODER_GOP_BIREF);
avccontext->has_b_frames = 1;
}
@@ -165,39 +163,38 @@ static int libschroedinger_encode_init(AVCodecContext *avccontext)
if (avccontext->flags & CODEC_FLAG_QSCALE) {
if (!avccontext->global_quality) {
/* lossless coding */
schro_encoder_setting_set_double (p_schro_params->encoder,
"rate_control",
SCHRO_ENCODER_RATE_CONTROL_LOSSLESS);
schro_encoder_setting_set_double(p_schro_params->encoder,
"rate_control",
SCHRO_ENCODER_RATE_CONTROL_LOSSLESS);
} else {
int noise_threshold;
schro_encoder_setting_set_double (p_schro_params->encoder,
"rate_control",
SCHRO_ENCODER_RATE_CONTROL_CONSTANT_NOISE_THRESHOLD);
schro_encoder_setting_set_double(p_schro_params->encoder,
"rate_control",
SCHRO_ENCODER_RATE_CONTROL_CONSTANT_NOISE_THRESHOLD);
noise_threshold = avccontext->global_quality/FF_QP2LAMBDA;
noise_threshold = avccontext->global_quality / FF_QP2LAMBDA;
if (noise_threshold > 100)
noise_threshold = 100;
schro_encoder_setting_set_double (p_schro_params->encoder,
"noise_threshold",
noise_threshold);
schro_encoder_setting_set_double(p_schro_params->encoder,
"noise_threshold",
noise_threshold);
}
}
else {
schro_encoder_setting_set_double ( p_schro_params->encoder,
"rate_control",
SCHRO_ENCODER_RATE_CONTROL_CONSTANT_BITRATE);
} else {
schro_encoder_setting_set_double(p_schro_params->encoder,
"rate_control",
SCHRO_ENCODER_RATE_CONTROL_CONSTANT_BITRATE);
schro_encoder_setting_set_double (p_schro_params->encoder,
"bitrate",
avccontext->bit_rate);
schro_encoder_setting_set_double(p_schro_params->encoder,
"bitrate",
avccontext->bit_rate);
}
if (avccontext->flags & CODEC_FLAG_INTERLACED_ME)
/* All material can be coded as interlaced or progressive
irrespective of the type of source material. */
schro_encoder_setting_set_double (p_schro_params->encoder,
"interlaced_coding", 1);
schro_encoder_setting_set_double(p_schro_params->encoder,
"interlaced_coding", 1);
/* FIXME: Signal range hardcoded to 8-bit data until both libschroedinger
* and libdirac support other bit-depth data. */
@@ -209,32 +206,32 @@ static int libschroedinger_encode_init(AVCodecContext *avccontext)
p_schro_params->format);
/* Set the debug level. */
schro_debug_set_level (avccontext->debug);
schro_debug_set_level(avccontext->debug);
schro_encoder_start (p_schro_params->encoder);
schro_encoder_start(p_schro_params->encoder);
/* Initialize the encoded frame queue. */
ff_dirac_schro_queue_init (&p_schro_params->enc_frame_queue);
return 0 ;
ff_dirac_schro_queue_init(&p_schro_params->enc_frame_queue);
return 0;
}
static SchroFrame *libschroedinger_frame_from_data (AVCodecContext *avccontext,
void *in_data)
static SchroFrame *libschroedinger_frame_from_data(AVCodecContext *avccontext,
void *in_data)
{
FfmpegSchroEncoderParams* p_schro_params = avccontext->priv_data;
SchroFrame *in_frame;
/* Input line size may differ from what the codec supports. Especially
* when transcoding from one format to another. So use avpicture_layout
* to copy the frame. */
in_frame = schro_frame_new_and_alloc (NULL,
p_schro_params->frame_format,
p_schro_params->format->width,
p_schro_params->format->height);
in_frame = schro_frame_new_and_alloc(NULL,
p_schro_params->frame_format,
p_schro_params->format->width,
p_schro_params->format->height);
avpicture_layout ((AVPicture *)in_data, avccontext->pix_fmt,
avccontext->width, avccontext->height,
in_frame->components[0].data,
p_schro_params->frame_size);
avpicture_layout((AVPicture *)in_data, avccontext->pix_fmt,
avccontext->width, avccontext->height,
in_frame->components[0].data,
p_schro_params->frame_size);
return in_frame;
}
@@ -243,7 +240,7 @@ static void SchroedingerFreeFrame(void *data)
{
FfmpegDiracSchroEncodedFrame *enc_frame = data;
av_freep (&(enc_frame->p_encbuf));
av_freep(&(enc_frame->p_encbuf));
av_free(enc_frame);
}
@@ -269,8 +266,8 @@ static int libschroedinger_encode_frame(AVCodecContext *avccontext,
}
} else {
/* Allocate frame data to schro input buffer. */
SchroFrame *in_frame = libschroedinger_frame_from_data (avccontext,
data);
SchroFrame *in_frame = libschroedinger_frame_from_data(avccontext,
data);
/* Load next frame. */
schro_encoder_push_frame(encoder, in_frame);
}
@@ -280,28 +277,24 @@ static int libschroedinger_encode_frame(AVCodecContext *avccontext,
/* Now check to see if we have any output from the encoder. */
while (go) {
SchroStateEnum state;
SchroStateEnum state;
state = schro_encoder_wait(encoder);
switch (state)
{
switch (state) {
case SCHRO_STATE_HAVE_BUFFER:
case SCHRO_STATE_END_OF_STREAM:
enc_buf = schro_encoder_pull (encoder,
&presentation_frame);
assert (enc_buf->length > 0);
assert (enc_buf->length <= buf_size);
enc_buf = schro_encoder_pull(encoder, &presentation_frame);
assert(enc_buf->length > 0);
assert(enc_buf->length <= buf_size);
parse_code = enc_buf->data[4];
/* All non-frame data is prepended to actual frame data to
* be able to set the pts correctly. So we don't write data
* to the frame output queue until we actually have a frame
*/
p_schro_params->enc_buf = av_realloc (
p_schro_params->enc_buf,
p_schro_params->enc_buf_size + enc_buf->length
);
p_schro_params->enc_buf = av_realloc(p_schro_params->enc_buf,
p_schro_params->enc_buf_size + enc_buf->length);
memcpy(p_schro_params->enc_buf+p_schro_params->enc_buf_size,
memcpy(p_schro_params->enc_buf + p_schro_params->enc_buf_size,
enc_buf->data, enc_buf->length);
p_schro_params->enc_buf_size += enc_buf->length;
@@ -312,7 +305,7 @@ static int libschroedinger_encode_frame(AVCodecContext *avccontext,
}
if (!SCHRO_PARSE_CODE_IS_PICTURE(parse_code)) {
schro_buffer_unref (enc_buf);
schro_buffer_unref(enc_buf);
break;
}
@@ -332,12 +325,12 @@ static int libschroedinger_encode_frame(AVCodecContext *avccontext,
(enc_buf->data[15] << 8) +
enc_buf->data[16];
ff_dirac_schro_queue_push_back (&p_schro_params->enc_frame_queue,
p_frame_output);
ff_dirac_schro_queue_push_back(&p_schro_params->enc_frame_queue,
p_frame_output);
p_schro_params->enc_buf_size = 0;
p_schro_params->enc_buf = NULL;
schro_buffer_unref (enc_buf);
schro_buffer_unref(enc_buf);
break;
@@ -360,8 +353,7 @@ static int libschroedinger_encode_frame(AVCodecContext *avccontext,
p_schro_params->eos_pulled)
last_frame_in_sequence = 1;
p_frame_output =
ff_dirac_schro_queue_pop (&p_schro_params->enc_frame_queue);
p_frame_output = ff_dirac_schro_queue_pop(&p_schro_params->enc_frame_queue);
if (!p_frame_output)
return 0;
@@ -376,17 +368,16 @@ static int libschroedinger_encode_frame(AVCodecContext *avccontext,
/* Append the end of sequence information to the last frame in the
* sequence. */
if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0)
{
memcpy (frame + enc_size, p_schro_params->enc_buf,
p_schro_params->enc_buf_size);
if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0) {
memcpy(frame + enc_size, p_schro_params->enc_buf,
p_schro_params->enc_buf_size);
enc_size += p_schro_params->enc_buf_size;
av_freep (&p_schro_params->enc_buf);
av_freep(&p_schro_params->enc_buf);
p_schro_params->enc_buf_size = 0;
}
/* free frame */
SchroedingerFreeFrame (p_frame_output);
SchroedingerFreeFrame(p_frame_output);
return enc_size;
}
@@ -397,12 +388,12 @@ static int libschroedinger_encode_close(AVCodecContext *avccontext)
FfmpegSchroEncoderParams* p_schro_params = avccontext->priv_data;
/* Close the encoder. */
/* Close the encoder. */
schro_encoder_free(p_schro_params->encoder);
/* Free data in the output frame queue. */
ff_dirac_schro_queue_free (&p_schro_params->enc_frame_queue,
SchroedingerFreeFrame);
ff_dirac_schro_queue_free(&p_schro_params->enc_frame_queue,
SchroedingerFreeFrame);
/* Free the encoder buffer. */
@@ -412,7 +403,7 @@ static int libschroedinger_encode_close(AVCodecContext *avccontext)
/* Free the video format structure. */
av_freep(&p_schro_params->format);
return 0 ;
return 0;
}
@@ -424,7 +415,7 @@ AVCodec libschroedinger_encoder = {
libschroedinger_encode_init,
libschroedinger_encode_frame,
libschroedinger_encode_close,
.capabilities= CODEC_CAP_DELAY,
.pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("libschroedinger Dirac 2.2"),
.capabilities = CODEC_CAP_DELAY,
.pix_fmts = (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("libschroedinger Dirac 2.2"),
};
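For context, the ff_dirac_schro_queue_* helpers reformatted above form a small FIFO used by both wrappers to hold encoded/decoded frames. Below is a minimal, illustrative usage sketch based only on the signatures visible in this change; the header path, the integer payload, and the item count are assumptions for the example, not part of the commit.

/* Hypothetical standalone example of the queue API touched in this commit. */
#include "libavcodec/libdirac_libschro.h"  /* assumed header name for the queue API */
#include "libavutil/mem.h"                 /* av_malloc, av_free */

static void example_free(void *data)
{
    av_free(data);
}

static void queue_usage_example(void)
{
    FfmpegDiracSchroQueue q;
    void *item;
    int i;

    /* Empty queue: head/tail NULL, size 0. */
    ff_dirac_schro_queue_init(&q);

    /* Push a few hypothetical payloads; push_back returns -1 on allocation failure. */
    for (i = 0; i < 10; i++) {
        int *payload = av_malloc(sizeof(*payload));
        if (!payload)
            break;
        *payload = i;
        if (ff_dirac_schro_queue_push_back(&q, payload) < 0) {
            av_free(payload);
            break;
        }
    }

    /* Drain in FIFO order; pop is expected to return NULL once the queue is empty. */
    while ((item = ff_dirac_schro_queue_pop(&q)))
        av_free(item);

    /* Alternatively, release anything still queued with a caller-supplied free_func,
     * as the flush/close paths in the wrappers do. */
    ff_dirac_schro_queue_free(&q, example_free);
}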