Signed-off-by: Luca Barbato <lu_zero@gentoo.org>
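
Note on the decoder-side pattern: every decoder touched below stops reading
avctx->palctrl and instead caches the palette delivered as packet side data.
A minimal sketch of that pattern, with placeholder names (ctx->pal and frame
stand in for each decoder's own context field and AVFrame; the real code is in
the hunks below):

    /* Fetch AV_PKT_DATA_PALETTE from the packet, cache it in the decoder
     * context, and export the cached copy with every decoded frame. */
    if (avctx->bits_per_coded_sample <= 8) {
        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE,
                                                     NULL);
        if (pal) {
            /* a new palette arrived with this packet */
            frame->palette_has_changed = 1;
            memcpy(ctx->pal, pal, AVPALETTE_SIZE);   /* ctx->pal: uint32_t[256] */
        }
        /* always export the cached palette, changed or not */
        memcpy(frame->data[1], ctx->pal, AVPALETTE_SIZE);
    }
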
@@ -50,6 +50,8 @@ typedef struct EightBpsContext {
     unsigned char planes;
     unsigned char planemap[4];
 
+    uint32_t pal[256];
+
 } EightBpsContext;
@@ -129,13 +131,16 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
         }
     }
 
-    if (avctx->palctrl) {
-        memcpy (c->pic.data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
-        if (avctx->palctrl->palette_changed) {
+    if (avctx->bits_per_coded_sample <= 8) {
+        const uint8_t *pal = av_packet_get_side_data(avpkt,
+                                                     AV_PKT_DATA_PALETTE,
+                                                     NULL);
+        if (pal) {
             c->pic.palette_has_changed = 1;
-            avctx->palctrl->palette_changed = 0;
-        } else
-            c->pic.palette_has_changed = 0;
+            memcpy(c->pal, pal, AVPALETTE_SIZE);
+        }
+
+        memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
     }
 
     *data_size = sizeof(AVFrame);
@@ -164,10 +169,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
         avctx->pix_fmt = PIX_FMT_PAL8;
         c->planes = 1;
        c->planemap[0] = 0; // 1st plane is palette indexes
-        if (avctx->palctrl == NULL) {
-            av_log(avctx, AV_LOG_ERROR, "Error: PAL8 format but no palette from demuxer.\n");
-            return -1;
-        }
         break;
     case 24:
         avctx->pix_fmt = avctx->get_format(avctx, pixfmt_rgb24);
@@ -67,6 +67,7 @@ typedef struct CinepakContext {
 
     int sega_film_skip_bytes;
 
+    uint32_t pal[256];
 } CinepakContext;
 
 static void cinepak_decode_codebook (cvid_codebook *codebook,
@@ -395,7 +396,7 @@ static av_cold int cinepak_decode_init(AVCodecContext *avctx)
     s->sega_film_skip_bytes = -1;  /* uninitialized state */
 
     // check for paletted data
-    if ((avctx->palctrl == NULL) || (avctx->bits_per_coded_sample == 40)) {
+    if (avctx->bits_per_coded_sample != 8) {
         s->palette_video = 0;
         avctx->pix_fmt = PIX_FMT_YUV420P;
     } else {
@@ -427,17 +428,19 @@ static int cinepak_decode_frame(AVCodecContext *avctx,
         return -1;
     }
 
-    cinepak_decode(s);
-
     if (s->palette_video) {
-        memcpy (s->frame.data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
-        if (avctx->palctrl->palette_changed) {
+        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
+        if (pal) {
             s->frame.palette_has_changed = 1;
-            avctx->palctrl->palette_changed = 0;
-        } else
-            s->frame.palette_has_changed = 0;
+            memcpy(s->pal, pal, AVPALETTE_SIZE);
+        }
     }
 
+    cinepak_decode(s);
+
+    if (s->palette_video)
+        memcpy (s->frame.data[1], s->pal, AVPALETTE_SIZE);
+
     *data_size = sizeof(AVFrame);
     *(AVFrame*)data = s->frame;
@@ -72,6 +72,7 @@ typedef struct IdcinContext {
     hnode huff_nodes[256][HUF_TOKENS*2];
     int num_huff_nodes[256];
 
+    uint32_t pal[256];
 } IdcinContext;
 
 /*
@@ -213,7 +214,7 @@ static int idcin_decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     IdcinContext *s = avctx->priv_data;
-    AVPaletteControl *palette_control = avctx->palctrl;
+    const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
 
     s->buf = buf;
     s->size = buf_size;
@@ -228,13 +229,12 @@ static int idcin_decode_frame(AVCodecContext *avctx,
     idcin_decode_vlcs(s);
 
-    /* make the palette available on the way out */
-    memcpy(s->frame.data[1], palette_control->palette, PALETTE_COUNT * 4);
-    /* If palette changed inform application*/
-    if (palette_control->palette_changed) {
-        palette_control->palette_changed = 0;
+    if (pal) {
         s->frame.palette_has_changed = 1;
+        memcpy(s->pal, pal, AVPALETTE_SIZE);
     }
+    /* make the palette available on the way out */
+    memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
 
     *data_size = sizeof(AVFrame);
     *(AVFrame*)data = s->frame;
@@ -77,6 +77,7 @@ typedef struct IpvideoContext {
     int stride;
     int upper_motion_limit_offset;
 
+    uint32_t pal[256];
 } IpvideoContext;
 
 #define CHECK_STREAM_PTR(stream_ptr, stream_end, n) \
@@ -969,7 +970,7 @@ static void ipvideo_decode_opcodes(IpvideoContext *s)
 
     if (!s->is_16bpp) {
         /* this is PAL8, so make the palette available */
-        memcpy(s->current_frame.data[1], s->avctx->palctrl->palette, PALETTE_COUNT * 4);
+        memcpy(s->current_frame.data[1], s->pal, AVPALETTE_SIZE);
 
         s->stride = s->current_frame.linesize[0];
         s->stream_ptr = s->buf + 14;  /* data starts 14 bytes in */
@@ -1023,10 +1024,6 @@ static av_cold int ipvideo_decode_init(AVCodecContext *avctx)
 
     s->is_16bpp = avctx->bits_per_coded_sample == 16;
     avctx->pix_fmt = s->is_16bpp ? PIX_FMT_RGB555 : PIX_FMT_PAL8;
-    if (!s->is_16bpp && s->avctx->palctrl == NULL) {
-        av_log(avctx, AV_LOG_ERROR, " Interplay video: palette expected.\n");
-        return -1;
-    }
 
     dsputil_init(&s->dsp, avctx);
 
@@ -1046,7 +1043,6 @@ static int ipvideo_decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     IpvideoContext *s = avctx->priv_data;
-    AVPaletteControl *palette_control = avctx->palctrl;
 
     /* compressed buffer needs to be large enough to at least hold an entire
      * decoding map */
@@ -1063,13 +1059,16 @@ static int ipvideo_decode_frame(AVCodecContext *avctx,
         return -1;
     }
 
-    ipvideo_decode_opcodes(s);
-
-    if (!s->is_16bpp && palette_control->palette_changed) {
-        palette_control->palette_changed = 0;
-        s->current_frame.palette_has_changed = 1;
+    if (!s->is_16bpp) {
+        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
+        if (pal) {
+            s->current_frame.palette_has_changed = 1;
+            memcpy(s->pal, pal, AVPALETTE_SIZE);
+        }
     }
 
+    ipvideo_decode_opcodes(s);
+
     *data_size = sizeof(AVFrame);
     *(AVFrame*)data = s->current_frame;
@@ -233,6 +233,7 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa
     int i;
     int header;
     int blocksize;
+    const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
 
     if (ctx->pic.data[0])
         avctx->release_buffer(avctx, &ctx->pic);
@@ -264,13 +265,6 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa
         ctx->pic.pict_type = FF_P_TYPE;
     }
 
-    /* if palette has been changed, copy it from palctrl */
-    if (ctx->avctx->palctrl && ctx->avctx->palctrl->palette_changed) {
-        memcpy(ctx->pal, ctx->avctx->palctrl->palette, AVPALETTE_SIZE);
-        ctx->setpal = 1;
-        ctx->avctx->palctrl->palette_changed = 0;
-    }
-
     if (header & KMVC_PALETTE) {
         ctx->pic.palette_has_changed = 1;
         // palette starts from index 1 and has 127 entries
@@ -279,6 +273,11 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa
         }
     }
 
+    if (pal) {
+        ctx->pic.palette_has_changed = 1;
+        memcpy(ctx->pal, pal, AVPALETTE_SIZE);
+    }
+
     if (ctx->setpal) {
         ctx->setpal = 0;
         ctx->pic.palette_has_changed = 1;
@@ -374,9 +373,6 @@ static av_cold int decode_init(AVCodecContext * avctx)
             src += 4;
         }
         c->setpal = 1;
-        if (c->avctx->palctrl) {
-            c->avctx->palctrl->palette_changed = 0;
-        }
     }
 
     avctx->pix_fmt = PIX_FMT_PAL8;
@@ -26,9 +26,6 @@
  *   http://www.pcisys.net/~melanson/codecs/
  *
  * The MS RLE decoder outputs PAL8 colorspace data.
- *
- * Note that this decoder expects the palette colors from the end of the
- * BITMAPINFO header passed through palctrl.
  */
 
 #include <stdio.h>
@@ -46,6 +43,7 @@ typedef struct MsrleContext {
     const unsigned char *buf;
     int size;
 
+    uint32_t pal[256];
 } MsrleContext;
 
 static av_cold int msrle_decode_init(AVCodecContext *avctx)
@@ -91,13 +89,16 @@ static int msrle_decode_frame(AVCodecContext *avctx,
         return -1;
     }
 
-    if (s->avctx->palctrl) {
-        /* make the palette available */
-        memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE);
-        if (s->avctx->palctrl->palette_changed) {
+    if (avctx->bits_per_coded_sample <= 8) {
+        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
+        if (pal) {
             s->frame.palette_has_changed = 1;
-            s->avctx->palctrl->palette_changed = 0;
+            memcpy(s->pal, pal, AVPALETTE_SIZE);
         }
+        /* make the palette available */
+        memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
     }
 
     /* FIXME how to correctly detect RLE ??? */
@@ -25,9 +25,6 @@
  * For more information about the MS Video-1 format, visit:
  *   http://www.pcisys.net/~melanson/codecs/
  *
- * This decoder outputs either PAL8 or RGB555 data, depending on the
- * whether a RGB palette was passed through palctrl;
- * if it's present, then the data is PAL8; RGB555 otherwise.
  */
 
 #include <stdio.h>
@@ -55,6 +52,7 @@ typedef struct Msvideo1Context {
 
     int mode_8bit;  /* if it's not 8-bit, it's 16-bit */
 
+    uint32_t pal[256];
 } Msvideo1Context;
 
 static av_cold int msvideo1_decode_init(AVCodecContext *avctx)
@@ -64,7 +62,7 @@ static av_cold int msvideo1_decode_init(AVCodecContext *avctx)
     s->avctx = avctx;
 
     /* figure out the colorspace based on the presence of a palette */
-    if (s->avctx->palctrl) {
+    if (s->avctx->bits_per_coded_sample == 8) {
         s->mode_8bit = 1;
         avctx->pix_fmt = PIX_FMT_PAL8;
     } else {
@@ -173,13 +171,8 @@ static void msvideo1_decode_8bit(Msvideo1Context *s)
     }
 
     /* make the palette available on the way out */
-    if (s->avctx->pix_fmt == PIX_FMT_PAL8) {
-        memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE);
-        if (s->avctx->palctrl->palette_changed) {
-            s->frame.palette_has_changed = 1;
-            s->avctx->palctrl->palette_changed = 0;
-        }
-    }
+    if (s->avctx->pix_fmt == PIX_FMT_PAL8)
+        memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
 }
 
 static void msvideo1_decode_16bit(Msvideo1Context *s)
@@ -309,6 +302,15 @@ static int msvideo1_decode_frame(AVCodecContext *avctx,
         return -1;
     }
 
+    if (s->mode_8bit) {
+        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
+
+        if (pal) {
+            memcpy(s->pal, pal, AVPALETTE_SIZE);
+            s->frame.palette_has_changed = 1;
+        }
+    }
+
     if (s->mode_8bit)
         msvideo1_decode_8bit(s);
     else
@@ -30,6 +30,7 @@ typedef struct QpegContext{
     AVCodecContext *avctx;
     AVFrame pic;
     uint8_t *refdata;
+    uint32_t pal[256];
 } QpegContext;
 
 static void qpeg_decode_intra(const uint8_t *src, uint8_t *dst, int size,
@@ -256,6 +257,7 @@ static int decode_frame(AVCodecContext *avctx,
     AVFrame * const p= (AVFrame*)&a->pic;
     uint8_t* outdata;
     int delta;
+    const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
 
     if(p->data[0])
        avctx->release_buffer(avctx, p);
@@ -274,11 +276,11 @@ static int decode_frame(AVCodecContext *avctx,
     }
 
     /* make the palette available on the way out */
-    memcpy(a->pic.data[1], a->avctx->palctrl->palette, AVPALETTE_SIZE);
-    if (a->avctx->palctrl->palette_changed) {
+    if (pal) {
         a->pic.palette_has_changed = 1;
-        a->avctx->palctrl->palette_changed = 0;
+        memcpy(a->pal, pal, AVPALETTE_SIZE);
     }
+    memcpy(a->pic.data[1], a->pal, AVPALETTE_SIZE);
 
     *data_size = sizeof(AVFrame);
     *(AVFrame*)data = a->pic;
@@ -289,10 +291,6 @@ static int decode_frame(AVCodecContext *avctx,
 static av_cold int decode_init(AVCodecContext *avctx){
     QpegContext * const a = avctx->priv_data;
 
-    if (!avctx->palctrl) {
-        av_log(avctx, AV_LOG_FATAL, "Missing required palette via palctrl\n");
-        return -1;
-    }
     a->avctx = avctx;
     avctx->pix_fmt= PIX_FMT_PAL8;
     a->refdata = av_malloc(avctx->width * avctx->height);
@@ -46,6 +46,7 @@ typedef struct QtrleContext {
     const unsigned char *buf;
     int size;
 
+    uint32_t pal[256];
 } QtrleContext;
 
 #define CHECK_STREAM_PTR(n) \
@@ -511,12 +512,15 @@ static int qtrle_decode_frame(AVCodecContext *avctx,
     }
 
     if(has_palette) {
-        /* make the palette available on the way out */
-        memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE);
-        if (s->avctx->palctrl->palette_changed) {
+        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
+        if (pal) {
            s->frame.palette_has_changed = 1;
-            s->avctx->palctrl->palette_changed = 0;
+            memcpy(s->pal, pal, AVPALETTE_SIZE);
         }
+        /* make the palette available on the way out */
+        memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
     }
 
 done:
@@ -158,9 +158,13 @@ static int raw_decode(AVCodecContext *avctx,
        (av_pix_fmt_descriptors[avctx->pix_fmt].flags & PIX_FMT_PAL))){
         frame->data[1]= context->palette;
     }
-    if (avctx->palctrl && avctx->palctrl->palette_changed) {
-        memcpy(frame->data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
-        avctx->palctrl->palette_changed = 0;
+    if (avctx->pix_fmt == PIX_FMT_PAL8) {
+        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
+
+        if (pal) {
+            memcpy(frame->data[1], pal, AVPALETTE_SIZE);
+            frame->palette_has_changed = 1;
+        }
     }
     if(avctx->pix_fmt==PIX_FMT_BGR24 && ((frame->linesize[0]+3)&~3)*avctx->height <= buf_size)
         frame->linesize[0] = (frame->linesize[0]+3)&~3;
@@ -54,6 +54,7 @@ typedef struct SmcContext {
     unsigned char color_quads[COLORS_PER_TABLE * CQUAD];
     unsigned char color_octets[COLORS_PER_TABLE * COCTET];
 
+    uint32_t pal[256];
 } SmcContext;
 
 #define GET_BLOCK_COUNT() \
@@ -110,11 +111,7 @@ static void smc_decode_stream(SmcContext *s)
     int color_octet_index = 0;
 
     /* make the palette available */
-    memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE);
-    if (s->avctx->palctrl->palette_changed) {
-        s->frame.palette_has_changed = 1;
-        s->avctx->palctrl->palette_changed = 0;
-    }
+    memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
 
     chunk_size = AV_RB32(&s->buf[stream_ptr]) & 0x00FFFFFF;
     stream_ptr += 4;
@@ -440,6 +437,7 @@ static int smc_decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     SmcContext *s = avctx->priv_data;
+    const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
 
     s->buf = buf;
     s->size = buf_size;
@@ -452,6 +450,11 @@ static int smc_decode_frame(AVCodecContext *avctx,
         return -1;
     }
 
+    if (pal) {
+        s->frame.palette_has_changed = 1;
+        memcpy(s->pal, pal, AVPALETTE_SIZE);
+    }
+
     smc_decode_stream(s);
 
     *data_size = sizeof(AVFrame);
@@ -171,13 +171,6 @@ static int decode_frame(AVCodecContext *avctx,
         stride = -p->linesize[0];
     }
 
-    if(avctx->pix_fmt == PIX_FMT_PAL8 && avctx->palctrl){
-        memcpy(p->data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
-        if(avctx->palctrl->palette_changed){
-            p->palette_has_changed = 1;
-            avctx->palctrl->palette_changed = 0;
-        }
-    }
     if(colors){
         size_t pal_size;
         if((colors + first_clr) > 256){
@@ -60,6 +60,8 @@ typedef struct TsccContext {
     unsigned char* decomp_buf;
     int height;
     z_stream zstream;
+
+    uint32_t pal[256];
 } CamtasiaContext;
 
 /*
@@ -111,11 +113,13 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
 
     /* make the palette available on the way out */
     if (c->avctx->pix_fmt == PIX_FMT_PAL8) {
-        memcpy(c->pic.data[1], c->avctx->palctrl->palette, AVPALETTE_SIZE);
-        if (c->avctx->palctrl->palette_changed) {
+        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
+        if (pal) {
             c->pic.palette_has_changed = 1;
-            c->avctx->palctrl->palette_changed = 0;
+            memcpy(c->pal, pal, AVPALETTE_SIZE);
         }
+
+        memcpy(c->pic.data[1], c->pal, AVPALETTE_SIZE);
     }
 
     *data_size = sizeof(AVFrame);
@@ -44,6 +44,8 @@ typedef struct {
 
     uint16_t stream_language_index;
 
+    int palette_changed;
+    uint32_t palette[256];
 } ASFStream;
 
 typedef uint8_t ff_asf_guid[16];
@@ -365,15 +365,14 @@ static int asf_read_stream_properties(AVFormatContext *s, int64_t size)
         /* This is true for all paletted codecs implemented in ffmpeg */
         if (st->codec->extradata_size && (st->codec->bits_per_coded_sample <= 8)) {
             int av_unused i;
-            st->codec->palctrl = av_mallocz(sizeof(AVPaletteControl));
 #if HAVE_BIGENDIAN
             for (i = 0; i < FFMIN(st->codec->extradata_size, AVPALETTE_SIZE)/4; i++)
-                st->codec->palctrl->palette[i] = av_bswap32(((uint32_t*)st->codec->extradata)[i]);
+                asf_st->palette[i] = av_bswap32(((uint32_t*)st->codec->extradata)[i]);
 #else
-            memcpy(st->codec->palctrl->palette, st->codec->extradata,
-                   FFMIN(st->codec->extradata_size, AVPALETTE_SIZE));
+            memcpy(asf_st->palette, st->codec->extradata,
+                   FFMIN(st->codec->extradata_size, AVPALETTE_SIZE));
 #endif
-            st->codec->palctrl->palette_changed = 1;
+            asf_st->palette_changed = 1;
         }
 
         st->codec->codec_tag = tag1;
@@ -966,6 +965,17 @@ static int ff_asf_parse_packet(AVFormatContext *s, AVIOContext *pb, AVPacket *pk
             asf_st->pkt.stream_index = asf->stream_index;
             asf_st->pkt.pos =
             asf_st->packet_pos= asf->packet_pos;
+            if (asf_st->pkt.data && asf_st->palette_changed) {
+                uint8_t *pal;
+                pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
+                                              AVPALETTE_SIZE);
+                if (!pal) {
+                    av_log(s, AV_LOG_ERROR, "Cannot append palette to packet\n");
+                } else {
+                    memcpy(pal, asf_st->palette, AVPALETTE_SIZE);
+                    asf_st->palette_changed = 0;
+                }
+            }
 //printf("new packet: stream:%d key:%d packet_key:%d audio:%d size:%d\n",
 //asf->stream_index, asf->packet_key_frame, asf_st->pkt.flags & AV_PKT_FLAG_KEY,
 //s->streams[asf->stream_index]->codec->codec_type == AVMEDIA_TYPE_AUDIO, asf->packet_obj_size);
@@ -1127,7 +1137,6 @@ static int asf_read_close(AVFormatContext *s)
     asf_reset_header(s);
     for(i=0;i<s->nb_streams;i++) {
         AVStream *st = s->streams[i];
-        av_free(st->codec->palctrl);
     }
     return 0;
 }
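
Note on the demuxer-side pattern: asfdec.c above, and avidec.c, idcin.c,
ipmovie.c and mov.c below, all follow the same shape: when the container
signals a palette change, the demuxer stashes it in its per-stream context and
attaches it to the next packet with av_packet_new_side_data(). A rough sketch,
with stream_ctx as a placeholder for the demuxer's own stream state:

    /* Attach a pending palette to the outgoing packet as side data. */
    if (stream_ctx->palette_changed) {
        uint8_t *pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
                                               AVPALETTE_SIZE);
        if (!pal) {
            av_log(s, AV_LOG_ERROR, "Cannot append palette to packet\n");
        } else {
            memcpy(pal, stream_ctx->palette, AVPALETTE_SIZE);
            stream_ctx->palette_changed = 0;   /* send the palette only once */
        }
    }
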
@@ -590,15 +590,14 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
                 /* This code assumes that extradata contains only palette. */
                 /* This is true for all paletted codecs implemented in FFmpeg. */
                 if (st->codec->extradata_size && (st->codec->bits_per_coded_sample <= 8)) {
-                    st->codec->palctrl = av_mallocz(sizeof(AVPaletteControl));
 #if HAVE_BIGENDIAN
                     for (i = 0; i < FFMIN(st->codec->extradata_size, AVPALETTE_SIZE)/4; i++)
-                        st->codec->palctrl->palette[i] = av_bswap32(((uint32_t*)st->codec->extradata)[i]);
+                        ast->pal[i] = av_bswap32(((uint32_t*)st->codec->extradata)[i]);
 #else
-                    memcpy(st->codec->palctrl->palette, st->codec->extradata,
+                    memcpy(ast->pal, st->codec->extradata,
                            FFMIN(st->codec->extradata_size, AVPALETTE_SIZE));
 #endif
-                    st->codec->palctrl->palette_changed = 1;
+                    ast->has_pal = 1;
                 }
 
                 print_tag("video", tag1, 0);
@@ -932,14 +931,14 @@ resync:
                 return err;
 
             if(ast->has_pal && pkt->data && pkt->size<(unsigned)INT_MAX/2){
-                void *ptr= av_realloc(pkt->data, pkt->size + 4*256 + FF_INPUT_BUFFER_PADDING_SIZE);
-                if(ptr){
-                    ast->has_pal=0;
-                    pkt->size += 4*256;
-                    pkt->data= ptr;
-                    memcpy(pkt->data + pkt->size - 4*256, ast->pal, 4*256);
-                }else
-                    av_log(s, AV_LOG_ERROR, "Failed to append palette\n");
+                uint8_t *pal;
+                pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
+                if(!pal){
+                    av_log(s, AV_LOG_ERROR, "Failed to allocate data for palette\n");
+                }else{
+                    memcpy(pal, ast->pal, AVPALETTE_SIZE);
+                    ast->has_pal = 0;
+                }
             }
 
             if (CONFIG_DV_DEMUXER && avi->dv_demux) {
@@ -1340,7 +1339,6 @@ static int avi_read_close(AVFormatContext *s)
     for(i=0;i<s->nb_streams;i++) {
         AVStream *st = s->streams[i];
         AVIStream *ast = st->priv_data;
-        av_free(st->codec->palctrl);
         if (ast) {
             if (ast->sub_ctx) {
                 av_freep(&ast->sub_ctx->pb);
@@ -86,8 +86,6 @@ typedef struct IdcinDemuxContext {
     int audio_present;
 
     int64_t pts;
-
-    AVPaletteControl palctrl;
 } IdcinDemuxContext;
 
 static int idcin_probe(AVProbeData *p)
@@ -172,8 +170,6 @@ static int idcin_read_header(AVFormatContext *s,
     if (avio_read(pb, st->codec->extradata, HUFFMAN_TABLE_SIZE) !=
         HUFFMAN_TABLE_SIZE)
         return AVERROR(EIO);
-    /* save a reference in order to transport the palette */
-    st->codec->palctrl = &idcin->palctrl;
 
     /* if sample rate is 0, assume no audio */
     if (sample_rate) {
@@ -226,6 +222,7 @@ static int idcin_read_packet(AVFormatContext *s,
     int palette_scale;
     unsigned char r, g, b;
     unsigned char palette_buffer[768];
+    uint32_t palette[256];
 
     if (s->pb->eof_reached)
         return AVERROR(EIO);
@@ -236,7 +233,6 @@ static int idcin_read_packet(AVFormatContext *s,
             return AVERROR(EIO);
     } else if (command == 1) {
         /* trigger a palette change */
-        idcin->palctrl.palette_changed = 1;
         if (avio_read(pb, palette_buffer, 768) != 768)
             return AVERROR(EIO);
         /* scale the palette as necessary */
@@ -251,7 +247,7 @@ static int idcin_read_packet(AVFormatContext *s,
             r = palette_buffer[i * 3    ] << palette_scale;
             g = palette_buffer[i * 3 + 1] << palette_scale;
             b = palette_buffer[i * 3 + 2] << palette_scale;
-            idcin->palctrl.palette[i] = (r << 16) | (g << 8) | (b);
+            palette[i] = (r << 16) | (g << 8) | (b);
         }
     }
@@ -262,6 +258,15 @@ static int idcin_read_packet(AVFormatContext *s,
         ret= av_get_packet(pb, pkt, chunk_size);
         if (ret < 0)
             return ret;
+        if (command == 1) {
+            uint8_t *pal;
+
+            pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
+                                          AVPALETTE_SIZE);
+            if (!pal)
+                return AVERROR(ENOMEM);
+            memcpy(pal, palette, AVPALETTE_SIZE);
+        }
         pkt->stream_index = idcin->video_stream_index;
         pkt->pts = idcin->pts;
     } else {
@@ -97,6 +97,8 @@ typedef struct IPMVEContext {
     unsigned int video_width;
     unsigned int video_height;
     int64_t video_pts;
+    uint32_t palette[256];
+    int has_palette;
 
     unsigned int audio_bits;
     unsigned int audio_channels;
@@ -116,8 +118,6 @@ typedef struct IPMVEContext {
 
     int64_t next_chunk_offset;
 
-    AVPaletteControl palette_control;
-
 } IPMVEContext;
 
 static int load_ipmovie_packet(IPMVEContext *s, AVIOContext *pb,
@@ -162,6 +162,17 @@ static int load_ipmovie_packet(IPMVEContext *s, AVIOContext *pb,
         if (av_new_packet(pkt, s->decode_map_chunk_size + s->video_chunk_size))
             return CHUNK_NOMEM;
 
+        if (s->has_palette) {
+            uint8_t *pal;
+
+            pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
+                                          AVPALETTE_SIZE);
+            if (pal) {
+                memcpy(pal, s->palette, AVPALETTE_SIZE);
+                s->has_palette = 0;
+            }
+        }
+
         pkt->pos= s->decode_map_chunk_offset;
         avio_seek(pb, s->decode_map_chunk_offset, SEEK_SET);
         s->decode_map_chunk_offset = 0;
@@ -456,10 +467,9 @@ static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb,
                 r = scratch[j++] * 4;
                 g = scratch[j++] * 4;
                 b = scratch[j++] * 4;
-                s->palette_control.palette[i] = (r << 16) | (g << 8) | (b);
+                s->palette[i] = (r << 16) | (g << 8) | (b);
             }
-            /* indicate a palette change */
-            s->palette_control.palette_changed = 1;
+            s->has_palette = 1;
             break;
 
         case OPCODE_SET_PALETTE_COMPRESSED:
@@ -573,9 +583,6 @@ static int ipmovie_read_header(AVFormatContext *s,
     st->codec->height = ipmovie->video_height;
     st->codec->bits_per_coded_sample = ipmovie->video_bpp;
 
-    /* palette considerations */
-    st->codec->palctrl = &ipmovie->palette_control;
-
     if (ipmovie->audio_type) {
         st = av_new_stream(s, 0);
         if (!st)
@@ -123,6 +123,8 @@ typedef struct MOVStreamContext {
     int width;            ///< tkhd width
     int height;           ///< tkhd height
     int dts_shift;        ///< dts shift when ctts is negative
+    uint32_t palette[256];
+    int has_palette;
 } MOVStreamContext;
 
 typedef struct MOVContext {
@@ -1027,7 +1027,6 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
             unsigned int color_start, color_count, color_end;
             unsigned char r, g, b;
 
-            st->codec->palctrl = av_malloc(sizeof(*st->codec->palctrl));
             if (color_greyscale) {
                 int color_index, color_dec;
                 /* compute the greyscale palette */
@@ -1037,7 +1036,7 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
                 color_dec = 256 / (color_count - 1);
                 for (j = 0; j < color_count; j++) {
                     r = g = b = color_index;
-                    st->codec->palctrl->palette[j] =
+                    sc->palette[j] =
                         (r << 16) | (g << 8) | (b);
                     color_index -= color_dec;
                     if (color_index < 0)
@@ -1058,7 +1057,7 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
                     r = color_table[j * 3 + 0];
                     g = color_table[j * 3 + 1];
                     b = color_table[j * 3 + 2];
-                    st->codec->palctrl->palette[j] =
+                    sc->palette[j] =
                         (r << 16) | (g << 8) | (b);
                 }
             } else {
@@ -1080,12 +1079,12 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
                         avio_r8(pb);
                         b = avio_r8(pb);
                         avio_r8(pb);
-                        st->codec->palctrl->palette[j] =
+                        sc->palette[j] =
                             (r << 16) | (g << 8) | (b);
                     }
                 }
             }
-            st->codec->palctrl->palette_changed = 1;
+            sc->has_palette = 1;
         }
     } else if(st->codec->codec_type==AVMEDIA_TYPE_AUDIO) {
         int bits_per_sample, flags;
@@ -2433,6 +2432,17 @@ static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
     ret = av_get_packet(sc->pb, pkt, sample->size);
     if (ret < 0)
         return ret;
+    if (sc->has_palette) {
+        uint8_t *pal;
+
+        pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
+        if (!pal) {
+            av_log(mov->fc, AV_LOG_ERROR, "Cannot append palette to packet\n");
+        } else {
+            memcpy(pal, sc->palette, AVPALETTE_SIZE);
+            sc->has_palette = 0;
+        }
+    }
 #if CONFIG_DV_DEMUXER
     if (mov->dv_demux && sc->dv_audio_container) {
         dv_produce_packet(mov->dv_demux, pkt, pkt->data, pkt->size);
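
Size note (not part of the patch): the caches introduced above are declared as
uint32_t pal[256] or uint32_t palette[256], i.e. 256 * 4 = 1024 bytes, which is
exactly AVPALETTE_SIZE, so the memcpy(..., AVPALETTE_SIZE) calls fill or drain
one whole cache; the old PALETTE_COUNT * 4 copies were the same amount. A
compile-time check of that assumption, for illustration only:

    /* Fails to compile if the palette cache and AVPALETTE_SIZE ever diverge. */
    typedef char palette_cache_size_check
        [256 * sizeof(uint32_t) == AVPALETTE_SIZE ? 1 : -1];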