@@ -279,21 +279,21 @@ OBJS-$(CONFIG_NELLYMOSER_DECODER) += nellymoserdec.o nellymoser.o
 OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o
 OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o
 OBJS-$(CONFIG_PAM_DECODER) += pnmdec.o pnm.o
-OBJS-$(CONFIG_PAM_ENCODER) += pamenc.o pnm.o
+OBJS-$(CONFIG_PAM_ENCODER) += pamenc.o
 OBJS-$(CONFIG_PBM_DECODER) += pnmdec.o pnm.o
-OBJS-$(CONFIG_PBM_ENCODER) += pnmenc.o pnm.o
+OBJS-$(CONFIG_PBM_ENCODER) += pnmenc.o
 OBJS-$(CONFIG_PCX_DECODER) += pcx.o
 OBJS-$(CONFIG_PCX_ENCODER) += pcxenc.o
 OBJS-$(CONFIG_PGM_DECODER) += pnmdec.o pnm.o
-OBJS-$(CONFIG_PGM_ENCODER) += pnmenc.o pnm.o
+OBJS-$(CONFIG_PGM_ENCODER) += pnmenc.o
 OBJS-$(CONFIG_PGMYUV_DECODER) += pnmdec.o pnm.o
-OBJS-$(CONFIG_PGMYUV_ENCODER) += pnmenc.o pnm.o
+OBJS-$(CONFIG_PGMYUV_ENCODER) += pnmenc.o
 OBJS-$(CONFIG_PGSSUB_DECODER) += pgssubdec.o
 OBJS-$(CONFIG_PICTOR_DECODER) += pictordec.o cga_data.o
 OBJS-$(CONFIG_PNG_DECODER) += png.o pngdec.o pngdsp.o
 OBJS-$(CONFIG_PNG_ENCODER) += png.o pngenc.o
 OBJS-$(CONFIG_PPM_DECODER) += pnmdec.o pnm.o
-OBJS-$(CONFIG_PPM_ENCODER) += pnmenc.o pnm.o
+OBJS-$(CONFIG_PPM_ENCODER) += pnmenc.o
 OBJS-$(CONFIG_PRORES_DECODER) += proresdec.o proresdata.o proresdsp.o
 OBJS-$(CONFIG_PRORES_ENCODER) += proresenc.o proresdata.o proresdsp.o
 OBJS-$(CONFIG_PTX_DECODER) += ptx.o
@@ -22,14 +22,12 @@
 #include "avcodec.h"
 #include "bytestream.h"
 #include "internal.h"
-#include "pnm.h"
 static int pam_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pict, int *got_packet)
 {
-    PNMContext *s = avctx->priv_data;
-    AVFrame * const p = &s->picture;
+    uint8_t *bytestream_start, *bytestream, *bytestream_end;
+    const AVFrame * const p = pict;
     int i, h, w, n, linesize, depth, maxval, ret;
     const char *tuple_type;
     uint8_t *ptr;
@@ -41,13 +39,9 @@ static int pam_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
         return ret;
     }
-    *p = *pict;
-    p->pict_type = AV_PICTURE_TYPE_I;
-    p->key_frame = 1;
-    s->bytestream_start =
-    s->bytestream = pkt->data;
-    s->bytestream_end = pkt->data + pkt->size;
+    bytestream_start =
+    bytestream = pkt->data;
+    bytestream_end = pkt->data + pkt->size;
     h = avctx->height;
     w = avctx->width;
@@ -79,10 +73,10 @@ static int pam_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     default:
         return -1;
     }
-    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
+    snprintf(bytestream, bytestream_end - bytestream,
              "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLTYPE %s\nENDHDR\n",
              w, h, depth, maxval, tuple_type);
-    s->bytestream += strlen(s->bytestream);
+    bytestream += strlen(bytestream);
     ptr = p->data[0];
     linesize = p->linesize[0];
@@ -94,33 +88,50 @@ static int pam_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
         for (i = 0; i < h; i++) {
             for (j = 0; j < w; j++) {
                 v = ((uint32_t *)ptr)[j];
-                bytestream_put_be24(&s->bytestream, v);
-                *s->bytestream++ = v >> 24;
+                bytestream_put_be24(&bytestream, v);
+                *bytestream++ = v >> 24;
             }
             ptr += linesize;
         }
     } else {
         for (i = 0; i < h; i++) {
-            memcpy(s->bytestream, ptr, n);
-            s->bytestream += n;
-            ptr += linesize;
+            memcpy(bytestream, ptr, n);
+            bytestream += n;
+            ptr += linesize;
         }
     }
-    pkt->size = s->bytestream - s->bytestream_start;
+    pkt->size = bytestream - bytestream_start;
     pkt->flags |= AV_PKT_FLAG_KEY;
     *got_packet = 1;
     return 0;
 }
+static av_cold int pam_encode_init(AVCodecContext *avctx)
+{
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
+    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+    avctx->coded_frame->key_frame = 1;
+    return 0;
+}
+static av_cold int pam_encode_close(AVCodecContext *avctx)
+{
+    av_frame_free(&avctx->coded_frame);
+    return 0;
+}
 AVCodec ff_pam_encoder = {
     .name = "pam",
     .long_name = NULL_IF_CONFIG_SMALL("PAM (Portable AnyMap) image"),
     .type = AVMEDIA_TYPE_VIDEO,
     .id = AV_CODEC_ID_PAM,
-    .priv_data_size = sizeof(PNMContext),
-    .init = ff_pnm_init,
+    .init = pam_encode_init,
+    .close = pam_encode_close,
     .encode2 = pam_encode_frame,
     .pix_fmts = (const enum AVPixelFormat[]){
         AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB32, AV_PIX_FMT_GRAY8, AV_PIX_FMT_MONOWHITE,
@@ -184,13 +184,3 @@ int ff_pnm_decode_header(AVCodecContext *avctx, PNMContext * const s)
     }
     return 0;
 }
-av_cold int ff_pnm_init(AVCodecContext *avctx)
-{
-    PNMContext *s = avctx->priv_data;
-    avcodec_get_frame_defaults(&s->picture);
-    avctx->coded_frame = &s->picture;
-    return 0;
-}
@@ -28,12 +28,10 @@ typedef struct PNMContext {
     uint8_t *bytestream;
     uint8_t *bytestream_start;
     uint8_t *bytestream_end;
-    AVFrame picture;
     int maxval; ///< maximum value of a pixel
     int type;
 } PNMContext;
 int ff_pnm_decode_header(AVCodecContext *avctx, PNMContext * const s);
-int ff_pnm_init(AVCodecContext *avctx);
 #endif /* AVCODEC_PNM_H */
@@ -23,14 +23,12 @@
 #include "avcodec.h"
 #include "bytestream.h"
 #include "internal.h"
-#include "pnm.h"
 static int pnm_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pict, int *got_packet)
 {
-    PNMContext *s = avctx->priv_data;
-    AVFrame * const p = &s->picture;
+    uint8_t *bytestream, *bytestream_start, *bytestream_end;
+    const AVFrame * const p = pict;
     int i, h, h1, c, n, linesize, ret;
     uint8_t *ptr, *ptr1, *ptr2;
@@ -41,13 +39,9 @@ static int pnm_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
         return ret;
     }
-    *p = *pict;
-    p->pict_type = AV_PICTURE_TYPE_I;
-    p->key_frame = 1;
-    s->bytestream_start =
-    s->bytestream = pkt->data;
-    s->bytestream_end = pkt->data + pkt->size;
+    bytestream_start =
+    bytestream = pkt->data;
+    bytestream_end = pkt->data + pkt->size;
     h = avctx->height;
     h1 = h;
@@ -85,22 +79,22 @@ static int pnm_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     default:
         return -1;
     }
-    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
+    snprintf(bytestream, bytestream_end - bytestream,
              "P%c\n%d %d\n", c, avctx->width, h1);
-    s->bytestream += strlen(s->bytestream);
+    bytestream += strlen(bytestream);
     if (avctx->pix_fmt != AV_PIX_FMT_MONOWHITE) {
         int maxdepth = (1 << (av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].depth_minus1 + 1)) - 1;
-        snprintf(s->bytestream, s->bytestream_end - s->bytestream,
+        snprintf(bytestream, bytestream_end - bytestream,
                  "%d\n", maxdepth);
-        s->bytestream += strlen(s->bytestream);
+        bytestream += strlen(bytestream);
     }
     ptr = p->data[0];
     linesize = p->linesize[0];
     for (i = 0; i < h; i++) {
-        memcpy(s->bytestream, ptr, n);
-        s->bytestream += n;
-        ptr += linesize;
+        memcpy(bytestream, ptr, n);
+        bytestream += n;
+        ptr += linesize;
     }
     if (avctx->pix_fmt == AV_PIX_FMT_YUV420P || avctx->pix_fmt == AV_PIX_FMT_YUV420P16BE) {
@@ -109,21 +103,38 @@ static int pnm_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
         ptr1 = p->data[1];
         ptr2 = p->data[2];
         for (i = 0; i < h; i++) {
-            memcpy(s->bytestream, ptr1, n);
-            s->bytestream += n;
-            memcpy(s->bytestream, ptr2, n);
-            s->bytestream += n;
+            memcpy(bytestream, ptr1, n);
+            bytestream += n;
+            memcpy(bytestream, ptr2, n);
+            bytestream += n;
             ptr1 += p->linesize[1];
             ptr2 += p->linesize[2];
         }
     }
-    pkt->size = s->bytestream - s->bytestream_start;
+    pkt->size = bytestream - bytestream_start;
     pkt->flags |= AV_PKT_FLAG_KEY;
     *got_packet = 1;
     return 0;
 }
+static av_cold int pnm_encode_init(AVCodecContext *avctx)
+{
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
+    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+    avctx->coded_frame->key_frame = 1;
+    return 0;
+}
+static av_cold int pnm_encode_close(AVCodecContext *avctx)
+{
+    av_frame_free(&avctx->coded_frame);
+    return 0;
+}
 #if CONFIG_PGM_ENCODER
 AVCodec ff_pgm_encoder = {
@@ -131,8 +142,8 @@ AVCodec ff_pgm_encoder = {
     .long_name = NULL_IF_CONFIG_SMALL("PGM (Portable GrayMap) image"),
     .type = AVMEDIA_TYPE_VIDEO,
     .id = AV_CODEC_ID_PGM,
-    .priv_data_size = sizeof(PNMContext),
-    .init = ff_pnm_init,
+    .init = pnm_encode_init,
+    .close = pnm_encode_close,
     .encode2 = pnm_encode_frame,
     .pix_fmts = (const enum AVPixelFormat[]){
         AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_NONE
@@ -146,8 +157,8 @@ AVCodec ff_pgmyuv_encoder = {
     .long_name = NULL_IF_CONFIG_SMALL("PGMYUV (Portable GrayMap YUV) image"),
     .type = AVMEDIA_TYPE_VIDEO,
     .id = AV_CODEC_ID_PGMYUV,
-    .priv_data_size = sizeof(PNMContext),
-    .init = ff_pnm_init,
+    .init = pnm_encode_init,
+    .close = pnm_encode_close,
     .encode2 = pnm_encode_frame,
     .pix_fmts = (const enum AVPixelFormat[]){
         AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_NONE
@@ -161,8 +172,8 @@ AVCodec ff_ppm_encoder = {
     .long_name = NULL_IF_CONFIG_SMALL("PPM (Portable PixelMap) image"),
     .type = AVMEDIA_TYPE_VIDEO,
     .id = AV_CODEC_ID_PPM,
-    .priv_data_size = sizeof(PNMContext),
-    .init = ff_pnm_init,
+    .init = pnm_encode_init,
+    .close = pnm_encode_close,
     .encode2 = pnm_encode_frame,
     .pix_fmts = (const enum AVPixelFormat[]){
         AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB48BE, AV_PIX_FMT_NONE
@@ -176,8 +187,8 @@ AVCodec ff_pbm_encoder = {
     .long_name = NULL_IF_CONFIG_SMALL("PBM (Portable BitMap) image"),
     .type = AVMEDIA_TYPE_VIDEO,
     .id = AV_CODEC_ID_PBM,
-    .priv_data_size = sizeof(PNMContext),
-    .init = ff_pnm_init,
+    .init = pnm_encode_init,
+    .close = pnm_encode_close,
     .encode2 = pnm_encode_frame,
     .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_MONOWHITE,
         AV_PIX_FMT_NONE },
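
The hunks above change only encoder internals: the per-frame bytestream pointers move from PNMContext into locals, and the coded_frame bookkeeping moves into dedicated init/close callbacks, while the encode2 entry point and its semantics are unchanged. As a caller-side illustration only (not part of the patch), the following is a minimal sketch of driving one of these encoders through the public libavcodec API of this generation; it assumes avcodec_register_all() has already been called, and the helper name encode_gray8_to_pgm() is hypothetical.

/* Caller-side sketch (not part of the patch): encode one GRAY8 buffer with
 * the PGM encoder via avcodec_encode_video2().  Error handling is
 * abbreviated; encode_gray8_to_pgm() is a hypothetical helper name. */
#include <libavcodec/avcodec.h>

static int encode_gray8_to_pgm(const uint8_t *gray, int width, int height,
                               AVPacket *out_pkt)
{
    AVCodec *codec        = avcodec_find_encoder(AV_CODEC_ID_PGM);
    AVCodecContext *avctx = codec ? avcodec_alloc_context3(codec) : NULL;
    AVFrame *frame        = av_frame_alloc();
    int got_packet = 0, ret;

    if (!avctx || !frame)
        return AVERROR(ENOMEM);

    avctx->width     = width;
    avctx->height    = height;
    avctx->pix_fmt   = AV_PIX_FMT_GRAY8;
    avctx->time_base = (AVRational){ 1, 25 };

    if ((ret = avcodec_open2(avctx, codec, NULL)) < 0)
        return ret;

    /* Wrap the caller's buffer; pnm_encode_frame() reads only data[] and
     * linesize[] from the frame it is handed. */
    frame->format      = avctx->pix_fmt;
    frame->width       = width;
    frame->height      = height;
    frame->data[0]     = (uint8_t *)gray;
    frame->linesize[0] = width;

    av_init_packet(out_pkt);
    out_pkt->data = NULL;   /* let the encoder allocate the packet buffer */
    out_pkt->size = 0;

    ret = avcodec_encode_video2(avctx, out_pkt, frame, &got_packet);

    av_frame_free(&frame);
    avcodec_close(avctx);
    av_free(avctx);

    return ret < 0 ? ret : (got_packet ? 0 : AVERROR(EINVAL));
}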