/*
 * PNM image format
 * Copyright (c) 2002, 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "bytestream.h"
#include "pnm.h"

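/*
 * Shared init for all the PNM encoders and decoders below: reset the
 * context's AVFrame and expose it as avctx->coded_frame.
 */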
static av_cold int common_init(AVCodecContext *avctx)
{
    PNMContext *s = avctx->priv_data;

    avcodec_get_frame_defaults((AVFrame*)&s->picture);
    avctx->coded_frame = (AVFrame*)&s->picture;

    return 0;
}

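/*
 * Shared decode function for the PBM/PGM/PPM/PGMYUV/PAM decoders below.
 * ff_pnm_decode_header() parses the ASCII header out of the packet, then the
 * raw raster that follows is copied into the output frame line by line:
 * grayscale samples with a maxval below the full 8- or 16-bit range are
 * rescaled up, and PGMYUV data is split into luma and half-size chroma planes.
 */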
static int pnm_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            AVPacket *avpkt)
{
    const uint8_t *buf   = avpkt->data;
    int buf_size         = avpkt->size;
    PNMContext * const s = avctx->priv_data;
    AVFrame *picture     = data;
    AVFrame * const p    = (AVFrame*)&s->picture;
    int i, n, linesize, h, upgrade = 0;
    unsigned char *ptr;

    s->bytestream_start =
    s->bytestream       = buf;
    s->bytestream_end   = buf + buf_size;

    if (ff_pnm_decode_header(avctx, s) < 0)
        return -1;

    if (p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference = 0;
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    p->pict_type = FF_I_TYPE;
    p->key_frame = 1;

    switch (avctx->pix_fmt) {
    default:
        return -1;
    case PIX_FMT_RGB48BE:
        n = avctx->width * 6;
        goto do_read;
    case PIX_FMT_RGB24:
        n = avctx->width * 3;
        goto do_read;
    case PIX_FMT_GRAY8:
        n = avctx->width;
        if (s->maxval < 255)
            upgrade = 1;
        goto do_read;
    case PIX_FMT_GRAY16BE:
    case PIX_FMT_GRAY16LE:
        n = avctx->width * 2;
        if (s->maxval < 65535)
            upgrade = 2;
        goto do_read;
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        n = (avctx->width + 7) >> 3;
    do_read:
        ptr      = p->data[0];
        linesize = p->linesize[0];
        if (s->bytestream + n * avctx->height > s->bytestream_end)
            return -1;
        for (i = 0; i < avctx->height; i++) {
            if (!upgrade)
                memcpy(ptr, s->bytestream, n);
            else if (upgrade == 1) {
                unsigned int j, f = (255 * 128 + s->maxval / 2) / s->maxval;
                for (j = 0; j < n; j++)
                    ptr[j] = (s->bytestream[j] * f + 64) >> 7;
            } else if (upgrade == 2) {
                unsigned int j, v, f = (65535 * 32768 + s->maxval / 2) / s->maxval;
                for (j = 0; j < n / 2; j++) {
                    v = be2me_16(((uint16_t *)s->bytestream)[j]);
                    ((uint16_t *)ptr)[j] = (v * f + 16384) >> 15;
                }
            }
            s->bytestream += n;
            ptr           += linesize;
        }
        break;
    case PIX_FMT_YUV420P:
        {
            unsigned char *ptr1, *ptr2;

            n        = avctx->width;
            ptr      = p->data[0];
            linesize = p->linesize[0];
            if (s->bytestream + n * avctx->height * 3 / 2 > s->bytestream_end)
                return -1;
            for (i = 0; i < avctx->height; i++) {
                memcpy(ptr, s->bytestream, n);
                s->bytestream += n;
                ptr           += linesize;
            }
            ptr1 = p->data[1];
            ptr2 = p->data[2];
            n  >>= 1;
            h    = avctx->height >> 1;
            for (i = 0; i < h; i++) {
                memcpy(ptr1, s->bytestream, n);
                s->bytestream += n;
                memcpy(ptr2, s->bytestream, n);
                s->bytestream += n;
                ptr1 += p->linesize[1];
                ptr2 += p->linesize[2];
            }
        }
        break;
    case PIX_FMT_RGB32:
        ptr      = p->data[0];
        linesize = p->linesize[0];
        if (s->bytestream + avctx->width * avctx->height * 4 > s->bytestream_end)
            return -1;
        for (i = 0; i < avctx->height; i++) {
            int j, r, g, b, a;

            for (j = 0; j < avctx->width; j++) {
                r = *s->bytestream++;
                g = *s->bytestream++;
                b = *s->bytestream++;
                a = *s->bytestream++;
                ((uint32_t *)ptr)[j] = (a << 24) | (r << 16) | (g << 8) | b;
            }
            ptr += linesize;
        }
        break;
    }
    *picture   = *(AVFrame*)&s->picture;
    *data_size = sizeof(AVPicture);

    return s->bytestream - s->bytestream_start;
}

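/*
 * Shared encode function for the PBM/PGM/PPM/PGMYUV encoders below. It emits
 * a "P4"/"P5"/"P6" header (plus a maxval line for everything except
 * monochrome) followed by the raw raster. For PGMYUV the luma plane and the
 * two half-resolution chroma planes are written under a single P5 header
 * whose height is 3*h/2.
 */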
static int pnm_encode_frame(AVCodecContext *avctx, unsigned char *outbuf,
                            int buf_size, void *data)
{
    PNMContext *s     = avctx->priv_data;
    AVFrame *pict     = data;
    AVFrame * const p = (AVFrame*)&s->picture;
    int i, h, h1, c, n, linesize;
    uint8_t *ptr, *ptr1, *ptr2;

    if (buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200) {
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    *p           = *pict;
    p->pict_type = FF_I_TYPE;
    p->key_frame = 1;

    s->bytestream_start =
    s->bytestream       = outbuf;
    s->bytestream_end   = outbuf + buf_size;

    h  = avctx->height;
    h1 = h;
    switch (avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        c = '4';
        n = (avctx->width + 7) >> 3;
        break;
    case PIX_FMT_GRAY8:
        c = '5';
        n = avctx->width;
        break;
    case PIX_FMT_GRAY16BE:
        c = '5';
        n = avctx->width * 2;
        break;
    case PIX_FMT_RGB24:
        c = '6';
        n = avctx->width * 3;
        break;
    case PIX_FMT_RGB48BE:
        c = '6';
        n = avctx->width * 6;
        break;
    case PIX_FMT_YUV420P:
        c  = '5';
        n  = avctx->width;
        h1 = (h * 3) / 2;
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P%c\n%d %d\n",
             c, avctx->width, h1);
    s->bytestream += strlen(s->bytestream);
    if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
        snprintf(s->bytestream, s->bytestream_end - s->bytestream,
                 "%d\n", (avctx->pix_fmt != PIX_FMT_GRAY16BE && avctx->pix_fmt != PIX_FMT_RGB48BE) ? 255 : 65535);
        s->bytestream += strlen(s->bytestream);
    }

    ptr      = p->data[0];
    linesize = p->linesize[0];
    for (i = 0; i < h; i++) {
        memcpy(s->bytestream, ptr, n);
        s->bytestream += n;
        ptr           += linesize;
    }

    if (avctx->pix_fmt == PIX_FMT_YUV420P) {
        h >>= 1;
        n >>= 1;
        ptr1 = p->data[1];
        ptr2 = p->data[2];
        for (i = 0; i < h; i++) {
            memcpy(s->bytestream, ptr1, n);
            s->bytestream += n;
            memcpy(s->bytestream, ptr2, n);
            s->bytestream += n;
            ptr1 += p->linesize[1];
            ptr2 += p->linesize[2];
        }
    }
    return s->bytestream - s->bytestream_start;
}

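/*
 * PAM ("P7") encoder: writes a WIDTH/HEIGHT/DEPTH/MAXVAL/TUPLTYPE/ENDHDR
 * header and then the raw raster. For RGB32 input each 32-bit pixel is split
 * into its 24-bit RGB part (stored big-endian) plus the alpha byte; the
 * other formats are copied row by row.
 */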
static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf,
                            int buf_size, void *data)
{
    PNMContext *s     = avctx->priv_data;
    AVFrame *pict     = data;
    AVFrame * const p = (AVFrame*)&s->picture;
    int i, h, w, n, linesize, depth, maxval;
    const char *tuple_type;
    uint8_t *ptr;

    if (buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200) {
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    *p           = *pict;
    p->pict_type = FF_I_TYPE;
    p->key_frame = 1;

    s->bytestream_start =
    s->bytestream       = outbuf;
    s->bytestream_end   = outbuf + buf_size;

    h = avctx->height;
    w = avctx->width;
    switch (avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        n          = (w + 7) >> 3;
        depth      = 1;
        maxval     = 1;
        tuple_type = "BLACKANDWHITE";
        break;
    case PIX_FMT_GRAY8:
        n          = w;
        depth      = 1;
        maxval     = 255;
        tuple_type = "GRAYSCALE";
        break;
    case PIX_FMT_RGB24:
        n          = w * 3;
        depth      = 3;
        maxval     = 255;
        tuple_type = "RGB";
        break;
    case PIX_FMT_RGB32:
        n          = w * 4;
        depth      = 4;
        maxval     = 255;
        tuple_type = "RGB_ALPHA";
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLTYPE %s\nENDHDR\n",
             w, h, depth, maxval, tuple_type);
    s->bytestream += strlen(s->bytestream);

    ptr      = p->data[0];
    linesize = p->linesize[0];

    if (avctx->pix_fmt == PIX_FMT_RGB32) {
        int j;
        unsigned int v;

        for (i = 0; i < h; i++) {
            for (j = 0; j < w; j++) {
                v = ((uint32_t *)ptr)[j];
                bytestream_put_be24(&s->bytestream, v);
                *s->bytestream++ = v >> 24;
            }
            ptr += linesize;
        }
    } else {
        for (i = 0; i < h; i++) {
            memcpy(s->bytestream, ptr, n);
            s->bytestream += n;
            ptr           += linesize;
        }
    }
    return s->bytestream - s->bytestream_start;
}

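/* Close hook for the decoders: release the picture buffer if one is still allocated. */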
static av_cold int common_end(AVCodecContext *avctx)
{
    PNMContext *s = avctx->priv_data;

    if (s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    return 0;
}

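/*
 * Codec registrations. Each PNM flavour gets its own AVCodec entry, compiled
 * in only when the corresponding CONFIG_*_DECODER / CONFIG_*_ENCODER option
 * is enabled.
 */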
#if CONFIG_PGM_DECODER
AVCodec pgm_decoder = {
    "pgm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGM,
    sizeof(PNMContext),
    common_init,
    NULL,
    common_end,
    pnm_decode_frame,
    CODEC_CAP_DR1,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_GRAY8, PIX_FMT_GRAY16BE, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PGM (Portable GrayMap) image"),
};
#endif

#if CONFIG_PGM_ENCODER
AVCodec pgm_encoder = {
    "pgm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_GRAY8, PIX_FMT_GRAY16BE, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PGM (Portable GrayMap) image"),
};
#endif // CONFIG_PGM_ENCODER

#if CONFIG_PGMYUV_DECODER
AVCodec pgmyuv_decoder = {
    "pgmyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGMYUV,
    sizeof(PNMContext),
    common_init,
    NULL,
    common_end,
    pnm_decode_frame,
    CODEC_CAP_DR1,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PGMYUV (Portable GrayMap YUV) image"),
};
#endif

#if CONFIG_PGMYUV_ENCODER
AVCodec pgmyuv_encoder = {
    "pgmyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGMYUV,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PGMYUV (Portable GrayMap YUV) image"),
};
#endif // CONFIG_PGMYUV_ENCODER

#if CONFIG_PPM_DECODER
AVCodec ppm_decoder = {
    "ppm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PPM,
    sizeof(PNMContext),
    common_init,
    NULL,
    common_end,
    pnm_decode_frame,
    CODEC_CAP_DR1,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB48BE, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PPM (Portable PixelMap) image"),
};
#endif

#if CONFIG_PPM_ENCODER
AVCodec ppm_encoder = {
    "ppm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PPM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB48BE, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PPM (Portable PixelMap) image"),
};
#endif // CONFIG_PPM_ENCODER

#if CONFIG_PBM_DECODER
AVCodec pbm_decoder = {
    "pbm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PBM,
    sizeof(PNMContext),
    common_init,
    NULL,
    common_end,
    pnm_decode_frame,
    CODEC_CAP_DR1,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_MONOWHITE, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PBM (Portable BitMap) image"),
};
#endif

#if CONFIG_PBM_ENCODER
AVCodec pbm_encoder = {
    "pbm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PBM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_MONOWHITE, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PBM (Portable BitMap) image"),
};
#endif // CONFIG_PBM_ENCODER

#if CONFIG_PAM_DECODER
AVCodec pam_decoder = {
    "pam",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PAM,
    sizeof(PNMContext),
    common_init,
    NULL,
    common_end,
    pnm_decode_frame,
    CODEC_CAP_DR1,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_GRAY8, PIX_FMT_MONOWHITE, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PAM (Portable AnyMap) image"),
};
#endif

#if CONFIG_PAM_ENCODER
AVCodec pam_encoder = {
    "pam",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PAM,
    sizeof(PNMContext),
    common_init,
    pam_encode_frame,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_GRAY8, PIX_FMT_MONOWHITE, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PAM (Portable AnyMap) image"),
};
#endif // CONFIG_PAM_ENCODER