/*
 * PNM image format
 * Copyright (c) 2002, 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "bytestream.h"
#include "pnm.h"
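
/*
 * Decode one raw PNM/PAM image: ff_pnm_decode_header() parses the header
 * and sets width, height, pix_fmt and maxval; the sample data that follows
 * is then copied into a newly allocated frame, rescaled if maxval is
 * smaller than the full range of the output pixel format.
 */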
static int pnm_decode_frame(AVCodecContext *avctx, void *data,
                            int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf   = avpkt->data;
    int buf_size         = avpkt->size;
    PNMContext * const s = avctx->priv_data;
    AVFrame *picture     = data;
    AVFrame * const p    = (AVFrame*)&s->picture;
    int i, n, linesize, h, upgrade = 0;
    unsigned char *ptr;

    s->bytestream_start =
    s->bytestream       = buf;
    s->bytestream_end   = buf + buf_size;

    if (ff_pnm_decode_header(avctx, s) < 0)
        return -1;

    if (p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference = 0;
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    p->pict_type = FF_I_TYPE;
    p->key_frame = 1;

    switch (avctx->pix_fmt) {
    default:
        return -1;
    case PIX_FMT_RGB48BE:
        n = avctx->width * 6;
        goto do_read;
    case PIX_FMT_RGB24:
        n = avctx->width * 3;
        goto do_read;
    case PIX_FMT_GRAY8:
        n = avctx->width;
        if (s->maxval < 255)
            upgrade = 1;
        goto do_read;
    case PIX_FMT_GRAY16BE:
    case PIX_FMT_GRAY16LE:
        n = avctx->width * 2;
        if (s->maxval < 65535)
            upgrade = 2;
        goto do_read;
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        n = (avctx->width + 7) >> 3;
    do_read:
        ptr      = p->data[0];
        linesize = p->linesize[0];
        if (s->bytestream + n * avctx->height > s->bytestream_end)
            return -1;
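        /* Copy the rows straight through, unless maxval does not fill the
         * output range ("upgrade"): then every sample is scaled by a rounded
         * fixed-point factor of roughly 255/maxval (8-bit) or 65535/maxval
         * (16-bit). */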
        for (i = 0; i < avctx->height; i++) {
            if (!upgrade)
                memcpy(ptr, s->bytestream, n);
            else if (upgrade == 1) {
                unsigned int j, f = (255 * 128 + s->maxval / 2) / s->maxval;
                for (j = 0; j < n; j++)
                    ptr[j] = (s->bytestream[j] * f + 64) >> 7;
            } else if (upgrade == 2) {
                unsigned int j, v, f = (65535 * 32768 + s->maxval / 2) / s->maxval;
                for (j = 0; j < n / 2; j++) {
                    v = be2me_16(((uint16_t *)s->bytestream)[j]);
                    ((uint16_t *)ptr)[j] = (v * f + 16384) >> 15;
                }
            }
            s->bytestream += n;
            ptr           += linesize;
        }
        break;
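    /* PGMYUV carries YUV420P as one gray plane 3/2 times the luma height:
     * full-width luma rows first, then rows that each pack one half-width
     * U line followed by the matching half-width V line. */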
    case PIX_FMT_YUV420P:
        {
            unsigned char *ptr1, *ptr2;

            n        = avctx->width;
            ptr      = p->data[0];
            linesize = p->linesize[0];
            if (s->bytestream + n * avctx->height * 3 / 2 > s->bytestream_end)
                return -1;
            for (i = 0; i < avctx->height; i++) {
                memcpy(ptr, s->bytestream, n);
                s->bytestream += n;
                ptr           += linesize;
            }
            ptr1 = p->data[1];
            ptr2 = p->data[2];
            n  >>= 1;
            h    = avctx->height >> 1;
            for (i = 0; i < h; i++) {
                memcpy(ptr1, s->bytestream, n);
                s->bytestream += n;
                memcpy(ptr2, s->bytestream, n);
                s->bytestream += n;
                ptr1 += p->linesize[1];
                ptr2 += p->linesize[2];
            }
        }
        break;
    case PIX_FMT_RGB32:
        ptr      = p->data[0];
        linesize = p->linesize[0];
        if (s->bytestream + avctx->width * avctx->height * 4 > s->bytestream_end)
            return -1;
        for (i = 0; i < avctx->height; i++) {
            int j, r, g, b, a;

            for (j = 0; j < avctx->width; j++) {
                r = *s->bytestream++;
                g = *s->bytestream++;
                b = *s->bytestream++;
                a = *s->bytestream++;
                ((uint32_t *)ptr)[j] = (a << 24) | (r << 16) | (g << 8) | b;
            }
            ptr += linesize;
        }
        break;
    }
    *picture   = *(AVFrame*)&s->picture;
    *data_size = sizeof(AVPicture);

    return s->bytestream - s->bytestream_start;
}
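
/*
 * Encode one frame as a binary PBM ("P4"), PGM ("P5") or PPM ("P6") image:
 * write the magic, the dimensions and (except for PBM) the maxval, then dump
 * the frame rows unchanged.
 */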
static int pnm_encode_frame(AVCodecContext *avctx, unsigned char *outbuf,
                            int buf_size, void *data)
{
    PNMContext *s     = avctx->priv_data;
    AVFrame *pict     = data;
    AVFrame * const p = (AVFrame*)&s->picture;
    int i, h, h1, c, n, linesize;
    uint8_t *ptr, *ptr1, *ptr2;

    if (buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200) {
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    *p           = *pict;
    p->pict_type = FF_I_TYPE;
    p->key_frame = 1;

    s->bytestream_start =
    s->bytestream       = outbuf;
    s->bytestream_end   = outbuf + buf_size;

    h  = avctx->height;
    h1 = h;
    switch (avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        c  = '4';
        n  = (avctx->width + 7) >> 3;
        break;
    case PIX_FMT_GRAY8:
        c  = '5';
        n  = avctx->width;
        break;
    case PIX_FMT_GRAY16BE:
        c  = '5';
        n  = avctx->width * 2;
        break;
    case PIX_FMT_RGB24:
        c  = '6';
        n  = avctx->width * 3;
        break;
    case PIX_FMT_RGB48BE:
        c  = '6';
        n  = avctx->width * 6;
        break;
    case PIX_FMT_YUV420P:
        c  = '5';
        n  = avctx->width;
        h1 = (h * 3) / 2;
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P%c\n%d %d\n", c, avctx->width, h1);
    s->bytestream += strlen(s->bytestream);
    if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
        snprintf(s->bytestream, s->bytestream_end - s->bytestream,
                 "%d\n", (avctx->pix_fmt != PIX_FMT_GRAY16BE && avctx->pix_fmt != PIX_FMT_RGB48BE) ? 255 : 65535);
        s->bytestream += strlen(s->bytestream);
    }

    ptr      = p->data[0];
    linesize = p->linesize[0];
    for (i = 0; i < h; i++) {
        memcpy(s->bytestream, ptr, n);
        s->bytestream += n;
        ptr           += linesize;
    }
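
    /* For PGMYUV, append the two half-resolution chroma planes below the
     * luma plane; the header height h1 = h * 3 / 2 already accounts for them. */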
    if (avctx->pix_fmt == PIX_FMT_YUV420P) {
        h >>= 1;
        n >>= 1;
        ptr1 = p->data[1];
        ptr2 = p->data[2];
        for (i = 0; i < h; i++) {
            memcpy(s->bytestream, ptr1, n);
            s->bytestream += n;
            memcpy(s->bytestream, ptr2, n);
            s->bytestream += n;
            ptr1 += p->linesize[1];
            ptr2 += p->linesize[2];
        }
    }
    return s->bytestream - s->bytestream_start;
}
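
/*
 * Encode one frame as a PAM ("P7") image: write the textual header listing
 * width, height, depth, maxval and tuple type, then the raw rows.
 */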
static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf,
                            int buf_size, void *data)
{
    PNMContext *s     = avctx->priv_data;
    AVFrame *pict     = data;
    AVFrame * const p = (AVFrame*)&s->picture;
    int i, h, w, n, linesize, depth, maxval;
    const char *tuple_type;
    uint8_t *ptr;

    if (buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200) {
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    *p           = *pict;
    p->pict_type = FF_I_TYPE;
    p->key_frame = 1;

    s->bytestream_start =
    s->bytestream       = outbuf;
    s->bytestream_end   = outbuf + buf_size;

    h = avctx->height;
    w = avctx->width;
    switch (avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        n          = (w + 7) >> 3;
        depth      = 1;
        maxval     = 1;
        tuple_type = "BLACKANDWHITE";
        break;
    case PIX_FMT_GRAY8:
        n          = w;
        depth      = 1;
        maxval     = 255;
        tuple_type = "GRAYSCALE";
        break;
    case PIX_FMT_RGB24:
        n          = w * 3;
        depth      = 3;
        maxval     = 255;
        tuple_type = "RGB";
        break;
    case PIX_FMT_RGB32:
        n          = w * 4;
        depth      = 4;
        maxval     = 255;
        tuple_type = "RGB_ALPHA";
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLETYPE %s\nENDHDR\n",
             w, h, depth, maxval, tuple_type);
    s->bytestream += strlen(s->bytestream);

    ptr      = p->data[0];
    linesize = p->linesize[0];
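    /* PIX_FMT_RGB32 packs each pixel into a 32-bit word with alpha in the
     * top byte and R, G, B below it (see the decoder above); PAM stores
     * bytes in R, G, B, A order, so write the low 24 bits big-endian and
     * then the alpha byte. */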
    if (avctx->pix_fmt == PIX_FMT_RGB32) {
        int j;
        unsigned int v;

        for (i = 0; i < h; i++) {
            for (j = 0; j < w; j++) {
                v = ((uint32_t *)ptr)[j];
                bytestream_put_be24(&s->bytestream, v);
                *s->bytestream++ = v >> 24;
            }
            ptr += linesize;
        }
    } else {
        for (i = 0; i < h; i++) {
            memcpy(s->bytestream, ptr, n);
            s->bytestream += n;
            ptr           += linesize;
        }
    }
    return s->bytestream - s->bytestream_start;
}
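
/*
 * Codec table entries: each PNM flavour gets its own decoder/encoder,
 * all sharing PNMContext and the ff_pnm_init/ff_pnm_end helpers declared
 * in pnm.h.
 */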
#if CONFIG_PGM_DECODER
AVCodec pgm_decoder = {
    "pgm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGM,
    sizeof(PNMContext),
    ff_pnm_init,
    NULL,
    ff_pnm_end,
    pnm_decode_frame,
    CODEC_CAP_DR1,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_GRAY8, PIX_FMT_GRAY16BE, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PGM (Portable GrayMap) image"),
};
#endif

#if CONFIG_PGM_ENCODER
AVCodec pgm_encoder = {
    "pgm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGM,
    sizeof(PNMContext),
    ff_pnm_init,
    pnm_encode_frame,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_GRAY8, PIX_FMT_GRAY16BE, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PGM (Portable GrayMap) image"),
};
#endif

#if CONFIG_PGMYUV_DECODER
AVCodec pgmyuv_decoder = {
    "pgmyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGMYUV,
    sizeof(PNMContext),
    ff_pnm_init,
    NULL,
    ff_pnm_end,
    pnm_decode_frame,
    CODEC_CAP_DR1,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PGMYUV (Portable GrayMap YUV) image"),
};
#endif

#if CONFIG_PGMYUV_ENCODER
AVCodec pgmyuv_encoder = {
    "pgmyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGMYUV,
    sizeof(PNMContext),
    ff_pnm_init,
    pnm_encode_frame,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PGMYUV (Portable GrayMap YUV) image"),
};
#endif

#if CONFIG_PPM_DECODER
AVCodec ppm_decoder = {
    "ppm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PPM,
    sizeof(PNMContext),
    ff_pnm_init,
    NULL,
    ff_pnm_end,
    pnm_decode_frame,
    CODEC_CAP_DR1,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB48BE, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PPM (Portable PixelMap) image"),
};
#endif

#if CONFIG_PPM_ENCODER
AVCodec ppm_encoder = {
    "ppm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PPM,
    sizeof(PNMContext),
    ff_pnm_init,
    pnm_encode_frame,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB48BE, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PPM (Portable PixelMap) image"),
};
#endif

#if CONFIG_PBM_DECODER
AVCodec pbm_decoder = {
    "pbm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PBM,
    sizeof(PNMContext),
    ff_pnm_init,
    NULL,
    ff_pnm_end,
    pnm_decode_frame,
    CODEC_CAP_DR1,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_MONOWHITE, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PBM (Portable BitMap) image"),
};
#endif

#if CONFIG_PBM_ENCODER
AVCodec pbm_encoder = {
    "pbm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PBM,
    sizeof(PNMContext),
    ff_pnm_init,
    pnm_encode_frame,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_MONOWHITE, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PBM (Portable BitMap) image"),
};
#endif

#if CONFIG_PAM_DECODER
AVCodec pam_decoder = {
    "pam",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PAM,
    sizeof(PNMContext),
    ff_pnm_init,
    NULL,
    ff_pnm_end,
    pnm_decode_frame,
    CODEC_CAP_DR1,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_GRAY8, PIX_FMT_MONOWHITE, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PAM (Portable AnyMap) image"),
};
#endif

#if CONFIG_PAM_ENCODER
AVCodec pam_encoder = {
    "pam",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PAM,
    sizeof(PNMContext),
    ff_pnm_init,
    pam_encode_frame,
    .pix_fmts  = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_GRAY8, PIX_FMT_MONOWHITE, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("PAM (Portable AnyMap) image"),
};
#endif