  1. /*
  2. * PNM image format
  3. * Copyright (c) 2002, 2003 Fabrice Bellard.
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include "avcodec.h"
  22. #include "mpegvideo.h" //only for ParseContext
/* Shared state for the PNM/PAM coders: a plain byte cursor over the
 * current packet/output buffer plus the reusable picture that is also
 * exposed through avctx->coded_frame. */
typedef struct PNMContext {
    uint8_t *bytestream;        /* current read/write position */
    uint8_t *bytestream_start;  /* first byte of the buffer */
    uint8_t *bytestream_end;    /* one past the last valid byte */
    AVFrame picture;            /* frame storage owned by this context */
} PNMContext;
  29. static inline int pnm_space(int c)
  30. {
  31. return (c == ' ' || c == '\n' || c == '\r' || c == '\t');
  32. }
  33. static void pnm_get(PNMContext *sc, char *str, int buf_size)
  34. {
  35. char *s;
  36. int c;
  37. /* skip spaces and comments */
  38. for(;;) {
  39. c = *sc->bytestream++;
  40. if (c == '#') {
  41. do {
  42. c = *sc->bytestream++;
  43. } while (c != '\n' && sc->bytestream < sc->bytestream_end);
  44. } else if (!pnm_space(c)) {
  45. break;
  46. }
  47. }
  48. s = str;
  49. while (sc->bytestream < sc->bytestream_end && !pnm_space(c)) {
  50. if ((s - str) < buf_size - 1)
  51. *s++ = c;
  52. c = *sc->bytestream++;
  53. }
  54. *s = '\0';
  55. }
  56. static int common_init(AVCodecContext *avctx){
  57. PNMContext *s = avctx->priv_data;
  58. avcodec_get_frame_defaults((AVFrame*)&s->picture);
  59. avctx->coded_frame= (AVFrame*)&s->picture;
  60. return 0;
  61. }
  62. static int pnm_decode_header(AVCodecContext *avctx, PNMContext * const s){
  63. char buf1[32], tuple_type[32];
  64. int h, w, depth, maxval;;
  65. pnm_get(s, buf1, sizeof(buf1));
  66. if (!strcmp(buf1, "P4")) {
  67. avctx->pix_fmt = PIX_FMT_MONOWHITE;
  68. } else if (!strcmp(buf1, "P5")) {
  69. if (avctx->codec_id == CODEC_ID_PGMYUV)
  70. avctx->pix_fmt = PIX_FMT_YUV420P;
  71. else
  72. avctx->pix_fmt = PIX_FMT_GRAY8;
  73. } else if (!strcmp(buf1, "P6")) {
  74. avctx->pix_fmt = PIX_FMT_RGB24;
  75. } else if (!strcmp(buf1, "P7")) {
  76. w = -1;
  77. h = -1;
  78. maxval = -1;
  79. depth = -1;
  80. tuple_type[0] = '\0';
  81. for(;;) {
  82. pnm_get(s, buf1, sizeof(buf1));
  83. if (!strcmp(buf1, "WIDTH")) {
  84. pnm_get(s, buf1, sizeof(buf1));
  85. w = strtol(buf1, NULL, 10);
  86. } else if (!strcmp(buf1, "HEIGHT")) {
  87. pnm_get(s, buf1, sizeof(buf1));
  88. h = strtol(buf1, NULL, 10);
  89. } else if (!strcmp(buf1, "DEPTH")) {
  90. pnm_get(s, buf1, sizeof(buf1));
  91. depth = strtol(buf1, NULL, 10);
  92. } else if (!strcmp(buf1, "MAXVAL")) {
  93. pnm_get(s, buf1, sizeof(buf1));
  94. maxval = strtol(buf1, NULL, 10);
  95. } else if (!strcmp(buf1, "TUPLETYPE")) {
  96. pnm_get(s, tuple_type, sizeof(tuple_type));
  97. } else if (!strcmp(buf1, "ENDHDR")) {
  98. break;
  99. } else {
  100. return -1;
  101. }
  102. }
  103. /* check that all tags are present */
  104. if (w <= 0 || h <= 0 || maxval <= 0 || depth <= 0 || tuple_type[0] == '\0' || avcodec_check_dimensions(avctx, w, h))
  105. return -1;
  106. avctx->width = w;
  107. avctx->height = h;
  108. if (depth == 1) {
  109. if (maxval == 1)
  110. avctx->pix_fmt = PIX_FMT_MONOWHITE;
  111. else
  112. avctx->pix_fmt = PIX_FMT_GRAY8;
  113. } else if (depth == 3) {
  114. avctx->pix_fmt = PIX_FMT_RGB24;
  115. } else if (depth == 4) {
  116. avctx->pix_fmt = PIX_FMT_RGBA32;
  117. } else {
  118. return -1;
  119. }
  120. return 0;
  121. } else {
  122. return -1;
  123. }
  124. pnm_get(s, buf1, sizeof(buf1));
  125. avctx->width = atoi(buf1);
  126. if (avctx->width <= 0)
  127. return -1;
  128. pnm_get(s, buf1, sizeof(buf1));
  129. avctx->height = atoi(buf1);
  130. if(avcodec_check_dimensions(avctx, avctx->width, avctx->height))
  131. return -1;
  132. if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
  133. pnm_get(s, buf1, sizeof(buf1));
  134. }
  135. /* more check if YUV420 */
  136. if (avctx->pix_fmt == PIX_FMT_YUV420P) {
  137. if ((avctx->width & 1) != 0)
  138. return -1;
  139. h = (avctx->height * 2);
  140. if ((h % 3) != 0)
  141. return -1;
  142. h /= 3;
  143. avctx->height = h;
  144. }
  145. return 0;
  146. }
/**
 * Decode one raw ("binary") PNM/PAM image from buf into an AVFrame.
 * The header is re-parsed for every packet, so a single codec instance
 * can decode images of varying size/format.
 *
 * @return number of bytes consumed from buf, or -1 on error.
 */
static int pnm_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            uint8_t *buf, int buf_size)
{
    PNMContext * const s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, n, linesize, h;
    unsigned char *ptr;

    /* aim the bytestream cursor at the incoming packet */
    s->bytestream_start=
    s->bytestream= buf;
    s->bytestream_end= buf + buf_size;

    if(pnm_decode_header(avctx, s) < 0)
        return -1;

    /* release the previous frame before requesting a new buffer */
    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    p->pict_type= FF_I_TYPE;   /* every PNM image is an intra frame */
    p->key_frame= 1;

    switch(avctx->pix_fmt) {
    default:
        return -1;
    case PIX_FMT_RGB24:
        n = avctx->width * 3;              /* bytes per source row */
        goto do_read;
    case PIX_FMT_GRAY8:
        n = avctx->width;
        goto do_read;
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        n = (avctx->width + 7) >> 3;       /* 1 bpp, rows padded to a byte */
    do_read:
        ptr = p->data[0];
        linesize = p->linesize[0];
        /* reject truncated input before copying anything */
        if(s->bytestream + n*avctx->height > s->bytestream_end)
            return -1;
        for(i = 0; i < avctx->height; i++) {
            memcpy(ptr, s->bytestream, n);
            s->bytestream += n;
            ptr += linesize;
        }
        break;
    case PIX_FMT_YUV420P:
        {
            unsigned char *ptr1, *ptr2;

            /* full-size luma plane first... */
            n = avctx->width;
            ptr = p->data[0];
            linesize = p->linesize[0];
            if(s->bytestream + n*avctx->height*3/2 > s->bytestream_end)
                return -1;
            for(i = 0; i < avctx->height; i++) {
                memcpy(ptr, s->bytestream, n);
                s->bytestream += n;
                ptr += linesize;
            }
            /* ...then the half-size U and V rows, stored pairwise */
            ptr1 = p->data[1];
            ptr2 = p->data[2];
            n >>= 1;
            h = avctx->height >> 1;
            for(i = 0; i < h; i++) {
                memcpy(ptr1, s->bytestream, n);
                s->bytestream += n;
                memcpy(ptr2, s->bytestream, n);
                s->bytestream += n;
                ptr1 += p->linesize[1];
                ptr2 += p->linesize[2];
            }
        }
        break;
    case PIX_FMT_RGBA32:
        /* PAM RGB_ALPHA: repack the r,g,b,a byte stream into native
         * 32-bit words (alpha in the top byte, as written below) */
        ptr = p->data[0];
        linesize = p->linesize[0];
        if(s->bytestream + avctx->width*avctx->height*4 > s->bytestream_end)
            return -1;
        for(i = 0; i < avctx->height; i++) {
            int j, r, g, b, a;

            for(j = 0;j < avctx->width; j++) {
                r = *s->bytestream++;
                g = *s->bytestream++;
                b = *s->bytestream++;
                a = *s->bytestream++;
                ((uint32_t *)ptr)[j] = (a << 24) | (r << 16) | (g << 8) | b;
            }
            ptr += linesize;
        }
        break;
    }
    *picture= *(AVFrame*)&s->picture;
    *data_size = sizeof(AVPicture);

    return s->bytestream - s->bytestream_start;
}
/**
 * Encode one frame as a binary PNM (P4/P5/P6) image into outbuf.
 * PGMYUV is emitted as a P5 grey image whose stored height is 3/2 of
 * the real height (Y plane followed by pairwise half-size U/V rows).
 *
 * @return number of bytes written, or -1 on error.
 */
static int pnm_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data){
    PNMContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, h, h1, c, n, linesize;
    uint8_t *ptr, *ptr1, *ptr2;

    /* require room for the raw pixels plus slack for the text header */
    if(buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200){
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    s->bytestream_start=
    s->bytestream= outbuf;
    s->bytestream_end= outbuf+buf_size;

    h = avctx->height;
    h1 = h;   /* height as written into the file header */
    switch(avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        c = '4';                        /* PBM */
        n = (avctx->width + 7) >> 3;    /* 1 bpp, rows padded to a byte */
        break;
    case PIX_FMT_GRAY8:
        c = '5';                        /* PGM */
        n = avctx->width;
        break;
    case PIX_FMT_RGB24:
        c = '6';                        /* PPM */
        n = avctx->width * 3;
        break;
    case PIX_FMT_YUV420P:
        c = '5';                        /* PGMYUV masquerades as PGM */
        n = avctx->width;
        h1 = (h * 3) / 2;               /* luma plus both chroma planes */
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P%c\n%d %d\n",
             c, avctx->width, h1);
    s->bytestream += strlen(s->bytestream);
    if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
        /* maxval line; PBM (P4) has none */
        snprintf(s->bytestream, s->bytestream_end - s->bytestream,
                 "%d\n", 255);
        s->bytestream += strlen(s->bytestream);
    }

    ptr = p->data[0];
    linesize = p->linesize[0];
    for(i=0;i<h;i++) {
        memcpy(s->bytestream, ptr, n);
        s->bytestream += n;
        ptr += linesize;
    }

    if (avctx->pix_fmt == PIX_FMT_YUV420P) {
        /* append the half-size U and V rows pairwise after the Y plane */
        h >>= 1;
        n >>= 1;
        ptr1 = p->data[1];
        ptr2 = p->data[2];
        for(i=0;i<h;i++) {
            memcpy(s->bytestream, ptr1, n);
            s->bytestream += n;
            memcpy(s->bytestream, ptr2, n);
            s->bytestream += n;
            ptr1 += p->linesize[1];
            ptr2 += p->linesize[2];
        }
    }
    return s->bytestream - s->bytestream_start;
}
/**
 * Encode one frame as a PAM (P7) image into outbuf.
 * DEPTH/MAXVAL/TUPLETYPE are derived from the pixel format; RGBA32
 * frames are unpacked from native 32-bit words into r,g,b,a byte order.
 *
 * @return number of bytes written, or -1 on error.
 */
static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data){
    PNMContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, h, w, n, linesize, depth, maxval;
    const char *tuple_type;
    uint8_t *ptr;

    /* require room for the raw pixels plus slack for the text header */
    if(buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200){
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    s->bytestream_start=
    s->bytestream= outbuf;
    s->bytestream_end= outbuf+buf_size;

    h = avctx->height;
    w = avctx->width;
    switch(avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        n = (w + 7) >> 3;    /* 1 bpp, rows padded to a byte */
        depth = 1;
        maxval = 1;
        tuple_type = "BLACKANDWHITE";
        break;
    case PIX_FMT_GRAY8:
        n = w;
        depth = 1;
        maxval = 255;
        tuple_type = "GRAYSCALE";
        break;
    case PIX_FMT_RGB24:
        n = w * 3;
        depth = 3;
        maxval = 255;
        tuple_type = "RGB";
        break;
    case PIX_FMT_RGBA32:
        n = w * 4;
        depth = 4;
        maxval = 255;
        tuple_type = "RGB_ALPHA";
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLETYPE %s\nENDHDR\n",
             w, h, depth, maxval, tuple_type);
    s->bytestream += strlen(s->bytestream);

    ptr = p->data[0];
    linesize = p->linesize[0];

    if (avctx->pix_fmt == PIX_FMT_RGBA32) {
        int j;
        unsigned int v;

        /* unpack native 32-bit words (alpha in the top byte, matching
         * the decoder) into the r,g,b,a byte order PAM expects */
        for(i=0;i<h;i++) {
            for(j=0;j<w;j++) {
                v = ((uint32_t *)ptr)[j];
                *s->bytestream++ = v >> 16;   /* r */
                *s->bytestream++ = v >> 8;    /* g */
                *s->bytestream++ = v;         /* b */
                *s->bytestream++ = v >> 24;   /* a */
            }
            ptr += linesize;
        }
    } else {
        for(i=0;i<h;i++) {
            memcpy(s->bytestream, ptr, n);
            s->bytestream += n;
            ptr += linesize;
        }
    }
    return s->bytestream - s->bytestream_start;
}
/* NOTE(review): dead code — these probe helpers are compiled out via
 * "#if 0" and kept only for reference; presumably probing moved
 * elsewhere (confirm before deleting). */
#if 0
/* Probe for raw PNM: magic "P4".."P6" followed by PNM whitespace. */
static int pnm_probe(AVProbeData *pd)
{
    const char *p = pd->buf;

    if (pd->buf_size >= 8 &&
        p[0] == 'P' &&
        p[1] >= '4' && p[1] <= '6' &&
        pnm_space(p[2]) )
        return AVPROBE_SCORE_MAX - 1; /* to permit pgmyuv probe */
    else
        return 0;
}

/* PGMYUV has no distinctive magic; match on the file extension only. */
static int pgmyuv_probe(AVProbeData *pd)
{
    if (match_ext(pd->filename, "pgmyuv"))
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}

/* Probe for PAM: magic "P7" followed by a newline. */
static int pam_probe(AVProbeData *pd)
{
    const char *p = pd->buf;

    if (pd->buf_size >= 8 &&
        p[0] == 'P' &&
        p[1] == '7' &&
        p[2] == '\n')
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}
#endif
#ifdef CONFIG_PNM_PARSER
/**
 * Split an arbitrary byte stream into complete PNM/PAM images: parse a
 * header, compute where its pixel data ends via avpicture_get_size(),
 * and let ff_combine_frame() buffer partial images across calls.
 *
 * @return number of input bytes consumed this call.
 */
static int pnm_parse(AVCodecParserContext *s,
                     AVCodecContext *avctx,
                     uint8_t **poutbuf, int *poutbuf_size,
                     const uint8_t *buf, int buf_size)
{
    ParseContext *pc = s->priv_data;
    PNMContext pnmctx;
    int next;

    /* move bytes overread by the previous call back into the buffer */
    for(; pc->overread>0; pc->overread--){
        pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
    }
retry:
    if(pc->index){
        /* data left over from earlier calls: parse from the saved buffer */
        pnmctx.bytestream_start=
        pnmctx.bytestream= pc->buffer;
        pnmctx.bytestream_end= pc->buffer + pc->index;
    }else{
        pnmctx.bytestream_start=
        pnmctx.bytestream= (uint8_t *) buf; /* casts avoid warnings */
        pnmctx.bytestream_end= (uint8_t *) buf + buf_size;
    }
    if(pnm_decode_header(avctx, &pnmctx) < 0){
        if(pnmctx.bytestream < pnmctx.bytestream_end){
            /* header was malformed rather than truncated: drop the
             * buffered bytes (or one input byte) and resynchronize */
            if(pc->index){
                pc->index=0;
            }else{
                buf++;
                buf_size--;
            }
            goto retry;
        }
#if 0
        if(pc->index && pc->index*2 + FF_INPUT_BUFFER_PADDING_SIZE < pc->buffer_size && buf_size > pc->index){
            memcpy(pc->buffer + pc->index, buf, pc->index);
            pc->index += pc->index;
            buf += pc->index;
            buf_size -= pc->index;
            goto retry;
        }
#endif
        /* ran out of data mid-header: wait for more input */
        next= END_NOT_FOUND;
    }else{
        /* frame ends after the header plus the raw pixel payload */
        next= pnmctx.bytestream - pnmctx.bytestream_start
            + avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
        if(pnmctx.bytestream_start!=buf)
            next-= pc->index;   /* part of the frame was already buffered */
        if(next > buf_size)
            next= END_NOT_FOUND;
    }

    if(ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size)<0){
        *poutbuf = NULL;
        *poutbuf_size = 0;
        return buf_size;
    }
    *poutbuf = (uint8_t *)buf;
    *poutbuf_size = buf_size;
    return next;
}
  478. AVCodecParser pnm_parser = {
  479. { CODEC_ID_PGM, CODEC_ID_PGMYUV, CODEC_ID_PPM, CODEC_ID_PBM, CODEC_ID_PAM},
  480. sizeof(ParseContext),
  481. NULL,
  482. pnm_parse,
  483. ff_parse_close,
  484. };
  485. #endif /* CONFIG_PNM_PARSER */
  486. #ifdef CONFIG_PGM_ENCODER
  487. AVCodec pgm_encoder = {
  488. "pgm",
  489. CODEC_TYPE_VIDEO,
  490. CODEC_ID_PGM,
  491. sizeof(PNMContext),
  492. common_init,
  493. pnm_encode_frame,
  494. NULL, //encode_end,
  495. pnm_decode_frame,
  496. .pix_fmts= (enum PixelFormat[]){PIX_FMT_GRAY8, -1},
  497. };
  498. #endif // CONFIG_PGM_ENCODER
  499. #ifdef CONFIG_PGMYUV_ENCODER
  500. AVCodec pgmyuv_encoder = {
  501. "pgmyuv",
  502. CODEC_TYPE_VIDEO,
  503. CODEC_ID_PGMYUV,
  504. sizeof(PNMContext),
  505. common_init,
  506. pnm_encode_frame,
  507. NULL, //encode_end,
  508. pnm_decode_frame,
  509. .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
  510. };
  511. #endif // CONFIG_PGMYUV_ENCODER
  512. #ifdef CONFIG_PPM_ENCODER
  513. AVCodec ppm_encoder = {
  514. "ppm",
  515. CODEC_TYPE_VIDEO,
  516. CODEC_ID_PPM,
  517. sizeof(PNMContext),
  518. common_init,
  519. pnm_encode_frame,
  520. NULL, //encode_end,
  521. pnm_decode_frame,
  522. .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, -1},
  523. };
  524. #endif // CONFIG_PPM_ENCODER
  525. #ifdef CONFIG_PBM_ENCODER
  526. AVCodec pbm_encoder = {
  527. "pbm",
  528. CODEC_TYPE_VIDEO,
  529. CODEC_ID_PBM,
  530. sizeof(PNMContext),
  531. common_init,
  532. pnm_encode_frame,
  533. NULL, //encode_end,
  534. pnm_decode_frame,
  535. .pix_fmts= (enum PixelFormat[]){PIX_FMT_MONOWHITE, -1},
  536. };
  537. #endif // CONFIG_PBM_ENCODER
  538. #ifdef CONFIG_PAM_ENCODER
  539. AVCodec pam_encoder = {
  540. "pam",
  541. CODEC_TYPE_VIDEO,
  542. CODEC_ID_PAM,
  543. sizeof(PNMContext),
  544. common_init,
  545. pam_encode_frame,
  546. NULL, //encode_end,
  547. pnm_decode_frame,
  548. .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA32, PIX_FMT_GRAY8, PIX_FMT_MONOWHITE, -1},
  549. };
  550. #endif // CONFIG_PAM_ENCODER