/*
 * PNM image format
 * Copyright (c) 2002, 2003 Fabrice Bellard.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "avcodec.h"
#include "mpegvideo.h" // only for ParseContext
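
/* Codec private data: read/write cursors into the current packet or output
 * buffer, plus the AVFrame shared by the encode and decode paths. */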
typedef struct PNMContext {
    uint8_t *bytestream;
    uint8_t *bytestream_start;
    uint8_t *bytestream_end;
    AVFrame picture;
} PNMContext;
static inline int pnm_space(int c)
{
    return (c == ' ' || c == '\n' || c == '\r' || c == '\t');
}
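
/* Copy the next whitespace-delimited token into str (at most buf_size - 1
 * characters, always 0-terminated), skipping leading whitespace and
 * '#' comment lines. */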
static void pnm_get(PNMContext *sc, char *str, int buf_size)
{
    char *s;
    int c;

    /* skip spaces and comments */
    for(;;) {
        c = *sc->bytestream++;
        if (c == '#') {
            do {
                c = *sc->bytestream++;
            } while (c != '\n' && sc->bytestream < sc->bytestream_end);
        } else if (!pnm_space(c)) {
            break;
        }
    }

    s = str;
    while (sc->bytestream < sc->bytestream_end && !pnm_space(c)) {
        if ((s - str) < buf_size - 1)
            *s++ = c;
        c = *sc->bytestream++;
    }
    *s = '\0';
}
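
/* Shared init: reset the context frame to defaults and expose it as
 * avctx->coded_frame. */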
static int common_init(AVCodecContext *avctx){
    PNMContext *s = avctx->priv_data;

    avcodec_get_frame_defaults((AVFrame*)&s->picture);
    avctx->coded_frame= (AVFrame*)&s->picture;

    return 0;
}
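
/* Parse a P4/P5/P6 header: pick the pixel format from the magic number
 * (P5 maps to YUV420P for the PGMYUV codec), then read width, height and,
 * except for PBM, the maxval token. For PGMYUV the stored height covers the
 * Y plane plus the two half-height chroma planes, so the luma height is
 * 2/3 of it and the width must be even. */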
static int pnm_decode_header(AVCodecContext *avctx, PNMContext * const s){
    char buf1[32];
    int h;

    pnm_get(s, buf1, sizeof(buf1));
    if (!strcmp(buf1, "P4")) {
        avctx->pix_fmt = PIX_FMT_MONOWHITE;
    } else if (!strcmp(buf1, "P5")) {
        if (avctx->codec_id == CODEC_ID_PGMYUV)
            avctx->pix_fmt = PIX_FMT_YUV420P;
        else
            avctx->pix_fmt = PIX_FMT_GRAY8;
    } else if (!strcmp(buf1, "P6")) {
        avctx->pix_fmt = PIX_FMT_RGB24;
    } else {
        return -1;
    }
    pnm_get(s, buf1, sizeof(buf1));
    avctx->width = atoi(buf1);
    if (avctx->width <= 0)
        return -1;
    pnm_get(s, buf1, sizeof(buf1));
    avctx->height = atoi(buf1);
    if (avctx->height <= 0)
        return -1;
    if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
        pnm_get(s, buf1, sizeof(buf1));
    }

    /* more check if YUV420 */
    if (avctx->pix_fmt == PIX_FMT_YUV420P) {
        if ((avctx->width & 1) != 0)
            return -1;
        h = (avctx->height * 2);
        if ((h % 3) != 0)
            return -1;
        h /= 3;
        avctx->height = h;
    }
    return 0;
}
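
/* Decode one PBM/PGM/PPM/PGMYUV frame: parse the header, get a picture
 * buffer, then copy the raster line by line into the frame planes (with a
 * separate half-size copy of the U and V planes for PGMYUV). */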
static int pnm_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            uint8_t *buf, int buf_size)
{
    PNMContext * const s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, n, linesize, h;
    unsigned char *ptr;

    /* special case for last picture */
    if (buf_size == 0) {
        return 0;
    }

    s->bytestream_start=
    s->bytestream= buf;
    s->bytestream_end= buf + buf_size;

    if(pnm_decode_header(avctx, s) < 0)
        return -1;
    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    switch(avctx->pix_fmt) {
    default:
        return -1;
    case PIX_FMT_RGB24:
        n = avctx->width * 3;
        goto do_read;
    case PIX_FMT_GRAY8:
        n = avctx->width;
        goto do_read;
    case PIX_FMT_MONOWHITE:
        n = (avctx->width + 7) >> 3;
    do_read:
        ptr = p->data[0];
        linesize = p->linesize[0];
        for(i = 0; i < avctx->height; i++) {
            memcpy(ptr, s->bytestream, n);
            s->bytestream += n;
            ptr += linesize;
        }
        break;
    case PIX_FMT_YUV420P:
        {
            unsigned char *ptr1, *ptr2;

            n = avctx->width;
            ptr = p->data[0];
            linesize = p->linesize[0];
            for(i = 0; i < avctx->height; i++) {
                memcpy(ptr, s->bytestream, n);
                s->bytestream += n;
                ptr += linesize;
            }
            ptr1 = p->data[1];
            ptr2 = p->data[2];
            n >>= 1;
            h = avctx->height >> 1;
            for(i = 0; i < h; i++) {
                memcpy(ptr1, s->bytestream, n);
                s->bytestream += n;
                memcpy(ptr2, s->bytestream, n);
                s->bytestream += n;
                ptr1 += p->linesize[1];
                ptr2 += p->linesize[2];
            }
        }
        break;
    }
    *picture= *(AVFrame*)&s->picture;
    *data_size = sizeof(AVPicture);

    return s->bytestream - s->bytestream_start;
}
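
/* Encode one frame as binary PNM: write the "P<c>" header (the height is
 * scaled by 3/2 for YUV420P so the chroma rows follow the luma rows) and,
 * except for PBM, a maxval of 255, then dump the planes line by line. */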
static int pnm_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data){
    PNMContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, h, h1, c, n, linesize;
    uint8_t *ptr, *ptr1, *ptr2;

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    s->bytestream_start=
    s->bytestream= outbuf;
    s->bytestream_end= outbuf+buf_size;

    h = avctx->height;
    h1 = h;
    switch(avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        c = '4';
        n = (avctx->width + 7) >> 3;
        break;
    case PIX_FMT_GRAY8:
        c = '5';
        n = avctx->width;
        break;
    case PIX_FMT_RGB24:
        c = '6';
        n = avctx->width * 3;
        break;
    case PIX_FMT_YUV420P:
        c = '5';
        n = avctx->width;
        h1 = (h * 3) / 2;
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P%c\n%d %d\n",
             c, avctx->width, h1);
    s->bytestream += strlen(s->bytestream);
    if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
        snprintf(s->bytestream, s->bytestream_end - s->bytestream,
                 "%d\n", 255);
        s->bytestream += strlen(s->bytestream);
    }

    ptr = p->data[0];
    linesize = p->linesize[0];
    for(i=0;i<h;i++) {
        memcpy(s->bytestream, ptr, n);
        s->bytestream += n;
        ptr += linesize;
    }

    if (avctx->pix_fmt == PIX_FMT_YUV420P) {
        h >>= 1;
        n >>= 1;
        ptr1 = p->data[1];
        ptr2 = p->data[2];
        for(i=0;i<h;i++) {
            memcpy(s->bytestream, ptr1, n);
            s->bytestream += n;
            memcpy(s->bytestream, ptr2, n);
            s->bytestream += n;
            ptr1 += p->linesize[1];
            ptr2 += p->linesize[2];
        }
    }
    return s->bytestream - s->bytestream_start;
}
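
/* Decode a PAM (P7) frame: read the WIDTH/HEIGHT/DEPTH/MAXVAL/TUPLETYPE
 * header tags up to ENDHDR, map the depth to a pixel format, then copy the
 * raster (repacking the 4-byte-per-pixel case into RGBA32 words). */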
static int pam_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            uint8_t *buf, int buf_size)
{
    PNMContext * const s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, n, linesize, h, w, depth, maxval;
    char buf1[32], tuple_type[32];
    unsigned char *ptr;

    /* special case for last picture */
    if (buf_size == 0) {
        return 0;
    }

    s->bytestream_start=
    s->bytestream= buf;
    s->bytestream_end= buf + buf_size;

    pnm_get(s, buf1, sizeof(buf1));
    if (strcmp(buf1, "P7") != 0)
        return -1;
    w = -1;
    h = -1;
    maxval = -1;
    depth = -1;
    tuple_type[0] = '\0';
    for(;;) {
        pnm_get(s, buf1, sizeof(buf1));
        if (!strcmp(buf1, "WIDTH")) {
            pnm_get(s, buf1, sizeof(buf1));
            w = strtol(buf1, NULL, 10);
        } else if (!strcmp(buf1, "HEIGHT")) {
            pnm_get(s, buf1, sizeof(buf1));
            h = strtol(buf1, NULL, 10);
        } else if (!strcmp(buf1, "DEPTH")) {
            pnm_get(s, buf1, sizeof(buf1));
            depth = strtol(buf1, NULL, 10);
        } else if (!strcmp(buf1, "MAXVAL")) {
            pnm_get(s, buf1, sizeof(buf1));
            maxval = strtol(buf1, NULL, 10);
        } else if (!strcmp(buf1, "TUPLETYPE")) {
            pnm_get(s, tuple_type, sizeof(tuple_type));
        } else if (!strcmp(buf1, "ENDHDR")) {
            break;
        } else {
            return -1;
        }
    }
    /* check that all tags are present */
    if (w <= 0 || h <= 0 || maxval <= 0 || depth <= 0 || tuple_type[0] == '\0')
        return -1;

    avctx->width = w;
    avctx->height = h;
    if (depth == 1) {
        if (maxval == 1)
            avctx->pix_fmt = PIX_FMT_MONOWHITE;
        else
            avctx->pix_fmt = PIX_FMT_GRAY8;
    } else if (depth == 3) {
        avctx->pix_fmt = PIX_FMT_RGB24;
    } else if (depth == 4) {
        avctx->pix_fmt = PIX_FMT_RGBA32;
    } else {
        return -1;
    }

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    switch(avctx->pix_fmt) {
    default:
        return -1;
    case PIX_FMT_RGB24:
        n = avctx->width * 3;
        goto do_read;
    case PIX_FMT_GRAY8:
        n = avctx->width;
        goto do_read;
    case PIX_FMT_MONOWHITE:
        n = (avctx->width + 7) >> 3;
    do_read:
        ptr = p->data[0];
        linesize = p->linesize[0];
        for(i = 0; i < avctx->height; i++) {
            memcpy(ptr, s->bytestream, n);
            s->bytestream += n;
            ptr += linesize;
        }
        break;
    case PIX_FMT_RGBA32:
        ptr = p->data[0];
        linesize = p->linesize[0];
        for(i = 0; i < avctx->height; i++) {
            int j, r, g, b, a;

            for(j = 0;j < w; j++) {
                r = *s->bytestream++;
                g = *s->bytestream++;
                b = *s->bytestream++;
                a = *s->bytestream++;
                ((uint32_t *)ptr)[j] = (a << 24) | (r << 16) | (g << 8) | b;
            }
            ptr += linesize;
        }
        break;
    }
    *picture= *(AVFrame*)&s->picture;
    *data_size = sizeof(AVPicture);

    return s->bytestream - s->bytestream_start;
}
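
/* Encode a PAM (P7) frame: emit the header tags for the chosen pixel format,
 * then write the raster, unpacking RGBA32 words into R, G, B, A bytes. */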
static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data){
    PNMContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, h, w, n, linesize, depth, maxval;
    const char *tuple_type;
    uint8_t *ptr;

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    s->bytestream_start=
    s->bytestream= outbuf;
    s->bytestream_end= outbuf+buf_size;

    h = avctx->height;
    w = avctx->width;
    switch(avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        n = (w + 7) >> 3;
        depth = 1;
        maxval = 1;
        tuple_type = "BLACKANDWHITE";
        break;
    case PIX_FMT_GRAY8:
        n = w;
        depth = 1;
        maxval = 255;
        tuple_type = "GRAYSCALE";
        break;
    case PIX_FMT_RGB24:
        n = w * 3;
        depth = 3;
        maxval = 255;
        tuple_type = "RGB";
        break;
    case PIX_FMT_RGBA32:
        n = w * 4;
        depth = 4;
        maxval = 255;
        tuple_type = "RGB_ALPHA";
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLETYPE %s\nENDHDR\n",
             w, h, depth, maxval, tuple_type);
    s->bytestream += strlen(s->bytestream);

    ptr = p->data[0];
    linesize = p->linesize[0];

    if (avctx->pix_fmt == PIX_FMT_RGBA32) {
        int j;
        unsigned int v;

        for(i=0;i<h;i++) {
            for(j=0;j<w;j++) {
                v = ((uint32_t *)ptr)[j];
                *s->bytestream++ = v >> 16;
                *s->bytestream++ = v >> 8;
                *s->bytestream++ = v;
                *s->bytestream++ = v >> 24;
            }
            ptr += linesize;
        }
    } else {
        for(i=0;i<h;i++) {
            memcpy(s->bytestream, ptr, n);
            s->bytestream += n;
            ptr += linesize;
        }
    }
    return s->bytestream - s->bytestream_start;
}
#if 0
static int pnm_probe(AVProbeData *pd)
{
    const char *p = pd->buf;
    if (pd->buf_size >= 8 &&
        p[0] == 'P' &&
        p[1] >= '4' && p[1] <= '6' &&
        pnm_space(p[2]) )
        return AVPROBE_SCORE_MAX - 1; /* to permit pgmyuv probe */
    else
        return 0;
}

static int pgmyuv_probe(AVProbeData *pd)
{
    if (match_ext(pd->filename, "pgmyuv"))
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}

static int pam_probe(AVProbeData *pd)
{
    const char *p = pd->buf;
    if (pd->buf_size >= 8 &&
        p[0] == 'P' &&
        p[1] == '7' &&
        p[2] == '\n')
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}
#endif
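
/* Parser: try to decode a header from either the buffered or the incoming
 * data; on success the frame size is the header length plus
 * avpicture_get_size(), otherwise END_NOT_FOUND, and ff_combine_frame()
 * assembles complete frames across packets. */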
static int pnm_parse(AVCodecParserContext *s,
                     AVCodecContext *avctx,
                     uint8_t **poutbuf, int *poutbuf_size,
                     const uint8_t *buf, int buf_size)
{
    ParseContext *pc = s->priv_data;
    PNMContext pnmctx;
    int next;

    for(; pc->overread>0; pc->overread--){
        pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
    }
retry:
    if(pc->index){
        pnmctx.bytestream_start=
        pnmctx.bytestream= pc->buffer;
        pnmctx.bytestream_end= pc->buffer + pc->index;
    }else{
        pnmctx.bytestream_start=
        pnmctx.bytestream= buf;
        pnmctx.bytestream_end= buf + buf_size;
    }
    if(pnm_decode_header(avctx, &pnmctx) < 0){
        if(pnmctx.bytestream < pnmctx.bytestream_end){
            if(pc->index){
                pc->index=0;
            }else{
                buf++;
                buf_size--;
            }
            goto retry;
        }
#if 0
        if(pc->index && pc->index*2 + FF_INPUT_BUFFER_PADDING_SIZE < pc->buffer_size && buf_size > pc->index){
            memcpy(pc->buffer + pc->index, buf, pc->index);
            pc->index += pc->index;
            buf += pc->index;
            buf_size -= pc->index;
            goto retry;
        }
#endif
        next= END_NOT_FOUND;
    }else{
        next= pnmctx.bytestream - pnmctx.bytestream_start
            + avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
        if(pnmctx.bytestream_start!=buf)
            next-= pc->index;
        if(next > buf_size)
            next= END_NOT_FOUND;
    }

    if(ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size)<0){
        *poutbuf = NULL;
        *poutbuf_size = 0;
        return buf_size;
    }
    *poutbuf = (uint8_t *)buf;
    *poutbuf_size = buf_size;
    return next;
}
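
/* Registrations: the parser covers the PGM, PGMYUV, PPM and PBM codec IDs;
 * each AVCodec entry below provides both encode and decode callbacks. */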
AVCodecParser pnm_parser = {
    { CODEC_ID_PGM, CODEC_ID_PGMYUV, CODEC_ID_PPM, CODEC_ID_PBM},
    sizeof(ParseContext),
    NULL,
    pnm_parse,
    ff_parse_close,
};
AVCodec pgm_encoder = {
    "pgm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_GRAY8, -1},
};

AVCodec pgmyuv_encoder = {
    "pgmyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGMYUV,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};

AVCodec ppm_encoder = {
    "ppm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PPM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, -1},
};

AVCodec pbm_encoder = {
    "pbm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PBM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_MONOWHITE, -1},
};

AVCodec pam_encoder = {
    "pam",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PAM,
    sizeof(PNMContext),
    common_init,
    pam_encode_frame,
    NULL, //encode_end,
    pam_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA32, PIX_FMT_GRAY8, PIX_FMT_MONOWHITE, -1},
};