You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

629 lines
17KB

  1. /*
  2. * PNM image format
  3. * Copyright (c) 2002, 2003 Fabrice Bellard.
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include "avcodec.h"
  22. #include "parser.h" //for ParseContext
/** Shared state for the PNM/PAM family of decoders, encoders and the parser. */
typedef struct PNMContext {
    uint8_t *bytestream;       ///< current read/write position in the buffer
    uint8_t *bytestream_start; ///< first byte of the buffer
    uint8_t *bytestream_end;   ///< one past the last valid byte
    AVFrame picture;           ///< frame handed to/returned by the codec layer
    int maxval;                ///< maximum value of a pixel
} PNMContext;
  30. static inline int pnm_space(int c)
  31. {
  32. return (c == ' ' || c == '\n' || c == '\r' || c == '\t');
  33. }
  34. static void pnm_get(PNMContext *sc, char *str, int buf_size)
  35. {
  36. char *s;
  37. int c;
  38. /* skip spaces and comments */
  39. for(;;) {
  40. c = *sc->bytestream++;
  41. if (c == '#') {
  42. do {
  43. c = *sc->bytestream++;
  44. } while (c != '\n' && sc->bytestream < sc->bytestream_end);
  45. } else if (!pnm_space(c)) {
  46. break;
  47. }
  48. }
  49. s = str;
  50. while (sc->bytestream < sc->bytestream_end && !pnm_space(c)) {
  51. if ((s - str) < buf_size - 1)
  52. *s++ = c;
  53. c = *sc->bytestream++;
  54. }
  55. *s = '\0';
  56. }
  57. static int common_init(AVCodecContext *avctx){
  58. PNMContext *s = avctx->priv_data;
  59. avcodec_get_frame_defaults((AVFrame*)&s->picture);
  60. avctx->coded_frame= (AVFrame*)&s->picture;
  61. return 0;
  62. }
  63. static int pnm_decode_header(AVCodecContext *avctx, PNMContext * const s){
  64. char buf1[32], tuple_type[32];
  65. int h, w, depth, maxval;
  66. pnm_get(s, buf1, sizeof(buf1));
  67. if (!strcmp(buf1, "P4")) {
  68. avctx->pix_fmt = PIX_FMT_MONOWHITE;
  69. } else if (!strcmp(buf1, "P5")) {
  70. if (avctx->codec_id == CODEC_ID_PGMYUV)
  71. avctx->pix_fmt = PIX_FMT_YUV420P;
  72. else
  73. avctx->pix_fmt = PIX_FMT_GRAY8;
  74. } else if (!strcmp(buf1, "P6")) {
  75. avctx->pix_fmt = PIX_FMT_RGB24;
  76. } else if (!strcmp(buf1, "P7")) {
  77. w = -1;
  78. h = -1;
  79. maxval = -1;
  80. depth = -1;
  81. tuple_type[0] = '\0';
  82. for(;;) {
  83. pnm_get(s, buf1, sizeof(buf1));
  84. if (!strcmp(buf1, "WIDTH")) {
  85. pnm_get(s, buf1, sizeof(buf1));
  86. w = strtol(buf1, NULL, 10);
  87. } else if (!strcmp(buf1, "HEIGHT")) {
  88. pnm_get(s, buf1, sizeof(buf1));
  89. h = strtol(buf1, NULL, 10);
  90. } else if (!strcmp(buf1, "DEPTH")) {
  91. pnm_get(s, buf1, sizeof(buf1));
  92. depth = strtol(buf1, NULL, 10);
  93. } else if (!strcmp(buf1, "MAXVAL")) {
  94. pnm_get(s, buf1, sizeof(buf1));
  95. maxval = strtol(buf1, NULL, 10);
  96. } else if (!strcmp(buf1, "TUPLETYPE")) {
  97. pnm_get(s, tuple_type, sizeof(tuple_type));
  98. } else if (!strcmp(buf1, "ENDHDR")) {
  99. break;
  100. } else {
  101. return -1;
  102. }
  103. }
  104. /* check that all tags are present */
  105. if (w <= 0 || h <= 0 || maxval <= 0 || depth <= 0 || tuple_type[0] == '\0' || avcodec_check_dimensions(avctx, w, h))
  106. return -1;
  107. avctx->width = w;
  108. avctx->height = h;
  109. if (depth == 1) {
  110. if (maxval == 1)
  111. avctx->pix_fmt = PIX_FMT_MONOWHITE;
  112. else
  113. avctx->pix_fmt = PIX_FMT_GRAY8;
  114. } else if (depth == 3) {
  115. avctx->pix_fmt = PIX_FMT_RGB24;
  116. } else if (depth == 4) {
  117. avctx->pix_fmt = PIX_FMT_RGB32;
  118. } else {
  119. return -1;
  120. }
  121. return 0;
  122. } else {
  123. return -1;
  124. }
  125. pnm_get(s, buf1, sizeof(buf1));
  126. avctx->width = atoi(buf1);
  127. if (avctx->width <= 0)
  128. return -1;
  129. pnm_get(s, buf1, sizeof(buf1));
  130. avctx->height = atoi(buf1);
  131. if(avcodec_check_dimensions(avctx, avctx->width, avctx->height))
  132. return -1;
  133. if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
  134. pnm_get(s, buf1, sizeof(buf1));
  135. s->maxval = atoi(buf1);
  136. if(s->maxval >= 256 && avctx->pix_fmt == PIX_FMT_GRAY8) {
  137. avctx->pix_fmt = PIX_FMT_GRAY16BE;
  138. if (s->maxval != 65535)
  139. avctx->pix_fmt = PIX_FMT_GRAY16;
  140. }
  141. }
  142. /* more check if YUV420 */
  143. if (avctx->pix_fmt == PIX_FMT_YUV420P) {
  144. if ((avctx->width & 1) != 0)
  145. return -1;
  146. h = (avctx->height * 2);
  147. if ((h % 3) != 0)
  148. return -1;
  149. h /= 3;
  150. avctx->height = h;
  151. }
  152. return 0;
  153. }
/**
 * Decode one PNM/PAM image from buf into an AVFrame.
 *
 * Parses the textual header with pnm_decode_header(), allocates a frame via
 * avctx->get_buffer(), then copies (or rescales) the raster data according
 * to the negotiated pixel format.
 *
 * @param data      output AVFrame (filled on success)
 * @param data_size set to sizeof(AVPicture) when a frame was produced
 * @param buf       complete encoded image (header + raster)
 * @param buf_size  size of buf in bytes
 * @return number of bytes consumed, or -1 on error
 */
static int pnm_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            uint8_t *buf, int buf_size)
{
    PNMContext * const s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p = (AVFrame*)&s->picture;
    int i, n, linesize, h, upgrade = 0;
    unsigned char *ptr;

    /* point the context's bytestream window at the caller's buffer */
    s->bytestream_start =
    s->bytestream = buf;
    s->bytestream_end = buf + buf_size;

    if (pnm_decode_header(avctx, s) < 0)
        return -1;

    /* release the previously returned frame before reusing the context */
    if (p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference = 0;
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    p->pict_type = FF_I_TYPE;
    p->key_frame = 1;

    switch (avctx->pix_fmt) {
    default:
        return -1;
    case PIX_FMT_RGB24:
        n = avctx->width * 3;           /* bytes per raster row */
        goto do_read;
    case PIX_FMT_GRAY8:
        n = avctx->width;
        if (s->maxval < 255)
            upgrade = 1;                /* 8-bit samples need rescaling */
        goto do_read;
    case PIX_FMT_GRAY16BE:
    case PIX_FMT_GRAY16LE:
        n = avctx->width * 2;
        if (s->maxval < 65535)
            upgrade = 2;                /* 16-bit samples need rescaling */
        goto do_read;
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        n = (avctx->width + 7) >> 3;    /* packed 1 bpp, padded to a byte */
    do_read:
        ptr = p->data[0];
        linesize = p->linesize[0];
        /* NOTE(review): n*avctx->height is computed in int — could overflow
           for pathological dimensions; avcodec_check_dimensions is assumed
           to bound them — confirm. */
        if (s->bytestream + n*avctx->height > s->bytestream_end)
            return -1;
        for (i = 0; i < avctx->height; i++) {
            if (!upgrade)
                memcpy(ptr, s->bytestream, n);
            else if (upgrade == 1) {
                /* scale [0, maxval] to [0, 255] with fixed-point rounding */
                unsigned int j, f = (255*128 + s->maxval/2) / s->maxval;
                for (j = 0; j < n; j++)
                    ptr[j] = (s->bytestream[j] * f + 64) >> 7;
            } else if (upgrade == 2) {
                /* scale big-endian 16-bit samples to [0, 65535] */
                unsigned int j, v, f = (65535*32768 + s->maxval/2) / s->maxval;
                for (j = 0; j < n/2; j++) {
                    v = be2me_16(((uint16_t *)s->bytestream)[j]);
                    ((uint16_t *)ptr)[j] = (v * f + 16384) >> 15;
                }
            }
            s->bytestream += n;
            ptr += linesize;
        }
        break;
    case PIX_FMT_YUV420P:
        {
            unsigned char *ptr1, *ptr2;

            /* full-width luma plane followed by half-size U and V planes */
            n = avctx->width;
            ptr = p->data[0];
            linesize = p->linesize[0];
            if (s->bytestream + n*avctx->height*3/2 > s->bytestream_end)
                return -1;
            for (i = 0; i < avctx->height; i++) {
                memcpy(ptr, s->bytestream, n);
                s->bytestream += n;
                ptr += linesize;
            }
            ptr1 = p->data[1];
            ptr2 = p->data[2];
            n >>= 1;
            h = avctx->height >> 1;
            for (i = 0; i < h; i++) {
                memcpy(ptr1, s->bytestream, n);
                s->bytestream += n;
                memcpy(ptr2, s->bytestream, n);
                s->bytestream += n;
                ptr1 += p->linesize[1];
                ptr2 += p->linesize[2];
            }
        }
        break;
    case PIX_FMT_RGB32:
        /* PAM RGB_ALPHA: input is R,G,B,A bytes; output is packed ARGB words */
        ptr = p->data[0];
        linesize = p->linesize[0];
        if (s->bytestream + avctx->width*avctx->height*4 > s->bytestream_end)
            return -1;
        for (i = 0; i < avctx->height; i++) {
            int j, r, g, b, a;

            for (j = 0; j < avctx->width; j++) {
                r = *s->bytestream++;
                g = *s->bytestream++;
                b = *s->bytestream++;
                a = *s->bytestream++;
                ((uint32_t *)ptr)[j] = (a << 24) | (r << 16) | (g << 8) | b;
            }
            ptr += linesize;
        }
        break;
    }
    *picture = *(AVFrame*)&s->picture;
    *data_size = sizeof(AVPicture);

    return s->bytestream - s->bytestream_start;
}
/**
 * Encode one frame as PBM (P4), PGM (P5), PPM (P6) or PGMYUV.
 *
 * Writes a textual header followed by the raw raster rows into outbuf.
 * PGMYUV is a PGM whose stated height covers the Y plane plus the two
 * half-size chroma planes appended below it.
 *
 * @param outbuf   destination buffer
 * @param buf_size size of outbuf; must fit the picture plus ~200 header bytes
 * @param data     source AVFrame
 * @return number of bytes written, or -1 on error
 */
static int pnm_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data){
    PNMContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p = (AVFrame*)&s->picture;
    int i, h, h1, c, n, linesize;
    uint8_t *ptr, *ptr1, *ptr2;

    /* conservative size check: raw picture size plus header slack */
    if (buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200) {
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    *p = *pict;
    p->pict_type = FF_I_TYPE;
    p->key_frame = 1;

    s->bytestream_start =
    s->bytestream = outbuf;
    s->bytestream_end = outbuf + buf_size;

    h = avctx->height;
    h1 = h;                             /* h1 = height as stated in the header */
    switch (avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        c = '4';                        /* PBM: packed 1 bpp */
        n = (avctx->width + 7) >> 3;
        break;
    case PIX_FMT_GRAY8:
        c = '5';                        /* PGM */
        n = avctx->width;
        break;
    case PIX_FMT_GRAY16BE:
        c = '5';                        /* PGM with 16-bit big-endian samples */
        n = avctx->width * 2;
        break;
    case PIX_FMT_RGB24:
        c = '6';                        /* PPM */
        n = avctx->width * 3;
        break;
    case PIX_FMT_YUV420P:
        c = '5';                        /* PGMYUV: PGM carrying Y + U + V */
        n = avctx->width;
        h1 = (h * 3) / 2;               /* header height includes chroma rows */
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P%c\n%d %d\n",
             c, avctx->width, h1);
    s->bytestream += strlen(s->bytestream);
    /* PBM has no maxval line; 16-bit gray advertises the full 16-bit range */
    if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
        snprintf(s->bytestream, s->bytestream_end - s->bytestream,
                 "%d\n", (avctx->pix_fmt != PIX_FMT_GRAY16BE) ? 255 : 65535);
        s->bytestream += strlen(s->bytestream);
    }

    /* luma (or packed RGB / mono) rows */
    ptr = p->data[0];
    linesize = p->linesize[0];
    for (i = 0; i < h; i++) {
        memcpy(s->bytestream, ptr, n);
        s->bytestream += n;
        ptr += linesize;
    }

    /* PGMYUV: append the half-size U and V planes line-interleaved */
    if (avctx->pix_fmt == PIX_FMT_YUV420P) {
        h >>= 1;
        n >>= 1;
        ptr1 = p->data[1];
        ptr2 = p->data[2];
        for (i = 0; i < h; i++) {
            memcpy(s->bytestream, ptr1, n);
            s->bytestream += n;
            memcpy(s->bytestream, ptr2, n);
            s->bytestream += n;
            ptr1 += p->linesize[1];
            ptr2 += p->linesize[2];
        }
    }
    return s->bytestream - s->bytestream_start;
}
/**
 * Encode one frame as PAM (P7).
 *
 * Writes the PAM attribute header followed by the raster. RGB32 input is
 * unpacked from ARGB words into the R,G,B,A byte order PAM expects; all
 * other formats are copied row by row.
 *
 * NOTE(review): the header keyword written here is "TUPLETYPE", but the
 * Netpbm PAM specification spells it "TUPLTYPE". It is kept as-is because
 * pnm_decode_header() in this file expects the same spelling — changing it
 * in only one place would break round-tripping; fix both together.
 *
 * @param outbuf   destination buffer
 * @param buf_size size of outbuf; must fit the picture plus ~200 header bytes
 * @param data     source AVFrame
 * @return number of bytes written, or -1 on error
 */
static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data){
    PNMContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p = (AVFrame*)&s->picture;
    int i, h, w, n, linesize, depth, maxval;
    const char *tuple_type;
    uint8_t *ptr;

    /* conservative size check: raw picture size plus header slack */
    if (buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200) {
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    *p = *pict;
    p->pict_type = FF_I_TYPE;
    p->key_frame = 1;

    s->bytestream_start =
    s->bytestream = outbuf;
    s->bytestream_end = outbuf + buf_size;

    h = avctx->height;
    w = avctx->width;
    switch (avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        n = (w + 7) >> 3;               /* packed 1 bpp, padded to a byte */
        depth = 1;
        maxval = 1;
        tuple_type = "BLACKANDWHITE";
        break;
    case PIX_FMT_GRAY8:
        n = w;
        depth = 1;
        maxval = 255;
        tuple_type = "GRAYSCALE";
        break;
    case PIX_FMT_RGB24:
        n = w * 3;
        depth = 3;
        maxval = 255;
        tuple_type = "RGB";
        break;
    case PIX_FMT_RGB32:
        n = w * 4;
        depth = 4;
        maxval = 255;
        tuple_type = "RGB_ALPHA";
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLETYPE %s\nENDHDR\n",
             w, h, depth, maxval, tuple_type);
    s->bytestream += strlen(s->bytestream);

    ptr = p->data[0];
    linesize = p->linesize[0];

    if (avctx->pix_fmt == PIX_FMT_RGB32) {
        /* unpack ARGB words into the R,G,B,A byte order PAM stores */
        int j;
        unsigned int v;

        for (i = 0; i < h; i++) {
            for (j = 0; j < w; j++) {
                v = ((uint32_t *)ptr)[j];
                *s->bytestream++ = v >> 16;     /* R */
                *s->bytestream++ = v >> 8;      /* G */
                *s->bytestream++ = v;           /* B */
                *s->bytestream++ = v >> 24;     /* A */
            }
            ptr += linesize;
        }
    } else {
        for (i = 0; i < h; i++) {
            memcpy(s->bytestream, ptr, n);
            s->bytestream += n;
            ptr += linesize;
        }
    }
    return s->bytestream - s->bytestream_start;
}
/* Disabled format-probe helpers (compiled out): these match the old
 * libavformat probe API and check the "P4".."P6" / "P7" magic (or the
 * .pgmyuv extension) at the start of a file. Kept for reference only. */
#if 0
static int pnm_probe(AVProbeData *pd)
{
    const char *p = pd->buf;

    /* P4/P5/P6 magic followed by PNM whitespace */
    if (pd->buf_size >= 8 &&
        p[0] == 'P' &&
        p[1] >= '4' && p[1] <= '6' &&
        pnm_space(p[2]) )
        return AVPROBE_SCORE_MAX - 1; /* to permit pgmyuv probe */
    else
        return 0;
}

static int pgmyuv_probe(AVProbeData *pd)
{
    /* PGMYUV is indistinguishable from PGM by content; go by extension */
    if (match_ext(pd->filename, "pgmyuv"))
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}

static int pam_probe(AVProbeData *pd)
{
    const char *p = pd->buf;

    /* "P7\n" magic */
    if (pd->buf_size >= 8 &&
        p[0] == 'P' &&
        p[1] == '7' &&
        p[2] == '\n')
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}
#endif
#ifdef CONFIG_PNM_PARSER
/**
 * Split a byte stream into complete PNM frames.
 *
 * Works by attempting to parse a header either from the ParseContext's
 * accumulated buffer or directly from the new input; on success the frame
 * length is header size + avpicture_get_size(). On a header parse failure
 * the start position is advanced one byte at a time (resync) until a valid
 * header is found or the input is exhausted.
 *
 * @return number of input bytes consumed (next frame boundary handling is
 *         done by ff_combine_frame)
 */
static int pnm_parse(AVCodecParserContext *s,
                     AVCodecContext *avctx,
                     uint8_t **poutbuf, int *poutbuf_size,
                     const uint8_t *buf, int buf_size)
{
    ParseContext *pc = s->priv_data;
    PNMContext pnmctx;
    int next;

    /* replay bytes that the previous call read past the frame end */
    for (; pc->overread > 0; pc->overread--) {
        pc->buffer[pc->index++] = pc->buffer[pc->overread_index++];
    }
retry:
    /* parse the header from buffered data if any, else from the new input */
    if (pc->index) {
        pnmctx.bytestream_start =
        pnmctx.bytestream = pc->buffer;
        pnmctx.bytestream_end = pc->buffer + pc->index;
    } else {
        pnmctx.bytestream_start =
        pnmctx.bytestream = (uint8_t *) buf; /* casts avoid warnings */
        pnmctx.bytestream_end = (uint8_t *) buf + buf_size;
    }
    if (pnm_decode_header(avctx, &pnmctx) < 0) {
        if (pnmctx.bytestream < pnmctx.bytestream_end) {
            /* header invalid but bytes remain: resync by dropping either the
               whole buffered prefix or one input byte, then try again */
            if (pc->index) {
                pc->index = 0;
            } else {
                buf++;
                buf_size--;
            }
            goto retry;
        }
#if 0
        if (pc->index && pc->index*2 + FF_INPUT_BUFFER_PADDING_SIZE < pc->buffer_size && buf_size > pc->index) {
            memcpy(pc->buffer + pc->index, buf, pc->index);
            pc->index += pc->index;
            buf += pc->index;
            buf_size -= pc->index;
            goto retry;
        }
#endif
        /* header incomplete: need more input */
        next = END_NOT_FOUND;
    } else {
        /* frame size = header bytes consumed + raw picture size */
        next = pnmctx.bytestream - pnmctx.bytestream_start
               + avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
        if (pnmctx.bytestream_start != buf)
            next -= pc->index;  /* part of the frame was already buffered */
        if (next > buf_size)
            next = END_NOT_FOUND;
    }

    if (ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size) < 0) {
        *poutbuf = NULL;
        *poutbuf_size = 0;
        return buf_size;
    }
    *poutbuf = (uint8_t *)buf;
    *poutbuf_size = buf_size;
    return next;
}

/** Parser registration for all PNM-family codec IDs. */
AVCodecParser pnm_parser = {
    { CODEC_ID_PGM, CODEC_ID_PGMYUV, CODEC_ID_PPM, CODEC_ID_PBM, CODEC_ID_PAM},
    sizeof(ParseContext),
    NULL,
    pnm_parse,
    ff_parse_close,
};
#endif /* CONFIG_PNM_PARSER */
/* Codec registrations. Each table pairs pnm_encode_frame/pam_encode_frame
 * with pnm_decode_frame for one format and lists the pixel formats the
 * encoder accepts (terminated by -1). */
#ifdef CONFIG_PGM_ENCODER
AVCodec pgm_encoder = {
    "pgm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_GRAY8, PIX_FMT_GRAY16BE, -1},
};
#endif // CONFIG_PGM_ENCODER

#ifdef CONFIG_PGMYUV_ENCODER
AVCodec pgmyuv_encoder = {
    "pgmyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGMYUV,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};
#endif // CONFIG_PGMYUV_ENCODER

#ifdef CONFIG_PPM_ENCODER
AVCodec ppm_encoder = {
    "ppm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PPM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, -1},
};
#endif // CONFIG_PPM_ENCODER

#ifdef CONFIG_PBM_ENCODER
AVCodec pbm_encoder = {
    "pbm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PBM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_MONOWHITE, -1},
};
#endif // CONFIG_PBM_ENCODER

#ifdef CONFIG_PAM_ENCODER
AVCodec pam_encoder = {
    "pam",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PAM,
    sizeof(PNMContext),
    common_init,
    pam_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_GRAY8, PIX_FMT_MONOWHITE, -1},
};
#endif // CONFIG_PAM_ENCODER