/*
 * PNM image format
 * Copyright (c) 2002, 2003 Fabrice Bellard.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "avcodec.h"
#include "mpegvideo.h" //only for ParseContext

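/* Shared state for the PNM/PAM coders: a read/write cursor over the raw
 * byte stream (start, current position, end) plus the frame being decoded
 * or encoded. */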
typedef struct PNMContext {
    uint8_t *bytestream;
    uint8_t *bytestream_start;
    uint8_t *bytestream_end;
    AVFrame picture;
} PNMContext;

static inline int pnm_space(int c)
{
    return (c == ' ' || c == '\n' || c == '\r' || c == '\t');
}

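/* Read the next whitespace-delimited token from the byte stream into str,
 * skipping leading whitespace and '#' comment lines; the result is
 * NUL-terminated and truncated to buf_size - 1 characters. */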
static void pnm_get(PNMContext *sc, char *str, int buf_size)
{
    char *s;
    int c;

    /* skip spaces and comments */
    for(;;) {
        c = *sc->bytestream++;
        if (c == '#') {
            do {
                c = *sc->bytestream++;
            } while (c != '\n' && sc->bytestream < sc->bytestream_end);
        } else if (!pnm_space(c)) {
            break;
        }
    }

    s = str;
    while (sc->bytestream < sc->bytestream_end && !pnm_space(c)) {
        if ((s - str) < buf_size - 1)
            *s++ = c;
        c = *sc->bytestream++;
    }
    *s = '\0';
}

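/* Common codec init: point avctx->coded_frame at the context's own AVFrame. */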
static int common_init(AVCodecContext *avctx){
    PNMContext *s = avctx->priv_data;

    avcodec_get_frame_defaults((AVFrame*)&s->picture);
    avctx->coded_frame= (AVFrame*)&s->picture;

    return 0;
}

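/* Parse a PNM/PAM header. P4/P5/P6 set pix_fmt directly (P5 becomes YUV420P
 * for the pgmyuv pseudo-format); P7 (PAM) is parsed tag by tag until ENDHDR.
 * Width and height are written into avctx; returns 0 on success, -1 on a
 * malformed header or bad dimensions. */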
static int pnm_decode_header(AVCodecContext *avctx, PNMContext * const s){
    char buf1[32], tuple_type[32];
    int h, w, depth, maxval;

    pnm_get(s, buf1, sizeof(buf1));
    if (!strcmp(buf1, "P4")) {
        avctx->pix_fmt = PIX_FMT_MONOWHITE;
    } else if (!strcmp(buf1, "P5")) {
        if (avctx->codec_id == CODEC_ID_PGMYUV)
            avctx->pix_fmt = PIX_FMT_YUV420P;
        else
            avctx->pix_fmt = PIX_FMT_GRAY8;
    } else if (!strcmp(buf1, "P6")) {
        avctx->pix_fmt = PIX_FMT_RGB24;
    } else if (!strcmp(buf1, "P7")) {
        w = -1;
        h = -1;
        maxval = -1;
        depth = -1;
        tuple_type[0] = '\0';
        for(;;) {
            pnm_get(s, buf1, sizeof(buf1));
            if (!strcmp(buf1, "WIDTH")) {
                pnm_get(s, buf1, sizeof(buf1));
                w = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "HEIGHT")) {
                pnm_get(s, buf1, sizeof(buf1));
                h = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "DEPTH")) {
                pnm_get(s, buf1, sizeof(buf1));
                depth = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "MAXVAL")) {
                pnm_get(s, buf1, sizeof(buf1));
                maxval = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "TUPLETYPE")) { /* note: the PAM spec spells this keyword "TUPLTYPE" */
                pnm_get(s, tuple_type, sizeof(tuple_type));
            } else if (!strcmp(buf1, "ENDHDR")) {
                break;
            } else {
                return -1;
            }
        }
        /* check that all tags are present */
        if (w <= 0 || h <= 0 || maxval <= 0 || depth <= 0 || tuple_type[0] == '\0' || avcodec_check_dimensions(avctx, w, h))
            return -1;

        avctx->width = w;
        avctx->height = h;
        if (depth == 1) {
            if (maxval == 1)
                avctx->pix_fmt = PIX_FMT_MONOWHITE;
            else
                avctx->pix_fmt = PIX_FMT_GRAY8;
        } else if (depth == 3) {
            avctx->pix_fmt = PIX_FMT_RGB24;
        } else if (depth == 4) {
            avctx->pix_fmt = PIX_FMT_RGBA32;
        } else {
            return -1;
        }
        return 0;
    } else {
        return -1;
    }
    pnm_get(s, buf1, sizeof(buf1));
    avctx->width = atoi(buf1);
    if (avctx->width <= 0)
        return -1;
    pnm_get(s, buf1, sizeof(buf1));
    avctx->height = atoi(buf1);
    if(avcodec_check_dimensions(avctx, avctx->width, avctx->height))
        return -1;
    if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
        pnm_get(s, buf1, sizeof(buf1)); /* maxval, read but otherwise ignored */
    }

    /* more checks for YUV420 */
    if (avctx->pix_fmt == PIX_FMT_YUV420P) {
        if ((avctx->width & 1) != 0)
            return -1;
        h = (avctx->height * 2);
        if ((h % 3) != 0)
            return -1;
        h /= 3;
        avctx->height = h;
    }
    return 0;
}

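/* Decode one raw-format (binary) PNM/PAM image: parse the header, grab an
 * output buffer and copy the payload row by row into the frame's planes.
 * Returns the number of bytes consumed, or -1 on error or truncated input. */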
static int pnm_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            uint8_t *buf, int buf_size)
{
    PNMContext * const s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, n, linesize, h;
    unsigned char *ptr;

    s->bytestream_start=
    s->bytestream= buf;
    s->bytestream_end= buf + buf_size;

    if(pnm_decode_header(avctx, s) < 0)
        return -1;

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    switch(avctx->pix_fmt) {
    default:
        return -1;
    case PIX_FMT_RGB24:
        n = avctx->width * 3;
        goto do_read;
    case PIX_FMT_GRAY8:
        n = avctx->width;
        goto do_read;
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        n = (avctx->width + 7) >> 3;
    do_read:
        ptr = p->data[0];
        linesize = p->linesize[0];
        if(s->bytestream + n*avctx->height > s->bytestream_end)
            return -1;
        for(i = 0; i < avctx->height; i++) {
            memcpy(ptr, s->bytestream, n);
            s->bytestream += n;
            ptr += linesize;
        }
        break;
    case PIX_FMT_YUV420P:
        {
            unsigned char *ptr1, *ptr2;

            n = avctx->width;
            ptr = p->data[0];
            linesize = p->linesize[0];
            if(s->bytestream + n*avctx->height*3/2 > s->bytestream_end)
                return -1;
            for(i = 0; i < avctx->height; i++) {
                memcpy(ptr, s->bytestream, n);
                s->bytestream += n;
                ptr += linesize;
            }
            ptr1 = p->data[1];
            ptr2 = p->data[2];
            n >>= 1;
            h = avctx->height >> 1;
            for(i = 0; i < h; i++) {
                memcpy(ptr1, s->bytestream, n);
                s->bytestream += n;
                memcpy(ptr2, s->bytestream, n);
                s->bytestream += n;
                ptr1 += p->linesize[1];
                ptr2 += p->linesize[2];
            }
        }
        break;
    case PIX_FMT_RGBA32:
        ptr = p->data[0];
        linesize = p->linesize[0];
        if(s->bytestream + avctx->width*avctx->height*4 > s->bytestream_end)
            return -1;
        for(i = 0; i < avctx->height; i++) {
            int j, r, g, b, a;

            for(j = 0;j < avctx->width; j++) {
                r = *s->bytestream++;
                g = *s->bytestream++;
                b = *s->bytestream++;
                a = *s->bytestream++;
                ((uint32_t *)ptr)[j] = (a << 24) | (r << 16) | (g << 8) | b;
            }
            ptr += linesize;
        }
        break;
    }
    *picture= *(AVFrame*)&s->picture;
    *data_size = sizeof(AVPicture);

    return s->bytestream - s->bytestream_start;
}

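/* Encode one frame as binary PNM (P4/P5/P6). YUV420P is written as a P5
 * greyscale image of height 3*h/2 with the half-width chroma rows appended
 * after the luma, matching what the decoder expects for the pgmyuv format. */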
static int pnm_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data){
    PNMContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, h, h1, c, n, linesize;
    uint8_t *ptr, *ptr1, *ptr2;

    if(buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200){
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    s->bytestream_start=
    s->bytestream= outbuf;
    s->bytestream_end= outbuf+buf_size;

    h = avctx->height;
    h1 = h;
    switch(avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        c = '4';
        n = (avctx->width + 7) >> 3;
        break;
    case PIX_FMT_GRAY8:
        c = '5';
        n = avctx->width;
        break;
    case PIX_FMT_RGB24:
        c = '6';
        n = avctx->width * 3;
        break;
    case PIX_FMT_YUV420P:
        c = '5';
        n = avctx->width;
        h1 = (h * 3) / 2;
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P%c\n%d %d\n",
             c, avctx->width, h1);
    s->bytestream += strlen(s->bytestream);
    if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
        snprintf(s->bytestream, s->bytestream_end - s->bytestream,
                 "%d\n", 255);
        s->bytestream += strlen(s->bytestream);
    }

    ptr = p->data[0];
    linesize = p->linesize[0];
    for(i=0;i<h;i++) {
        memcpy(s->bytestream, ptr, n);
        s->bytestream += n;
        ptr += linesize;
    }

    if (avctx->pix_fmt == PIX_FMT_YUV420P) {
        h >>= 1;
        n >>= 1;
        ptr1 = p->data[1];
        ptr2 = p->data[2];
        for(i=0;i<h;i++) {
            memcpy(s->bytestream, ptr1, n);
            s->bytestream += n;
            memcpy(s->bytestream, ptr2, n);
            s->bytestream += n;
            ptr1 += p->linesize[1];
            ptr2 += p->linesize[2];
        }
    }
    return s->bytestream - s->bytestream_start;
}

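/* Encode one frame as PAM (P7): write the WIDTH/HEIGHT/DEPTH/MAXVAL/TUPLETYPE
 * header, then the raster; RGBA32 pixels are unpacked from the native 32-bit
 * layout into R, G, B, A byte order. */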
static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data){
    PNMContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, h, w, n, linesize, depth, maxval;
    const char *tuple_type;
    uint8_t *ptr;

    if(buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200){
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    s->bytestream_start=
    s->bytestream= outbuf;
    s->bytestream_end= outbuf+buf_size;

    h = avctx->height;
    w = avctx->width;
    switch(avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        n = (w + 7) >> 3;
        depth = 1;
        maxval = 1;
        tuple_type = "BLACKANDWHITE";
        break;
    case PIX_FMT_GRAY8:
        n = w;
        depth = 1;
        maxval = 255;
        tuple_type = "GRAYSCALE";
        break;
    case PIX_FMT_RGB24:
        n = w * 3;
        depth = 3;
        maxval = 255;
        tuple_type = "RGB";
        break;
    case PIX_FMT_RGBA32:
        n = w * 4;
        depth = 4;
        maxval = 255;
        tuple_type = "RGB_ALPHA";
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLETYPE %s\nENDHDR\n",
             w, h, depth, maxval, tuple_type);
    s->bytestream += strlen(s->bytestream);

    ptr = p->data[0];
    linesize = p->linesize[0];

    if (avctx->pix_fmt == PIX_FMT_RGBA32) {
        int j;
        unsigned int v;

        for(i=0;i<h;i++) {
            for(j=0;j<w;j++) {
                v = ((uint32_t *)ptr)[j];
                *s->bytestream++ = v >> 16;
                *s->bytestream++ = v >> 8;
                *s->bytestream++ = v;
                *s->bytestream++ = v >> 24;
            }
            ptr += linesize;
        }
    } else {
        for(i=0;i<h;i++) {
            memcpy(s->bytestream, ptr, n);
            s->bytestream += n;
            ptr += linesize;
        }
    }
    return s->bytestream - s->bytestream_start;
}

#if 0
static int pnm_probe(AVProbeData *pd)
{
    const char *p = pd->buf;
    if (pd->buf_size >= 8 &&
        p[0] == 'P' &&
        p[1] >= '4' && p[1] <= '6' &&
        pnm_space(p[2]) )
        return AVPROBE_SCORE_MAX - 1; /* to permit pgmyuv probe */
    else
        return 0;
}

static int pgmyuv_probe(AVProbeData *pd)
{
    if (match_ext(pd->filename, "pgmyuv"))
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}

static int pam_probe(AVProbeData *pd)
{
    const char *p = pd->buf;
    if (pd->buf_size >= 8 &&
        p[0] == 'P' &&
        p[1] == '7' &&
        p[2] == '\n')
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}
#endif

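/* Parser callback: find the end of the current image by decoding its header
 * into a throwaway PNMContext and adding the payload size computed from the
 * resulting pix_fmt and dimensions; incomplete input yields END_NOT_FOUND so
 * ff_combine_frame() can buffer data across packets. */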
static int pnm_parse(AVCodecParserContext *s,
                     AVCodecContext *avctx,
                     uint8_t **poutbuf, int *poutbuf_size,
                     const uint8_t *buf, int buf_size)
{
    ParseContext *pc = s->priv_data;
    PNMContext pnmctx;
    int next;

    for(; pc->overread>0; pc->overread--){
        pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
    }
retry:
    if(pc->index){
        pnmctx.bytestream_start=
        pnmctx.bytestream= pc->buffer;
        pnmctx.bytestream_end= pc->buffer + pc->index;
    }else{
        pnmctx.bytestream_start=
        pnmctx.bytestream= (uint8_t *) buf; /* casts avoid warnings */
        pnmctx.bytestream_end= (uint8_t *) buf + buf_size;
    }
    if(pnm_decode_header(avctx, &pnmctx) < 0){
        if(pnmctx.bytestream < pnmctx.bytestream_end){
            if(pc->index){
                pc->index=0;
            }else{
                buf++;
                buf_size--;
            }
            goto retry;
        }
#if 0
        if(pc->index && pc->index*2 + FF_INPUT_BUFFER_PADDING_SIZE < pc->buffer_size && buf_size > pc->index){
            memcpy(pc->buffer + pc->index, buf, pc->index);
            pc->index += pc->index;
            buf += pc->index;
            buf_size -= pc->index;
            goto retry;
        }
#endif
        next= END_NOT_FOUND;
    }else{
        next= pnmctx.bytestream - pnmctx.bytestream_start
            + avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
        if(pnmctx.bytestream_start!=buf)
            next-= pc->index;
        if(next > buf_size)
            next= END_NOT_FOUND;
    }

    if(ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size)<0){
        *poutbuf = NULL;
        *poutbuf_size = 0;
        return buf_size;
    }
    *poutbuf = (uint8_t *)buf;
    *poutbuf_size = buf_size;
    return next;
}

AVCodecParser pnm_parser = {
    { CODEC_ID_PGM, CODEC_ID_PGMYUV, CODEC_ID_PPM, CODEC_ID_PBM, CODEC_ID_PAM},
    sizeof(ParseContext),
    NULL,
    pnm_parse,
    ff_parse_close,
};

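/* Component registrations: every entry reuses pnm_decode_frame for decoding;
 * only the encode callback and the advertised pix_fmts differ per format. */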
#ifdef CONFIG_PGM_ENCODER
AVCodec pgm_encoder = {
    "pgm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_GRAY8, -1},
};
#endif // CONFIG_PGM_ENCODER

#ifdef CONFIG_PGMYUV_ENCODER
AVCodec pgmyuv_encoder = {
    "pgmyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGMYUV,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};
#endif // CONFIG_PGMYUV_ENCODER

#ifdef CONFIG_PPM_ENCODER
AVCodec ppm_encoder = {
    "ppm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PPM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, -1},
};
#endif // CONFIG_PPM_ENCODER

#ifdef CONFIG_PBM_ENCODER
AVCodec pbm_encoder = {
    "pbm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PBM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_MONOWHITE, -1},
};
#endif // CONFIG_PBM_ENCODER

#ifdef CONFIG_PAM_ENCODER
AVCodec pam_encoder = {
    "pam",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PAM,
    sizeof(PNMContext),
    common_init,
    pam_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA32, PIX_FMT_GRAY8, PIX_FMT_MONOWHITE, -1},
};
#endif // CONFIG_PAM_ENCODER