/*
 * PNM image format
 * Copyright (c) 2002, 2003 Fabrice Bellard.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "avcodec.h"
#include "mpegvideo.h" //only for ParseContext

typedef struct PNMContext {
    uint8_t *bytestream;
    uint8_t *bytestream_start;
    uint8_t *bytestream_end;
    AVFrame picture;
} PNMContext;

static inline int pnm_space(int c)
{
    return (c == ' ' || c == '\n' || c == '\r' || c == '\t');
}
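
/* Read the next whitespace-delimited token from the bytestream into str,
 * skipping leading whitespace and '#' comment lines. At most buf_size-1
 * characters are stored and the result is always NUL-terminated. */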
static void pnm_get(PNMContext *sc, char *str, int buf_size)
{
    char *s;
    int c;

    /* skip spaces and comments */
    for(;;) {
        c = *sc->bytestream++;
        if (c == '#') {
            do {
                c = *sc->bytestream++;
            } while (c != '\n' && sc->bytestream < sc->bytestream_end);
        } else if (!pnm_space(c)) {
            break;
        }
    }

    s = str;
    while (sc->bytestream < sc->bytestream_end && !pnm_space(c)) {
        if ((s - str) < buf_size - 1)
            *s++ = c;
        c = *sc->bytestream++;
    }
    *s = '\0';
}

static int common_init(AVCodecContext *avctx){
    PNMContext *s = avctx->priv_data;

    avcodec_get_frame_defaults((AVFrame*)&s->picture);
    avctx->coded_frame= (AVFrame*)&s->picture;

    return 0;
}
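
/* Parse a PNM/PAM header (magic P4/P5/P6/P7 plus dimensions, maxval and,
 * for PAM, the tag list up to ENDHDR) and set avctx->width, avctx->height
 * and avctx->pix_fmt accordingly. Returns 0 on success, -1 on error. */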
static int pnm_decode_header(AVCodecContext *avctx, PNMContext * const s){
    char buf1[32], tuple_type[32];
    int h, w, depth, maxval;

    pnm_get(s, buf1, sizeof(buf1));
    if (!strcmp(buf1, "P4")) {
        avctx->pix_fmt = PIX_FMT_MONOWHITE;
    } else if (!strcmp(buf1, "P5")) {
        if (avctx->codec_id == CODEC_ID_PGMYUV)
            avctx->pix_fmt = PIX_FMT_YUV420P;
        else
            avctx->pix_fmt = PIX_FMT_GRAY8;
    } else if (!strcmp(buf1, "P6")) {
        avctx->pix_fmt = PIX_FMT_RGB24;
    } else if (!strcmp(buf1, "P7")) {
        w = -1;
        h = -1;
        maxval = -1;
        depth = -1;
        tuple_type[0] = '\0';
        for(;;) {
            pnm_get(s, buf1, sizeof(buf1));
            if (!strcmp(buf1, "WIDTH")) {
                pnm_get(s, buf1, sizeof(buf1));
                w = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "HEIGHT")) {
                pnm_get(s, buf1, sizeof(buf1));
                h = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "DEPTH")) {
                pnm_get(s, buf1, sizeof(buf1));
                depth = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "MAXVAL")) {
                pnm_get(s, buf1, sizeof(buf1));
                maxval = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "TUPLTYPE")) {
                pnm_get(s, tuple_type, sizeof(tuple_type));
            } else if (!strcmp(buf1, "ENDHDR")) {
                break;
            } else {
                return -1;
            }
        }
        /* check that all tags are present */
        if (w <= 0 || h <= 0 || maxval <= 0 || depth <= 0 || tuple_type[0] == '\0' || avcodec_check_dimensions(avctx, w, h))
            return -1;

        avctx->width = w;
        avctx->height = h;
        if (depth == 1) {
            if (maxval == 1)
                avctx->pix_fmt = PIX_FMT_MONOWHITE;
            else
                avctx->pix_fmt = PIX_FMT_GRAY8;
        } else if (depth == 3) {
            avctx->pix_fmt = PIX_FMT_RGB24;
        } else if (depth == 4) {
            avctx->pix_fmt = PIX_FMT_RGBA32;
        } else {
            return -1;
        }
        return 0;
    } else {
        return -1;
    }
    pnm_get(s, buf1, sizeof(buf1));
    avctx->width = atoi(buf1);
    if (avctx->width <= 0)
        return -1;
    pnm_get(s, buf1, sizeof(buf1));
    avctx->height = atoi(buf1);
    if(avcodec_check_dimensions(avctx, avctx->width, avctx->height))
        return -1;
    if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
        pnm_get(s, buf1, sizeof(buf1));
    }

    /* more check if YUV420 */
    if (avctx->pix_fmt == PIX_FMT_YUV420P) {
        if ((avctx->width & 1) != 0)
            return -1;
        h = (avctx->height * 2);
        if ((h % 3) != 0)
            return -1;
        h /= 3;
        avctx->height = h;
    }
    return 0;
}
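
/* Decode one PNM/PAM picture from buf into the codec's internal frame and
 * return it through data. Returns the number of bytes consumed, -1 on error. */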
static int pnm_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            uint8_t *buf, int buf_size)
{
    PNMContext * const s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, n, linesize, h;
    unsigned char *ptr;

    s->bytestream_start=
    s->bytestream= buf;
    s->bytestream_end= buf + buf_size;

    if(pnm_decode_header(avctx, s) < 0)
        return -1;

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    switch(avctx->pix_fmt) {
    default:
        return -1;
    case PIX_FMT_RGB24:
        n = avctx->width * 3;
        goto do_read;
    case PIX_FMT_GRAY8:
        n = avctx->width;
        goto do_read;
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        n = (avctx->width + 7) >> 3;
    do_read:
        ptr = p->data[0];
        linesize = p->linesize[0];
        for(i = 0; i < avctx->height; i++) {
            memcpy(ptr, s->bytestream, n);
            s->bytestream += n;
            ptr += linesize;
        }
        break;
    case PIX_FMT_YUV420P:
        {
            unsigned char *ptr1, *ptr2;

            n = avctx->width;
            ptr = p->data[0];
            linesize = p->linesize[0];
            for(i = 0; i < avctx->height; i++) {
                memcpy(ptr, s->bytestream, n);
                s->bytestream += n;
                ptr += linesize;
            }
            ptr1 = p->data[1];
            ptr2 = p->data[2];
            n >>= 1;
            h = avctx->height >> 1;
            for(i = 0; i < h; i++) {
                memcpy(ptr1, s->bytestream, n);
                s->bytestream += n;
                memcpy(ptr2, s->bytestream, n);
                s->bytestream += n;
                ptr1 += p->linesize[1];
                ptr2 += p->linesize[2];
            }
        }
        break;
    case PIX_FMT_RGBA32:
        ptr = p->data[0];
        linesize = p->linesize[0];
        for(i = 0; i < avctx->height; i++) {
            int j, r, g, b, a;

            for(j = 0;j < avctx->width; j++) {
                r = *s->bytestream++;
                g = *s->bytestream++;
                b = *s->bytestream++;
                a = *s->bytestream++;
                ((uint32_t *)ptr)[j] = (a << 24) | (r << 16) | (g << 8) | b;
            }
            ptr += linesize;
        }
        break;
    }
    *picture= *(AVFrame*)&s->picture;
    *data_size = sizeof(AVPicture);

    return s->bytestream - s->bytestream_start;
}
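
/* Encode one picture as PBM/PGM/PPM (P4/P5/P6). For PIX_FMT_YUV420P a P5
 * header with 3/2 the picture height is written and the half-resolution
 * U and V rows follow the luma plane. Returns the encoded size. */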
static int pnm_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data){
    PNMContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, h, h1, c, n, linesize;
    uint8_t *ptr, *ptr1, *ptr2;

    if(buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200){
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    s->bytestream_start=
    s->bytestream= outbuf;
    s->bytestream_end= outbuf+buf_size;

    h = avctx->height;
    h1 = h;
    switch(avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        c = '4';
        n = (avctx->width + 7) >> 3;
        break;
    case PIX_FMT_GRAY8:
        c = '5';
        n = avctx->width;
        break;
    case PIX_FMT_RGB24:
        c = '6';
        n = avctx->width * 3;
        break;
    case PIX_FMT_YUV420P:
        c = '5';
        n = avctx->width;
        h1 = (h * 3) / 2;
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P%c\n%d %d\n",
             c, avctx->width, h1);
    s->bytestream += strlen(s->bytestream);
    if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
        snprintf(s->bytestream, s->bytestream_end - s->bytestream,
                 "%d\n", 255);
        s->bytestream += strlen(s->bytestream);
    }

    ptr = p->data[0];
    linesize = p->linesize[0];
    for(i=0;i<h;i++) {
        memcpy(s->bytestream, ptr, n);
        s->bytestream += n;
        ptr += linesize;
    }

    if (avctx->pix_fmt == PIX_FMT_YUV420P) {
        h >>= 1;
        n >>= 1;
        ptr1 = p->data[1];
        ptr2 = p->data[2];
        for(i=0;i<h;i++) {
            memcpy(s->bytestream, ptr1, n);
            s->bytestream += n;
            memcpy(s->bytestream, ptr2, n);
            s->bytestream += n;
            ptr1 += p->linesize[1];
            ptr2 += p->linesize[2];
        }
    }
    return s->bytestream - s->bytestream_start;
}
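
/* Encode one picture as PAM (P7): write the tag header, then the raw rows;
 * RGBA32 pixels are unpacked into R, G, B, A byte order. */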
static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data){
    PNMContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, h, w, n, linesize, depth, maxval;
    const char *tuple_type;
    uint8_t *ptr;

    if(buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200){
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    s->bytestream_start=
    s->bytestream= outbuf;
    s->bytestream_end= outbuf+buf_size;

    h = avctx->height;
    w = avctx->width;
    switch(avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        n = (w + 7) >> 3;
        depth = 1;
        maxval = 1;
        tuple_type = "BLACKANDWHITE";
        break;
    case PIX_FMT_GRAY8:
        n = w;
        depth = 1;
        maxval = 255;
        tuple_type = "GRAYSCALE";
        break;
    case PIX_FMT_RGB24:
        n = w * 3;
        depth = 3;
        maxval = 255;
        tuple_type = "RGB";
        break;
    case PIX_FMT_RGBA32:
        n = w * 4;
        depth = 4;
        maxval = 255;
        tuple_type = "RGB_ALPHA";
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLTYPE %s\nENDHDR\n",
             w, h, depth, maxval, tuple_type);
    s->bytestream += strlen(s->bytestream);

    ptr = p->data[0];
    linesize = p->linesize[0];

    if (avctx->pix_fmt == PIX_FMT_RGBA32) {
        int j;
        unsigned int v;

        for(i=0;i<h;i++) {
            for(j=0;j<w;j++) {
                v = ((uint32_t *)ptr)[j];
                *s->bytestream++ = v >> 16;
                *s->bytestream++ = v >> 8;
                *s->bytestream++ = v;
                *s->bytestream++ = v >> 24;
            }
            ptr += linesize;
        }
    } else {
        for(i=0;i<h;i++) {
            memcpy(s->bytestream, ptr, n);
            s->bytestream += n;
            ptr += linesize;
        }
    }
    return s->bytestream - s->bytestream_start;
}

#if 0
static int pnm_probe(AVProbeData *pd)
{
    const char *p = pd->buf;
    if (pd->buf_size >= 8 &&
        p[0] == 'P' &&
        p[1] >= '4' && p[1] <= '6' &&
        pnm_space(p[2]) )
        return AVPROBE_SCORE_MAX - 1; /* to permit pgmyuv probe */
    else
        return 0;
}

static int pgmyuv_probe(AVProbeData *pd)
{
    if (match_ext(pd->filename, "pgmyuv"))
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}

static int pam_probe(AVProbeData *pd)
{
    const char *p = pd->buf;
    if (pd->buf_size >= 8 &&
        p[0] == 'P' &&
        p[1] == '7' &&
        p[2] == '\n')
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}
#endif
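
/* Parser callback: determine the size of one picture by decoding its header
 * and adding avpicture_get_size() bytes of payload, then let
 * ff_combine_frame() assemble complete pictures across input chunks. */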
static int pnm_parse(AVCodecParserContext *s,
                     AVCodecContext *avctx,
                     uint8_t **poutbuf, int *poutbuf_size,
                     const uint8_t *buf, int buf_size)
{
    ParseContext *pc = s->priv_data;
    PNMContext pnmctx;
    int next;

    for(; pc->overread>0; pc->overread--){
        pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
    }
retry:
    if(pc->index){
        pnmctx.bytestream_start=
        pnmctx.bytestream= pc->buffer;
        pnmctx.bytestream_end= pc->buffer + pc->index;
    }else{
        pnmctx.bytestream_start=
        pnmctx.bytestream= buf;
        pnmctx.bytestream_end= buf + buf_size;
    }
    if(pnm_decode_header(avctx, &pnmctx) < 0){
        if(pnmctx.bytestream < pnmctx.bytestream_end){
            if(pc->index){
                pc->index=0;
            }else{
                buf++;
                buf_size--;
            }
            goto retry;
        }
#if 0
        if(pc->index && pc->index*2 + FF_INPUT_BUFFER_PADDING_SIZE < pc->buffer_size && buf_size > pc->index){
            memcpy(pc->buffer + pc->index, buf, pc->index);
            pc->index += pc->index;
            buf += pc->index;
            buf_size -= pc->index;
            goto retry;
        }
#endif
        next= END_NOT_FOUND;
    }else{
        next= pnmctx.bytestream - pnmctx.bytestream_start
            + avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
        if(pnmctx.bytestream_start!=buf)
            next-= pc->index;
        if(next > buf_size)
            next= END_NOT_FOUND;
    }

    if(ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size)<0){
        *poutbuf = NULL;
        *poutbuf_size = 0;
        return buf_size;
    }
    *poutbuf = (uint8_t *)buf;
    *poutbuf_size = buf_size;
    return next;
}

AVCodecParser pnm_parser = {
    { CODEC_ID_PGM, CODEC_ID_PGMYUV, CODEC_ID_PPM, CODEC_ID_PBM, CODEC_ID_PAM},
    sizeof(ParseContext),
    NULL,
    pnm_parse,
    ff_parse_close,
};

#ifdef CONFIG_PGM_ENCODER
AVCodec pgm_encoder = {
    "pgm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_GRAY8, -1},
};
#endif // CONFIG_PGM_ENCODER

#ifdef CONFIG_PGMYUV_ENCODER
AVCodec pgmyuv_encoder = {
    "pgmyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGMYUV,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};
#endif // CONFIG_PGMYUV_ENCODER

#ifdef CONFIG_PPM_ENCODER
AVCodec ppm_encoder = {
    "ppm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PPM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, -1},
};
#endif // CONFIG_PPM_ENCODER

#ifdef CONFIG_PBM_ENCODER
AVCodec pbm_encoder = {
    "pbm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PBM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_MONOWHITE, -1},
};
#endif // CONFIG_PBM_ENCODER

#ifdef CONFIG_PAM_ENCODER
AVCodec pam_encoder = {
    "pam",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PAM,
    sizeof(PNMContext),
    common_init,
    pam_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA32, PIX_FMT_GRAY8, PIX_FMT_MONOWHITE, -1},
};
#endif // CONFIG_PAM_ENCODER