/*
 * PNM image format
 * Copyright (c) 2002, 2003 Fabrice Bellard.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "avcodec.h"
#include "mpegvideo.h" //only for ParseContext
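
/* Codec private context: a byte-stream cursor over the current input or
 * output buffer, plus the AVFrame handed to or taken from libavcodec. */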
typedef struct PNMContext {
    uint8_t *bytestream;
    uint8_t *bytestream_start;
    uint8_t *bytestream_end;
    AVFrame picture;
} PNMContext;

static inline int pnm_space(int c)
{
    return (c == ' ' || c == '\n' || c == '\r' || c == '\t');
}
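
/* Read the next whitespace-delimited token from the bytestream into str,
 * skipping leading whitespace and '#' comment lines; at most buf_size - 1
 * characters are stored and the result is always zero-terminated. */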
static void pnm_get(PNMContext *sc, char *str, int buf_size)
{
    char *s;
    int c;

    /* skip spaces and comments */
    for(;;) {
        c = *sc->bytestream++;
        if (c == '#') {
            do {
                c = *sc->bytestream++;
            } while (c != '\n' && sc->bytestream < sc->bytestream_end);
        } else if (!pnm_space(c)) {
            break;
        }
    }

    s = str;
    while (sc->bytestream < sc->bytestream_end && !pnm_space(c)) {
        if ((s - str) < buf_size - 1)
            *s++ = c;
        c = *sc->bytestream++;
    }
    *s = '\0';
}

static int common_init(AVCodecContext *avctx){
    PNMContext *s = avctx->priv_data;

    avcodec_get_frame_defaults((AVFrame*)&s->picture);
    avctx->coded_frame= (AVFrame*)&s->picture;

    return 0;
}
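
/* Parse a PNM (P4/P5/P6) or PAM (P7) header from s->bytestream and set
 * avctx->width, avctx->height and avctx->pix_fmt from it.
 * Returns 0 on success, -1 on a malformed or unsupported header. */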
static int pnm_decode_header(AVCodecContext *avctx, PNMContext * const s){
    char buf1[32], tuple_type[32];
    int h, w, depth, maxval;

    pnm_get(s, buf1, sizeof(buf1));
    if (!strcmp(buf1, "P4")) {
        avctx->pix_fmt = PIX_FMT_MONOWHITE;
    } else if (!strcmp(buf1, "P5")) {
        if (avctx->codec_id == CODEC_ID_PGMYUV)
            avctx->pix_fmt = PIX_FMT_YUV420P;
        else
            avctx->pix_fmt = PIX_FMT_GRAY8;
    } else if (!strcmp(buf1, "P6")) {
        avctx->pix_fmt = PIX_FMT_RGB24;
    } else if (!strcmp(buf1, "P7")) {
        w = -1;
        h = -1;
        maxval = -1;
        depth = -1;
        tuple_type[0] = '\0';
        for(;;) {
            pnm_get(s, buf1, sizeof(buf1));
            if (!strcmp(buf1, "WIDTH")) {
                pnm_get(s, buf1, sizeof(buf1));
                w = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "HEIGHT")) {
                pnm_get(s, buf1, sizeof(buf1));
                h = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "DEPTH")) {
                pnm_get(s, buf1, sizeof(buf1));
                depth = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "MAXVAL")) {
                pnm_get(s, buf1, sizeof(buf1));
                maxval = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "TUPLTYPE")) {
                pnm_get(s, tuple_type, sizeof(tuple_type));
            } else if (!strcmp(buf1, "ENDHDR")) {
                break;
            } else {
                return -1;
            }
        }
        /* check that all tags are present */
        if (w <= 0 || h <= 0 || maxval <= 0 || depth <= 0 || tuple_type[0] == '\0')
            return -1;

        avctx->width = w;
        avctx->height = h;
        if (depth == 1) {
            if (maxval == 1)
                avctx->pix_fmt = PIX_FMT_MONOWHITE;
            else
                avctx->pix_fmt = PIX_FMT_GRAY8;
        } else if (depth == 3) {
            avctx->pix_fmt = PIX_FMT_RGB24;
        } else if (depth == 4) {
            avctx->pix_fmt = PIX_FMT_RGBA32;
        } else {
            return -1;
        }
        return 0;
    } else {
        return -1;
    }
    pnm_get(s, buf1, sizeof(buf1));
    avctx->width = atoi(buf1);
    if (avctx->width <= 0)
        return -1;
    pnm_get(s, buf1, sizeof(buf1));
    avctx->height = atoi(buf1);
    if (avctx->height <= 0)
        return -1;
    if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
        pnm_get(s, buf1, sizeof(buf1));
    }

    /* more check if YUV420 */
    if (avctx->pix_fmt == PIX_FMT_YUV420P) {
        if ((avctx->width & 1) != 0)
            return -1;
        h = (avctx->height * 2);
        if ((h % 3) != 0)
            return -1;
        h /= 3;
        avctx->height = h;
    }
    return 0;
}
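
/* Decode one image: parse the header, obtain a frame buffer via
 * avctx->get_buffer() and copy the raw samples into it plane by plane.
 * Returns the number of bytes consumed from buf, or -1 on error. */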
static int pnm_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            uint8_t *buf, int buf_size)
{
    PNMContext * const s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, n, linesize, h;
    unsigned char *ptr;

    /* special case for last picture */
    if (buf_size == 0) {
        return 0;
    }

    s->bytestream_start=
    s->bytestream= buf;
    s->bytestream_end= buf + buf_size;

    if(pnm_decode_header(avctx, s) < 0)
        return -1;

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    switch(avctx->pix_fmt) {
    default:
        return -1;
    case PIX_FMT_RGB24:
        n = avctx->width * 3;
        goto do_read;
    case PIX_FMT_GRAY8:
        n = avctx->width;
        goto do_read;
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        n = (avctx->width + 7) >> 3;
    do_read:
        ptr = p->data[0];
        linesize = p->linesize[0];
        for(i = 0; i < avctx->height; i++) {
            memcpy(ptr, s->bytestream, n);
            s->bytestream += n;
            ptr += linesize;
        }
        break;
    case PIX_FMT_YUV420P:
        {
            unsigned char *ptr1, *ptr2;

            n = avctx->width;
            ptr = p->data[0];
            linesize = p->linesize[0];
            for(i = 0; i < avctx->height; i++) {
                memcpy(ptr, s->bytestream, n);
                s->bytestream += n;
                ptr += linesize;
            }
            ptr1 = p->data[1];
            ptr2 = p->data[2];
            n >>= 1;
            h = avctx->height >> 1;
            for(i = 0; i < h; i++) {
                memcpy(ptr1, s->bytestream, n);
                s->bytestream += n;
                memcpy(ptr2, s->bytestream, n);
                s->bytestream += n;
                ptr1 += p->linesize[1];
                ptr2 += p->linesize[2];
            }
        }
        break;
    case PIX_FMT_RGBA32:
        ptr = p->data[0];
        linesize = p->linesize[0];
        for(i = 0; i < avctx->height; i++) {
            int j, r, g, b, a;

            for(j = 0;j < avctx->width; j++) {
                r = *s->bytestream++;
                g = *s->bytestream++;
                b = *s->bytestream++;
                a = *s->bytestream++;
                ((uint32_t *)ptr)[j] = (a << 24) | (r << 16) | (g << 8) | b;
            }
            ptr += linesize;
        }
        break;
    }
    *picture= *(AVFrame*)&s->picture;
    *data_size = sizeof(AVPicture);

    return s->bytestream - s->bytestream_start;
}
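
/* Encode one frame as P4 (pbm), P5 (pgm) or P6 (ppm); yuv420p input is
 * written as a single P5 image of height 3*h/2 holding the luma plane
 * followed by the U and V lines interleaved row by row (the "pgmyuv"
 * layout). Returns the number of bytes written to outbuf. */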
static int pnm_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data){
    PNMContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, h, h1, c, n, linesize;
    uint8_t *ptr, *ptr1, *ptr2;

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    s->bytestream_start=
    s->bytestream= outbuf;
    s->bytestream_end= outbuf+buf_size;

    h = avctx->height;
    h1 = h;
    switch(avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        c = '4';
        n = (avctx->width + 7) >> 3;
        break;
    case PIX_FMT_GRAY8:
        c = '5';
        n = avctx->width;
        break;
    case PIX_FMT_RGB24:
        c = '6';
        n = avctx->width * 3;
        break;
    case PIX_FMT_YUV420P:
        c = '5';
        n = avctx->width;
        h1 = (h * 3) / 2;
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P%c\n%d %d\n",
             c, avctx->width, h1);
    s->bytestream += strlen(s->bytestream);
    if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
        snprintf(s->bytestream, s->bytestream_end - s->bytestream,
                 "%d\n", 255);
        s->bytestream += strlen(s->bytestream);
    }

    ptr = p->data[0];
    linesize = p->linesize[0];
    for(i=0;i<h;i++) {
        memcpy(s->bytestream, ptr, n);
        s->bytestream += n;
        ptr += linesize;
    }

    if (avctx->pix_fmt == PIX_FMT_YUV420P) {
        h >>= 1;
        n >>= 1;
        ptr1 = p->data[1];
        ptr2 = p->data[2];
        for(i=0;i<h;i++) {
            memcpy(s->bytestream, ptr1, n);
            s->bytestream += n;
            memcpy(s->bytestream, ptr2, n);
            s->bytestream += n;
            ptr1 += p->linesize[1];
            ptr2 += p->linesize[2];
        }
    }
    return s->bytestream - s->bytestream_start;
}
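
/* Encode one frame as PAM (P7): write the tag-based header, then the raw
 * samples; rgba32 input is repacked from packed 32-bit words into the
 * R, G, B, A byte order expected by the format. */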
static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data){
    PNMContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, h, w, n, linesize, depth, maxval;
    const char *tuple_type;
    uint8_t *ptr;

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    s->bytestream_start=
    s->bytestream= outbuf;
    s->bytestream_end= outbuf+buf_size;

    h = avctx->height;
    w = avctx->width;
    switch(avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        n = (w + 7) >> 3;
        depth = 1;
        maxval = 1;
        tuple_type = "BLACKANDWHITE";
        break;
    case PIX_FMT_GRAY8:
        n = w;
        depth = 1;
        maxval = 255;
        tuple_type = "GRAYSCALE";
        break;
    case PIX_FMT_RGB24:
        n = w * 3;
        depth = 3;
        maxval = 255;
        tuple_type = "RGB";
        break;
    case PIX_FMT_RGBA32:
        n = w * 4;
        depth = 4;
        maxval = 255;
        tuple_type = "RGB_ALPHA";
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLTYPE %s\nENDHDR\n",
             w, h, depth, maxval, tuple_type);
    s->bytestream += strlen(s->bytestream);

    ptr = p->data[0];
    linesize = p->linesize[0];

    if (avctx->pix_fmt == PIX_FMT_RGBA32) {
        int j;
        unsigned int v;

        for(i=0;i<h;i++) {
            for(j=0;j<w;j++) {
                v = ((uint32_t *)ptr)[j];
                *s->bytestream++ = v >> 16;
                *s->bytestream++ = v >> 8;
                *s->bytestream++ = v;
                *s->bytestream++ = v >> 24;
            }
            ptr += linesize;
        }
    } else {
        for(i=0;i<h;i++) {
            memcpy(s->bytestream, ptr, n);
            s->bytestream += n;
            ptr += linesize;
        }
    }
    return s->bytestream - s->bytestream_start;
}
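
/* Format-probing helpers kept for reference; currently compiled out. */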
#if 0
static int pnm_probe(AVProbeData *pd)
{
    const char *p = pd->buf;
    if (pd->buf_size >= 8 &&
        p[0] == 'P' &&
        p[1] >= '4' && p[1] <= '6' &&
        pnm_space(p[2]) )
        return AVPROBE_SCORE_MAX - 1; /* to permit pgmyuv probe */
    else
        return 0;
}

static int pgmyuv_probe(AVProbeData *pd)
{
    if (match_ext(pd->filename, "pgmyuv"))
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}

static int pam_probe(AVProbeData *pd)
{
    const char *p = pd->buf;
    if (pd->buf_size >= 8 &&
        p[0] == 'P' &&
        p[1] == '7' &&
        p[2] == '\n')
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}
#endif
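
/* Parser callback: find complete pictures in an arbitrarily split byte
 * stream by decoding the header and adding the picture size computed by
 * avpicture_get_size(); partial data is buffered through ff_combine_frame(). */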
static int pnm_parse(AVCodecParserContext *s,
                     AVCodecContext *avctx,
                     uint8_t **poutbuf, int *poutbuf_size,
                     const uint8_t *buf, int buf_size)
{
    ParseContext *pc = s->priv_data;
    PNMContext pnmctx;
    int next;

    for(; pc->overread>0; pc->overread--){
        pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
    }
retry:
    if(pc->index){
        pnmctx.bytestream_start=
        pnmctx.bytestream= pc->buffer;
        pnmctx.bytestream_end= pc->buffer + pc->index;
    }else{
        pnmctx.bytestream_start=
        pnmctx.bytestream= buf;
        pnmctx.bytestream_end= buf + buf_size;
    }
    if(pnm_decode_header(avctx, &pnmctx) < 0){
        if(pnmctx.bytestream < pnmctx.bytestream_end){
            if(pc->index){
                pc->index=0;
            }else{
                buf++;
                buf_size--;
            }
            goto retry;
        }
#if 0
        if(pc->index && pc->index*2 + FF_INPUT_BUFFER_PADDING_SIZE < pc->buffer_size && buf_size > pc->index){
            memcpy(pc->buffer + pc->index, buf, pc->index);
            pc->index += pc->index;
            buf += pc->index;
            buf_size -= pc->index;
            goto retry;
        }
#endif
        next= END_NOT_FOUND;
    }else{
        next= pnmctx.bytestream - pnmctx.bytestream_start
            + avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
        if(pnmctx.bytestream_start!=buf)
            next-= pc->index;
        if(next > buf_size)
            next= END_NOT_FOUND;
    }

    if(ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size)<0){
        *poutbuf = NULL;
        *poutbuf_size = 0;
        return buf_size;
    }
    *poutbuf = (uint8_t *)buf;
    *poutbuf_size = buf_size;
    return next;
}

AVCodecParser pnm_parser = {
    { CODEC_ID_PGM, CODEC_ID_PGMYUV, CODEC_ID_PPM, CODEC_ID_PBM, CODEC_ID_PAM},
    sizeof(ParseContext),
    NULL,
    pnm_parse,
    ff_parse_close,
};
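
/* Codec table entries: one AVCodec per Netpbm variant; all share
 * common_init and pnm_decode_frame, while pam uses its own encoder. */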
AVCodec pgm_encoder = {
    "pgm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_GRAY8, -1},
};

AVCodec pgmyuv_encoder = {
    "pgmyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGMYUV,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};

AVCodec ppm_encoder = {
    "ppm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PPM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, -1},
};

AVCodec pbm_encoder = {
    "pbm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PBM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_MONOWHITE, -1},
};

AVCodec pam_encoder = {
    "pam",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PAM,
    sizeof(PNMContext),
    common_init,
    pam_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA32, PIX_FMT_GRAY8, PIX_FMT_MONOWHITE, -1},
};