You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

1867 lines
50KB

  1. /*
 * Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
  4. *
  5. * This library is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU Lesser General Public
  7. * License as published by the Free Software Foundation; either
  8. * version 2 of the License, or (at your option) any later version.
  9. *
  10. * This library is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * Lesser General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU Lesser General Public
  16. * License along with this library; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. /**
  20. * @file imgconvert.c
 * Misc image conversion routines.
  22. */
  23. /* TODO:
  24. * - write 'ffimg' program to test all the image related stuff
  25. * - move all api to slice based system
  26. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  27. */
  28. #include "avcodec.h"
  29. #include "dsputil.h"
  30. #ifdef USE_FASTMEMCPY
  31. #include "fastmemcpy.h"
  32. #endif
  33. #ifdef HAVE_MMX
  34. #include "i386/mmx.h"
  35. #endif
  36. #define xglue(x, y) x ## y
  37. #define glue(x, y) xglue(x, y)
  38. #define FF_COLOR_RGB 0 /* RGB color space */
  39. #define FF_COLOR_GRAY 1 /* gray color space */
  40. #define FF_COLOR_YUV 2 /* YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
  41. #define FF_COLOR_YUV_JPEG 3 /* YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
  42. #define FF_PIXEL_PLANAR 0 /* each channel has one component in AVPicture */
  43. #define FF_PIXEL_PACKED 1 /* only one components containing all the channels */
  44. #define FF_PIXEL_PALETTE 2 /* one components containing indexes for a palette */
/* Static description of one pixel format; pix_fmt_info below is indexed
   by the PIX_FMT_* enum values. */
typedef struct PixFmtInfo {
    const char *name;
    uint8_t nb_channels;     /* number of channels (including alpha) */
    uint8_t color_type;      /* color type (see FF_COLOR_xxx constants) */
    uint8_t pixel_type;      /* pixel storage type (see FF_PIXEL_xxx constants) */
    uint8_t is_alpha : 1;    /* true if alpha can be specified */
    uint8_t x_chroma_shift;  /* X chroma subsampling factor is 2 ^ shift */
    uint8_t y_chroma_shift;  /* Y chroma subsampling factor is 2 ^ shift */
    uint8_t depth;           /* bit depth of the color components */
} PixFmtInfo;
/* this table gives more information about formats */
/* NOTE(review): rgb555 is declared with nb_channels = 4 / is_alpha = 1,
   presumably because bit 15 can carry a 1-bit alpha — confirm against the
   rgb555 RGBA_IN/RGBA_OUT macros below. */
static PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
    /* YUV formats */
    [PIX_FMT_YUV420P] = {
        .name = "yuv420p", .nb_channels = 3,
        .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 1,
    },
    [PIX_FMT_YUV422P] = {
        .name = "yuv422p", .nb_channels = 3,
        .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_YUV444P] = {
        .name = "yuv444p", .nb_channels = 3,
        .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    /* packed 4:2:2: one interleaved plane, so nb_channels is 1 */
    [PIX_FMT_YUV422] = {
        .name = "yuv422", .nb_channels = 1,
        .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_YUV410P] = {
        .name = "yuv410p", .nb_channels = 3,
        .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 2, .y_chroma_shift = 2,
    },
    [PIX_FMT_YUV411P] = {
        .name = "yuv411p", .nb_channels = 3,
        .color_type = FF_COLOR_YUV, .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 2, .y_chroma_shift = 0,
    },
    /* JPEG YUV (full-range 0..255 instead of 16..235 / 16..240) */
    [PIX_FMT_YUVJ420P] = {
        .name = "yuvj420p", .nb_channels = 3,
        .color_type = FF_COLOR_YUV_JPEG, .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 1,
    },
    [PIX_FMT_YUVJ422P] = {
        .name = "yuvj422p", .nb_channels = 3,
        .color_type = FF_COLOR_YUV_JPEG, .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_YUVJ444P] = {
        .name = "yuvj444p", .nb_channels = 3,
        .color_type = FF_COLOR_YUV_JPEG, .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    /* RGB formats */
    [PIX_FMT_RGB24] = {
        .name = "rgb24", .nb_channels = 3,
        .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
    },
    [PIX_FMT_BGR24] = {
        .name = "bgr24", .nb_channels = 3,
        .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
    },
    [PIX_FMT_RGBA32] = {
        .name = "rgba32", .nb_channels = 4, .is_alpha = 1,
        .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
    },
    [PIX_FMT_RGB565] = {
        .name = "rgb565", .nb_channels = 3,
        .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED,
        .depth = 5,
    },
    [PIX_FMT_RGB555] = {
        .name = "rgb555", .nb_channels = 4, .is_alpha = 1,
        .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PACKED,
        .depth = 5,
    },
    /* gray / mono formats */
    [PIX_FMT_GRAY8] = {
        .name = "gray", .nb_channels = 1,
        .color_type = FF_COLOR_GRAY, .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
    },
    [PIX_FMT_MONOWHITE] = {
        .name = "monow", .nb_channels = 1,
        .color_type = FF_COLOR_GRAY, .pixel_type = FF_PIXEL_PLANAR,
        .depth = 1,
    },
    [PIX_FMT_MONOBLACK] = {
        .name = "monob", .nb_channels = 1,
        .color_type = FF_COLOR_GRAY, .pixel_type = FF_PIXEL_PLANAR,
        .depth = 1,
    },
    /* paletted formats */
    [PIX_FMT_PAL8] = {
        .name = "pal8", .nb_channels = 4, .is_alpha = 1,
        .color_type = FF_COLOR_RGB, .pixel_type = FF_PIXEL_PALETTE,
        .depth = 8,
    },
};
/* Return, via *h_shift / *v_shift, the chroma subsampling shifts of
   'pix_fmt' (the subsampling factor is 2^shift in each direction).
   NOTE(review): pix_fmt is not range-checked here, unlike
   avcodec_get_pix_fmt_name() — callers must pass a valid format. */
void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
{
    *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
    *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
}
  203. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  204. {
  205. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  206. return "???";
  207. else
  208. return pix_fmt_info[pix_fmt].name;
  209. }
/* Fill the AVPicture fields (plane pointers and line sizes) for the given
   format and dimensions, laying the planes out consecutively starting at
   'ptr'.  Returns the total number of bytes required, or -1 for an
   unsupported format.  May be called with ptr == NULL just to compute the
   required size (see avpicture_get_size). */
int avpicture_fill(AVPicture *picture, uint8_t *ptr,
                   int pix_fmt, int width, int height)
{
    int size, w2, h2, size2;
    PixFmtInfo *pinfo;

    pinfo = &pix_fmt_info[pix_fmt];
    size = width * height;
    switch(pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
    case PIX_FMT_YUVJ420P:
    case PIX_FMT_YUVJ422P:
    case PIX_FMT_YUVJ444P:
        /* chroma plane dimensions, rounded UP by the subsampling factor */
        w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
        h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
        size2 = w2 * h2;
        /* planes are consecutive: Y, then Cb, then Cr */
        picture->data[0] = ptr;
        picture->data[1] = picture->data[0] + size;
        picture->data[2] = picture->data[1] + size2;
        picture->linesize[0] = width;
        picture->linesize[1] = w2;
        picture->linesize[2] = w2;
        return size + 2 * size2;
    case PIX_FMT_RGB24:
    case PIX_FMT_BGR24:
        /* packed, 3 bytes per pixel */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 3;
        return size * 3;
    case PIX_FMT_RGBA32:
        /* packed, 4 bytes per pixel */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 4;
        return size * 4;
    case PIX_FMT_RGB555:
    case PIX_FMT_RGB565:
    case PIX_FMT_YUV422:
        /* packed, 2 bytes per pixel */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 2;
        return size * 2;
    case PIX_FMT_GRAY8:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width;
        return size;
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        /* 1 bit per pixel, rows rounded up to whole bytes */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = (width + 7) >> 3;
        return picture->linesize[0] * height;
    case PIX_FMT_PAL8:
        /* round the index plane up to a 32-bit boundary so that the
           palette that follows is aligned */
        size2 = (size + 3) & ~3;
        picture->data[0] = ptr;
        picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
        picture->data[2] = NULL;
        picture->linesize[0] = width;
        picture->linesize[1] = 4;
        return size2 + 256 * 4;
    default:
        /* NOTE(review): only this branch clears data[3]; the others leave
           it untouched — confirm whether callers rely on data[3]. */
        picture->data[0] = NULL;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        return -1;
    }
}
  287. int avpicture_get_size(int pix_fmt, int width, int height)
  288. {
  289. AVPicture dummy_pict;
  290. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  291. }
  292. /**
  293. * compute the loss when converting from a pixel format to another
  294. */
  295. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  296. int has_alpha)
  297. {
  298. const PixFmtInfo *pf, *ps;
  299. int loss;
  300. ps = &pix_fmt_info[src_pix_fmt];
  301. pf = &pix_fmt_info[dst_pix_fmt];
  302. /* compute loss */
  303. loss = 0;
  304. pf = &pix_fmt_info[dst_pix_fmt];
  305. if (pf->depth < ps->depth)
  306. loss |= FF_LOSS_DEPTH;
  307. if (pf->x_chroma_shift >= ps->x_chroma_shift ||
  308. pf->y_chroma_shift >= ps->y_chroma_shift)
  309. loss |= FF_LOSS_RESOLUTION;
  310. switch(pf->color_type) {
  311. case FF_COLOR_RGB:
  312. if (ps->color_type != FF_COLOR_RGB &&
  313. ps->color_type != FF_COLOR_GRAY)
  314. loss |= FF_LOSS_COLORSPACE;
  315. break;
  316. case FF_COLOR_GRAY:
  317. if (ps->color_type != FF_COLOR_GRAY)
  318. loss |= FF_LOSS_COLORSPACE;
  319. break;
  320. case FF_COLOR_YUV:
  321. if (ps->color_type != FF_COLOR_YUV)
  322. loss |= FF_LOSS_COLORSPACE;
  323. break;
  324. case FF_COLOR_YUV_JPEG:
  325. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  326. ps->color_type != FF_COLOR_YUV)
  327. loss |= FF_LOSS_COLORSPACE;
  328. break;
  329. default:
  330. /* fail safe test */
  331. if (ps->color_type != pf->color_type)
  332. loss |= FF_LOSS_COLORSPACE;
  333. break;
  334. }
  335. if (pf->color_type == FF_COLOR_GRAY &&
  336. ps->color_type != FF_COLOR_GRAY)
  337. loss |= FF_LOSS_CHROMA;
  338. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  339. loss |= FF_LOSS_ALPHA;
  340. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  341. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  342. loss |= FF_LOSS_COLORQUANT;
  343. return loss;
  344. }
  345. static int avg_bits_per_pixel(int pix_fmt)
  346. {
  347. int bits;
  348. const PixFmtInfo *pf;
  349. pf = &pix_fmt_info[pix_fmt];
  350. switch(pf->pixel_type) {
  351. case FF_PIXEL_PACKED:
  352. switch(pix_fmt) {
  353. case PIX_FMT_YUV422:
  354. case PIX_FMT_RGB565:
  355. case PIX_FMT_RGB555:
  356. bits = 16;
  357. break;
  358. default:
  359. bits = pf->depth * pf->nb_channels;
  360. break;
  361. }
  362. break;
  363. case FF_PIXEL_PLANAR:
  364. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  365. bits = pf->depth * pf->nb_channels;
  366. } else {
  367. bits = pf->depth + ((2 * pf->depth) >>
  368. (pf->x_chroma_shift + pf->y_chroma_shift));
  369. }
  370. break;
  371. case FF_PIXEL_PALETTE:
  372. bits = 8;
  373. break;
  374. default:
  375. bits = -1;
  376. break;
  377. }
  378. return bits;
  379. }
  380. static int avcodec_find_best_pix_fmt1(int pix_fmt_mask,
  381. int src_pix_fmt,
  382. int has_alpha,
  383. int loss_mask)
  384. {
  385. int dist, i, loss, min_dist, dst_pix_fmt;
  386. /* find exact color match with smallest size */
  387. dst_pix_fmt = -1;
  388. min_dist = 0x7fffffff;
  389. for(i = 0;i < PIX_FMT_NB; i++) {
  390. if (pix_fmt_mask & (1 << i)) {
  391. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  392. if (loss == 0) {
  393. dist = avg_bits_per_pixel(i);
  394. if (dist < min_dist) {
  395. min_dist = dist;
  396. dst_pix_fmt = i;
  397. }
  398. }
  399. }
  400. }
  401. return dst_pix_fmt;
  402. }
  403. /**
  404. * find best pixel format to convert to. Return -1 if none found
  405. */
  406. int avcodec_find_best_pix_fmt(int pix_fmt_mask, int src_pix_fmt,
  407. int has_alpha, int *loss_ptr)
  408. {
  409. int dst_pix_fmt, loss_mask, i;
  410. static const int loss_mask_order[] = {
  411. ~0, /* no loss first */
  412. ~FF_LOSS_ALPHA,
  413. ~FF_LOSS_RESOLUTION,
  414. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  415. ~FF_LOSS_COLORQUANT,
  416. ~FF_LOSS_DEPTH,
  417. 0,
  418. };
  419. /* try with successive loss */
  420. i = 0;
  421. for(;;) {
  422. loss_mask = loss_mask_order[i++];
  423. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  424. has_alpha, loss_mask);
  425. if (dst_pix_fmt >= 0)
  426. goto found;
  427. if (loss_mask == 0)
  428. break;
  429. }
  430. return -1;
  431. found:
  432. if (loss_ptr)
  433. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  434. return dst_pix_fmt;
  435. }
  436. static void img_copy_plane(uint8_t *dst, int dst_wrap,
  437. uint8_t *src, int src_wrap,
  438. int width, int height)
  439. {
  440. for(;height > 0; height--) {
  441. memcpy(dst, src, width);
  442. dst += dst_wrap;
  443. src += src_wrap;
  444. }
  445. }
  446. /* copy image 'src' to 'dst' */
  447. void img_copy(AVPicture *dst, AVPicture *src,
  448. int pix_fmt, int width, int height)
  449. {
  450. int bwidth, bits, i;
  451. PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  452. pf = &pix_fmt_info[pix_fmt];
  453. switch(pf->pixel_type) {
  454. case FF_PIXEL_PACKED:
  455. switch(pix_fmt) {
  456. case PIX_FMT_YUV422:
  457. case PIX_FMT_RGB565:
  458. case PIX_FMT_RGB555:
  459. bits = 16;
  460. break;
  461. default:
  462. bits = pf->depth * pf->nb_channels;
  463. break;
  464. }
  465. bwidth = (width * bits + 7) >> 3;
  466. img_copy_plane(dst->data[0], dst->linesize[0],
  467. src->data[0], src->linesize[0],
  468. bwidth, height);
  469. break;
  470. case FF_PIXEL_PLANAR:
  471. for(i = 0; i < pf->nb_channels; i++) {
  472. int w, h;
  473. w = width;
  474. h = height;
  475. if (i == 1 || i == 2) {
  476. w >>= pf->x_chroma_shift;
  477. h >>= pf->y_chroma_shift;
  478. }
  479. bwidth = (w * pf->depth + 7) >> 3;
  480. img_copy_plane(dst->data[i], dst->linesize[i],
  481. src->data[i], src->linesize[i],
  482. bwidth, h);
  483. }
  484. break;
  485. case FF_PIXEL_PALETTE:
  486. img_copy_plane(dst->data[0], dst->linesize[0],
  487. src->data[0], src->linesize[0],
  488. width, height);
  489. /* copy the palette */
  490. img_copy_plane(dst->data[1], dst->linesize[1],
  491. src->data[1], src->linesize[1],
  492. 4, 256);
  493. break;
  494. }
  495. }
  496. /* XXX: totally non optimized */
  497. static void yuv422_to_yuv420p(AVPicture *dst, AVPicture *src,
  498. int width, int height)
  499. {
  500. const uint8_t *p, *p1;
  501. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  502. int x;
  503. p1 = src->data[0];
  504. lum1 = dst->data[0];
  505. cb1 = dst->data[0];
  506. cr1 = dst->data[0];
  507. for(;height >= 2; height -= 2) {
  508. p = p1;
  509. lum = lum1;
  510. cb = cb1;
  511. cr = cr1;
  512. for(x=0;x<width;x+=2) {
  513. lum[0] = p[0];
  514. cb[0] = p[1];
  515. lum[1] = p[2];
  516. cr[0] = p[3];
  517. p += 4;
  518. lum += 2;
  519. cb++;
  520. cr++;
  521. }
  522. p1 += src->linesize[0];
  523. lum1 += dst->linesize[0];
  524. p = p1;
  525. lum = lum1;
  526. for(x=0;x<width;x+=2) {
  527. lum[0] = p[0];
  528. lum[1] = p[2];
  529. p += 4;
  530. lum += 2;
  531. }
  532. p1 += src->linesize[0];
  533. lum1 += dst->linesize[0];
  534. cb1 += dst->linesize[1];
  535. cr1 += dst->linesize[2];
  536. }
  537. }
  538. static void yuv422_to_yuv422p(AVPicture *dst, AVPicture *src,
  539. int width, int height)
  540. {
  541. const uint8_t *p, *p1;
  542. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  543. int w;
  544. p1 = src->data[0];
  545. lum1 = dst->data[0];
  546. cb1 = dst->data[0];
  547. cr1 = dst->data[0];
  548. for(;height >= 2; height -= 2) {
  549. p = p1;
  550. lum = lum1;
  551. cb = cb1;
  552. cr = cr1;
  553. for(w = width; w >= 2; w -= 2) {
  554. lum[0] = p[0];
  555. cb[0] = p[1];
  556. lum[1] = p[2];
  557. cr[0] = p[3];
  558. p += 4;
  559. lum += 2;
  560. cb++;
  561. cr++;
  562. }
  563. p1 += src->linesize[0];
  564. lum1 += dst->linesize[0];
  565. cb1 += dst->linesize[1];
  566. cr1 += dst->linesize[2];
  567. }
  568. }
  569. static void yuv422p_to_yuv422(AVPicture *dst, AVPicture *src,
  570. int width, int height)
  571. {
  572. uint8_t *p, *p1;
  573. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  574. int w;
  575. p1 = dst->data[0];
  576. lum1 = src->data[0];
  577. cb1 = src->data[0];
  578. cr1 = src->data[0];
  579. for(;height >= 2; height -= 2) {
  580. p = p1;
  581. lum = lum1;
  582. cb = cb1;
  583. cr = cr1;
  584. for(w = width; w >= 2; w -= 2) {
  585. p[0] = lum[0];
  586. p[1] = cb[0];
  587. p[2] = lum[1];
  588. p[3] = cr[0];
  589. p += 4;
  590. lum += 2;
  591. cb++;
  592. cr++;
  593. }
  594. p1 += src->linesize[0];
  595. lum1 += dst->linesize[0];
  596. cb1 += dst->linesize[1];
  597. cr1 += dst->linesize[2];
  598. }
  599. }
/* Fixed-point color-space conversion helpers. All coefficients are scaled
   by 2^SCALEBITS and rounded via ONE_HALF; 'cm' (used by several macros)
   must be a clamping table centered on the 0..255 range (cropTbl). */
#define SCALEBITS 10
#define ONE_HALF  (1 << (SCALEBITS - 1))
#define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))

/* CCIR (studio swing, 16..235 / 16..240) YUV -> RGB: phase 1 computes the
   chroma contributions, phase 2 adds the expanded luma and clamps */
#define YUV_TO_RGB1_CCIR(cb1, cr1)\
{\
    cb = (cb1) - 128;\
    cr = (cr1) - 128;\
    r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
    g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
            ONE_HALF;\
    b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
}

#define YUV_TO_RGB2_CCIR(r, g, b, y1)\
{\
    y = ((y1) - 16) * FIX(255.0/219.0);\
    r = cm[(y + r_add) >> SCALEBITS];\
    g = cm[(y + g_add) >> SCALEBITS];\
    b = cm[(y + b_add) >> SCALEBITS];\
}

/* full-range (JPEG) YUV -> RGB */
#define YUV_TO_RGB1(cb1, cr1)\
{\
    cb = (cb1) - 128;\
    cr = (cr1) - 128;\
    r_add = FIX(1.40200) * cr + ONE_HALF;\
    g_add = - FIX(0.34414) * cb - FIX(0.71414) * cr + ONE_HALF;\
    b_add = FIX(1.77200) * cb + ONE_HALF;\
}

#define YUV_TO_RGB2(r, g, b, y1)\
{\
    y = (y1) << SCALEBITS;\
    r = cm[(y + r_add) >> SCALEBITS];\
    g = cm[(y + g_add) >> SCALEBITS];\
    b = cm[(y + b_add) >> SCALEBITS];\
}

/* range conversions between CCIR (studio swing) and JPEG (full range) */
#define Y_CCIR_TO_JPEG(y)\
 cm[((y) * FIX(255.0/219.0) + (ONE_HALF - 16 * FIX(255.0/219.0))) >> SCALEBITS]

#define Y_JPEG_TO_CCIR(y)\
 (((y) * FIX(219.0/255.0) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define C_CCIR_TO_JPEG(y)\
 cm[(((y) - 128) * FIX(127.0/112.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS]

/* NOTE: the clamp is really necessary! */
/* NOTE(review): statement expression and the reserved identifier '__y'
   are GCC-isms carried over from the original code. */
#define C_JPEG_TO_CCIR(y)\
({\
    int __y;\
    __y = ((((y) - 128) * FIX(112.0/127.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS);\
    if (__y < 16)\
        __y = 16;\
    __y;\
})

/* RGB -> YUV, full range; 'shift' compensates when the RGB inputs are
   pre-summed over 2^shift pixels */
#define RGB_TO_Y(r, g, b) \
((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
  FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)

#define RGB_TO_U(r1, g1, b1, shift)\
(((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
     FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V(r1, g1, b1, shift)\
(((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
   FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

/* RGB -> YUV, CCIR studio swing */
#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

/* 256-entry range-conversion lookup tables, filled by img_convert_init() */
static uint8_t y_ccir_to_jpeg[256];
static uint8_t y_jpeg_to_ccir[256];
static uint8_t c_ccir_to_jpeg[256];
static uint8_t c_jpeg_to_ccir[256];
  671. /* init various conversion tables */
  672. static void img_convert_init(void)
  673. {
  674. int i;
  675. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  676. for(i = 0;i < 256; i++) {
  677. y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
  678. y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
  679. c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
  680. c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
  681. }
  682. }
  683. /* apply to each pixel the given table */
  684. static void img_apply_table(uint8_t *dst, int dst_wrap,
  685. const uint8_t *src, int src_wrap,
  686. int width, int height, const uint8_t *table1)
  687. {
  688. int n;
  689. const uint8_t *s;
  690. uint8_t *d;
  691. const uint8_t *table;
  692. table = table1;
  693. for(;height > 0; height--) {
  694. s = src;
  695. d = dst;
  696. n = width;
  697. while (n >= 4) {
  698. d[0] = table[s[0]];
  699. d[1] = table[s[1]];
  700. d[2] = table[s[2]];
  701. d[3] = table[s[3]];
  702. d += 4;
  703. s += 4;
  704. n -= 4;
  705. }
  706. while (n > 0) {
  707. d[0] = table[s[0]];
  708. d++;
  709. s++;
  710. n--;
  711. }
  712. dst += dst_wrap;
  713. src += src_wrap;
  714. }
  715. }
  716. /* XXX: use generic filter ? */
  717. /* 1x2 -> 1x1 */
  718. static void shrink2(uint8_t *dst, int dst_wrap,
  719. uint8_t *src, int src_wrap,
  720. int width, int height)
  721. {
  722. int w;
  723. uint8_t *s1, *s2, *d;
  724. for(;height > 0; height--) {
  725. s1 = src;
  726. s2 = s1 + src_wrap;
  727. d = dst;
  728. for(w = width;w >= 4; w-=4) {
  729. d[0] = (s1[0] + s2[0]) >> 1;
  730. d[1] = (s1[1] + s2[1]) >> 1;
  731. d[2] = (s1[2] + s2[2]) >> 1;
  732. d[3] = (s1[3] + s2[3]) >> 1;
  733. s1 += 4;
  734. s2 += 4;
  735. d += 4;
  736. }
  737. for(;w > 0; w--) {
  738. d[0] = (s1[0] + s2[0]) >> 1;
  739. s1++;
  740. s2++;
  741. d++;
  742. }
  743. src += 2 * src_wrap;
  744. dst += dst_wrap;
  745. }
  746. }
  747. /* 2x2 -> 1x1 */
  748. static void shrink22(uint8_t *dst, int dst_wrap,
  749. uint8_t *src, int src_wrap,
  750. int width, int height)
  751. {
  752. int w;
  753. uint8_t *s1, *s2, *d;
  754. for(;height > 0; height--) {
  755. s1 = src;
  756. s2 = s1 + src_wrap;
  757. d = dst;
  758. for(w = width;w >= 4; w-=4) {
  759. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 1;
  760. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 1;
  761. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 1;
  762. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 1;
  763. s1 += 8;
  764. s2 += 8;
  765. d += 4;
  766. }
  767. for(;w > 0; w--) {
  768. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 1;
  769. s1 += 2;
  770. s2 += 2;
  771. d++;
  772. }
  773. src += 2 * src_wrap;
  774. dst += dst_wrap;
  775. }
  776. }
  777. /* 1x1 -> 2x2 */
  778. static void grow22(uint8_t *dst, int dst_wrap,
  779. uint8_t *src, int src_wrap,
  780. int width, int height)
  781. {
  782. int w;
  783. uint8_t *s1, *d;
  784. for(;height > 0; height--) {
  785. s1 = src;
  786. d = dst;
  787. for(w = width;w >= 4; w-=4) {
  788. d[1] = d[0] = s1[0];
  789. d[3] = d[2] = s1[1];
  790. s1 += 2;
  791. d += 4;
  792. }
  793. for(;w > 0; w--) {
  794. d[0] = s1[0];
  795. s1 ++;
  796. d++;
  797. }
  798. if (height%2)
  799. src += src_wrap;
  800. dst += dst_wrap;
  801. }
  802. }
  803. /* 1x2 -> 2x1 */
  804. static void conv411(uint8_t *dst, int dst_wrap,
  805. uint8_t *src, int src_wrap,
  806. int width, int height)
  807. {
  808. int w, c;
  809. uint8_t *s1, *s2, *d;
  810. width>>=1;
  811. for(;height > 0; height--) {
  812. s1 = src;
  813. s2 = src + src_wrap;
  814. d = dst;
  815. for(w = width;w > 0; w--) {
  816. c = (s1[0] + s2[0]) >> 1;
  817. d[0] = c;
  818. d[1] = c;
  819. s1++;
  820. s2++;
  821. d += 2;
  822. }
  823. src += src_wrap * 2;
  824. dst += dst_wrap;
  825. }
  826. }
  827. /* XXX: add jpeg quantize code */
  828. #define TRANSP_INDEX (6*6*6)
  829. /* this is maybe slow, but allows for extensions */
  830. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  831. {
  832. return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
  833. }
  834. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  835. {
  836. uint32_t *pal;
  837. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  838. int i, r, g, b;
  839. pal = (uint32_t *)palette;
  840. i = 0;
  841. for(r = 0; r < 6; r++) {
  842. for(g = 0; g < 6; g++) {
  843. for(b = 0; b < 6; b++) {
  844. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  845. (pal_value[g] << 8) | pal_value[b];
  846. }
  847. }
  848. }
  849. if (has_alpha)
  850. pal[i++] = 0;
  851. while (i < 256)
  852. pal[i++] = 0xff000000;
  853. }
  854. /* copy bit n to bits 0 ... n - 1 */
  855. static inline unsigned int bitcopy_n(unsigned int a, int n)
  856. {
  857. int mask;
  858. mask = (1 << n) - 1;
  859. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  860. }
/* Per-format pixel accessor macros; imgconvert_template.h is included once
   per format to instantiate the conversion functions for that RGB_NAME.
   NOTE(review): only the bgr24 section #undefs its macros explicitly —
   presumably the template header #undefs the rest after each inclusion;
   confirm against imgconvert_template.h. */

/* rgb555 handling */
#define RGB_NAME rgb555

/* expand the 5-bit fields to 8 bits, replicating the top bits into the
   low bits via bitcopy_n() */
#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (10 - 3), 3);\
    g = bitcopy_n(v >> (5 - 3), 3);\
    b = bitcopy_n(v << 3, 3);\
}

/* bit 15 carries the 1-bit alpha */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (10 - 3), 3);\
    g = bitcopy_n(v >> (5 - 3), 3);\
    b = bitcopy_n(v << 3, 3);\
    a = bitcopy_n(v >> 15, 7);\
}

#define RGBA_OUT(d, r, g, b, a)\
{\
    ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3) | \
        ((a << 8) & 0x8000);\
}

#define BPP 2

#include "imgconvert_template.h"

/* rgb565 handling */
#define RGB_NAME rgb565

#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (11 - 3), 3);\
    g = bitcopy_n(v >> (5 - 2), 2);\
    b = bitcopy_n(v << 3, 3);\
}

#define RGB_OUT(d, r, g, b)\
{\
    ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
}

#define BPP 2

#include "imgconvert_template.h"

/* bgr24 handling */
#define RGB_NAME bgr24

#define RGB_IN(r, g, b, s)\
{\
    b = (s)[0];\
    g = (s)[1];\
    r = (s)[2];\
}

#define RGB_OUT(d, r, g, b)\
{\
    (d)[0] = b;\
    (d)[1] = g;\
    (d)[2] = r;\
}

#define BPP 3

#include "imgconvert_template.h"

#undef RGB_IN
#undef RGB_OUT
#undef BPP

/* rgb24 handling */
#define RGB_NAME rgb24
#define FMT_RGB24

#define RGB_IN(r, g, b, s)\
{\
    r = (s)[0];\
    g = (s)[1];\
    b = (s)[2];\
}

#define RGB_OUT(d, r, g, b)\
{\
    (d)[0] = r;\
    (d)[1] = g;\
    (d)[2] = b;\
}

#define BPP 3

#include "imgconvert_template.h"

/* rgba32 handling: packed 32-bit words, A in bits 24..31 */
#define RGB_NAME rgba32
#define FMT_RGBA32

#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define RGBA_OUT(d, r, g, b, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
}

#define BPP 4

#include "imgconvert_template.h"
/* Expand a 1-bit-per-pixel plane to 8bpp gray. 'xor_mask' is XORed into
   each source byte first, so 0xff handles the white-is-zero variant and
   0x00 the black-is-zero variant (see the wrappers below). */
static void mono_to_gray(AVPicture *dst, AVPicture *src,
                         int width, int height, int xor_mask)
{
    const unsigned char *p;
    unsigned char *q;
    int v, dst_wrap, src_wrap;
    int y, w;

    p = src->data[0];
    /* the wraps hold the padding left after each row's payload */
    src_wrap = src->linesize[0] - ((width + 7) >> 3);
    q = dst->data[0];
    dst_wrap = dst->linesize[0] - width;
    for(y=0;y<height;y++) {
        w = width;
        while (w >= 8) {
            v = *p++ ^ xor_mask;
            /* negating a 0/1 bit yields 0x00 or 0xff in the byte */
            q[0] = -(v >> 7);
            q[1] = -((v >> 6) & 1);
            q[2] = -((v >> 5) & 1);
            q[3] = -((v >> 4) & 1);
            q[4] = -((v >> 3) & 1);
            q[5] = -((v >> 2) & 1);
            q[6] = -((v >> 1) & 1);
            q[7] = -((v >> 0) & 1);
            w -= 8;
            q += 8;
        }
        if (w > 0) {
            /* remaining pixels in the row's last, partial byte */
            v = *p++ ^ xor_mask;
            do {
                q[0] = -((v >> 7) & 1);
                q++;
                v <<= 1;
            } while (--w);
        }
        p += src_wrap;
        q += dst_wrap;
    }
}
/* MONOWHITE -> gray: bits are inverted before expansion (0 bit = white). */
static void monowhite_to_gray(AVPicture *dst, AVPicture *src,
                              int width, int height)
{
    mono_to_gray(dst, src, width, height, 0xff);
}
/* MONOBLACK -> gray: bits are used as-is (0 bit = black). */
static void monoblack_to_gray(AVPicture *dst, AVPicture *src,
                              int width, int height)
{
    mono_to_gray(dst, src, width, height, 0x00);
}
/* Pack an 8bpp gray plane down to 1 bit per pixel, keeping only the top
   bit of each sample; 'xor_mask' inverts the output bytes for the
   white-is-zero variant. */
static void gray_to_mono(AVPicture *dst, AVPicture *src,
                         int width, int height, int xor_mask)
{
    int n;
    const uint8_t *s;
    uint8_t *d;
    int j, b, v, n1, src_wrap, dst_wrap, y;

    s = src->data[0];
    src_wrap = src->linesize[0] - width;
    d = dst->data[0];
    dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
    for(y=0;y<height;y++) {
        n = width;
        while (n >= 8) {
            /* gather the MSB of 8 consecutive samples into one byte */
            v = 0;
            for(j=0;j<8;j++) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
            }
            d[0] = v ^ xor_mask;
            d++;
            n -= 8;
        }
        if (n > 0) {
            /* pack the trailing 1..7 samples, left-aligned in the byte
               (n1 < 8 here, so n1 & 7 == n1) */
            n1 = n;
            v = 0;
            while (n > 0) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
                n--;
            }
            d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
            d++;
        }
        s += src_wrap;
        d += dst_wrap;
    }
}
/* gray -> MONOWHITE: output bytes are inverted (0 bit = white). */
static void gray_to_monowhite(AVPicture *dst, AVPicture *src,
                              int width, int height)
{
    gray_to_mono(dst, src, width, height, 0xff);
}
/* gray -> MONOBLACK: output bytes are used as-is (0 bit = black). */
static void gray_to_monoblack(AVPicture *dst, AVPicture *src,
                              int width, int height)
{
    gray_to_mono(dst, src, width, height, 0x00);
}
/* One cell of convert_table: the direct conversion routine for a
   (source format, destination format) pair. */
typedef struct ConvertEntry {
    void (*convert)(AVPicture *dst, AVPicture *src, int width, int height);
} ConvertEntry;
/* Add each new conversion function in this table. In order to be able
   to convert from any format to any format, the following constraints
   must be satisfied:
   - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
   - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
   - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGBA32
   - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
     PIX_FMT_RGB24.
   - PIX_FMT_YUV422 must convert to and from PIX_FMT_YUV422P.
*/
/* Direct conversion dispatch table, indexed as
   convert_table[src_pix_fmt][dst_pix_fmt].  Cells left unset default to a
   NULL 'convert' pointer; img_convert() then falls back to an intermediate
   format (see the constraints documented above this table). */
static ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
    [PIX_FMT_YUV420P] = {
        [PIX_FMT_RGB555] = {
            .convert = yuv420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuv420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuv420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuv420p_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = yuv420p_to_rgba32
        },
    },
    [PIX_FMT_YUV422P] = {
        [PIX_FMT_YUV422] = {
            .convert = yuv422p_to_yuv422,
        },
    },
    [PIX_FMT_YUV444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuv444p_to_rgb24
        },
    },
    /* full-range (JPEG) YUV sources */
    [PIX_FMT_YUVJ420P] = {
        [PIX_FMT_RGB555] = {
            .convert = yuvj420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuvj420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuvj420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuvj420p_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = yuvj420p_to_rgba32
        },
    },
    [PIX_FMT_YUVJ444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuvj444p_to_rgb24
        },
    },
    /* packed YUV 4:2:2 */
    [PIX_FMT_YUV422] = {
        [PIX_FMT_YUV420P] = {
            .convert = yuv422_to_yuv420p,
        },
        [PIX_FMT_YUV422P] = {
            .convert = yuv422_to_yuv422p,
        },
    },
    /* RGB24 is the hub format for all RGB/gray conversions */
    [PIX_FMT_RGB24] = {
        [PIX_FMT_YUV420P] = {
            .convert = rgb24_to_yuv420p
        },
        [PIX_FMT_RGB565] = {
            .convert = rgb24_to_rgb565
        },
        [PIX_FMT_RGB555] = {
            .convert = rgb24_to_rgb555
        },
        [PIX_FMT_RGBA32] = {
            .convert = rgb24_to_rgba32
        },
        [PIX_FMT_BGR24] = {
            .convert = rgb24_to_bgr24
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb24_to_gray
        },
        [PIX_FMT_PAL8] = {
            .convert = rgb24_to_pal8
        },
        [PIX_FMT_YUV444P] = {
            .convert = rgb24_to_yuv444p
        },
        [PIX_FMT_YUVJ420P] = {
            .convert = rgb24_to_yuvj420p
        },
        [PIX_FMT_YUVJ444P] = {
            .convert = rgb24_to_yuvj444p
        },
    },
    [PIX_FMT_RGBA32] = {
        [PIX_FMT_RGB24] = {
            .convert = rgba32_to_rgb24
        },
        [PIX_FMT_RGB555] = {
            .convert = rgba32_to_rgb555
        },
        [PIX_FMT_PAL8] = {
            .convert = rgba32_to_pal8
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgba32_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgba32_to_gray
        },
    },
    [PIX_FMT_BGR24] = {
        [PIX_FMT_RGB24] = {
            .convert = bgr24_to_rgb24
        },
        [PIX_FMT_YUV420P] = {
            .convert = bgr24_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = bgr24_to_gray
        },
    },
    [PIX_FMT_RGB555] = {
        [PIX_FMT_RGB24] = {
            .convert = rgb555_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = rgb555_to_rgba32
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb555_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb555_to_gray
        },
    },
    [PIX_FMT_RGB565] = {
        [PIX_FMT_RGB24] = {
            .convert = rgb565_to_rgb24
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb565_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb565_to_gray
        },
    },
    /* GRAY8 is the hub format for monochrome conversions */
    [PIX_FMT_GRAY8] = {
        [PIX_FMT_RGB555] = {
            .convert = gray_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = gray_to_rgb565
        },
        [PIX_FMT_RGB24] = {
            .convert = gray_to_rgb24
        },
        [PIX_FMT_BGR24] = {
            .convert = gray_to_bgr24
        },
        [PIX_FMT_RGBA32] = {
            .convert = gray_to_rgba32
        },
        [PIX_FMT_MONOWHITE] = {
            .convert = gray_to_monowhite
        },
        [PIX_FMT_MONOBLACK] = {
            .convert = gray_to_monoblack
        },
    },
    [PIX_FMT_MONOWHITE] = {
        [PIX_FMT_GRAY8] = {
            .convert = monowhite_to_gray
        },
    },
    [PIX_FMT_MONOBLACK] = {
        [PIX_FMT_GRAY8] = {
            .convert = monoblack_to_gray
        },
    },
    [PIX_FMT_PAL8] = {
        [PIX_FMT_RGB555] = {
            .convert = pal8_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = pal8_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = pal8_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = pal8_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = pal8_to_rgba32
        },
    },
};
  1265. static int avpicture_alloc(AVPicture *picture,
  1266. int pix_fmt, int width, int height)
  1267. {
  1268. unsigned int size;
  1269. void *ptr;
  1270. size = avpicture_get_size(pix_fmt, width, height);
  1271. if (size < 0)
  1272. goto fail;
  1273. ptr = av_malloc(size);
  1274. if (!ptr)
  1275. goto fail;
  1276. avpicture_fill(picture, ptr, pix_fmt, width, height);
  1277. return 0;
  1278. fail:
  1279. memset(picture, 0, sizeof(AVPicture));
  1280. return -1;
  1281. }
  1282. static void avpicture_free(AVPicture *picture)
  1283. {
  1284. av_free(picture->data[0]);
  1285. }
  1286. /* return true if yuv planar */
  1287. static inline int is_yuv_planar(PixFmtInfo *ps)
  1288. {
  1289. return (ps->color_type == FF_COLOR_YUV ||
  1290. ps->color_type == FF_COLOR_YUV_JPEG) &&
  1291. ps->pixel_type == FF_PIXEL_PLANAR;
  1292. }
/* XXX: always use linesize. Return -1 if not supported */
/* Convert 'src' (src_pix_fmt, src_width x src_height) into 'dst'
   (dst_pix_fmt, same dimensions).  Tries, in order: plain copy, a direct
   routine from convert_table, special-cased gray<->YUV paths, planar
   YUV <-> planar YUV resampling, and finally a two-step conversion
   through an intermediate format.  Returns 0 on success, -1 if the
   conversion is not supported. */
int img_convert(AVPicture *dst, int dst_pix_fmt,
                AVPicture *src, int src_pix_fmt,
                int src_width, int src_height)
{
    static int inited;  /* one-time lazy initialization flag */
    int i, ret, dst_width, dst_height, int_pix_fmt;
    PixFmtInfo *src_pix, *dst_pix;
    ConvertEntry *ce;
    AVPicture tmp1, *tmp = &tmp1;  /* intermediate picture for 2-step path */

    if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
        dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
        return -1;
    /* empty image: nothing to do, but not an error */
    if (src_width <= 0 || src_height <= 0)
        return 0;
    if (!inited) {
        inited = 1;
        img_convert_init();
    }
    dst_width = src_width;
    dst_height = src_height;
    dst_pix = &pix_fmt_info[dst_pix_fmt];
    src_pix = &pix_fmt_info[src_pix_fmt];
    if (src_pix_fmt == dst_pix_fmt) {
        /* no conversion needed: just copy */
        img_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
        return 0;
    }
    ce = &convert_table[src_pix_fmt][dst_pix_fmt];
    if (ce->convert) {
        /* specific convertion routine */
        ce->convert(dst, src, dst_width, dst_height);
        return 0;
    }
    /* gray to YUV: copy (or range-convert) luma, fill chroma with 128 */
    if (is_yuv_planar(dst_pix) &&
        src_pix_fmt == PIX_FMT_GRAY8) {
        int w, h, y;
        uint8_t *d;

        if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
            /* full-range destination: gray values can be copied as-is */
            img_copy_plane(dst->data[0], dst->linesize[0],
                           src->data[0], src->linesize[0],
                           dst_width, dst_height);
        } else {
            /* CCIR destination: compress full-range gray to 16..235 */
            img_apply_table(dst->data[0], dst->linesize[0],
                            src->data[0], src->linesize[0],
                            dst_width, dst_height,
                            y_jpeg_to_ccir);
        }
        /* fill U and V with 128 */
        w = dst_width;
        h = dst_height;
        w >>= dst_pix->x_chroma_shift;
        h >>= dst_pix->y_chroma_shift;
        for(i = 1; i <= 2; i++) {
            d = dst->data[i];
            for(y = 0; y< h; y++) {
                memset(d, 128, w);
                d += dst->linesize[i];
            }
        }
        return 0;
    }
    /* YUV to gray: keep only the luma plane */
    if (is_yuv_planar(src_pix) &&
        dst_pix_fmt == PIX_FMT_GRAY8) {
        if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
            img_copy_plane(dst->data[0], dst->linesize[0],
                           src->data[0], src->linesize[0],
                           dst_width, dst_height);
        } else {
            /* CCIR source: expand 16..235 luma to full range */
            img_apply_table(dst->data[0], dst->linesize[0],
                            src->data[0], src->linesize[0],
                            dst_width, dst_height,
                            y_ccir_to_jpeg);
        }
        return 0;
    }
    /* YUV to YUV planar: copy luma, resample chroma planes */
    if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
        int x_shift, y_shift, w, h;
        void (*resize_func)(uint8_t *dst, int dst_wrap,
                            uint8_t *src, int src_wrap,
                            int width, int height);

        /* compute chroma size of the smallest dimensions */
        w = dst_width;
        h = dst_height;
        if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
            w >>= dst_pix->x_chroma_shift;
        else
            w >>= src_pix->x_chroma_shift;
        if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
            h >>= dst_pix->y_chroma_shift;
        else
            h >>= src_pix->y_chroma_shift;

        /* positive shift = destination chroma is smaller (shrink),
           negative shift = destination chroma is larger (grow) */
        x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
        y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
        if (x_shift == 0 && y_shift == 0) {
            resize_func = img_copy_plane;
        } else if (x_shift == 0 && y_shift == 1) {
            resize_func = shrink2;
        } else if (x_shift == 1 && y_shift == 1) {
            resize_func = shrink22;
        } else if (x_shift == -1 && y_shift == -1) {
            resize_func = grow22;
        } else if (x_shift == -1 && y_shift == 1) {
            resize_func = conv411;
        } else {
            /* currently not handled */
            return -1;
        }

        img_copy_plane(dst->data[0], dst->linesize[0],
                       src->data[0], src->linesize[0],
                       dst_width, dst_height);

        for(i = 1;i <= 2; i++)
            resize_func(dst->data[i], dst->linesize[i],
                        src->data[i], src->linesize[i],
                        dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);

        /* if yuv color space conversion is needed, we do it here on
           the destination image */
        if (dst_pix->color_type != src_pix->color_type) {
            const uint8_t *y_table, *c_table;
            if (dst_pix->color_type == FF_COLOR_YUV) {
                y_table = y_jpeg_to_ccir;
                c_table = c_jpeg_to_ccir;
            } else {
                y_table = y_ccir_to_jpeg;
                c_table = c_ccir_to_jpeg;
            }
            /* in-place range conversion of luma, then both chroma planes */
            img_apply_table(dst->data[0], dst->linesize[0],
                            dst->data[0], dst->linesize[0],
                            dst_width, dst_height,
                            y_table);

            for(i = 1;i <= 2; i++)
                img_apply_table(dst->data[i], dst->linesize[i],
                                dst->data[i], dst->linesize[i],
                                dst_width>>dst_pix->x_chroma_shift,
                                dst_height>>dst_pix->y_chroma_shift,
                                c_table);
        }
        return 0;
    }
    /* try to use an intermediate format */
    if (src_pix_fmt == PIX_FMT_YUV422 ||
        dst_pix_fmt == PIX_FMT_YUV422) {
        /* specific case: convert to YUV422P first */
        int_pix_fmt = PIX_FMT_YUV422P;
    } else if ((src_pix->color_type == FF_COLOR_GRAY &&
                src_pix_fmt != PIX_FMT_GRAY8) ||
               (dst_pix->color_type == FF_COLOR_GRAY &&
                dst_pix_fmt != PIX_FMT_GRAY8)) {
        /* gray8 is the normalized format */
        int_pix_fmt = PIX_FMT_GRAY8;
    } else if ((is_yuv_planar(src_pix) &&
                src_pix_fmt != PIX_FMT_YUV444P &&
                src_pix_fmt != PIX_FMT_YUVJ444P)) {
        /* yuv444 is the normalized format */
        if (src_pix->color_type == FF_COLOR_YUV_JPEG)
            int_pix_fmt = PIX_FMT_YUVJ444P;
        else
            int_pix_fmt = PIX_FMT_YUV444P;
    } else if ((is_yuv_planar(dst_pix) &&
                dst_pix_fmt != PIX_FMT_YUV444P &&
                dst_pix_fmt != PIX_FMT_YUVJ444P)) {
        /* yuv444 is the normalized format */
        if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
            int_pix_fmt = PIX_FMT_YUVJ444P;
        else
            int_pix_fmt = PIX_FMT_YUV444P;
    } else {
        /* the two formats are rgb or gray8 or yuv[j]444p */
        if (src_pix->is_alpha && dst_pix->is_alpha)
            int_pix_fmt = PIX_FMT_RGBA32;
        else
            int_pix_fmt = PIX_FMT_RGB24;
    }
    /* two-step conversion: src -> intermediate -> dst */
    if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
        return -1;
    ret = -1;
    if (img_convert(tmp, int_pix_fmt,
                    src, src_pix_fmt, src_width, src_height) < 0)
        goto fail1;
    if (img_convert(dst, dst_pix_fmt,
                    tmp, int_pix_fmt, dst_width, dst_height) < 0)
        goto fail1;
    ret = 0;
 fail1:
    avpicture_free(tmp);
    return ret;
}
#ifdef HAVE_MMX
/* MMX kernels for the [-1 4 2 4 -1]/8 vertical deinterlacing filter.
   Both variants process 4 pixels per invocation.  They expect mm7 == 0
   (for byte->word unpacking) and mm6 == the 0x0004 rounding constant in
   each word lane; the callers set these up before the loop.

   DEINT_INPLACE_LINE_LUM additionally writes the (pre-filter) value of
   lum_m2 back into lum_m4 and stores the filtered result into lum_m2,
   matching the C in-place variant below. */
#define DEINT_INPLACE_LINE_LUM \
                    movd_m2r(lum_m4[0],mm0);\
                    movd_m2r(lum_m3[0],mm1);\
                    movd_m2r(lum_m2[0],mm2);\
                    movd_m2r(lum_m1[0],mm3);\
                    movd_m2r(lum[0],mm4);\
                    punpcklbw_r2r(mm7,mm0);\
                    movd_r2m(mm2,lum_m4[0]);\
                    punpcklbw_r2r(mm7,mm1);\
                    punpcklbw_r2r(mm7,mm2);\
                    punpcklbw_r2r(mm7,mm3);\
                    punpcklbw_r2r(mm7,mm4);\
                    paddw_r2r(mm3,mm1);\
                    psllw_i2r(1,mm2);\
                    paddw_r2r(mm4,mm0);\
                    psllw_i2r(2,mm1);\
                    paddw_r2r(mm6,mm2);\
                    paddw_r2r(mm2,mm1);\
                    psubusw_r2r(mm0,mm1);\
                    psrlw_i2r(3,mm1);\
                    packuswb_r2r(mm7,mm1);\
                    movd_r2m(mm1,lum_m2[0]);

/* Same filter, but the result goes to a separate destination 'dst'. */
#define DEINT_LINE_LUM \
                    movd_m2r(lum_m4[0],mm0);\
                    movd_m2r(lum_m3[0],mm1);\
                    movd_m2r(lum_m2[0],mm2);\
                    movd_m2r(lum_m1[0],mm3);\
                    movd_m2r(lum[0],mm4);\
                    punpcklbw_r2r(mm7,mm0);\
                    punpcklbw_r2r(mm7,mm1);\
                    punpcklbw_r2r(mm7,mm2);\
                    punpcklbw_r2r(mm7,mm3);\
                    punpcklbw_r2r(mm7,mm4);\
                    paddw_r2r(mm3,mm1);\
                    psllw_i2r(1,mm2);\
                    paddw_r2r(mm4,mm0);\
                    psllw_i2r(2,mm1);\
                    paddw_r2r(mm6,mm2);\
                    paddw_r2r(mm2,mm1);\
                    psubusw_r2r(mm0,mm1);\
                    psrlw_i2r(3,mm1);\
                    packuswb_r2r(mm7,mm1);\
                    movd_r2m(mm1,dst[0]);
#endif
/* filter parameters: [-1 4 2 4 -1] // 8 */
/* Filter one line: each output pixel is computed from the same horizontal
   position on five consecutive source lines (lum_m4..lum) and written to
   'dst'.  NOTE(review): the MMX path only handles 'size' in multiples of
   4; callers guarantee this via the width & 3 check in
   avpicture_deinterlace(). */
static void deinterlace_line(uint8_t *dst, uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
                                int size)
{
#ifndef HAVE_MMX
    /* offset clamp table: saturates the filter output to [0, 255] */
    uint8_t *cm = cropTbl + MAX_NEG_CROP;
    int sum;

    for(;size > 0;size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        /* +4 rounds to nearest before the divide by 8 */
        dst[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
        dst++;
    }
#else
    {
        /* set up the per-lane rounding constant (mm6) and zero (mm7)
           expected by DEINT_LINE_LUM */
        mmx_t rounder;
        rounder.uw[0]=4;
        rounder.uw[1]=4;
        rounder.uw[2]=4;
        rounder.uw[3]=4;
        pxor_r2r(mm7,mm7);
        movq_m2r(rounder,mm6);
    }
    for (;size > 3; size-=4) {
        DEINT_LINE_LUM
        lum_m4+=4;
        lum_m3+=4;
        lum_m2+=4;
        lum_m1+=4;
        lum+=4;
        dst+=4;
    }
#endif
}
/* In-place variant of deinterlace_line(): the filtered result replaces
   lum_m2, and the original lum_m2 value is saved into lum_m4 so the
   caller can keep a rolling window without a separate destination.
   NOTE(review): as above, the MMX path assumes size % 4 == 0. */
static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
                             int size)
{
#ifndef HAVE_MMX
    /* offset clamp table: saturates the filter output to [0, 255] */
    uint8_t *cm = cropTbl + MAX_NEG_CROP;
    int sum;

    for(;size > 0;size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        /* save the unfiltered center line before overwriting it */
        lum_m4[0]=lum_m2[0];
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        lum_m2[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
    }
#else
    {
        /* set up the rounding constant (mm6) and zero (mm7) expected by
           DEINT_INPLACE_LINE_LUM */
        mmx_t rounder;
        rounder.uw[0]=4;
        rounder.uw[1]=4;
        rounder.uw[2]=4;
        rounder.uw[3]=4;
        pxor_r2r(mm7,mm7);
        movq_m2r(rounder,mm6);
    }
    for (;size > 3; size-=4) {
        DEINT_INPLACE_LINE_LUM
        lum_m4+=4;
        lum_m3+=4;
        lum_m2+=4;
        lum_m1+=4;
        lum+=4;
    }
#endif
}
/* deinterlacing : 2 temporal taps, 3 spatial taps linear filter. The
   top field is copied as is, but the bottom field is deinterlaced
   against the top field. */
/* Deinterlace one plane from 'src1' into 'dst'.  Even lines are copied
   verbatim; each odd line is replaced by the 5-line filter centered on
   it.  The five src_* pointers form a rolling window that advances two
   source lines per iteration.  Near the top and bottom borders the
   window is clamped by reusing the nearest valid line. */
static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
                                    uint8_t *src1, int src_wrap,
                                    int width, int height)
{
    uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
    int y;

    /* clamp the window at the top: lines -2 and -1 both alias line 0 */
    src_m2 = src1;
    src_m1 = src1;
    src_0=&src_m1[src_wrap];
    src_p1=&src_0[src_wrap];
    src_p2=&src_p1[src_wrap];
    for(y=0;y<(height-2);y+=2) {
        /* even (top-field) line: copy unchanged */
        memcpy(dst,src_m1,width);
        dst += dst_wrap;
        /* odd (bottom-field) line: filtered from the 5-line window */
        deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
        src_m2 = src_0;
        src_m1 = src_p1;
        src_0 = src_p2;
        src_p1 += 2*src_wrap;
        src_p2 += 2*src_wrap;
        dst += dst_wrap;
    }
    memcpy(dst,src_m1,width);
    dst += dst_wrap;
    /* do last line */
    /* clamp at the bottom: the two lines below reuse src_0 */
    deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
}
  1640. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  1641. int width, int height)
  1642. {
  1643. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  1644. int y;
  1645. uint8_t *buf;
  1646. buf = (uint8_t*)av_malloc(width);
  1647. src_m1 = src1;
  1648. memcpy(buf,src_m1,width);
  1649. src_0=&src_m1[src_wrap];
  1650. src_p1=&src_0[src_wrap];
  1651. src_p2=&src_p1[src_wrap];
  1652. for(y=0;y<(height-2);y+=2) {
  1653. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  1654. src_m1 = src_p1;
  1655. src_0 = src_p2;
  1656. src_p1 += 2*src_wrap;
  1657. src_p2 += 2*src_wrap;
  1658. }
  1659. /* do last line */
  1660. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  1661. av_free(buf);
  1662. }
  1663. /* deinterlace - if not supported return -1 */
  1664. int avpicture_deinterlace(AVPicture *dst, AVPicture *src,
  1665. int pix_fmt, int width, int height)
  1666. {
  1667. int i;
  1668. if (pix_fmt != PIX_FMT_YUV420P &&
  1669. pix_fmt != PIX_FMT_YUV422P &&
  1670. pix_fmt != PIX_FMT_YUV444P)
  1671. return -1;
  1672. if ((width & 3) != 0 || (height & 3) != 0)
  1673. return -1;
  1674. for(i=0;i<3;i++) {
  1675. if (i == 1) {
  1676. switch(pix_fmt) {
  1677. case PIX_FMT_YUV420P:
  1678. width >>= 1;
  1679. height >>= 1;
  1680. break;
  1681. case PIX_FMT_YUV422P:
  1682. width >>= 1;
  1683. break;
  1684. default:
  1685. break;
  1686. }
  1687. }
  1688. if (src == dst) {
  1689. deinterlace_bottom_field_inplace(src->data[i], src->linesize[i],
  1690. width, height);
  1691. } else {
  1692. deinterlace_bottom_field(dst->data[i],dst->linesize[i],
  1693. src->data[i], src->linesize[i],
  1694. width, height);
  1695. }
  1696. }
  1697. #ifdef HAVE_MMX
  1698. emms();
  1699. #endif
  1700. return 0;
  1701. }
  1702. #undef FIX