You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2483 lines
82KB

  1. /*
  2. * Misc image convertion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
  4. *
  5. * This library is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU Lesser General Public
  7. * License as published by the Free Software Foundation; either
  8. * version 2 of the License, or (at your option) any later version.
  9. *
  10. * This library is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * Lesser General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU Lesser General Public
  16. * License along with this library; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. /**
  20. * @file imgconvert.c
  21. * Misc image convertion routines.
  22. */
  23. /* TODO:
  24. * - write 'ffimg' program to test all the image related stuff
  25. * - move all api to slice based system
  26. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  27. */
  28. #include "avcodec.h"
  29. #include "dsputil.h"
  30. #ifdef USE_FASTMEMCPY
  31. #include "fastmemcpy.h"
  32. #endif
  33. #ifdef HAVE_MMX
  34. #include "i386/mmx.h"
  35. #endif
  36. #define FF_COLOR_RGB 0 /* RGB color space */
  37. #define FF_COLOR_GRAY 1 /* gray color space */
  38. #define FF_COLOR_YUV 2 /* YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
  39. #define FF_COLOR_YUV_JPEG 3 /* YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
/* static description of one pixel format; one entry per PIX_FMT_* value
   lives in pix_fmt_info[] below */
typedef struct PixFmtInfo {
    const char *name;            /* canonical format name, e.g. "yuv420p" */
    uint8_t nb_components;       /* number of components in AVPicture array */
    uint8_t color_type;          /* color type (see FF_COLOR_xxx constants) */
    uint8_t is_packed : 1;       /* true if multiple components in same word */
    uint8_t is_paletted : 1;     /* true if paletted */
    uint8_t is_alpha : 1;        /* true if alpha can be specified */
    uint8_t x_chroma_shift;      /* X chroma subsampling factor is 2 ^ shift */
    uint8_t y_chroma_shift;      /* Y chroma subsampling factor is 2 ^ shift */
    uint8_t depth;               /* bit depth of the color components */
} PixFmtInfo;
/* this table gives more information about formats; indexed by PIX_FMT_* */
static PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
    /* YUV formats */
    [PIX_FMT_YUV420P] = {
        .name = "yuv420p",
        .nb_components = 3,
        .color_type = FF_COLOR_YUV,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 1,
    },
    [PIX_FMT_YUV422P] = {
        .name = "yuv422p",
        .nb_components = 3,
        .color_type = FF_COLOR_YUV,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_YUV444P] = {
        .name = "yuv444p",
        .nb_components = 3,
        .color_type = FF_COLOR_YUV,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    /* packed YUYV: one interleaved plane, hence nb_components = 1 */
    [PIX_FMT_YUV422] = {
        .name = "yuv422",
        .nb_components = 1, .is_packed = 1,
        .color_type = FF_COLOR_YUV,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_YUV410P] = {
        .name = "yuv410p",
        .nb_components = 3,
        .color_type = FF_COLOR_YUV,
        .depth = 8,
        .x_chroma_shift = 2, .y_chroma_shift = 2,
    },
    [PIX_FMT_YUV411P] = {
        .name = "yuv411p",
        .nb_components = 3,
        .color_type = FF_COLOR_YUV,
        .depth = 8,
        .x_chroma_shift = 2, .y_chroma_shift = 0,
    },
    /* JPEG YUV (full 0..255 range) */
    [PIX_FMT_YUVJ420P] = {
        .name = "yuvj420p",
        .nb_components = 3,
        .color_type = FF_COLOR_YUV_JPEG,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 1,
    },
    [PIX_FMT_YUVJ422P] = {
        .name = "yuvj422p",
        .nb_components = 3,
        .color_type = FF_COLOR_YUV_JPEG,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_YUVJ444P] = {
        .name = "yuvj444p",
        .nb_components = 3,
        .color_type = FF_COLOR_YUV_JPEG,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    /* RGB formats */
    [PIX_FMT_RGB24] = {
        .name = "rgb24",
        .nb_components = 1, .is_packed = 1,
        .color_type = FF_COLOR_RGB,
        .depth = 8,
    },
    [PIX_FMT_BGR24] = {
        .name = "bgr24",
        .nb_components = 1, .is_packed = 1,
        .color_type = FF_COLOR_RGB,
        .depth = 8,
    },
    [PIX_FMT_RGBA32] = {
        .name = "rgba32",
        .nb_components = 1, .is_packed = 1, .is_alpha = 1,
        .color_type = FF_COLOR_RGB,
        .depth = 8,
    },
    [PIX_FMT_RGB565] = {
        .name = "rgb565",
        .nb_components = 1, .is_packed = 1,
        .color_type = FF_COLOR_RGB,
        .depth = 5,
    },
    /* is_alpha: the high bit is free and can carry 1 bit of alpha
       (the rgb555 RGB_OUT macro below always sets it) */
    [PIX_FMT_RGB555] = {
        .name = "rgb555",
        .nb_components = 1, .is_packed = 1, .is_alpha = 1,
        .color_type = FF_COLOR_RGB,
        .depth = 5,
    },
    /* gray / mono formats */
    [PIX_FMT_GRAY8] = {
        .name = "gray",
        .nb_components = 1,
        .color_type = FF_COLOR_GRAY,
        .depth = 8,
    },
    [PIX_FMT_MONOWHITE] = {
        .name = "monow",
        .nb_components = 1,
        .color_type = FF_COLOR_GRAY,
        .depth = 1,
    },
    [PIX_FMT_MONOBLACK] = {
        .name = "monob",
        .nb_components = 1,
        .color_type = FF_COLOR_GRAY,
        .depth = 1,
    },
    /* paletted formats */
    [PIX_FMT_PAL8] = {
        .name = "pal8",
        .nb_components = 1, .is_packed = 1, .is_alpha = 1, .is_paletted = 1,
        .color_type = FF_COLOR_RGB,
        .depth = 8,
    },
};
  176. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  177. {
  178. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  179. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  180. }
  181. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  182. {
  183. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  184. return "???";
  185. else
  186. return pix_fmt_info[pix_fmt].name;
  187. }
/* Picture fields are filled with 'ptr' addresses. Also return size.
   Returns -1 (and NULL data pointers) for unsupported pixel formats.
   NOTE(review): data[3]/linesize[3] are left untouched on the success
   paths; only the default branch clears data[3]. */
int avpicture_fill(AVPicture *picture, uint8_t *ptr,
                   int pix_fmt, int width, int height)
{
    int size, w2, h2, size2;
    PixFmtInfo *pinfo;

    pinfo = &pix_fmt_info[pix_fmt];
    size = width * height;  /* bytes in one full-resolution 8-bit plane */
    switch(pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
    case PIX_FMT_YUVJ420P:
    case PIX_FMT_YUVJ422P:
    case PIX_FMT_YUVJ444P:
        /* chroma plane dimensions, rounded up so odd sizes still fit */
        w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
        h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
        size2 = w2 * h2;
        /* planes are laid out contiguously: Y, then Cb, then Cr */
        picture->data[0] = ptr;
        picture->data[1] = picture->data[0] + size;
        picture->data[2] = picture->data[1] + size2;
        picture->linesize[0] = width;
        picture->linesize[1] = w2;
        picture->linesize[2] = w2;
        return size + 2 * size2;
    case PIX_FMT_RGB24:
    case PIX_FMT_BGR24:
        /* packed, 3 bytes per pixel */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 3;
        return size * 3;
    case PIX_FMT_RGBA32:
        /* packed, 4 bytes per pixel */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 4;
        return size * 4;
    case PIX_FMT_RGB555:
    case PIX_FMT_RGB565:
    case PIX_FMT_YUV422:
        /* packed, 2 bytes per pixel */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 2;
        return size * 2;
    case PIX_FMT_GRAY8:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width;
        return size;
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        /* 1 bit per pixel, rows rounded up to whole bytes */
        picture->linesize[0] = (width + 7) >> 3;
        return picture->linesize[0] * height;
    case PIX_FMT_PAL8:
        /* round the pixel area up to a multiple of 4 bytes so the
           palette that follows it is 32-bit aligned */
        size2 = (size + 3) & ~3;
        picture->data[0] = ptr;
        picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
        picture->data[2] = NULL;
        picture->linesize[0] = width;
        picture->linesize[1] = 4;
        return size2 + 256 * 4;
    default:
        picture->data[0] = NULL;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        return -1;
    }
}
  265. int avpicture_get_size(int pix_fmt, int width, int height)
  266. {
  267. AVPicture dummy_pict;
  268. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  269. }
  270. /**
  271. * compute the loss when converting from a pixel format to another
  272. */
  273. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  274. int has_alpha)
  275. {
  276. const PixFmtInfo *pf, *ps;
  277. int loss;
  278. ps = &pix_fmt_info[src_pix_fmt];
  279. pf = &pix_fmt_info[dst_pix_fmt];
  280. /* compute loss */
  281. loss = 0;
  282. pf = &pix_fmt_info[dst_pix_fmt];
  283. if (pf->depth < ps->depth)
  284. loss |= FF_LOSS_DEPTH;
  285. if (pf->x_chroma_shift >= ps->x_chroma_shift ||
  286. pf->y_chroma_shift >= ps->y_chroma_shift)
  287. loss |= FF_LOSS_RESOLUTION;
  288. switch(pf->color_type) {
  289. case FF_COLOR_RGB:
  290. if (ps->color_type != FF_COLOR_RGB &&
  291. ps->color_type != FF_COLOR_GRAY)
  292. loss |= FF_LOSS_COLORSPACE;
  293. break;
  294. case FF_COLOR_GRAY:
  295. if (ps->color_type != FF_COLOR_GRAY)
  296. loss |= FF_LOSS_COLORSPACE;
  297. break;
  298. case FF_COLOR_YUV:
  299. if (ps->color_type != FF_COLOR_YUV)
  300. loss |= FF_LOSS_COLORSPACE;
  301. break;
  302. case FF_COLOR_YUV_JPEG:
  303. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  304. ps->color_type != FF_COLOR_YUV)
  305. loss |= FF_LOSS_COLORSPACE;
  306. break;
  307. default:
  308. /* fail safe test */
  309. if (ps->color_type != pf->color_type)
  310. loss |= FF_LOSS_COLORSPACE;
  311. break;
  312. }
  313. if (pf->color_type == FF_COLOR_GRAY &&
  314. ps->color_type != FF_COLOR_GRAY)
  315. loss |= FF_LOSS_CHROMA;
  316. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  317. loss |= FF_LOSS_ALPHA;
  318. if (pf->is_paletted && (!ps->is_paletted && ps->color_type != FF_COLOR_GRAY))
  319. loss |= FF_LOSS_COLORQUANT;
  320. return loss;
  321. }
  322. static int avg_bits_per_pixel(int pix_fmt)
  323. {
  324. int bits;
  325. const PixFmtInfo *pf;
  326. pf = &pix_fmt_info[pix_fmt];
  327. if (pf->is_packed) {
  328. switch(pix_fmt) {
  329. case PIX_FMT_RGB24:
  330. case PIX_FMT_BGR24:
  331. bits = 24;
  332. break;
  333. case PIX_FMT_RGBA32:
  334. bits = 32;
  335. break;
  336. case PIX_FMT_RGB565:
  337. case PIX_FMT_RGB555:
  338. bits = 16;
  339. break;
  340. case PIX_FMT_PAL8:
  341. bits = 8;
  342. break;
  343. default:
  344. bits = 32;
  345. break;
  346. }
  347. } else {
  348. bits = pf->depth;
  349. bits += (2 * pf->depth >>
  350. (pf->x_chroma_shift + pf->x_chroma_shift));
  351. }
  352. return bits;
  353. }
  354. static int avcodec_find_best_pix_fmt1(int pix_fmt_mask,
  355. int src_pix_fmt,
  356. int has_alpha,
  357. int loss_mask)
  358. {
  359. int dist, i, loss, min_dist, dst_pix_fmt;
  360. /* find exact color match with smallest size */
  361. dst_pix_fmt = -1;
  362. min_dist = 0x7fffffff;
  363. for(i = 0;i < PIX_FMT_NB; i++) {
  364. if (pix_fmt_mask & (1 << i)) {
  365. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  366. if (loss == 0) {
  367. dist = avg_bits_per_pixel(i);
  368. if (dist < min_dist) {
  369. min_dist = dist;
  370. dst_pix_fmt = i;
  371. }
  372. }
  373. }
  374. }
  375. return dst_pix_fmt;
  376. }
  377. /**
  378. * find best pixel format to convert to. Return -1 if none found
  379. */
  380. int avcodec_find_best_pix_fmt(int pix_fmt_mask, int src_pix_fmt,
  381. int has_alpha, int *loss_ptr)
  382. {
  383. int dst_pix_fmt, loss_mask, i;
  384. static const int loss_mask_order[] = {
  385. ~0, /* no loss first */
  386. ~FF_LOSS_ALPHA,
  387. ~FF_LOSS_RESOLUTION,
  388. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  389. ~FF_LOSS_COLORQUANT,
  390. ~FF_LOSS_DEPTH,
  391. 0,
  392. };
  393. /* try with successive loss */
  394. i = 0;
  395. for(;;) {
  396. loss_mask = loss_mask_order[i++];
  397. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  398. has_alpha, loss_mask);
  399. if (dst_pix_fmt >= 0)
  400. goto found;
  401. if (loss_mask == 0)
  402. break;
  403. }
  404. return -1;
  405. found:
  406. if (loss_ptr)
  407. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  408. return dst_pix_fmt;
  409. }
  410. /* XXX: totally non optimized */
  411. static void yuv422_to_yuv420p(AVPicture *dst, AVPicture *src,
  412. int width, int height)
  413. {
  414. const uint8_t *p, *p1;
  415. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  416. int x;
  417. p1 = src->data[0];
  418. lum1 = dst->data[0];
  419. cb1 = dst->data[0];
  420. cr1 = dst->data[0];
  421. for(;height >= 2; height -= 2) {
  422. p = p1;
  423. lum = lum1;
  424. cb = cb1;
  425. cr = cr1;
  426. for(x=0;x<width;x+=2) {
  427. lum[0] = p[0];
  428. cb[0] = p[1];
  429. lum[1] = p[2];
  430. cr[0] = p[3];
  431. p += 4;
  432. lum += 2;
  433. cb++;
  434. cr++;
  435. }
  436. p1 += src->linesize[0];
  437. lum1 += dst->linesize[0];
  438. p = p1;
  439. lum = lum1;
  440. for(x=0;x<width;x+=2) {
  441. lum[0] = p[0];
  442. lum[1] = p[2];
  443. p += 4;
  444. lum += 2;
  445. }
  446. p1 += src->linesize[0];
  447. lum1 += dst->linesize[0];
  448. cb1 += dst->linesize[1];
  449. cr1 += dst->linesize[2];
  450. }
  451. }
  452. static void yuv422_to_yuv422p(AVPicture *dst, AVPicture *src,
  453. int width, int height)
  454. {
  455. const uint8_t *p, *p1;
  456. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  457. int w;
  458. p1 = src->data[0];
  459. lum1 = dst->data[0];
  460. cb1 = dst->data[0];
  461. cr1 = dst->data[0];
  462. for(;height >= 2; height -= 2) {
  463. p = p1;
  464. lum = lum1;
  465. cb = cb1;
  466. cr = cr1;
  467. for(w = width; w >= 2; w -= 2) {
  468. lum[0] = p[0];
  469. cb[0] = p[1];
  470. lum[1] = p[2];
  471. cr[0] = p[3];
  472. p += 4;
  473. lum += 2;
  474. cb++;
  475. cr++;
  476. }
  477. p1 += src->linesize[0];
  478. lum1 += dst->linesize[0];
  479. cb1 += dst->linesize[1];
  480. cr1 += dst->linesize[2];
  481. }
  482. }
  483. static void yuv422p_to_yuv422(AVPicture *dst, AVPicture *src,
  484. int width, int height)
  485. {
  486. uint8_t *p, *p1;
  487. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  488. int w;
  489. p1 = dst->data[0];
  490. lum1 = src->data[0];
  491. cb1 = src->data[0];
  492. cr1 = src->data[0];
  493. for(;height >= 2; height -= 2) {
  494. p = p1;
  495. lum = lum1;
  496. cb = cb1;
  497. cr = cr1;
  498. for(w = width; w >= 2; w -= 2) {
  499. p[0] = lum[0];
  500. p[1] = cb[0];
  501. p[2] = lum[1];
  502. p[3] = cr[0];
  503. p += 4;
  504. lum += 2;
  505. cb++;
  506. cr++;
  507. }
  508. p1 += src->linesize[0];
  509. lum1 += dst->linesize[0];
  510. cb1 += dst->linesize[1];
  511. cr1 += dst->linesize[2];
  512. }
  513. }
/* fixed-point precision used by all colorspace conversion macros below */
#define SCALEBITS 10
#define ONE_HALF (1 << (SCALEBITS - 1))
#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))

/* compute the additive RGB terms from limited-range (CCIR601) Cb/Cr;
   writes cb, cr, r_add, g_add, b_add in the enclosing scope */
#define YUV_TO_RGB1_CCIR(cb1, cr1)\
{\
    cb = (cb1) - 128;\
    cr = (cr1) - 128;\
    r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
    g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
            ONE_HALF;\
    b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
}

/* combine a limited-range luma sample with the precomputed chroma terms;
   'cm' is the clamping table, results land in r, g, b */
#define YUV_TO_RGB2_CCIR(r, g, b, y1)\
{\
    y = ((y1) - 16) * FIX(255.0/219.0);\
    r = cm[(y + r_add) >> SCALEBITS];\
    g = cm[(y + g_add) >> SCALEBITS];\
    b = cm[(y + b_add) >> SCALEBITS];\
}

/* same pair of macros for full-range (JPEG) YUV input */
#define YUV_TO_RGB1(cb1, cr1)\
{\
    cb = (cb1) - 128;\
    cr = (cr1) - 128;\
    r_add = FIX(1.40200) * cr + ONE_HALF;\
    g_add = - FIX(0.34414) * cb - FIX(0.71414) * cr + ONE_HALF;\
    b_add = FIX(1.77200) * cb + ONE_HALF;\
}

#define YUV_TO_RGB2(r, g, b, y1)\
{\
    y = (y1) << SCALEBITS;\
    r = cm[(y + r_add) >> SCALEBITS];\
    g = cm[(y + g_add) >> SCALEBITS];\
    b = cm[(y + b_add) >> SCALEBITS];\
}

/* luma level remapping between CCIR601 (16..235) and JPEG (0..255) */
#define Y_CCIR_TO_JPEG(y)\
 cm[((y) * FIX(255.0/219.0) + (ONE_HALF - 16 * FIX(255.0/219.0))) >> SCALEBITS]

#define Y_JPEG_TO_CCIR(y)\
 (((y) * FIX(219.0/255.0) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

/* chroma level remapping between CCIR601 (16..240) and JPEG (0..255) */
#define C_CCIR_TO_JPEG(y)\
 cm[(((y) - 128) * FIX(127.0/112.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS]

/* NOTE: the clamp is really necessary! */
#define C_JPEG_TO_CCIR(y)\
({\
    int __y;\
    __y = ((((y) - 128) * FIX(112.0/127.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS);\
    if (__y < 16)\
        __y = 16;\
    __y;\
})

/* full-range RGB -> full-range YUV */
#define RGB_TO_Y(r, g, b) \
((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
  FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)

/* 'shift' compensates for summed pixel values (log2 of the pixel count) */
#define RGB_TO_U(r1, g1, b1, shift)\
(((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
     FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V(r1, g1, b1, shift)\
(((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
   FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

/* full-range RGB -> limited-range (CCIR601) YUV */
#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
/* per-sample lookup tables for CCIR601 <-> JPEG level remapping of luma
   and chroma; filled once by img_convert_init() */
static uint8_t y_ccir_to_jpeg[256];
static uint8_t y_jpeg_to_ccir[256];
static uint8_t c_ccir_to_jpeg[256];
static uint8_t c_jpeg_to_ccir[256];
  585. /* init various conversion tables */
  586. static void img_convert_init(void)
  587. {
  588. int i;
  589. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  590. for(i = 0;i < 256; i++) {
  591. y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
  592. y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
  593. c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
  594. c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
  595. }
  596. }
  597. /* apply to each pixel the given table */
  598. static void img_apply_table(uint8_t *dst, int dst_wrap,
  599. const uint8_t *src, int src_wrap,
  600. int width, int height, const uint8_t *table1)
  601. {
  602. int n;
  603. const uint8_t *s;
  604. uint8_t *d;
  605. const uint8_t *table;
  606. table = table1;
  607. for(;height > 0; height--) {
  608. s = src;
  609. d = dst;
  610. n = width;
  611. while (n >= 4) {
  612. d[0] = table[s[0]];
  613. d[1] = table[s[1]];
  614. d[2] = table[s[2]];
  615. d[3] = table[s[3]];
  616. d += 4;
  617. s += 4;
  618. n -= 4;
  619. }
  620. while (n > 0) {
  621. d[0] = table[s[0]];
  622. d++;
  623. s++;
  624. n--;
  625. }
  626. dst += dst_wrap;
  627. src += src_wrap;
  628. }
  629. }
  630. /* XXX: use generic filter ? */
  631. /* 1x2 -> 1x1 */
  632. static void shrink2(uint8_t *dst, int dst_wrap,
  633. uint8_t *src, int src_wrap,
  634. int width, int height)
  635. {
  636. int w;
  637. uint8_t *s1, *s2, *d;
  638. for(;height > 0; height--) {
  639. s1 = src;
  640. s2 = s1 + src_wrap;
  641. d = dst;
  642. for(w = width;w >= 4; w-=4) {
  643. d[0] = (s1[0] + s2[0]) >> 1;
  644. d[1] = (s1[1] + s2[1]) >> 1;
  645. d[2] = (s1[2] + s2[2]) >> 1;
  646. d[3] = (s1[3] + s2[3]) >> 1;
  647. s1 += 4;
  648. s2 += 4;
  649. d += 4;
  650. }
  651. for(;w > 0; w--) {
  652. d[0] = (s1[0] + s2[0]) >> 1;
  653. s1++;
  654. s2++;
  655. d++;
  656. }
  657. src += 2 * src_wrap;
  658. dst += dst_wrap;
  659. }
  660. }
  661. /* 2x2 -> 1x1 */
  662. static void shrink22(uint8_t *dst, int dst_wrap,
  663. uint8_t *src, int src_wrap,
  664. int width, int height)
  665. {
  666. int w;
  667. uint8_t *s1, *s2, *d;
  668. for(;height > 0; height--) {
  669. s1 = src;
  670. s2 = s1 + src_wrap;
  671. d = dst;
  672. for(w = width;w >= 4; w-=4) {
  673. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 1;
  674. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 1;
  675. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 1;
  676. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 1;
  677. s1 += 8;
  678. s2 += 8;
  679. d += 4;
  680. }
  681. for(;w > 0; w--) {
  682. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 1;
  683. s1 += 2;
  684. s2 += 2;
  685. d++;
  686. }
  687. src += 2 * src_wrap;
  688. dst += dst_wrap;
  689. }
  690. }
  691. /* 1x1 -> 2x2 */
  692. static void grow22(uint8_t *dst, int dst_wrap,
  693. uint8_t *src, int src_wrap,
  694. int width, int height)
  695. {
  696. int w;
  697. uint8_t *s1, *d;
  698. for(;height > 0; height--) {
  699. s1 = src;
  700. d = dst;
  701. for(w = width;w >= 4; w-=4) {
  702. d[1] = d[0] = s1[0];
  703. d[3] = d[2] = s1[1];
  704. s1 += 2;
  705. d += 4;
  706. }
  707. for(;w > 0; w--) {
  708. d[0] = s1[0];
  709. s1 ++;
  710. d++;
  711. }
  712. if (height%2)
  713. src += src_wrap;
  714. dst += dst_wrap;
  715. }
  716. }
  717. /* 1x2 -> 2x1 */
  718. static void conv411(uint8_t *dst, int dst_wrap,
  719. uint8_t *src, int src_wrap,
  720. int width, int height)
  721. {
  722. int w, c;
  723. uint8_t *s1, *s2, *d;
  724. width>>=1;
  725. for(;height > 0; height--) {
  726. s1 = src;
  727. s2 = src + src_wrap;
  728. d = dst;
  729. for(w = width;w > 0; w--) {
  730. c = (s1[0] + s2[0]) >> 1;
  731. d[0] = c;
  732. d[1] = c;
  733. s1++;
  734. s2++;
  735. d += 2;
  736. }
  737. src += src_wrap * 2;
  738. dst += dst_wrap;
  739. }
  740. }
  741. static void img_copy(uint8_t *dst, int dst_wrap,
  742. uint8_t *src, int src_wrap,
  743. int width, int height)
  744. {
  745. for(;height > 0; height--) {
  746. memcpy(dst, src, width);
  747. dst += dst_wrap;
  748. src += src_wrap;
  749. }
  750. }
/* XXX: no chroma interpolating is done */
/*
 * Template generating the conversion routines for one packed RGB pixel
 * format.  Expanding RGB_FUNCTIONS(fmt) defines:
 *   yuv420p_to_<fmt>()  - planar 4:2:0 YUV, CCIR601 levels -> packed RGB
 *   yuvj420p_to_<fmt>() - planar 4:2:0 YUV, JPEG full range -> packed RGB
 *   <fmt>_to_yuv420p()  - packed RGB -> planar 4:2:0 YUV, CCIR601 levels
 *   <fmt>_to_gray()     - packed RGB -> 8 bit luma
 *   gray_to_<fmt>()     - 8 bit luma -> packed RGB (r = g = b)
 *   pal8_to_<fmt>()     - 8 bit paletted -> packed RGB
 * RGB_IN, RGB_OUT and BPP must be defined for the format before the
 * expansion (see e.g. the rgb555 section below).
 */
#define RGB_FUNCTIONS(rgb_name) \
\
static void yuv420p_to_ ## rgb_name (AVPicture *dst, AVPicture *src, \
                                     int width, int height) \
{ \
    uint8_t *y1_ptr, *y2_ptr, *cb_ptr, *cr_ptr, *d, *d1, *d2; \
    int w, y, cb, cr, r_add, g_add, b_add, width2; \
    uint8_t *cm = cropTbl + MAX_NEG_CROP; \
    unsigned int r, g, b; \
\
    d = dst->data[0]; \
    y1_ptr = src->data[0]; \
    cb_ptr = src->data[1]; \
    cr_ptr = src->data[2]; \
    width2 = (width + 1) >> 1; \
    for(;height >= 2; height -= 2) { \
        d1 = d; \
        d2 = d + dst->linesize[0]; \
        y2_ptr = y1_ptr + src->linesize[0]; \
        for(w = width; w >= 2; w -= 2) { \
            YUV_TO_RGB1_CCIR(cb_ptr[0], cr_ptr[0]); \
            /* output 4 pixels */ \
            YUV_TO_RGB2_CCIR(r, g, b, y1_ptr[0]); \
            RGB_OUT(d1, r, g, b); \
\
            YUV_TO_RGB2_CCIR(r, g, b, y1_ptr[1]); \
            RGB_OUT(d1 + BPP, r, g, b); \
\
            YUV_TO_RGB2_CCIR(r, g, b, y2_ptr[0]); \
            RGB_OUT(d2, r, g, b); \
\
            YUV_TO_RGB2_CCIR(r, g, b, y2_ptr[1]); \
            RGB_OUT(d2 + BPP, r, g, b); \
\
            d1 += 2 * BPP; \
            d2 += 2 * BPP; \
\
            y1_ptr += 2; \
            y2_ptr += 2; \
            cb_ptr++; \
            cr_ptr++; \
        } \
        /* handle odd width */ \
        if (w) { \
            YUV_TO_RGB1_CCIR(cb_ptr[0], cr_ptr[0]); \
            YUV_TO_RGB2_CCIR(r, g, b, y1_ptr[0]); \
            RGB_OUT(d1, r, g, b); \
\
            YUV_TO_RGB2_CCIR(r, g, b, y2_ptr[0]); \
            RGB_OUT(d2, r, g, b); \
            d1 += BPP; \
            d2 += BPP; \
            y1_ptr++; \
            y2_ptr++; \
            cb_ptr++; \
            cr_ptr++; \
        } \
        d += 2 * dst->linesize[0]; \
        y1_ptr += 2 * src->linesize[0] - width; \
        cb_ptr += src->linesize[1] - width2; \
        cr_ptr += src->linesize[2] - width2; \
    } \
    /* handle odd height */ \
    if (height) { \
        d1 = d; \
        for(w = width; w >= 2; w -= 2) { \
            YUV_TO_RGB1_CCIR(cb_ptr[0], cr_ptr[0]); \
            /* output 2 pixels */ \
            YUV_TO_RGB2_CCIR(r, g, b, y1_ptr[0]); \
            RGB_OUT(d1, r, g, b); \
\
            YUV_TO_RGB2_CCIR(r, g, b, y1_ptr[1]); \
            RGB_OUT(d1 + BPP, r, g, b); \
\
            d1 += 2 * BPP; \
\
            y1_ptr += 2; \
            cb_ptr++; \
            cr_ptr++; \
        } \
        /* handle width */ \
        if (w) { \
            YUV_TO_RGB1_CCIR(cb_ptr[0], cr_ptr[0]); \
            /* output 2 pixels */ \
            YUV_TO_RGB2_CCIR(r, g, b, y1_ptr[0]); \
            RGB_OUT(d1, r, g, b); \
            d1 += BPP; \
\
            y1_ptr++; \
            cb_ptr++; \
            cr_ptr++; \
        } \
    } \
} \
\
static void yuvj420p_to_ ## rgb_name (AVPicture *dst, AVPicture *src, \
                                      int width, int height) \
{ \
    uint8_t *y1_ptr, *y2_ptr, *cb_ptr, *cr_ptr, *d, *d1, *d2; \
    int w, y, cb, cr, r_add, g_add, b_add, width2; \
    uint8_t *cm = cropTbl + MAX_NEG_CROP; \
    unsigned int r, g, b; \
\
    d = dst->data[0]; \
    y1_ptr = src->data[0]; \
    cb_ptr = src->data[1]; \
    cr_ptr = src->data[2]; \
    width2 = (width + 1) >> 1; \
    for(;height >= 2; height -= 2) { \
        d1 = d; \
        d2 = d + dst->linesize[0]; \
        y2_ptr = y1_ptr + src->linesize[0]; \
        for(w = width; w >= 2; w -= 2) { \
            YUV_TO_RGB1(cb_ptr[0], cr_ptr[0]); \
            /* output 4 pixels */ \
            YUV_TO_RGB2(r, g, b, y1_ptr[0]); \
            RGB_OUT(d1, r, g, b); \
\
            YUV_TO_RGB2(r, g, b, y1_ptr[1]); \
            RGB_OUT(d1 + BPP, r, g, b); \
\
            YUV_TO_RGB2(r, g, b, y2_ptr[0]); \
            RGB_OUT(d2, r, g, b); \
\
            YUV_TO_RGB2(r, g, b, y2_ptr[1]); \
            RGB_OUT(d2 + BPP, r, g, b); \
\
            d1 += 2 * BPP; \
            d2 += 2 * BPP; \
\
            y1_ptr += 2; \
            y2_ptr += 2; \
            cb_ptr++; \
            cr_ptr++; \
        } \
        /* handle odd width */ \
        if (w) { \
            YUV_TO_RGB1(cb_ptr[0], cr_ptr[0]); \
            YUV_TO_RGB2(r, g, b, y1_ptr[0]); \
            RGB_OUT(d1, r, g, b); \
\
            YUV_TO_RGB2(r, g, b, y2_ptr[0]); \
            RGB_OUT(d2, r, g, b); \
            d1 += BPP; \
            d2 += BPP; \
            y1_ptr++; \
            y2_ptr++; \
            cb_ptr++; \
            cr_ptr++; \
        } \
        d += 2 * dst->linesize[0]; \
        y1_ptr += 2 * src->linesize[0] - width; \
        cb_ptr += src->linesize[1] - width2; \
        cr_ptr += src->linesize[2] - width2; \
    } \
    /* handle odd height */ \
    if (height) { \
        d1 = d; \
        for(w = width; w >= 2; w -= 2) { \
            YUV_TO_RGB1(cb_ptr[0], cr_ptr[0]); \
            /* output 2 pixels */ \
            YUV_TO_RGB2(r, g, b, y1_ptr[0]); \
            RGB_OUT(d1, r, g, b); \
\
            YUV_TO_RGB2(r, g, b, y1_ptr[1]); \
            RGB_OUT(d1 + BPP, r, g, b); \
\
            d1 += 2 * BPP; \
\
            y1_ptr += 2; \
            cb_ptr++; \
            cr_ptr++; \
        } \
        /* handle width */ \
        if (w) { \
            YUV_TO_RGB1(cb_ptr[0], cr_ptr[0]); \
            /* output 2 pixels */ \
            YUV_TO_RGB2(r, g, b, y1_ptr[0]); \
            RGB_OUT(d1, r, g, b); \
            d1 += BPP; \
\
            y1_ptr++; \
            cb_ptr++; \
            cr_ptr++; \
        } \
    } \
} \
\
static void rgb_name ## _to_yuv420p(AVPicture *dst, AVPicture *src, \
                                    int width, int height) \
{ \
    int wrap, wrap3, width2; \
    int r, g, b, r1, g1, b1, w; \
    uint8_t *lum, *cb, *cr; \
    const uint8_t *p; \
\
    lum = dst->data[0]; \
    cb = dst->data[1]; \
    cr = dst->data[2]; \
\
    width2 = (width + 1) >> 1; \
    wrap = dst->linesize[0]; \
    wrap3 = src->linesize[0]; \
    p = src->data[0]; \
    for(;height>=2;height -= 2) { \
        for(w = width; w >= 2; w -= 2) { \
            /* r1/g1/b1 accumulate the 2x2 block for the chroma average */ \
            RGB_IN(r, g, b, p); \
            r1 = r; \
            g1 = g; \
            b1 = b; \
            lum[0] = RGB_TO_Y_CCIR(r, g, b); \
\
            RGB_IN(r, g, b, p + BPP); \
            r1 += r; \
            g1 += g; \
            b1 += b; \
            lum[1] = RGB_TO_Y_CCIR(r, g, b); \
            p += wrap3; \
            lum += wrap; \
\
            RGB_IN(r, g, b, p); \
            r1 += r; \
            g1 += g; \
            b1 += b; \
            lum[0] = RGB_TO_Y_CCIR(r, g, b); \
\
            RGB_IN(r, g, b, p + BPP); \
            r1 += r; \
            g1 += g; \
            b1 += b; \
            lum[1] = RGB_TO_Y_CCIR(r, g, b); \
\
            cb[0] = RGB_TO_U_CCIR(r1, g1, b1, 2); \
            cr[0] = RGB_TO_V_CCIR(r1, g1, b1, 2); \
\
            cb++; \
            cr++; \
            p += -wrap3 + 2 * BPP; \
            lum += -wrap + 2; \
        } \
        if (w) { \
            RGB_IN(r, g, b, p); \
            r1 = r; \
            g1 = g; \
            b1 = b; \
            lum[0] = RGB_TO_Y_CCIR(r, g, b); \
            p += wrap3; \
            lum += wrap; \
            RGB_IN(r, g, b, p); \
            r1 += r; \
            g1 += g; \
            b1 += b; \
            lum[0] = RGB_TO_Y_CCIR(r, g, b); \
            cb[0] = RGB_TO_U_CCIR(r1, g1, b1, 1); \
            cr[0] = RGB_TO_V_CCIR(r1, g1, b1, 1); \
            cb++; \
            cr++; \
            p += -wrap3 + BPP; \
            lum += -wrap + 1; \
        } \
        p += wrap3 + (wrap3 - width * BPP); \
        lum += wrap + (wrap - width); \
        cb += dst->linesize[1] - width2; \
        cr += dst->linesize[2] - width2; \
    } \
    /* handle odd height */ \
    if (height) { \
        for(w = width; w >= 2; w -= 2) { \
            RGB_IN(r, g, b, p); \
            r1 = r; \
            g1 = g; \
            b1 = b; \
            lum[0] = RGB_TO_Y_CCIR(r, g, b); \
\
            RGB_IN(r, g, b, p + BPP); \
            r1 += r; \
            g1 += g; \
            b1 += b; \
            lum[1] = RGB_TO_Y_CCIR(r, g, b); \
            cb[0] = RGB_TO_U_CCIR(r1, g1, b1, 1); \
            cr[0] = RGB_TO_V_CCIR(r1, g1, b1, 1); \
            cb++; \
            cr++; \
            p += 2 * BPP;\
            lum += 2;\
        } \
        if (w) { \
            RGB_IN(r, g, b, p); \
            lum[0] = RGB_TO_Y_CCIR(r, g, b); \
            cb[0] = RGB_TO_U_CCIR(r, g, b, 0); \
            cr[0] = RGB_TO_V_CCIR(r, g, b, 0); \
        } \
    } \
} \
\
static void rgb_name ## _to_gray(AVPicture *dst, AVPicture *src, \
                                 int width, int height) \
{ \
    const unsigned char *p; \
    unsigned char *q; \
    int r, g, b, dst_wrap, src_wrap; \
    int x, y; \
\
    p = src->data[0]; \
    src_wrap = src->linesize[0] - BPP * width; \
\
    q = dst->data[0]; \
    dst_wrap = dst->linesize[0] - width; \
\
    for(y=0;y<height;y++) { \
        for(x=0;x<width;x++) { \
            RGB_IN(r, g, b, p); \
            q[0] = RGB_TO_Y(r, g, b); \
            q++; \
            p += BPP; \
        } \
        p += src_wrap; \
        q += dst_wrap; \
    } \
} \
\
static void gray_to_ ## rgb_name(AVPicture *dst, AVPicture *src, \
                                 int width, int height) \
{ \
    const unsigned char *p; \
    unsigned char *q; \
    int r, dst_wrap, src_wrap; \
    int x, y; \
\
    p = src->data[0]; \
    src_wrap = src->linesize[0] - width; \
\
    q = dst->data[0]; \
    dst_wrap = dst->linesize[0] - BPP * width; \
\
    for(y=0;y<height;y++) { \
        for(x=0;x<width;x++) { \
            r = p[0]; \
            RGB_OUT(q, r, r, r); \
            q += BPP; \
            p ++; \
        } \
        p += src_wrap; \
        q += dst_wrap; \
    } \
} \
\
static void pal8_to_ ## rgb_name(AVPicture *dst, AVPicture *src, \
                                 int width, int height) \
{ \
    const unsigned char *p; \
    unsigned char *q; \
    int r, g, b, dst_wrap, src_wrap; \
    int x, y; \
    uint32_t v;\
    const uint32_t *palette;\
\
    p = src->data[0]; \
    src_wrap = src->linesize[0] - width; \
    palette = (uint32_t *)src->data[1];\
\
    q = dst->data[0]; \
    dst_wrap = dst->linesize[0] - BPP * width; \
\
    for(y=0;y<height;y++) { \
        for(x=0;x<width;x++) { \
            v = palette[p[0]];\
            r = (v >> 16) & 0xff;\
            g = (v >> 8) & 0xff;\
            b = (v) & 0xff;\
            RGB_OUT(q, r, g, b); \
            q += BPP; \
            p ++; \
        } \
        p += src_wrap; \
        q += dst_wrap; \
    } \
}
  1130. /* copy bit n to bits 0 ... n - 1 */
  1131. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1132. {
  1133. int mask;
  1134. mask = (1 << n) - 1;
  1135. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1136. }
/* rgb555 handling: 16-bit words, 5 bits per component, layout 0RRRRRGGGGGBBBBB.
   On output the top bit is set as a dummy alpha bit. */

#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (10 - 3), 3);\
    g = bitcopy_n(v >> (5 - 3), 3);\
    b = bitcopy_n(v << 3, 3);\
}

#define RGB_OUT(d, r, g, b)\
{\
    ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3) | 0x8000;\
}

#define BPP 2

/* instantiate the generic RGB <-> YUV/gray/pal8 converter set for rgb555 */
RGB_FUNCTIONS(rgb555)

#undef RGB_IN
#undef RGB_OUT
#undef BPP
/* rgb565 handling: 16-bit words, layout RRRRRGGGGGGBBBBB (5-6-5 bits). */

#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (11 - 3), 3);\
    g = bitcopy_n(v >> (5 - 2), 2);\
    b = bitcopy_n(v << 3, 3);\
}

#define RGB_OUT(d, r, g, b)\
{\
    ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
}

#define BPP 2

/* instantiate the generic converter set for rgb565 */
RGB_FUNCTIONS(rgb565)

#undef RGB_IN
#undef RGB_OUT
#undef BPP
/* bgr24 handling: 3 bytes per pixel, byte order B, G, R. */

#define RGB_IN(r, g, b, s)\
{\
    b = (s)[0];\
    g = (s)[1];\
    r = (s)[2];\
}

#define RGB_OUT(d, r, g, b)\
{\
    (d)[0] = b;\
    (d)[1] = g;\
    (d)[2] = r;\
}

#define BPP 3

/* instantiate the generic converter set for bgr24 */
RGB_FUNCTIONS(bgr24)

#undef RGB_IN
#undef RGB_OUT
#undef BPP
/* rgb24 handling: 3 bytes per pixel, byte order R, G, B.
   NOTE: no #undef here on purpose — the rgb24-specific converters that
   follow (yuv444p/yuvj444p/yuv444p conversions) reuse RGB_IN/RGB_OUT/BPP;
   the macros are undefined just before the rgba32 section. */

#define RGB_IN(r, g, b, s)\
{\
    r = (s)[0];\
    g = (s)[1];\
    b = (s)[2];\
}

#define RGB_OUT(d, r, g, b)\
{\
    (d)[0] = r;\
    (d)[1] = g;\
    (d)[2] = b;\
}

#define BPP 3

/* instantiate the generic converter set for rgb24 */
RGB_FUNCTIONS(rgb24)
/* Convert planar YUV 4:4:4 (CCIR/limited range) to packed RGB24.
   NOTE(review): the locals y, cb, cr, r_add, g_add, b_add and cm look unused,
   but they are scratch variables consumed by the YUV_TO_RGB1_CCIR /
   YUV_TO_RGB2_CCIR macros defined earlier in the file — do not remove. */
static void yuv444p_to_rgb24(AVPicture *dst, AVPicture *src,
                             int width, int height)
{
    uint8_t *y1_ptr, *cb_ptr, *cr_ptr, *d, *d1;
    int w, y, cb, cr, r_add, g_add, b_add;
    uint8_t *cm = cropTbl + MAX_NEG_CROP;  /* clamping table for the macros */
    unsigned int r, g, b;

    d = dst->data[0];
    y1_ptr = src->data[0];
    cb_ptr = src->data[1];
    cr_ptr = src->data[2];
    for(;height > 0; height --) {
        d1 = d;
        for(w = width; w > 0; w--) {
            /* one chroma sample per pixel: load chroma, then combine with luma */
            YUV_TO_RGB1_CCIR(cb_ptr[0], cr_ptr[0]);
            YUV_TO_RGB2_CCIR(r, g, b, y1_ptr[0]);
            RGB_OUT(d1, r, g, b);  /* rgb24 RGB_OUT is still in effect here */
            d1 += BPP;
            y1_ptr++;
            cb_ptr++;
            cr_ptr++;
        }
        /* advance to the next line of each plane */
        d += dst->linesize[0];
        y1_ptr += src->linesize[0] - width;
        cb_ptr += src->linesize[1] - width;
        cr_ptr += src->linesize[2] - width;
    }
}
/* Convert planar YUV 4:4:4 (JPEG/full range) to packed RGB24.
   Same structure as yuv444p_to_rgb24, but uses the full-range
   YUV_TO_RGB1/YUV_TO_RGB2 macros instead of the CCIR ones.
   The seemingly unused locals are scratch storage for those macros. */
static void yuvj444p_to_rgb24(AVPicture *dst, AVPicture *src,
                              int width, int height)
{
    uint8_t *y1_ptr, *cb_ptr, *cr_ptr, *d, *d1;
    int w, y, cb, cr, r_add, g_add, b_add;
    uint8_t *cm = cropTbl + MAX_NEG_CROP;  /* clamping table for the macros */
    unsigned int r, g, b;

    d = dst->data[0];
    y1_ptr = src->data[0];
    cb_ptr = src->data[1];
    cr_ptr = src->data[2];
    for(;height > 0; height --) {
        d1 = d;
        for(w = width; w > 0; w--) {
            YUV_TO_RGB1(cb_ptr[0], cr_ptr[0]);
            YUV_TO_RGB2(r, g, b, y1_ptr[0]);
            RGB_OUT(d1, r, g, b);
            d1 += BPP;
            y1_ptr++;
            cb_ptr++;
            cr_ptr++;
        }
        d += dst->linesize[0];
        y1_ptr += src->linesize[0] - width;
        cb_ptr += src->linesize[1] - width;
        cr_ptr += src->linesize[2] - width;
    }
}
  1260. static void rgb24_to_yuv444p(AVPicture *dst, AVPicture *src,
  1261. int width, int height)
  1262. {
  1263. int src_wrap, x, y;
  1264. int r, g, b;
  1265. uint8_t *lum, *cb, *cr;
  1266. const uint8_t *p;
  1267. lum = dst->data[0];
  1268. cb = dst->data[1];
  1269. cr = dst->data[2];
  1270. src_wrap = src->linesize[0] - width * BPP;
  1271. p = src->data[0];
  1272. for(y=0;y<height;y++) {
  1273. for(x=0;x<width;x++) {
  1274. RGB_IN(r, g, b, p);
  1275. lum[0] = RGB_TO_Y_CCIR(r, g, b);
  1276. cb[0] = RGB_TO_U_CCIR(r, g, b, 0);
  1277. cr[0] = RGB_TO_V_CCIR(r, g, b, 0);
  1278. cb++;
  1279. cr++;
  1280. lum++;
  1281. }
  1282. p += src_wrap;
  1283. lum += dst->linesize[0] - width;
  1284. cb += dst->linesize[1] - width;
  1285. cr += dst->linesize[2] - width;
  1286. }
  1287. }
/* Convert packed RGB24 to planar YUV 4:2:0 (JPEG/full range, hence the
   non-CCIR RGB_TO_Y/U/V macros). Chroma is computed by averaging 2x2
   pixel blocks; the pointer arithmetic walks two source/luma rows in a
   zig-zag, so the statement order below is significant. */
static void rgb24_to_yuvj420p(AVPicture *dst, AVPicture *src,
                              int width, int height)
{
    int wrap, wrap3, width2;
    int r, g, b, r1, g1, b1, w;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;

    lum = dst->data[0];
    cb = dst->data[1];
    cr = dst->data[2];

    width2 = (width + 1) >> 1;   /* chroma width (rounded up) */
    wrap = dst->linesize[0];     /* luma stride */
    wrap3 = src->linesize[0];    /* RGB stride */
    p = src->data[0];
    for(;height>=2;height -= 2) {
        /* process pixels two columns and two rows at a time */
        for(w = width; w >= 2; w -= 2) {
            /* top-left pixel: start the 2x2 RGB accumulators */
            RGB_IN(r, g, b, p);
            r1 = r;
            g1 = g;
            b1 = b;
            lum[0] = RGB_TO_Y(r, g, b);
            /* top-right pixel */
            RGB_IN(r, g, b, p + BPP);
            r1 += r;
            g1 += g;
            b1 += b;
            lum[1] = RGB_TO_Y(r, g, b);
            /* drop to the second row of the block */
            p += wrap3;
            lum += wrap;
            /* bottom-left pixel */
            RGB_IN(r, g, b, p);
            r1 += r;
            g1 += g;
            b1 += b;
            lum[0] = RGB_TO_Y(r, g, b);
            /* bottom-right pixel */
            RGB_IN(r, g, b, p + BPP);
            r1 += r;
            g1 += g;
            b1 += b;
            lum[1] = RGB_TO_Y(r, g, b);
            /* chroma from the 4-pixel sum (shift by 2 = divide by 4) */
            cb[0] = RGB_TO_U(r1, g1, b1, 2);
            cr[0] = RGB_TO_V(r1, g1, b1, 2);
            cb++;
            cr++;
            /* back up to the top row, two pixels to the right */
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        /* odd width: last column averages only the two vertical pixels */
        if (w) {
            RGB_IN(r, g, b, p);
            r1 = r;
            g1 = g;
            b1 = b;
            lum[0] = RGB_TO_Y(r, g, b);
            p += wrap3;
            lum += wrap;
            RGB_IN(r, g, b, p);
            r1 += r;
            g1 += g;
            b1 += b;
            lum[0] = RGB_TO_Y(r, g, b);
            cb[0] = RGB_TO_U(r1, g1, b1, 1);
            cr[0] = RGB_TO_V(r1, g1, b1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        /* skip the second row we already consumed, plus line padding */
        p += wrap3 + (wrap3 - width * BPP);
        lum += wrap + (wrap - width);
        cb += dst->linesize[1] - width2;
        cr += dst->linesize[2] - width2;
    }
    /* handle odd height */
    if (height) {
        for(w = width; w >= 2; w -= 2) {
            /* horizontal-only 2-pixel average for the final row */
            RGB_IN(r, g, b, p);
            r1 = r;
            g1 = g;
            b1 = b;
            lum[0] = RGB_TO_Y(r, g, b);
            RGB_IN(r, g, b, p + BPP);
            r1 += r;
            g1 += g;
            b1 += b;
            lum[1] = RGB_TO_Y(r, g, b);
            cb[0] = RGB_TO_U(r1, g1, b1, 1);
            cr[0] = RGB_TO_V(r1, g1, b1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        /* bottom-right corner pixel when both dimensions are odd */
        if (w) {
            RGB_IN(r, g, b, p);
            lum[0] = RGB_TO_Y(r, g, b);
            cb[0] = RGB_TO_U(r, g, b, 0);
            cr[0] = RGB_TO_V(r, g, b, 0);
        }
    }
}
  1386. static void rgb24_to_yuvj444p(AVPicture *dst, AVPicture *src,
  1387. int width, int height)
  1388. {
  1389. int src_wrap, x, y;
  1390. int r, g, b;
  1391. uint8_t *lum, *cb, *cr;
  1392. const uint8_t *p;
  1393. lum = dst->data[0];
  1394. cb = dst->data[1];
  1395. cr = dst->data[2];
  1396. src_wrap = src->linesize[0] - width * BPP;
  1397. p = src->data[0];
  1398. for(y=0;y<height;y++) {
  1399. for(x=0;x<width;x++) {
  1400. RGB_IN(r, g, b, p);
  1401. lum[0] = RGB_TO_Y(r, g, b);
  1402. cb[0] = RGB_TO_U(r, g, b, 0);
  1403. cr[0] = RGB_TO_V(r, g, b, 0);
  1404. cb++;
  1405. cr++;
  1406. lum++;
  1407. }
  1408. p += src_wrap;
  1409. lum += dst->linesize[0] - width;
  1410. cb += dst->linesize[1] - width;
  1411. cr += dst->linesize[2] - width;
  1412. }
  1413. }
/* end of the rgb24 macro scope (kept alive above for the rgb24-specific
   converters) */
#undef RGB_IN
#undef RGB_OUT
#undef BPP

/* rgba32 handling: 32-bit words, layout 0xAARRGGBB; alpha is forced to
   0xff on output. */
#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define RGB_OUT(d, r, g, b)\
{\
    ((uint32_t *)(d))[0] = (0xff << 24) | (r << 16) | (g << 8) | b;\
}

#define BPP 4

/* instantiate the generic converter set for rgba32 */
RGB_FUNCTIONS(rgba32)

#undef RGB_IN
#undef RGB_OUT
#undef BPP
  1434. static void rgb24_to_rgb565(AVPicture *dst, AVPicture *src,
  1435. int width, int height)
  1436. {
  1437. const unsigned char *p;
  1438. unsigned char *q;
  1439. int r, g, b, dst_wrap, src_wrap;
  1440. int x, y;
  1441. p = src->data[0];
  1442. src_wrap = src->linesize[0] - 3 * width;
  1443. q = dst->data[0];
  1444. dst_wrap = dst->linesize[0] - 2 * width;
  1445. for(y=0;y<height;y++) {
  1446. for(x=0;x<width;x++) {
  1447. r = p[0];
  1448. g = p[1];
  1449. b = p[2];
  1450. ((unsigned short *)q)[0] =
  1451. ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);
  1452. q += 2;
  1453. p += 3;
  1454. }
  1455. p += src_wrap;
  1456. q += dst_wrap;
  1457. }
  1458. }
/* NOTE: we also add a dummy alpha bit */
/* Convert packed RGB24 to packed RGB555; the top bit of every output
   word is set (dummy alpha). */
static void rgb24_to_rgb555(AVPicture *dst, AVPicture *src,
                            int width, int height)
{
    const unsigned char *p;
    unsigned char *q;
    int r, g, b, dst_wrap, src_wrap;
    int x, y;

    p = src->data[0];
    src_wrap = src->linesize[0] - 3 * width;  /* padding after each RGB24 row */

    q = dst->data[0];
    dst_wrap = dst->linesize[0] - 2 * width;  /* padding after each RGB555 row */

    for(y=0;y<height;y++) {
        for(x=0;x<width;x++) {
            r = p[0];
            g = p[1];
            b = p[2];
            /* truncate to 5 bits per component, set the dummy alpha bit */
            ((unsigned short *)q)[0] =
                ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3) | 0x8000;
            q += 2;
            p += 3;
        }
        p += src_wrap;
        q += dst_wrap;
    }
}
/* Expand a 1 bit/pixel monochrome image to 8 bit/pixel gray.
   Each input bit becomes 0x00 or 0xff (via the -(bit) trick: negating
   0 or 1 yields 0x00 or 0xff in a byte). xor_mask selects the polarity:
   0xff for monowhite input (0 = white), 0x00 for monoblack (0 = black). */
static void mono_to_gray(AVPicture *dst, AVPicture *src,
                         int width, int height, int xor_mask)
{
    const unsigned char *p;
    unsigned char *q;
    int v, dst_wrap, src_wrap;
    int y, w;

    p = src->data[0];
    src_wrap = src->linesize[0] - ((width + 7) >> 3);  /* source rows hold width/8 bytes, rounded up */

    q = dst->data[0];
    dst_wrap = dst->linesize[0] - width;
    for(y=0;y<height;y++) {
        w = width;
        /* unrolled: expand 8 pixels per source byte */
        while (w >= 8) {
            v = *p++ ^ xor_mask;
            q[0] = -(v >> 7);
            q[1] = -((v >> 6) & 1);
            q[2] = -((v >> 5) & 1);
            q[3] = -((v >> 4) & 1);
            q[4] = -((v >> 3) & 1);
            q[5] = -((v >> 2) & 1);
            q[6] = -((v >> 1) & 1);
            q[7] = -((v >> 0) & 1);
            w -= 8;
            q += 8;
        }
        /* remaining 1..7 pixels of the row, MSB first */
        if (w > 0) {
            v = *p++ ^ xor_mask;
            do {
                q[0] = -((v >> 7) & 1);
                q++;
                v <<= 1;
            } while (--w);
        }
        p += src_wrap;
        q += dst_wrap;
    }
}
/* monowhite (0 = white) to 8-bit gray: invert bits before expanding */
static void monowhite_to_gray(AVPicture *dst, AVPicture *src,
                              int width, int height)
{
    mono_to_gray(dst, src, width, height, 0xff);
}
/* monoblack (0 = black) to 8-bit gray: expand bits unchanged */
static void monoblack_to_gray(AVPicture *dst, AVPicture *src,
                              int width, int height)
{
    mono_to_gray(dst, src, width, height, 0x00);
}
/* Reduce an 8 bit/pixel gray image to 1 bit/pixel monochrome by keeping
   only the top bit of each gray value. xor_mask selects output polarity
   (0xff for monowhite, 0x00 for monoblack). */
static void gray_to_mono(AVPicture *dst, AVPicture *src,
                         int width, int height, int xor_mask)
{
    int n;
    const uint8_t *s;
    uint8_t *d;
    int j, b, v, n1, src_wrap, dst_wrap, y;

    s = src->data[0];
    src_wrap = src->linesize[0] - width;

    d = dst->data[0];
    dst_wrap = dst->linesize[0] - ((width + 7) >> 3);  /* output rows hold width/8 bytes, rounded up */

    for(y=0;y<height;y++) {
        n = width;
        /* pack 8 pixels into one byte, MSB first */
        while (n >= 8) {
            v = 0;
            for(j=0;j<8;j++) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
            }
            d[0] = v ^ xor_mask;
            d++;
            n -= 8;
        }
        /* remaining 1..7 pixels: pack then left-align in the final byte */
        if (n > 0) {
            n1 = n;
            v = 0;
            while (n > 0) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
                n--;
            }
            d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
            d++;
        }
        s += src_wrap;
        d += dst_wrap;
    }
}
/* 8-bit gray to monowhite (0 = white): invert packed bits */
static void gray_to_monowhite(AVPicture *dst, AVPicture *src,
                              int width, int height)
{
    gray_to_mono(dst, src, width, height, 0xff);
}
/* 8-bit gray to monoblack (0 = black): keep packed bits as-is */
static void gray_to_monoblack(AVPicture *dst, AVPicture *src,
                              int width, int height)
{
    gray_to_mono(dst, src, width, height, 0x00);
}
  1583. /* this is maybe slow, but allows for extensions */
  1584. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1585. {
  1586. return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
  1587. }
/* XXX: put jpeg quantize code instead */
/* Quantize packed RGB24 onto an 8-bit paletted image using the fixed
   6x6x6 GIF color cube, then write the 256-entry ARGB palette into
   dst->data[1] (unused tail entries are zeroed). No dithering. */
static void rgb24_to_pal8(AVPicture *dst, AVPicture *src,
                          int width, int height)
{
    const unsigned char *p;
    unsigned char *q;
    int r, g, b, dst_wrap, src_wrap;
    int x, y, i;
    static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
    uint32_t *pal;

    p = src->data[0];
    src_wrap = src->linesize[0] - 3 * width;

    q = dst->data[0];
    dst_wrap = dst->linesize[0] - width;

    for(y=0;y<height;y++) {
        for(x=0;x<width;x++) {
            r = p[0];
            g = p[1];
            b = p[2];
            q[0] = gif_clut_index(r, g, b);
            q++;
            p += 3;
        }
        p += src_wrap;
        q += dst_wrap;
    }

    /* build palette */
    pal = (uint32_t *)dst->data[1];
    i = 0;
    for(r = 0; r < 6; r++) {
        for(g = 0; g < 6; g++) {
            for(b = 0; b < 6; b++) {
                /* opaque alpha + the quantized component values */
                pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
                    (pal_value[g] << 8) | pal_value[b];
            }
        }
    }
    while (i < 256)
        pal[i++] = 0;
}
  1628. static void rgba32_to_rgb24(AVPicture *dst, AVPicture *src,
  1629. int width, int height)
  1630. {
  1631. const uint8_t *s;
  1632. uint8_t *d;
  1633. int src_wrap, dst_wrap, j, y;
  1634. unsigned int v;
  1635. s = src->data[0];
  1636. src_wrap = src->linesize[0] - width * 4;
  1637. d = dst->data[0];
  1638. dst_wrap = dst->linesize[0] - width * 3;
  1639. for(y=0;y<height;y++) {
  1640. for(j = 0;j < width; j++) {
  1641. v = *(uint32_t *)s;
  1642. s += 4;
  1643. d[0] = v >> 16;
  1644. d[1] = v >> 8;
  1645. d[2] = v;
  1646. d += 3;
  1647. }
  1648. s += src_wrap;
  1649. d += dst_wrap;
  1650. }
  1651. }
/* One cell of the src-format x dst-format dispatch table below:
   a direct conversion routine, or NULL when none exists. */
typedef struct ConvertEntry {
    void (*convert)(AVPicture *dst, AVPicture *src, int width, int height);
} ConvertEntry;
/* Add each new convertion function in this table. In order to be able
   to convert from any format to any format, the following constraints
   must be satisfied:

   - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24

   - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8

   - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGBA32

   - all PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
     PIX_FMT_RGB24.

   - PIX_FMT_422 must convert to and from PIX_FMT_422P.

   Indexed as convert_table[src_pix_fmt][dst_pix_fmt]; img_convert()
   falls back to an intermediate format when a cell is empty. */
static ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
    [PIX_FMT_YUV420P] = {
        [PIX_FMT_RGB555] = {
            .convert = yuv420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuv420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuv420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuv420p_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = yuv420p_to_rgba32
        },
    },
    [PIX_FMT_YUV422P] = {
        [PIX_FMT_YUV422] = {
            .convert = yuv422p_to_yuv422,
        },
    },
    [PIX_FMT_YUV444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuv444p_to_rgb24
        },
    },
    [PIX_FMT_YUVJ420P] = {
        [PIX_FMT_RGB555] = {
            .convert = yuvj420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuvj420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuvj420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuvj420p_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = yuvj420p_to_rgba32
        },
    },
    [PIX_FMT_YUVJ444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuvj444p_to_rgb24
        },
    },
    [PIX_FMT_YUV422] = {
        [PIX_FMT_YUV420P] = {
            .convert = yuv422_to_yuv420p,
        },
        [PIX_FMT_YUV422P] = {
            .convert = yuv422_to_yuv422p,
        },
    },
    [PIX_FMT_RGB24] = {
        [PIX_FMT_YUV420P] = {
            .convert = rgb24_to_yuv420p
        },
        [PIX_FMT_RGB565] = {
            .convert = rgb24_to_rgb565
        },
        [PIX_FMT_RGB555] = {
            .convert = rgb24_to_rgb555
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb24_to_gray
        },
        [PIX_FMT_PAL8] = {
            .convert = rgb24_to_pal8
        },
        [PIX_FMT_YUV444P] = {
            .convert = rgb24_to_yuv444p
        },
        [PIX_FMT_YUVJ420P] = {
            .convert = rgb24_to_yuvj420p
        },
        [PIX_FMT_YUVJ444P] = {
            .convert = rgb24_to_yuvj444p
        },
    },
    [PIX_FMT_RGBA32] = {
        [PIX_FMT_YUV420P] = {
            .convert = rgba32_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgba32_to_gray
        },
        [PIX_FMT_RGB24] = {
            .convert = rgba32_to_rgb24
        },
    },
    [PIX_FMT_BGR24] = {
        [PIX_FMT_YUV420P] = {
            .convert = bgr24_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = bgr24_to_gray
        },
    },
    [PIX_FMT_RGB555] = {
        [PIX_FMT_YUV420P] = {
            .convert = rgb555_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb555_to_gray
        },
    },
    [PIX_FMT_RGB565] = {
        [PIX_FMT_YUV420P] = {
            .convert = rgb565_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb565_to_gray
        },
    },
    [PIX_FMT_GRAY8] = {
        [PIX_FMT_RGB555] = {
            .convert = gray_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = gray_to_rgb565
        },
        [PIX_FMT_RGB24] = {
            .convert = gray_to_rgb24
        },
        [PIX_FMT_BGR24] = {
            .convert = gray_to_bgr24
        },
        [PIX_FMT_RGBA32] = {
            .convert = gray_to_rgba32
        },
        [PIX_FMT_MONOWHITE] = {
            .convert = gray_to_monowhite
        },
        [PIX_FMT_MONOBLACK] = {
            .convert = gray_to_monoblack
        },
    },
    [PIX_FMT_MONOWHITE] = {
        [PIX_FMT_GRAY8] = {
            .convert = monowhite_to_gray
        },
    },
    [PIX_FMT_MONOBLACK] = {
        [PIX_FMT_GRAY8] = {
            .convert = monoblack_to_gray
        },
    },
    [PIX_FMT_PAL8] = {
        [PIX_FMT_RGB555] = {
            .convert = pal8_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = pal8_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = pal8_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = pal8_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = pal8_to_rgba32
        },
    },
};
  1835. static int avpicture_alloc(AVPicture *picture,
  1836. int pix_fmt, int width, int height)
  1837. {
  1838. unsigned int size;
  1839. void *ptr;
  1840. size = avpicture_get_size(pix_fmt, width, height);
  1841. if (size < 0)
  1842. goto fail;
  1843. ptr = av_malloc(size);
  1844. if (!ptr)
  1845. goto fail;
  1846. avpicture_fill(picture, ptr, pix_fmt, width, height);
  1847. return 0;
  1848. fail:
  1849. memset(picture, 0, sizeof(AVPicture));
  1850. return -1;
  1851. }
/* Release a picture allocated with avpicture_alloc(): all planes live in
   the single buffer anchored at data[0]. */
static void avpicture_free(AVPicture *picture)
{
    av_free(picture->data[0]);
}
/* return true if yuv planar */
/* i.e. a YUV color space (CCIR or JPEG range) that is not packed */
static inline int is_yuv_planar(PixFmtInfo *ps)
{
    return (ps->color_type == FF_COLOR_YUV ||
            ps->color_type == FF_COLOR_YUV_JPEG) && !ps->is_packed;
}
/* XXX: always use linesize. Return -1 if not supported */
/* Convert src (src_pix_fmt) into dst (dst_pix_fmt), both src_width x
   src_height. Strategy, in order: plain copy for identical formats; a
   direct routine from convert_table; special-cased gray<->YUV and planar
   YUV<->YUV paths; otherwise recurse once through an intermediate format
   (YUV422P, GRAY8, YUV[J]444P, RGBA32 or RGB24).
   Returns 0 on success, -1 if the conversion is not supported. */
int img_convert(AVPicture *dst, int dst_pix_fmt,
                AVPicture *src, int src_pix_fmt,
                int src_width, int src_height)
{
    static int inited;   /* one-time lazy init flag (not thread safe) */
    int i, ret, dst_width, dst_height, int_pix_fmt;
    PixFmtInfo *src_pix, *dst_pix;
    ConvertEntry *ce;
    AVPicture tmp1, *tmp = &tmp1;

    if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
        dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
        return -1;
    if (src_width <= 0 || src_height <= 0)
        return 0;

    if (!inited) {
        inited = 1;
        img_convert_init();
    }

    dst_width = src_width;
    dst_height = src_height;

    dst_pix = &pix_fmt_info[dst_pix_fmt];
    src_pix = &pix_fmt_info[src_pix_fmt];
    if (src_pix_fmt == dst_pix_fmt) {
        /* XXX: incorrect */
        /* same format: just copy */
        for(i = 0; i < dst_pix->nb_components; i++) {
            int w, h;
            w = dst_width;
            h = dst_height;
            /* chroma planes of planar YUV are subsampled */
            if (is_yuv_planar(dst_pix) && (i == 1 || i == 2)) {
                w >>= dst_pix->x_chroma_shift;
                h >>= dst_pix->y_chroma_shift;
            }
            img_copy(dst->data[i], dst->linesize[i],
                     src->data[i], src->linesize[i],
                     w, h);
        }
        return 0;
    }

    ce = &convert_table[src_pix_fmt][dst_pix_fmt];
    if (ce->convert) {
        /* specific convertion routine */
        ce->convert(dst, src, dst_width, dst_height);
        return 0;
    }

    /* gray to YUV */
    if (is_yuv_planar(dst_pix) &&
        src_pix_fmt == PIX_FMT_GRAY8) {
        int w, h, y;
        uint8_t *d;

        if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
            /* JPEG range matches gray8: copy luma as-is */
            img_copy(dst->data[0], dst->linesize[0],
                     src->data[0], src->linesize[0],
                     dst_width, dst_height);
        } else {
            /* CCIR range: remap luma through the lookup table */
            img_apply_table(dst->data[0], dst->linesize[0],
                            src->data[0], src->linesize[0],
                            dst_width, dst_height,
                            y_jpeg_to_ccir);
        }
        /* fill U and V with 128 */
        w = dst_width;
        h = dst_height;
        w >>= dst_pix->x_chroma_shift;
        h >>= dst_pix->y_chroma_shift;
        for(i = 1; i <= 2; i++) {
            d = dst->data[i];
            for(y = 0; y< h; y++) {
                memset(d, 128, w);
                d += dst->linesize[i];
            }
        }
        return 0;
    }

    /* YUV to gray */
    if (is_yuv_planar(src_pix) &&
        dst_pix_fmt == PIX_FMT_GRAY8) {
        if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
            img_copy(dst->data[0], dst->linesize[0],
                     src->data[0], src->linesize[0],
                     dst_width, dst_height);
        } else {
            img_apply_table(dst->data[0], dst->linesize[0],
                            src->data[0], src->linesize[0],
                            dst_width, dst_height,
                            y_ccir_to_jpeg);
        }
        return 0;
    }

    /* YUV to YUV planar */
    if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
        int x_shift, y_shift, w, h;
        void (*resize_func)(uint8_t *dst, int dst_wrap,
                            uint8_t *src, int src_wrap,
                            int width, int height);

        /* compute chroma size of the smallest dimensions */
        w = dst_width;
        h = dst_height;
        if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
            w >>= dst_pix->x_chroma_shift;
        else
            w >>= src_pix->x_chroma_shift;
        if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
            h >>= dst_pix->y_chroma_shift;
        else
            h >>= src_pix->y_chroma_shift;

        /* pick a chroma resampler from the relative subsampling factors */
        x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
        y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
        if (x_shift == 0 && y_shift == 0) {
            resize_func = img_copy;
        } else if (x_shift == 0 && y_shift == 1) {
            resize_func = shrink2;
        } else if (x_shift == 1 && y_shift == 1) {
            resize_func = shrink22;
        } else if (x_shift == -1 && y_shift == -1) {
            resize_func = grow22;
        } else if (x_shift == -1 && y_shift == 1) {
            resize_func = conv411;
        } else {
            /* currently not handled */
            return -1;
        }

        img_copy(dst->data[0], dst->linesize[0],
                 src->data[0], src->linesize[0],
                 dst_width, dst_height);

        for(i = 1;i <= 2; i++)
            resize_func(dst->data[i], dst->linesize[i],
                        src->data[i], src->linesize[i],
                        dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
        /* if yuv color space conversion is needed, we do it here on
           the destination image */
        if (dst_pix->color_type != src_pix->color_type) {
            const uint8_t *y_table, *c_table;
            if (dst_pix->color_type == FF_COLOR_YUV) {
                y_table = y_jpeg_to_ccir;
                c_table = c_jpeg_to_ccir;
            } else {
                y_table = y_ccir_to_jpeg;
                c_table = c_ccir_to_jpeg;
            }
            img_apply_table(dst->data[0], dst->linesize[0],
                            dst->data[0], dst->linesize[0],
                            dst_width, dst_height,
                            y_table);

            for(i = 1;i <= 2; i++)
                img_apply_table(dst->data[i], dst->linesize[i],
                                dst->data[i], dst->linesize[i],
                                dst_width>>dst_pix->x_chroma_shift,
                                dst_height>>dst_pix->y_chroma_shift,
                                c_table);
        }
        return 0;
    }

    /* try to use an intermediate format */
    if (src_pix_fmt == PIX_FMT_YUV422 ||
        dst_pix_fmt == PIX_FMT_YUV422) {
        /* specific case: convert to YUV422P first */
        int_pix_fmt = PIX_FMT_YUV422P;
    } else if ((src_pix->color_type == FF_COLOR_GRAY &&
                src_pix_fmt != PIX_FMT_GRAY8) ||
               (dst_pix->color_type == FF_COLOR_GRAY &&
                dst_pix_fmt != PIX_FMT_GRAY8)) {
        /* gray8 is the normalized format */
        int_pix_fmt = PIX_FMT_GRAY8;
    } else if ((is_yuv_planar(src_pix) &&
                src_pix_fmt != PIX_FMT_YUV444P &&
                src_pix_fmt != PIX_FMT_YUVJ444P)) {
        /* yuv444 is the normalized format */
        if (src_pix->color_type == FF_COLOR_YUV_JPEG)
            int_pix_fmt = PIX_FMT_YUVJ444P;
        else
            int_pix_fmt = PIX_FMT_YUV444P;
    } else if ((is_yuv_planar(dst_pix) &&
                dst_pix_fmt != PIX_FMT_YUV444P &&
                dst_pix_fmt != PIX_FMT_YUVJ444P)) {
        /* yuv444 is the normalized format */
        if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
            int_pix_fmt = PIX_FMT_YUVJ444P;
        else
            int_pix_fmt = PIX_FMT_YUV444P;
    } else {
        /* the two formats are rgb or gray8 or yuv[j]444p */
        if (src_pix->is_alpha && dst_pix->is_alpha)
            int_pix_fmt = PIX_FMT_RGBA32;
        else
            int_pix_fmt = PIX_FMT_RGB24;
    }
    /* two-step conversion: src -> intermediate -> dst (recursive calls) */
    if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
        return -1;
    ret = -1;
    if (img_convert(tmp, int_pix_fmt,
                    src, src_pix_fmt, src_width, src_height) < 0)
        goto fail1;
    if (img_convert(dst, dst_pix_fmt,
                    tmp, int_pix_fmt, dst_width, dst_height) < 0)
        goto fail1;
    ret = 0;
 fail1:
    avpicture_free(tmp);
    return ret;
}
#ifdef HAVE_MMX

/* MMX kernels computing 4 output pixels of the [-1 4 2 4 -1]/8 vertical
   deinterlacing filter (see deinterlace_line below). mm6 must hold the
   rounder (4,4,4,4) and mm7 must be zero before use.
   The INPLACE variant additionally saves lum_m2 into lum_m4 and writes
   the filtered result back into lum_m2. */

#define DEINT_INPLACE_LINE_LUM \
                    movd_m2r(lum_m4[0],mm0);\
                    movd_m2r(lum_m3[0],mm1);\
                    movd_m2r(lum_m2[0],mm2);\
                    movd_m2r(lum_m1[0],mm3);\
                    movd_m2r(lum[0],mm4);\
                    punpcklbw_r2r(mm7,mm0);\
                    movd_r2m(mm2,lum_m4[0]);\
                    punpcklbw_r2r(mm7,mm1);\
                    punpcklbw_r2r(mm7,mm2);\
                    punpcklbw_r2r(mm7,mm3);\
                    punpcklbw_r2r(mm7,mm4);\
                    paddw_r2r(mm3,mm1);\
                    psllw_i2r(1,mm2);\
                    paddw_r2r(mm4,mm0);\
                    psllw_i2r(2,mm1);\
                    paddw_r2r(mm6,mm2);\
                    paddw_r2r(mm2,mm1);\
                    psubusw_r2r(mm0,mm1);\
                    psrlw_i2r(3,mm1);\
                    packuswb_r2r(mm7,mm1);\
                    movd_r2m(mm1,lum_m2[0]);

#define DEINT_LINE_LUM \
                    movd_m2r(lum_m4[0],mm0);\
                    movd_m2r(lum_m3[0],mm1);\
                    movd_m2r(lum_m2[0],mm2);\
                    movd_m2r(lum_m1[0],mm3);\
                    movd_m2r(lum[0],mm4);\
                    punpcklbw_r2r(mm7,mm0);\
                    punpcklbw_r2r(mm7,mm1);\
                    punpcklbw_r2r(mm7,mm2);\
                    punpcklbw_r2r(mm7,mm3);\
                    punpcklbw_r2r(mm7,mm4);\
                    paddw_r2r(mm3,mm1);\
                    psllw_i2r(1,mm2);\
                    paddw_r2r(mm4,mm0);\
                    psllw_i2r(2,mm1);\
                    paddw_r2r(mm6,mm2);\
                    paddw_r2r(mm2,mm1);\
                    psubusw_r2r(mm0,mm1);\
                    psrlw_i2r(3,mm1);\
                    packuswb_r2r(mm7,mm1);\
                    movd_r2m(mm1,dst[0]);
#endif
/* filter parameters: [-1 4 2 4 -1] // 8 */
/* Filter one output line from 5 input lines (taps as above, rounded).
   NOTE(review): the MMX path processes 4 pixels per iteration and has no
   scalar tail, so up to 3 trailing pixels are left unwritten when size
   is not a multiple of 4 — presumably widths are multiples of 4 here;
   confirm against callers. */
static void deinterlace_line(uint8_t *dst, uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
                             int size)
{
#ifndef HAVE_MMX
    uint8_t *cm = cropTbl + MAX_NEG_CROP;  /* clamps the sum to 0..255 */
    int sum;

    for(;size > 0;size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        dst[0] = cm[(sum + 4) >> 3];   /* +4 rounds before dividing by 8 */
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
        dst++;
    }
#else
    {
        /* set up mm6 = rounder (4,4,4,4), mm7 = 0 for DEINT_LINE_LUM */
        mmx_t rounder;
        rounder.uw[0]=4;
        rounder.uw[1]=4;
        rounder.uw[2]=4;
        rounder.uw[3]=4;
        pxor_r2r(mm7,mm7);
        movq_m2r(rounder,mm6);
    }
    for (;size > 3; size-=4) {
        DEINT_LINE_LUM
        lum_m4+=4;
        lum_m3+=4;
        lum_m2+=4;
        lum_m1+=4;
        lum+=4;
        dst+=4;
    }
#endif
}
/* Same [-1 4 2 4 -1]/8 filter as deinterlace_line, but operating in
   place: the original lum_m2 line is saved into lum_m4 (the caller's
   rotating line buffer) before lum_m2 is overwritten with the result.
   Same MMX caveat: no scalar tail for the last size%4 pixels. */
static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
                                     int size)
{
#ifndef HAVE_MMX
    uint8_t *cm = cropTbl + MAX_NEG_CROP;
    int sum;

    for(;size > 0;size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        lum_m4[0]=lum_m2[0];   /* preserve the unfiltered line before writing */
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        lum_m2[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
    }
#else
    {
        /* set up mm6 = rounder (4,4,4,4), mm7 = 0 for the macro */
        mmx_t rounder;
        rounder.uw[0]=4;
        rounder.uw[1]=4;
        rounder.uw[2]=4;
        rounder.uw[3]=4;
        pxor_r2r(mm7,mm7);
        movq_m2r(rounder,mm6);
    }
    for (;size > 3; size-=4) {
        DEINT_INPLACE_LINE_LUM
        lum_m4+=4;
        lum_m3+=4;
        lum_m2+=4;
        lum_m1+=4;
        lum+=4;
    }
#endif
}
  2191. /* deinterlacing : 2 temporal taps, 3 spatial taps linear filter. The
  2192. top field is copied as is, but the bottom field is deinterlaced
  2193. against the top field. */
  2194. static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
  2195. uint8_t *src1, int src_wrap,
  2196. int width, int height)
  2197. {
  2198. uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
  2199. int y;
  2200. src_m2 = src1;
  2201. src_m1 = src1;
  2202. src_0=&src_m1[src_wrap];
  2203. src_p1=&src_0[src_wrap];
  2204. src_p2=&src_p1[src_wrap];
  2205. for(y=0;y<(height-2);y+=2) {
  2206. memcpy(dst,src_m1,width);
  2207. dst += dst_wrap;
  2208. deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
  2209. src_m2 = src_0;
  2210. src_m1 = src_p1;
  2211. src_0 = src_p2;
  2212. src_p1 += 2*src_wrap;
  2213. src_p2 += 2*src_wrap;
  2214. dst += dst_wrap;
  2215. }
  2216. memcpy(dst,src_m1,width);
  2217. dst += dst_wrap;
  2218. /* do last line */
  2219. deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
  2220. }
  2221. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  2222. int width, int height)
  2223. {
  2224. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  2225. int y;
  2226. uint8_t *buf;
  2227. buf = (uint8_t*)av_malloc(width);
  2228. src_m1 = src1;
  2229. memcpy(buf,src_m1,width);
  2230. src_0=&src_m1[src_wrap];
  2231. src_p1=&src_0[src_wrap];
  2232. src_p2=&src_p1[src_wrap];
  2233. for(y=0;y<(height-2);y+=2) {
  2234. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  2235. src_m1 = src_p1;
  2236. src_0 = src_p2;
  2237. src_p1 += 2*src_wrap;
  2238. src_p2 += 2*src_wrap;
  2239. }
  2240. /* do last line */
  2241. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  2242. av_free(buf);
  2243. }
  2244. /* deinterlace - if not supported return -1 */
  2245. int avpicture_deinterlace(AVPicture *dst, AVPicture *src,
  2246. int pix_fmt, int width, int height)
  2247. {
  2248. int i;
  2249. if (pix_fmt != PIX_FMT_YUV420P &&
  2250. pix_fmt != PIX_FMT_YUV422P &&
  2251. pix_fmt != PIX_FMT_YUV444P)
  2252. return -1;
  2253. if ((width & 3) != 0 || (height & 3) != 0)
  2254. return -1;
  2255. for(i=0;i<3;i++) {
  2256. if (i == 1) {
  2257. switch(pix_fmt) {
  2258. case PIX_FMT_YUV420P:
  2259. width >>= 1;
  2260. height >>= 1;
  2261. break;
  2262. case PIX_FMT_YUV422P:
  2263. width >>= 1;
  2264. break;
  2265. default:
  2266. break;
  2267. }
  2268. }
  2269. if (src == dst) {
  2270. deinterlace_bottom_field_inplace(src->data[i], src->linesize[i],
  2271. width, height);
  2272. } else {
  2273. deinterlace_bottom_field(dst->data[i],dst->linesize[i],
  2274. src->data[i], src->linesize[i],
  2275. width, height);
  2276. }
  2277. }
  2278. #ifdef HAVE_MMX
  2279. emms();
  2280. #endif
  2281. return 0;
  2282. }
  2283. #undef FIX