  1. /*
2. * Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
  4. *
  5. * This library is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU Lesser General Public
  7. * License as published by the Free Software Foundation; either
  8. * version 2 of the License, or (at your option) any later version.
  9. *
  10. * This library is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * Lesser General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU Lesser General Public
  16. * License along with this library; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. /**
  20. * @file imgconvert.c
21. * Misc image conversion routines.
  22. */
  23. /* TODO:
  24. * - write 'ffimg' program to test all the image related stuff
25. * - move the whole API to a slice-based system
  26. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  27. */
  28. #include "avcodec.h"
  29. #include "dsputil.h"
  30. #ifdef USE_FASTMEMCPY
  31. #include "fastmemcpy.h"
  32. #endif
  33. #ifdef HAVE_MMX
  34. #include "i386/mmx.h"
  35. #endif
  36. #define xglue(x, y) x ## y
  37. #define glue(x, y) xglue(x, y)
  38. #define FF_COLOR_RGB 0 /* RGB color space */
  39. #define FF_COLOR_GRAY 1 /* gray color space */
  40. #define FF_COLOR_YUV 2 /* YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
  41. #define FF_COLOR_YUV_JPEG 3 /* YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
  42. #define FF_PIXEL_PLANAR 0 /* each channel has one component in AVPicture */
43. #define FF_PIXEL_PACKED 1 /* only one component containing all the channels */
44. #define FF_PIXEL_PALETTE 2 /* one component containing indexes for a palette */
  45. typedef struct PixFmtInfo {
  46. const char *name;
  47. uint8_t nb_channels; /* number of channels (including alpha) */
  48. uint8_t color_type; /* color type (see FF_COLOR_xxx constants) */
  49. uint8_t pixel_type; /* pixel storage type (see FF_PIXEL_xxx constants) */
  50. uint8_t is_alpha : 1; /* true if alpha can be specified */
  51. uint8_t x_chroma_shift; /* X chroma subsampling factor is 2 ^ shift */
  52. uint8_t y_chroma_shift; /* Y chroma subsampling factor is 2 ^ shift */
  53. uint8_t depth; /* bit depth of the color components */
  54. } PixFmtInfo;
  55. /* this table gives more information about formats */
  56. static PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
  57. /* YUV formats */
  58. [PIX_FMT_YUV420P] = {
  59. .name = "yuv420p",
  60. .nb_channels = 3,
  61. .color_type = FF_COLOR_YUV,
  62. .pixel_type = FF_PIXEL_PLANAR,
  63. .depth = 8,
  64. .x_chroma_shift = 1, .y_chroma_shift = 1,
  65. },
  66. [PIX_FMT_YUV422P] = {
  67. .name = "yuv422p",
  68. .nb_channels = 3,
  69. .color_type = FF_COLOR_YUV,
  70. .pixel_type = FF_PIXEL_PLANAR,
  71. .depth = 8,
  72. .x_chroma_shift = 1, .y_chroma_shift = 0,
  73. },
  74. [PIX_FMT_YUV444P] = {
  75. .name = "yuv444p",
  76. .nb_channels = 3,
  77. .color_type = FF_COLOR_YUV,
  78. .pixel_type = FF_PIXEL_PLANAR,
  79. .depth = 8,
  80. .x_chroma_shift = 0, .y_chroma_shift = 0,
  81. },
  82. [PIX_FMT_YUV422] = {
  83. .name = "yuv422",
  84. .nb_channels = 1,
  85. .color_type = FF_COLOR_YUV,
  86. .pixel_type = FF_PIXEL_PACKED,
  87. .depth = 8,
  88. .x_chroma_shift = 1, .y_chroma_shift = 0,
  89. },
  90. [PIX_FMT_YUV410P] = {
  91. .name = "yuv410p",
  92. .nb_channels = 3,
  93. .color_type = FF_COLOR_YUV,
  94. .pixel_type = FF_PIXEL_PLANAR,
  95. .depth = 8,
  96. .x_chroma_shift = 2, .y_chroma_shift = 2,
  97. },
  98. [PIX_FMT_YUV411P] = {
  99. .name = "yuv411p",
  100. .nb_channels = 3,
  101. .color_type = FF_COLOR_YUV,
  102. .pixel_type = FF_PIXEL_PLANAR,
  103. .depth = 8,
  104. .x_chroma_shift = 2, .y_chroma_shift = 0,
  105. },
  106. /* JPEG YUV */
  107. [PIX_FMT_YUVJ420P] = {
  108. .name = "yuvj420p",
  109. .nb_channels = 3,
  110. .color_type = FF_COLOR_YUV_JPEG,
  111. .pixel_type = FF_PIXEL_PLANAR,
  112. .depth = 8,
  113. .x_chroma_shift = 1, .y_chroma_shift = 1,
  114. },
  115. [PIX_FMT_YUVJ422P] = {
  116. .name = "yuvj422p",
  117. .nb_channels = 3,
  118. .color_type = FF_COLOR_YUV_JPEG,
  119. .pixel_type = FF_PIXEL_PLANAR,
  120. .depth = 8,
  121. .x_chroma_shift = 1, .y_chroma_shift = 0,
  122. },
  123. [PIX_FMT_YUVJ444P] = {
  124. .name = "yuvj444p",
  125. .nb_channels = 3,
  126. .color_type = FF_COLOR_YUV_JPEG,
  127. .pixel_type = FF_PIXEL_PLANAR,
  128. .depth = 8,
  129. .x_chroma_shift = 0, .y_chroma_shift = 0,
  130. },
  131. /* RGB formats */
  132. [PIX_FMT_RGB24] = {
  133. .name = "rgb24",
  134. .nb_channels = 3,
  135. .color_type = FF_COLOR_RGB,
  136. .pixel_type = FF_PIXEL_PACKED,
  137. .depth = 8,
  138. },
  139. [PIX_FMT_BGR24] = {
  140. .name = "bgr24",
  141. .nb_channels = 3,
  142. .color_type = FF_COLOR_RGB,
  143. .pixel_type = FF_PIXEL_PACKED,
  144. .depth = 8,
  145. },
  146. [PIX_FMT_RGBA32] = {
  147. .name = "rgba32",
  148. .nb_channels = 4, .is_alpha = 1,
  149. .color_type = FF_COLOR_RGB,
  150. .pixel_type = FF_PIXEL_PACKED,
  151. .depth = 8,
  152. },
  153. [PIX_FMT_RGB565] = {
  154. .name = "rgb565",
  155. .nb_channels = 3,
  156. .color_type = FF_COLOR_RGB,
  157. .pixel_type = FF_PIXEL_PACKED,
  158. .depth = 5,
  159. },
  160. [PIX_FMT_RGB555] = {
  161. .name = "rgb555",
  162. .nb_channels = 4, .is_alpha = 1,
  163. .color_type = FF_COLOR_RGB,
  164. .pixel_type = FF_PIXEL_PACKED,
  165. .depth = 5,
  166. },
  167. /* gray / mono formats */
  168. [PIX_FMT_GRAY8] = {
  169. .name = "gray",
  170. .nb_channels = 1,
  171. .color_type = FF_COLOR_GRAY,
  172. .pixel_type = FF_PIXEL_PLANAR,
  173. .depth = 8,
  174. },
  175. [PIX_FMT_MONOWHITE] = {
  176. .name = "monow",
  177. .nb_channels = 1,
  178. .color_type = FF_COLOR_GRAY,
  179. .pixel_type = FF_PIXEL_PLANAR,
  180. .depth = 1,
  181. },
  182. [PIX_FMT_MONOBLACK] = {
  183. .name = "monob",
  184. .nb_channels = 1,
  185. .color_type = FF_COLOR_GRAY,
  186. .pixel_type = FF_PIXEL_PLANAR,
  187. .depth = 1,
  188. },
  189. /* paletted formats */
  190. [PIX_FMT_PAL8] = {
  191. .name = "pal8",
  192. .nb_channels = 4, .is_alpha = 1,
  193. .color_type = FF_COLOR_RGB,
  194. .pixel_type = FF_PIXEL_PALETTE,
  195. .depth = 8,
  196. },
  197. };
  198. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  199. {
  200. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  201. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  202. }
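/* Illustrative note (not in the original source): for PIX_FMT_YUV420P the
 * shifts returned here are h_shift = v_shift = 1, so each chroma plane holds
 * (width >> 1) x (height >> 1) samples; for PIX_FMT_YUV411P they are
 * h_shift = 2, v_shift = 0. */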
  203. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  204. {
  205. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  206. return "???";
  207. else
  208. return pix_fmt_info[pix_fmt].name;
  209. }
210. /* Picture fields are filled with 'ptr' addresses. Also returns the size */
  211. int avpicture_fill(AVPicture *picture, uint8_t *ptr,
  212. int pix_fmt, int width, int height)
  213. {
  214. int size, w2, h2, size2;
  215. PixFmtInfo *pinfo;
  216. pinfo = &pix_fmt_info[pix_fmt];
  217. size = width * height;
  218. switch(pix_fmt) {
  219. case PIX_FMT_YUV420P:
  220. case PIX_FMT_YUV422P:
  221. case PIX_FMT_YUV444P:
  222. case PIX_FMT_YUV410P:
  223. case PIX_FMT_YUV411P:
  224. case PIX_FMT_YUVJ420P:
  225. case PIX_FMT_YUVJ422P:
  226. case PIX_FMT_YUVJ444P:
  227. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  228. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  229. size2 = w2 * h2;
  230. picture->data[0] = ptr;
  231. picture->data[1] = picture->data[0] + size;
  232. picture->data[2] = picture->data[1] + size2;
  233. picture->linesize[0] = width;
  234. picture->linesize[1] = w2;
  235. picture->linesize[2] = w2;
  236. return size + 2 * size2;
  237. case PIX_FMT_RGB24:
  238. case PIX_FMT_BGR24:
  239. picture->data[0] = ptr;
  240. picture->data[1] = NULL;
  241. picture->data[2] = NULL;
  242. picture->linesize[0] = width * 3;
  243. return size * 3;
  244. case PIX_FMT_RGBA32:
  245. picture->data[0] = ptr;
  246. picture->data[1] = NULL;
  247. picture->data[2] = NULL;
  248. picture->linesize[0] = width * 4;
  249. return size * 4;
  250. case PIX_FMT_RGB555:
  251. case PIX_FMT_RGB565:
  252. case PIX_FMT_YUV422:
  253. picture->data[0] = ptr;
  254. picture->data[1] = NULL;
  255. picture->data[2] = NULL;
  256. picture->linesize[0] = width * 2;
  257. return size * 2;
  258. case PIX_FMT_GRAY8:
  259. picture->data[0] = ptr;
  260. picture->data[1] = NULL;
  261. picture->data[2] = NULL;
  262. picture->linesize[0] = width;
  263. return size;
  264. case PIX_FMT_MONOWHITE:
  265. case PIX_FMT_MONOBLACK:
  266. picture->data[0] = ptr;
  267. picture->data[1] = NULL;
  268. picture->data[2] = NULL;
  269. picture->linesize[0] = (width + 7) >> 3;
  270. return picture->linesize[0] * height;
  271. case PIX_FMT_PAL8:
  272. size2 = (size + 3) & ~3;
  273. picture->data[0] = ptr;
  274. picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
  275. picture->data[2] = NULL;
  276. picture->linesize[0] = width;
  277. picture->linesize[1] = 4;
  278. return size2 + 256 * 4;
  279. default:
  280. picture->data[0] = NULL;
  281. picture->data[1] = NULL;
  282. picture->data[2] = NULL;
  283. picture->data[3] = NULL;
  284. return -1;
  285. }
  286. }
  287. int avpicture_get_size(int pix_fmt, int width, int height)
  288. {
  289. AVPicture dummy_pict;
  290. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  291. }
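/* Minimal usage sketch of the avpicture_get_size()/avpicture_fill() pairing
 * (illustrative only, not part of the original file; the helper name
 * example_alloc_picture is hypothetical). avpicture_alloc() further down in
 * this file does essentially the same thing. */
#if 0
static int example_alloc_picture(AVPicture *pic, int pix_fmt, int w, int h)
{
    int size = avpicture_get_size(pix_fmt, w, h);
    uint8_t *buf;

    if (size < 0)
        return -1;
    buf = av_malloc(size);
    if (!buf)
        return -1;
    /* data[] and linesize[] now describe the planes laid out inside 'buf' */
    avpicture_fill(pic, buf, pix_fmt, w, h);
    return size;
}
#endif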
  292. /**
293. * Compute the loss when converting from one pixel format to another.
  294. */
  295. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  296. int has_alpha)
  297. {
  298. const PixFmtInfo *pf, *ps;
  299. int loss;
  300. ps = &pix_fmt_info[src_pix_fmt];
  301. pf = &pix_fmt_info[dst_pix_fmt];
  302. /* compute loss */
  303. loss = 0;
  305. if (pf->depth < ps->depth ||
  306. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  307. loss |= FF_LOSS_DEPTH;
  308. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  309. pf->y_chroma_shift > ps->y_chroma_shift)
  310. loss |= FF_LOSS_RESOLUTION;
  311. switch(pf->color_type) {
  312. case FF_COLOR_RGB:
  313. if (ps->color_type != FF_COLOR_RGB &&
  314. ps->color_type != FF_COLOR_GRAY)
  315. loss |= FF_LOSS_COLORSPACE;
  316. break;
  317. case FF_COLOR_GRAY:
  318. if (ps->color_type != FF_COLOR_GRAY)
  319. loss |= FF_LOSS_COLORSPACE;
  320. break;
  321. case FF_COLOR_YUV:
  322. if (ps->color_type != FF_COLOR_YUV)
  323. loss |= FF_LOSS_COLORSPACE;
  324. break;
  325. case FF_COLOR_YUV_JPEG:
  326. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  327. ps->color_type != FF_COLOR_YUV &&
  328. ps->color_type != FF_COLOR_GRAY)
  329. loss |= FF_LOSS_COLORSPACE;
  330. break;
  331. default:
  332. /* fail safe test */
  333. if (ps->color_type != pf->color_type)
  334. loss |= FF_LOSS_COLORSPACE;
  335. break;
  336. }
  337. if (pf->color_type == FF_COLOR_GRAY &&
  338. ps->color_type != FF_COLOR_GRAY)
  339. loss |= FF_LOSS_CHROMA;
  340. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  341. loss |= FF_LOSS_ALPHA;
  342. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  343. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  344. loss |= FF_LOSS_COLORQUANT;
  345. return loss;
  346. }
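/* Illustrative example (not in the original source): converting from
 * PIX_FMT_RGB24 to PIX_FMT_YUV420P reports FF_LOSS_RESOLUTION (the chroma is
 * subsampled) and FF_LOSS_COLORSPACE (RGB -> YUV); the reverse direction
 * flags only FF_LOSS_COLORSPACE. */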
  347. static int avg_bits_per_pixel(int pix_fmt)
  348. {
  349. int bits;
  350. const PixFmtInfo *pf;
  351. pf = &pix_fmt_info[pix_fmt];
  352. switch(pf->pixel_type) {
  353. case FF_PIXEL_PACKED:
  354. switch(pix_fmt) {
  355. case PIX_FMT_YUV422:
  356. case PIX_FMT_RGB565:
  357. case PIX_FMT_RGB555:
  358. bits = 16;
  359. break;
  360. default:
  361. bits = pf->depth * pf->nb_channels;
  362. break;
  363. }
  364. break;
  365. case FF_PIXEL_PLANAR:
  366. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  367. bits = pf->depth * pf->nb_channels;
  368. } else {
  369. bits = pf->depth + ((2 * pf->depth) >>
  370. (pf->x_chroma_shift + pf->y_chroma_shift));
  371. }
  372. break;
  373. case FF_PIXEL_PALETTE:
  374. bits = 8;
  375. break;
  376. default:
  377. bits = -1;
  378. break;
  379. }
  380. return bits;
  381. }
  382. static int avcodec_find_best_pix_fmt1(int pix_fmt_mask,
  383. int src_pix_fmt,
  384. int has_alpha,
  385. int loss_mask)
  386. {
  387. int dist, i, loss, min_dist, dst_pix_fmt;
  388. /* find exact color match with smallest size */
  389. dst_pix_fmt = -1;
  390. min_dist = 0x7fffffff;
  391. for(i = 0;i < PIX_FMT_NB; i++) {
  392. if (pix_fmt_mask & (1 << i)) {
  393. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  394. if (loss == 0) {
  395. dist = avg_bits_per_pixel(i);
  396. if (dist < min_dist) {
  397. min_dist = dist;
  398. dst_pix_fmt = i;
  399. }
  400. }
  401. }
  402. }
  403. return dst_pix_fmt;
  404. }
  405. /**
406. * Find the best pixel format to convert to. Returns -1 if none is found.
  407. */
  408. int avcodec_find_best_pix_fmt(int pix_fmt_mask, int src_pix_fmt,
  409. int has_alpha, int *loss_ptr)
  410. {
  411. int dst_pix_fmt, loss_mask, i;
  412. static const int loss_mask_order[] = {
  413. ~0, /* no loss first */
  414. ~FF_LOSS_ALPHA,
  415. ~FF_LOSS_RESOLUTION,
  416. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  417. ~FF_LOSS_COLORQUANT,
  418. ~FF_LOSS_DEPTH,
  419. 0,
  420. };
  421. /* try with successive loss */
  422. i = 0;
  423. for(;;) {
  424. loss_mask = loss_mask_order[i++];
  425. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  426. has_alpha, loss_mask);
  427. if (dst_pix_fmt >= 0)
  428. goto found;
  429. if (loss_mask == 0)
  430. break;
  431. }
  432. return -1;
  433. found:
  434. if (loss_ptr)
  435. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  436. return dst_pix_fmt;
  437. }
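/* Illustrative example (not in the original source): if a codec only accepts
 * YUV422P and RGB24 and the decoder produces YUV420P, the caller would do
 *
 *     pix_fmt_mask = (1 << PIX_FMT_YUV422P) | (1 << PIX_FMT_RGB24);
 *     dst_pix_fmt = avcodec_find_best_pix_fmt(pix_fmt_mask, PIX_FMT_YUV420P,
 *                                             0, &loss);
 *
 * which returns PIX_FMT_YUV422P with loss == 0 (the chroma is only upsampled),
 * while RGB24 would have been flagged with FF_LOSS_COLORSPACE. */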
  438. static void img_copy_plane(uint8_t *dst, int dst_wrap,
  439. const uint8_t *src, int src_wrap,
  440. int width, int height)
  441. {
  442. for(;height > 0; height--) {
  443. memcpy(dst, src, width);
  444. dst += dst_wrap;
  445. src += src_wrap;
  446. }
  447. }
  448. /**
  449. * Copy image 'src' to 'dst'.
  450. */
  451. void img_copy(AVPicture *dst, AVPicture *src,
  452. int pix_fmt, int width, int height)
  453. {
  454. int bwidth, bits, i;
455. PixFmtInfo *pf;
  456. pf = &pix_fmt_info[pix_fmt];
  457. switch(pf->pixel_type) {
  458. case FF_PIXEL_PACKED:
  459. switch(pix_fmt) {
  460. case PIX_FMT_YUV422:
  461. case PIX_FMT_RGB565:
  462. case PIX_FMT_RGB555:
  463. bits = 16;
  464. break;
  465. default:
  466. bits = pf->depth * pf->nb_channels;
  467. break;
  468. }
  469. bwidth = (width * bits + 7) >> 3;
  470. img_copy_plane(dst->data[0], dst->linesize[0],
  471. src->data[0], src->linesize[0],
  472. bwidth, height);
  473. break;
  474. case FF_PIXEL_PLANAR:
  475. for(i = 0; i < pf->nb_channels; i++) {
  476. int w, h;
  477. w = width;
  478. h = height;
  479. if (i == 1 || i == 2) {
  480. w >>= pf->x_chroma_shift;
  481. h >>= pf->y_chroma_shift;
  482. }
  483. bwidth = (w * pf->depth + 7) >> 3;
  484. img_copy_plane(dst->data[i], dst->linesize[i],
  485. src->data[i], src->linesize[i],
  486. bwidth, h);
  487. }
  488. break;
  489. case FF_PIXEL_PALETTE:
  490. img_copy_plane(dst->data[0], dst->linesize[0],
  491. src->data[0], src->linesize[0],
  492. width, height);
  493. /* copy the palette */
  494. img_copy_plane(dst->data[1], dst->linesize[1],
  495. src->data[1], src->linesize[1],
  496. 4, 256);
  497. break;
  498. }
  499. }
500. /* XXX: totally non-optimized */
  501. static void yuv422_to_yuv420p(AVPicture *dst, AVPicture *src,
  502. int width, int height)
  503. {
  504. const uint8_t *p, *p1;
  505. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  506. int x;
  507. p1 = src->data[0];
  508. lum1 = dst->data[0];
  509. cb1 = dst->data[1];
  510. cr1 = dst->data[2];
  511. for(;height >= 2; height -= 2) {
  512. p = p1;
  513. lum = lum1;
  514. cb = cb1;
  515. cr = cr1;
  516. for(x=0;x<width;x+=2) {
  517. lum[0] = p[0];
  518. cb[0] = p[1];
  519. lum[1] = p[2];
  520. cr[0] = p[3];
  521. p += 4;
  522. lum += 2;
  523. cb++;
  524. cr++;
  525. }
  526. p1 += src->linesize[0];
  527. lum1 += dst->linesize[0];
  528. p = p1;
  529. lum = lum1;
  530. for(x=0;x<width;x+=2) {
  531. lum[0] = p[0];
  532. lum[1] = p[2];
  533. p += 4;
  534. lum += 2;
  535. }
  536. p1 += src->linesize[0];
  537. lum1 += dst->linesize[0];
  538. cb1 += dst->linesize[1];
  539. cr1 += dst->linesize[2];
  540. }
  541. }
  542. static void yuv422_to_yuv422p(AVPicture *dst, AVPicture *src,
  543. int width, int height)
  544. {
  545. const uint8_t *p, *p1;
  546. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  547. int w;
  548. p1 = src->data[0];
  549. lum1 = dst->data[0];
  550. cb1 = dst->data[1];
  551. cr1 = dst->data[2];
  552. for(;height > 0; height--) {
  553. p = p1;
  554. lum = lum1;
  555. cb = cb1;
  556. cr = cr1;
  557. for(w = width; w >= 2; w -= 2) {
  558. lum[0] = p[0];
  559. cb[0] = p[1];
  560. lum[1] = p[2];
  561. cr[0] = p[3];
  562. p += 4;
  563. lum += 2;
  564. cb++;
  565. cr++;
  566. }
  567. p1 += src->linesize[0];
  568. lum1 += dst->linesize[0];
  569. cb1 += dst->linesize[1];
  570. cr1 += dst->linesize[2];
  571. }
  572. }
  573. static void yuv422p_to_yuv422(AVPicture *dst, AVPicture *src,
  574. int width, int height)
  575. {
  576. uint8_t *p, *p1;
  577. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  578. int w;
  579. p1 = dst->data[0];
  580. lum1 = src->data[0];
  581. cb1 = src->data[1];
  582. cr1 = src->data[2];
  583. for(;height > 0; height--) {
  584. p = p1;
  585. lum = lum1;
  586. cb = cb1;
  587. cr = cr1;
  588. for(w = width; w >= 2; w -= 2) {
  589. p[0] = lum[0];
  590. p[1] = cb[0];
  591. p[2] = lum[1];
  592. p[3] = cr[0];
  593. p += 4;
  594. lum += 2;
  595. cb++;
  596. cr++;
  597. }
  598. p1 += dst->linesize[0];
  599. lum1 += src->linesize[0];
  600. cb1 += src->linesize[1];
  601. cr1 += src->linesize[2];
  602. }
  603. }
  604. #define SCALEBITS 10
  605. #define ONE_HALF (1 << (SCALEBITS - 1))
  606. #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
  607. #define YUV_TO_RGB1_CCIR(cb1, cr1)\
  608. {\
  609. cb = (cb1) - 128;\
  610. cr = (cr1) - 128;\
  611. r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
  612. g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
  613. ONE_HALF;\
  614. b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
  615. }
  616. #define YUV_TO_RGB2_CCIR(r, g, b, y1)\
  617. {\
  618. y = ((y1) - 16) * FIX(255.0/219.0);\
  619. r = cm[(y + r_add) >> SCALEBITS];\
  620. g = cm[(y + g_add) >> SCALEBITS];\
  621. b = cm[(y + b_add) >> SCALEBITS];\
  622. }
  623. #define YUV_TO_RGB1(cb1, cr1)\
  624. {\
  625. cb = (cb1) - 128;\
  626. cr = (cr1) - 128;\
  627. r_add = FIX(1.40200) * cr + ONE_HALF;\
  628. g_add = - FIX(0.34414) * cb - FIX(0.71414) * cr + ONE_HALF;\
  629. b_add = FIX(1.77200) * cb + ONE_HALF;\
  630. }
  631. #define YUV_TO_RGB2(r, g, b, y1)\
  632. {\
  633. y = (y1) << SCALEBITS;\
  634. r = cm[(y + r_add) >> SCALEBITS];\
  635. g = cm[(y + g_add) >> SCALEBITS];\
  636. b = cm[(y + b_add) >> SCALEBITS];\
  637. }
  638. #define Y_CCIR_TO_JPEG(y)\
  639. cm[((y) * FIX(255.0/219.0) + (ONE_HALF - 16 * FIX(255.0/219.0))) >> SCALEBITS]
  640. #define Y_JPEG_TO_CCIR(y)\
  641. (((y) * FIX(219.0/255.0) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
  642. #define C_CCIR_TO_JPEG(y)\
  643. cm[(((y) - 128) * FIX(127.0/112.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS]
  644. /* NOTE: the clamp is really necessary! */
  645. #define C_JPEG_TO_CCIR(y)\
  646. ({\
  647. int __y;\
  648. __y = ((((y) - 128) * FIX(112.0/127.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS);\
  649. if (__y < 16)\
  650. __y = 16;\
  651. __y;\
  652. })
  653. #define RGB_TO_Y(r, g, b) \
  654. ((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
  655. FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)
  656. #define RGB_TO_U(r1, g1, b1, shift)\
  657. (((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
  658. FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  659. #define RGB_TO_V(r1, g1, b1, shift)\
  660. (((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
  661. FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  662. #define RGB_TO_Y_CCIR(r, g, b) \
  663. ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  664. FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
  665. #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
  666. (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
  667. FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  668. #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
  669. (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
  670. FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
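/* Worked example of the fixed-point scheme above (illustrative, not in the
 * original source): with SCALEBITS = 10, FIX(1.40200*255.0/224.0) evaluates
 * to (int)(1.59602 * 1024 + 0.5) = 1634 and FIX(255.0/219.0) to 1192, so
 * YUV_TO_RGB1_CCIR/YUV_TO_RGB2_CCIR compute
 *     R = cm[((Y - 16) * 1192 + 1634 * (Cr - 128) + ONE_HALF) >> 10]
 * i.e. R = 1.164*(Y-16) + 1.596*(Cr-128) rounded to 10 fractional bits and
 * clamped through the cropTbl-based 'cm' table. */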
  671. static uint8_t y_ccir_to_jpeg[256];
  672. static uint8_t y_jpeg_to_ccir[256];
  673. static uint8_t c_ccir_to_jpeg[256];
  674. static uint8_t c_jpeg_to_ccir[256];
  675. /* init various conversion tables */
  676. static void img_convert_init(void)
  677. {
  678. int i;
  679. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  680. for(i = 0;i < 256; i++) {
  681. y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
  682. y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
  683. c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
  684. c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
  685. }
  686. }
687. /* apply the given table to each pixel */
  688. static void img_apply_table(uint8_t *dst, int dst_wrap,
  689. const uint8_t *src, int src_wrap,
  690. int width, int height, const uint8_t *table1)
  691. {
  692. int n;
  693. const uint8_t *s;
  694. uint8_t *d;
  695. const uint8_t *table;
  696. table = table1;
  697. for(;height > 0; height--) {
  698. s = src;
  699. d = dst;
  700. n = width;
  701. while (n >= 4) {
  702. d[0] = table[s[0]];
  703. d[1] = table[s[1]];
  704. d[2] = table[s[2]];
  705. d[3] = table[s[3]];
  706. d += 4;
  707. s += 4;
  708. n -= 4;
  709. }
  710. while (n > 0) {
  711. d[0] = table[s[0]];
  712. d++;
  713. s++;
  714. n--;
  715. }
  716. dst += dst_wrap;
  717. src += src_wrap;
  718. }
  719. }
  720. /* XXX: use generic filter ? */
  721. /* XXX: in most cases, the sampling position is incorrect */
  722. /* 4x1 -> 1x1 */
  723. static void shrink41(uint8_t *dst, int dst_wrap,
  724. const uint8_t *src, int src_wrap,
  725. int width, int height)
  726. {
  727. int w;
  728. const uint8_t *s;
  729. uint8_t *d;
  730. for(;height > 0; height--) {
  731. s = src;
  732. d = dst;
  733. for(w = width;w > 0; w--) {
  734. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  735. s += 4;
  736. d++;
  737. }
  738. src += src_wrap;
  739. dst += dst_wrap;
  740. }
  741. }
  742. /* 2x1 -> 1x1 */
  743. static void shrink21(uint8_t *dst, int dst_wrap,
  744. const uint8_t *src, int src_wrap,
  745. int width, int height)
  746. {
  747. int w;
  748. const uint8_t *s;
  749. uint8_t *d;
  750. for(;height > 0; height--) {
  751. s = src;
  752. d = dst;
  753. for(w = width;w > 0; w--) {
  754. d[0] = (s[0] + s[1]) >> 1;
  755. s += 2;
  756. d++;
  757. }
  758. src += src_wrap;
  759. dst += dst_wrap;
  760. }
  761. }
  762. /* 1x2 -> 1x1 */
  763. static void shrink12(uint8_t *dst, int dst_wrap,
  764. const uint8_t *src, int src_wrap,
  765. int width, int height)
  766. {
  767. int w;
  768. uint8_t *d;
  769. const uint8_t *s1, *s2;
  770. for(;height > 0; height--) {
  771. s1 = src;
  772. s2 = s1 + src_wrap;
  773. d = dst;
  774. for(w = width;w >= 4; w-=4) {
  775. d[0] = (s1[0] + s2[0]) >> 1;
  776. d[1] = (s1[1] + s2[1]) >> 1;
  777. d[2] = (s1[2] + s2[2]) >> 1;
  778. d[3] = (s1[3] + s2[3]) >> 1;
  779. s1 += 4;
  780. s2 += 4;
  781. d += 4;
  782. }
  783. for(;w > 0; w--) {
  784. d[0] = (s1[0] + s2[0]) >> 1;
  785. s1++;
  786. s2++;
  787. d++;
  788. }
  789. src += 2 * src_wrap;
  790. dst += dst_wrap;
  791. }
  792. }
  793. /* 2x2 -> 1x1 */
  794. static void shrink22(uint8_t *dst, int dst_wrap,
  795. const uint8_t *src, int src_wrap,
  796. int width, int height)
  797. {
  798. int w;
  799. const uint8_t *s1, *s2;
  800. uint8_t *d;
  801. for(;height > 0; height--) {
  802. s1 = src;
  803. s2 = s1 + src_wrap;
  804. d = dst;
  805. for(w = width;w >= 4; w-=4) {
  806. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  807. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  808. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  809. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  810. s1 += 8;
  811. s2 += 8;
  812. d += 4;
  813. }
  814. for(;w > 0; w--) {
  815. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  816. s1 += 2;
  817. s2 += 2;
  818. d++;
  819. }
  820. src += 2 * src_wrap;
  821. dst += dst_wrap;
  822. }
  823. }
  824. /* 4x4 -> 1x1 */
  825. static void shrink44(uint8_t *dst, int dst_wrap,
  826. const uint8_t *src, int src_wrap,
  827. int width, int height)
  828. {
  829. int w;
  830. const uint8_t *s1, *s2, *s3, *s4;
  831. uint8_t *d;
  832. for(;height > 0; height--) {
  833. s1 = src;
  834. s2 = s1 + src_wrap;
  835. s3 = s2 + src_wrap;
  836. s4 = s3 + src_wrap;
  837. d = dst;
  838. for(w = width;w > 0; w--) {
  839. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  840. s2[0] + s2[1] + s2[2] + s2[3] +
  841. s3[0] + s3[1] + s3[2] + s3[3] +
  842. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  843. s1 += 4;
  844. s2 += 4;
  845. s3 += 4;
  846. s4 += 4;
  847. d++;
  848. }
  849. src += 4 * src_wrap;
  850. dst += dst_wrap;
  851. }
  852. }
  853. static void grow21_line(uint8_t *dst, const uint8_t *src,
  854. int width)
  855. {
  856. int w;
  857. const uint8_t *s1;
  858. uint8_t *d;
  859. s1 = src;
  860. d = dst;
  861. for(w = width;w >= 4; w-=4) {
  862. d[1] = d[0] = s1[0];
  863. d[3] = d[2] = s1[1];
  864. s1 += 2;
  865. d += 4;
  866. }
  867. for(;w >= 2; w -= 2) {
  868. d[1] = d[0] = s1[0];
  869. s1 ++;
  870. d += 2;
  871. }
  872. /* only needed if width is not a multiple of two */
873. /* XXX: verify that */
  874. if (w) {
  875. d[0] = s1[0];
  876. }
  877. }
  878. static void grow41_line(uint8_t *dst, const uint8_t *src,
  879. int width)
  880. {
  881. int w, v;
  882. const uint8_t *s1;
  883. uint8_t *d;
  884. s1 = src;
  885. d = dst;
  886. for(w = width;w >= 4; w-=4) {
  887. v = s1[0];
  888. d[0] = v;
  889. d[1] = v;
  890. d[2] = v;
  891. d[3] = v;
  892. s1 ++;
  893. d += 4;
  894. }
  895. }
  896. /* 1x1 -> 2x1 */
  897. static void grow21(uint8_t *dst, int dst_wrap,
  898. const uint8_t *src, int src_wrap,
  899. int width, int height)
  900. {
  901. for(;height > 0; height--) {
  902. grow21_line(dst, src, width);
  903. src += src_wrap;
  904. dst += dst_wrap;
  905. }
  906. }
  907. /* 1x1 -> 2x2 */
  908. static void grow22(uint8_t *dst, int dst_wrap,
  909. const uint8_t *src, int src_wrap,
  910. int width, int height)
  911. {
  912. for(;height > 0; height--) {
  913. grow21_line(dst, src, width);
  914. if (height%2)
  915. src += src_wrap;
  916. dst += dst_wrap;
  917. }
  918. }
  919. /* 1x1 -> 4x1 */
  920. static void grow41(uint8_t *dst, int dst_wrap,
  921. const uint8_t *src, int src_wrap,
  922. int width, int height)
  923. {
  924. for(;height > 0; height--) {
  925. grow41_line(dst, src, width);
  926. src += src_wrap;
  927. dst += dst_wrap;
  928. }
  929. }
  930. /* 1x1 -> 4x4 */
  931. static void grow44(uint8_t *dst, int dst_wrap,
  932. const uint8_t *src, int src_wrap,
  933. int width, int height)
  934. {
  935. for(;height > 0; height--) {
  936. grow41_line(dst, src, width);
  937. if ((height & 3) == 1)
  938. src += src_wrap;
  939. dst += dst_wrap;
  940. }
  941. }
  942. /* 1x2 -> 2x1 */
  943. static void conv411(uint8_t *dst, int dst_wrap,
  944. const uint8_t *src, int src_wrap,
  945. int width, int height)
  946. {
  947. int w, c;
  948. const uint8_t *s1, *s2;
  949. uint8_t *d;
  950. width>>=1;
  951. for(;height > 0; height--) {
  952. s1 = src;
  953. s2 = src + src_wrap;
  954. d = dst;
  955. for(w = width;w > 0; w--) {
  956. c = (s1[0] + s2[0]) >> 1;
  957. d[0] = c;
  958. d[1] = c;
  959. s1++;
  960. s2++;
  961. d += 2;
  962. }
  963. src += src_wrap * 2;
  964. dst += dst_wrap;
  965. }
  966. }
  967. /* XXX: add jpeg quantize code */
  968. #define TRANSP_INDEX (6*6*6)
969. /* this may be slow, but it allows for extensions */
  970. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  971. {
  972. return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
  973. }
  974. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  975. {
  976. uint32_t *pal;
  977. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  978. int i, r, g, b;
  979. pal = (uint32_t *)palette;
  980. i = 0;
  981. for(r = 0; r < 6; r++) {
  982. for(g = 0; g < 6; g++) {
  983. for(b = 0; b < 6; b++) {
  984. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  985. (pal_value[g] << 8) | pal_value[b];
  986. }
  987. }
  988. }
  989. if (has_alpha)
  990. pal[i++] = 0;
  991. while (i < 256)
  992. pal[i++] = 0xff000000;
  993. }
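/* Illustrative note (not in the original source): gif_clut_index() quantizes
 * each component with /47 into the 6x6x6 cube filled by build_rgb_palette();
 * e.g. r = g = b = 255 gives 255/47 = 5 per component, hence index
 * 5*36 + 5*6 + 5 = 215, whose palette entry is 0xffffffff (opaque white).
 * Entry TRANSP_INDEX (216) is reserved for transparency when has_alpha is set. */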
  994. /* copy bit n to bits 0 ... n - 1 */
  995. static inline unsigned int bitcopy_n(unsigned int a, int n)
  996. {
  997. int mask;
  998. mask = (1 << n) - 1;
  999. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1000. }
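/* Worked example (illustrative, not in the original source): in the rgb555
 * reader below, v >> (10 - 3) leaves the 5 red bits in bits 3..7, and
 * bitcopy_n(..., 3) replicates bit 3 into bits 0..2; a maximal 5-bit red
 * value (0x1f) therefore expands to 0xff instead of 0xf8, so full-scale
 * components stay at 255 after conversion to 8 bits. */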
  1001. /* rgb555 handling */
  1002. #define RGB_NAME rgb555
  1003. #define RGB_IN(r, g, b, s)\
  1004. {\
  1005. unsigned int v = ((const uint16_t *)(s))[0];\
  1006. r = bitcopy_n(v >> (10 - 3), 3);\
  1007. g = bitcopy_n(v >> (5 - 3), 3);\
  1008. b = bitcopy_n(v << 3, 3);\
  1009. }
  1010. #define RGBA_IN(r, g, b, a, s)\
  1011. {\
  1012. unsigned int v = ((const uint16_t *)(s))[0];\
  1013. r = bitcopy_n(v >> (10 - 3), 3);\
  1014. g = bitcopy_n(v >> (5 - 3), 3);\
  1015. b = bitcopy_n(v << 3, 3);\
  1016. a = (-(v >> 15)) & 0xff;\
  1017. }
  1018. #define RGBA_OUT(d, r, g, b, a)\
  1019. {\
  1020. ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3) | \
  1021. ((a << 8) & 0x8000);\
  1022. }
  1023. #define BPP 2
  1024. #include "imgconvert_template.h"
  1025. /* rgb565 handling */
  1026. #define RGB_NAME rgb565
  1027. #define RGB_IN(r, g, b, s)\
  1028. {\
  1029. unsigned int v = ((const uint16_t *)(s))[0];\
  1030. r = bitcopy_n(v >> (11 - 3), 3);\
  1031. g = bitcopy_n(v >> (5 - 2), 2);\
  1032. b = bitcopy_n(v << 3, 3);\
  1033. }
  1034. #define RGB_OUT(d, r, g, b)\
  1035. {\
  1036. ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
  1037. }
  1038. #define BPP 2
  1039. #include "imgconvert_template.h"
  1040. /* bgr24 handling */
  1041. #define RGB_NAME bgr24
  1042. #define RGB_IN(r, g, b, s)\
  1043. {\
  1044. b = (s)[0];\
  1045. g = (s)[1];\
  1046. r = (s)[2];\
  1047. }
  1048. #define RGB_OUT(d, r, g, b)\
  1049. {\
  1050. (d)[0] = b;\
  1051. (d)[1] = g;\
  1052. (d)[2] = r;\
  1053. }
  1054. #define BPP 3
  1055. #include "imgconvert_template.h"
  1056. #undef RGB_IN
  1057. #undef RGB_OUT
  1058. #undef BPP
  1059. /* rgb24 handling */
  1060. #define RGB_NAME rgb24
  1061. #define FMT_RGB24
  1062. #define RGB_IN(r, g, b, s)\
  1063. {\
  1064. r = (s)[0];\
  1065. g = (s)[1];\
  1066. b = (s)[2];\
  1067. }
  1068. #define RGB_OUT(d, r, g, b)\
  1069. {\
  1070. (d)[0] = r;\
  1071. (d)[1] = g;\
  1072. (d)[2] = b;\
  1073. }
  1074. #define BPP 3
  1075. #include "imgconvert_template.h"
  1076. /* rgba32 handling */
  1077. #define RGB_NAME rgba32
  1078. #define FMT_RGBA32
  1079. #define RGB_IN(r, g, b, s)\
  1080. {\
  1081. unsigned int v = ((const uint32_t *)(s))[0];\
  1082. r = (v >> 16) & 0xff;\
  1083. g = (v >> 8) & 0xff;\
  1084. b = v & 0xff;\
  1085. }
  1086. #define RGBA_IN(r, g, b, a, s)\
  1087. {\
  1088. unsigned int v = ((const uint32_t *)(s))[0];\
  1089. a = (v >> 24) & 0xff;\
  1090. r = (v >> 16) & 0xff;\
  1091. g = (v >> 8) & 0xff;\
  1092. b = v & 0xff;\
  1093. }
  1094. #define RGBA_OUT(d, r, g, b, a)\
  1095. {\
  1096. ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
  1097. }
  1098. #define BPP 4
  1099. #include "imgconvert_template.h"
  1100. static void mono_to_gray(AVPicture *dst, AVPicture *src,
  1101. int width, int height, int xor_mask)
  1102. {
  1103. const unsigned char *p;
  1104. unsigned char *q;
  1105. int v, dst_wrap, src_wrap;
  1106. int y, w;
  1107. p = src->data[0];
  1108. src_wrap = src->linesize[0] - ((width + 7) >> 3);
  1109. q = dst->data[0];
  1110. dst_wrap = dst->linesize[0] - width;
  1111. for(y=0;y<height;y++) {
  1112. w = width;
  1113. while (w >= 8) {
  1114. v = *p++ ^ xor_mask;
  1115. q[0] = -(v >> 7);
  1116. q[1] = -((v >> 6) & 1);
  1117. q[2] = -((v >> 5) & 1);
  1118. q[3] = -((v >> 4) & 1);
  1119. q[4] = -((v >> 3) & 1);
  1120. q[5] = -((v >> 2) & 1);
  1121. q[6] = -((v >> 1) & 1);
  1122. q[7] = -((v >> 0) & 1);
  1123. w -= 8;
  1124. q += 8;
  1125. }
  1126. if (w > 0) {
  1127. v = *p++ ^ xor_mask;
  1128. do {
  1129. q[0] = -((v >> 7) & 1);
  1130. q++;
  1131. v <<= 1;
  1132. } while (--w);
  1133. }
  1134. p += src_wrap;
  1135. q += dst_wrap;
  1136. }
  1137. }
  1138. static void monowhite_to_gray(AVPicture *dst, AVPicture *src,
  1139. int width, int height)
  1140. {
  1141. mono_to_gray(dst, src, width, height, 0xff);
  1142. }
  1143. static void monoblack_to_gray(AVPicture *dst, AVPicture *src,
  1144. int width, int height)
  1145. {
  1146. mono_to_gray(dst, src, width, height, 0x00);
  1147. }
  1148. static void gray_to_mono(AVPicture *dst, AVPicture *src,
  1149. int width, int height, int xor_mask)
  1150. {
  1151. int n;
  1152. const uint8_t *s;
  1153. uint8_t *d;
  1154. int j, b, v, n1, src_wrap, dst_wrap, y;
  1155. s = src->data[0];
  1156. src_wrap = src->linesize[0] - width;
  1157. d = dst->data[0];
  1158. dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
  1159. for(y=0;y<height;y++) {
  1160. n = width;
  1161. while (n >= 8) {
  1162. v = 0;
  1163. for(j=0;j<8;j++) {
  1164. b = s[0];
  1165. s++;
  1166. v = (v << 1) | (b >> 7);
  1167. }
  1168. d[0] = v ^ xor_mask;
  1169. d++;
  1170. n -= 8;
  1171. }
  1172. if (n > 0) {
  1173. n1 = n;
  1174. v = 0;
  1175. while (n > 0) {
  1176. b = s[0];
  1177. s++;
  1178. v = (v << 1) | (b >> 7);
  1179. n--;
  1180. }
  1181. d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
  1182. d++;
  1183. }
  1184. s += src_wrap;
  1185. d += dst_wrap;
  1186. }
  1187. }
  1188. static void gray_to_monowhite(AVPicture *dst, AVPicture *src,
  1189. int width, int height)
  1190. {
  1191. gray_to_mono(dst, src, width, height, 0xff);
  1192. }
  1193. static void gray_to_monoblack(AVPicture *dst, AVPicture *src,
  1194. int width, int height)
  1195. {
  1196. gray_to_mono(dst, src, width, height, 0x00);
  1197. }
  1198. typedef struct ConvertEntry {
  1199. void (*convert)(AVPicture *dst, AVPicture *src, int width, int height);
  1200. } ConvertEntry;
1201. /* Add each new conversion function in this table. In order to be able
  1202. to convert from any format to any format, the following constraints
  1203. must be satisfied:
  1204. - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
  1205. - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
  1206. - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGBA32
  1207. - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
  1208. PIX_FMT_RGB24.
1209. - PIX_FMT_YUV422 must convert to and from PIX_FMT_YUV422P.
  1210. The other conversion functions are just optimisations for common cases.
  1211. */
  1212. static ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
  1213. [PIX_FMT_YUV420P] = {
  1214. [PIX_FMT_RGB555] = {
  1215. .convert = yuv420p_to_rgb555
  1216. },
  1217. [PIX_FMT_RGB565] = {
  1218. .convert = yuv420p_to_rgb565
  1219. },
  1220. [PIX_FMT_BGR24] = {
  1221. .convert = yuv420p_to_bgr24
  1222. },
  1223. [PIX_FMT_RGB24] = {
  1224. .convert = yuv420p_to_rgb24
  1225. },
  1226. [PIX_FMT_RGBA32] = {
  1227. .convert = yuv420p_to_rgba32
  1228. },
  1229. },
  1230. [PIX_FMT_YUV422P] = {
  1231. [PIX_FMT_YUV422] = {
  1232. .convert = yuv422p_to_yuv422,
  1233. },
  1234. },
  1235. [PIX_FMT_YUV444P] = {
  1236. [PIX_FMT_RGB24] = {
  1237. .convert = yuv444p_to_rgb24
  1238. },
  1239. },
  1240. [PIX_FMT_YUVJ420P] = {
  1241. [PIX_FMT_RGB555] = {
  1242. .convert = yuvj420p_to_rgb555
  1243. },
  1244. [PIX_FMT_RGB565] = {
  1245. .convert = yuvj420p_to_rgb565
  1246. },
  1247. [PIX_FMT_BGR24] = {
  1248. .convert = yuvj420p_to_bgr24
  1249. },
  1250. [PIX_FMT_RGB24] = {
  1251. .convert = yuvj420p_to_rgb24
  1252. },
  1253. [PIX_FMT_RGBA32] = {
  1254. .convert = yuvj420p_to_rgba32
  1255. },
  1256. },
  1257. [PIX_FMT_YUVJ444P] = {
  1258. [PIX_FMT_RGB24] = {
  1259. .convert = yuvj444p_to_rgb24
  1260. },
  1261. },
  1262. [PIX_FMT_YUV422] = {
  1263. [PIX_FMT_YUV420P] = {
  1264. .convert = yuv422_to_yuv420p,
  1265. },
  1266. [PIX_FMT_YUV422P] = {
  1267. .convert = yuv422_to_yuv422p,
  1268. },
  1269. },
  1270. [PIX_FMT_RGB24] = {
  1271. [PIX_FMT_YUV420P] = {
  1272. .convert = rgb24_to_yuv420p
  1273. },
  1274. [PIX_FMT_RGB565] = {
  1275. .convert = rgb24_to_rgb565
  1276. },
  1277. [PIX_FMT_RGB555] = {
  1278. .convert = rgb24_to_rgb555
  1279. },
  1280. [PIX_FMT_RGBA32] = {
  1281. .convert = rgb24_to_rgba32
  1282. },
  1283. [PIX_FMT_BGR24] = {
  1284. .convert = rgb24_to_bgr24
  1285. },
  1286. [PIX_FMT_GRAY8] = {
  1287. .convert = rgb24_to_gray
  1288. },
  1289. [PIX_FMT_PAL8] = {
  1290. .convert = rgb24_to_pal8
  1291. },
  1292. [PIX_FMT_YUV444P] = {
  1293. .convert = rgb24_to_yuv444p
  1294. },
  1295. [PIX_FMT_YUVJ420P] = {
  1296. .convert = rgb24_to_yuvj420p
  1297. },
  1298. [PIX_FMT_YUVJ444P] = {
  1299. .convert = rgb24_to_yuvj444p
  1300. },
  1301. },
  1302. [PIX_FMT_RGBA32] = {
  1303. [PIX_FMT_RGB24] = {
  1304. .convert = rgba32_to_rgb24
  1305. },
  1306. [PIX_FMT_RGB555] = {
  1307. .convert = rgba32_to_rgb555
  1308. },
  1309. [PIX_FMT_PAL8] = {
  1310. .convert = rgba32_to_pal8
  1311. },
  1312. [PIX_FMT_YUV420P] = {
  1313. .convert = rgba32_to_yuv420p
  1314. },
  1315. [PIX_FMT_GRAY8] = {
  1316. .convert = rgba32_to_gray
  1317. },
  1318. },
  1319. [PIX_FMT_BGR24] = {
  1320. [PIX_FMT_RGB24] = {
  1321. .convert = bgr24_to_rgb24
  1322. },
  1323. [PIX_FMT_YUV420P] = {
  1324. .convert = bgr24_to_yuv420p
  1325. },
  1326. [PIX_FMT_GRAY8] = {
  1327. .convert = bgr24_to_gray
  1328. },
  1329. },
  1330. [PIX_FMT_RGB555] = {
  1331. [PIX_FMT_RGB24] = {
  1332. .convert = rgb555_to_rgb24
  1333. },
  1334. [PIX_FMT_RGBA32] = {
  1335. .convert = rgb555_to_rgba32
  1336. },
  1337. [PIX_FMT_YUV420P] = {
  1338. .convert = rgb555_to_yuv420p
  1339. },
  1340. [PIX_FMT_GRAY8] = {
  1341. .convert = rgb555_to_gray
  1342. },
  1343. },
  1344. [PIX_FMT_RGB565] = {
  1345. [PIX_FMT_RGB24] = {
  1346. .convert = rgb565_to_rgb24
  1347. },
  1348. [PIX_FMT_YUV420P] = {
  1349. .convert = rgb565_to_yuv420p
  1350. },
  1351. [PIX_FMT_GRAY8] = {
  1352. .convert = rgb565_to_gray
  1353. },
  1354. },
  1355. [PIX_FMT_GRAY8] = {
  1356. [PIX_FMT_RGB555] = {
  1357. .convert = gray_to_rgb555
  1358. },
  1359. [PIX_FMT_RGB565] = {
  1360. .convert = gray_to_rgb565
  1361. },
  1362. [PIX_FMT_RGB24] = {
  1363. .convert = gray_to_rgb24
  1364. },
  1365. [PIX_FMT_BGR24] = {
  1366. .convert = gray_to_bgr24
  1367. },
  1368. [PIX_FMT_RGBA32] = {
  1369. .convert = gray_to_rgba32
  1370. },
  1371. [PIX_FMT_MONOWHITE] = {
  1372. .convert = gray_to_monowhite
  1373. },
  1374. [PIX_FMT_MONOBLACK] = {
  1375. .convert = gray_to_monoblack
  1376. },
  1377. },
  1378. [PIX_FMT_MONOWHITE] = {
  1379. [PIX_FMT_GRAY8] = {
  1380. .convert = monowhite_to_gray
  1381. },
  1382. },
  1383. [PIX_FMT_MONOBLACK] = {
  1384. [PIX_FMT_GRAY8] = {
  1385. .convert = monoblack_to_gray
  1386. },
  1387. },
  1388. [PIX_FMT_PAL8] = {
  1389. [PIX_FMT_RGB555] = {
  1390. .convert = pal8_to_rgb555
  1391. },
  1392. [PIX_FMT_RGB565] = {
  1393. .convert = pal8_to_rgb565
  1394. },
  1395. [PIX_FMT_BGR24] = {
  1396. .convert = pal8_to_bgr24
  1397. },
  1398. [PIX_FMT_RGB24] = {
  1399. .convert = pal8_to_rgb24
  1400. },
  1401. [PIX_FMT_RGBA32] = {
  1402. .convert = pal8_to_rgba32
  1403. },
  1404. },
  1405. };
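/* Illustrative note (not in the original source): img_convert() below first
 * looks up convert_table[src_pix_fmt][dst_pix_fmt].convert and only falls
 * back to a normalized intermediate format (YUV422P, GRAY8, YUV[J]444P,
 * RGB24 or RGBA32) when that slot is empty. */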
  1406. static int avpicture_alloc(AVPicture *picture,
  1407. int pix_fmt, int width, int height)
  1408. {
1409. int size;
  1410. void *ptr;
  1411. size = avpicture_get_size(pix_fmt, width, height);
  1412. if (size < 0)
  1413. goto fail;
  1414. ptr = av_malloc(size);
  1415. if (!ptr)
  1416. goto fail;
  1417. avpicture_fill(picture, ptr, pix_fmt, width, height);
  1418. return 0;
  1419. fail:
  1420. memset(picture, 0, sizeof(AVPicture));
  1421. return -1;
  1422. }
  1423. static void avpicture_free(AVPicture *picture)
  1424. {
  1425. av_free(picture->data[0]);
  1426. }
  1427. /* return true if yuv planar */
  1428. static inline int is_yuv_planar(PixFmtInfo *ps)
  1429. {
  1430. return (ps->color_type == FF_COLOR_YUV ||
  1431. ps->color_type == FF_COLOR_YUV_JPEG) &&
  1432. ps->pixel_type == FF_PIXEL_PLANAR;
  1433. }
  1434. /* XXX: always use linesize. Return -1 if not supported */
  1435. int img_convert(AVPicture *dst, int dst_pix_fmt,
  1436. AVPicture *src, int src_pix_fmt,
  1437. int src_width, int src_height)
  1438. {
  1439. static int inited;
  1440. int i, ret, dst_width, dst_height, int_pix_fmt;
  1441. PixFmtInfo *src_pix, *dst_pix;
  1442. ConvertEntry *ce;
  1443. AVPicture tmp1, *tmp = &tmp1;
  1444. if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
  1445. dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
  1446. return -1;
  1447. if (src_width <= 0 || src_height <= 0)
  1448. return 0;
  1449. if (!inited) {
  1450. inited = 1;
  1451. img_convert_init();
  1452. }
  1453. dst_width = src_width;
  1454. dst_height = src_height;
  1455. dst_pix = &pix_fmt_info[dst_pix_fmt];
  1456. src_pix = &pix_fmt_info[src_pix_fmt];
  1457. if (src_pix_fmt == dst_pix_fmt) {
  1458. /* no conversion needed: just copy */
  1459. img_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
  1460. return 0;
  1461. }
  1462. ce = &convert_table[src_pix_fmt][dst_pix_fmt];
  1463. if (ce->convert) {
1464. /* specific conversion routine */
  1465. ce->convert(dst, src, dst_width, dst_height);
  1466. return 0;
  1467. }
  1468. /* gray to YUV */
  1469. if (is_yuv_planar(dst_pix) &&
  1470. src_pix_fmt == PIX_FMT_GRAY8) {
  1471. int w, h, y;
  1472. uint8_t *d;
  1473. if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
  1474. img_copy_plane(dst->data[0], dst->linesize[0],
  1475. src->data[0], src->linesize[0],
  1476. dst_width, dst_height);
  1477. } else {
  1478. img_apply_table(dst->data[0], dst->linesize[0],
  1479. src->data[0], src->linesize[0],
  1480. dst_width, dst_height,
  1481. y_jpeg_to_ccir);
  1482. }
  1483. /* fill U and V with 128 */
  1484. w = dst_width;
  1485. h = dst_height;
  1486. w >>= dst_pix->x_chroma_shift;
  1487. h >>= dst_pix->y_chroma_shift;
  1488. for(i = 1; i <= 2; i++) {
  1489. d = dst->data[i];
  1490. for(y = 0; y< h; y++) {
  1491. memset(d, 128, w);
  1492. d += dst->linesize[i];
  1493. }
  1494. }
  1495. return 0;
  1496. }
  1497. /* YUV to gray */
  1498. if (is_yuv_planar(src_pix) &&
  1499. dst_pix_fmt == PIX_FMT_GRAY8) {
  1500. if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
  1501. img_copy_plane(dst->data[0], dst->linesize[0],
  1502. src->data[0], src->linesize[0],
  1503. dst_width, dst_height);
  1504. } else {
  1505. img_apply_table(dst->data[0], dst->linesize[0],
  1506. src->data[0], src->linesize[0],
  1507. dst_width, dst_height,
  1508. y_ccir_to_jpeg);
  1509. }
  1510. return 0;
  1511. }
  1512. /* YUV to YUV planar */
  1513. if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
  1514. int x_shift, y_shift, w, h, xy_shift;
  1515. void (*resize_func)(uint8_t *dst, int dst_wrap,
  1516. const uint8_t *src, int src_wrap,
  1517. int width, int height);
  1518. /* compute chroma size of the smallest dimensions */
  1519. w = dst_width;
  1520. h = dst_height;
  1521. if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
  1522. w >>= dst_pix->x_chroma_shift;
  1523. else
  1524. w >>= src_pix->x_chroma_shift;
  1525. if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
  1526. h >>= dst_pix->y_chroma_shift;
  1527. else
  1528. h >>= src_pix->y_chroma_shift;
  1529. x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
  1530. y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
  1531. xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
  1532. /* there must be filters for conversion at least from and to
  1533. YUV444 format */
  1534. switch(xy_shift) {
  1535. case 0x00:
  1536. resize_func = img_copy_plane;
  1537. break;
  1538. case 0x10:
  1539. resize_func = shrink21;
  1540. break;
  1541. case 0x20:
  1542. resize_func = shrink41;
  1543. break;
  1544. case 0x01:
  1545. resize_func = shrink12;
  1546. break;
  1547. case 0x11:
  1548. resize_func = shrink22;
  1549. break;
  1550. case 0x22:
  1551. resize_func = shrink44;
  1552. break;
  1553. case 0xf0:
  1554. resize_func = grow21;
  1555. break;
  1556. case 0xe0:
  1557. resize_func = grow41;
  1558. break;
  1559. case 0xff:
  1560. resize_func = grow22;
  1561. break;
  1562. case 0xee:
  1563. resize_func = grow44;
  1564. break;
  1565. case 0xf1:
  1566. resize_func = conv411;
  1567. break;
  1568. default:
  1569. /* currently not handled */
  1570. goto no_chroma_filter;
  1571. }
  1572. img_copy_plane(dst->data[0], dst->linesize[0],
  1573. src->data[0], src->linesize[0],
  1574. dst_width, dst_height);
  1575. for(i = 1;i <= 2; i++)
  1576. resize_func(dst->data[i], dst->linesize[i],
  1577. src->data[i], src->linesize[i],
  1578. dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
  1579. /* if yuv color space conversion is needed, we do it here on
  1580. the destination image */
  1581. if (dst_pix->color_type != src_pix->color_type) {
  1582. const uint8_t *y_table, *c_table;
  1583. if (dst_pix->color_type == FF_COLOR_YUV) {
  1584. y_table = y_jpeg_to_ccir;
  1585. c_table = c_jpeg_to_ccir;
  1586. } else {
  1587. y_table = y_ccir_to_jpeg;
  1588. c_table = c_ccir_to_jpeg;
  1589. }
  1590. img_apply_table(dst->data[0], dst->linesize[0],
  1591. dst->data[0], dst->linesize[0],
  1592. dst_width, dst_height,
  1593. y_table);
  1594. for(i = 1;i <= 2; i++)
  1595. img_apply_table(dst->data[i], dst->linesize[i],
  1596. dst->data[i], dst->linesize[i],
  1597. dst_width>>dst_pix->x_chroma_shift,
  1598. dst_height>>dst_pix->y_chroma_shift,
  1599. c_table);
  1600. }
  1601. return 0;
  1602. }
  1603. no_chroma_filter:
  1604. /* try to use an intermediate format */
  1605. if (src_pix_fmt == PIX_FMT_YUV422 ||
  1606. dst_pix_fmt == PIX_FMT_YUV422) {
  1607. /* specific case: convert to YUV422P first */
  1608. int_pix_fmt = PIX_FMT_YUV422P;
  1609. } else if ((src_pix->color_type == FF_COLOR_GRAY &&
  1610. src_pix_fmt != PIX_FMT_GRAY8) ||
  1611. (dst_pix->color_type == FF_COLOR_GRAY &&
  1612. dst_pix_fmt != PIX_FMT_GRAY8)) {
  1613. /* gray8 is the normalized format */
  1614. int_pix_fmt = PIX_FMT_GRAY8;
  1615. } else if ((is_yuv_planar(src_pix) &&
  1616. src_pix_fmt != PIX_FMT_YUV444P &&
  1617. src_pix_fmt != PIX_FMT_YUVJ444P)) {
  1618. /* yuv444 is the normalized format */
  1619. if (src_pix->color_type == FF_COLOR_YUV_JPEG)
  1620. int_pix_fmt = PIX_FMT_YUVJ444P;
  1621. else
  1622. int_pix_fmt = PIX_FMT_YUV444P;
  1623. } else if ((is_yuv_planar(dst_pix) &&
  1624. dst_pix_fmt != PIX_FMT_YUV444P &&
  1625. dst_pix_fmt != PIX_FMT_YUVJ444P)) {
  1626. /* yuv444 is the normalized format */
  1627. if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
  1628. int_pix_fmt = PIX_FMT_YUVJ444P;
  1629. else
  1630. int_pix_fmt = PIX_FMT_YUV444P;
  1631. } else {
  1632. /* the two formats are rgb or gray8 or yuv[j]444p */
  1633. if (src_pix->is_alpha && dst_pix->is_alpha)
  1634. int_pix_fmt = PIX_FMT_RGBA32;
  1635. else
  1636. int_pix_fmt = PIX_FMT_RGB24;
  1637. }
  1638. if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
  1639. return -1;
  1640. ret = -1;
  1641. if (img_convert(tmp, int_pix_fmt,
  1642. src, src_pix_fmt, src_width, src_height) < 0)
  1643. goto fail1;
  1644. if (img_convert(dst, dst_pix_fmt,
  1645. tmp, int_pix_fmt, dst_width, dst_height) < 0)
  1646. goto fail1;
  1647. ret = 0;
  1648. fail1:
  1649. avpicture_free(tmp);
  1650. return ret;
  1651. }
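/* Minimal usage sketch of img_convert() (illustrative only, not part of the
 * original file; the helper name example_to_rgb24 is hypothetical). 'rgb_buf'
 * must hold at least avpicture_get_size(PIX_FMT_RGB24, width, height) bytes. */
#if 0
static int example_to_rgb24(AVPicture *rgb, uint8_t *rgb_buf,
                            AVPicture *yuv, int width, int height)
{
    avpicture_fill(rgb, rgb_buf, PIX_FMT_RGB24, width, height);
    return img_convert(rgb, PIX_FMT_RGB24,
                       yuv, PIX_FMT_YUV420P, width, height);
}
#endif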
1652. /* NOTE: we scan all the pixels to get exact information */
  1653. static int get_alpha_info_pal8(AVPicture *src, int width, int height)
  1654. {
  1655. const unsigned char *p;
  1656. int src_wrap, ret, x, y;
  1657. unsigned int a;
  1658. uint32_t *palette = (uint32_t *)src->data[1];
  1659. p = src->data[0];
  1660. src_wrap = src->linesize[0] - width;
  1661. ret = 0;
  1662. for(y=0;y<height;y++) {
  1663. for(x=0;x<width;x++) {
  1664. a = palette[p[0]] >> 24;
  1665. if (a == 0x00) {
  1666. ret |= FF_ALPHA_TRANSP;
  1667. } else if (a != 0xff) {
  1668. ret |= FF_ALPHA_SEMI_TRANSP;
  1669. }
  1670. p++;
  1671. }
  1672. p += src_wrap;
  1673. }
  1674. return ret;
  1675. }
  1676. /**
  1677. * Tell if an image really has transparent alpha values.
1678. * @return OR'ed mask of FF_ALPHA_xxx constants
  1679. */
  1680. int img_get_alpha_info(AVPicture *src, int pix_fmt, int width, int height)
  1681. {
1682. PixFmtInfo *pf;
  1683. int ret;
  1684. pf = &pix_fmt_info[pix_fmt];
  1685. /* no alpha can be represented in format */
  1686. if (!pf->is_alpha)
  1687. return 0;
  1688. switch(pix_fmt) {
  1689. case PIX_FMT_RGBA32:
  1690. ret = get_alpha_info_rgba32(src, width, height);
  1691. break;
  1692. case PIX_FMT_RGB555:
  1693. ret = get_alpha_info_rgb555(src, width, height);
  1694. break;
  1695. case PIX_FMT_PAL8:
  1696. ret = get_alpha_info_pal8(src, width, height);
  1697. break;
  1698. default:
  1699. /* we do not know, so everything is indicated */
  1700. ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
  1701. break;
  1702. }
  1703. return ret;
  1704. }
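/* Illustrative note (not in the original source): a return value of 0 means
 * the picture is effectively opaque, FF_ALPHA_TRANSP means at least one fully
 * transparent pixel was found, and FF_ALPHA_SEMI_TRANSP means at least one
 * partially transparent pixel was found; both bits may be set at once. */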
  1705. #ifdef HAVE_MMX
  1706. #define DEINT_INPLACE_LINE_LUM \
  1707. movd_m2r(lum_m4[0],mm0);\
  1708. movd_m2r(lum_m3[0],mm1);\
  1709. movd_m2r(lum_m2[0],mm2);\
  1710. movd_m2r(lum_m1[0],mm3);\
  1711. movd_m2r(lum[0],mm4);\
  1712. punpcklbw_r2r(mm7,mm0);\
  1713. movd_r2m(mm2,lum_m4[0]);\
  1714. punpcklbw_r2r(mm7,mm1);\
  1715. punpcklbw_r2r(mm7,mm2);\
  1716. punpcklbw_r2r(mm7,mm3);\
  1717. punpcklbw_r2r(mm7,mm4);\
  1718. paddw_r2r(mm3,mm1);\
  1719. psllw_i2r(1,mm2);\
  1720. paddw_r2r(mm4,mm0);\
  1721. psllw_i2r(2,mm1);\
  1722. paddw_r2r(mm6,mm2);\
  1723. paddw_r2r(mm2,mm1);\
  1724. psubusw_r2r(mm0,mm1);\
  1725. psrlw_i2r(3,mm1);\
  1726. packuswb_r2r(mm7,mm1);\
  1727. movd_r2m(mm1,lum_m2[0]);
  1728. #define DEINT_LINE_LUM \
  1729. movd_m2r(lum_m4[0],mm0);\
  1730. movd_m2r(lum_m3[0],mm1);\
  1731. movd_m2r(lum_m2[0],mm2);\
  1732. movd_m2r(lum_m1[0],mm3);\
  1733. movd_m2r(lum[0],mm4);\
  1734. punpcklbw_r2r(mm7,mm0);\
  1735. punpcklbw_r2r(mm7,mm1);\
  1736. punpcklbw_r2r(mm7,mm2);\
  1737. punpcklbw_r2r(mm7,mm3);\
  1738. punpcklbw_r2r(mm7,mm4);\
  1739. paddw_r2r(mm3,mm1);\
  1740. psllw_i2r(1,mm2);\
  1741. paddw_r2r(mm4,mm0);\
  1742. psllw_i2r(2,mm1);\
  1743. paddw_r2r(mm6,mm2);\
  1744. paddw_r2r(mm2,mm1);\
  1745. psubusw_r2r(mm0,mm1);\
  1746. psrlw_i2r(3,mm1);\
  1747. packuswb_r2r(mm7,mm1);\
  1748. movd_r2m(mm1,dst[0]);
  1749. #endif
  1750. /* filter parameters: [-1 4 2 4 -1] // 8 */
  1751. static void deinterlace_line(uint8_t *dst, uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
  1752. int size)
  1753. {
  1754. #ifndef HAVE_MMX
  1755. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  1756. int sum;
  1757. for(;size > 0;size--) {
  1758. sum = -lum_m4[0];
  1759. sum += lum_m3[0] << 2;
  1760. sum += lum_m2[0] << 1;
  1761. sum += lum_m1[0] << 2;
  1762. sum += -lum[0];
  1763. dst[0] = cm[(sum + 4) >> 3];
  1764. lum_m4++;
  1765. lum_m3++;
  1766. lum_m2++;
  1767. lum_m1++;
  1768. lum++;
  1769. dst++;
  1770. }
  1771. #else
  1772. {
  1773. mmx_t rounder;
  1774. rounder.uw[0]=4;
  1775. rounder.uw[1]=4;
  1776. rounder.uw[2]=4;
  1777. rounder.uw[3]=4;
  1778. pxor_r2r(mm7,mm7);
  1779. movq_m2r(rounder,mm6);
  1780. }
  1781. for (;size > 3; size-=4) {
  1782. DEINT_LINE_LUM
  1783. lum_m4+=4;
  1784. lum_m3+=4;
  1785. lum_m2+=4;
  1786. lum_m1+=4;
  1787. lum+=4;
  1788. dst+=4;
  1789. }
  1790. #endif
  1791. }
  1792. static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
  1793. int size)
  1794. {
  1795. #ifndef HAVE_MMX
  1796. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  1797. int sum;
  1798. for(;size > 0;size--) {
  1799. sum = -lum_m4[0];
  1800. sum += lum_m3[0] << 2;
  1801. sum += lum_m2[0] << 1;
  1802. lum_m4[0]=lum_m2[0];
  1803. sum += lum_m1[0] << 2;
  1804. sum += -lum[0];
  1805. lum_m2[0] = cm[(sum + 4) >> 3];
  1806. lum_m4++;
  1807. lum_m3++;
  1808. lum_m2++;
  1809. lum_m1++;
  1810. lum++;
  1811. }
  1812. #else
  1813. {
  1814. mmx_t rounder;
  1815. rounder.uw[0]=4;
  1816. rounder.uw[1]=4;
  1817. rounder.uw[2]=4;
  1818. rounder.uw[3]=4;
  1819. pxor_r2r(mm7,mm7);
  1820. movq_m2r(rounder,mm6);
  1821. }
  1822. for (;size > 3; size-=4) {
  1823. DEINT_INPLACE_LINE_LUM
  1824. lum_m4+=4;
  1825. lum_m3+=4;
  1826. lum_m2+=4;
  1827. lum_m1+=4;
  1828. lum+=4;
  1829. }
  1830. #endif
  1831. }
1832. /* deinterlacing: 2 temporal taps, 3 spatial taps linear filter. The
  1833. top field is copied as is, but the bottom field is deinterlaced
  1834. against the top field. */
  1835. static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
  1836. uint8_t *src1, int src_wrap,
  1837. int width, int height)
  1838. {
  1839. uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
  1840. int y;
  1841. src_m2 = src1;
  1842. src_m1 = src1;
  1843. src_0=&src_m1[src_wrap];
  1844. src_p1=&src_0[src_wrap];
  1845. src_p2=&src_p1[src_wrap];
  1846. for(y=0;y<(height-2);y+=2) {
  1847. memcpy(dst,src_m1,width);
  1848. dst += dst_wrap;
  1849. deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
  1850. src_m2 = src_0;
  1851. src_m1 = src_p1;
  1852. src_0 = src_p2;
  1853. src_p1 += 2*src_wrap;
  1854. src_p2 += 2*src_wrap;
  1855. dst += dst_wrap;
  1856. }
  1857. memcpy(dst,src_m1,width);
  1858. dst += dst_wrap;
  1859. /* do last line */
  1860. deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
  1861. }
  1862. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  1863. int width, int height)
  1864. {
  1865. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  1866. int y;
  1867. uint8_t *buf;
  1868. buf = (uint8_t*)av_malloc(width);
  1869. src_m1 = src1;
  1870. memcpy(buf,src_m1,width);
  1871. src_0=&src_m1[src_wrap];
  1872. src_p1=&src_0[src_wrap];
  1873. src_p2=&src_p1[src_wrap];
  1874. for(y=0;y<(height-2);y+=2) {
  1875. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  1876. src_m1 = src_p1;
  1877. src_0 = src_p2;
  1878. src_p1 += 2*src_wrap;
  1879. src_p2 += 2*src_wrap;
  1880. }
  1881. /* do last line */
  1882. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  1883. av_free(buf);
  1884. }
  1885. /* deinterlace - if not supported return -1 */
  1886. int avpicture_deinterlace(AVPicture *dst, AVPicture *src,
  1887. int pix_fmt, int width, int height)
  1888. {
  1889. int i;
  1890. if (pix_fmt != PIX_FMT_YUV420P &&
  1891. pix_fmt != PIX_FMT_YUV422P &&
  1892. pix_fmt != PIX_FMT_YUV444P)
  1893. return -1;
  1894. if ((width & 3) != 0 || (height & 3) != 0)
  1895. return -1;
  1896. for(i=0;i<3;i++) {
  1897. if (i == 1) {
  1898. switch(pix_fmt) {
  1899. case PIX_FMT_YUV420P:
  1900. width >>= 1;
  1901. height >>= 1;
  1902. break;
  1903. case PIX_FMT_YUV422P:
  1904. width >>= 1;
  1905. break;
  1906. default:
  1907. break;
  1908. }
  1909. }
  1910. if (src == dst) {
  1911. deinterlace_bottom_field_inplace(src->data[i], src->linesize[i],
  1912. width, height);
  1913. } else {
  1914. deinterlace_bottom_field(dst->data[i],dst->linesize[i],
  1915. src->data[i], src->linesize[i],
  1916. width, height);
  1917. }
  1918. }
  1919. #ifdef HAVE_MMX
  1920. emms();
  1921. #endif
  1922. return 0;
  1923. }
  1924. #undef FIX