  1. /*
2. * Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
  4. *
  5. * This library is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU Lesser General Public
  7. * License as published by the Free Software Foundation; either
  8. * version 2 of the License, or (at your option) any later version.
  9. *
  10. * This library is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * Lesser General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU Lesser General Public
  16. * License along with this library; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. /**
  20. * @file imgconvert.c
21. * Misc image conversion routines.
  22. */
  23. /* TODO:
24. * - write an 'ffimg' program to test all the image-related stuff
25. * - move the whole API to a slice-based system
  26. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  27. */
  28. #include "avcodec.h"
  29. #include "dsputil.h"
  30. #ifdef USE_FASTMEMCPY
  31. #include "fastmemcpy.h"
  32. #endif
  33. #ifdef HAVE_MMX
  34. #include "i386/mmx.h"
  35. #endif
  36. #define xglue(x, y) x ## y
  37. #define glue(x, y) xglue(x, y)
  38. #define FF_COLOR_RGB 0 /* RGB color space */
  39. #define FF_COLOR_GRAY 1 /* gray color space */
  40. #define FF_COLOR_YUV 2 /* YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
  41. #define FF_COLOR_YUV_JPEG 3 /* YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
  42. #define FF_PIXEL_PLANAR 0 /* each channel has one component in AVPicture */
43. #define FF_PIXEL_PACKED 1 /* only one component containing all the channels */
44. #define FF_PIXEL_PALETTE 2 /* one component containing indexes for a palette */
  45. typedef struct PixFmtInfo {
  46. const char *name;
  47. uint8_t nb_channels; /* number of channels (including alpha) */
  48. uint8_t color_type; /* color type (see FF_COLOR_xxx constants) */
  49. uint8_t pixel_type; /* pixel storage type (see FF_PIXEL_xxx constants) */
  50. uint8_t is_alpha : 1; /* true if alpha can be specified */
  51. uint8_t x_chroma_shift; /* X chroma subsampling factor is 2 ^ shift */
  52. uint8_t y_chroma_shift; /* Y chroma subsampling factor is 2 ^ shift */
  53. uint8_t depth; /* bit depth of the color components */
  54. } PixFmtInfo;
  55. /* this table gives more information about formats */
  56. static PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
  57. /* YUV formats */
  58. [PIX_FMT_YUV420P] = {
  59. .name = "yuv420p",
  60. .nb_channels = 3,
  61. .color_type = FF_COLOR_YUV,
  62. .pixel_type = FF_PIXEL_PLANAR,
  63. .depth = 8,
  64. .x_chroma_shift = 1, .y_chroma_shift = 1,
  65. },
  66. [PIX_FMT_YUV422P] = {
  67. .name = "yuv422p",
  68. .nb_channels = 3,
  69. .color_type = FF_COLOR_YUV,
  70. .pixel_type = FF_PIXEL_PLANAR,
  71. .depth = 8,
  72. .x_chroma_shift = 1, .y_chroma_shift = 0,
  73. },
  74. [PIX_FMT_YUV444P] = {
  75. .name = "yuv444p",
  76. .nb_channels = 3,
  77. .color_type = FF_COLOR_YUV,
  78. .pixel_type = FF_PIXEL_PLANAR,
  79. .depth = 8,
  80. .x_chroma_shift = 0, .y_chroma_shift = 0,
  81. },
  82. [PIX_FMT_YUV422] = {
  83. .name = "yuv422",
  84. .nb_channels = 1,
  85. .color_type = FF_COLOR_YUV,
  86. .pixel_type = FF_PIXEL_PACKED,
  87. .depth = 8,
  88. .x_chroma_shift = 1, .y_chroma_shift = 0,
  89. },
  90. [PIX_FMT_YUV410P] = {
  91. .name = "yuv410p",
  92. .nb_channels = 3,
  93. .color_type = FF_COLOR_YUV,
  94. .pixel_type = FF_PIXEL_PLANAR,
  95. .depth = 8,
  96. .x_chroma_shift = 2, .y_chroma_shift = 2,
  97. },
  98. [PIX_FMT_YUV411P] = {
  99. .name = "yuv411p",
  100. .nb_channels = 3,
  101. .color_type = FF_COLOR_YUV,
  102. .pixel_type = FF_PIXEL_PLANAR,
  103. .depth = 8,
  104. .x_chroma_shift = 2, .y_chroma_shift = 0,
  105. },
  106. /* JPEG YUV */
  107. [PIX_FMT_YUVJ420P] = {
  108. .name = "yuvj420p",
  109. .nb_channels = 3,
  110. .color_type = FF_COLOR_YUV_JPEG,
  111. .pixel_type = FF_PIXEL_PLANAR,
  112. .depth = 8,
  113. .x_chroma_shift = 1, .y_chroma_shift = 1,
  114. },
  115. [PIX_FMT_YUVJ422P] = {
  116. .name = "yuvj422p",
  117. .nb_channels = 3,
  118. .color_type = FF_COLOR_YUV_JPEG,
  119. .pixel_type = FF_PIXEL_PLANAR,
  120. .depth = 8,
  121. .x_chroma_shift = 1, .y_chroma_shift = 0,
  122. },
  123. [PIX_FMT_YUVJ444P] = {
  124. .name = "yuvj444p",
  125. .nb_channels = 3,
  126. .color_type = FF_COLOR_YUV_JPEG,
  127. .pixel_type = FF_PIXEL_PLANAR,
  128. .depth = 8,
  129. .x_chroma_shift = 0, .y_chroma_shift = 0,
  130. },
  131. /* RGB formats */
  132. [PIX_FMT_RGB24] = {
  133. .name = "rgb24",
  134. .nb_channels = 3,
  135. .color_type = FF_COLOR_RGB,
  136. .pixel_type = FF_PIXEL_PACKED,
  137. .depth = 8,
  138. },
  139. [PIX_FMT_BGR24] = {
  140. .name = "bgr24",
  141. .nb_channels = 3,
  142. .color_type = FF_COLOR_RGB,
  143. .pixel_type = FF_PIXEL_PACKED,
  144. .depth = 8,
  145. },
  146. [PIX_FMT_RGBA32] = {
  147. .name = "rgba32",
  148. .nb_channels = 4, .is_alpha = 1,
  149. .color_type = FF_COLOR_RGB,
  150. .pixel_type = FF_PIXEL_PACKED,
  151. .depth = 8,
  152. },
  153. [PIX_FMT_RGB565] = {
  154. .name = "rgb565",
  155. .nb_channels = 3,
  156. .color_type = FF_COLOR_RGB,
  157. .pixel_type = FF_PIXEL_PACKED,
  158. .depth = 5,
  159. },
  160. [PIX_FMT_RGB555] = {
  161. .name = "rgb555",
  162. .nb_channels = 4, .is_alpha = 1,
  163. .color_type = FF_COLOR_RGB,
  164. .pixel_type = FF_PIXEL_PACKED,
  165. .depth = 5,
  166. },
  167. /* gray / mono formats */
  168. [PIX_FMT_GRAY8] = {
  169. .name = "gray",
  170. .nb_channels = 1,
  171. .color_type = FF_COLOR_GRAY,
  172. .pixel_type = FF_PIXEL_PLANAR,
  173. .depth = 8,
  174. },
  175. [PIX_FMT_MONOWHITE] = {
  176. .name = "monow",
  177. .nb_channels = 1,
  178. .color_type = FF_COLOR_GRAY,
  179. .pixel_type = FF_PIXEL_PLANAR,
  180. .depth = 1,
  181. },
  182. [PIX_FMT_MONOBLACK] = {
  183. .name = "monob",
  184. .nb_channels = 1,
  185. .color_type = FF_COLOR_GRAY,
  186. .pixel_type = FF_PIXEL_PLANAR,
  187. .depth = 1,
  188. },
  189. /* paletted formats */
  190. [PIX_FMT_PAL8] = {
  191. .name = "pal8",
  192. .nb_channels = 4, .is_alpha = 1,
  193. .color_type = FF_COLOR_RGB,
  194. .pixel_type = FF_PIXEL_PALETTE,
  195. .depth = 8,
  196. },
  197. };
  198. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  199. {
  200. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  201. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  202. }
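/* Added illustrative sketch, not part of the original source: the shifts
 * express chroma subsampling as a power of two per axis, so for
 * PIX_FMT_YUV420P (shifts 1,1) a 640x480 frame carries 320x240 chroma
 * planes. The helper below is hypothetical and only demonstrates the
 * arithmetic. */
static void example_chroma_plane_size(int *chroma_w, int *chroma_h)
{
    int h_shift, v_shift;

    avcodec_get_chroma_sub_sample(PIX_FMT_YUV420P, &h_shift, &v_shift);
    *chroma_w = 640 >> h_shift; /* 320 */
    *chroma_h = 480 >> v_shift; /* 240 */
}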
  203. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  204. {
  205. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  206. return "???";
  207. else
  208. return pix_fmt_info[pix_fmt].name;
  209. }
210. /* Picture fields are filled with 'ptr' addresses. Also returns the total size. */
  211. int avpicture_fill(AVPicture *picture, uint8_t *ptr,
  212. int pix_fmt, int width, int height)
  213. {
  214. int size, w2, h2, size2;
  215. PixFmtInfo *pinfo;
  216. pinfo = &pix_fmt_info[pix_fmt];
  217. size = width * height;
  218. switch(pix_fmt) {
  219. case PIX_FMT_YUV420P:
  220. case PIX_FMT_YUV422P:
  221. case PIX_FMT_YUV444P:
  222. case PIX_FMT_YUV410P:
  223. case PIX_FMT_YUV411P:
  224. case PIX_FMT_YUVJ420P:
  225. case PIX_FMT_YUVJ422P:
  226. case PIX_FMT_YUVJ444P:
  227. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  228. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  229. size2 = w2 * h2;
  230. picture->data[0] = ptr;
  231. picture->data[1] = picture->data[0] + size;
  232. picture->data[2] = picture->data[1] + size2;
  233. picture->linesize[0] = width;
  234. picture->linesize[1] = w2;
  235. picture->linesize[2] = w2;
  236. return size + 2 * size2;
  237. case PIX_FMT_RGB24:
  238. case PIX_FMT_BGR24:
  239. picture->data[0] = ptr;
  240. picture->data[1] = NULL;
  241. picture->data[2] = NULL;
  242. picture->linesize[0] = width * 3;
  243. return size * 3;
  244. case PIX_FMT_RGBA32:
  245. picture->data[0] = ptr;
  246. picture->data[1] = NULL;
  247. picture->data[2] = NULL;
  248. picture->linesize[0] = width * 4;
  249. return size * 4;
  250. case PIX_FMT_RGB555:
  251. case PIX_FMT_RGB565:
  252. case PIX_FMT_YUV422:
  253. picture->data[0] = ptr;
  254. picture->data[1] = NULL;
  255. picture->data[2] = NULL;
  256. picture->linesize[0] = width * 2;
  257. return size * 2;
  258. case PIX_FMT_GRAY8:
  259. picture->data[0] = ptr;
  260. picture->data[1] = NULL;
  261. picture->data[2] = NULL;
  262. picture->linesize[0] = width;
  263. return size;
  264. case PIX_FMT_MONOWHITE:
  265. case PIX_FMT_MONOBLACK:
  266. picture->data[0] = ptr;
  267. picture->data[1] = NULL;
  268. picture->data[2] = NULL;
  269. picture->linesize[0] = (width + 7) >> 3;
  270. return picture->linesize[0] * height;
  271. case PIX_FMT_PAL8:
  272. size2 = (size + 3) & ~3;
  273. picture->data[0] = ptr;
  274. picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
  275. picture->data[2] = NULL;
  276. picture->linesize[0] = width;
  277. picture->linesize[1] = 4;
  278. return size2 + 256 * 4;
  279. default:
  280. picture->data[0] = NULL;
  281. picture->data[1] = NULL;
  282. picture->data[2] = NULL;
  283. picture->data[3] = NULL;
  284. return -1;
  285. }
  286. }
  287. int avpicture_get_size(int pix_fmt, int width, int height)
  288. {
  289. AVPicture dummy_pict;
  290. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  291. }
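/* Added illustrative sketch, not part of the original source: the usual
 * calling pattern is to query the byte size, allocate one contiguous
 * buffer and let avpicture_fill() set up the plane pointers and
 * linesizes inside it. The helper name is hypothetical. */
static int example_alloc_picture(AVPicture *pic, int width, int height)
{
    int size;
    uint8_t *buf;

    size = avpicture_get_size(PIX_FMT_YUV420P, width, height);
    if (size < 0)
        return -1;
    buf = av_malloc(size);
    if (!buf)
        return -1;
    avpicture_fill(pic, buf, PIX_FMT_YUV420P, width, height);
    return size; /* caller frees pic->data[0] with av_free() */
}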
  292. /**
293. * Compute the loss when converting from one pixel format to another.
  294. */
  295. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  296. int has_alpha)
  297. {
  298. const PixFmtInfo *pf, *ps;
  299. int loss;
  300. ps = &pix_fmt_info[src_pix_fmt];
  301. pf = &pix_fmt_info[dst_pix_fmt];
  302. /* compute loss */
  303. loss = 0;
  305. if (pf->depth < ps->depth ||
  306. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  307. loss |= FF_LOSS_DEPTH;
  308. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  309. pf->y_chroma_shift > ps->y_chroma_shift)
  310. loss |= FF_LOSS_RESOLUTION;
  311. switch(pf->color_type) {
  312. case FF_COLOR_RGB:
  313. if (ps->color_type != FF_COLOR_RGB &&
  314. ps->color_type != FF_COLOR_GRAY)
  315. loss |= FF_LOSS_COLORSPACE;
  316. break;
  317. case FF_COLOR_GRAY:
  318. if (ps->color_type != FF_COLOR_GRAY)
  319. loss |= FF_LOSS_COLORSPACE;
  320. break;
  321. case FF_COLOR_YUV:
  322. if (ps->color_type != FF_COLOR_YUV)
  323. loss |= FF_LOSS_COLORSPACE;
  324. break;
  325. case FF_COLOR_YUV_JPEG:
  326. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  327. ps->color_type != FF_COLOR_YUV &&
  328. ps->color_type != FF_COLOR_GRAY)
  329. loss |= FF_LOSS_COLORSPACE;
  330. break;
  331. default:
  332. /* fail safe test */
  333. if (ps->color_type != pf->color_type)
  334. loss |= FF_LOSS_COLORSPACE;
  335. break;
  336. }
  337. if (pf->color_type == FF_COLOR_GRAY &&
  338. ps->color_type != FF_COLOR_GRAY)
  339. loss |= FF_LOSS_CHROMA;
  340. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  341. loss |= FF_LOSS_ALPHA;
  342. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  343. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  344. loss |= FF_LOSS_COLORQUANT;
  345. return loss;
  346. }
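/* Added illustrative sketch, not part of the original source: the return
 * value is a bit mask of the FF_LOSS_* flags from avcodec.h, so callers
 * normally test individual bits. Converting rgba32 to rgb565, for
 * instance, reports at least FF_LOSS_DEPTH and FF_LOSS_ALPHA. The helper
 * below is hypothetical. */
static int example_conversion_loses_alpha(int dst_pix_fmt, int src_pix_fmt)
{
    int loss = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, 1);
    return (loss & FF_LOSS_ALPHA) != 0;
}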
  347. static int avg_bits_per_pixel(int pix_fmt)
  348. {
  349. int bits;
  350. const PixFmtInfo *pf;
  351. pf = &pix_fmt_info[pix_fmt];
  352. switch(pf->pixel_type) {
  353. case FF_PIXEL_PACKED:
  354. switch(pix_fmt) {
  355. case PIX_FMT_YUV422:
  356. case PIX_FMT_RGB565:
  357. case PIX_FMT_RGB555:
  358. bits = 16;
  359. break;
  360. default:
  361. bits = pf->depth * pf->nb_channels;
  362. break;
  363. }
  364. break;
  365. case FF_PIXEL_PLANAR:
  366. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  367. bits = pf->depth * pf->nb_channels;
  368. } else {
  369. bits = pf->depth + ((2 * pf->depth) >>
  370. (pf->x_chroma_shift + pf->y_chroma_shift));
  371. }
  372. break;
  373. case FF_PIXEL_PALETTE:
  374. bits = 8;
  375. break;
  376. default:
  377. bits = -1;
  378. break;
  379. }
  380. return bits;
  381. }
  382. static int avcodec_find_best_pix_fmt1(int pix_fmt_mask,
  383. int src_pix_fmt,
  384. int has_alpha,
  385. int loss_mask)
  386. {
  387. int dist, i, loss, min_dist, dst_pix_fmt;
  388. /* find exact color match with smallest size */
  389. dst_pix_fmt = -1;
  390. min_dist = 0x7fffffff;
  391. for(i = 0;i < PIX_FMT_NB; i++) {
  392. if (pix_fmt_mask & (1 << i)) {
  393. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  394. if (loss == 0) {
  395. dist = avg_bits_per_pixel(i);
  396. if (dist < min_dist) {
  397. min_dist = dist;
  398. dst_pix_fmt = i;
  399. }
  400. }
  401. }
  402. }
  403. return dst_pix_fmt;
  404. }
  405. /**
406. * Find the best pixel format to convert to. Returns -1 if none was found.
  407. */
  408. int avcodec_find_best_pix_fmt(int pix_fmt_mask, int src_pix_fmt,
  409. int has_alpha, int *loss_ptr)
  410. {
  411. int dst_pix_fmt, loss_mask, i;
  412. static const int loss_mask_order[] = {
  413. ~0, /* no loss first */
  414. ~FF_LOSS_ALPHA,
  415. ~FF_LOSS_RESOLUTION,
  416. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  417. ~FF_LOSS_COLORQUANT,
  418. ~FF_LOSS_DEPTH,
  419. 0,
  420. };
  421. /* try with successive loss */
  422. i = 0;
  423. for(;;) {
  424. loss_mask = loss_mask_order[i++];
  425. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  426. has_alpha, loss_mask);
  427. if (dst_pix_fmt >= 0)
  428. goto found;
  429. if (loss_mask == 0)
  430. break;
  431. }
  432. return -1;
  433. found:
  434. if (loss_ptr)
  435. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  436. return dst_pix_fmt;
  437. }
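/* Added illustrative sketch, not part of the original source:
 * pix_fmt_mask holds one bit per PIX_FMT_* value describing the formats
 * the caller can accept. A hypothetical caller that can handle either
 * yuv420p or rgb24 output would pick the better match like this. */
static int example_pick_output_format(int src_pix_fmt)
{
    int loss;
    int mask = (1 << PIX_FMT_YUV420P) | (1 << PIX_FMT_RGB24);

    return avcodec_find_best_pix_fmt(mask, src_pix_fmt, 0, &loss);
}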
  438. static void img_copy_plane(uint8_t *dst, int dst_wrap,
  439. const uint8_t *src, int src_wrap,
  440. int width, int height)
  441. {
  442. for(;height > 0; height--) {
  443. memcpy(dst, src, width);
  444. dst += dst_wrap;
  445. src += src_wrap;
  446. }
  447. }
  448. /* copy image 'src' to 'dst' */
  449. void img_copy(AVPicture *dst, AVPicture *src,
  450. int pix_fmt, int width, int height)
  451. {
  452. int bwidth, bits, i;
  453. PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  455. switch(pf->pixel_type) {
  456. case FF_PIXEL_PACKED:
  457. switch(pix_fmt) {
  458. case PIX_FMT_YUV422:
  459. case PIX_FMT_RGB565:
  460. case PIX_FMT_RGB555:
  461. bits = 16;
  462. break;
  463. default:
  464. bits = pf->depth * pf->nb_channels;
  465. break;
  466. }
  467. bwidth = (width * bits + 7) >> 3;
  468. img_copy_plane(dst->data[0], dst->linesize[0],
  469. src->data[0], src->linesize[0],
  470. bwidth, height);
  471. break;
  472. case FF_PIXEL_PLANAR:
  473. for(i = 0; i < pf->nb_channels; i++) {
  474. int w, h;
  475. w = width;
  476. h = height;
  477. if (i == 1 || i == 2) {
  478. w >>= pf->x_chroma_shift;
  479. h >>= pf->y_chroma_shift;
  480. }
  481. bwidth = (w * pf->depth + 7) >> 3;
  482. img_copy_plane(dst->data[i], dst->linesize[i],
  483. src->data[i], src->linesize[i],
  484. bwidth, h);
  485. }
  486. break;
  487. case FF_PIXEL_PALETTE:
  488. img_copy_plane(dst->data[0], dst->linesize[0],
  489. src->data[0], src->linesize[0],
  490. width, height);
  491. /* copy the palette */
  492. img_copy_plane(dst->data[1], dst->linesize[1],
  493. src->data[1], src->linesize[1],
  494. 4, 256);
  495. break;
  496. }
  497. }
  498. /* XXX: totally non optimized */
  499. static void yuv422_to_yuv420p(AVPicture *dst, AVPicture *src,
  500. int width, int height)
  501. {
  502. const uint8_t *p, *p1;
  503. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  504. int x;
  505. p1 = src->data[0];
  506. lum1 = dst->data[0];
  507. cb1 = dst->data[1];
  508. cr1 = dst->data[2];
  509. for(;height >= 2; height -= 2) {
  510. p = p1;
  511. lum = lum1;
  512. cb = cb1;
  513. cr = cr1;
  514. for(x=0;x<width;x+=2) {
  515. lum[0] = p[0];
  516. cb[0] = p[1];
  517. lum[1] = p[2];
  518. cr[0] = p[3];
  519. p += 4;
  520. lum += 2;
  521. cb++;
  522. cr++;
  523. }
  524. p1 += src->linesize[0];
  525. lum1 += dst->linesize[0];
  526. p = p1;
  527. lum = lum1;
  528. for(x=0;x<width;x+=2) {
  529. lum[0] = p[0];
  530. lum[1] = p[2];
  531. p += 4;
  532. lum += 2;
  533. }
  534. p1 += src->linesize[0];
  535. lum1 += dst->linesize[0];
  536. cb1 += dst->linesize[1];
  537. cr1 += dst->linesize[2];
  538. }
  539. }
  540. static void yuv422_to_yuv422p(AVPicture *dst, AVPicture *src,
  541. int width, int height)
  542. {
  543. const uint8_t *p, *p1;
  544. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  545. int w;
  546. p1 = src->data[0];
  547. lum1 = dst->data[0];
  548. cb1 = dst->data[1];
  549. cr1 = dst->data[2];
  550. for(;height > 0; height--) {
  551. p = p1;
  552. lum = lum1;
  553. cb = cb1;
  554. cr = cr1;
  555. for(w = width; w >= 2; w -= 2) {
  556. lum[0] = p[0];
  557. cb[0] = p[1];
  558. lum[1] = p[2];
  559. cr[0] = p[3];
  560. p += 4;
  561. lum += 2;
  562. cb++;
  563. cr++;
  564. }
  565. p1 += src->linesize[0];
  566. lum1 += dst->linesize[0];
  567. cb1 += dst->linesize[1];
  568. cr1 += dst->linesize[2];
  569. }
  570. }
  571. static void yuv422p_to_yuv422(AVPicture *dst, AVPicture *src,
  572. int width, int height)
  573. {
  574. uint8_t *p, *p1;
  575. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  576. int w;
  577. p1 = dst->data[0];
  578. lum1 = src->data[0];
  579. cb1 = src->data[1];
  580. cr1 = src->data[2];
  581. for(;height > 0; height--) {
  582. p = p1;
  583. lum = lum1;
  584. cb = cb1;
  585. cr = cr1;
  586. for(w = width; w >= 2; w -= 2) {
  587. p[0] = lum[0];
  588. p[1] = cb[0];
  589. p[2] = lum[1];
  590. p[3] = cr[0];
  591. p += 4;
  592. lum += 2;
  593. cb++;
  594. cr++;
  595. }
  596. p1 += dst->linesize[0];
  597. lum1 += src->linesize[0];
  598. cb1 += src->linesize[1];
  599. cr1 += src->linesize[2];
  600. }
  601. }
  602. #define SCALEBITS 10
  603. #define ONE_HALF (1 << (SCALEBITS - 1))
  604. #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
  605. #define YUV_TO_RGB1_CCIR(cb1, cr1)\
  606. {\
  607. cb = (cb1) - 128;\
  608. cr = (cr1) - 128;\
  609. r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
  610. g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
  611. ONE_HALF;\
  612. b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
  613. }
  614. #define YUV_TO_RGB2_CCIR(r, g, b, y1)\
  615. {\
  616. y = ((y1) - 16) * FIX(255.0/219.0);\
  617. r = cm[(y + r_add) >> SCALEBITS];\
  618. g = cm[(y + g_add) >> SCALEBITS];\
  619. b = cm[(y + b_add) >> SCALEBITS];\
  620. }
  621. #define YUV_TO_RGB1(cb1, cr1)\
  622. {\
  623. cb = (cb1) - 128;\
  624. cr = (cr1) - 128;\
  625. r_add = FIX(1.40200) * cr + ONE_HALF;\
  626. g_add = - FIX(0.34414) * cb - FIX(0.71414) * cr + ONE_HALF;\
  627. b_add = FIX(1.77200) * cb + ONE_HALF;\
  628. }
  629. #define YUV_TO_RGB2(r, g, b, y1)\
  630. {\
  631. y = (y1) << SCALEBITS;\
  632. r = cm[(y + r_add) >> SCALEBITS];\
  633. g = cm[(y + g_add) >> SCALEBITS];\
  634. b = cm[(y + b_add) >> SCALEBITS];\
  635. }
  636. #define Y_CCIR_TO_JPEG(y)\
  637. cm[((y) * FIX(255.0/219.0) + (ONE_HALF - 16 * FIX(255.0/219.0))) >> SCALEBITS]
  638. #define Y_JPEG_TO_CCIR(y)\
  639. (((y) * FIX(219.0/255.0) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
  640. #define C_CCIR_TO_JPEG(y)\
  641. cm[(((y) - 128) * FIX(127.0/112.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS]
  642. /* NOTE: the clamp is really necessary! */
  643. #define C_JPEG_TO_CCIR(y)\
  644. ({\
  645. int __y;\
  646. __y = ((((y) - 128) * FIX(112.0/127.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS);\
  647. if (__y < 16)\
  648. __y = 16;\
  649. __y;\
  650. })
  651. #define RGB_TO_Y(r, g, b) \
  652. ((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
  653. FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)
  654. #define RGB_TO_U(r1, g1, b1, shift)\
  655. (((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
  656. FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  657. #define RGB_TO_V(r1, g1, b1, shift)\
  658. (((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
  659. FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  660. #define RGB_TO_Y_CCIR(r, g, b) \
  661. ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  662. FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
  663. #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
  664. (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
  665. FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  666. #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
  667. (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
  668. FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
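/* Added worked example, not part of the original source: with
 * SCALEBITS == 10, FIX(0.29900) == 306, FIX(0.58700) == 601 and
 * FIX(0.11400) == 117, which sum to exactly 1 << SCALEBITS. So
 * RGB_TO_Y(255, 255, 255) evaluates to (1024 * 255 + 512) >> 10 == 255:
 * white stays white despite the fixed-point rounding. The function is a
 * hypothetical sanity check. */
static int example_rgb_to_y_white(void)
{
    return RGB_TO_Y(255, 255, 255); /* 255 */
}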
  669. static uint8_t y_ccir_to_jpeg[256];
  670. static uint8_t y_jpeg_to_ccir[256];
  671. static uint8_t c_ccir_to_jpeg[256];
  672. static uint8_t c_jpeg_to_ccir[256];
  673. /* init various conversion tables */
  674. static void img_convert_init(void)
  675. {
  676. int i;
  677. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  678. for(i = 0;i < 256; i++) {
  679. y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
  680. y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
  681. c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
  682. c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
  683. }
  684. }
685. /* apply the given lookup table to each pixel */
  686. static void img_apply_table(uint8_t *dst, int dst_wrap,
  687. const uint8_t *src, int src_wrap,
  688. int width, int height, const uint8_t *table1)
  689. {
  690. int n;
  691. const uint8_t *s;
  692. uint8_t *d;
  693. const uint8_t *table;
  694. table = table1;
  695. for(;height > 0; height--) {
  696. s = src;
  697. d = dst;
  698. n = width;
  699. while (n >= 4) {
  700. d[0] = table[s[0]];
  701. d[1] = table[s[1]];
  702. d[2] = table[s[2]];
  703. d[3] = table[s[3]];
  704. d += 4;
  705. s += 4;
  706. n -= 4;
  707. }
  708. while (n > 0) {
  709. d[0] = table[s[0]];
  710. d++;
  711. s++;
  712. n--;
  713. }
  714. dst += dst_wrap;
  715. src += src_wrap;
  716. }
  717. }
  718. /* XXX: use generic filter ? */
  719. /* XXX: in most cases, the sampling position is incorrect */
  720. /* 4x1 -> 1x1 */
  721. static void shrink41(uint8_t *dst, int dst_wrap,
  722. const uint8_t *src, int src_wrap,
  723. int width, int height)
  724. {
  725. int w;
  726. const uint8_t *s;
  727. uint8_t *d;
  728. for(;height > 0; height--) {
  729. s = src;
  730. d = dst;
  731. for(w = width;w > 0; w--) {
  732. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  733. s += 4;
  734. d++;
  735. }
  736. src += src_wrap;
  737. dst += dst_wrap;
  738. }
  739. }
  740. /* 2x1 -> 1x1 */
  741. static void shrink21(uint8_t *dst, int dst_wrap,
  742. const uint8_t *src, int src_wrap,
  743. int width, int height)
  744. {
  745. int w;
  746. const uint8_t *s;
  747. uint8_t *d;
  748. for(;height > 0; height--) {
  749. s = src;
  750. d = dst;
  751. for(w = width;w > 0; w--) {
  752. d[0] = (s[0] + s[1]) >> 1;
  753. s += 2;
  754. d++;
  755. }
  756. src += src_wrap;
  757. dst += dst_wrap;
  758. }
  759. }
  760. /* 1x2 -> 1x1 */
  761. static void shrink12(uint8_t *dst, int dst_wrap,
  762. const uint8_t *src, int src_wrap,
  763. int width, int height)
  764. {
  765. int w;
  766. uint8_t *d;
  767. const uint8_t *s1, *s2;
  768. for(;height > 0; height--) {
  769. s1 = src;
  770. s2 = s1 + src_wrap;
  771. d = dst;
  772. for(w = width;w >= 4; w-=4) {
  773. d[0] = (s1[0] + s2[0]) >> 1;
  774. d[1] = (s1[1] + s2[1]) >> 1;
  775. d[2] = (s1[2] + s2[2]) >> 1;
  776. d[3] = (s1[3] + s2[3]) >> 1;
  777. s1 += 4;
  778. s2 += 4;
  779. d += 4;
  780. }
  781. for(;w > 0; w--) {
  782. d[0] = (s1[0] + s2[0]) >> 1;
  783. s1++;
  784. s2++;
  785. d++;
  786. }
  787. src += 2 * src_wrap;
  788. dst += dst_wrap;
  789. }
  790. }
  791. /* 2x2 -> 1x1 */
  792. static void shrink22(uint8_t *dst, int dst_wrap,
  793. const uint8_t *src, int src_wrap,
  794. int width, int height)
  795. {
  796. int w;
  797. const uint8_t *s1, *s2;
  798. uint8_t *d;
  799. for(;height > 0; height--) {
  800. s1 = src;
  801. s2 = s1 + src_wrap;
  802. d = dst;
  803. for(w = width;w >= 4; w-=4) {
  804. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  805. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  806. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  807. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  808. s1 += 8;
  809. s2 += 8;
  810. d += 4;
  811. }
  812. for(;w > 0; w--) {
  813. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  814. s1 += 2;
  815. s2 += 2;
  816. d++;
  817. }
  818. src += 2 * src_wrap;
  819. dst += dst_wrap;
  820. }
  821. }
  822. /* 4x4 -> 1x1 */
  823. static void shrink44(uint8_t *dst, int dst_wrap,
  824. const uint8_t *src, int src_wrap,
  825. int width, int height)
  826. {
  827. int w;
  828. const uint8_t *s1, *s2, *s3, *s4;
  829. uint8_t *d;
  830. for(;height > 0; height--) {
  831. s1 = src;
  832. s2 = s1 + src_wrap;
  833. s3 = s2 + src_wrap;
  834. s4 = s3 + src_wrap;
  835. d = dst;
  836. for(w = width;w > 0; w--) {
  837. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  838. s2[0] + s2[1] + s2[2] + s2[3] +
  839. s3[0] + s3[1] + s3[2] + s3[3] +
  840. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  841. s1 += 4;
  842. s2 += 4;
  843. s3 += 4;
  844. s4 += 4;
  845. d++;
  846. }
  847. src += 4 * src_wrap;
  848. dst += dst_wrap;
  849. }
  850. }
  851. static void grow21_line(uint8_t *dst, const uint8_t *src,
  852. int width)
  853. {
  854. int w;
  855. const uint8_t *s1;
  856. uint8_t *d;
  857. s1 = src;
  858. d = dst;
  859. for(w = width;w >= 4; w-=4) {
  860. d[1] = d[0] = s1[0];
  861. d[3] = d[2] = s1[1];
  862. s1 += 2;
  863. d += 4;
  864. }
  865. for(;w >= 2; w -= 2) {
  866. d[1] = d[0] = s1[0];
  867. s1 ++;
  868. d += 2;
  869. }
  870. /* only needed if width is not a multiple of two */
871. /* XXX: verify that */
  872. if (w) {
  873. d[0] = s1[0];
  874. }
  875. }
  876. static void grow41_line(uint8_t *dst, const uint8_t *src,
  877. int width)
  878. {
  879. int w, v;
  880. const uint8_t *s1;
  881. uint8_t *d;
  882. s1 = src;
  883. d = dst;
  884. for(w = width;w >= 4; w-=4) {
  885. v = s1[0];
  886. d[0] = v;
  887. d[1] = v;
  888. d[2] = v;
  889. d[3] = v;
  890. s1 ++;
  891. d += 4;
  892. }
  893. }
  894. /* 1x1 -> 2x1 */
  895. static void grow21(uint8_t *dst, int dst_wrap,
  896. const uint8_t *src, int src_wrap,
  897. int width, int height)
  898. {
  899. for(;height > 0; height--) {
  900. grow21_line(dst, src, width);
  901. src += src_wrap;
  902. dst += dst_wrap;
  903. }
  904. }
  905. /* 1x1 -> 2x2 */
  906. static void grow22(uint8_t *dst, int dst_wrap,
  907. const uint8_t *src, int src_wrap,
  908. int width, int height)
  909. {
  910. for(;height > 0; height--) {
  911. grow21_line(dst, src, width);
  912. if (height%2)
  913. src += src_wrap;
  914. dst += dst_wrap;
  915. }
  916. }
  917. /* 1x1 -> 4x1 */
  918. static void grow41(uint8_t *dst, int dst_wrap,
  919. const uint8_t *src, int src_wrap,
  920. int width, int height)
  921. {
  922. for(;height > 0; height--) {
  923. grow41_line(dst, src, width);
  924. src += src_wrap;
  925. dst += dst_wrap;
  926. }
  927. }
  928. /* 1x1 -> 4x4 */
  929. static void grow44(uint8_t *dst, int dst_wrap,
  930. const uint8_t *src, int src_wrap,
  931. int width, int height)
  932. {
  933. for(;height > 0; height--) {
  934. grow41_line(dst, src, width);
  935. if ((height & 3) == 1)
  936. src += src_wrap;
  937. dst += dst_wrap;
  938. }
  939. }
  940. /* 1x2 -> 2x1 */
  941. static void conv411(uint8_t *dst, int dst_wrap,
  942. const uint8_t *src, int src_wrap,
  943. int width, int height)
  944. {
  945. int w, c;
  946. const uint8_t *s1, *s2;
  947. uint8_t *d;
  948. width>>=1;
  949. for(;height > 0; height--) {
  950. s1 = src;
  951. s2 = src + src_wrap;
  952. d = dst;
  953. for(w = width;w > 0; w--) {
  954. c = (s1[0] + s2[0]) >> 1;
  955. d[0] = c;
  956. d[1] = c;
  957. s1++;
  958. s2++;
  959. d += 2;
  960. }
  961. src += src_wrap * 2;
  962. dst += dst_wrap;
  963. }
  964. }
  965. /* XXX: add jpeg quantize code */
  966. #define TRANSP_INDEX (6*6*6)
967. /* this may be slow, but it allows for extensions */
  968. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  969. {
  970. return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
  971. }
  972. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  973. {
  974. uint32_t *pal;
  975. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  976. int i, r, g, b;
  977. pal = (uint32_t *)palette;
  978. i = 0;
  979. for(r = 0; r < 6; r++) {
  980. for(g = 0; g < 6; g++) {
  981. for(b = 0; b < 6; b++) {
  982. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  983. (pal_value[g] << 8) | pal_value[b];
  984. }
  985. }
  986. }
  987. if (has_alpha)
  988. pal[i++] = 0;
  989. while (i < 256)
  990. pal[i++] = 0xff000000;
  991. }
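/* Added illustrative sketch, not part of the original source: the
 * palette is a 6x6x6 colour cube laid out in the same order that
 * gif_clut_index() quantises a pixel, so a colour lookup is a single
 * table access; white quantises to index 5*36 + 5*6 + 5 == 215, whose
 * entry is 0xffffffff. The helper below is hypothetical. */
static uint32_t example_palette_lookup(const uint8_t *palette,
                                       uint8_t r, uint8_t g, uint8_t b)
{
    const uint32_t *pal = (const uint32_t *)palette;
    return pal[gif_clut_index(r, g, b)];
}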
  992. /* copy bit n to bits 0 ... n - 1 */
  993. static inline unsigned int bitcopy_n(unsigned int a, int n)
  994. {
  995. int mask;
  996. mask = (1 << n) - 1;
  997. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  998. }
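/* Added worked example, not part of the original source: replicating
 * bit n into the vacated low bits lets a 5-bit component reach the full
 * 0..255 range. For a packed rgb555 pixel v, a red field of 31
 * (v == 0x7c00) expands to 0xf8 | 0x07 == 0xff, while a red field of 16
 * (v == 0x4000) expands to 0x80. The helper is hypothetical. */
static unsigned int example_expand_rgb555_red(unsigned int v)
{
    return bitcopy_n(v >> (10 - 3), 3);
}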
  999. /* rgb555 handling */
  1000. #define RGB_NAME rgb555
  1001. #define RGB_IN(r, g, b, s)\
  1002. {\
  1003. unsigned int v = ((const uint16_t *)(s))[0];\
  1004. r = bitcopy_n(v >> (10 - 3), 3);\
  1005. g = bitcopy_n(v >> (5 - 3), 3);\
  1006. b = bitcopy_n(v << 3, 3);\
  1007. }
  1008. #define RGBA_IN(r, g, b, a, s)\
  1009. {\
  1010. unsigned int v = ((const uint16_t *)(s))[0];\
  1011. r = bitcopy_n(v >> (10 - 3), 3);\
  1012. g = bitcopy_n(v >> (5 - 3), 3);\
  1013. b = bitcopy_n(v << 3, 3);\
  1014. a = bitcopy_n(v >> 15, 7);\
  1015. }
  1016. #define RGBA_OUT(d, r, g, b, a)\
  1017. {\
  1018. ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3) | \
  1019. ((a << 8) & 0x8000);\
  1020. }
  1021. #define BPP 2
  1022. #include "imgconvert_template.h"
  1023. /* rgb565 handling */
  1024. #define RGB_NAME rgb565
  1025. #define RGB_IN(r, g, b, s)\
  1026. {\
  1027. unsigned int v = ((const uint16_t *)(s))[0];\
  1028. r = bitcopy_n(v >> (11 - 3), 3);\
  1029. g = bitcopy_n(v >> (5 - 2), 2);\
  1030. b = bitcopy_n(v << 3, 3);\
  1031. }
  1032. #define RGB_OUT(d, r, g, b)\
  1033. {\
  1034. ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
  1035. }
  1036. #define BPP 2
  1037. #include "imgconvert_template.h"
  1038. /* bgr24 handling */
  1039. #define RGB_NAME bgr24
  1040. #define RGB_IN(r, g, b, s)\
  1041. {\
  1042. b = (s)[0];\
  1043. g = (s)[1];\
  1044. r = (s)[2];\
  1045. }
  1046. #define RGB_OUT(d, r, g, b)\
  1047. {\
  1048. (d)[0] = b;\
  1049. (d)[1] = g;\
  1050. (d)[2] = r;\
  1051. }
  1052. #define BPP 3
  1053. #include "imgconvert_template.h"
  1054. #undef RGB_IN
  1055. #undef RGB_OUT
  1056. #undef BPP
  1057. /* rgb24 handling */
  1058. #define RGB_NAME rgb24
  1059. #define FMT_RGB24
  1060. #define RGB_IN(r, g, b, s)\
  1061. {\
  1062. r = (s)[0];\
  1063. g = (s)[1];\
  1064. b = (s)[2];\
  1065. }
  1066. #define RGB_OUT(d, r, g, b)\
  1067. {\
  1068. (d)[0] = r;\
  1069. (d)[1] = g;\
  1070. (d)[2] = b;\
  1071. }
  1072. #define BPP 3
  1073. #include "imgconvert_template.h"
  1074. /* rgba32 handling */
  1075. #define RGB_NAME rgba32
  1076. #define FMT_RGBA32
  1077. #define RGB_IN(r, g, b, s)\
  1078. {\
  1079. unsigned int v = ((const uint32_t *)(s))[0];\
  1080. r = (v >> 16) & 0xff;\
  1081. g = (v >> 8) & 0xff;\
  1082. b = v & 0xff;\
  1083. }
  1084. #define RGBA_IN(r, g, b, a, s)\
  1085. {\
  1086. unsigned int v = ((const uint32_t *)(s))[0];\
  1087. a = (v >> 24) & 0xff;\
  1088. r = (v >> 16) & 0xff;\
  1089. g = (v >> 8) & 0xff;\
  1090. b = v & 0xff;\
  1091. }
  1092. #define RGBA_OUT(d, r, g, b, a)\
  1093. {\
  1094. ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
  1095. }
  1096. #define BPP 4
  1097. #include "imgconvert_template.h"
  1098. static void mono_to_gray(AVPicture *dst, AVPicture *src,
  1099. int width, int height, int xor_mask)
  1100. {
  1101. const unsigned char *p;
  1102. unsigned char *q;
  1103. int v, dst_wrap, src_wrap;
  1104. int y, w;
  1105. p = src->data[0];
  1106. src_wrap = src->linesize[0] - ((width + 7) >> 3);
  1107. q = dst->data[0];
  1108. dst_wrap = dst->linesize[0] - width;
  1109. for(y=0;y<height;y++) {
  1110. w = width;
  1111. while (w >= 8) {
  1112. v = *p++ ^ xor_mask;
  1113. q[0] = -(v >> 7);
  1114. q[1] = -((v >> 6) & 1);
  1115. q[2] = -((v >> 5) & 1);
  1116. q[3] = -((v >> 4) & 1);
  1117. q[4] = -((v >> 3) & 1);
  1118. q[5] = -((v >> 2) & 1);
  1119. q[6] = -((v >> 1) & 1);
  1120. q[7] = -((v >> 0) & 1);
  1121. w -= 8;
  1122. q += 8;
  1123. }
  1124. if (w > 0) {
  1125. v = *p++ ^ xor_mask;
  1126. do {
  1127. q[0] = -((v >> 7) & 1);
  1128. q++;
  1129. v <<= 1;
  1130. } while (--w);
  1131. }
  1132. p += src_wrap;
  1133. q += dst_wrap;
  1134. }
  1135. }
  1136. static void monowhite_to_gray(AVPicture *dst, AVPicture *src,
  1137. int width, int height)
  1138. {
  1139. mono_to_gray(dst, src, width, height, 0xff);
  1140. }
  1141. static void monoblack_to_gray(AVPicture *dst, AVPicture *src,
  1142. int width, int height)
  1143. {
  1144. mono_to_gray(dst, src, width, height, 0x00);
  1145. }
  1146. static void gray_to_mono(AVPicture *dst, AVPicture *src,
  1147. int width, int height, int xor_mask)
  1148. {
  1149. int n;
  1150. const uint8_t *s;
  1151. uint8_t *d;
  1152. int j, b, v, n1, src_wrap, dst_wrap, y;
  1153. s = src->data[0];
  1154. src_wrap = src->linesize[0] - width;
  1155. d = dst->data[0];
  1156. dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
  1157. for(y=0;y<height;y++) {
  1158. n = width;
  1159. while (n >= 8) {
  1160. v = 0;
  1161. for(j=0;j<8;j++) {
  1162. b = s[0];
  1163. s++;
  1164. v = (v << 1) | (b >> 7);
  1165. }
  1166. d[0] = v ^ xor_mask;
  1167. d++;
  1168. n -= 8;
  1169. }
  1170. if (n > 0) {
  1171. n1 = n;
  1172. v = 0;
  1173. while (n > 0) {
  1174. b = s[0];
  1175. s++;
  1176. v = (v << 1) | (b >> 7);
  1177. n--;
  1178. }
  1179. d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
  1180. d++;
  1181. }
  1182. s += src_wrap;
  1183. d += dst_wrap;
  1184. }
  1185. }
  1186. static void gray_to_monowhite(AVPicture *dst, AVPicture *src,
  1187. int width, int height)
  1188. {
  1189. gray_to_mono(dst, src, width, height, 0xff);
  1190. }
  1191. static void gray_to_monoblack(AVPicture *dst, AVPicture *src,
  1192. int width, int height)
  1193. {
  1194. gray_to_mono(dst, src, width, height, 0x00);
  1195. }
  1196. typedef struct ConvertEntry {
  1197. void (*convert)(AVPicture *dst, AVPicture *src, int width, int height);
  1198. } ConvertEntry;
1199. /* Add each new conversion function in this table. In order to be able
  1200. to convert from any format to any format, the following constraints
  1201. must be satisfied:
  1202. - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
  1203. - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
  1204. - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGBA32
  1205. - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
  1206. PIX_FMT_RGB24.
1207. - PIX_FMT_YUV422 must convert to and from PIX_FMT_YUV422P.
  1208. The other conversion functions are just optimisations for common cases.
  1209. */
  1210. static ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
  1211. [PIX_FMT_YUV420P] = {
  1212. [PIX_FMT_RGB555] = {
  1213. .convert = yuv420p_to_rgb555
  1214. },
  1215. [PIX_FMT_RGB565] = {
  1216. .convert = yuv420p_to_rgb565
  1217. },
  1218. [PIX_FMT_BGR24] = {
  1219. .convert = yuv420p_to_bgr24
  1220. },
  1221. [PIX_FMT_RGB24] = {
  1222. .convert = yuv420p_to_rgb24
  1223. },
  1224. [PIX_FMT_RGBA32] = {
  1225. .convert = yuv420p_to_rgba32
  1226. },
  1227. },
  1228. [PIX_FMT_YUV422P] = {
  1229. [PIX_FMT_YUV422] = {
  1230. .convert = yuv422p_to_yuv422,
  1231. },
  1232. },
  1233. [PIX_FMT_YUV444P] = {
  1234. [PIX_FMT_RGB24] = {
  1235. .convert = yuv444p_to_rgb24
  1236. },
  1237. },
  1238. [PIX_FMT_YUVJ420P] = {
  1239. [PIX_FMT_RGB555] = {
  1240. .convert = yuvj420p_to_rgb555
  1241. },
  1242. [PIX_FMT_RGB565] = {
  1243. .convert = yuvj420p_to_rgb565
  1244. },
  1245. [PIX_FMT_BGR24] = {
  1246. .convert = yuvj420p_to_bgr24
  1247. },
  1248. [PIX_FMT_RGB24] = {
  1249. .convert = yuvj420p_to_rgb24
  1250. },
  1251. [PIX_FMT_RGBA32] = {
  1252. .convert = yuvj420p_to_rgba32
  1253. },
  1254. },
  1255. [PIX_FMT_YUVJ444P] = {
  1256. [PIX_FMT_RGB24] = {
  1257. .convert = yuvj444p_to_rgb24
  1258. },
  1259. },
  1260. [PIX_FMT_YUV422] = {
  1261. [PIX_FMT_YUV420P] = {
  1262. .convert = yuv422_to_yuv420p,
  1263. },
  1264. [PIX_FMT_YUV422P] = {
  1265. .convert = yuv422_to_yuv422p,
  1266. },
  1267. },
  1268. [PIX_FMT_RGB24] = {
  1269. [PIX_FMT_YUV420P] = {
  1270. .convert = rgb24_to_yuv420p
  1271. },
  1272. [PIX_FMT_RGB565] = {
  1273. .convert = rgb24_to_rgb565
  1274. },
  1275. [PIX_FMT_RGB555] = {
  1276. .convert = rgb24_to_rgb555
  1277. },
  1278. [PIX_FMT_RGBA32] = {
  1279. .convert = rgb24_to_rgba32
  1280. },
  1281. [PIX_FMT_BGR24] = {
  1282. .convert = rgb24_to_bgr24
  1283. },
  1284. [PIX_FMT_GRAY8] = {
  1285. .convert = rgb24_to_gray
  1286. },
  1287. [PIX_FMT_PAL8] = {
  1288. .convert = rgb24_to_pal8
  1289. },
  1290. [PIX_FMT_YUV444P] = {
  1291. .convert = rgb24_to_yuv444p
  1292. },
  1293. [PIX_FMT_YUVJ420P] = {
  1294. .convert = rgb24_to_yuvj420p
  1295. },
  1296. [PIX_FMT_YUVJ444P] = {
  1297. .convert = rgb24_to_yuvj444p
  1298. },
  1299. },
  1300. [PIX_FMT_RGBA32] = {
  1301. [PIX_FMT_RGB24] = {
  1302. .convert = rgba32_to_rgb24
  1303. },
  1304. [PIX_FMT_RGB555] = {
  1305. .convert = rgba32_to_rgb555
  1306. },
  1307. [PIX_FMT_PAL8] = {
  1308. .convert = rgba32_to_pal8
  1309. },
  1310. [PIX_FMT_YUV420P] = {
  1311. .convert = rgba32_to_yuv420p
  1312. },
  1313. [PIX_FMT_GRAY8] = {
  1314. .convert = rgba32_to_gray
  1315. },
  1316. },
  1317. [PIX_FMT_BGR24] = {
  1318. [PIX_FMT_RGB24] = {
  1319. .convert = bgr24_to_rgb24
  1320. },
  1321. [PIX_FMT_YUV420P] = {
  1322. .convert = bgr24_to_yuv420p
  1323. },
  1324. [PIX_FMT_GRAY8] = {
  1325. .convert = bgr24_to_gray
  1326. },
  1327. },
  1328. [PIX_FMT_RGB555] = {
  1329. [PIX_FMT_RGB24] = {
  1330. .convert = rgb555_to_rgb24
  1331. },
  1332. [PIX_FMT_RGBA32] = {
  1333. .convert = rgb555_to_rgba32
  1334. },
  1335. [PIX_FMT_YUV420P] = {
  1336. .convert = rgb555_to_yuv420p
  1337. },
  1338. [PIX_FMT_GRAY8] = {
  1339. .convert = rgb555_to_gray
  1340. },
  1341. },
  1342. [PIX_FMT_RGB565] = {
  1343. [PIX_FMT_RGB24] = {
  1344. .convert = rgb565_to_rgb24
  1345. },
  1346. [PIX_FMT_YUV420P] = {
  1347. .convert = rgb565_to_yuv420p
  1348. },
  1349. [PIX_FMT_GRAY8] = {
  1350. .convert = rgb565_to_gray
  1351. },
  1352. },
  1353. [PIX_FMT_GRAY8] = {
  1354. [PIX_FMT_RGB555] = {
  1355. .convert = gray_to_rgb555
  1356. },
  1357. [PIX_FMT_RGB565] = {
  1358. .convert = gray_to_rgb565
  1359. },
  1360. [PIX_FMT_RGB24] = {
  1361. .convert = gray_to_rgb24
  1362. },
  1363. [PIX_FMT_BGR24] = {
  1364. .convert = gray_to_bgr24
  1365. },
  1366. [PIX_FMT_RGBA32] = {
  1367. .convert = gray_to_rgba32
  1368. },
  1369. [PIX_FMT_MONOWHITE] = {
  1370. .convert = gray_to_monowhite
  1371. },
  1372. [PIX_FMT_MONOBLACK] = {
  1373. .convert = gray_to_monoblack
  1374. },
  1375. },
  1376. [PIX_FMT_MONOWHITE] = {
  1377. [PIX_FMT_GRAY8] = {
  1378. .convert = monowhite_to_gray
  1379. },
  1380. },
  1381. [PIX_FMT_MONOBLACK] = {
  1382. [PIX_FMT_GRAY8] = {
  1383. .convert = monoblack_to_gray
  1384. },
  1385. },
  1386. [PIX_FMT_PAL8] = {
  1387. [PIX_FMT_RGB555] = {
  1388. .convert = pal8_to_rgb555
  1389. },
  1390. [PIX_FMT_RGB565] = {
  1391. .convert = pal8_to_rgb565
  1392. },
  1393. [PIX_FMT_BGR24] = {
  1394. .convert = pal8_to_bgr24
  1395. },
  1396. [PIX_FMT_RGB24] = {
  1397. .convert = pal8_to_rgb24
  1398. },
  1399. [PIX_FMT_RGBA32] = {
  1400. .convert = pal8_to_rgba32
  1401. },
  1402. },
  1403. };
  1404. static int avpicture_alloc(AVPicture *picture,
  1405. int pix_fmt, int width, int height)
  1406. {
1407. int size; /* signed: avpicture_get_size() returns -1 on error */
  1408. void *ptr;
  1409. size = avpicture_get_size(pix_fmt, width, height);
  1410. if (size < 0)
  1411. goto fail;
  1412. ptr = av_malloc(size);
  1413. if (!ptr)
  1414. goto fail;
  1415. avpicture_fill(picture, ptr, pix_fmt, width, height);
  1416. return 0;
  1417. fail:
  1418. memset(picture, 0, sizeof(AVPicture));
  1419. return -1;
  1420. }
  1421. static void avpicture_free(AVPicture *picture)
  1422. {
  1423. av_free(picture->data[0]);
  1424. }
  1425. /* return true if yuv planar */
  1426. static inline int is_yuv_planar(PixFmtInfo *ps)
  1427. {
  1428. return (ps->color_type == FF_COLOR_YUV ||
  1429. ps->color_type == FF_COLOR_YUV_JPEG) &&
  1430. ps->pixel_type == FF_PIXEL_PLANAR;
  1431. }
  1432. /* XXX: always use linesize. Return -1 if not supported */
  1433. int img_convert(AVPicture *dst, int dst_pix_fmt,
  1434. AVPicture *src, int src_pix_fmt,
  1435. int src_width, int src_height)
  1436. {
  1437. static int inited;
  1438. int i, ret, dst_width, dst_height, int_pix_fmt;
  1439. PixFmtInfo *src_pix, *dst_pix;
  1440. ConvertEntry *ce;
  1441. AVPicture tmp1, *tmp = &tmp1;
  1442. if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
  1443. dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
  1444. return -1;
  1445. if (src_width <= 0 || src_height <= 0)
  1446. return 0;
  1447. if (!inited) {
  1448. inited = 1;
  1449. img_convert_init();
  1450. }
  1451. dst_width = src_width;
  1452. dst_height = src_height;
  1453. dst_pix = &pix_fmt_info[dst_pix_fmt];
  1454. src_pix = &pix_fmt_info[src_pix_fmt];
  1455. if (src_pix_fmt == dst_pix_fmt) {
  1456. /* no conversion needed: just copy */
  1457. img_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
  1458. return 0;
  1459. }
  1460. ce = &convert_table[src_pix_fmt][dst_pix_fmt];
  1461. if (ce->convert) {
1462. /* specific conversion routine */
  1463. ce->convert(dst, src, dst_width, dst_height);
  1464. return 0;
  1465. }
  1466. /* gray to YUV */
  1467. if (is_yuv_planar(dst_pix) &&
  1468. src_pix_fmt == PIX_FMT_GRAY8) {
  1469. int w, h, y;
  1470. uint8_t *d;
  1471. if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
  1472. img_copy_plane(dst->data[0], dst->linesize[0],
  1473. src->data[0], src->linesize[0],
  1474. dst_width, dst_height);
  1475. } else {
  1476. img_apply_table(dst->data[0], dst->linesize[0],
  1477. src->data[0], src->linesize[0],
  1478. dst_width, dst_height,
  1479. y_jpeg_to_ccir);
  1480. }
  1481. /* fill U and V with 128 */
  1482. w = dst_width;
  1483. h = dst_height;
  1484. w >>= dst_pix->x_chroma_shift;
  1485. h >>= dst_pix->y_chroma_shift;
  1486. for(i = 1; i <= 2; i++) {
  1487. d = dst->data[i];
  1488. for(y = 0; y< h; y++) {
  1489. memset(d, 128, w);
  1490. d += dst->linesize[i];
  1491. }
  1492. }
  1493. return 0;
  1494. }
  1495. /* YUV to gray */
  1496. if (is_yuv_planar(src_pix) &&
  1497. dst_pix_fmt == PIX_FMT_GRAY8) {
  1498. if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
  1499. img_copy_plane(dst->data[0], dst->linesize[0],
  1500. src->data[0], src->linesize[0],
  1501. dst_width, dst_height);
  1502. } else {
  1503. img_apply_table(dst->data[0], dst->linesize[0],
  1504. src->data[0], src->linesize[0],
  1505. dst_width, dst_height,
  1506. y_ccir_to_jpeg);
  1507. }
  1508. return 0;
  1509. }
  1510. /* YUV to YUV planar */
  1511. if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
  1512. int x_shift, y_shift, w, h, xy_shift;
  1513. void (*resize_func)(uint8_t *dst, int dst_wrap,
  1514. const uint8_t *src, int src_wrap,
  1515. int width, int height);
  1516. /* compute chroma size of the smallest dimensions */
  1517. w = dst_width;
  1518. h = dst_height;
  1519. if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
  1520. w >>= dst_pix->x_chroma_shift;
  1521. else
  1522. w >>= src_pix->x_chroma_shift;
  1523. if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
  1524. h >>= dst_pix->y_chroma_shift;
  1525. else
  1526. h >>= src_pix->y_chroma_shift;
  1527. x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
  1528. y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
  1529. xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
  1530. /* there must be filters for conversion at least from and to
  1531. YUV444 format */
  1532. switch(xy_shift) {
  1533. case 0x00:
  1534. resize_func = img_copy_plane;
  1535. break;
  1536. case 0x10:
  1537. resize_func = shrink21;
  1538. break;
  1539. case 0x20:
  1540. resize_func = shrink41;
  1541. break;
  1542. case 0x01:
  1543. resize_func = shrink12;
  1544. break;
  1545. case 0x11:
  1546. resize_func = shrink22;
  1547. break;
  1548. case 0x22:
  1549. resize_func = shrink44;
  1550. break;
  1551. case 0xf0:
  1552. resize_func = grow21;
  1553. break;
  1554. case 0xe0:
  1555. resize_func = grow41;
  1556. break;
  1557. case 0xff:
  1558. resize_func = grow22;
  1559. break;
  1560. case 0xee:
  1561. resize_func = grow44;
  1562. break;
  1563. case 0xf1:
  1564. resize_func = conv411;
  1565. break;
  1566. default:
  1567. /* currently not handled */
  1568. goto no_chroma_filter;
  1569. }
  1570. img_copy_plane(dst->data[0], dst->linesize[0],
  1571. src->data[0], src->linesize[0],
  1572. dst_width, dst_height);
  1573. for(i = 1;i <= 2; i++)
  1574. resize_func(dst->data[i], dst->linesize[i],
  1575. src->data[i], src->linesize[i],
  1576. dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
  1577. /* if yuv color space conversion is needed, we do it here on
  1578. the destination image */
  1579. if (dst_pix->color_type != src_pix->color_type) {
  1580. const uint8_t *y_table, *c_table;
  1581. if (dst_pix->color_type == FF_COLOR_YUV) {
  1582. y_table = y_jpeg_to_ccir;
  1583. c_table = c_jpeg_to_ccir;
  1584. } else {
  1585. y_table = y_ccir_to_jpeg;
  1586. c_table = c_ccir_to_jpeg;
  1587. }
  1588. img_apply_table(dst->data[0], dst->linesize[0],
  1589. dst->data[0], dst->linesize[0],
  1590. dst_width, dst_height,
  1591. y_table);
  1592. for(i = 1;i <= 2; i++)
  1593. img_apply_table(dst->data[i], dst->linesize[i],
  1594. dst->data[i], dst->linesize[i],
  1595. dst_width>>dst_pix->x_chroma_shift,
  1596. dst_height>>dst_pix->y_chroma_shift,
  1597. c_table);
  1598. }
  1599. return 0;
  1600. }
  1601. no_chroma_filter:
  1602. /* try to use an intermediate format */
  1603. if (src_pix_fmt == PIX_FMT_YUV422 ||
  1604. dst_pix_fmt == PIX_FMT_YUV422) {
  1605. /* specific case: convert to YUV422P first */
  1606. int_pix_fmt = PIX_FMT_YUV422P;
  1607. } else if ((src_pix->color_type == FF_COLOR_GRAY &&
  1608. src_pix_fmt != PIX_FMT_GRAY8) ||
  1609. (dst_pix->color_type == FF_COLOR_GRAY &&
  1610. dst_pix_fmt != PIX_FMT_GRAY8)) {
  1611. /* gray8 is the normalized format */
  1612. int_pix_fmt = PIX_FMT_GRAY8;
  1613. } else if ((is_yuv_planar(src_pix) &&
  1614. src_pix_fmt != PIX_FMT_YUV444P &&
  1615. src_pix_fmt != PIX_FMT_YUVJ444P)) {
  1616. /* yuv444 is the normalized format */
  1617. if (src_pix->color_type == FF_COLOR_YUV_JPEG)
  1618. int_pix_fmt = PIX_FMT_YUVJ444P;
  1619. else
  1620. int_pix_fmt = PIX_FMT_YUV444P;
  1621. } else if ((is_yuv_planar(dst_pix) &&
  1622. dst_pix_fmt != PIX_FMT_YUV444P &&
  1623. dst_pix_fmt != PIX_FMT_YUVJ444P)) {
  1624. /* yuv444 is the normalized format */
  1625. if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
  1626. int_pix_fmt = PIX_FMT_YUVJ444P;
  1627. else
  1628. int_pix_fmt = PIX_FMT_YUV444P;
  1629. } else {
  1630. /* the two formats are rgb or gray8 or yuv[j]444p */
  1631. if (src_pix->is_alpha && dst_pix->is_alpha)
  1632. int_pix_fmt = PIX_FMT_RGBA32;
  1633. else
  1634. int_pix_fmt = PIX_FMT_RGB24;
  1635. }
  1636. if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
  1637. return -1;
  1638. ret = -1;
  1639. if (img_convert(tmp, int_pix_fmt,
  1640. src, src_pix_fmt, src_width, src_height) < 0)
  1641. goto fail1;
  1642. if (img_convert(dst, dst_pix_fmt,
  1643. tmp, int_pix_fmt, dst_width, dst_height) < 0)
  1644. goto fail1;
  1645. ret = 0;
  1646. fail1:
  1647. avpicture_free(tmp);
  1648. return ret;
  1649. }
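/* Added illustrative sketch, not part of the original source: a typical
 * post-decode use, converting a yuv420p frame into a freshly allocated
 * rgb24 picture of the same size. On success the caller owns
 * dst->data[0] and releases it with av_free(). The helper name is
 * hypothetical. */
static int example_yuv420p_to_rgb24(AVPicture *dst, AVPicture *src,
                                    int width, int height)
{
    if (avpicture_alloc(dst, PIX_FMT_RGB24, width, height) < 0)
        return -1;
    if (img_convert(dst, PIX_FMT_RGB24, src, PIX_FMT_YUV420P,
                    width, height) < 0) {
        avpicture_free(dst);
        return -1;
    }
    return 0;
}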
  1650. #ifdef HAVE_MMX
  1651. #define DEINT_INPLACE_LINE_LUM \
  1652. movd_m2r(lum_m4[0],mm0);\
  1653. movd_m2r(lum_m3[0],mm1);\
  1654. movd_m2r(lum_m2[0],mm2);\
  1655. movd_m2r(lum_m1[0],mm3);\
  1656. movd_m2r(lum[0],mm4);\
  1657. punpcklbw_r2r(mm7,mm0);\
  1658. movd_r2m(mm2,lum_m4[0]);\
  1659. punpcklbw_r2r(mm7,mm1);\
  1660. punpcklbw_r2r(mm7,mm2);\
  1661. punpcklbw_r2r(mm7,mm3);\
  1662. punpcklbw_r2r(mm7,mm4);\
  1663. paddw_r2r(mm3,mm1);\
  1664. psllw_i2r(1,mm2);\
  1665. paddw_r2r(mm4,mm0);\
  1666. psllw_i2r(2,mm1);\
  1667. paddw_r2r(mm6,mm2);\
  1668. paddw_r2r(mm2,mm1);\
  1669. psubusw_r2r(mm0,mm1);\
  1670. psrlw_i2r(3,mm1);\
  1671. packuswb_r2r(mm7,mm1);\
  1672. movd_r2m(mm1,lum_m2[0]);
  1673. #define DEINT_LINE_LUM \
  1674. movd_m2r(lum_m4[0],mm0);\
  1675. movd_m2r(lum_m3[0],mm1);\
  1676. movd_m2r(lum_m2[0],mm2);\
  1677. movd_m2r(lum_m1[0],mm3);\
  1678. movd_m2r(lum[0],mm4);\
  1679. punpcklbw_r2r(mm7,mm0);\
  1680. punpcklbw_r2r(mm7,mm1);\
  1681. punpcklbw_r2r(mm7,mm2);\
  1682. punpcklbw_r2r(mm7,mm3);\
  1683. punpcklbw_r2r(mm7,mm4);\
  1684. paddw_r2r(mm3,mm1);\
  1685. psllw_i2r(1,mm2);\
  1686. paddw_r2r(mm4,mm0);\
  1687. psllw_i2r(2,mm1);\
  1688. paddw_r2r(mm6,mm2);\
  1689. paddw_r2r(mm2,mm1);\
  1690. psubusw_r2r(mm0,mm1);\
  1691. psrlw_i2r(3,mm1);\
  1692. packuswb_r2r(mm7,mm1);\
  1693. movd_r2m(mm1,dst[0]);
  1694. #endif
  1695. /* filter parameters: [-1 4 2 4 -1] // 8 */
  1696. static void deinterlace_line(uint8_t *dst, uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
  1697. int size)
  1698. {
  1699. #ifndef HAVE_MMX
  1700. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  1701. int sum;
  1702. for(;size > 0;size--) {
  1703. sum = -lum_m4[0];
  1704. sum += lum_m3[0] << 2;
  1705. sum += lum_m2[0] << 1;
  1706. sum += lum_m1[0] << 2;
  1707. sum += -lum[0];
  1708. dst[0] = cm[(sum + 4) >> 3];
  1709. lum_m4++;
  1710. lum_m3++;
  1711. lum_m2++;
  1712. lum_m1++;
  1713. lum++;
  1714. dst++;
  1715. }
  1716. #else
  1717. {
  1718. mmx_t rounder;
  1719. rounder.uw[0]=4;
  1720. rounder.uw[1]=4;
  1721. rounder.uw[2]=4;
  1722. rounder.uw[3]=4;
  1723. pxor_r2r(mm7,mm7);
  1724. movq_m2r(rounder,mm6);
  1725. }
  1726. for (;size > 3; size-=4) {
  1727. DEINT_LINE_LUM
  1728. lum_m4+=4;
  1729. lum_m3+=4;
  1730. lum_m2+=4;
  1731. lum_m1+=4;
  1732. lum+=4;
  1733. dst+=4;
  1734. }
  1735. #endif
  1736. }
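/* Added worked example, not part of the original source: the taps sum
 * to 8 and the +4 implements rounding, so for a constant input value v
 * the filter yields (-v + 4*v + 2*v + 4*v - v + 4) >> 3 == v and flat
 * areas pass through unchanged. */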
  1737. static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
  1738. int size)
  1739. {
  1740. #ifndef HAVE_MMX
  1741. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  1742. int sum;
  1743. for(;size > 0;size--) {
  1744. sum = -lum_m4[0];
  1745. sum += lum_m3[0] << 2;
  1746. sum += lum_m2[0] << 1;
  1747. lum_m4[0]=lum_m2[0];
  1748. sum += lum_m1[0] << 2;
  1749. sum += -lum[0];
  1750. lum_m2[0] = cm[(sum + 4) >> 3];
  1751. lum_m4++;
  1752. lum_m3++;
  1753. lum_m2++;
  1754. lum_m1++;
  1755. lum++;
  1756. }
  1757. #else
  1758. {
  1759. mmx_t rounder;
  1760. rounder.uw[0]=4;
  1761. rounder.uw[1]=4;
  1762. rounder.uw[2]=4;
  1763. rounder.uw[3]=4;
  1764. pxor_r2r(mm7,mm7);
  1765. movq_m2r(rounder,mm6);
  1766. }
  1767. for (;size > 3; size-=4) {
  1768. DEINT_INPLACE_LINE_LUM
  1769. lum_m4+=4;
  1770. lum_m3+=4;
  1771. lum_m2+=4;
  1772. lum_m1+=4;
  1773. lum+=4;
  1774. }
  1775. #endif
  1776. }
1777. /* deinterlacing: linear filter with 2 temporal taps and 3 spatial taps. The
  1778. top field is copied as is, but the bottom field is deinterlaced
  1779. against the top field. */
  1780. static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
  1781. uint8_t *src1, int src_wrap,
  1782. int width, int height)
  1783. {
  1784. uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
  1785. int y;
  1786. src_m2 = src1;
  1787. src_m1 = src1;
  1788. src_0=&src_m1[src_wrap];
  1789. src_p1=&src_0[src_wrap];
  1790. src_p2=&src_p1[src_wrap];
  1791. for(y=0;y<(height-2);y+=2) {
  1792. memcpy(dst,src_m1,width);
  1793. dst += dst_wrap;
  1794. deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
  1795. src_m2 = src_0;
  1796. src_m1 = src_p1;
  1797. src_0 = src_p2;
  1798. src_p1 += 2*src_wrap;
  1799. src_p2 += 2*src_wrap;
  1800. dst += dst_wrap;
  1801. }
  1802. memcpy(dst,src_m1,width);
  1803. dst += dst_wrap;
  1804. /* do last line */
  1805. deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
  1806. }
  1807. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  1808. int width, int height)
  1809. {
  1810. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  1811. int y;
  1812. uint8_t *buf;
  1813. buf = (uint8_t*)av_malloc(width);
  1814. src_m1 = src1;
  1815. memcpy(buf,src_m1,width);
  1816. src_0=&src_m1[src_wrap];
  1817. src_p1=&src_0[src_wrap];
  1818. src_p2=&src_p1[src_wrap];
  1819. for(y=0;y<(height-2);y+=2) {
  1820. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  1821. src_m1 = src_p1;
  1822. src_0 = src_p2;
  1823. src_p1 += 2*src_wrap;
  1824. src_p2 += 2*src_wrap;
  1825. }
  1826. /* do last line */
  1827. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  1828. av_free(buf);
  1829. }
  1830. /* deinterlace - if not supported return -1 */
  1831. int avpicture_deinterlace(AVPicture *dst, AVPicture *src,
  1832. int pix_fmt, int width, int height)
  1833. {
  1834. int i;
  1835. if (pix_fmt != PIX_FMT_YUV420P &&
  1836. pix_fmt != PIX_FMT_YUV422P &&
  1837. pix_fmt != PIX_FMT_YUV444P)
  1838. return -1;
  1839. if ((width & 3) != 0 || (height & 3) != 0)
  1840. return -1;
  1841. for(i=0;i<3;i++) {
  1842. if (i == 1) {
  1843. switch(pix_fmt) {
  1844. case PIX_FMT_YUV420P:
  1845. width >>= 1;
  1846. height >>= 1;
  1847. break;
  1848. case PIX_FMT_YUV422P:
  1849. width >>= 1;
  1850. break;
  1851. default:
  1852. break;
  1853. }
  1854. }
  1855. if (src == dst) {
  1856. deinterlace_bottom_field_inplace(src->data[i], src->linesize[i],
  1857. width, height);
  1858. } else {
  1859. deinterlace_bottom_field(dst->data[i],dst->linesize[i],
  1860. src->data[i], src->linesize[i],
  1861. width, height);
  1862. }
  1863. }
  1864. #ifdef HAVE_MMX
  1865. emms();
  1866. #endif
  1867. return 0;
  1868. }
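/* Added illustrative sketch, not part of the original source: in-place
 * deinterlacing of a yuv420p frame; src == dst selects the in-place
 * code path above, and width and height must be multiples of 4. The
 * helper name is hypothetical. */
static int example_deinterlace_inplace(AVPicture *pic, int width, int height)
{
    return avpicture_deinterlace(pic, pic, PIX_FMT_YUV420P, width, height);
}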
  1869. #undef FIX