  1. /*
  2. * Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
  4. *
  5. * This library is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU Lesser General Public
  7. * License as published by the Free Software Foundation; either
  8. * version 2 of the License, or (at your option) any later version.
  9. *
  10. * This library is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * Lesser General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU Lesser General Public
  16. * License along with this library; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. /**
  20. * @file imgconvert.c
  21. * Misc image conversion routines.
  22. */
  23. /* TODO:
  24. * - write 'ffimg' program to test all the image related stuff
  25. * - move all api to slice based system
  26. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  27. */
  28. #include "avcodec.h"
  29. #include "dsputil.h"
  30. #ifdef USE_FASTMEMCPY
  31. #include "fastmemcpy.h"
  32. #endif
  33. #ifdef HAVE_MMX
  34. #include "i386/mmx.h"
  35. #endif
  36. #define xglue(x, y) x ## y
  37. #define glue(x, y) xglue(x, y)
  38. #define FF_COLOR_RGB 0 /* RGB color space */
  39. #define FF_COLOR_GRAY 1 /* gray color space */
  40. #define FF_COLOR_YUV 2 /* YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
  41. #define FF_COLOR_YUV_JPEG 3 /* YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
  42. #define FF_PIXEL_PLANAR 0 /* each channel has one component in AVPicture */
  43. #define FF_PIXEL_PACKED 1 /* only one component containing all the channels */
  44. #define FF_PIXEL_PALETTE 2 /* one component containing indexes for a palette */
  45. typedef struct PixFmtInfo {
  46. const char *name;
  47. uint8_t nb_channels; /* number of channels (including alpha) */
  48. uint8_t color_type; /* color type (see FF_COLOR_xxx constants) */
  49. uint8_t pixel_type; /* pixel storage type (see FF_PIXEL_xxx constants) */
  50. uint8_t is_alpha : 1; /* true if alpha can be specified */
  51. uint8_t x_chroma_shift; /* X chroma subsampling factor is 2 ^ shift */
  52. uint8_t y_chroma_shift; /* Y chroma subsampling factor is 2 ^ shift */
  53. uint8_t depth; /* bit depth of the color components */
  54. } PixFmtInfo;
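/* Illustrative note: with x_chroma_shift = 1 and y_chroma_shift = 1 (the
   yuv420p entry below), each chroma plane has half the luma resolution in
   both directions, so a 720x576 picture carries 360x288 chroma planes.
   Odd sizes round up the same way avpicture_fill() does:
       chroma_w = (width  + (1 << x_chroma_shift) - 1) >> x_chroma_shift;
       chroma_h = (height + (1 << y_chroma_shift) - 1) >> y_chroma_shift;
*/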
  55. /* this table gives more information about formats */
  56. static PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
  57. /* YUV formats */
  58. [PIX_FMT_YUV420P] = {
  59. .name = "yuv420p",
  60. .nb_channels = 3,
  61. .color_type = FF_COLOR_YUV,
  62. .pixel_type = FF_PIXEL_PLANAR,
  63. .depth = 8,
  64. .x_chroma_shift = 1, .y_chroma_shift = 1,
  65. },
  66. [PIX_FMT_YUV422P] = {
  67. .name = "yuv422p",
  68. .nb_channels = 3,
  69. .color_type = FF_COLOR_YUV,
  70. .pixel_type = FF_PIXEL_PLANAR,
  71. .depth = 8,
  72. .x_chroma_shift = 1, .y_chroma_shift = 0,
  73. },
  74. [PIX_FMT_YUV444P] = {
  75. .name = "yuv444p",
  76. .nb_channels = 3,
  77. .color_type = FF_COLOR_YUV,
  78. .pixel_type = FF_PIXEL_PLANAR,
  79. .depth = 8,
  80. .x_chroma_shift = 0, .y_chroma_shift = 0,
  81. },
  82. [PIX_FMT_YUV422] = {
  83. .name = "yuv422",
  84. .nb_channels = 1,
  85. .color_type = FF_COLOR_YUV,
  86. .pixel_type = FF_PIXEL_PACKED,
  87. .depth = 8,
  88. .x_chroma_shift = 1, .y_chroma_shift = 0,
  89. },
  90. [PIX_FMT_UYVY422] = {
  91. .name = "uyvy422",
  92. .nb_channels = 1,
  93. .color_type = FF_COLOR_YUV,
  94. .pixel_type = FF_PIXEL_PACKED,
  95. .depth = 8,
  96. .x_chroma_shift = 1, .y_chroma_shift = 0,
  97. },
  98. [PIX_FMT_YUV410P] = {
  99. .name = "yuv410p",
  100. .nb_channels = 3,
  101. .color_type = FF_COLOR_YUV,
  102. .pixel_type = FF_PIXEL_PLANAR,
  103. .depth = 8,
  104. .x_chroma_shift = 2, .y_chroma_shift = 2,
  105. },
  106. [PIX_FMT_YUV411P] = {
  107. .name = "yuv411p",
  108. .nb_channels = 3,
  109. .color_type = FF_COLOR_YUV,
  110. .pixel_type = FF_PIXEL_PLANAR,
  111. .depth = 8,
  112. .x_chroma_shift = 2, .y_chroma_shift = 0,
  113. },
  114. /* JPEG YUV */
  115. [PIX_FMT_YUVJ420P] = {
  116. .name = "yuvj420p",
  117. .nb_channels = 3,
  118. .color_type = FF_COLOR_YUV_JPEG,
  119. .pixel_type = FF_PIXEL_PLANAR,
  120. .depth = 8,
  121. .x_chroma_shift = 1, .y_chroma_shift = 1,
  122. },
  123. [PIX_FMT_YUVJ422P] = {
  124. .name = "yuvj422p",
  125. .nb_channels = 3,
  126. .color_type = FF_COLOR_YUV_JPEG,
  127. .pixel_type = FF_PIXEL_PLANAR,
  128. .depth = 8,
  129. .x_chroma_shift = 1, .y_chroma_shift = 0,
  130. },
  131. [PIX_FMT_YUVJ444P] = {
  132. .name = "yuvj444p",
  133. .nb_channels = 3,
  134. .color_type = FF_COLOR_YUV_JPEG,
  135. .pixel_type = FF_PIXEL_PLANAR,
  136. .depth = 8,
  137. .x_chroma_shift = 0, .y_chroma_shift = 0,
  138. },
  139. /* RGB formats */
  140. [PIX_FMT_RGB24] = {
  141. .name = "rgb24",
  142. .nb_channels = 3,
  143. .color_type = FF_COLOR_RGB,
  144. .pixel_type = FF_PIXEL_PACKED,
  145. .depth = 8,
  146. .x_chroma_shift = 0, .y_chroma_shift = 0,
  147. },
  148. [PIX_FMT_BGR24] = {
  149. .name = "bgr24",
  150. .nb_channels = 3,
  151. .color_type = FF_COLOR_RGB,
  152. .pixel_type = FF_PIXEL_PACKED,
  153. .depth = 8,
  154. .x_chroma_shift = 0, .y_chroma_shift = 0,
  155. },
  156. [PIX_FMT_RGBA32] = {
  157. .name = "rgba32",
  158. .nb_channels = 4, .is_alpha = 1,
  159. .color_type = FF_COLOR_RGB,
  160. .pixel_type = FF_PIXEL_PACKED,
  161. .depth = 8,
  162. .x_chroma_shift = 0, .y_chroma_shift = 0,
  163. },
  164. [PIX_FMT_RGB565] = {
  165. .name = "rgb565",
  166. .nb_channels = 3,
  167. .color_type = FF_COLOR_RGB,
  168. .pixel_type = FF_PIXEL_PACKED,
  169. .depth = 5,
  170. .x_chroma_shift = 0, .y_chroma_shift = 0,
  171. },
  172. [PIX_FMT_RGB555] = {
  173. .name = "rgb555",
  174. .nb_channels = 4, .is_alpha = 1,
  175. .color_type = FF_COLOR_RGB,
  176. .pixel_type = FF_PIXEL_PACKED,
  177. .depth = 5,
  178. .x_chroma_shift = 0, .y_chroma_shift = 0,
  179. },
  180. /* gray / mono formats */
  181. [PIX_FMT_GRAY8] = {
  182. .name = "gray",
  183. .nb_channels = 1,
  184. .color_type = FF_COLOR_GRAY,
  185. .pixel_type = FF_PIXEL_PLANAR,
  186. .depth = 8,
  187. },
  188. [PIX_FMT_MONOWHITE] = {
  189. .name = "monow",
  190. .nb_channels = 1,
  191. .color_type = FF_COLOR_GRAY,
  192. .pixel_type = FF_PIXEL_PLANAR,
  193. .depth = 1,
  194. },
  195. [PIX_FMT_MONOBLACK] = {
  196. .name = "monob",
  197. .nb_channels = 1,
  198. .color_type = FF_COLOR_GRAY,
  199. .pixel_type = FF_PIXEL_PLANAR,
  200. .depth = 1,
  201. },
  202. /* paletted formats */
  203. [PIX_FMT_PAL8] = {
  204. .name = "pal8",
  205. .nb_channels = 4, .is_alpha = 1,
  206. .color_type = FF_COLOR_RGB,
  207. .pixel_type = FF_PIXEL_PALETTE,
  208. .depth = 8,
  209. },
  210. [PIX_FMT_XVMC_MPEG2_MC] = {
  211. .name = "xvmcmc",
  212. },
  213. [PIX_FMT_XVMC_MPEG2_IDCT] = {
  214. .name = "xvmcidct",
  215. },
  216. };
  217. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  218. {
  219. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  220. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  221. }
  222. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  223. {
  224. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  225. return "???";
  226. else
  227. return pix_fmt_info[pix_fmt].name;
  228. }
  229. enum PixelFormat avcodec_get_pix_fmt(const char* name)
  230. {
  231. int i;
  232. for (i=0; i < PIX_FMT_NB; i++)
  233. if (!strcmp(pix_fmt_info[i].name, name))
  234. break;
  235. return i;
  236. }
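/* Illustrative round trip for the two helpers above; an unknown name makes
   the loop fall through and return PIX_FMT_NB, which callers should treat
   as "not found":

       enum PixelFormat fmt = avcodec_get_pix_fmt("yuv420p");
       const char *name = (fmt < PIX_FMT_NB) ?
                          avcodec_get_pix_fmt_name(fmt) : "???";
*/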
  237. /* Picture fields are filled with 'ptr' addresses. Also returns the size. */
  238. int avpicture_fill(AVPicture *picture, uint8_t *ptr,
  239. int pix_fmt, int width, int height)
  240. {
  241. int size, w2, h2, size2;
  242. PixFmtInfo *pinfo;
  243. pinfo = &pix_fmt_info[pix_fmt];
  244. size = width * height;
  245. switch(pix_fmt) {
  246. case PIX_FMT_YUV420P:
  247. case PIX_FMT_YUV422P:
  248. case PIX_FMT_YUV444P:
  249. case PIX_FMT_YUV410P:
  250. case PIX_FMT_YUV411P:
  251. case PIX_FMT_YUVJ420P:
  252. case PIX_FMT_YUVJ422P:
  253. case PIX_FMT_YUVJ444P:
  254. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  255. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  256. size2 = w2 * h2;
  257. picture->data[0] = ptr;
  258. picture->data[1] = picture->data[0] + size;
  259. picture->data[2] = picture->data[1] + size2;
  260. picture->linesize[0] = width;
  261. picture->linesize[1] = w2;
  262. picture->linesize[2] = w2;
  263. return size + 2 * size2;
  264. case PIX_FMT_RGB24:
  265. case PIX_FMT_BGR24:
  266. picture->data[0] = ptr;
  267. picture->data[1] = NULL;
  268. picture->data[2] = NULL;
  269. picture->linesize[0] = width * 3;
  270. return size * 3;
  271. case PIX_FMT_RGBA32:
  272. picture->data[0] = ptr;
  273. picture->data[1] = NULL;
  274. picture->data[2] = NULL;
  275. picture->linesize[0] = width * 4;
  276. return size * 4;
  277. case PIX_FMT_RGB555:
  278. case PIX_FMT_RGB565:
  279. case PIX_FMT_YUV422:
  280. picture->data[0] = ptr;
  281. picture->data[1] = NULL;
  282. picture->data[2] = NULL;
  283. picture->linesize[0] = width * 2;
  284. return size * 2;
  285. case PIX_FMT_UYVY422:
  286. picture->data[0] = ptr;
  287. picture->data[1] = NULL;
  288. picture->data[2] = NULL;
  289. picture->linesize[0] = width * 2;
  290. return size * 2;
  291. case PIX_FMT_GRAY8:
  292. picture->data[0] = ptr;
  293. picture->data[1] = NULL;
  294. picture->data[2] = NULL;
  295. picture->linesize[0] = width;
  296. return size;
  297. case PIX_FMT_MONOWHITE:
  298. case PIX_FMT_MONOBLACK:
  299. picture->data[0] = ptr;
  300. picture->data[1] = NULL;
  301. picture->data[2] = NULL;
  302. picture->linesize[0] = (width + 7) >> 3;
  303. return picture->linesize[0] * height;
  304. case PIX_FMT_PAL8:
  305. size2 = (size + 3) & ~3;
  306. picture->data[0] = ptr;
  307. picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
  308. picture->data[2] = NULL;
  309. picture->linesize[0] = width;
  310. picture->linesize[1] = 4;
  311. return size2 + 256 * 4;
  312. default:
  313. picture->data[0] = NULL;
  314. picture->data[1] = NULL;
  315. picture->data[2] = NULL;
  316. picture->data[3] = NULL;
  317. return -1;
  318. }
  319. }
  320. int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
  321. unsigned char *dest, int dest_size)
  322. {
  323. PixFmtInfo* pf = &pix_fmt_info[pix_fmt];
  324. int i, j, w, h, data_planes;
  325. const unsigned char* s;
  326. int size = avpicture_get_size(pix_fmt, width, height);
  327. if (size > dest_size)
  328. return -1;
  329. if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
  330. if (pix_fmt == PIX_FMT_YUV422 ||
  331. pix_fmt == PIX_FMT_UYVY422 ||
  332. pix_fmt == PIX_FMT_RGB565 ||
  333. pix_fmt == PIX_FMT_RGB555)
  334. w = width * 2;
  335. else if (pix_fmt == PIX_FMT_PAL8)
  336. w = width;
  337. else
  338. w = width * (pf->depth * pf->nb_channels / 8);
  339. data_planes = 1;
  340. h = height;
  341. } else {
  342. data_planes = pf->nb_channels;
  343. w = width;
  344. h = height;
  345. }
  346. for (i=0; i<data_planes; i++) {
  347. if (i == 1) {
  348. w = width >> pf->x_chroma_shift;
  349. h = height >> pf->y_chroma_shift;
  350. }
  351. s = src->data[i];
  352. for(j=0; j<h; j++) {
  353. memcpy(dest, s, w);
  354. dest += w;
  355. s += src->linesize[i];
  356. }
  357. }
  358. if (pf->pixel_type == FF_PIXEL_PALETTE)
  359. memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);
  360. return size;
  361. }
  362. int avpicture_get_size(int pix_fmt, int width, int height)
  363. {
  364. AVPicture dummy_pict;
  365. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  366. }
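/* Usage sketch for the two functions above (illustrative only): size a
   buffer, allocate it and wrap it in an AVPicture; avpicture_alloc() further
   down does essentially the same thing:

       int size = avpicture_get_size(PIX_FMT_YUV420P, 352, 288);
       uint8_t *buf = size >= 0 ? av_malloc(size) : NULL;
       AVPicture pic;
       if (buf)
           avpicture_fill(&pic, buf, PIX_FMT_YUV420P, 352, 288);
*/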
  367. /**
  368. * Compute the loss when converting from one pixel format to another.
  369. */
  370. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  371. int has_alpha)
  372. {
  373. const PixFmtInfo *pf, *ps;
  374. int loss;
  375. ps = &pix_fmt_info[src_pix_fmt];
  376. pf = &pix_fmt_info[dst_pix_fmt];
  377. /* compute loss */
  378. loss = 0;
  379. pf = &pix_fmt_info[dst_pix_fmt];
  380. if (pf->depth < ps->depth ||
  381. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  382. loss |= FF_LOSS_DEPTH;
  383. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  384. pf->y_chroma_shift > ps->y_chroma_shift)
  385. loss |= FF_LOSS_RESOLUTION;
  386. switch(pf->color_type) {
  387. case FF_COLOR_RGB:
  388. if (ps->color_type != FF_COLOR_RGB &&
  389. ps->color_type != FF_COLOR_GRAY)
  390. loss |= FF_LOSS_COLORSPACE;
  391. break;
  392. case FF_COLOR_GRAY:
  393. if (ps->color_type != FF_COLOR_GRAY)
  394. loss |= FF_LOSS_COLORSPACE;
  395. break;
  396. case FF_COLOR_YUV:
  397. if (ps->color_type != FF_COLOR_YUV)
  398. loss |= FF_LOSS_COLORSPACE;
  399. break;
  400. case FF_COLOR_YUV_JPEG:
  401. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  402. ps->color_type != FF_COLOR_YUV &&
  403. ps->color_type != FF_COLOR_GRAY)
  404. loss |= FF_LOSS_COLORSPACE;
  405. break;
  406. default:
  407. /* fail safe test */
  408. if (ps->color_type != pf->color_type)
  409. loss |= FF_LOSS_COLORSPACE;
  410. break;
  411. }
  412. if (pf->color_type == FF_COLOR_GRAY &&
  413. ps->color_type != FF_COLOR_GRAY)
  414. loss |= FF_LOSS_CHROMA;
  415. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  416. loss |= FF_LOSS_ALPHA;
  417. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  418. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  419. loss |= FF_LOSS_COLORQUANT;
  420. return loss;
  421. }
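/* Illustrative example: converting rgb24 to gray keeps the 8 bit depth but
   drops both the colorspace and the chroma information, so this call returns
   FF_LOSS_COLORSPACE | FF_LOSS_CHROMA:

       int loss = avcodec_get_pix_fmt_loss(PIX_FMT_GRAY8, PIX_FMT_RGB24, 0);
*/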
  422. static int avg_bits_per_pixel(int pix_fmt)
  423. {
  424. int bits;
  425. const PixFmtInfo *pf;
  426. pf = &pix_fmt_info[pix_fmt];
  427. switch(pf->pixel_type) {
  428. case FF_PIXEL_PACKED:
  429. switch(pix_fmt) {
  430. case PIX_FMT_YUV422:
  431. case PIX_FMT_UYVY422:
  432. case PIX_FMT_RGB565:
  433. case PIX_FMT_RGB555:
  434. bits = 16;
  435. break;
  436. default:
  437. bits = pf->depth * pf->nb_channels;
  438. break;
  439. }
  440. break;
  441. case FF_PIXEL_PLANAR:
  442. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  443. bits = pf->depth * pf->nb_channels;
  444. } else {
  445. bits = pf->depth + ((2 * pf->depth) >>
  446. (pf->x_chroma_shift + pf->y_chroma_shift));
  447. }
  448. break;
  449. case FF_PIXEL_PALETTE:
  450. bits = 8;
  451. break;
  452. default:
  453. bits = -1;
  454. break;
  455. }
  456. return bits;
  457. }
  458. static int avcodec_find_best_pix_fmt1(int pix_fmt_mask,
  459. int src_pix_fmt,
  460. int has_alpha,
  461. int loss_mask)
  462. {
  463. int dist, i, loss, min_dist, dst_pix_fmt;
  464. /* find exact color match with smallest size */
  465. dst_pix_fmt = -1;
  466. min_dist = 0x7fffffff;
  467. for(i = 0;i < PIX_FMT_NB; i++) {
  468. if (pix_fmt_mask & (1 << i)) {
  469. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  470. if (loss == 0) {
  471. dist = avg_bits_per_pixel(i);
  472. if (dist < min_dist) {
  473. min_dist = dist;
  474. dst_pix_fmt = i;
  475. }
  476. }
  477. }
  478. }
  479. return dst_pix_fmt;
  480. }
  481. /**
  482. * Find the best pixel format to convert to. Returns -1 if none is found.
  483. */
  484. int avcodec_find_best_pix_fmt(int pix_fmt_mask, int src_pix_fmt,
  485. int has_alpha, int *loss_ptr)
  486. {
  487. int dst_pix_fmt, loss_mask, i;
  488. static const int loss_mask_order[] = {
  489. ~0, /* no loss first */
  490. ~FF_LOSS_ALPHA,
  491. ~FF_LOSS_RESOLUTION,
  492. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  493. ~FF_LOSS_COLORQUANT,
  494. ~FF_LOSS_DEPTH,
  495. 0,
  496. };
  497. /* try with successive loss */
  498. i = 0;
  499. for(;;) {
  500. loss_mask = loss_mask_order[i++];
  501. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  502. has_alpha, loss_mask);
  503. if (dst_pix_fmt >= 0)
  504. goto found;
  505. if (loss_mask == 0)
  506. break;
  507. }
  508. return -1;
  509. found:
  510. if (loss_ptr)
  511. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  512. return dst_pix_fmt;
  513. }
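/* Usage sketch (illustrative only): the mask is a bitset indexed by
   PixelFormat, matching the (1 << i) test in avcodec_find_best_pix_fmt1().
   With a 4:4:4 source and only planar 4:2:0 / 4:2:2 allowed, both candidates
   lose chroma resolution and the smaller 4:2:0 format wins:

       int loss;
       int mask = (1 << PIX_FMT_YUV420P) | (1 << PIX_FMT_YUV422P);
       int fmt  = avcodec_find_best_pix_fmt(mask, PIX_FMT_YUV444P, 0, &loss);
       // fmt == PIX_FMT_YUV420P, loss has FF_LOSS_RESOLUTION set
*/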
  514. static void img_copy_plane(uint8_t *dst, int dst_wrap,
  515. const uint8_t *src, int src_wrap,
  516. int width, int height)
  517. {
  518. for(;height > 0; height--) {
  519. memcpy(dst, src, width);
  520. dst += dst_wrap;
  521. src += src_wrap;
  522. }
  523. }
  524. /**
  525. * Copy image 'src' to 'dst'.
  526. */
  527. void img_copy(AVPicture *dst, const AVPicture *src,
  528. int pix_fmt, int width, int height)
  529. {
  530. int bwidth, bits, i;
  531. PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  532. pf = &pix_fmt_info[pix_fmt];
  533. switch(pf->pixel_type) {
  534. case FF_PIXEL_PACKED:
  535. switch(pix_fmt) {
  536. case PIX_FMT_YUV422:
  537. case PIX_FMT_UYVY422:
  538. case PIX_FMT_RGB565:
  539. case PIX_FMT_RGB555:
  540. bits = 16;
  541. break;
  542. default:
  543. bits = pf->depth * pf->nb_channels;
  544. break;
  545. }
  546. bwidth = (width * bits + 7) >> 3;
  547. img_copy_plane(dst->data[0], dst->linesize[0],
  548. src->data[0], src->linesize[0],
  549. bwidth, height);
  550. break;
  551. case FF_PIXEL_PLANAR:
  552. for(i = 0; i < pf->nb_channels; i++) {
  553. int w, h;
  554. w = width;
  555. h = height;
  556. if (i == 1 || i == 2) {
  557. w >>= pf->x_chroma_shift;
  558. h >>= pf->y_chroma_shift;
  559. }
  560. bwidth = (w * pf->depth + 7) >> 3;
  561. img_copy_plane(dst->data[i], dst->linesize[i],
  562. src->data[i], src->linesize[i],
  563. bwidth, h);
  564. }
  565. break;
  566. case FF_PIXEL_PALETTE:
  567. img_copy_plane(dst->data[0], dst->linesize[0],
  568. src->data[0], src->linesize[0],
  569. width, height);
  570. /* copy the palette */
  571. img_copy_plane(dst->data[1], dst->linesize[1],
  572. src->data[1], src->linesize[1],
  573. 4, 256);
  574. break;
  575. }
  576. }
  577. /* XXX: totally non optimized */
  578. static void yuv422_to_yuv420p(AVPicture *dst, const AVPicture *src,
  579. int width, int height)
  580. {
  581. const uint8_t *p, *p1;
  582. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  583. int w;
  584. p1 = src->data[0];
  585. lum1 = dst->data[0];
  586. cb1 = dst->data[1];
  587. cr1 = dst->data[2];
  588. for(;height >= 1; height -= 2) {
  589. p = p1;
  590. lum = lum1;
  591. cb = cb1;
  592. cr = cr1;
  593. for(w = width; w >= 2; w -= 2) {
  594. lum[0] = p[0];
  595. cb[0] = p[1];
  596. lum[1] = p[2];
  597. cr[0] = p[3];
  598. p += 4;
  599. lum += 2;
  600. cb++;
  601. cr++;
  602. }
  603. if (w) {
  604. lum[0] = p[0];
  605. cb[0] = p[1];
  606. cr[0] = p[3];
  607. cb++;
  608. cr++;
  609. }
  610. p1 += src->linesize[0];
  611. lum1 += dst->linesize[0];
  612. if (height>1) {
  613. p = p1;
  614. lum = lum1;
  615. for(w = width; w >= 2; w -= 2) {
  616. lum[0] = p[0];
  617. lum[1] = p[2];
  618. p += 4;
  619. lum += 2;
  620. }
  621. if (w) {
  622. lum[0] = p[0];
  623. }
  624. p1 += src->linesize[0];
  625. lum1 += dst->linesize[0];
  626. }
  627. cb1 += dst->linesize[1];
  628. cr1 += dst->linesize[2];
  629. }
  630. }
  631. static void uyvy422_to_yuv420p(AVPicture *dst, const AVPicture *src,
  632. int width, int height)
  633. {
  634. const uint8_t *p, *p1;
  635. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  636. int w;
  637. p1 = src->data[0];
  638. lum1 = dst->data[0];
  639. cb1 = dst->data[1];
  640. cr1 = dst->data[2];
  641. for(;height >= 1; height -= 2) {
  642. p = p1;
  643. lum = lum1;
  644. cb = cb1;
  645. cr = cr1;
  646. for(w = width; w >= 2; w -= 2) {
  647. lum[0] = p[1];
  648. cb[0] = p[0];
  649. lum[1] = p[3];
  650. cr[0] = p[2];
  651. p += 4;
  652. lum += 2;
  653. cb++;
  654. cr++;
  655. }
  656. if (w) {
  657. lum[0] = p[1];
  658. cb[0] = p[0];
  659. cr[0] = p[2];
  660. cb++;
  661. cr++;
  662. }
  663. p1 += src->linesize[0];
  664. lum1 += dst->linesize[0];
  665. if (height>1) {
  666. p = p1;
  667. lum = lum1;
  668. for(w = width; w >= 2; w -= 2) {
  669. lum[0] = p[1];
  670. lum[1] = p[3];
  671. p += 4;
  672. lum += 2;
  673. }
  674. if (w) {
  675. lum[0] = p[1];
  676. }
  677. p1 += src->linesize[0];
  678. lum1 += dst->linesize[0];
  679. }
  680. cb1 += dst->linesize[1];
  681. cr1 += dst->linesize[2];
  682. }
  683. }
  684. static void uyvy422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  685. int width, int height)
  686. {
  687. const uint8_t *p, *p1;
  688. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  689. int w;
  690. p1 = src->data[0];
  691. lum1 = dst->data[0];
  692. cb1 = dst->data[1];
  693. cr1 = dst->data[2];
  694. for(;height > 0; height--) {
  695. p = p1;
  696. lum = lum1;
  697. cb = cb1;
  698. cr = cr1;
  699. for(w = width; w >= 2; w -= 2) {
  700. lum[0] = p[1];
  701. cb[0] = p[0];
  702. lum[1] = p[3];
  703. cr[0] = p[2];
  704. p += 4;
  705. lum += 2;
  706. cb++;
  707. cr++;
  708. }
  709. p1 += src->linesize[0];
  710. lum1 += dst->linesize[0];
  711. cb1 += dst->linesize[1];
  712. cr1 += dst->linesize[2];
  713. }
  714. }
  715. static void yuv422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  716. int width, int height)
  717. {
  718. const uint8_t *p, *p1;
  719. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  720. int w;
  721. p1 = src->data[0];
  722. lum1 = dst->data[0];
  723. cb1 = dst->data[1];
  724. cr1 = dst->data[2];
  725. for(;height > 0; height--) {
  726. p = p1;
  727. lum = lum1;
  728. cb = cb1;
  729. cr = cr1;
  730. for(w = width; w >= 2; w -= 2) {
  731. lum[0] = p[0];
  732. cb[0] = p[1];
  733. lum[1] = p[2];
  734. cr[0] = p[3];
  735. p += 4;
  736. lum += 2;
  737. cb++;
  738. cr++;
  739. }
  740. p1 += src->linesize[0];
  741. lum1 += dst->linesize[0];
  742. cb1 += dst->linesize[1];
  743. cr1 += dst->linesize[2];
  744. }
  745. }
  746. static void yuv422p_to_yuv422(AVPicture *dst, const AVPicture *src,
  747. int width, int height)
  748. {
  749. uint8_t *p, *p1;
  750. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  751. int w;
  752. p1 = dst->data[0];
  753. lum1 = src->data[0];
  754. cb1 = src->data[1];
  755. cr1 = src->data[2];
  756. for(;height > 0; height--) {
  757. p = p1;
  758. lum = lum1;
  759. cb = cb1;
  760. cr = cr1;
  761. for(w = width; w >= 2; w -= 2) {
  762. p[0] = lum[0];
  763. p[1] = cb[0];
  764. p[2] = lum[1];
  765. p[3] = cr[0];
  766. p += 4;
  767. lum += 2;
  768. cb++;
  769. cr++;
  770. }
  771. p1 += dst->linesize[0];
  772. lum1 += src->linesize[0];
  773. cb1 += src->linesize[1];
  774. cr1 += src->linesize[2];
  775. }
  776. }
  777. static void yuv422p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  778. int width, int height)
  779. {
  780. uint8_t *p, *p1;
  781. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  782. int w;
  783. p1 = dst->data[0];
  784. lum1 = src->data[0];
  785. cb1 = src->data[1];
  786. cr1 = src->data[2];
  787. for(;height > 0; height--) {
  788. p = p1;
  789. lum = lum1;
  790. cb = cb1;
  791. cr = cr1;
  792. for(w = width; w >= 2; w -= 2) {
  793. p[1] = lum[0];
  794. p[0] = cb[0];
  795. p[3] = lum[1];
  796. p[2] = cr[0];
  797. p += 4;
  798. lum += 2;
  799. cb++;
  800. cr++;
  801. }
  802. p1 += dst->linesize[0];
  803. lum1 += src->linesize[0];
  804. cb1 += src->linesize[1];
  805. cr1 += src->linesize[2];
  806. }
  807. }
  808. #define SCALEBITS 10
  809. #define ONE_HALF (1 << (SCALEBITS - 1))
  810. #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
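/* Worked example: FIX() converts a floating point coefficient to 10 bit
   fixed point, e.g. FIX(1.40200) = (int)(1.402 * 1024 + 0.5) = 1436, so
   (1436 * cr) >> SCALEBITS approximates 1.402 * cr with integer arithmetic;
   ONE_HALF (512) is the rounding term added before the final shift. */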
  811. #define YUV_TO_RGB1_CCIR(cb1, cr1)\
  812. {\
  813. cb = (cb1) - 128;\
  814. cr = (cr1) - 128;\
  815. r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
  816. g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
  817. ONE_HALF;\
  818. b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
  819. }
  820. #define YUV_TO_RGB2_CCIR(r, g, b, y1)\
  821. {\
  822. y = ((y1) - 16) * FIX(255.0/219.0);\
  823. r = cm[(y + r_add) >> SCALEBITS];\
  824. g = cm[(y + g_add) >> SCALEBITS];\
  825. b = cm[(y + b_add) >> SCALEBITS];\
  826. }
  827. #define YUV_TO_RGB1(cb1, cr1)\
  828. {\
  829. cb = (cb1) - 128;\
  830. cr = (cr1) - 128;\
  831. r_add = FIX(1.40200) * cr + ONE_HALF;\
  832. g_add = - FIX(0.34414) * cb - FIX(0.71414) * cr + ONE_HALF;\
  833. b_add = FIX(1.77200) * cb + ONE_HALF;\
  834. }
  835. #define YUV_TO_RGB2(r, g, b, y1)\
  836. {\
  837. y = (y1) << SCALEBITS;\
  838. r = cm[(y + r_add) >> SCALEBITS];\
  839. g = cm[(y + g_add) >> SCALEBITS];\
  840. b = cm[(y + b_add) >> SCALEBITS];\
  841. }
  842. #define Y_CCIR_TO_JPEG(y)\
  843. cm[((y) * FIX(255.0/219.0) + (ONE_HALF - 16 * FIX(255.0/219.0))) >> SCALEBITS]
  844. #define Y_JPEG_TO_CCIR(y)\
  845. (((y) * FIX(219.0/255.0) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
  846. #define C_CCIR_TO_JPEG(y)\
  847. cm[(((y) - 128) * FIX(127.0/112.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS]
  848. /* NOTE: the clamp is really necessary! */
  849. static inline int C_JPEG_TO_CCIR(int y) {
  850. y = (((y - 128) * FIX(112.0/127.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS);
  851. if (y < 16)
  852. y = 16;
  853. return y;
  854. }
  855. #define RGB_TO_Y(r, g, b) \
  856. ((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
  857. FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)
  858. #define RGB_TO_U(r1, g1, b1, shift)\
  859. (((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
  860. FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  861. #define RGB_TO_V(r1, g1, b1, shift)\
  862. (((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
  863. FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  864. #define RGB_TO_Y_CCIR(r, g, b) \
  865. ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  866. FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
  867. #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
  868. (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
  869. FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  870. #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
  871. (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
  872. FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
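/* Rough sanity check of the range handling: the *_CCIR macros map full range
   JPEG levels to CCIR 601 video levels and back, e.g. Y_JPEG_TO_CCIR(0) = 16
   and Y_JPEG_TO_CCIR(255) = 235, while chroma ends up in [16, 240]. */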
  873. static uint8_t y_ccir_to_jpeg[256];
  874. static uint8_t y_jpeg_to_ccir[256];
  875. static uint8_t c_ccir_to_jpeg[256];
  876. static uint8_t c_jpeg_to_ccir[256];
  877. /* init various conversion tables */
  878. static void img_convert_init(void)
  879. {
  880. int i;
  881. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  882. for(i = 0;i < 256; i++) {
  883. y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
  884. y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
  885. c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
  886. c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
  887. }
  888. }
  889. /* apply the given table to each pixel */
  890. static void img_apply_table(uint8_t *dst, int dst_wrap,
  891. const uint8_t *src, int src_wrap,
  892. int width, int height, const uint8_t *table1)
  893. {
  894. int n;
  895. const uint8_t *s;
  896. uint8_t *d;
  897. const uint8_t *table;
  898. table = table1;
  899. for(;height > 0; height--) {
  900. s = src;
  901. d = dst;
  902. n = width;
  903. while (n >= 4) {
  904. d[0] = table[s[0]];
  905. d[1] = table[s[1]];
  906. d[2] = table[s[2]];
  907. d[3] = table[s[3]];
  908. d += 4;
  909. s += 4;
  910. n -= 4;
  911. }
  912. while (n > 0) {
  913. d[0] = table[s[0]];
  914. d++;
  915. s++;
  916. n--;
  917. }
  918. dst += dst_wrap;
  919. src += src_wrap;
  920. }
  921. }
  922. /* XXX: use generic filter ? */
  923. /* XXX: in most cases, the sampling position is incorrect */
  924. /* 4x1 -> 1x1 */
  925. static void shrink41(uint8_t *dst, int dst_wrap,
  926. const uint8_t *src, int src_wrap,
  927. int width, int height)
  928. {
  929. int w;
  930. const uint8_t *s;
  931. uint8_t *d;
  932. for(;height > 0; height--) {
  933. s = src;
  934. d = dst;
  935. for(w = width;w > 0; w--) {
  936. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  937. s += 4;
  938. d++;
  939. }
  940. src += src_wrap;
  941. dst += dst_wrap;
  942. }
  943. }
  944. /* 2x1 -> 1x1 */
  945. static void shrink21(uint8_t *dst, int dst_wrap,
  946. const uint8_t *src, int src_wrap,
  947. int width, int height)
  948. {
  949. int w;
  950. const uint8_t *s;
  951. uint8_t *d;
  952. for(;height > 0; height--) {
  953. s = src;
  954. d = dst;
  955. for(w = width;w > 0; w--) {
  956. d[0] = (s[0] + s[1]) >> 1;
  957. s += 2;
  958. d++;
  959. }
  960. src += src_wrap;
  961. dst += dst_wrap;
  962. }
  963. }
  964. /* 1x2 -> 1x1 */
  965. static void shrink12(uint8_t *dst, int dst_wrap,
  966. const uint8_t *src, int src_wrap,
  967. int width, int height)
  968. {
  969. int w;
  970. uint8_t *d;
  971. const uint8_t *s1, *s2;
  972. for(;height > 0; height--) {
  973. s1 = src;
  974. s2 = s1 + src_wrap;
  975. d = dst;
  976. for(w = width;w >= 4; w-=4) {
  977. d[0] = (s1[0] + s2[0]) >> 1;
  978. d[1] = (s1[1] + s2[1]) >> 1;
  979. d[2] = (s1[2] + s2[2]) >> 1;
  980. d[3] = (s1[3] + s2[3]) >> 1;
  981. s1 += 4;
  982. s2 += 4;
  983. d += 4;
  984. }
  985. for(;w > 0; w--) {
  986. d[0] = (s1[0] + s2[0]) >> 1;
  987. s1++;
  988. s2++;
  989. d++;
  990. }
  991. src += 2 * src_wrap;
  992. dst += dst_wrap;
  993. }
  994. }
  995. /* 2x2 -> 1x1 */
  996. static void shrink22(uint8_t *dst, int dst_wrap,
  997. const uint8_t *src, int src_wrap,
  998. int width, int height)
  999. {
  1000. int w;
  1001. const uint8_t *s1, *s2;
  1002. uint8_t *d;
  1003. for(;height > 0; height--) {
  1004. s1 = src;
  1005. s2 = s1 + src_wrap;
  1006. d = dst;
  1007. for(w = width;w >= 4; w-=4) {
  1008. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1009. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  1010. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  1011. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  1012. s1 += 8;
  1013. s2 += 8;
  1014. d += 4;
  1015. }
  1016. for(;w > 0; w--) {
  1017. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1018. s1 += 2;
  1019. s2 += 2;
  1020. d++;
  1021. }
  1022. src += 2 * src_wrap;
  1023. dst += dst_wrap;
  1024. }
  1025. }
  1026. /* 4x4 -> 1x1 */
  1027. static void shrink44(uint8_t *dst, int dst_wrap,
  1028. const uint8_t *src, int src_wrap,
  1029. int width, int height)
  1030. {
  1031. int w;
  1032. const uint8_t *s1, *s2, *s3, *s4;
  1033. uint8_t *d;
  1034. for(;height > 0; height--) {
  1035. s1 = src;
  1036. s2 = s1 + src_wrap;
  1037. s3 = s2 + src_wrap;
  1038. s4 = s3 + src_wrap;
  1039. d = dst;
  1040. for(w = width;w > 0; w--) {
  1041. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  1042. s2[0] + s2[1] + s2[2] + s2[3] +
  1043. s3[0] + s3[1] + s3[2] + s3[3] +
  1044. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  1045. s1 += 4;
  1046. s2 += 4;
  1047. s3 += 4;
  1048. s4 += 4;
  1049. d++;
  1050. }
  1051. src += 4 * src_wrap;
  1052. dst += dst_wrap;
  1053. }
  1054. }
  1055. static void grow21_line(uint8_t *dst, const uint8_t *src,
  1056. int width)
  1057. {
  1058. int w;
  1059. const uint8_t *s1;
  1060. uint8_t *d;
  1061. s1 = src;
  1062. d = dst;
  1063. for(w = width;w >= 4; w-=4) {
  1064. d[1] = d[0] = s1[0];
  1065. d[3] = d[2] = s1[1];
  1066. s1 += 2;
  1067. d += 4;
  1068. }
  1069. for(;w >= 2; w -= 2) {
  1070. d[1] = d[0] = s1[0];
  1071. s1 ++;
  1072. d += 2;
  1073. }
  1074. /* only needed if width is not a multiple of two */
  1075. /* XXX: verify that */
  1076. if (w) {
  1077. d[0] = s1[0];
  1078. }
  1079. }
  1080. static void grow41_line(uint8_t *dst, const uint8_t *src,
  1081. int width)
  1082. {
  1083. int w, v;
  1084. const uint8_t *s1;
  1085. uint8_t *d;
  1086. s1 = src;
  1087. d = dst;
  1088. for(w = width;w >= 4; w-=4) {
  1089. v = s1[0];
  1090. d[0] = v;
  1091. d[1] = v;
  1092. d[2] = v;
  1093. d[3] = v;
  1094. s1 ++;
  1095. d += 4;
  1096. }
  1097. }
  1098. /* 1x1 -> 2x1 */
  1099. static void grow21(uint8_t *dst, int dst_wrap,
  1100. const uint8_t *src, int src_wrap,
  1101. int width, int height)
  1102. {
  1103. for(;height > 0; height--) {
  1104. grow21_line(dst, src, width);
  1105. src += src_wrap;
  1106. dst += dst_wrap;
  1107. }
  1108. }
  1109. /* 1x1 -> 2x2 */
  1110. static void grow22(uint8_t *dst, int dst_wrap,
  1111. const uint8_t *src, int src_wrap,
  1112. int width, int height)
  1113. {
  1114. for(;height > 0; height--) {
  1115. grow21_line(dst, src, width);
  1116. if (height%2)
  1117. src += src_wrap;
  1118. dst += dst_wrap;
  1119. }
  1120. }
  1121. /* 1x1 -> 4x1 */
  1122. static void grow41(uint8_t *dst, int dst_wrap,
  1123. const uint8_t *src, int src_wrap,
  1124. int width, int height)
  1125. {
  1126. for(;height > 0; height--) {
  1127. grow41_line(dst, src, width);
  1128. src += src_wrap;
  1129. dst += dst_wrap;
  1130. }
  1131. }
  1132. /* 1x1 -> 4x4 */
  1133. static void grow44(uint8_t *dst, int dst_wrap,
  1134. const uint8_t *src, int src_wrap,
  1135. int width, int height)
  1136. {
  1137. for(;height > 0; height--) {
  1138. grow41_line(dst, src, width);
  1139. if ((height & 3) == 1)
  1140. src += src_wrap;
  1141. dst += dst_wrap;
  1142. }
  1143. }
  1144. /* 1x2 -> 2x1 */
  1145. static void conv411(uint8_t *dst, int dst_wrap,
  1146. const uint8_t *src, int src_wrap,
  1147. int width, int height)
  1148. {
  1149. int w, c;
  1150. const uint8_t *s1, *s2;
  1151. uint8_t *d;
  1152. width>>=1;
  1153. for(;height > 0; height--) {
  1154. s1 = src;
  1155. s2 = src + src_wrap;
  1156. d = dst;
  1157. for(w = width;w > 0; w--) {
  1158. c = (s1[0] + s2[0]) >> 1;
  1159. d[0] = c;
  1160. d[1] = c;
  1161. s1++;
  1162. s2++;
  1163. d += 2;
  1164. }
  1165. src += src_wrap * 2;
  1166. dst += dst_wrap;
  1167. }
  1168. }
  1169. /* XXX: add jpeg quantize code */
  1170. #define TRANSP_INDEX (6*6*6)
  1171. /* this may be slow, but it allows for extensions */
  1172. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1173. {
  1174. return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
  1175. }
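/* Worked example: the 6x6x6 cube maps each channel to one of six levels, so
   pure white r = g = b = 255 gives (255 / 47) % 6 = 5 per channel and index
   5*36 + 5*6 + 5 = 215, which build_rgb_palette() below fills with the
   opaque entry 0xffffffff. */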
  1176. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  1177. {
  1178. uint32_t *pal;
  1179. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  1180. int i, r, g, b;
  1181. pal = (uint32_t *)palette;
  1182. i = 0;
  1183. for(r = 0; r < 6; r++) {
  1184. for(g = 0; g < 6; g++) {
  1185. for(b = 0; b < 6; b++) {
  1186. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  1187. (pal_value[g] << 8) | pal_value[b];
  1188. }
  1189. }
  1190. }
  1191. if (has_alpha)
  1192. pal[i++] = 0;
  1193. while (i < 256)
  1194. pal[i++] = 0xff000000;
  1195. }
  1196. /* copy bit n to bits 0 ... n - 1 */
  1197. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1198. {
  1199. int mask;
  1200. mask = (1 << n) - 1;
  1201. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1202. }
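/* Illustrative example: bitcopy_n() replicates the top bit into the low bits
   so that 5 bit channel maxima expand to full 8 bit maxima, e.g. a 5 bit
   value of 0x1f shifted left by 3 gives 0xf8, and bitcopy_n(0xf8, 3)
   returns 0xf8 | 0x07 = 0xff. */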
  1203. /* rgb555 handling */
  1204. #define RGB_NAME rgb555
  1205. #define RGB_IN(r, g, b, s)\
  1206. {\
  1207. unsigned int v = ((const uint16_t *)(s))[0];\
  1208. r = bitcopy_n(v >> (10 - 3), 3);\
  1209. g = bitcopy_n(v >> (5 - 3), 3);\
  1210. b = bitcopy_n(v << 3, 3);\
  1211. }
  1212. #define RGBA_IN(r, g, b, a, s)\
  1213. {\
  1214. unsigned int v = ((const uint16_t *)(s))[0];\
  1215. r = bitcopy_n(v >> (10 - 3), 3);\
  1216. g = bitcopy_n(v >> (5 - 3), 3);\
  1217. b = bitcopy_n(v << 3, 3);\
  1218. a = (-(v >> 15)) & 0xff;\
  1219. }
  1220. #define RGBA_OUT(d, r, g, b, a)\
  1221. {\
  1222. ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3) | \
  1223. ((a << 8) & 0x8000);\
  1224. }
  1225. #define BPP 2
  1226. #include "imgconvert_template.h"
  1227. /* rgb565 handling */
  1228. #define RGB_NAME rgb565
  1229. #define RGB_IN(r, g, b, s)\
  1230. {\
  1231. unsigned int v = ((const uint16_t *)(s))[0];\
  1232. r = bitcopy_n(v >> (11 - 3), 3);\
  1233. g = bitcopy_n(v >> (5 - 2), 2);\
  1234. b = bitcopy_n(v << 3, 3);\
  1235. }
  1236. #define RGB_OUT(d, r, g, b)\
  1237. {\
  1238. ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
  1239. }
  1240. #define BPP 2
  1241. #include "imgconvert_template.h"
  1242. /* bgr24 handling */
  1243. #define RGB_NAME bgr24
  1244. #define RGB_IN(r, g, b, s)\
  1245. {\
  1246. b = (s)[0];\
  1247. g = (s)[1];\
  1248. r = (s)[2];\
  1249. }
  1250. #define RGB_OUT(d, r, g, b)\
  1251. {\
  1252. (d)[0] = b;\
  1253. (d)[1] = g;\
  1254. (d)[2] = r;\
  1255. }
  1256. #define BPP 3
  1257. #include "imgconvert_template.h"
  1258. #undef RGB_IN
  1259. #undef RGB_OUT
  1260. #undef BPP
  1261. /* rgb24 handling */
  1262. #define RGB_NAME rgb24
  1263. #define FMT_RGB24
  1264. #define RGB_IN(r, g, b, s)\
  1265. {\
  1266. r = (s)[0];\
  1267. g = (s)[1];\
  1268. b = (s)[2];\
  1269. }
  1270. #define RGB_OUT(d, r, g, b)\
  1271. {\
  1272. (d)[0] = r;\
  1273. (d)[1] = g;\
  1274. (d)[2] = b;\
  1275. }
  1276. #define BPP 3
  1277. #include "imgconvert_template.h"
  1278. /* rgba32 handling */
  1279. #define RGB_NAME rgba32
  1280. #define FMT_RGBA32
  1281. #define RGB_IN(r, g, b, s)\
  1282. {\
  1283. unsigned int v = ((const uint32_t *)(s))[0];\
  1284. r = (v >> 16) & 0xff;\
  1285. g = (v >> 8) & 0xff;\
  1286. b = v & 0xff;\
  1287. }
  1288. #define RGBA_IN(r, g, b, a, s)\
  1289. {\
  1290. unsigned int v = ((const uint32_t *)(s))[0];\
  1291. a = (v >> 24) & 0xff;\
  1292. r = (v >> 16) & 0xff;\
  1293. g = (v >> 8) & 0xff;\
  1294. b = v & 0xff;\
  1295. }
  1296. #define RGBA_OUT(d, r, g, b, a)\
  1297. {\
  1298. ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
  1299. }
  1300. #define BPP 4
  1301. #include "imgconvert_template.h"
  1302. static void mono_to_gray(AVPicture *dst, const AVPicture *src,
  1303. int width, int height, int xor_mask)
  1304. {
  1305. const unsigned char *p;
  1306. unsigned char *q;
  1307. int v, dst_wrap, src_wrap;
  1308. int y, w;
  1309. p = src->data[0];
  1310. src_wrap = src->linesize[0] - ((width + 7) >> 3);
  1311. q = dst->data[0];
  1312. dst_wrap = dst->linesize[0] - width;
  1313. for(y=0;y<height;y++) {
  1314. w = width;
  1315. while (w >= 8) {
  1316. v = *p++ ^ xor_mask;
  1317. q[0] = -(v >> 7);
  1318. q[1] = -((v >> 6) & 1);
  1319. q[2] = -((v >> 5) & 1);
  1320. q[3] = -((v >> 4) & 1);
  1321. q[4] = -((v >> 3) & 1);
  1322. q[5] = -((v >> 2) & 1);
  1323. q[6] = -((v >> 1) & 1);
  1324. q[7] = -((v >> 0) & 1);
  1325. w -= 8;
  1326. q += 8;
  1327. }
  1328. if (w > 0) {
  1329. v = *p++ ^ xor_mask;
  1330. do {
  1331. q[0] = -((v >> 7) & 1);
  1332. q++;
  1333. v <<= 1;
  1334. } while (--w);
  1335. }
  1336. p += src_wrap;
  1337. q += dst_wrap;
  1338. }
  1339. }
  1340. static void monowhite_to_gray(AVPicture *dst, const AVPicture *src,
  1341. int width, int height)
  1342. {
  1343. mono_to_gray(dst, src, width, height, 0xff);
  1344. }
  1345. static void monoblack_to_gray(AVPicture *dst, const AVPicture *src,
  1346. int width, int height)
  1347. {
  1348. mono_to_gray(dst, src, width, height, 0x00);
  1349. }
  1350. static void gray_to_mono(AVPicture *dst, const AVPicture *src,
  1351. int width, int height, int xor_mask)
  1352. {
  1353. int n;
  1354. const uint8_t *s;
  1355. uint8_t *d;
  1356. int j, b, v, n1, src_wrap, dst_wrap, y;
  1357. s = src->data[0];
  1358. src_wrap = src->linesize[0] - width;
  1359. d = dst->data[0];
  1360. dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
  1361. for(y=0;y<height;y++) {
  1362. n = width;
  1363. while (n >= 8) {
  1364. v = 0;
  1365. for(j=0;j<8;j++) {
  1366. b = s[0];
  1367. s++;
  1368. v = (v << 1) | (b >> 7);
  1369. }
  1370. d[0] = v ^ xor_mask;
  1371. d++;
  1372. n -= 8;
  1373. }
  1374. if (n > 0) {
  1375. n1 = n;
  1376. v = 0;
  1377. while (n > 0) {
  1378. b = s[0];
  1379. s++;
  1380. v = (v << 1) | (b >> 7);
  1381. n--;
  1382. }
  1383. d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
  1384. d++;
  1385. }
  1386. s += src_wrap;
  1387. d += dst_wrap;
  1388. }
  1389. }
  1390. static void gray_to_monowhite(AVPicture *dst, const AVPicture *src,
  1391. int width, int height)
  1392. {
  1393. gray_to_mono(dst, src, width, height, 0xff);
  1394. }
  1395. static void gray_to_monoblack(AVPicture *dst, const AVPicture *src,
  1396. int width, int height)
  1397. {
  1398. gray_to_mono(dst, src, width, height, 0x00);
  1399. }
  1400. typedef struct ConvertEntry {
  1401. void (*convert)(AVPicture *dst,
  1402. const AVPicture *src, int width, int height);
  1403. } ConvertEntry;
  1404. /* Add each new conversion function in this table. In order to be able
  1405. to convert from any format to any format, the following constraints
  1406. must be satisfied:
  1407. - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
  1408. - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
  1409. - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGBA32
  1410. - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
  1411. PIX_FMT_RGB24.
  1412. - PIX_FMT_YUV422 must convert to and from PIX_FMT_YUV422P.
  1413. The other conversion functions are just optimisations for common cases.
  1414. */
  1415. static ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
  1416. [PIX_FMT_YUV420P] = {
  1417. [PIX_FMT_RGB555] = {
  1418. .convert = yuv420p_to_rgb555
  1419. },
  1420. [PIX_FMT_RGB565] = {
  1421. .convert = yuv420p_to_rgb565
  1422. },
  1423. [PIX_FMT_BGR24] = {
  1424. .convert = yuv420p_to_bgr24
  1425. },
  1426. [PIX_FMT_RGB24] = {
  1427. .convert = yuv420p_to_rgb24
  1428. },
  1429. [PIX_FMT_RGBA32] = {
  1430. .convert = yuv420p_to_rgba32
  1431. },
  1432. },
  1433. [PIX_FMT_YUV422P] = {
  1434. [PIX_FMT_YUV422] = {
  1435. .convert = yuv422p_to_yuv422,
  1436. },
  1437. [PIX_FMT_UYVY422] = {
  1438. .convert = yuv422p_to_uyvy422,
  1439. },
  1440. },
  1441. [PIX_FMT_YUV444P] = {
  1442. [PIX_FMT_RGB24] = {
  1443. .convert = yuv444p_to_rgb24
  1444. },
  1445. },
  1446. [PIX_FMT_YUVJ420P] = {
  1447. [PIX_FMT_RGB555] = {
  1448. .convert = yuvj420p_to_rgb555
  1449. },
  1450. [PIX_FMT_RGB565] = {
  1451. .convert = yuvj420p_to_rgb565
  1452. },
  1453. [PIX_FMT_BGR24] = {
  1454. .convert = yuvj420p_to_bgr24
  1455. },
  1456. [PIX_FMT_RGB24] = {
  1457. .convert = yuvj420p_to_rgb24
  1458. },
  1459. [PIX_FMT_RGBA32] = {
  1460. .convert = yuvj420p_to_rgba32
  1461. },
  1462. },
  1463. [PIX_FMT_YUVJ444P] = {
  1464. [PIX_FMT_RGB24] = {
  1465. .convert = yuvj444p_to_rgb24
  1466. },
  1467. },
  1468. [PIX_FMT_YUV422] = {
  1469. [PIX_FMT_YUV420P] = {
  1470. .convert = yuv422_to_yuv420p,
  1471. },
  1472. [PIX_FMT_YUV422P] = {
  1473. .convert = yuv422_to_yuv422p,
  1474. },
  1475. },
  1476. [PIX_FMT_UYVY422] = {
  1477. [PIX_FMT_YUV420P] = {
  1478. .convert = uyvy422_to_yuv420p,
  1479. },
  1480. [PIX_FMT_YUV422P] = {
  1481. .convert = uyvy422_to_yuv422p,
  1482. },
  1483. },
  1484. [PIX_FMT_RGB24] = {
  1485. [PIX_FMT_YUV420P] = {
  1486. .convert = rgb24_to_yuv420p
  1487. },
  1488. [PIX_FMT_RGB565] = {
  1489. .convert = rgb24_to_rgb565
  1490. },
  1491. [PIX_FMT_RGB555] = {
  1492. .convert = rgb24_to_rgb555
  1493. },
  1494. [PIX_FMT_RGBA32] = {
  1495. .convert = rgb24_to_rgba32
  1496. },
  1497. [PIX_FMT_BGR24] = {
  1498. .convert = rgb24_to_bgr24
  1499. },
  1500. [PIX_FMT_GRAY8] = {
  1501. .convert = rgb24_to_gray
  1502. },
  1503. [PIX_FMT_PAL8] = {
  1504. .convert = rgb24_to_pal8
  1505. },
  1506. [PIX_FMT_YUV444P] = {
  1507. .convert = rgb24_to_yuv444p
  1508. },
  1509. [PIX_FMT_YUVJ420P] = {
  1510. .convert = rgb24_to_yuvj420p
  1511. },
  1512. [PIX_FMT_YUVJ444P] = {
  1513. .convert = rgb24_to_yuvj444p
  1514. },
  1515. },
  1516. [PIX_FMT_RGBA32] = {
  1517. [PIX_FMT_RGB24] = {
  1518. .convert = rgba32_to_rgb24
  1519. },
  1520. [PIX_FMT_RGB555] = {
  1521. .convert = rgba32_to_rgb555
  1522. },
  1523. [PIX_FMT_PAL8] = {
  1524. .convert = rgba32_to_pal8
  1525. },
  1526. [PIX_FMT_YUV420P] = {
  1527. .convert = rgba32_to_yuv420p
  1528. },
  1529. [PIX_FMT_GRAY8] = {
  1530. .convert = rgba32_to_gray
  1531. },
  1532. },
  1533. [PIX_FMT_BGR24] = {
  1534. [PIX_FMT_RGB24] = {
  1535. .convert = bgr24_to_rgb24
  1536. },
  1537. [PIX_FMT_YUV420P] = {
  1538. .convert = bgr24_to_yuv420p
  1539. },
  1540. [PIX_FMT_GRAY8] = {
  1541. .convert = bgr24_to_gray
  1542. },
  1543. },
  1544. [PIX_FMT_RGB555] = {
  1545. [PIX_FMT_RGB24] = {
  1546. .convert = rgb555_to_rgb24
  1547. },
  1548. [PIX_FMT_RGBA32] = {
  1549. .convert = rgb555_to_rgba32
  1550. },
  1551. [PIX_FMT_YUV420P] = {
  1552. .convert = rgb555_to_yuv420p
  1553. },
  1554. [PIX_FMT_GRAY8] = {
  1555. .convert = rgb555_to_gray
  1556. },
  1557. },
  1558. [PIX_FMT_RGB565] = {
  1559. [PIX_FMT_RGB24] = {
  1560. .convert = rgb565_to_rgb24
  1561. },
  1562. [PIX_FMT_YUV420P] = {
  1563. .convert = rgb565_to_yuv420p
  1564. },
  1565. [PIX_FMT_GRAY8] = {
  1566. .convert = rgb565_to_gray
  1567. },
  1568. },
  1569. [PIX_FMT_GRAY8] = {
  1570. [PIX_FMT_RGB555] = {
  1571. .convert = gray_to_rgb555
  1572. },
  1573. [PIX_FMT_RGB565] = {
  1574. .convert = gray_to_rgb565
  1575. },
  1576. [PIX_FMT_RGB24] = {
  1577. .convert = gray_to_rgb24
  1578. },
  1579. [PIX_FMT_BGR24] = {
  1580. .convert = gray_to_bgr24
  1581. },
  1582. [PIX_FMT_RGBA32] = {
  1583. .convert = gray_to_rgba32
  1584. },
  1585. [PIX_FMT_MONOWHITE] = {
  1586. .convert = gray_to_monowhite
  1587. },
  1588. [PIX_FMT_MONOBLACK] = {
  1589. .convert = gray_to_monoblack
  1590. },
  1591. },
  1592. [PIX_FMT_MONOWHITE] = {
  1593. [PIX_FMT_GRAY8] = {
  1594. .convert = monowhite_to_gray
  1595. },
  1596. },
  1597. [PIX_FMT_MONOBLACK] = {
  1598. [PIX_FMT_GRAY8] = {
  1599. .convert = monoblack_to_gray
  1600. },
  1601. },
  1602. [PIX_FMT_PAL8] = {
  1603. [PIX_FMT_RGB555] = {
  1604. .convert = pal8_to_rgb555
  1605. },
  1606. [PIX_FMT_RGB565] = {
  1607. .convert = pal8_to_rgb565
  1608. },
  1609. [PIX_FMT_BGR24] = {
  1610. .convert = pal8_to_bgr24
  1611. },
  1612. [PIX_FMT_RGB24] = {
  1613. .convert = pal8_to_rgb24
  1614. },
  1615. [PIX_FMT_RGBA32] = {
  1616. .convert = pal8_to_rgba32
  1617. },
  1618. },
  1619. };
  1620. int avpicture_alloc(AVPicture *picture,
  1621. int pix_fmt, int width, int height)
  1622. {
  1623. unsigned int size;
  1624. void *ptr;
  1625. size = avpicture_get_size(pix_fmt, width, height);
  1626. ptr = av_malloc(size);
  1627. if (!ptr)
  1628. goto fail;
  1629. avpicture_fill(picture, ptr, pix_fmt, width, height);
  1630. return 0;
  1631. fail:
  1632. memset(picture, 0, sizeof(AVPicture));
  1633. return -1;
  1634. }
  1635. void avpicture_free(AVPicture *picture)
  1636. {
  1637. av_free(picture->data[0]);
  1638. }
  1639. /* return true if yuv planar */
  1640. static inline int is_yuv_planar(PixFmtInfo *ps)
  1641. {
  1642. return (ps->color_type == FF_COLOR_YUV ||
  1643. ps->color_type == FF_COLOR_YUV_JPEG) &&
  1644. ps->pixel_type == FF_PIXEL_PLANAR;
  1645. }
  1646. /* XXX: always use linesize. Return -1 if not supported */
  1647. int img_convert(AVPicture *dst, int dst_pix_fmt,
  1648. const AVPicture *src, int src_pix_fmt,
  1649. int src_width, int src_height)
  1650. {
  1651. static int inited;
  1652. int i, ret, dst_width, dst_height, int_pix_fmt;
  1653. PixFmtInfo *src_pix, *dst_pix;
  1654. ConvertEntry *ce;
  1655. AVPicture tmp1, *tmp = &tmp1;
  1656. if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
  1657. dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
  1658. return -1;
  1659. if (src_width <= 0 || src_height <= 0)
  1660. return 0;
  1661. if (!inited) {
  1662. inited = 1;
  1663. img_convert_init();
  1664. }
  1665. dst_width = src_width;
  1666. dst_height = src_height;
  1667. dst_pix = &pix_fmt_info[dst_pix_fmt];
  1668. src_pix = &pix_fmt_info[src_pix_fmt];
  1669. if (src_pix_fmt == dst_pix_fmt) {
  1670. /* no conversion needed: just copy */
  1671. img_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
  1672. return 0;
  1673. }
  1674. ce = &convert_table[src_pix_fmt][dst_pix_fmt];
  1675. if (ce->convert) {
  1676. /* specific conversion routine */
  1677. ce->convert(dst, src, dst_width, dst_height);
  1678. return 0;
  1679. }
  1680. /* gray to YUV */
  1681. if (is_yuv_planar(dst_pix) &&
  1682. src_pix_fmt == PIX_FMT_GRAY8) {
  1683. int w, h, y;
  1684. uint8_t *d;
  1685. if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
  1686. img_copy_plane(dst->data[0], dst->linesize[0],
  1687. src->data[0], src->linesize[0],
  1688. dst_width, dst_height);
  1689. } else {
  1690. img_apply_table(dst->data[0], dst->linesize[0],
  1691. src->data[0], src->linesize[0],
  1692. dst_width, dst_height,
  1693. y_jpeg_to_ccir);
  1694. }
  1695. /* fill U and V with 128 */
  1696. w = dst_width;
  1697. h = dst_height;
  1698. w >>= dst_pix->x_chroma_shift;
  1699. h >>= dst_pix->y_chroma_shift;
  1700. for(i = 1; i <= 2; i++) {
  1701. d = dst->data[i];
  1702. for(y = 0; y< h; y++) {
  1703. memset(d, 128, w);
  1704. d += dst->linesize[i];
  1705. }
  1706. }
  1707. return 0;
  1708. }
  1709. /* YUV to gray */
  1710. if (is_yuv_planar(src_pix) &&
  1711. dst_pix_fmt == PIX_FMT_GRAY8) {
  1712. if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
  1713. img_copy_plane(dst->data[0], dst->linesize[0],
  1714. src->data[0], src->linesize[0],
  1715. dst_width, dst_height);
  1716. } else {
  1717. img_apply_table(dst->data[0], dst->linesize[0],
  1718. src->data[0], src->linesize[0],
  1719. dst_width, dst_height,
  1720. y_ccir_to_jpeg);
  1721. }
  1722. return 0;
  1723. }
  1724. /* YUV to YUV planar */
  1725. if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
  1726. int x_shift, y_shift, w, h, xy_shift;
  1727. void (*resize_func)(uint8_t *dst, int dst_wrap,
  1728. const uint8_t *src, int src_wrap,
  1729. int width, int height);
  1730. /* compute chroma size of the smallest dimensions */
  1731. w = dst_width;
  1732. h = dst_height;
  1733. if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
  1734. w >>= dst_pix->x_chroma_shift;
  1735. else
  1736. w >>= src_pix->x_chroma_shift;
  1737. if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
  1738. h >>= dst_pix->y_chroma_shift;
  1739. else
  1740. h >>= src_pix->y_chroma_shift;
  1741. x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
  1742. y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
  1743. xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
  1744. /* there must be filters for conversion at least from and to
  1745. YUV444 format */
  1746. switch(xy_shift) {
  1747. case 0x00:
  1748. resize_func = img_copy_plane;
  1749. break;
  1750. case 0x10:
  1751. resize_func = shrink21;
  1752. break;
  1753. case 0x20:
  1754. resize_func = shrink41;
  1755. break;
  1756. case 0x01:
  1757. resize_func = shrink12;
  1758. break;
  1759. case 0x11:
  1760. resize_func = shrink22;
  1761. break;
  1762. case 0x22:
  1763. resize_func = shrink44;
  1764. break;
  1765. case 0xf0:
  1766. resize_func = grow21;
  1767. break;
  1768. case 0xe0:
  1769. resize_func = grow41;
  1770. break;
  1771. case 0xff:
  1772. resize_func = grow22;
  1773. break;
  1774. case 0xee:
  1775. resize_func = grow44;
  1776. break;
  1777. case 0xf1:
  1778. resize_func = conv411;
  1779. break;
  1780. default:
  1781. /* currently not handled */
  1782. goto no_chroma_filter;
  1783. }
  1784. img_copy_plane(dst->data[0], dst->linesize[0],
  1785. src->data[0], src->linesize[0],
  1786. dst_width, dst_height);
  1787. for(i = 1;i <= 2; i++)
  1788. resize_func(dst->data[i], dst->linesize[i],
  1789. src->data[i], src->linesize[i],
  1790. dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
  1791. /* if yuv color space conversion is needed, we do it here on
  1792. the destination image */
  1793. if (dst_pix->color_type != src_pix->color_type) {
  1794. const uint8_t *y_table, *c_table;
  1795. if (dst_pix->color_type == FF_COLOR_YUV) {
  1796. y_table = y_jpeg_to_ccir;
  1797. c_table = c_jpeg_to_ccir;
  1798. } else {
  1799. y_table = y_ccir_to_jpeg;
  1800. c_table = c_ccir_to_jpeg;
  1801. }
  1802. img_apply_table(dst->data[0], dst->linesize[0],
  1803. dst->data[0], dst->linesize[0],
  1804. dst_width, dst_height,
  1805. y_table);
  1806. for(i = 1;i <= 2; i++)
  1807. img_apply_table(dst->data[i], dst->linesize[i],
  1808. dst->data[i], dst->linesize[i],
  1809. dst_width>>dst_pix->x_chroma_shift,
  1810. dst_height>>dst_pix->y_chroma_shift,
  1811. c_table);
  1812. }
  1813. return 0;
  1814. }
  1815. no_chroma_filter:
  1816. /* try to use an intermediate format */
  1817. if (src_pix_fmt == PIX_FMT_YUV422 ||
  1818. dst_pix_fmt == PIX_FMT_YUV422) {
  1819. /* specific case: convert to YUV422P first */
  1820. int_pix_fmt = PIX_FMT_YUV422P;
  1821. } else if (src_pix_fmt == PIX_FMT_UYVY422 ||
  1822. dst_pix_fmt == PIX_FMT_UYVY422) {
  1823. /* specific case: convert to YUV422P first */
  1824. int_pix_fmt = PIX_FMT_YUV422P;
  1825. } else if ((src_pix->color_type == FF_COLOR_GRAY &&
  1826. src_pix_fmt != PIX_FMT_GRAY8) ||
  1827. (dst_pix->color_type == FF_COLOR_GRAY &&
  1828. dst_pix_fmt != PIX_FMT_GRAY8)) {
  1829. /* gray8 is the normalized format */
  1830. int_pix_fmt = PIX_FMT_GRAY8;
  1831. } else if ((is_yuv_planar(src_pix) &&
  1832. src_pix_fmt != PIX_FMT_YUV444P &&
  1833. src_pix_fmt != PIX_FMT_YUVJ444P)) {
  1834. /* yuv444 is the normalized format */
  1835. if (src_pix->color_type == FF_COLOR_YUV_JPEG)
  1836. int_pix_fmt = PIX_FMT_YUVJ444P;
  1837. else
  1838. int_pix_fmt = PIX_FMT_YUV444P;
  1839. } else if ((is_yuv_planar(dst_pix) &&
  1840. dst_pix_fmt != PIX_FMT_YUV444P &&
  1841. dst_pix_fmt != PIX_FMT_YUVJ444P)) {
  1842. /* yuv444 is the normalized format */
  1843. if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
  1844. int_pix_fmt = PIX_FMT_YUVJ444P;
  1845. else
  1846. int_pix_fmt = PIX_FMT_YUV444P;
  1847. } else {
  1848. /* the two formats are rgb or gray8 or yuv[j]444p */
  1849. if (src_pix->is_alpha && dst_pix->is_alpha)
  1850. int_pix_fmt = PIX_FMT_RGBA32;
  1851. else
  1852. int_pix_fmt = PIX_FMT_RGB24;
  1853. }
  1854. if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
  1855. return -1;
  1856. ret = -1;
  1857. if (img_convert(tmp, int_pix_fmt,
  1858. src, src_pix_fmt, src_width, src_height) < 0)
  1859. goto fail1;
  1860. if (img_convert(dst, dst_pix_fmt,
  1861. tmp, int_pix_fmt, dst_width, dst_height) < 0)
  1862. goto fail1;
  1863. ret = 0;
  1864. fail1:
  1865. avpicture_free(tmp);
  1866. return ret;
  1867. }
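/* Usage sketch (illustrative only, assuming 'yuv', 'width' and 'height'
   describe a valid PIX_FMT_YUV420P source picture): allocate the destination
   with avpicture_alloc() and let img_convert() pick a direct routine, a
   chroma filter or an intermediate format:

       AVPicture rgb;
       if (avpicture_alloc(&rgb, PIX_FMT_RGB24, width, height) == 0) {
           if (img_convert(&rgb, PIX_FMT_RGB24,
                           yuv, PIX_FMT_YUV420P, width, height) < 0) {
               // conversion not supported
           }
           avpicture_free(&rgb);
       }
*/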
  1868. /* NOTE: we scan all the pixels to get exact information */
  1869. static int get_alpha_info_pal8(const AVPicture *src, int width, int height)
  1870. {
  1871. const unsigned char *p;
  1872. int src_wrap, ret, x, y;
  1873. unsigned int a;
  1874. uint32_t *palette = (uint32_t *)src->data[1];
  1875. p = src->data[0];
  1876. src_wrap = src->linesize[0] - width;
  1877. ret = 0;
  1878. for(y=0;y<height;y++) {
  1879. for(x=0;x<width;x++) {
  1880. a = palette[p[0]] >> 24;
  1881. if (a == 0x00) {
  1882. ret |= FF_ALPHA_TRANSP;
  1883. } else if (a != 0xff) {
  1884. ret |= FF_ALPHA_SEMI_TRANSP;
  1885. }
  1886. p++;
  1887. }
  1888. p += src_wrap;
  1889. }
  1890. return ret;
  1891. }
  1892. /**
  1893. * Tell if an image really has transparent alpha values.
  1894. * @return ORed mask of FF_ALPHA_xxx constants
  1895. */
  1896. int img_get_alpha_info(const AVPicture *src,
  1897. int pix_fmt, int width, int height)
  1898. {
  1899. PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  1900. int ret;
  1901. pf = &pix_fmt_info[pix_fmt];
  1902. /* no alpha can be represented in format */
  1903. if (!pf->is_alpha)
  1904. return 0;
  1905. switch(pix_fmt) {
  1906. case PIX_FMT_RGBA32:
  1907. ret = get_alpha_info_rgba32(src, width, height);
  1908. break;
  1909. case PIX_FMT_RGB555:
  1910. ret = get_alpha_info_rgb555(src, width, height);
  1911. break;
  1912. case PIX_FMT_PAL8:
  1913. ret = get_alpha_info_pal8(src, width, height);
  1914. break;
  1915. default:
  1916. /* we do not know, so everything is indicated */
  1917. ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
  1918. break;
  1919. }
  1920. return ret;
  1921. }
  1922. #ifdef HAVE_MMX
  1923. #define DEINT_INPLACE_LINE_LUM \
  1924. movd_m2r(lum_m4[0],mm0);\
  1925. movd_m2r(lum_m3[0],mm1);\
  1926. movd_m2r(lum_m2[0],mm2);\
  1927. movd_m2r(lum_m1[0],mm3);\
  1928. movd_m2r(lum[0],mm4);\
  1929. punpcklbw_r2r(mm7,mm0);\
  1930. movd_r2m(mm2,lum_m4[0]);\
  1931. punpcklbw_r2r(mm7,mm1);\
  1932. punpcklbw_r2r(mm7,mm2);\
  1933. punpcklbw_r2r(mm7,mm3);\
  1934. punpcklbw_r2r(mm7,mm4);\
  1935. paddw_r2r(mm3,mm1);\
  1936. psllw_i2r(1,mm2);\
  1937. paddw_r2r(mm4,mm0);\
  1938. psllw_i2r(2,mm1);\
  1939. paddw_r2r(mm6,mm2);\
  1940. paddw_r2r(mm2,mm1);\
  1941. psubusw_r2r(mm0,mm1);\
  1942. psrlw_i2r(3,mm1);\
  1943. packuswb_r2r(mm7,mm1);\
  1944. movd_r2m(mm1,lum_m2[0]);
  1945. #define DEINT_LINE_LUM \
  1946. movd_m2r(lum_m4[0],mm0);\
  1947. movd_m2r(lum_m3[0],mm1);\
  1948. movd_m2r(lum_m2[0],mm2);\
  1949. movd_m2r(lum_m1[0],mm3);\
  1950. movd_m2r(lum[0],mm4);\
  1951. punpcklbw_r2r(mm7,mm0);\
  1952. punpcklbw_r2r(mm7,mm1);\
  1953. punpcklbw_r2r(mm7,mm2);\
  1954. punpcklbw_r2r(mm7,mm3);\
  1955. punpcklbw_r2r(mm7,mm4);\
  1956. paddw_r2r(mm3,mm1);\
  1957. psllw_i2r(1,mm2);\
  1958. paddw_r2r(mm4,mm0);\
  1959. psllw_i2r(2,mm1);\
  1960. paddw_r2r(mm6,mm2);\
  1961. paddw_r2r(mm2,mm1);\
  1962. psubusw_r2r(mm0,mm1);\
  1963. psrlw_i2r(3,mm1);\
  1964. packuswb_r2r(mm7,mm1);\
  1965. movd_r2m(mm1,dst[0]);
  1966. #endif
  1967. /* filter parameters: [-1 4 2 4 -1] // 8 */
  1968. static void deinterlace_line(uint8_t *dst,
  1969. const uint8_t *lum_m4, const uint8_t *lum_m3,
  1970. const uint8_t *lum_m2, const uint8_t *lum_m1,
  1971. const uint8_t *lum,
  1972. int size)
  1973. {
  1974. #ifndef HAVE_MMX
  1975. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  1976. int sum;
  1977. for(;size > 0;size--) {
  1978. sum = -lum_m4[0];
  1979. sum += lum_m3[0] << 2;
  1980. sum += lum_m2[0] << 1;
  1981. sum += lum_m1[0] << 2;
  1982. sum += -lum[0];
  1983. dst[0] = cm[(sum + 4) >> 3];
  1984. lum_m4++;
  1985. lum_m3++;
  1986. lum_m2++;
  1987. lum_m1++;
  1988. lum++;
  1989. dst++;
  1990. }
  1991. #else
  1992. {
  1993. mmx_t rounder;
  1994. rounder.uw[0]=4;
  1995. rounder.uw[1]=4;
  1996. rounder.uw[2]=4;
  1997. rounder.uw[3]=4;
  1998. pxor_r2r(mm7,mm7);
  1999. movq_m2r(rounder,mm6);
  2000. }
  2001. for (;size > 3; size-=4) {
  2002. DEINT_LINE_LUM
  2003. lum_m4+=4;
  2004. lum_m3+=4;
  2005. lum_m2+=4;
  2006. lum_m1+=4;
  2007. lum+=4;
  2008. dst+=4;
  2009. }
  2010. #endif
  2011. }
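/* Quick check of the [-1 4 2 4 -1] / 8 filter: for a flat area where all five
   input lines hold the same value x, the sum is (-x + 4x + 2x + 4x - x) = 8x
   and (8x + 4) >> 3 gives x back, so the filter preserves constant areas; the
   MMX macros above implement the same expression four pixels at a time. */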
  2012. static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
  2013. int size)
  2014. {
  2015. #ifndef HAVE_MMX
  2016. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  2017. int sum;
  2018. for(;size > 0;size--) {
  2019. sum = -lum_m4[0];
  2020. sum += lum_m3[0] << 2;
  2021. sum += lum_m2[0] << 1;
  2022. lum_m4[0]=lum_m2[0];
  2023. sum += lum_m1[0] << 2;
  2024. sum += -lum[0];
  2025. lum_m2[0] = cm[(sum + 4) >> 3];
  2026. lum_m4++;
  2027. lum_m3++;
  2028. lum_m2++;
  2029. lum_m1++;
  2030. lum++;
  2031. }
  2032. #else
  2033. {
  2034. mmx_t rounder;
  2035. rounder.uw[0]=4;
  2036. rounder.uw[1]=4;
  2037. rounder.uw[2]=4;
  2038. rounder.uw[3]=4;
  2039. pxor_r2r(mm7,mm7);
  2040. movq_m2r(rounder,mm6);
  2041. }
  2042. for (;size > 3; size-=4) {
  2043. DEINT_INPLACE_LINE_LUM
  2044. lum_m4+=4;
  2045. lum_m3+=4;
  2046. lum_m2+=4;
  2047. lum_m1+=4;
  2048. lum+=4;
  2049. }
  2050. #endif
  2051. }
  2052. /* deinterlacing: 2 temporal taps, 3 spatial taps linear filter. The
  2053. top field is copied as is, but the bottom field is deinterlaced
  2054. against the top field. */
  2055. static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
  2056. const uint8_t *src1, int src_wrap,
  2057. int width, int height)
  2058. {
  2059. const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
  2060. int y;
  2061. src_m2 = src1;
  2062. src_m1 = src1;
  2063. src_0=&src_m1[src_wrap];
  2064. src_p1=&src_0[src_wrap];
  2065. src_p2=&src_p1[src_wrap];
  2066. for(y=0;y<(height-2);y+=2) {
  2067. memcpy(dst,src_m1,width);
  2068. dst += dst_wrap;
  2069. deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
  2070. src_m2 = src_0;
  2071. src_m1 = src_p1;
  2072. src_0 = src_p2;
  2073. src_p1 += 2*src_wrap;
  2074. src_p2 += 2*src_wrap;
  2075. dst += dst_wrap;
  2076. }
  2077. memcpy(dst,src_m1,width);
  2078. dst += dst_wrap;
  2079. /* do last line */
  2080. deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
  2081. }
  2082. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  2083. int width, int height)
  2084. {
  2085. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  2086. int y;
  2087. uint8_t *buf;
  2088. buf = (uint8_t*)av_malloc(width);
  2089. src_m1 = src1;
  2090. memcpy(buf,src_m1,width);
  2091. src_0=&src_m1[src_wrap];
  2092. src_p1=&src_0[src_wrap];
  2093. src_p2=&src_p1[src_wrap];
  2094. for(y=0;y<(height-2);y+=2) {
  2095. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  2096. src_m1 = src_p1;
  2097. src_0 = src_p2;
  2098. src_p1 += 2*src_wrap;
  2099. src_p2 += 2*src_wrap;
  2100. }
  2101. /* do last line */
  2102. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  2103. av_free(buf);
  2104. }
  2105. /* deinterlace - if not supported return -1 */
  2106. int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
  2107. int pix_fmt, int width, int height)
  2108. {
  2109. int i;
  2110. if (pix_fmt != PIX_FMT_YUV420P &&
  2111. pix_fmt != PIX_FMT_YUV422P &&
  2112. pix_fmt != PIX_FMT_YUV444P &&
  2113. pix_fmt != PIX_FMT_YUV411P)
  2114. return -1;
  2115. if ((width & 3) != 0 || (height & 3) != 0)
  2116. return -1;
  2117. for(i=0;i<3;i++) {
  2118. if (i == 1) {
  2119. switch(pix_fmt) {
  2120. case PIX_FMT_YUV420P:
  2121. width >>= 1;
  2122. height >>= 1;
  2123. break;
  2124. case PIX_FMT_YUV422P:
  2125. width >>= 1;
  2126. break;
  2127. case PIX_FMT_YUV411P:
  2128. width >>= 2;
  2129. break;
  2130. default:
  2131. break;
  2132. }
  2133. }
  2134. if (src == dst) {
  2135. deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i],
  2136. width, height);
  2137. } else {
  2138. deinterlace_bottom_field(dst->data[i],dst->linesize[i],
  2139. src->data[i], src->linesize[i],
  2140. width, height);
  2141. }
  2142. }
  2143. #ifdef HAVE_MMX
  2144. emms();
  2145. #endif
  2146. return 0;
  2147. }
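/* Usage sketch (illustrative only, assuming 'pic', 'width' and 'height'
   describe a valid picture): passing the same picture as source and
   destination selects the in-place code path above:

       if (avpicture_deinterlace(&pic, &pic, PIX_FMT_YUV420P,
                                 width, height) < 0) {
           // unsupported format, or width/height not a multiple of 4
       }
*/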
  2148. #undef FIX