You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2344 lines
62KB

  1. /*
2. * Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
  4. *
  5. * This library is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU Lesser General Public
  7. * License as published by the Free Software Foundation; either
  8. * version 2 of the License, or (at your option) any later version.
  9. *
  10. * This library is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * Lesser General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU Lesser General Public
  16. * License along with this library; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. /**
  20. * @file imgconvert.c
21. * Misc image conversion routines.
  22. */
  23. /* TODO:
  24. * - write 'ffimg' program to test all the image related stuff
  25. * - move all api to slice based system
  26. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  27. */
  28. #include "avcodec.h"
  29. #include "dsputil.h"
  30. #ifdef USE_FASTMEMCPY
  31. #include "fastmemcpy.h"
  32. #endif
  33. #ifdef HAVE_MMX
  34. #include "i386/mmx.h"
  35. #endif
  36. #define xglue(x, y) x ## y
  37. #define glue(x, y) xglue(x, y)
  38. #define FF_COLOR_RGB 0 /* RGB color space */
  39. #define FF_COLOR_GRAY 1 /* gray color space */
  40. #define FF_COLOR_YUV 2 /* YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
  41. #define FF_COLOR_YUV_JPEG 3 /* YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
  42. #define FF_PIXEL_PLANAR 0 /* each channel has one component in AVPicture */
  43. #define FF_PIXEL_PACKED 1 /* only one components containing all the channels */
  44. #define FF_PIXEL_PALETTE 2 /* one components containing indexes for a palette */
/* Static description of one pixel format: channel count, color model,
 * storage layout, chroma subsampling shifts and per-component bit depth.
 * One entry per PIX_FMT_* value lives in pix_fmt_info[] below. */
typedef struct PixFmtInfo {
    const char *name;       /* canonical format name, e.g. "yuv420p" */
    uint8_t nb_channels;    /* number of channels (including alpha) */
    uint8_t color_type;     /* color type (see FF_COLOR_xxx constants) */
    uint8_t pixel_type;     /* pixel storage type (see FF_PIXEL_xxx constants) */
    uint8_t is_alpha : 1;   /* true if alpha can be specified */
    uint8_t x_chroma_shift; /* X chroma subsampling factor is 2 ^ shift */
    uint8_t y_chroma_shift; /* Y chroma subsampling factor is 2 ^ shift */
    uint8_t depth;          /* bit depth of the color components */
} PixFmtInfo;
  55. /* this table gives more information about formats */
/* this table gives more information about formats */
/* NOTE: designated initializers mean any PIX_FMT_* value without an entry
 * here is zero-filled (in particular .name == NULL) — lookups should be
 * prepared for that. */
static PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
    /* YUV formats */
    [PIX_FMT_YUV420P] = {
        .name = "yuv420p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 1, /* chroma halved both axes */
    },
    [PIX_FMT_YUV422P] = {
        .name = "yuv422p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_YUV444P] = {
        .name = "yuv444p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    [PIX_FMT_YUV422] = {
        .name = "yuv422",
        .nb_channels = 1, /* packed: all components in a single plane */
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_UYVY422] = {
        .name = "uyvy422",
        .nb_channels = 1,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_YUV410P] = {
        .name = "yuv410p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 2, .y_chroma_shift = 2,
    },
    [PIX_FMT_YUV411P] = {
        .name = "yuv411p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 2, .y_chroma_shift = 0,
    },
    /* JPEG YUV (full 0..255 range) */
    [PIX_FMT_YUVJ420P] = {
        .name = "yuvj420p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV_JPEG,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 1,
    },
    [PIX_FMT_YUVJ422P] = {
        .name = "yuvj422p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV_JPEG,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_YUVJ444P] = {
        .name = "yuvj444p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV_JPEG,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    /* RGB formats */
    [PIX_FMT_RGB24] = {
        .name = "rgb24",
        .nb_channels = 3,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    [PIX_FMT_BGR24] = {
        .name = "bgr24",
        .nb_channels = 3,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    [PIX_FMT_RGBA32] = {
        .name = "rgba32",
        .nb_channels = 4, .is_alpha = 1,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    [PIX_FMT_RGB565] = {
        .name = "rgb565",
        .nb_channels = 3,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 5, /* smallest component (5-6-5) */
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    [PIX_FMT_RGB555] = {
        .name = "rgb555",
        /* presumably the spare 16th bit is treated as alpha here — note
         * nb_channels = 4 / is_alpha = 1 unlike rgb565; verify vs callers */
        .nb_channels = 4, .is_alpha = 1,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 5,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    /* gray / mono formats */
    [PIX_FMT_GRAY8] = {
        .name = "gray",
        .nb_channels = 1,
        .color_type = FF_COLOR_GRAY,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
    },
    [PIX_FMT_MONOWHITE] = {
        .name = "monow",
        .nb_channels = 1,
        .color_type = FF_COLOR_GRAY,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 1, /* 1 bit per pixel, 8 pixels per byte */
    },
    [PIX_FMT_MONOBLACK] = {
        .name = "monob",
        .nb_channels = 1,
        .color_type = FF_COLOR_GRAY,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 1,
    },
    /* paletted formats */
    [PIX_FMT_PAL8] = {
        .name = "pal8",
        .nb_channels = 4, .is_alpha = 1, /* palette entries are RGBA */
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PALETTE,
        .depth = 8,
    },
};
  211. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  212. {
  213. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  214. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  215. }
  216. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  217. {
  218. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  219. return "???";
  220. else
  221. return pix_fmt_info[pix_fmt].name;
  222. }
  223. enum PixelFormat avcodec_get_pix_fmt(const char* name)
  224. {
  225. int i;
  226. for (i=0; i < PIX_FMT_NB; i++)
  227. if (!strcmp(pix_fmt_info[i].name, name))
  228. break;
  229. return i;
  230. }
/* Picture field are filled with 'ptr' addresses. Also return size */
/* Fill picture->data[] / linesize[] so all planes point into the single
 * buffer 'ptr', and return the total byte size needed for the image.
 * May be called with ptr == NULL purely to compute the size (the plane
 * "pointers" are then offsets from NULL and must not be dereferenced —
 * see avpicture_get_size()). Returns -1 for formats not handled here. */
int avpicture_fill(AVPicture *picture, uint8_t *ptr,
                   int pix_fmt, int width, int height)
{
    int size, w2, h2, size2;
    PixFmtInfo *pinfo;

    pinfo = &pix_fmt_info[pix_fmt];
    size = width * height; /* luma / single-plane size in pixels */
    switch(pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
    case PIX_FMT_YUVJ420P:
    case PIX_FMT_YUVJ422P:
    case PIX_FMT_YUVJ444P:
        /* chroma plane dimensions, rounded up per the format's shifts */
        w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
        h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
        size2 = w2 * h2;
        /* layout: Y plane, then Cb, then Cr, contiguously */
        picture->data[0] = ptr;
        picture->data[1] = picture->data[0] + size;
        picture->data[2] = picture->data[1] + size2;
        picture->linesize[0] = width;
        picture->linesize[1] = w2;
        picture->linesize[2] = w2;
        return size + 2 * size2;
    case PIX_FMT_RGB24:
    case PIX_FMT_BGR24:
        /* packed, 3 bytes per pixel */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 3;
        return size * 3;
    case PIX_FMT_RGBA32:
        /* packed, 4 bytes per pixel */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 4;
        return size * 4;
    case PIX_FMT_RGB555:
    case PIX_FMT_RGB565:
    case PIX_FMT_YUV422:
        /* packed, 2 bytes per pixel */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 2;
        return size * 2;
    case PIX_FMT_UYVY422:
        /* packed, 2 bytes per pixel */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 2;
        return size * 2;
    case PIX_FMT_GRAY8:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width;
        return size;
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        /* 1 bit per pixel, rows padded to whole bytes */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = (width + 7) >> 3;
        return picture->linesize[0] * height;
    case PIX_FMT_PAL8:
        /* pixel data padded to a 4-byte boundary, then the palette */
        size2 = (size + 3) & ~3;
        picture->data[0] = ptr;
        picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
        picture->data[2] = NULL;
        picture->linesize[0] = width;
        picture->linesize[1] = 4;
        return size2 + 256 * 4;
    default:
        /* unknown format: clear the pointers and report failure */
        picture->data[0] = NULL;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        return -1;
    }
}
/* Serialize the planes of 'src' into the flat buffer 'dest' in the
 * canonical plane order, with no row padding.
 * Returns the number of bytes written (the avpicture_get_size() of the
 * format), or -1 when dest_size is too small. For PAL8 the palette is
 * appended after the pixel data at the next 4-byte boundary. */
int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
                     unsigned char *dest, int dest_size)
{
    PixFmtInfo* pf = &pix_fmt_info[pix_fmt];
    int i, j, w, h, data_planes;
    const unsigned char* s;
    int size = avpicture_get_size(pix_fmt, width, height);

    if (size > dest_size)
        return -1;

    if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
        /* single interleaved plane; compute its row width in bytes */
        if (pix_fmt == PIX_FMT_YUV422 ||
            pix_fmt == PIX_FMT_UYVY422 ||
            pix_fmt == PIX_FMT_RGB565 ||
            pix_fmt == PIX_FMT_RGB555)
            w = width * 2; /* 16 bits per pixel */
        else if (pix_fmt == PIX_FMT_PAL8)
            w = width; /* 8-bit indexes */
        else
            w = width * (pf->depth * pf->nb_channels / 8);
        data_planes = 1;
        h = height;
    } else {
        data_planes = pf->nb_channels;
        w = width;
        h = height;
    }

    for (i=0; i<data_planes; i++) {
        /* from the second plane on, dimensions are the chroma dimensions */
        if (i == 1) {
            w = width >> pf->x_chroma_shift;
            h = height >> pf->y_chroma_shift;
        }
        s = src->data[i];
        for(j=0; j<h; j++) {
            /* rows are packed tightly in dest; src rows use linesize */
            memcpy(dest, s, w);
            dest += w;
            s += src->linesize[i];
        }
    }

    if (pf->pixel_type == FF_PIXEL_PALETTE)
        /* palette goes at the next 4-byte aligned position after the pixels */
        memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);

    return size;
}
  356. int avpicture_get_size(int pix_fmt, int width, int height)
  357. {
  358. AVPicture dummy_pict;
  359. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  360. }
  361. /**
  362. * compute the loss when converting from a pixel format to another
  363. */
  364. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  365. int has_alpha)
  366. {
  367. const PixFmtInfo *pf, *ps;
  368. int loss;
  369. ps = &pix_fmt_info[src_pix_fmt];
  370. pf = &pix_fmt_info[dst_pix_fmt];
  371. /* compute loss */
  372. loss = 0;
  373. pf = &pix_fmt_info[dst_pix_fmt];
  374. if (pf->depth < ps->depth ||
  375. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  376. loss |= FF_LOSS_DEPTH;
  377. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  378. pf->y_chroma_shift > ps->y_chroma_shift)
  379. loss |= FF_LOSS_RESOLUTION;
  380. switch(pf->color_type) {
  381. case FF_COLOR_RGB:
  382. if (ps->color_type != FF_COLOR_RGB &&
  383. ps->color_type != FF_COLOR_GRAY)
  384. loss |= FF_LOSS_COLORSPACE;
  385. break;
  386. case FF_COLOR_GRAY:
  387. if (ps->color_type != FF_COLOR_GRAY)
  388. loss |= FF_LOSS_COLORSPACE;
  389. break;
  390. case FF_COLOR_YUV:
  391. if (ps->color_type != FF_COLOR_YUV)
  392. loss |= FF_LOSS_COLORSPACE;
  393. break;
  394. case FF_COLOR_YUV_JPEG:
  395. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  396. ps->color_type != FF_COLOR_YUV &&
  397. ps->color_type != FF_COLOR_GRAY)
  398. loss |= FF_LOSS_COLORSPACE;
  399. break;
  400. default:
  401. /* fail safe test */
  402. if (ps->color_type != pf->color_type)
  403. loss |= FF_LOSS_COLORSPACE;
  404. break;
  405. }
  406. if (pf->color_type == FF_COLOR_GRAY &&
  407. ps->color_type != FF_COLOR_GRAY)
  408. loss |= FF_LOSS_CHROMA;
  409. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  410. loss |= FF_LOSS_ALPHA;
  411. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  412. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  413. loss |= FF_LOSS_COLORQUANT;
  414. return loss;
  415. }
  416. static int avg_bits_per_pixel(int pix_fmt)
  417. {
  418. int bits;
  419. const PixFmtInfo *pf;
  420. pf = &pix_fmt_info[pix_fmt];
  421. switch(pf->pixel_type) {
  422. case FF_PIXEL_PACKED:
  423. switch(pix_fmt) {
  424. case PIX_FMT_YUV422:
  425. case PIX_FMT_UYVY422:
  426. case PIX_FMT_RGB565:
  427. case PIX_FMT_RGB555:
  428. bits = 16;
  429. break;
  430. default:
  431. bits = pf->depth * pf->nb_channels;
  432. break;
  433. }
  434. break;
  435. case FF_PIXEL_PLANAR:
  436. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  437. bits = pf->depth * pf->nb_channels;
  438. } else {
  439. bits = pf->depth + ((2 * pf->depth) >>
  440. (pf->x_chroma_shift + pf->y_chroma_shift));
  441. }
  442. break;
  443. case FF_PIXEL_PALETTE:
  444. bits = 8;
  445. break;
  446. default:
  447. bits = -1;
  448. break;
  449. }
  450. return bits;
  451. }
  452. static int avcodec_find_best_pix_fmt1(int pix_fmt_mask,
  453. int src_pix_fmt,
  454. int has_alpha,
  455. int loss_mask)
  456. {
  457. int dist, i, loss, min_dist, dst_pix_fmt;
  458. /* find exact color match with smallest size */
  459. dst_pix_fmt = -1;
  460. min_dist = 0x7fffffff;
  461. for(i = 0;i < PIX_FMT_NB; i++) {
  462. if (pix_fmt_mask & (1 << i)) {
  463. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  464. if (loss == 0) {
  465. dist = avg_bits_per_pixel(i);
  466. if (dist < min_dist) {
  467. min_dist = dist;
  468. dst_pix_fmt = i;
  469. }
  470. }
  471. }
  472. }
  473. return dst_pix_fmt;
  474. }
  475. /**
  476. * find best pixel format to convert to. Return -1 if none found
  477. */
  478. int avcodec_find_best_pix_fmt(int pix_fmt_mask, int src_pix_fmt,
  479. int has_alpha, int *loss_ptr)
  480. {
  481. int dst_pix_fmt, loss_mask, i;
  482. static const int loss_mask_order[] = {
  483. ~0, /* no loss first */
  484. ~FF_LOSS_ALPHA,
  485. ~FF_LOSS_RESOLUTION,
  486. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  487. ~FF_LOSS_COLORQUANT,
  488. ~FF_LOSS_DEPTH,
  489. 0,
  490. };
  491. /* try with successive loss */
  492. i = 0;
  493. for(;;) {
  494. loss_mask = loss_mask_order[i++];
  495. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  496. has_alpha, loss_mask);
  497. if (dst_pix_fmt >= 0)
  498. goto found;
  499. if (loss_mask == 0)
  500. break;
  501. }
  502. return -1;
  503. found:
  504. if (loss_ptr)
  505. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  506. return dst_pix_fmt;
  507. }
  508. static void img_copy_plane(uint8_t *dst, int dst_wrap,
  509. const uint8_t *src, int src_wrap,
  510. int width, int height)
  511. {
  512. for(;height > 0; height--) {
  513. memcpy(dst, src, width);
  514. dst += dst_wrap;
  515. src += src_wrap;
  516. }
  517. }
  518. /**
  519. * Copy image 'src' to 'dst'.
  520. */
  521. void img_copy(AVPicture *dst, const AVPicture *src,
  522. int pix_fmt, int width, int height)
  523. {
  524. int bwidth, bits, i;
  525. PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  526. pf = &pix_fmt_info[pix_fmt];
  527. switch(pf->pixel_type) {
  528. case FF_PIXEL_PACKED:
  529. switch(pix_fmt) {
  530. case PIX_FMT_YUV422:
  531. case PIX_FMT_UYVY422:
  532. case PIX_FMT_RGB565:
  533. case PIX_FMT_RGB555:
  534. bits = 16;
  535. break;
  536. default:
  537. bits = pf->depth * pf->nb_channels;
  538. break;
  539. }
  540. bwidth = (width * bits + 7) >> 3;
  541. img_copy_plane(dst->data[0], dst->linesize[0],
  542. src->data[0], src->linesize[0],
  543. bwidth, height);
  544. break;
  545. case FF_PIXEL_PLANAR:
  546. for(i = 0; i < pf->nb_channels; i++) {
  547. int w, h;
  548. w = width;
  549. h = height;
  550. if (i == 1 || i == 2) {
  551. w >>= pf->x_chroma_shift;
  552. h >>= pf->y_chroma_shift;
  553. }
  554. bwidth = (w * pf->depth + 7) >> 3;
  555. img_copy_plane(dst->data[i], dst->linesize[i],
  556. src->data[i], src->linesize[i],
  557. bwidth, h);
  558. }
  559. break;
  560. case FF_PIXEL_PALETTE:
  561. img_copy_plane(dst->data[0], dst->linesize[0],
  562. src->data[0], src->linesize[0],
  563. width, height);
  564. /* copy the palette */
  565. img_copy_plane(dst->data[1], dst->linesize[1],
  566. src->data[1], src->linesize[1],
  567. 4, 256);
  568. break;
  569. }
  570. }
/* XXX: totally non optimized */
/* Convert packed YUYV 4:2:2 (bytes Y0 Cb Y1 Cr per two pixels) to planar
 * 4:2:0. Rows are processed in pairs: the first row of each pair supplies
 * both luma and chroma, the second supplies luma only, i.e. vertical
 * chroma is decimated by dropping (no averaging). Odd widths and heights
 * are handled by the 'if (w)' / 'if (height>1)' tails.
 * NOTE(review): the odd-width tail reads p[1] and p[3] past the last
 * (single-pixel) pair — assumes the packed row has those bytes; verify
 * against callers. */
static void yuv422_to_yuv420p(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;

    p1 = src->data[0];
    lum1 = dst->data[0];
    cb1 = dst->data[1];
    cr1 = dst->data[2];
    for(;height >= 1; height -= 2) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        /* first row of the pair: luma + chroma */
        for(w = width; w >= 2; w -= 2) {
            lum[0] = p[0];
            cb[0] = p[1];
            lum[1] = p[2];
            cr[0] = p[3];
            p += 4;
            lum += 2;
            cb++;
            cr++;
        }
        if (w) { /* odd width: one trailing luma sample plus its chroma */
            lum[0] = p[0];
            cb[0] = p[1];
            cr[0] = p[3];
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        if (height>1) {
            /* second row of the pair: luma only, chroma discarded */
            p = p1;
            lum = lum1;
            for(w = width; w >= 2; w -= 2) {
                lum[0] = p[0];
                lum[1] = p[2];
                p += 4;
                lum += 2;
            }
            if (w) {
                lum[0] = p[0];
            }
            p1 += src->linesize[0];
            lum1 += dst->linesize[0];
        }
        /* one chroma output row per input row pair */
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
/* Convert packed UYVY 4:2:2 (bytes Cb Y0 Cr Y1 per two pixels) to planar
 * 4:2:0. Same structure as yuv422_to_yuv420p() with the byte order of the
 * packed source swapped: chroma comes from the first row of each pair,
 * the second row contributes luma only. Odd widths/heights handled by the
 * 'if (w)' / 'if (height>1)' tails.
 * NOTE(review): the odd-width tail reads p[2] past the last single-pixel
 * pair — assumes those packed bytes exist; verify against callers. */
static void uyvy422_to_yuv420p(AVPicture *dst, const AVPicture *src,
                               int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;

    p1 = src->data[0];
    lum1 = dst->data[0];
    cb1 = dst->data[1];
    cr1 = dst->data[2];
    for(;height >= 1; height -= 2) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        /* first row of the pair: luma + chroma */
        for(w = width; w >= 2; w -= 2) {
            lum[0] = p[1];
            cb[0] = p[0];
            lum[1] = p[3];
            cr[0] = p[2];
            p += 4;
            lum += 2;
            cb++;
            cr++;
        }
        if (w) { /* odd width tail */
            lum[0] = p[1];
            cb[0] = p[0];
            cr[0] = p[2];
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        if (height>1) {
            /* second row of the pair: luma only */
            p = p1;
            lum = lum1;
            for(w = width; w >= 2; w -= 2) {
                lum[0] = p[1];
                lum[1] = p[3];
                p += 4;
                lum += 2;
            }
            if (w) {
                lum[0] = p[1];
            }
            p1 += src->linesize[0];
            lum1 += dst->linesize[0];
        }
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
  678. static void uyvy422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  679. int width, int height)
  680. {
  681. const uint8_t *p, *p1;
  682. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  683. int w;
  684. p1 = src->data[0];
  685. lum1 = dst->data[0];
  686. cb1 = dst->data[1];
  687. cr1 = dst->data[2];
  688. for(;height > 0; height--) {
  689. p = p1;
  690. lum = lum1;
  691. cb = cb1;
  692. cr = cr1;
  693. for(w = width; w >= 2; w -= 2) {
  694. lum[0] = p[1];
  695. cb[0] = p[0];
  696. lum[1] = p[3];
  697. cr[0] = p[2];
  698. p += 4;
  699. lum += 2;
  700. cb++;
  701. cr++;
  702. }
  703. p1 += src->linesize[0];
  704. lum1 += dst->linesize[0];
  705. cb1 += dst->linesize[1];
  706. cr1 += dst->linesize[2];
  707. }
  708. }
  709. static void yuv422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  710. int width, int height)
  711. {
  712. const uint8_t *p, *p1;
  713. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  714. int w;
  715. p1 = src->data[0];
  716. lum1 = dst->data[0];
  717. cb1 = dst->data[1];
  718. cr1 = dst->data[2];
  719. for(;height > 0; height--) {
  720. p = p1;
  721. lum = lum1;
  722. cb = cb1;
  723. cr = cr1;
  724. for(w = width; w >= 2; w -= 2) {
  725. lum[0] = p[0];
  726. cb[0] = p[1];
  727. lum[1] = p[2];
  728. cr[0] = p[3];
  729. p += 4;
  730. lum += 2;
  731. cb++;
  732. cr++;
  733. }
  734. p1 += src->linesize[0];
  735. lum1 += dst->linesize[0];
  736. cb1 += dst->linesize[1];
  737. cr1 += dst->linesize[2];
  738. }
  739. }
  740. static void yuv422p_to_yuv422(AVPicture *dst, const AVPicture *src,
  741. int width, int height)
  742. {
  743. uint8_t *p, *p1;
  744. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  745. int w;
  746. p1 = dst->data[0];
  747. lum1 = src->data[0];
  748. cb1 = src->data[1];
  749. cr1 = src->data[2];
  750. for(;height > 0; height--) {
  751. p = p1;
  752. lum = lum1;
  753. cb = cb1;
  754. cr = cr1;
  755. for(w = width; w >= 2; w -= 2) {
  756. p[0] = lum[0];
  757. p[1] = cb[0];
  758. p[2] = lum[1];
  759. p[3] = cr[0];
  760. p += 4;
  761. lum += 2;
  762. cb++;
  763. cr++;
  764. }
  765. p1 += dst->linesize[0];
  766. lum1 += src->linesize[0];
  767. cb1 += src->linesize[1];
  768. cr1 += src->linesize[2];
  769. }
  770. }
  771. static void yuv422p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  772. int width, int height)
  773. {
  774. uint8_t *p, *p1;
  775. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  776. int w;
  777. p1 = dst->data[0];
  778. lum1 = src->data[0];
  779. cb1 = src->data[1];
  780. cr1 = src->data[2];
  781. for(;height > 0; height--) {
  782. p = p1;
  783. lum = lum1;
  784. cb = cb1;
  785. cr = cr1;
  786. for(w = width; w >= 2; w -= 2) {
  787. p[1] = lum[0];
  788. p[0] = cb[0];
  789. p[3] = lum[1];
  790. p[2] = cr[0];
  791. p += 4;
  792. lum += 2;
  793. cb++;
  794. cr++;
  795. }
  796. p1 += dst->linesize[0];
  797. lum1 += src->linesize[0];
  798. cb1 += src->linesize[1];
  799. cr1 += src->linesize[2];
  800. }
  801. }
/* Fixed-point scale shared by all colorspace macros below: floating point
 * coefficients are converted to integers scaled by 2^SCALEBITS via FIX(). */
#define SCALEBITS 10
#define ONE_HALF (1 << (SCALEBITS - 1))
#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
/* CCIR (studio range: luma 16..235, chroma 16..240) YUV -> RGB.
 * Part 1 computes the chroma contributions; part 2 adds the scaled luma
 * and clamps through the crop table.
 * NOTE(review): these macros expect locals cb, cr, r_add, g_add, b_add, y
 * and a crop-table pointer 'cm' to be in scope at the expansion site. */
#define YUV_TO_RGB1_CCIR(cb1, cr1)\
{\
    cb = (cb1) - 128;\
    cr = (cr1) - 128;\
    r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
    g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
            ONE_HALF;\
    b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
}
#define YUV_TO_RGB2_CCIR(r, g, b, y1)\
{\
    y = ((y1) - 16) * FIX(255.0/219.0);\
    r = cm[(y + r_add) >> SCALEBITS];\
    g = cm[(y + g_add) >> SCALEBITS];\
    b = cm[(y + b_add) >> SCALEBITS];\
}
/* Full-range (JPEG) YUV -> RGB variants of the two macros above. */
#define YUV_TO_RGB1(cb1, cr1)\
{\
    cb = (cb1) - 128;\
    cr = (cr1) - 128;\
    r_add = FIX(1.40200) * cr + ONE_HALF;\
    g_add = - FIX(0.34414) * cb - FIX(0.71414) * cr + ONE_HALF;\
    b_add = FIX(1.77200) * cb + ONE_HALF;\
}
#define YUV_TO_RGB2(r, g, b, y1)\
{\
    y = (y1) << SCALEBITS;\
    r = cm[(y + r_add) >> SCALEBITS];\
    g = cm[(y + g_add) >> SCALEBITS];\
    b = cm[(y + b_add) >> SCALEBITS];\
}
/* Range remapping between CCIR (studio) and JPEG (full) levels. */
#define Y_CCIR_TO_JPEG(y)\
cm[((y) * FIX(255.0/219.0) + (ONE_HALF - 16 * FIX(255.0/219.0))) >> SCALEBITS]
#define Y_JPEG_TO_CCIR(y)\
(((y) * FIX(219.0/255.0) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
#define C_CCIR_TO_JPEG(y)\
cm[(((y) - 128) * FIX(127.0/112.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS]
/* NOTE: the clamp is really necessary! */
static inline int C_JPEG_TO_CCIR(int y) {
    y = (((y - 128) * FIX(112.0/127.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS);
    if (y < 16)
        y = 16;
    return y;
}
/* RGB -> YUV, full range. The 'shift' parameter lets callers feed sums of
 * 2^shift pixels (subsampled averaging) in one step. */
#define RGB_TO_Y(r, g, b) \
((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
  FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)
#define RGB_TO_U(r1, g1, b1, shift)\
(((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
     FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
#define RGB_TO_V(r1, g1, b1, shift)\
(((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
     FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
/* RGB -> YUV, CCIR studio range. */
#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
     FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  867. static uint8_t y_ccir_to_jpeg[256];
  868. static uint8_t y_jpeg_to_ccir[256];
  869. static uint8_t c_ccir_to_jpeg[256];
  870. static uint8_t c_jpeg_to_ccir[256];
/* init various conversion tables */
/* Precompute the 256-entry lookup tables that remap luma and chroma
 * between JPEG (full range) and CCIR (studio range) levels. Must run
 * before any converter that reads these tables. */
static void img_convert_init(void)
{
    int i;
    /* 'cm' is consumed implicitly by the *_TO_JPEG macros for clamping */
    uint8_t *cm = cropTbl + MAX_NEG_CROP;

    for(i = 0;i < 256; i++) {
        y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
        y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
        c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
        c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
    }
}
  883. /* apply to each pixel the given table */
  884. static void img_apply_table(uint8_t *dst, int dst_wrap,
  885. const uint8_t *src, int src_wrap,
  886. int width, int height, const uint8_t *table1)
  887. {
  888. int n;
  889. const uint8_t *s;
  890. uint8_t *d;
  891. const uint8_t *table;
  892. table = table1;
  893. for(;height > 0; height--) {
  894. s = src;
  895. d = dst;
  896. n = width;
  897. while (n >= 4) {
  898. d[0] = table[s[0]];
  899. d[1] = table[s[1]];
  900. d[2] = table[s[2]];
  901. d[3] = table[s[3]];
  902. d += 4;
  903. s += 4;
  904. n -= 4;
  905. }
  906. while (n > 0) {
  907. d[0] = table[s[0]];
  908. d++;
  909. s++;
  910. n--;
  911. }
  912. dst += dst_wrap;
  913. src += src_wrap;
  914. }
  915. }
  916. /* XXX: use generic filter ? */
  917. /* XXX: in most cases, the sampling position is incorrect */
  918. /* 4x1 -> 1x1 */
  919. static void shrink41(uint8_t *dst, int dst_wrap,
  920. const uint8_t *src, int src_wrap,
  921. int width, int height)
  922. {
  923. int w;
  924. const uint8_t *s;
  925. uint8_t *d;
  926. for(;height > 0; height--) {
  927. s = src;
  928. d = dst;
  929. for(w = width;w > 0; w--) {
  930. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  931. s += 4;
  932. d++;
  933. }
  934. src += src_wrap;
  935. dst += dst_wrap;
  936. }
  937. }
  938. /* 2x1 -> 1x1 */
  939. static void shrink21(uint8_t *dst, int dst_wrap,
  940. const uint8_t *src, int src_wrap,
  941. int width, int height)
  942. {
  943. int w;
  944. const uint8_t *s;
  945. uint8_t *d;
  946. for(;height > 0; height--) {
  947. s = src;
  948. d = dst;
  949. for(w = width;w > 0; w--) {
  950. d[0] = (s[0] + s[1]) >> 1;
  951. s += 2;
  952. d++;
  953. }
  954. src += src_wrap;
  955. dst += dst_wrap;
  956. }
  957. }
  958. /* 1x2 -> 1x1 */
  959. static void shrink12(uint8_t *dst, int dst_wrap,
  960. const uint8_t *src, int src_wrap,
  961. int width, int height)
  962. {
  963. int w;
  964. uint8_t *d;
  965. const uint8_t *s1, *s2;
  966. for(;height > 0; height--) {
  967. s1 = src;
  968. s2 = s1 + src_wrap;
  969. d = dst;
  970. for(w = width;w >= 4; w-=4) {
  971. d[0] = (s1[0] + s2[0]) >> 1;
  972. d[1] = (s1[1] + s2[1]) >> 1;
  973. d[2] = (s1[2] + s2[2]) >> 1;
  974. d[3] = (s1[3] + s2[3]) >> 1;
  975. s1 += 4;
  976. s2 += 4;
  977. d += 4;
  978. }
  979. for(;w > 0; w--) {
  980. d[0] = (s1[0] + s2[0]) >> 1;
  981. s1++;
  982. s2++;
  983. d++;
  984. }
  985. src += 2 * src_wrap;
  986. dst += dst_wrap;
  987. }
  988. }
  989. /* 2x2 -> 1x1 */
  990. static void shrink22(uint8_t *dst, int dst_wrap,
  991. const uint8_t *src, int src_wrap,
  992. int width, int height)
  993. {
  994. int w;
  995. const uint8_t *s1, *s2;
  996. uint8_t *d;
  997. for(;height > 0; height--) {
  998. s1 = src;
  999. s2 = s1 + src_wrap;
  1000. d = dst;
  1001. for(w = width;w >= 4; w-=4) {
  1002. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1003. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  1004. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  1005. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  1006. s1 += 8;
  1007. s2 += 8;
  1008. d += 4;
  1009. }
  1010. for(;w > 0; w--) {
  1011. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1012. s1 += 2;
  1013. s2 += 2;
  1014. d++;
  1015. }
  1016. src += 2 * src_wrap;
  1017. dst += dst_wrap;
  1018. }
  1019. }
  1020. /* 4x4 -> 1x1 */
  1021. static void shrink44(uint8_t *dst, int dst_wrap,
  1022. const uint8_t *src, int src_wrap,
  1023. int width, int height)
  1024. {
  1025. int w;
  1026. const uint8_t *s1, *s2, *s3, *s4;
  1027. uint8_t *d;
  1028. for(;height > 0; height--) {
  1029. s1 = src;
  1030. s2 = s1 + src_wrap;
  1031. s3 = s2 + src_wrap;
  1032. s4 = s3 + src_wrap;
  1033. d = dst;
  1034. for(w = width;w > 0; w--) {
  1035. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  1036. s2[0] + s2[1] + s2[2] + s2[3] +
  1037. s3[0] + s3[1] + s3[2] + s3[3] +
  1038. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  1039. s1 += 4;
  1040. s2 += 4;
  1041. s3 += 4;
  1042. s4 += 4;
  1043. d++;
  1044. }
  1045. src += 4 * src_wrap;
  1046. dst += dst_wrap;
  1047. }
  1048. }
  1049. static void grow21_line(uint8_t *dst, const uint8_t *src,
  1050. int width)
  1051. {
  1052. int w;
  1053. const uint8_t *s1;
  1054. uint8_t *d;
  1055. s1 = src;
  1056. d = dst;
  1057. for(w = width;w >= 4; w-=4) {
  1058. d[1] = d[0] = s1[0];
  1059. d[3] = d[2] = s1[1];
  1060. s1 += 2;
  1061. d += 4;
  1062. }
  1063. for(;w >= 2; w -= 2) {
  1064. d[1] = d[0] = s1[0];
  1065. s1 ++;
  1066. d += 2;
  1067. }
  1068. /* only needed if width is not a multiple of two */
  1069. /* XXX: veryfy that */
  1070. if (w) {
  1071. d[0] = s1[0];
  1072. }
  1073. }
  1074. static void grow41_line(uint8_t *dst, const uint8_t *src,
  1075. int width)
  1076. {
  1077. int w, v;
  1078. const uint8_t *s1;
  1079. uint8_t *d;
  1080. s1 = src;
  1081. d = dst;
  1082. for(w = width;w >= 4; w-=4) {
  1083. v = s1[0];
  1084. d[0] = v;
  1085. d[1] = v;
  1086. d[2] = v;
  1087. d[3] = v;
  1088. s1 ++;
  1089. d += 4;
  1090. }
  1091. }
  1092. /* 1x1 -> 2x1 */
  1093. static void grow21(uint8_t *dst, int dst_wrap,
  1094. const uint8_t *src, int src_wrap,
  1095. int width, int height)
  1096. {
  1097. for(;height > 0; height--) {
  1098. grow21_line(dst, src, width);
  1099. src += src_wrap;
  1100. dst += dst_wrap;
  1101. }
  1102. }
  1103. /* 1x1 -> 2x2 */
  1104. static void grow22(uint8_t *dst, int dst_wrap,
  1105. const uint8_t *src, int src_wrap,
  1106. int width, int height)
  1107. {
  1108. for(;height > 0; height--) {
  1109. grow21_line(dst, src, width);
  1110. if (height%2)
  1111. src += src_wrap;
  1112. dst += dst_wrap;
  1113. }
  1114. }
  1115. /* 1x1 -> 4x1 */
  1116. static void grow41(uint8_t *dst, int dst_wrap,
  1117. const uint8_t *src, int src_wrap,
  1118. int width, int height)
  1119. {
  1120. for(;height > 0; height--) {
  1121. grow41_line(dst, src, width);
  1122. src += src_wrap;
  1123. dst += dst_wrap;
  1124. }
  1125. }
  1126. /* 1x1 -> 4x4 */
  1127. static void grow44(uint8_t *dst, int dst_wrap,
  1128. const uint8_t *src, int src_wrap,
  1129. int width, int height)
  1130. {
  1131. for(;height > 0; height--) {
  1132. grow41_line(dst, src, width);
  1133. if ((height & 3) == 1)
  1134. src += src_wrap;
  1135. dst += dst_wrap;
  1136. }
  1137. }
  1138. /* 1x2 -> 2x1 */
  1139. static void conv411(uint8_t *dst, int dst_wrap,
  1140. const uint8_t *src, int src_wrap,
  1141. int width, int height)
  1142. {
  1143. int w, c;
  1144. const uint8_t *s1, *s2;
  1145. uint8_t *d;
  1146. width>>=1;
  1147. for(;height > 0; height--) {
  1148. s1 = src;
  1149. s2 = src + src_wrap;
  1150. d = dst;
  1151. for(w = width;w > 0; w--) {
  1152. c = (s1[0] + s2[0]) >> 1;
  1153. d[0] = c;
  1154. d[1] = c;
  1155. s1++;
  1156. s2++;
  1157. d += 2;
  1158. }
  1159. src += src_wrap * 2;
  1160. dst += dst_wrap;
  1161. }
  1162. }
  1163. /* XXX: add jpeg quantize code */
  1164. #define TRANSP_INDEX (6*6*6)
  1165. /* this is maybe slow, but allows for extensions */
  1166. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1167. {
  1168. return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
  1169. }
  1170. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  1171. {
  1172. uint32_t *pal;
  1173. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  1174. int i, r, g, b;
  1175. pal = (uint32_t *)palette;
  1176. i = 0;
  1177. for(r = 0; r < 6; r++) {
  1178. for(g = 0; g < 6; g++) {
  1179. for(b = 0; b < 6; b++) {
  1180. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  1181. (pal_value[g] << 8) | pal_value[b];
  1182. }
  1183. }
  1184. }
  1185. if (has_alpha)
  1186. pal[i++] = 0;
  1187. while (i < 256)
  1188. pal[i++] = 0xff000000;
  1189. }
  1190. /* copy bit n to bits 0 ... n - 1 */
  1191. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1192. {
  1193. int mask;
  1194. mask = (1 << n) - 1;
  1195. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1196. }
/* rgb555 handling: 16 bpp, 1:5:5:5 layout, alpha in the top bit.
   bitcopy_n() replicates the high bit of each component to expand
   5 (or 6) bits up to 8 bits. */
#define RGB_NAME rgb555
#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (10 - 3), 3);\
    g = bitcopy_n(v >> (5 - 3), 3);\
    b = bitcopy_n(v << 3, 3);\
}
/* same as RGB_IN but also extracts the transparency bit as 0x00/0xff */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (10 - 3), 3);\
    g = bitcopy_n(v >> (5 - 3), 3);\
    b = bitcopy_n(v << 3, 3);\
    a = (-(v >> 15)) & 0xff;\
}
#define RGBA_OUT(d, r, g, b, a)\
{\
    ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3) | \
        ((a << 8) & 0x8000);\
}
#define BPP 2
/* instantiate the generic per-format converters for rgb555 */
#include "imgconvert_template.h"
/* rgb565 handling: 16 bpp, 5:6:5 layout, no alpha */
#define RGB_NAME rgb565
#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (11 - 3), 3);\
    g = bitcopy_n(v >> (5 - 2), 2);\
    b = bitcopy_n(v << 3, 3);\
}
#define RGB_OUT(d, r, g, b)\
{\
    ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
}
#define BPP 2
#include "imgconvert_template.h"
/* bgr24 handling: 24 bpp, byte order B, G, R.
   NOTE(review): only this section #undefs its macros explicitly while
   the others rely on imgconvert_template.h doing it -- verify the
   template's #undef behaviour. */
#define RGB_NAME bgr24
#define RGB_IN(r, g, b, s)\
{\
    b = (s)[0];\
    g = (s)[1];\
    r = (s)[2];\
}
#define RGB_OUT(d, r, g, b)\
{\
    (d)[0] = b;\
    (d)[1] = g;\
    (d)[2] = r;\
}
#define BPP 3
#include "imgconvert_template.h"
#undef RGB_IN
#undef RGB_OUT
#undef BPP
/* rgb24 handling: 24 bpp, byte order R, G, B */
#define RGB_NAME rgb24
#define FMT_RGB24
#define RGB_IN(r, g, b, s)\
{\
    r = (s)[0];\
    g = (s)[1];\
    b = (s)[2];\
}
#define RGB_OUT(d, r, g, b)\
{\
    (d)[0] = r;\
    (d)[1] = g;\
    (d)[2] = b;\
}
#define BPP 3
#include "imgconvert_template.h"
/* rgba32 handling: packed 32-bit word, alpha in the top byte */
#define RGB_NAME rgba32
#define FMT_RGBA32
#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}
#define RGBA_OUT(d, r, g, b, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
}
#define BPP 4
#include "imgconvert_template.h"
  1296. static void mono_to_gray(AVPicture *dst, const AVPicture *src,
  1297. int width, int height, int xor_mask)
  1298. {
  1299. const unsigned char *p;
  1300. unsigned char *q;
  1301. int v, dst_wrap, src_wrap;
  1302. int y, w;
  1303. p = src->data[0];
  1304. src_wrap = src->linesize[0] - ((width + 7) >> 3);
  1305. q = dst->data[0];
  1306. dst_wrap = dst->linesize[0] - width;
  1307. for(y=0;y<height;y++) {
  1308. w = width;
  1309. while (w >= 8) {
  1310. v = *p++ ^ xor_mask;
  1311. q[0] = -(v >> 7);
  1312. q[1] = -((v >> 6) & 1);
  1313. q[2] = -((v >> 5) & 1);
  1314. q[3] = -((v >> 4) & 1);
  1315. q[4] = -((v >> 3) & 1);
  1316. q[5] = -((v >> 2) & 1);
  1317. q[6] = -((v >> 1) & 1);
  1318. q[7] = -((v >> 0) & 1);
  1319. w -= 8;
  1320. q += 8;
  1321. }
  1322. if (w > 0) {
  1323. v = *p++ ^ xor_mask;
  1324. do {
  1325. q[0] = -((v >> 7) & 1);
  1326. q++;
  1327. v <<= 1;
  1328. } while (--w);
  1329. }
  1330. p += src_wrap;
  1331. q += dst_wrap;
  1332. }
  1333. }
/* 1 bpp "white is 0" monochrome to gray: invert the bits so set
   pixels come out black */
static void monowhite_to_gray(AVPicture *dst, const AVPicture *src,
int width, int height)
{
mono_to_gray(dst, src, width, height, 0xff);
}
/* 1 bpp "black is 0" monochrome to gray: bits pass through unchanged */
static void monoblack_to_gray(AVPicture *dst, const AVPicture *src,
int width, int height)
{
mono_to_gray(dst, src, width, height, 0x00);
}
  1344. static void gray_to_mono(AVPicture *dst, const AVPicture *src,
  1345. int width, int height, int xor_mask)
  1346. {
  1347. int n;
  1348. const uint8_t *s;
  1349. uint8_t *d;
  1350. int j, b, v, n1, src_wrap, dst_wrap, y;
  1351. s = src->data[0];
  1352. src_wrap = src->linesize[0] - width;
  1353. d = dst->data[0];
  1354. dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
  1355. for(y=0;y<height;y++) {
  1356. n = width;
  1357. while (n >= 8) {
  1358. v = 0;
  1359. for(j=0;j<8;j++) {
  1360. b = s[0];
  1361. s++;
  1362. v = (v << 1) | (b >> 7);
  1363. }
  1364. d[0] = v ^ xor_mask;
  1365. d++;
  1366. n -= 8;
  1367. }
  1368. if (n > 0) {
  1369. n1 = n;
  1370. v = 0;
  1371. while (n > 0) {
  1372. b = s[0];
  1373. s++;
  1374. v = (v << 1) | (b >> 7);
  1375. n--;
  1376. }
  1377. d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
  1378. d++;
  1379. }
  1380. s += src_wrap;
  1381. d += dst_wrap;
  1382. }
  1383. }
/* gray to 1 bpp monochrome with "white is 0" polarity */
static void gray_to_monowhite(AVPicture *dst, const AVPicture *src,
int width, int height)
{
gray_to_mono(dst, src, width, height, 0xff);
}
/* gray to 1 bpp monochrome with "black is 0" polarity */
static void gray_to_monoblack(AVPicture *dst, const AVPicture *src,
int width, int height)
{
gray_to_mono(dst, src, width, height, 0x00);
}
/* entry of the pixel-format dispatch table below: the direct
   conversion routine, or NULL when no direct path exists */
typedef struct ConvertEntry {
void (*convert)(AVPicture *dst,
const AVPicture *src, int width, int height);
} ConvertEntry;
/* Add each new convertion function in this table. In order to be able
   to convert from any format to any format, the following constraints
   must be satisfied:
   - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
   - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
   - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGBA32
   - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
     PIX_FMT_RGB24.
   - PIX_FMT_422 must convert to and from PIX_FMT_422P.
   The other conversion functions are just optimisations for common cases.
*/
/* indexed as convert_table[src_pix_fmt][dst_pix_fmt]; entries left
   unset (NULL .convert) make img_convert() fall back to an
   intermediate format */
static ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
    [PIX_FMT_YUV420P] = {
        [PIX_FMT_RGB555] = {
            .convert = yuv420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuv420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuv420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuv420p_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = yuv420p_to_rgba32
        },
    },
    [PIX_FMT_YUV422P] = {
        [PIX_FMT_YUV422] = {
            .convert = yuv422p_to_yuv422,
        },
        [PIX_FMT_UYVY422] = {
            .convert = yuv422p_to_uyvy422,
        },
    },
    [PIX_FMT_YUV444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuv444p_to_rgb24
        },
    },
    [PIX_FMT_YUVJ420P] = {
        [PIX_FMT_RGB555] = {
            .convert = yuvj420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuvj420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuvj420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuvj420p_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = yuvj420p_to_rgba32
        },
    },
    [PIX_FMT_YUVJ444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuvj444p_to_rgb24
        },
    },
    [PIX_FMT_YUV422] = {
        [PIX_FMT_YUV420P] = {
            .convert = yuv422_to_yuv420p,
        },
        [PIX_FMT_YUV422P] = {
            .convert = yuv422_to_yuv422p,
        },
    },
    [PIX_FMT_UYVY422] = {
        [PIX_FMT_YUV420P] = {
            .convert = uyvy422_to_yuv420p,
        },
        [PIX_FMT_YUV422P] = {
            .convert = uyvy422_to_yuv422p,
        },
    },
    [PIX_FMT_RGB24] = {
        [PIX_FMT_YUV420P] = {
            .convert = rgb24_to_yuv420p
        },
        [PIX_FMT_RGB565] = {
            .convert = rgb24_to_rgb565
        },
        [PIX_FMT_RGB555] = {
            .convert = rgb24_to_rgb555
        },
        [PIX_FMT_RGBA32] = {
            .convert = rgb24_to_rgba32
        },
        [PIX_FMT_BGR24] = {
            .convert = rgb24_to_bgr24
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb24_to_gray
        },
        [PIX_FMT_PAL8] = {
            .convert = rgb24_to_pal8
        },
        [PIX_FMT_YUV444P] = {
            .convert = rgb24_to_yuv444p
        },
        [PIX_FMT_YUVJ420P] = {
            .convert = rgb24_to_yuvj420p
        },
        [PIX_FMT_YUVJ444P] = {
            .convert = rgb24_to_yuvj444p
        },
    },
    [PIX_FMT_RGBA32] = {
        [PIX_FMT_RGB24] = {
            .convert = rgba32_to_rgb24
        },
        [PIX_FMT_RGB555] = {
            .convert = rgba32_to_rgb555
        },
        [PIX_FMT_PAL8] = {
            .convert = rgba32_to_pal8
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgba32_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgba32_to_gray
        },
    },
    [PIX_FMT_BGR24] = {
        [PIX_FMT_RGB24] = {
            .convert = bgr24_to_rgb24
        },
        [PIX_FMT_YUV420P] = {
            .convert = bgr24_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = bgr24_to_gray
        },
    },
    [PIX_FMT_RGB555] = {
        [PIX_FMT_RGB24] = {
            .convert = rgb555_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = rgb555_to_rgba32
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb555_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb555_to_gray
        },
    },
    [PIX_FMT_RGB565] = {
        [PIX_FMT_RGB24] = {
            .convert = rgb565_to_rgb24
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb565_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb565_to_gray
        },
    },
    [PIX_FMT_GRAY8] = {
        [PIX_FMT_RGB555] = {
            .convert = gray_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = gray_to_rgb565
        },
        [PIX_FMT_RGB24] = {
            .convert = gray_to_rgb24
        },
        [PIX_FMT_BGR24] = {
            .convert = gray_to_bgr24
        },
        [PIX_FMT_RGBA32] = {
            .convert = gray_to_rgba32
        },
        [PIX_FMT_MONOWHITE] = {
            .convert = gray_to_monowhite
        },
        [PIX_FMT_MONOBLACK] = {
            .convert = gray_to_monoblack
        },
    },
    [PIX_FMT_MONOWHITE] = {
        [PIX_FMT_GRAY8] = {
            .convert = monowhite_to_gray
        },
    },
    [PIX_FMT_MONOBLACK] = {
        [PIX_FMT_GRAY8] = {
            .convert = monoblack_to_gray
        },
    },
    [PIX_FMT_PAL8] = {
        [PIX_FMT_RGB555] = {
            .convert = pal8_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = pal8_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = pal8_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = pal8_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = pal8_to_rgba32
        },
    },
};
  1614. int avpicture_alloc(AVPicture *picture,
  1615. int pix_fmt, int width, int height)
  1616. {
  1617. unsigned int size;
  1618. void *ptr;
  1619. size = avpicture_get_size(pix_fmt, width, height);
  1620. ptr = av_malloc(size);
  1621. if (!ptr)
  1622. goto fail;
  1623. avpicture_fill(picture, ptr, pix_fmt, width, height);
  1624. return 0;
  1625. fail:
  1626. memset(picture, 0, sizeof(AVPicture));
  1627. return -1;
  1628. }
/* free a buffer previously allocated with avpicture_alloc(); all
   planes share the single allocation anchored at data[0] */
void avpicture_free(AVPicture *picture)
{
av_free(picture->data[0]);
}
  1633. /* return true if yuv planar */
  1634. static inline int is_yuv_planar(PixFmtInfo *ps)
  1635. {
  1636. return (ps->color_type == FF_COLOR_YUV ||
  1637. ps->color_type == FF_COLOR_YUV_JPEG) &&
  1638. ps->pixel_type == FF_PIXEL_PLANAR;
  1639. }
/* XXX: always use linesize. Return -1 if not supported */
/**
 * Convert an image between pixel formats at identical dimensions.
 * Tries, in order: plain copy, a direct routine from convert_table,
 * special-cased gray<->planar-YUV and planar-YUV<->planar-YUV paths,
 * and finally a recursive two-step conversion through an intermediate
 * normalized format.
 * @return 0 on success, -1 if the conversion is not supported
 */
int img_convert(AVPicture *dst, int dst_pix_fmt,
                const AVPicture *src, int src_pix_fmt,
                int src_width, int src_height)
{
    static int inited;
    int i, ret, dst_width, dst_height, int_pix_fmt;
    PixFmtInfo *src_pix, *dst_pix;
    ConvertEntry *ce;
    AVPicture tmp1, *tmp = &tmp1;

    if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
        dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
        return -1;
    if (src_width <= 0 || src_height <= 0)
        return 0;
    /* one-time lazy initialization
       NOTE(review): not thread safe -- confirm callers serialize */
    if (!inited) {
        inited = 1;
        img_convert_init();
    }
    dst_width = src_width;
    dst_height = src_height;
    dst_pix = &pix_fmt_info[dst_pix_fmt];
    src_pix = &pix_fmt_info[src_pix_fmt];
    if (src_pix_fmt == dst_pix_fmt) {
        /* no conversion needed: just copy */
        img_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
        return 0;
    }
    ce = &convert_table[src_pix_fmt][dst_pix_fmt];
    if (ce->convert) {
        /* specific conversion routine */
        ce->convert(dst, src, dst_width, dst_height);
        return 0;
    }
    /* gray to YUV */
    if (is_yuv_planar(dst_pix) &&
        src_pix_fmt == PIX_FMT_GRAY8) {
        int w, h, y;
        uint8_t *d;
        if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
            /* full-range target: the gray plane maps 1:1 onto luma */
            img_copy_plane(dst->data[0], dst->linesize[0],
                           src->data[0], src->linesize[0],
                           dst_width, dst_height);
        } else {
            /* compress full-range gray to CCIR-range luma */
            img_apply_table(dst->data[0], dst->linesize[0],
                            src->data[0], src->linesize[0],
                            dst_width, dst_height,
                            y_jpeg_to_ccir);
        }
        /* fill U and V with 128 */
        w = dst_width;
        h = dst_height;
        w >>= dst_pix->x_chroma_shift;
        h >>= dst_pix->y_chroma_shift;
        for (i = 1; i <= 2; i++) {
            d = dst->data[i];
            for (y = 0; y < h; y++) {
                memset(d, 128, w);
                d += dst->linesize[i];
            }
        }
        return 0;
    }
    /* YUV to gray */
    if (is_yuv_planar(src_pix) &&
        dst_pix_fmt == PIX_FMT_GRAY8) {
        if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
            /* full-range source: luma maps 1:1 onto gray */
            img_copy_plane(dst->data[0], dst->linesize[0],
                           src->data[0], src->linesize[0],
                           dst_width, dst_height);
        } else {
            /* expand CCIR-range luma to full-range gray */
            img_apply_table(dst->data[0], dst->linesize[0],
                            src->data[0], src->linesize[0],
                            dst_width, dst_height,
                            y_ccir_to_jpeg);
        }
        return 0;
    }
    /* YUV to YUV planar */
    if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
        int x_shift, y_shift, w, h, xy_shift;
        void (*resize_func)(uint8_t *dst, int dst_wrap,
                            const uint8_t *src, int src_wrap,
                            int width, int height);
        /* compute chroma size of the smallest dimensions */
        w = dst_width;
        h = dst_height;
        if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
            w >>= dst_pix->x_chroma_shift;
        else
            w >>= src_pix->x_chroma_shift;
        if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
            h >>= dst_pix->y_chroma_shift;
        else
            h >>= src_pix->y_chroma_shift;
        /* pack the signed horizontal/vertical shift deltas into one
           byte used as a switch key below */
        x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
        y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
        xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
        /* there must be filters for conversion at least from and to
           YUV444 format */
        switch(xy_shift) {
        case 0x00:
            resize_func = img_copy_plane;
            break;
        case 0x10:
            resize_func = shrink21;
            break;
        case 0x20:
            resize_func = shrink41;
            break;
        case 0x01:
            resize_func = shrink12;
            break;
        case 0x11:
            resize_func = shrink22;
            break;
        case 0x22:
            resize_func = shrink44;
            break;
        case 0xf0:
            resize_func = grow21;
            break;
        case 0xe0:
            resize_func = grow41;
            break;
        case 0xff:
            resize_func = grow22;
            break;
        case 0xee:
            resize_func = grow44;
            break;
        case 0xf1:
            resize_func = conv411;
            break;
        default:
            /* currently not handled */
            goto no_chroma_filter;
        }
        img_copy_plane(dst->data[0], dst->linesize[0],
                       src->data[0], src->linesize[0],
                       dst_width, dst_height);
        /* resample both chroma planes to the destination geometry */
        for (i = 1; i <= 2; i++)
            resize_func(dst->data[i], dst->linesize[i],
                        src->data[i], src->linesize[i],
                        dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
        /* if yuv color space conversion is needed, we do it here on
           the destination image */
        if (dst_pix->color_type != src_pix->color_type) {
            const uint8_t *y_table, *c_table;
            if (dst_pix->color_type == FF_COLOR_YUV) {
                y_table = y_jpeg_to_ccir;
                c_table = c_jpeg_to_ccir;
            } else {
                y_table = y_ccir_to_jpeg;
                c_table = c_ccir_to_jpeg;
            }
            img_apply_table(dst->data[0], dst->linesize[0],
                            dst->data[0], dst->linesize[0],
                            dst_width, dst_height,
                            y_table);
            for (i = 1; i <= 2; i++)
                img_apply_table(dst->data[i], dst->linesize[i],
                                dst->data[i], dst->linesize[i],
                                dst_width>>dst_pix->x_chroma_shift,
                                dst_height>>dst_pix->y_chroma_shift,
                                c_table);
        }
        return 0;
    }
no_chroma_filter:
    /* try to use an intermediate format */
    if (src_pix_fmt == PIX_FMT_YUV422 ||
        dst_pix_fmt == PIX_FMT_YUV422) {
        /* specific case: convert to YUV422P first */
        int_pix_fmt = PIX_FMT_YUV422P;
    } else if (src_pix_fmt == PIX_FMT_UYVY422 ||
               dst_pix_fmt == PIX_FMT_UYVY422) {
        /* specific case: convert to YUV422P first */
        int_pix_fmt = PIX_FMT_YUV422P;
    } else if ((src_pix->color_type == FF_COLOR_GRAY &&
                src_pix_fmt != PIX_FMT_GRAY8) ||
               (dst_pix->color_type == FF_COLOR_GRAY &&
                dst_pix_fmt != PIX_FMT_GRAY8)) {
        /* gray8 is the normalized format */
        int_pix_fmt = PIX_FMT_GRAY8;
    } else if ((is_yuv_planar(src_pix) &&
                src_pix_fmt != PIX_FMT_YUV444P &&
                src_pix_fmt != PIX_FMT_YUVJ444P)) {
        /* yuv444 is the normalized format */
        if (src_pix->color_type == FF_COLOR_YUV_JPEG)
            int_pix_fmt = PIX_FMT_YUVJ444P;
        else
            int_pix_fmt = PIX_FMT_YUV444P;
    } else if ((is_yuv_planar(dst_pix) &&
                dst_pix_fmt != PIX_FMT_YUV444P &&
                dst_pix_fmt != PIX_FMT_YUVJ444P)) {
        /* yuv444 is the normalized format */
        if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
            int_pix_fmt = PIX_FMT_YUVJ444P;
        else
            int_pix_fmt = PIX_FMT_YUV444P;
    } else {
        /* the two formats are rgb or gray8 or yuv[j]444p */
        if (src_pix->is_alpha && dst_pix->is_alpha)
            int_pix_fmt = PIX_FMT_RGBA32;
        else
            int_pix_fmt = PIX_FMT_RGB24;
    }
    /* two-step conversion through the intermediate format (recursion
       depth is bounded because the intermediate is always normalized) */
    if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
        return -1;
    ret = -1;
    if (img_convert(tmp, int_pix_fmt,
                    src, src_pix_fmt, src_width, src_height) < 0)
        goto fail1;
    if (img_convert(dst, dst_pix_fmt,
                    tmp, int_pix_fmt, dst_width, dst_height) < 0)
        goto fail1;
    ret = 0;
fail1:
    avpicture_free(tmp);
    return ret;
}
  1862. /* NOTE: we scan all the pixels to have an exact information */
  1863. static int get_alpha_info_pal8(const AVPicture *src, int width, int height)
  1864. {
  1865. const unsigned char *p;
  1866. int src_wrap, ret, x, y;
  1867. unsigned int a;
  1868. uint32_t *palette = (uint32_t *)src->data[1];
  1869. p = src->data[0];
  1870. src_wrap = src->linesize[0] - width;
  1871. ret = 0;
  1872. for(y=0;y<height;y++) {
  1873. for(x=0;x<width;x++) {
  1874. a = palette[p[0]] >> 24;
  1875. if (a == 0x00) {
  1876. ret |= FF_ALPHA_TRANSP;
  1877. } else if (a != 0xff) {
  1878. ret |= FF_ALPHA_SEMI_TRANSP;
  1879. }
  1880. p++;
  1881. }
  1882. p += src_wrap;
  1883. }
  1884. return ret;
  1885. }
  1886. /**
  1887. * Tell if an image really has transparent alpha values.
  1888. * @return ored mask of FF_ALPHA_xxx constants
  1889. */
  1890. int img_get_alpha_info(const AVPicture *src,
  1891. int pix_fmt, int width, int height)
  1892. {
  1893. PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  1894. int ret;
  1895. pf = &pix_fmt_info[pix_fmt];
  1896. /* no alpha can be represented in format */
  1897. if (!pf->is_alpha)
  1898. return 0;
  1899. switch(pix_fmt) {
  1900. case PIX_FMT_RGBA32:
  1901. ret = get_alpha_info_rgba32(src, width, height);
  1902. break;
  1903. case PIX_FMT_RGB555:
  1904. ret = get_alpha_info_rgb555(src, width, height);
  1905. break;
  1906. case PIX_FMT_PAL8:
  1907. ret = get_alpha_info_pal8(src, width, height);
  1908. break;
  1909. default:
  1910. /* we do not know, so everything is indicated */
  1911. ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
  1912. break;
  1913. }
  1914. return ret;
  1915. }
#ifdef HAVE_MMX
/* MMX kernel for the [-1 4 2 4 -1]/8 filter, in-place variant:
   processes 4 luma pixels, saving lum_m2 into lum_m4 and writing the
   filtered result back into lum_m2 (mm6 = rounder, mm7 = zero,
   set up by the caller) */
#define DEINT_INPLACE_LINE_LUM \
movd_m2r(lum_m4[0],mm0);\
movd_m2r(lum_m3[0],mm1);\
movd_m2r(lum_m2[0],mm2);\
movd_m2r(lum_m1[0],mm3);\
movd_m2r(lum[0],mm4);\
punpcklbw_r2r(mm7,mm0);\
movd_r2m(mm2,lum_m4[0]);\
punpcklbw_r2r(mm7,mm1);\
punpcklbw_r2r(mm7,mm2);\
punpcklbw_r2r(mm7,mm3);\
punpcklbw_r2r(mm7,mm4);\
paddw_r2r(mm3,mm1);\
psllw_i2r(1,mm2);\
paddw_r2r(mm4,mm0);\
psllw_i2r(2,mm1);\
paddw_r2r(mm6,mm2);\
paddw_r2r(mm2,mm1);\
psubusw_r2r(mm0,mm1);\
psrlw_i2r(3,mm1);\
packuswb_r2r(mm7,mm1);\
movd_r2m(mm1,lum_m2[0]);
/* same filter, out-of-place variant: writes 4 filtered pixels to dst */
#define DEINT_LINE_LUM \
movd_m2r(lum_m4[0],mm0);\
movd_m2r(lum_m3[0],mm1);\
movd_m2r(lum_m2[0],mm2);\
movd_m2r(lum_m1[0],mm3);\
movd_m2r(lum[0],mm4);\
punpcklbw_r2r(mm7,mm0);\
punpcklbw_r2r(mm7,mm1);\
punpcklbw_r2r(mm7,mm2);\
punpcklbw_r2r(mm7,mm3);\
punpcklbw_r2r(mm7,mm4);\
paddw_r2r(mm3,mm1);\
psllw_i2r(1,mm2);\
paddw_r2r(mm4,mm0);\
psllw_i2r(2,mm1);\
paddw_r2r(mm6,mm2);\
paddw_r2r(mm2,mm1);\
psubusw_r2r(mm0,mm1);\
psrlw_i2r(3,mm1);\
packuswb_r2r(mm7,mm1);\
movd_r2m(mm1,dst[0]);
#endif
/* filter parameters: [-1 4 2 4 -1] // 8 */
/* apply the 5-tap vertical deinterlace filter to one line of `size`
   pixels; lum_m4..lum are the five source lines from oldest to newest.
   NOTE(review): the MMX path only handles size in multiples of 4 --
   trailing pixels are left unwritten; confirm callers pass aligned
   widths. */
static void deinterlace_line(uint8_t *dst,
                             const uint8_t *lum_m4, const uint8_t *lum_m3,
                             const uint8_t *lum_m2, const uint8_t *lum_m1,
                             const uint8_t *lum,
                             int size)
{
#ifndef HAVE_MMX
    uint8_t *cm = cropTbl + MAX_NEG_CROP;
    int sum;
    for(;size > 0;size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        /* round, scale by 8 and clamp to 0..255 via the crop table */
        dst[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
        dst++;
    }
#else
    {
        mmx_t rounder;
        rounder.uw[0]=4;
        rounder.uw[1]=4;
        rounder.uw[2]=4;
        rounder.uw[3]=4;
        pxor_r2r(mm7,mm7);
        movq_m2r(rounder,mm6);
    }
    for (;size > 3; size-=4) {
        DEINT_LINE_LUM
        lum_m4+=4;
        lum_m3+=4;
        lum_m2+=4;
        lum_m1+=4;
        lum+=4;
    }
#endif
}
/* same 5-tap filter as deinterlace_line() but working in place:
   lum_m4 receives the old lum_m2 values and lum_m2 receives the
   filtered result */
static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
                                     int size)
{
#ifndef HAVE_MMX
    uint8_t *cm = cropTbl + MAX_NEG_CROP;
    int sum;
    for(;size > 0;size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        /* save the original line before it is overwritten below */
        lum_m4[0]=lum_m2[0];
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        lum_m2[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
    }
#else
    {
        mmx_t rounder;
        rounder.uw[0]=4;
        rounder.uw[1]=4;
        rounder.uw[2]=4;
        rounder.uw[3]=4;
        pxor_r2r(mm7,mm7);
        movq_m2r(rounder,mm6);
    }
    for (;size > 3; size-=4) {
        DEINT_INPLACE_LINE_LUM
        lum_m4+=4;
        lum_m3+=4;
        lum_m2+=4;
        lum_m1+=4;
        lum+=4;
    }
#endif
}
/* deinterlacing : 2 temporal taps, 3 spatial taps linear filter. The
   top field is copied as is, but the bottom field is deinterlaced
   against the top field. */
/* out-of-place plane deinterlace: even output rows are copied from
   the source, odd rows are synthesized with deinterlace_line() from
   a sliding window of five source rows */
static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
                                     const uint8_t *src1, int src_wrap,
                                     int width, int height)
{
    const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
    int y;
    /* the first window rows are clamped to the top of the plane */
    src_m2 = src1;
    src_m1 = src1;
    src_0=&src_m1[src_wrap];
    src_p1=&src_0[src_wrap];
    src_p2=&src_p1[src_wrap];
    for(y=0;y<(height-2);y+=2) {
        memcpy(dst,src_m1,width);
        dst += dst_wrap;
        deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
        src_m2 = src_0;
        src_m1 = src_p1;
        src_0 = src_p2;
        src_p1 += 2*src_wrap;
        src_p2 += 2*src_wrap;
        dst += dst_wrap;
    }
    memcpy(dst,src_m1,width);
    dst += dst_wrap;
    /* do last line */
    deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
}
  2076. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  2077. int width, int height)
  2078. {
  2079. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  2080. int y;
  2081. uint8_t *buf;
  2082. buf = (uint8_t*)av_malloc(width);
  2083. src_m1 = src1;
  2084. memcpy(buf,src_m1,width);
  2085. src_0=&src_m1[src_wrap];
  2086. src_p1=&src_0[src_wrap];
  2087. src_p2=&src_p1[src_wrap];
  2088. for(y=0;y<(height-2);y+=2) {
  2089. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  2090. src_m1 = src_p1;
  2091. src_0 = src_p2;
  2092. src_p1 += 2*src_wrap;
  2093. src_p2 += 2*src_wrap;
  2094. }
  2095. /* do last line */
  2096. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  2097. av_free(buf);
  2098. }
/* deinterlace - if not supported return -1 */
/* deinterlace all planes of a planar YUV picture; src == dst selects
   the in-place code path. Width and height must be multiples of 4. */
int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
                          int pix_fmt, int width, int height)
{
    int i;
    if (pix_fmt != PIX_FMT_YUV420P &&
        pix_fmt != PIX_FMT_YUV422P &&
        pix_fmt != PIX_FMT_YUV444P &&
        pix_fmt != PIX_FMT_YUV411P)
        return -1;
    if ((width & 3) != 0 || (height & 3) != 0)
        return -1;
    for(i=0;i<3;i++) {
        /* on reaching the first chroma plane, switch to the chroma
           dimensions of the given subsampling */
        if (i == 1) {
            switch(pix_fmt) {
            case PIX_FMT_YUV420P:
                width >>= 1;
                height >>= 1;
                break;
            case PIX_FMT_YUV422P:
                width >>= 1;
                break;
            case PIX_FMT_YUV411P:
                width >>= 2;
                break;
            default:
                break;
            }
        }
        if (src == dst) {
            deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i],
                                             width, height);
        } else {
            deinterlace_bottom_field(dst->data[i],dst->linesize[i],
                                     src->data[i], src->linesize[i],
                                     width, height);
        }
    }
#ifdef HAVE_MMX
    emms();
#endif
    return 0;
}
  2142. #undef FIX