You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

2452 lines
65KB

  1. /*
* Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
  4. *
  5. * This library is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU Lesser General Public
  7. * License as published by the Free Software Foundation; either
  8. * version 2 of the License, or (at your option) any later version.
  9. *
  10. * This library is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * Lesser General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU Lesser General Public
  16. * License along with this library; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. /**
  20. * @file imgconvert.c
* Misc image conversion routines.
  22. */
  23. /* TODO:
  24. * - write 'ffimg' program to test all the image related stuff
  25. * - move all api to slice based system
  26. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  27. */
  28. #include "avcodec.h"
  29. #include "dsputil.h"
  30. #ifdef USE_FASTMEMCPY
  31. #include "fastmemcpy.h"
  32. #endif
  33. #ifdef HAVE_MMX
  34. #include "i386/mmx.h"
  35. #endif
/* token-pasting helpers, used by the templated RGB conversion code */
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
/* color spaces a pixel format can belong to */
#define FF_COLOR_RGB 0 /* RGB color space */
#define FF_COLOR_GRAY 1 /* gray color space */
#define FF_COLOR_YUV 2 /* YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
#define FF_COLOR_YUV_JPEG 3 /* YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
/* storage layouts a pixel format can use */
#define FF_PIXEL_PLANAR 0 /* each channel has one component in AVPicture */
#define FF_PIXEL_PACKED 1 /* only one components containing all the channels */
#define FF_PIXEL_PALETTE 2 /* one components containing indexes for a palette */
/* Static description of one pixel format; one entry per PIX_FMT_* value
   lives in pix_fmt_info[] below. */
typedef struct PixFmtInfo {
    const char *name;        /* canonical short name, e.g. "yuv420p"; NULL for table gaps */
    uint8_t nb_channels;     /* number of channels (including alpha) */
    uint8_t color_type;      /* color type (see FF_COLOR_xxx constants) */
    uint8_t pixel_type;      /* pixel storage type (see FF_PIXEL_xxx constants) */
    uint8_t is_alpha : 1;    /* true if alpha can be specified */
    uint8_t x_chroma_shift;  /* X chroma subsampling factor is 2 ^ shift */
    uint8_t y_chroma_shift;  /* Y chroma subsampling factor is 2 ^ shift */
    uint8_t depth;           /* bit depth of the color components */
} PixFmtInfo;
  55. /* this table gives more information about formats */
/* this table gives more information about formats; formats not listed here
   keep their zero-initialized entry (name == NULL) */
static PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
    /* YUV formats */
    [PIX_FMT_YUV420P] = {
        .name = "yuv420p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 1,
    },
    [PIX_FMT_YUV422P] = {
        .name = "yuv422p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_YUV444P] = {
        .name = "yuv444p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    /* NOTE: packed YUV formats declare nb_channels = 1 here; the packed
       layouts are special-cased wherever sizes are computed */
    [PIX_FMT_YUV422] = {
        .name = "yuv422",
        .nb_channels = 1,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_UYVY422] = {
        .name = "uyvy422",
        .nb_channels = 1,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_YUV410P] = {
        .name = "yuv410p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 2, .y_chroma_shift = 2,
    },
    [PIX_FMT_YUV411P] = {
        .name = "yuv411p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 2, .y_chroma_shift = 0,
    },
    /* JPEG YUV (full 0..255 range) */
    [PIX_FMT_YUVJ420P] = {
        .name = "yuvj420p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV_JPEG,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 1,
    },
    [PIX_FMT_YUVJ422P] = {
        .name = "yuvj422p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV_JPEG,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_YUVJ444P] = {
        .name = "yuvj444p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV_JPEG,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    /* RGB formats */
    [PIX_FMT_RGB24] = {
        .name = "rgb24",
        .nb_channels = 3,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    [PIX_FMT_BGR24] = {
        .name = "bgr24",
        .nb_channels = 3,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    [PIX_FMT_RGBA32] = {
        .name = "rgba32",
        .nb_channels = 4, .is_alpha = 1,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    [PIX_FMT_RGB565] = {
        .name = "rgb565",
        .nb_channels = 3,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 5,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    /* NOTE(review): rgb555 is declared with an alpha channel here
       (4 channels, is_alpha set) — presumably the unused high bit is
       treated as alpha; confirm against the converters before relying
       on it */
    [PIX_FMT_RGB555] = {
        .name = "rgb555",
        .nb_channels = 4, .is_alpha = 1,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 5,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    /* gray / mono formats */
    [PIX_FMT_GRAY8] = {
        .name = "gray",
        .nb_channels = 1,
        .color_type = FF_COLOR_GRAY,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
    },
    [PIX_FMT_MONOWHITE] = {
        .name = "monow",
        .nb_channels = 1,
        .color_type = FF_COLOR_GRAY,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 1,
    },
    [PIX_FMT_MONOBLACK] = {
        .name = "monob",
        .nb_channels = 1,
        .color_type = FF_COLOR_GRAY,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 1,
    },
    /* paletted formats */
    [PIX_FMT_PAL8] = {
        .name = "pal8",
        .nb_channels = 4, .is_alpha = 1,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PALETTE,
        .depth = 8,
    },
    /* hardware-accelerated formats: name only, no layout information */
    [PIX_FMT_XVMC_MPEG2_MC] = {
        .name = "xvmcmc",
    },
    [PIX_FMT_XVMC_MPEG2_IDCT] = {
        .name = "xvmcidct",
    },
    [PIX_FMT_UYVY411] = {
        .name = "uyvy411",
        .nb_channels = 1,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 2, .y_chroma_shift = 0,
    },
};
/**
 * Return the chroma subsampling shifts of a pixel format: the chroma
 * plane is (width >> *h_shift) x (height >> *v_shift).
 * NOTE(review): pix_fmt is not range-checked; callers must pass a value
 * in [0, PIX_FMT_NB).
 */
void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
{
    *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
    *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
}
  230. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  231. {
  232. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  233. return "???";
  234. else
  235. return pix_fmt_info[pix_fmt].name;
  236. }
  237. enum PixelFormat avcodec_get_pix_fmt(const char* name)
  238. {
  239. int i;
  240. for (i=0; i < PIX_FMT_NB; i++)
  241. if (!strcmp(pix_fmt_info[i].name, name))
  242. break;
  243. return i;
  244. }
/**
 * Fill the data pointers and line sizes of 'picture' for an image of the
 * given format and size stored contiguously at 'ptr'.
 *
 * Returns the total byte size required for the image, or -1 for formats
 * whose layout is unknown here (e.g. the XvMC formats). May be called
 * with ptr == NULL purely to compute the size (see avpicture_get_size()).
 * NOTE(review): data[3] is only cleared in the default branch; for the
 * supported formats it is left untouched — callers appear to use only
 * data[0..2]; confirm before adding a 4-plane format.
 */
int avpicture_fill(AVPicture *picture, uint8_t *ptr,
                   int pix_fmt, int width, int height)
{
    int size, w2, h2, size2;
    PixFmtInfo *pinfo;

    pinfo = &pix_fmt_info[pix_fmt];
    size = width * height;
    switch(pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
    case PIX_FMT_YUVJ420P:
    case PIX_FMT_YUVJ422P:
    case PIX_FMT_YUVJ444P:
        /* planar: luma plane followed by two chroma planes whose
           dimensions are rounded UP after subsampling */
        w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
        h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
        size2 = w2 * h2;
        picture->data[0] = ptr;
        picture->data[1] = picture->data[0] + size;
        picture->data[2] = picture->data[1] + size2;
        picture->linesize[0] = width;
        picture->linesize[1] = w2;
        picture->linesize[2] = w2;
        return size + 2 * size2;
    case PIX_FMT_RGB24:
    case PIX_FMT_BGR24:
        /* packed, 3 bytes per pixel */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 3;
        return size * 3;
    case PIX_FMT_RGBA32:
        /* packed, 4 bytes per pixel */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 4;
        return size * 4;
    case PIX_FMT_RGB555:
    case PIX_FMT_RGB565:
    case PIX_FMT_YUV422:
        /* packed, 2 bytes per pixel */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 2;
        return size * 2;
    case PIX_FMT_UYVY422:
        /* packed, 2 bytes per pixel */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 2;
        return size * 2;
    case PIX_FMT_UYVY411:
        /* packed 4:1:1 -> 6 bytes for every 4 pixels */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width + width/2;
        return size + size/2;
    case PIX_FMT_GRAY8:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width;
        return size;
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        /* 1 bit per pixel, rows padded to a whole byte */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = (width + 7) >> 3;
        return picture->linesize[0] * height;
    case PIX_FMT_PAL8:
        /* pixel indexes, then the palette aligned to 4 bytes */
        size2 = (size + 3) & ~3;
        picture->data[0] = ptr;
        picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
        picture->data[2] = NULL;
        picture->linesize[0] = width;
        picture->linesize[1] = 4;
        return size2 + 256 * 4;
    default:
        picture->data[0] = NULL;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        return -1;
    }
}
  334. int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
  335. unsigned char *dest, int dest_size)
  336. {
  337. PixFmtInfo* pf = &pix_fmt_info[pix_fmt];
  338. int i, j, w, h, data_planes;
  339. const unsigned char* s;
  340. int size = avpicture_get_size(pix_fmt, width, height);
  341. if (size > dest_size)
  342. return -1;
  343. if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
  344. if (pix_fmt == PIX_FMT_YUV422 ||
  345. pix_fmt == PIX_FMT_UYVY422 ||
  346. pix_fmt == PIX_FMT_RGB565 ||
  347. pix_fmt == PIX_FMT_RGB555)
  348. w = width * 2;
  349. else if (pix_fmt == PIX_FMT_UYVY411)
  350. w = width + width/2;
  351. else if (pix_fmt == PIX_FMT_PAL8)
  352. w = width;
  353. else
  354. w = width * (pf->depth * pf->nb_channels / 8);
  355. data_planes = 1;
  356. h = height;
  357. } else {
  358. data_planes = pf->nb_channels;
  359. w = (width*pf->depth + 7)/8;
  360. h = height;
  361. }
  362. for (i=0; i<data_planes; i++) {
  363. if (i == 1) {
  364. w = width >> pf->x_chroma_shift;
  365. h = height >> pf->y_chroma_shift;
  366. }
  367. s = src->data[i];
  368. for(j=0; j<h; j++) {
  369. memcpy(dest, s, w);
  370. dest += w;
  371. s += src->linesize[i];
  372. }
  373. }
  374. if (pf->pixel_type == FF_PIXEL_PALETTE)
  375. memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);
  376. return size;
  377. }
  378. int avpicture_get_size(int pix_fmt, int width, int height)
  379. {
  380. AVPicture dummy_pict;
  381. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  382. }
  383. /**
  384. * compute the loss when converting from a pixel format to another
  385. */
  386. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  387. int has_alpha)
  388. {
  389. const PixFmtInfo *pf, *ps;
  390. int loss;
  391. ps = &pix_fmt_info[src_pix_fmt];
  392. pf = &pix_fmt_info[dst_pix_fmt];
  393. /* compute loss */
  394. loss = 0;
  395. pf = &pix_fmt_info[dst_pix_fmt];
  396. if (pf->depth < ps->depth ||
  397. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  398. loss |= FF_LOSS_DEPTH;
  399. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  400. pf->y_chroma_shift > ps->y_chroma_shift)
  401. loss |= FF_LOSS_RESOLUTION;
  402. switch(pf->color_type) {
  403. case FF_COLOR_RGB:
  404. if (ps->color_type != FF_COLOR_RGB &&
  405. ps->color_type != FF_COLOR_GRAY)
  406. loss |= FF_LOSS_COLORSPACE;
  407. break;
  408. case FF_COLOR_GRAY:
  409. if (ps->color_type != FF_COLOR_GRAY)
  410. loss |= FF_LOSS_COLORSPACE;
  411. break;
  412. case FF_COLOR_YUV:
  413. if (ps->color_type != FF_COLOR_YUV)
  414. loss |= FF_LOSS_COLORSPACE;
  415. break;
  416. case FF_COLOR_YUV_JPEG:
  417. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  418. ps->color_type != FF_COLOR_YUV &&
  419. ps->color_type != FF_COLOR_GRAY)
  420. loss |= FF_LOSS_COLORSPACE;
  421. break;
  422. default:
  423. /* fail safe test */
  424. if (ps->color_type != pf->color_type)
  425. loss |= FF_LOSS_COLORSPACE;
  426. break;
  427. }
  428. if (pf->color_type == FF_COLOR_GRAY &&
  429. ps->color_type != FF_COLOR_GRAY)
  430. loss |= FF_LOSS_CHROMA;
  431. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  432. loss |= FF_LOSS_ALPHA;
  433. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  434. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  435. loss |= FF_LOSS_COLORQUANT;
  436. return loss;
  437. }
  438. static int avg_bits_per_pixel(int pix_fmt)
  439. {
  440. int bits;
  441. const PixFmtInfo *pf;
  442. pf = &pix_fmt_info[pix_fmt];
  443. switch(pf->pixel_type) {
  444. case FF_PIXEL_PACKED:
  445. switch(pix_fmt) {
  446. case PIX_FMT_YUV422:
  447. case PIX_FMT_UYVY422:
  448. case PIX_FMT_RGB565:
  449. case PIX_FMT_RGB555:
  450. bits = 16;
  451. break;
  452. case PIX_FMT_UYVY411:
  453. bits = 12;
  454. break;
  455. default:
  456. bits = pf->depth * pf->nb_channels;
  457. break;
  458. }
  459. break;
  460. case FF_PIXEL_PLANAR:
  461. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  462. bits = pf->depth * pf->nb_channels;
  463. } else {
  464. bits = pf->depth + ((2 * pf->depth) >>
  465. (pf->x_chroma_shift + pf->y_chroma_shift));
  466. }
  467. break;
  468. case FF_PIXEL_PALETTE:
  469. bits = 8;
  470. break;
  471. default:
  472. bits = -1;
  473. break;
  474. }
  475. return bits;
  476. }
  477. static int avcodec_find_best_pix_fmt1(int pix_fmt_mask,
  478. int src_pix_fmt,
  479. int has_alpha,
  480. int loss_mask)
  481. {
  482. int dist, i, loss, min_dist, dst_pix_fmt;
  483. /* find exact color match with smallest size */
  484. dst_pix_fmt = -1;
  485. min_dist = 0x7fffffff;
  486. for(i = 0;i < PIX_FMT_NB; i++) {
  487. if (pix_fmt_mask & (1 << i)) {
  488. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  489. if (loss == 0) {
  490. dist = avg_bits_per_pixel(i);
  491. if (dist < min_dist) {
  492. min_dist = dist;
  493. dst_pix_fmt = i;
  494. }
  495. }
  496. }
  497. }
  498. return dst_pix_fmt;
  499. }
  500. /**
  501. * find best pixel format to convert to. Return -1 if none found
  502. */
  503. int avcodec_find_best_pix_fmt(int pix_fmt_mask, int src_pix_fmt,
  504. int has_alpha, int *loss_ptr)
  505. {
  506. int dst_pix_fmt, loss_mask, i;
  507. static const int loss_mask_order[] = {
  508. ~0, /* no loss first */
  509. ~FF_LOSS_ALPHA,
  510. ~FF_LOSS_RESOLUTION,
  511. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  512. ~FF_LOSS_COLORQUANT,
  513. ~FF_LOSS_DEPTH,
  514. 0,
  515. };
  516. /* try with successive loss */
  517. i = 0;
  518. for(;;) {
  519. loss_mask = loss_mask_order[i++];
  520. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  521. has_alpha, loss_mask);
  522. if (dst_pix_fmt >= 0)
  523. goto found;
  524. if (loss_mask == 0)
  525. break;
  526. }
  527. return -1;
  528. found:
  529. if (loss_ptr)
  530. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  531. return dst_pix_fmt;
  532. }
  533. static void img_copy_plane(uint8_t *dst, int dst_wrap,
  534. const uint8_t *src, int src_wrap,
  535. int width, int height)
  536. {
  537. for(;height > 0; height--) {
  538. memcpy(dst, src, width);
  539. dst += dst_wrap;
  540. src += src_wrap;
  541. }
  542. }
  543. /**
  544. * Copy image 'src' to 'dst'.
  545. */
  546. void img_copy(AVPicture *dst, const AVPicture *src,
  547. int pix_fmt, int width, int height)
  548. {
  549. int bwidth, bits, i;
  550. PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  551. pf = &pix_fmt_info[pix_fmt];
  552. switch(pf->pixel_type) {
  553. case FF_PIXEL_PACKED:
  554. switch(pix_fmt) {
  555. case PIX_FMT_YUV422:
  556. case PIX_FMT_UYVY422:
  557. case PIX_FMT_RGB565:
  558. case PIX_FMT_RGB555:
  559. bits = 16;
  560. break;
  561. case PIX_FMT_UYVY411:
  562. bits = 12;
  563. break;
  564. default:
  565. bits = pf->depth * pf->nb_channels;
  566. break;
  567. }
  568. bwidth = (width * bits + 7) >> 3;
  569. img_copy_plane(dst->data[0], dst->linesize[0],
  570. src->data[0], src->linesize[0],
  571. bwidth, height);
  572. break;
  573. case FF_PIXEL_PLANAR:
  574. for(i = 0; i < pf->nb_channels; i++) {
  575. int w, h;
  576. w = width;
  577. h = height;
  578. if (i == 1 || i == 2) {
  579. w >>= pf->x_chroma_shift;
  580. h >>= pf->y_chroma_shift;
  581. }
  582. bwidth = (w * pf->depth + 7) >> 3;
  583. img_copy_plane(dst->data[i], dst->linesize[i],
  584. src->data[i], src->linesize[i],
  585. bwidth, h);
  586. }
  587. break;
  588. case FF_PIXEL_PALETTE:
  589. img_copy_plane(dst->data[0], dst->linesize[0],
  590. src->data[0], src->linesize[0],
  591. width, height);
  592. /* copy the palette */
  593. img_copy_plane(dst->data[1], dst->linesize[1],
  594. src->data[1], src->linesize[1],
  595. 4, 256);
  596. break;
  597. }
  598. }
/* XXX: totally non optimized */
/**
 * Convert packed YUV 4:2:2 (Y0 U Y1 V) to planar YUV 4:2:0.
 * Chroma is taken from the even lines only (no vertical averaging); odd
 * lines contribute luma only. Handles odd widths and an odd final line.
 */
static void yuv422_to_yuv420p(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;

    p1 = src->data[0];
    lum1 = dst->data[0];
    cb1 = dst->data[1];
    cr1 = dst->data[2];
    /* two source lines per iteration: one with chroma, one luma-only */
    for(;height >= 1; height -= 2) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        for(w = width; w >= 2; w -= 2) {
            lum[0] = p[0];
            cb[0] = p[1];
            lum[1] = p[2];
            cr[0] = p[3];
            p += 4;
            lum += 2;
            cb++;
            cr++;
        }
        if (w) {
            /* odd trailing pixel; NOTE(review): cr reads p[3], one byte
               past this pixel's pair — presumably the packed line is
               padded; confirm with the allocators */
            lum[0] = p[0];
            cb[0] = p[1];
            cr[0] = p[3];
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        if (height>1) {
            /* odd line: luma only, chroma comes from the line above */
            p = p1;
            lum = lum1;
            for(w = width; w >= 2; w -= 2) {
                lum[0] = p[0];
                lum[1] = p[2];
                p += 4;
                lum += 2;
            }
            if (w) {
                lum[0] = p[0];
            }
            p1 += src->linesize[0];
            lum1 += dst->linesize[0];
        }
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
/**
 * Convert packed UYVY 4:2:2 (U Y0 V Y1) to planar YUV 4:2:0.
 * Same strategy as yuv422_to_yuv420p() with the byte order swapped:
 * chroma from even lines only, odd lines contribute luma only.
 */
static void uyvy422_to_yuv420p(AVPicture *dst, const AVPicture *src,
                               int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;

    p1 = src->data[0];
    lum1 = dst->data[0];
    cb1 = dst->data[1];
    cr1 = dst->data[2];
    /* two source lines per iteration: one with chroma, one luma-only */
    for(;height >= 1; height -= 2) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        for(w = width; w >= 2; w -= 2) {
            lum[0] = p[1];
            cb[0] = p[0];
            lum[1] = p[3];
            cr[0] = p[2];
            p += 4;
            lum += 2;
            cb++;
            cr++;
        }
        if (w) {
            /* odd trailing pixel: its U/V come from the same group */
            lum[0] = p[1];
            cb[0] = p[0];
            cr[0] = p[2];
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        if (height>1) {
            /* odd line: luma only, chroma comes from the line above */
            p = p1;
            lum = lum1;
            for(w = width; w >= 2; w -= 2) {
                lum[0] = p[1];
                lum[1] = p[3];
                p += 4;
                lum += 2;
            }
            if (w) {
                lum[0] = p[1];
            }
            p1 += src->linesize[0];
            lum1 += dst->linesize[0];
        }
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
  706. static void uyvy422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  707. int width, int height)
  708. {
  709. const uint8_t *p, *p1;
  710. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  711. int w;
  712. p1 = src->data[0];
  713. lum1 = dst->data[0];
  714. cb1 = dst->data[1];
  715. cr1 = dst->data[2];
  716. for(;height > 0; height--) {
  717. p = p1;
  718. lum = lum1;
  719. cb = cb1;
  720. cr = cr1;
  721. for(w = width; w >= 2; w -= 2) {
  722. lum[0] = p[1];
  723. cb[0] = p[0];
  724. lum[1] = p[3];
  725. cr[0] = p[2];
  726. p += 4;
  727. lum += 2;
  728. cb++;
  729. cr++;
  730. }
  731. p1 += src->linesize[0];
  732. lum1 += dst->linesize[0];
  733. cb1 += dst->linesize[1];
  734. cr1 += dst->linesize[2];
  735. }
  736. }
  737. static void yuv422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  738. int width, int height)
  739. {
  740. const uint8_t *p, *p1;
  741. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  742. int w;
  743. p1 = src->data[0];
  744. lum1 = dst->data[0];
  745. cb1 = dst->data[1];
  746. cr1 = dst->data[2];
  747. for(;height > 0; height--) {
  748. p = p1;
  749. lum = lum1;
  750. cb = cb1;
  751. cr = cr1;
  752. for(w = width; w >= 2; w -= 2) {
  753. lum[0] = p[0];
  754. cb[0] = p[1];
  755. lum[1] = p[2];
  756. cr[0] = p[3];
  757. p += 4;
  758. lum += 2;
  759. cb++;
  760. cr++;
  761. }
  762. p1 += src->linesize[0];
  763. lum1 += dst->linesize[0];
  764. cb1 += dst->linesize[1];
  765. cr1 += dst->linesize[2];
  766. }
  767. }
  768. static void yuv422p_to_yuv422(AVPicture *dst, const AVPicture *src,
  769. int width, int height)
  770. {
  771. uint8_t *p, *p1;
  772. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  773. int w;
  774. p1 = dst->data[0];
  775. lum1 = src->data[0];
  776. cb1 = src->data[1];
  777. cr1 = src->data[2];
  778. for(;height > 0; height--) {
  779. p = p1;
  780. lum = lum1;
  781. cb = cb1;
  782. cr = cr1;
  783. for(w = width; w >= 2; w -= 2) {
  784. p[0] = lum[0];
  785. p[1] = cb[0];
  786. p[2] = lum[1];
  787. p[3] = cr[0];
  788. p += 4;
  789. lum += 2;
  790. cb++;
  791. cr++;
  792. }
  793. p1 += dst->linesize[0];
  794. lum1 += src->linesize[0];
  795. cb1 += src->linesize[1];
  796. cr1 += src->linesize[2];
  797. }
  798. }
  799. static void yuv422p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  800. int width, int height)
  801. {
  802. uint8_t *p, *p1;
  803. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  804. int w;
  805. p1 = dst->data[0];
  806. lum1 = src->data[0];
  807. cb1 = src->data[1];
  808. cr1 = src->data[2];
  809. for(;height > 0; height--) {
  810. p = p1;
  811. lum = lum1;
  812. cb = cb1;
  813. cr = cr1;
  814. for(w = width; w >= 2; w -= 2) {
  815. p[1] = lum[0];
  816. p[0] = cb[0];
  817. p[3] = lum[1];
  818. p[2] = cr[0];
  819. p += 4;
  820. lum += 2;
  821. cb++;
  822. cr++;
  823. }
  824. p1 += dst->linesize[0];
  825. lum1 += src->linesize[0];
  826. cb1 += src->linesize[1];
  827. cr1 += src->linesize[2];
  828. }
  829. }
  830. static void uyvy411_to_yuv411p(AVPicture *dst, const AVPicture *src,
  831. int width, int height)
  832. {
  833. const uint8_t *p, *p1;
  834. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  835. int w;
  836. p1 = src->data[0];
  837. lum1 = dst->data[0];
  838. cb1 = dst->data[1];
  839. cr1 = dst->data[2];
  840. for(;height > 0; height--) {
  841. p = p1;
  842. lum = lum1;
  843. cb = cb1;
  844. cr = cr1;
  845. for(w = width; w >= 4; w -= 4) {
  846. cb[0] = p[0];
  847. lum[0] = p[1];
  848. lum[1] = p[2];
  849. cr[0] = p[3];
  850. lum[2] = p[4];
  851. lum[3] = p[5];
  852. p += 6;
  853. lum += 4;
  854. cb++;
  855. cr++;
  856. }
  857. p1 += src->linesize[0];
  858. lum1 += dst->linesize[0];
  859. cb1 += dst->linesize[1];
  860. cr1 += dst->linesize[2];
  861. }
  862. }
  863. static void yuv420p_to_yuv422(AVPicture *dst, const AVPicture *src,
  864. int width, int height)
  865. {
  866. int w, h;
  867. uint8_t *line1, *line2, *linesrc = dst->data[0];
  868. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  869. uint8_t *cb1, *cb2 = src->data[1];
  870. uint8_t *cr1, *cr2 = src->data[2];
  871. for(h = height / 2; h--;) {
  872. line1 = linesrc;
  873. line2 = linesrc + dst->linesize[0];
  874. lum1 = lumsrc;
  875. lum2 = lumsrc + src->linesize[0];
  876. cb1 = cb2;
  877. cr1 = cr2;
  878. for(w = width / 2; w--;) {
  879. *line1++ = *lum1++; *line2++ = *lum2++;
  880. *line1++ = *line2++ = *cb1++;
  881. *line1++ = *lum1++; *line2++ = *lum2++;
  882. *line1++ = *line2++ = *cr1++;
  883. }
  884. linesrc += dst->linesize[0] * 2;
  885. lumsrc += src->linesize[0] * 2;
  886. cb2 += src->linesize[1];
  887. cr2 += src->linesize[2];
  888. }
  889. }
/* fixed-point arithmetic: SCALEBITS fractional bits, ONE_HALF for rounding */
#define SCALEBITS 10
#define ONE_HALF (1 << (SCALEBITS - 1))
#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
/* CCIR 601 (studio range) YUV -> RGB: phase 1 computes the chroma
   contributions for a 2x2 block, phase 2 adds the scaled luma and clamps
   through the 'cm' crop table */
#define YUV_TO_RGB1_CCIR(cb1, cr1)\
{\
cb = (cb1) - 128;\
cr = (cr1) - 128;\
r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
ONE_HALF;\
b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
}
#define YUV_TO_RGB2_CCIR(r, g, b, y1)\
{\
y = ((y1) - 16) * FIX(255.0/219.0);\
r = cm[(y + r_add) >> SCALEBITS];\
g = cm[(y + g_add) >> SCALEBITS];\
b = cm[(y + b_add) >> SCALEBITS];\
}
/* full-range (JPEG) YUV -> RGB, same two-phase scheme without the
   studio-range rescaling */
#define YUV_TO_RGB1(cb1, cr1)\
{\
cb = (cb1) - 128;\
cr = (cr1) - 128;\
r_add = FIX(1.40200) * cr + ONE_HALF;\
g_add = - FIX(0.34414) * cb - FIX(0.71414) * cr + ONE_HALF;\
b_add = FIX(1.77200) * cb + ONE_HALF;\
}
#define YUV_TO_RGB2(r, g, b, y1)\
{\
y = (y1) << SCALEBITS;\
r = cm[(y + r_add) >> SCALEBITS];\
g = cm[(y + g_add) >> SCALEBITS];\
b = cm[(y + b_add) >> SCALEBITS];\
}
/* luma/chroma sample conversion between CCIR (16..235 / 16..240) and
   JPEG (0..255) ranges */
#define Y_CCIR_TO_JPEG(y)\
cm[((y) * FIX(255.0/219.0) + (ONE_HALF - 16 * FIX(255.0/219.0))) >> SCALEBITS]
#define Y_JPEG_TO_CCIR(y)\
(((y) * FIX(219.0/255.0) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
#define C_CCIR_TO_JPEG(y)\
cm[(((y) - 128) * FIX(127.0/112.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS]
  930. /* NOTE: the clamp is really necessary! */
  931. static inline int C_JPEG_TO_CCIR(int y) {
  932. y = (((y - 128) * FIX(112.0/127.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS);
  933. if (y < 16)
  934. y = 16;
  935. return y;
  936. }
/* RGB -> full-range (JPEG) YUV; the chroma macros take a 'shift' so that
   several pixels can be summed before conversion (block averaging) */
#define RGB_TO_Y(r, g, b) \
((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)
#define RGB_TO_U(r1, g1, b1, shift)\
(((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
#define RGB_TO_V(r1, g1, b1, shift)\
(((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
/* RGB -> CCIR 601 studio-range YUV */
#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
/* per-sample lookup tables filled in by img_convert_init() */
static uint8_t y_ccir_to_jpeg[256];
static uint8_t y_jpeg_to_ccir[256];
static uint8_t c_ccir_to_jpeg[256];
static uint8_t c_jpeg_to_ccir[256];
  959. /* init various conversion tables */
  960. static void img_convert_init(void)
  961. {
  962. int i;
  963. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  964. for(i = 0;i < 256; i++) {
  965. y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
  966. y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
  967. c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
  968. c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
  969. }
  970. }
  971. /* apply to each pixel the given table */
  972. static void img_apply_table(uint8_t *dst, int dst_wrap,
  973. const uint8_t *src, int src_wrap,
  974. int width, int height, const uint8_t *table1)
  975. {
  976. int n;
  977. const uint8_t *s;
  978. uint8_t *d;
  979. const uint8_t *table;
  980. table = table1;
  981. for(;height > 0; height--) {
  982. s = src;
  983. d = dst;
  984. n = width;
  985. while (n >= 4) {
  986. d[0] = table[s[0]];
  987. d[1] = table[s[1]];
  988. d[2] = table[s[2]];
  989. d[3] = table[s[3]];
  990. d += 4;
  991. s += 4;
  992. n -= 4;
  993. }
  994. while (n > 0) {
  995. d[0] = table[s[0]];
  996. d++;
  997. s++;
  998. n--;
  999. }
  1000. dst += dst_wrap;
  1001. src += src_wrap;
  1002. }
  1003. }
  1004. /* XXX: use generic filter ? */
  1005. /* XXX: in most cases, the sampling position is incorrect */
  1006. /* 4x1 -> 1x1 */
  1007. static void shrink41(uint8_t *dst, int dst_wrap,
  1008. const uint8_t *src, int src_wrap,
  1009. int width, int height)
  1010. {
  1011. int w;
  1012. const uint8_t *s;
  1013. uint8_t *d;
  1014. for(;height > 0; height--) {
  1015. s = src;
  1016. d = dst;
  1017. for(w = width;w > 0; w--) {
  1018. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  1019. s += 4;
  1020. d++;
  1021. }
  1022. src += src_wrap;
  1023. dst += dst_wrap;
  1024. }
  1025. }
  1026. /* 2x1 -> 1x1 */
  1027. static void shrink21(uint8_t *dst, int dst_wrap,
  1028. const uint8_t *src, int src_wrap,
  1029. int width, int height)
  1030. {
  1031. int w;
  1032. const uint8_t *s;
  1033. uint8_t *d;
  1034. for(;height > 0; height--) {
  1035. s = src;
  1036. d = dst;
  1037. for(w = width;w > 0; w--) {
  1038. d[0] = (s[0] + s[1]) >> 1;
  1039. s += 2;
  1040. d++;
  1041. }
  1042. src += src_wrap;
  1043. dst += dst_wrap;
  1044. }
  1045. }
  1046. /* 1x2 -> 1x1 */
  1047. static void shrink12(uint8_t *dst, int dst_wrap,
  1048. const uint8_t *src, int src_wrap,
  1049. int width, int height)
  1050. {
  1051. int w;
  1052. uint8_t *d;
  1053. const uint8_t *s1, *s2;
  1054. for(;height > 0; height--) {
  1055. s1 = src;
  1056. s2 = s1 + src_wrap;
  1057. d = dst;
  1058. for(w = width;w >= 4; w-=4) {
  1059. d[0] = (s1[0] + s2[0]) >> 1;
  1060. d[1] = (s1[1] + s2[1]) >> 1;
  1061. d[2] = (s1[2] + s2[2]) >> 1;
  1062. d[3] = (s1[3] + s2[3]) >> 1;
  1063. s1 += 4;
  1064. s2 += 4;
  1065. d += 4;
  1066. }
  1067. for(;w > 0; w--) {
  1068. d[0] = (s1[0] + s2[0]) >> 1;
  1069. s1++;
  1070. s2++;
  1071. d++;
  1072. }
  1073. src += 2 * src_wrap;
  1074. dst += dst_wrap;
  1075. }
  1076. }
  1077. /* 2x2 -> 1x1 */
  1078. static void shrink22(uint8_t *dst, int dst_wrap,
  1079. const uint8_t *src, int src_wrap,
  1080. int width, int height)
  1081. {
  1082. int w;
  1083. const uint8_t *s1, *s2;
  1084. uint8_t *d;
  1085. for(;height > 0; height--) {
  1086. s1 = src;
  1087. s2 = s1 + src_wrap;
  1088. d = dst;
  1089. for(w = width;w >= 4; w-=4) {
  1090. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1091. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  1092. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  1093. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  1094. s1 += 8;
  1095. s2 += 8;
  1096. d += 4;
  1097. }
  1098. for(;w > 0; w--) {
  1099. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1100. s1 += 2;
  1101. s2 += 2;
  1102. d++;
  1103. }
  1104. src += 2 * src_wrap;
  1105. dst += dst_wrap;
  1106. }
  1107. }
  1108. /* 4x4 -> 1x1 */
  1109. static void shrink44(uint8_t *dst, int dst_wrap,
  1110. const uint8_t *src, int src_wrap,
  1111. int width, int height)
  1112. {
  1113. int w;
  1114. const uint8_t *s1, *s2, *s3, *s4;
  1115. uint8_t *d;
  1116. for(;height > 0; height--) {
  1117. s1 = src;
  1118. s2 = s1 + src_wrap;
  1119. s3 = s2 + src_wrap;
  1120. s4 = s3 + src_wrap;
  1121. d = dst;
  1122. for(w = width;w > 0; w--) {
  1123. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  1124. s2[0] + s2[1] + s2[2] + s2[3] +
  1125. s3[0] + s3[1] + s3[2] + s3[3] +
  1126. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  1127. s1 += 4;
  1128. s2 += 4;
  1129. s3 += 4;
  1130. s4 += 4;
  1131. d++;
  1132. }
  1133. src += 4 * src_wrap;
  1134. dst += dst_wrap;
  1135. }
  1136. }
  1137. static void grow21_line(uint8_t *dst, const uint8_t *src,
  1138. int width)
  1139. {
  1140. int w;
  1141. const uint8_t *s1;
  1142. uint8_t *d;
  1143. s1 = src;
  1144. d = dst;
  1145. for(w = width;w >= 4; w-=4) {
  1146. d[1] = d[0] = s1[0];
  1147. d[3] = d[2] = s1[1];
  1148. s1 += 2;
  1149. d += 4;
  1150. }
  1151. for(;w >= 2; w -= 2) {
  1152. d[1] = d[0] = s1[0];
  1153. s1 ++;
  1154. d += 2;
  1155. }
  1156. /* only needed if width is not a multiple of two */
  1157. /* XXX: veryfy that */
  1158. if (w) {
  1159. d[0] = s1[0];
  1160. }
  1161. }
  1162. static void grow41_line(uint8_t *dst, const uint8_t *src,
  1163. int width)
  1164. {
  1165. int w, v;
  1166. const uint8_t *s1;
  1167. uint8_t *d;
  1168. s1 = src;
  1169. d = dst;
  1170. for(w = width;w >= 4; w-=4) {
  1171. v = s1[0];
  1172. d[0] = v;
  1173. d[1] = v;
  1174. d[2] = v;
  1175. d[3] = v;
  1176. s1 ++;
  1177. d += 4;
  1178. }
  1179. }
  1180. /* 1x1 -> 2x1 */
  1181. static void grow21(uint8_t *dst, int dst_wrap,
  1182. const uint8_t *src, int src_wrap,
  1183. int width, int height)
  1184. {
  1185. for(;height > 0; height--) {
  1186. grow21_line(dst, src, width);
  1187. src += src_wrap;
  1188. dst += dst_wrap;
  1189. }
  1190. }
  1191. /* 1x1 -> 2x2 */
  1192. static void grow22(uint8_t *dst, int dst_wrap,
  1193. const uint8_t *src, int src_wrap,
  1194. int width, int height)
  1195. {
  1196. for(;height > 0; height--) {
  1197. grow21_line(dst, src, width);
  1198. if (height%2)
  1199. src += src_wrap;
  1200. dst += dst_wrap;
  1201. }
  1202. }
  1203. /* 1x1 -> 4x1 */
  1204. static void grow41(uint8_t *dst, int dst_wrap,
  1205. const uint8_t *src, int src_wrap,
  1206. int width, int height)
  1207. {
  1208. for(;height > 0; height--) {
  1209. grow41_line(dst, src, width);
  1210. src += src_wrap;
  1211. dst += dst_wrap;
  1212. }
  1213. }
  1214. /* 1x1 -> 4x4 */
  1215. static void grow44(uint8_t *dst, int dst_wrap,
  1216. const uint8_t *src, int src_wrap,
  1217. int width, int height)
  1218. {
  1219. for(;height > 0; height--) {
  1220. grow41_line(dst, src, width);
  1221. if ((height & 3) == 1)
  1222. src += src_wrap;
  1223. dst += dst_wrap;
  1224. }
  1225. }
  1226. /* 1x2 -> 2x1 */
  1227. static void conv411(uint8_t *dst, int dst_wrap,
  1228. const uint8_t *src, int src_wrap,
  1229. int width, int height)
  1230. {
  1231. int w, c;
  1232. const uint8_t *s1, *s2;
  1233. uint8_t *d;
  1234. width>>=1;
  1235. for(;height > 0; height--) {
  1236. s1 = src;
  1237. s2 = src + src_wrap;
  1238. d = dst;
  1239. for(w = width;w > 0; w--) {
  1240. c = (s1[0] + s2[0]) >> 1;
  1241. d[0] = c;
  1242. d[1] = c;
  1243. s1++;
  1244. s2++;
  1245. d += 2;
  1246. }
  1247. src += src_wrap * 2;
  1248. dst += dst_wrap;
  1249. }
  1250. }
  1251. /* XXX: add jpeg quantize code */
  1252. #define TRANSP_INDEX (6*6*6)
  1253. /* this is maybe slow, but allows for extensions */
  1254. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1255. {
  1256. return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
  1257. }
  1258. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  1259. {
  1260. uint32_t *pal;
  1261. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  1262. int i, r, g, b;
  1263. pal = (uint32_t *)palette;
  1264. i = 0;
  1265. for(r = 0; r < 6; r++) {
  1266. for(g = 0; g < 6; g++) {
  1267. for(b = 0; b < 6; b++) {
  1268. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  1269. (pal_value[g] << 8) | pal_value[b];
  1270. }
  1271. }
  1272. }
  1273. if (has_alpha)
  1274. pal[i++] = 0;
  1275. while (i < 256)
  1276. pal[i++] = 0xff000000;
  1277. }
  1278. /* copy bit n to bits 0 ... n - 1 */
  1279. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1280. {
  1281. int mask;
  1282. mask = (1 << n) - 1;
  1283. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1284. }
  1285. /* rgb555 handling */
  1286. #define RGB_NAME rgb555
  1287. #define RGB_IN(r, g, b, s)\
  1288. {\
  1289. unsigned int v = ((const uint16_t *)(s))[0];\
  1290. r = bitcopy_n(v >> (10 - 3), 3);\
  1291. g = bitcopy_n(v >> (5 - 3), 3);\
  1292. b = bitcopy_n(v << 3, 3);\
  1293. }
  1294. #define RGBA_IN(r, g, b, a, s)\
  1295. {\
  1296. unsigned int v = ((const uint16_t *)(s))[0];\
  1297. r = bitcopy_n(v >> (10 - 3), 3);\
  1298. g = bitcopy_n(v >> (5 - 3), 3);\
  1299. b = bitcopy_n(v << 3, 3);\
  1300. a = (-(v >> 15)) & 0xff;\
  1301. }
  1302. #define RGBA_OUT(d, r, g, b, a)\
  1303. {\
  1304. ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3) | \
  1305. ((a << 8) & 0x8000);\
  1306. }
  1307. #define BPP 2
  1308. #include "imgconvert_template.h"
  1309. /* rgb565 handling */
  1310. #define RGB_NAME rgb565
  1311. #define RGB_IN(r, g, b, s)\
  1312. {\
  1313. unsigned int v = ((const uint16_t *)(s))[0];\
  1314. r = bitcopy_n(v >> (11 - 3), 3);\
  1315. g = bitcopy_n(v >> (5 - 2), 2);\
  1316. b = bitcopy_n(v << 3, 3);\
  1317. }
  1318. #define RGB_OUT(d, r, g, b)\
  1319. {\
  1320. ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
  1321. }
  1322. #define BPP 2
  1323. #include "imgconvert_template.h"
  1324. /* bgr24 handling */
  1325. #define RGB_NAME bgr24
  1326. #define RGB_IN(r, g, b, s)\
  1327. {\
  1328. b = (s)[0];\
  1329. g = (s)[1];\
  1330. r = (s)[2];\
  1331. }
  1332. #define RGB_OUT(d, r, g, b)\
  1333. {\
  1334. (d)[0] = b;\
  1335. (d)[1] = g;\
  1336. (d)[2] = r;\
  1337. }
  1338. #define BPP 3
  1339. #include "imgconvert_template.h"
  1340. #undef RGB_IN
  1341. #undef RGB_OUT
  1342. #undef BPP
  1343. /* rgb24 handling */
  1344. #define RGB_NAME rgb24
  1345. #define FMT_RGB24
  1346. #define RGB_IN(r, g, b, s)\
  1347. {\
  1348. r = (s)[0];\
  1349. g = (s)[1];\
  1350. b = (s)[2];\
  1351. }
  1352. #define RGB_OUT(d, r, g, b)\
  1353. {\
  1354. (d)[0] = r;\
  1355. (d)[1] = g;\
  1356. (d)[2] = b;\
  1357. }
  1358. #define BPP 3
  1359. #include "imgconvert_template.h"
  1360. /* rgba32 handling */
  1361. #define RGB_NAME rgba32
  1362. #define FMT_RGBA32
  1363. #define RGB_IN(r, g, b, s)\
  1364. {\
  1365. unsigned int v = ((const uint32_t *)(s))[0];\
  1366. r = (v >> 16) & 0xff;\
  1367. g = (v >> 8) & 0xff;\
  1368. b = v & 0xff;\
  1369. }
  1370. #define RGBA_IN(r, g, b, a, s)\
  1371. {\
  1372. unsigned int v = ((const uint32_t *)(s))[0];\
  1373. a = (v >> 24) & 0xff;\
  1374. r = (v >> 16) & 0xff;\
  1375. g = (v >> 8) & 0xff;\
  1376. b = v & 0xff;\
  1377. }
  1378. #define RGBA_OUT(d, r, g, b, a)\
  1379. {\
  1380. ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
  1381. }
  1382. #define BPP 4
  1383. #include "imgconvert_template.h"
  1384. static void mono_to_gray(AVPicture *dst, const AVPicture *src,
  1385. int width, int height, int xor_mask)
  1386. {
  1387. const unsigned char *p;
  1388. unsigned char *q;
  1389. int v, dst_wrap, src_wrap;
  1390. int y, w;
  1391. p = src->data[0];
  1392. src_wrap = src->linesize[0] - ((width + 7) >> 3);
  1393. q = dst->data[0];
  1394. dst_wrap = dst->linesize[0] - width;
  1395. for(y=0;y<height;y++) {
  1396. w = width;
  1397. while (w >= 8) {
  1398. v = *p++ ^ xor_mask;
  1399. q[0] = -(v >> 7);
  1400. q[1] = -((v >> 6) & 1);
  1401. q[2] = -((v >> 5) & 1);
  1402. q[3] = -((v >> 4) & 1);
  1403. q[4] = -((v >> 3) & 1);
  1404. q[5] = -((v >> 2) & 1);
  1405. q[6] = -((v >> 1) & 1);
  1406. q[7] = -((v >> 0) & 1);
  1407. w -= 8;
  1408. q += 8;
  1409. }
  1410. if (w > 0) {
  1411. v = *p++ ^ xor_mask;
  1412. do {
  1413. q[0] = -((v >> 7) & 1);
  1414. q++;
  1415. v <<= 1;
  1416. } while (--w);
  1417. }
  1418. p += src_wrap;
  1419. q += dst_wrap;
  1420. }
  1421. }
  1422. static void monowhite_to_gray(AVPicture *dst, const AVPicture *src,
  1423. int width, int height)
  1424. {
  1425. mono_to_gray(dst, src, width, height, 0xff);
  1426. }
  1427. static void monoblack_to_gray(AVPicture *dst, const AVPicture *src,
  1428. int width, int height)
  1429. {
  1430. mono_to_gray(dst, src, width, height, 0x00);
  1431. }
/* Pack an 8-bit gray plane into 1 bit per pixel: each output bit is
   the most significant bit of the corresponding source sample, and
   each completed output byte is XORed with 'xor_mask' (0xff for
   monowhite, 0x00 for monoblack). */
static void gray_to_mono(AVPicture *dst, const AVPicture *src,
                         int width, int height, int xor_mask)
{
    int n;
    const uint8_t *s;
    uint8_t *d;
    int j, b, v, n1, src_wrap, dst_wrap, y;

    s = src->data[0];
    src_wrap = src->linesize[0] - width;
    d = dst->data[0];
    /* each output line occupies (width + 7) / 8 bytes */
    dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
    for(y=0;y<height;y++) {
        n = width;
        /* full output bytes: pack 8 pixels MSB first */
        while (n >= 8) {
            v = 0;
            for(j=0;j<8;j++) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
            }
            d[0] = v ^ xor_mask;
            d++;
            n -= 8;
        }
        /* partial last byte: accumulate the remaining n bits, then
           shift them up so the first pixel lands in the MSB; the
           unused low bits stay zero (before the xor) */
        if (n > 0) {
            n1 = n;
            v = 0;
            while (n > 0) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
                n--;
            }
            d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
            d++;
        }
        s += src_wrap;
        d += dst_wrap;
    }
}
  1472. static void gray_to_monowhite(AVPicture *dst, const AVPicture *src,
  1473. int width, int height)
  1474. {
  1475. gray_to_mono(dst, src, width, height, 0xff);
  1476. }
  1477. static void gray_to_monoblack(AVPicture *dst, const AVPicture *src,
  1478. int width, int height)
  1479. {
  1480. gray_to_mono(dst, src, width, height, 0x00);
  1481. }
/* One entry of the direct-conversion dispatch table: 'convert' does
   a whole-picture conversion between a fixed (src, dst) format pair;
   a NULL pointer means no direct routine exists for that pair. */
typedef struct ConvertEntry {
    void (*convert)(AVPicture *dst,
                    const AVPicture *src, int width, int height);
} ConvertEntry;
/* Add each new conversion function in this table. In order to be able
   to convert from any format to any format, the following constraints
   must be satisfied:
   - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
   - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
   - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGBA32
   - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
     PIX_FMT_RGB24.
   - PIX_FMT_YUV422 must convert to and from PIX_FMT_YUV422P.
   The other conversion functions are just optimisations for common cases.
*/
/* direct conversion dispatch table, indexed as
   convert_table[source format][destination format]; entries whose
   .convert is NULL fall back to the intermediate-format path in
   img_convert() */
static ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
    [PIX_FMT_YUV420P] = {
        [PIX_FMT_YUV422] = {
            .convert = yuv420p_to_yuv422,
        },
        [PIX_FMT_RGB555] = {
            .convert = yuv420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuv420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuv420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuv420p_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = yuv420p_to_rgba32
        },
    },
    [PIX_FMT_YUV422P] = {
        [PIX_FMT_YUV422] = {
            .convert = yuv422p_to_yuv422,
        },
        [PIX_FMT_UYVY422] = {
            .convert = yuv422p_to_uyvy422,
        },
    },
    [PIX_FMT_YUV444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuv444p_to_rgb24
        },
    },
    /* full-range (JPEG) YUV sources */
    [PIX_FMT_YUVJ420P] = {
        [PIX_FMT_RGB555] = {
            .convert = yuvj420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuvj420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuvj420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuvj420p_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = yuvj420p_to_rgba32
        },
    },
    [PIX_FMT_YUVJ444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuvj444p_to_rgb24
        },
    },
    /* packed YUV sources */
    [PIX_FMT_YUV422] = {
        [PIX_FMT_YUV420P] = {
            .convert = yuv422_to_yuv420p,
        },
        [PIX_FMT_YUV422P] = {
            .convert = yuv422_to_yuv422p,
        },
    },
    [PIX_FMT_UYVY422] = {
        [PIX_FMT_YUV420P] = {
            .convert = uyvy422_to_yuv420p,
        },
        [PIX_FMT_YUV422P] = {
            .convert = uyvy422_to_yuv422p,
        },
    },
    /* RGB sources */
    [PIX_FMT_RGB24] = {
        [PIX_FMT_YUV420P] = {
            .convert = rgb24_to_yuv420p
        },
        [PIX_FMT_RGB565] = {
            .convert = rgb24_to_rgb565
        },
        [PIX_FMT_RGB555] = {
            .convert = rgb24_to_rgb555
        },
        [PIX_FMT_RGBA32] = {
            .convert = rgb24_to_rgba32
        },
        [PIX_FMT_BGR24] = {
            .convert = rgb24_to_bgr24
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb24_to_gray
        },
        [PIX_FMT_PAL8] = {
            .convert = rgb24_to_pal8
        },
        [PIX_FMT_YUV444P] = {
            .convert = rgb24_to_yuv444p
        },
        [PIX_FMT_YUVJ420P] = {
            .convert = rgb24_to_yuvj420p
        },
        [PIX_FMT_YUVJ444P] = {
            .convert = rgb24_to_yuvj444p
        },
    },
    [PIX_FMT_RGBA32] = {
        [PIX_FMT_RGB24] = {
            .convert = rgba32_to_rgb24
        },
        [PIX_FMT_RGB555] = {
            .convert = rgba32_to_rgb555
        },
        [PIX_FMT_PAL8] = {
            .convert = rgba32_to_pal8
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgba32_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgba32_to_gray
        },
    },
    [PIX_FMT_BGR24] = {
        [PIX_FMT_RGB24] = {
            .convert = bgr24_to_rgb24
        },
        [PIX_FMT_YUV420P] = {
            .convert = bgr24_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = bgr24_to_gray
        },
    },
    [PIX_FMT_RGB555] = {
        [PIX_FMT_RGB24] = {
            .convert = rgb555_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = rgb555_to_rgba32
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb555_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb555_to_gray
        },
    },
    [PIX_FMT_RGB565] = {
        [PIX_FMT_RGB24] = {
            .convert = rgb565_to_rgb24
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb565_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb565_to_gray
        },
    },
    /* gray and monochrome */
    [PIX_FMT_GRAY8] = {
        [PIX_FMT_RGB555] = {
            .convert = gray_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = gray_to_rgb565
        },
        [PIX_FMT_RGB24] = {
            .convert = gray_to_rgb24
        },
        [PIX_FMT_BGR24] = {
            .convert = gray_to_bgr24
        },
        [PIX_FMT_RGBA32] = {
            .convert = gray_to_rgba32
        },
        [PIX_FMT_MONOWHITE] = {
            .convert = gray_to_monowhite
        },
        [PIX_FMT_MONOBLACK] = {
            .convert = gray_to_monoblack
        },
    },
    [PIX_FMT_MONOWHITE] = {
        [PIX_FMT_GRAY8] = {
            .convert = monowhite_to_gray
        },
    },
    [PIX_FMT_MONOBLACK] = {
        [PIX_FMT_GRAY8] = {
            .convert = monoblack_to_gray
        },
    },
    /* paletted source */
    [PIX_FMT_PAL8] = {
        [PIX_FMT_RGB555] = {
            .convert = pal8_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = pal8_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = pal8_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = pal8_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = pal8_to_rgba32
        },
    },
    [PIX_FMT_UYVY411] = {
        [PIX_FMT_YUV411P] = {
            .convert = uyvy411_to_yuv411p,
        },
    },
};
  1710. int avpicture_alloc(AVPicture *picture,
  1711. int pix_fmt, int width, int height)
  1712. {
  1713. unsigned int size;
  1714. void *ptr;
  1715. size = avpicture_get_size(pix_fmt, width, height);
  1716. ptr = av_malloc(size);
  1717. if (!ptr)
  1718. goto fail;
  1719. avpicture_fill(picture, ptr, pix_fmt, width, height);
  1720. return 0;
  1721. fail:
  1722. memset(picture, 0, sizeof(AVPicture));
  1723. return -1;
  1724. }
  1725. void avpicture_free(AVPicture *picture)
  1726. {
  1727. av_free(picture->data[0]);
  1728. }
  1729. /* return true if yuv planar */
  1730. static inline int is_yuv_planar(PixFmtInfo *ps)
  1731. {
  1732. return (ps->color_type == FF_COLOR_YUV ||
  1733. ps->color_type == FF_COLOR_YUV_JPEG) &&
  1734. ps->pixel_type == FF_PIXEL_PLANAR;
  1735. }
  1736. /* XXX: always use linesize. Return -1 if not supported */
/**
 * Convert 'src' (src_width x src_height, format src_pix_fmt) into
 * 'dst' (same dimensions, format dst_pix_fmt). Tries, in order: a
 * plain copy, a direct routine from convert_table, special gray/YUV
 * paths, a planar-YUV chroma resample, and finally a two-step
 * conversion through a normalized intermediate format.
 * @return 0 on success, -1 if the conversion is not supported
 */
int img_convert(AVPicture *dst, int dst_pix_fmt,
                const AVPicture *src, int src_pix_fmt,
                int src_width, int src_height)
{
    static int inited;
    int i, ret, dst_width, dst_height, int_pix_fmt;
    PixFmtInfo *src_pix, *dst_pix;
    ConvertEntry *ce;
    AVPicture tmp1, *tmp = &tmp1;

    if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
        dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
        return -1;
    if (src_width <= 0 || src_height <= 0)
        return 0;

    /* one-time initialization of the conversion tables */
    if (!inited) {
        inited = 1;
        img_convert_init();
    }

    dst_width = src_width;
    dst_height = src_height;

    dst_pix = &pix_fmt_info[dst_pix_fmt];
    src_pix = &pix_fmt_info[src_pix_fmt];
    if (src_pix_fmt == dst_pix_fmt) {
        /* no conversion needed: just copy */
        img_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
        return 0;
    }

    ce = &convert_table[src_pix_fmt][dst_pix_fmt];
    if (ce->convert) {
        /* specific conversion routine */
        ce->convert(dst, src, dst_width, dst_height);
        return 0;
    }

    /* gray to YUV */
    if (is_yuv_planar(dst_pix) &&
        src_pix_fmt == PIX_FMT_GRAY8) {
        int w, h, y;
        uint8_t *d;

        if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
            /* full-range destination: gray can be copied as luma */
            img_copy_plane(dst->data[0], dst->linesize[0],
                           src->data[0], src->linesize[0],
                           dst_width, dst_height);
        } else {
            /* rescale full-range gray to CCIR-range luma */
            img_apply_table(dst->data[0], dst->linesize[0],
                            src->data[0], src->linesize[0],
                            dst_width, dst_height,
                            y_jpeg_to_ccir);
        }
        /* fill U and V with 128 */
        w = dst_width;
        h = dst_height;
        w >>= dst_pix->x_chroma_shift;
        h >>= dst_pix->y_chroma_shift;
        for(i = 1; i <= 2; i++) {
            d = dst->data[i];
            for(y = 0; y < h; y++) {
                memset(d, 128, w);
                d += dst->linesize[i];
            }
        }
        return 0;
    }

    /* YUV to gray */
    if (is_yuv_planar(src_pix) &&
        dst_pix_fmt == PIX_FMT_GRAY8) {
        if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
            img_copy_plane(dst->data[0], dst->linesize[0],
                           src->data[0], src->linesize[0],
                           dst_width, dst_height);
        } else {
            /* expand CCIR-range luma to full-range gray */
            img_apply_table(dst->data[0], dst->linesize[0],
                            src->data[0], src->linesize[0],
                            dst_width, dst_height,
                            y_ccir_to_jpeg);
        }
        return 0;
    }

    /* YUV to YUV planar */
    if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
        int x_shift, y_shift, w, h, xy_shift;
        void (*resize_func)(uint8_t *dst, int dst_wrap,
                            const uint8_t *src, int src_wrap,
                            int width, int height);

        /* compute chroma size of the smallest dimensions */
        /* NOTE(review): w/h computed here are not used by the
           resize_func calls below, which recompute from dst only */
        w = dst_width;
        h = dst_height;
        if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
            w >>= dst_pix->x_chroma_shift;
        else
            w >>= src_pix->x_chroma_shift;
        if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
            h >>= dst_pix->y_chroma_shift;
        else
            h >>= src_pix->y_chroma_shift;

        /* encode the signed shift deltas in one byte: high nibble =
           horizontal, low nibble = vertical; negative deltas (grow
           cases) wrap to 0xf/0xe */
        x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
        y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
        xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
        /* there must be filters for conversion at least from and to
           YUV444 format */
        switch(xy_shift) {
        case 0x00:
            resize_func = img_copy_plane;
            break;
        case 0x10:
            resize_func = shrink21;
            break;
        case 0x20:
            resize_func = shrink41;
            break;
        case 0x01:
            resize_func = shrink12;
            break;
        case 0x11:
            resize_func = shrink22;
            break;
        case 0x22:
            resize_func = shrink44;
            break;
        case 0xf0:
            resize_func = grow21;
            break;
        case 0xe0:
            resize_func = grow41;
            break;
        case 0xff:
            resize_func = grow22;
            break;
        case 0xee:
            resize_func = grow44;
            break;
        case 0xf1:
            resize_func = conv411;
            break;
        default:
            /* currently not handled */
            goto no_chroma_filter;
        }

        /* luma is copied as is, chroma planes are resampled */
        img_copy_plane(dst->data[0], dst->linesize[0],
                       src->data[0], src->linesize[0],
                       dst_width, dst_height);

        for(i = 1;i <= 2; i++)
            resize_func(dst->data[i], dst->linesize[i],
                        src->data[i], src->linesize[i],
                        dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
        /* if yuv color space conversion is needed, we do it here on
           the destination image */
        if (dst_pix->color_type != src_pix->color_type) {
            const uint8_t *y_table, *c_table;
            if (dst_pix->color_type == FF_COLOR_YUV) {
                y_table = y_jpeg_to_ccir;
                c_table = c_jpeg_to_ccir;
            } else {
                y_table = y_ccir_to_jpeg;
                c_table = c_ccir_to_jpeg;
            }
            img_apply_table(dst->data[0], dst->linesize[0],
                            dst->data[0], dst->linesize[0],
                            dst_width, dst_height,
                            y_table);

            for(i = 1;i <= 2; i++)
                img_apply_table(dst->data[i], dst->linesize[i],
                                dst->data[i], dst->linesize[i],
                                dst_width>>dst_pix->x_chroma_shift,
                                dst_height>>dst_pix->y_chroma_shift,
                                c_table);
        }
        return 0;
    }
 no_chroma_filter:

    /* try to use an intermediate format */
    if (src_pix_fmt == PIX_FMT_YUV422 ||
        dst_pix_fmt == PIX_FMT_YUV422) {
        /* specific case: convert to YUV422P first */
        int_pix_fmt = PIX_FMT_YUV422P;
    } else if (src_pix_fmt == PIX_FMT_UYVY422 ||
               dst_pix_fmt == PIX_FMT_UYVY422) {
        /* specific case: convert to YUV422P first */
        int_pix_fmt = PIX_FMT_YUV422P;
    } else if (src_pix_fmt == PIX_FMT_UYVY411 ||
               dst_pix_fmt == PIX_FMT_UYVY411) {
        /* specific case: convert to YUV411P first */
        int_pix_fmt = PIX_FMT_YUV411P;
    } else if ((src_pix->color_type == FF_COLOR_GRAY &&
                src_pix_fmt != PIX_FMT_GRAY8) ||
               (dst_pix->color_type == FF_COLOR_GRAY &&
                dst_pix_fmt != PIX_FMT_GRAY8)) {
        /* gray8 is the normalized format */
        int_pix_fmt = PIX_FMT_GRAY8;
    } else if ((is_yuv_planar(src_pix) &&
                src_pix_fmt != PIX_FMT_YUV444P &&
                src_pix_fmt != PIX_FMT_YUVJ444P)) {
        /* yuv444 is the normalized format */
        if (src_pix->color_type == FF_COLOR_YUV_JPEG)
            int_pix_fmt = PIX_FMT_YUVJ444P;
        else
            int_pix_fmt = PIX_FMT_YUV444P;
    } else if ((is_yuv_planar(dst_pix) &&
                dst_pix_fmt != PIX_FMT_YUV444P &&
                dst_pix_fmt != PIX_FMT_YUVJ444P)) {
        /* yuv444 is the normalized format */
        if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
            int_pix_fmt = PIX_FMT_YUVJ444P;
        else
            int_pix_fmt = PIX_FMT_YUV444P;
    } else {
        /* the two formats are rgb or gray8 or yuv[j]444p */
        if (src_pix->is_alpha && dst_pix->is_alpha)
            int_pix_fmt = PIX_FMT_RGBA32;
        else
            int_pix_fmt = PIX_FMT_RGB24;
    }
    /* two-step conversion: src -> tmp (intermediate) -> dst */
    if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
        return -1;
    ret = -1;
    if (img_convert(tmp, int_pix_fmt,
                    src, src_pix_fmt, src_width, src_height) < 0)
        goto fail1;
    if (img_convert(dst, dst_pix_fmt,
                    tmp, int_pix_fmt, dst_width, dst_height) < 0)
        goto fail1;
    ret = 0;
 fail1:
    avpicture_free(tmp);
    return ret;
}
  1962. /* NOTE: we scan all the pixels to have an exact information */
  1963. static int get_alpha_info_pal8(const AVPicture *src, int width, int height)
  1964. {
  1965. const unsigned char *p;
  1966. int src_wrap, ret, x, y;
  1967. unsigned int a;
  1968. uint32_t *palette = (uint32_t *)src->data[1];
  1969. p = src->data[0];
  1970. src_wrap = src->linesize[0] - width;
  1971. ret = 0;
  1972. for(y=0;y<height;y++) {
  1973. for(x=0;x<width;x++) {
  1974. a = palette[p[0]] >> 24;
  1975. if (a == 0x00) {
  1976. ret |= FF_ALPHA_TRANSP;
  1977. } else if (a != 0xff) {
  1978. ret |= FF_ALPHA_SEMI_TRANSP;
  1979. }
  1980. p++;
  1981. }
  1982. p += src_wrap;
  1983. }
  1984. return ret;
  1985. }
  1986. /**
  1987. * Tell if an image really has transparent alpha values.
  1988. * @return ored mask of FF_ALPHA_xxx constants
  1989. */
  1990. int img_get_alpha_info(const AVPicture *src,
  1991. int pix_fmt, int width, int height)
  1992. {
  1993. PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  1994. int ret;
  1995. pf = &pix_fmt_info[pix_fmt];
  1996. /* no alpha can be represented in format */
  1997. if (!pf->is_alpha)
  1998. return 0;
  1999. switch(pix_fmt) {
  2000. case PIX_FMT_RGBA32:
  2001. ret = get_alpha_info_rgba32(src, width, height);
  2002. break;
  2003. case PIX_FMT_RGB555:
  2004. ret = get_alpha_info_rgb555(src, width, height);
  2005. break;
  2006. case PIX_FMT_PAL8:
  2007. ret = get_alpha_info_pal8(src, width, height);
  2008. break;
  2009. default:
  2010. /* we do not know, so everything is indicated */
  2011. ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
  2012. break;
  2013. }
  2014. return ret;
  2015. }
  2016. #ifdef HAVE_MMX
  2017. #define DEINT_INPLACE_LINE_LUM \
  2018. movd_m2r(lum_m4[0],mm0);\
  2019. movd_m2r(lum_m3[0],mm1);\
  2020. movd_m2r(lum_m2[0],mm2);\
  2021. movd_m2r(lum_m1[0],mm3);\
  2022. movd_m2r(lum[0],mm4);\
  2023. punpcklbw_r2r(mm7,mm0);\
  2024. movd_r2m(mm2,lum_m4[0]);\
  2025. punpcklbw_r2r(mm7,mm1);\
  2026. punpcklbw_r2r(mm7,mm2);\
  2027. punpcklbw_r2r(mm7,mm3);\
  2028. punpcklbw_r2r(mm7,mm4);\
  2029. paddw_r2r(mm3,mm1);\
  2030. psllw_i2r(1,mm2);\
  2031. paddw_r2r(mm4,mm0);\
  2032. psllw_i2r(2,mm1);\
  2033. paddw_r2r(mm6,mm2);\
  2034. paddw_r2r(mm2,mm1);\
  2035. psubusw_r2r(mm0,mm1);\
  2036. psrlw_i2r(3,mm1);\
  2037. packuswb_r2r(mm7,mm1);\
  2038. movd_r2m(mm1,lum_m2[0]);
  2039. #define DEINT_LINE_LUM \
  2040. movd_m2r(lum_m4[0],mm0);\
  2041. movd_m2r(lum_m3[0],mm1);\
  2042. movd_m2r(lum_m2[0],mm2);\
  2043. movd_m2r(lum_m1[0],mm3);\
  2044. movd_m2r(lum[0],mm4);\
  2045. punpcklbw_r2r(mm7,mm0);\
  2046. punpcklbw_r2r(mm7,mm1);\
  2047. punpcklbw_r2r(mm7,mm2);\
  2048. punpcklbw_r2r(mm7,mm3);\
  2049. punpcklbw_r2r(mm7,mm4);\
  2050. paddw_r2r(mm3,mm1);\
  2051. psllw_i2r(1,mm2);\
  2052. paddw_r2r(mm4,mm0);\
  2053. psllw_i2r(2,mm1);\
  2054. paddw_r2r(mm6,mm2);\
  2055. paddw_r2r(mm2,mm1);\
  2056. psubusw_r2r(mm0,mm1);\
  2057. psrlw_i2r(3,mm1);\
  2058. packuswb_r2r(mm7,mm1);\
  2059. movd_r2m(mm1,dst[0]);
  2060. #endif
  2061. /* filter parameters: [-1 4 2 4 -1] // 8 */
/* Filter one output line with the [-1 4 2 4 -1]/8 vertical kernel:
   lum_m4..lum are five consecutive source lines (m4 = 4 lines above
   'lum'), 'dst' receives the filtered result, 'size' is the width. */
static void deinterlace_line(uint8_t *dst,
                             const uint8_t *lum_m4, const uint8_t *lum_m3,
                             const uint8_t *lum_m2, const uint8_t *lum_m1,
                             const uint8_t *lum,
                             int size)
{
#ifndef HAVE_MMX
    /* cropTbl clamps the filter output into 0..255 */
    uint8_t *cm = cropTbl + MAX_NEG_CROP;
    int sum;

    for(;size > 0;size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        /* round, divide by 8, clamp */
        dst[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
        dst++;
    }
#else
    {
        /* per-lane rounding constant (4) in mm6, zero in mm7 */
        mmx_t rounder;
        rounder.uw[0]=4;
        rounder.uw[1]=4;
        rounder.uw[2]=4;
        rounder.uw[3]=4;
        pxor_r2r(mm7,mm7);
        movq_m2r(rounder,mm6);
    }
    /* NOTE(review): processes 4 pixels per iteration; 1..3 leftover
       pixels are not filtered — presumably callers only pass widths
       that are multiples of 4 here, verify at the call sites */
    for (;size > 3; size-=4) {
        DEINT_LINE_LUM
        lum_m4+=4;
        lum_m3+=4;
        lum_m2+=4;
        lum_m1+=4;
        lum+=4;
        dst+=4;
    }
#endif
}
/* In-place variant of deinterlace_line(): the filtered result is
   written back into the lum_m2 line, and the original (unfiltered)
   lum_m2 samples are saved into the lum_m4 line so the caller can
   keep using them as a tap for the next output line. */
static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
                                     int size)
{
#ifndef HAVE_MMX
    uint8_t *cm = cropTbl + MAX_NEG_CROP;
    int sum;

    for(;size > 0;size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        /* preserve the unfiltered lum_m2 sample before overwriting */
        lum_m4[0]=lum_m2[0];
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        lum_m2[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
    }
#else
    {
        /* per-lane rounding constant (4) in mm6, zero in mm7 */
        mmx_t rounder;
        rounder.uw[0]=4;
        rounder.uw[1]=4;
        rounder.uw[2]=4;
        rounder.uw[3]=4;
        pxor_r2r(mm7,mm7);
        movq_m2r(rounder,mm6);
    }
    /* NOTE(review): 4 pixels per iteration; leftover 1..3 pixels are
       skipped — presumably widths here are multiples of 4, verify */
    for (;size > 3; size-=4) {
        DEINT_INPLACE_LINE_LUM
        lum_m4+=4;
        lum_m3+=4;
        lum_m2+=4;
        lum_m1+=4;
        lum+=4;
    }
#endif
}
  2146. /* deinterlacing : 2 temporal taps, 3 spatial taps linear filter. The
  2147. top field is copied as is, but the bottom field is deinterlaced
  2148. against the top field. */
/* Deinterlace one plane from 'src1' into 'dst': even (top-field)
   lines are copied verbatim, odd (bottom-field) lines are replaced
   by the 5-tap vertical filter of deinterlace_line(). At the top
   and bottom edges, the missing taps are clamped to the nearest
   available line. */
static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
                                     const uint8_t *src1, int src_wrap,
                                     int width, int height)
{
    const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
    int y;

    /* the two "above" taps start clamped to the first line */
    src_m2 = src1;
    src_m1 = src1;
    src_0=&src_m1[src_wrap];
    src_p1=&src_0[src_wrap];
    src_p2=&src_p1[src_wrap];
    for(y=0;y<(height-2);y+=2) {
        /* top-field line: copy as is */
        memcpy(dst,src_m1,width);
        dst += dst_wrap;
        /* bottom-field line: filtered from the surrounding lines */
        deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
        /* rotate the 5-line window down by two lines */
        src_m2 = src_0;
        src_m1 = src_p1;
        src_0 = src_p2;
        src_p1 += 2*src_wrap;
        src_p2 += 2*src_wrap;
        dst += dst_wrap;
    }
    memcpy(dst,src_m1,width);
    dst += dst_wrap;
    /* do last line: bottom taps clamped to the last source line */
    deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
}
  2176. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  2177. int width, int height)
  2178. {
  2179. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  2180. int y;
  2181. uint8_t *buf;
  2182. buf = (uint8_t*)av_malloc(width);
  2183. src_m1 = src1;
  2184. memcpy(buf,src_m1,width);
  2185. src_0=&src_m1[src_wrap];
  2186. src_p1=&src_0[src_wrap];
  2187. src_p2=&src_p1[src_wrap];
  2188. for(y=0;y<(height-2);y+=2) {
  2189. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  2190. src_m1 = src_p1;
  2191. src_0 = src_p2;
  2192. src_p1 += 2*src_wrap;
  2193. src_p2 += 2*src_wrap;
  2194. }
  2195. /* do last line */
  2196. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  2197. av_free(buf);
  2198. }
  2199. /* deinterlace - if not supported return -1 */
  2200. int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
  2201. int pix_fmt, int width, int height)
  2202. {
  2203. int i;
  2204. if (pix_fmt != PIX_FMT_YUV420P &&
  2205. pix_fmt != PIX_FMT_YUV422P &&
  2206. pix_fmt != PIX_FMT_YUV444P &&
  2207. pix_fmt != PIX_FMT_YUV411P)
  2208. return -1;
  2209. if ((width & 3) != 0 || (height & 3) != 0)
  2210. return -1;
  2211. for(i=0;i<3;i++) {
  2212. if (i == 1) {
  2213. switch(pix_fmt) {
  2214. case PIX_FMT_YUV420P:
  2215. width >>= 1;
  2216. height >>= 1;
  2217. break;
  2218. case PIX_FMT_YUV422P:
  2219. width >>= 1;
  2220. break;
  2221. case PIX_FMT_YUV411P:
  2222. width >>= 2;
  2223. break;
  2224. default:
  2225. break;
  2226. }
  2227. }
  2228. if (src == dst) {
  2229. deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i],
  2230. width, height);
  2231. } else {
  2232. deinterlace_bottom_field(dst->data[i],dst->linesize[i],
  2233. src->data[i], src->linesize[i],
  2234. width, height);
  2235. }
  2236. }
  2237. #ifdef HAVE_MMX
  2238. emms();
  2239. #endif
  2240. return 0;
  2241. }
  2242. #undef FIX