You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2171 lines
58KB

  1. /*
  2. * Misc image convertion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
  4. *
  5. * This library is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU Lesser General Public
  7. * License as published by the Free Software Foundation; either
  8. * version 2 of the License, or (at your option) any later version.
  9. *
  10. * This library is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * Lesser General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU Lesser General Public
  16. * License along with this library; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. /**
  20. * @file imgconvert.c
  21. * Misc image convertion routines.
  22. */
  23. /* TODO:
  24. * - write 'ffimg' program to test all the image related stuff
  25. * - move all api to slice based system
  26. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  27. */
  28. #include "avcodec.h"
  29. #include "dsputil.h"
  30. #ifdef USE_FASTMEMCPY
  31. #include "fastmemcpy.h"
  32. #endif
  33. #ifdef HAVE_MMX
  34. #include "i386/mmx.h"
  35. #endif
/* token-pasting helpers (used to build per-format function names when
   including the conversion templates) */
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)

/* color space of a pixel format */
#define FF_COLOR_RGB 0 /* RGB color space */
#define FF_COLOR_GRAY 1 /* gray color space */
#define FF_COLOR_YUV 2 /* YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
#define FF_COLOR_YUV_JPEG 3 /* YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */

/* storage layout of a pixel format */
#define FF_PIXEL_PLANAR 0 /* each channel has one component in AVPicture */
#define FF_PIXEL_PACKED 1 /* only one components containing all the channels */
#define FF_PIXEL_PALETTE 2 /* one components containing indexes for a palette */

/* static description of one pixel format; the pix_fmt_info[] table
   below holds one entry per supported PIX_FMT_* value */
typedef struct PixFmtInfo {
    const char *name;       /* canonical format name, e.g. "yuv420p" */
    uint8_t nb_channels;    /* number of channels (including alpha) */
    uint8_t color_type;     /* color type (see FF_COLOR_xxx constants) */
    uint8_t pixel_type;     /* pixel storage type (see FF_PIXEL_xxx constants) */
    uint8_t is_alpha : 1;   /* true if alpha can be specified */
    uint8_t x_chroma_shift; /* X chroma subsampling factor is 2 ^ shift */
    uint8_t y_chroma_shift; /* Y chroma subsampling factor is 2 ^ shift */
    uint8_t depth;          /* bit depth of the color components */
} PixFmtInfo;
/* this table gives more information about formats; entries for formats
   not listed here are zero-filled (in particular .name == NULL) */
static PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
    /* YUV formats */
    [PIX_FMT_YUV420P] = {
        .name = "yuv420p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 1, /* chroma halved in both directions */
    },
    [PIX_FMT_YUV422P] = {
        .name = "yuv422p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0, /* chroma halved horizontally */
    },
    [PIX_FMT_YUV444P] = {
        .name = "yuv444p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0, /* no subsampling */
    },
    [PIX_FMT_YUV422] = {
        .name = "yuv422",
        .nb_channels = 1, /* one packed YUYV plane */
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_YUV410P] = {
        .name = "yuv410p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 2, .y_chroma_shift = 2, /* chroma quartered in both directions */
    },
    [PIX_FMT_YUV411P] = {
        .name = "yuv411p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 2, .y_chroma_shift = 0, /* chroma quartered horizontally */
    },
    /* JPEG YUV (full-range samples, see FF_COLOR_YUV_JPEG) */
    [PIX_FMT_YUVJ420P] = {
        .name = "yuvj420p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV_JPEG,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 1,
    },
    [PIX_FMT_YUVJ422P] = {
        .name = "yuvj422p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV_JPEG,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_YUVJ444P] = {
        .name = "yuvj444p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV_JPEG,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    /* RGB formats */
    [PIX_FMT_RGB24] = {
        .name = "rgb24",
        .nb_channels = 3,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
    },
    [PIX_FMT_BGR24] = {
        .name = "bgr24",
        .nb_channels = 3,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
    },
    [PIX_FMT_RGBA32] = {
        .name = "rgba32",
        .nb_channels = 4, .is_alpha = 1,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
    },
    [PIX_FMT_RGB565] = {
        .name = "rgb565",
        .nb_channels = 3,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 5, /* depth of the smallest component (G has 6 bits) */
    },
    [PIX_FMT_RGB555] = {
        .name = "rgb555",
        /* the otherwise unused top bit acts as alpha (see the rgb555
           RGBA_IN/RGBA_OUT macros below) */
        .nb_channels = 4, .is_alpha = 1,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 5,
    },
    /* gray / mono formats */
    [PIX_FMT_GRAY8] = {
        .name = "gray",
        .nb_channels = 1,
        .color_type = FF_COLOR_GRAY,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
    },
    [PIX_FMT_MONOWHITE] = {
        .name = "monow",
        .nb_channels = 1,
        .color_type = FF_COLOR_GRAY,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 1, /* 1 bit per pixel */
    },
    [PIX_FMT_MONOBLACK] = {
        .name = "monob",
        .nb_channels = 1,
        .color_type = FF_COLOR_GRAY,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 1,
    },
    /* paletted formats */
    [PIX_FMT_PAL8] = {
        .name = "pal8",
        .nb_channels = 4, .is_alpha = 1, /* palette entries are 32-bit ARGB */
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PALETTE,
        .depth = 8,
    },
};
  198. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  199. {
  200. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  201. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  202. }
  203. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  204. {
  205. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  206. return "???";
  207. else
  208. return pix_fmt_info[pix_fmt].name;
  209. }
  210. enum PixelFormat avcodec_get_pix_fmt(const char* name)
  211. {
  212. int i;
  213. for (i=0; i < PIX_FMT_NB; i++)
  214. if (!strcmp(pix_fmt_info[i].name, name))
  215. break;
  216. return i;
  217. }
  218. /* Picture field are filled with 'ptr' addresses. Also return size */
  219. int avpicture_fill(AVPicture *picture, uint8_t *ptr,
  220. int pix_fmt, int width, int height)
  221. {
  222. int size, w2, h2, size2;
  223. PixFmtInfo *pinfo;
  224. pinfo = &pix_fmt_info[pix_fmt];
  225. size = width * height;
  226. switch(pix_fmt) {
  227. case PIX_FMT_YUV420P:
  228. case PIX_FMT_YUV422P:
  229. case PIX_FMT_YUV444P:
  230. case PIX_FMT_YUV410P:
  231. case PIX_FMT_YUV411P:
  232. case PIX_FMT_YUVJ420P:
  233. case PIX_FMT_YUVJ422P:
  234. case PIX_FMT_YUVJ444P:
  235. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  236. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  237. size2 = w2 * h2;
  238. picture->data[0] = ptr;
  239. picture->data[1] = picture->data[0] + size;
  240. picture->data[2] = picture->data[1] + size2;
  241. picture->linesize[0] = width;
  242. picture->linesize[1] = w2;
  243. picture->linesize[2] = w2;
  244. return size + 2 * size2;
  245. case PIX_FMT_RGB24:
  246. case PIX_FMT_BGR24:
  247. picture->data[0] = ptr;
  248. picture->data[1] = NULL;
  249. picture->data[2] = NULL;
  250. picture->linesize[0] = width * 3;
  251. return size * 3;
  252. case PIX_FMT_RGBA32:
  253. picture->data[0] = ptr;
  254. picture->data[1] = NULL;
  255. picture->data[2] = NULL;
  256. picture->linesize[0] = width * 4;
  257. return size * 4;
  258. case PIX_FMT_RGB555:
  259. case PIX_FMT_RGB565:
  260. case PIX_FMT_YUV422:
  261. picture->data[0] = ptr;
  262. picture->data[1] = NULL;
  263. picture->data[2] = NULL;
  264. picture->linesize[0] = width * 2;
  265. return size * 2;
  266. case PIX_FMT_GRAY8:
  267. picture->data[0] = ptr;
  268. picture->data[1] = NULL;
  269. picture->data[2] = NULL;
  270. picture->linesize[0] = width;
  271. return size;
  272. case PIX_FMT_MONOWHITE:
  273. case PIX_FMT_MONOBLACK:
  274. picture->data[0] = ptr;
  275. picture->data[1] = NULL;
  276. picture->data[2] = NULL;
  277. picture->linesize[0] = (width + 7) >> 3;
  278. return picture->linesize[0] * height;
  279. case PIX_FMT_PAL8:
  280. size2 = (size + 3) & ~3;
  281. picture->data[0] = ptr;
  282. picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
  283. picture->data[2] = NULL;
  284. picture->linesize[0] = width;
  285. picture->linesize[1] = 4;
  286. return size2 + 256 * 4;
  287. default:
  288. picture->data[0] = NULL;
  289. picture->data[1] = NULL;
  290. picture->data[2] = NULL;
  291. picture->data[3] = NULL;
  292. return -1;
  293. }
  294. }
/* Serialize the planes of 'src' into the flat buffer 'dest'.  Returns
 * the number of bytes written (the avpicture_get_size() of the format),
 * or -1 when 'dest_size' is too small.  For paletted formats the
 * 256-entry palette is appended after the (4-byte aligned) pixel data. */
int avpicture_layout(AVPicture* src, int pix_fmt, int width, int height,
                     unsigned char *dest, int dest_size)
{
    PixFmtInfo* pf = &pix_fmt_info[pix_fmt];
    int i, j, w, h, data_planes;
    unsigned char* s;
    int size = avpicture_get_size(pix_fmt, width, height);

    if (size > dest_size)
        return -1;

    if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
        /* single interleaved plane: 'w' becomes the row size in bytes */
        if (pix_fmt == PIX_FMT_YUV422 || pix_fmt == PIX_FMT_RGB565 ||
            pix_fmt == PIX_FMT_RGB555)
            w = width * 2;
        else if (pix_fmt == PIX_FMT_PAL8)
            w = width;
        else
            w = width * (pf->depth * pf->nb_channels / 8);
        data_planes = 1;
        h = height;
    } else {
        data_planes = pf->nb_channels;
        w = width;
        h = height;
    }

    for (i=0; i<data_planes; i++) {
        /* from the second plane on, switch to chroma dimensions.
           NOTE(review): these are truncating shifts, while
           avpicture_fill() rounds chroma dimensions up — verify odd
           widths/heights round-trip correctly */
        if (i == 1) {
            w = width >> pf->x_chroma_shift;
            h = height >> pf->y_chroma_shift;
        }
        s = src->data[i];
        for(j=0; j<h; j++) {
            memcpy(dest, s, w);
            dest += w;
            s += src->linesize[i];
        }
    }

    if (pf->pixel_type == FF_PIXEL_PALETTE)
        /* 'dest' now points just past the pixel data; append the
           256 x 32-bit palette at the next 4-byte boundary */
        memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);

    return size;
}
  335. int avpicture_get_size(int pix_fmt, int width, int height)
  336. {
  337. AVPicture dummy_pict;
  338. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  339. }
  340. /**
  341. * compute the loss when converting from a pixel format to another
  342. */
  343. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  344. int has_alpha)
  345. {
  346. const PixFmtInfo *pf, *ps;
  347. int loss;
  348. ps = &pix_fmt_info[src_pix_fmt];
  349. pf = &pix_fmt_info[dst_pix_fmt];
  350. /* compute loss */
  351. loss = 0;
  352. pf = &pix_fmt_info[dst_pix_fmt];
  353. if (pf->depth < ps->depth ||
  354. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  355. loss |= FF_LOSS_DEPTH;
  356. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  357. pf->y_chroma_shift > ps->y_chroma_shift)
  358. loss |= FF_LOSS_RESOLUTION;
  359. switch(pf->color_type) {
  360. case FF_COLOR_RGB:
  361. if (ps->color_type != FF_COLOR_RGB &&
  362. ps->color_type != FF_COLOR_GRAY)
  363. loss |= FF_LOSS_COLORSPACE;
  364. break;
  365. case FF_COLOR_GRAY:
  366. if (ps->color_type != FF_COLOR_GRAY)
  367. loss |= FF_LOSS_COLORSPACE;
  368. break;
  369. case FF_COLOR_YUV:
  370. if (ps->color_type != FF_COLOR_YUV)
  371. loss |= FF_LOSS_COLORSPACE;
  372. break;
  373. case FF_COLOR_YUV_JPEG:
  374. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  375. ps->color_type != FF_COLOR_YUV &&
  376. ps->color_type != FF_COLOR_GRAY)
  377. loss |= FF_LOSS_COLORSPACE;
  378. break;
  379. default:
  380. /* fail safe test */
  381. if (ps->color_type != pf->color_type)
  382. loss |= FF_LOSS_COLORSPACE;
  383. break;
  384. }
  385. if (pf->color_type == FF_COLOR_GRAY &&
  386. ps->color_type != FF_COLOR_GRAY)
  387. loss |= FF_LOSS_CHROMA;
  388. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  389. loss |= FF_LOSS_ALPHA;
  390. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  391. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  392. loss |= FF_LOSS_COLORQUANT;
  393. return loss;
  394. }
  395. static int avg_bits_per_pixel(int pix_fmt)
  396. {
  397. int bits;
  398. const PixFmtInfo *pf;
  399. pf = &pix_fmt_info[pix_fmt];
  400. switch(pf->pixel_type) {
  401. case FF_PIXEL_PACKED:
  402. switch(pix_fmt) {
  403. case PIX_FMT_YUV422:
  404. case PIX_FMT_RGB565:
  405. case PIX_FMT_RGB555:
  406. bits = 16;
  407. break;
  408. default:
  409. bits = pf->depth * pf->nb_channels;
  410. break;
  411. }
  412. break;
  413. case FF_PIXEL_PLANAR:
  414. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  415. bits = pf->depth * pf->nb_channels;
  416. } else {
  417. bits = pf->depth + ((2 * pf->depth) >>
  418. (pf->x_chroma_shift + pf->y_chroma_shift));
  419. }
  420. break;
  421. case FF_PIXEL_PALETTE:
  422. bits = 8;
  423. break;
  424. default:
  425. bits = -1;
  426. break;
  427. }
  428. return bits;
  429. }
  430. static int avcodec_find_best_pix_fmt1(int pix_fmt_mask,
  431. int src_pix_fmt,
  432. int has_alpha,
  433. int loss_mask)
  434. {
  435. int dist, i, loss, min_dist, dst_pix_fmt;
  436. /* find exact color match with smallest size */
  437. dst_pix_fmt = -1;
  438. min_dist = 0x7fffffff;
  439. for(i = 0;i < PIX_FMT_NB; i++) {
  440. if (pix_fmt_mask & (1 << i)) {
  441. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  442. if (loss == 0) {
  443. dist = avg_bits_per_pixel(i);
  444. if (dist < min_dist) {
  445. min_dist = dist;
  446. dst_pix_fmt = i;
  447. }
  448. }
  449. }
  450. }
  451. return dst_pix_fmt;
  452. }
  453. /**
  454. * find best pixel format to convert to. Return -1 if none found
  455. */
  456. int avcodec_find_best_pix_fmt(int pix_fmt_mask, int src_pix_fmt,
  457. int has_alpha, int *loss_ptr)
  458. {
  459. int dst_pix_fmt, loss_mask, i;
  460. static const int loss_mask_order[] = {
  461. ~0, /* no loss first */
  462. ~FF_LOSS_ALPHA,
  463. ~FF_LOSS_RESOLUTION,
  464. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  465. ~FF_LOSS_COLORQUANT,
  466. ~FF_LOSS_DEPTH,
  467. 0,
  468. };
  469. /* try with successive loss */
  470. i = 0;
  471. for(;;) {
  472. loss_mask = loss_mask_order[i++];
  473. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  474. has_alpha, loss_mask);
  475. if (dst_pix_fmt >= 0)
  476. goto found;
  477. if (loss_mask == 0)
  478. break;
  479. }
  480. return -1;
  481. found:
  482. if (loss_ptr)
  483. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  484. return dst_pix_fmt;
  485. }
  486. static void img_copy_plane(uint8_t *dst, int dst_wrap,
  487. const uint8_t *src, int src_wrap,
  488. int width, int height)
  489. {
  490. for(;height > 0; height--) {
  491. memcpy(dst, src, width);
  492. dst += dst_wrap;
  493. src += src_wrap;
  494. }
  495. }
  496. /**
  497. * Copy image 'src' to 'dst'.
  498. */
  499. void img_copy(AVPicture *dst, AVPicture *src,
  500. int pix_fmt, int width, int height)
  501. {
  502. int bwidth, bits, i;
  503. PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  504. pf = &pix_fmt_info[pix_fmt];
  505. switch(pf->pixel_type) {
  506. case FF_PIXEL_PACKED:
  507. switch(pix_fmt) {
  508. case PIX_FMT_YUV422:
  509. case PIX_FMT_RGB565:
  510. case PIX_FMT_RGB555:
  511. bits = 16;
  512. break;
  513. default:
  514. bits = pf->depth * pf->nb_channels;
  515. break;
  516. }
  517. bwidth = (width * bits + 7) >> 3;
  518. img_copy_plane(dst->data[0], dst->linesize[0],
  519. src->data[0], src->linesize[0],
  520. bwidth, height);
  521. break;
  522. case FF_PIXEL_PLANAR:
  523. for(i = 0; i < pf->nb_channels; i++) {
  524. int w, h;
  525. w = width;
  526. h = height;
  527. if (i == 1 || i == 2) {
  528. w >>= pf->x_chroma_shift;
  529. h >>= pf->y_chroma_shift;
  530. }
  531. bwidth = (w * pf->depth + 7) >> 3;
  532. img_copy_plane(dst->data[i], dst->linesize[i],
  533. src->data[i], src->linesize[i],
  534. bwidth, h);
  535. }
  536. break;
  537. case FF_PIXEL_PALETTE:
  538. img_copy_plane(dst->data[0], dst->linesize[0],
  539. src->data[0], src->linesize[0],
  540. width, height);
  541. /* copy the palette */
  542. img_copy_plane(dst->data[1], dst->linesize[1],
  543. src->data[1], src->linesize[1],
  544. 4, 256);
  545. break;
  546. }
  547. }
/* XXX: totally non optimized */
/* Convert packed YUYV (YUV422) to planar YUV420P.  Vertical chroma
 * decimation is done by simply dropping the chroma of every second
 * source line. */
static void yuv422_to_yuv420p(AVPicture *dst, AVPicture *src,
                              int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;

    p1 = src->data[0];
    lum1 = dst->data[0];
    cb1 = dst->data[1];
    cr1 = dst->data[2];
    /* two source lines per iteration: the first provides luma + chroma,
       the second luma only */
    for(;height >= 1; height -= 2) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        /* packed layout is Y0 Cb Y1 Cr for each pixel pair */
        for(w = width; w >= 2; w -= 2) {
            lum[0] = p[0];
            cb[0] = p[1];
            lum[1] = p[2];
            cr[0] = p[3];
            p += 4;
            lum += 2;
            cb++;
            cr++;
        }
        /* odd width: last luma plus its chroma pair.
           NOTE(review): this still reads p[3], i.e. one byte past the
           nominal last pixel — verify source lines are padded */
        if (w) {
            lum[0] = p[0];
            cb[0] = p[1];
            cr[0] = p[3];
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        /* second (luma-only) line, absent when the total height is odd */
        if (height>1) {
            p = p1;
            lum = lum1;
            for(w = width; w >= 2; w -= 2) {
                lum[0] = p[0];
                lum[1] = p[2];
                p += 4;
                lum += 2;
            }
            if (w) {
                lum[0] = p[0];
            }
            p1 += src->linesize[0];
            lum1 += dst->linesize[0];
        }
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
  602. static void yuv422_to_yuv422p(AVPicture *dst, AVPicture *src,
  603. int width, int height)
  604. {
  605. const uint8_t *p, *p1;
  606. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  607. int w;
  608. p1 = src->data[0];
  609. lum1 = dst->data[0];
  610. cb1 = dst->data[1];
  611. cr1 = dst->data[2];
  612. for(;height > 0; height--) {
  613. p = p1;
  614. lum = lum1;
  615. cb = cb1;
  616. cr = cr1;
  617. for(w = width; w >= 2; w -= 2) {
  618. lum[0] = p[0];
  619. cb[0] = p[1];
  620. lum[1] = p[2];
  621. cr[0] = p[3];
  622. p += 4;
  623. lum += 2;
  624. cb++;
  625. cr++;
  626. }
  627. p1 += src->linesize[0];
  628. lum1 += dst->linesize[0];
  629. cb1 += dst->linesize[1];
  630. cr1 += dst->linesize[2];
  631. }
  632. }
  633. static void yuv422p_to_yuv422(AVPicture *dst, AVPicture *src,
  634. int width, int height)
  635. {
  636. uint8_t *p, *p1;
  637. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  638. int w;
  639. p1 = dst->data[0];
  640. lum1 = src->data[0];
  641. cb1 = src->data[1];
  642. cr1 = src->data[2];
  643. for(;height > 0; height--) {
  644. p = p1;
  645. lum = lum1;
  646. cb = cb1;
  647. cr = cr1;
  648. for(w = width; w >= 2; w -= 2) {
  649. p[0] = lum[0];
  650. p[1] = cb[0];
  651. p[2] = lum[1];
  652. p[3] = cr[0];
  653. p += 4;
  654. lum += 2;
  655. cb++;
  656. cr++;
  657. }
  658. p1 += dst->linesize[0];
  659. lum1 += src->linesize[0];
  660. cb1 += src->linesize[1];
  661. cr1 += src->linesize[2];
  662. }
  663. }
/* fixed-point arithmetic: values are scaled by 2^SCALEBITS and rounded
   with ONE_HALF before the final right shift */
#define SCALEBITS 10
#define ONE_HALF (1 << (SCALEBITS - 1))
#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))

/* YUV -> RGB, CCIR601 (video level) input; step 1 computes the chroma
   contributions, step 2 adds the scaled luma and clips through 'cm' */
#define YUV_TO_RGB1_CCIR(cb1, cr1)\
{\
    cb = (cb1) - 128;\
    cr = (cr1) - 128;\
    r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
    g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
            ONE_HALF;\
    b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
}

#define YUV_TO_RGB2_CCIR(r, g, b, y1)\
{\
    y = ((y1) - 16) * FIX(255.0/219.0);\
    r = cm[(y + r_add) >> SCALEBITS];\
    g = cm[(y + g_add) >> SCALEBITS];\
    b = cm[(y + b_add) >> SCALEBITS];\
}

/* YUV -> RGB, full-range (JPEG level) input */
#define YUV_TO_RGB1(cb1, cr1)\
{\
    cb = (cb1) - 128;\
    cr = (cr1) - 128;\
    r_add = FIX(1.40200) * cr + ONE_HALF;\
    g_add = - FIX(0.34414) * cb - FIX(0.71414) * cr + ONE_HALF;\
    b_add = FIX(1.77200) * cb + ONE_HALF;\
}

#define YUV_TO_RGB2(r, g, b, y1)\
{\
    y = (y1) << SCALEBITS;\
    r = cm[(y + r_add) >> SCALEBITS];\
    g = cm[(y + g_add) >> SCALEBITS];\
    b = cm[(y + b_add) >> SCALEBITS];\
}

/* level conversions between CCIR601 and full-range (JPEG) samples;
   'cm' is the clipping table (cropTbl + MAX_NEG_CROP) */
#define Y_CCIR_TO_JPEG(y)\
 cm[((y) * FIX(255.0/219.0) + (ONE_HALF - 16 * FIX(255.0/219.0))) >> SCALEBITS]

#define Y_JPEG_TO_CCIR(y)\
 (((y) * FIX(219.0/255.0) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define C_CCIR_TO_JPEG(y)\
 cm[(((y) - 128) * FIX(127.0/112.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS]

/* NOTE: the clamp is really necessary! */
  705. static inline int C_JPEG_TO_CCIR(int y) {
  706. y = (((y - 128) * FIX(112.0/127.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS);
  707. if (y < 16)
  708. y = 16;
  709. return y;
  710. }
/* RGB -> YUV, full-range (JPEG level) output; 'shift' accounts for
   inputs that are sums of 2^shift pixels (subsampled chroma) */
#define RGB_TO_Y(r, g, b) \
((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
  FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)

#define RGB_TO_U(r1, g1, b1, shift)\
(((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
     FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V(r1, g1, b1, shift)\
(((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
     FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

/* RGB -> YUV, CCIR601 (video level) output */
#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
     FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  729. static uint8_t y_ccir_to_jpeg[256];
  730. static uint8_t y_jpeg_to_ccir[256];
  731. static uint8_t c_ccir_to_jpeg[256];
  732. static uint8_t c_jpeg_to_ccir[256];
  733. /* init various conversion tables */
  734. static void img_convert_init(void)
  735. {
  736. int i;
  737. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  738. for(i = 0;i < 256; i++) {
  739. y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
  740. y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
  741. c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
  742. c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
  743. }
  744. }
  745. /* apply to each pixel the given table */
  746. static void img_apply_table(uint8_t *dst, int dst_wrap,
  747. const uint8_t *src, int src_wrap,
  748. int width, int height, const uint8_t *table1)
  749. {
  750. int n;
  751. const uint8_t *s;
  752. uint8_t *d;
  753. const uint8_t *table;
  754. table = table1;
  755. for(;height > 0; height--) {
  756. s = src;
  757. d = dst;
  758. n = width;
  759. while (n >= 4) {
  760. d[0] = table[s[0]];
  761. d[1] = table[s[1]];
  762. d[2] = table[s[2]];
  763. d[3] = table[s[3]];
  764. d += 4;
  765. s += 4;
  766. n -= 4;
  767. }
  768. while (n > 0) {
  769. d[0] = table[s[0]];
  770. d++;
  771. s++;
  772. n--;
  773. }
  774. dst += dst_wrap;
  775. src += src_wrap;
  776. }
  777. }
  778. /* XXX: use generic filter ? */
  779. /* XXX: in most cases, the sampling position is incorrect */
  780. /* 4x1 -> 1x1 */
  781. static void shrink41(uint8_t *dst, int dst_wrap,
  782. const uint8_t *src, int src_wrap,
  783. int width, int height)
  784. {
  785. int w;
  786. const uint8_t *s;
  787. uint8_t *d;
  788. for(;height > 0; height--) {
  789. s = src;
  790. d = dst;
  791. for(w = width;w > 0; w--) {
  792. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  793. s += 4;
  794. d++;
  795. }
  796. src += src_wrap;
  797. dst += dst_wrap;
  798. }
  799. }
  800. /* 2x1 -> 1x1 */
  801. static void shrink21(uint8_t *dst, int dst_wrap,
  802. const uint8_t *src, int src_wrap,
  803. int width, int height)
  804. {
  805. int w;
  806. const uint8_t *s;
  807. uint8_t *d;
  808. for(;height > 0; height--) {
  809. s = src;
  810. d = dst;
  811. for(w = width;w > 0; w--) {
  812. d[0] = (s[0] + s[1]) >> 1;
  813. s += 2;
  814. d++;
  815. }
  816. src += src_wrap;
  817. dst += dst_wrap;
  818. }
  819. }
  820. /* 1x2 -> 1x1 */
  821. static void shrink12(uint8_t *dst, int dst_wrap,
  822. const uint8_t *src, int src_wrap,
  823. int width, int height)
  824. {
  825. int w;
  826. uint8_t *d;
  827. const uint8_t *s1, *s2;
  828. for(;height > 0; height--) {
  829. s1 = src;
  830. s2 = s1 + src_wrap;
  831. d = dst;
  832. for(w = width;w >= 4; w-=4) {
  833. d[0] = (s1[0] + s2[0]) >> 1;
  834. d[1] = (s1[1] + s2[1]) >> 1;
  835. d[2] = (s1[2] + s2[2]) >> 1;
  836. d[3] = (s1[3] + s2[3]) >> 1;
  837. s1 += 4;
  838. s2 += 4;
  839. d += 4;
  840. }
  841. for(;w > 0; w--) {
  842. d[0] = (s1[0] + s2[0]) >> 1;
  843. s1++;
  844. s2++;
  845. d++;
  846. }
  847. src += 2 * src_wrap;
  848. dst += dst_wrap;
  849. }
  850. }
  851. /* 2x2 -> 1x1 */
  852. static void shrink22(uint8_t *dst, int dst_wrap,
  853. const uint8_t *src, int src_wrap,
  854. int width, int height)
  855. {
  856. int w;
  857. const uint8_t *s1, *s2;
  858. uint8_t *d;
  859. for(;height > 0; height--) {
  860. s1 = src;
  861. s2 = s1 + src_wrap;
  862. d = dst;
  863. for(w = width;w >= 4; w-=4) {
  864. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  865. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  866. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  867. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  868. s1 += 8;
  869. s2 += 8;
  870. d += 4;
  871. }
  872. for(;w > 0; w--) {
  873. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  874. s1 += 2;
  875. s2 += 2;
  876. d++;
  877. }
  878. src += 2 * src_wrap;
  879. dst += dst_wrap;
  880. }
  881. }
  882. /* 4x4 -> 1x1 */
  883. static void shrink44(uint8_t *dst, int dst_wrap,
  884. const uint8_t *src, int src_wrap,
  885. int width, int height)
  886. {
  887. int w;
  888. const uint8_t *s1, *s2, *s3, *s4;
  889. uint8_t *d;
  890. for(;height > 0; height--) {
  891. s1 = src;
  892. s2 = s1 + src_wrap;
  893. s3 = s2 + src_wrap;
  894. s4 = s3 + src_wrap;
  895. d = dst;
  896. for(w = width;w > 0; w--) {
  897. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  898. s2[0] + s2[1] + s2[2] + s2[3] +
  899. s3[0] + s3[1] + s3[2] + s3[3] +
  900. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  901. s1 += 4;
  902. s2 += 4;
  903. s3 += 4;
  904. s4 += 4;
  905. d++;
  906. }
  907. src += 4 * src_wrap;
  908. dst += dst_wrap;
  909. }
  910. }
  911. static void grow21_line(uint8_t *dst, const uint8_t *src,
  912. int width)
  913. {
  914. int w;
  915. const uint8_t *s1;
  916. uint8_t *d;
  917. s1 = src;
  918. d = dst;
  919. for(w = width;w >= 4; w-=4) {
  920. d[1] = d[0] = s1[0];
  921. d[3] = d[2] = s1[1];
  922. s1 += 2;
  923. d += 4;
  924. }
  925. for(;w >= 2; w -= 2) {
  926. d[1] = d[0] = s1[0];
  927. s1 ++;
  928. d += 2;
  929. }
  930. /* only needed if width is not a multiple of two */
  931. /* XXX: veryfy that */
  932. if (w) {
  933. d[0] = s1[0];
  934. }
  935. }
  936. static void grow41_line(uint8_t *dst, const uint8_t *src,
  937. int width)
  938. {
  939. int w, v;
  940. const uint8_t *s1;
  941. uint8_t *d;
  942. s1 = src;
  943. d = dst;
  944. for(w = width;w >= 4; w-=4) {
  945. v = s1[0];
  946. d[0] = v;
  947. d[1] = v;
  948. d[2] = v;
  949. d[3] = v;
  950. s1 ++;
  951. d += 4;
  952. }
  953. }
  954. /* 1x1 -> 2x1 */
  955. static void grow21(uint8_t *dst, int dst_wrap,
  956. const uint8_t *src, int src_wrap,
  957. int width, int height)
  958. {
  959. for(;height > 0; height--) {
  960. grow21_line(dst, src, width);
  961. src += src_wrap;
  962. dst += dst_wrap;
  963. }
  964. }
/* 1x1 -> 2x2 */
/* Double each source pixel in both directions: every source row is
 * emitted as two doubled output rows. */
static void grow22(uint8_t *dst, int dst_wrap,
                   const uint8_t *src, int src_wrap,
                   int width, int height)
{
    for(;height > 0; height--) {
        grow21_line(dst, src, width);
        /* advance the source only on odd countdown values, i.e. after
           every second output row.  NOTE(review): for an odd total
           height the very first source row is emitted only once —
           verify callers always pass an even height */
        if (height%2)
            src += src_wrap;
        dst += dst_wrap;
    }
}
  977. /* 1x1 -> 4x1 */
  978. static void grow41(uint8_t *dst, int dst_wrap,
  979. const uint8_t *src, int src_wrap,
  980. int width, int height)
  981. {
  982. for(;height > 0; height--) {
  983. grow41_line(dst, src, width);
  984. src += src_wrap;
  985. dst += dst_wrap;
  986. }
  987. }
/* 1x1 -> 4x4 */
/* Quadruple each source pixel in both directions: every source row is
 * emitted as four quadrupled output rows. */
static void grow44(uint8_t *dst, int dst_wrap,
                   const uint8_t *src, int src_wrap,
                   int width, int height)
{
    for(;height > 0; height--) {
        grow41_line(dst, src, width);
        /* (height & 3) == 1 is hit once per group of four countdown
           values, so the source steps every fourth output row.
           NOTE(review): heights that are not a multiple of 4 replicate
           the first source row fewer than four times — verify callers */
        if ((height & 3) == 1)
            src += src_wrap;
        dst += dst_wrap;
    }
}
  1000. /* 1x2 -> 2x1 */
  1001. static void conv411(uint8_t *dst, int dst_wrap,
  1002. const uint8_t *src, int src_wrap,
  1003. int width, int height)
  1004. {
  1005. int w, c;
  1006. const uint8_t *s1, *s2;
  1007. uint8_t *d;
  1008. width>>=1;
  1009. for(;height > 0; height--) {
  1010. s1 = src;
  1011. s2 = src + src_wrap;
  1012. d = dst;
  1013. for(w = width;w > 0; w--) {
  1014. c = (s1[0] + s2[0]) >> 1;
  1015. d[0] = c;
  1016. d[1] = c;
  1017. s1++;
  1018. s2++;
  1019. d += 2;
  1020. }
  1021. src += src_wrap * 2;
  1022. dst += dst_wrap;
  1023. }
  1024. }
  1025. /* XXX: add jpeg quantize code */
  1026. #define TRANSP_INDEX (6*6*6)
  1027. /* this is maybe slow, but allows for extensions */
  1028. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1029. {
  1030. return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
  1031. }
  1032. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  1033. {
  1034. uint32_t *pal;
  1035. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  1036. int i, r, g, b;
  1037. pal = (uint32_t *)palette;
  1038. i = 0;
  1039. for(r = 0; r < 6; r++) {
  1040. for(g = 0; g < 6; g++) {
  1041. for(b = 0; b < 6; b++) {
  1042. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  1043. (pal_value[g] << 8) | pal_value[b];
  1044. }
  1045. }
  1046. }
  1047. if (has_alpha)
  1048. pal[i++] = 0;
  1049. while (i < 256)
  1050. pal[i++] = 0xff000000;
  1051. }
  1052. /* copy bit n to bits 0 ... n - 1 */
  1053. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1054. {
  1055. int mask;
  1056. mask = (1 << n) - 1;
  1057. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1058. }
/* rgb555 handling */
/* Each section below defines the pixel accessor macros for one RGB
   format and instantiates the shared converters by including
   imgconvert_template.h.  bitcopy_n() replicates the top bits of each
   component so 5/6-bit values expand to the full 0..255 range.
   NOTE(review): rgb565 redefines RGB_IN/BPP without local #undef-s, so
   the template presumably #undef-s them at its end (the explicit
   #undef-s after the bgr24 section look redundant) — verify against
   imgconvert_template.h */
#define RGB_NAME rgb555

#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (10 - 3), 3);\
    g = bitcopy_n(v >> (5 - 3), 3);\
    b = bitcopy_n(v << 3, 3);\
}

/* the top bit of a 1:5:5:5 word acts as alpha */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (10 - 3), 3);\
    g = bitcopy_n(v >> (5 - 3), 3);\
    b = bitcopy_n(v << 3, 3);\
    a = (-(v >> 15)) & 0xff;\
}

#define RGBA_OUT(d, r, g, b, a)\
{\
    ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3) | \
                           ((a << 8) & 0x8000);\
}

#define BPP 2

#include "imgconvert_template.h"

/* rgb565 handling (16-bit 5:6:5, no alpha) */
#define RGB_NAME rgb565

#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (11 - 3), 3);\
    g = bitcopy_n(v >> (5 - 2), 2);\
    b = bitcopy_n(v << 3, 3);\
}

#define RGB_OUT(d, r, g, b)\
{\
    ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
}

#define BPP 2

#include "imgconvert_template.h"

/* bgr24 handling (3 bytes per pixel, blue first) */
#define RGB_NAME bgr24

#define RGB_IN(r, g, b, s)\
{\
    b = (s)[0];\
    g = (s)[1];\
    r = (s)[2];\
}

#define RGB_OUT(d, r, g, b)\
{\
    (d)[0] = b;\
    (d)[1] = g;\
    (d)[2] = r;\
}

#define BPP 3

#include "imgconvert_template.h"

#undef RGB_IN
#undef RGB_OUT
#undef BPP

/* rgb24 handling (3 bytes per pixel, red first) */
#define RGB_NAME rgb24
#define FMT_RGB24

#define RGB_IN(r, g, b, s)\
{\
    r = (s)[0];\
    g = (s)[1];\
    b = (s)[2];\
}

#define RGB_OUT(d, r, g, b)\
{\
    (d)[0] = r;\
    (d)[1] = g;\
    (d)[2] = b;\
}

#define BPP 3

#include "imgconvert_template.h"
  1134. /* rgba32 handling */
  1135. #define RGB_NAME rgba32
  1136. #define FMT_RGBA32
  1137. #define RGB_IN(r, g, b, s)\
  1138. {\
  1139. unsigned int v = ((const uint32_t *)(s))[0];\
  1140. r = (v >> 16) & 0xff;\
  1141. g = (v >> 8) & 0xff;\
  1142. b = v & 0xff;\
  1143. }
  1144. #define RGBA_IN(r, g, b, a, s)\
  1145. {\
  1146. unsigned int v = ((const uint32_t *)(s))[0];\
  1147. a = (v >> 24) & 0xff;\
  1148. r = (v >> 16) & 0xff;\
  1149. g = (v >> 8) & 0xff;\
  1150. b = v & 0xff;\
  1151. }
  1152. #define RGBA_OUT(d, r, g, b, a)\
  1153. {\
  1154. ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
  1155. }
  1156. #define BPP 4
  1157. #include "imgconvert_template.h"
/* Expand a 1 bit/pixel packed picture into 8 bit/pixel gray. Each source
   bit becomes 0x00 or 0xff via the -(bit) trick. xor_mask selects the
   polarity of the source format: 0xff for monowhite (a 0 bit is white),
   0x00 for monoblack (a 1 bit is white). */
static void mono_to_gray(AVPicture *dst, AVPicture *src,
                         int width, int height, int xor_mask)
{
    const unsigned char *p;
    unsigned char *q;
    int v, dst_wrap, src_wrap;
    int y, w;

    p = src->data[0];
    /* wrap = bytes to skip at end of a line to reach the next one */
    src_wrap = src->linesize[0] - ((width + 7) >> 3);
    q = dst->data[0];
    dst_wrap = dst->linesize[0] - width;
    for(y=0;y<height;y++) {
        w = width;
        /* fast path: 8 output pixels per source byte */
        while (w >= 8) {
            v = *p++ ^ xor_mask;
            q[0] = -(v >> 7);
            q[1] = -((v >> 6) & 1);
            q[2] = -((v >> 5) & 1);
            q[3] = -((v >> 4) & 1);
            q[4] = -((v >> 3) & 1);
            q[5] = -((v >> 2) & 1);
            q[6] = -((v >> 1) & 1);
            q[7] = -((v >> 0) & 1);
            w -= 8;
            q += 8;
        }
        /* trailing partial byte: emit from the MSB down */
        if (w > 0) {
            v = *p++ ^ xor_mask;
            do {
                q[0] = -((v >> 7) & 1);
                q++;
                v <<= 1;
            } while (--w);
        }
        p += src_wrap;
        q += dst_wrap;
    }
}
/* monowhite (0 = white) -> 8 bit gray */
static void monowhite_to_gray(AVPicture *dst, AVPicture *src,
                              int width, int height)
{
    mono_to_gray(dst, src, width, height, 0xff);
}
/* monoblack (0 = black) -> 8 bit gray */
static void monoblack_to_gray(AVPicture *dst, AVPicture *src,
                              int width, int height)
{
    mono_to_gray(dst, src, width, height, 0x00);
}
/* Pack 8 bit/pixel gray into a 1 bit/pixel picture. Each sample is
   thresholded by its most significant bit (>= 128 -> 1). xor_mask
   selects the output polarity: 0xff for monowhite, 0x00 for
   monoblack. */
static void gray_to_mono(AVPicture *dst, AVPicture *src,
                         int width, int height, int xor_mask)
{
    int n;
    const uint8_t *s;
    uint8_t *d;
    int j, b, v, n1, src_wrap, dst_wrap, y;

    s = src->data[0];
    src_wrap = src->linesize[0] - width;
    d = dst->data[0];
    /* destination line length is width/8 rounded up */
    dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
    for(y=0;y<height;y++) {
        n = width;
        /* pack 8 pixels into one byte, MSB first */
        while (n >= 8) {
            v = 0;
            for(j=0;j<8;j++) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
            }
            d[0] = v ^ xor_mask;
            d++;
            n -= 8;
        }
        /* trailing partial byte: left-align the remaining bits */
        if (n > 0) {
            n1 = n;
            v = 0;
            while (n > 0) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
                n--;
            }
            d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
            d++;
        }
        s += src_wrap;
        d += dst_wrap;
    }
}
/* 8 bit gray -> monowhite (0 = white) */
static void gray_to_monowhite(AVPicture *dst, AVPicture *src,
                              int width, int height)
{
    gray_to_mono(dst, src, width, height, 0xff);
}
/* 8 bit gray -> monoblack (0 = black) */
static void gray_to_monoblack(AVPicture *dst, AVPicture *src,
                              int width, int height)
{
    gray_to_mono(dst, src, width, height, 0x00);
}
/* entry of the direct-conversion dispatch table; convert is NULL when no
   direct routine exists for a given (src, dst) format pair */
typedef struct ConvertEntry {
    void (*convert)(AVPicture *dst, AVPicture *src, int width, int height);
} ConvertEntry;
/* Add each new convertion function in this table. In order to be able
   to convert from any format to any format, the following constraints
   must be satisfied:
   - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
   - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
   - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGBA32
   - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
     PIX_FMT_RGB24.
   - PIX_FMT_422 must convert to and from PIX_FMT_422P.
   The other conversion functions are just optimisations for common cases.
*/
/* indexed by [src_pix_fmt][dst_pix_fmt]; entries not listed are
   zero-initialized (convert == NULL), which makes img_convert() fall
   back to an intermediate format */
static ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
    [PIX_FMT_YUV420P] = {
        [PIX_FMT_RGB555] = {
            .convert = yuv420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuv420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuv420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuv420p_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = yuv420p_to_rgba32
        },
    },
    [PIX_FMT_YUV422P] = {
        [PIX_FMT_YUV422] = {
            .convert = yuv422p_to_yuv422,
        },
    },
    [PIX_FMT_YUV444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuv444p_to_rgb24
        },
    },
    [PIX_FMT_YUVJ420P] = {
        [PIX_FMT_RGB555] = {
            .convert = yuvj420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuvj420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuvj420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuvj420p_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = yuvj420p_to_rgba32
        },
    },
    [PIX_FMT_YUVJ444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuvj444p_to_rgb24
        },
    },
    [PIX_FMT_YUV422] = {
        [PIX_FMT_YUV420P] = {
            .convert = yuv422_to_yuv420p,
        },
        [PIX_FMT_YUV422P] = {
            .convert = yuv422_to_yuv422p,
        },
    },
    [PIX_FMT_RGB24] = {
        [PIX_FMT_YUV420P] = {
            .convert = rgb24_to_yuv420p
        },
        [PIX_FMT_RGB565] = {
            .convert = rgb24_to_rgb565
        },
        [PIX_FMT_RGB555] = {
            .convert = rgb24_to_rgb555
        },
        [PIX_FMT_RGBA32] = {
            .convert = rgb24_to_rgba32
        },
        [PIX_FMT_BGR24] = {
            .convert = rgb24_to_bgr24
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb24_to_gray
        },
        [PIX_FMT_PAL8] = {
            .convert = rgb24_to_pal8
        },
        [PIX_FMT_YUV444P] = {
            .convert = rgb24_to_yuv444p
        },
        [PIX_FMT_YUVJ420P] = {
            .convert = rgb24_to_yuvj420p
        },
        [PIX_FMT_YUVJ444P] = {
            .convert = rgb24_to_yuvj444p
        },
    },
    [PIX_FMT_RGBA32] = {
        [PIX_FMT_RGB24] = {
            .convert = rgba32_to_rgb24
        },
        [PIX_FMT_RGB555] = {
            .convert = rgba32_to_rgb555
        },
        [PIX_FMT_PAL8] = {
            .convert = rgba32_to_pal8
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgba32_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgba32_to_gray
        },
    },
    [PIX_FMT_BGR24] = {
        [PIX_FMT_RGB24] = {
            .convert = bgr24_to_rgb24
        },
        [PIX_FMT_YUV420P] = {
            .convert = bgr24_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = bgr24_to_gray
        },
    },
    [PIX_FMT_RGB555] = {
        [PIX_FMT_RGB24] = {
            .convert = rgb555_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = rgb555_to_rgba32
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb555_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb555_to_gray
        },
    },
    [PIX_FMT_RGB565] = {
        [PIX_FMT_RGB24] = {
            .convert = rgb565_to_rgb24
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb565_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb565_to_gray
        },
    },
    [PIX_FMT_GRAY8] = {
        [PIX_FMT_RGB555] = {
            .convert = gray_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = gray_to_rgb565
        },
        [PIX_FMT_RGB24] = {
            .convert = gray_to_rgb24
        },
        [PIX_FMT_BGR24] = {
            .convert = gray_to_bgr24
        },
        [PIX_FMT_RGBA32] = {
            .convert = gray_to_rgba32
        },
        [PIX_FMT_MONOWHITE] = {
            .convert = gray_to_monowhite
        },
        [PIX_FMT_MONOBLACK] = {
            .convert = gray_to_monoblack
        },
    },
    [PIX_FMT_MONOWHITE] = {
        [PIX_FMT_GRAY8] = {
            .convert = monowhite_to_gray
        },
    },
    [PIX_FMT_MONOBLACK] = {
        [PIX_FMT_GRAY8] = {
            .convert = monoblack_to_gray
        },
    },
    [PIX_FMT_PAL8] = {
        [PIX_FMT_RGB555] = {
            .convert = pal8_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = pal8_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = pal8_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = pal8_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = pal8_to_rgba32
        },
    },
};
  1464. static int avpicture_alloc(AVPicture *picture,
  1465. int pix_fmt, int width, int height)
  1466. {
  1467. unsigned int size;
  1468. void *ptr;
  1469. size = avpicture_get_size(pix_fmt, width, height);
  1470. ptr = av_malloc(size);
  1471. if (!ptr)
  1472. goto fail;
  1473. avpicture_fill(picture, ptr, pix_fmt, width, height);
  1474. return 0;
  1475. fail:
  1476. memset(picture, 0, sizeof(AVPicture));
  1477. return -1;
  1478. }
/* free the buffer allocated by avpicture_alloc() (all planes live in the
   single allocation pointed to by data[0]) */
static void avpicture_free(AVPicture *picture)
{
    av_free(picture->data[0]);
}
  1483. /* return true if yuv planar */
  1484. static inline int is_yuv_planar(PixFmtInfo *ps)
  1485. {
  1486. return (ps->color_type == FF_COLOR_YUV ||
  1487. ps->color_type == FF_COLOR_YUV_JPEG) &&
  1488. ps->pixel_type == FF_PIXEL_PLANAR;
  1489. }
/* Convert 'src' (src_pix_fmt, src_width x src_height) into 'dst'
   (dst_pix_fmt, same dimensions). Strategy, in order: direct copy,
   direct conversion routine from convert_table, special gray<->YUV
   paths, planar YUV chroma resampling, and finally a recursive
   conversion through an intermediate format.
   XXX: always use linesize. Return -1 if not supported */
int img_convert(AVPicture *dst, int dst_pix_fmt,
                AVPicture *src, int src_pix_fmt,
                int src_width, int src_height)
{
    static int inited;
    int i, ret, dst_width, dst_height, int_pix_fmt;
    PixFmtInfo *src_pix, *dst_pix;
    ConvertEntry *ce;
    AVPicture tmp1, *tmp = &tmp1;

    if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
        dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
        return -1;
    if (src_width <= 0 || src_height <= 0)
        return 0;

    /* lazy one-time table init.
       NOTE(review): not thread safe -- confirm callers serialize */
    if (!inited) {
        inited = 1;
        img_convert_init();
    }

    dst_width = src_width;
    dst_height = src_height;

    dst_pix = &pix_fmt_info[dst_pix_fmt];
    src_pix = &pix_fmt_info[src_pix_fmt];
    if (src_pix_fmt == dst_pix_fmt) {
        /* no conversion needed: just copy */
        img_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
        return 0;
    }

    ce = &convert_table[src_pix_fmt][dst_pix_fmt];
    if (ce->convert) {
        /* specific convertion routine */
        ce->convert(dst, src, dst_width, dst_height);
        return 0;
    }

    /* gray to YUV: gray plane becomes luma, chroma is set to neutral */
    if (is_yuv_planar(dst_pix) &&
        src_pix_fmt == PIX_FMT_GRAY8) {
        int w, h, y;
        uint8_t *d;

        if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
            /* full-range destination: copy luma as-is */
            img_copy_plane(dst->data[0], dst->linesize[0],
                           src->data[0], src->linesize[0],
                           dst_width, dst_height);
        } else {
            /* CCIR destination: rescale [0,255] -> [16,235] */
            img_apply_table(dst->data[0], dst->linesize[0],
                            src->data[0], src->linesize[0],
                            dst_width, dst_height,
                            y_jpeg_to_ccir);
        }
        /* fill U and V with 128 */
        w = dst_width;
        h = dst_height;
        w >>= dst_pix->x_chroma_shift;
        h >>= dst_pix->y_chroma_shift;
        for(i = 1; i <= 2; i++) {
            d = dst->data[i];
            for(y = 0; y< h; y++) {
                memset(d, 128, w);
                d += dst->linesize[i];
            }
        }
        return 0;
    }

    /* YUV to gray: keep only the luma plane */
    if (is_yuv_planar(src_pix) &&
        dst_pix_fmt == PIX_FMT_GRAY8) {
        if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
            img_copy_plane(dst->data[0], dst->linesize[0],
                           src->data[0], src->linesize[0],
                           dst_width, dst_height);
        } else {
            /* CCIR source: rescale [16,235] -> [0,255] */
            img_apply_table(dst->data[0], dst->linesize[0],
                            src->data[0], src->linesize[0],
                            dst_width, dst_height,
                            y_ccir_to_jpeg);
        }
        return 0;
    }

    /* YUV to YUV planar: copy luma, resample chroma planes */
    if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
        int x_shift, y_shift, w, h, xy_shift;
        void (*resize_func)(uint8_t *dst, int dst_wrap,
                            const uint8_t *src, int src_wrap,
                            int width, int height);

        /* compute chroma size of the smallest dimensions */
        w = dst_width;
        h = dst_height;
        if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
            w >>= dst_pix->x_chroma_shift;
        else
            w >>= src_pix->x_chroma_shift;
        if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
            h >>= dst_pix->y_chroma_shift;
        else
            h >>= src_pix->y_chroma_shift;

        /* encode the signed x/y subsampling deltas into one nibble
           each; the switch below selects the matching shrink/grow
           filter */
        x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
        y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
        xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
        /* there must be filters for conversion at least from and to
           YUV444 format */
        switch(xy_shift) {
        case 0x00:
            resize_func = img_copy_plane;
            break;
        case 0x10:
            resize_func = shrink21;
            break;
        case 0x20:
            resize_func = shrink41;
            break;
        case 0x01:
            resize_func = shrink12;
            break;
        case 0x11:
            resize_func = shrink22;
            break;
        case 0x22:
            resize_func = shrink44;
            break;
        case 0xf0:
            resize_func = grow21;
            break;
        case 0xe0:
            resize_func = grow41;
            break;
        case 0xff:
            resize_func = grow22;
            break;
        case 0xee:
            resize_func = grow44;
            break;
        case 0xf1:
            resize_func = conv411;
            break;
        default:
            /* currently not handled */
            goto no_chroma_filter;
        }

        img_copy_plane(dst->data[0], dst->linesize[0],
                       src->data[0], src->linesize[0],
                       dst_width, dst_height);

        for(i = 1;i <= 2; i++)
            resize_func(dst->data[i], dst->linesize[i],
                        src->data[i], src->linesize[i],
                        dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
        /* if yuv color space conversion is needed, we do it here on
           the destination image */
        if (dst_pix->color_type != src_pix->color_type) {
            const uint8_t *y_table, *c_table;
            if (dst_pix->color_type == FF_COLOR_YUV) {
                y_table = y_jpeg_to_ccir;
                c_table = c_jpeg_to_ccir;
            } else {
                y_table = y_ccir_to_jpeg;
                c_table = c_ccir_to_jpeg;
            }
            img_apply_table(dst->data[0], dst->linesize[0],
                            dst->data[0], dst->linesize[0],
                            dst_width, dst_height,
                            y_table);

            for(i = 1;i <= 2; i++)
                img_apply_table(dst->data[i], dst->linesize[i],
                                dst->data[i], dst->linesize[i],
                                dst_width>>dst_pix->x_chroma_shift,
                                dst_height>>dst_pix->y_chroma_shift,
                                c_table);
        }
        return 0;
    }
 no_chroma_filter:

    /* try to use an intermediate format: convert src -> tmp -> dst
       recursively (at most one level of recursion by construction) */
    if (src_pix_fmt == PIX_FMT_YUV422 ||
        dst_pix_fmt == PIX_FMT_YUV422) {
        /* specific case: convert to YUV422P first */
        int_pix_fmt = PIX_FMT_YUV422P;
    } else if ((src_pix->color_type == FF_COLOR_GRAY &&
                src_pix_fmt != PIX_FMT_GRAY8) ||
               (dst_pix->color_type == FF_COLOR_GRAY &&
                dst_pix_fmt != PIX_FMT_GRAY8)) {
        /* gray8 is the normalized format */
        int_pix_fmt = PIX_FMT_GRAY8;
    } else if ((is_yuv_planar(src_pix) &&
                src_pix_fmt != PIX_FMT_YUV444P &&
                src_pix_fmt != PIX_FMT_YUVJ444P)) {
        /* yuv444 is the normalized format */
        if (src_pix->color_type == FF_COLOR_YUV_JPEG)
            int_pix_fmt = PIX_FMT_YUVJ444P;
        else
            int_pix_fmt = PIX_FMT_YUV444P;
    } else if ((is_yuv_planar(dst_pix) &&
                dst_pix_fmt != PIX_FMT_YUV444P &&
                dst_pix_fmt != PIX_FMT_YUVJ444P)) {
        /* yuv444 is the normalized format */
        if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
            int_pix_fmt = PIX_FMT_YUVJ444P;
        else
            int_pix_fmt = PIX_FMT_YUV444P;
    } else {
        /* the two formats are rgb or gray8 or yuv[j]444p */
        if (src_pix->is_alpha && dst_pix->is_alpha)
            int_pix_fmt = PIX_FMT_RGBA32;
        else
            int_pix_fmt = PIX_FMT_RGB24;
    }
    if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
        return -1;
    ret = -1;
    if (img_convert(tmp, int_pix_fmt,
                    src, src_pix_fmt, src_width, src_height) < 0)
        goto fail1;
    if (img_convert(dst, dst_pix_fmt,
                    tmp, int_pix_fmt, dst_width, dst_height) < 0)
        goto fail1;
    ret = 0;
 fail1:
    avpicture_free(tmp);
    return ret;
}
  1708. /* NOTE: we scan all the pixels to have an exact information */
  1709. static int get_alpha_info_pal8(AVPicture *src, int width, int height)
  1710. {
  1711. const unsigned char *p;
  1712. int src_wrap, ret, x, y;
  1713. unsigned int a;
  1714. uint32_t *palette = (uint32_t *)src->data[1];
  1715. p = src->data[0];
  1716. src_wrap = src->linesize[0] - width;
  1717. ret = 0;
  1718. for(y=0;y<height;y++) {
  1719. for(x=0;x<width;x++) {
  1720. a = palette[p[0]] >> 24;
  1721. if (a == 0x00) {
  1722. ret |= FF_ALPHA_TRANSP;
  1723. } else if (a != 0xff) {
  1724. ret |= FF_ALPHA_SEMI_TRANSP;
  1725. }
  1726. p++;
  1727. }
  1728. p += src_wrap;
  1729. }
  1730. return ret;
  1731. }
  1732. /**
  1733. * Tell if an image really has transparent alpha values.
  1734. * @return ored mask of FF_ALPHA_xxx constants
  1735. */
  1736. int img_get_alpha_info(AVPicture *src, int pix_fmt, int width, int height)
  1737. {
  1738. PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  1739. int ret;
  1740. pf = &pix_fmt_info[pix_fmt];
  1741. /* no alpha can be represented in format */
  1742. if (!pf->is_alpha)
  1743. return 0;
  1744. switch(pix_fmt) {
  1745. case PIX_FMT_RGBA32:
  1746. ret = get_alpha_info_rgba32(src, width, height);
  1747. break;
  1748. case PIX_FMT_RGB555:
  1749. ret = get_alpha_info_rgb555(src, width, height);
  1750. break;
  1751. case PIX_FMT_PAL8:
  1752. ret = get_alpha_info_pal8(src, width, height);
  1753. break;
  1754. default:
  1755. /* we do not know, so everything is indicated */
  1756. ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
  1757. break;
  1758. }
  1759. return ret;
  1760. }
#ifdef HAVE_MMX
/* MMX implementation of the [-1 4 2 4 -1]/8 vertical filter on 4 pixels.
   Expects mm7 = 0 and mm6 = rounding constant (four words of 4).
   In-place variant: also copies lum_m2 into lum_m4 before overwriting
   lum_m2 with the filtered result. */
#define DEINT_INPLACE_LINE_LUM \
                    movd_m2r(lum_m4[0],mm0);\
                    movd_m2r(lum_m3[0],mm1);\
                    movd_m2r(lum_m2[0],mm2);\
                    movd_m2r(lum_m1[0],mm3);\
                    movd_m2r(lum[0],mm4);\
                    punpcklbw_r2r(mm7,mm0);\
                    movd_r2m(mm2,lum_m4[0]);\
                    punpcklbw_r2r(mm7,mm1);\
                    punpcklbw_r2r(mm7,mm2);\
                    punpcklbw_r2r(mm7,mm3);\
                    punpcklbw_r2r(mm7,mm4);\
                    paddw_r2r(mm3,mm1);\
                    psllw_i2r(1,mm2);\
                    paddw_r2r(mm4,mm0);\
                    psllw_i2r(2,mm1);\
                    paddw_r2r(mm6,mm2);\
                    paddw_r2r(mm2,mm1);\
                    psubusw_r2r(mm0,mm1);\
                    psrlw_i2r(3,mm1);\
                    packuswb_r2r(mm7,mm1);\
                    movd_r2m(mm1,lum_m2[0]);
/* same filter, but writes the result to dst[] instead of back into the
   source lines */
#define DEINT_LINE_LUM \
                    movd_m2r(lum_m4[0],mm0);\
                    movd_m2r(lum_m3[0],mm1);\
                    movd_m2r(lum_m2[0],mm2);\
                    movd_m2r(lum_m1[0],mm3);\
                    movd_m2r(lum[0],mm4);\
                    punpcklbw_r2r(mm7,mm0);\
                    punpcklbw_r2r(mm7,mm1);\
                    punpcklbw_r2r(mm7,mm2);\
                    punpcklbw_r2r(mm7,mm3);\
                    punpcklbw_r2r(mm7,mm4);\
                    paddw_r2r(mm3,mm1);\
                    psllw_i2r(1,mm2);\
                    paddw_r2r(mm4,mm0);\
                    psllw_i2r(2,mm1);\
                    paddw_r2r(mm6,mm2);\
                    paddw_r2r(mm2,mm1);\
                    psubusw_r2r(mm0,mm1);\
                    psrlw_i2r(3,mm1);\
                    packuswb_r2r(mm7,mm1);\
                    movd_r2m(mm1,dst[0]);
#endif
/* filter parameters: [-1 4 2 4 -1] // 8 */
/* Filter one line: dst[i] = clip((-lum_m4 + 4*lum_m3 + 2*lum_m2
   + 4*lum_m1 - lum + 4) >> 3) over five consecutive source lines. */
static void deinterlace_line(uint8_t *dst, uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
                             int size)
{
#ifndef HAVE_MMX
    uint8_t *cm = cropTbl + MAX_NEG_CROP; /* clamp table to [0,255] */
    int sum;

    for(;size > 0;size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        dst[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
        dst++;
    }
#else
    {
        /* mm6 = rounding constant, mm7 = 0, as DEINT_LINE_LUM expects */
        mmx_t rounder;
        rounder.uw[0]=4;
        rounder.uw[1]=4;
        rounder.uw[2]=4;
        rounder.uw[3]=4;
        pxor_r2r(mm7,mm7);
        movq_m2r(rounder,mm6);
    }
    /* 4 pixels per iteration; any size%4 remainder is dropped --
       callers guarantee widths that are multiples of 4 */
    for (;size > 3; size-=4) {
        DEINT_LINE_LUM
        lum_m4+=4;
        lum_m3+=4;
        lum_m2+=4;
        lum_m1+=4;
        lum+=4;
        dst+=4;
    }
#endif
}
/* Same [-1 4 2 4 -1]/8 filter but in place: the result overwrites
   lum_m2, and lum_m2's original value is saved into lum_m4 so the next
   filtering step still sees the unfiltered data. */
static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
                                     int size)
{
#ifndef HAVE_MMX
    uint8_t *cm = cropTbl + MAX_NEG_CROP; /* clamp table to [0,255] */
    int sum;

    for(;size > 0;size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        lum_m4[0]=lum_m2[0]; /* keep the unfiltered value for later */
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        lum_m2[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
    }
#else
    {
        /* mm6 = rounding constant, mm7 = 0, as the macro expects */
        mmx_t rounder;
        rounder.uw[0]=4;
        rounder.uw[1]=4;
        rounder.uw[2]=4;
        rounder.uw[3]=4;
        pxor_r2r(mm7,mm7);
        movq_m2r(rounder,mm6);
    }
    /* 4 pixels per iteration; widths are multiples of 4 by contract */
    for (;size > 3; size-=4) {
        DEINT_INPLACE_LINE_LUM
        lum_m4+=4;
        lum_m3+=4;
        lum_m2+=4;
        lum_m1+=4;
        lum+=4;
    }
#endif
}
/* deinterlacing : 2 temporal taps, 3 spatial taps linear filter. The
   top field is copied as is, but the bottom field is deinterlaced
   against the top field. */
static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
                                     uint8_t *src1, int src_wrap,
                                     int width, int height)
{
    uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
    int y;

    /* five-line window; the first window reuses line 0 for the two
       lines "above" the top edge */
    src_m2 = src1;
    src_m1 = src1;
    src_0=&src_m1[src_wrap];
    src_p1=&src_0[src_wrap];
    src_p2=&src_p1[src_wrap];
    for(y=0;y<(height-2);y+=2) {
        /* top-field line copied verbatim */
        memcpy(dst,src_m1,width);
        dst += dst_wrap;
        /* bottom-field line filtered from the surrounding lines */
        deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
        /* slide the window down two lines */
        src_m2 = src_0;
        src_m1 = src_p1;
        src_0 = src_p2;
        src_p1 += 2*src_wrap;
        src_p2 += 2*src_wrap;
        dst += dst_wrap;
    }
    memcpy(dst,src_m1,width);
    dst += dst_wrap;
    /* do last line */
    deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
}
  1918. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  1919. int width, int height)
  1920. {
  1921. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  1922. int y;
  1923. uint8_t *buf;
  1924. buf = (uint8_t*)av_malloc(width);
  1925. src_m1 = src1;
  1926. memcpy(buf,src_m1,width);
  1927. src_0=&src_m1[src_wrap];
  1928. src_p1=&src_0[src_wrap];
  1929. src_p2=&src_p1[src_wrap];
  1930. for(y=0;y<(height-2);y+=2) {
  1931. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  1932. src_m1 = src_p1;
  1933. src_0 = src_p2;
  1934. src_p1 += 2*src_wrap;
  1935. src_p2 += 2*src_wrap;
  1936. }
  1937. /* do last line */
  1938. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  1939. av_free(buf);
  1940. }
/* deinterlace - if not supported return -1 */
/* Deinterlace all planes of a planar YUV picture; src == dst selects the
   in-place path. Width and height must be multiples of 4 (the MMX line
   filter works on 4 pixels at a time). */
int avpicture_deinterlace(AVPicture *dst, AVPicture *src,
                          int pix_fmt, int width, int height)
{
    int i;

    if (pix_fmt != PIX_FMT_YUV420P &&
        pix_fmt != PIX_FMT_YUV422P &&
        pix_fmt != PIX_FMT_YUV444P)
        return -1;
    if ((width & 3) != 0 || (height & 3) != 0)
        return -1;

    for(i=0;i<3;i++) {
        if (i == 1) {
            /* switch from luma to chroma plane dimensions */
            switch(pix_fmt) {
            case PIX_FMT_YUV420P:
                width >>= 1;
                height >>= 1;
                break;
            case PIX_FMT_YUV422P:
                width >>= 1;
                break;
            default:
                break;
            }
        }
        if (src == dst) {
            deinterlace_bottom_field_inplace(src->data[i], src->linesize[i],
                                             width, height);
        } else {
            deinterlace_bottom_field(dst->data[i],dst->linesize[i],
                                     src->data[i], src->linesize[i],
                                     width, height);
        }
    }
#ifdef HAVE_MMX
    emms(); /* leave MMX state so the FPU is usable again */
#endif
    return 0;
}
  1980. #undef FIX