You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2736 lines
74KB

  1. /*
  2. * Misc image convertion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file imgconvert.c
  23. * Misc image convertion routines.
  24. */
  25. /* TODO:
  26. * - write 'ffimg' program to test all the image related stuff
  27. * - move all api to slice based system
  28. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  29. */
  30. #include "avcodec.h"
  31. #include "dsputil.h"
  32. #ifdef USE_FASTMEMCPY
  33. #include "libvo/fastmemcpy.h"
  34. #endif
  35. #ifdef HAVE_MMX
  36. #include "i386/mmx.h"
  37. #endif
/* Token pasting through two levels so that x and y are macro-expanded
   before being glued together. */
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)

/* Values for PixFmtInfo.color_type */
#define FF_COLOR_RGB      0 /* RGB color space */
#define FF_COLOR_GRAY     1 /* gray color space */
#define FF_COLOR_YUV      2 /* YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
#define FF_COLOR_YUV_JPEG 3 /* YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */

/* Values for PixFmtInfo.pixel_type */
#define FF_PIXEL_PLANAR  0 /* each channel has one component in AVPicture */
#define FF_PIXEL_PACKED  1 /* only one components containing all the channels */
#define FF_PIXEL_PALETTE 2 /* one components containing indexes for a palette */
/* Static per-pixel-format properties; one entry per PIX_FMT_* value
   lives in the pix_fmt_info[] table below. */
typedef struct PixFmtInfo {
    const char *name;        /* canonical short name, e.g. "yuv420p" */
    uint8_t nb_channels;     /* number of channels (including alpha) */
    uint8_t color_type;      /* color type (see FF_COLOR_xxx constants) */
    uint8_t pixel_type;      /* pixel storage type (see FF_PIXEL_xxx constants) */
    uint8_t is_alpha : 1;    /* true if alpha can be specified */
    uint8_t x_chroma_shift;  /* X chroma subsampling factor is 2 ^ shift */
    uint8_t y_chroma_shift;  /* Y chroma subsampling factor is 2 ^ shift */
    uint8_t depth;           /* bit depth of the color components */
} PixFmtInfo;
  57. /* this table gives more information about formats */
  58. static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
  59. /* YUV formats */
  60. [PIX_FMT_YUV420P] = {
  61. .name = "yuv420p",
  62. .nb_channels = 3,
  63. .color_type = FF_COLOR_YUV,
  64. .pixel_type = FF_PIXEL_PLANAR,
  65. .depth = 8,
  66. .x_chroma_shift = 1, .y_chroma_shift = 1,
  67. },
  68. [PIX_FMT_YUV422P] = {
  69. .name = "yuv422p",
  70. .nb_channels = 3,
  71. .color_type = FF_COLOR_YUV,
  72. .pixel_type = FF_PIXEL_PLANAR,
  73. .depth = 8,
  74. .x_chroma_shift = 1, .y_chroma_shift = 0,
  75. },
  76. [PIX_FMT_YUV444P] = {
  77. .name = "yuv444p",
  78. .nb_channels = 3,
  79. .color_type = FF_COLOR_YUV,
  80. .pixel_type = FF_PIXEL_PLANAR,
  81. .depth = 8,
  82. .x_chroma_shift = 0, .y_chroma_shift = 0,
  83. },
  84. [PIX_FMT_YUV422] = {
  85. .name = "yuv422",
  86. .nb_channels = 1,
  87. .color_type = FF_COLOR_YUV,
  88. .pixel_type = FF_PIXEL_PACKED,
  89. .depth = 8,
  90. .x_chroma_shift = 1, .y_chroma_shift = 0,
  91. },
  92. [PIX_FMT_UYVY422] = {
  93. .name = "uyvy422",
  94. .nb_channels = 1,
  95. .color_type = FF_COLOR_YUV,
  96. .pixel_type = FF_PIXEL_PACKED,
  97. .depth = 8,
  98. .x_chroma_shift = 1, .y_chroma_shift = 0,
  99. },
  100. [PIX_FMT_YUV410P] = {
  101. .name = "yuv410p",
  102. .nb_channels = 3,
  103. .color_type = FF_COLOR_YUV,
  104. .pixel_type = FF_PIXEL_PLANAR,
  105. .depth = 8,
  106. .x_chroma_shift = 2, .y_chroma_shift = 2,
  107. },
  108. [PIX_FMT_YUV411P] = {
  109. .name = "yuv411p",
  110. .nb_channels = 3,
  111. .color_type = FF_COLOR_YUV,
  112. .pixel_type = FF_PIXEL_PLANAR,
  113. .depth = 8,
  114. .x_chroma_shift = 2, .y_chroma_shift = 0,
  115. },
  116. /* JPEG YUV */
  117. [PIX_FMT_YUVJ420P] = {
  118. .name = "yuvj420p",
  119. .nb_channels = 3,
  120. .color_type = FF_COLOR_YUV_JPEG,
  121. .pixel_type = FF_PIXEL_PLANAR,
  122. .depth = 8,
  123. .x_chroma_shift = 1, .y_chroma_shift = 1,
  124. },
  125. [PIX_FMT_YUVJ422P] = {
  126. .name = "yuvj422p",
  127. .nb_channels = 3,
  128. .color_type = FF_COLOR_YUV_JPEG,
  129. .pixel_type = FF_PIXEL_PLANAR,
  130. .depth = 8,
  131. .x_chroma_shift = 1, .y_chroma_shift = 0,
  132. },
  133. [PIX_FMT_YUVJ444P] = {
  134. .name = "yuvj444p",
  135. .nb_channels = 3,
  136. .color_type = FF_COLOR_YUV_JPEG,
  137. .pixel_type = FF_PIXEL_PLANAR,
  138. .depth = 8,
  139. .x_chroma_shift = 0, .y_chroma_shift = 0,
  140. },
  141. /* RGB formats */
  142. [PIX_FMT_RGB24] = {
  143. .name = "rgb24",
  144. .nb_channels = 3,
  145. .color_type = FF_COLOR_RGB,
  146. .pixel_type = FF_PIXEL_PACKED,
  147. .depth = 8,
  148. .x_chroma_shift = 0, .y_chroma_shift = 0,
  149. },
  150. [PIX_FMT_BGR24] = {
  151. .name = "bgr24",
  152. .nb_channels = 3,
  153. .color_type = FF_COLOR_RGB,
  154. .pixel_type = FF_PIXEL_PACKED,
  155. .depth = 8,
  156. .x_chroma_shift = 0, .y_chroma_shift = 0,
  157. },
  158. [PIX_FMT_RGBA32] = {
  159. .name = "rgba32",
  160. .nb_channels = 4, .is_alpha = 1,
  161. .color_type = FF_COLOR_RGB,
  162. .pixel_type = FF_PIXEL_PACKED,
  163. .depth = 8,
  164. .x_chroma_shift = 0, .y_chroma_shift = 0,
  165. },
  166. [PIX_FMT_RGB565] = {
  167. .name = "rgb565",
  168. .nb_channels = 3,
  169. .color_type = FF_COLOR_RGB,
  170. .pixel_type = FF_PIXEL_PACKED,
  171. .depth = 5,
  172. .x_chroma_shift = 0, .y_chroma_shift = 0,
  173. },
  174. [PIX_FMT_RGB555] = {
  175. .name = "rgb555",
  176. .nb_channels = 4, .is_alpha = 1,
  177. .color_type = FF_COLOR_RGB,
  178. .pixel_type = FF_PIXEL_PACKED,
  179. .depth = 5,
  180. .x_chroma_shift = 0, .y_chroma_shift = 0,
  181. },
  182. /* gray / mono formats */
  183. [PIX_FMT_GRAY8] = {
  184. .name = "gray",
  185. .nb_channels = 1,
  186. .color_type = FF_COLOR_GRAY,
  187. .pixel_type = FF_PIXEL_PLANAR,
  188. .depth = 8,
  189. },
  190. [PIX_FMT_MONOWHITE] = {
  191. .name = "monow",
  192. .nb_channels = 1,
  193. .color_type = FF_COLOR_GRAY,
  194. .pixel_type = FF_PIXEL_PLANAR,
  195. .depth = 1,
  196. },
  197. [PIX_FMT_MONOBLACK] = {
  198. .name = "monob",
  199. .nb_channels = 1,
  200. .color_type = FF_COLOR_GRAY,
  201. .pixel_type = FF_PIXEL_PLANAR,
  202. .depth = 1,
  203. },
  204. /* paletted formats */
  205. [PIX_FMT_PAL8] = {
  206. .name = "pal8",
  207. .nb_channels = 4, .is_alpha = 1,
  208. .color_type = FF_COLOR_RGB,
  209. .pixel_type = FF_PIXEL_PALETTE,
  210. .depth = 8,
  211. },
  212. [PIX_FMT_XVMC_MPEG2_MC] = {
  213. .name = "xvmcmc",
  214. },
  215. [PIX_FMT_XVMC_MPEG2_IDCT] = {
  216. .name = "xvmcidct",
  217. },
  218. [PIX_FMT_UYVY411] = {
  219. .name = "uyvy411",
  220. .nb_channels = 1,
  221. .color_type = FF_COLOR_YUV,
  222. .pixel_type = FF_PIXEL_PACKED,
  223. .depth = 8,
  224. .x_chroma_shift = 2, .y_chroma_shift = 0,
  225. },
  226. [PIX_FMT_BGR32] = {
  227. .name = "bgr32",
  228. .nb_channels = 4, .is_alpha = 1,
  229. .color_type = FF_COLOR_RGB,
  230. .pixel_type = FF_PIXEL_PACKED,
  231. .depth = 8,
  232. .x_chroma_shift = 0, .y_chroma_shift = 0,
  233. },
  234. [PIX_FMT_BGR565] = {
  235. .name = "bgr565",
  236. .nb_channels = 3,
  237. .color_type = FF_COLOR_RGB,
  238. .pixel_type = FF_PIXEL_PACKED,
  239. .depth = 5,
  240. .x_chroma_shift = 0, .y_chroma_shift = 0,
  241. },
  242. [PIX_FMT_BGR555] = {
  243. .name = "bgr555",
  244. .nb_channels = 4, .is_alpha = 1,
  245. .color_type = FF_COLOR_RGB,
  246. .pixel_type = FF_PIXEL_PACKED,
  247. .depth = 5,
  248. .x_chroma_shift = 0, .y_chroma_shift = 0,
  249. },
  250. [PIX_FMT_RGB8] = {
  251. .name = "rgb8",
  252. .nb_channels = 1,
  253. .color_type = FF_COLOR_RGB,
  254. .pixel_type = FF_PIXEL_PACKED,
  255. .depth = 8,
  256. .x_chroma_shift = 0, .y_chroma_shift = 0,
  257. },
  258. [PIX_FMT_RGB4] = {
  259. .name = "rgb4",
  260. .nb_channels = 1,
  261. .color_type = FF_COLOR_RGB,
  262. .pixel_type = FF_PIXEL_PACKED,
  263. .depth = 4,
  264. .x_chroma_shift = 0, .y_chroma_shift = 0,
  265. },
  266. [PIX_FMT_RGB4_BYTE] = {
  267. .name = "rgb4_byte",
  268. .nb_channels = 1,
  269. .color_type = FF_COLOR_RGB,
  270. .pixel_type = FF_PIXEL_PACKED,
  271. .depth = 8,
  272. .x_chroma_shift = 0, .y_chroma_shift = 0,
  273. },
  274. [PIX_FMT_BGR8] = {
  275. .name = "bgr8",
  276. .nb_channels = 1,
  277. .color_type = FF_COLOR_RGB,
  278. .pixel_type = FF_PIXEL_PACKED,
  279. .depth = 8,
  280. .x_chroma_shift = 0, .y_chroma_shift = 0,
  281. },
  282. [PIX_FMT_BGR4] = {
  283. .name = "bgr4",
  284. .nb_channels = 1,
  285. .color_type = FF_COLOR_RGB,
  286. .pixel_type = FF_PIXEL_PACKED,
  287. .depth = 4,
  288. .x_chroma_shift = 0, .y_chroma_shift = 0,
  289. },
  290. [PIX_FMT_BGR4_BYTE] = {
  291. .name = "bgr4_byte",
  292. .nb_channels = 1,
  293. .color_type = FF_COLOR_RGB,
  294. .pixel_type = FF_PIXEL_PACKED,
  295. .depth = 8,
  296. .x_chroma_shift = 0, .y_chroma_shift = 0,
  297. },
  298. [PIX_FMT_NV12] = {
  299. .name = "nv12",
  300. .nb_channels = 2,
  301. .color_type = FF_COLOR_YUV,
  302. .pixel_type = FF_PIXEL_PLANAR,
  303. .depth = 8,
  304. .x_chroma_shift = 1, .y_chroma_shift = 1,
  305. },
  306. [PIX_FMT_NV21] = {
  307. .name = "nv12",
  308. .nb_channels = 2,
  309. .color_type = FF_COLOR_YUV,
  310. .pixel_type = FF_PIXEL_PLANAR,
  311. .depth = 8,
  312. .x_chroma_shift = 1, .y_chroma_shift = 1,
  313. },
  314. [PIX_FMT_BGR32_1] = {
  315. .name = "bgr32_1",
  316. .nb_channels = 4, .is_alpha = 1,
  317. .color_type = FF_COLOR_RGB,
  318. .pixel_type = FF_PIXEL_PACKED,
  319. .depth = 8,
  320. .x_chroma_shift = 0, .y_chroma_shift = 0,
  321. },
  322. [PIX_FMT_RGB32_1] = {
  323. .name = "rgb32_1",
  324. .nb_channels = 4, .is_alpha = 1,
  325. .color_type = FF_COLOR_RGB,
  326. .pixel_type = FF_PIXEL_PACKED,
  327. .depth = 8,
  328. .x_chroma_shift = 0, .y_chroma_shift = 0,
  329. },
  330. };
  331. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  332. {
  333. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  334. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  335. }
  336. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  337. {
  338. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  339. return "???";
  340. else
  341. return pix_fmt_info[pix_fmt].name;
  342. }
  343. enum PixelFormat avcodec_get_pix_fmt(const char* name)
  344. {
  345. int i;
  346. for (i=0; i < PIX_FMT_NB; i++)
  347. if (!strcmp(pix_fmt_info[i].name, name))
  348. break;
  349. return i;
  350. }
/* Picture fields are filled with 'ptr' addresses. Also return size.
 *
 * Computes the plane pointers and linesizes for a tightly packed image of
 * the given format/size laid out starting at 'ptr', and returns the total
 * number of bytes required.  Passing ptr == NULL still computes valid
 * offsets/sizes, which is how avpicture_get_size() uses this function.
 * Returns -1 (and NULLs the data pointers) on invalid dimensions or an
 * unhandled pixel format. */
int avpicture_fill(AVPicture *picture, uint8_t *ptr,
                   int pix_fmt, int width, int height)
{
    int size, w2, h2, size2;
    const PixFmtInfo *pinfo;

    if(avcodec_check_dimensions(NULL, width, height))
        goto fail;
    pinfo = &pix_fmt_info[pix_fmt];
    size = width * height;   /* luma / base plane size in pixels (== bytes at 8bpp) */
    switch(pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
    case PIX_FMT_YUVJ420P:
    case PIX_FMT_YUVJ422P:
    case PIX_FMT_YUVJ444P:
        /* chroma plane dimensions, rounded up for odd sizes */
        w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
        h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
        size2 = w2 * h2;
        picture->data[0] = ptr;
        picture->data[1] = picture->data[0] + size;
        picture->data[2] = picture->data[1] + size2;
        picture->linesize[0] = width;
        picture->linesize[1] = w2;
        picture->linesize[2] = w2;
        return size + 2 * size2;
    case PIX_FMT_NV12:
    case PIX_FMT_NV21:
        w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
        h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
        size2 = w2 * h2 * 2;   /* one plane of interleaved U+V */
        picture->data[0] = ptr;
        picture->data[1] = picture->data[0] + size;
        picture->data[2] = NULL;
        picture->linesize[0] = width;
        /* NOTE(review): size2 already covers both chroma components, so
           returning size + 2*size2 looks like it over-reports by w2*h2*2
           bytes, and linesize[1] = w2 looks like it should be 2*w2 for the
           interleaved UV plane — verify against consumers before changing. */
        picture->linesize[1] = w2;
        picture->linesize[2] = 0;
        return size + 2 * size2;
    case PIX_FMT_RGB24:
    case PIX_FMT_BGR24:
        /* packed 24-bit, one plane, 3 bytes/pixel */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 3;
        return size * 3;
    case PIX_FMT_RGBA32:
    case PIX_FMT_BGR32:
    case PIX_FMT_RGB32_1:
    case PIX_FMT_BGR32_1:
        /* packed 32-bit, one plane, 4 bytes/pixel */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 4;
        return size * 4;
    case PIX_FMT_BGR555:
    case PIX_FMT_BGR565:
    case PIX_FMT_RGB555:
    case PIX_FMT_RGB565:
    case PIX_FMT_YUV422:
        /* packed 16-bit, one plane, 2 bytes/pixel */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 2;
        return size * 2;
    case PIX_FMT_UYVY422:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 2;
        return size * 2;
    case PIX_FMT_UYVY411:
        /* packed 4:1:1 — 6 bytes per 4 pixels, i.e. 12 bits/pixel */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width + width/2;
        return size + size/2;
    case PIX_FMT_RGB8:
    case PIX_FMT_BGR8:
    case PIX_FMT_RGB4_BYTE:
    case PIX_FMT_BGR4_BYTE:
    case PIX_FMT_GRAY8:
        /* one byte per pixel, single plane */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width;
        return size;
    case PIX_FMT_RGB4:
    case PIX_FMT_BGR4:
        /* 4 bits per pixel, two pixels per byte */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width / 2;
        return size / 2;
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        /* 1 bit per pixel, rows padded to whole bytes */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = (width + 7) >> 3;
        return picture->linesize[0] * height;
    case PIX_FMT_PAL8:
        size2 = (size + 3) & ~3;   /* align palette start to 4 bytes */
        picture->data[0] = ptr;
        picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
        picture->data[2] = NULL;
        picture->linesize[0] = width;
        picture->linesize[1] = 4;
        return size2 + 256 * 4;
    default:
fail:
        picture->data[0] = NULL;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        return -1;
    }
}
  471. int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
  472. unsigned char *dest, int dest_size)
  473. {
  474. const PixFmtInfo* pf = &pix_fmt_info[pix_fmt];
  475. int i, j, w, h, data_planes;
  476. const unsigned char* s;
  477. int size = avpicture_get_size(pix_fmt, width, height);
  478. if (size > dest_size || size < 0)
  479. return -1;
  480. if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
  481. if (pix_fmt == PIX_FMT_YUV422 ||
  482. pix_fmt == PIX_FMT_UYVY422 ||
  483. pix_fmt == PIX_FMT_BGR565 ||
  484. pix_fmt == PIX_FMT_BGR565 ||
  485. pix_fmt == PIX_FMT_RGB565 ||
  486. pix_fmt == PIX_FMT_RGB555)
  487. w = width * 2;
  488. else if (pix_fmt == PIX_FMT_UYVY411)
  489. w = width + width/2;
  490. else if (pix_fmt == PIX_FMT_PAL8)
  491. w = width;
  492. else
  493. w = width * (pf->depth * pf->nb_channels / 8);
  494. data_planes = 1;
  495. h = height;
  496. } else {
  497. data_planes = pf->nb_channels;
  498. w = (width*pf->depth + 7)/8;
  499. h = height;
  500. }
  501. for (i=0; i<data_planes; i++) {
  502. if (i == 1) {
  503. w = width >> pf->x_chroma_shift;
  504. h = height >> pf->y_chroma_shift;
  505. }
  506. s = src->data[i];
  507. for(j=0; j<h; j++) {
  508. memcpy(dest, s, w);
  509. dest += w;
  510. s += src->linesize[i];
  511. }
  512. }
  513. if (pf->pixel_type == FF_PIXEL_PALETTE)
  514. memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);
  515. return size;
  516. }
  517. int avpicture_get_size(int pix_fmt, int width, int height)
  518. {
  519. AVPicture dummy_pict;
  520. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  521. }
  522. /**
  523. * compute the loss when converting from a pixel format to another
  524. */
  525. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  526. int has_alpha)
  527. {
  528. const PixFmtInfo *pf, *ps;
  529. int loss;
  530. ps = &pix_fmt_info[src_pix_fmt];
  531. pf = &pix_fmt_info[dst_pix_fmt];
  532. /* compute loss */
  533. loss = 0;
  534. pf = &pix_fmt_info[dst_pix_fmt];
  535. if (pf->depth < ps->depth ||
  536. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  537. loss |= FF_LOSS_DEPTH;
  538. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  539. pf->y_chroma_shift > ps->y_chroma_shift)
  540. loss |= FF_LOSS_RESOLUTION;
  541. switch(pf->color_type) {
  542. case FF_COLOR_RGB:
  543. if (ps->color_type != FF_COLOR_RGB &&
  544. ps->color_type != FF_COLOR_GRAY)
  545. loss |= FF_LOSS_COLORSPACE;
  546. break;
  547. case FF_COLOR_GRAY:
  548. if (ps->color_type != FF_COLOR_GRAY)
  549. loss |= FF_LOSS_COLORSPACE;
  550. break;
  551. case FF_COLOR_YUV:
  552. if (ps->color_type != FF_COLOR_YUV)
  553. loss |= FF_LOSS_COLORSPACE;
  554. break;
  555. case FF_COLOR_YUV_JPEG:
  556. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  557. ps->color_type != FF_COLOR_YUV &&
  558. ps->color_type != FF_COLOR_GRAY)
  559. loss |= FF_LOSS_COLORSPACE;
  560. break;
  561. default:
  562. /* fail safe test */
  563. if (ps->color_type != pf->color_type)
  564. loss |= FF_LOSS_COLORSPACE;
  565. break;
  566. }
  567. if (pf->color_type == FF_COLOR_GRAY &&
  568. ps->color_type != FF_COLOR_GRAY)
  569. loss |= FF_LOSS_CHROMA;
  570. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  571. loss |= FF_LOSS_ALPHA;
  572. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  573. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  574. loss |= FF_LOSS_COLORQUANT;
  575. return loss;
  576. }
  577. static int avg_bits_per_pixel(int pix_fmt)
  578. {
  579. int bits;
  580. const PixFmtInfo *pf;
  581. pf = &pix_fmt_info[pix_fmt];
  582. switch(pf->pixel_type) {
  583. case FF_PIXEL_PACKED:
  584. switch(pix_fmt) {
  585. case PIX_FMT_YUV422:
  586. case PIX_FMT_UYVY422:
  587. case PIX_FMT_RGB565:
  588. case PIX_FMT_RGB555:
  589. case PIX_FMT_BGR565:
  590. case PIX_FMT_BGR555:
  591. bits = 16;
  592. break;
  593. case PIX_FMT_UYVY411:
  594. bits = 12;
  595. break;
  596. default:
  597. bits = pf->depth * pf->nb_channels;
  598. break;
  599. }
  600. break;
  601. case FF_PIXEL_PLANAR:
  602. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  603. bits = pf->depth * pf->nb_channels;
  604. } else {
  605. bits = pf->depth + ((2 * pf->depth) >>
  606. (pf->x_chroma_shift + pf->y_chroma_shift));
  607. }
  608. break;
  609. case FF_PIXEL_PALETTE:
  610. bits = 8;
  611. break;
  612. default:
  613. bits = -1;
  614. break;
  615. }
  616. return bits;
  617. }
  618. static int avcodec_find_best_pix_fmt1(int pix_fmt_mask,
  619. int src_pix_fmt,
  620. int has_alpha,
  621. int loss_mask)
  622. {
  623. int dist, i, loss, min_dist, dst_pix_fmt;
  624. /* find exact color match with smallest size */
  625. dst_pix_fmt = -1;
  626. min_dist = 0x7fffffff;
  627. for(i = 0;i < PIX_FMT_NB; i++) {
  628. if (pix_fmt_mask & (1 << i)) {
  629. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  630. if (loss == 0) {
  631. dist = avg_bits_per_pixel(i);
  632. if (dist < min_dist) {
  633. min_dist = dist;
  634. dst_pix_fmt = i;
  635. }
  636. }
  637. }
  638. }
  639. return dst_pix_fmt;
  640. }
  641. /**
  642. * find best pixel format to convert to. Return -1 if none found
  643. */
  644. int avcodec_find_best_pix_fmt(int pix_fmt_mask, int src_pix_fmt,
  645. int has_alpha, int *loss_ptr)
  646. {
  647. int dst_pix_fmt, loss_mask, i;
  648. static const int loss_mask_order[] = {
  649. ~0, /* no loss first */
  650. ~FF_LOSS_ALPHA,
  651. ~FF_LOSS_RESOLUTION,
  652. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  653. ~FF_LOSS_COLORQUANT,
  654. ~FF_LOSS_DEPTH,
  655. 0,
  656. };
  657. /* try with successive loss */
  658. i = 0;
  659. for(;;) {
  660. loss_mask = loss_mask_order[i++];
  661. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  662. has_alpha, loss_mask);
  663. if (dst_pix_fmt >= 0)
  664. goto found;
  665. if (loss_mask == 0)
  666. break;
  667. }
  668. return -1;
  669. found:
  670. if (loss_ptr)
  671. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  672. return dst_pix_fmt;
  673. }
  674. void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
  675. const uint8_t *src, int src_wrap,
  676. int width, int height)
  677. {
  678. if((!dst) || (!src))
  679. return;
  680. for(;height > 0; height--) {
  681. memcpy(dst, src, width);
  682. dst += dst_wrap;
  683. src += src_wrap;
  684. }
  685. }
  686. /**
  687. * Copy image 'src' to 'dst'.
  688. */
  689. void img_copy(AVPicture *dst, const AVPicture *src,
  690. int pix_fmt, int width, int height)
  691. {
  692. int bwidth, bits, i;
  693. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  694. pf = &pix_fmt_info[pix_fmt];
  695. switch(pf->pixel_type) {
  696. case FF_PIXEL_PACKED:
  697. switch(pix_fmt) {
  698. case PIX_FMT_YUV422:
  699. case PIX_FMT_UYVY422:
  700. case PIX_FMT_RGB565:
  701. case PIX_FMT_RGB555:
  702. case PIX_FMT_BGR565:
  703. case PIX_FMT_BGR555:
  704. bits = 16;
  705. break;
  706. case PIX_FMT_UYVY411:
  707. bits = 12;
  708. break;
  709. default:
  710. bits = pf->depth * pf->nb_channels;
  711. break;
  712. }
  713. bwidth = (width * bits + 7) >> 3;
  714. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  715. src->data[0], src->linesize[0],
  716. bwidth, height);
  717. break;
  718. case FF_PIXEL_PLANAR:
  719. for(i = 0; i < pf->nb_channels; i++) {
  720. int w, h;
  721. w = width;
  722. h = height;
  723. if (i == 1 || i == 2) {
  724. w >>= pf->x_chroma_shift;
  725. h >>= pf->y_chroma_shift;
  726. }
  727. bwidth = (w * pf->depth + 7) >> 3;
  728. ff_img_copy_plane(dst->data[i], dst->linesize[i],
  729. src->data[i], src->linesize[i],
  730. bwidth, h);
  731. }
  732. break;
  733. case FF_PIXEL_PALETTE:
  734. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  735. src->data[0], src->linesize[0],
  736. width, height);
  737. /* copy the palette */
  738. ff_img_copy_plane(dst->data[1], dst->linesize[1],
  739. src->data[1], src->linesize[1],
  740. 4, 256);
  741. break;
  742. }
  743. }
/* XXX: totally non optimized */
/* Convert packed YUYV 4:2:2 to planar YUV 4:2:0.
 *
 * Lines are processed in pairs: chroma is taken from the first line of
 * each pair only (no averaging).  The trailing odd line (when height is
 * odd) and the trailing odd pixel (when width is odd) are handled by the
 * tail blocks below.
 *
 * NOTE(review): the odd-pixel tail reads p[3] (the Cr byte of a full
 * 4-byte YUYV group) — this assumes the source row always contains a
 * complete final group even for odd widths; confirm against callers. */
static void yuv422_to_yuv420p(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;

    p1 = src->data[0];
    lum1 = dst->data[0];
    cb1 = dst->data[1];
    cr1 = dst->data[2];
    for(;height >= 1; height -= 2) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        /* even line of the pair: extract both luma and chroma */
        for(w = width; w >= 2; w -= 2) {
            lum[0] = p[0];   /* Y0 */
            cb[0] = p[1];    /* U */
            lum[1] = p[2];   /* Y1 */
            cr[0] = p[3];    /* V */
            p += 4;
            lum += 2;
            cb++;
            cr++;
        }
        if (w) {
            /* odd trailing pixel: one luma plus its chroma pair */
            lum[0] = p[0];
            cb[0] = p[1];
            cr[0] = p[3];
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        if (height>1) {
            /* odd line of the pair: luma only, chroma is dropped */
            p = p1;
            lum = lum1;
            for(w = width; w >= 2; w -= 2) {
                lum[0] = p[0];
                lum[1] = p[2];
                p += 4;
                lum += 2;
            }
            if (w) {
                lum[0] = p[0];
            }
            p1 += src->linesize[0];
            lum1 += dst->linesize[0];
        }
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
/* Convert packed UYVY 4:2:2 to planar YUV 4:2:0.
 *
 * Identical structure to yuv422_to_yuv420p() but with the UYVY byte
 * order (U Y0 V Y1): chroma comes from the first line of each line pair,
 * and odd trailing lines/pixels are handled by the tail blocks. */
static void uyvy422_to_yuv420p(AVPicture *dst, const AVPicture *src,
                               int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;

    p1 = src->data[0];
    lum1 = dst->data[0];
    cb1 = dst->data[1];
    cr1 = dst->data[2];
    for(;height >= 1; height -= 2) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        /* even line: take luma and chroma */
        for(w = width; w >= 2; w -= 2) {
            lum[0] = p[1];   /* Y0 */
            cb[0] = p[0];    /* U */
            lum[1] = p[3];   /* Y1 */
            cr[0] = p[2];    /* V */
            p += 4;
            lum += 2;
            cb++;
            cr++;
        }
        if (w) {
            /* odd trailing pixel; assumes a complete 4-byte group exists */
            lum[0] = p[1];
            cb[0] = p[0];
            cr[0] = p[2];
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        if (height>1) {
            /* odd line: luma only, chroma dropped */
            p = p1;
            lum = lum1;
            for(w = width; w >= 2; w -= 2) {
                lum[0] = p[1];
                lum[1] = p[3];
                p += 4;
                lum += 2;
            }
            if (w) {
                lum[0] = p[1];
            }
            p1 += src->linesize[0];
            lum1 += dst->linesize[0];
        }
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
  851. static void uyvy422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  852. int width, int height)
  853. {
  854. const uint8_t *p, *p1;
  855. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  856. int w;
  857. p1 = src->data[0];
  858. lum1 = dst->data[0];
  859. cb1 = dst->data[1];
  860. cr1 = dst->data[2];
  861. for(;height > 0; height--) {
  862. p = p1;
  863. lum = lum1;
  864. cb = cb1;
  865. cr = cr1;
  866. for(w = width; w >= 2; w -= 2) {
  867. lum[0] = p[1];
  868. cb[0] = p[0];
  869. lum[1] = p[3];
  870. cr[0] = p[2];
  871. p += 4;
  872. lum += 2;
  873. cb++;
  874. cr++;
  875. }
  876. p1 += src->linesize[0];
  877. lum1 += dst->linesize[0];
  878. cb1 += dst->linesize[1];
  879. cr1 += dst->linesize[2];
  880. }
  881. }
  882. static void yuv422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  883. int width, int height)
  884. {
  885. const uint8_t *p, *p1;
  886. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  887. int w;
  888. p1 = src->data[0];
  889. lum1 = dst->data[0];
  890. cb1 = dst->data[1];
  891. cr1 = dst->data[2];
  892. for(;height > 0; height--) {
  893. p = p1;
  894. lum = lum1;
  895. cb = cb1;
  896. cr = cr1;
  897. for(w = width; w >= 2; w -= 2) {
  898. lum[0] = p[0];
  899. cb[0] = p[1];
  900. lum[1] = p[2];
  901. cr[0] = p[3];
  902. p += 4;
  903. lum += 2;
  904. cb++;
  905. cr++;
  906. }
  907. p1 += src->linesize[0];
  908. lum1 += dst->linesize[0];
  909. cb1 += dst->linesize[1];
  910. cr1 += dst->linesize[2];
  911. }
  912. }
  913. static void yuv422p_to_yuv422(AVPicture *dst, const AVPicture *src,
  914. int width, int height)
  915. {
  916. uint8_t *p, *p1;
  917. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  918. int w;
  919. p1 = dst->data[0];
  920. lum1 = src->data[0];
  921. cb1 = src->data[1];
  922. cr1 = src->data[2];
  923. for(;height > 0; height--) {
  924. p = p1;
  925. lum = lum1;
  926. cb = cb1;
  927. cr = cr1;
  928. for(w = width; w >= 2; w -= 2) {
  929. p[0] = lum[0];
  930. p[1] = cb[0];
  931. p[2] = lum[1];
  932. p[3] = cr[0];
  933. p += 4;
  934. lum += 2;
  935. cb++;
  936. cr++;
  937. }
  938. p1 += dst->linesize[0];
  939. lum1 += src->linesize[0];
  940. cb1 += src->linesize[1];
  941. cr1 += src->linesize[2];
  942. }
  943. }
  944. static void yuv422p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  945. int width, int height)
  946. {
  947. uint8_t *p, *p1;
  948. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  949. int w;
  950. p1 = dst->data[0];
  951. lum1 = src->data[0];
  952. cb1 = src->data[1];
  953. cr1 = src->data[2];
  954. for(;height > 0; height--) {
  955. p = p1;
  956. lum = lum1;
  957. cb = cb1;
  958. cr = cr1;
  959. for(w = width; w >= 2; w -= 2) {
  960. p[1] = lum[0];
  961. p[0] = cb[0];
  962. p[3] = lum[1];
  963. p[2] = cr[0];
  964. p += 4;
  965. lum += 2;
  966. cb++;
  967. cr++;
  968. }
  969. p1 += dst->linesize[0];
  970. lum1 += src->linesize[0];
  971. cb1 += src->linesize[1];
  972. cr1 += src->linesize[2];
  973. }
  974. }
  975. static void uyvy411_to_yuv411p(AVPicture *dst, const AVPicture *src,
  976. int width, int height)
  977. {
  978. const uint8_t *p, *p1;
  979. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  980. int w;
  981. p1 = src->data[0];
  982. lum1 = dst->data[0];
  983. cb1 = dst->data[1];
  984. cr1 = dst->data[2];
  985. for(;height > 0; height--) {
  986. p = p1;
  987. lum = lum1;
  988. cb = cb1;
  989. cr = cr1;
  990. for(w = width; w >= 4; w -= 4) {
  991. cb[0] = p[0];
  992. lum[0] = p[1];
  993. lum[1] = p[2];
  994. cr[0] = p[3];
  995. lum[2] = p[4];
  996. lum[3] = p[5];
  997. p += 6;
  998. lum += 4;
  999. cb++;
  1000. cr++;
  1001. }
  1002. p1 += src->linesize[0];
  1003. lum1 += dst->linesize[0];
  1004. cb1 += dst->linesize[1];
  1005. cr1 += dst->linesize[2];
  1006. }
  1007. }
  1008. static void yuv420p_to_yuv422(AVPicture *dst, const AVPicture *src,
  1009. int width, int height)
  1010. {
  1011. int w, h;
  1012. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1013. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1014. uint8_t *cb1, *cb2 = src->data[1];
  1015. uint8_t *cr1, *cr2 = src->data[2];
  1016. for(h = height / 2; h--;) {
  1017. line1 = linesrc;
  1018. line2 = linesrc + dst->linesize[0];
  1019. lum1 = lumsrc;
  1020. lum2 = lumsrc + src->linesize[0];
  1021. cb1 = cb2;
  1022. cr1 = cr2;
  1023. for(w = width / 2; w--;) {
  1024. *line1++ = *lum1++; *line2++ = *lum2++;
  1025. *line1++ = *line2++ = *cb1++;
  1026. *line1++ = *lum1++; *line2++ = *lum2++;
  1027. *line1++ = *line2++ = *cr1++;
  1028. }
  1029. linesrc += dst->linesize[0] * 2;
  1030. lumsrc += src->linesize[0] * 2;
  1031. cb2 += src->linesize[1];
  1032. cr2 += src->linesize[2];
  1033. }
  1034. }
  1035. static void yuv420p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1036. int width, int height)
  1037. {
  1038. int w, h;
  1039. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1040. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1041. uint8_t *cb1, *cb2 = src->data[1];
  1042. uint8_t *cr1, *cr2 = src->data[2];
  1043. for(h = height / 2; h--;) {
  1044. line1 = linesrc;
  1045. line2 = linesrc + dst->linesize[0];
  1046. lum1 = lumsrc;
  1047. lum2 = lumsrc + src->linesize[0];
  1048. cb1 = cb2;
  1049. cr1 = cr2;
  1050. for(w = width / 2; w--;) {
  1051. *line1++ = *line2++ = *cb1++;
  1052. *line1++ = *lum1++; *line2++ = *lum2++;
  1053. *line1++ = *line2++ = *cr1++;
  1054. *line1++ = *lum1++; *line2++ = *lum2++;
  1055. }
  1056. linesrc += dst->linesize[0] * 2;
  1057. lumsrc += src->linesize[0] * 2;
  1058. cb2 += src->linesize[1];
  1059. cr2 += src->linesize[2];
  1060. }
  1061. }
/* Fixed-point colorspace conversion: coefficients are scaled by
   2^SCALEBITS and rounded via ONE_HALF.  The _CCIR variants convert
   between limited-range (16..235 / 16..240) and full-range values;
   'cm' is assumed to be a clipping table in scope at each use site. */
#define SCALEBITS 10
#define ONE_HALF  (1 << (SCALEBITS - 1))
#define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))

/* Stage 1: compute the fixed-point R/G/B additive terms from limited-range
   (CCIR 601) chroma. */
#define YUV_TO_RGB1_CCIR(cb1, cr1)\
{\
    cb = (cb1) - 128;\
    cr = (cr1) - 128;\
    r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
    g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
            ONE_HALF;\
    b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
}

/* Stage 2: combine a limited-range luma sample with the precomputed terms
   and clip through the 'cm' table. */
#define YUV_TO_RGB2_CCIR(r, g, b, y1)\
{\
    y = ((y1) - 16) * FIX(255.0/219.0);\
    r = cm[(y + r_add) >> SCALEBITS];\
    g = cm[(y + g_add) >> SCALEBITS];\
    b = cm[(y + b_add) >> SCALEBITS];\
}

/* Full-range (JPEG) variants of the two stages above. */
#define YUV_TO_RGB1(cb1, cr1)\
{\
    cb = (cb1) - 128;\
    cr = (cr1) - 128;\
    r_add = FIX(1.40200) * cr + ONE_HALF;\
    g_add = - FIX(0.34414) * cb - FIX(0.71414) * cr + ONE_HALF;\
    b_add = FIX(1.77200) * cb + ONE_HALF;\
}

#define YUV_TO_RGB2(r, g, b, y1)\
{\
    y = (y1) << SCALEBITS;\
    r = cm[(y + r_add) >> SCALEBITS];\
    g = cm[(y + g_add) >> SCALEBITS];\
    b = cm[(y + b_add) >> SCALEBITS];\
}

/* Range conversions between CCIR 601 limited range and JPEG full range. */
#define Y_CCIR_TO_JPEG(y)\
 cm[((y) * FIX(255.0/219.0) + (ONE_HALF - 16 * FIX(255.0/219.0))) >> SCALEBITS]

#define Y_JPEG_TO_CCIR(y)\
 (((y) * FIX(219.0/255.0) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define C_CCIR_TO_JPEG(y)\
 cm[(((y) - 128) * FIX(127.0/112.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS]
  1102. /* NOTE: the clamp is really necessary! */
  1103. static inline int C_JPEG_TO_CCIR(int y) {
  1104. y = (((y - 128) * FIX(112.0/127.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS);
  1105. if (y < 16)
  1106. y = 16;
  1107. return y;
  1108. }
  1109. #define RGB_TO_Y(r, g, b) \
  1110. ((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
  1111. FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)
  1112. #define RGB_TO_U(r1, g1, b1, shift)\
  1113. (((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
  1114. FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  1115. #define RGB_TO_V(r1, g1, b1, shift)\
  1116. (((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
  1117. FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  1118. #define RGB_TO_Y_CCIR(r, g, b) \
  1119. ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  1120. FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
  1121. #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
  1122. (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
  1123. FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  1124. #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
  1125. (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
  1126. FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
/* Lookup tables mapping luma/chroma samples between CCIR 601 studio
 * range and full-range JPEG levels; filled once by img_convert_init(). */
static uint8_t y_ccir_to_jpeg[256];
static uint8_t y_jpeg_to_ccir[256];
static uint8_t c_ccir_to_jpeg[256];
static uint8_t c_jpeg_to_ccir[256];
/* init various conversion tables */
static void img_convert_init(void)
{
    int i;
    /* cropTbl clamps to 0..255; offset past the negative guard region */
    uint8_t *cm = cropTbl + MAX_NEG_CROP;
    for(i = 0;i < 256; i++) {
        y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
        y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
        c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
        c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
    }
}
  1143. /* apply to each pixel the given table */
  1144. static void img_apply_table(uint8_t *dst, int dst_wrap,
  1145. const uint8_t *src, int src_wrap,
  1146. int width, int height, const uint8_t *table1)
  1147. {
  1148. int n;
  1149. const uint8_t *s;
  1150. uint8_t *d;
  1151. const uint8_t *table;
  1152. table = table1;
  1153. for(;height > 0; height--) {
  1154. s = src;
  1155. d = dst;
  1156. n = width;
  1157. while (n >= 4) {
  1158. d[0] = table[s[0]];
  1159. d[1] = table[s[1]];
  1160. d[2] = table[s[2]];
  1161. d[3] = table[s[3]];
  1162. d += 4;
  1163. s += 4;
  1164. n -= 4;
  1165. }
  1166. while (n > 0) {
  1167. d[0] = table[s[0]];
  1168. d++;
  1169. s++;
  1170. n--;
  1171. }
  1172. dst += dst_wrap;
  1173. src += src_wrap;
  1174. }
  1175. }
  1176. /* XXX: use generic filter ? */
  1177. /* XXX: in most cases, the sampling position is incorrect */
  1178. /* 4x1 -> 1x1 */
  1179. static void shrink41(uint8_t *dst, int dst_wrap,
  1180. const uint8_t *src, int src_wrap,
  1181. int width, int height)
  1182. {
  1183. int w;
  1184. const uint8_t *s;
  1185. uint8_t *d;
  1186. for(;height > 0; height--) {
  1187. s = src;
  1188. d = dst;
  1189. for(w = width;w > 0; w--) {
  1190. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  1191. s += 4;
  1192. d++;
  1193. }
  1194. src += src_wrap;
  1195. dst += dst_wrap;
  1196. }
  1197. }
  1198. /* 2x1 -> 1x1 */
  1199. static void shrink21(uint8_t *dst, int dst_wrap,
  1200. const uint8_t *src, int src_wrap,
  1201. int width, int height)
  1202. {
  1203. int w;
  1204. const uint8_t *s;
  1205. uint8_t *d;
  1206. for(;height > 0; height--) {
  1207. s = src;
  1208. d = dst;
  1209. for(w = width;w > 0; w--) {
  1210. d[0] = (s[0] + s[1]) >> 1;
  1211. s += 2;
  1212. d++;
  1213. }
  1214. src += src_wrap;
  1215. dst += dst_wrap;
  1216. }
  1217. }
  1218. /* 1x2 -> 1x1 */
  1219. static void shrink12(uint8_t *dst, int dst_wrap,
  1220. const uint8_t *src, int src_wrap,
  1221. int width, int height)
  1222. {
  1223. int w;
  1224. uint8_t *d;
  1225. const uint8_t *s1, *s2;
  1226. for(;height > 0; height--) {
  1227. s1 = src;
  1228. s2 = s1 + src_wrap;
  1229. d = dst;
  1230. for(w = width;w >= 4; w-=4) {
  1231. d[0] = (s1[0] + s2[0]) >> 1;
  1232. d[1] = (s1[1] + s2[1]) >> 1;
  1233. d[2] = (s1[2] + s2[2]) >> 1;
  1234. d[3] = (s1[3] + s2[3]) >> 1;
  1235. s1 += 4;
  1236. s2 += 4;
  1237. d += 4;
  1238. }
  1239. for(;w > 0; w--) {
  1240. d[0] = (s1[0] + s2[0]) >> 1;
  1241. s1++;
  1242. s2++;
  1243. d++;
  1244. }
  1245. src += 2 * src_wrap;
  1246. dst += dst_wrap;
  1247. }
  1248. }
  1249. /* 2x2 -> 1x1 */
  1250. void ff_shrink22(uint8_t *dst, int dst_wrap,
  1251. const uint8_t *src, int src_wrap,
  1252. int width, int height)
  1253. {
  1254. int w;
  1255. const uint8_t *s1, *s2;
  1256. uint8_t *d;
  1257. for(;height > 0; height--) {
  1258. s1 = src;
  1259. s2 = s1 + src_wrap;
  1260. d = dst;
  1261. for(w = width;w >= 4; w-=4) {
  1262. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1263. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  1264. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  1265. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  1266. s1 += 8;
  1267. s2 += 8;
  1268. d += 4;
  1269. }
  1270. for(;w > 0; w--) {
  1271. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1272. s1 += 2;
  1273. s2 += 2;
  1274. d++;
  1275. }
  1276. src += 2 * src_wrap;
  1277. dst += dst_wrap;
  1278. }
  1279. }
  1280. /* 4x4 -> 1x1 */
  1281. void ff_shrink44(uint8_t *dst, int dst_wrap,
  1282. const uint8_t *src, int src_wrap,
  1283. int width, int height)
  1284. {
  1285. int w;
  1286. const uint8_t *s1, *s2, *s3, *s4;
  1287. uint8_t *d;
  1288. for(;height > 0; height--) {
  1289. s1 = src;
  1290. s2 = s1 + src_wrap;
  1291. s3 = s2 + src_wrap;
  1292. s4 = s3 + src_wrap;
  1293. d = dst;
  1294. for(w = width;w > 0; w--) {
  1295. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  1296. s2[0] + s2[1] + s2[2] + s2[3] +
  1297. s3[0] + s3[1] + s3[2] + s3[3] +
  1298. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  1299. s1 += 4;
  1300. s2 += 4;
  1301. s3 += 4;
  1302. s4 += 4;
  1303. d++;
  1304. }
  1305. src += 4 * src_wrap;
  1306. dst += dst_wrap;
  1307. }
  1308. }
  1309. /* 8x8 -> 1x1 */
  1310. void ff_shrink88(uint8_t *dst, int dst_wrap,
  1311. const uint8_t *src, int src_wrap,
  1312. int width, int height)
  1313. {
  1314. int w, i;
  1315. for(;height > 0; height--) {
  1316. for(w = width;w > 0; w--) {
  1317. int tmp=0;
  1318. for(i=0; i<8; i++){
  1319. tmp += src[0] + src[1] + src[2] + src[3] + src[4] + src[5] + src[6] + src[7];
  1320. src += src_wrap;
  1321. }
  1322. *(dst++) = (tmp + 32)>>6;
  1323. src += 8 - 8*src_wrap;
  1324. }
  1325. src += 8*src_wrap - 8*width;
  1326. dst += dst_wrap - width;
  1327. }
  1328. }
  1329. static void grow21_line(uint8_t *dst, const uint8_t *src,
  1330. int width)
  1331. {
  1332. int w;
  1333. const uint8_t *s1;
  1334. uint8_t *d;
  1335. s1 = src;
  1336. d = dst;
  1337. for(w = width;w >= 4; w-=4) {
  1338. d[1] = d[0] = s1[0];
  1339. d[3] = d[2] = s1[1];
  1340. s1 += 2;
  1341. d += 4;
  1342. }
  1343. for(;w >= 2; w -= 2) {
  1344. d[1] = d[0] = s1[0];
  1345. s1 ++;
  1346. d += 2;
  1347. }
  1348. /* only needed if width is not a multiple of two */
  1349. /* XXX: veryfy that */
  1350. if (w) {
  1351. d[0] = s1[0];
  1352. }
  1353. }
  1354. static void grow41_line(uint8_t *dst, const uint8_t *src,
  1355. int width)
  1356. {
  1357. int w, v;
  1358. const uint8_t *s1;
  1359. uint8_t *d;
  1360. s1 = src;
  1361. d = dst;
  1362. for(w = width;w >= 4; w-=4) {
  1363. v = s1[0];
  1364. d[0] = v;
  1365. d[1] = v;
  1366. d[2] = v;
  1367. d[3] = v;
  1368. s1 ++;
  1369. d += 4;
  1370. }
  1371. }
  1372. /* 1x1 -> 2x1 */
  1373. static void grow21(uint8_t *dst, int dst_wrap,
  1374. const uint8_t *src, int src_wrap,
  1375. int width, int height)
  1376. {
  1377. for(;height > 0; height--) {
  1378. grow21_line(dst, src, width);
  1379. src += src_wrap;
  1380. dst += dst_wrap;
  1381. }
  1382. }
  1383. /* 1x1 -> 2x2 */
  1384. static void grow22(uint8_t *dst, int dst_wrap,
  1385. const uint8_t *src, int src_wrap,
  1386. int width, int height)
  1387. {
  1388. for(;height > 0; height--) {
  1389. grow21_line(dst, src, width);
  1390. if (height%2)
  1391. src += src_wrap;
  1392. dst += dst_wrap;
  1393. }
  1394. }
  1395. /* 1x1 -> 4x1 */
  1396. static void grow41(uint8_t *dst, int dst_wrap,
  1397. const uint8_t *src, int src_wrap,
  1398. int width, int height)
  1399. {
  1400. for(;height > 0; height--) {
  1401. grow41_line(dst, src, width);
  1402. src += src_wrap;
  1403. dst += dst_wrap;
  1404. }
  1405. }
  1406. /* 1x1 -> 4x4 */
  1407. static void grow44(uint8_t *dst, int dst_wrap,
  1408. const uint8_t *src, int src_wrap,
  1409. int width, int height)
  1410. {
  1411. for(;height > 0; height--) {
  1412. grow41_line(dst, src, width);
  1413. if ((height & 3) == 1)
  1414. src += src_wrap;
  1415. dst += dst_wrap;
  1416. }
  1417. }
  1418. /* 1x2 -> 2x1 */
  1419. static void conv411(uint8_t *dst, int dst_wrap,
  1420. const uint8_t *src, int src_wrap,
  1421. int width, int height)
  1422. {
  1423. int w, c;
  1424. const uint8_t *s1, *s2;
  1425. uint8_t *d;
  1426. width>>=1;
  1427. for(;height > 0; height--) {
  1428. s1 = src;
  1429. s2 = src + src_wrap;
  1430. d = dst;
  1431. for(w = width;w > 0; w--) {
  1432. c = (s1[0] + s2[0]) >> 1;
  1433. d[0] = c;
  1434. d[1] = c;
  1435. s1++;
  1436. s2++;
  1437. d += 2;
  1438. }
  1439. src += src_wrap * 2;
  1440. dst += dst_wrap;
  1441. }
  1442. }
  1443. /* XXX: add jpeg quantize code */
  1444. #define TRANSP_INDEX (6*6*6)
  1445. /* this is maybe slow, but allows for extensions */
  1446. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1447. {
  1448. return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
  1449. }
  1450. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  1451. {
  1452. uint32_t *pal;
  1453. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  1454. int i, r, g, b;
  1455. pal = (uint32_t *)palette;
  1456. i = 0;
  1457. for(r = 0; r < 6; r++) {
  1458. for(g = 0; g < 6; g++) {
  1459. for(b = 0; b < 6; b++) {
  1460. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  1461. (pal_value[g] << 8) | pal_value[b];
  1462. }
  1463. }
  1464. }
  1465. if (has_alpha)
  1466. pal[i++] = 0;
  1467. while (i < 256)
  1468. pal[i++] = 0xff000000;
  1469. }
  1470. /* copy bit n to bits 0 ... n - 1 */
  1471. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1472. {
  1473. int mask;
  1474. mask = (1 << n) - 1;
  1475. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1476. }
/* Per-format pixel accessor macros; each set parameterizes one
 * inclusion of imgconvert_template.h, which generates the
 * <fmt>_to_* / *_to_<fmt> conversion functions named after RGB_NAME. */
/* rgb555 handling (15-bit, 1 alpha bit in the MSB) */
#define RGB_NAME rgb555
#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (10 - 3), 3);\
    g = bitcopy_n(v >> (5 - 3), 3);\
    b = bitcopy_n(v << 3, 3);\
}
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (10 - 3), 3);\
    g = bitcopy_n(v >> (5 - 3), 3);\
    b = bitcopy_n(v << 3, 3);\
    a = (-(v >> 15)) & 0xff;\
}
#define RGBA_OUT(d, r, g, b, a)\
{\
    ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3) | \
        ((a << 8) & 0x8000);\
}
#define BPP 2
#include "imgconvert_template.h"
/* rgb565 handling (16-bit, no alpha) */
#define RGB_NAME rgb565
#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (11 - 3), 3);\
    g = bitcopy_n(v >> (5 - 2), 2);\
    b = bitcopy_n(v << 3, 3);\
}
#define RGB_OUT(d, r, g, b)\
{\
    ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
}
#define BPP 2
#include "imgconvert_template.h"
/* bgr24 handling (3 bytes per pixel, blue first) */
#define RGB_NAME bgr24
#define RGB_IN(r, g, b, s)\
{\
    b = (s)[0];\
    g = (s)[1];\
    r = (s)[2];\
}
#define RGB_OUT(d, r, g, b)\
{\
    (d)[0] = b;\
    (d)[1] = g;\
    (d)[2] = r;\
}
#define BPP 3
#include "imgconvert_template.h"
#undef RGB_IN
#undef RGB_OUT
#undef BPP
/* rgb24 handling (3 bytes per pixel, red first) */
#define RGB_NAME rgb24
#define FMT_RGB24
#define RGB_IN(r, g, b, s)\
{\
    r = (s)[0];\
    g = (s)[1];\
    b = (s)[2];\
}
#define RGB_OUT(d, r, g, b)\
{\
    (d)[0] = r;\
    (d)[1] = g;\
    (d)[2] = b;\
}
#define BPP 3
#include "imgconvert_template.h"
/* rgba32 handling (packed ARGB in a 32-bit word) */
#define RGB_NAME rgba32
#define FMT_RGBA32
#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}
#define RGBA_OUT(d, r, g, b, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
}
#define BPP 4
#include "imgconvert_template.h"
/* Expand a 1 bit-per-pixel image to 8bpp gray: each input bit becomes a
 * full byte (1 -> 0xff, 0 -> 0x00 after XOR with xor_mask, which selects
 * the monowhite/monoblack polarity). Bits are consumed MSB first. */
static void mono_to_gray(AVPicture *dst, const AVPicture *src,
                         int width, int height, int xor_mask)
{
    const unsigned char *p;
    unsigned char *q;
    int v, dst_wrap, src_wrap;
    int y, w;
    p = src->data[0];
    /* source rows occupy (width + 7) / 8 bytes */
    src_wrap = src->linesize[0] - ((width + 7) >> 3);
    q = dst->data[0];
    dst_wrap = dst->linesize[0] - width;
    for(y=0;y<height;y++) {
        w = width;
        while (w >= 8) {
            v = *p++ ^ xor_mask;
            /* negating a 0/1 bit yields 0x00 or 0xff (two's complement) */
            q[0] = -(v >> 7);
            q[1] = -((v >> 6) & 1);
            q[2] = -((v >> 5) & 1);
            q[3] = -((v >> 4) & 1);
            q[4] = -((v >> 3) & 1);
            q[5] = -((v >> 2) & 1);
            q[6] = -((v >> 1) & 1);
            q[7] = -((v >> 0) & 1);
            w -= 8;
            q += 8;
        }
        if (w > 0) {
            /* trailing partial byte: shift out bits MSB first */
            v = *p++ ^ xor_mask;
            do {
                q[0] = -((v >> 7) & 1);
                q++;
                v <<= 1;
            } while (--w);
        }
        p += src_wrap;
        q += dst_wrap;
    }
}
/* 1bpp (white = 0) -> 8bpp gray; the 0xff XOR inverts the polarity. */
static void monowhite_to_gray(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    mono_to_gray(dst, src, width, height, 0xff);
}
/* 1bpp (black = 0) -> 8bpp gray; no polarity inversion needed. */
static void monoblack_to_gray(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    mono_to_gray(dst, src, width, height, 0x00);
}
/* Pack an 8bpp gray image down to 1 bit per pixel: only the MSB of each
 * gray byte survives (i.e. a >= 128 threshold). xor_mask selects the
 * monowhite/monoblack polarity of the output bits. */
static void gray_to_mono(AVPicture *dst, const AVPicture *src,
                         int width, int height, int xor_mask)
{
    int n;
    const uint8_t *s;
    uint8_t *d;
    int j, b, v, n1, src_wrap, dst_wrap, y;
    s = src->data[0];
    src_wrap = src->linesize[0] - width;
    d = dst->data[0];
    /* destination rows occupy (width + 7) / 8 bytes */
    dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
    for(y=0;y<height;y++) {
        n = width;
        while (n >= 8) {
            v = 0;
            /* accumulate 8 MSBs into one byte, first pixel ends up in bit 7 */
            for(j=0;j<8;j++) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
            }
            d[0] = v ^ xor_mask;
            d++;
            n -= 8;
        }
        if (n > 0) {
            /* trailing partial byte: pack the remaining n1 pixels ... */
            n1 = n;
            v = 0;
            while (n > 0) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
                n--;
            }
            /* ... then left-align them so the first pixel is again bit 7 */
            d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
            d++;
        }
        s += src_wrap;
        d += dst_wrap;
    }
}
/* 8bpp gray -> 1bpp (white = 0); the 0xff XOR inverts the polarity. */
static void gray_to_monowhite(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    gray_to_mono(dst, src, width, height, 0xff);
}
/* 8bpp gray -> 1bpp (black = 0); no polarity inversion needed. */
static void gray_to_monoblack(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    gray_to_mono(dst, src, width, height, 0x00);
}
/* One direct-conversion entry; a NULL .convert means img_convert() must
 * route through an intermediate format instead. */
typedef struct ConvertEntry {
    void (*convert)(AVPicture *dst,
                    const AVPicture *src, int width, int height);
} ConvertEntry;
/* Add each new convertion function in this table. In order to be able
   to convert from any format to any format, the following constraints
   must be satisfied:
   - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
   - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
   - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGBA32
   - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
     PIX_FMT_RGB24.
   - PIX_FMT_422 must convert to and from PIX_FMT_422P.
   The other conversion functions are just optimisations for common cases.
*/
/* Indexed [source format][destination format]; unlisted pairs are
 * zero-initialized (no direct converter). */
static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
    [PIX_FMT_YUV420P] = {
        [PIX_FMT_YUV422] = {
            .convert = yuv420p_to_yuv422,
        },
        [PIX_FMT_RGB555] = {
            .convert = yuv420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuv420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuv420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuv420p_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = yuv420p_to_rgba32
        },
        [PIX_FMT_UYVY422] = {
            .convert = yuv420p_to_uyvy422,
        },
    },
    [PIX_FMT_YUV422P] = {
        [PIX_FMT_YUV422] = {
            .convert = yuv422p_to_yuv422,
        },
        [PIX_FMT_UYVY422] = {
            .convert = yuv422p_to_uyvy422,
        },
    },
    [PIX_FMT_YUV444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuv444p_to_rgb24
        },
    },
    [PIX_FMT_YUVJ420P] = {
        [PIX_FMT_RGB555] = {
            .convert = yuvj420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuvj420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuvj420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuvj420p_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = yuvj420p_to_rgba32
        },
    },
    [PIX_FMT_YUVJ444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuvj444p_to_rgb24
        },
    },
    [PIX_FMT_YUV422] = {
        [PIX_FMT_YUV420P] = {
            .convert = yuv422_to_yuv420p,
        },
        [PIX_FMT_YUV422P] = {
            .convert = yuv422_to_yuv422p,
        },
    },
    [PIX_FMT_UYVY422] = {
        [PIX_FMT_YUV420P] = {
            .convert = uyvy422_to_yuv420p,
        },
        [PIX_FMT_YUV422P] = {
            .convert = uyvy422_to_yuv422p,
        },
    },
    [PIX_FMT_RGB24] = {
        [PIX_FMT_YUV420P] = {
            .convert = rgb24_to_yuv420p
        },
        [PIX_FMT_RGB565] = {
            .convert = rgb24_to_rgb565
        },
        [PIX_FMT_RGB555] = {
            .convert = rgb24_to_rgb555
        },
        [PIX_FMT_RGBA32] = {
            .convert = rgb24_to_rgba32
        },
        [PIX_FMT_BGR24] = {
            .convert = rgb24_to_bgr24
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb24_to_gray
        },
        [PIX_FMT_PAL8] = {
            .convert = rgb24_to_pal8
        },
        [PIX_FMT_YUV444P] = {
            .convert = rgb24_to_yuv444p
        },
        [PIX_FMT_YUVJ420P] = {
            .convert = rgb24_to_yuvj420p
        },
        [PIX_FMT_YUVJ444P] = {
            .convert = rgb24_to_yuvj444p
        },
    },
    [PIX_FMT_RGBA32] = {
        [PIX_FMT_RGB24] = {
            .convert = rgba32_to_rgb24
        },
        [PIX_FMT_RGB555] = {
            .convert = rgba32_to_rgb555
        },
        [PIX_FMT_PAL8] = {
            .convert = rgba32_to_pal8
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgba32_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgba32_to_gray
        },
    },
    [PIX_FMT_BGR24] = {
        [PIX_FMT_RGB24] = {
            .convert = bgr24_to_rgb24
        },
        [PIX_FMT_YUV420P] = {
            .convert = bgr24_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = bgr24_to_gray
        },
    },
    [PIX_FMT_RGB555] = {
        [PIX_FMT_RGB24] = {
            .convert = rgb555_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = rgb555_to_rgba32
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb555_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb555_to_gray
        },
    },
    [PIX_FMT_RGB565] = {
        [PIX_FMT_RGB24] = {
            .convert = rgb565_to_rgb24
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb565_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb565_to_gray
        },
    },
    [PIX_FMT_GRAY8] = {
        [PIX_FMT_RGB555] = {
            .convert = gray_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = gray_to_rgb565
        },
        [PIX_FMT_RGB24] = {
            .convert = gray_to_rgb24
        },
        [PIX_FMT_BGR24] = {
            .convert = gray_to_bgr24
        },
        [PIX_FMT_RGBA32] = {
            .convert = gray_to_rgba32
        },
        [PIX_FMT_MONOWHITE] = {
            .convert = gray_to_monowhite
        },
        [PIX_FMT_MONOBLACK] = {
            .convert = gray_to_monoblack
        },
    },
    [PIX_FMT_MONOWHITE] = {
        [PIX_FMT_GRAY8] = {
            .convert = monowhite_to_gray
        },
    },
    [PIX_FMT_MONOBLACK] = {
        [PIX_FMT_GRAY8] = {
            .convert = monoblack_to_gray
        },
    },
    [PIX_FMT_PAL8] = {
        [PIX_FMT_RGB555] = {
            .convert = pal8_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = pal8_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = pal8_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = pal8_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = pal8_to_rgba32
        },
    },
    [PIX_FMT_UYVY411] = {
        [PIX_FMT_YUV411P] = {
            .convert = uyvy411_to_yuv411p,
        },
    },
};
  1905. int avpicture_alloc(AVPicture *picture,
  1906. int pix_fmt, int width, int height)
  1907. {
  1908. int size;
  1909. void *ptr;
  1910. size = avpicture_get_size(pix_fmt, width, height);
  1911. if(size<0)
  1912. goto fail;
  1913. ptr = av_malloc(size);
  1914. if (!ptr)
  1915. goto fail;
  1916. avpicture_fill(picture, ptr, pix_fmt, width, height);
  1917. return 0;
  1918. fail:
  1919. memset(picture, 0, sizeof(AVPicture));
  1920. return -1;
  1921. }
/* Release a picture allocated by avpicture_alloc(); all planes point
 * into the single buffer owned by data[0]. */
void avpicture_free(AVPicture *picture)
{
    av_free(picture->data[0]);
}
  1926. /* return true if yuv planar */
  1927. static inline int is_yuv_planar(const PixFmtInfo *ps)
  1928. {
  1929. return (ps->color_type == FF_COLOR_YUV ||
  1930. ps->color_type == FF_COLOR_YUV_JPEG) &&
  1931. ps->pixel_type == FF_PIXEL_PLANAR;
  1932. }
  1933. /**
  1934. * Crop image top and left side
  1935. */
  1936. int img_crop(AVPicture *dst, const AVPicture *src,
  1937. int pix_fmt, int top_band, int left_band)
  1938. {
  1939. int y_shift;
  1940. int x_shift;
  1941. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || !is_yuv_planar(&pix_fmt_info[pix_fmt]))
  1942. return -1;
  1943. y_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  1944. x_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  1945. dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
  1946. dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift);
  1947. dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift);
  1948. dst->linesize[0] = src->linesize[0];
  1949. dst->linesize[1] = src->linesize[1];
  1950. dst->linesize[2] = src->linesize[2];
  1951. return 0;
  1952. }
/**
 * Pad image.
 * Writes 'color' (one value per plane) into the top/bottom/left/right
 * border regions of dst and, when src is non-NULL, copies src's rows
 * into the interior. Only planar YUV formats are supported.
 * Returns 0 on success, -1 for unsupported formats.
 */
int img_pad(AVPicture *dst, const AVPicture *src, int height, int width, int pix_fmt,
            int padtop, int padbottom, int padleft, int padright, int *color)
{
    uint8_t *optr, *iptr;
    int y_shift;
    int x_shift;
    int yheight;
    int i, y;
    if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || !is_yuv_planar(&pix_fmt_info[pix_fmt]))
        return -1;
    for (i = 0; i < 3; i++) {
        /* luma plane (i == 0) is never subsampled */
        x_shift = i ? pix_fmt_info[pix_fmt].x_chroma_shift : 0;
        y_shift = i ? pix_fmt_info[pix_fmt].y_chroma_shift : 0;
        if (padtop || padleft) {
            /* top border plus the left border of the first interior row,
               filled in one contiguous memset */
            memset(dst->data[i], color[i], dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift));
        }
        if (padleft || padright || src) {
            if (src) { /* first line */
                iptr = src->data[i];
                optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift);
                /* NOTE(review): copies linesize bytes, which may exceed the
                   visible width — assumes dst rows are at least that large;
                   confirm against callers. */
                memcpy(optr, iptr, src->linesize[i]);
                iptr += src->linesize[i];
            }
            /* optr now points at the right border of one row, which is
               contiguous with the left border of the next row */
            optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) + (dst->linesize[i] - (padright >> x_shift));
            /* NOTE(review): the -1 excludes the first interior row handled
               above — verify the bottom interior row accounting. */
            yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
            for (y = 0; y < yheight; y++) {
                /* fill right border of row y and left border of row y+1 */
                memset(optr, color[i], (padleft + padright) >> x_shift);
                if (src) {
                    memcpy(optr + ((padleft + padright) >> x_shift), iptr, src->linesize[i]);
                    iptr += src->linesize[i];
                }
                optr += dst->linesize[i];
            }
        }
        if (padbottom || padright) {
            /* right border of the last interior row plus the bottom border */
            optr = dst->data[i] + dst->linesize[i] * ((height - padbottom) >> y_shift) - (padright >> x_shift);
            memset(optr, color[i], dst->linesize[i] * (padbottom >> y_shift) + (padright >> x_shift));
        }
    }
    return 0;
}
  1997. #ifndef CONFIG_SWSCALER
  1998. /* XXX: always use linesize. Return -1 if not supported */
  1999. int img_convert(AVPicture *dst, int dst_pix_fmt,
  2000. const AVPicture *src, int src_pix_fmt,
  2001. int src_width, int src_height)
  2002. {
  2003. static int inited;
  2004. int i, ret, dst_width, dst_height, int_pix_fmt;
  2005. const PixFmtInfo *src_pix, *dst_pix;
  2006. const ConvertEntry *ce;
  2007. AVPicture tmp1, *tmp = &tmp1;
  2008. if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
  2009. dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
  2010. return -1;
  2011. if (src_width <= 0 || src_height <= 0)
  2012. return 0;
  2013. if (!inited) {
  2014. inited = 1;
  2015. img_convert_init();
  2016. }
  2017. dst_width = src_width;
  2018. dst_height = src_height;
  2019. dst_pix = &pix_fmt_info[dst_pix_fmt];
  2020. src_pix = &pix_fmt_info[src_pix_fmt];
  2021. if (src_pix_fmt == dst_pix_fmt) {
  2022. /* no conversion needed: just copy */
  2023. img_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
  2024. return 0;
  2025. }
  2026. ce = &convert_table[src_pix_fmt][dst_pix_fmt];
  2027. if (ce->convert) {
  2028. /* specific conversion routine */
  2029. ce->convert(dst, src, dst_width, dst_height);
  2030. return 0;
  2031. }
  2032. /* gray to YUV */
  2033. if (is_yuv_planar(dst_pix) &&
  2034. src_pix_fmt == PIX_FMT_GRAY8) {
  2035. int w, h, y;
  2036. uint8_t *d;
  2037. if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
  2038. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2039. src->data[0], src->linesize[0],
  2040. dst_width, dst_height);
  2041. } else {
  2042. img_apply_table(dst->data[0], dst->linesize[0],
  2043. src->data[0], src->linesize[0],
  2044. dst_width, dst_height,
  2045. y_jpeg_to_ccir);
  2046. }
  2047. /* fill U and V with 128 */
  2048. w = dst_width;
  2049. h = dst_height;
  2050. w >>= dst_pix->x_chroma_shift;
  2051. h >>= dst_pix->y_chroma_shift;
  2052. for(i = 1; i <= 2; i++) {
  2053. d = dst->data[i];
  2054. for(y = 0; y< h; y++) {
  2055. memset(d, 128, w);
  2056. d += dst->linesize[i];
  2057. }
  2058. }
  2059. return 0;
  2060. }
  2061. /* YUV to gray */
  2062. if (is_yuv_planar(src_pix) &&
  2063. dst_pix_fmt == PIX_FMT_GRAY8) {
  2064. if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
  2065. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2066. src->data[0], src->linesize[0],
  2067. dst_width, dst_height);
  2068. } else {
  2069. img_apply_table(dst->data[0], dst->linesize[0],
  2070. src->data[0], src->linesize[0],
  2071. dst_width, dst_height,
  2072. y_ccir_to_jpeg);
  2073. }
  2074. return 0;
  2075. }
  2076. /* YUV to YUV planar */
  2077. if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
  2078. int x_shift, y_shift, w, h, xy_shift;
  2079. void (*resize_func)(uint8_t *dst, int dst_wrap,
  2080. const uint8_t *src, int src_wrap,
  2081. int width, int height);
  2082. /* compute chroma size of the smallest dimensions */
  2083. w = dst_width;
  2084. h = dst_height;
  2085. if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
  2086. w >>= dst_pix->x_chroma_shift;
  2087. else
  2088. w >>= src_pix->x_chroma_shift;
  2089. if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
  2090. h >>= dst_pix->y_chroma_shift;
  2091. else
  2092. h >>= src_pix->y_chroma_shift;
  2093. x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
  2094. y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
  2095. xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
  2096. /* there must be filters for conversion at least from and to
  2097. YUV444 format */
  2098. switch(xy_shift) {
  2099. case 0x00:
  2100. resize_func = ff_img_copy_plane;
  2101. break;
  2102. case 0x10:
  2103. resize_func = shrink21;
  2104. break;
  2105. case 0x20:
  2106. resize_func = shrink41;
  2107. break;
  2108. case 0x01:
  2109. resize_func = shrink12;
  2110. break;
  2111. case 0x11:
  2112. resize_func = ff_shrink22;
  2113. break;
  2114. case 0x22:
  2115. resize_func = ff_shrink44;
  2116. break;
  2117. case 0xf0:
  2118. resize_func = grow21;
  2119. break;
  2120. case 0xe0:
  2121. resize_func = grow41;
  2122. break;
  2123. case 0xff:
  2124. resize_func = grow22;
  2125. break;
  2126. case 0xee:
  2127. resize_func = grow44;
  2128. break;
  2129. case 0xf1:
  2130. resize_func = conv411;
  2131. break;
  2132. default:
  2133. /* currently not handled */
  2134. goto no_chroma_filter;
  2135. }
  2136. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2137. src->data[0], src->linesize[0],
  2138. dst_width, dst_height);
  2139. for(i = 1;i <= 2; i++)
  2140. resize_func(dst->data[i], dst->linesize[i],
  2141. src->data[i], src->linesize[i],
  2142. dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
  2143. /* if yuv color space conversion is needed, we do it here on
  2144. the destination image */
  2145. if (dst_pix->color_type != src_pix->color_type) {
  2146. const uint8_t *y_table, *c_table;
  2147. if (dst_pix->color_type == FF_COLOR_YUV) {
  2148. y_table = y_jpeg_to_ccir;
  2149. c_table = c_jpeg_to_ccir;
  2150. } else {
  2151. y_table = y_ccir_to_jpeg;
  2152. c_table = c_ccir_to_jpeg;
  2153. }
  2154. img_apply_table(dst->data[0], dst->linesize[0],
  2155. dst->data[0], dst->linesize[0],
  2156. dst_width, dst_height,
  2157. y_table);
  2158. for(i = 1;i <= 2; i++)
  2159. img_apply_table(dst->data[i], dst->linesize[i],
  2160. dst->data[i], dst->linesize[i],
  2161. dst_width>>dst_pix->x_chroma_shift,
  2162. dst_height>>dst_pix->y_chroma_shift,
  2163. c_table);
  2164. }
  2165. return 0;
  2166. }
  2167. no_chroma_filter:
  2168. /* try to use an intermediate format */
  2169. if (src_pix_fmt == PIX_FMT_YUV422 ||
  2170. dst_pix_fmt == PIX_FMT_YUV422) {
  2171. /* specific case: convert to YUV422P first */
  2172. int_pix_fmt = PIX_FMT_YUV422P;
  2173. } else if (src_pix_fmt == PIX_FMT_UYVY422 ||
  2174. dst_pix_fmt == PIX_FMT_UYVY422) {
  2175. /* specific case: convert to YUV422P first */
  2176. int_pix_fmt = PIX_FMT_YUV422P;
  2177. } else if (src_pix_fmt == PIX_FMT_UYVY411 ||
  2178. dst_pix_fmt == PIX_FMT_UYVY411) {
  2179. /* specific case: convert to YUV411P first */
  2180. int_pix_fmt = PIX_FMT_YUV411P;
  2181. } else if ((src_pix->color_type == FF_COLOR_GRAY &&
  2182. src_pix_fmt != PIX_FMT_GRAY8) ||
  2183. (dst_pix->color_type == FF_COLOR_GRAY &&
  2184. dst_pix_fmt != PIX_FMT_GRAY8)) {
  2185. /* gray8 is the normalized format */
  2186. int_pix_fmt = PIX_FMT_GRAY8;
  2187. } else if ((is_yuv_planar(src_pix) &&
  2188. src_pix_fmt != PIX_FMT_YUV444P &&
  2189. src_pix_fmt != PIX_FMT_YUVJ444P)) {
  2190. /* yuv444 is the normalized format */
  2191. if (src_pix->color_type == FF_COLOR_YUV_JPEG)
  2192. int_pix_fmt = PIX_FMT_YUVJ444P;
  2193. else
  2194. int_pix_fmt = PIX_FMT_YUV444P;
  2195. } else if ((is_yuv_planar(dst_pix) &&
  2196. dst_pix_fmt != PIX_FMT_YUV444P &&
  2197. dst_pix_fmt != PIX_FMT_YUVJ444P)) {
  2198. /* yuv444 is the normalized format */
  2199. if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
  2200. int_pix_fmt = PIX_FMT_YUVJ444P;
  2201. else
  2202. int_pix_fmt = PIX_FMT_YUV444P;
  2203. } else {
  2204. /* the two formats are rgb or gray8 or yuv[j]444p */
  2205. if (src_pix->is_alpha && dst_pix->is_alpha)
  2206. int_pix_fmt = PIX_FMT_RGBA32;
  2207. else
  2208. int_pix_fmt = PIX_FMT_RGB24;
  2209. }
  2210. if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2211. return -1;
  2212. ret = -1;
  2213. if (img_convert(tmp, int_pix_fmt,
  2214. src, src_pix_fmt, src_width, src_height) < 0)
  2215. goto fail1;
  2216. if (img_convert(dst, dst_pix_fmt,
  2217. tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2218. goto fail1;
  2219. ret = 0;
  2220. fail1:
  2221. avpicture_free(tmp);
  2222. return ret;
  2223. }
  2224. #endif
  2225. /* NOTE: we scan all the pixels to have an exact information */
  2226. static int get_alpha_info_pal8(const AVPicture *src, int width, int height)
  2227. {
  2228. const unsigned char *p;
  2229. int src_wrap, ret, x, y;
  2230. unsigned int a;
  2231. uint32_t *palette = (uint32_t *)src->data[1];
  2232. p = src->data[0];
  2233. src_wrap = src->linesize[0] - width;
  2234. ret = 0;
  2235. for(y=0;y<height;y++) {
  2236. for(x=0;x<width;x++) {
  2237. a = palette[p[0]] >> 24;
  2238. if (a == 0x00) {
  2239. ret |= FF_ALPHA_TRANSP;
  2240. } else if (a != 0xff) {
  2241. ret |= FF_ALPHA_SEMI_TRANSP;
  2242. }
  2243. p++;
  2244. }
  2245. p += src_wrap;
  2246. }
  2247. return ret;
  2248. }
  2249. /**
  2250. * Tell if an image really has transparent alpha values.
  2251. * @return ored mask of FF_ALPHA_xxx constants
  2252. */
  2253. int img_get_alpha_info(const AVPicture *src,
  2254. int pix_fmt, int width, int height)
  2255. {
  2256. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  2257. int ret;
  2258. pf = &pix_fmt_info[pix_fmt];
  2259. /* no alpha can be represented in format */
  2260. if (!pf->is_alpha)
  2261. return 0;
  2262. switch(pix_fmt) {
  2263. case PIX_FMT_RGBA32:
  2264. ret = get_alpha_info_rgba32(src, width, height);
  2265. break;
  2266. case PIX_FMT_RGB555:
  2267. ret = get_alpha_info_rgb555(src, width, height);
  2268. break;
  2269. case PIX_FMT_PAL8:
  2270. ret = get_alpha_info_pal8(src, width, height);
  2271. break;
  2272. default:
  2273. /* we do not know, so everything is indicated */
  2274. ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
  2275. break;
  2276. }
  2277. return ret;
  2278. }
#ifdef HAVE_MMX
/* MMX cores of the [-1 4 2 4 -1] / 8 vertical deinterlace filter.
   Each expansion processes 4 luma pixels.  Both macros expect:
     mm7 == 0                 (for byte -> word unpacking)
     mm6 == rounder {4,4,4,4} (rounding term added before the >> 3)
   set up by the caller (see deinterlace_line*). */

/* In-place variant: before lum_m2 is overwritten with the filtered
   result, its original 4 bytes are saved into lum_m4[0] (mm2 still
   holds the packed lum_m2 bytes at that point). */
#define DEINT_INPLACE_LINE_LUM \
                    movd_m2r(lum_m4[0],mm0);\
                    movd_m2r(lum_m3[0],mm1);\
                    movd_m2r(lum_m2[0],mm2);\
                    movd_m2r(lum_m1[0],mm3);\
                    movd_m2r(lum[0],mm4);\
                    punpcklbw_r2r(mm7,mm0);\
                    movd_r2m(mm2,lum_m4[0]);\
                    punpcklbw_r2r(mm7,mm1);\
                    punpcklbw_r2r(mm7,mm2);\
                    punpcklbw_r2r(mm7,mm3);\
                    punpcklbw_r2r(mm7,mm4);\
                    paddw_r2r(mm3,mm1);\
                    psllw_i2r(1,mm2);\
                    paddw_r2r(mm4,mm0);\
                    psllw_i2r(2,mm1);\
                    paddw_r2r(mm6,mm2);\
                    paddw_r2r(mm2,mm1);\
                    psubusw_r2r(mm0,mm1);\
                    psrlw_i2r(3,mm1);\
                    packuswb_r2r(mm7,mm1);\
                    movd_r2m(mm1,lum_m2[0]);

/* Out-of-place variant: identical arithmetic, result written to dst[0];
   computes (4*(m3+m1) + 2*m2 + 4 - (m4+m0)) >> 3 with unsigned
   saturation (psubusw) and a final pack back to bytes. */
#define DEINT_LINE_LUM \
                    movd_m2r(lum_m4[0],mm0);\
                    movd_m2r(lum_m3[0],mm1);\
                    movd_m2r(lum_m2[0],mm2);\
                    movd_m2r(lum_m1[0],mm3);\
                    movd_m2r(lum[0],mm4);\
                    punpcklbw_r2r(mm7,mm0);\
                    punpcklbw_r2r(mm7,mm1);\
                    punpcklbw_r2r(mm7,mm2);\
                    punpcklbw_r2r(mm7,mm3);\
                    punpcklbw_r2r(mm7,mm4);\
                    paddw_r2r(mm3,mm1);\
                    psllw_i2r(1,mm2);\
                    paddw_r2r(mm4,mm0);\
                    psllw_i2r(2,mm1);\
                    paddw_r2r(mm6,mm2);\
                    paddw_r2r(mm2,mm1);\
                    psubusw_r2r(mm0,mm1);\
                    psrlw_i2r(3,mm1);\
                    packuswb_r2r(mm7,mm1);\
                    movd_r2m(mm1,dst[0]);
#endif
/* filter parameters: [-1 4 2 4 -1] // 8 */
static void deinterlace_line(uint8_t *dst,
                             const uint8_t *lum_m4, const uint8_t *lum_m3,
                             const uint8_t *lum_m2, const uint8_t *lum_m1,
                             const uint8_t *lum,
                             int size)
{
#ifndef HAVE_MMX
    /* crop table clamps the filter output to the [0, 255] byte range */
    uint8_t *cm = cropTbl + MAX_NEG_CROP;
    int sum;

    for(;size > 0;size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        /* +4 rounds to nearest before the /8 of the filter kernel */
        dst[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
        dst++;
    }
#else
    {
        /* set up the constants expected by DEINT_LINE_LUM:
           mm6 = rounding term (4 in each 16-bit lane), mm7 = 0 */
        mmx_t rounder;
        rounder.uw[0]=4;
        rounder.uw[1]=4;
        rounder.uw[2]=4;
        rounder.uw[3]=4;
        pxor_r2r(mm7,mm7);
        movq_m2r(rounder,mm6);
    }
    /* NOTE(review): MMX path handles 4 pixels per iteration and leaves
       any size % 4 remainder unfiltered — appears to assume callers
       pass multiple-of-4 widths; confirm for chroma planes */
    for (;size > 3; size-=4) {
        DEINT_LINE_LUM
        lum_m4+=4;
        lum_m3+=4;
        lum_m2+=4;
        lum_m1+=4;
        lum+=4;
        dst+=4;
    }
#endif
}
/* Same [-1 4 2 4 -1]/8 filter as deinterlace_line(), but operating
   in place: the result overwrites lum_m2, and lum_m4 is recycled to
   hold the original (unfiltered) lum_m2 values so the caller can keep
   sliding its line window over already-processed data. */
static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
                                     int size)
{
#ifndef HAVE_MMX
    /* crop table clamps the filter output to the [0, 255] byte range */
    uint8_t *cm = cropTbl + MAX_NEG_CROP;
    int sum;

    for(;size > 0;size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        /* save the original pixel before it is overwritten below */
        lum_m4[0]=lum_m2[0];
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        /* +4 rounds to nearest before the /8 of the filter kernel */
        lum_m2[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
    }
#else
    {
        /* set up the constants expected by DEINT_INPLACE_LINE_LUM:
           mm6 = rounding term (4 in each 16-bit lane), mm7 = 0 */
        mmx_t rounder;
        rounder.uw[0]=4;
        rounder.uw[1]=4;
        rounder.uw[2]=4;
        rounder.uw[3]=4;
        pxor_r2r(mm7,mm7);
        movq_m2r(rounder,mm6);
    }
    /* NOTE(review): 4 pixels per iteration; any size % 4 remainder is
       left unfiltered — same multiple-of-4 width assumption as
       deinterlace_line(), confirm for chroma planes */
    for (;size > 3; size-=4) {
        DEINT_INPLACE_LINE_LUM
        lum_m4+=4;
        lum_m3+=4;
        lum_m2+=4;
        lum_m1+=4;
        lum+=4;
    }
#endif
}
/* deinterlacing : 2 temporal taps, 3 spatial taps linear filter. The
   top field is copied as is, but the bottom field is deinterlaced
   against the top field. */
static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
                                     const uint8_t *src1, int src_wrap,
                                     int width, int height)
{
    const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
    int y;

    /* prime the 5-line sliding window; the two lines "above" the first
       row do not exist, so the first row is duplicated for them */
    src_m2 = src1;
    src_m1 = src1;
    src_0=&src_m1[src_wrap];
    src_p1=&src_0[src_wrap];
    src_p2=&src_p1[src_wrap];
    for(y=0;y<(height-2);y+=2) {
        /* top-field line is copied unchanged */
        memcpy(dst,src_m1,width);
        dst += dst_wrap;
        /* bottom-field line is rebuilt from the surrounding 5 lines */
        deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
        /* slide the window down by two source lines */
        src_m2 = src_0;
        src_m1 = src_p1;
        src_0 = src_p2;
        src_p1 += 2*src_wrap;
        src_p2 += 2*src_wrap;
        dst += dst_wrap;
    }
    /* last line pair: no lines exist below, so the last available line
       is reused for the missing taps */
    memcpy(dst,src_m1,width);
    dst += dst_wrap;
    /* do last line */
    deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
}
  2439. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  2440. int width, int height)
  2441. {
  2442. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  2443. int y;
  2444. uint8_t *buf;
  2445. buf = (uint8_t*)av_malloc(width);
  2446. src_m1 = src1;
  2447. memcpy(buf,src_m1,width);
  2448. src_0=&src_m1[src_wrap];
  2449. src_p1=&src_0[src_wrap];
  2450. src_p2=&src_p1[src_wrap];
  2451. for(y=0;y<(height-2);y+=2) {
  2452. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  2453. src_m1 = src_p1;
  2454. src_0 = src_p2;
  2455. src_p1 += 2*src_wrap;
  2456. src_p2 += 2*src_wrap;
  2457. }
  2458. /* do last line */
  2459. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  2460. av_free(buf);
  2461. }
  2462. /* deinterlace - if not supported return -1 */
  2463. int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
  2464. int pix_fmt, int width, int height)
  2465. {
  2466. int i;
  2467. if (pix_fmt != PIX_FMT_YUV420P &&
  2468. pix_fmt != PIX_FMT_YUV422P &&
  2469. pix_fmt != PIX_FMT_YUV444P &&
  2470. pix_fmt != PIX_FMT_YUV411P)
  2471. return -1;
  2472. if ((width & 3) != 0 || (height & 3) != 0)
  2473. return -1;
  2474. for(i=0;i<3;i++) {
  2475. if (i == 1) {
  2476. switch(pix_fmt) {
  2477. case PIX_FMT_YUV420P:
  2478. width >>= 1;
  2479. height >>= 1;
  2480. break;
  2481. case PIX_FMT_YUV422P:
  2482. width >>= 1;
  2483. break;
  2484. case PIX_FMT_YUV411P:
  2485. width >>= 2;
  2486. break;
  2487. default:
  2488. break;
  2489. }
  2490. }
  2491. if (src == dst) {
  2492. deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i],
  2493. width, height);
  2494. } else {
  2495. deinterlace_bottom_field(dst->data[i],dst->linesize[i],
  2496. src->data[i], src->linesize[i],
  2497. width, height);
  2498. }
  2499. }
  2500. #ifdef HAVE_MMX
  2501. emms();
  2502. #endif
  2503. return 0;
  2504. }
  2505. #undef FIX