  1. /*
  2. * Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file imgconvert.c
  23. * Misc image conversion routines.
  24. */
  25. /* TODO:
  26. * - write 'ffimg' program to test all the image related stuff
  27. * - move all api to slice based system
  28. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  29. */
  30. #include "avcodec.h"
  31. #include "dsputil.h"
  32. #ifdef USE_FASTMEMCPY
  33. #include "libvo/fastmemcpy.h"
  34. #endif
  35. #ifdef HAVE_MMX
  36. #include "i386/mmx.h"
  37. #endif
  38. #define xglue(x, y) x ## y
  39. #define glue(x, y) xglue(x, y)
  40. #define FF_COLOR_RGB 0 /* RGB color space */
  41. #define FF_COLOR_GRAY 1 /* gray color space */
  42. #define FF_COLOR_YUV 2 /* YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
  43. #define FF_COLOR_YUV_JPEG 3 /* YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
  44. #define FF_PIXEL_PLANAR 0 /* each channel has one component in AVPicture */
  45. #define FF_PIXEL_PACKED 1 /* only one component containing all the channels */
  46. #define FF_PIXEL_PALETTE 2 /* one component containing indexes into a palette */
  47. typedef struct PixFmtInfo {
  48. const char *name;
  49. uint8_t nb_channels; /* number of channels (including alpha) */
  50. uint8_t color_type; /* color type (see FF_COLOR_xxx constants) */
  51. uint8_t pixel_type; /* pixel storage type (see FF_PIXEL_xxx constants) */
  52. uint8_t is_alpha : 1; /* true if alpha can be specified */
  53. uint8_t x_chroma_shift; /* X chroma subsampling factor is 2 ^ shift */
  54. uint8_t y_chroma_shift; /* Y chroma subsampling factor is 2 ^ shift */
  55. uint8_t depth; /* bit depth of the color components */
  56. } PixFmtInfo;
  57. /* this table gives more information about formats */
  58. static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
  59. /* YUV formats */
  60. [PIX_FMT_YUV420P] = {
  61. .name = "yuv420p",
  62. .nb_channels = 3,
  63. .color_type = FF_COLOR_YUV,
  64. .pixel_type = FF_PIXEL_PLANAR,
  65. .depth = 8,
  66. .x_chroma_shift = 1, .y_chroma_shift = 1,
  67. },
  68. [PIX_FMT_YUV422P] = {
  69. .name = "yuv422p",
  70. .nb_channels = 3,
  71. .color_type = FF_COLOR_YUV,
  72. .pixel_type = FF_PIXEL_PLANAR,
  73. .depth = 8,
  74. .x_chroma_shift = 1, .y_chroma_shift = 0,
  75. },
  76. [PIX_FMT_YUV444P] = {
  77. .name = "yuv444p",
  78. .nb_channels = 3,
  79. .color_type = FF_COLOR_YUV,
  80. .pixel_type = FF_PIXEL_PLANAR,
  81. .depth = 8,
  82. .x_chroma_shift = 0, .y_chroma_shift = 0,
  83. },
  84. [PIX_FMT_YUYV422] = {
  85. .name = "yuyv422",
  86. .nb_channels = 1,
  87. .color_type = FF_COLOR_YUV,
  88. .pixel_type = FF_PIXEL_PACKED,
  89. .depth = 8,
  90. .x_chroma_shift = 1, .y_chroma_shift = 0,
  91. },
  92. [PIX_FMT_UYVY422] = {
  93. .name = "uyvy422",
  94. .nb_channels = 1,
  95. .color_type = FF_COLOR_YUV,
  96. .pixel_type = FF_PIXEL_PACKED,
  97. .depth = 8,
  98. .x_chroma_shift = 1, .y_chroma_shift = 0,
  99. },
  100. [PIX_FMT_YUV410P] = {
  101. .name = "yuv410p",
  102. .nb_channels = 3,
  103. .color_type = FF_COLOR_YUV,
  104. .pixel_type = FF_PIXEL_PLANAR,
  105. .depth = 8,
  106. .x_chroma_shift = 2, .y_chroma_shift = 2,
  107. },
  108. [PIX_FMT_YUV411P] = {
  109. .name = "yuv411p",
  110. .nb_channels = 3,
  111. .color_type = FF_COLOR_YUV,
  112. .pixel_type = FF_PIXEL_PLANAR,
  113. .depth = 8,
  114. .x_chroma_shift = 2, .y_chroma_shift = 0,
  115. },
  116. /* JPEG YUV */
  117. [PIX_FMT_YUVJ420P] = {
  118. .name = "yuvj420p",
  119. .nb_channels = 3,
  120. .color_type = FF_COLOR_YUV_JPEG,
  121. .pixel_type = FF_PIXEL_PLANAR,
  122. .depth = 8,
  123. .x_chroma_shift = 1, .y_chroma_shift = 1,
  124. },
  125. [PIX_FMT_YUVJ422P] = {
  126. .name = "yuvj422p",
  127. .nb_channels = 3,
  128. .color_type = FF_COLOR_YUV_JPEG,
  129. .pixel_type = FF_PIXEL_PLANAR,
  130. .depth = 8,
  131. .x_chroma_shift = 1, .y_chroma_shift = 0,
  132. },
  133. [PIX_FMT_YUVJ444P] = {
  134. .name = "yuvj444p",
  135. .nb_channels = 3,
  136. .color_type = FF_COLOR_YUV_JPEG,
  137. .pixel_type = FF_PIXEL_PLANAR,
  138. .depth = 8,
  139. .x_chroma_shift = 0, .y_chroma_shift = 0,
  140. },
  141. /* RGB formats */
  142. [PIX_FMT_RGB24] = {
  143. .name = "rgb24",
  144. .nb_channels = 3,
  145. .color_type = FF_COLOR_RGB,
  146. .pixel_type = FF_PIXEL_PACKED,
  147. .depth = 8,
  148. .x_chroma_shift = 0, .y_chroma_shift = 0,
  149. },
  150. [PIX_FMT_BGR24] = {
  151. .name = "bgr24",
  152. .nb_channels = 3,
  153. .color_type = FF_COLOR_RGB,
  154. .pixel_type = FF_PIXEL_PACKED,
  155. .depth = 8,
  156. .x_chroma_shift = 0, .y_chroma_shift = 0,
  157. },
  158. [PIX_FMT_RGB32] = {
  159. .name = "rgb32",
  160. .nb_channels = 4, .is_alpha = 1,
  161. .color_type = FF_COLOR_RGB,
  162. .pixel_type = FF_PIXEL_PACKED,
  163. .depth = 8,
  164. .x_chroma_shift = 0, .y_chroma_shift = 0,
  165. },
  166. [PIX_FMT_RGB565] = {
  167. .name = "rgb565",
  168. .nb_channels = 3,
  169. .color_type = FF_COLOR_RGB,
  170. .pixel_type = FF_PIXEL_PACKED,
  171. .depth = 5,
  172. .x_chroma_shift = 0, .y_chroma_shift = 0,
  173. },
  174. [PIX_FMT_RGB555] = {
  175. .name = "rgb555",
  176. .nb_channels = 3,
  177. .color_type = FF_COLOR_RGB,
  178. .pixel_type = FF_PIXEL_PACKED,
  179. .depth = 5,
  180. .x_chroma_shift = 0, .y_chroma_shift = 0,
  181. },
  182. /* gray / mono formats */
  183. [PIX_FMT_GRAY16BE] = {
  184. .name = "gray16be",
  185. .nb_channels = 1,
  186. .color_type = FF_COLOR_GRAY,
  187. .pixel_type = FF_PIXEL_PLANAR,
  188. .depth = 16,
  189. },
  190. [PIX_FMT_GRAY16LE] = {
  191. .name = "gray16le",
  192. .nb_channels = 1,
  193. .color_type = FF_COLOR_GRAY,
  194. .pixel_type = FF_PIXEL_PLANAR,
  195. .depth = 16,
  196. },
  197. [PIX_FMT_GRAY8] = {
  198. .name = "gray",
  199. .nb_channels = 1,
  200. .color_type = FF_COLOR_GRAY,
  201. .pixel_type = FF_PIXEL_PLANAR,
  202. .depth = 8,
  203. },
  204. [PIX_FMT_MONOWHITE] = {
  205. .name = "monow",
  206. .nb_channels = 1,
  207. .color_type = FF_COLOR_GRAY,
  208. .pixel_type = FF_PIXEL_PLANAR,
  209. .depth = 1,
  210. },
  211. [PIX_FMT_MONOBLACK] = {
  212. .name = "monob",
  213. .nb_channels = 1,
  214. .color_type = FF_COLOR_GRAY,
  215. .pixel_type = FF_PIXEL_PLANAR,
  216. .depth = 1,
  217. },
  218. /* paletted formats */
  219. [PIX_FMT_PAL8] = {
  220. .name = "pal8",
  221. .nb_channels = 4, .is_alpha = 1,
  222. .color_type = FF_COLOR_RGB,
  223. .pixel_type = FF_PIXEL_PALETTE,
  224. .depth = 8,
  225. },
  226. [PIX_FMT_XVMC_MPEG2_MC] = {
  227. .name = "xvmcmc",
  228. },
  229. [PIX_FMT_XVMC_MPEG2_IDCT] = {
  230. .name = "xvmcidct",
  231. },
  232. [PIX_FMT_UYYVYY411] = {
  233. .name = "uyyvyy411",
  234. .nb_channels = 1,
  235. .color_type = FF_COLOR_YUV,
  236. .pixel_type = FF_PIXEL_PACKED,
  237. .depth = 8,
  238. .x_chroma_shift = 2, .y_chroma_shift = 0,
  239. },
  240. [PIX_FMT_BGR32] = {
  241. .name = "bgr32",
  242. .nb_channels = 4, .is_alpha = 1,
  243. .color_type = FF_COLOR_RGB,
  244. .pixel_type = FF_PIXEL_PACKED,
  245. .depth = 8,
  246. .x_chroma_shift = 0, .y_chroma_shift = 0,
  247. },
  248. [PIX_FMT_BGR565] = {
  249. .name = "bgr565",
  250. .nb_channels = 3,
  251. .color_type = FF_COLOR_RGB,
  252. .pixel_type = FF_PIXEL_PACKED,
  253. .depth = 5,
  254. .x_chroma_shift = 0, .y_chroma_shift = 0,
  255. },
  256. [PIX_FMT_BGR555] = {
  257. .name = "bgr555",
  258. .nb_channels = 3,
  259. .color_type = FF_COLOR_RGB,
  260. .pixel_type = FF_PIXEL_PACKED,
  261. .depth = 5,
  262. .x_chroma_shift = 0, .y_chroma_shift = 0,
  263. },
  264. [PIX_FMT_RGB8] = {
  265. .name = "rgb8",
  266. .nb_channels = 1,
  267. .color_type = FF_COLOR_RGB,
  268. .pixel_type = FF_PIXEL_PACKED,
  269. .depth = 8,
  270. .x_chroma_shift = 0, .y_chroma_shift = 0,
  271. },
  272. [PIX_FMT_RGB4] = {
  273. .name = "rgb4",
  274. .nb_channels = 1,
  275. .color_type = FF_COLOR_RGB,
  276. .pixel_type = FF_PIXEL_PACKED,
  277. .depth = 4,
  278. .x_chroma_shift = 0, .y_chroma_shift = 0,
  279. },
  280. [PIX_FMT_RGB4_BYTE] = {
  281. .name = "rgb4_byte",
  282. .nb_channels = 1,
  283. .color_type = FF_COLOR_RGB,
  284. .pixel_type = FF_PIXEL_PACKED,
  285. .depth = 8,
  286. .x_chroma_shift = 0, .y_chroma_shift = 0,
  287. },
  288. [PIX_FMT_BGR8] = {
  289. .name = "bgr8",
  290. .nb_channels = 1,
  291. .color_type = FF_COLOR_RGB,
  292. .pixel_type = FF_PIXEL_PACKED,
  293. .depth = 8,
  294. .x_chroma_shift = 0, .y_chroma_shift = 0,
  295. },
  296. [PIX_FMT_BGR4] = {
  297. .name = "bgr4",
  298. .nb_channels = 1,
  299. .color_type = FF_COLOR_RGB,
  300. .pixel_type = FF_PIXEL_PACKED,
  301. .depth = 4,
  302. .x_chroma_shift = 0, .y_chroma_shift = 0,
  303. },
  304. [PIX_FMT_BGR4_BYTE] = {
  305. .name = "bgr4_byte",
  306. .nb_channels = 1,
  307. .color_type = FF_COLOR_RGB,
  308. .pixel_type = FF_PIXEL_PACKED,
  309. .depth = 8,
  310. .x_chroma_shift = 0, .y_chroma_shift = 0,
  311. },
  312. [PIX_FMT_NV12] = {
  313. .name = "nv12",
  314. .nb_channels = 2,
  315. .color_type = FF_COLOR_YUV,
  316. .pixel_type = FF_PIXEL_PLANAR,
  317. .depth = 8,
  318. .x_chroma_shift = 1, .y_chroma_shift = 1,
  319. },
  320. [PIX_FMT_NV21] = {
  321. .name = "nv21",
  322. .nb_channels = 2,
  323. .color_type = FF_COLOR_YUV,
  324. .pixel_type = FF_PIXEL_PLANAR,
  325. .depth = 8,
  326. .x_chroma_shift = 1, .y_chroma_shift = 1,
  327. },
  328. [PIX_FMT_BGR32_1] = {
  329. .name = "bgr32_1",
  330. .nb_channels = 4, .is_alpha = 1,
  331. .color_type = FF_COLOR_RGB,
  332. .pixel_type = FF_PIXEL_PACKED,
  333. .depth = 8,
  334. .x_chroma_shift = 0, .y_chroma_shift = 0,
  335. },
  336. [PIX_FMT_RGB32_1] = {
  337. .name = "rgb32_1",
  338. .nb_channels = 4, .is_alpha = 1,
  339. .color_type = FF_COLOR_RGB,
  340. .pixel_type = FF_PIXEL_PACKED,
  341. .depth = 8,
  342. .x_chroma_shift = 0, .y_chroma_shift = 0,
  343. },
  344. };
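/*
 * Reading the table above: for yuv420p, x_chroma_shift = 1 and
 * y_chroma_shift = 1, so each chroma plane holds ceil(width/2) x
 * ceil(height/2) samples; for yuv410p the shifts are 2, giving
 * ceil(width/4) x ceil(height/4). This is exactly the w2/h2
 * computation used by avpicture_fill() below.
 */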
  345. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  346. {
  347. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  348. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  349. }
  350. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  351. {
  352. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  353. return "???";
  354. else
  355. return pix_fmt_info[pix_fmt].name;
  356. }
  357. enum PixelFormat avcodec_get_pix_fmt(const char* name)
  358. {
  359. int i;
  360. for (i=0; i < PIX_FMT_NB; i++)
  361. if (!strcmp(pix_fmt_info[i].name, name))
  362. break;
  363. return i;
  364. }
  365. /* Picture fields are filled with 'ptr' addresses. Also returns the size. */
  366. int avpicture_fill(AVPicture *picture, uint8_t *ptr,
  367. int pix_fmt, int width, int height)
  368. {
  369. int size, w2, h2, size2;
  370. const PixFmtInfo *pinfo;
  371. if(avcodec_check_dimensions(NULL, width, height))
  372. goto fail;
  373. pinfo = &pix_fmt_info[pix_fmt];
  374. size = width * height;
  375. switch(pix_fmt) {
  376. case PIX_FMT_YUV420P:
  377. case PIX_FMT_YUV422P:
  378. case PIX_FMT_YUV444P:
  379. case PIX_FMT_YUV410P:
  380. case PIX_FMT_YUV411P:
  381. case PIX_FMT_YUVJ420P:
  382. case PIX_FMT_YUVJ422P:
  383. case PIX_FMT_YUVJ444P:
  384. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  385. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  386. size2 = w2 * h2;
  387. picture->data[0] = ptr;
  388. picture->data[1] = picture->data[0] + size;
  389. picture->data[2] = picture->data[1] + size2;
  390. picture->linesize[0] = width;
  391. picture->linesize[1] = w2;
  392. picture->linesize[2] = w2;
  393. return size + 2 * size2;
  394. case PIX_FMT_NV12:
  395. case PIX_FMT_NV21:
  396. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  397. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  398. size2 = w2 * h2 * 2;
  399. picture->data[0] = ptr;
  400. picture->data[1] = picture->data[0] + size;
  401. picture->data[2] = NULL;
  402. picture->linesize[0] = width;
  403. picture->linesize[1] = 2 * w2; /* interleaved U/V plane */
  404. picture->linesize[2] = 0;
  405. return size + size2;
  406. case PIX_FMT_RGB24:
  407. case PIX_FMT_BGR24:
  408. picture->data[0] = ptr;
  409. picture->data[1] = NULL;
  410. picture->data[2] = NULL;
  411. picture->linesize[0] = width * 3;
  412. return size * 3;
  413. case PIX_FMT_RGB32:
  414. case PIX_FMT_BGR32:
  415. case PIX_FMT_RGB32_1:
  416. case PIX_FMT_BGR32_1:
  417. picture->data[0] = ptr;
  418. picture->data[1] = NULL;
  419. picture->data[2] = NULL;
  420. picture->linesize[0] = width * 4;
  421. return size * 4;
  422. case PIX_FMT_GRAY16BE:
  423. case PIX_FMT_GRAY16LE:
  424. case PIX_FMT_BGR555:
  425. case PIX_FMT_BGR565:
  426. case PIX_FMT_RGB555:
  427. case PIX_FMT_RGB565:
  428. case PIX_FMT_YUYV422:
  429. picture->data[0] = ptr;
  430. picture->data[1] = NULL;
  431. picture->data[2] = NULL;
  432. picture->linesize[0] = width * 2;
  433. return size * 2;
  434. case PIX_FMT_UYVY422:
  435. picture->data[0] = ptr;
  436. picture->data[1] = NULL;
  437. picture->data[2] = NULL;
  438. picture->linesize[0] = width * 2;
  439. return size * 2;
  440. case PIX_FMT_UYYVYY411:
  441. picture->data[0] = ptr;
  442. picture->data[1] = NULL;
  443. picture->data[2] = NULL;
  444. picture->linesize[0] = width + width/2;
  445. return size + size/2;
  446. case PIX_FMT_RGB8:
  447. case PIX_FMT_BGR8:
  448. case PIX_FMT_RGB4_BYTE:
  449. case PIX_FMT_BGR4_BYTE:
  450. case PIX_FMT_GRAY8:
  451. picture->data[0] = ptr;
  452. picture->data[1] = NULL;
  453. picture->data[2] = NULL;
  454. picture->linesize[0] = width;
  455. return size;
  456. case PIX_FMT_RGB4:
  457. case PIX_FMT_BGR4:
  458. picture->data[0] = ptr;
  459. picture->data[1] = NULL;
  460. picture->data[2] = NULL;
  461. picture->linesize[0] = width / 2;
  462. return size / 2;
  463. case PIX_FMT_MONOWHITE:
  464. case PIX_FMT_MONOBLACK:
  465. picture->data[0] = ptr;
  466. picture->data[1] = NULL;
  467. picture->data[2] = NULL;
  468. picture->linesize[0] = (width + 7) >> 3;
  469. return picture->linesize[0] * height;
  470. case PIX_FMT_PAL8:
  471. size2 = (size + 3) & ~3;
  472. picture->data[0] = ptr;
  473. picture->data[1] = ptr + size2; /* palette is stored here as 256 32-bit words */
  474. picture->data[2] = NULL;
  475. picture->linesize[0] = width;
  476. picture->linesize[1] = 4;
  477. return size2 + 256 * 4;
  478. default:
  479. fail:
  480. picture->data[0] = NULL;
  481. picture->data[1] = NULL;
  482. picture->data[2] = NULL;
  483. picture->data[3] = NULL;
  484. return -1;
  485. }
  486. }
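/*
 * Minimal usage sketch (illustrative only; 'buf' and the 640x480
 * dimensions are made up): query the required size, allocate a buffer,
 * then let avpicture_fill() set up the plane pointers and line sizes.
 *
 *   AVPicture pic;
 *   int size = avpicture_get_size(PIX_FMT_YUV420P, 640, 480);
 *   uint8_t *buf = size >= 0 ? av_malloc(size) : NULL;
 *   if (buf)
 *       avpicture_fill(&pic, buf, PIX_FMT_YUV420P, 640, 480);
 *
 * avpicture_alloc() near the end of this file wraps the same pattern.
 */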
  487. int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
  488. unsigned char *dest, int dest_size)
  489. {
  490. const PixFmtInfo* pf = &pix_fmt_info[pix_fmt];
  491. int i, j, w, h, data_planes;
  492. const unsigned char* s;
  493. int size = avpicture_get_size(pix_fmt, width, height);
  494. if (size > dest_size || size < 0)
  495. return -1;
  496. if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
  497. if (pix_fmt == PIX_FMT_YUYV422 ||
  498. pix_fmt == PIX_FMT_UYVY422 ||
  499. pix_fmt == PIX_FMT_BGR565 ||
  500. pix_fmt == PIX_FMT_BGR555 ||
  501. pix_fmt == PIX_FMT_RGB565 ||
  502. pix_fmt == PIX_FMT_RGB555)
  503. w = width * 2;
  504. else if (pix_fmt == PIX_FMT_UYYVYY411)
  505. w = width + width/2;
  506. else if (pix_fmt == PIX_FMT_PAL8)
  507. w = width;
  508. else
  509. w = width * (pf->depth * pf->nb_channels / 8);
  510. data_planes = 1;
  511. h = height;
  512. } else {
  513. data_planes = pf->nb_channels;
  514. w = (width*pf->depth + 7)/8;
  515. h = height;
  516. }
  517. for (i=0; i<data_planes; i++) {
  518. if (i == 1) {
  519. w = width >> pf->x_chroma_shift;
  520. h = height >> pf->y_chroma_shift;
  521. }
  522. s = src->data[i];
  523. for(j=0; j<h; j++) {
  524. memcpy(dest, s, w);
  525. dest += w;
  526. s += src->linesize[i];
  527. }
  528. }
  529. if (pf->pixel_type == FF_PIXEL_PALETTE)
  530. memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);
  531. return size;
  532. }
  533. int avpicture_get_size(int pix_fmt, int width, int height)
  534. {
  535. AVPicture dummy_pict;
  536. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  537. }
  538. /**
  539. * Compute the loss when converting from one pixel format to another.
  540. */
  541. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  542. int has_alpha)
  543. {
  544. const PixFmtInfo *pf, *ps;
  545. int loss;
  546. ps = &pix_fmt_info[src_pix_fmt];
  547. pf = &pix_fmt_info[dst_pix_fmt];
  548. /* compute loss */
  549. loss = 0;
  551. if (pf->depth < ps->depth ||
  552. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  553. loss |= FF_LOSS_DEPTH;
  554. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  555. pf->y_chroma_shift > ps->y_chroma_shift)
  556. loss |= FF_LOSS_RESOLUTION;
  557. switch(pf->color_type) {
  558. case FF_COLOR_RGB:
  559. if (ps->color_type != FF_COLOR_RGB &&
  560. ps->color_type != FF_COLOR_GRAY)
  561. loss |= FF_LOSS_COLORSPACE;
  562. break;
  563. case FF_COLOR_GRAY:
  564. if (ps->color_type != FF_COLOR_GRAY)
  565. loss |= FF_LOSS_COLORSPACE;
  566. break;
  567. case FF_COLOR_YUV:
  568. if (ps->color_type != FF_COLOR_YUV)
  569. loss |= FF_LOSS_COLORSPACE;
  570. break;
  571. case FF_COLOR_YUV_JPEG:
  572. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  573. ps->color_type != FF_COLOR_YUV &&
  574. ps->color_type != FF_COLOR_GRAY)
  575. loss |= FF_LOSS_COLORSPACE;
  576. break;
  577. default:
  578. /* fail safe test */
  579. if (ps->color_type != pf->color_type)
  580. loss |= FF_LOSS_COLORSPACE;
  581. break;
  582. }
  583. if (pf->color_type == FF_COLOR_GRAY &&
  584. ps->color_type != FF_COLOR_GRAY)
  585. loss |= FF_LOSS_CHROMA;
  586. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  587. loss |= FF_LOSS_ALPHA;
  588. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  589. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  590. loss |= FF_LOSS_COLORQUANT;
  591. return loss;
  592. }
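/*
 * Worked example, derived from the tests above: converting yuv420p
 * (8-bit YUV with 2x2-subsampled chroma) to rgb565 (5-bit RGB) yields
 * FF_LOSS_DEPTH | FF_LOSS_COLORSPACE -- the depth drops from 8 to 5 and
 * the color space changes -- but not FF_LOSS_RESOLUTION, because the
 * destination has no chroma subsampling at all.
 */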
  593. static int avg_bits_per_pixel(int pix_fmt)
  594. {
  595. int bits;
  596. const PixFmtInfo *pf;
  597. pf = &pix_fmt_info[pix_fmt];
  598. switch(pf->pixel_type) {
  599. case FF_PIXEL_PACKED:
  600. switch(pix_fmt) {
  601. case PIX_FMT_YUYV422:
  602. case PIX_FMT_UYVY422:
  603. case PIX_FMT_RGB565:
  604. case PIX_FMT_RGB555:
  605. case PIX_FMT_BGR565:
  606. case PIX_FMT_BGR555:
  607. bits = 16;
  608. break;
  609. case PIX_FMT_UYYVYY411:
  610. bits = 12;
  611. break;
  612. default:
  613. bits = pf->depth * pf->nb_channels;
  614. break;
  615. }
  616. break;
  617. case FF_PIXEL_PLANAR:
  618. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  619. bits = pf->depth * pf->nb_channels;
  620. } else {
  621. bits = pf->depth + ((2 * pf->depth) >>
  622. (pf->x_chroma_shift + pf->y_chroma_shift));
  623. }
  624. break;
  625. case FF_PIXEL_PALETTE:
  626. bits = 8;
  627. break;
  628. default:
  629. bits = -1;
  630. break;
  631. }
  632. return bits;
  633. }
  634. static int avcodec_find_best_pix_fmt1(int pix_fmt_mask,
  635. int src_pix_fmt,
  636. int has_alpha,
  637. int loss_mask)
  638. {
  639. int dist, i, loss, min_dist, dst_pix_fmt;
  640. /* find exact color match with smallest size */
  641. dst_pix_fmt = -1;
  642. min_dist = 0x7fffffff;
  643. for(i = 0;i < PIX_FMT_NB; i++) {
  644. if (pix_fmt_mask & (1 << i)) {
  645. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  646. if (loss == 0) {
  647. dist = avg_bits_per_pixel(i);
  648. if (dist < min_dist) {
  649. min_dist = dist;
  650. dst_pix_fmt = i;
  651. }
  652. }
  653. }
  654. }
  655. return dst_pix_fmt;
  656. }
  657. /**
  658. * Find the best pixel format to convert to. Returns -1 if none is found.
  659. */
  660. int avcodec_find_best_pix_fmt(int pix_fmt_mask, int src_pix_fmt,
  661. int has_alpha, int *loss_ptr)
  662. {
  663. int dst_pix_fmt, loss_mask, i;
  664. static const int loss_mask_order[] = {
  665. ~0, /* no loss first */
  666. ~FF_LOSS_ALPHA,
  667. ~FF_LOSS_RESOLUTION,
  668. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  669. ~FF_LOSS_COLORQUANT,
  670. ~FF_LOSS_DEPTH,
  671. 0,
  672. };
  673. /* try with successive loss */
  674. i = 0;
  675. for(;;) {
  676. loss_mask = loss_mask_order[i++];
  677. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  678. has_alpha, loss_mask);
  679. if (dst_pix_fmt >= 0)
  680. goto found;
  681. if (loss_mask == 0)
  682. break;
  683. }
  684. return -1;
  685. found:
  686. if (loss_ptr)
  687. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  688. return dst_pix_fmt;
  689. }
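/*
 * Sketch of a typical call (the candidate mask below is made up):
 * pix_fmt_mask has bit (1 << pix_fmt) set for every allowed destination.
 *
 *   int loss;
 *   int mask = (1 << PIX_FMT_YUV420P) | (1 << PIX_FMT_RGB24);
 *   int best = avcodec_find_best_pix_fmt(mask, PIX_FMT_YUV444P, 0, &loss);
 *
 * Starting from yuv444p, yuv420p costs only FF_LOSS_RESOLUTION while
 * rgb24 costs FF_LOSS_COLORSPACE; loss_mask_order forgives resolution
 * loss before color-space loss, so 'best' is PIX_FMT_YUV420P and 'loss'
 * is FF_LOSS_RESOLUTION.
 */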
  690. void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
  691. const uint8_t *src, int src_wrap,
  692. int width, int height)
  693. {
  694. if((!dst) || (!src))
  695. return;
  696. for(;height > 0; height--) {
  697. memcpy(dst, src, width);
  698. dst += dst_wrap;
  699. src += src_wrap;
  700. }
  701. }
  702. /**
  703. * Copy image 'src' to 'dst'.
  704. */
  705. void img_copy(AVPicture *dst, const AVPicture *src,
  706. int pix_fmt, int width, int height)
  707. {
  708. int bwidth, bits, i;
  709. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  711. switch(pf->pixel_type) {
  712. case FF_PIXEL_PACKED:
  713. switch(pix_fmt) {
  714. case PIX_FMT_YUYV422:
  715. case PIX_FMT_UYVY422:
  716. case PIX_FMT_RGB565:
  717. case PIX_FMT_RGB555:
  718. case PIX_FMT_BGR565:
  719. case PIX_FMT_BGR555:
  720. bits = 16;
  721. break;
  722. case PIX_FMT_UYYVYY411:
  723. bits = 12;
  724. break;
  725. default:
  726. bits = pf->depth * pf->nb_channels;
  727. break;
  728. }
  729. bwidth = (width * bits + 7) >> 3;
  730. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  731. src->data[0], src->linesize[0],
  732. bwidth, height);
  733. break;
  734. case FF_PIXEL_PLANAR:
  735. for(i = 0; i < pf->nb_channels; i++) {
  736. int w, h;
  737. w = width;
  738. h = height;
  739. if (i == 1 || i == 2) {
  740. w >>= pf->x_chroma_shift;
  741. h >>= pf->y_chroma_shift;
  742. }
  743. bwidth = (w * pf->depth + 7) >> 3;
  744. ff_img_copy_plane(dst->data[i], dst->linesize[i],
  745. src->data[i], src->linesize[i],
  746. bwidth, h);
  747. }
  748. break;
  749. case FF_PIXEL_PALETTE:
  750. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  751. src->data[0], src->linesize[0],
  752. width, height);
  753. /* copy the palette */
  754. ff_img_copy_plane(dst->data[1], dst->linesize[1],
  755. src->data[1], src->linesize[1],
  756. 4, 256);
  757. break;
  758. }
  759. }
  760. /* XXX: totally unoptimized */
  761. static void yuv422_to_yuv420p(AVPicture *dst, const AVPicture *src,
  762. int width, int height)
  763. {
  764. const uint8_t *p, *p1;
  765. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  766. int w;
  767. p1 = src->data[0];
  768. lum1 = dst->data[0];
  769. cb1 = dst->data[1];
  770. cr1 = dst->data[2];
  771. for(;height >= 1; height -= 2) {
  772. p = p1;
  773. lum = lum1;
  774. cb = cb1;
  775. cr = cr1;
  776. for(w = width; w >= 2; w -= 2) {
  777. lum[0] = p[0];
  778. cb[0] = p[1];
  779. lum[1] = p[2];
  780. cr[0] = p[3];
  781. p += 4;
  782. lum += 2;
  783. cb++;
  784. cr++;
  785. }
  786. if (w) {
  787. lum[0] = p[0];
  788. cb[0] = p[1];
  789. cr[0] = p[3];
  790. cb++;
  791. cr++;
  792. }
  793. p1 += src->linesize[0];
  794. lum1 += dst->linesize[0];
  795. if (height>1) {
  796. p = p1;
  797. lum = lum1;
  798. for(w = width; w >= 2; w -= 2) {
  799. lum[0] = p[0];
  800. lum[1] = p[2];
  801. p += 4;
  802. lum += 2;
  803. }
  804. if (w) {
  805. lum[0] = p[0];
  806. }
  807. p1 += src->linesize[0];
  808. lum1 += dst->linesize[0];
  809. }
  810. cb1 += dst->linesize[1];
  811. cr1 += dst->linesize[2];
  812. }
  813. }
  814. static void uyvy422_to_yuv420p(AVPicture *dst, const AVPicture *src,
  815. int width, int height)
  816. {
  817. const uint8_t *p, *p1;
  818. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  819. int w;
  820. p1 = src->data[0];
  821. lum1 = dst->data[0];
  822. cb1 = dst->data[1];
  823. cr1 = dst->data[2];
  824. for(;height >= 1; height -= 2) {
  825. p = p1;
  826. lum = lum1;
  827. cb = cb1;
  828. cr = cr1;
  829. for(w = width; w >= 2; w -= 2) {
  830. lum[0] = p[1];
  831. cb[0] = p[0];
  832. lum[1] = p[3];
  833. cr[0] = p[2];
  834. p += 4;
  835. lum += 2;
  836. cb++;
  837. cr++;
  838. }
  839. if (w) {
  840. lum[0] = p[1];
  841. cb[0] = p[0];
  842. cr[0] = p[2];
  843. cb++;
  844. cr++;
  845. }
  846. p1 += src->linesize[0];
  847. lum1 += dst->linesize[0];
  848. if (height>1) {
  849. p = p1;
  850. lum = lum1;
  851. for(w = width; w >= 2; w -= 2) {
  852. lum[0] = p[1];
  853. lum[1] = p[3];
  854. p += 4;
  855. lum += 2;
  856. }
  857. if (w) {
  858. lum[0] = p[1];
  859. }
  860. p1 += src->linesize[0];
  861. lum1 += dst->linesize[0];
  862. }
  863. cb1 += dst->linesize[1];
  864. cr1 += dst->linesize[2];
  865. }
  866. }
  867. static void uyvy422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  868. int width, int height)
  869. {
  870. const uint8_t *p, *p1;
  871. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  872. int w;
  873. p1 = src->data[0];
  874. lum1 = dst->data[0];
  875. cb1 = dst->data[1];
  876. cr1 = dst->data[2];
  877. for(;height > 0; height--) {
  878. p = p1;
  879. lum = lum1;
  880. cb = cb1;
  881. cr = cr1;
  882. for(w = width; w >= 2; w -= 2) {
  883. lum[0] = p[1];
  884. cb[0] = p[0];
  885. lum[1] = p[3];
  886. cr[0] = p[2];
  887. p += 4;
  888. lum += 2;
  889. cb++;
  890. cr++;
  891. }
  892. p1 += src->linesize[0];
  893. lum1 += dst->linesize[0];
  894. cb1 += dst->linesize[1];
  895. cr1 += dst->linesize[2];
  896. }
  897. }
  898. static void yuv422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  899. int width, int height)
  900. {
  901. const uint8_t *p, *p1;
  902. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  903. int w;
  904. p1 = src->data[0];
  905. lum1 = dst->data[0];
  906. cb1 = dst->data[1];
  907. cr1 = dst->data[2];
  908. for(;height > 0; height--) {
  909. p = p1;
  910. lum = lum1;
  911. cb = cb1;
  912. cr = cr1;
  913. for(w = width; w >= 2; w -= 2) {
  914. lum[0] = p[0];
  915. cb[0] = p[1];
  916. lum[1] = p[2];
  917. cr[0] = p[3];
  918. p += 4;
  919. lum += 2;
  920. cb++;
  921. cr++;
  922. }
  923. p1 += src->linesize[0];
  924. lum1 += dst->linesize[0];
  925. cb1 += dst->linesize[1];
  926. cr1 += dst->linesize[2];
  927. }
  928. }
  929. static void yuv422p_to_yuv422(AVPicture *dst, const AVPicture *src,
  930. int width, int height)
  931. {
  932. uint8_t *p, *p1;
  933. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  934. int w;
  935. p1 = dst->data[0];
  936. lum1 = src->data[0];
  937. cb1 = src->data[1];
  938. cr1 = src->data[2];
  939. for(;height > 0; height--) {
  940. p = p1;
  941. lum = lum1;
  942. cb = cb1;
  943. cr = cr1;
  944. for(w = width; w >= 2; w -= 2) {
  945. p[0] = lum[0];
  946. p[1] = cb[0];
  947. p[2] = lum[1];
  948. p[3] = cr[0];
  949. p += 4;
  950. lum += 2;
  951. cb++;
  952. cr++;
  953. }
  954. p1 += dst->linesize[0];
  955. lum1 += src->linesize[0];
  956. cb1 += src->linesize[1];
  957. cr1 += src->linesize[2];
  958. }
  959. }
  960. static void yuv422p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  961. int width, int height)
  962. {
  963. uint8_t *p, *p1;
  964. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  965. int w;
  966. p1 = dst->data[0];
  967. lum1 = src->data[0];
  968. cb1 = src->data[1];
  969. cr1 = src->data[2];
  970. for(;height > 0; height--) {
  971. p = p1;
  972. lum = lum1;
  973. cb = cb1;
  974. cr = cr1;
  975. for(w = width; w >= 2; w -= 2) {
  976. p[1] = lum[0];
  977. p[0] = cb[0];
  978. p[3] = lum[1];
  979. p[2] = cr[0];
  980. p += 4;
  981. lum += 2;
  982. cb++;
  983. cr++;
  984. }
  985. p1 += dst->linesize[0];
  986. lum1 += src->linesize[0];
  987. cb1 += src->linesize[1];
  988. cr1 += src->linesize[2];
  989. }
  990. }
  991. static void uyvy411_to_yuv411p(AVPicture *dst, const AVPicture *src,
  992. int width, int height)
  993. {
  994. const uint8_t *p, *p1;
  995. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  996. int w;
  997. p1 = src->data[0];
  998. lum1 = dst->data[0];
  999. cb1 = dst->data[1];
  1000. cr1 = dst->data[2];
  1001. for(;height > 0; height--) {
  1002. p = p1;
  1003. lum = lum1;
  1004. cb = cb1;
  1005. cr = cr1;
  1006. for(w = width; w >= 4; w -= 4) {
  1007. cb[0] = p[0];
  1008. lum[0] = p[1];
  1009. lum[1] = p[2];
  1010. cr[0] = p[3];
  1011. lum[2] = p[4];
  1012. lum[3] = p[5];
  1013. p += 6;
  1014. lum += 4;
  1015. cb++;
  1016. cr++;
  1017. }
  1018. p1 += src->linesize[0];
  1019. lum1 += dst->linesize[0];
  1020. cb1 += dst->linesize[1];
  1021. cr1 += dst->linesize[2];
  1022. }
  1023. }
  1024. static void yuv420p_to_yuv422(AVPicture *dst, const AVPicture *src,
  1025. int width, int height)
  1026. {
  1027. int w, h;
  1028. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1029. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1030. uint8_t *cb1, *cb2 = src->data[1];
  1031. uint8_t *cr1, *cr2 = src->data[2];
  1032. for(h = height / 2; h--;) {
  1033. line1 = linesrc;
  1034. line2 = linesrc + dst->linesize[0];
  1035. lum1 = lumsrc;
  1036. lum2 = lumsrc + src->linesize[0];
  1037. cb1 = cb2;
  1038. cr1 = cr2;
  1039. for(w = width / 2; w--;) {
  1040. *line1++ = *lum1++; *line2++ = *lum2++;
  1041. *line1++ = *line2++ = *cb1++;
  1042. *line1++ = *lum1++; *line2++ = *lum2++;
  1043. *line1++ = *line2++ = *cr1++;
  1044. }
  1045. linesrc += dst->linesize[0] * 2;
  1046. lumsrc += src->linesize[0] * 2;
  1047. cb2 += src->linesize[1];
  1048. cr2 += src->linesize[2];
  1049. }
  1050. }
  1051. static void yuv420p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1052. int width, int height)
  1053. {
  1054. int w, h;
  1055. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1056. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1057. uint8_t *cb1, *cb2 = src->data[1];
  1058. uint8_t *cr1, *cr2 = src->data[2];
  1059. for(h = height / 2; h--;) {
  1060. line1 = linesrc;
  1061. line2 = linesrc + dst->linesize[0];
  1062. lum1 = lumsrc;
  1063. lum2 = lumsrc + src->linesize[0];
  1064. cb1 = cb2;
  1065. cr1 = cr2;
  1066. for(w = width / 2; w--;) {
  1067. *line1++ = *line2++ = *cb1++;
  1068. *line1++ = *lum1++; *line2++ = *lum2++;
  1069. *line1++ = *line2++ = *cr1++;
  1070. *line1++ = *lum1++; *line2++ = *lum2++;
  1071. }
  1072. linesrc += dst->linesize[0] * 2;
  1073. lumsrc += src->linesize[0] * 2;
  1074. cb2 += src->linesize[1];
  1075. cr2 += src->linesize[2];
  1076. }
  1077. }
  1078. #define SCALEBITS 10
  1079. #define ONE_HALF (1 << (SCALEBITS - 1))
  1080. #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
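/*
 * With SCALEBITS = 10, FIX() stores a coefficient in 10-bit fixed point,
 * e.g. FIX(1.40200) = (int)(1.402 * 1024 + 0.5) = 1436. In YUV_TO_RGB1
 * below this gives r_add = 1436 * cr + 512, and the final ">> SCALEBITS"
 * scales the accumulated sum back down, so the whole conversion runs in
 * integer arithmetic.
 */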
  1081. #define YUV_TO_RGB1_CCIR(cb1, cr1)\
  1082. {\
  1083. cb = (cb1) - 128;\
  1084. cr = (cr1) - 128;\
  1085. r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
  1086. g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
  1087. ONE_HALF;\
  1088. b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
  1089. }
  1090. #define YUV_TO_RGB2_CCIR(r, g, b, y1)\
  1091. {\
  1092. y = ((y1) - 16) * FIX(255.0/219.0);\
  1093. r = cm[(y + r_add) >> SCALEBITS];\
  1094. g = cm[(y + g_add) >> SCALEBITS];\
  1095. b = cm[(y + b_add) >> SCALEBITS];\
  1096. }
  1097. #define YUV_TO_RGB1(cb1, cr1)\
  1098. {\
  1099. cb = (cb1) - 128;\
  1100. cr = (cr1) - 128;\
  1101. r_add = FIX(1.40200) * cr + ONE_HALF;\
  1102. g_add = - FIX(0.34414) * cb - FIX(0.71414) * cr + ONE_HALF;\
  1103. b_add = FIX(1.77200) * cb + ONE_HALF;\
  1104. }
  1105. #define YUV_TO_RGB2(r, g, b, y1)\
  1106. {\
  1107. y = (y1) << SCALEBITS;\
  1108. r = cm[(y + r_add) >> SCALEBITS];\
  1109. g = cm[(y + g_add) >> SCALEBITS];\
  1110. b = cm[(y + b_add) >> SCALEBITS];\
  1111. }
  1112. #define Y_CCIR_TO_JPEG(y)\
  1113. cm[((y) * FIX(255.0/219.0) + (ONE_HALF - 16 * FIX(255.0/219.0))) >> SCALEBITS]
  1114. #define Y_JPEG_TO_CCIR(y)\
  1115. (((y) * FIX(219.0/255.0) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
  1116. #define C_CCIR_TO_JPEG(y)\
  1117. cm[(((y) - 128) * FIX(127.0/112.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS]
  1118. /* NOTE: the clamp is really necessary! */
  1119. static inline int C_JPEG_TO_CCIR(int y) {
  1120. y = (((y - 128) * FIX(112.0/127.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS);
  1121. if (y < 16)
  1122. y = 16;
  1123. return y;
  1124. }
  1125. #define RGB_TO_Y(r, g, b) \
  1126. ((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
  1127. FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)
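/*
 * Sanity check of the fixed-point weights: FIX(0.299) + FIX(0.587) +
 * FIX(0.114) = 306 + 601 + 117 = 1024 = 1 << SCALEBITS, so
 * RGB_TO_Y(255, 255, 255) = (1024 * 255 + 512) >> 10 = 255, i.e. pure
 * white maps to full-scale luma.
 */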
  1128. #define RGB_TO_U(r1, g1, b1, shift)\
  1129. (((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
  1130. FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  1131. #define RGB_TO_V(r1, g1, b1, shift)\
  1132. (((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
  1133. FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  1134. #define RGB_TO_Y_CCIR(r, g, b) \
  1135. ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  1136. FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
  1137. #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
  1138. (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
  1139. FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  1140. #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
  1141. (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
  1142. FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  1143. static uint8_t y_ccir_to_jpeg[256];
  1144. static uint8_t y_jpeg_to_ccir[256];
  1145. static uint8_t c_ccir_to_jpeg[256];
  1146. static uint8_t c_jpeg_to_ccir[256];
  1147. /* init various conversion tables */
  1148. static void img_convert_init(void)
  1149. {
  1150. int i;
  1151. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  1152. for(i = 0;i < 256; i++) {
  1153. y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
  1154. y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
  1155. c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
  1156. c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
  1157. }
  1158. }
  1159. /* apply the given table to each pixel */
  1160. static void img_apply_table(uint8_t *dst, int dst_wrap,
  1161. const uint8_t *src, int src_wrap,
  1162. int width, int height, const uint8_t *table1)
  1163. {
  1164. int n;
  1165. const uint8_t *s;
  1166. uint8_t *d;
  1167. const uint8_t *table;
  1168. table = table1;
  1169. for(;height > 0; height--) {
  1170. s = src;
  1171. d = dst;
  1172. n = width;
  1173. while (n >= 4) {
  1174. d[0] = table[s[0]];
  1175. d[1] = table[s[1]];
  1176. d[2] = table[s[2]];
  1177. d[3] = table[s[3]];
  1178. d += 4;
  1179. s += 4;
  1180. n -= 4;
  1181. }
  1182. while (n > 0) {
  1183. d[0] = table[s[0]];
  1184. d++;
  1185. s++;
  1186. n--;
  1187. }
  1188. dst += dst_wrap;
  1189. src += src_wrap;
  1190. }
  1191. }
  1192. /* XXX: use generic filter ? */
  1193. /* XXX: in most cases, the sampling position is incorrect */
  1194. /* 4x1 -> 1x1 */
  1195. static void shrink41(uint8_t *dst, int dst_wrap,
  1196. const uint8_t *src, int src_wrap,
  1197. int width, int height)
  1198. {
  1199. int w;
  1200. const uint8_t *s;
  1201. uint8_t *d;
  1202. for(;height > 0; height--) {
  1203. s = src;
  1204. d = dst;
  1205. for(w = width;w > 0; w--) {
  1206. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  1207. s += 4;
  1208. d++;
  1209. }
  1210. src += src_wrap;
  1211. dst += dst_wrap;
  1212. }
  1213. }
  1214. /* 2x1 -> 1x1 */
  1215. static void shrink21(uint8_t *dst, int dst_wrap,
  1216. const uint8_t *src, int src_wrap,
  1217. int width, int height)
  1218. {
  1219. int w;
  1220. const uint8_t *s;
  1221. uint8_t *d;
  1222. for(;height > 0; height--) {
  1223. s = src;
  1224. d = dst;
  1225. for(w = width;w > 0; w--) {
  1226. d[0] = (s[0] + s[1]) >> 1;
  1227. s += 2;
  1228. d++;
  1229. }
  1230. src += src_wrap;
  1231. dst += dst_wrap;
  1232. }
  1233. }
  1234. /* 1x2 -> 1x1 */
  1235. static void shrink12(uint8_t *dst, int dst_wrap,
  1236. const uint8_t *src, int src_wrap,
  1237. int width, int height)
  1238. {
  1239. int w;
  1240. uint8_t *d;
  1241. const uint8_t *s1, *s2;
  1242. for(;height > 0; height--) {
  1243. s1 = src;
  1244. s2 = s1 + src_wrap;
  1245. d = dst;
  1246. for(w = width;w >= 4; w-=4) {
  1247. d[0] = (s1[0] + s2[0]) >> 1;
  1248. d[1] = (s1[1] + s2[1]) >> 1;
  1249. d[2] = (s1[2] + s2[2]) >> 1;
  1250. d[3] = (s1[3] + s2[3]) >> 1;
  1251. s1 += 4;
  1252. s2 += 4;
  1253. d += 4;
  1254. }
  1255. for(;w > 0; w--) {
  1256. d[0] = (s1[0] + s2[0]) >> 1;
  1257. s1++;
  1258. s2++;
  1259. d++;
  1260. }
  1261. src += 2 * src_wrap;
  1262. dst += dst_wrap;
  1263. }
  1264. }
  1265. /* 2x2 -> 1x1 */
  1266. void ff_shrink22(uint8_t *dst, int dst_wrap,
  1267. const uint8_t *src, int src_wrap,
  1268. int width, int height)
  1269. {
  1270. int w;
  1271. const uint8_t *s1, *s2;
  1272. uint8_t *d;
  1273. for(;height > 0; height--) {
  1274. s1 = src;
  1275. s2 = s1 + src_wrap;
  1276. d = dst;
  1277. for(w = width;w >= 4; w-=4) {
  1278. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1279. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  1280. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  1281. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  1282. s1 += 8;
  1283. s2 += 8;
  1284. d += 4;
  1285. }
  1286. for(;w > 0; w--) {
  1287. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1288. s1 += 2;
  1289. s2 += 2;
  1290. d++;
  1291. }
  1292. src += 2 * src_wrap;
  1293. dst += dst_wrap;
  1294. }
  1295. }
  1296. /* 4x4 -> 1x1 */
  1297. void ff_shrink44(uint8_t *dst, int dst_wrap,
  1298. const uint8_t *src, int src_wrap,
  1299. int width, int height)
  1300. {
  1301. int w;
  1302. const uint8_t *s1, *s2, *s3, *s4;
  1303. uint8_t *d;
  1304. for(;height > 0; height--) {
  1305. s1 = src;
  1306. s2 = s1 + src_wrap;
  1307. s3 = s2 + src_wrap;
  1308. s4 = s3 + src_wrap;
  1309. d = dst;
  1310. for(w = width;w > 0; w--) {
  1311. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  1312. s2[0] + s2[1] + s2[2] + s2[3] +
  1313. s3[0] + s3[1] + s3[2] + s3[3] +
  1314. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  1315. s1 += 4;
  1316. s2 += 4;
  1317. s3 += 4;
  1318. s4 += 4;
  1319. d++;
  1320. }
  1321. src += 4 * src_wrap;
  1322. dst += dst_wrap;
  1323. }
  1324. }
  1325. /* 8x8 -> 1x1 */
  1326. void ff_shrink88(uint8_t *dst, int dst_wrap,
  1327. const uint8_t *src, int src_wrap,
  1328. int width, int height)
  1329. {
  1330. int w, i;
  1331. for(;height > 0; height--) {
  1332. for(w = width;w > 0; w--) {
  1333. int tmp=0;
  1334. for(i=0; i<8; i++){
  1335. tmp += src[0] + src[1] + src[2] + src[3] + src[4] + src[5] + src[6] + src[7];
  1336. src += src_wrap;
  1337. }
  1338. *(dst++) = (tmp + 32)>>6;
  1339. src += 8 - 8*src_wrap;
  1340. }
  1341. src += 8*src_wrap - 8*width;
  1342. dst += dst_wrap - width;
  1343. }
  1344. }
  1345. static void grow21_line(uint8_t *dst, const uint8_t *src,
  1346. int width)
  1347. {
  1348. int w;
  1349. const uint8_t *s1;
  1350. uint8_t *d;
  1351. s1 = src;
  1352. d = dst;
  1353. for(w = width;w >= 4; w-=4) {
  1354. d[1] = d[0] = s1[0];
  1355. d[3] = d[2] = s1[1];
  1356. s1 += 2;
  1357. d += 4;
  1358. }
  1359. for(;w >= 2; w -= 2) {
  1360. d[1] = d[0] = s1[0];
  1361. s1 ++;
  1362. d += 2;
  1363. }
  1364. /* only needed if width is not a multiple of two */
  1365. /* XXX: verify that */
  1366. if (w) {
  1367. d[0] = s1[0];
  1368. }
  1369. }
  1370. static void grow41_line(uint8_t *dst, const uint8_t *src,
  1371. int width)
  1372. {
  1373. int w, v;
  1374. const uint8_t *s1;
  1375. uint8_t *d;
  1376. s1 = src;
  1377. d = dst;
  1378. for(w = width;w >= 4; w-=4) {
  1379. v = s1[0];
  1380. d[0] = v;
  1381. d[1] = v;
  1382. d[2] = v;
  1383. d[3] = v;
  1384. s1 ++;
  1385. d += 4;
  1386. }
  1387. }
  1388. /* 1x1 -> 2x1 */
  1389. static void grow21(uint8_t *dst, int dst_wrap,
  1390. const uint8_t *src, int src_wrap,
  1391. int width, int height)
  1392. {
  1393. for(;height > 0; height--) {
  1394. grow21_line(dst, src, width);
  1395. src += src_wrap;
  1396. dst += dst_wrap;
  1397. }
  1398. }
  1399. /* 1x1 -> 2x2 */
  1400. static void grow22(uint8_t *dst, int dst_wrap,
  1401. const uint8_t *src, int src_wrap,
  1402. int width, int height)
  1403. {
  1404. for(;height > 0; height--) {
  1405. grow21_line(dst, src, width);
  1406. if (height%2)
  1407. src += src_wrap;
  1408. dst += dst_wrap;
  1409. }
  1410. }
  1411. /* 1x1 -> 4x1 */
  1412. static void grow41(uint8_t *dst, int dst_wrap,
  1413. const uint8_t *src, int src_wrap,
  1414. int width, int height)
  1415. {
  1416. for(;height > 0; height--) {
  1417. grow41_line(dst, src, width);
  1418. src += src_wrap;
  1419. dst += dst_wrap;
  1420. }
  1421. }
  1422. /* 1x1 -> 4x4 */
  1423. static void grow44(uint8_t *dst, int dst_wrap,
  1424. const uint8_t *src, int src_wrap,
  1425. int width, int height)
  1426. {
  1427. for(;height > 0; height--) {
  1428. grow41_line(dst, src, width);
  1429. if ((height & 3) == 1)
  1430. src += src_wrap;
  1431. dst += dst_wrap;
  1432. }
  1433. }
  1434. /* 1x2 -> 2x1 */
  1435. static void conv411(uint8_t *dst, int dst_wrap,
  1436. const uint8_t *src, int src_wrap,
  1437. int width, int height)
  1438. {
  1439. int w, c;
  1440. const uint8_t *s1, *s2;
  1441. uint8_t *d;
  1442. width>>=1;
  1443. for(;height > 0; height--) {
  1444. s1 = src;
  1445. s2 = src + src_wrap;
  1446. d = dst;
  1447. for(w = width;w > 0; w--) {
  1448. c = (s1[0] + s2[0]) >> 1;
  1449. d[0] = c;
  1450. d[1] = c;
  1451. s1++;
  1452. s2++;
  1453. d += 2;
  1454. }
  1455. src += src_wrap * 2;
  1456. dst += dst_wrap;
  1457. }
  1458. }
  1459. /* XXX: add jpeg quantize code */
  1460. #define TRANSP_INDEX (6*6*6)
  1461. /* this may be slow, but it allows for extensions */
  1462. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1463. {
  1464. return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
  1465. }
  1466. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  1467. {
  1468. uint32_t *pal;
  1469. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  1470. int i, r, g, b;
  1471. pal = (uint32_t *)palette;
  1472. i = 0;
  1473. for(r = 0; r < 6; r++) {
  1474. for(g = 0; g < 6; g++) {
  1475. for(b = 0; b < 6; b++) {
  1476. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  1477. (pal_value[g] << 8) | pal_value[b];
  1478. }
  1479. }
  1480. }
  1481. if (has_alpha)
  1482. pal[i++] = 0;
  1483. while (i < 256)
  1484. pal[i++] = 0xff000000;
  1485. }
  1486. /* copy bit n to bits 0 ... n - 1 */
  1487. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1488. {
  1489. int mask;
  1490. mask = (1 << n) - 1;
  1491. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1492. }
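/*
 * Worked example for the rgb555 RGB_IN macro below: a 5-bit red value of
 * binary 10111 (0x17) lands in bits 3..7 after the ">> (10 - 3)" shift;
 * bitcopy_n(..., 3) keeps those five bits and replicates their lowest bit
 * into bits 0..2, producing 10111111 = 0xbf. That approximates scaling
 * 23/31 to the 0..255 range (ideal value 189) with no multiply or divide.
 */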
  1493. /* rgb555 handling */
  1494. #define RGB_NAME rgb555
  1495. #define RGB_IN(r, g, b, s)\
  1496. {\
  1497. unsigned int v = ((const uint16_t *)(s))[0];\
  1498. r = bitcopy_n(v >> (10 - 3), 3);\
  1499. g = bitcopy_n(v >> (5 - 3), 3);\
  1500. b = bitcopy_n(v << 3, 3);\
  1501. }
  1502. #define RGB_OUT(d, r, g, b)\
  1503. {\
  1504. ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3);\
  1505. }
  1506. #define BPP 2
  1507. #include "imgconvert_template.h"
  1508. /* rgb565 handling */
  1509. #define RGB_NAME rgb565
  1510. #define RGB_IN(r, g, b, s)\
  1511. {\
  1512. unsigned int v = ((const uint16_t *)(s))[0];\
  1513. r = bitcopy_n(v >> (11 - 3), 3);\
  1514. g = bitcopy_n(v >> (5 - 2), 2);\
  1515. b = bitcopy_n(v << 3, 3);\
  1516. }
  1517. #define RGB_OUT(d, r, g, b)\
  1518. {\
  1519. ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
  1520. }
  1521. #define BPP 2
  1522. #include "imgconvert_template.h"
  1523. /* bgr24 handling */
  1524. #define RGB_NAME bgr24
  1525. #define RGB_IN(r, g, b, s)\
  1526. {\
  1527. b = (s)[0];\
  1528. g = (s)[1];\
  1529. r = (s)[2];\
  1530. }
  1531. #define RGB_OUT(d, r, g, b)\
  1532. {\
  1533. (d)[0] = b;\
  1534. (d)[1] = g;\
  1535. (d)[2] = r;\
  1536. }
  1537. #define BPP 3
  1538. #include "imgconvert_template.h"
  1539. #undef RGB_IN
  1540. #undef RGB_OUT
  1541. #undef BPP
  1542. /* rgb24 handling */
  1543. #define RGB_NAME rgb24
  1544. #define FMT_RGB24
  1545. #define RGB_IN(r, g, b, s)\
  1546. {\
  1547. r = (s)[0];\
  1548. g = (s)[1];\
  1549. b = (s)[2];\
  1550. }
  1551. #define RGB_OUT(d, r, g, b)\
  1552. {\
  1553. (d)[0] = r;\
  1554. (d)[1] = g;\
  1555. (d)[2] = b;\
  1556. }
  1557. #define BPP 3
  1558. #include "imgconvert_template.h"
  1559. /* rgba32 handling */
  1560. #define RGB_NAME rgba32
  1561. #define FMT_RGBA32
  1562. #define RGB_IN(r, g, b, s)\
  1563. {\
  1564. unsigned int v = ((const uint32_t *)(s))[0];\
  1565. r = (v >> 16) & 0xff;\
  1566. g = (v >> 8) & 0xff;\
  1567. b = v & 0xff;\
  1568. }
  1569. #define RGBA_IN(r, g, b, a, s)\
  1570. {\
  1571. unsigned int v = ((const uint32_t *)(s))[0];\
  1572. a = (v >> 24) & 0xff;\
  1573. r = (v >> 16) & 0xff;\
  1574. g = (v >> 8) & 0xff;\
  1575. b = v & 0xff;\
  1576. }
  1577. #define RGBA_OUT(d, r, g, b, a)\
  1578. {\
  1579. ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
  1580. }
  1581. #define BPP 4
  1582. #include "imgconvert_template.h"
  1583. static void mono_to_gray(AVPicture *dst, const AVPicture *src,
  1584. int width, int height, int xor_mask)
  1585. {
  1586. const unsigned char *p;
  1587. unsigned char *q;
  1588. int v, dst_wrap, src_wrap;
  1589. int y, w;
  1590. p = src->data[0];
  1591. src_wrap = src->linesize[0] - ((width + 7) >> 3);
  1592. q = dst->data[0];
  1593. dst_wrap = dst->linesize[0] - width;
  1594. for(y=0;y<height;y++) {
  1595. w = width;
  1596. while (w >= 8) {
  1597. v = *p++ ^ xor_mask;
  1598. q[0] = -(v >> 7);
  1599. q[1] = -((v >> 6) & 1);
  1600. q[2] = -((v >> 5) & 1);
  1601. q[3] = -((v >> 4) & 1);
  1602. q[4] = -((v >> 3) & 1);
  1603. q[5] = -((v >> 2) & 1);
  1604. q[6] = -((v >> 1) & 1);
  1605. q[7] = -((v >> 0) & 1);
  1606. w -= 8;
  1607. q += 8;
  1608. }
  1609. if (w > 0) {
  1610. v = *p++ ^ xor_mask;
  1611. do {
  1612. q[0] = -((v >> 7) & 1);
  1613. q++;
  1614. v <<= 1;
  1615. } while (--w);
  1616. }
  1617. p += src_wrap;
  1618. q += dst_wrap;
  1619. }
  1620. }
  1621. static void monowhite_to_gray(AVPicture *dst, const AVPicture *src,
  1622. int width, int height)
  1623. {
  1624. mono_to_gray(dst, src, width, height, 0xff);
  1625. }
  1626. static void monoblack_to_gray(AVPicture *dst, const AVPicture *src,
  1627. int width, int height)
  1628. {
  1629. mono_to_gray(dst, src, width, height, 0x00);
  1630. }
  1631. static void gray_to_mono(AVPicture *dst, const AVPicture *src,
  1632. int width, int height, int xor_mask)
  1633. {
  1634. int n;
  1635. const uint8_t *s;
  1636. uint8_t *d;
  1637. int j, b, v, n1, src_wrap, dst_wrap, y;
  1638. s = src->data[0];
  1639. src_wrap = src->linesize[0] - width;
  1640. d = dst->data[0];
  1641. dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
  1642. for(y=0;y<height;y++) {
  1643. n = width;
  1644. while (n >= 8) {
  1645. v = 0;
  1646. for(j=0;j<8;j++) {
  1647. b = s[0];
  1648. s++;
  1649. v = (v << 1) | (b >> 7);
  1650. }
  1651. d[0] = v ^ xor_mask;
  1652. d++;
  1653. n -= 8;
  1654. }
  1655. if (n > 0) {
  1656. n1 = n;
  1657. v = 0;
  1658. while (n > 0) {
  1659. b = s[0];
  1660. s++;
  1661. v = (v << 1) | (b >> 7);
  1662. n--;
  1663. }
  1664. d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
  1665. d++;
  1666. }
  1667. s += src_wrap;
  1668. d += dst_wrap;
  1669. }
  1670. }
  1671. static void gray_to_monowhite(AVPicture *dst, const AVPicture *src,
  1672. int width, int height)
  1673. {
  1674. gray_to_mono(dst, src, width, height, 0xff);
  1675. }
  1676. static void gray_to_monoblack(AVPicture *dst, const AVPicture *src,
  1677. int width, int height)
  1678. {
  1679. gray_to_mono(dst, src, width, height, 0x00);
  1680. }
  1681. static void gray_to_gray16(AVPicture *dst, const AVPicture *src,
  1682. int width, int height)
  1683. {
  1684. int x, y, src_wrap, dst_wrap;
  1685. uint8_t *s, *d;
  1686. s = src->data[0];
  1687. src_wrap = src->linesize[0] - width;
  1688. d = dst->data[0];
  1689. dst_wrap = dst->linesize[0] - width * 2;
  1690. for(y=0; y<height; y++){
  1691. for(x=0; x<width; x++){
  1692. *d++ = *s;
  1693. *d++ = *s++;
  1694. }
  1695. s += src_wrap;
  1696. d += dst_wrap;
  1697. }
  1698. }
  1699. static void gray16_to_gray(AVPicture *dst, const AVPicture *src,
  1700. int width, int height)
  1701. {
  1702. int x, y, src_wrap, dst_wrap;
  1703. uint8_t *s, *d;
  1704. s = src->data[0];
  1705. src_wrap = src->linesize[0] - width * 2;
  1706. d = dst->data[0];
  1707. dst_wrap = dst->linesize[0] - width;
  1708. for(y=0; y<height; y++){
  1709. for(x=0; x<width; x++){
  1710. *d++ = *s;
  1711. s += 2;
  1712. }
  1713. s += src_wrap;
  1714. d += dst_wrap;
  1715. }
  1716. }
  1717. static void gray16be_to_gray(AVPicture *dst, const AVPicture *src,
  1718. int width, int height)
  1719. {
  1720. gray16_to_gray(dst, src, width, height);
  1721. }
  1722. static void gray16le_to_gray(AVPicture *dst, const AVPicture *src,
  1723. int width, int height)
  1724. {
  1725. AVPicture s = *src; s.data[0]++; gray16_to_gray(dst, &s, width, height); /* read the high byte of each little-endian sample */
  1726. }
  1727. static void gray16_to_gray16(AVPicture *dst, const AVPicture *src,
  1728. int width, int height)
  1729. {
  1730. int x, y, src_wrap, dst_wrap;
  1731. uint16_t *s, *d;
  1732. s = src->data[0];
  1733. src_wrap = (src->linesize[0] - width * 2)/2;
  1734. d = dst->data[0];
  1735. dst_wrap = (dst->linesize[0] - width * 2)/2;
  1736. for(y=0; y<height; y++){
  1737. for(x=0; x<width; x++){
  1738. *d++ = bswap_16(*s++);
  1739. }
  1740. s += src_wrap;
  1741. d += dst_wrap;
  1742. }
  1743. }
  1744. typedef struct ConvertEntry {
  1745. void (*convert)(AVPicture *dst,
  1746. const AVPicture *src, int width, int height);
  1747. } ConvertEntry;
  1748. /* Add each new conversion function in this table. In order to be able
  1749. to convert from any format to any format, the following constraints
  1750. must be satisfied:
  1751. - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
  1752. - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
  1753. - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGB32
  1754. - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
  1755. PIX_FMT_RGB24.
  1756. - PIX_FMT_YUYV422 must convert to and from PIX_FMT_YUV422P.
  1757. The other conversion functions are just optimisations for common cases.
  1758. */
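/*
 * Illustration of why these constraints are enough (presumably this is
 * how the generic path in img_convert(), further down in this file,
 * chains conversions): there is no direct rgb555 -> yuv444p entry below,
 * but rgb555_to_rgb24 and rgb24_to_yuv444p both exist, so the conversion
 * can go through PIX_FMT_RGB24 as an intermediate format.
 */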
  1759. static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
  1760. [PIX_FMT_YUV420P] = {
  1761. [PIX_FMT_YUYV422] = {
  1762. .convert = yuv420p_to_yuv422,
  1763. },
  1764. [PIX_FMT_RGB555] = {
  1765. .convert = yuv420p_to_rgb555
  1766. },
  1767. [PIX_FMT_RGB565] = {
  1768. .convert = yuv420p_to_rgb565
  1769. },
  1770. [PIX_FMT_BGR24] = {
  1771. .convert = yuv420p_to_bgr24
  1772. },
  1773. [PIX_FMT_RGB24] = {
  1774. .convert = yuv420p_to_rgb24
  1775. },
  1776. [PIX_FMT_RGB32] = {
  1777. .convert = yuv420p_to_rgba32
  1778. },
  1779. [PIX_FMT_UYVY422] = {
  1780. .convert = yuv420p_to_uyvy422,
  1781. },
  1782. },
  1783. [PIX_FMT_YUV422P] = {
  1784. [PIX_FMT_YUYV422] = {
  1785. .convert = yuv422p_to_yuv422,
  1786. },
  1787. [PIX_FMT_UYVY422] = {
  1788. .convert = yuv422p_to_uyvy422,
  1789. },
  1790. },
  1791. [PIX_FMT_YUV444P] = {
  1792. [PIX_FMT_RGB24] = {
  1793. .convert = yuv444p_to_rgb24
  1794. },
  1795. },
  1796. [PIX_FMT_YUVJ420P] = {
  1797. [PIX_FMT_RGB555] = {
  1798. .convert = yuvj420p_to_rgb555
  1799. },
  1800. [PIX_FMT_RGB565] = {
  1801. .convert = yuvj420p_to_rgb565
  1802. },
  1803. [PIX_FMT_BGR24] = {
  1804. .convert = yuvj420p_to_bgr24
  1805. },
  1806. [PIX_FMT_RGB24] = {
  1807. .convert = yuvj420p_to_rgb24
  1808. },
  1809. [PIX_FMT_RGB32] = {
  1810. .convert = yuvj420p_to_rgba32
  1811. },
  1812. },
  1813. [PIX_FMT_YUVJ444P] = {
  1814. [PIX_FMT_RGB24] = {
  1815. .convert = yuvj444p_to_rgb24
  1816. },
  1817. },
  1818. [PIX_FMT_YUYV422] = {
  1819. [PIX_FMT_YUV420P] = {
  1820. .convert = yuv422_to_yuv420p,
  1821. },
  1822. [PIX_FMT_YUV422P] = {
  1823. .convert = yuv422_to_yuv422p,
  1824. },
  1825. },
  1826. [PIX_FMT_UYVY422] = {
  1827. [PIX_FMT_YUV420P] = {
  1828. .convert = uyvy422_to_yuv420p,
  1829. },
  1830. [PIX_FMT_YUV422P] = {
  1831. .convert = uyvy422_to_yuv422p,
  1832. },
  1833. },
  1834. [PIX_FMT_RGB24] = {
  1835. [PIX_FMT_YUV420P] = {
  1836. .convert = rgb24_to_yuv420p
  1837. },
  1838. [PIX_FMT_RGB565] = {
  1839. .convert = rgb24_to_rgb565
  1840. },
  1841. [PIX_FMT_RGB555] = {
  1842. .convert = rgb24_to_rgb555
  1843. },
  1844. [PIX_FMT_RGB32] = {
  1845. .convert = rgb24_to_rgba32
  1846. },
  1847. [PIX_FMT_BGR24] = {
  1848. .convert = rgb24_to_bgr24
  1849. },
  1850. [PIX_FMT_GRAY8] = {
  1851. .convert = rgb24_to_gray
  1852. },
  1853. [PIX_FMT_PAL8] = {
  1854. .convert = rgb24_to_pal8
  1855. },
  1856. [PIX_FMT_YUV444P] = {
  1857. .convert = rgb24_to_yuv444p
  1858. },
  1859. [PIX_FMT_YUVJ420P] = {
  1860. .convert = rgb24_to_yuvj420p
  1861. },
  1862. [PIX_FMT_YUVJ444P] = {
  1863. .convert = rgb24_to_yuvj444p
  1864. },
  1865. },
  1866. [PIX_FMT_RGB32] = {
  1867. [PIX_FMT_RGB24] = {
  1868. .convert = rgba32_to_rgb24
  1869. },
  1870. [PIX_FMT_BGR24] = {
  1871. .convert = rgba32_to_bgr24
  1872. },
  1873. [PIX_FMT_RGB565] = {
  1874. .convert = rgba32_to_rgb565
  1875. },
  1876. [PIX_FMT_RGB555] = {
  1877. .convert = rgba32_to_rgb555
  1878. },
  1879. [PIX_FMT_PAL8] = {
  1880. .convert = rgba32_to_pal8
  1881. },
  1882. [PIX_FMT_YUV420P] = {
  1883. .convert = rgba32_to_yuv420p
  1884. },
  1885. [PIX_FMT_GRAY8] = {
  1886. .convert = rgba32_to_gray
  1887. },
  1888. },
  1889. [PIX_FMT_BGR24] = {
  1890. [PIX_FMT_RGB32] = {
  1891. .convert = bgr24_to_rgba32
  1892. },
  1893. [PIX_FMT_RGB24] = {
  1894. .convert = bgr24_to_rgb24
  1895. },
  1896. [PIX_FMT_YUV420P] = {
  1897. .convert = bgr24_to_yuv420p
  1898. },
  1899. [PIX_FMT_GRAY8] = {
  1900. .convert = bgr24_to_gray
  1901. },
  1902. },
  1903. [PIX_FMT_RGB555] = {
  1904. [PIX_FMT_RGB24] = {
  1905. .convert = rgb555_to_rgb24
  1906. },
  1907. [PIX_FMT_RGB32] = {
  1908. .convert = rgb555_to_rgba32
  1909. },
  1910. [PIX_FMT_YUV420P] = {
  1911. .convert = rgb555_to_yuv420p
  1912. },
  1913. [PIX_FMT_GRAY8] = {
  1914. .convert = rgb555_to_gray
  1915. },
  1916. },
  1917. [PIX_FMT_RGB565] = {
  1918. [PIX_FMT_RGB32] = {
  1919. .convert = rgb565_to_rgba32
  1920. },
  1921. [PIX_FMT_RGB24] = {
  1922. .convert = rgb565_to_rgb24
  1923. },
  1924. [PIX_FMT_YUV420P] = {
  1925. .convert = rgb565_to_yuv420p
  1926. },
  1927. [PIX_FMT_GRAY8] = {
  1928. .convert = rgb565_to_gray
  1929. },
  1930. },
  1931. [PIX_FMT_GRAY16BE] = {
  1932. [PIX_FMT_GRAY8] = {
  1933. .convert = gray16be_to_gray
  1934. },
  1935. [PIX_FMT_GRAY16LE] = {
  1936. .convert = gray16_to_gray16
  1937. },
  1938. },
  1939. [PIX_FMT_GRAY16LE] = {
  1940. [PIX_FMT_GRAY8] = {
  1941. .convert = gray16le_to_gray
  1942. },
  1943. [PIX_FMT_GRAY16BE] = {
  1944. .convert = gray16_to_gray16
  1945. },
  1946. },
  1947. [PIX_FMT_GRAY8] = {
  1948. [PIX_FMT_RGB555] = {
  1949. .convert = gray_to_rgb555
  1950. },
  1951. [PIX_FMT_RGB565] = {
  1952. .convert = gray_to_rgb565
  1953. },
  1954. [PIX_FMT_RGB24] = {
  1955. .convert = gray_to_rgb24
  1956. },
  1957. [PIX_FMT_BGR24] = {
  1958. .convert = gray_to_bgr24
  1959. },
  1960. [PIX_FMT_RGB32] = {
  1961. .convert = gray_to_rgba32
  1962. },
  1963. [PIX_FMT_MONOWHITE] = {
  1964. .convert = gray_to_monowhite
  1965. },
  1966. [PIX_FMT_MONOBLACK] = {
  1967. .convert = gray_to_monoblack
  1968. },
  1969. [PIX_FMT_GRAY16LE] = {
  1970. .convert = gray_to_gray16
  1971. },
  1972. [PIX_FMT_GRAY16BE] = {
  1973. .convert = gray_to_gray16
  1974. },
  1975. },
  1976. [PIX_FMT_MONOWHITE] = {
  1977. [PIX_FMT_GRAY8] = {
  1978. .convert = monowhite_to_gray
  1979. },
  1980. },
  1981. [PIX_FMT_MONOBLACK] = {
  1982. [PIX_FMT_GRAY8] = {
  1983. .convert = monoblack_to_gray
  1984. },
  1985. },
  1986. [PIX_FMT_PAL8] = {
  1987. [PIX_FMT_RGB555] = {
  1988. .convert = pal8_to_rgb555
  1989. },
  1990. [PIX_FMT_RGB565] = {
  1991. .convert = pal8_to_rgb565
  1992. },
  1993. [PIX_FMT_BGR24] = {
  1994. .convert = pal8_to_bgr24
  1995. },
  1996. [PIX_FMT_RGB24] = {
  1997. .convert = pal8_to_rgb24
  1998. },
  1999. [PIX_FMT_RGB32] = {
  2000. .convert = pal8_to_rgba32
  2001. },
  2002. },
  2003. [PIX_FMT_UYYVYY411] = {
  2004. [PIX_FMT_YUV411P] = {
  2005. .convert = uyvy411_to_yuv411p,
  2006. },
  2007. },
  2008. };
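/* Illustrative sketch (not part of the library API): probe whether a direct
 * conversion routine exists in convert_table for a given format pair. The
 * table is indexed [src][dst]; a NULL .convert means img_convert() has to go
 * through an intermediate format instead. */
static inline int has_direct_convert(int src_pix_fmt, int dst_pix_fmt)
{
    if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
        dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
        return 0;
    return convert_table[src_pix_fmt][dst_pix_fmt].convert != NULL;
}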
  2009. int avpicture_alloc(AVPicture *picture,
  2010. int pix_fmt, int width, int height)
  2011. {
  2012. int size;
  2013. void *ptr;
  2014. size = avpicture_get_size(pix_fmt, width, height);
  2015. if(size<0)
  2016. goto fail;
  2017. ptr = av_malloc(size);
  2018. if (!ptr)
  2019. goto fail;
  2020. avpicture_fill(picture, ptr, pix_fmt, width, height);
  2021. return 0;
  2022. fail:
  2023. memset(picture, 0, sizeof(AVPicture));
  2024. return -1;
  2025. }
  2026. void avpicture_free(AVPicture *picture)
  2027. {
  2028. av_free(picture->data[0]);
  2029. }
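/* Illustrative sketch (not part of the library API): allocate a scratch
 * picture, hand it to a caller-supplied processing step, then release it.
 * process_picture is a hypothetical callback; the 0 / -1 return convention
 * mirrors avpicture_alloc() above. */
static inline int example_with_scratch_picture(int pix_fmt, int width, int height,
                                               void (*process_picture)(AVPicture *pic))
{
    AVPicture pic;
    if (avpicture_alloc(&pic, pix_fmt, width, height) < 0)
        return -1;
    process_picture(&pic);
    avpicture_free(&pic); /* data[0] owns the whole buffer */
    return 0;
}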
  2030. /* return true if yuv planar */
  2031. static inline int is_yuv_planar(const PixFmtInfo *ps)
  2032. {
  2033. return (ps->color_type == FF_COLOR_YUV ||
  2034. ps->color_type == FF_COLOR_YUV_JPEG) &&
  2035. ps->pixel_type == FF_PIXEL_PLANAR;
  2036. }
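/* is_yuv_planar(): for example yuv420p, yuv422p, yuv444p and yuvj420p
 * qualify, while packed formats such as yuyv422 and non-YUV formats such
 * as gray8 or rgb24 do not. */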
  2037. /**
  2038. * Crop image top and left side
  2039. */
  2040. int img_crop(AVPicture *dst, const AVPicture *src,
  2041. int pix_fmt, int top_band, int left_band)
  2042. {
  2043. int y_shift;
  2044. int x_shift;
  2045. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || !is_yuv_planar(&pix_fmt_info[pix_fmt]))
  2046. return -1;
  2047. y_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  2048. x_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  2049. dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
  2050. dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift);
  2051. dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift);
  2052. dst->linesize[0] = src->linesize[0];
  2053. dst->linesize[1] = src->linesize[1];
  2054. dst->linesize[2] = src->linesize[2];
  2055. return 0;
  2056. }
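/* Illustrative sketch (not part of the library API): crop 16 lines from the
 * top and 8 pixels from the left of a YUV420P picture. img_crop() copies no
 * pixel data; dst ends up pointing into src's buffer with the same
 * linesizes, so src must stay valid for as long as dst is used. With 4:2:0
 * the top/left offsets should be even so chroma stays aligned with luma. */
static inline int example_crop_yuv420p(AVPicture *dst, const AVPicture *src)
{
    return img_crop(dst, src, PIX_FMT_YUV420P, 16, 8);
}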
  2057. /**
  2058. * Pad image
  2059. */
  2060. int img_pad(AVPicture *dst, const AVPicture *src, int height, int width,
  2061. int pix_fmt, int padtop, int padbottom, int padleft, int padright,
  2062. int *color)
  2063. {
  2064. uint8_t *optr;
  2065. int y_shift;
  2066. int x_shift;
  2067. int yheight;
  2068. int i, y;
  2069. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB ||
  2070. !is_yuv_planar(&pix_fmt_info[pix_fmt])) return -1;
  2071. for (i = 0; i < 3; i++) {
  2072. x_shift = i ? pix_fmt_info[pix_fmt].x_chroma_shift : 0;
  2073. y_shift = i ? pix_fmt_info[pix_fmt].y_chroma_shift : 0;
  2074. if (padtop || padleft) {
  2075. memset(dst->data[i], color[i],
  2076. dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift));
  2077. }
  2078. if (padleft || padright) {
  2079. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  2080. (dst->linesize[i] - (padright >> x_shift));
  2081. yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
  2082. for (y = 0; y < yheight; y++) {
  2083. memset(optr, color[i], (padleft + padright) >> x_shift);
  2084. optr += dst->linesize[i];
  2085. }
  2086. }
  2087. if (src) { /* first line */
  2088. uint8_t *iptr = src->data[i];
  2089. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  2090. (padleft >> x_shift);
  2091. memcpy(optr, iptr, src->linesize[i]);
  2092. iptr += src->linesize[i];
  2093. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  2094. (dst->linesize[i] - (padright >> x_shift));
  2095. yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
  2096. for (y = 0; y < yheight; y++) {
  2097. memset(optr, color[i], (padleft + padright) >> x_shift);
  2098. memcpy(optr + ((padleft + padright) >> x_shift), iptr,
  2099. src->linesize[i]);
  2100. iptr += src->linesize[i];
  2101. optr += dst->linesize[i];
  2102. }
  2103. }
  2104. if (padbottom || padright) {
  2105. optr = dst->data[i] + dst->linesize[i] *
  2106. ((height - padbottom) >> y_shift) - (padright >> x_shift);
  2107. memset(optr, color[i],dst->linesize[i] *
  2108. (padbottom >> y_shift) + (padright >> x_shift));
  2109. }
  2110. }
  2111. return 0;
  2112. }
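/* Illustrative sketch (not part of the library API): draw a 16-pixel black
 * border around a YUV420P image. 'color' holds one fill value per plane and
 * {16, 128, 128} is black in CCIR-range YUV. The width/height passed to
 * img_pad() are those of the padded destination, which must already be
 * allocated (e.g. with avpicture_alloc()) at that padded size. */
static inline int example_pad_black_yuv420p(AVPicture *dst, const AVPicture *src,
                                            int padded_width, int padded_height)
{
    int black[3] = { 16, 128, 128 };
    return img_pad(dst, src, padded_height, padded_width, PIX_FMT_YUV420P,
                   16, 16, 16, 16, black);
}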
  2113. #ifndef CONFIG_SWSCALER
  2114. /* XXX: always use linesize. Return -1 if not supported */
  2115. int img_convert(AVPicture *dst, int dst_pix_fmt,
  2116. const AVPicture *src, int src_pix_fmt,
  2117. int src_width, int src_height)
  2118. {
  2119. static int inited;
  2120. int i, ret, dst_width, dst_height, int_pix_fmt;
  2121. const PixFmtInfo *src_pix, *dst_pix;
  2122. const ConvertEntry *ce;
  2123. AVPicture tmp1, *tmp = &tmp1;
  2124. if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
  2125. dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
  2126. return -1;
  2127. if (src_width <= 0 || src_height <= 0)
  2128. return 0;
  2129. if (!inited) {
  2130. inited = 1;
  2131. img_convert_init();
  2132. }
  2133. dst_width = src_width;
  2134. dst_height = src_height;
  2135. dst_pix = &pix_fmt_info[dst_pix_fmt];
  2136. src_pix = &pix_fmt_info[src_pix_fmt];
  2137. if (src_pix_fmt == dst_pix_fmt) {
  2138. /* no conversion needed: just copy */
  2139. img_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
  2140. return 0;
  2141. }
  2142. ce = &convert_table[src_pix_fmt][dst_pix_fmt];
  2143. if (ce->convert) {
  2144. /* specific conversion routine */
  2145. ce->convert(dst, src, dst_width, dst_height);
  2146. return 0;
  2147. }
  2148. /* gray to YUV */
  2149. if (is_yuv_planar(dst_pix) &&
  2150. src_pix_fmt == PIX_FMT_GRAY8) {
  2151. int w, h, y;
  2152. uint8_t *d;
  2153. if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
  2154. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2155. src->data[0], src->linesize[0],
  2156. dst_width, dst_height);
  2157. } else {
  2158. img_apply_table(dst->data[0], dst->linesize[0],
  2159. src->data[0], src->linesize[0],
  2160. dst_width, dst_height,
  2161. y_jpeg_to_ccir);
  2162. }
  2163. /* fill U and V with 128 */
  2164. w = dst_width;
  2165. h = dst_height;
  2166. w >>= dst_pix->x_chroma_shift;
  2167. h >>= dst_pix->y_chroma_shift;
  2168. for(i = 1; i <= 2; i++) {
  2169. d = dst->data[i];
  2170. for(y = 0; y< h; y++) {
  2171. memset(d, 128, w);
  2172. d += dst->linesize[i];
  2173. }
  2174. }
  2175. return 0;
  2176. }
  2177. /* YUV to gray */
  2178. if (is_yuv_planar(src_pix) &&
  2179. dst_pix_fmt == PIX_FMT_GRAY8) {
  2180. if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
  2181. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2182. src->data[0], src->linesize[0],
  2183. dst_width, dst_height);
  2184. } else {
  2185. img_apply_table(dst->data[0], dst->linesize[0],
  2186. src->data[0], src->linesize[0],
  2187. dst_width, dst_height,
  2188. y_ccir_to_jpeg);
  2189. }
  2190. return 0;
  2191. }
  2192. /* YUV to YUV planar */
  2193. if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
  2194. int x_shift, y_shift, w, h, xy_shift;
  2195. void (*resize_func)(uint8_t *dst, int dst_wrap,
  2196. const uint8_t *src, int src_wrap,
  2197. int width, int height);
2198. /* compute the chroma plane size for the more subsampled of the two formats */
  2199. w = dst_width;
  2200. h = dst_height;
  2201. if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
  2202. w >>= dst_pix->x_chroma_shift;
  2203. else
  2204. w >>= src_pix->x_chroma_shift;
  2205. if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
  2206. h >>= dst_pix->y_chroma_shift;
  2207. else
  2208. h >>= src_pix->y_chroma_shift;
  2209. x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
  2210. y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
  2211. xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
2212. /* conversion filters must exist at least to and from the
2213. YUV444 format */
  2214. switch(xy_shift) {
  2215. case 0x00:
  2216. resize_func = ff_img_copy_plane;
  2217. break;
  2218. case 0x10:
  2219. resize_func = shrink21;
  2220. break;
  2221. case 0x20:
  2222. resize_func = shrink41;
  2223. break;
  2224. case 0x01:
  2225. resize_func = shrink12;
  2226. break;
  2227. case 0x11:
  2228. resize_func = ff_shrink22;
  2229. break;
  2230. case 0x22:
  2231. resize_func = ff_shrink44;
  2232. break;
  2233. case 0xf0:
  2234. resize_func = grow21;
  2235. break;
  2236. case 0xe0:
  2237. resize_func = grow41;
  2238. break;
  2239. case 0xff:
  2240. resize_func = grow22;
  2241. break;
  2242. case 0xee:
  2243. resize_func = grow44;
  2244. break;
  2245. case 0xf1:
  2246. resize_func = conv411;
  2247. break;
  2248. default:
  2249. /* currently not handled */
  2250. goto no_chroma_filter;
  2251. }
  2252. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2253. src->data[0], src->linesize[0],
  2254. dst_width, dst_height);
  2255. for(i = 1;i <= 2; i++)
  2256. resize_func(dst->data[i], dst->linesize[i],
  2257. src->data[i], src->linesize[i],
  2258. dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
  2259. /* if yuv color space conversion is needed, we do it here on
  2260. the destination image */
  2261. if (dst_pix->color_type != src_pix->color_type) {
  2262. const uint8_t *y_table, *c_table;
  2263. if (dst_pix->color_type == FF_COLOR_YUV) {
  2264. y_table = y_jpeg_to_ccir;
  2265. c_table = c_jpeg_to_ccir;
  2266. } else {
  2267. y_table = y_ccir_to_jpeg;
  2268. c_table = c_ccir_to_jpeg;
  2269. }
  2270. img_apply_table(dst->data[0], dst->linesize[0],
  2271. dst->data[0], dst->linesize[0],
  2272. dst_width, dst_height,
  2273. y_table);
  2274. for(i = 1;i <= 2; i++)
  2275. img_apply_table(dst->data[i], dst->linesize[i],
  2276. dst->data[i], dst->linesize[i],
  2277. dst_width>>dst_pix->x_chroma_shift,
  2278. dst_height>>dst_pix->y_chroma_shift,
  2279. c_table);
  2280. }
  2281. return 0;
  2282. }
  2283. no_chroma_filter:
  2284. /* try to use an intermediate format */
  2285. if (src_pix_fmt == PIX_FMT_YUYV422 ||
  2286. dst_pix_fmt == PIX_FMT_YUYV422) {
  2287. /* specific case: convert to YUV422P first */
  2288. int_pix_fmt = PIX_FMT_YUV422P;
  2289. } else if (src_pix_fmt == PIX_FMT_UYVY422 ||
  2290. dst_pix_fmt == PIX_FMT_UYVY422) {
  2291. /* specific case: convert to YUV422P first */
  2292. int_pix_fmt = PIX_FMT_YUV422P;
  2293. } else if (src_pix_fmt == PIX_FMT_UYYVYY411 ||
  2294. dst_pix_fmt == PIX_FMT_UYYVYY411) {
  2295. /* specific case: convert to YUV411P first */
  2296. int_pix_fmt = PIX_FMT_YUV411P;
  2297. } else if ((src_pix->color_type == FF_COLOR_GRAY &&
  2298. src_pix_fmt != PIX_FMT_GRAY8) ||
  2299. (dst_pix->color_type == FF_COLOR_GRAY &&
  2300. dst_pix_fmt != PIX_FMT_GRAY8)) {
  2301. /* gray8 is the normalized format */
  2302. int_pix_fmt = PIX_FMT_GRAY8;
  2303. } else if ((is_yuv_planar(src_pix) &&
  2304. src_pix_fmt != PIX_FMT_YUV444P &&
  2305. src_pix_fmt != PIX_FMT_YUVJ444P)) {
  2306. /* yuv444 is the normalized format */
  2307. if (src_pix->color_type == FF_COLOR_YUV_JPEG)
  2308. int_pix_fmt = PIX_FMT_YUVJ444P;
  2309. else
  2310. int_pix_fmt = PIX_FMT_YUV444P;
  2311. } else if ((is_yuv_planar(dst_pix) &&
  2312. dst_pix_fmt != PIX_FMT_YUV444P &&
  2313. dst_pix_fmt != PIX_FMT_YUVJ444P)) {
  2314. /* yuv444 is the normalized format */
  2315. if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
  2316. int_pix_fmt = PIX_FMT_YUVJ444P;
  2317. else
  2318. int_pix_fmt = PIX_FMT_YUV444P;
  2319. } else {
  2320. /* the two formats are rgb or gray8 or yuv[j]444p */
  2321. if (src_pix->is_alpha && dst_pix->is_alpha)
  2322. int_pix_fmt = PIX_FMT_RGB32;
  2323. else
  2324. int_pix_fmt = PIX_FMT_RGB24;
  2325. }
  2326. if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2327. return -1;
  2328. ret = -1;
  2329. if (img_convert(tmp, int_pix_fmt,
  2330. src, src_pix_fmt, src_width, src_height) < 0)
  2331. goto fail1;
  2332. if (img_convert(dst, dst_pix_fmt,
  2333. tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2334. goto fail1;
  2335. ret = 0;
  2336. fail1:
  2337. avpicture_free(tmp);
  2338. return ret;
  2339. }
  2340. #endif
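/* Illustrative sketch (not part of the library API): convert a decoded
 * YUV420P frame to RGB24 in a freshly allocated picture. img_convert()
 * uses a direct routine from convert_table when one exists and otherwise
 * goes through an intermediate format; the caller only sees 0 or -1. */
static inline int example_yuv420p_to_rgb24(AVPicture *rgb, const AVPicture *yuv,
                                           int width, int height)
{
    if (avpicture_alloc(rgb, PIX_FMT_RGB24, width, height) < 0)
        return -1;
    if (img_convert(rgb, PIX_FMT_RGB24, yuv, PIX_FMT_YUV420P, width, height) < 0) {
        avpicture_free(rgb);
        return -1;
    }
    return 0;
}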
2341. /* NOTE: we scan all the pixels to get exact information */
  2342. static int get_alpha_info_pal8(const AVPicture *src, int width, int height)
  2343. {
  2344. const unsigned char *p;
  2345. int src_wrap, ret, x, y;
  2346. unsigned int a;
  2347. uint32_t *palette = (uint32_t *)src->data[1];
  2348. p = src->data[0];
  2349. src_wrap = src->linesize[0] - width;
  2350. ret = 0;
  2351. for(y=0;y<height;y++) {
  2352. for(x=0;x<width;x++) {
  2353. a = palette[p[0]] >> 24;
  2354. if (a == 0x00) {
  2355. ret |= FF_ALPHA_TRANSP;
  2356. } else if (a != 0xff) {
  2357. ret |= FF_ALPHA_SEMI_TRANSP;
  2358. }
  2359. p++;
  2360. }
  2361. p += src_wrap;
  2362. }
  2363. return ret;
  2364. }
  2365. /**
  2366. * Tell if an image really has transparent alpha values.
2367. * @return OR'ed mask of FF_ALPHA_xxx constants
  2368. */
  2369. int img_get_alpha_info(const AVPicture *src,
  2370. int pix_fmt, int width, int height)
  2371. {
2372. const PixFmtInfo *pf;
  2373. int ret;
  2374. pf = &pix_fmt_info[pix_fmt];
  2375. /* no alpha can be represented in format */
  2376. if (!pf->is_alpha)
  2377. return 0;
  2378. switch(pix_fmt) {
  2379. case PIX_FMT_RGB32:
  2380. ret = get_alpha_info_rgba32(src, width, height);
  2381. break;
  2382. case PIX_FMT_PAL8:
  2383. ret = get_alpha_info_pal8(src, width, height);
  2384. break;
  2385. default:
2386. /* we do not know, so report both kinds of transparency */
  2387. ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
  2388. break;
  2389. }
  2390. return ret;
  2391. }
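/* Illustrative sketch (not part of the library API): decide whether an
 * RGBA32 image actually needs alpha blending. The returned mask combines
 * FF_ALPHA_TRANSP (fully transparent pixels present) and
 * FF_ALPHA_SEMI_TRANSP (partially transparent pixels present); zero means
 * the image is opaque and the alpha channel can be ignored. */
static inline int example_needs_blending(const AVPicture *pic, int width, int height)
{
    int flags = img_get_alpha_info(pic, PIX_FMT_RGB32, width, height);
    return (flags & (FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP)) != 0;
}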
  2392. #ifdef HAVE_MMX
  2393. #define DEINT_INPLACE_LINE_LUM \
  2394. movd_m2r(lum_m4[0],mm0);\
  2395. movd_m2r(lum_m3[0],mm1);\
  2396. movd_m2r(lum_m2[0],mm2);\
  2397. movd_m2r(lum_m1[0],mm3);\
  2398. movd_m2r(lum[0],mm4);\
  2399. punpcklbw_r2r(mm7,mm0);\
  2400. movd_r2m(mm2,lum_m4[0]);\
  2401. punpcklbw_r2r(mm7,mm1);\
  2402. punpcklbw_r2r(mm7,mm2);\
  2403. punpcklbw_r2r(mm7,mm3);\
  2404. punpcklbw_r2r(mm7,mm4);\
  2405. paddw_r2r(mm3,mm1);\
  2406. psllw_i2r(1,mm2);\
  2407. paddw_r2r(mm4,mm0);\
  2408. psllw_i2r(2,mm1);\
  2409. paddw_r2r(mm6,mm2);\
  2410. paddw_r2r(mm2,mm1);\
  2411. psubusw_r2r(mm0,mm1);\
  2412. psrlw_i2r(3,mm1);\
  2413. packuswb_r2r(mm7,mm1);\
  2414. movd_r2m(mm1,lum_m2[0]);
  2415. #define DEINT_LINE_LUM \
  2416. movd_m2r(lum_m4[0],mm0);\
  2417. movd_m2r(lum_m3[0],mm1);\
  2418. movd_m2r(lum_m2[0],mm2);\
  2419. movd_m2r(lum_m1[0],mm3);\
  2420. movd_m2r(lum[0],mm4);\
  2421. punpcklbw_r2r(mm7,mm0);\
  2422. punpcklbw_r2r(mm7,mm1);\
  2423. punpcklbw_r2r(mm7,mm2);\
  2424. punpcklbw_r2r(mm7,mm3);\
  2425. punpcklbw_r2r(mm7,mm4);\
  2426. paddw_r2r(mm3,mm1);\
  2427. psllw_i2r(1,mm2);\
  2428. paddw_r2r(mm4,mm0);\
  2429. psllw_i2r(2,mm1);\
  2430. paddw_r2r(mm6,mm2);\
  2431. paddw_r2r(mm2,mm1);\
  2432. psubusw_r2r(mm0,mm1);\
  2433. psrlw_i2r(3,mm1);\
  2434. packuswb_r2r(mm7,mm1);\
  2435. movd_r2m(mm1,dst[0]);
  2436. #endif
  2437. /* filter parameters: [-1 4 2 4 -1] // 8 */
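/* Worked example: for the five vertically adjacent luma samples
 (16, 32, 64, 32, 16) the filter gives
 (-16 + 4*32 + 2*64 + 4*32 - 16 + 4) >> 3 = 356 >> 3 = 44,
 clipped to [0, 255] by the crop table (C path) or packuswb (MMX path). */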
  2438. static void deinterlace_line(uint8_t *dst,
  2439. const uint8_t *lum_m4, const uint8_t *lum_m3,
  2440. const uint8_t *lum_m2, const uint8_t *lum_m1,
  2441. const uint8_t *lum,
  2442. int size)
  2443. {
  2444. #ifndef HAVE_MMX
  2445. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  2446. int sum;
  2447. for(;size > 0;size--) {
  2448. sum = -lum_m4[0];
  2449. sum += lum_m3[0] << 2;
  2450. sum += lum_m2[0] << 1;
  2451. sum += lum_m1[0] << 2;
  2452. sum += -lum[0];
  2453. dst[0] = cm[(sum + 4) >> 3];
  2454. lum_m4++;
  2455. lum_m3++;
  2456. lum_m2++;
  2457. lum_m1++;
  2458. lum++;
  2459. dst++;
  2460. }
  2461. #else
  2462. {
  2463. mmx_t rounder;
  2464. rounder.uw[0]=4;
  2465. rounder.uw[1]=4;
  2466. rounder.uw[2]=4;
  2467. rounder.uw[3]=4;
  2468. pxor_r2r(mm7,mm7);
  2469. movq_m2r(rounder,mm6);
  2470. }
  2471. for (;size > 3; size-=4) {
  2472. DEINT_LINE_LUM
  2473. lum_m4+=4;
  2474. lum_m3+=4;
  2475. lum_m2+=4;
  2476. lum_m1+=4;
  2477. lum+=4;
  2478. dst+=4;
  2479. }
  2480. #endif
  2481. }
  2482. static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
  2483. int size)
  2484. {
  2485. #ifndef HAVE_MMX
  2486. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  2487. int sum;
  2488. for(;size > 0;size--) {
  2489. sum = -lum_m4[0];
  2490. sum += lum_m3[0] << 2;
  2491. sum += lum_m2[0] << 1;
  2492. lum_m4[0]=lum_m2[0];
  2493. sum += lum_m1[0] << 2;
  2494. sum += -lum[0];
  2495. lum_m2[0] = cm[(sum + 4) >> 3];
  2496. lum_m4++;
  2497. lum_m3++;
  2498. lum_m2++;
  2499. lum_m1++;
  2500. lum++;
  2501. }
  2502. #else
  2503. {
  2504. mmx_t rounder;
  2505. rounder.uw[0]=4;
  2506. rounder.uw[1]=4;
  2507. rounder.uw[2]=4;
  2508. rounder.uw[3]=4;
  2509. pxor_r2r(mm7,mm7);
  2510. movq_m2r(rounder,mm6);
  2511. }
  2512. for (;size > 3; size-=4) {
  2513. DEINT_INPLACE_LINE_LUM
  2514. lum_m4+=4;
  2515. lum_m3+=4;
  2516. lum_m2+=4;
  2517. lum_m1+=4;
  2518. lum+=4;
  2519. }
  2520. #endif
  2521. }
2522. /* deinterlacing: 2 temporal taps, 3 spatial taps linear filter. The
  2523. top field is copied as is, but the bottom field is deinterlaced
  2524. against the top field. */
  2525. static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
  2526. const uint8_t *src1, int src_wrap,
  2527. int width, int height)
  2528. {
  2529. const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
  2530. int y;
  2531. src_m2 = src1;
  2532. src_m1 = src1;
  2533. src_0=&src_m1[src_wrap];
  2534. src_p1=&src_0[src_wrap];
  2535. src_p2=&src_p1[src_wrap];
  2536. for(y=0;y<(height-2);y+=2) {
  2537. memcpy(dst,src_m1,width);
  2538. dst += dst_wrap;
  2539. deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
  2540. src_m2 = src_0;
  2541. src_m1 = src_p1;
  2542. src_0 = src_p2;
  2543. src_p1 += 2*src_wrap;
  2544. src_p2 += 2*src_wrap;
  2545. dst += dst_wrap;
  2546. }
  2547. memcpy(dst,src_m1,width);
  2548. dst += dst_wrap;
  2549. /* do last line */
  2550. deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
  2551. }
  2552. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  2553. int width, int height)
  2554. {
  2555. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  2556. int y;
  2557. uint8_t *buf;
2558. buf = (uint8_t*)av_malloc(width);
if (!buf)
return;
  2559. src_m1 = src1;
  2560. memcpy(buf,src_m1,width);
  2561. src_0=&src_m1[src_wrap];
  2562. src_p1=&src_0[src_wrap];
  2563. src_p2=&src_p1[src_wrap];
  2564. for(y=0;y<(height-2);y+=2) {
  2565. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  2566. src_m1 = src_p1;
  2567. src_0 = src_p2;
  2568. src_p1 += 2*src_wrap;
  2569. src_p2 += 2*src_wrap;
  2570. }
  2571. /* do last line */
  2572. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  2573. av_free(buf);
  2574. }
  2575. /* deinterlace - if not supported return -1 */
  2576. int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
  2577. int pix_fmt, int width, int height)
  2578. {
  2579. int i;
  2580. if (pix_fmt != PIX_FMT_YUV420P &&
  2581. pix_fmt != PIX_FMT_YUV422P &&
  2582. pix_fmt != PIX_FMT_YUV444P &&
  2583. pix_fmt != PIX_FMT_YUV411P)
  2584. return -1;
  2585. if ((width & 3) != 0 || (height & 3) != 0)
  2586. return -1;
  2587. for(i=0;i<3;i++) {
  2588. if (i == 1) {
  2589. switch(pix_fmt) {
  2590. case PIX_FMT_YUV420P:
  2591. width >>= 1;
  2592. height >>= 1;
  2593. break;
  2594. case PIX_FMT_YUV422P:
  2595. width >>= 1;
  2596. break;
  2597. case PIX_FMT_YUV411P:
  2598. width >>= 2;
  2599. break;
  2600. default:
  2601. break;
  2602. }
  2603. }
  2604. if (src == dst) {
  2605. deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i],
  2606. width, height);
  2607. } else {
  2608. deinterlace_bottom_field(dst->data[i],dst->linesize[i],
  2609. src->data[i], src->linesize[i],
  2610. width, height);
  2611. }
  2612. }
  2613. #ifdef HAVE_MMX
  2614. emms();
  2615. #endif
  2616. return 0;
  2617. }
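/* Illustrative sketch (not part of the library API): deinterlace a YUV420P
 * frame in place. Passing the same picture as src and dst selects the
 * in-place path above; width and height must both be multiples of 4 or the
 * call fails with -1. */
static inline int example_deinterlace_yuv420p_inplace(AVPicture *pic, int width, int height)
{
    return avpicture_deinterlace(pic, pic, PIX_FMT_YUV420P, width, height);
}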
  2618. #undef FIX