  1. /*
2. * Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file imgconvert.c
23. * Misc image conversion routines.
  24. */
  25. /* TODO:
26. * - write an 'ffimg' program to test all the image-related stuff
27. * - move the whole API to a slice-based system
  28. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  29. */
  30. #include "avcodec.h"
  31. #include "dsputil.h"
  32. #ifdef HAVE_MMX
  33. #include "i386/mmx.h"
  34. #endif
  35. #define xglue(x, y) x ## y
  36. #define glue(x, y) xglue(x, y)
  37. #define FF_COLOR_RGB 0 /**< RGB color space */
  38. #define FF_COLOR_GRAY 1 /**< gray color space */
  39. #define FF_COLOR_YUV 2 /**< YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
  40. #define FF_COLOR_YUV_JPEG 3 /**< YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
  41. #define FF_PIXEL_PLANAR 0 /**< each channel has one component in AVPicture */
42. #define FF_PIXEL_PACKED 1 /**< only one component containing all the channels */
43. #define FF_PIXEL_PALETTE 2 /**< one component containing indexes for a palette */
  44. typedef struct PixFmtInfo {
  45. const char *name;
  46. uint8_t nb_channels; /**< number of channels (including alpha) */
  47. uint8_t color_type; /**< color type (see FF_COLOR_xxx constants) */
  48. uint8_t pixel_type; /**< pixel storage type (see FF_PIXEL_xxx constants) */
  49. uint8_t is_alpha : 1; /**< true if alpha can be specified */
  50. uint8_t x_chroma_shift; /**< X chroma subsampling factor is 2 ^ shift */
  51. uint8_t y_chroma_shift; /**< Y chroma subsampling factor is 2 ^ shift */
  52. uint8_t depth; /**< bit depth of the color components */
  53. } PixFmtInfo;
  54. /* this table gives more information about formats */
  55. static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
  56. /* YUV formats */
  57. [PIX_FMT_YUV420P] = {
  58. .name = "yuv420p",
  59. .nb_channels = 3,
  60. .color_type = FF_COLOR_YUV,
  61. .pixel_type = FF_PIXEL_PLANAR,
  62. .depth = 8,
  63. .x_chroma_shift = 1, .y_chroma_shift = 1,
  64. },
  65. [PIX_FMT_YUV422P] = {
  66. .name = "yuv422p",
  67. .nb_channels = 3,
  68. .color_type = FF_COLOR_YUV,
  69. .pixel_type = FF_PIXEL_PLANAR,
  70. .depth = 8,
  71. .x_chroma_shift = 1, .y_chroma_shift = 0,
  72. },
  73. [PIX_FMT_YUV444P] = {
  74. .name = "yuv444p",
  75. .nb_channels = 3,
  76. .color_type = FF_COLOR_YUV,
  77. .pixel_type = FF_PIXEL_PLANAR,
  78. .depth = 8,
  79. .x_chroma_shift = 0, .y_chroma_shift = 0,
  80. },
  81. [PIX_FMT_YUYV422] = {
  82. .name = "yuyv422",
  83. .nb_channels = 1,
  84. .color_type = FF_COLOR_YUV,
  85. .pixel_type = FF_PIXEL_PACKED,
  86. .depth = 8,
  87. .x_chroma_shift = 1, .y_chroma_shift = 0,
  88. },
  89. [PIX_FMT_UYVY422] = {
  90. .name = "uyvy422",
  91. .nb_channels = 1,
  92. .color_type = FF_COLOR_YUV,
  93. .pixel_type = FF_PIXEL_PACKED,
  94. .depth = 8,
  95. .x_chroma_shift = 1, .y_chroma_shift = 0,
  96. },
  97. [PIX_FMT_YUV410P] = {
  98. .name = "yuv410p",
  99. .nb_channels = 3,
  100. .color_type = FF_COLOR_YUV,
  101. .pixel_type = FF_PIXEL_PLANAR,
  102. .depth = 8,
  103. .x_chroma_shift = 2, .y_chroma_shift = 2,
  104. },
  105. [PIX_FMT_YUV411P] = {
  106. .name = "yuv411p",
  107. .nb_channels = 3,
  108. .color_type = FF_COLOR_YUV,
  109. .pixel_type = FF_PIXEL_PLANAR,
  110. .depth = 8,
  111. .x_chroma_shift = 2, .y_chroma_shift = 0,
  112. },
  113. /* JPEG YUV */
  114. [PIX_FMT_YUVJ420P] = {
  115. .name = "yuvj420p",
  116. .nb_channels = 3,
  117. .color_type = FF_COLOR_YUV_JPEG,
  118. .pixel_type = FF_PIXEL_PLANAR,
  119. .depth = 8,
  120. .x_chroma_shift = 1, .y_chroma_shift = 1,
  121. },
  122. [PIX_FMT_YUVJ422P] = {
  123. .name = "yuvj422p",
  124. .nb_channels = 3,
  125. .color_type = FF_COLOR_YUV_JPEG,
  126. .pixel_type = FF_PIXEL_PLANAR,
  127. .depth = 8,
  128. .x_chroma_shift = 1, .y_chroma_shift = 0,
  129. },
  130. [PIX_FMT_YUVJ444P] = {
  131. .name = "yuvj444p",
  132. .nb_channels = 3,
  133. .color_type = FF_COLOR_YUV_JPEG,
  134. .pixel_type = FF_PIXEL_PLANAR,
  135. .depth = 8,
  136. .x_chroma_shift = 0, .y_chroma_shift = 0,
  137. },
  138. /* RGB formats */
  139. [PIX_FMT_RGB24] = {
  140. .name = "rgb24",
  141. .nb_channels = 3,
  142. .color_type = FF_COLOR_RGB,
  143. .pixel_type = FF_PIXEL_PACKED,
  144. .depth = 8,
  145. .x_chroma_shift = 0, .y_chroma_shift = 0,
  146. },
  147. [PIX_FMT_BGR24] = {
  148. .name = "bgr24",
  149. .nb_channels = 3,
  150. .color_type = FF_COLOR_RGB,
  151. .pixel_type = FF_PIXEL_PACKED,
  152. .depth = 8,
  153. .x_chroma_shift = 0, .y_chroma_shift = 0,
  154. },
  155. [PIX_FMT_RGB32] = {
  156. .name = "rgb32",
  157. .nb_channels = 4, .is_alpha = 1,
  158. .color_type = FF_COLOR_RGB,
  159. .pixel_type = FF_PIXEL_PACKED,
  160. .depth = 8,
  161. .x_chroma_shift = 0, .y_chroma_shift = 0,
  162. },
  163. [PIX_FMT_RGB565] = {
  164. .name = "rgb565",
  165. .nb_channels = 3,
  166. .color_type = FF_COLOR_RGB,
  167. .pixel_type = FF_PIXEL_PACKED,
  168. .depth = 5,
  169. .x_chroma_shift = 0, .y_chroma_shift = 0,
  170. },
  171. [PIX_FMT_RGB555] = {
  172. .name = "rgb555",
  173. .nb_channels = 3,
  174. .color_type = FF_COLOR_RGB,
  175. .pixel_type = FF_PIXEL_PACKED,
  176. .depth = 5,
  177. .x_chroma_shift = 0, .y_chroma_shift = 0,
  178. },
  179. /* gray / mono formats */
  180. [PIX_FMT_GRAY16BE] = {
  181. .name = "gray16be",
  182. .nb_channels = 1,
  183. .color_type = FF_COLOR_GRAY,
  184. .pixel_type = FF_PIXEL_PLANAR,
  185. .depth = 16,
  186. },
  187. [PIX_FMT_GRAY16LE] = {
  188. .name = "gray16le",
  189. .nb_channels = 1,
  190. .color_type = FF_COLOR_GRAY,
  191. .pixel_type = FF_PIXEL_PLANAR,
  192. .depth = 16,
  193. },
  194. [PIX_FMT_GRAY8] = {
  195. .name = "gray",
  196. .nb_channels = 1,
  197. .color_type = FF_COLOR_GRAY,
  198. .pixel_type = FF_PIXEL_PLANAR,
  199. .depth = 8,
  200. },
  201. [PIX_FMT_MONOWHITE] = {
  202. .name = "monow",
  203. .nb_channels = 1,
  204. .color_type = FF_COLOR_GRAY,
  205. .pixel_type = FF_PIXEL_PLANAR,
  206. .depth = 1,
  207. },
  208. [PIX_FMT_MONOBLACK] = {
  209. .name = "monob",
  210. .nb_channels = 1,
  211. .color_type = FF_COLOR_GRAY,
  212. .pixel_type = FF_PIXEL_PLANAR,
  213. .depth = 1,
  214. },
  215. /* paletted formats */
  216. [PIX_FMT_PAL8] = {
  217. .name = "pal8",
  218. .nb_channels = 4, .is_alpha = 1,
  219. .color_type = FF_COLOR_RGB,
  220. .pixel_type = FF_PIXEL_PALETTE,
  221. .depth = 8,
  222. },
  223. [PIX_FMT_XVMC_MPEG2_MC] = {
  224. .name = "xvmcmc",
  225. },
  226. [PIX_FMT_XVMC_MPEG2_IDCT] = {
  227. .name = "xvmcidct",
  228. },
  229. [PIX_FMT_UYYVYY411] = {
  230. .name = "uyyvyy411",
  231. .nb_channels = 1,
  232. .color_type = FF_COLOR_YUV,
  233. .pixel_type = FF_PIXEL_PACKED,
  234. .depth = 8,
  235. .x_chroma_shift = 2, .y_chroma_shift = 0,
  236. },
  237. [PIX_FMT_BGR32] = {
  238. .name = "bgr32",
  239. .nb_channels = 4, .is_alpha = 1,
  240. .color_type = FF_COLOR_RGB,
  241. .pixel_type = FF_PIXEL_PACKED,
  242. .depth = 8,
  243. .x_chroma_shift = 0, .y_chroma_shift = 0,
  244. },
  245. [PIX_FMT_BGR565] = {
  246. .name = "bgr565",
  247. .nb_channels = 3,
  248. .color_type = FF_COLOR_RGB,
  249. .pixel_type = FF_PIXEL_PACKED,
  250. .depth = 5,
  251. .x_chroma_shift = 0, .y_chroma_shift = 0,
  252. },
  253. [PIX_FMT_BGR555] = {
  254. .name = "bgr555",
  255. .nb_channels = 3,
  256. .color_type = FF_COLOR_RGB,
  257. .pixel_type = FF_PIXEL_PACKED,
  258. .depth = 5,
  259. .x_chroma_shift = 0, .y_chroma_shift = 0,
  260. },
  261. [PIX_FMT_RGB8] = {
  262. .name = "rgb8",
  263. .nb_channels = 1,
  264. .color_type = FF_COLOR_RGB,
  265. .pixel_type = FF_PIXEL_PACKED,
  266. .depth = 8,
  267. .x_chroma_shift = 0, .y_chroma_shift = 0,
  268. },
  269. [PIX_FMT_RGB4] = {
  270. .name = "rgb4",
  271. .nb_channels = 1,
  272. .color_type = FF_COLOR_RGB,
  273. .pixel_type = FF_PIXEL_PACKED,
  274. .depth = 4,
  275. .x_chroma_shift = 0, .y_chroma_shift = 0,
  276. },
  277. [PIX_FMT_RGB4_BYTE] = {
  278. .name = "rgb4_byte",
  279. .nb_channels = 1,
  280. .color_type = FF_COLOR_RGB,
  281. .pixel_type = FF_PIXEL_PACKED,
  282. .depth = 8,
  283. .x_chroma_shift = 0, .y_chroma_shift = 0,
  284. },
  285. [PIX_FMT_BGR8] = {
  286. .name = "bgr8",
  287. .nb_channels = 1,
  288. .color_type = FF_COLOR_RGB,
  289. .pixel_type = FF_PIXEL_PACKED,
  290. .depth = 8,
  291. .x_chroma_shift = 0, .y_chroma_shift = 0,
  292. },
  293. [PIX_FMT_BGR4] = {
  294. .name = "bgr4",
  295. .nb_channels = 1,
  296. .color_type = FF_COLOR_RGB,
  297. .pixel_type = FF_PIXEL_PACKED,
  298. .depth = 4,
  299. .x_chroma_shift = 0, .y_chroma_shift = 0,
  300. },
  301. [PIX_FMT_BGR4_BYTE] = {
  302. .name = "bgr4_byte",
  303. .nb_channels = 1,
  304. .color_type = FF_COLOR_RGB,
  305. .pixel_type = FF_PIXEL_PACKED,
  306. .depth = 8,
  307. .x_chroma_shift = 0, .y_chroma_shift = 0,
  308. },
  309. [PIX_FMT_NV12] = {
  310. .name = "nv12",
  311. .nb_channels = 2,
  312. .color_type = FF_COLOR_YUV,
  313. .pixel_type = FF_PIXEL_PLANAR,
  314. .depth = 8,
  315. .x_chroma_shift = 1, .y_chroma_shift = 1,
  316. },
  317. [PIX_FMT_NV21] = {
  318. .name = "nv12",
  319. .nb_channels = 2,
  320. .color_type = FF_COLOR_YUV,
  321. .pixel_type = FF_PIXEL_PLANAR,
  322. .depth = 8,
  323. .x_chroma_shift = 1, .y_chroma_shift = 1,
  324. },
  325. [PIX_FMT_BGR32_1] = {
  326. .name = "bgr32_1",
  327. .nb_channels = 4, .is_alpha = 1,
  328. .color_type = FF_COLOR_RGB,
  329. .pixel_type = FF_PIXEL_PACKED,
  330. .depth = 8,
  331. .x_chroma_shift = 0, .y_chroma_shift = 0,
  332. },
  333. [PIX_FMT_RGB32_1] = {
  334. .name = "rgb32_1",
  335. .nb_channels = 4, .is_alpha = 1,
  336. .color_type = FF_COLOR_RGB,
  337. .pixel_type = FF_PIXEL_PACKED,
  338. .depth = 8,
  339. .x_chroma_shift = 0, .y_chroma_shift = 0,
  340. },
  341. };
  342. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  343. {
  344. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  345. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  346. }
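/* Illustrative example: for PIX_FMT_YUV420P the table above gives
 * x_chroma_shift = y_chroma_shift = 1, so
 *
 *   int h_shift, v_shift;
 *   avcodec_get_chroma_sub_sample(PIX_FMT_YUV420P, &h_shift, &v_shift);
 *
 * leaves h_shift == 1 and v_shift == 1, i.e. the chroma planes are half the
 * luma resolution in both directions. */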
  347. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  348. {
  349. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  350. return "???";
  351. else
  352. return pix_fmt_info[pix_fmt].name;
  353. }
  354. enum PixelFormat avcodec_get_pix_fmt(const char* name)
  355. {
  356. int i;
  357. for (i=0; i < PIX_FMT_NB; i++)
  358. if (!strcmp(pix_fmt_info[i].name, name))
  359. break;
  360. return i;
  361. }
  362. void avcodec_pix_fmt_string (char *buf, int buf_size, int pix_fmt)
  363. {
364. PixFmtInfo info= pix_fmt_info[pix_fmt >= 0 ? pix_fmt : 0]; /* don't index out of range when only the header is requested */
  365. char is_alpha_char= info.is_alpha ? 'y' : 'n';
  366. /* print header */
  367. if (pix_fmt < 0)
  368. snprintf (buf, buf_size,
  369. "name " " nb_channels" " depth" " is_alpha"
  370. );
  371. else
  372. snprintf (buf, buf_size,
  373. "%-10s" " %1d " " %2d " " %c ",
  374. info.name,
  375. info.nb_channels,
  376. info.depth,
  377. is_alpha_char
  378. );
  379. }
  380. int avpicture_fill(AVPicture *picture, uint8_t *ptr,
  381. int pix_fmt, int width, int height)
  382. {
  383. int size, w2, h2, size2;
  384. const PixFmtInfo *pinfo;
  385. if(avcodec_check_dimensions(NULL, width, height))
  386. goto fail;
  387. pinfo = &pix_fmt_info[pix_fmt];
  388. size = width * height;
  389. switch(pix_fmt) {
  390. case PIX_FMT_YUV420P:
  391. case PIX_FMT_YUV422P:
  392. case PIX_FMT_YUV444P:
  393. case PIX_FMT_YUV410P:
  394. case PIX_FMT_YUV411P:
  395. case PIX_FMT_YUVJ420P:
  396. case PIX_FMT_YUVJ422P:
  397. case PIX_FMT_YUVJ444P:
  398. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  399. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  400. size2 = w2 * h2;
  401. picture->data[0] = ptr;
  402. picture->data[1] = picture->data[0] + size;
  403. picture->data[2] = picture->data[1] + size2;
  404. picture->linesize[0] = width;
  405. picture->linesize[1] = w2;
  406. picture->linesize[2] = w2;
  407. return size + 2 * size2;
  408. case PIX_FMT_NV12:
  409. case PIX_FMT_NV21:
  410. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  411. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  412. size2 = w2 * h2 * 2;
  413. picture->data[0] = ptr;
  414. picture->data[1] = picture->data[0] + size;
  415. picture->data[2] = NULL;
  416. picture->linesize[0] = width;
  417. picture->linesize[1] = w2;
  418. picture->linesize[2] = 0;
  419. return size + 2 * size2;
  420. case PIX_FMT_RGB24:
  421. case PIX_FMT_BGR24:
  422. picture->data[0] = ptr;
  423. picture->data[1] = NULL;
  424. picture->data[2] = NULL;
  425. picture->linesize[0] = width * 3;
  426. return size * 3;
  427. case PIX_FMT_RGB32:
  428. case PIX_FMT_BGR32:
  429. case PIX_FMT_RGB32_1:
  430. case PIX_FMT_BGR32_1:
  431. picture->data[0] = ptr;
  432. picture->data[1] = NULL;
  433. picture->data[2] = NULL;
  434. picture->linesize[0] = width * 4;
  435. return size * 4;
  436. case PIX_FMT_GRAY16BE:
  437. case PIX_FMT_GRAY16LE:
  438. case PIX_FMT_BGR555:
  439. case PIX_FMT_BGR565:
  440. case PIX_FMT_RGB555:
  441. case PIX_FMT_RGB565:
  442. case PIX_FMT_YUYV422:
  443. picture->data[0] = ptr;
  444. picture->data[1] = NULL;
  445. picture->data[2] = NULL;
  446. picture->linesize[0] = width * 2;
  447. return size * 2;
  448. case PIX_FMT_UYVY422:
  449. picture->data[0] = ptr;
  450. picture->data[1] = NULL;
  451. picture->data[2] = NULL;
  452. picture->linesize[0] = width * 2;
  453. return size * 2;
  454. case PIX_FMT_UYYVYY411:
  455. picture->data[0] = ptr;
  456. picture->data[1] = NULL;
  457. picture->data[2] = NULL;
  458. picture->linesize[0] = width + width/2;
  459. return size + size/2;
  460. case PIX_FMT_RGB8:
  461. case PIX_FMT_BGR8:
  462. case PIX_FMT_RGB4_BYTE:
  463. case PIX_FMT_BGR4_BYTE:
  464. case PIX_FMT_GRAY8:
  465. picture->data[0] = ptr;
  466. picture->data[1] = NULL;
  467. picture->data[2] = NULL;
  468. picture->linesize[0] = width;
  469. return size;
  470. case PIX_FMT_RGB4:
  471. case PIX_FMT_BGR4:
  472. picture->data[0] = ptr;
  473. picture->data[1] = NULL;
  474. picture->data[2] = NULL;
  475. picture->linesize[0] = width / 2;
  476. return size / 2;
  477. case PIX_FMT_MONOWHITE:
  478. case PIX_FMT_MONOBLACK:
  479. picture->data[0] = ptr;
  480. picture->data[1] = NULL;
  481. picture->data[2] = NULL;
  482. picture->linesize[0] = (width + 7) >> 3;
  483. return picture->linesize[0] * height;
  484. case PIX_FMT_PAL8:
  485. size2 = (size + 3) & ~3;
  486. picture->data[0] = ptr;
487. picture->data[1] = ptr + size2; /* palette is stored here as 256 32-bit words */
  488. picture->data[2] = NULL;
  489. picture->linesize[0] = width;
  490. picture->linesize[1] = 4;
  491. return size2 + 256 * 4;
  492. default:
  493. fail:
  494. picture->data[0] = NULL;
  495. picture->data[1] = NULL;
  496. picture->data[2] = NULL;
  497. picture->data[3] = NULL;
  498. return -1;
  499. }
  500. }
  501. int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
  502. unsigned char *dest, int dest_size)
  503. {
  504. const PixFmtInfo* pf = &pix_fmt_info[pix_fmt];
  505. int i, j, w, h, data_planes;
  506. const unsigned char* s;
  507. int size = avpicture_get_size(pix_fmt, width, height);
  508. if (size > dest_size || size < 0)
  509. return -1;
  510. if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
  511. if (pix_fmt == PIX_FMT_YUYV422 ||
  512. pix_fmt == PIX_FMT_UYVY422 ||
  513. pix_fmt == PIX_FMT_BGR565 ||
  514. pix_fmt == PIX_FMT_BGR555 ||
  515. pix_fmt == PIX_FMT_RGB565 ||
  516. pix_fmt == PIX_FMT_RGB555)
  517. w = width * 2;
  518. else if (pix_fmt == PIX_FMT_UYYVYY411)
  519. w = width + width/2;
  520. else if (pix_fmt == PIX_FMT_PAL8)
  521. w = width;
  522. else
  523. w = width * (pf->depth * pf->nb_channels / 8);
  524. data_planes = 1;
  525. h = height;
  526. } else {
  527. data_planes = pf->nb_channels;
  528. w = (width*pf->depth + 7)/8;
  529. h = height;
  530. }
  531. for (i=0; i<data_planes; i++) {
  532. if (i == 1) {
  533. w = width >> pf->x_chroma_shift;
  534. h = height >> pf->y_chroma_shift;
  535. }
  536. s = src->data[i];
  537. for(j=0; j<h; j++) {
  538. memcpy(dest, s, w);
  539. dest += w;
  540. s += src->linesize[i];
  541. }
  542. }
  543. if (pf->pixel_type == FF_PIXEL_PALETTE)
  544. memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);
  545. return size;
  546. }
  547. int avpicture_get_size(int pix_fmt, int width, int height)
  548. {
  549. AVPicture dummy_pict;
  550. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  551. }
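/* Usage sketch (illustrative): a caller typically sizes a buffer with
 * avpicture_get_size() and then maps the planes onto it with
 * avpicture_fill():
 *
 *   AVPicture pic;
 *   int size = avpicture_get_size(PIX_FMT_YUV420P, width, height);
 *   uint8_t *buf = size < 0 ? NULL : av_malloc(size);
 *   if (buf)
 *       avpicture_fill(&pic, buf, PIX_FMT_YUV420P, width, height);
 *
 * avpicture_alloc() further down wraps exactly this pattern, and
 * avpicture_free() releases the buffer again. */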
  552. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  553. int has_alpha)
  554. {
  555. const PixFmtInfo *pf, *ps;
  556. int loss;
  557. ps = &pix_fmt_info[src_pix_fmt];
  558. pf = &pix_fmt_info[dst_pix_fmt];
  559. /* compute loss */
  560. loss = 0;
  561. pf = &pix_fmt_info[dst_pix_fmt];
  562. if (pf->depth < ps->depth ||
  563. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  564. loss |= FF_LOSS_DEPTH;
  565. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  566. pf->y_chroma_shift > ps->y_chroma_shift)
  567. loss |= FF_LOSS_RESOLUTION;
  568. switch(pf->color_type) {
  569. case FF_COLOR_RGB:
  570. if (ps->color_type != FF_COLOR_RGB &&
  571. ps->color_type != FF_COLOR_GRAY)
  572. loss |= FF_LOSS_COLORSPACE;
  573. break;
  574. case FF_COLOR_GRAY:
  575. if (ps->color_type != FF_COLOR_GRAY)
  576. loss |= FF_LOSS_COLORSPACE;
  577. break;
  578. case FF_COLOR_YUV:
  579. if (ps->color_type != FF_COLOR_YUV)
  580. loss |= FF_LOSS_COLORSPACE;
  581. break;
  582. case FF_COLOR_YUV_JPEG:
  583. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  584. ps->color_type != FF_COLOR_YUV &&
  585. ps->color_type != FF_COLOR_GRAY)
  586. loss |= FF_LOSS_COLORSPACE;
  587. break;
  588. default:
  589. /* fail safe test */
  590. if (ps->color_type != pf->color_type)
  591. loss |= FF_LOSS_COLORSPACE;
  592. break;
  593. }
  594. if (pf->color_type == FF_COLOR_GRAY &&
  595. ps->color_type != FF_COLOR_GRAY)
  596. loss |= FF_LOSS_CHROMA;
  597. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  598. loss |= FF_LOSS_ALPHA;
  599. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  600. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  601. loss |= FF_LOSS_COLORQUANT;
  602. return loss;
  603. }
  604. static int avg_bits_per_pixel(int pix_fmt)
  605. {
  606. int bits;
  607. const PixFmtInfo *pf;
  608. pf = &pix_fmt_info[pix_fmt];
  609. switch(pf->pixel_type) {
  610. case FF_PIXEL_PACKED:
  611. switch(pix_fmt) {
  612. case PIX_FMT_YUYV422:
  613. case PIX_FMT_UYVY422:
  614. case PIX_FMT_RGB565:
  615. case PIX_FMT_RGB555:
  616. case PIX_FMT_BGR565:
  617. case PIX_FMT_BGR555:
  618. bits = 16;
  619. break;
  620. case PIX_FMT_UYYVYY411:
  621. bits = 12;
  622. break;
  623. default:
  624. bits = pf->depth * pf->nb_channels;
  625. break;
  626. }
  627. break;
  628. case FF_PIXEL_PLANAR:
  629. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  630. bits = pf->depth * pf->nb_channels;
  631. } else {
  632. bits = pf->depth + ((2 * pf->depth) >>
  633. (pf->x_chroma_shift + pf->y_chroma_shift));
  634. }
  635. break;
  636. case FF_PIXEL_PALETTE:
  637. bits = 8;
  638. break;
  639. default:
  640. bits = -1;
  641. break;
  642. }
  643. return bits;
  644. }
  645. static int avcodec_find_best_pix_fmt1(int pix_fmt_mask,
  646. int src_pix_fmt,
  647. int has_alpha,
  648. int loss_mask)
  649. {
  650. int dist, i, loss, min_dist, dst_pix_fmt;
  651. /* find exact color match with smallest size */
  652. dst_pix_fmt = -1;
  653. min_dist = 0x7fffffff;
  654. for(i = 0;i < PIX_FMT_NB; i++) {
  655. if (pix_fmt_mask & (1 << i)) {
  656. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  657. if (loss == 0) {
  658. dist = avg_bits_per_pixel(i);
  659. if (dist < min_dist) {
  660. min_dist = dist;
  661. dst_pix_fmt = i;
  662. }
  663. }
  664. }
  665. }
  666. return dst_pix_fmt;
  667. }
  668. int avcodec_find_best_pix_fmt(int pix_fmt_mask, int src_pix_fmt,
  669. int has_alpha, int *loss_ptr)
  670. {
  671. int dst_pix_fmt, loss_mask, i;
  672. static const int loss_mask_order[] = {
  673. ~0, /* no loss first */
  674. ~FF_LOSS_ALPHA,
  675. ~FF_LOSS_RESOLUTION,
  676. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  677. ~FF_LOSS_COLORQUANT,
  678. ~FF_LOSS_DEPTH,
  679. 0,
  680. };
  681. /* try with successive loss */
  682. i = 0;
  683. for(;;) {
  684. loss_mask = loss_mask_order[i++];
  685. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  686. has_alpha, loss_mask);
  687. if (dst_pix_fmt >= 0)
  688. goto found;
  689. if (loss_mask == 0)
  690. break;
  691. }
  692. return -1;
  693. found:
  694. if (loss_ptr)
  695. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  696. return dst_pix_fmt;
  697. }
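/* Illustrative call: pick the best destination format among YUV420P and
 * RGB24 when converting from packed YUYV422:
 *
 *   int loss;
 *   int dst_fmt = avcodec_find_best_pix_fmt((1 << PIX_FMT_YUV420P) |
 *                                           (1 << PIX_FMT_RGB24),
 *                                           PIX_FMT_YUYV422, 0, &loss);
 *
 * The mask is a bitmask of candidate PixelFormat values, and on return
 * 'loss' holds the FF_LOSS_* flags incurred by the chosen format. */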
  698. void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
  699. const uint8_t *src, int src_wrap,
  700. int width, int height)
  701. {
  702. if((!dst) || (!src))
  703. return;
  704. for(;height > 0; height--) {
  705. memcpy(dst, src, width);
  706. dst += dst_wrap;
  707. src += src_wrap;
  708. }
  709. }
  710. void av_picture_copy(AVPicture *dst, const AVPicture *src,
  711. int pix_fmt, int width, int height)
  712. {
  713. int bwidth, bits, i;
  714. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  715. pf = &pix_fmt_info[pix_fmt];
  716. switch(pf->pixel_type) {
  717. case FF_PIXEL_PACKED:
  718. switch(pix_fmt) {
  719. case PIX_FMT_YUYV422:
  720. case PIX_FMT_UYVY422:
  721. case PIX_FMT_RGB565:
  722. case PIX_FMT_RGB555:
  723. case PIX_FMT_BGR565:
  724. case PIX_FMT_BGR555:
  725. bits = 16;
  726. break;
  727. case PIX_FMT_UYYVYY411:
  728. bits = 12;
  729. break;
  730. default:
  731. bits = pf->depth * pf->nb_channels;
  732. break;
  733. }
  734. bwidth = (width * bits + 7) >> 3;
  735. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  736. src->data[0], src->linesize[0],
  737. bwidth, height);
  738. break;
  739. case FF_PIXEL_PLANAR:
  740. for(i = 0; i < pf->nb_channels; i++) {
  741. int w, h;
  742. w = width;
  743. h = height;
  744. if (i == 1 || i == 2) {
  745. w >>= pf->x_chroma_shift;
  746. h >>= pf->y_chroma_shift;
  747. }
  748. bwidth = (w * pf->depth + 7) >> 3;
  749. ff_img_copy_plane(dst->data[i], dst->linesize[i],
  750. src->data[i], src->linesize[i],
  751. bwidth, h);
  752. }
  753. break;
  754. case FF_PIXEL_PALETTE:
  755. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  756. src->data[0], src->linesize[0],
  757. width, height);
  758. /* copy the palette */
  759. ff_img_copy_plane(dst->data[1], dst->linesize[1],
  760. src->data[1], src->linesize[1],
  761. 4, 256);
  762. break;
  763. }
  764. }
  765. /* XXX: totally non optimized */
  766. static void yuyv422_to_yuv420p(AVPicture *dst, const AVPicture *src,
  767. int width, int height)
  768. {
  769. const uint8_t *p, *p1;
  770. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  771. int w;
  772. p1 = src->data[0];
  773. lum1 = dst->data[0];
  774. cb1 = dst->data[1];
  775. cr1 = dst->data[2];
  776. for(;height >= 1; height -= 2) {
  777. p = p1;
  778. lum = lum1;
  779. cb = cb1;
  780. cr = cr1;
  781. for(w = width; w >= 2; w -= 2) {
  782. lum[0] = p[0];
  783. cb[0] = p[1];
  784. lum[1] = p[2];
  785. cr[0] = p[3];
  786. p += 4;
  787. lum += 2;
  788. cb++;
  789. cr++;
  790. }
  791. if (w) {
  792. lum[0] = p[0];
  793. cb[0] = p[1];
  794. cr[0] = p[3];
  795. cb++;
  796. cr++;
  797. }
  798. p1 += src->linesize[0];
  799. lum1 += dst->linesize[0];
  800. if (height>1) {
  801. p = p1;
  802. lum = lum1;
  803. for(w = width; w >= 2; w -= 2) {
  804. lum[0] = p[0];
  805. lum[1] = p[2];
  806. p += 4;
  807. lum += 2;
  808. }
  809. if (w) {
  810. lum[0] = p[0];
  811. }
  812. p1 += src->linesize[0];
  813. lum1 += dst->linesize[0];
  814. }
  815. cb1 += dst->linesize[1];
  816. cr1 += dst->linesize[2];
  817. }
  818. }
  819. static void uyvy422_to_yuv420p(AVPicture *dst, const AVPicture *src,
  820. int width, int height)
  821. {
  822. const uint8_t *p, *p1;
  823. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  824. int w;
  825. p1 = src->data[0];
  826. lum1 = dst->data[0];
  827. cb1 = dst->data[1];
  828. cr1 = dst->data[2];
  829. for(;height >= 1; height -= 2) {
  830. p = p1;
  831. lum = lum1;
  832. cb = cb1;
  833. cr = cr1;
  834. for(w = width; w >= 2; w -= 2) {
  835. lum[0] = p[1];
  836. cb[0] = p[0];
  837. lum[1] = p[3];
  838. cr[0] = p[2];
  839. p += 4;
  840. lum += 2;
  841. cb++;
  842. cr++;
  843. }
  844. if (w) {
  845. lum[0] = p[1];
  846. cb[0] = p[0];
  847. cr[0] = p[2];
  848. cb++;
  849. cr++;
  850. }
  851. p1 += src->linesize[0];
  852. lum1 += dst->linesize[0];
  853. if (height>1) {
  854. p = p1;
  855. lum = lum1;
  856. for(w = width; w >= 2; w -= 2) {
  857. lum[0] = p[1];
  858. lum[1] = p[3];
  859. p += 4;
  860. lum += 2;
  861. }
  862. if (w) {
  863. lum[0] = p[1];
  864. }
  865. p1 += src->linesize[0];
  866. lum1 += dst->linesize[0];
  867. }
  868. cb1 += dst->linesize[1];
  869. cr1 += dst->linesize[2];
  870. }
  871. }
  872. static void uyvy422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  873. int width, int height)
  874. {
  875. const uint8_t *p, *p1;
  876. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  877. int w;
  878. p1 = src->data[0];
  879. lum1 = dst->data[0];
  880. cb1 = dst->data[1];
  881. cr1 = dst->data[2];
  882. for(;height > 0; height--) {
  883. p = p1;
  884. lum = lum1;
  885. cb = cb1;
  886. cr = cr1;
  887. for(w = width; w >= 2; w -= 2) {
  888. lum[0] = p[1];
  889. cb[0] = p[0];
  890. lum[1] = p[3];
  891. cr[0] = p[2];
  892. p += 4;
  893. lum += 2;
  894. cb++;
  895. cr++;
  896. }
  897. p1 += src->linesize[0];
  898. lum1 += dst->linesize[0];
  899. cb1 += dst->linesize[1];
  900. cr1 += dst->linesize[2];
  901. }
  902. }
  903. static void yuyv422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  904. int width, int height)
  905. {
  906. const uint8_t *p, *p1;
  907. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  908. int w;
  909. p1 = src->data[0];
  910. lum1 = dst->data[0];
  911. cb1 = dst->data[1];
  912. cr1 = dst->data[2];
  913. for(;height > 0; height--) {
  914. p = p1;
  915. lum = lum1;
  916. cb = cb1;
  917. cr = cr1;
  918. for(w = width; w >= 2; w -= 2) {
  919. lum[0] = p[0];
  920. cb[0] = p[1];
  921. lum[1] = p[2];
  922. cr[0] = p[3];
  923. p += 4;
  924. lum += 2;
  925. cb++;
  926. cr++;
  927. }
  928. p1 += src->linesize[0];
  929. lum1 += dst->linesize[0];
  930. cb1 += dst->linesize[1];
  931. cr1 += dst->linesize[2];
  932. }
  933. }
  934. static void yuv422p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  935. int width, int height)
  936. {
  937. uint8_t *p, *p1;
  938. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  939. int w;
  940. p1 = dst->data[0];
  941. lum1 = src->data[0];
  942. cb1 = src->data[1];
  943. cr1 = src->data[2];
  944. for(;height > 0; height--) {
  945. p = p1;
  946. lum = lum1;
  947. cb = cb1;
  948. cr = cr1;
  949. for(w = width; w >= 2; w -= 2) {
  950. p[0] = lum[0];
  951. p[1] = cb[0];
  952. p[2] = lum[1];
  953. p[3] = cr[0];
  954. p += 4;
  955. lum += 2;
  956. cb++;
  957. cr++;
  958. }
  959. p1 += dst->linesize[0];
  960. lum1 += src->linesize[0];
  961. cb1 += src->linesize[1];
  962. cr1 += src->linesize[2];
  963. }
  964. }
  965. static void yuv422p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  966. int width, int height)
  967. {
  968. uint8_t *p, *p1;
  969. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  970. int w;
  971. p1 = dst->data[0];
  972. lum1 = src->data[0];
  973. cb1 = src->data[1];
  974. cr1 = src->data[2];
  975. for(;height > 0; height--) {
  976. p = p1;
  977. lum = lum1;
  978. cb = cb1;
  979. cr = cr1;
  980. for(w = width; w >= 2; w -= 2) {
  981. p[1] = lum[0];
  982. p[0] = cb[0];
  983. p[3] = lum[1];
  984. p[2] = cr[0];
  985. p += 4;
  986. lum += 2;
  987. cb++;
  988. cr++;
  989. }
  990. p1 += dst->linesize[0];
  991. lum1 += src->linesize[0];
  992. cb1 += src->linesize[1];
  993. cr1 += src->linesize[2];
  994. }
  995. }
  996. static void uyyvyy411_to_yuv411p(AVPicture *dst, const AVPicture *src,
  997. int width, int height)
  998. {
  999. const uint8_t *p, *p1;
  1000. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1001. int w;
  1002. p1 = src->data[0];
  1003. lum1 = dst->data[0];
  1004. cb1 = dst->data[1];
  1005. cr1 = dst->data[2];
  1006. for(;height > 0; height--) {
  1007. p = p1;
  1008. lum = lum1;
  1009. cb = cb1;
  1010. cr = cr1;
  1011. for(w = width; w >= 4; w -= 4) {
  1012. cb[0] = p[0];
  1013. lum[0] = p[1];
  1014. lum[1] = p[2];
  1015. cr[0] = p[3];
  1016. lum[2] = p[4];
  1017. lum[3] = p[5];
  1018. p += 6;
  1019. lum += 4;
  1020. cb++;
  1021. cr++;
  1022. }
  1023. p1 += src->linesize[0];
  1024. lum1 += dst->linesize[0];
  1025. cb1 += dst->linesize[1];
  1026. cr1 += dst->linesize[2];
  1027. }
  1028. }
  1029. static void yuv420p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  1030. int width, int height)
  1031. {
  1032. int w, h;
  1033. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1034. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1035. uint8_t *cb1, *cb2 = src->data[1];
  1036. uint8_t *cr1, *cr2 = src->data[2];
  1037. for(h = height / 2; h--;) {
  1038. line1 = linesrc;
  1039. line2 = linesrc + dst->linesize[0];
  1040. lum1 = lumsrc;
  1041. lum2 = lumsrc + src->linesize[0];
  1042. cb1 = cb2;
  1043. cr1 = cr2;
  1044. for(w = width / 2; w--;) {
  1045. *line1++ = *lum1++; *line2++ = *lum2++;
  1046. *line1++ = *line2++ = *cb1++;
  1047. *line1++ = *lum1++; *line2++ = *lum2++;
  1048. *line1++ = *line2++ = *cr1++;
  1049. }
  1050. linesrc += dst->linesize[0] * 2;
  1051. lumsrc += src->linesize[0] * 2;
  1052. cb2 += src->linesize[1];
  1053. cr2 += src->linesize[2];
  1054. }
  1055. }
  1056. static void yuv420p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1057. int width, int height)
  1058. {
  1059. int w, h;
  1060. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1061. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1062. uint8_t *cb1, *cb2 = src->data[1];
  1063. uint8_t *cr1, *cr2 = src->data[2];
  1064. for(h = height / 2; h--;) {
  1065. line1 = linesrc;
  1066. line2 = linesrc + dst->linesize[0];
  1067. lum1 = lumsrc;
  1068. lum2 = lumsrc + src->linesize[0];
  1069. cb1 = cb2;
  1070. cr1 = cr2;
  1071. for(w = width / 2; w--;) {
  1072. *line1++ = *line2++ = *cb1++;
  1073. *line1++ = *lum1++; *line2++ = *lum2++;
  1074. *line1++ = *line2++ = *cr1++;
  1075. *line1++ = *lum1++; *line2++ = *lum2++;
  1076. }
  1077. linesrc += dst->linesize[0] * 2;
  1078. lumsrc += src->linesize[0] * 2;
  1079. cb2 += src->linesize[1];
  1080. cr2 += src->linesize[2];
  1081. }
  1082. }
  1083. #define SCALEBITS 10
  1084. #define ONE_HALF (1 << (SCALEBITS - 1))
  1085. #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
  1086. #define YUV_TO_RGB1_CCIR(cb1, cr1)\
  1087. {\
  1088. cb = (cb1) - 128;\
  1089. cr = (cr1) - 128;\
  1090. r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
  1091. g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
  1092. ONE_HALF;\
  1093. b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
  1094. }
  1095. #define YUV_TO_RGB2_CCIR(r, g, b, y1)\
  1096. {\
  1097. y = ((y1) - 16) * FIX(255.0/219.0);\
  1098. r = cm[(y + r_add) >> SCALEBITS];\
  1099. g = cm[(y + g_add) >> SCALEBITS];\
  1100. b = cm[(y + b_add) >> SCALEBITS];\
  1101. }
  1102. #define YUV_TO_RGB1(cb1, cr1)\
  1103. {\
  1104. cb = (cb1) - 128;\
  1105. cr = (cr1) - 128;\
  1106. r_add = FIX(1.40200) * cr + ONE_HALF;\
  1107. g_add = - FIX(0.34414) * cb - FIX(0.71414) * cr + ONE_HALF;\
  1108. b_add = FIX(1.77200) * cb + ONE_HALF;\
  1109. }
  1110. #define YUV_TO_RGB2(r, g, b, y1)\
  1111. {\
  1112. y = (y1) << SCALEBITS;\
  1113. r = cm[(y + r_add) >> SCALEBITS];\
  1114. g = cm[(y + g_add) >> SCALEBITS];\
  1115. b = cm[(y + b_add) >> SCALEBITS];\
  1116. }
  1117. #define Y_CCIR_TO_JPEG(y)\
  1118. cm[((y) * FIX(255.0/219.0) + (ONE_HALF - 16 * FIX(255.0/219.0))) >> SCALEBITS]
  1119. #define Y_JPEG_TO_CCIR(y)\
  1120. (((y) * FIX(219.0/255.0) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
  1121. #define C_CCIR_TO_JPEG(y)\
  1122. cm[(((y) - 128) * FIX(127.0/112.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS]
  1123. /* NOTE: the clamp is really necessary! */
  1124. static inline int C_JPEG_TO_CCIR(int y) {
  1125. y = (((y - 128) * FIX(112.0/127.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS);
  1126. if (y < 16)
  1127. y = 16;
  1128. return y;
  1129. }
  1130. #define RGB_TO_Y(r, g, b) \
  1131. ((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
  1132. FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)
  1133. #define RGB_TO_U(r1, g1, b1, shift)\
  1134. (((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
  1135. FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  1136. #define RGB_TO_V(r1, g1, b1, shift)\
  1137. (((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
  1138. FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  1139. #define RGB_TO_Y_CCIR(r, g, b) \
  1140. ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  1141. FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
  1142. #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
  1143. (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
  1144. FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  1145. #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
  1146. (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
  1147. FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
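/* Worked example of the fixed-point scheme above: with SCALEBITS = 10,
 * FIX(0.29900) = 306, FIX(0.58700) = 601 and FIX(0.11400) = 117, so for a
 * white pixel (r = g = b = 255)
 *
 *   RGB_TO_Y(255, 255, 255) = (306*255 + 601*255 + 117*255 + 512) >> 10
 *                           = (1024*255 + 512) >> 10
 *                           = 255
 *
 * The rounded coefficients still sum to 1 << SCALEBITS, so full-range white
 * maps exactly to Y = 255. */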
  1148. static uint8_t y_ccir_to_jpeg[256];
  1149. static uint8_t y_jpeg_to_ccir[256];
  1150. static uint8_t c_ccir_to_jpeg[256];
  1151. static uint8_t c_jpeg_to_ccir[256];
  1152. /* init various conversion tables */
  1153. static void img_convert_init(void)
  1154. {
  1155. int i;
  1156. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  1157. for(i = 0;i < 256; i++) {
  1158. y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
  1159. y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
  1160. c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
  1161. c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
  1162. }
  1163. }
  1164. /* apply to each pixel the given table */
  1165. static void img_apply_table(uint8_t *dst, int dst_wrap,
  1166. const uint8_t *src, int src_wrap,
  1167. int width, int height, const uint8_t *table1)
  1168. {
  1169. int n;
  1170. const uint8_t *s;
  1171. uint8_t *d;
  1172. const uint8_t *table;
  1173. table = table1;
  1174. for(;height > 0; height--) {
  1175. s = src;
  1176. d = dst;
  1177. n = width;
  1178. while (n >= 4) {
  1179. d[0] = table[s[0]];
  1180. d[1] = table[s[1]];
  1181. d[2] = table[s[2]];
  1182. d[3] = table[s[3]];
  1183. d += 4;
  1184. s += 4;
  1185. n -= 4;
  1186. }
  1187. while (n > 0) {
  1188. d[0] = table[s[0]];
  1189. d++;
  1190. s++;
  1191. n--;
  1192. }
  1193. dst += dst_wrap;
  1194. src += src_wrap;
  1195. }
  1196. }
  1197. /* XXX: use generic filter ? */
  1198. /* XXX: in most cases, the sampling position is incorrect */
  1199. /* 4x1 -> 1x1 */
  1200. static void shrink41(uint8_t *dst, int dst_wrap,
  1201. const uint8_t *src, int src_wrap,
  1202. int width, int height)
  1203. {
  1204. int w;
  1205. const uint8_t *s;
  1206. uint8_t *d;
  1207. for(;height > 0; height--) {
  1208. s = src;
  1209. d = dst;
  1210. for(w = width;w > 0; w--) {
  1211. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  1212. s += 4;
  1213. d++;
  1214. }
  1215. src += src_wrap;
  1216. dst += dst_wrap;
  1217. }
  1218. }
  1219. /* 2x1 -> 1x1 */
  1220. static void shrink21(uint8_t *dst, int dst_wrap,
  1221. const uint8_t *src, int src_wrap,
  1222. int width, int height)
  1223. {
  1224. int w;
  1225. const uint8_t *s;
  1226. uint8_t *d;
  1227. for(;height > 0; height--) {
  1228. s = src;
  1229. d = dst;
  1230. for(w = width;w > 0; w--) {
  1231. d[0] = (s[0] + s[1]) >> 1;
  1232. s += 2;
  1233. d++;
  1234. }
  1235. src += src_wrap;
  1236. dst += dst_wrap;
  1237. }
  1238. }
  1239. /* 1x2 -> 1x1 */
  1240. static void shrink12(uint8_t *dst, int dst_wrap,
  1241. const uint8_t *src, int src_wrap,
  1242. int width, int height)
  1243. {
  1244. int w;
  1245. uint8_t *d;
  1246. const uint8_t *s1, *s2;
  1247. for(;height > 0; height--) {
  1248. s1 = src;
  1249. s2 = s1 + src_wrap;
  1250. d = dst;
  1251. for(w = width;w >= 4; w-=4) {
  1252. d[0] = (s1[0] + s2[0]) >> 1;
  1253. d[1] = (s1[1] + s2[1]) >> 1;
  1254. d[2] = (s1[2] + s2[2]) >> 1;
  1255. d[3] = (s1[3] + s2[3]) >> 1;
  1256. s1 += 4;
  1257. s2 += 4;
  1258. d += 4;
  1259. }
  1260. for(;w > 0; w--) {
  1261. d[0] = (s1[0] + s2[0]) >> 1;
  1262. s1++;
  1263. s2++;
  1264. d++;
  1265. }
  1266. src += 2 * src_wrap;
  1267. dst += dst_wrap;
  1268. }
  1269. }
  1270. /* 2x2 -> 1x1 */
  1271. void ff_shrink22(uint8_t *dst, int dst_wrap,
  1272. const uint8_t *src, int src_wrap,
  1273. int width, int height)
  1274. {
  1275. int w;
  1276. const uint8_t *s1, *s2;
  1277. uint8_t *d;
  1278. for(;height > 0; height--) {
  1279. s1 = src;
  1280. s2 = s1 + src_wrap;
  1281. d = dst;
  1282. for(w = width;w >= 4; w-=4) {
  1283. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1284. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  1285. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  1286. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  1287. s1 += 8;
  1288. s2 += 8;
  1289. d += 4;
  1290. }
  1291. for(;w > 0; w--) {
  1292. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1293. s1 += 2;
  1294. s2 += 2;
  1295. d++;
  1296. }
  1297. src += 2 * src_wrap;
  1298. dst += dst_wrap;
  1299. }
  1300. }
  1301. /* 4x4 -> 1x1 */
  1302. void ff_shrink44(uint8_t *dst, int dst_wrap,
  1303. const uint8_t *src, int src_wrap,
  1304. int width, int height)
  1305. {
  1306. int w;
  1307. const uint8_t *s1, *s2, *s3, *s4;
  1308. uint8_t *d;
  1309. for(;height > 0; height--) {
  1310. s1 = src;
  1311. s2 = s1 + src_wrap;
  1312. s3 = s2 + src_wrap;
  1313. s4 = s3 + src_wrap;
  1314. d = dst;
  1315. for(w = width;w > 0; w--) {
  1316. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  1317. s2[0] + s2[1] + s2[2] + s2[3] +
  1318. s3[0] + s3[1] + s3[2] + s3[3] +
  1319. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  1320. s1 += 4;
  1321. s2 += 4;
  1322. s3 += 4;
  1323. s4 += 4;
  1324. d++;
  1325. }
  1326. src += 4 * src_wrap;
  1327. dst += dst_wrap;
  1328. }
  1329. }
  1330. /* 8x8 -> 1x1 */
  1331. void ff_shrink88(uint8_t *dst, int dst_wrap,
  1332. const uint8_t *src, int src_wrap,
  1333. int width, int height)
  1334. {
  1335. int w, i;
  1336. for(;height > 0; height--) {
  1337. for(w = width;w > 0; w--) {
  1338. int tmp=0;
  1339. for(i=0; i<8; i++){
  1340. tmp += src[0] + src[1] + src[2] + src[3] + src[4] + src[5] + src[6] + src[7];
  1341. src += src_wrap;
  1342. }
  1343. *(dst++) = (tmp + 32)>>6;
  1344. src += 8 - 8*src_wrap;
  1345. }
  1346. src += 8*src_wrap - 8*width;
  1347. dst += dst_wrap - width;
  1348. }
  1349. }
  1350. static void grow21_line(uint8_t *dst, const uint8_t *src,
  1351. int width)
  1352. {
  1353. int w;
  1354. const uint8_t *s1;
  1355. uint8_t *d;
  1356. s1 = src;
  1357. d = dst;
  1358. for(w = width;w >= 4; w-=4) {
  1359. d[1] = d[0] = s1[0];
  1360. d[3] = d[2] = s1[1];
  1361. s1 += 2;
  1362. d += 4;
  1363. }
  1364. for(;w >= 2; w -= 2) {
  1365. d[1] = d[0] = s1[0];
  1366. s1 ++;
  1367. d += 2;
  1368. }
  1369. /* only needed if width is not a multiple of two */
1370. /* XXX: verify that */
  1371. if (w) {
  1372. d[0] = s1[0];
  1373. }
  1374. }
  1375. static void grow41_line(uint8_t *dst, const uint8_t *src,
  1376. int width)
  1377. {
  1378. int w, v;
  1379. const uint8_t *s1;
  1380. uint8_t *d;
  1381. s1 = src;
  1382. d = dst;
  1383. for(w = width;w >= 4; w-=4) {
  1384. v = s1[0];
  1385. d[0] = v;
  1386. d[1] = v;
  1387. d[2] = v;
  1388. d[3] = v;
  1389. s1 ++;
  1390. d += 4;
  1391. }
  1392. }
  1393. /* 1x1 -> 2x1 */
  1394. static void grow21(uint8_t *dst, int dst_wrap,
  1395. const uint8_t *src, int src_wrap,
  1396. int width, int height)
  1397. {
  1398. for(;height > 0; height--) {
  1399. grow21_line(dst, src, width);
  1400. src += src_wrap;
  1401. dst += dst_wrap;
  1402. }
  1403. }
  1404. /* 1x1 -> 2x2 */
  1405. static void grow22(uint8_t *dst, int dst_wrap,
  1406. const uint8_t *src, int src_wrap,
  1407. int width, int height)
  1408. {
  1409. for(;height > 0; height--) {
  1410. grow21_line(dst, src, width);
  1411. if (height%2)
  1412. src += src_wrap;
  1413. dst += dst_wrap;
  1414. }
  1415. }
  1416. /* 1x1 -> 4x1 */
  1417. static void grow41(uint8_t *dst, int dst_wrap,
  1418. const uint8_t *src, int src_wrap,
  1419. int width, int height)
  1420. {
  1421. for(;height > 0; height--) {
  1422. grow41_line(dst, src, width);
  1423. src += src_wrap;
  1424. dst += dst_wrap;
  1425. }
  1426. }
  1427. /* 1x1 -> 4x4 */
  1428. static void grow44(uint8_t *dst, int dst_wrap,
  1429. const uint8_t *src, int src_wrap,
  1430. int width, int height)
  1431. {
  1432. for(;height > 0; height--) {
  1433. grow41_line(dst, src, width);
  1434. if ((height & 3) == 1)
  1435. src += src_wrap;
  1436. dst += dst_wrap;
  1437. }
  1438. }
  1439. /* 1x2 -> 2x1 */
  1440. static void conv411(uint8_t *dst, int dst_wrap,
  1441. const uint8_t *src, int src_wrap,
  1442. int width, int height)
  1443. {
  1444. int w, c;
  1445. const uint8_t *s1, *s2;
  1446. uint8_t *d;
  1447. width>>=1;
  1448. for(;height > 0; height--) {
  1449. s1 = src;
  1450. s2 = src + src_wrap;
  1451. d = dst;
  1452. for(w = width;w > 0; w--) {
  1453. c = (s1[0] + s2[0]) >> 1;
  1454. d[0] = c;
  1455. d[1] = c;
  1456. s1++;
  1457. s2++;
  1458. d += 2;
  1459. }
  1460. src += src_wrap * 2;
  1461. dst += dst_wrap;
  1462. }
  1463. }
  1464. /* XXX: add jpeg quantize code */
  1465. #define TRANSP_INDEX (6*6*6)
1466. /* this may be slow, but it allows for extensions */
  1467. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1468. {
  1469. return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
  1470. }
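/* Illustrative mapping: a pure white pixel (255, 255, 255) yields
 * ((255/47)%6)*36 + ((255/47)%6)*6 + (255/47)%6 = 5*36 + 5*6 + 5 = 215,
 * the last entry of the 6x6x6 cube, which build_rgb_palette() below fills
 * with 0xFFFFFFFF (opaque white). */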
  1471. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  1472. {
  1473. uint32_t *pal;
  1474. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  1475. int i, r, g, b;
  1476. pal = (uint32_t *)palette;
  1477. i = 0;
  1478. for(r = 0; r < 6; r++) {
  1479. for(g = 0; g < 6; g++) {
  1480. for(b = 0; b < 6; b++) {
  1481. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  1482. (pal_value[g] << 8) | pal_value[b];
  1483. }
  1484. }
  1485. }
  1486. if (has_alpha)
  1487. pal[i++] = 0;
  1488. while (i < 256)
  1489. pal[i++] = 0xff000000;
  1490. }
  1491. /* copy bit n to bits 0 ... n - 1 */
  1492. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1493. {
  1494. int mask;
  1495. mask = (1 << n) - 1;
  1496. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1497. }
  1498. /* rgb555 handling */
  1499. #define RGB_NAME rgb555
  1500. #define RGB_IN(r, g, b, s)\
  1501. {\
  1502. unsigned int v = ((const uint16_t *)(s))[0];\
  1503. r = bitcopy_n(v >> (10 - 3), 3);\
  1504. g = bitcopy_n(v >> (5 - 3), 3);\
  1505. b = bitcopy_n(v << 3, 3);\
  1506. }
  1507. #define RGB_OUT(d, r, g, b)\
  1508. {\
  1509. ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3);\
  1510. }
  1511. #define BPP 2
  1512. #include "imgconvert_template.h"
  1513. /* rgb565 handling */
  1514. #define RGB_NAME rgb565
  1515. #define RGB_IN(r, g, b, s)\
  1516. {\
  1517. unsigned int v = ((const uint16_t *)(s))[0];\
  1518. r = bitcopy_n(v >> (11 - 3), 3);\
  1519. g = bitcopy_n(v >> (5 - 2), 2);\
  1520. b = bitcopy_n(v << 3, 3);\
  1521. }
  1522. #define RGB_OUT(d, r, g, b)\
  1523. {\
  1524. ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
  1525. }
  1526. #define BPP 2
  1527. #include "imgconvert_template.h"
  1528. /* bgr24 handling */
  1529. #define RGB_NAME bgr24
  1530. #define RGB_IN(r, g, b, s)\
  1531. {\
  1532. b = (s)[0];\
  1533. g = (s)[1];\
  1534. r = (s)[2];\
  1535. }
  1536. #define RGB_OUT(d, r, g, b)\
  1537. {\
  1538. (d)[0] = b;\
  1539. (d)[1] = g;\
  1540. (d)[2] = r;\
  1541. }
  1542. #define BPP 3
  1543. #include "imgconvert_template.h"
  1544. #undef RGB_IN
  1545. #undef RGB_OUT
  1546. #undef BPP
  1547. /* rgb24 handling */
  1548. #define RGB_NAME rgb24
  1549. #define FMT_RGB24
  1550. #define RGB_IN(r, g, b, s)\
  1551. {\
  1552. r = (s)[0];\
  1553. g = (s)[1];\
  1554. b = (s)[2];\
  1555. }
  1556. #define RGB_OUT(d, r, g, b)\
  1557. {\
  1558. (d)[0] = r;\
  1559. (d)[1] = g;\
  1560. (d)[2] = b;\
  1561. }
  1562. #define BPP 3
  1563. #include "imgconvert_template.h"
  1564. /* rgb32 handling */
  1565. #define RGB_NAME rgb32
  1566. #define FMT_RGB32
  1567. #define RGB_IN(r, g, b, s)\
  1568. {\
  1569. unsigned int v = ((const uint32_t *)(s))[0];\
  1570. r = (v >> 16) & 0xff;\
  1571. g = (v >> 8) & 0xff;\
  1572. b = v & 0xff;\
  1573. }
  1574. #define RGBA_IN(r, g, b, a, s)\
  1575. {\
  1576. unsigned int v = ((const uint32_t *)(s))[0];\
  1577. a = (v >> 24) & 0xff;\
  1578. r = (v >> 16) & 0xff;\
  1579. g = (v >> 8) & 0xff;\
  1580. b = v & 0xff;\
  1581. }
  1582. #define RGBA_OUT(d, r, g, b, a)\
  1583. {\
  1584. ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
  1585. }
  1586. #define BPP 4
  1587. #include "imgconvert_template.h"
  1588. static void mono_to_gray(AVPicture *dst, const AVPicture *src,
  1589. int width, int height, int xor_mask)
  1590. {
  1591. const unsigned char *p;
  1592. unsigned char *q;
  1593. int v, dst_wrap, src_wrap;
  1594. int y, w;
  1595. p = src->data[0];
  1596. src_wrap = src->linesize[0] - ((width + 7) >> 3);
  1597. q = dst->data[0];
  1598. dst_wrap = dst->linesize[0] - width;
  1599. for(y=0;y<height;y++) {
  1600. w = width;
  1601. while (w >= 8) {
  1602. v = *p++ ^ xor_mask;
  1603. q[0] = -(v >> 7);
  1604. q[1] = -((v >> 6) & 1);
  1605. q[2] = -((v >> 5) & 1);
  1606. q[3] = -((v >> 4) & 1);
  1607. q[4] = -((v >> 3) & 1);
  1608. q[5] = -((v >> 2) & 1);
  1609. q[6] = -((v >> 1) & 1);
  1610. q[7] = -((v >> 0) & 1);
  1611. w -= 8;
  1612. q += 8;
  1613. }
  1614. if (w > 0) {
  1615. v = *p++ ^ xor_mask;
  1616. do {
  1617. q[0] = -((v >> 7) & 1);
  1618. q++;
  1619. v <<= 1;
  1620. } while (--w);
  1621. }
  1622. p += src_wrap;
  1623. q += dst_wrap;
  1624. }
  1625. }
  1626. static void monowhite_to_gray(AVPicture *dst, const AVPicture *src,
  1627. int width, int height)
  1628. {
  1629. mono_to_gray(dst, src, width, height, 0xff);
  1630. }
  1631. static void monoblack_to_gray(AVPicture *dst, const AVPicture *src,
  1632. int width, int height)
  1633. {
  1634. mono_to_gray(dst, src, width, height, 0x00);
  1635. }
  1636. static void gray_to_mono(AVPicture *dst, const AVPicture *src,
  1637. int width, int height, int xor_mask)
  1638. {
  1639. int n;
  1640. const uint8_t *s;
  1641. uint8_t *d;
  1642. int j, b, v, n1, src_wrap, dst_wrap, y;
  1643. s = src->data[0];
  1644. src_wrap = src->linesize[0] - width;
  1645. d = dst->data[0];
  1646. dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
  1647. for(y=0;y<height;y++) {
  1648. n = width;
  1649. while (n >= 8) {
  1650. v = 0;
  1651. for(j=0;j<8;j++) {
  1652. b = s[0];
  1653. s++;
  1654. v = (v << 1) | (b >> 7);
  1655. }
  1656. d[0] = v ^ xor_mask;
  1657. d++;
  1658. n -= 8;
  1659. }
  1660. if (n > 0) {
  1661. n1 = n;
  1662. v = 0;
  1663. while (n > 0) {
  1664. b = s[0];
  1665. s++;
  1666. v = (v << 1) | (b >> 7);
  1667. n--;
  1668. }
  1669. d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
  1670. d++;
  1671. }
  1672. s += src_wrap;
  1673. d += dst_wrap;
  1674. }
  1675. }
  1676. static void gray_to_monowhite(AVPicture *dst, const AVPicture *src,
  1677. int width, int height)
  1678. {
  1679. gray_to_mono(dst, src, width, height, 0xff);
  1680. }
  1681. static void gray_to_monoblack(AVPicture *dst, const AVPicture *src,
  1682. int width, int height)
  1683. {
  1684. gray_to_mono(dst, src, width, height, 0x00);
  1685. }
  1686. static void gray_to_gray16(AVPicture *dst, const AVPicture *src,
  1687. int width, int height)
  1688. {
  1689. int x, y, src_wrap, dst_wrap;
  1690. uint8_t *s, *d;
  1691. s = src->data[0];
  1692. src_wrap = src->linesize[0] - width;
  1693. d = dst->data[0];
  1694. dst_wrap = dst->linesize[0] - width * 2;
  1695. for(y=0; y<height; y++){
  1696. for(x=0; x<width; x++){
  1697. *d++ = *s;
  1698. *d++ = *s++;
  1699. }
  1700. s += src_wrap;
  1701. d += dst_wrap;
  1702. }
  1703. }
  1704. static void gray16_to_gray(AVPicture *dst, const AVPicture *src,
  1705. int width, int height)
  1706. {
  1707. int x, y, src_wrap, dst_wrap;
  1708. uint8_t *s, *d;
  1709. s = src->data[0];
  1710. src_wrap = src->linesize[0] - width * 2;
  1711. d = dst->data[0];
  1712. dst_wrap = dst->linesize[0] - width;
  1713. for(y=0; y<height; y++){
  1714. for(x=0; x<width; x++){
  1715. *d++ = *s;
  1716. s += 2;
  1717. }
  1718. s += src_wrap;
  1719. d += dst_wrap;
  1720. }
  1721. }
  1722. static void gray16be_to_gray(AVPicture *dst, const AVPicture *src,
  1723. int width, int height)
  1724. {
  1725. gray16_to_gray(dst, src, width, height);
  1726. }
  1727. static void gray16le_to_gray(AVPicture *dst, const AVPicture *src,
  1728. int width, int height)
  1729. {
  1730. AVPicture tmpsrc = *src;
  1731. tmpsrc.data[0]++;
  1732. gray16_to_gray(dst, &tmpsrc, width, height);
  1733. }
  1734. static void gray16_to_gray16(AVPicture *dst, const AVPicture *src,
  1735. int width, int height)
  1736. {
  1737. int x, y, src_wrap, dst_wrap;
  1738. uint16_t *s, *d;
  1739. s = src->data[0];
  1740. src_wrap = (src->linesize[0] - width * 2)/2;
  1741. d = dst->data[0];
  1742. dst_wrap = (dst->linesize[0] - width * 2)/2;
  1743. for(y=0; y<height; y++){
  1744. for(x=0; x<width; x++){
  1745. *d++ = bswap_16(*s++);
  1746. }
  1747. s += src_wrap;
  1748. d += dst_wrap;
  1749. }
  1750. }
  1751. typedef struct ConvertEntry {
  1752. void (*convert)(AVPicture *dst,
  1753. const AVPicture *src, int width, int height);
  1754. } ConvertEntry;
1755. /* Add each new conversion function in this table. In order to be able
  1756. to convert from any format to any format, the following constraints
  1757. must be satisfied:
  1758. - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
  1759. - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
  1760. - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGB32
  1761. - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
  1762. PIX_FMT_RGB24.
1763. - PIX_FMT_YUYV422 must convert to and from PIX_FMT_YUV422P.
  1764. The other conversion functions are just optimisations for common cases.
  1765. */
  1766. static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
  1767. [PIX_FMT_YUV420P] = {
  1768. [PIX_FMT_YUYV422] = {
  1769. .convert = yuv420p_to_yuyv422,
  1770. },
  1771. [PIX_FMT_RGB555] = {
  1772. .convert = yuv420p_to_rgb555
  1773. },
  1774. [PIX_FMT_RGB565] = {
  1775. .convert = yuv420p_to_rgb565
  1776. },
  1777. [PIX_FMT_BGR24] = {
  1778. .convert = yuv420p_to_bgr24
  1779. },
  1780. [PIX_FMT_RGB24] = {
  1781. .convert = yuv420p_to_rgb24
  1782. },
  1783. [PIX_FMT_RGB32] = {
  1784. .convert = yuv420p_to_rgb32
  1785. },
  1786. [PIX_FMT_UYVY422] = {
  1787. .convert = yuv420p_to_uyvy422,
  1788. },
  1789. },
  1790. [PIX_FMT_YUV422P] = {
  1791. [PIX_FMT_YUYV422] = {
  1792. .convert = yuv422p_to_yuyv422,
  1793. },
  1794. [PIX_FMT_UYVY422] = {
  1795. .convert = yuv422p_to_uyvy422,
  1796. },
  1797. },
  1798. [PIX_FMT_YUV444P] = {
  1799. [PIX_FMT_RGB24] = {
  1800. .convert = yuv444p_to_rgb24
  1801. },
  1802. },
  1803. [PIX_FMT_YUVJ420P] = {
  1804. [PIX_FMT_RGB555] = {
  1805. .convert = yuvj420p_to_rgb555
  1806. },
  1807. [PIX_FMT_RGB565] = {
  1808. .convert = yuvj420p_to_rgb565
  1809. },
  1810. [PIX_FMT_BGR24] = {
  1811. .convert = yuvj420p_to_bgr24
  1812. },
  1813. [PIX_FMT_RGB24] = {
  1814. .convert = yuvj420p_to_rgb24
  1815. },
  1816. [PIX_FMT_RGB32] = {
  1817. .convert = yuvj420p_to_rgb32
  1818. },
  1819. },
  1820. [PIX_FMT_YUVJ444P] = {
  1821. [PIX_FMT_RGB24] = {
  1822. .convert = yuvj444p_to_rgb24
  1823. },
  1824. },
  1825. [PIX_FMT_YUYV422] = {
  1826. [PIX_FMT_YUV420P] = {
  1827. .convert = yuyv422_to_yuv420p,
  1828. },
  1829. [PIX_FMT_YUV422P] = {
  1830. .convert = yuyv422_to_yuv422p,
  1831. },
  1832. },
  1833. [PIX_FMT_UYVY422] = {
  1834. [PIX_FMT_YUV420P] = {
  1835. .convert = uyvy422_to_yuv420p,
  1836. },
  1837. [PIX_FMT_YUV422P] = {
  1838. .convert = uyvy422_to_yuv422p,
  1839. },
  1840. },
  1841. [PIX_FMT_RGB24] = {
  1842. [PIX_FMT_YUV420P] = {
  1843. .convert = rgb24_to_yuv420p
  1844. },
  1845. [PIX_FMT_RGB565] = {
  1846. .convert = rgb24_to_rgb565
  1847. },
  1848. [PIX_FMT_RGB555] = {
  1849. .convert = rgb24_to_rgb555
  1850. },
  1851. [PIX_FMT_RGB32] = {
  1852. .convert = rgb24_to_rgb32
  1853. },
  1854. [PIX_FMT_BGR24] = {
  1855. .convert = rgb24_to_bgr24
  1856. },
  1857. [PIX_FMT_GRAY8] = {
  1858. .convert = rgb24_to_gray
  1859. },
  1860. [PIX_FMT_PAL8] = {
  1861. .convert = rgb24_to_pal8
  1862. },
  1863. [PIX_FMT_YUV444P] = {
  1864. .convert = rgb24_to_yuv444p
  1865. },
  1866. [PIX_FMT_YUVJ420P] = {
  1867. .convert = rgb24_to_yuvj420p
  1868. },
  1869. [PIX_FMT_YUVJ444P] = {
  1870. .convert = rgb24_to_yuvj444p
  1871. },
  1872. },
  1873. [PIX_FMT_RGB32] = {
  1874. [PIX_FMT_RGB24] = {
  1875. .convert = rgb32_to_rgb24
  1876. },
  1877. [PIX_FMT_BGR24] = {
  1878. .convert = rgb32_to_bgr24
  1879. },
  1880. [PIX_FMT_RGB565] = {
  1881. .convert = rgb32_to_rgb565
  1882. },
  1883. [PIX_FMT_RGB555] = {
  1884. .convert = rgb32_to_rgb555
  1885. },
  1886. [PIX_FMT_PAL8] = {
  1887. .convert = rgb32_to_pal8
  1888. },
  1889. [PIX_FMT_YUV420P] = {
  1890. .convert = rgb32_to_yuv420p
  1891. },
  1892. [PIX_FMT_GRAY8] = {
  1893. .convert = rgb32_to_gray
  1894. },
  1895. },
  1896. [PIX_FMT_BGR24] = {
  1897. [PIX_FMT_RGB32] = {
  1898. .convert = bgr24_to_rgb32
  1899. },
  1900. [PIX_FMT_RGB24] = {
  1901. .convert = bgr24_to_rgb24
  1902. },
  1903. [PIX_FMT_YUV420P] = {
  1904. .convert = bgr24_to_yuv420p
  1905. },
  1906. [PIX_FMT_GRAY8] = {
  1907. .convert = bgr24_to_gray
  1908. },
  1909. },
  1910. [PIX_FMT_RGB555] = {
  1911. [PIX_FMT_RGB24] = {
  1912. .convert = rgb555_to_rgb24
  1913. },
  1914. [PIX_FMT_RGB32] = {
  1915. .convert = rgb555_to_rgb32
  1916. },
  1917. [PIX_FMT_YUV420P] = {
  1918. .convert = rgb555_to_yuv420p
  1919. },
  1920. [PIX_FMT_GRAY8] = {
  1921. .convert = rgb555_to_gray
  1922. },
  1923. },
  1924. [PIX_FMT_RGB565] = {
  1925. [PIX_FMT_RGB32] = {
  1926. .convert = rgb565_to_rgb32
  1927. },
  1928. [PIX_FMT_RGB24] = {
  1929. .convert = rgb565_to_rgb24
  1930. },
  1931. [PIX_FMT_YUV420P] = {
  1932. .convert = rgb565_to_yuv420p
  1933. },
  1934. [PIX_FMT_GRAY8] = {
  1935. .convert = rgb565_to_gray
  1936. },
  1937. },
  1938. [PIX_FMT_GRAY16BE] = {
  1939. [PIX_FMT_GRAY8] = {
  1940. .convert = gray16be_to_gray
  1941. },
  1942. [PIX_FMT_GRAY16LE] = {
  1943. .convert = gray16_to_gray16
  1944. },
  1945. },
  1946. [PIX_FMT_GRAY16LE] = {
  1947. [PIX_FMT_GRAY8] = {
  1948. .convert = gray16le_to_gray
  1949. },
  1950. [PIX_FMT_GRAY16BE] = {
  1951. .convert = gray16_to_gray16
  1952. },
  1953. },
  1954. [PIX_FMT_GRAY8] = {
  1955. [PIX_FMT_RGB555] = {
  1956. .convert = gray_to_rgb555
  1957. },
  1958. [PIX_FMT_RGB565] = {
  1959. .convert = gray_to_rgb565
  1960. },
  1961. [PIX_FMT_RGB24] = {
  1962. .convert = gray_to_rgb24
  1963. },
  1964. [PIX_FMT_BGR24] = {
  1965. .convert = gray_to_bgr24
  1966. },
  1967. [PIX_FMT_RGB32] = {
  1968. .convert = gray_to_rgb32
  1969. },
  1970. [PIX_FMT_MONOWHITE] = {
  1971. .convert = gray_to_monowhite
  1972. },
  1973. [PIX_FMT_MONOBLACK] = {
  1974. .convert = gray_to_monoblack
  1975. },
  1976. [PIX_FMT_GRAY16LE] = {
  1977. .convert = gray_to_gray16
  1978. },
  1979. [PIX_FMT_GRAY16BE] = {
  1980. .convert = gray_to_gray16
  1981. },
  1982. },
  1983. [PIX_FMT_MONOWHITE] = {
  1984. [PIX_FMT_GRAY8] = {
  1985. .convert = monowhite_to_gray
  1986. },
  1987. },
  1988. [PIX_FMT_MONOBLACK] = {
  1989. [PIX_FMT_GRAY8] = {
  1990. .convert = monoblack_to_gray
  1991. },
  1992. },
  1993. [PIX_FMT_PAL8] = {
  1994. [PIX_FMT_RGB555] = {
  1995. .convert = pal8_to_rgb555
  1996. },
  1997. [PIX_FMT_RGB565] = {
  1998. .convert = pal8_to_rgb565
  1999. },
  2000. [PIX_FMT_BGR24] = {
  2001. .convert = pal8_to_bgr24
  2002. },
  2003. [PIX_FMT_RGB24] = {
  2004. .convert = pal8_to_rgb24
  2005. },
  2006. [PIX_FMT_RGB32] = {
  2007. .convert = pal8_to_rgb32
  2008. },
  2009. },
  2010. [PIX_FMT_UYYVYY411] = {
  2011. [PIX_FMT_YUV411P] = {
  2012. .convert = uyyvyy411_to_yuv411p,
  2013. },
  2014. },
  2015. };
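/* The table above is indexed as convert_table[src_pix_fmt][dst_pix_fmt];
   an entry whose .convert pointer is NULL means there is no direct
   routine and img_convert() falls back to an intermediate format.
   Lookup sketch (this mirrors what img_convert() does below):

       const ConvertEntry *ce = &convert_table[src_pix_fmt][dst_pix_fmt];
       if (ce->convert)
           ce->convert(dst, src, width, height);
*/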
  2016. int avpicture_alloc(AVPicture *picture,
  2017. int pix_fmt, int width, int height)
  2018. {
  2019. int size;
  2020. void *ptr;
  2021. size = avpicture_get_size(pix_fmt, width, height);
  2022. if(size<0)
  2023. goto fail;
  2024. ptr = av_malloc(size);
  2025. if (!ptr)
  2026. goto fail;
  2027. avpicture_fill(picture, ptr, pix_fmt, width, height);
  2028. return 0;
  2029. fail:
  2030. memset(picture, 0, sizeof(AVPicture));
  2031. return -1;
  2032. }
  2033. void avpicture_free(AVPicture *picture)
  2034. {
  2035. av_free(picture->data[0]);
  2036. }
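/* Note: avpicture_alloc() obtains one contiguous buffer and lets
   avpicture_fill() split it into the plane pointers, which is why
   avpicture_free() only has to release data[0]. */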
  2037. /* return true if yuv planar */
  2038. static inline int is_yuv_planar(const PixFmtInfo *ps)
  2039. {
  2040. return (ps->color_type == FF_COLOR_YUV ||
  2041. ps->color_type == FF_COLOR_YUV_JPEG) &&
  2042. ps->pixel_type == FF_PIXEL_PLANAR;
  2043. }
  2044. int av_picture_crop(AVPicture *dst, const AVPicture *src,
  2045. int pix_fmt, int top_band, int left_band)
  2046. {
  2047. int y_shift;
  2048. int x_shift;
  2049. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || !is_yuv_planar(&pix_fmt_info[pix_fmt]))
  2050. return -1;
  2051. y_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  2052. x_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  2053. dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
  2054. dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift);
  2055. dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift);
  2056. dst->linesize[0] = src->linesize[0];
  2057. dst->linesize[1] = src->linesize[1];
  2058. dst->linesize[2] = src->linesize[2];
  2059. return 0;
  2060. }
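/* The crop offsets are applied to the chroma planes using the format's
   chroma shift, so top_band and left_band should be multiples of the
   subsampling factor (e.g. even values for yuv420p) to keep luma and
   chroma aligned. */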
  2061. int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
  2062. int pix_fmt, int padtop, int padbottom, int padleft, int padright,
  2063. int *color)
  2064. {
  2065. uint8_t *optr;
  2066. int y_shift;
  2067. int x_shift;
  2068. int yheight;
  2069. int i, y;
  2070. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB ||
  2071. !is_yuv_planar(&pix_fmt_info[pix_fmt])) return -1;
  2072. for (i = 0; i < 3; i++) {
  2073. x_shift = i ? pix_fmt_info[pix_fmt].x_chroma_shift : 0;
  2074. y_shift = i ? pix_fmt_info[pix_fmt].y_chroma_shift : 0;
  2075. if (padtop || padleft) {
  2076. memset(dst->data[i], color[i],
  2077. dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift));
  2078. }
  2079. if (padleft || padright) {
  2080. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  2081. (dst->linesize[i] - (padright >> x_shift));
  2082. yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
  2083. for (y = 0; y < yheight; y++) {
  2084. memset(optr, color[i], (padleft + padright) >> x_shift);
  2085. optr += dst->linesize[i];
  2086. }
  2087. }
  2088. if (src) { /* first line */
  2089. uint8_t *iptr = src->data[i];
  2090. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  2091. (padleft >> x_shift);
  2092. memcpy(optr, iptr, src->linesize[i]);
  2093. iptr += src->linesize[i];
  2094. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  2095. (dst->linesize[i] - (padright >> x_shift));
  2096. yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
  2097. for (y = 0; y < yheight; y++) {
  2098. memset(optr, color[i], (padleft + padright) >> x_shift);
  2099. memcpy(optr + ((padleft + padright) >> x_shift), iptr,
  2100. src->linesize[i]);
  2101. iptr += src->linesize[i];
  2102. optr += dst->linesize[i];
  2103. }
  2104. }
  2105. if (padbottom || padright) {
  2106. optr = dst->data[i] + dst->linesize[i] *
  2107. ((height - padbottom) >> y_shift) - (padright >> x_shift);
  2108. memset(optr, color[i],dst->linesize[i] *
  2109. (padbottom >> y_shift) + (padright >> x_shift));
  2110. }
  2111. }
  2112. return 0;
  2113. }
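/* 'color' holds one fill value per plane (Y, U and V).  A minimal usage
   sketch, assuming dst was already allocated with the padded dimensions
   (padded_width and padded_height are placeholders): add a 16-pixel black
   border to a yuv420p picture, black being Y=16, U=V=128 in CCIR range:

       int black[3] = { 16, 128, 128 };
       av_picture_pad(&dst, &src, padded_height, padded_width,
                      PIX_FMT_YUV420P, 16, 16, 16, 16, black);
*/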
  2114. #if LIBAVCODEC_VERSION_INT < ((52<<16)+(0<<8)+0)
  2115. void img_copy(AVPicture *dst, const AVPicture *src,
  2116. int pix_fmt, int width, int height)
  2117. {
  2118. av_picture_copy(dst, src, pix_fmt, width, height);
  2119. }
  2120. int img_crop(AVPicture *dst, const AVPicture *src,
  2121. int pix_fmt, int top_band, int left_band)
  2122. {
  2123. return av_picture_crop(dst, src, pix_fmt, top_band, left_band);
  2124. }
  2125. int img_pad(AVPicture *dst, const AVPicture *src, int height, int width,
  2126. int pix_fmt, int padtop, int padbottom, int padleft, int padright,
  2127. int *color)
  2128. {
  2129. return av_picture_pad(dst, src, height, width, pix_fmt, padtop, padbottom, padleft, padright, color);
  2130. }
  2131. #endif
  2132. #ifndef CONFIG_SWSCALER
  2133. /* XXX: always use linesize. Return -1 if not supported */
  2134. int img_convert(AVPicture *dst, int dst_pix_fmt,
  2135. const AVPicture *src, int src_pix_fmt,
  2136. int src_width, int src_height)
  2137. {
  2138. static int inited;
  2139. int i, ret, dst_width, dst_height, int_pix_fmt;
  2140. const PixFmtInfo *src_pix, *dst_pix;
  2141. const ConvertEntry *ce;
  2142. AVPicture tmp1, *tmp = &tmp1;
  2143. if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
  2144. dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
  2145. return -1;
  2146. if (src_width <= 0 || src_height <= 0)
  2147. return 0;
  2148. if (!inited) {
  2149. inited = 1;
  2150. img_convert_init();
  2151. }
  2152. dst_width = src_width;
  2153. dst_height = src_height;
  2154. dst_pix = &pix_fmt_info[dst_pix_fmt];
  2155. src_pix = &pix_fmt_info[src_pix_fmt];
  2156. if (src_pix_fmt == dst_pix_fmt) {
  2157. /* no conversion needed: just copy */
  2158. av_picture_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
  2159. return 0;
  2160. }
  2161. ce = &convert_table[src_pix_fmt][dst_pix_fmt];
  2162. if (ce->convert) {
  2163. /* specific conversion routine */
  2164. ce->convert(dst, src, dst_width, dst_height);
  2165. return 0;
  2166. }
  2167. /* gray to YUV */
  2168. if (is_yuv_planar(dst_pix) &&
  2169. src_pix_fmt == PIX_FMT_GRAY8) {
  2170. int w, h, y;
  2171. uint8_t *d;
  2172. if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
  2173. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2174. src->data[0], src->linesize[0],
  2175. dst_width, dst_height);
  2176. } else {
  2177. img_apply_table(dst->data[0], dst->linesize[0],
  2178. src->data[0], src->linesize[0],
  2179. dst_width, dst_height,
  2180. y_jpeg_to_ccir);
  2181. }
  2182. /* fill U and V with 128 */
  2183. w = dst_width;
  2184. h = dst_height;
  2185. w >>= dst_pix->x_chroma_shift;
  2186. h >>= dst_pix->y_chroma_shift;
  2187. for(i = 1; i <= 2; i++) {
  2188. d = dst->data[i];
  2189. for(y = 0; y< h; y++) {
  2190. memset(d, 128, w);
  2191. d += dst->linesize[i];
  2192. }
  2193. }
  2194. return 0;
  2195. }
  2196. /* YUV to gray */
  2197. if (is_yuv_planar(src_pix) &&
  2198. dst_pix_fmt == PIX_FMT_GRAY8) {
  2199. if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
  2200. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2201. src->data[0], src->linesize[0],
  2202. dst_width, dst_height);
  2203. } else {
  2204. img_apply_table(dst->data[0], dst->linesize[0],
  2205. src->data[0], src->linesize[0],
  2206. dst_width, dst_height,
  2207. y_ccir_to_jpeg);
  2208. }
  2209. return 0;
  2210. }
  2211. /* YUV to YUV planar */
  2212. if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
  2213. int x_shift, y_shift, w, h, xy_shift;
  2214. void (*resize_func)(uint8_t *dst, int dst_wrap,
  2215. const uint8_t *src, int src_wrap,
  2216. int width, int height);
2217. /* compute the chroma plane size using the coarser subsampling (i.e. the smallest dimensions) */
  2218. w = dst_width;
  2219. h = dst_height;
  2220. if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
  2221. w >>= dst_pix->x_chroma_shift;
  2222. else
  2223. w >>= src_pix->x_chroma_shift;
  2224. if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
  2225. h >>= dst_pix->y_chroma_shift;
  2226. else
  2227. h >>= src_pix->y_chroma_shift;
  2228. x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
  2229. y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
  2230. xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
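/* xy_shift packs the signed x and y shift differences into one byte,
   one nibble each; negative differences wrap inside the nibble, so -1
   becomes 0xf and -2 becomes 0xe.  For example yuv444p -> yuv420p gives
   x_shift = y_shift = 1 (0x11, shrink chroma by 2 in both directions),
   while yuv420p -> yuv444p gives -1/-1 (0xff, grow by 2). */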
2231. /* shrink/grow filters must exist at least for conversion from and
2232. to the YUV444 format */
  2233. switch(xy_shift) {
  2234. case 0x00:
  2235. resize_func = ff_img_copy_plane;
  2236. break;
  2237. case 0x10:
  2238. resize_func = shrink21;
  2239. break;
  2240. case 0x20:
  2241. resize_func = shrink41;
  2242. break;
  2243. case 0x01:
  2244. resize_func = shrink12;
  2245. break;
  2246. case 0x11:
  2247. resize_func = ff_shrink22;
  2248. break;
  2249. case 0x22:
  2250. resize_func = ff_shrink44;
  2251. break;
  2252. case 0xf0:
  2253. resize_func = grow21;
  2254. break;
  2255. case 0xe0:
  2256. resize_func = grow41;
  2257. break;
  2258. case 0xff:
  2259. resize_func = grow22;
  2260. break;
  2261. case 0xee:
  2262. resize_func = grow44;
  2263. break;
  2264. case 0xf1:
  2265. resize_func = conv411;
  2266. break;
  2267. default:
  2268. /* currently not handled */
  2269. goto no_chroma_filter;
  2270. }
  2271. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2272. src->data[0], src->linesize[0],
  2273. dst_width, dst_height);
  2274. for(i = 1;i <= 2; i++)
  2275. resize_func(dst->data[i], dst->linesize[i],
  2276. src->data[i], src->linesize[i],
  2277. dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
  2278. /* if yuv color space conversion is needed, we do it here on
  2279. the destination image */
  2280. if (dst_pix->color_type != src_pix->color_type) {
  2281. const uint8_t *y_table, *c_table;
  2282. if (dst_pix->color_type == FF_COLOR_YUV) {
  2283. y_table = y_jpeg_to_ccir;
  2284. c_table = c_jpeg_to_ccir;
  2285. } else {
  2286. y_table = y_ccir_to_jpeg;
  2287. c_table = c_ccir_to_jpeg;
  2288. }
  2289. img_apply_table(dst->data[0], dst->linesize[0],
  2290. dst->data[0], dst->linesize[0],
  2291. dst_width, dst_height,
  2292. y_table);
  2293. for(i = 1;i <= 2; i++)
  2294. img_apply_table(dst->data[i], dst->linesize[i],
  2295. dst->data[i], dst->linesize[i],
  2296. dst_width>>dst_pix->x_chroma_shift,
  2297. dst_height>>dst_pix->y_chroma_shift,
  2298. c_table);
  2299. }
  2300. return 0;
  2301. }
  2302. no_chroma_filter:
  2303. /* try to use an intermediate format */
  2304. if (src_pix_fmt == PIX_FMT_YUYV422 ||
  2305. dst_pix_fmt == PIX_FMT_YUYV422) {
  2306. /* specific case: convert to YUV422P first */
  2307. int_pix_fmt = PIX_FMT_YUV422P;
  2308. } else if (src_pix_fmt == PIX_FMT_UYVY422 ||
  2309. dst_pix_fmt == PIX_FMT_UYVY422) {
  2310. /* specific case: convert to YUV422P first */
  2311. int_pix_fmt = PIX_FMT_YUV422P;
  2312. } else if (src_pix_fmt == PIX_FMT_UYYVYY411 ||
  2313. dst_pix_fmt == PIX_FMT_UYYVYY411) {
  2314. /* specific case: convert to YUV411P first */
  2315. int_pix_fmt = PIX_FMT_YUV411P;
  2316. } else if ((src_pix->color_type == FF_COLOR_GRAY &&
  2317. src_pix_fmt != PIX_FMT_GRAY8) ||
  2318. (dst_pix->color_type == FF_COLOR_GRAY &&
  2319. dst_pix_fmt != PIX_FMT_GRAY8)) {
  2320. /* gray8 is the normalized format */
  2321. int_pix_fmt = PIX_FMT_GRAY8;
  2322. } else if ((is_yuv_planar(src_pix) &&
  2323. src_pix_fmt != PIX_FMT_YUV444P &&
  2324. src_pix_fmt != PIX_FMT_YUVJ444P)) {
  2325. /* yuv444 is the normalized format */
  2326. if (src_pix->color_type == FF_COLOR_YUV_JPEG)
  2327. int_pix_fmt = PIX_FMT_YUVJ444P;
  2328. else
  2329. int_pix_fmt = PIX_FMT_YUV444P;
  2330. } else if ((is_yuv_planar(dst_pix) &&
  2331. dst_pix_fmt != PIX_FMT_YUV444P &&
  2332. dst_pix_fmt != PIX_FMT_YUVJ444P)) {
  2333. /* yuv444 is the normalized format */
  2334. if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
  2335. int_pix_fmt = PIX_FMT_YUVJ444P;
  2336. else
  2337. int_pix_fmt = PIX_FMT_YUV444P;
  2338. } else {
  2339. /* the two formats are rgb or gray8 or yuv[j]444p */
  2340. if (src_pix->is_alpha && dst_pix->is_alpha)
  2341. int_pix_fmt = PIX_FMT_RGB32;
  2342. else
  2343. int_pix_fmt = PIX_FMT_RGB24;
  2344. }
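/* no direct routine and no usable chroma filter: convert in two passes
   through the intermediate format chosen above (packed 4:2:2/4:1:1 go
   through their planar equivalent, other gray formats through gray8,
   planar YUV through yuv[j]444p, everything else through rgb24/rgb32).
   This costs one temporary picture and two recursive img_convert() calls. */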
  2345. if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2346. return -1;
  2347. ret = -1;
  2348. if (img_convert(tmp, int_pix_fmt,
  2349. src, src_pix_fmt, src_width, src_height) < 0)
  2350. goto fail1;
  2351. if (img_convert(dst, dst_pix_fmt,
  2352. tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2353. goto fail1;
  2354. ret = 0;
  2355. fail1:
  2356. avpicture_free(tmp);
  2357. return ret;
  2358. }
  2359. #endif
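/* A minimal usage sketch for the conversion API above (w, h and src are
   placeholders for the source dimensions and picture):

       AVPicture dst;
       if (avpicture_alloc(&dst, PIX_FMT_RGB24, w, h) == 0) {
           if (img_convert(&dst, PIX_FMT_RGB24, &src, PIX_FMT_YUV420P, w, h) < 0)
               av_log(NULL, AV_LOG_ERROR, "conversion not supported\n");
           avpicture_free(&dst);
       }
*/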
2360. /* NOTE: we scan all the pixels to obtain exact information */
  2361. static int get_alpha_info_pal8(const AVPicture *src, int width, int height)
  2362. {
  2363. const unsigned char *p;
  2364. int src_wrap, ret, x, y;
  2365. unsigned int a;
  2366. uint32_t *palette = (uint32_t *)src->data[1];
  2367. p = src->data[0];
  2368. src_wrap = src->linesize[0] - width;
  2369. ret = 0;
  2370. for(y=0;y<height;y++) {
  2371. for(x=0;x<width;x++) {
  2372. a = palette[p[0]] >> 24;
  2373. if (a == 0x00) {
  2374. ret |= FF_ALPHA_TRANSP;
  2375. } else if (a != 0xff) {
  2376. ret |= FF_ALPHA_SEMI_TRANSP;
  2377. }
  2378. p++;
  2379. }
  2380. p += src_wrap;
  2381. }
  2382. return ret;
  2383. }
  2384. int img_get_alpha_info(const AVPicture *src,
  2385. int pix_fmt, int width, int height)
  2386. {
2387. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
2388. int ret;
2390. /* no alpha can be represented in this format */
  2391. if (!pf->is_alpha)
  2392. return 0;
  2393. switch(pix_fmt) {
  2394. case PIX_FMT_RGB32:
  2395. ret = get_alpha_info_rgb32(src, width, height);
  2396. break;
  2397. case PIX_FMT_PAL8:
  2398. ret = get_alpha_info_pal8(src, width, height);
  2399. break;
  2400. default:
2401. /* we do not know, so report every alpha flag */
  2402. ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
  2403. break;
  2404. }
  2405. return ret;
  2406. }
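/* Sketch of interpreting the result; FF_ALPHA_TRANSP and
   FF_ALPHA_SEMI_TRANSP are bit flags, so both may be set (pic, w, h,
   use_blending and use_color_key are placeholders):

       int flags = img_get_alpha_info(&pic, PIX_FMT_RGB32, w, h);
       if (flags & FF_ALPHA_SEMI_TRANSP)
           use_blending = 1;      // partially transparent pixels exist
       else if (flags & FF_ALPHA_TRANSP)
           use_color_key = 1;     // only fully transparent pixels
*/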
  2407. #ifdef HAVE_MMX
  2408. #define DEINT_INPLACE_LINE_LUM \
  2409. movd_m2r(lum_m4[0],mm0);\
  2410. movd_m2r(lum_m3[0],mm1);\
  2411. movd_m2r(lum_m2[0],mm2);\
  2412. movd_m2r(lum_m1[0],mm3);\
  2413. movd_m2r(lum[0],mm4);\
  2414. punpcklbw_r2r(mm7,mm0);\
  2415. movd_r2m(mm2,lum_m4[0]);\
  2416. punpcklbw_r2r(mm7,mm1);\
  2417. punpcklbw_r2r(mm7,mm2);\
  2418. punpcklbw_r2r(mm7,mm3);\
  2419. punpcklbw_r2r(mm7,mm4);\
  2420. paddw_r2r(mm3,mm1);\
  2421. psllw_i2r(1,mm2);\
  2422. paddw_r2r(mm4,mm0);\
  2423. psllw_i2r(2,mm1);\
  2424. paddw_r2r(mm6,mm2);\
  2425. paddw_r2r(mm2,mm1);\
  2426. psubusw_r2r(mm0,mm1);\
  2427. psrlw_i2r(3,mm1);\
  2428. packuswb_r2r(mm7,mm1);\
  2429. movd_r2m(mm1,lum_m2[0]);
  2430. #define DEINT_LINE_LUM \
  2431. movd_m2r(lum_m4[0],mm0);\
  2432. movd_m2r(lum_m3[0],mm1);\
  2433. movd_m2r(lum_m2[0],mm2);\
  2434. movd_m2r(lum_m1[0],mm3);\
  2435. movd_m2r(lum[0],mm4);\
  2436. punpcklbw_r2r(mm7,mm0);\
  2437. punpcklbw_r2r(mm7,mm1);\
  2438. punpcklbw_r2r(mm7,mm2);\
  2439. punpcklbw_r2r(mm7,mm3);\
  2440. punpcklbw_r2r(mm7,mm4);\
  2441. paddw_r2r(mm3,mm1);\
  2442. psllw_i2r(1,mm2);\
  2443. paddw_r2r(mm4,mm0);\
  2444. psllw_i2r(2,mm1);\
  2445. paddw_r2r(mm6,mm2);\
  2446. paddw_r2r(mm2,mm1);\
  2447. psubusw_r2r(mm0,mm1);\
  2448. psrlw_i2r(3,mm1);\
  2449. packuswb_r2r(mm7,mm1);\
  2450. movd_r2m(mm1,dst[0]);
  2451. #endif
  2452. /* filter parameters: [-1 4 2 4 -1] // 8 */
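/* For each output pixel the C reference path below computes
       dst[x] = clip((-lum_m4[x] + 4*lum_m3[x] + 2*lum_m2[x]
                      + 4*lum_m1[x] - lum[x] + 4) >> 3);
   the MMX path performs the same computation four pixels at a time,
   using saturating arithmetic instead of the clip table. */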
  2453. static void deinterlace_line(uint8_t *dst,
  2454. const uint8_t *lum_m4, const uint8_t *lum_m3,
  2455. const uint8_t *lum_m2, const uint8_t *lum_m1,
  2456. const uint8_t *lum,
  2457. int size)
  2458. {
  2459. #ifndef HAVE_MMX
  2460. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  2461. int sum;
  2462. for(;size > 0;size--) {
  2463. sum = -lum_m4[0];
  2464. sum += lum_m3[0] << 2;
  2465. sum += lum_m2[0] << 1;
  2466. sum += lum_m1[0] << 2;
  2467. sum += -lum[0];
  2468. dst[0] = cm[(sum + 4) >> 3];
  2469. lum_m4++;
  2470. lum_m3++;
  2471. lum_m2++;
  2472. lum_m1++;
  2473. lum++;
  2474. dst++;
  2475. }
  2476. #else
  2477. {
  2478. mmx_t rounder;
  2479. rounder.uw[0]=4;
  2480. rounder.uw[1]=4;
  2481. rounder.uw[2]=4;
  2482. rounder.uw[3]=4;
  2483. pxor_r2r(mm7,mm7);
  2484. movq_m2r(rounder,mm6);
  2485. }
  2486. for (;size > 3; size-=4) {
  2487. DEINT_LINE_LUM
  2488. lum_m4+=4;
  2489. lum_m3+=4;
  2490. lum_m2+=4;
  2491. lum_m1+=4;
  2492. lum+=4;
  2493. dst+=4;
  2494. }
  2495. #endif
  2496. }
  2497. static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
  2498. int size)
  2499. {
  2500. #ifndef HAVE_MMX
  2501. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  2502. int sum;
  2503. for(;size > 0;size--) {
  2504. sum = -lum_m4[0];
  2505. sum += lum_m3[0] << 2;
  2506. sum += lum_m2[0] << 1;
  2507. lum_m4[0]=lum_m2[0];
  2508. sum += lum_m1[0] << 2;
  2509. sum += -lum[0];
  2510. lum_m2[0] = cm[(sum + 4) >> 3];
  2511. lum_m4++;
  2512. lum_m3++;
  2513. lum_m2++;
  2514. lum_m1++;
  2515. lum++;
  2516. }
  2517. #else
  2518. {
  2519. mmx_t rounder;
  2520. rounder.uw[0]=4;
  2521. rounder.uw[1]=4;
  2522. rounder.uw[2]=4;
  2523. rounder.uw[3]=4;
  2524. pxor_r2r(mm7,mm7);
  2525. movq_m2r(rounder,mm6);
  2526. }
  2527. for (;size > 3; size-=4) {
  2528. DEINT_INPLACE_LINE_LUM
  2529. lum_m4+=4;
  2530. lum_m3+=4;
  2531. lum_m2+=4;
  2532. lum_m1+=4;
  2533. lum+=4;
  2534. }
  2535. #endif
  2536. }
2537. /* deinterlacing: 2 temporal taps, 3 spatial taps linear filter. The
2538. top field is copied as is, but the bottom field is deinterlaced
2539. against the top field. */
  2540. static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
  2541. const uint8_t *src1, int src_wrap,
  2542. int width, int height)
  2543. {
  2544. const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
  2545. int y;
  2546. src_m2 = src1;
  2547. src_m1 = src1;
  2548. src_0=&src_m1[src_wrap];
  2549. src_p1=&src_0[src_wrap];
  2550. src_p2=&src_p1[src_wrap];
  2551. for(y=0;y<(height-2);y+=2) {
  2552. memcpy(dst,src_m1,width);
  2553. dst += dst_wrap;
  2554. deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
  2555. src_m2 = src_0;
  2556. src_m1 = src_p1;
  2557. src_0 = src_p2;
  2558. src_p1 += 2*src_wrap;
  2559. src_p2 += 2*src_wrap;
  2560. dst += dst_wrap;
  2561. }
  2562. memcpy(dst,src_m1,width);
  2563. dst += dst_wrap;
  2564. /* do last line */
  2565. deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
  2566. }
  2567. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  2568. int width, int height)
  2569. {
  2570. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  2571. int y;
  2572. uint8_t *buf;
2573. buf = (uint8_t*)av_malloc(width);
if (!buf)
return;
  2574. src_m1 = src1;
  2575. memcpy(buf,src_m1,width);
  2576. src_0=&src_m1[src_wrap];
  2577. src_p1=&src_0[src_wrap];
  2578. src_p2=&src_p1[src_wrap];
  2579. for(y=0;y<(height-2);y+=2) {
  2580. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  2581. src_m1 = src_p1;
  2582. src_0 = src_p2;
  2583. src_p1 += 2*src_wrap;
  2584. src_p2 += 2*src_wrap;
  2585. }
  2586. /* do last line */
  2587. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  2588. av_free(buf);
  2589. }
  2590. int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
  2591. int pix_fmt, int width, int height)
  2592. {
  2593. int i;
  2594. if (pix_fmt != PIX_FMT_YUV420P &&
  2595. pix_fmt != PIX_FMT_YUV422P &&
  2596. pix_fmt != PIX_FMT_YUV444P &&
  2597. pix_fmt != PIX_FMT_YUV411P)
  2598. return -1;
  2599. if ((width & 3) != 0 || (height & 3) != 0)
  2600. return -1;
  2601. for(i=0;i<3;i++) {
  2602. if (i == 1) {
  2603. switch(pix_fmt) {
  2604. case PIX_FMT_YUV420P:
  2605. width >>= 1;
  2606. height >>= 1;
  2607. break;
  2608. case PIX_FMT_YUV422P:
  2609. width >>= 1;
  2610. break;
  2611. case PIX_FMT_YUV411P:
  2612. width >>= 2;
  2613. break;
  2614. default:
  2615. break;
  2616. }
  2617. }
  2618. if (src == dst) {
  2619. deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i],
  2620. width, height);
  2621. } else {
  2622. deinterlace_bottom_field(dst->data[i],dst->linesize[i],
  2623. src->data[i], src->linesize[i],
  2624. width, height);
  2625. }
  2626. }
  2627. #ifdef HAVE_MMX
  2628. emms();
  2629. #endif
  2630. return 0;
  2631. }
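/* Usage sketch: deinterlacing can be done in place by passing the same
   picture as source and destination (the in-place line filter is then
   used); width and height must be multiples of 4 (pic, w and h are
   placeholders):

       if (avpicture_deinterlace(&pic, &pic, PIX_FMT_YUV420P, w, h) < 0)
           av_log(NULL, AV_LOG_ERROR, "unsupported format or size\n");
*/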
  2632. #undef FIX