  1. /*
  2. * Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file imgconvert.c
  23. * misc image conversion routines
  24. */
  25. /* TODO:
  26. * - write 'ffimg' program to test all the image related stuff
  27. * - move the whole API to a slice-based system
  28. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  29. */
  30. #include "avcodec.h"
  31. #include "dsputil.h"
  32. #include "colorspace.h"
  33. #ifdef HAVE_MMX
  34. #include "i386/mmx.h"
  35. #endif
  36. #define xglue(x, y) x ## y
  37. #define glue(x, y) xglue(x, y)
  38. #define FF_COLOR_RGB 0 /**< RGB color space */
  39. #define FF_COLOR_GRAY 1 /**< gray color space */
  40. #define FF_COLOR_YUV 2 /**< YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
  41. #define FF_COLOR_YUV_JPEG 3 /**< YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
  42. #define FF_PIXEL_PLANAR 0 /**< each channel has one component in AVPicture */
  43. #define FF_PIXEL_PACKED 1 /**< only one component containing all the channels */
  44. #define FF_PIXEL_PALETTE 2 /**< one component containing indices into a palette */
  45. typedef struct PixFmtInfo {
  46. const char *name;
  47. uint8_t nb_channels; /**< number of channels (including alpha) */
  48. uint8_t color_type; /**< color type (see FF_COLOR_xxx constants) */
  49. uint8_t pixel_type; /**< pixel storage type (see FF_PIXEL_xxx constants) */
  50. uint8_t is_alpha : 1; /**< true if alpha can be specified */
  51. uint8_t x_chroma_shift; /**< X chroma subsampling factor is 2 ^ shift */
  52. uint8_t y_chroma_shift; /**< Y chroma subsampling factor is 2 ^ shift */
  53. uint8_t depth; /**< bit depth of the color components */
  54. } PixFmtInfo;
  55. /* this table gives more information about formats */
  56. static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
  57. /* YUV formats */
  58. [PIX_FMT_YUV420P] = {
  59. .name = "yuv420p",
  60. .nb_channels = 3,
  61. .color_type = FF_COLOR_YUV,
  62. .pixel_type = FF_PIXEL_PLANAR,
  63. .depth = 8,
  64. .x_chroma_shift = 1, .y_chroma_shift = 1,
  65. },
  66. [PIX_FMT_YUV422P] = {
  67. .name = "yuv422p",
  68. .nb_channels = 3,
  69. .color_type = FF_COLOR_YUV,
  70. .pixel_type = FF_PIXEL_PLANAR,
  71. .depth = 8,
  72. .x_chroma_shift = 1, .y_chroma_shift = 0,
  73. },
  74. [PIX_FMT_YUV444P] = {
  75. .name = "yuv444p",
  76. .nb_channels = 3,
  77. .color_type = FF_COLOR_YUV,
  78. .pixel_type = FF_PIXEL_PLANAR,
  79. .depth = 8,
  80. .x_chroma_shift = 0, .y_chroma_shift = 0,
  81. },
  82. [PIX_FMT_YUYV422] = {
  83. .name = "yuyv422",
  84. .nb_channels = 1,
  85. .color_type = FF_COLOR_YUV,
  86. .pixel_type = FF_PIXEL_PACKED,
  87. .depth = 8,
  88. .x_chroma_shift = 1, .y_chroma_shift = 0,
  89. },
  90. [PIX_FMT_UYVY422] = {
  91. .name = "uyvy422",
  92. .nb_channels = 1,
  93. .color_type = FF_COLOR_YUV,
  94. .pixel_type = FF_PIXEL_PACKED,
  95. .depth = 8,
  96. .x_chroma_shift = 1, .y_chroma_shift = 0,
  97. },
  98. [PIX_FMT_YUV410P] = {
  99. .name = "yuv410p",
  100. .nb_channels = 3,
  101. .color_type = FF_COLOR_YUV,
  102. .pixel_type = FF_PIXEL_PLANAR,
  103. .depth = 8,
  104. .x_chroma_shift = 2, .y_chroma_shift = 2,
  105. },
  106. [PIX_FMT_YUV411P] = {
  107. .name = "yuv411p",
  108. .nb_channels = 3,
  109. .color_type = FF_COLOR_YUV,
  110. .pixel_type = FF_PIXEL_PLANAR,
  111. .depth = 8,
  112. .x_chroma_shift = 2, .y_chroma_shift = 0,
  113. },
  114. /* JPEG YUV */
  115. [PIX_FMT_YUVJ420P] = {
  116. .name = "yuvj420p",
  117. .nb_channels = 3,
  118. .color_type = FF_COLOR_YUV_JPEG,
  119. .pixel_type = FF_PIXEL_PLANAR,
  120. .depth = 8,
  121. .x_chroma_shift = 1, .y_chroma_shift = 1,
  122. },
  123. [PIX_FMT_YUVJ422P] = {
  124. .name = "yuvj422p",
  125. .nb_channels = 3,
  126. .color_type = FF_COLOR_YUV_JPEG,
  127. .pixel_type = FF_PIXEL_PLANAR,
  128. .depth = 8,
  129. .x_chroma_shift = 1, .y_chroma_shift = 0,
  130. },
  131. [PIX_FMT_YUVJ444P] = {
  132. .name = "yuvj444p",
  133. .nb_channels = 3,
  134. .color_type = FF_COLOR_YUV_JPEG,
  135. .pixel_type = FF_PIXEL_PLANAR,
  136. .depth = 8,
  137. .x_chroma_shift = 0, .y_chroma_shift = 0,
  138. },
  139. /* RGB formats */
  140. [PIX_FMT_RGB24] = {
  141. .name = "rgb24",
  142. .nb_channels = 3,
  143. .color_type = FF_COLOR_RGB,
  144. .pixel_type = FF_PIXEL_PACKED,
  145. .depth = 8,
  146. .x_chroma_shift = 0, .y_chroma_shift = 0,
  147. },
  148. [PIX_FMT_BGR24] = {
  149. .name = "bgr24",
  150. .nb_channels = 3,
  151. .color_type = FF_COLOR_RGB,
  152. .pixel_type = FF_PIXEL_PACKED,
  153. .depth = 8,
  154. .x_chroma_shift = 0, .y_chroma_shift = 0,
  155. },
  156. [PIX_FMT_RGB32] = {
  157. .name = "rgb32",
  158. .nb_channels = 4, .is_alpha = 1,
  159. .color_type = FF_COLOR_RGB,
  160. .pixel_type = FF_PIXEL_PACKED,
  161. .depth = 8,
  162. .x_chroma_shift = 0, .y_chroma_shift = 0,
  163. },
  164. [PIX_FMT_RGB565] = {
  165. .name = "rgb565",
  166. .nb_channels = 3,
  167. .color_type = FF_COLOR_RGB,
  168. .pixel_type = FF_PIXEL_PACKED,
  169. .depth = 5,
  170. .x_chroma_shift = 0, .y_chroma_shift = 0,
  171. },
  172. [PIX_FMT_RGB555] = {
  173. .name = "rgb555",
  174. .nb_channels = 3,
  175. .color_type = FF_COLOR_RGB,
  176. .pixel_type = FF_PIXEL_PACKED,
  177. .depth = 5,
  178. .x_chroma_shift = 0, .y_chroma_shift = 0,
  179. },
  180. /* gray / mono formats */
  181. [PIX_FMT_GRAY16BE] = {
  182. .name = "gray16be",
  183. .nb_channels = 1,
  184. .color_type = FF_COLOR_GRAY,
  185. .pixel_type = FF_PIXEL_PLANAR,
  186. .depth = 16,
  187. },
  188. [PIX_FMT_GRAY16LE] = {
  189. .name = "gray16le",
  190. .nb_channels = 1,
  191. .color_type = FF_COLOR_GRAY,
  192. .pixel_type = FF_PIXEL_PLANAR,
  193. .depth = 16,
  194. },
  195. [PIX_FMT_GRAY8] = {
  196. .name = "gray",
  197. .nb_channels = 1,
  198. .color_type = FF_COLOR_GRAY,
  199. .pixel_type = FF_PIXEL_PLANAR,
  200. .depth = 8,
  201. },
  202. [PIX_FMT_MONOWHITE] = {
  203. .name = "monow",
  204. .nb_channels = 1,
  205. .color_type = FF_COLOR_GRAY,
  206. .pixel_type = FF_PIXEL_PLANAR,
  207. .depth = 1,
  208. },
  209. [PIX_FMT_MONOBLACK] = {
  210. .name = "monob",
  211. .nb_channels = 1,
  212. .color_type = FF_COLOR_GRAY,
  213. .pixel_type = FF_PIXEL_PLANAR,
  214. .depth = 1,
  215. },
  216. /* paletted formats */
  217. [PIX_FMT_PAL8] = {
  218. .name = "pal8",
  219. .nb_channels = 4, .is_alpha = 1,
  220. .color_type = FF_COLOR_RGB,
  221. .pixel_type = FF_PIXEL_PALETTE,
  222. .depth = 8,
  223. },
  224. [PIX_FMT_XVMC_MPEG2_MC] = {
  225. .name = "xvmcmc",
  226. },
  227. [PIX_FMT_XVMC_MPEG2_IDCT] = {
  228. .name = "xvmcidct",
  229. },
  230. [PIX_FMT_UYYVYY411] = {
  231. .name = "uyyvyy411",
  232. .nb_channels = 1,
  233. .color_type = FF_COLOR_YUV,
  234. .pixel_type = FF_PIXEL_PACKED,
  235. .depth = 8,
  236. .x_chroma_shift = 2, .y_chroma_shift = 0,
  237. },
  238. [PIX_FMT_BGR32] = {
  239. .name = "bgr32",
  240. .nb_channels = 4, .is_alpha = 1,
  241. .color_type = FF_COLOR_RGB,
  242. .pixel_type = FF_PIXEL_PACKED,
  243. .depth = 8,
  244. .x_chroma_shift = 0, .y_chroma_shift = 0,
  245. },
  246. [PIX_FMT_BGR565] = {
  247. .name = "bgr565",
  248. .nb_channels = 3,
  249. .color_type = FF_COLOR_RGB,
  250. .pixel_type = FF_PIXEL_PACKED,
  251. .depth = 5,
  252. .x_chroma_shift = 0, .y_chroma_shift = 0,
  253. },
  254. [PIX_FMT_BGR555] = {
  255. .name = "bgr555",
  256. .nb_channels = 3,
  257. .color_type = FF_COLOR_RGB,
  258. .pixel_type = FF_PIXEL_PACKED,
  259. .depth = 5,
  260. .x_chroma_shift = 0, .y_chroma_shift = 0,
  261. },
  262. [PIX_FMT_RGB8] = {
  263. .name = "rgb8",
  264. .nb_channels = 1,
  265. .color_type = FF_COLOR_RGB,
  266. .pixel_type = FF_PIXEL_PACKED,
  267. .depth = 8,
  268. .x_chroma_shift = 0, .y_chroma_shift = 0,
  269. },
  270. [PIX_FMT_RGB4] = {
  271. .name = "rgb4",
  272. .nb_channels = 1,
  273. .color_type = FF_COLOR_RGB,
  274. .pixel_type = FF_PIXEL_PACKED,
  275. .depth = 4,
  276. .x_chroma_shift = 0, .y_chroma_shift = 0,
  277. },
  278. [PIX_FMT_RGB4_BYTE] = {
  279. .name = "rgb4_byte",
  280. .nb_channels = 1,
  281. .color_type = FF_COLOR_RGB,
  282. .pixel_type = FF_PIXEL_PACKED,
  283. .depth = 8,
  284. .x_chroma_shift = 0, .y_chroma_shift = 0,
  285. },
  286. [PIX_FMT_BGR8] = {
  287. .name = "bgr8",
  288. .nb_channels = 1,
  289. .color_type = FF_COLOR_RGB,
  290. .pixel_type = FF_PIXEL_PACKED,
  291. .depth = 8,
  292. .x_chroma_shift = 0, .y_chroma_shift = 0,
  293. },
  294. [PIX_FMT_BGR4] = {
  295. .name = "bgr4",
  296. .nb_channels = 1,
  297. .color_type = FF_COLOR_RGB,
  298. .pixel_type = FF_PIXEL_PACKED,
  299. .depth = 4,
  300. .x_chroma_shift = 0, .y_chroma_shift = 0,
  301. },
  302. [PIX_FMT_BGR4_BYTE] = {
  303. .name = "bgr4_byte",
  304. .nb_channels = 1,
  305. .color_type = FF_COLOR_RGB,
  306. .pixel_type = FF_PIXEL_PACKED,
  307. .depth = 8,
  308. .x_chroma_shift = 0, .y_chroma_shift = 0,
  309. },
  310. [PIX_FMT_NV12] = {
  311. .name = "nv12",
  312. .nb_channels = 2,
  313. .color_type = FF_COLOR_YUV,
  314. .pixel_type = FF_PIXEL_PLANAR,
  315. .depth = 8,
  316. .x_chroma_shift = 1, .y_chroma_shift = 1,
  317. },
  318. [PIX_FMT_NV21] = {
  319. .name = "nv12",
  320. .nb_channels = 2,
  321. .color_type = FF_COLOR_YUV,
  322. .pixel_type = FF_PIXEL_PLANAR,
  323. .depth = 8,
  324. .x_chroma_shift = 1, .y_chroma_shift = 1,
  325. },
  326. [PIX_FMT_BGR32_1] = {
  327. .name = "bgr32_1",
  328. .nb_channels = 4, .is_alpha = 1,
  329. .color_type = FF_COLOR_RGB,
  330. .pixel_type = FF_PIXEL_PACKED,
  331. .depth = 8,
  332. .x_chroma_shift = 0, .y_chroma_shift = 0,
  333. },
  334. [PIX_FMT_RGB32_1] = {
  335. .name = "rgb32_1",
  336. .nb_channels = 4, .is_alpha = 1,
  337. .color_type = FF_COLOR_RGB,
  338. .pixel_type = FF_PIXEL_PACKED,
  339. .depth = 8,
  340. .x_chroma_shift = 0, .y_chroma_shift = 0,
  341. },
  342. };
  343. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  344. {
  345. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  346. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  347. }
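/* Example (added illustration, not part of the original file): turning the
 * shifts into chroma plane dimensions for a 1920x1080 yuv420p picture; the
 * negation trick rounds up for odd luma sizes. */
#if 0
    int h_shift, v_shift;
    avcodec_get_chroma_sub_sample(PIX_FMT_YUV420P, &h_shift, &v_shift);
    int chroma_w = -((-1920) >> h_shift);   /* 960 */
    int chroma_h = -((-1080) >> v_shift);   /* 540 */
#endif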
  348. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  349. {
  350. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  351. return "???";
  352. else
  353. return pix_fmt_info[pix_fmt].name;
  354. }
  355. enum PixelFormat avcodec_get_pix_fmt(const char* name)
  356. {
  357. int i;
  358. for (i=0; i < PIX_FMT_NB; i++)
  359. if (!strcmp(pix_fmt_info[i].name, name))
  360. break;
  361. return i;
  362. }
  363. void avcodec_pix_fmt_string (char *buf, int buf_size, int pix_fmt)
  364. {
  365. PixFmtInfo info= pix_fmt_info[pix_fmt < 0 ? 0 : pix_fmt]; /* avoid an out-of-bounds read when only the header is printed */
  366. char is_alpha_char= info.is_alpha ? 'y' : 'n';
  367. /* print header */
  368. if (pix_fmt < 0)
  369. snprintf (buf, buf_size,
  370. "name " " nb_channels" " depth" " is_alpha"
  371. );
  372. else
  373. snprintf (buf, buf_size,
  374. "%-10s" " %1d " " %2d " " %c ",
  375. info.name,
  376. info.nb_channels,
  377. info.depth,
  378. is_alpha_char
  379. );
  380. }
  381. int avpicture_fill(AVPicture *picture, uint8_t *ptr,
  382. int pix_fmt, int width, int height)
  383. {
  384. int size, w2, h2, size2;
  385. const PixFmtInfo *pinfo;
  386. if(avcodec_check_dimensions(NULL, width, height))
  387. goto fail;
  388. pinfo = &pix_fmt_info[pix_fmt];
  389. size = width * height;
  390. switch(pix_fmt) {
  391. case PIX_FMT_YUV420P:
  392. case PIX_FMT_YUV422P:
  393. case PIX_FMT_YUV444P:
  394. case PIX_FMT_YUV410P:
  395. case PIX_FMT_YUV411P:
  396. case PIX_FMT_YUVJ420P:
  397. case PIX_FMT_YUVJ422P:
  398. case PIX_FMT_YUVJ444P:
  399. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  400. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  401. size2 = w2 * h2;
  402. picture->data[0] = ptr;
  403. picture->data[1] = picture->data[0] + size;
  404. picture->data[2] = picture->data[1] + size2;
  405. picture->linesize[0] = width;
  406. picture->linesize[1] = w2;
  407. picture->linesize[2] = w2;
  408. return size + 2 * size2;
  409. case PIX_FMT_NV12:
  410. case PIX_FMT_NV21:
  411. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  412. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  413. size2 = w2 * h2 * 2;
  414. picture->data[0] = ptr;
  415. picture->data[1] = picture->data[0] + size;
  416. picture->data[2] = NULL;
  417. picture->linesize[0] = width;
  418. picture->linesize[1] = w2 * 2; /* interleaved U/V: two bytes per chroma column */
  419. picture->linesize[2] = 0;
  420. return size + size2; /* size2 already covers the full interleaved chroma plane */
  421. case PIX_FMT_RGB24:
  422. case PIX_FMT_BGR24:
  423. picture->data[0] = ptr;
  424. picture->data[1] = NULL;
  425. picture->data[2] = NULL;
  426. picture->linesize[0] = width * 3;
  427. return size * 3;
  428. case PIX_FMT_RGB32:
  429. case PIX_FMT_BGR32:
  430. case PIX_FMT_RGB32_1:
  431. case PIX_FMT_BGR32_1:
  432. picture->data[0] = ptr;
  433. picture->data[1] = NULL;
  434. picture->data[2] = NULL;
  435. picture->linesize[0] = width * 4;
  436. return size * 4;
  437. case PIX_FMT_GRAY16BE:
  438. case PIX_FMT_GRAY16LE:
  439. case PIX_FMT_BGR555:
  440. case PIX_FMT_BGR565:
  441. case PIX_FMT_RGB555:
  442. case PIX_FMT_RGB565:
  443. case PIX_FMT_YUYV422:
  444. picture->data[0] = ptr;
  445. picture->data[1] = NULL;
  446. picture->data[2] = NULL;
  447. picture->linesize[0] = width * 2;
  448. return size * 2;
  449. case PIX_FMT_UYVY422:
  450. picture->data[0] = ptr;
  451. picture->data[1] = NULL;
  452. picture->data[2] = NULL;
  453. picture->linesize[0] = width * 2;
  454. return size * 2;
  455. case PIX_FMT_UYYVYY411:
  456. picture->data[0] = ptr;
  457. picture->data[1] = NULL;
  458. picture->data[2] = NULL;
  459. picture->linesize[0] = width + width/2;
  460. return size + size/2;
  461. case PIX_FMT_RGB8:
  462. case PIX_FMT_BGR8:
  463. case PIX_FMT_RGB4_BYTE:
  464. case PIX_FMT_BGR4_BYTE:
  465. case PIX_FMT_GRAY8:
  466. picture->data[0] = ptr;
  467. picture->data[1] = NULL;
  468. picture->data[2] = NULL;
  469. picture->linesize[0] = width;
  470. return size;
  471. case PIX_FMT_RGB4:
  472. case PIX_FMT_BGR4:
  473. picture->data[0] = ptr;
  474. picture->data[1] = NULL;
  475. picture->data[2] = NULL;
  476. picture->linesize[0] = width / 2;
  477. return size / 2;
  478. case PIX_FMT_MONOWHITE:
  479. case PIX_FMT_MONOBLACK:
  480. picture->data[0] = ptr;
  481. picture->data[1] = NULL;
  482. picture->data[2] = NULL;
  483. picture->linesize[0] = (width + 7) >> 3;
  484. return picture->linesize[0] * height;
  485. case PIX_FMT_PAL8:
  486. size2 = (size + 3) & ~3;
  487. picture->data[0] = ptr;
  488. picture->data[1] = ptr + size2; /* palette is stored here as 256 32-bit words */
  489. picture->data[2] = NULL;
  490. picture->linesize[0] = width;
  491. picture->linesize[1] = 4;
  492. return size2 + 256 * 4;
  493. default:
  494. fail:
  495. picture->data[0] = NULL;
  496. picture->data[1] = NULL;
  497. picture->data[2] = NULL;
  498. picture->data[3] = NULL;
  499. return -1;
  500. }
  501. }
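/* Example (added illustration, not part of the original file): wrapping a
 * caller-owned buffer as an AVPicture. The buffer must hold at least
 * avpicture_get_size() bytes for the chosen format and dimensions. */
#if 0
    AVPicture pic;
    int w = 352, h = 288;
    int size = avpicture_get_size(PIX_FMT_YUV420P, w, h);
    if (size >= 0) {
        uint8_t *buf = av_malloc(size);
        if (buf)
            avpicture_fill(&pic, buf, PIX_FMT_YUV420P, w, h);
    }
#endif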
  502. int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
  503. unsigned char *dest, int dest_size)
  504. {
  505. const PixFmtInfo* pf = &pix_fmt_info[pix_fmt];
  506. int i, j, w, h, data_planes;
  507. const unsigned char* s;
  508. int size = avpicture_get_size(pix_fmt, width, height);
  509. if (size > dest_size || size < 0)
  510. return -1;
  511. if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
  512. if (pix_fmt == PIX_FMT_YUYV422 ||
  513. pix_fmt == PIX_FMT_UYVY422 ||
  514. pix_fmt == PIX_FMT_BGR565 ||
  515. pix_fmt == PIX_FMT_BGR555 ||
  516. pix_fmt == PIX_FMT_RGB565 ||
  517. pix_fmt == PIX_FMT_RGB555)
  518. w = width * 2;
  519. else if (pix_fmt == PIX_FMT_UYYVYY411)
  520. w = width + width/2;
  521. else if (pix_fmt == PIX_FMT_PAL8)
  522. w = width;
  523. else
  524. w = width * (pf->depth * pf->nb_channels / 8);
  525. data_planes = 1;
  526. h = height;
  527. } else {
  528. data_planes = pf->nb_channels;
  529. w = (width*pf->depth + 7)/8;
  530. h = height;
  531. }
  532. for (i=0; i<data_planes; i++) {
  533. if (i == 1) {
  534. w = width >> pf->x_chroma_shift;
  535. h = height >> pf->y_chroma_shift;
  536. }
  537. s = src->data[i];
  538. for(j=0; j<h; j++) {
  539. memcpy(dest, s, w);
  540. dest += w;
  541. s += src->linesize[i];
  542. }
  543. }
  544. if (pf->pixel_type == FF_PIXEL_PALETTE)
  545. memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);
  546. return size;
  547. }
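/* Example (added illustration, not part of the original file): flattening a
 * filled picture into one contiguous buffer, e.g. before writing a raw frame;
 * "pic", "pix_fmt", "w" and "h" are assumed to come from the caller. */
#if 0
    int size = avpicture_get_size(pix_fmt, w, h);
    if (size >= 0) {
        uint8_t *out = av_malloc(size);
        if (out && avpicture_layout(&pic, pix_fmt, w, h, out, size) == size) {
            /* out now holds all planes back to back */
        }
    }
#endif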
  548. int avpicture_get_size(int pix_fmt, int width, int height)
  549. {
  550. AVPicture dummy_pict;
  551. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  552. }
  553. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  554. int has_alpha)
  555. {
  556. const PixFmtInfo *pf, *ps;
  557. int loss;
  558. ps = &pix_fmt_info[src_pix_fmt];
  559. pf = &pix_fmt_info[dst_pix_fmt];
  560. /* compute loss */
  561. loss = 0;
  562. pf = &pix_fmt_info[dst_pix_fmt];
  563. if (pf->depth < ps->depth ||
  564. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  565. loss |= FF_LOSS_DEPTH;
  566. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  567. pf->y_chroma_shift > ps->y_chroma_shift)
  568. loss |= FF_LOSS_RESOLUTION;
  569. switch(pf->color_type) {
  570. case FF_COLOR_RGB:
  571. if (ps->color_type != FF_COLOR_RGB &&
  572. ps->color_type != FF_COLOR_GRAY)
  573. loss |= FF_LOSS_COLORSPACE;
  574. break;
  575. case FF_COLOR_GRAY:
  576. if (ps->color_type != FF_COLOR_GRAY)
  577. loss |= FF_LOSS_COLORSPACE;
  578. break;
  579. case FF_COLOR_YUV:
  580. if (ps->color_type != FF_COLOR_YUV)
  581. loss |= FF_LOSS_COLORSPACE;
  582. break;
  583. case FF_COLOR_YUV_JPEG:
  584. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  585. ps->color_type != FF_COLOR_YUV &&
  586. ps->color_type != FF_COLOR_GRAY)
  587. loss |= FF_LOSS_COLORSPACE;
  588. break;
  589. default:
  590. /* fail safe test */
  591. if (ps->color_type != pf->color_type)
  592. loss |= FF_LOSS_COLORSPACE;
  593. break;
  594. }
  595. if (pf->color_type == FF_COLOR_GRAY &&
  596. ps->color_type != FF_COLOR_GRAY)
  597. loss |= FF_LOSS_CHROMA;
  598. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  599. loss |= FF_LOSS_ALPHA;
  600. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  601. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  602. loss |= FF_LOSS_COLORQUANT;
  603. return loss;
  604. }
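/* Example (added illustration, not part of the original file): checking which
 * kinds of loss a yuv420p -> rgb565 conversion would introduce. */
#if 0
    int loss = avcodec_get_pix_fmt_loss(PIX_FMT_RGB565, PIX_FMT_YUV420P, 0);
    if (loss & FF_LOSS_DEPTH) {
        /* destination has fewer bits per component */
    }
    if (loss & FF_LOSS_COLORSPACE) {
        /* a colorspace conversion is required */
    }
#endif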
  605. static int avg_bits_per_pixel(int pix_fmt)
  606. {
  607. int bits;
  608. const PixFmtInfo *pf;
  609. pf = &pix_fmt_info[pix_fmt];
  610. switch(pf->pixel_type) {
  611. case FF_PIXEL_PACKED:
  612. switch(pix_fmt) {
  613. case PIX_FMT_YUYV422:
  614. case PIX_FMT_UYVY422:
  615. case PIX_FMT_RGB565:
  616. case PIX_FMT_RGB555:
  617. case PIX_FMT_BGR565:
  618. case PIX_FMT_BGR555:
  619. bits = 16;
  620. break;
  621. case PIX_FMT_UYYVYY411:
  622. bits = 12;
  623. break;
  624. default:
  625. bits = pf->depth * pf->nb_channels;
  626. break;
  627. }
  628. break;
  629. case FF_PIXEL_PLANAR:
  630. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  631. bits = pf->depth * pf->nb_channels;
  632. } else {
  633. bits = pf->depth + ((2 * pf->depth) >>
  634. (pf->x_chroma_shift + pf->y_chroma_shift));
  635. }
  636. break;
  637. case FF_PIXEL_PALETTE:
  638. bits = 8;
  639. break;
  640. default:
  641. bits = -1;
  642. break;
  643. }
  644. return bits;
  645. }
  646. static int avcodec_find_best_pix_fmt1(int pix_fmt_mask,
  647. int src_pix_fmt,
  648. int has_alpha,
  649. int loss_mask)
  650. {
  651. int dist, i, loss, min_dist, dst_pix_fmt;
  652. /* find exact color match with smallest size */
  653. dst_pix_fmt = -1;
  654. min_dist = 0x7fffffff;
  655. for(i = 0;i < PIX_FMT_NB; i++) {
  656. if (pix_fmt_mask & (1 << i)) {
  657. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  658. if (loss == 0) {
  659. dist = avg_bits_per_pixel(i);
  660. if (dist < min_dist) {
  661. min_dist = dist;
  662. dst_pix_fmt = i;
  663. }
  664. }
  665. }
  666. }
  667. return dst_pix_fmt;
  668. }
  669. int avcodec_find_best_pix_fmt(int pix_fmt_mask, int src_pix_fmt,
  670. int has_alpha, int *loss_ptr)
  671. {
  672. int dst_pix_fmt, loss_mask, i;
  673. static const int loss_mask_order[] = {
  674. ~0, /* no loss first */
  675. ~FF_LOSS_ALPHA,
  676. ~FF_LOSS_RESOLUTION,
  677. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  678. ~FF_LOSS_COLORQUANT,
  679. ~FF_LOSS_DEPTH,
  680. 0,
  681. };
  682. /* try with successive loss */
  683. i = 0;
  684. for(;;) {
  685. loss_mask = loss_mask_order[i++];
  686. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  687. has_alpha, loss_mask);
  688. if (dst_pix_fmt >= 0)
  689. goto found;
  690. if (loss_mask == 0)
  691. break;
  692. }
  693. return -1;
  694. found:
  695. if (loss_ptr)
  696. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  697. return dst_pix_fmt;
  698. }
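/* Example (added illustration, not part of the original file): picking the
 * best output format for a yuv420p source when only yuv420p and rgb24 are
 * acceptable; the mask uses one bit per PixelFormat value. */
#if 0
    int loss;
    int mask = (1 << PIX_FMT_YUV420P) | (1 << PIX_FMT_RGB24);
    int best = avcodec_find_best_pix_fmt(mask, PIX_FMT_YUV420P, 0, &loss);
    /* best is PIX_FMT_YUV420P here and loss is 0 */
#endif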
  699. void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
  700. const uint8_t *src, int src_wrap,
  701. int width, int height)
  702. {
  703. if((!dst) || (!src))
  704. return;
  705. for(;height > 0; height--) {
  706. memcpy(dst, src, width);
  707. dst += dst_wrap;
  708. src += src_wrap;
  709. }
  710. }
  711. void av_picture_copy(AVPicture *dst, const AVPicture *src,
  712. int pix_fmt, int width, int height)
  713. {
  714. int bwidth, bits, i;
  715. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  716. pf = &pix_fmt_info[pix_fmt];
  717. switch(pf->pixel_type) {
  718. case FF_PIXEL_PACKED:
  719. switch(pix_fmt) {
  720. case PIX_FMT_YUYV422:
  721. case PIX_FMT_UYVY422:
  722. case PIX_FMT_RGB565:
  723. case PIX_FMT_RGB555:
  724. case PIX_FMT_BGR565:
  725. case PIX_FMT_BGR555:
  726. bits = 16;
  727. break;
  728. case PIX_FMT_UYYVYY411:
  729. bits = 12;
  730. break;
  731. default:
  732. bits = pf->depth * pf->nb_channels;
  733. break;
  734. }
  735. bwidth = (width * bits + 7) >> 3;
  736. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  737. src->data[0], src->linesize[0],
  738. bwidth, height);
  739. break;
  740. case FF_PIXEL_PLANAR:
  741. for(i = 0; i < pf->nb_channels; i++) {
  742. int w, h;
  743. w = width;
  744. h = height;
  745. if (i == 1 || i == 2) {
  746. w >>= pf->x_chroma_shift;
  747. h >>= pf->y_chroma_shift;
  748. }
  749. bwidth = (w * pf->depth + 7) >> 3;
  750. ff_img_copy_plane(dst->data[i], dst->linesize[i],
  751. src->data[i], src->linesize[i],
  752. bwidth, h);
  753. }
  754. break;
  755. case FF_PIXEL_PALETTE:
  756. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  757. src->data[0], src->linesize[0],
  758. width, height);
  759. /* copy the palette */
  760. ff_img_copy_plane(dst->data[1], dst->linesize[1],
  761. src->data[1], src->linesize[1],
  762. 4, 256);
  763. break;
  764. }
  765. }
  766. /* XXX: totally non-optimized */
  767. static void yuyv422_to_yuv420p(AVPicture *dst, const AVPicture *src,
  768. int width, int height)
  769. {
  770. const uint8_t *p, *p1;
  771. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  772. int w;
  773. p1 = src->data[0];
  774. lum1 = dst->data[0];
  775. cb1 = dst->data[1];
  776. cr1 = dst->data[2];
  777. for(;height >= 1; height -= 2) {
  778. p = p1;
  779. lum = lum1;
  780. cb = cb1;
  781. cr = cr1;
  782. for(w = width; w >= 2; w -= 2) {
  783. lum[0] = p[0];
  784. cb[0] = p[1];
  785. lum[1] = p[2];
  786. cr[0] = p[3];
  787. p += 4;
  788. lum += 2;
  789. cb++;
  790. cr++;
  791. }
  792. if (w) {
  793. lum[0] = p[0];
  794. cb[0] = p[1];
  795. cr[0] = p[3];
  796. cb++;
  797. cr++;
  798. }
  799. p1 += src->linesize[0];
  800. lum1 += dst->linesize[0];
  801. if (height>1) {
  802. p = p1;
  803. lum = lum1;
  804. for(w = width; w >= 2; w -= 2) {
  805. lum[0] = p[0];
  806. lum[1] = p[2];
  807. p += 4;
  808. lum += 2;
  809. }
  810. if (w) {
  811. lum[0] = p[0];
  812. }
  813. p1 += src->linesize[0];
  814. lum1 += dst->linesize[0];
  815. }
  816. cb1 += dst->linesize[1];
  817. cr1 += dst->linesize[2];
  818. }
  819. }
  820. static void uyvy422_to_yuv420p(AVPicture *dst, const AVPicture *src,
  821. int width, int height)
  822. {
  823. const uint8_t *p, *p1;
  824. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  825. int w;
  826. p1 = src->data[0];
  827. lum1 = dst->data[0];
  828. cb1 = dst->data[1];
  829. cr1 = dst->data[2];
  830. for(;height >= 1; height -= 2) {
  831. p = p1;
  832. lum = lum1;
  833. cb = cb1;
  834. cr = cr1;
  835. for(w = width; w >= 2; w -= 2) {
  836. lum[0] = p[1];
  837. cb[0] = p[0];
  838. lum[1] = p[3];
  839. cr[0] = p[2];
  840. p += 4;
  841. lum += 2;
  842. cb++;
  843. cr++;
  844. }
  845. if (w) {
  846. lum[0] = p[1];
  847. cb[0] = p[0];
  848. cr[0] = p[2];
  849. cb++;
  850. cr++;
  851. }
  852. p1 += src->linesize[0];
  853. lum1 += dst->linesize[0];
  854. if (height>1) {
  855. p = p1;
  856. lum = lum1;
  857. for(w = width; w >= 2; w -= 2) {
  858. lum[0] = p[1];
  859. lum[1] = p[3];
  860. p += 4;
  861. lum += 2;
  862. }
  863. if (w) {
  864. lum[0] = p[1];
  865. }
  866. p1 += src->linesize[0];
  867. lum1 += dst->linesize[0];
  868. }
  869. cb1 += dst->linesize[1];
  870. cr1 += dst->linesize[2];
  871. }
  872. }
  873. static void uyvy422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  874. int width, int height)
  875. {
  876. const uint8_t *p, *p1;
  877. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  878. int w;
  879. p1 = src->data[0];
  880. lum1 = dst->data[0];
  881. cb1 = dst->data[1];
  882. cr1 = dst->data[2];
  883. for(;height > 0; height--) {
  884. p = p1;
  885. lum = lum1;
  886. cb = cb1;
  887. cr = cr1;
  888. for(w = width; w >= 2; w -= 2) {
  889. lum[0] = p[1];
  890. cb[0] = p[0];
  891. lum[1] = p[3];
  892. cr[0] = p[2];
  893. p += 4;
  894. lum += 2;
  895. cb++;
  896. cr++;
  897. }
  898. p1 += src->linesize[0];
  899. lum1 += dst->linesize[0];
  900. cb1 += dst->linesize[1];
  901. cr1 += dst->linesize[2];
  902. }
  903. }
  904. static void yuyv422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  905. int width, int height)
  906. {
  907. const uint8_t *p, *p1;
  908. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  909. int w;
  910. p1 = src->data[0];
  911. lum1 = dst->data[0];
  912. cb1 = dst->data[1];
  913. cr1 = dst->data[2];
  914. for(;height > 0; height--) {
  915. p = p1;
  916. lum = lum1;
  917. cb = cb1;
  918. cr = cr1;
  919. for(w = width; w >= 2; w -= 2) {
  920. lum[0] = p[0];
  921. cb[0] = p[1];
  922. lum[1] = p[2];
  923. cr[0] = p[3];
  924. p += 4;
  925. lum += 2;
  926. cb++;
  927. cr++;
  928. }
  929. p1 += src->linesize[0];
  930. lum1 += dst->linesize[0];
  931. cb1 += dst->linesize[1];
  932. cr1 += dst->linesize[2];
  933. }
  934. }
  935. static void yuv422p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  936. int width, int height)
  937. {
  938. uint8_t *p, *p1;
  939. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  940. int w;
  941. p1 = dst->data[0];
  942. lum1 = src->data[0];
  943. cb1 = src->data[1];
  944. cr1 = src->data[2];
  945. for(;height > 0; height--) {
  946. p = p1;
  947. lum = lum1;
  948. cb = cb1;
  949. cr = cr1;
  950. for(w = width; w >= 2; w -= 2) {
  951. p[0] = lum[0];
  952. p[1] = cb[0];
  953. p[2] = lum[1];
  954. p[3] = cr[0];
  955. p += 4;
  956. lum += 2;
  957. cb++;
  958. cr++;
  959. }
  960. p1 += dst->linesize[0];
  961. lum1 += src->linesize[0];
  962. cb1 += src->linesize[1];
  963. cr1 += src->linesize[2];
  964. }
  965. }
  966. static void yuv422p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  967. int width, int height)
  968. {
  969. uint8_t *p, *p1;
  970. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  971. int w;
  972. p1 = dst->data[0];
  973. lum1 = src->data[0];
  974. cb1 = src->data[1];
  975. cr1 = src->data[2];
  976. for(;height > 0; height--) {
  977. p = p1;
  978. lum = lum1;
  979. cb = cb1;
  980. cr = cr1;
  981. for(w = width; w >= 2; w -= 2) {
  982. p[1] = lum[0];
  983. p[0] = cb[0];
  984. p[3] = lum[1];
  985. p[2] = cr[0];
  986. p += 4;
  987. lum += 2;
  988. cb++;
  989. cr++;
  990. }
  991. p1 += dst->linesize[0];
  992. lum1 += src->linesize[0];
  993. cb1 += src->linesize[1];
  994. cr1 += src->linesize[2];
  995. }
  996. }
  997. static void uyyvyy411_to_yuv411p(AVPicture *dst, const AVPicture *src,
  998. int width, int height)
  999. {
  1000. const uint8_t *p, *p1;
  1001. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1002. int w;
  1003. p1 = src->data[0];
  1004. lum1 = dst->data[0];
  1005. cb1 = dst->data[1];
  1006. cr1 = dst->data[2];
  1007. for(;height > 0; height--) {
  1008. p = p1;
  1009. lum = lum1;
  1010. cb = cb1;
  1011. cr = cr1;
  1012. for(w = width; w >= 4; w -= 4) {
  1013. cb[0] = p[0];
  1014. lum[0] = p[1];
  1015. lum[1] = p[2];
  1016. cr[0] = p[3];
  1017. lum[2] = p[4];
  1018. lum[3] = p[5];
  1019. p += 6;
  1020. lum += 4;
  1021. cb++;
  1022. cr++;
  1023. }
  1024. p1 += src->linesize[0];
  1025. lum1 += dst->linesize[0];
  1026. cb1 += dst->linesize[1];
  1027. cr1 += dst->linesize[2];
  1028. }
  1029. }
  1030. static void yuv420p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  1031. int width, int height)
  1032. {
  1033. int w, h;
  1034. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1035. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1036. uint8_t *cb1, *cb2 = src->data[1];
  1037. uint8_t *cr1, *cr2 = src->data[2];
  1038. for(h = height / 2; h--;) {
  1039. line1 = linesrc;
  1040. line2 = linesrc + dst->linesize[0];
  1041. lum1 = lumsrc;
  1042. lum2 = lumsrc + src->linesize[0];
  1043. cb1 = cb2;
  1044. cr1 = cr2;
  1045. for(w = width / 2; w--;) {
  1046. *line1++ = *lum1++; *line2++ = *lum2++;
  1047. *line1++ = *line2++ = *cb1++;
  1048. *line1++ = *lum1++; *line2++ = *lum2++;
  1049. *line1++ = *line2++ = *cr1++;
  1050. }
  1051. linesrc += dst->linesize[0] * 2;
  1052. lumsrc += src->linesize[0] * 2;
  1053. cb2 += src->linesize[1];
  1054. cr2 += src->linesize[2];
  1055. }
  1056. }
  1057. static void yuv420p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1058. int width, int height)
  1059. {
  1060. int w, h;
  1061. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1062. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1063. uint8_t *cb1, *cb2 = src->data[1];
  1064. uint8_t *cr1, *cr2 = src->data[2];
  1065. for(h = height / 2; h--;) {
  1066. line1 = linesrc;
  1067. line2 = linesrc + dst->linesize[0];
  1068. lum1 = lumsrc;
  1069. lum2 = lumsrc + src->linesize[0];
  1070. cb1 = cb2;
  1071. cr1 = cr2;
  1072. for(w = width / 2; w--;) {
  1073. *line1++ = *line2++ = *cb1++;
  1074. *line1++ = *lum1++; *line2++ = *lum2++;
  1075. *line1++ = *line2++ = *cr1++;
  1076. *line1++ = *lum1++; *line2++ = *lum2++;
  1077. }
  1078. linesrc += dst->linesize[0] * 2;
  1079. lumsrc += src->linesize[0] * 2;
  1080. cb2 += src->linesize[1];
  1081. cr2 += src->linesize[2];
  1082. }
  1083. }
  1084. static uint8_t y_ccir_to_jpeg[256];
  1085. static uint8_t y_jpeg_to_ccir[256];
  1086. static uint8_t c_ccir_to_jpeg[256];
  1087. static uint8_t c_jpeg_to_ccir[256];
  1088. /* init various conversion tables */
  1089. static void img_convert_init(void)
  1090. {
  1091. int i;
  1092. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  1093. for(i = 0;i < 256; i++) {
  1094. y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
  1095. y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
  1096. c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
  1097. c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
  1098. }
  1099. }
  1100. /* apply the given table to each pixel */
  1101. static void img_apply_table(uint8_t *dst, int dst_wrap,
  1102. const uint8_t *src, int src_wrap,
  1103. int width, int height, const uint8_t *table1)
  1104. {
  1105. int n;
  1106. const uint8_t *s;
  1107. uint8_t *d;
  1108. const uint8_t *table;
  1109. table = table1;
  1110. for(;height > 0; height--) {
  1111. s = src;
  1112. d = dst;
  1113. n = width;
  1114. while (n >= 4) {
  1115. d[0] = table[s[0]];
  1116. d[1] = table[s[1]];
  1117. d[2] = table[s[2]];
  1118. d[3] = table[s[3]];
  1119. d += 4;
  1120. s += 4;
  1121. n -= 4;
  1122. }
  1123. while (n > 0) {
  1124. d[0] = table[s[0]];
  1125. d++;
  1126. s++;
  1127. n--;
  1128. }
  1129. dst += dst_wrap;
  1130. src += src_wrap;
  1131. }
  1132. }
  1133. /* XXX: use generic filter ? */
  1134. /* XXX: in most cases, the sampling position is incorrect */
  1135. /* 4x1 -> 1x1 */
  1136. static void shrink41(uint8_t *dst, int dst_wrap,
  1137. const uint8_t *src, int src_wrap,
  1138. int width, int height)
  1139. {
  1140. int w;
  1141. const uint8_t *s;
  1142. uint8_t *d;
  1143. for(;height > 0; height--) {
  1144. s = src;
  1145. d = dst;
  1146. for(w = width;w > 0; w--) {
  1147. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  1148. s += 4;
  1149. d++;
  1150. }
  1151. src += src_wrap;
  1152. dst += dst_wrap;
  1153. }
  1154. }
  1155. /* 2x1 -> 1x1 */
  1156. static void shrink21(uint8_t *dst, int dst_wrap,
  1157. const uint8_t *src, int src_wrap,
  1158. int width, int height)
  1159. {
  1160. int w;
  1161. const uint8_t *s;
  1162. uint8_t *d;
  1163. for(;height > 0; height--) {
  1164. s = src;
  1165. d = dst;
  1166. for(w = width;w > 0; w--) {
  1167. d[0] = (s[0] + s[1]) >> 1;
  1168. s += 2;
  1169. d++;
  1170. }
  1171. src += src_wrap;
  1172. dst += dst_wrap;
  1173. }
  1174. }
  1175. /* 1x2 -> 1x1 */
  1176. static void shrink12(uint8_t *dst, int dst_wrap,
  1177. const uint8_t *src, int src_wrap,
  1178. int width, int height)
  1179. {
  1180. int w;
  1181. uint8_t *d;
  1182. const uint8_t *s1, *s2;
  1183. for(;height > 0; height--) {
  1184. s1 = src;
  1185. s2 = s1 + src_wrap;
  1186. d = dst;
  1187. for(w = width;w >= 4; w-=4) {
  1188. d[0] = (s1[0] + s2[0]) >> 1;
  1189. d[1] = (s1[1] + s2[1]) >> 1;
  1190. d[2] = (s1[2] + s2[2]) >> 1;
  1191. d[3] = (s1[3] + s2[3]) >> 1;
  1192. s1 += 4;
  1193. s2 += 4;
  1194. d += 4;
  1195. }
  1196. for(;w > 0; w--) {
  1197. d[0] = (s1[0] + s2[0]) >> 1;
  1198. s1++;
  1199. s2++;
  1200. d++;
  1201. }
  1202. src += 2 * src_wrap;
  1203. dst += dst_wrap;
  1204. }
  1205. }
  1206. /* 2x2 -> 1x1 */
  1207. void ff_shrink22(uint8_t *dst, int dst_wrap,
  1208. const uint8_t *src, int src_wrap,
  1209. int width, int height)
  1210. {
  1211. int w;
  1212. const uint8_t *s1, *s2;
  1213. uint8_t *d;
  1214. for(;height > 0; height--) {
  1215. s1 = src;
  1216. s2 = s1 + src_wrap;
  1217. d = dst;
  1218. for(w = width;w >= 4; w-=4) {
  1219. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1220. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  1221. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  1222. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  1223. s1 += 8;
  1224. s2 += 8;
  1225. d += 4;
  1226. }
  1227. for(;w > 0; w--) {
  1228. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1229. s1 += 2;
  1230. s2 += 2;
  1231. d++;
  1232. }
  1233. src += 2 * src_wrap;
  1234. dst += dst_wrap;
  1235. }
  1236. }
  1237. /* 4x4 -> 1x1 */
  1238. void ff_shrink44(uint8_t *dst, int dst_wrap,
  1239. const uint8_t *src, int src_wrap,
  1240. int width, int height)
  1241. {
  1242. int w;
  1243. const uint8_t *s1, *s2, *s3, *s4;
  1244. uint8_t *d;
  1245. for(;height > 0; height--) {
  1246. s1 = src;
  1247. s2 = s1 + src_wrap;
  1248. s3 = s2 + src_wrap;
  1249. s4 = s3 + src_wrap;
  1250. d = dst;
  1251. for(w = width;w > 0; w--) {
  1252. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  1253. s2[0] + s2[1] + s2[2] + s2[3] +
  1254. s3[0] + s3[1] + s3[2] + s3[3] +
  1255. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  1256. s1 += 4;
  1257. s2 += 4;
  1258. s3 += 4;
  1259. s4 += 4;
  1260. d++;
  1261. }
  1262. src += 4 * src_wrap;
  1263. dst += dst_wrap;
  1264. }
  1265. }
  1266. /* 8x8 -> 1x1 */
  1267. void ff_shrink88(uint8_t *dst, int dst_wrap,
  1268. const uint8_t *src, int src_wrap,
  1269. int width, int height)
  1270. {
  1271. int w, i;
  1272. for(;height > 0; height--) {
  1273. for(w = width;w > 0; w--) {
  1274. int tmp=0;
  1275. for(i=0; i<8; i++){
  1276. tmp += src[0] + src[1] + src[2] + src[3] + src[4] + src[5] + src[6] + src[7];
  1277. src += src_wrap;
  1278. }
  1279. *(dst++) = (tmp + 32)>>6;
  1280. src += 8 - 8*src_wrap;
  1281. }
  1282. src += 8*src_wrap - 8*width;
  1283. dst += dst_wrap - width;
  1284. }
  1285. }
  1286. static void grow21_line(uint8_t *dst, const uint8_t *src,
  1287. int width)
  1288. {
  1289. int w;
  1290. const uint8_t *s1;
  1291. uint8_t *d;
  1292. s1 = src;
  1293. d = dst;
  1294. for(w = width;w >= 4; w-=4) {
  1295. d[1] = d[0] = s1[0];
  1296. d[3] = d[2] = s1[1];
  1297. s1 += 2;
  1298. d += 4;
  1299. }
  1300. for(;w >= 2; w -= 2) {
  1301. d[1] = d[0] = s1[0];
  1302. s1 ++;
  1303. d += 2;
  1304. }
  1305. /* only needed if width is not a multiple of two */
  1306. /* XXX: verify that */
  1307. if (w) {
  1308. d[0] = s1[0];
  1309. }
  1310. }
  1311. static void grow41_line(uint8_t *dst, const uint8_t *src,
  1312. int width)
  1313. {
  1314. int w, v;
  1315. const uint8_t *s1;
  1316. uint8_t *d;
  1317. s1 = src;
  1318. d = dst;
  1319. for(w = width;w >= 4; w-=4) {
  1320. v = s1[0];
  1321. d[0] = v;
  1322. d[1] = v;
  1323. d[2] = v;
  1324. d[3] = v;
  1325. s1 ++;
  1326. d += 4;
  1327. }
  1328. }
  1329. /* 1x1 -> 2x1 */
  1330. static void grow21(uint8_t *dst, int dst_wrap,
  1331. const uint8_t *src, int src_wrap,
  1332. int width, int height)
  1333. {
  1334. for(;height > 0; height--) {
  1335. grow21_line(dst, src, width);
  1336. src += src_wrap;
  1337. dst += dst_wrap;
  1338. }
  1339. }
  1340. /* 1x1 -> 2x2 */
  1341. static void grow22(uint8_t *dst, int dst_wrap,
  1342. const uint8_t *src, int src_wrap,
  1343. int width, int height)
  1344. {
  1345. for(;height > 0; height--) {
  1346. grow21_line(dst, src, width);
  1347. if (height%2)
  1348. src += src_wrap;
  1349. dst += dst_wrap;
  1350. }
  1351. }
  1352. /* 1x1 -> 4x1 */
  1353. static void grow41(uint8_t *dst, int dst_wrap,
  1354. const uint8_t *src, int src_wrap,
  1355. int width, int height)
  1356. {
  1357. for(;height > 0; height--) {
  1358. grow41_line(dst, src, width);
  1359. src += src_wrap;
  1360. dst += dst_wrap;
  1361. }
  1362. }
  1363. /* 1x1 -> 4x4 */
  1364. static void grow44(uint8_t *dst, int dst_wrap,
  1365. const uint8_t *src, int src_wrap,
  1366. int width, int height)
  1367. {
  1368. for(;height > 0; height--) {
  1369. grow41_line(dst, src, width);
  1370. if ((height & 3) == 1)
  1371. src += src_wrap;
  1372. dst += dst_wrap;
  1373. }
  1374. }
  1375. /* 1x2 -> 2x1 */
  1376. static void conv411(uint8_t *dst, int dst_wrap,
  1377. const uint8_t *src, int src_wrap,
  1378. int width, int height)
  1379. {
  1380. int w, c;
  1381. const uint8_t *s1, *s2;
  1382. uint8_t *d;
  1383. width>>=1;
  1384. for(;height > 0; height--) {
  1385. s1 = src;
  1386. s2 = src + src_wrap;
  1387. d = dst;
  1388. for(w = width;w > 0; w--) {
  1389. c = (s1[0] + s2[0]) >> 1;
  1390. d[0] = c;
  1391. d[1] = c;
  1392. s1++;
  1393. s2++;
  1394. d += 2;
  1395. }
  1396. src += src_wrap * 2;
  1397. dst += dst_wrap;
  1398. }
  1399. }
  1400. /* XXX: add jpeg quantize code */
  1401. #define TRANSP_INDEX (6*6*6)
  1402. /* this may be slow, but it allows for extensions */
  1403. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1404. {
  1405. return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
  1406. }
  1407. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  1408. {
  1409. uint32_t *pal;
  1410. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  1411. int i, r, g, b;
  1412. pal = (uint32_t *)palette;
  1413. i = 0;
  1414. for(r = 0; r < 6; r++) {
  1415. for(g = 0; g < 6; g++) {
  1416. for(b = 0; b < 6; b++) {
  1417. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  1418. (pal_value[g] << 8) | pal_value[b];
  1419. }
  1420. }
  1421. }
  1422. if (has_alpha)
  1423. pal[i++] = 0;
  1424. while (i < 256)
  1425. pal[i++] = 0xff000000;
  1426. }
  1427. /* copy bit n to bits 0 ... n - 1 */
  1428. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1429. {
  1430. int mask;
  1431. mask = (1 << n) - 1;
  1432. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1433. }
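/* Worked example (added note): with n = 3 and a 5-bit component already
   shifted to bits 7..3, an input of 0xF8 has bit 3 set, so the low three bits
   are filled with ones and the result is 0xFF; 0x00 stays 0x00. This expands
   a 5- or 6-bit component to the full 0..255 range. */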
  1434. /* rgb555 handling */
  1435. #define RGB_NAME rgb555
  1436. #define RGB_IN(r, g, b, s)\
  1437. {\
  1438. unsigned int v = ((const uint16_t *)(s))[0];\
  1439. r = bitcopy_n(v >> (10 - 3), 3);\
  1440. g = bitcopy_n(v >> (5 - 3), 3);\
  1441. b = bitcopy_n(v << 3, 3);\
  1442. }
  1443. #define RGB_OUT(d, r, g, b)\
  1444. {\
  1445. ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3);\
  1446. }
  1447. #define BPP 2
  1448. #include "imgconvert_template.h"
  1449. /* rgb565 handling */
  1450. #define RGB_NAME rgb565
  1451. #define RGB_IN(r, g, b, s)\
  1452. {\
  1453. unsigned int v = ((const uint16_t *)(s))[0];\
  1454. r = bitcopy_n(v >> (11 - 3), 3);\
  1455. g = bitcopy_n(v >> (5 - 2), 2);\
  1456. b = bitcopy_n(v << 3, 3);\
  1457. }
  1458. #define RGB_OUT(d, r, g, b)\
  1459. {\
  1460. ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
  1461. }
  1462. #define BPP 2
  1463. #include "imgconvert_template.h"
  1464. /* bgr24 handling */
  1465. #define RGB_NAME bgr24
  1466. #define RGB_IN(r, g, b, s)\
  1467. {\
  1468. b = (s)[0];\
  1469. g = (s)[1];\
  1470. r = (s)[2];\
  1471. }
  1472. #define RGB_OUT(d, r, g, b)\
  1473. {\
  1474. (d)[0] = b;\
  1475. (d)[1] = g;\
  1476. (d)[2] = r;\
  1477. }
  1478. #define BPP 3
  1479. #include "imgconvert_template.h"
  1480. #undef RGB_IN
  1481. #undef RGB_OUT
  1482. #undef BPP
  1483. /* rgb24 handling */
  1484. #define RGB_NAME rgb24
  1485. #define FMT_RGB24
  1486. #define RGB_IN(r, g, b, s)\
  1487. {\
  1488. r = (s)[0];\
  1489. g = (s)[1];\
  1490. b = (s)[2];\
  1491. }
  1492. #define RGB_OUT(d, r, g, b)\
  1493. {\
  1494. (d)[0] = r;\
  1495. (d)[1] = g;\
  1496. (d)[2] = b;\
  1497. }
  1498. #define BPP 3
  1499. #include "imgconvert_template.h"
  1500. /* rgb32 handling */
  1501. #define RGB_NAME rgb32
  1502. #define FMT_RGB32
  1503. #define RGB_IN(r, g, b, s)\
  1504. {\
  1505. unsigned int v = ((const uint32_t *)(s))[0];\
  1506. r = (v >> 16) & 0xff;\
  1507. g = (v >> 8) & 0xff;\
  1508. b = v & 0xff;\
  1509. }
  1510. #define RGBA_IN(r, g, b, a, s)\
  1511. {\
  1512. unsigned int v = ((const uint32_t *)(s))[0];\
  1513. a = (v >> 24) & 0xff;\
  1514. r = (v >> 16) & 0xff;\
  1515. g = (v >> 8) & 0xff;\
  1516. b = v & 0xff;\
  1517. }
  1518. #define RGBA_OUT(d, r, g, b, a)\
  1519. {\
  1520. ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
  1521. }
  1522. #define BPP 4
  1523. #include "imgconvert_template.h"
  1524. static void mono_to_gray(AVPicture *dst, const AVPicture *src,
  1525. int width, int height, int xor_mask)
  1526. {
  1527. const unsigned char *p;
  1528. unsigned char *q;
  1529. int v, dst_wrap, src_wrap;
  1530. int y, w;
  1531. p = src->data[0];
  1532. src_wrap = src->linesize[0] - ((width + 7) >> 3);
  1533. q = dst->data[0];
  1534. dst_wrap = dst->linesize[0] - width;
  1535. for(y=0;y<height;y++) {
  1536. w = width;
  1537. while (w >= 8) {
  1538. v = *p++ ^ xor_mask;
  1539. q[0] = -(v >> 7);
  1540. q[1] = -((v >> 6) & 1);
  1541. q[2] = -((v >> 5) & 1);
  1542. q[3] = -((v >> 4) & 1);
  1543. q[4] = -((v >> 3) & 1);
  1544. q[5] = -((v >> 2) & 1);
  1545. q[6] = -((v >> 1) & 1);
  1546. q[7] = -((v >> 0) & 1);
  1547. w -= 8;
  1548. q += 8;
  1549. }
  1550. if (w > 0) {
  1551. v = *p++ ^ xor_mask;
  1552. do {
  1553. q[0] = -((v >> 7) & 1);
  1554. q++;
  1555. v <<= 1;
  1556. } while (--w);
  1557. }
  1558. p += src_wrap;
  1559. q += dst_wrap;
  1560. }
  1561. }
  1562. static void monowhite_to_gray(AVPicture *dst, const AVPicture *src,
  1563. int width, int height)
  1564. {
  1565. mono_to_gray(dst, src, width, height, 0xff);
  1566. }
  1567. static void monoblack_to_gray(AVPicture *dst, const AVPicture *src,
  1568. int width, int height)
  1569. {
  1570. mono_to_gray(dst, src, width, height, 0x00);
  1571. }
  1572. static void gray_to_mono(AVPicture *dst, const AVPicture *src,
  1573. int width, int height, int xor_mask)
  1574. {
  1575. int n;
  1576. const uint8_t *s;
  1577. uint8_t *d;
  1578. int j, b, v, n1, src_wrap, dst_wrap, y;
  1579. s = src->data[0];
  1580. src_wrap = src->linesize[0] - width;
  1581. d = dst->data[0];
  1582. dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
  1583. for(y=0;y<height;y++) {
  1584. n = width;
  1585. while (n >= 8) {
  1586. v = 0;
  1587. for(j=0;j<8;j++) {
  1588. b = s[0];
  1589. s++;
  1590. v = (v << 1) | (b >> 7);
  1591. }
  1592. d[0] = v ^ xor_mask;
  1593. d++;
  1594. n -= 8;
  1595. }
  1596. if (n > 0) {
  1597. n1 = n;
  1598. v = 0;
  1599. while (n > 0) {
  1600. b = s[0];
  1601. s++;
  1602. v = (v << 1) | (b >> 7);
  1603. n--;
  1604. }
  1605. d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
  1606. d++;
  1607. }
  1608. s += src_wrap;
  1609. d += dst_wrap;
  1610. }
  1611. }
  1612. static void gray_to_monowhite(AVPicture *dst, const AVPicture *src,
  1613. int width, int height)
  1614. {
  1615. gray_to_mono(dst, src, width, height, 0xff);
  1616. }
  1617. static void gray_to_monoblack(AVPicture *dst, const AVPicture *src,
  1618. int width, int height)
  1619. {
  1620. gray_to_mono(dst, src, width, height, 0x00);
  1621. }
  1622. static void gray_to_gray16(AVPicture *dst, const AVPicture *src,
  1623. int width, int height)
  1624. {
  1625. int x, y, src_wrap, dst_wrap;
  1626. uint8_t *s, *d;
  1627. s = src->data[0];
  1628. src_wrap = src->linesize[0] - width;
  1629. d = dst->data[0];
  1630. dst_wrap = dst->linesize[0] - width * 2;
  1631. for(y=0; y<height; y++){
  1632. for(x=0; x<width; x++){
  1633. *d++ = *s;
  1634. *d++ = *s++;
  1635. }
  1636. s += src_wrap;
  1637. d += dst_wrap;
  1638. }
  1639. }
  1640. static void gray16_to_gray(AVPicture *dst, const AVPicture *src,
  1641. int width, int height)
  1642. {
  1643. int x, y, src_wrap, dst_wrap;
  1644. uint8_t *s, *d;
  1645. s = src->data[0];
  1646. src_wrap = src->linesize[0] - width * 2;
  1647. d = dst->data[0];
  1648. dst_wrap = dst->linesize[0] - width;
  1649. for(y=0; y<height; y++){
  1650. for(x=0; x<width; x++){
  1651. *d++ = *s;
  1652. s += 2;
  1653. }
  1654. s += src_wrap;
  1655. d += dst_wrap;
  1656. }
  1657. }
  1658. static void gray16be_to_gray(AVPicture *dst, const AVPicture *src,
  1659. int width, int height)
  1660. {
  1661. gray16_to_gray(dst, src, width, height);
  1662. }
  1663. static void gray16le_to_gray(AVPicture *dst, const AVPicture *src,
  1664. int width, int height)
  1665. {
  1666. AVPicture tmpsrc = *src;
  1667. tmpsrc.data[0]++;
  1668. gray16_to_gray(dst, &tmpsrc, width, height);
  1669. }
  1670. static void gray16_to_gray16(AVPicture *dst, const AVPicture *src,
  1671. int width, int height)
  1672. {
  1673. int x, y, src_wrap, dst_wrap;
  1674. uint16_t *s, *d;
  1675. s = src->data[0];
  1676. src_wrap = (src->linesize[0] - width * 2)/2;
  1677. d = dst->data[0];
  1678. dst_wrap = (dst->linesize[0] - width * 2)/2;
  1679. for(y=0; y<height; y++){
  1680. for(x=0; x<width; x++){
  1681. *d++ = bswap_16(*s++);
  1682. }
  1683. s += src_wrap;
  1684. d += dst_wrap;
  1685. }
  1686. }
  1687. typedef struct ConvertEntry {
  1688. void (*convert)(AVPicture *dst,
  1689. const AVPicture *src, int width, int height);
  1690. } ConvertEntry;
  1691. /* Add each new conversion function in this table. In order to be able
  1692. to convert from any format to any format, the following constraints
  1693. must be satisfied:
  1694. - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
  1695. - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
  1696. - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGB32
  1697. - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
  1698. PIX_FMT_RGB24.
  1699. - PIX_FMT_YUYV422 must convert to and from PIX_FMT_YUV422P.
  1700. The other conversion functions are just optimisations for common cases.
  1701. */
  1702. static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
  1703. [PIX_FMT_YUV420P] = {
  1704. [PIX_FMT_YUYV422] = {
  1705. .convert = yuv420p_to_yuyv422,
  1706. },
  1707. [PIX_FMT_RGB555] = {
  1708. .convert = yuv420p_to_rgb555
  1709. },
  1710. [PIX_FMT_RGB565] = {
  1711. .convert = yuv420p_to_rgb565
  1712. },
  1713. [PIX_FMT_BGR24] = {
  1714. .convert = yuv420p_to_bgr24
  1715. },
  1716. [PIX_FMT_RGB24] = {
  1717. .convert = yuv420p_to_rgb24
  1718. },
  1719. [PIX_FMT_RGB32] = {
  1720. .convert = yuv420p_to_rgb32
  1721. },
  1722. [PIX_FMT_UYVY422] = {
  1723. .convert = yuv420p_to_uyvy422,
  1724. },
  1725. },
  1726. [PIX_FMT_YUV422P] = {
  1727. [PIX_FMT_YUYV422] = {
  1728. .convert = yuv422p_to_yuyv422,
  1729. },
  1730. [PIX_FMT_UYVY422] = {
  1731. .convert = yuv422p_to_uyvy422,
  1732. },
  1733. },
  1734. [PIX_FMT_YUV444P] = {
  1735. [PIX_FMT_RGB24] = {
  1736. .convert = yuv444p_to_rgb24
  1737. },
  1738. },
  1739. [PIX_FMT_YUVJ420P] = {
  1740. [PIX_FMT_RGB555] = {
  1741. .convert = yuvj420p_to_rgb555
  1742. },
  1743. [PIX_FMT_RGB565] = {
  1744. .convert = yuvj420p_to_rgb565
  1745. },
  1746. [PIX_FMT_BGR24] = {
  1747. .convert = yuvj420p_to_bgr24
  1748. },
  1749. [PIX_FMT_RGB24] = {
  1750. .convert = yuvj420p_to_rgb24
  1751. },
  1752. [PIX_FMT_RGB32] = {
  1753. .convert = yuvj420p_to_rgb32
  1754. },
  1755. },
  1756. [PIX_FMT_YUVJ444P] = {
  1757. [PIX_FMT_RGB24] = {
  1758. .convert = yuvj444p_to_rgb24
  1759. },
  1760. },
  1761. [PIX_FMT_YUYV422] = {
  1762. [PIX_FMT_YUV420P] = {
  1763. .convert = yuyv422_to_yuv420p,
  1764. },
  1765. [PIX_FMT_YUV422P] = {
  1766. .convert = yuyv422_to_yuv422p,
  1767. },
  1768. },
  1769. [PIX_FMT_UYVY422] = {
  1770. [PIX_FMT_YUV420P] = {
  1771. .convert = uyvy422_to_yuv420p,
  1772. },
  1773. [PIX_FMT_YUV422P] = {
  1774. .convert = uyvy422_to_yuv422p,
  1775. },
  1776. },
  1777. [PIX_FMT_RGB24] = {
  1778. [PIX_FMT_YUV420P] = {
  1779. .convert = rgb24_to_yuv420p
  1780. },
  1781. [PIX_FMT_RGB565] = {
  1782. .convert = rgb24_to_rgb565
  1783. },
  1784. [PIX_FMT_RGB555] = {
  1785. .convert = rgb24_to_rgb555
  1786. },
  1787. [PIX_FMT_RGB32] = {
  1788. .convert = rgb24_to_rgb32
  1789. },
  1790. [PIX_FMT_BGR24] = {
  1791. .convert = rgb24_to_bgr24
  1792. },
  1793. [PIX_FMT_GRAY8] = {
  1794. .convert = rgb24_to_gray
  1795. },
  1796. [PIX_FMT_PAL8] = {
  1797. .convert = rgb24_to_pal8
  1798. },
  1799. [PIX_FMT_YUV444P] = {
  1800. .convert = rgb24_to_yuv444p
  1801. },
  1802. [PIX_FMT_YUVJ420P] = {
  1803. .convert = rgb24_to_yuvj420p
  1804. },
  1805. [PIX_FMT_YUVJ444P] = {
  1806. .convert = rgb24_to_yuvj444p
  1807. },
  1808. },
  1809. [PIX_FMT_RGB32] = {
  1810. [PIX_FMT_RGB24] = {
  1811. .convert = rgb32_to_rgb24
  1812. },
  1813. [PIX_FMT_BGR24] = {
  1814. .convert = rgb32_to_bgr24
  1815. },
  1816. [PIX_FMT_RGB565] = {
  1817. .convert = rgb32_to_rgb565
  1818. },
  1819. [PIX_FMT_RGB555] = {
  1820. .convert = rgb32_to_rgb555
  1821. },
  1822. [PIX_FMT_PAL8] = {
  1823. .convert = rgb32_to_pal8
  1824. },
  1825. [PIX_FMT_YUV420P] = {
  1826. .convert = rgb32_to_yuv420p
  1827. },
  1828. [PIX_FMT_GRAY8] = {
  1829. .convert = rgb32_to_gray
  1830. },
  1831. },
  1832. [PIX_FMT_BGR24] = {
  1833. [PIX_FMT_RGB32] = {
  1834. .convert = bgr24_to_rgb32
  1835. },
  1836. [PIX_FMT_RGB24] = {
  1837. .convert = bgr24_to_rgb24
  1838. },
  1839. [PIX_FMT_YUV420P] = {
  1840. .convert = bgr24_to_yuv420p
  1841. },
  1842. [PIX_FMT_GRAY8] = {
  1843. .convert = bgr24_to_gray
  1844. },
  1845. },
  1846. [PIX_FMT_RGB555] = {
  1847. [PIX_FMT_RGB24] = {
  1848. .convert = rgb555_to_rgb24
  1849. },
  1850. [PIX_FMT_RGB32] = {
  1851. .convert = rgb555_to_rgb32
  1852. },
  1853. [PIX_FMT_YUV420P] = {
  1854. .convert = rgb555_to_yuv420p
  1855. },
  1856. [PIX_FMT_GRAY8] = {
  1857. .convert = rgb555_to_gray
  1858. },
  1859. },
  1860. [PIX_FMT_RGB565] = {
  1861. [PIX_FMT_RGB32] = {
  1862. .convert = rgb565_to_rgb32
  1863. },
  1864. [PIX_FMT_RGB24] = {
  1865. .convert = rgb565_to_rgb24
  1866. },
  1867. [PIX_FMT_YUV420P] = {
  1868. .convert = rgb565_to_yuv420p
  1869. },
  1870. [PIX_FMT_GRAY8] = {
  1871. .convert = rgb565_to_gray
  1872. },
  1873. },
  1874. [PIX_FMT_GRAY16BE] = {
  1875. [PIX_FMT_GRAY8] = {
  1876. .convert = gray16be_to_gray
  1877. },
  1878. [PIX_FMT_GRAY16LE] = {
  1879. .convert = gray16_to_gray16
  1880. },
  1881. },
  1882. [PIX_FMT_GRAY16LE] = {
  1883. [PIX_FMT_GRAY8] = {
  1884. .convert = gray16le_to_gray
  1885. },
  1886. [PIX_FMT_GRAY16BE] = {
  1887. .convert = gray16_to_gray16
  1888. },
  1889. },
  1890. [PIX_FMT_GRAY8] = {
  1891. [PIX_FMT_RGB555] = {
  1892. .convert = gray_to_rgb555
  1893. },
  1894. [PIX_FMT_RGB565] = {
  1895. .convert = gray_to_rgb565
  1896. },
  1897. [PIX_FMT_RGB24] = {
  1898. .convert = gray_to_rgb24
  1899. },
  1900. [PIX_FMT_BGR24] = {
  1901. .convert = gray_to_bgr24
  1902. },
  1903. [PIX_FMT_RGB32] = {
  1904. .convert = gray_to_rgb32
  1905. },
  1906. [PIX_FMT_MONOWHITE] = {
  1907. .convert = gray_to_monowhite
  1908. },
  1909. [PIX_FMT_MONOBLACK] = {
  1910. .convert = gray_to_monoblack
  1911. },
  1912. [PIX_FMT_GRAY16LE] = {
  1913. .convert = gray_to_gray16
  1914. },
  1915. [PIX_FMT_GRAY16BE] = {
  1916. .convert = gray_to_gray16
  1917. },
  1918. },
  1919. [PIX_FMT_MONOWHITE] = {
  1920. [PIX_FMT_GRAY8] = {
  1921. .convert = monowhite_to_gray
  1922. },
  1923. },
  1924. [PIX_FMT_MONOBLACK] = {
  1925. [PIX_FMT_GRAY8] = {
  1926. .convert = monoblack_to_gray
  1927. },
  1928. },
  1929. [PIX_FMT_PAL8] = {
  1930. [PIX_FMT_RGB555] = {
  1931. .convert = pal8_to_rgb555
  1932. },
  1933. [PIX_FMT_RGB565] = {
  1934. .convert = pal8_to_rgb565
  1935. },
  1936. [PIX_FMT_BGR24] = {
  1937. .convert = pal8_to_bgr24
  1938. },
  1939. [PIX_FMT_RGB24] = {
  1940. .convert = pal8_to_rgb24
  1941. },
  1942. [PIX_FMT_RGB32] = {
  1943. .convert = pal8_to_rgb32
  1944. },
  1945. },
  1946. [PIX_FMT_UYYVYY411] = {
  1947. [PIX_FMT_YUV411P] = {
  1948. .convert = uyyvyy411_to_yuv411p,
  1949. },
  1950. },
  1951. };
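/* Example (added illustration, not part of the original file): how the table
 * above is consulted for a direct conversion before any fallback through an
 * intermediate format; "src_pix_fmt", "dst_pix_fmt", "dst", "src", "width"
 * and "height" are assumed to come from the caller. */
#if 0
    const ConvertEntry *ce = &convert_table[src_pix_fmt][dst_pix_fmt];
    if (ce->convert) {
        ce->convert(dst, src, width, height);   /* direct conversion exists */
    }
#endif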
  1952. int avpicture_alloc(AVPicture *picture,
  1953. int pix_fmt, int width, int height)
  1954. {
  1955. int size;
  1956. void *ptr;
  1957. size = avpicture_get_size(pix_fmt, width, height);
  1958. if(size<0)
  1959. goto fail;
  1960. ptr = av_malloc(size);
  1961. if (!ptr)
  1962. goto fail;
  1963. avpicture_fill(picture, ptr, pix_fmt, width, height);
  1964. return 0;
  1965. fail:
  1966. memset(picture, 0, sizeof(AVPicture));
  1967. return -1;
  1968. }
  1969. void avpicture_free(AVPicture *picture)
  1970. {
  1971. av_free(picture->data[0]);
  1972. }
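/* Example (added illustration, not part of the original file): allocating a
 * temporary picture and releasing it again. */
#if 0
    AVPicture pic;
    if (avpicture_alloc(&pic, PIX_FMT_RGB24, 320, 240) == 0) {
        /* ... use pic.data[0] with stride pic.linesize[0] ... */
        avpicture_free(&pic);
    }
#endif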
  1973. /* return true if yuv planar */
  1974. static inline int is_yuv_planar(const PixFmtInfo *ps)
  1975. {
  1976. return (ps->color_type == FF_COLOR_YUV ||
  1977. ps->color_type == FF_COLOR_YUV_JPEG) &&
  1978. ps->pixel_type == FF_PIXEL_PLANAR;
  1979. }
  1980. int av_picture_crop(AVPicture *dst, const AVPicture *src,
  1981. int pix_fmt, int top_band, int left_band)
  1982. {
  1983. int y_shift;
  1984. int x_shift;
  1985. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || !is_yuv_planar(&pix_fmt_info[pix_fmt]))
  1986. return -1;
  1987. y_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  1988. x_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  1989. dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
  1990. dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift);
  1991. dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift);
  1992. dst->linesize[0] = src->linesize[0];
  1993. dst->linesize[1] = src->linesize[1];
  1994. dst->linesize[2] = src->linesize[2];
  1995. return 0;
  1996. }
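/* Note: av_picture_crop() does not copy any pixels; dst ends up pointing
 * into src's buffer with adjusted data[] pointers and unchanged linesizes.
 * For example, with PIX_FMT_YUV420P (chroma shifts 1,1) and
 * top_band = left_band = 16, the luma pointer advances by 16 lines plus
 * 16 bytes and each chroma pointer by 8 lines plus 8 bytes; odd offsets are
 * simply truncated by the chroma shift. */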
  1997. int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
  1998. int pix_fmt, int padtop, int padbottom, int padleft, int padright,
  1999. int *color)
  2000. {
  2001. uint8_t *optr;
  2002. int y_shift;
  2003. int x_shift;
  2004. int yheight;
  2005. int i, y;
  2006. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB ||
  2007. !is_yuv_planar(&pix_fmt_info[pix_fmt])) return -1;
  2008. for (i = 0; i < 3; i++) {
  2009. x_shift = i ? pix_fmt_info[pix_fmt].x_chroma_shift : 0;
  2010. y_shift = i ? pix_fmt_info[pix_fmt].y_chroma_shift : 0;
  2011. if (padtop || padleft) {
  2012. memset(dst->data[i], color[i],
  2013. dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift));
  2014. }
  2015. if (padleft || padright) {
  2016. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  2017. (dst->linesize[i] - (padright >> x_shift));
  2018. yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
  2019. for (y = 0; y < yheight; y++) {
  2020. memset(optr, color[i], (padleft + padright) >> x_shift);
  2021. optr += dst->linesize[i];
  2022. }
  2023. }
  2024. if (src) { /* first line */
  2025. uint8_t *iptr = src->data[i];
  2026. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  2027. (padleft >> x_shift);
  2028. memcpy(optr, iptr, src->linesize[i]);
  2029. iptr += src->linesize[i];
  2030. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  2031. (dst->linesize[i] - (padright >> x_shift));
  2032. yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
  2033. for (y = 0; y < yheight; y++) {
  2034. memset(optr, color[i], (padleft + padright) >> x_shift);
  2035. memcpy(optr + ((padleft + padright) >> x_shift), iptr,
  2036. src->linesize[i]);
  2037. iptr += src->linesize[i];
  2038. optr += dst->linesize[i];
  2039. }
  2040. }
  2041. if (padbottom || padright) {
  2042. optr = dst->data[i] + dst->linesize[i] *
  2043. ((height - padbottom) >> y_shift) - (padright >> x_shift);
  2044. memset(optr, color[i],dst->linesize[i] *
  2045. (padbottom >> y_shift) + (padright >> x_shift));
  2046. }
  2047. }
  2048. return 0;
  2049. }
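/* Note: color[] holds one fill value per plane. For planar YUV, black is
 * typically { 16, 128, 128 } in CCIR range or { 0, 128, 128 } in JPEG
 * range; these values are only an example, the caller chooses them. */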
  2050. #if LIBAVCODEC_VERSION_INT < ((52<<16)+(0<<8)+0)
  2051. void img_copy(AVPicture *dst, const AVPicture *src,
  2052. int pix_fmt, int width, int height)
  2053. {
  2054. av_picture_copy(dst, src, pix_fmt, width, height);
  2055. }
  2056. int img_crop(AVPicture *dst, const AVPicture *src,
  2057. int pix_fmt, int top_band, int left_band)
  2058. {
  2059. return av_picture_crop(dst, src, pix_fmt, top_band, left_band);
  2060. }
  2061. int img_pad(AVPicture *dst, const AVPicture *src, int height, int width,
  2062. int pix_fmt, int padtop, int padbottom, int padleft, int padright,
  2063. int *color)
  2064. {
  2065. return av_picture_pad(dst, src, height, width, pix_fmt, padtop, padbottom, padleft, padright, color);
  2066. }
  2067. #endif
  2068. #ifndef CONFIG_SWSCALER
  2069. /* XXX: always use linesize. Return -1 if not supported */
  2070. int img_convert(AVPicture *dst, int dst_pix_fmt,
  2071. const AVPicture *src, int src_pix_fmt,
  2072. int src_width, int src_height)
  2073. {
  2074. static int inited;
  2075. int i, ret, dst_width, dst_height, int_pix_fmt;
  2076. const PixFmtInfo *src_pix, *dst_pix;
  2077. const ConvertEntry *ce;
  2078. AVPicture tmp1, *tmp = &tmp1;
  2079. if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
  2080. dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
  2081. return -1;
  2082. if (src_width <= 0 || src_height <= 0)
  2083. return 0;
  2084. if (!inited) {
  2085. inited = 1;
  2086. img_convert_init();
  2087. }
  2088. dst_width = src_width;
  2089. dst_height = src_height;
  2090. dst_pix = &pix_fmt_info[dst_pix_fmt];
  2091. src_pix = &pix_fmt_info[src_pix_fmt];
  2092. if (src_pix_fmt == dst_pix_fmt) {
  2093. /* no conversion needed: just copy */
  2094. av_picture_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
  2095. return 0;
  2096. }
  2097. ce = &convert_table[src_pix_fmt][dst_pix_fmt];
  2098. if (ce->convert) {
  2099. /* specific conversion routine */
  2100. ce->convert(dst, src, dst_width, dst_height);
  2101. return 0;
  2102. }
  2103. /* gray to YUV */
  2104. if (is_yuv_planar(dst_pix) &&
  2105. src_pix_fmt == PIX_FMT_GRAY8) {
  2106. int w, h, y;
  2107. uint8_t *d;
  2108. if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
  2109. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2110. src->data[0], src->linesize[0],
  2111. dst_width, dst_height);
  2112. } else {
  2113. img_apply_table(dst->data[0], dst->linesize[0],
  2114. src->data[0], src->linesize[0],
  2115. dst_width, dst_height,
  2116. y_jpeg_to_ccir);
  2117. }
  2118. /* fill U and V with 128 */
  2119. w = dst_width;
  2120. h = dst_height;
  2121. w >>= dst_pix->x_chroma_shift;
  2122. h >>= dst_pix->y_chroma_shift;
  2123. for(i = 1; i <= 2; i++) {
  2124. d = dst->data[i];
  2125. for(y = 0; y< h; y++) {
  2126. memset(d, 128, w);
  2127. d += dst->linesize[i];
  2128. }
  2129. }
  2130. return 0;
  2131. }
  2132. /* YUV to gray */
  2133. if (is_yuv_planar(src_pix) &&
  2134. dst_pix_fmt == PIX_FMT_GRAY8) {
  2135. if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
  2136. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2137. src->data[0], src->linesize[0],
  2138. dst_width, dst_height);
  2139. } else {
  2140. img_apply_table(dst->data[0], dst->linesize[0],
  2141. src->data[0], src->linesize[0],
  2142. dst_width, dst_height,
  2143. y_ccir_to_jpeg);
  2144. }
  2145. return 0;
  2146. }
  2147. /* YUV to YUV planar */
  2148. if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
  2149. int x_shift, y_shift, w, h, xy_shift;
  2150. void (*resize_func)(uint8_t *dst, int dst_wrap,
  2151. const uint8_t *src, int src_wrap,
  2152. int width, int height);
2153. /* compute the chroma size of the smaller plane, i.e. apply the larger of the two chroma shifts */
  2154. w = dst_width;
  2155. h = dst_height;
  2156. if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
  2157. w >>= dst_pix->x_chroma_shift;
  2158. else
  2159. w >>= src_pix->x_chroma_shift;
  2160. if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
  2161. h >>= dst_pix->y_chroma_shift;
  2162. else
  2163. h >>= src_pix->y_chroma_shift;
  2164. x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
  2165. y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
  2166. xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
2167. /* filters must exist at least for conversion from and to the
2168. YUV444 format; other combinations fall back to no_chroma_filter */
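/* Worked example of the xy_shift encoding: converting YUV420P to YUV444P
 * gives x_shift = y_shift = 0 - 1 = -1, so both nibbles become 0xf and
 * xy_shift = 0xff selects grow22 (chroma upsampled by 2 in both
 * directions); the reverse direction gives 0x11 and ff_shrink22. */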
  2169. switch(xy_shift) {
  2170. case 0x00:
  2171. resize_func = ff_img_copy_plane;
  2172. break;
  2173. case 0x10:
  2174. resize_func = shrink21;
  2175. break;
  2176. case 0x20:
  2177. resize_func = shrink41;
  2178. break;
  2179. case 0x01:
  2180. resize_func = shrink12;
  2181. break;
  2182. case 0x11:
  2183. resize_func = ff_shrink22;
  2184. break;
  2185. case 0x22:
  2186. resize_func = ff_shrink44;
  2187. break;
  2188. case 0xf0:
  2189. resize_func = grow21;
  2190. break;
  2191. case 0xe0:
  2192. resize_func = grow41;
  2193. break;
  2194. case 0xff:
  2195. resize_func = grow22;
  2196. break;
  2197. case 0xee:
  2198. resize_func = grow44;
  2199. break;
  2200. case 0xf1:
  2201. resize_func = conv411;
  2202. break;
  2203. default:
  2204. /* currently not handled */
  2205. goto no_chroma_filter;
  2206. }
  2207. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2208. src->data[0], src->linesize[0],
  2209. dst_width, dst_height);
  2210. for(i = 1;i <= 2; i++)
  2211. resize_func(dst->data[i], dst->linesize[i],
  2212. src->data[i], src->linesize[i],
  2213. dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
  2214. /* if yuv color space conversion is needed, we do it here on
  2215. the destination image */
  2216. if (dst_pix->color_type != src_pix->color_type) {
  2217. const uint8_t *y_table, *c_table;
  2218. if (dst_pix->color_type == FF_COLOR_YUV) {
  2219. y_table = y_jpeg_to_ccir;
  2220. c_table = c_jpeg_to_ccir;
  2221. } else {
  2222. y_table = y_ccir_to_jpeg;
  2223. c_table = c_ccir_to_jpeg;
  2224. }
  2225. img_apply_table(dst->data[0], dst->linesize[0],
  2226. dst->data[0], dst->linesize[0],
  2227. dst_width, dst_height,
  2228. y_table);
  2229. for(i = 1;i <= 2; i++)
  2230. img_apply_table(dst->data[i], dst->linesize[i],
  2231. dst->data[i], dst->linesize[i],
  2232. dst_width>>dst_pix->x_chroma_shift,
  2233. dst_height>>dst_pix->y_chroma_shift,
  2234. c_table);
  2235. }
  2236. return 0;
  2237. }
  2238. no_chroma_filter:
  2239. /* try to use an intermediate format */
  2240. if (src_pix_fmt == PIX_FMT_YUYV422 ||
  2241. dst_pix_fmt == PIX_FMT_YUYV422) {
  2242. /* specific case: convert to YUV422P first */
  2243. int_pix_fmt = PIX_FMT_YUV422P;
  2244. } else if (src_pix_fmt == PIX_FMT_UYVY422 ||
  2245. dst_pix_fmt == PIX_FMT_UYVY422) {
  2246. /* specific case: convert to YUV422P first */
  2247. int_pix_fmt = PIX_FMT_YUV422P;
  2248. } else if (src_pix_fmt == PIX_FMT_UYYVYY411 ||
  2249. dst_pix_fmt == PIX_FMT_UYYVYY411) {
  2250. /* specific case: convert to YUV411P first */
  2251. int_pix_fmt = PIX_FMT_YUV411P;
  2252. } else if ((src_pix->color_type == FF_COLOR_GRAY &&
  2253. src_pix_fmt != PIX_FMT_GRAY8) ||
  2254. (dst_pix->color_type == FF_COLOR_GRAY &&
  2255. dst_pix_fmt != PIX_FMT_GRAY8)) {
  2256. /* gray8 is the normalized format */
  2257. int_pix_fmt = PIX_FMT_GRAY8;
  2258. } else if ((is_yuv_planar(src_pix) &&
  2259. src_pix_fmt != PIX_FMT_YUV444P &&
  2260. src_pix_fmt != PIX_FMT_YUVJ444P)) {
  2261. /* yuv444 is the normalized format */
  2262. if (src_pix->color_type == FF_COLOR_YUV_JPEG)
  2263. int_pix_fmt = PIX_FMT_YUVJ444P;
  2264. else
  2265. int_pix_fmt = PIX_FMT_YUV444P;
  2266. } else if ((is_yuv_planar(dst_pix) &&
  2267. dst_pix_fmt != PIX_FMT_YUV444P &&
  2268. dst_pix_fmt != PIX_FMT_YUVJ444P)) {
  2269. /* yuv444 is the normalized format */
  2270. if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
  2271. int_pix_fmt = PIX_FMT_YUVJ444P;
  2272. else
  2273. int_pix_fmt = PIX_FMT_YUV444P;
  2274. } else {
  2275. /* the two formats are rgb or gray8 or yuv[j]444p */
  2276. if (src_pix->is_alpha && dst_pix->is_alpha)
  2277. int_pix_fmt = PIX_FMT_RGB32;
  2278. else
  2279. int_pix_fmt = PIX_FMT_RGB24;
  2280. }
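/* Example of the fallback (assuming the usual classification of gray16 as
 * a gray format): PIX_FMT_GRAY16LE -> PIX_FMT_RGB24 has no direct entry in
 * convert_table, so GRAY8 is chosen as the intermediate format and the
 * conversion is performed by two recursive img_convert() calls,
 * gray16le_to_gray followed by gray_to_rgb24. */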
  2281. if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2282. return -1;
  2283. ret = -1;
  2284. if (img_convert(tmp, int_pix_fmt,
  2285. src, src_pix_fmt, src_width, src_height) < 0)
  2286. goto fail1;
  2287. if (img_convert(dst, dst_pix_fmt,
  2288. tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2289. goto fail1;
  2290. ret = 0;
  2291. fail1:
  2292. avpicture_free(tmp);
  2293. return ret;
  2294. }
  2295. #endif
2296. /* NOTE: we scan all the pixels to obtain exact information */
  2297. static int get_alpha_info_pal8(const AVPicture *src, int width, int height)
  2298. {
  2299. const unsigned char *p;
  2300. int src_wrap, ret, x, y;
  2301. unsigned int a;
  2302. uint32_t *palette = (uint32_t *)src->data[1];
  2303. p = src->data[0];
  2304. src_wrap = src->linesize[0] - width;
  2305. ret = 0;
  2306. for(y=0;y<height;y++) {
  2307. for(x=0;x<width;x++) {
  2308. a = palette[p[0]] >> 24;
  2309. if (a == 0x00) {
  2310. ret |= FF_ALPHA_TRANSP;
  2311. } else if (a != 0xff) {
  2312. ret |= FF_ALPHA_SEMI_TRANSP;
  2313. }
  2314. p++;
  2315. }
  2316. p += src_wrap;
  2317. }
  2318. return ret;
  2319. }
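/* Example: a palette entry 0x00FF0000 (alpha byte 0x00) sets
 * FF_ALPHA_TRANSP, while 0x80FF0000 (alpha byte 0x80) sets
 * FF_ALPHA_SEMI_TRANSP; the alpha value is taken from the top byte of each
 * 32 bit palette entry. */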
  2320. int img_get_alpha_info(const AVPicture *src,
  2321. int pix_fmt, int width, int height)
  2322. {
2323. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
2324. int ret;
  2326. /* no alpha can be represented in format */
  2327. if (!pf->is_alpha)
  2328. return 0;
  2329. switch(pix_fmt) {
  2330. case PIX_FMT_RGB32:
  2331. ret = get_alpha_info_rgb32(src, width, height);
  2332. break;
  2333. case PIX_FMT_PAL8:
  2334. ret = get_alpha_info_pal8(src, width, height);
  2335. break;
  2336. default:
2337. /* unknown format: assume both transparent and semi-transparent pixels may be present */
  2338. ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
  2339. break;
  2340. }
  2341. return ret;
  2342. }
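/* The return value is a bitmask, so a caller typically tests it like this
 * (illustrative sketch; w and h stand for the picture dimensions):
 *
 *     int info = img_get_alpha_info(&pic, PIX_FMT_PAL8, w, h);
 *     if (info & FF_ALPHA_TRANSP)
 *         ... at least one fully transparent pixel ...
 *     if (info & FF_ALPHA_SEMI_TRANSP)
 *         ... at least one partially transparent pixel ...
 */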
  2343. #ifdef HAVE_MMX
  2344. #define DEINT_INPLACE_LINE_LUM \
  2345. movd_m2r(lum_m4[0],mm0);\
  2346. movd_m2r(lum_m3[0],mm1);\
  2347. movd_m2r(lum_m2[0],mm2);\
  2348. movd_m2r(lum_m1[0],mm3);\
  2349. movd_m2r(lum[0],mm4);\
  2350. punpcklbw_r2r(mm7,mm0);\
  2351. movd_r2m(mm2,lum_m4[0]);\
  2352. punpcklbw_r2r(mm7,mm1);\
  2353. punpcklbw_r2r(mm7,mm2);\
  2354. punpcklbw_r2r(mm7,mm3);\
  2355. punpcklbw_r2r(mm7,mm4);\
  2356. paddw_r2r(mm3,mm1);\
  2357. psllw_i2r(1,mm2);\
  2358. paddw_r2r(mm4,mm0);\
  2359. psllw_i2r(2,mm1);\
  2360. paddw_r2r(mm6,mm2);\
  2361. paddw_r2r(mm2,mm1);\
  2362. psubusw_r2r(mm0,mm1);\
  2363. psrlw_i2r(3,mm1);\
  2364. packuswb_r2r(mm7,mm1);\
  2365. movd_r2m(mm1,lum_m2[0]);
  2366. #define DEINT_LINE_LUM \
  2367. movd_m2r(lum_m4[0],mm0);\
  2368. movd_m2r(lum_m3[0],mm1);\
  2369. movd_m2r(lum_m2[0],mm2);\
  2370. movd_m2r(lum_m1[0],mm3);\
  2371. movd_m2r(lum[0],mm4);\
  2372. punpcklbw_r2r(mm7,mm0);\
  2373. punpcklbw_r2r(mm7,mm1);\
  2374. punpcklbw_r2r(mm7,mm2);\
  2375. punpcklbw_r2r(mm7,mm3);\
  2376. punpcklbw_r2r(mm7,mm4);\
  2377. paddw_r2r(mm3,mm1);\
  2378. psllw_i2r(1,mm2);\
  2379. paddw_r2r(mm4,mm0);\
  2380. psllw_i2r(2,mm1);\
  2381. paddw_r2r(mm6,mm2);\
  2382. paddw_r2r(mm2,mm1);\
  2383. psubusw_r2r(mm0,mm1);\
  2384. psrlw_i2r(3,mm1);\
  2385. packuswb_r2r(mm7,mm1);\
  2386. movd_r2m(mm1,dst[0]);
  2387. #endif
  2388. /* filter parameters: [-1 4 2 4 -1] // 8 */
  2389. static void deinterlace_line(uint8_t *dst,
  2390. const uint8_t *lum_m4, const uint8_t *lum_m3,
  2391. const uint8_t *lum_m2, const uint8_t *lum_m1,
  2392. const uint8_t *lum,
  2393. int size)
  2394. {
  2395. #ifndef HAVE_MMX
  2396. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  2397. int sum;
  2398. for(;size > 0;size--) {
  2399. sum = -lum_m4[0];
  2400. sum += lum_m3[0] << 2;
  2401. sum += lum_m2[0] << 1;
  2402. sum += lum_m1[0] << 2;
  2403. sum += -lum[0];
  2404. dst[0] = cm[(sum + 4) >> 3];
  2405. lum_m4++;
  2406. lum_m3++;
  2407. lum_m2++;
  2408. lum_m1++;
  2409. lum++;
  2410. dst++;
  2411. }
  2412. #else
  2413. {
  2414. mmx_t rounder;
  2415. rounder.uw[0]=4;
  2416. rounder.uw[1]=4;
  2417. rounder.uw[2]=4;
  2418. rounder.uw[3]=4;
  2419. pxor_r2r(mm7,mm7);
  2420. movq_m2r(rounder,mm6);
  2421. }
  2422. for (;size > 3; size-=4) {
  2423. DEINT_LINE_LUM
  2424. lum_m4+=4;
  2425. lum_m3+=4;
  2426. lum_m2+=4;
  2427. lum_m1+=4;
  2428. lum+=4;
  2429. dst+=4;
  2430. }
  2431. #endif
  2432. }
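/* Sanity check of the [-1 4 2 4 -1] / 8 filter: on a flat area where all
 * five input samples are 100, the sum is -100 + 400 + 200 + 400 - 100 = 800
 * and (800 + 4) >> 3 = 100, so constant regions pass through unchanged. */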
  2433. static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
  2434. int size)
  2435. {
  2436. #ifndef HAVE_MMX
  2437. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  2438. int sum;
  2439. for(;size > 0;size--) {
  2440. sum = -lum_m4[0];
  2441. sum += lum_m3[0] << 2;
  2442. sum += lum_m2[0] << 1;
  2443. lum_m4[0]=lum_m2[0];
  2444. sum += lum_m1[0] << 2;
  2445. sum += -lum[0];
  2446. lum_m2[0] = cm[(sum + 4) >> 3];
  2447. lum_m4++;
  2448. lum_m3++;
  2449. lum_m2++;
  2450. lum_m1++;
  2451. lum++;
  2452. }
  2453. #else
  2454. {
  2455. mmx_t rounder;
  2456. rounder.uw[0]=4;
  2457. rounder.uw[1]=4;
  2458. rounder.uw[2]=4;
  2459. rounder.uw[3]=4;
  2460. pxor_r2r(mm7,mm7);
  2461. movq_m2r(rounder,mm6);
  2462. }
  2463. for (;size > 3; size-=4) {
  2464. DEINT_INPLACE_LINE_LUM
  2465. lum_m4+=4;
  2466. lum_m3+=4;
  2467. lum_m2+=4;
  2468. lum_m1+=4;
  2469. lum+=4;
  2470. }
  2471. #endif
  2472. }
2473. /* deinterlacing: 2 temporal taps, 3 spatial taps linear filter. The
  2474. top field is copied as is, but the bottom field is deinterlaced
  2475. against the top field. */
  2476. static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
  2477. const uint8_t *src1, int src_wrap,
  2478. int width, int height)
  2479. {
  2480. const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
  2481. int y;
  2482. src_m2 = src1;
  2483. src_m1 = src1;
  2484. src_0=&src_m1[src_wrap];
  2485. src_p1=&src_0[src_wrap];
  2486. src_p2=&src_p1[src_wrap];
  2487. for(y=0;y<(height-2);y+=2) {
  2488. memcpy(dst,src_m1,width);
  2489. dst += dst_wrap;
  2490. deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
  2491. src_m2 = src_0;
  2492. src_m1 = src_p1;
  2493. src_0 = src_p2;
  2494. src_p1 += 2*src_wrap;
  2495. src_p2 += 2*src_wrap;
  2496. dst += dst_wrap;
  2497. }
  2498. memcpy(dst,src_m1,width);
  2499. dst += dst_wrap;
  2500. /* do last line */
  2501. deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
  2502. }
  2503. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  2504. int width, int height)
  2505. {
  2506. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  2507. int y;
  2508. uint8_t *buf;
  2509. buf = (uint8_t*)av_malloc(width);
  2510. src_m1 = src1;
  2511. memcpy(buf,src_m1,width);
  2512. src_0=&src_m1[src_wrap];
  2513. src_p1=&src_0[src_wrap];
  2514. src_p2=&src_p1[src_wrap];
  2515. for(y=0;y<(height-2);y+=2) {
  2516. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  2517. src_m1 = src_p1;
  2518. src_0 = src_p2;
  2519. src_p1 += 2*src_wrap;
  2520. src_p2 += 2*src_wrap;
  2521. }
  2522. /* do last line */
  2523. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  2524. av_free(buf);
  2525. }
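/* In the in-place variant, buf starts as a copy of the top line and then
 * always holds an unfiltered copy of the line two rows above the line
 * currently being rewritten: DEINT_INPLACE_LINE_LUM (and the scalar loop)
 * saves the original centre samples into lum_m4 before overwriting them, so
 * later lines are still filtered against original, not already filtered,
 * data. */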
  2526. int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
  2527. int pix_fmt, int width, int height)
  2528. {
  2529. int i;
  2530. if (pix_fmt != PIX_FMT_YUV420P &&
  2531. pix_fmt != PIX_FMT_YUV422P &&
  2532. pix_fmt != PIX_FMT_YUV444P &&
  2533. pix_fmt != PIX_FMT_YUV411P)
  2534. return -1;
  2535. if ((width & 3) != 0 || (height & 3) != 0)
  2536. return -1;
  2537. for(i=0;i<3;i++) {
  2538. if (i == 1) {
  2539. switch(pix_fmt) {
  2540. case PIX_FMT_YUV420P:
  2541. width >>= 1;
  2542. height >>= 1;
  2543. break;
  2544. case PIX_FMT_YUV422P:
  2545. width >>= 1;
  2546. break;
  2547. case PIX_FMT_YUV411P:
  2548. width >>= 2;
  2549. break;
  2550. default:
  2551. break;
  2552. }
  2553. }
  2554. if (src == dst) {
  2555. deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i],
  2556. width, height);
  2557. } else {
  2558. deinterlace_bottom_field(dst->data[i],dst->linesize[i],
  2559. src->data[i], src->linesize[i],
  2560. width, height);
  2561. }
  2562. }
  2563. #ifdef HAVE_MMX
  2564. emms();
  2565. #endif
  2566. return 0;
  2567. }
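/* Usage sketch (illustrative): the function accepts src == dst, so a decoded
 * planar YUV picture can be deinterlaced in place, e.g.
 *
 *     avpicture_deinterlace(&pic, &pic, PIX_FMT_YUV420P, width, height);
 *
 * Only the planar 420/422/444/411 formats are handled and both width and
 * height must be multiples of 4, otherwise -1 is returned. */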