  1. /*
  2. * Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file imgconvert.c
  23. * misc image conversion routines
  24. */
  25. /* TODO:
  26. * - write 'ffimg' program to test all the image related stuff
  27. * - move all api to slice based system
  28. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  29. */
  30. #include "avcodec.h"
  31. #include "dsputil.h"
  32. #include "colorspace.h"
  33. #ifdef HAVE_MMX
  34. #include "i386/mmx.h"
  35. #endif
  36. #define xglue(x, y) x ## y
  37. #define glue(x, y) xglue(x, y)
  38. #define FF_COLOR_RGB 0 /**< RGB color space */
  39. #define FF_COLOR_GRAY 1 /**< gray color space */
  40. #define FF_COLOR_YUV 2 /**< YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
  41. #define FF_COLOR_YUV_JPEG 3 /**< YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
  42. #define FF_PIXEL_PLANAR 0 /**< each channel has one component in AVPicture */
  43. #define FF_PIXEL_PACKED 1 /**< only one component containing all the channels */
  44. #define FF_PIXEL_PALETTE 2 /**< one component containing indexes for a palette */
  45. typedef struct PixFmtInfo {
  46. const char *name;
  47. uint8_t nb_channels; /**< number of channels (including alpha) */
  48. uint8_t color_type; /**< color type (see FF_COLOR_xxx constants) */
  49. uint8_t pixel_type; /**< pixel storage type (see FF_PIXEL_xxx constants) */
  50. uint8_t is_alpha : 1; /**< true if alpha can be specified */
  51. uint8_t x_chroma_shift; /**< X chroma subsampling factor is 2 ^ shift */
  52. uint8_t y_chroma_shift; /**< Y chroma subsampling factor is 2 ^ shift */
  53. uint8_t depth; /**< bit depth of the color components */
  54. } PixFmtInfo;
  55. /* this table gives more information about formats */
  56. static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
  57. /* YUV formats */
  58. [PIX_FMT_YUV420P] = {
  59. .name = "yuv420p",
  60. .nb_channels = 3,
  61. .color_type = FF_COLOR_YUV,
  62. .pixel_type = FF_PIXEL_PLANAR,
  63. .depth = 8,
  64. .x_chroma_shift = 1, .y_chroma_shift = 1,
  65. },
  66. [PIX_FMT_YUV422P] = {
  67. .name = "yuv422p",
  68. .nb_channels = 3,
  69. .color_type = FF_COLOR_YUV,
  70. .pixel_type = FF_PIXEL_PLANAR,
  71. .depth = 8,
  72. .x_chroma_shift = 1, .y_chroma_shift = 0,
  73. },
  74. [PIX_FMT_YUV444P] = {
  75. .name = "yuv444p",
  76. .nb_channels = 3,
  77. .color_type = FF_COLOR_YUV,
  78. .pixel_type = FF_PIXEL_PLANAR,
  79. .depth = 8,
  80. .x_chroma_shift = 0, .y_chroma_shift = 0,
  81. },
  82. [PIX_FMT_YUYV422] = {
  83. .name = "yuyv422",
  84. .nb_channels = 1,
  85. .color_type = FF_COLOR_YUV,
  86. .pixel_type = FF_PIXEL_PACKED,
  87. .depth = 8,
  88. .x_chroma_shift = 1, .y_chroma_shift = 0,
  89. },
  90. [PIX_FMT_UYVY422] = {
  91. .name = "uyvy422",
  92. .nb_channels = 1,
  93. .color_type = FF_COLOR_YUV,
  94. .pixel_type = FF_PIXEL_PACKED,
  95. .depth = 8,
  96. .x_chroma_shift = 1, .y_chroma_shift = 0,
  97. },
  98. [PIX_FMT_YUV410P] = {
  99. .name = "yuv410p",
  100. .nb_channels = 3,
  101. .color_type = FF_COLOR_YUV,
  102. .pixel_type = FF_PIXEL_PLANAR,
  103. .depth = 8,
  104. .x_chroma_shift = 2, .y_chroma_shift = 2,
  105. },
  106. [PIX_FMT_YUV411P] = {
  107. .name = "yuv411p",
  108. .nb_channels = 3,
  109. .color_type = FF_COLOR_YUV,
  110. .pixel_type = FF_PIXEL_PLANAR,
  111. .depth = 8,
  112. .x_chroma_shift = 2, .y_chroma_shift = 0,
  113. },
  114. [PIX_FMT_YUV440P] = {
  115. .name = "yuv440p",
  116. .nb_channels = 3,
  117. .color_type = FF_COLOR_YUV,
  118. .pixel_type = FF_PIXEL_PLANAR,
  119. .depth = 8,
  120. .x_chroma_shift = 0, .y_chroma_shift = 1,
  121. },
  122. /* JPEG YUV */
  123. [PIX_FMT_YUVJ420P] = {
  124. .name = "yuvj420p",
  125. .nb_channels = 3,
  126. .color_type = FF_COLOR_YUV_JPEG,
  127. .pixel_type = FF_PIXEL_PLANAR,
  128. .depth = 8,
  129. .x_chroma_shift = 1, .y_chroma_shift = 1,
  130. },
  131. [PIX_FMT_YUVJ422P] = {
  132. .name = "yuvj422p",
  133. .nb_channels = 3,
  134. .color_type = FF_COLOR_YUV_JPEG,
  135. .pixel_type = FF_PIXEL_PLANAR,
  136. .depth = 8,
  137. .x_chroma_shift = 1, .y_chroma_shift = 0,
  138. },
  139. [PIX_FMT_YUVJ444P] = {
  140. .name = "yuvj444p",
  141. .nb_channels = 3,
  142. .color_type = FF_COLOR_YUV_JPEG,
  143. .pixel_type = FF_PIXEL_PLANAR,
  144. .depth = 8,
  145. .x_chroma_shift = 0, .y_chroma_shift = 0,
  146. },
  147. [PIX_FMT_YUVJ440P] = {
  148. .name = "yuvj440p",
  149. .nb_channels = 3,
  150. .color_type = FF_COLOR_YUV_JPEG,
  151. .pixel_type = FF_PIXEL_PLANAR,
  152. .depth = 8,
  153. .x_chroma_shift = 0, .y_chroma_shift = 1,
  154. },
  155. /* RGB formats */
  156. [PIX_FMT_RGB24] = {
  157. .name = "rgb24",
  158. .nb_channels = 3,
  159. .color_type = FF_COLOR_RGB,
  160. .pixel_type = FF_PIXEL_PACKED,
  161. .depth = 8,
  162. .x_chroma_shift = 0, .y_chroma_shift = 0,
  163. },
  164. [PIX_FMT_BGR24] = {
  165. .name = "bgr24",
  166. .nb_channels = 3,
  167. .color_type = FF_COLOR_RGB,
  168. .pixel_type = FF_PIXEL_PACKED,
  169. .depth = 8,
  170. .x_chroma_shift = 0, .y_chroma_shift = 0,
  171. },
  172. [PIX_FMT_RGB32] = {
  173. .name = "rgb32",
  174. .nb_channels = 4, .is_alpha = 1,
  175. .color_type = FF_COLOR_RGB,
  176. .pixel_type = FF_PIXEL_PACKED,
  177. .depth = 8,
  178. .x_chroma_shift = 0, .y_chroma_shift = 0,
  179. },
  180. [PIX_FMT_RGB565] = {
  181. .name = "rgb565",
  182. .nb_channels = 3,
  183. .color_type = FF_COLOR_RGB,
  184. .pixel_type = FF_PIXEL_PACKED,
  185. .depth = 5,
  186. .x_chroma_shift = 0, .y_chroma_shift = 0,
  187. },
  188. [PIX_FMT_RGB555] = {
  189. .name = "rgb555",
  190. .nb_channels = 3,
  191. .color_type = FF_COLOR_RGB,
  192. .pixel_type = FF_PIXEL_PACKED,
  193. .depth = 5,
  194. .x_chroma_shift = 0, .y_chroma_shift = 0,
  195. },
  196. /* gray / mono formats */
  197. [PIX_FMT_GRAY16BE] = {
  198. .name = "gray16be",
  199. .nb_channels = 1,
  200. .color_type = FF_COLOR_GRAY,
  201. .pixel_type = FF_PIXEL_PLANAR,
  202. .depth = 16,
  203. },
  204. [PIX_FMT_GRAY16LE] = {
  205. .name = "gray16le",
  206. .nb_channels = 1,
  207. .color_type = FF_COLOR_GRAY,
  208. .pixel_type = FF_PIXEL_PLANAR,
  209. .depth = 16,
  210. },
  211. [PIX_FMT_GRAY8] = {
  212. .name = "gray",
  213. .nb_channels = 1,
  214. .color_type = FF_COLOR_GRAY,
  215. .pixel_type = FF_PIXEL_PLANAR,
  216. .depth = 8,
  217. },
  218. [PIX_FMT_MONOWHITE] = {
  219. .name = "monow",
  220. .nb_channels = 1,
  221. .color_type = FF_COLOR_GRAY,
  222. .pixel_type = FF_PIXEL_PLANAR,
  223. .depth = 1,
  224. },
  225. [PIX_FMT_MONOBLACK] = {
  226. .name = "monob",
  227. .nb_channels = 1,
  228. .color_type = FF_COLOR_GRAY,
  229. .pixel_type = FF_PIXEL_PLANAR,
  230. .depth = 1,
  231. },
  232. /* paletted formats */
  233. [PIX_FMT_PAL8] = {
  234. .name = "pal8",
  235. .nb_channels = 4, .is_alpha = 1,
  236. .color_type = FF_COLOR_RGB,
  237. .pixel_type = FF_PIXEL_PALETTE,
  238. .depth = 8,
  239. },
  240. [PIX_FMT_XVMC_MPEG2_MC] = {
  241. .name = "xvmcmc",
  242. },
  243. [PIX_FMT_XVMC_MPEG2_IDCT] = {
  244. .name = "xvmcidct",
  245. },
  246. [PIX_FMT_UYYVYY411] = {
  247. .name = "uyyvyy411",
  248. .nb_channels = 1,
  249. .color_type = FF_COLOR_YUV,
  250. .pixel_type = FF_PIXEL_PACKED,
  251. .depth = 8,
  252. .x_chroma_shift = 2, .y_chroma_shift = 0,
  253. },
  254. [PIX_FMT_BGR32] = {
  255. .name = "bgr32",
  256. .nb_channels = 4, .is_alpha = 1,
  257. .color_type = FF_COLOR_RGB,
  258. .pixel_type = FF_PIXEL_PACKED,
  259. .depth = 8,
  260. .x_chroma_shift = 0, .y_chroma_shift = 0,
  261. },
  262. [PIX_FMT_BGR565] = {
  263. .name = "bgr565",
  264. .nb_channels = 3,
  265. .color_type = FF_COLOR_RGB,
  266. .pixel_type = FF_PIXEL_PACKED,
  267. .depth = 5,
  268. .x_chroma_shift = 0, .y_chroma_shift = 0,
  269. },
  270. [PIX_FMT_BGR555] = {
  271. .name = "bgr555",
  272. .nb_channels = 3,
  273. .color_type = FF_COLOR_RGB,
  274. .pixel_type = FF_PIXEL_PACKED,
  275. .depth = 5,
  276. .x_chroma_shift = 0, .y_chroma_shift = 0,
  277. },
  278. [PIX_FMT_RGB8] = {
  279. .name = "rgb8",
  280. .nb_channels = 1,
  281. .color_type = FF_COLOR_RGB,
  282. .pixel_type = FF_PIXEL_PACKED,
  283. .depth = 8,
  284. .x_chroma_shift = 0, .y_chroma_shift = 0,
  285. },
  286. [PIX_FMT_RGB4] = {
  287. .name = "rgb4",
  288. .nb_channels = 1,
  289. .color_type = FF_COLOR_RGB,
  290. .pixel_type = FF_PIXEL_PACKED,
  291. .depth = 4,
  292. .x_chroma_shift = 0, .y_chroma_shift = 0,
  293. },
  294. [PIX_FMT_RGB4_BYTE] = {
  295. .name = "rgb4_byte",
  296. .nb_channels = 1,
  297. .color_type = FF_COLOR_RGB,
  298. .pixel_type = FF_PIXEL_PACKED,
  299. .depth = 8,
  300. .x_chroma_shift = 0, .y_chroma_shift = 0,
  301. },
  302. [PIX_FMT_BGR8] = {
  303. .name = "bgr8",
  304. .nb_channels = 1,
  305. .color_type = FF_COLOR_RGB,
  306. .pixel_type = FF_PIXEL_PACKED,
  307. .depth = 8,
  308. .x_chroma_shift = 0, .y_chroma_shift = 0,
  309. },
  310. [PIX_FMT_BGR4] = {
  311. .name = "bgr4",
  312. .nb_channels = 1,
  313. .color_type = FF_COLOR_RGB,
  314. .pixel_type = FF_PIXEL_PACKED,
  315. .depth = 4,
  316. .x_chroma_shift = 0, .y_chroma_shift = 0,
  317. },
  318. [PIX_FMT_BGR4_BYTE] = {
  319. .name = "bgr4_byte",
  320. .nb_channels = 1,
  321. .color_type = FF_COLOR_RGB,
  322. .pixel_type = FF_PIXEL_PACKED,
  323. .depth = 8,
  324. .x_chroma_shift = 0, .y_chroma_shift = 0,
  325. },
  326. [PIX_FMT_NV12] = {
  327. .name = "nv12",
  328. .nb_channels = 2,
  329. .color_type = FF_COLOR_YUV,
  330. .pixel_type = FF_PIXEL_PLANAR,
  331. .depth = 8,
  332. .x_chroma_shift = 1, .y_chroma_shift = 1,
  333. },
  334. [PIX_FMT_NV21] = {
  335. .name = "nv21",
  336. .nb_channels = 2,
  337. .color_type = FF_COLOR_YUV,
  338. .pixel_type = FF_PIXEL_PLANAR,
  339. .depth = 8,
  340. .x_chroma_shift = 1, .y_chroma_shift = 1,
  341. },
  342. [PIX_FMT_BGR32_1] = {
  343. .name = "bgr32_1",
  344. .nb_channels = 4, .is_alpha = 1,
  345. .color_type = FF_COLOR_RGB,
  346. .pixel_type = FF_PIXEL_PACKED,
  347. .depth = 8,
  348. .x_chroma_shift = 0, .y_chroma_shift = 0,
  349. },
  350. [PIX_FMT_RGB32_1] = {
  351. .name = "rgb32_1",
  352. .nb_channels = 4, .is_alpha = 1,
  353. .color_type = FF_COLOR_RGB,
  354. .pixel_type = FF_PIXEL_PACKED,
  355. .depth = 8,
  356. .x_chroma_shift = 0, .y_chroma_shift = 0,
  357. },
  358. };
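/* Return through h_shift/v_shift the chroma subsampling shifts of the given pixel format (the subsampling factor is 2^shift). */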
  359. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  360. {
  361. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  362. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  363. }
  364. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  365. {
  366. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  367. return "???";
  368. else
  369. return pix_fmt_info[pix_fmt].name;
  370. }
  371. enum PixelFormat avcodec_get_pix_fmt(const char* name)
  372. {
  373. int i;
  374. for (i=0; i < PIX_FMT_NB; i++)
  375. if (!strcmp(pix_fmt_info[i].name, name))
  376. break;
  377. return i;
  378. }
  379. void avcodec_pix_fmt_string (char *buf, int buf_size, int pix_fmt)
  380. {
  381. const PixFmtInfo *info;
  382. /* print header */
  383. if (pix_fmt < 0) {
  384. snprintf (buf, buf_size,
  385. "name " " nb_channels" " depth" " is_alpha");
  386. return;
  387. }
  388. info = &pix_fmt_info[pix_fmt];
  389. snprintf (buf, buf_size,
  390. "%-10s" " %1d " " %2d " " %c ",
  391. info->name,
  392. info->nb_channels,
  393. info->depth,
  394. info->is_alpha ? 'y' : 'n'
  395. );
  396. }
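/* Set up the data pointers and line sizes of 'picture' for the given buffer, pixel format and dimensions. Returns the size in bytes of the image buffer needed, or -1 on error. */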
  397. int avpicture_fill(AVPicture *picture, uint8_t *ptr,
  398. int pix_fmt, int width, int height)
  399. {
  400. int size, w2, h2, size2;
  401. const PixFmtInfo *pinfo;
  402. if(avcodec_check_dimensions(NULL, width, height))
  403. goto fail;
  404. pinfo = &pix_fmt_info[pix_fmt];
  405. size = width * height;
  406. switch(pix_fmt) {
  407. case PIX_FMT_YUV420P:
  408. case PIX_FMT_YUV422P:
  409. case PIX_FMT_YUV444P:
  410. case PIX_FMT_YUV410P:
  411. case PIX_FMT_YUV411P:
  412. case PIX_FMT_YUV440P:
  413. case PIX_FMT_YUVJ420P:
  414. case PIX_FMT_YUVJ422P:
  415. case PIX_FMT_YUVJ444P:
  416. case PIX_FMT_YUVJ440P:
  417. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  418. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  419. size2 = w2 * h2;
  420. picture->data[0] = ptr;
  421. picture->data[1] = picture->data[0] + size;
  422. picture->data[2] = picture->data[1] + size2;
  423. picture->linesize[0] = width;
  424. picture->linesize[1] = w2;
  425. picture->linesize[2] = w2;
  426. return size + 2 * size2;
  427. case PIX_FMT_NV12:
  428. case PIX_FMT_NV21:
  429. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  430. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  431. size2 = w2 * h2 * 2;
  432. picture->data[0] = ptr;
  433. picture->data[1] = picture->data[0] + size;
  434. picture->data[2] = NULL;
  435. picture->linesize[0] = width;
  436. picture->linesize[1] = w2 * 2; /* U and V samples are interleaved */
  437. picture->linesize[2] = 0;
  438. return size + size2;
  439. case PIX_FMT_RGB24:
  440. case PIX_FMT_BGR24:
  441. picture->data[0] = ptr;
  442. picture->data[1] = NULL;
  443. picture->data[2] = NULL;
  444. picture->linesize[0] = width * 3;
  445. return size * 3;
  446. case PIX_FMT_RGB32:
  447. case PIX_FMT_BGR32:
  448. case PIX_FMT_RGB32_1:
  449. case PIX_FMT_BGR32_1:
  450. picture->data[0] = ptr;
  451. picture->data[1] = NULL;
  452. picture->data[2] = NULL;
  453. picture->linesize[0] = width * 4;
  454. return size * 4;
  455. case PIX_FMT_GRAY16BE:
  456. case PIX_FMT_GRAY16LE:
  457. case PIX_FMT_BGR555:
  458. case PIX_FMT_BGR565:
  459. case PIX_FMT_RGB555:
  460. case PIX_FMT_RGB565:
  461. case PIX_FMT_YUYV422:
  462. picture->data[0] = ptr;
  463. picture->data[1] = NULL;
  464. picture->data[2] = NULL;
  465. picture->linesize[0] = width * 2;
  466. return size * 2;
  467. case PIX_FMT_UYVY422:
  468. picture->data[0] = ptr;
  469. picture->data[1] = NULL;
  470. picture->data[2] = NULL;
  471. picture->linesize[0] = width * 2;
  472. return size * 2;
  473. case PIX_FMT_UYYVYY411:
  474. picture->data[0] = ptr;
  475. picture->data[1] = NULL;
  476. picture->data[2] = NULL;
  477. picture->linesize[0] = width + width/2;
  478. return size + size/2;
  479. case PIX_FMT_RGB8:
  480. case PIX_FMT_BGR8:
  481. case PIX_FMT_RGB4_BYTE:
  482. case PIX_FMT_BGR4_BYTE:
  483. case PIX_FMT_GRAY8:
  484. picture->data[0] = ptr;
  485. picture->data[1] = NULL;
  486. picture->data[2] = NULL;
  487. picture->linesize[0] = width;
  488. return size;
  489. case PIX_FMT_RGB4:
  490. case PIX_FMT_BGR4:
  491. picture->data[0] = ptr;
  492. picture->data[1] = NULL;
  493. picture->data[2] = NULL;
  494. picture->linesize[0] = width / 2;
  495. return size / 2;
  496. case PIX_FMT_MONOWHITE:
  497. case PIX_FMT_MONOBLACK:
  498. picture->data[0] = ptr;
  499. picture->data[1] = NULL;
  500. picture->data[2] = NULL;
  501. picture->linesize[0] = (width + 7) >> 3;
  502. return picture->linesize[0] * height;
  503. case PIX_FMT_PAL8:
  504. size2 = (size + 3) & ~3;
  505. picture->data[0] = ptr;
  506. picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
  507. picture->data[2] = NULL;
  508. picture->linesize[0] = width;
  509. picture->linesize[1] = 4;
  510. return size2 + 256 * 4;
  511. default:
  512. fail:
  513. picture->data[0] = NULL;
  514. picture->data[1] = NULL;
  515. picture->data[2] = NULL;
  516. picture->data[3] = NULL;
  517. return -1;
  518. }
  519. }
  520. int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
  521. unsigned char *dest, int dest_size)
  522. {
  523. const PixFmtInfo* pf = &pix_fmt_info[pix_fmt];
  524. int i, j, w, h, data_planes;
  525. const unsigned char* s;
  526. int size = avpicture_get_size(pix_fmt, width, height);
  527. if (size > dest_size || size < 0)
  528. return -1;
  529. if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
  530. if (pix_fmt == PIX_FMT_YUYV422 ||
  531. pix_fmt == PIX_FMT_UYVY422 ||
  532. pix_fmt == PIX_FMT_BGR565 ||
  533. pix_fmt == PIX_FMT_BGR555 ||
  534. pix_fmt == PIX_FMT_RGB565 ||
  535. pix_fmt == PIX_FMT_RGB555)
  536. w = width * 2;
  537. else if (pix_fmt == PIX_FMT_UYYVYY411)
  538. w = width + width/2;
  539. else if (pix_fmt == PIX_FMT_PAL8)
  540. w = width;
  541. else
  542. w = width * (pf->depth * pf->nb_channels / 8);
  543. data_planes = 1;
  544. h = height;
  545. } else {
  546. data_planes = pf->nb_channels;
  547. w = (width*pf->depth + 7)/8;
  548. h = height;
  549. }
  550. for (i=0; i<data_planes; i++) {
  551. if (i == 1) {
  552. w = width >> pf->x_chroma_shift;
  553. h = height >> pf->y_chroma_shift;
  554. }
  555. s = src->data[i];
  556. for(j=0; j<h; j++) {
  557. memcpy(dest, s, w);
  558. dest += w;
  559. s += src->linesize[i];
  560. }
  561. }
  562. if (pf->pixel_type == FF_PIXEL_PALETTE)
  563. memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);
  564. return size;
  565. }
  566. int avpicture_get_size(int pix_fmt, int width, int height)
  567. {
  568. AVPicture dummy_pict;
  569. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  570. }
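/* Return a bitmask of FF_LOSS_* flags describing what is lost (depth, resolution, colorspace, alpha, ...) when converting from src_pix_fmt to dst_pix_fmt. */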
  571. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  572. int has_alpha)
  573. {
  574. const PixFmtInfo *pf, *ps;
  575. int loss;
  576. ps = &pix_fmt_info[src_pix_fmt];
  577. pf = &pix_fmt_info[dst_pix_fmt];
  578. /* compute loss */
  579. loss = 0;
  581. if (pf->depth < ps->depth ||
  582. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  583. loss |= FF_LOSS_DEPTH;
  584. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  585. pf->y_chroma_shift > ps->y_chroma_shift)
  586. loss |= FF_LOSS_RESOLUTION;
  587. switch(pf->color_type) {
  588. case FF_COLOR_RGB:
  589. if (ps->color_type != FF_COLOR_RGB &&
  590. ps->color_type != FF_COLOR_GRAY)
  591. loss |= FF_LOSS_COLORSPACE;
  592. break;
  593. case FF_COLOR_GRAY:
  594. if (ps->color_type != FF_COLOR_GRAY)
  595. loss |= FF_LOSS_COLORSPACE;
  596. break;
  597. case FF_COLOR_YUV:
  598. if (ps->color_type != FF_COLOR_YUV)
  599. loss |= FF_LOSS_COLORSPACE;
  600. break;
  601. case FF_COLOR_YUV_JPEG:
  602. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  603. ps->color_type != FF_COLOR_YUV &&
  604. ps->color_type != FF_COLOR_GRAY)
  605. loss |= FF_LOSS_COLORSPACE;
  606. break;
  607. default:
  608. /* fail safe test */
  609. if (ps->color_type != pf->color_type)
  610. loss |= FF_LOSS_COLORSPACE;
  611. break;
  612. }
  613. if (pf->color_type == FF_COLOR_GRAY &&
  614. ps->color_type != FF_COLOR_GRAY)
  615. loss |= FF_LOSS_CHROMA;
  616. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  617. loss |= FF_LOSS_ALPHA;
  618. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  619. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  620. loss |= FF_LOSS_COLORQUANT;
  621. return loss;
  622. }
  623. static int avg_bits_per_pixel(int pix_fmt)
  624. {
  625. int bits;
  626. const PixFmtInfo *pf;
  627. pf = &pix_fmt_info[pix_fmt];
  628. switch(pf->pixel_type) {
  629. case FF_PIXEL_PACKED:
  630. switch(pix_fmt) {
  631. case PIX_FMT_YUYV422:
  632. case PIX_FMT_UYVY422:
  633. case PIX_FMT_RGB565:
  634. case PIX_FMT_RGB555:
  635. case PIX_FMT_BGR565:
  636. case PIX_FMT_BGR555:
  637. bits = 16;
  638. break;
  639. case PIX_FMT_UYYVYY411:
  640. bits = 12;
  641. break;
  642. default:
  643. bits = pf->depth * pf->nb_channels;
  644. break;
  645. }
  646. break;
  647. case FF_PIXEL_PLANAR:
  648. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  649. bits = pf->depth * pf->nb_channels;
  650. } else {
  651. bits = pf->depth + ((2 * pf->depth) >>
  652. (pf->x_chroma_shift + pf->y_chroma_shift));
  653. }
  654. break;
  655. case FF_PIXEL_PALETTE:
  656. bits = 8;
  657. break;
  658. default:
  659. bits = -1;
  660. break;
  661. }
  662. return bits;
  663. }
  664. static int avcodec_find_best_pix_fmt1(int pix_fmt_mask,
  665. int src_pix_fmt,
  666. int has_alpha,
  667. int loss_mask)
  668. {
  669. int dist, i, loss, min_dist, dst_pix_fmt;
  670. /* find exact color match with smallest size */
  671. dst_pix_fmt = -1;
  672. min_dist = 0x7fffffff;
  673. for(i = 0;i < PIX_FMT_NB; i++) {
  674. if (pix_fmt_mask & (1 << i)) {
  675. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  676. if (loss == 0) {
  677. dist = avg_bits_per_pixel(i);
  678. if (dist < min_dist) {
  679. min_dist = dist;
  680. dst_pix_fmt = i;
  681. }
  682. }
  683. }
  684. }
  685. return dst_pix_fmt;
  686. }
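/* Choose from pix_fmt_mask the format that best preserves src_pix_fmt, accepting progressively more kinds of loss; if loss_ptr is not NULL it receives the resulting FF_LOSS_* mask. */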
  687. int avcodec_find_best_pix_fmt(int pix_fmt_mask, int src_pix_fmt,
  688. int has_alpha, int *loss_ptr)
  689. {
  690. int dst_pix_fmt, loss_mask, i;
  691. static const int loss_mask_order[] = {
  692. ~0, /* no loss first */
  693. ~FF_LOSS_ALPHA,
  694. ~FF_LOSS_RESOLUTION,
  695. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  696. ~FF_LOSS_COLORQUANT,
  697. ~FF_LOSS_DEPTH,
  698. 0,
  699. };
  700. /* try with successive loss */
  701. i = 0;
  702. for(;;) {
  703. loss_mask = loss_mask_order[i++];
  704. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  705. has_alpha, loss_mask);
  706. if (dst_pix_fmt >= 0)
  707. goto found;
  708. if (loss_mask == 0)
  709. break;
  710. }
  711. return -1;
  712. found:
  713. if (loss_ptr)
  714. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  715. return dst_pix_fmt;
  716. }
  717. void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
  718. const uint8_t *src, int src_wrap,
  719. int width, int height)
  720. {
  721. if((!dst) || (!src))
  722. return;
  723. for(;height > 0; height--) {
  724. memcpy(dst, src, width);
  725. dst += dst_wrap;
  726. src += src_wrap;
  727. }
  728. }
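/* Copy every plane of src into dst (including the palette for paletted formats); both pictures must use the given pixel format and dimensions. */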
  729. void av_picture_copy(AVPicture *dst, const AVPicture *src,
  730. int pix_fmt, int width, int height)
  731. {
  732. int bwidth, bits, i;
  733. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  735. switch(pf->pixel_type) {
  736. case FF_PIXEL_PACKED:
  737. switch(pix_fmt) {
  738. case PIX_FMT_YUYV422:
  739. case PIX_FMT_UYVY422:
  740. case PIX_FMT_RGB565:
  741. case PIX_FMT_RGB555:
  742. case PIX_FMT_BGR565:
  743. case PIX_FMT_BGR555:
  744. bits = 16;
  745. break;
  746. case PIX_FMT_UYYVYY411:
  747. bits = 12;
  748. break;
  749. default:
  750. bits = pf->depth * pf->nb_channels;
  751. break;
  752. }
  753. bwidth = (width * bits + 7) >> 3;
  754. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  755. src->data[0], src->linesize[0],
  756. bwidth, height);
  757. break;
  758. case FF_PIXEL_PLANAR:
  759. for(i = 0; i < pf->nb_channels; i++) {
  760. int w, h;
  761. w = width;
  762. h = height;
  763. if (i == 1 || i == 2) {
  764. w >>= pf->x_chroma_shift;
  765. h >>= pf->y_chroma_shift;
  766. }
  767. bwidth = (w * pf->depth + 7) >> 3;
  768. ff_img_copy_plane(dst->data[i], dst->linesize[i],
  769. src->data[i], src->linesize[i],
  770. bwidth, h);
  771. }
  772. break;
  773. case FF_PIXEL_PALETTE:
  774. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  775. src->data[0], src->linesize[0],
  776. width, height);
  777. /* copy the palette */
  778. ff_img_copy_plane(dst->data[1], dst->linesize[1],
  779. src->data[1], src->linesize[1],
  780. 4, 256);
  781. break;
  782. }
  783. }
  784. /* XXX: totally non optimized */
  785. static void yuyv422_to_yuv420p(AVPicture *dst, const AVPicture *src,
  786. int width, int height)
  787. {
  788. const uint8_t *p, *p1;
  789. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  790. int w;
  791. p1 = src->data[0];
  792. lum1 = dst->data[0];
  793. cb1 = dst->data[1];
  794. cr1 = dst->data[2];
  795. for(;height >= 1; height -= 2) {
  796. p = p1;
  797. lum = lum1;
  798. cb = cb1;
  799. cr = cr1;
  800. for(w = width; w >= 2; w -= 2) {
  801. lum[0] = p[0];
  802. cb[0] = p[1];
  803. lum[1] = p[2];
  804. cr[0] = p[3];
  805. p += 4;
  806. lum += 2;
  807. cb++;
  808. cr++;
  809. }
  810. if (w) {
  811. lum[0] = p[0];
  812. cb[0] = p[1];
  813. cr[0] = p[3];
  814. cb++;
  815. cr++;
  816. }
  817. p1 += src->linesize[0];
  818. lum1 += dst->linesize[0];
  819. if (height>1) {
  820. p = p1;
  821. lum = lum1;
  822. for(w = width; w >= 2; w -= 2) {
  823. lum[0] = p[0];
  824. lum[1] = p[2];
  825. p += 4;
  826. lum += 2;
  827. }
  828. if (w) {
  829. lum[0] = p[0];
  830. }
  831. p1 += src->linesize[0];
  832. lum1 += dst->linesize[0];
  833. }
  834. cb1 += dst->linesize[1];
  835. cr1 += dst->linesize[2];
  836. }
  837. }
  838. static void uyvy422_to_yuv420p(AVPicture *dst, const AVPicture *src,
  839. int width, int height)
  840. {
  841. const uint8_t *p, *p1;
  842. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  843. int w;
  844. p1 = src->data[0];
  845. lum1 = dst->data[0];
  846. cb1 = dst->data[1];
  847. cr1 = dst->data[2];
  848. for(;height >= 1; height -= 2) {
  849. p = p1;
  850. lum = lum1;
  851. cb = cb1;
  852. cr = cr1;
  853. for(w = width; w >= 2; w -= 2) {
  854. lum[0] = p[1];
  855. cb[0] = p[0];
  856. lum[1] = p[3];
  857. cr[0] = p[2];
  858. p += 4;
  859. lum += 2;
  860. cb++;
  861. cr++;
  862. }
  863. if (w) {
  864. lum[0] = p[1];
  865. cb[0] = p[0];
  866. cr[0] = p[2];
  867. cb++;
  868. cr++;
  869. }
  870. p1 += src->linesize[0];
  871. lum1 += dst->linesize[0];
  872. if (height>1) {
  873. p = p1;
  874. lum = lum1;
  875. for(w = width; w >= 2; w -= 2) {
  876. lum[0] = p[1];
  877. lum[1] = p[3];
  878. p += 4;
  879. lum += 2;
  880. }
  881. if (w) {
  882. lum[0] = p[1];
  883. }
  884. p1 += src->linesize[0];
  885. lum1 += dst->linesize[0];
  886. }
  887. cb1 += dst->linesize[1];
  888. cr1 += dst->linesize[2];
  889. }
  890. }
  891. static void uyvy422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  892. int width, int height)
  893. {
  894. const uint8_t *p, *p1;
  895. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  896. int w;
  897. p1 = src->data[0];
  898. lum1 = dst->data[0];
  899. cb1 = dst->data[1];
  900. cr1 = dst->data[2];
  901. for(;height > 0; height--) {
  902. p = p1;
  903. lum = lum1;
  904. cb = cb1;
  905. cr = cr1;
  906. for(w = width; w >= 2; w -= 2) {
  907. lum[0] = p[1];
  908. cb[0] = p[0];
  909. lum[1] = p[3];
  910. cr[0] = p[2];
  911. p += 4;
  912. lum += 2;
  913. cb++;
  914. cr++;
  915. }
  916. p1 += src->linesize[0];
  917. lum1 += dst->linesize[0];
  918. cb1 += dst->linesize[1];
  919. cr1 += dst->linesize[2];
  920. }
  921. }
  922. static void yuyv422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  923. int width, int height)
  924. {
  925. const uint8_t *p, *p1;
  926. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  927. int w;
  928. p1 = src->data[0];
  929. lum1 = dst->data[0];
  930. cb1 = dst->data[1];
  931. cr1 = dst->data[2];
  932. for(;height > 0; height--) {
  933. p = p1;
  934. lum = lum1;
  935. cb = cb1;
  936. cr = cr1;
  937. for(w = width; w >= 2; w -= 2) {
  938. lum[0] = p[0];
  939. cb[0] = p[1];
  940. lum[1] = p[2];
  941. cr[0] = p[3];
  942. p += 4;
  943. lum += 2;
  944. cb++;
  945. cr++;
  946. }
  947. p1 += src->linesize[0];
  948. lum1 += dst->linesize[0];
  949. cb1 += dst->linesize[1];
  950. cr1 += dst->linesize[2];
  951. }
  952. }
  953. static void yuv422p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  954. int width, int height)
  955. {
  956. uint8_t *p, *p1;
  957. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  958. int w;
  959. p1 = dst->data[0];
  960. lum1 = src->data[0];
  961. cb1 = src->data[1];
  962. cr1 = src->data[2];
  963. for(;height > 0; height--) {
  964. p = p1;
  965. lum = lum1;
  966. cb = cb1;
  967. cr = cr1;
  968. for(w = width; w >= 2; w -= 2) {
  969. p[0] = lum[0];
  970. p[1] = cb[0];
  971. p[2] = lum[1];
  972. p[3] = cr[0];
  973. p += 4;
  974. lum += 2;
  975. cb++;
  976. cr++;
  977. }
  978. p1 += dst->linesize[0];
  979. lum1 += src->linesize[0];
  980. cb1 += src->linesize[1];
  981. cr1 += src->linesize[2];
  982. }
  983. }
  984. static void yuv422p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  985. int width, int height)
  986. {
  987. uint8_t *p, *p1;
  988. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  989. int w;
  990. p1 = dst->data[0];
  991. lum1 = src->data[0];
  992. cb1 = src->data[1];
  993. cr1 = src->data[2];
  994. for(;height > 0; height--) {
  995. p = p1;
  996. lum = lum1;
  997. cb = cb1;
  998. cr = cr1;
  999. for(w = width; w >= 2; w -= 2) {
  1000. p[1] = lum[0];
  1001. p[0] = cb[0];
  1002. p[3] = lum[1];
  1003. p[2] = cr[0];
  1004. p += 4;
  1005. lum += 2;
  1006. cb++;
  1007. cr++;
  1008. }
  1009. p1 += dst->linesize[0];
  1010. lum1 += src->linesize[0];
  1011. cb1 += src->linesize[1];
  1012. cr1 += src->linesize[2];
  1013. }
  1014. }
  1015. static void uyyvyy411_to_yuv411p(AVPicture *dst, const AVPicture *src,
  1016. int width, int height)
  1017. {
  1018. const uint8_t *p, *p1;
  1019. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1020. int w;
  1021. p1 = src->data[0];
  1022. lum1 = dst->data[0];
  1023. cb1 = dst->data[1];
  1024. cr1 = dst->data[2];
  1025. for(;height > 0; height--) {
  1026. p = p1;
  1027. lum = lum1;
  1028. cb = cb1;
  1029. cr = cr1;
  1030. for(w = width; w >= 4; w -= 4) {
  1031. cb[0] = p[0];
  1032. lum[0] = p[1];
  1033. lum[1] = p[2];
  1034. cr[0] = p[3];
  1035. lum[2] = p[4];
  1036. lum[3] = p[5];
  1037. p += 6;
  1038. lum += 4;
  1039. cb++;
  1040. cr++;
  1041. }
  1042. p1 += src->linesize[0];
  1043. lum1 += dst->linesize[0];
  1044. cb1 += dst->linesize[1];
  1045. cr1 += dst->linesize[2];
  1046. }
  1047. }
  1048. static void yuv420p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  1049. int width, int height)
  1050. {
  1051. int w, h;
  1052. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1053. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1054. uint8_t *cb1, *cb2 = src->data[1];
  1055. uint8_t *cr1, *cr2 = src->data[2];
  1056. for(h = height / 2; h--;) {
  1057. line1 = linesrc;
  1058. line2 = linesrc + dst->linesize[0];
  1059. lum1 = lumsrc;
  1060. lum2 = lumsrc + src->linesize[0];
  1061. cb1 = cb2;
  1062. cr1 = cr2;
  1063. for(w = width / 2; w--;) {
  1064. *line1++ = *lum1++; *line2++ = *lum2++;
  1065. *line1++ = *line2++ = *cb1++;
  1066. *line1++ = *lum1++; *line2++ = *lum2++;
  1067. *line1++ = *line2++ = *cr1++;
  1068. }
  1069. linesrc += dst->linesize[0] * 2;
  1070. lumsrc += src->linesize[0] * 2;
  1071. cb2 += src->linesize[1];
  1072. cr2 += src->linesize[2];
  1073. }
  1074. }
  1075. static void yuv420p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1076. int width, int height)
  1077. {
  1078. int w, h;
  1079. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1080. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1081. uint8_t *cb1, *cb2 = src->data[1];
  1082. uint8_t *cr1, *cr2 = src->data[2];
  1083. for(h = height / 2; h--;) {
  1084. line1 = linesrc;
  1085. line2 = linesrc + dst->linesize[0];
  1086. lum1 = lumsrc;
  1087. lum2 = lumsrc + src->linesize[0];
  1088. cb1 = cb2;
  1089. cr1 = cr2;
  1090. for(w = width / 2; w--;) {
  1091. *line1++ = *line2++ = *cb1++;
  1092. *line1++ = *lum1++; *line2++ = *lum2++;
  1093. *line1++ = *line2++ = *cr1++;
  1094. *line1++ = *lum1++; *line2++ = *lum2++;
  1095. }
  1096. linesrc += dst->linesize[0] * 2;
  1097. lumsrc += src->linesize[0] * 2;
  1098. cb2 += src->linesize[1];
  1099. cr2 += src->linesize[2];
  1100. }
  1101. }
  1102. static uint8_t y_ccir_to_jpeg[256];
  1103. static uint8_t y_jpeg_to_ccir[256];
  1104. static uint8_t c_ccir_to_jpeg[256];
  1105. static uint8_t c_jpeg_to_ccir[256];
  1106. /* init various conversion tables */
  1107. static void img_convert_init(void)
  1108. {
  1109. int i;
  1110. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  1111. for(i = 0;i < 256; i++) {
  1112. y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
  1113. y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
  1114. c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
  1115. c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
  1116. }
  1117. }
  1118. /* apply to each pixel the given table */
  1119. static void img_apply_table(uint8_t *dst, int dst_wrap,
  1120. const uint8_t *src, int src_wrap,
  1121. int width, int height, const uint8_t *table1)
  1122. {
  1123. int n;
  1124. const uint8_t *s;
  1125. uint8_t *d;
  1126. const uint8_t *table;
  1127. table = table1;
  1128. for(;height > 0; height--) {
  1129. s = src;
  1130. d = dst;
  1131. n = width;
  1132. while (n >= 4) {
  1133. d[0] = table[s[0]];
  1134. d[1] = table[s[1]];
  1135. d[2] = table[s[2]];
  1136. d[3] = table[s[3]];
  1137. d += 4;
  1138. s += 4;
  1139. n -= 4;
  1140. }
  1141. while (n > 0) {
  1142. d[0] = table[s[0]];
  1143. d++;
  1144. s++;
  1145. n--;
  1146. }
  1147. dst += dst_wrap;
  1148. src += src_wrap;
  1149. }
  1150. }
  1151. /* XXX: use generic filter ? */
  1152. /* XXX: in most cases, the sampling position is incorrect */
  1153. /* 4x1 -> 1x1 */
  1154. static void shrink41(uint8_t *dst, int dst_wrap,
  1155. const uint8_t *src, int src_wrap,
  1156. int width, int height)
  1157. {
  1158. int w;
  1159. const uint8_t *s;
  1160. uint8_t *d;
  1161. for(;height > 0; height--) {
  1162. s = src;
  1163. d = dst;
  1164. for(w = width;w > 0; w--) {
  1165. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  1166. s += 4;
  1167. d++;
  1168. }
  1169. src += src_wrap;
  1170. dst += dst_wrap;
  1171. }
  1172. }
  1173. /* 2x1 -> 1x1 */
  1174. static void shrink21(uint8_t *dst, int dst_wrap,
  1175. const uint8_t *src, int src_wrap,
  1176. int width, int height)
  1177. {
  1178. int w;
  1179. const uint8_t *s;
  1180. uint8_t *d;
  1181. for(;height > 0; height--) {
  1182. s = src;
  1183. d = dst;
  1184. for(w = width;w > 0; w--) {
  1185. d[0] = (s[0] + s[1]) >> 1;
  1186. s += 2;
  1187. d++;
  1188. }
  1189. src += src_wrap;
  1190. dst += dst_wrap;
  1191. }
  1192. }
  1193. /* 1x2 -> 1x1 */
  1194. static void shrink12(uint8_t *dst, int dst_wrap,
  1195. const uint8_t *src, int src_wrap,
  1196. int width, int height)
  1197. {
  1198. int w;
  1199. uint8_t *d;
  1200. const uint8_t *s1, *s2;
  1201. for(;height > 0; height--) {
  1202. s1 = src;
  1203. s2 = s1 + src_wrap;
  1204. d = dst;
  1205. for(w = width;w >= 4; w-=4) {
  1206. d[0] = (s1[0] + s2[0]) >> 1;
  1207. d[1] = (s1[1] + s2[1]) >> 1;
  1208. d[2] = (s1[2] + s2[2]) >> 1;
  1209. d[3] = (s1[3] + s2[3]) >> 1;
  1210. s1 += 4;
  1211. s2 += 4;
  1212. d += 4;
  1213. }
  1214. for(;w > 0; w--) {
  1215. d[0] = (s1[0] + s2[0]) >> 1;
  1216. s1++;
  1217. s2++;
  1218. d++;
  1219. }
  1220. src += 2 * src_wrap;
  1221. dst += dst_wrap;
  1222. }
  1223. }
  1224. /* 2x2 -> 1x1 */
  1225. void ff_shrink22(uint8_t *dst, int dst_wrap,
  1226. const uint8_t *src, int src_wrap,
  1227. int width, int height)
  1228. {
  1229. int w;
  1230. const uint8_t *s1, *s2;
  1231. uint8_t *d;
  1232. for(;height > 0; height--) {
  1233. s1 = src;
  1234. s2 = s1 + src_wrap;
  1235. d = dst;
  1236. for(w = width;w >= 4; w-=4) {
  1237. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1238. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  1239. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  1240. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  1241. s1 += 8;
  1242. s2 += 8;
  1243. d += 4;
  1244. }
  1245. for(;w > 0; w--) {
  1246. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1247. s1 += 2;
  1248. s2 += 2;
  1249. d++;
  1250. }
  1251. src += 2 * src_wrap;
  1252. dst += dst_wrap;
  1253. }
  1254. }
  1255. /* 4x4 -> 1x1 */
  1256. void ff_shrink44(uint8_t *dst, int dst_wrap,
  1257. const uint8_t *src, int src_wrap,
  1258. int width, int height)
  1259. {
  1260. int w;
  1261. const uint8_t *s1, *s2, *s3, *s4;
  1262. uint8_t *d;
  1263. for(;height > 0; height--) {
  1264. s1 = src;
  1265. s2 = s1 + src_wrap;
  1266. s3 = s2 + src_wrap;
  1267. s4 = s3 + src_wrap;
  1268. d = dst;
  1269. for(w = width;w > 0; w--) {
  1270. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  1271. s2[0] + s2[1] + s2[2] + s2[3] +
  1272. s3[0] + s3[1] + s3[2] + s3[3] +
  1273. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  1274. s1 += 4;
  1275. s2 += 4;
  1276. s3 += 4;
  1277. s4 += 4;
  1278. d++;
  1279. }
  1280. src += 4 * src_wrap;
  1281. dst += dst_wrap;
  1282. }
  1283. }
  1284. /* 8x8 -> 1x1 */
  1285. void ff_shrink88(uint8_t *dst, int dst_wrap,
  1286. const uint8_t *src, int src_wrap,
  1287. int width, int height)
  1288. {
  1289. int w, i;
  1290. for(;height > 0; height--) {
  1291. for(w = width;w > 0; w--) {
  1292. int tmp=0;
  1293. for(i=0; i<8; i++){
  1294. tmp += src[0] + src[1] + src[2] + src[3] + src[4] + src[5] + src[6] + src[7];
  1295. src += src_wrap;
  1296. }
  1297. *(dst++) = (tmp + 32)>>6;
  1298. src += 8 - 8*src_wrap;
  1299. }
  1300. src += 8*src_wrap - 8*width;
  1301. dst += dst_wrap - width;
  1302. }
  1303. }
  1304. static void grow21_line(uint8_t *dst, const uint8_t *src,
  1305. int width)
  1306. {
  1307. int w;
  1308. const uint8_t *s1;
  1309. uint8_t *d;
  1310. s1 = src;
  1311. d = dst;
  1312. for(w = width;w >= 4; w-=4) {
  1313. d[1] = d[0] = s1[0];
  1314. d[3] = d[2] = s1[1];
  1315. s1 += 2;
  1316. d += 4;
  1317. }
  1318. for(;w >= 2; w -= 2) {
  1319. d[1] = d[0] = s1[0];
  1320. s1 ++;
  1321. d += 2;
  1322. }
  1323. /* only needed if width is not a multiple of two */
  1324. /* XXX: verify that */
  1325. if (w) {
  1326. d[0] = s1[0];
  1327. }
  1328. }
  1329. static void grow41_line(uint8_t *dst, const uint8_t *src,
  1330. int width)
  1331. {
  1332. int w, v;
  1333. const uint8_t *s1;
  1334. uint8_t *d;
  1335. s1 = src;
  1336. d = dst;
  1337. for(w = width;w >= 4; w-=4) {
  1338. v = s1[0];
  1339. d[0] = v;
  1340. d[1] = v;
  1341. d[2] = v;
  1342. d[3] = v;
  1343. s1 ++;
  1344. d += 4;
  1345. }
  1346. }
  1347. /* 1x1 -> 2x1 */
  1348. static void grow21(uint8_t *dst, int dst_wrap,
  1349. const uint8_t *src, int src_wrap,
  1350. int width, int height)
  1351. {
  1352. for(;height > 0; height--) {
  1353. grow21_line(dst, src, width);
  1354. src += src_wrap;
  1355. dst += dst_wrap;
  1356. }
  1357. }
  1358. /* 1x1 -> 1x2 */
  1359. static void grow12(uint8_t *dst, int dst_wrap,
  1360. const uint8_t *src, int src_wrap,
  1361. int width, int height)
  1362. {
  1363. for(;height > 0; height-=2) {
  1364. memcpy(dst, src, width);
  1365. dst += dst_wrap;
  1366. memcpy(dst, src, width);
  1367. dst += dst_wrap;
  1368. src += src_wrap;
  1369. }
  1370. }
  1371. /* 1x1 -> 2x2 */
  1372. static void grow22(uint8_t *dst, int dst_wrap,
  1373. const uint8_t *src, int src_wrap,
  1374. int width, int height)
  1375. {
  1376. for(;height > 0; height--) {
  1377. grow21_line(dst, src, width);
  1378. if (height%2)
  1379. src += src_wrap;
  1380. dst += dst_wrap;
  1381. }
  1382. }
  1383. /* 1x1 -> 4x1 */
  1384. static void grow41(uint8_t *dst, int dst_wrap,
  1385. const uint8_t *src, int src_wrap,
  1386. int width, int height)
  1387. {
  1388. for(;height > 0; height--) {
  1389. grow41_line(dst, src, width);
  1390. src += src_wrap;
  1391. dst += dst_wrap;
  1392. }
  1393. }
  1394. /* 1x1 -> 4x4 */
  1395. static void grow44(uint8_t *dst, int dst_wrap,
  1396. const uint8_t *src, int src_wrap,
  1397. int width, int height)
  1398. {
  1399. for(;height > 0; height--) {
  1400. grow41_line(dst, src, width);
  1401. if ((height & 3) == 1)
  1402. src += src_wrap;
  1403. dst += dst_wrap;
  1404. }
  1405. }
  1406. /* 1x2 -> 2x1 */
  1407. static void conv411(uint8_t *dst, int dst_wrap,
  1408. const uint8_t *src, int src_wrap,
  1409. int width, int height)
  1410. {
  1411. int w, c;
  1412. const uint8_t *s1, *s2;
  1413. uint8_t *d;
  1414. width>>=1;
  1415. for(;height > 0; height--) {
  1416. s1 = src;
  1417. s2 = src + src_wrap;
  1418. d = dst;
  1419. for(w = width;w > 0; w--) {
  1420. c = (s1[0] + s2[0]) >> 1;
  1421. d[0] = c;
  1422. d[1] = c;
  1423. s1++;
  1424. s2++;
  1425. d += 2;
  1426. }
  1427. src += src_wrap * 2;
  1428. dst += dst_wrap;
  1429. }
  1430. }
  1431. /* XXX: add jpeg quantize code */
  1432. #define TRANSP_INDEX (6*6*6)
  1433. /* this may be slow, but it allows for extensions */
  1434. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1435. {
  1436. return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
  1437. }
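/* Build a 256 entry ARGB palette: a 6x6x6 color cube, an optional fully transparent entry at TRANSP_INDEX, and opaque black padding. */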
  1438. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  1439. {
  1440. uint32_t *pal;
  1441. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  1442. int i, r, g, b;
  1443. pal = (uint32_t *)palette;
  1444. i = 0;
  1445. for(r = 0; r < 6; r++) {
  1446. for(g = 0; g < 6; g++) {
  1447. for(b = 0; b < 6; b++) {
  1448. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  1449. (pal_value[g] << 8) | pal_value[b];
  1450. }
  1451. }
  1452. }
  1453. if (has_alpha)
  1454. pal[i++] = 0;
  1455. while (i < 256)
  1456. pal[i++] = 0xff000000;
  1457. }
  1458. /* copy bit n to bits 0 ... n - 1 */
  1459. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1460. {
  1461. int mask;
  1462. mask = (1 << n) - 1;
  1463. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1464. }
  1465. /* rgb555 handling */
  1466. #define RGB_NAME rgb555
  1467. #define RGB_IN(r, g, b, s)\
  1468. {\
  1469. unsigned int v = ((const uint16_t *)(s))[0];\
  1470. r = bitcopy_n(v >> (10 - 3), 3);\
  1471. g = bitcopy_n(v >> (5 - 3), 3);\
  1472. b = bitcopy_n(v << 3, 3);\
  1473. }
  1474. #define RGB_OUT(d, r, g, b)\
  1475. {\
  1476. ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3);\
  1477. }
  1478. #define BPP 2
  1479. #include "imgconvert_template.h"
  1480. /* rgb565 handling */
  1481. #define RGB_NAME rgb565
  1482. #define RGB_IN(r, g, b, s)\
  1483. {\
  1484. unsigned int v = ((const uint16_t *)(s))[0];\
  1485. r = bitcopy_n(v >> (11 - 3), 3);\
  1486. g = bitcopy_n(v >> (5 - 2), 2);\
  1487. b = bitcopy_n(v << 3, 3);\
  1488. }
  1489. #define RGB_OUT(d, r, g, b)\
  1490. {\
  1491. ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
  1492. }
  1493. #define BPP 2
  1494. #include "imgconvert_template.h"
  1495. /* bgr24 handling */
  1496. #define RGB_NAME bgr24
  1497. #define RGB_IN(r, g, b, s)\
  1498. {\
  1499. b = (s)[0];\
  1500. g = (s)[1];\
  1501. r = (s)[2];\
  1502. }
  1503. #define RGB_OUT(d, r, g, b)\
  1504. {\
  1505. (d)[0] = b;\
  1506. (d)[1] = g;\
  1507. (d)[2] = r;\
  1508. }
  1509. #define BPP 3
  1510. #include "imgconvert_template.h"
  1511. #undef RGB_IN
  1512. #undef RGB_OUT
  1513. #undef BPP
  1514. /* rgb24 handling */
  1515. #define RGB_NAME rgb24
  1516. #define FMT_RGB24
  1517. #define RGB_IN(r, g, b, s)\
  1518. {\
  1519. r = (s)[0];\
  1520. g = (s)[1];\
  1521. b = (s)[2];\
  1522. }
  1523. #define RGB_OUT(d, r, g, b)\
  1524. {\
  1525. (d)[0] = r;\
  1526. (d)[1] = g;\
  1527. (d)[2] = b;\
  1528. }
  1529. #define BPP 3
  1530. #include "imgconvert_template.h"
  1531. /* rgb32 handling */
  1532. #define RGB_NAME rgb32
  1533. #define FMT_RGB32
  1534. #define RGB_IN(r, g, b, s)\
  1535. {\
  1536. unsigned int v = ((const uint32_t *)(s))[0];\
  1537. r = (v >> 16) & 0xff;\
  1538. g = (v >> 8) & 0xff;\
  1539. b = v & 0xff;\
  1540. }
  1541. #define RGBA_IN(r, g, b, a, s)\
  1542. {\
  1543. unsigned int v = ((const uint32_t *)(s))[0];\
  1544. a = (v >> 24) & 0xff;\
  1545. r = (v >> 16) & 0xff;\
  1546. g = (v >> 8) & 0xff;\
  1547. b = v & 0xff;\
  1548. }
  1549. #define RGBA_OUT(d, r, g, b, a)\
  1550. {\
  1551. ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
  1552. }
  1553. #define BPP 4
  1554. #include "imgconvert_template.h"
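/* Expand a 1 bit per pixel plane to 8 bit gray (0x00/0xff); xor_mask selects the input polarity (0xff for monowhite, 0x00 for monoblack). */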
  1555. static void mono_to_gray(AVPicture *dst, const AVPicture *src,
  1556. int width, int height, int xor_mask)
  1557. {
  1558. const unsigned char *p;
  1559. unsigned char *q;
  1560. int v, dst_wrap, src_wrap;
  1561. int y, w;
  1562. p = src->data[0];
  1563. src_wrap = src->linesize[0] - ((width + 7) >> 3);
  1564. q = dst->data[0];
  1565. dst_wrap = dst->linesize[0] - width;
  1566. for(y=0;y<height;y++) {
  1567. w = width;
  1568. while (w >= 8) {
  1569. v = *p++ ^ xor_mask;
  1570. q[0] = -(v >> 7);
  1571. q[1] = -((v >> 6) & 1);
  1572. q[2] = -((v >> 5) & 1);
  1573. q[3] = -((v >> 4) & 1);
  1574. q[4] = -((v >> 3) & 1);
  1575. q[5] = -((v >> 2) & 1);
  1576. q[6] = -((v >> 1) & 1);
  1577. q[7] = -((v >> 0) & 1);
  1578. w -= 8;
  1579. q += 8;
  1580. }
  1581. if (w > 0) {
  1582. v = *p++ ^ xor_mask;
  1583. do {
  1584. q[0] = -((v >> 7) & 1);
  1585. q++;
  1586. v <<= 1;
  1587. } while (--w);
  1588. }
  1589. p += src_wrap;
  1590. q += dst_wrap;
  1591. }
  1592. }
  1593. static void monowhite_to_gray(AVPicture *dst, const AVPicture *src,
  1594. int width, int height)
  1595. {
  1596. mono_to_gray(dst, src, width, height, 0xff);
  1597. }
  1598. static void monoblack_to_gray(AVPicture *dst, const AVPicture *src,
  1599. int width, int height)
  1600. {
  1601. mono_to_gray(dst, src, width, height, 0x00);
  1602. }
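/* Pack 8 bit gray down to 1 bit per pixel using the most significant bit of each sample; xor_mask selects the output polarity. */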
  1603. static void gray_to_mono(AVPicture *dst, const AVPicture *src,
  1604. int width, int height, int xor_mask)
  1605. {
  1606. int n;
  1607. const uint8_t *s;
  1608. uint8_t *d;
  1609. int j, b, v, n1, src_wrap, dst_wrap, y;
  1610. s = src->data[0];
  1611. src_wrap = src->linesize[0] - width;
  1612. d = dst->data[0];
  1613. dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
  1614. for(y=0;y<height;y++) {
  1615. n = width;
  1616. while (n >= 8) {
  1617. v = 0;
  1618. for(j=0;j<8;j++) {
  1619. b = s[0];
  1620. s++;
  1621. v = (v << 1) | (b >> 7);
  1622. }
  1623. d[0] = v ^ xor_mask;
  1624. d++;
  1625. n -= 8;
  1626. }
  1627. if (n > 0) {
  1628. n1 = n;
  1629. v = 0;
  1630. while (n > 0) {
  1631. b = s[0];
  1632. s++;
  1633. v = (v << 1) | (b >> 7);
  1634. n--;
  1635. }
  1636. d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
  1637. d++;
  1638. }
  1639. s += src_wrap;
  1640. d += dst_wrap;
  1641. }
  1642. }
  1643. static void gray_to_monowhite(AVPicture *dst, const AVPicture *src,
  1644. int width, int height)
  1645. {
  1646. gray_to_mono(dst, src, width, height, 0xff);
  1647. }
  1648. static void gray_to_monoblack(AVPicture *dst, const AVPicture *src,
  1649. int width, int height)
  1650. {
  1651. gray_to_mono(dst, src, width, height, 0x00);
  1652. }
  1653. static void gray_to_gray16(AVPicture *dst, const AVPicture *src,
  1654. int width, int height)
  1655. {
  1656. int x, y, src_wrap, dst_wrap;
  1657. uint8_t *s, *d;
  1658. s = src->data[0];
  1659. src_wrap = src->linesize[0] - width;
  1660. d = dst->data[0];
  1661. dst_wrap = dst->linesize[0] - width * 2;
  1662. for(y=0; y<height; y++){
  1663. for(x=0; x<width; x++){
  1664. *d++ = *s;
  1665. *d++ = *s++;
  1666. }
  1667. s += src_wrap;
  1668. d += dst_wrap;
  1669. }
  1670. }
  1671. static void gray16_to_gray(AVPicture *dst, const AVPicture *src,
  1672. int width, int height)
  1673. {
  1674. int x, y, src_wrap, dst_wrap;
  1675. uint8_t *s, *d;
  1676. s = src->data[0];
  1677. src_wrap = src->linesize[0] - width * 2;
  1678. d = dst->data[0];
  1679. dst_wrap = dst->linesize[0] - width;
  1680. for(y=0; y<height; y++){
  1681. for(x=0; x<width; x++){
  1682. *d++ = *s;
  1683. s += 2;
  1684. }
  1685. s += src_wrap;
  1686. d += dst_wrap;
  1687. }
  1688. }
  1689. static void gray16be_to_gray(AVPicture *dst, const AVPicture *src,
  1690. int width, int height)
  1691. {
  1692. gray16_to_gray(dst, src, width, height);
  1693. }
  1694. static void gray16le_to_gray(AVPicture *dst, const AVPicture *src,
  1695. int width, int height)
  1696. {
  1697. AVPicture tmpsrc = *src;
  1698. tmpsrc.data[0]++;
  1699. gray16_to_gray(dst, &tmpsrc, width, height);
  1700. }
  1701. static void gray16_to_gray16(AVPicture *dst, const AVPicture *src,
  1702. int width, int height)
  1703. {
  1704. int x, y, src_wrap, dst_wrap;
  1705. uint16_t *s, *d;
  1706. s = (uint16_t *)src->data[0];
  1707. src_wrap = (src->linesize[0] - width * 2)/2;
  1708. d = (uint16_t *)dst->data[0];
  1709. dst_wrap = (dst->linesize[0] - width * 2)/2;
  1710. for(y=0; y<height; y++){
  1711. for(x=0; x<width; x++){
  1712. *d++ = bswap_16(*s++);
  1713. }
  1714. s += src_wrap;
  1715. d += dst_wrap;
  1716. }
  1717. }
  1718. typedef struct ConvertEntry {
  1719. void (*convert)(AVPicture *dst,
  1720. const AVPicture *src, int width, int height);
  1721. } ConvertEntry;
  1722. /* Add each new conversion function in this table. In order to be able
  1723. to convert from any format to any format, the following constraints
  1724. must be satisfied:
  1725. - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
  1726. - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
  1727. - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGB32
  1728. - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
  1729. PIX_FMT_RGB24.
  1730. - PIX_FMT_YUYV422 must convert to and from PIX_FMT_YUV422P.
  1731. The other conversion functions are just optimisations for common cases.
  1732. */
  1733. static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
  1734. [PIX_FMT_YUV420P] = {
  1735. [PIX_FMT_YUYV422] = {
  1736. .convert = yuv420p_to_yuyv422,
  1737. },
  1738. [PIX_FMT_RGB555] = {
  1739. .convert = yuv420p_to_rgb555
  1740. },
  1741. [PIX_FMT_RGB565] = {
  1742. .convert = yuv420p_to_rgb565
  1743. },
  1744. [PIX_FMT_BGR24] = {
  1745. .convert = yuv420p_to_bgr24
  1746. },
  1747. [PIX_FMT_RGB24] = {
  1748. .convert = yuv420p_to_rgb24
  1749. },
  1750. [PIX_FMT_RGB32] = {
  1751. .convert = yuv420p_to_rgb32
  1752. },
  1753. [PIX_FMT_UYVY422] = {
  1754. .convert = yuv420p_to_uyvy422,
  1755. },
  1756. },
  1757. [PIX_FMT_YUV422P] = {
  1758. [PIX_FMT_YUYV422] = {
  1759. .convert = yuv422p_to_yuyv422,
  1760. },
  1761. [PIX_FMT_UYVY422] = {
  1762. .convert = yuv422p_to_uyvy422,
  1763. },
  1764. },
  1765. [PIX_FMT_YUV444P] = {
  1766. [PIX_FMT_RGB24] = {
  1767. .convert = yuv444p_to_rgb24
  1768. },
  1769. },
  1770. [PIX_FMT_YUVJ420P] = {
  1771. [PIX_FMT_RGB555] = {
  1772. .convert = yuvj420p_to_rgb555
  1773. },
  1774. [PIX_FMT_RGB565] = {
  1775. .convert = yuvj420p_to_rgb565
  1776. },
  1777. [PIX_FMT_BGR24] = {
  1778. .convert = yuvj420p_to_bgr24
  1779. },
  1780. [PIX_FMT_RGB24] = {
  1781. .convert = yuvj420p_to_rgb24
  1782. },
  1783. [PIX_FMT_RGB32] = {
  1784. .convert = yuvj420p_to_rgb32
  1785. },
  1786. },
  1787. [PIX_FMT_YUVJ444P] = {
  1788. [PIX_FMT_RGB24] = {
  1789. .convert = yuvj444p_to_rgb24
  1790. },
  1791. },
  1792. [PIX_FMT_YUYV422] = {
  1793. [PIX_FMT_YUV420P] = {
  1794. .convert = yuyv422_to_yuv420p,
  1795. },
  1796. [PIX_FMT_YUV422P] = {
  1797. .convert = yuyv422_to_yuv422p,
  1798. },
  1799. },
  1800. [PIX_FMT_UYVY422] = {
  1801. [PIX_FMT_YUV420P] = {
  1802. .convert = uyvy422_to_yuv420p,
  1803. },
  1804. [PIX_FMT_YUV422P] = {
  1805. .convert = uyvy422_to_yuv422p,
  1806. },
  1807. },
  1808. [PIX_FMT_RGB24] = {
  1809. [PIX_FMT_YUV420P] = {
  1810. .convert = rgb24_to_yuv420p
  1811. },
  1812. [PIX_FMT_RGB565] = {
  1813. .convert = rgb24_to_rgb565
  1814. },
  1815. [PIX_FMT_RGB555] = {
  1816. .convert = rgb24_to_rgb555
  1817. },
  1818. [PIX_FMT_RGB32] = {
  1819. .convert = rgb24_to_rgb32
  1820. },
  1821. [PIX_FMT_BGR24] = {
  1822. .convert = rgb24_to_bgr24
  1823. },
  1824. [PIX_FMT_GRAY8] = {
  1825. .convert = rgb24_to_gray
  1826. },
  1827. [PIX_FMT_PAL8] = {
  1828. .convert = rgb24_to_pal8
  1829. },
  1830. [PIX_FMT_YUV444P] = {
  1831. .convert = rgb24_to_yuv444p
  1832. },
  1833. [PIX_FMT_YUVJ420P] = {
  1834. .convert = rgb24_to_yuvj420p
  1835. },
  1836. [PIX_FMT_YUVJ444P] = {
  1837. .convert = rgb24_to_yuvj444p
  1838. },
  1839. },
  1840. [PIX_FMT_RGB32] = {
  1841. [PIX_FMT_RGB24] = {
  1842. .convert = rgb32_to_rgb24
  1843. },
  1844. [PIX_FMT_BGR24] = {
  1845. .convert = rgb32_to_bgr24
  1846. },
  1847. [PIX_FMT_RGB565] = {
  1848. .convert = rgb32_to_rgb565
  1849. },
  1850. [PIX_FMT_RGB555] = {
  1851. .convert = rgb32_to_rgb555
  1852. },
  1853. [PIX_FMT_PAL8] = {
  1854. .convert = rgb32_to_pal8
  1855. },
  1856. [PIX_FMT_YUV420P] = {
  1857. .convert = rgb32_to_yuv420p
  1858. },
  1859. [PIX_FMT_GRAY8] = {
  1860. .convert = rgb32_to_gray
  1861. },
  1862. },
  1863. [PIX_FMT_BGR24] = {
  1864. [PIX_FMT_RGB32] = {
  1865. .convert = bgr24_to_rgb32
  1866. },
  1867. [PIX_FMT_RGB24] = {
  1868. .convert = bgr24_to_rgb24
  1869. },
  1870. [PIX_FMT_YUV420P] = {
  1871. .convert = bgr24_to_yuv420p
  1872. },
  1873. [PIX_FMT_GRAY8] = {
  1874. .convert = bgr24_to_gray
  1875. },
  1876. },
  1877. [PIX_FMT_RGB555] = {
  1878. [PIX_FMT_RGB24] = {
  1879. .convert = rgb555_to_rgb24
  1880. },
  1881. [PIX_FMT_RGB32] = {
  1882. .convert = rgb555_to_rgb32
  1883. },
  1884. [PIX_FMT_YUV420P] = {
  1885. .convert = rgb555_to_yuv420p
  1886. },
  1887. [PIX_FMT_GRAY8] = {
  1888. .convert = rgb555_to_gray
  1889. },
  1890. },
  1891. [PIX_FMT_RGB565] = {
  1892. [PIX_FMT_RGB32] = {
  1893. .convert = rgb565_to_rgb32
  1894. },
  1895. [PIX_FMT_RGB24] = {
  1896. .convert = rgb565_to_rgb24
  1897. },
  1898. [PIX_FMT_YUV420P] = {
  1899. .convert = rgb565_to_yuv420p
  1900. },
  1901. [PIX_FMT_GRAY8] = {
  1902. .convert = rgb565_to_gray
  1903. },
  1904. },
  1905. [PIX_FMT_GRAY16BE] = {
  1906. [PIX_FMT_GRAY8] = {
  1907. .convert = gray16be_to_gray
  1908. },
  1909. [PIX_FMT_GRAY16LE] = {
  1910. .convert = gray16_to_gray16
  1911. },
  1912. },
  1913. [PIX_FMT_GRAY16LE] = {
  1914. [PIX_FMT_GRAY8] = {
  1915. .convert = gray16le_to_gray
  1916. },
  1917. [PIX_FMT_GRAY16BE] = {
  1918. .convert = gray16_to_gray16
  1919. },
  1920. },
  1921. [PIX_FMT_GRAY8] = {
  1922. [PIX_FMT_RGB555] = {
  1923. .convert = gray_to_rgb555
  1924. },
  1925. [PIX_FMT_RGB565] = {
  1926. .convert = gray_to_rgb565
  1927. },
  1928. [PIX_FMT_RGB24] = {
  1929. .convert = gray_to_rgb24
  1930. },
  1931. [PIX_FMT_BGR24] = {
  1932. .convert = gray_to_bgr24
  1933. },
  1934. [PIX_FMT_RGB32] = {
  1935. .convert = gray_to_rgb32
  1936. },
  1937. [PIX_FMT_MONOWHITE] = {
  1938. .convert = gray_to_monowhite
  1939. },
  1940. [PIX_FMT_MONOBLACK] = {
  1941. .convert = gray_to_monoblack
  1942. },
  1943. [PIX_FMT_GRAY16LE] = {
  1944. .convert = gray_to_gray16
  1945. },
  1946. [PIX_FMT_GRAY16BE] = {
  1947. .convert = gray_to_gray16
  1948. },
  1949. },
  1950. [PIX_FMT_MONOWHITE] = {
  1951. [PIX_FMT_GRAY8] = {
  1952. .convert = monowhite_to_gray
  1953. },
  1954. },
  1955. [PIX_FMT_MONOBLACK] = {
  1956. [PIX_FMT_GRAY8] = {
  1957. .convert = monoblack_to_gray
  1958. },
  1959. },
  1960. [PIX_FMT_PAL8] = {
  1961. [PIX_FMT_RGB555] = {
  1962. .convert = pal8_to_rgb555
  1963. },
  1964. [PIX_FMT_RGB565] = {
  1965. .convert = pal8_to_rgb565
  1966. },
  1967. [PIX_FMT_BGR24] = {
  1968. .convert = pal8_to_bgr24
  1969. },
  1970. [PIX_FMT_RGB24] = {
  1971. .convert = pal8_to_rgb24
  1972. },
  1973. [PIX_FMT_RGB32] = {
  1974. .convert = pal8_to_rgb32
  1975. },
  1976. },
  1977. [PIX_FMT_UYYVYY411] = {
  1978. [PIX_FMT_YUV411P] = {
  1979. .convert = uyyvyy411_to_yuv411p,
  1980. },
  1981. },
  1982. };
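/* Allocate an image buffer for the given pixel format and dimensions and set up 'picture' to point into it. Returns 0 on success, -1 on failure. */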
  1983. int avpicture_alloc(AVPicture *picture,
  1984. int pix_fmt, int width, int height)
  1985. {
  1986. int size;
  1987. void *ptr;
  1988. size = avpicture_get_size(pix_fmt, width, height);
  1989. if(size<0)
  1990. goto fail;
  1991. ptr = av_malloc(size);
  1992. if (!ptr)
  1993. goto fail;
  1994. avpicture_fill(picture, ptr, pix_fmt, width, height);
  1995. return 0;
  1996. fail:
  1997. memset(picture, 0, sizeof(AVPicture));
  1998. return -1;
  1999. }
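/* Free a picture allocated with avpicture_alloc(): avpicture_fill() points
   every plane into the single block returned by av_malloc(), so freeing
   data[0] releases the whole buffer. */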
  2000. void avpicture_free(AVPicture *picture)
  2001. {
  2002. av_free(picture->data[0]);
  2003. }
  2004. /* return true if yuv planar */
  2005. static inline int is_yuv_planar(const PixFmtInfo *ps)
  2006. {
  2007. return (ps->color_type == FF_COLOR_YUV ||
  2008. ps->color_type == FF_COLOR_YUV_JPEG) &&
  2009. ps->pixel_type == FF_PIXEL_PLANAR;
  2010. }
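/* Note: av_picture_crop() only offsets the plane pointers and copies the
   linesizes; no pixel data is moved. It is limited to planar YUV formats,
   and left_band is truncated to the chroma grid by the subsampling shifts. */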
  2011. int av_picture_crop(AVPicture *dst, const AVPicture *src,
  2012. int pix_fmt, int top_band, int left_band)
  2013. {
  2014. int y_shift;
  2015. int x_shift;
  2016. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || !is_yuv_planar(&pix_fmt_info[pix_fmt]))
  2017. return -1;
  2018. y_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  2019. x_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  2020. dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
  2021. dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift);
  2022. dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift);
  2023. dst->linesize[0] = src->linesize[0];
  2024. dst->linesize[1] = src->linesize[1];
  2025. dst->linesize[2] = src->linesize[2];
  2026. return 0;
  2027. }
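/* av_picture_pad(): surround the picture with padtop/padbottom/padleft/padright
   borders filled with color[] (one value per plane); when src is non-NULL its
   lines are copied into the interior of dst. Planar YUV formats only. */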
  2028. int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
  2029. int pix_fmt, int padtop, int padbottom, int padleft, int padright,
  2030. int *color)
  2031. {
  2032. uint8_t *optr;
  2033. int y_shift;
  2034. int x_shift;
  2035. int yheight;
  2036. int i, y;
  2037. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB ||
  2038. !is_yuv_planar(&pix_fmt_info[pix_fmt])) return -1;
  2039. for (i = 0; i < 3; i++) {
  2040. x_shift = i ? pix_fmt_info[pix_fmt].x_chroma_shift : 0;
  2041. y_shift = i ? pix_fmt_info[pix_fmt].y_chroma_shift : 0;
  2042. if (padtop || padleft) {
  2043. memset(dst->data[i], color[i],
  2044. dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift));
  2045. }
  2046. if (padleft || padright) {
  2047. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  2048. (dst->linesize[i] - (padright >> x_shift));
  2049. yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
  2050. for (y = 0; y < yheight; y++) {
  2051. memset(optr, color[i], (padleft + padright) >> x_shift);
  2052. optr += dst->linesize[i];
  2053. }
  2054. }
  2055. if (src) { /* first line */
  2056. uint8_t *iptr = src->data[i];
  2057. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  2058. (padleft >> x_shift);
  2059. memcpy(optr, iptr, src->linesize[i]);
  2060. iptr += src->linesize[i];
  2061. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  2062. (dst->linesize[i] - (padright >> x_shift));
  2063. yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
  2064. for (y = 0; y < yheight; y++) {
  2065. memset(optr, color[i], (padleft + padright) >> x_shift);
  2066. memcpy(optr + ((padleft + padright) >> x_shift), iptr,
  2067. src->linesize[i]);
  2068. iptr += src->linesize[i];
  2069. optr += dst->linesize[i];
  2070. }
  2071. }
  2072. if (padbottom || padright) {
  2073. optr = dst->data[i] + dst->linesize[i] *
  2074. ((height - padbottom) >> y_shift) - (padright >> x_shift);
  2075. memset(optr, color[i],dst->linesize[i] *
  2076. (padbottom >> y_shift) + (padright >> x_shift));
  2077. }
  2078. }
  2079. return 0;
  2080. }
  2081. #if LIBAVCODEC_VERSION_INT < ((52<<16)+(0<<8)+0)
  2082. void img_copy(AVPicture *dst, const AVPicture *src,
  2083. int pix_fmt, int width, int height)
  2084. {
  2085. av_picture_copy(dst, src, pix_fmt, width, height);
  2086. }
  2087. int img_crop(AVPicture *dst, const AVPicture *src,
  2088. int pix_fmt, int top_band, int left_band)
  2089. {
  2090. return av_picture_crop(dst, src, pix_fmt, top_band, left_band);
  2091. }
  2092. int img_pad(AVPicture *dst, const AVPicture *src, int height, int width,
  2093. int pix_fmt, int padtop, int padbottom, int padleft, int padright,
  2094. int *color)
  2095. {
  2096. return av_picture_pad(dst, src, height, width, pix_fmt, padtop, padbottom, padleft, padright, color);
  2097. }
  2098. #endif
  2099. #ifndef CONFIG_SWSCALER
  2100. /* XXX: always use linesize. Return -1 if not supported */
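/* Conversion strategy: try a direct routine from convert_table[] first, then
   the special gray<->YUV cases, then a pure chroma resize between planar YUV
   formats; anything else is converted in two passes through an intermediate
   format (YUV422P, YUV411P, GRAY8, YUV[J]444P or RGB24/RGB32). */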
  2101. int img_convert(AVPicture *dst, int dst_pix_fmt,
  2102. const AVPicture *src, int src_pix_fmt,
  2103. int src_width, int src_height)
  2104. {
  2105. static int inited;
  2106. int i, ret, dst_width, dst_height, int_pix_fmt;
  2107. const PixFmtInfo *src_pix, *dst_pix;
  2108. const ConvertEntry *ce;
  2109. AVPicture tmp1, *tmp = &tmp1;
  2110. if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
  2111. dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
  2112. return -1;
  2113. if (src_width <= 0 || src_height <= 0)
  2114. return 0;
  2115. if (!inited) {
  2116. inited = 1;
  2117. img_convert_init();
  2118. }
  2119. dst_width = src_width;
  2120. dst_height = src_height;
  2121. dst_pix = &pix_fmt_info[dst_pix_fmt];
  2122. src_pix = &pix_fmt_info[src_pix_fmt];
  2123. if (src_pix_fmt == dst_pix_fmt) {
  2124. /* no conversion needed: just copy */
  2125. av_picture_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
  2126. return 0;
  2127. }
  2128. ce = &convert_table[src_pix_fmt][dst_pix_fmt];
  2129. if (ce->convert) {
  2130. /* specific conversion routine */
  2131. ce->convert(dst, src, dst_width, dst_height);
  2132. return 0;
  2133. }
  2134. /* gray to YUV */
  2135. if (is_yuv_planar(dst_pix) &&
  2136. src_pix_fmt == PIX_FMT_GRAY8) {
  2137. int w, h, y;
  2138. uint8_t *d;
  2139. if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
  2140. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2141. src->data[0], src->linesize[0],
  2142. dst_width, dst_height);
  2143. } else {
  2144. img_apply_table(dst->data[0], dst->linesize[0],
  2145. src->data[0], src->linesize[0],
  2146. dst_width, dst_height,
  2147. y_jpeg_to_ccir);
  2148. }
  2149. /* fill U and V with 128 */
  2150. w = dst_width;
  2151. h = dst_height;
  2152. w >>= dst_pix->x_chroma_shift;
  2153. h >>= dst_pix->y_chroma_shift;
  2154. for(i = 1; i <= 2; i++) {
  2155. d = dst->data[i];
  2156. for(y = 0; y< h; y++) {
  2157. memset(d, 128, w);
  2158. d += dst->linesize[i];
  2159. }
  2160. }
  2161. return 0;
  2162. }
  2163. /* YUV to gray */
  2164. if (is_yuv_planar(src_pix) &&
  2165. dst_pix_fmt == PIX_FMT_GRAY8) {
  2166. if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
  2167. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2168. src->data[0], src->linesize[0],
  2169. dst_width, dst_height);
  2170. } else {
  2171. img_apply_table(dst->data[0], dst->linesize[0],
  2172. src->data[0], src->linesize[0],
  2173. dst_width, dst_height,
  2174. y_ccir_to_jpeg);
  2175. }
  2176. return 0;
  2177. }
  2178. /* YUV to YUV planar */
  2179. if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
  2180. int x_shift, y_shift, w, h, xy_shift;
  2181. void (*resize_func)(uint8_t *dst, int dst_wrap,
  2182. const uint8_t *src, int src_wrap,
  2183. int width, int height);
        /* compute the chroma plane size of the more subsampled of the two formats */
  2185. w = dst_width;
  2186. h = dst_height;
  2187. if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
  2188. w >>= dst_pix->x_chroma_shift;
  2189. else
  2190. w >>= src_pix->x_chroma_shift;
  2191. if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
  2192. h >>= dst_pix->y_chroma_shift;
  2193. else
  2194. h >>= src_pix->y_chroma_shift;
  2195. x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
  2196. y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
  2197. xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
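        /* xy_shift packs the two signed shift differences into nibbles, e.g.
           YUV444P -> YUV420P gives x_shift = y_shift = 1, i.e. 0x11 (ff_shrink22),
           while YUV420P -> YUV444P gives x_shift = y_shift = -1, which wraps
           to 0xff (grow22). */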
        /* filters must exist at least for conversion from and to the
           YUV444 format */
  2200. switch(xy_shift) {
  2201. case 0x00:
  2202. resize_func = ff_img_copy_plane;
  2203. break;
  2204. case 0x10:
  2205. resize_func = shrink21;
  2206. break;
  2207. case 0x20:
  2208. resize_func = shrink41;
  2209. break;
  2210. case 0x01:
  2211. resize_func = shrink12;
  2212. break;
  2213. case 0x11:
  2214. resize_func = ff_shrink22;
  2215. break;
  2216. case 0x22:
  2217. resize_func = ff_shrink44;
  2218. break;
  2219. case 0xf0:
  2220. resize_func = grow21;
  2221. break;
  2222. case 0x0f:
  2223. resize_func = grow12;
  2224. break;
  2225. case 0xe0:
  2226. resize_func = grow41;
  2227. break;
  2228. case 0xff:
  2229. resize_func = grow22;
  2230. break;
  2231. case 0xee:
  2232. resize_func = grow44;
  2233. break;
  2234. case 0xf1:
  2235. resize_func = conv411;
  2236. break;
  2237. default:
  2238. /* currently not handled */
  2239. goto no_chroma_filter;
  2240. }
  2241. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2242. src->data[0], src->linesize[0],
  2243. dst_width, dst_height);
  2244. for(i = 1;i <= 2; i++)
  2245. resize_func(dst->data[i], dst->linesize[i],
  2246. src->data[i], src->linesize[i],
  2247. dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
  2248. /* if yuv color space conversion is needed, we do it here on
  2249. the destination image */
  2250. if (dst_pix->color_type != src_pix->color_type) {
  2251. const uint8_t *y_table, *c_table;
  2252. if (dst_pix->color_type == FF_COLOR_YUV) {
  2253. y_table = y_jpeg_to_ccir;
  2254. c_table = c_jpeg_to_ccir;
  2255. } else {
  2256. y_table = y_ccir_to_jpeg;
  2257. c_table = c_ccir_to_jpeg;
  2258. }
  2259. img_apply_table(dst->data[0], dst->linesize[0],
  2260. dst->data[0], dst->linesize[0],
  2261. dst_width, dst_height,
  2262. y_table);
  2263. for(i = 1;i <= 2; i++)
  2264. img_apply_table(dst->data[i], dst->linesize[i],
  2265. dst->data[i], dst->linesize[i],
  2266. dst_width>>dst_pix->x_chroma_shift,
  2267. dst_height>>dst_pix->y_chroma_shift,
  2268. c_table);
  2269. }
  2270. return 0;
  2271. }
  2272. no_chroma_filter:
  2273. /* try to use an intermediate format */
  2274. if (src_pix_fmt == PIX_FMT_YUYV422 ||
  2275. dst_pix_fmt == PIX_FMT_YUYV422) {
  2276. /* specific case: convert to YUV422P first */
  2277. int_pix_fmt = PIX_FMT_YUV422P;
  2278. } else if (src_pix_fmt == PIX_FMT_UYVY422 ||
  2279. dst_pix_fmt == PIX_FMT_UYVY422) {
  2280. /* specific case: convert to YUV422P first */
  2281. int_pix_fmt = PIX_FMT_YUV422P;
  2282. } else if (src_pix_fmt == PIX_FMT_UYYVYY411 ||
  2283. dst_pix_fmt == PIX_FMT_UYYVYY411) {
  2284. /* specific case: convert to YUV411P first */
  2285. int_pix_fmt = PIX_FMT_YUV411P;
  2286. } else if ((src_pix->color_type == FF_COLOR_GRAY &&
  2287. src_pix_fmt != PIX_FMT_GRAY8) ||
  2288. (dst_pix->color_type == FF_COLOR_GRAY &&
  2289. dst_pix_fmt != PIX_FMT_GRAY8)) {
  2290. /* gray8 is the normalized format */
  2291. int_pix_fmt = PIX_FMT_GRAY8;
  2292. } else if ((is_yuv_planar(src_pix) &&
  2293. src_pix_fmt != PIX_FMT_YUV444P &&
  2294. src_pix_fmt != PIX_FMT_YUVJ444P)) {
  2295. /* yuv444 is the normalized format */
  2296. if (src_pix->color_type == FF_COLOR_YUV_JPEG)
  2297. int_pix_fmt = PIX_FMT_YUVJ444P;
  2298. else
  2299. int_pix_fmt = PIX_FMT_YUV444P;
  2300. } else if ((is_yuv_planar(dst_pix) &&
  2301. dst_pix_fmt != PIX_FMT_YUV444P &&
  2302. dst_pix_fmt != PIX_FMT_YUVJ444P)) {
  2303. /* yuv444 is the normalized format */
  2304. if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
  2305. int_pix_fmt = PIX_FMT_YUVJ444P;
  2306. else
  2307. int_pix_fmt = PIX_FMT_YUV444P;
  2308. } else {
  2309. /* the two formats are rgb or gray8 or yuv[j]444p */
  2310. if (src_pix->is_alpha && dst_pix->is_alpha)
  2311. int_pix_fmt = PIX_FMT_RGB32;
  2312. else
  2313. int_pix_fmt = PIX_FMT_RGB24;
  2314. }
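    /* two-pass conversion through the intermediate format: src -> tmp -> dst */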
  2315. if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2316. return -1;
  2317. ret = -1;
  2318. if (img_convert(tmp, int_pix_fmt,
  2319. src, src_pix_fmt, src_width, src_height) < 0)
  2320. goto fail1;
  2321. if (img_convert(dst, dst_pix_fmt,
  2322. tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2323. goto fail1;
  2324. ret = 0;
  2325. fail1:
  2326. avpicture_free(tmp);
  2327. return ret;
  2328. }
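/* Illustrative usage (a minimal sketch, not part of the library): convert a
 * YUV420P picture into a freshly allocated RGB24 picture and release it
 * afterwards. 'src' (a const AVPicture * holding YUV420P data), 'width' and
 * 'height' are assumed to be provided by the caller.
 *
 *     AVPicture rgb;
 *     if (avpicture_alloc(&rgb, PIX_FMT_RGB24, width, height) < 0)
 *         return -1;
 *     if (img_convert(&rgb, PIX_FMT_RGB24,
 *                     src, PIX_FMT_YUV420P, width, height) < 0) {
 *         avpicture_free(&rgb);
 *         return -1;
 *     }
 *     ...use rgb.data[0] and rgb.linesize[0]...
 *     avpicture_free(&rgb);
 */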
  2329. #endif
/* NOTE: we scan all the pixels to get exact information */
  2331. static int get_alpha_info_pal8(const AVPicture *src, int width, int height)
  2332. {
  2333. const unsigned char *p;
  2334. int src_wrap, ret, x, y;
  2335. unsigned int a;
  2336. uint32_t *palette = (uint32_t *)src->data[1];
  2337. p = src->data[0];
  2338. src_wrap = src->linesize[0] - width;
  2339. ret = 0;
  2340. for(y=0;y<height;y++) {
  2341. for(x=0;x<width;x++) {
  2342. a = palette[p[0]] >> 24;
  2343. if (a == 0x00) {
  2344. ret |= FF_ALPHA_TRANSP;
  2345. } else if (a != 0xff) {
  2346. ret |= FF_ALPHA_SEMI_TRANSP;
  2347. }
  2348. p++;
  2349. }
  2350. p += src_wrap;
  2351. }
  2352. return ret;
  2353. }
  2354. int img_get_alpha_info(const AVPicture *src,
  2355. int pix_fmt, int width, int height)
  2356. {
    const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
    int ret;

    /* no alpha can be represented in the format */
  2361. if (!pf->is_alpha)
  2362. return 0;
  2363. switch(pix_fmt) {
  2364. case PIX_FMT_RGB32:
  2365. ret = get_alpha_info_rgb32(src, width, height);
  2366. break;
  2367. case PIX_FMT_PAL8:
  2368. ret = get_alpha_info_pal8(src, width, height);
  2369. break;
  2370. default:
        /* unknown format: report both kinds of transparency as possible */
  2372. ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
  2373. break;
  2374. }
  2375. return ret;
  2376. }
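/* Interpreting the result (a sketch; FF_ALPHA_TRANSP and FF_ALPHA_SEMI_TRANSP
 * come from avcodec.h, 'pic', 'width' and 'height' are assumed):
 *
 *     int info = img_get_alpha_info(&pic, PIX_FMT_RGB32, width, height);
 *     if (info & FF_ALPHA_TRANSP)
 *         ;   // at least one fully transparent pixel (alpha == 0x00)
 *     if (info & FF_ALPHA_SEMI_TRANSP)
 *         ;   // at least one partially transparent pixel
 */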
  2377. #ifdef HAVE_MMX
  2378. #define DEINT_INPLACE_LINE_LUM \
  2379. movd_m2r(lum_m4[0],mm0);\
  2380. movd_m2r(lum_m3[0],mm1);\
  2381. movd_m2r(lum_m2[0],mm2);\
  2382. movd_m2r(lum_m1[0],mm3);\
  2383. movd_m2r(lum[0],mm4);\
  2384. punpcklbw_r2r(mm7,mm0);\
  2385. movd_r2m(mm2,lum_m4[0]);\
  2386. punpcklbw_r2r(mm7,mm1);\
  2387. punpcklbw_r2r(mm7,mm2);\
  2388. punpcklbw_r2r(mm7,mm3);\
  2389. punpcklbw_r2r(mm7,mm4);\
  2390. paddw_r2r(mm3,mm1);\
  2391. psllw_i2r(1,mm2);\
  2392. paddw_r2r(mm4,mm0);\
  2393. psllw_i2r(2,mm1);\
  2394. paddw_r2r(mm6,mm2);\
  2395. paddw_r2r(mm2,mm1);\
  2396. psubusw_r2r(mm0,mm1);\
  2397. psrlw_i2r(3,mm1);\
  2398. packuswb_r2r(mm7,mm1);\
  2399. movd_r2m(mm1,lum_m2[0]);
  2400. #define DEINT_LINE_LUM \
  2401. movd_m2r(lum_m4[0],mm0);\
  2402. movd_m2r(lum_m3[0],mm1);\
  2403. movd_m2r(lum_m2[0],mm2);\
  2404. movd_m2r(lum_m1[0],mm3);\
  2405. movd_m2r(lum[0],mm4);\
  2406. punpcklbw_r2r(mm7,mm0);\
  2407. punpcklbw_r2r(mm7,mm1);\
  2408. punpcklbw_r2r(mm7,mm2);\
  2409. punpcklbw_r2r(mm7,mm3);\
  2410. punpcklbw_r2r(mm7,mm4);\
  2411. paddw_r2r(mm3,mm1);\
  2412. psllw_i2r(1,mm2);\
  2413. paddw_r2r(mm4,mm0);\
  2414. psllw_i2r(2,mm1);\
  2415. paddw_r2r(mm6,mm2);\
  2416. paddw_r2r(mm2,mm1);\
  2417. psubusw_r2r(mm0,mm1);\
  2418. psrlw_i2r(3,mm1);\
  2419. packuswb_r2r(mm7,mm1);\
  2420. movd_r2m(mm1,dst[0]);
  2421. #endif
  2422. /* filter parameters: [-1 4 2 4 -1] // 8 */
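/* Worked example: on a flat area where all five input lines are 100,
   sum = -100 + 4*100 + 2*100 + 4*100 - 100 = 800 and (800 + 4) >> 3 = 100,
   so uniform content passes through unchanged; the +4 rounds before the
   division by 8. */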
  2423. static void deinterlace_line(uint8_t *dst,
  2424. const uint8_t *lum_m4, const uint8_t *lum_m3,
  2425. const uint8_t *lum_m2, const uint8_t *lum_m1,
  2426. const uint8_t *lum,
  2427. int size)
  2428. {
  2429. #ifndef HAVE_MMX
  2430. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  2431. int sum;
  2432. for(;size > 0;size--) {
  2433. sum = -lum_m4[0];
  2434. sum += lum_m3[0] << 2;
  2435. sum += lum_m2[0] << 1;
  2436. sum += lum_m1[0] << 2;
  2437. sum += -lum[0];
  2438. dst[0] = cm[(sum + 4) >> 3];
  2439. lum_m4++;
  2440. lum_m3++;
  2441. lum_m2++;
  2442. lum_m1++;
  2443. lum++;
  2444. dst++;
  2445. }
  2446. #else
  2447. {
  2448. mmx_t rounder;
  2449. rounder.uw[0]=4;
  2450. rounder.uw[1]=4;
  2451. rounder.uw[2]=4;
  2452. rounder.uw[3]=4;
  2453. pxor_r2r(mm7,mm7);
  2454. movq_m2r(rounder,mm6);
  2455. }
  2456. for (;size > 3; size-=4) {
  2457. DEINT_LINE_LUM
  2458. lum_m4+=4;
  2459. lum_m3+=4;
  2460. lum_m2+=4;
  2461. lum_m1+=4;
  2462. lum+=4;
  2463. dst+=4;
  2464. }
  2465. #endif
  2466. }
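/* In-place variant: the filtered value is written back into lum_m2, and the
   original lum_m2 line is first saved into lum_m4 so that the caller can keep
   an unfiltered copy of that line for the next pass. */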
  2467. static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
  2468. int size)
  2469. {
  2470. #ifndef HAVE_MMX
  2471. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  2472. int sum;
  2473. for(;size > 0;size--) {
  2474. sum = -lum_m4[0];
  2475. sum += lum_m3[0] << 2;
  2476. sum += lum_m2[0] << 1;
  2477. lum_m4[0]=lum_m2[0];
  2478. sum += lum_m1[0] << 2;
  2479. sum += -lum[0];
  2480. lum_m2[0] = cm[(sum + 4) >> 3];
  2481. lum_m4++;
  2482. lum_m3++;
  2483. lum_m2++;
  2484. lum_m1++;
  2485. lum++;
  2486. }
  2487. #else
  2488. {
  2489. mmx_t rounder;
  2490. rounder.uw[0]=4;
  2491. rounder.uw[1]=4;
  2492. rounder.uw[2]=4;
  2493. rounder.uw[3]=4;
  2494. pxor_r2r(mm7,mm7);
  2495. movq_m2r(rounder,mm6);
  2496. }
  2497. for (;size > 3; size-=4) {
  2498. DEINT_INPLACE_LINE_LUM
  2499. lum_m4+=4;
  2500. lum_m3+=4;
  2501. lum_m2+=4;
  2502. lum_m1+=4;
  2503. lum+=4;
  2504. }
  2505. #endif
  2506. }
/* Deinterlacing: 2 temporal taps, 3 spatial taps linear filter. The
   top field is copied as is, but the bottom field is deinterlaced
   against the top field. */
  2510. static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
  2511. const uint8_t *src1, int src_wrap,
  2512. int width, int height)
  2513. {
  2514. const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
  2515. int y;
  2516. src_m2 = src1;
  2517. src_m1 = src1;
  2518. src_0=&src_m1[src_wrap];
  2519. src_p1=&src_0[src_wrap];
  2520. src_p2=&src_p1[src_wrap];
  2521. for(y=0;y<(height-2);y+=2) {
  2522. memcpy(dst,src_m1,width);
  2523. dst += dst_wrap;
  2524. deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
  2525. src_m2 = src_0;
  2526. src_m1 = src_p1;
  2527. src_0 = src_p2;
  2528. src_p1 += 2*src_wrap;
  2529. src_p2 += 2*src_wrap;
  2530. dst += dst_wrap;
  2531. }
  2532. memcpy(dst,src_m1,width);
  2533. dst += dst_wrap;
  2534. /* do last line */
  2535. deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
  2536. }
  2537. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  2538. int width, int height)
  2539. {
  2540. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  2541. int y;
  2542. uint8_t *buf;
    buf = (uint8_t*)av_malloc(width);
    if (!buf)
        return; /* allocation failed: leave the bottom field unfiltered */
  2544. src_m1 = src1;
  2545. memcpy(buf,src_m1,width);
  2546. src_0=&src_m1[src_wrap];
  2547. src_p1=&src_0[src_wrap];
  2548. src_p2=&src_p1[src_wrap];
  2549. for(y=0;y<(height-2);y+=2) {
  2550. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  2551. src_m1 = src_p1;
  2552. src_0 = src_p2;
  2553. src_p1 += 2*src_wrap;
  2554. src_p2 += 2*src_wrap;
  2555. }
  2556. /* do last line */
  2557. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  2558. av_free(buf);
  2559. }
  2560. int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
  2561. int pix_fmt, int width, int height)
  2562. {
  2563. int i;
  2564. if (pix_fmt != PIX_FMT_YUV420P &&
  2565. pix_fmt != PIX_FMT_YUV422P &&
  2566. pix_fmt != PIX_FMT_YUV444P &&
  2567. pix_fmt != PIX_FMT_YUV411P)
  2568. return -1;
  2569. if ((width & 3) != 0 || (height & 3) != 0)
  2570. return -1;
  2571. for(i=0;i<3;i++) {
  2572. if (i == 1) {
  2573. switch(pix_fmt) {
  2574. case PIX_FMT_YUV420P:
  2575. width >>= 1;
  2576. height >>= 1;
  2577. break;
  2578. case PIX_FMT_YUV422P:
  2579. width >>= 1;
  2580. break;
  2581. case PIX_FMT_YUV411P:
  2582. width >>= 2;
  2583. break;
  2584. default:
  2585. break;
  2586. }
  2587. }
  2588. if (src == dst) {
  2589. deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i],
  2590. width, height);
  2591. } else {
  2592. deinterlace_bottom_field(dst->data[i],dst->linesize[i],
  2593. src->data[i], src->linesize[i],
  2594. width, height);
  2595. }
  2596. }
  2597. #ifdef HAVE_MMX
  2598. emms();
  2599. #endif
  2600. return 0;
  2601. }
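/* Illustrative usage (a minimal sketch): deinterlace a picture in place;
 * src == dst is explicitly supported above. 'pic', 'pix_fmt', 'width' and
 * 'height' are assumed to come from the caller, and width/height must be
 * multiples of 4 for the call to succeed.
 *
 *     if (avpicture_deinterlace(&pic, &pic, pix_fmt, width, height) < 0)
 *         return -1;
 */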