You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2868 lines
77KB

  1. /*
  2. * Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file imgconvert.c
  23. * misc image conversion routines
  24. */
  25. /* TODO:
  26. * - write 'ffimg' program to test all the image related stuff
  27. * - move all api to slice based system
  28. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  29. */
  30. #include "avcodec.h"
  31. #include "dsputil.h"
  32. #include "colorspace.h"
  33. #ifdef HAVE_MMX
  34. #include "i386/mmx.h"
  35. #endif
  36. #define xglue(x, y) x ## y
  37. #define glue(x, y) xglue(x, y)
  38. #define FF_COLOR_RGB 0 /**< RGB color space */
  39. #define FF_COLOR_GRAY 1 /**< gray color space */
  40. #define FF_COLOR_YUV 2 /**< YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
  41. #define FF_COLOR_YUV_JPEG 3 /**< YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
  42. #define FF_PIXEL_PLANAR 0 /**< each channel has one component in AVPicture */
  43. #define FF_PIXEL_PACKED 1 /**< only one components containing all the channels */
  44. #define FF_PIXEL_PALETTE 2 /**< one components containing indexes for a palette */
/** Static per-pixel-format description consulted by the conversion helpers below. */
typedef struct PixFmtInfo {
    const char *name;       /**< canonical lowercase format name (NULL for table gaps) */
    uint8_t nb_channels;    /**< number of channels (including alpha) */
    uint8_t color_type;     /**< color type (see FF_COLOR_xxx constants) */
    uint8_t pixel_type;     /**< pixel storage type (see FF_PIXEL_xxx constants) */
    uint8_t is_alpha : 1;   /**< true if alpha can be specified */
    uint8_t x_chroma_shift; /**< X chroma subsampling factor is 2 ^ shift */
    uint8_t y_chroma_shift; /**< Y chroma subsampling factor is 2 ^ shift */
    uint8_t depth;          /**< bit depth of the color components */
} PixFmtInfo;
  55. /* this table gives more information about formats */
  56. static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
  57. /* YUV formats */
  58. [PIX_FMT_YUV420P] = {
  59. .name = "yuv420p",
  60. .nb_channels = 3,
  61. .color_type = FF_COLOR_YUV,
  62. .pixel_type = FF_PIXEL_PLANAR,
  63. .depth = 8,
  64. .x_chroma_shift = 1, .y_chroma_shift = 1,
  65. },
  66. [PIX_FMT_YUV422P] = {
  67. .name = "yuv422p",
  68. .nb_channels = 3,
  69. .color_type = FF_COLOR_YUV,
  70. .pixel_type = FF_PIXEL_PLANAR,
  71. .depth = 8,
  72. .x_chroma_shift = 1, .y_chroma_shift = 0,
  73. },
  74. [PIX_FMT_YUV444P] = {
  75. .name = "yuv444p",
  76. .nb_channels = 3,
  77. .color_type = FF_COLOR_YUV,
  78. .pixel_type = FF_PIXEL_PLANAR,
  79. .depth = 8,
  80. .x_chroma_shift = 0, .y_chroma_shift = 0,
  81. },
  82. [PIX_FMT_YUYV422] = {
  83. .name = "yuyv422",
  84. .nb_channels = 1,
  85. .color_type = FF_COLOR_YUV,
  86. .pixel_type = FF_PIXEL_PACKED,
  87. .depth = 8,
  88. .x_chroma_shift = 1, .y_chroma_shift = 0,
  89. },
  90. [PIX_FMT_UYVY422] = {
  91. .name = "uyvy422",
  92. .nb_channels = 1,
  93. .color_type = FF_COLOR_YUV,
  94. .pixel_type = FF_PIXEL_PACKED,
  95. .depth = 8,
  96. .x_chroma_shift = 1, .y_chroma_shift = 0,
  97. },
  98. [PIX_FMT_YUV410P] = {
  99. .name = "yuv410p",
  100. .nb_channels = 3,
  101. .color_type = FF_COLOR_YUV,
  102. .pixel_type = FF_PIXEL_PLANAR,
  103. .depth = 8,
  104. .x_chroma_shift = 2, .y_chroma_shift = 2,
  105. },
  106. [PIX_FMT_YUV411P] = {
  107. .name = "yuv411p",
  108. .nb_channels = 3,
  109. .color_type = FF_COLOR_YUV,
  110. .pixel_type = FF_PIXEL_PLANAR,
  111. .depth = 8,
  112. .x_chroma_shift = 2, .y_chroma_shift = 0,
  113. },
  114. [PIX_FMT_YUV440P] = {
  115. .name = "yuv440p",
  116. .nb_channels = 3,
  117. .color_type = FF_COLOR_YUV,
  118. .pixel_type = FF_PIXEL_PLANAR,
  119. .depth = 8,
  120. .x_chroma_shift = 0, .y_chroma_shift = 1,
  121. },
  122. /* YUV formats with alpha plane */
  123. [PIX_FMT_YUVA420P] = {
  124. .name = "yuva420p",
  125. .nb_channels = 4,
  126. .color_type = FF_COLOR_YUV,
  127. .pixel_type = FF_PIXEL_PLANAR,
  128. .depth = 8,
  129. .x_chroma_shift = 1, .y_chroma_shift = 1,
  130. },
  131. /* JPEG YUV */
  132. [PIX_FMT_YUVJ420P] = {
  133. .name = "yuvj420p",
  134. .nb_channels = 3,
  135. .color_type = FF_COLOR_YUV_JPEG,
  136. .pixel_type = FF_PIXEL_PLANAR,
  137. .depth = 8,
  138. .x_chroma_shift = 1, .y_chroma_shift = 1,
  139. },
  140. [PIX_FMT_YUVJ422P] = {
  141. .name = "yuvj422p",
  142. .nb_channels = 3,
  143. .color_type = FF_COLOR_YUV_JPEG,
  144. .pixel_type = FF_PIXEL_PLANAR,
  145. .depth = 8,
  146. .x_chroma_shift = 1, .y_chroma_shift = 0,
  147. },
  148. [PIX_FMT_YUVJ444P] = {
  149. .name = "yuvj444p",
  150. .nb_channels = 3,
  151. .color_type = FF_COLOR_YUV_JPEG,
  152. .pixel_type = FF_PIXEL_PLANAR,
  153. .depth = 8,
  154. .x_chroma_shift = 0, .y_chroma_shift = 0,
  155. },
  156. [PIX_FMT_YUVJ440P] = {
  157. .name = "yuvj440p",
  158. .nb_channels = 3,
  159. .color_type = FF_COLOR_YUV_JPEG,
  160. .pixel_type = FF_PIXEL_PLANAR,
  161. .depth = 8,
  162. .x_chroma_shift = 0, .y_chroma_shift = 1,
  163. },
  164. /* RGB formats */
  165. [PIX_FMT_RGB24] = {
  166. .name = "rgb24",
  167. .nb_channels = 3,
  168. .color_type = FF_COLOR_RGB,
  169. .pixel_type = FF_PIXEL_PACKED,
  170. .depth = 8,
  171. .x_chroma_shift = 0, .y_chroma_shift = 0,
  172. },
  173. [PIX_FMT_BGR24] = {
  174. .name = "bgr24",
  175. .nb_channels = 3,
  176. .color_type = FF_COLOR_RGB,
  177. .pixel_type = FF_PIXEL_PACKED,
  178. .depth = 8,
  179. .x_chroma_shift = 0, .y_chroma_shift = 0,
  180. },
  181. [PIX_FMT_RGB32] = {
  182. .name = "rgb32",
  183. .nb_channels = 4, .is_alpha = 1,
  184. .color_type = FF_COLOR_RGB,
  185. .pixel_type = FF_PIXEL_PACKED,
  186. .depth = 8,
  187. .x_chroma_shift = 0, .y_chroma_shift = 0,
  188. },
  189. [PIX_FMT_RGB565] = {
  190. .name = "rgb565",
  191. .nb_channels = 3,
  192. .color_type = FF_COLOR_RGB,
  193. .pixel_type = FF_PIXEL_PACKED,
  194. .depth = 5,
  195. .x_chroma_shift = 0, .y_chroma_shift = 0,
  196. },
  197. [PIX_FMT_RGB555] = {
  198. .name = "rgb555",
  199. .nb_channels = 3,
  200. .color_type = FF_COLOR_RGB,
  201. .pixel_type = FF_PIXEL_PACKED,
  202. .depth = 5,
  203. .x_chroma_shift = 0, .y_chroma_shift = 0,
  204. },
  205. /* gray / mono formats */
  206. [PIX_FMT_GRAY16BE] = {
  207. .name = "gray16be",
  208. .nb_channels = 1,
  209. .color_type = FF_COLOR_GRAY,
  210. .pixel_type = FF_PIXEL_PLANAR,
  211. .depth = 16,
  212. },
  213. [PIX_FMT_GRAY16LE] = {
  214. .name = "gray16le",
  215. .nb_channels = 1,
  216. .color_type = FF_COLOR_GRAY,
  217. .pixel_type = FF_PIXEL_PLANAR,
  218. .depth = 16,
  219. },
  220. [PIX_FMT_GRAY8] = {
  221. .name = "gray",
  222. .nb_channels = 1,
  223. .color_type = FF_COLOR_GRAY,
  224. .pixel_type = FF_PIXEL_PLANAR,
  225. .depth = 8,
  226. },
  227. [PIX_FMT_MONOWHITE] = {
  228. .name = "monow",
  229. .nb_channels = 1,
  230. .color_type = FF_COLOR_GRAY,
  231. .pixel_type = FF_PIXEL_PLANAR,
  232. .depth = 1,
  233. },
  234. [PIX_FMT_MONOBLACK] = {
  235. .name = "monob",
  236. .nb_channels = 1,
  237. .color_type = FF_COLOR_GRAY,
  238. .pixel_type = FF_PIXEL_PLANAR,
  239. .depth = 1,
  240. },
  241. /* paletted formats */
  242. [PIX_FMT_PAL8] = {
  243. .name = "pal8",
  244. .nb_channels = 4, .is_alpha = 1,
  245. .color_type = FF_COLOR_RGB,
  246. .pixel_type = FF_PIXEL_PALETTE,
  247. .depth = 8,
  248. },
  249. [PIX_FMT_XVMC_MPEG2_MC] = {
  250. .name = "xvmcmc",
  251. },
  252. [PIX_FMT_XVMC_MPEG2_IDCT] = {
  253. .name = "xvmcidct",
  254. },
  255. [PIX_FMT_UYYVYY411] = {
  256. .name = "uyyvyy411",
  257. .nb_channels = 1,
  258. .color_type = FF_COLOR_YUV,
  259. .pixel_type = FF_PIXEL_PACKED,
  260. .depth = 8,
  261. .x_chroma_shift = 2, .y_chroma_shift = 0,
  262. },
  263. [PIX_FMT_BGR32] = {
  264. .name = "bgr32",
  265. .nb_channels = 4, .is_alpha = 1,
  266. .color_type = FF_COLOR_RGB,
  267. .pixel_type = FF_PIXEL_PACKED,
  268. .depth = 8,
  269. .x_chroma_shift = 0, .y_chroma_shift = 0,
  270. },
  271. [PIX_FMT_BGR565] = {
  272. .name = "bgr565",
  273. .nb_channels = 3,
  274. .color_type = FF_COLOR_RGB,
  275. .pixel_type = FF_PIXEL_PACKED,
  276. .depth = 5,
  277. .x_chroma_shift = 0, .y_chroma_shift = 0,
  278. },
  279. [PIX_FMT_BGR555] = {
  280. .name = "bgr555",
  281. .nb_channels = 3,
  282. .color_type = FF_COLOR_RGB,
  283. .pixel_type = FF_PIXEL_PACKED,
  284. .depth = 5,
  285. .x_chroma_shift = 0, .y_chroma_shift = 0,
  286. },
  287. [PIX_FMT_RGB8] = {
  288. .name = "rgb8",
  289. .nb_channels = 1,
  290. .color_type = FF_COLOR_RGB,
  291. .pixel_type = FF_PIXEL_PACKED,
  292. .depth = 8,
  293. .x_chroma_shift = 0, .y_chroma_shift = 0,
  294. },
  295. [PIX_FMT_RGB4] = {
  296. .name = "rgb4",
  297. .nb_channels = 1,
  298. .color_type = FF_COLOR_RGB,
  299. .pixel_type = FF_PIXEL_PACKED,
  300. .depth = 4,
  301. .x_chroma_shift = 0, .y_chroma_shift = 0,
  302. },
  303. [PIX_FMT_RGB4_BYTE] = {
  304. .name = "rgb4_byte",
  305. .nb_channels = 1,
  306. .color_type = FF_COLOR_RGB,
  307. .pixel_type = FF_PIXEL_PACKED,
  308. .depth = 8,
  309. .x_chroma_shift = 0, .y_chroma_shift = 0,
  310. },
  311. [PIX_FMT_BGR8] = {
  312. .name = "bgr8",
  313. .nb_channels = 1,
  314. .color_type = FF_COLOR_RGB,
  315. .pixel_type = FF_PIXEL_PACKED,
  316. .depth = 8,
  317. .x_chroma_shift = 0, .y_chroma_shift = 0,
  318. },
  319. [PIX_FMT_BGR4] = {
  320. .name = "bgr4",
  321. .nb_channels = 1,
  322. .color_type = FF_COLOR_RGB,
  323. .pixel_type = FF_PIXEL_PACKED,
  324. .depth = 4,
  325. .x_chroma_shift = 0, .y_chroma_shift = 0,
  326. },
  327. [PIX_FMT_BGR4_BYTE] = {
  328. .name = "bgr4_byte",
  329. .nb_channels = 1,
  330. .color_type = FF_COLOR_RGB,
  331. .pixel_type = FF_PIXEL_PACKED,
  332. .depth = 8,
  333. .x_chroma_shift = 0, .y_chroma_shift = 0,
  334. },
  335. [PIX_FMT_NV12] = {
  336. .name = "nv12",
  337. .nb_channels = 2,
  338. .color_type = FF_COLOR_YUV,
  339. .pixel_type = FF_PIXEL_PLANAR,
  340. .depth = 8,
  341. .x_chroma_shift = 1, .y_chroma_shift = 1,
  342. },
  343. [PIX_FMT_NV21] = {
  344. .name = "nv12",
  345. .nb_channels = 2,
  346. .color_type = FF_COLOR_YUV,
  347. .pixel_type = FF_PIXEL_PLANAR,
  348. .depth = 8,
  349. .x_chroma_shift = 1, .y_chroma_shift = 1,
  350. },
  351. [PIX_FMT_BGR32_1] = {
  352. .name = "bgr32_1",
  353. .nb_channels = 4, .is_alpha = 1,
  354. .color_type = FF_COLOR_RGB,
  355. .pixel_type = FF_PIXEL_PACKED,
  356. .depth = 8,
  357. .x_chroma_shift = 0, .y_chroma_shift = 0,
  358. },
  359. [PIX_FMT_RGB32_1] = {
  360. .name = "rgb32_1",
  361. .nb_channels = 4, .is_alpha = 1,
  362. .color_type = FF_COLOR_RGB,
  363. .pixel_type = FF_PIXEL_PACKED,
  364. .depth = 8,
  365. .x_chroma_shift = 0, .y_chroma_shift = 0,
  366. },
  367. };
  368. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  369. {
  370. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  371. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  372. }
  373. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  374. {
  375. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  376. return "???";
  377. else
  378. return pix_fmt_info[pix_fmt].name;
  379. }
  380. enum PixelFormat avcodec_get_pix_fmt(const char* name)
  381. {
  382. int i;
  383. for (i=0; i < PIX_FMT_NB; i++)
  384. if (!strcmp(pix_fmt_info[i].name, name))
  385. break;
  386. return i;
  387. }
  388. void avcodec_pix_fmt_string (char *buf, int buf_size, int pix_fmt)
  389. {
  390. PixFmtInfo info= pix_fmt_info[pix_fmt];
  391. char is_alpha_char= info.is_alpha ? 'y' : 'n';
  392. /* print header */
  393. if (pix_fmt < 0)
  394. snprintf (buf, buf_size,
  395. "name " " nb_channels" " depth" " is_alpha"
  396. );
  397. else
  398. snprintf (buf, buf_size,
  399. "%-10s" " %1d " " %2d " " %c ",
  400. info.name,
  401. info.nb_channels,
  402. info.depth,
  403. is_alpha_char
  404. );
  405. }
/**
 * Fill in the AVPicture's data pointers and line sizes so that it describes
 * an image of the given format/size stored contiguously starting at ptr.
 * @return the total number of bytes the image occupies, or -1 on invalid
 *         dimensions or unsupported pixel format.
 * NOTE: ptr may be NULL; the data pointers are then plain offsets, which is
 * how avpicture_get_size() uses this function to compute sizes.
 */
int avpicture_fill(AVPicture *picture, uint8_t *ptr,
                   int pix_fmt, int width, int height)
{
    int size, w2, h2, size2;
    const PixFmtInfo *pinfo;

    if(avcodec_check_dimensions(NULL, width, height))
        goto fail;

    pinfo = &pix_fmt_info[pix_fmt];
    size = width * height;      /* luma / single-plane byte count at depth 8 */
    switch(pix_fmt) {
    /* planar YUV: one full-size luma plane + two subsampled chroma planes */
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
    case PIX_FMT_YUV440P:
    case PIX_FMT_YUVJ420P:
    case PIX_FMT_YUVJ422P:
    case PIX_FMT_YUVJ444P:
    case PIX_FMT_YUVJ440P:
        /* chroma dimensions, rounded up so odd sizes get a full sample */
        w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
        h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
        size2 = w2 * h2;
        picture->data[0] = ptr;
        picture->data[1] = picture->data[0] + size;
        picture->data[2] = picture->data[1] + size2;
        picture->data[3] = NULL;
        picture->linesize[0] = width;
        picture->linesize[1] = w2;
        picture->linesize[2] = w2;
        picture->linesize[3] = 0;
        return size + 2 * size2;
    /* planar YUV + full-size alpha plane in data[3] */
    case PIX_FMT_YUVA420P:
        w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
        h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
        size2 = w2 * h2;
        picture->data[0] = ptr;
        picture->data[1] = picture->data[0] + size;
        picture->data[2] = picture->data[1] + size2;
        picture->data[3] = picture->data[1] + size2 + size2;
        picture->linesize[0] = width;
        picture->linesize[1] = w2;
        picture->linesize[2] = w2;
        picture->linesize[3] = width;
        return 2 * size + 2 * size2;
    /* semi-planar: luma plane + one interleaved CbCr (or CrCb) plane */
    case PIX_FMT_NV12:
    case PIX_FMT_NV21:
        w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
        h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
        size2 = w2 * h2 * 2;    /* two interleaved chroma components */
        picture->data[0] = ptr;
        picture->data[1] = picture->data[0] + size;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        picture->linesize[0] = width;
        picture->linesize[1] = w2;
        picture->linesize[2] = 0;
        picture->linesize[3] = 0;
        return size + 2 * size2;
    /* packed 24-bit RGB/BGR */
    case PIX_FMT_RGB24:
    case PIX_FMT_BGR24:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        picture->linesize[0] = width * 3;
        return size * 3;
    /* packed 32-bit RGB/BGR variants */
    case PIX_FMT_RGB32:
    case PIX_FMT_BGR32:
    case PIX_FMT_RGB32_1:
    case PIX_FMT_BGR32_1:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        picture->linesize[0] = width * 4;
        return size * 4;
    /* 2 bytes per pixel: 16-bit gray, 15/16-bit RGB, packed 4:2:2 YUV */
    case PIX_FMT_GRAY16BE:
    case PIX_FMT_GRAY16LE:
    case PIX_FMT_BGR555:
    case PIX_FMT_BGR565:
    case PIX_FMT_RGB555:
    case PIX_FMT_RGB565:
    case PIX_FMT_YUYV422:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        picture->linesize[0] = width * 2;
        return size * 2;
    case PIX_FMT_UYVY422:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        picture->linesize[0] = width * 2;
        return size * 2;
    /* packed 4:1:1 — 6 bytes per 4 pixels = 1.5 bytes per pixel */
    case PIX_FMT_UYYVYY411:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        picture->linesize[0] = width + width/2;
        return size + size/2;
    /* 1 byte per pixel */
    case PIX_FMT_RGB8:
    case PIX_FMT_BGR8:
    case PIX_FMT_RGB4_BYTE:
    case PIX_FMT_BGR4_BYTE:
    case PIX_FMT_GRAY8:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        picture->linesize[0] = width;
        return size;
    /* 4 bits per pixel, two pixels per byte (width assumed even here) */
    case PIX_FMT_RGB4:
    case PIX_FMT_BGR4:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        picture->linesize[0] = width / 2;
        return size / 2;
    /* 1 bit per pixel, rows padded to whole bytes */
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        picture->linesize[0] = (width + 7) >> 3;
        return picture->linesize[0] * height;
    case PIX_FMT_PAL8:
        size2 = (size + 3) & ~3;    /* align palette start to 4 bytes */
        picture->data[0] = ptr;
        picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        picture->linesize[0] = width;
        picture->linesize[1] = 4;
        return size2 + 256 * 4;
    default:
fail:
        picture->data[0] = NULL;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        return -1;
    }
}
/**
 * Serialize the planes of src into the contiguous buffer dest.
 * @return the number of bytes written, or -1 if dest_size is too small or
 *         the size computation failed.
 * NOTE(review): the chroma width/height switch below only triggers at i == 1,
 * so it relies on planes 1 and 2 having identical dimensions (true for all
 * planar formats in pix_fmt_info).
 */
int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
                     unsigned char *dest, int dest_size)
{
    const PixFmtInfo* pf = &pix_fmt_info[pix_fmt];
    int i, j, w, h, data_planes;
    const unsigned char* s;
    int size = avpicture_get_size(pix_fmt, width, height);

    if (size > dest_size || size < 0)
        return -1;

    if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
        /* packed/palette data lives in a single plane; compute its row width
           in bytes (special cases for 16-bit-per-pixel and 4:1:1 packing) */
        if (pix_fmt == PIX_FMT_YUYV422 ||
            pix_fmt == PIX_FMT_UYVY422 ||
            pix_fmt == PIX_FMT_BGR565 ||
            pix_fmt == PIX_FMT_BGR555 ||
            pix_fmt == PIX_FMT_RGB565 ||
            pix_fmt == PIX_FMT_RGB555)
            w = width * 2;
        else if (pix_fmt == PIX_FMT_UYYVYY411)
            w = width + width/2;
        else if (pix_fmt == PIX_FMT_PAL8)
            w = width;
        else
            w = width * (pf->depth * pf->nb_channels / 8);

        data_planes = 1;
        h = height;
    } else {
        data_planes = pf->nb_channels;
        w = (width*pf->depth + 7)/8;    /* round row up to whole bytes */
        h = height;
    }

    for (i=0; i<data_planes; i++) {
        if (i == 1) {
            /* switch to chroma-plane dimensions for planes 1 and 2 */
            w = width >> pf->x_chroma_shift;
            h = height >> pf->y_chroma_shift;
        }
        s = src->data[i];
        /* copy plane row by row, dropping any per-row padding (linesize > w) */
        for(j=0; j<h; j++) {
            memcpy(dest, s, w);
            dest += w;
            s += src->linesize[i];
        }
    }

    if (pf->pixel_type == FF_PIXEL_PALETTE)
        /* append the 256-entry 32-bit palette at the next 4-byte boundary,
           matching the layout produced by avpicture_fill() for PAL8 */
        memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);

    return size;
}
  601. int avpicture_get_size(int pix_fmt, int width, int height)
  602. {
  603. AVPicture dummy_pict;
  604. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  605. }
  606. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  607. int has_alpha)
  608. {
  609. const PixFmtInfo *pf, *ps;
  610. int loss;
  611. ps = &pix_fmt_info[src_pix_fmt];
  612. pf = &pix_fmt_info[dst_pix_fmt];
  613. /* compute loss */
  614. loss = 0;
  615. pf = &pix_fmt_info[dst_pix_fmt];
  616. if (pf->depth < ps->depth ||
  617. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  618. loss |= FF_LOSS_DEPTH;
  619. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  620. pf->y_chroma_shift > ps->y_chroma_shift)
  621. loss |= FF_LOSS_RESOLUTION;
  622. switch(pf->color_type) {
  623. case FF_COLOR_RGB:
  624. if (ps->color_type != FF_COLOR_RGB &&
  625. ps->color_type != FF_COLOR_GRAY)
  626. loss |= FF_LOSS_COLORSPACE;
  627. break;
  628. case FF_COLOR_GRAY:
  629. if (ps->color_type != FF_COLOR_GRAY)
  630. loss |= FF_LOSS_COLORSPACE;
  631. break;
  632. case FF_COLOR_YUV:
  633. if (ps->color_type != FF_COLOR_YUV)
  634. loss |= FF_LOSS_COLORSPACE;
  635. break;
  636. case FF_COLOR_YUV_JPEG:
  637. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  638. ps->color_type != FF_COLOR_YUV &&
  639. ps->color_type != FF_COLOR_GRAY)
  640. loss |= FF_LOSS_COLORSPACE;
  641. break;
  642. default:
  643. /* fail safe test */
  644. if (ps->color_type != pf->color_type)
  645. loss |= FF_LOSS_COLORSPACE;
  646. break;
  647. }
  648. if (pf->color_type == FF_COLOR_GRAY &&
  649. ps->color_type != FF_COLOR_GRAY)
  650. loss |= FF_LOSS_CHROMA;
  651. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  652. loss |= FF_LOSS_ALPHA;
  653. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  654. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  655. loss |= FF_LOSS_COLORQUANT;
  656. return loss;
  657. }
  658. static int avg_bits_per_pixel(int pix_fmt)
  659. {
  660. int bits;
  661. const PixFmtInfo *pf;
  662. pf = &pix_fmt_info[pix_fmt];
  663. switch(pf->pixel_type) {
  664. case FF_PIXEL_PACKED:
  665. switch(pix_fmt) {
  666. case PIX_FMT_YUYV422:
  667. case PIX_FMT_UYVY422:
  668. case PIX_FMT_RGB565:
  669. case PIX_FMT_RGB555:
  670. case PIX_FMT_BGR565:
  671. case PIX_FMT_BGR555:
  672. bits = 16;
  673. break;
  674. case PIX_FMT_UYYVYY411:
  675. bits = 12;
  676. break;
  677. default:
  678. bits = pf->depth * pf->nb_channels;
  679. break;
  680. }
  681. break;
  682. case FF_PIXEL_PLANAR:
  683. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  684. bits = pf->depth * pf->nb_channels;
  685. } else {
  686. bits = pf->depth + ((2 * pf->depth) >>
  687. (pf->x_chroma_shift + pf->y_chroma_shift));
  688. }
  689. break;
  690. case FF_PIXEL_PALETTE:
  691. bits = 8;
  692. break;
  693. default:
  694. bits = -1;
  695. break;
  696. }
  697. return bits;
  698. }
  699. static int avcodec_find_best_pix_fmt1(int pix_fmt_mask,
  700. int src_pix_fmt,
  701. int has_alpha,
  702. int loss_mask)
  703. {
  704. int dist, i, loss, min_dist, dst_pix_fmt;
  705. /* find exact color match with smallest size */
  706. dst_pix_fmt = -1;
  707. min_dist = 0x7fffffff;
  708. for(i = 0;i < PIX_FMT_NB; i++) {
  709. if (pix_fmt_mask & (1 << i)) {
  710. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  711. if (loss == 0) {
  712. dist = avg_bits_per_pixel(i);
  713. if (dist < min_dist) {
  714. min_dist = dist;
  715. dst_pix_fmt = i;
  716. }
  717. }
  718. }
  719. }
  720. return dst_pix_fmt;
  721. }
  722. int avcodec_find_best_pix_fmt(int pix_fmt_mask, int src_pix_fmt,
  723. int has_alpha, int *loss_ptr)
  724. {
  725. int dst_pix_fmt, loss_mask, i;
  726. static const int loss_mask_order[] = {
  727. ~0, /* no loss first */
  728. ~FF_LOSS_ALPHA,
  729. ~FF_LOSS_RESOLUTION,
  730. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  731. ~FF_LOSS_COLORQUANT,
  732. ~FF_LOSS_DEPTH,
  733. 0,
  734. };
  735. /* try with successive loss */
  736. i = 0;
  737. for(;;) {
  738. loss_mask = loss_mask_order[i++];
  739. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  740. has_alpha, loss_mask);
  741. if (dst_pix_fmt >= 0)
  742. goto found;
  743. if (loss_mask == 0)
  744. break;
  745. }
  746. return -1;
  747. found:
  748. if (loss_ptr)
  749. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  750. return dst_pix_fmt;
  751. }
  752. void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
  753. const uint8_t *src, int src_wrap,
  754. int width, int height)
  755. {
  756. if((!dst) || (!src))
  757. return;
  758. for(;height > 0; height--) {
  759. memcpy(dst, src, width);
  760. dst += dst_wrap;
  761. src += src_wrap;
  762. }
  763. }
  764. void av_picture_copy(AVPicture *dst, const AVPicture *src,
  765. int pix_fmt, int width, int height)
  766. {
  767. int bwidth, bits, i;
  768. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  769. pf = &pix_fmt_info[pix_fmt];
  770. switch(pf->pixel_type) {
  771. case FF_PIXEL_PACKED:
  772. switch(pix_fmt) {
  773. case PIX_FMT_YUYV422:
  774. case PIX_FMT_UYVY422:
  775. case PIX_FMT_RGB565:
  776. case PIX_FMT_RGB555:
  777. case PIX_FMT_BGR565:
  778. case PIX_FMT_BGR555:
  779. bits = 16;
  780. break;
  781. case PIX_FMT_UYYVYY411:
  782. bits = 12;
  783. break;
  784. default:
  785. bits = pf->depth * pf->nb_channels;
  786. break;
  787. }
  788. bwidth = (width * bits + 7) >> 3;
  789. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  790. src->data[0], src->linesize[0],
  791. bwidth, height);
  792. break;
  793. case FF_PIXEL_PLANAR:
  794. for(i = 0; i < pf->nb_channels; i++) {
  795. int w, h;
  796. w = width;
  797. h = height;
  798. if (i == 1 || i == 2) {
  799. w >>= pf->x_chroma_shift;
  800. h >>= pf->y_chroma_shift;
  801. }
  802. bwidth = (w * pf->depth + 7) >> 3;
  803. ff_img_copy_plane(dst->data[i], dst->linesize[i],
  804. src->data[i], src->linesize[i],
  805. bwidth, h);
  806. }
  807. break;
  808. case FF_PIXEL_PALETTE:
  809. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  810. src->data[0], src->linesize[0],
  811. width, height);
  812. /* copy the palette */
  813. ff_img_copy_plane(dst->data[1], dst->linesize[1],
  814. src->data[1], src->linesize[1],
  815. 4, 256);
  816. break;
  817. }
  818. }
/* XXX: totally non optimized */
/**
 * Convert packed YUYV 4:2:2 to planar YUV 4:2:0.
 * Lines are processed in pairs: the first line of each pair supplies both
 * luma and chroma, the second supplies luma only (chroma is vertically
 * subsampled by dropping, no filtering).  The trailing `if (w)` blocks and
 * the `height > 1` check handle odd widths/heights.
 */
static void yuyv422_to_yuv420p(AVPicture *dst, const AVPicture *src,
                               int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;

    p1 = src->data[0];
    lum1 = dst->data[0];
    cb1 = dst->data[1];
    cr1 = dst->data[2];

    for(;height >= 1; height -= 2) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        /* even line: Y0 U Y1 V groups -> luma + chroma */
        for(w = width; w >= 2; w -= 2) {
            lum[0] = p[0];
            cb[0] = p[1];
            lum[1] = p[2];
            cr[0] = p[3];
            p += 4;
            lum += 2;
            cb++;
            cr++;
        }
        if (w) {
            /* odd trailing pixel: take its luma and the group's chroma */
            lum[0] = p[0];
            cb[0] = p[1];
            cr[0] = p[3];
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        if (height>1) {
            /* odd line of the pair: luma only, chroma bytes are skipped */
            p = p1;
            lum = lum1;
            for(w = width; w >= 2; w -= 2) {
                lum[0] = p[0];
                lum[1] = p[2];
                p += 4;
                lum += 2;
            }
            if (w) {
                lum[0] = p[0];
            }
            p1 += src->linesize[0];
            lum1 += dst->linesize[0];
        }
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
/**
 * Convert packed UYVY 4:2:2 to planar YUV 4:2:0.
 * Same structure as yuyv422_to_yuv420p() but with the byte order
 * U Y0 V Y1 within each 4-byte group.  Chroma is taken from even lines
 * only; odd widths/heights are handled by the trailing checks.
 */
static void uyvy422_to_yuv420p(AVPicture *dst, const AVPicture *src,
                               int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;

    p1 = src->data[0];
    lum1 = dst->data[0];
    cb1 = dst->data[1];
    cr1 = dst->data[2];

    for(;height >= 1; height -= 2) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        /* even line: U Y0 V Y1 groups -> luma + chroma */
        for(w = width; w >= 2; w -= 2) {
            lum[0] = p[1];
            cb[0] = p[0];
            lum[1] = p[3];
            cr[0] = p[2];
            p += 4;
            lum += 2;
            cb++;
            cr++;
        }
        if (w) {
            /* odd trailing pixel */
            lum[0] = p[1];
            cb[0] = p[0];
            cr[0] = p[2];
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        if (height>1) {
            /* odd line of the pair: luma only */
            p = p1;
            lum = lum1;
            for(w = width; w >= 2; w -= 2) {
                lum[0] = p[1];
                lum[1] = p[3];
                p += 4;
                lum += 2;
            }
            if (w) {
                lum[0] = p[1];
            }
            p1 += src->linesize[0];
            lum1 += dst->linesize[0];
        }
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
  926. static void uyvy422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  927. int width, int height)
  928. {
  929. const uint8_t *p, *p1;
  930. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  931. int w;
  932. p1 = src->data[0];
  933. lum1 = dst->data[0];
  934. cb1 = dst->data[1];
  935. cr1 = dst->data[2];
  936. for(;height > 0; height--) {
  937. p = p1;
  938. lum = lum1;
  939. cb = cb1;
  940. cr = cr1;
  941. for(w = width; w >= 2; w -= 2) {
  942. lum[0] = p[1];
  943. cb[0] = p[0];
  944. lum[1] = p[3];
  945. cr[0] = p[2];
  946. p += 4;
  947. lum += 2;
  948. cb++;
  949. cr++;
  950. }
  951. p1 += src->linesize[0];
  952. lum1 += dst->linesize[0];
  953. cb1 += dst->linesize[1];
  954. cr1 += dst->linesize[2];
  955. }
  956. }
  957. static void yuyv422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  958. int width, int height)
  959. {
  960. const uint8_t *p, *p1;
  961. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  962. int w;
  963. p1 = src->data[0];
  964. lum1 = dst->data[0];
  965. cb1 = dst->data[1];
  966. cr1 = dst->data[2];
  967. for(;height > 0; height--) {
  968. p = p1;
  969. lum = lum1;
  970. cb = cb1;
  971. cr = cr1;
  972. for(w = width; w >= 2; w -= 2) {
  973. lum[0] = p[0];
  974. cb[0] = p[1];
  975. lum[1] = p[2];
  976. cr[0] = p[3];
  977. p += 4;
  978. lum += 2;
  979. cb++;
  980. cr++;
  981. }
  982. p1 += src->linesize[0];
  983. lum1 += dst->linesize[0];
  984. cb1 += dst->linesize[1];
  985. cr1 += dst->linesize[2];
  986. }
  987. }
  988. static void yuv422p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  989. int width, int height)
  990. {
  991. uint8_t *p, *p1;
  992. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  993. int w;
  994. p1 = dst->data[0];
  995. lum1 = src->data[0];
  996. cb1 = src->data[1];
  997. cr1 = src->data[2];
  998. for(;height > 0; height--) {
  999. p = p1;
  1000. lum = lum1;
  1001. cb = cb1;
  1002. cr = cr1;
  1003. for(w = width; w >= 2; w -= 2) {
  1004. p[0] = lum[0];
  1005. p[1] = cb[0];
  1006. p[2] = lum[1];
  1007. p[3] = cr[0];
  1008. p += 4;
  1009. lum += 2;
  1010. cb++;
  1011. cr++;
  1012. }
  1013. p1 += dst->linesize[0];
  1014. lum1 += src->linesize[0];
  1015. cb1 += src->linesize[1];
  1016. cr1 += src->linesize[2];
  1017. }
  1018. }
  1019. static void yuv422p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1020. int width, int height)
  1021. {
  1022. uint8_t *p, *p1;
  1023. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1024. int w;
  1025. p1 = dst->data[0];
  1026. lum1 = src->data[0];
  1027. cb1 = src->data[1];
  1028. cr1 = src->data[2];
  1029. for(;height > 0; height--) {
  1030. p = p1;
  1031. lum = lum1;
  1032. cb = cb1;
  1033. cr = cr1;
  1034. for(w = width; w >= 2; w -= 2) {
  1035. p[1] = lum[0];
  1036. p[0] = cb[0];
  1037. p[3] = lum[1];
  1038. p[2] = cr[0];
  1039. p += 4;
  1040. lum += 2;
  1041. cb++;
  1042. cr++;
  1043. }
  1044. p1 += dst->linesize[0];
  1045. lum1 += src->linesize[0];
  1046. cb1 += src->linesize[1];
  1047. cr1 += src->linesize[2];
  1048. }
  1049. }
/**
 * Convert packed UYYVYY 4:1:1 (6 bytes per 4 pixels: U Y0 Y1 V Y2 Y3)
 * to planar YUV 4:1:1.  A trailing width remainder (< 4 pixels) is ignored.
 */
static void uyyvyy411_to_yuv411p(AVPicture *dst, const AVPicture *src,
                                 int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;

    p1 = src->data[0];
    lum1 = dst->data[0];
    cb1 = dst->data[1];
    cr1 = dst->data[2];

    for(;height > 0; height--) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        /* each 6-byte group packs one U, one V and four luma samples */
        for(w = width; w >= 4; w -= 4) {
            cb[0] = p[0];
            lum[0] = p[1];
            lum[1] = p[2];
            cr[0] = p[3];
            lum[2] = p[4];
            lum[3] = p[5];
            p += 6;
            lum += 4;
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
  1083. static void yuv420p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  1084. int width, int height)
  1085. {
  1086. int w, h;
  1087. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1088. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1089. uint8_t *cb1, *cb2 = src->data[1];
  1090. uint8_t *cr1, *cr2 = src->data[2];
  1091. for(h = height / 2; h--;) {
  1092. line1 = linesrc;
  1093. line2 = linesrc + dst->linesize[0];
  1094. lum1 = lumsrc;
  1095. lum2 = lumsrc + src->linesize[0];
  1096. cb1 = cb2;
  1097. cr1 = cr2;
  1098. for(w = width / 2; w--;) {
  1099. *line1++ = *lum1++; *line2++ = *lum2++;
  1100. *line1++ = *line2++ = *cb1++;
  1101. *line1++ = *lum1++; *line2++ = *lum2++;
  1102. *line1++ = *line2++ = *cr1++;
  1103. }
  1104. linesrc += dst->linesize[0] * 2;
  1105. lumsrc += src->linesize[0] * 2;
  1106. cb2 += src->linesize[1];
  1107. cr2 += src->linesize[2];
  1108. }
  1109. }
  1110. static void yuv420p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1111. int width, int height)
  1112. {
  1113. int w, h;
  1114. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1115. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1116. uint8_t *cb1, *cb2 = src->data[1];
  1117. uint8_t *cr1, *cr2 = src->data[2];
  1118. for(h = height / 2; h--;) {
  1119. line1 = linesrc;
  1120. line2 = linesrc + dst->linesize[0];
  1121. lum1 = lumsrc;
  1122. lum2 = lumsrc + src->linesize[0];
  1123. cb1 = cb2;
  1124. cr1 = cr2;
  1125. for(w = width / 2; w--;) {
  1126. *line1++ = *line2++ = *cb1++;
  1127. *line1++ = *lum1++; *line2++ = *lum2++;
  1128. *line1++ = *line2++ = *cr1++;
  1129. *line1++ = *lum1++; *line2++ = *lum2++;
  1130. }
  1131. linesrc += dst->linesize[0] * 2;
  1132. lumsrc += src->linesize[0] * 2;
  1133. cb2 += src->linesize[1];
  1134. cr2 += src->linesize[2];
  1135. }
  1136. }
  1137. static uint8_t y_ccir_to_jpeg[256];
  1138. static uint8_t y_jpeg_to_ccir[256];
  1139. static uint8_t c_ccir_to_jpeg[256];
  1140. static uint8_t c_jpeg_to_ccir[256];
  1141. /* init various conversion tables */
  1142. static void img_convert_init(void)
  1143. {
  1144. int i;
  1145. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  1146. for(i = 0;i < 256; i++) {
  1147. y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
  1148. y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
  1149. c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
  1150. c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
  1151. }
  1152. }
  1153. /* apply to each pixel the given table */
  1154. static void img_apply_table(uint8_t *dst, int dst_wrap,
  1155. const uint8_t *src, int src_wrap,
  1156. int width, int height, const uint8_t *table1)
  1157. {
  1158. int n;
  1159. const uint8_t *s;
  1160. uint8_t *d;
  1161. const uint8_t *table;
  1162. table = table1;
  1163. for(;height > 0; height--) {
  1164. s = src;
  1165. d = dst;
  1166. n = width;
  1167. while (n >= 4) {
  1168. d[0] = table[s[0]];
  1169. d[1] = table[s[1]];
  1170. d[2] = table[s[2]];
  1171. d[3] = table[s[3]];
  1172. d += 4;
  1173. s += 4;
  1174. n -= 4;
  1175. }
  1176. while (n > 0) {
  1177. d[0] = table[s[0]];
  1178. d++;
  1179. s++;
  1180. n--;
  1181. }
  1182. dst += dst_wrap;
  1183. src += src_wrap;
  1184. }
  1185. }
  1186. /* XXX: use generic filter ? */
  1187. /* XXX: in most cases, the sampling position is incorrect */
  1188. /* 4x1 -> 1x1 */
  1189. static void shrink41(uint8_t *dst, int dst_wrap,
  1190. const uint8_t *src, int src_wrap,
  1191. int width, int height)
  1192. {
  1193. int w;
  1194. const uint8_t *s;
  1195. uint8_t *d;
  1196. for(;height > 0; height--) {
  1197. s = src;
  1198. d = dst;
  1199. for(w = width;w > 0; w--) {
  1200. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  1201. s += 4;
  1202. d++;
  1203. }
  1204. src += src_wrap;
  1205. dst += dst_wrap;
  1206. }
  1207. }
  1208. /* 2x1 -> 1x1 */
  1209. static void shrink21(uint8_t *dst, int dst_wrap,
  1210. const uint8_t *src, int src_wrap,
  1211. int width, int height)
  1212. {
  1213. int w;
  1214. const uint8_t *s;
  1215. uint8_t *d;
  1216. for(;height > 0; height--) {
  1217. s = src;
  1218. d = dst;
  1219. for(w = width;w > 0; w--) {
  1220. d[0] = (s[0] + s[1]) >> 1;
  1221. s += 2;
  1222. d++;
  1223. }
  1224. src += src_wrap;
  1225. dst += dst_wrap;
  1226. }
  1227. }
  1228. /* 1x2 -> 1x1 */
  1229. static void shrink12(uint8_t *dst, int dst_wrap,
  1230. const uint8_t *src, int src_wrap,
  1231. int width, int height)
  1232. {
  1233. int w;
  1234. uint8_t *d;
  1235. const uint8_t *s1, *s2;
  1236. for(;height > 0; height--) {
  1237. s1 = src;
  1238. s2 = s1 + src_wrap;
  1239. d = dst;
  1240. for(w = width;w >= 4; w-=4) {
  1241. d[0] = (s1[0] + s2[0]) >> 1;
  1242. d[1] = (s1[1] + s2[1]) >> 1;
  1243. d[2] = (s1[2] + s2[2]) >> 1;
  1244. d[3] = (s1[3] + s2[3]) >> 1;
  1245. s1 += 4;
  1246. s2 += 4;
  1247. d += 4;
  1248. }
  1249. for(;w > 0; w--) {
  1250. d[0] = (s1[0] + s2[0]) >> 1;
  1251. s1++;
  1252. s2++;
  1253. d++;
  1254. }
  1255. src += 2 * src_wrap;
  1256. dst += dst_wrap;
  1257. }
  1258. }
  1259. /* 2x2 -> 1x1 */
  1260. void ff_shrink22(uint8_t *dst, int dst_wrap,
  1261. const uint8_t *src, int src_wrap,
  1262. int width, int height)
  1263. {
  1264. int w;
  1265. const uint8_t *s1, *s2;
  1266. uint8_t *d;
  1267. for(;height > 0; height--) {
  1268. s1 = src;
  1269. s2 = s1 + src_wrap;
  1270. d = dst;
  1271. for(w = width;w >= 4; w-=4) {
  1272. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1273. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  1274. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  1275. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  1276. s1 += 8;
  1277. s2 += 8;
  1278. d += 4;
  1279. }
  1280. for(;w > 0; w--) {
  1281. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1282. s1 += 2;
  1283. s2 += 2;
  1284. d++;
  1285. }
  1286. src += 2 * src_wrap;
  1287. dst += dst_wrap;
  1288. }
  1289. }
  1290. /* 4x4 -> 1x1 */
  1291. void ff_shrink44(uint8_t *dst, int dst_wrap,
  1292. const uint8_t *src, int src_wrap,
  1293. int width, int height)
  1294. {
  1295. int w;
  1296. const uint8_t *s1, *s2, *s3, *s4;
  1297. uint8_t *d;
  1298. for(;height > 0; height--) {
  1299. s1 = src;
  1300. s2 = s1 + src_wrap;
  1301. s3 = s2 + src_wrap;
  1302. s4 = s3 + src_wrap;
  1303. d = dst;
  1304. for(w = width;w > 0; w--) {
  1305. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  1306. s2[0] + s2[1] + s2[2] + s2[3] +
  1307. s3[0] + s3[1] + s3[2] + s3[3] +
  1308. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  1309. s1 += 4;
  1310. s2 += 4;
  1311. s3 += 4;
  1312. s4 += 4;
  1313. d++;
  1314. }
  1315. src += 4 * src_wrap;
  1316. dst += dst_wrap;
  1317. }
  1318. }
  1319. /* 8x8 -> 1x1 */
  1320. void ff_shrink88(uint8_t *dst, int dst_wrap,
  1321. const uint8_t *src, int src_wrap,
  1322. int width, int height)
  1323. {
  1324. int w, i;
  1325. for(;height > 0; height--) {
  1326. for(w = width;w > 0; w--) {
  1327. int tmp=0;
  1328. for(i=0; i<8; i++){
  1329. tmp += src[0] + src[1] + src[2] + src[3] + src[4] + src[5] + src[6] + src[7];
  1330. src += src_wrap;
  1331. }
  1332. *(dst++) = (tmp + 32)>>6;
  1333. src += 8 - 8*src_wrap;
  1334. }
  1335. src += 8*src_wrap - 8*width;
  1336. dst += dst_wrap - width;
  1337. }
  1338. }
  1339. static void grow21_line(uint8_t *dst, const uint8_t *src,
  1340. int width)
  1341. {
  1342. int w;
  1343. const uint8_t *s1;
  1344. uint8_t *d;
  1345. s1 = src;
  1346. d = dst;
  1347. for(w = width;w >= 4; w-=4) {
  1348. d[1] = d[0] = s1[0];
  1349. d[3] = d[2] = s1[1];
  1350. s1 += 2;
  1351. d += 4;
  1352. }
  1353. for(;w >= 2; w -= 2) {
  1354. d[1] = d[0] = s1[0];
  1355. s1 ++;
  1356. d += 2;
  1357. }
  1358. /* only needed if width is not a multiple of two */
  1359. /* XXX: veryfy that */
  1360. if (w) {
  1361. d[0] = s1[0];
  1362. }
  1363. }
  1364. static void grow41_line(uint8_t *dst, const uint8_t *src,
  1365. int width)
  1366. {
  1367. int w, v;
  1368. const uint8_t *s1;
  1369. uint8_t *d;
  1370. s1 = src;
  1371. d = dst;
  1372. for(w = width;w >= 4; w-=4) {
  1373. v = s1[0];
  1374. d[0] = v;
  1375. d[1] = v;
  1376. d[2] = v;
  1377. d[3] = v;
  1378. s1 ++;
  1379. d += 4;
  1380. }
  1381. }
  1382. /* 1x1 -> 2x1 */
  1383. static void grow21(uint8_t *dst, int dst_wrap,
  1384. const uint8_t *src, int src_wrap,
  1385. int width, int height)
  1386. {
  1387. for(;height > 0; height--) {
  1388. grow21_line(dst, src, width);
  1389. src += src_wrap;
  1390. dst += dst_wrap;
  1391. }
  1392. }
  1393. /* 1x1 -> 1x2 */
  1394. static void grow12(uint8_t *dst, int dst_wrap,
  1395. const uint8_t *src, int src_wrap,
  1396. int width, int height)
  1397. {
  1398. for(;height > 0; height-=2) {
  1399. memcpy(dst, src, width);
  1400. dst += dst_wrap;
  1401. memcpy(dst, src, width);
  1402. dst += dst_wrap;
  1403. src += src_wrap;
  1404. }
  1405. }
  1406. /* 1x1 -> 2x2 */
  1407. static void grow22(uint8_t *dst, int dst_wrap,
  1408. const uint8_t *src, int src_wrap,
  1409. int width, int height)
  1410. {
  1411. for(;height > 0; height--) {
  1412. grow21_line(dst, src, width);
  1413. if (height%2)
  1414. src += src_wrap;
  1415. dst += dst_wrap;
  1416. }
  1417. }
  1418. /* 1x1 -> 4x1 */
  1419. static void grow41(uint8_t *dst, int dst_wrap,
  1420. const uint8_t *src, int src_wrap,
  1421. int width, int height)
  1422. {
  1423. for(;height > 0; height--) {
  1424. grow41_line(dst, src, width);
  1425. src += src_wrap;
  1426. dst += dst_wrap;
  1427. }
  1428. }
  1429. /* 1x1 -> 4x4 */
  1430. static void grow44(uint8_t *dst, int dst_wrap,
  1431. const uint8_t *src, int src_wrap,
  1432. int width, int height)
  1433. {
  1434. for(;height > 0; height--) {
  1435. grow41_line(dst, src, width);
  1436. if ((height & 3) == 1)
  1437. src += src_wrap;
  1438. dst += dst_wrap;
  1439. }
  1440. }
  1441. /* 1x2 -> 2x1 */
  1442. static void conv411(uint8_t *dst, int dst_wrap,
  1443. const uint8_t *src, int src_wrap,
  1444. int width, int height)
  1445. {
  1446. int w, c;
  1447. const uint8_t *s1, *s2;
  1448. uint8_t *d;
  1449. width>>=1;
  1450. for(;height > 0; height--) {
  1451. s1 = src;
  1452. s2 = src + src_wrap;
  1453. d = dst;
  1454. for(w = width;w > 0; w--) {
  1455. c = (s1[0] + s2[0]) >> 1;
  1456. d[0] = c;
  1457. d[1] = c;
  1458. s1++;
  1459. s2++;
  1460. d += 2;
  1461. }
  1462. src += src_wrap * 2;
  1463. dst += dst_wrap;
  1464. }
  1465. }
  1466. /* XXX: add jpeg quantize code */
  1467. #define TRANSP_INDEX (6*6*6)
  1468. /* this is maybe slow, but allows for extensions */
  1469. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1470. {
  1471. return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
  1472. }
  1473. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  1474. {
  1475. uint32_t *pal;
  1476. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  1477. int i, r, g, b;
  1478. pal = (uint32_t *)palette;
  1479. i = 0;
  1480. for(r = 0; r < 6; r++) {
  1481. for(g = 0; g < 6; g++) {
  1482. for(b = 0; b < 6; b++) {
  1483. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  1484. (pal_value[g] << 8) | pal_value[b];
  1485. }
  1486. }
  1487. }
  1488. if (has_alpha)
  1489. pal[i++] = 0;
  1490. while (i < 256)
  1491. pal[i++] = 0xff000000;
  1492. }
  1493. /* copy bit n to bits 0 ... n - 1 */
  1494. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1495. {
  1496. int mask;
  1497. mask = (1 << n) - 1;
  1498. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1499. }
  1500. /* rgb555 handling */
  1501. #define RGB_NAME rgb555
  1502. #define RGB_IN(r, g, b, s)\
  1503. {\
  1504. unsigned int v = ((const uint16_t *)(s))[0];\
  1505. r = bitcopy_n(v >> (10 - 3), 3);\
  1506. g = bitcopy_n(v >> (5 - 3), 3);\
  1507. b = bitcopy_n(v << 3, 3);\
  1508. }
  1509. #define RGB_OUT(d, r, g, b)\
  1510. {\
  1511. ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3);\
  1512. }
  1513. #define BPP 2
  1514. #include "imgconvert_template.h"
  1515. /* rgb565 handling */
  1516. #define RGB_NAME rgb565
  1517. #define RGB_IN(r, g, b, s)\
  1518. {\
  1519. unsigned int v = ((const uint16_t *)(s))[0];\
  1520. r = bitcopy_n(v >> (11 - 3), 3);\
  1521. g = bitcopy_n(v >> (5 - 2), 2);\
  1522. b = bitcopy_n(v << 3, 3);\
  1523. }
  1524. #define RGB_OUT(d, r, g, b)\
  1525. {\
  1526. ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
  1527. }
  1528. #define BPP 2
  1529. #include "imgconvert_template.h"
  1530. /* bgr24 handling */
  1531. #define RGB_NAME bgr24
  1532. #define RGB_IN(r, g, b, s)\
  1533. {\
  1534. b = (s)[0];\
  1535. g = (s)[1];\
  1536. r = (s)[2];\
  1537. }
  1538. #define RGB_OUT(d, r, g, b)\
  1539. {\
  1540. (d)[0] = b;\
  1541. (d)[1] = g;\
  1542. (d)[2] = r;\
  1543. }
  1544. #define BPP 3
  1545. #include "imgconvert_template.h"
  1546. #undef RGB_IN
  1547. #undef RGB_OUT
  1548. #undef BPP
  1549. /* rgb24 handling */
  1550. #define RGB_NAME rgb24
  1551. #define FMT_RGB24
  1552. #define RGB_IN(r, g, b, s)\
  1553. {\
  1554. r = (s)[0];\
  1555. g = (s)[1];\
  1556. b = (s)[2];\
  1557. }
  1558. #define RGB_OUT(d, r, g, b)\
  1559. {\
  1560. (d)[0] = r;\
  1561. (d)[1] = g;\
  1562. (d)[2] = b;\
  1563. }
  1564. #define BPP 3
  1565. #include "imgconvert_template.h"
  1566. /* rgb32 handling */
  1567. #define RGB_NAME rgb32
  1568. #define FMT_RGB32
  1569. #define RGB_IN(r, g, b, s)\
  1570. {\
  1571. unsigned int v = ((const uint32_t *)(s))[0];\
  1572. r = (v >> 16) & 0xff;\
  1573. g = (v >> 8) & 0xff;\
  1574. b = v & 0xff;\
  1575. }
  1576. #define RGBA_IN(r, g, b, a, s)\
  1577. {\
  1578. unsigned int v = ((const uint32_t *)(s))[0];\
  1579. a = (v >> 24) & 0xff;\
  1580. r = (v >> 16) & 0xff;\
  1581. g = (v >> 8) & 0xff;\
  1582. b = v & 0xff;\
  1583. }
  1584. #define RGBA_OUT(d, r, g, b, a)\
  1585. {\
  1586. ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
  1587. }
  1588. #define BPP 4
  1589. #include "imgconvert_template.h"
/* Expand a 1 bit-per-pixel plane into 8-bit gray: each input bit becomes
   0x00 or 0xff. xor_mask flips the input bits first (0xff for formats
   where 0 means white, 0x00 where 0 means black). */
static void mono_to_gray(AVPicture *dst, const AVPicture *src,
                         int width, int height, int xor_mask)
{
    const unsigned char *p;
    unsigned char *q;
    int v, dst_wrap, src_wrap;
    int y, w;

    p = src->data[0];
    /* a source line holds (width+7)/8 packed bytes */
    src_wrap = src->linesize[0] - ((width + 7) >> 3);
    q = dst->data[0];
    dst_wrap = dst->linesize[0] - width;
    for(y=0;y<height;y++) {
        w = width;
        while (w >= 8) {
            v = *p++ ^ xor_mask;
            /* negating a 0/1 bit yields 0x00 or 0xff in the output byte */
            q[0] = -(v >> 7);
            q[1] = -((v >> 6) & 1);
            q[2] = -((v >> 5) & 1);
            q[3] = -((v >> 4) & 1);
            q[4] = -((v >> 3) & 1);
            q[5] = -((v >> 2) & 1);
            q[6] = -((v >> 1) & 1);
            q[7] = -((v >> 0) & 1);
            w -= 8;
            q += 8;
        }
        if (w > 0) {
            /* remaining 1..7 pixels of a partially used byte */
            v = *p++ ^ xor_mask;
            do {
                q[0] = -((v >> 7) & 1);
                q++;
                v <<= 1;
            } while (--w);
        }
        p += src_wrap;
        q += dst_wrap;
    }
}
/* PIX_FMT_MONOWHITE (bit 0 = white) -> 8-bit gray */
static void monowhite_to_gray(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    mono_to_gray(dst, src, width, height, 0xff);
}
/* PIX_FMT_MONOBLACK (bit 0 = black) -> 8-bit gray */
static void monoblack_to_gray(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    mono_to_gray(dst, src, width, height, 0x00);
}
/* Pack 8-bit gray down to 1 bit per pixel by keeping only the most
   significant bit of each sample. xor_mask flips the packed output
   byte (0xff for white-is-zero, 0x00 for black-is-zero). */
static void gray_to_mono(AVPicture *dst, const AVPicture *src,
                         int width, int height, int xor_mask)
{
    int n;
    const uint8_t *s;
    uint8_t *d;
    int j, b, v, n1, src_wrap, dst_wrap, y;

    s = src->data[0];
    src_wrap = src->linesize[0] - width;
    d = dst->data[0];
    /* a destination line holds (width+7)/8 packed bytes */
    dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
    for(y=0;y<height;y++) {
        n = width;
        while (n >= 8) {
            v = 0;
            for(j=0;j<8;j++) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
            }
            d[0] = v ^ xor_mask;
            d++;
            n -= 8;
        }
        if (n > 0) {
            /* last partial byte: left-align the n1 collected bits, the
               unused low bits are zero before the xor */
            n1 = n;
            v = 0;
            while (n > 0) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
                n--;
            }
            d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
            d++;
        }
        s += src_wrap;
        d += dst_wrap;
    }
}
/* 8-bit gray -> PIX_FMT_MONOWHITE (bit 0 = white) */
static void gray_to_monowhite(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    gray_to_mono(dst, src, width, height, 0xff);
}
/* 8-bit gray -> PIX_FMT_MONOBLACK (bit 0 = black) */
static void gray_to_monoblack(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    gray_to_mono(dst, src, width, height, 0x00);
}
  1688. static void gray_to_gray16(AVPicture *dst, const AVPicture *src,
  1689. int width, int height)
  1690. {
  1691. int x, y, src_wrap, dst_wrap;
  1692. uint8_t *s, *d;
  1693. s = src->data[0];
  1694. src_wrap = src->linesize[0] - width;
  1695. d = dst->data[0];
  1696. dst_wrap = dst->linesize[0] - width * 2;
  1697. for(y=0; y<height; y++){
  1698. for(x=0; x<width; x++){
  1699. *d++ = *s;
  1700. *d++ = *s++;
  1701. }
  1702. s += src_wrap;
  1703. d += dst_wrap;
  1704. }
  1705. }
  1706. static void gray16_to_gray(AVPicture *dst, const AVPicture *src,
  1707. int width, int height)
  1708. {
  1709. int x, y, src_wrap, dst_wrap;
  1710. uint8_t *s, *d;
  1711. s = src->data[0];
  1712. src_wrap = src->linesize[0] - width * 2;
  1713. d = dst->data[0];
  1714. dst_wrap = dst->linesize[0] - width;
  1715. for(y=0; y<height; y++){
  1716. for(x=0; x<width; x++){
  1717. *d++ = *s;
  1718. s += 2;
  1719. }
  1720. s += src_wrap;
  1721. d += dst_wrap;
  1722. }
  1723. }
/* big-endian 16-bit gray -> 8-bit gray: data[0] already points at the
   most significant byte of each sample */
static void gray16be_to_gray(AVPicture *dst, const AVPicture *src,
                             int width, int height)
{
    gray16_to_gray(dst, src, width, height);
}
/* little-endian 16-bit gray -> 8-bit gray: shift data[0] by one byte so
   gray16_to_gray() reads the most significant byte of each sample */
static void gray16le_to_gray(AVPicture *dst, const AVPicture *src,
                             int width, int height)
{
    AVPicture tmpsrc = *src;
    tmpsrc.data[0]++;
    gray16_to_gray(dst, &tmpsrc, width, height);
}
  1736. static void gray16_to_gray16(AVPicture *dst, const AVPicture *src,
  1737. int width, int height)
  1738. {
  1739. int x, y, src_wrap, dst_wrap;
  1740. uint16_t *s, *d;
  1741. s = (uint16_t*)src->data[0];
  1742. src_wrap = (src->linesize[0] - width * 2)/2;
  1743. d = (uint16_t*)dst->data[0];
  1744. dst_wrap = (dst->linesize[0] - width * 2)/2;
  1745. for(y=0; y<height; y++){
  1746. for(x=0; x<width; x++){
  1747. *d++ = bswap_16(*s++);
  1748. }
  1749. s += src_wrap;
  1750. d += dst_wrap;
  1751. }
  1752. }
/* One cell of the [src][dst] conversion dispatch table below; a NULL
   convert pointer means no direct conversion exists for that pair. */
typedef struct ConvertEntry {
    void (*convert)(AVPicture *dst,
                    const AVPicture *src, int width, int height);
} ConvertEntry;
/* Add each new conversion function in this table. In order to be able
   to convert from any format to any format, the following constraints
   must be satisfied:
   - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
   - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
   - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGB32
   - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
     PIX_FMT_RGB24.
   - PIX_FMT_YUYV422 must convert to and from PIX_FMT_YUV422P.
   The other conversion functions are just optimizations for common cases.
*/
/* Direct-conversion dispatch table, indexed [src_pix_fmt][dst_pix_fmt].
   Entries left zero mean "no direct converter"; img_convert() then goes
   through an intermediate format instead. */
static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
    [PIX_FMT_YUV420P] = {
        [PIX_FMT_YUYV422] = {
            .convert = yuv420p_to_yuyv422,
        },
        [PIX_FMT_RGB555] = {
            .convert = yuv420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuv420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuv420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuv420p_to_rgb24
        },
        [PIX_FMT_RGB32] = {
            .convert = yuv420p_to_rgb32
        },
        [PIX_FMT_UYVY422] = {
            .convert = yuv420p_to_uyvy422,
        },
    },
    [PIX_FMT_YUV422P] = {
        [PIX_FMT_YUYV422] = {
            .convert = yuv422p_to_yuyv422,
        },
        [PIX_FMT_UYVY422] = {
            .convert = yuv422p_to_uyvy422,
        },
    },
    [PIX_FMT_YUV444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuv444p_to_rgb24
        },
    },
    /* JPEG-range (full 0..255) planar YUV sources */
    [PIX_FMT_YUVJ420P] = {
        [PIX_FMT_RGB555] = {
            .convert = yuvj420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuvj420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuvj420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuvj420p_to_rgb24
        },
        [PIX_FMT_RGB32] = {
            .convert = yuvj420p_to_rgb32
        },
    },
    [PIX_FMT_YUVJ444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuvj444p_to_rgb24
        },
    },
    /* packed YUV sources */
    [PIX_FMT_YUYV422] = {
        [PIX_FMT_YUV420P] = {
            .convert = yuyv422_to_yuv420p,
        },
        [PIX_FMT_YUV422P] = {
            .convert = yuyv422_to_yuv422p,
        },
    },
    [PIX_FMT_UYVY422] = {
        [PIX_FMT_YUV420P] = {
            .convert = uyvy422_to_yuv420p,
        },
        [PIX_FMT_YUV422P] = {
            .convert = uyvy422_to_yuv422p,
        },
    },
    /* RGB sources */
    [PIX_FMT_RGB24] = {
        [PIX_FMT_YUV420P] = {
            .convert = rgb24_to_yuv420p
        },
        [PIX_FMT_RGB565] = {
            .convert = rgb24_to_rgb565
        },
        [PIX_FMT_RGB555] = {
            .convert = rgb24_to_rgb555
        },
        [PIX_FMT_RGB32] = {
            .convert = rgb24_to_rgb32
        },
        [PIX_FMT_BGR24] = {
            .convert = rgb24_to_bgr24
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb24_to_gray
        },
        [PIX_FMT_PAL8] = {
            .convert = rgb24_to_pal8
        },
        [PIX_FMT_YUV444P] = {
            .convert = rgb24_to_yuv444p
        },
        [PIX_FMT_YUVJ420P] = {
            .convert = rgb24_to_yuvj420p
        },
        [PIX_FMT_YUVJ444P] = {
            .convert = rgb24_to_yuvj444p
        },
    },
    [PIX_FMT_RGB32] = {
        [PIX_FMT_RGB24] = {
            .convert = rgb32_to_rgb24
        },
        [PIX_FMT_BGR24] = {
            .convert = rgb32_to_bgr24
        },
        [PIX_FMT_RGB565] = {
            .convert = rgb32_to_rgb565
        },
        [PIX_FMT_RGB555] = {
            .convert = rgb32_to_rgb555
        },
        [PIX_FMT_PAL8] = {
            .convert = rgb32_to_pal8
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb32_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb32_to_gray
        },
    },
    [PIX_FMT_BGR24] = {
        [PIX_FMT_RGB32] = {
            .convert = bgr24_to_rgb32
        },
        [PIX_FMT_RGB24] = {
            .convert = bgr24_to_rgb24
        },
        [PIX_FMT_YUV420P] = {
            .convert = bgr24_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = bgr24_to_gray
        },
    },
    [PIX_FMT_RGB555] = {
        [PIX_FMT_RGB24] = {
            .convert = rgb555_to_rgb24
        },
        [PIX_FMT_RGB32] = {
            .convert = rgb555_to_rgb32
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb555_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb555_to_gray
        },
    },
    [PIX_FMT_RGB565] = {
        [PIX_FMT_RGB32] = {
            .convert = rgb565_to_rgb32
        },
        [PIX_FMT_RGB24] = {
            .convert = rgb565_to_rgb24
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb565_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb565_to_gray
        },
    },
    /* gray and monochrome sources */
    [PIX_FMT_GRAY16BE] = {
        [PIX_FMT_GRAY8] = {
            .convert = gray16be_to_gray
        },
        [PIX_FMT_GRAY16LE] = {
            .convert = gray16_to_gray16
        },
    },
    [PIX_FMT_GRAY16LE] = {
        [PIX_FMT_GRAY8] = {
            .convert = gray16le_to_gray
        },
        [PIX_FMT_GRAY16BE] = {
            .convert = gray16_to_gray16
        },
    },
    [PIX_FMT_GRAY8] = {
        [PIX_FMT_RGB555] = {
            .convert = gray_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = gray_to_rgb565
        },
        [PIX_FMT_RGB24] = {
            .convert = gray_to_rgb24
        },
        [PIX_FMT_BGR24] = {
            .convert = gray_to_bgr24
        },
        [PIX_FMT_RGB32] = {
            .convert = gray_to_rgb32
        },
        [PIX_FMT_MONOWHITE] = {
            .convert = gray_to_monowhite
        },
        [PIX_FMT_MONOBLACK] = {
            .convert = gray_to_monoblack
        },
        [PIX_FMT_GRAY16LE] = {
            .convert = gray_to_gray16
        },
        [PIX_FMT_GRAY16BE] = {
            .convert = gray_to_gray16
        },
    },
    [PIX_FMT_MONOWHITE] = {
        [PIX_FMT_GRAY8] = {
            .convert = monowhite_to_gray
        },
    },
    [PIX_FMT_MONOBLACK] = {
        [PIX_FMT_GRAY8] = {
            .convert = monoblack_to_gray
        },
    },
    /* paletted source */
    [PIX_FMT_PAL8] = {
        [PIX_FMT_RGB555] = {
            .convert = pal8_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = pal8_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = pal8_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = pal8_to_rgb24
        },
        [PIX_FMT_RGB32] = {
            .convert = pal8_to_rgb32
        },
    },
    [PIX_FMT_UYYVYY411] = {
        [PIX_FMT_YUV411P] = {
            .convert = uyyvyy411_to_yuv411p,
        },
    },
};
  2018. int avpicture_alloc(AVPicture *picture,
  2019. int pix_fmt, int width, int height)
  2020. {
  2021. int size;
  2022. void *ptr;
  2023. size = avpicture_get_size(pix_fmt, width, height);
  2024. if(size<0)
  2025. goto fail;
  2026. ptr = av_malloc(size);
  2027. if (!ptr)
  2028. goto fail;
  2029. avpicture_fill(picture, ptr, pix_fmt, width, height);
  2030. return 0;
  2031. fail:
  2032. memset(picture, 0, sizeof(AVPicture));
  2033. return -1;
  2034. }
/* Free a picture allocated by avpicture_alloc(); all plane pointers
   share the single buffer anchored at data[0]. */
void avpicture_free(AVPicture *picture)
{
    av_free(picture->data[0]);
}
  2039. /* return true if yuv planar */
  2040. static inline int is_yuv_planar(const PixFmtInfo *ps)
  2041. {
  2042. return (ps->color_type == FF_COLOR_YUV ||
  2043. ps->color_type == FF_COLOR_YUV_JPEG) &&
  2044. ps->pixel_type == FF_PIXEL_PLANAR;
  2045. }
  2046. int av_picture_crop(AVPicture *dst, const AVPicture *src,
  2047. int pix_fmt, int top_band, int left_band)
  2048. {
  2049. int y_shift;
  2050. int x_shift;
  2051. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || !is_yuv_planar(&pix_fmt_info[pix_fmt]))
  2052. return -1;
  2053. y_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  2054. x_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  2055. dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
  2056. dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift);
  2057. dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift);
  2058. dst->linesize[0] = src->linesize[0];
  2059. dst->linesize[1] = src->linesize[1];
  2060. dst->linesize[2] = src->linesize[2];
  2061. return 0;
  2062. }
/* Copy src into dst adding a border filled with 'color' (one value per
   plane). width/height are the OUTPUT dimensions; src may be NULL, in
   which case only the border areas are painted. Only planar YUV formats
   are supported; returns 0 on success, -1 otherwise. */
int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
                   int pix_fmt, int padtop, int padbottom, int padleft, int padright,
                   int *color)
{
    uint8_t *optr;
    int y_shift;
    int x_shift;
    int yheight;
    int i, y;

    if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB ||
        !is_yuv_planar(&pix_fmt_info[pix_fmt])) return -1;
    for (i = 0; i < 3; i++) {
        /* chroma planes are subsampled; the luma plane (i == 0) is not */
        x_shift = i ? pix_fmt_info[pix_fmt].x_chroma_shift : 0;
        y_shift = i ? pix_fmt_info[pix_fmt].y_chroma_shift : 0;
        if (padtop || padleft) {
            /* top border plus the left border of the first image line,
               painted as one contiguous run */
            memset(dst->data[i], color[i],
                   dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift));
        }
        if (padleft || padright) {
            /* each run covers the right border of line n together with
               the left border of line n+1 */
            optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
                (dst->linesize[i] - (padright >> x_shift));
            yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
            for (y = 0; y < yheight; y++) {
                memset(optr, color[i], (padleft + padright) >> x_shift);
                optr += dst->linesize[i];
            }
        }
        if (src) { /* first line */
            uint8_t *iptr = src->data[i];
            optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
                (padleft >> x_shift);
            memcpy(optr, iptr, src->linesize[i]);
            iptr += src->linesize[i];
            /* remaining lines: border memset followed by the image copy */
            optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
                (dst->linesize[i] - (padright >> x_shift));
            yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
            for (y = 0; y < yheight; y++) {
                memset(optr, color[i], (padleft + padright) >> x_shift);
                memcpy(optr + ((padleft + padright) >> x_shift), iptr,
                       src->linesize[i]);
                iptr += src->linesize[i];
                optr += dst->linesize[i];
            }
        }
        if (padbottom || padright) {
            /* right border of the last image line plus the whole bottom
               border, painted as one contiguous run */
            optr = dst->data[i] + dst->linesize[i] *
                ((height - padbottom) >> y_shift) - (padright >> x_shift);
            memset(optr, color[i],dst->linesize[i] *
                (padbottom >> y_shift) + (padright >> x_shift));
        }
    }
    return 0;
}
#if LIBAVCODEC_VERSION_INT < ((52<<16)+(0<<8)+0)
/* Deprecated pre-lavc-52 entry points kept for ABI compatibility; they
   simply forward to the av_picture_* replacements. */
void img_copy(AVPicture *dst, const AVPicture *src,
              int pix_fmt, int width, int height)
{
    av_picture_copy(dst, src, pix_fmt, width, height);
}

int img_crop(AVPicture *dst, const AVPicture *src,
             int pix_fmt, int top_band, int left_band)
{
    return av_picture_crop(dst, src, pix_fmt, top_band, left_band);
}

int img_pad(AVPicture *dst, const AVPicture *src, int height, int width,
            int pix_fmt, int padtop, int padbottom, int padleft, int padright,
            int *color)
{
    return av_picture_pad(dst, src, height, width, pix_fmt, padtop, padbottom, padleft, padright, color);
}
#endif
  2134. #ifndef CONFIG_SWSCALER
/* XXX: always use linesize. Return -1 if not supported */
/**
 * Convert an image between pixel formats.
 *
 * Conversion strategy, in order of preference:
 *   1. identical formats: plain copy;
 *   2. a direct routine registered in convert_table[src][dst];
 *   3. built-in special cases (gray<->planar YUV, planar YUV<->planar YUV
 *      with supported chroma subsampling ratios);
 *   4. otherwise, a recursive two-step conversion through an intermediate
 *      normalized format (YUV422P/YUV411P/GRAY8/YUV[J]444P/RGB24/RGB32).
 *
 * @return 0 on success, -1 if the conversion is not supported or an
 *         intermediate picture cannot be allocated.
 */
int img_convert(AVPicture *dst, int dst_pix_fmt,
                const AVPicture *src, int src_pix_fmt,
                int src_width, int src_height)
{
    static int inited;
    int i, ret, dst_width, dst_height, int_pix_fmt;
    const PixFmtInfo *src_pix, *dst_pix;
    const ConvertEntry *ce;
    AVPicture tmp1, *tmp = &tmp1;

    /* reject out-of-range format ids before indexing the info tables */
    if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
        dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
        return -1;
    /* empty picture: nothing to do */
    if (src_width <= 0 || src_height <= 0)
        return 0;

    /* lazy one-time setup of convert_table
       (NOTE(review): not thread-safe — benign only if first call is
       single-threaded; verify against callers) */
    if (!inited) {
        inited = 1;
        img_convert_init();
    }

    /* no scaling here: destination has the source dimensions */
    dst_width = src_width;
    dst_height = src_height;

    dst_pix = &pix_fmt_info[dst_pix_fmt];
    src_pix = &pix_fmt_info[src_pix_fmt];
    if (src_pix_fmt == dst_pix_fmt) {
        /* no conversion needed: just copy */
        av_picture_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
        return 0;
    }

    ce = &convert_table[src_pix_fmt][dst_pix_fmt];
    if (ce->convert) {
        /* specific conversion routine */
        ce->convert(dst, src, dst_width, dst_height);
        return 0;
    }

    /* gray to YUV: copy (or range-expand) luma, set chroma to neutral */
    if (is_yuv_planar(dst_pix) &&
        src_pix_fmt == PIX_FMT_GRAY8) {
        int w, h, y;
        uint8_t *d;

        if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
            /* both full range: straight luma copy */
            ff_img_copy_plane(dst->data[0], dst->linesize[0],
                              src->data[0], src->linesize[0],
                              dst_width, dst_height);
        } else {
            /* full range gray -> limited (CCIR) range luma */
            img_apply_table(dst->data[0], dst->linesize[0],
                            src->data[0], src->linesize[0],
                            dst_width, dst_height,
                            y_jpeg_to_ccir);
        }
        /* fill U and V with 128 */
        w = dst_width;
        h = dst_height;
        w >>= dst_pix->x_chroma_shift;
        h >>= dst_pix->y_chroma_shift;
        for(i = 1; i <= 2; i++) {
            d = dst->data[i];
            for(y = 0; y< h; y++) {
                memset(d, 128, w);
                d += dst->linesize[i];
            }
        }
        return 0;
    }

    /* YUV to gray: keep only the luma plane, adjusting range if needed */
    if (is_yuv_planar(src_pix) &&
        dst_pix_fmt == PIX_FMT_GRAY8) {
        if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
            ff_img_copy_plane(dst->data[0], dst->linesize[0],
                              src->data[0], src->linesize[0],
                              dst_width, dst_height);
        } else {
            /* limited (CCIR) range luma -> full range gray */
            img_apply_table(dst->data[0], dst->linesize[0],
                            src->data[0], src->linesize[0],
                            dst_width, dst_height,
                            y_ccir_to_jpeg);
        }
        return 0;
    }

    /* YUV to YUV planar: resample the chroma planes */
    if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
        int x_shift, y_shift, w, h, xy_shift;
        void (*resize_func)(uint8_t *dst, int dst_wrap,
                            const uint8_t *src, int src_wrap,
                            int width, int height);

        /* compute chroma size of the smallest dimensions
           (NOTE(review): w/h computed here are never read afterwards —
           the resize below uses the destination chroma size instead;
           looks like dead code, confirm before removing) */
        w = dst_width;
        h = dst_height;
        if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
            w >>= dst_pix->x_chroma_shift;
        else
            w >>= src_pix->x_chroma_shift;
        if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
            h >>= dst_pix->y_chroma_shift;
        else
            h >>= src_pix->y_chroma_shift;

        /* encode the signed h/v subsampling deltas into one nibble pair
           (negative deltas wrap: -1 -> 0xf, -2 -> 0xe) */
        x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
        y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
        xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
        /* there must be filters for conversion at least from and to
           YUV444 format */
        switch(xy_shift) {
        case 0x00:
            resize_func = ff_img_copy_plane;
            break;
        case 0x10:
            resize_func = shrink21;
            break;
        case 0x20:
            resize_func = shrink41;
            break;
        case 0x01:
            resize_func = shrink12;
            break;
        case 0x11:
            resize_func = ff_shrink22;
            break;
        case 0x22:
            resize_func = ff_shrink44;
            break;
        case 0xf0:
            resize_func = grow21;
            break;
        case 0x0f:
            resize_func = grow12;
            break;
        case 0xe0:
            resize_func = grow41;
            break;
        case 0xff:
            resize_func = grow22;
            break;
        case 0xee:
            resize_func = grow44;
            break;
        case 0xf1:
            resize_func = conv411;
            break;
        default:
            /* currently not handled */
            goto no_chroma_filter;
        }

        /* luma is copied unchanged; chroma planes are resampled */
        ff_img_copy_plane(dst->data[0], dst->linesize[0],
                          src->data[0], src->linesize[0],
                          dst_width, dst_height);
        for(i = 1;i <= 2; i++)
            resize_func(dst->data[i], dst->linesize[i],
                        src->data[i], src->linesize[i],
                        dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
        /* if yuv color space conversion is needed, we do it here on
           the destination image */
        if (dst_pix->color_type != src_pix->color_type) {
            const uint8_t *y_table, *c_table;
            if (dst_pix->color_type == FF_COLOR_YUV) {
                /* full (JPEG) range -> limited (CCIR) range */
                y_table = y_jpeg_to_ccir;
                c_table = c_jpeg_to_ccir;
            } else {
                /* limited (CCIR) range -> full (JPEG) range */
                y_table = y_ccir_to_jpeg;
                c_table = c_ccir_to_jpeg;
            }
            /* in-place table lookup on the already-converted planes */
            img_apply_table(dst->data[0], dst->linesize[0],
                            dst->data[0], dst->linesize[0],
                            dst_width, dst_height,
                            y_table);
            for(i = 1;i <= 2; i++)
                img_apply_table(dst->data[i], dst->linesize[i],
                                dst->data[i], dst->linesize[i],
                                dst_width>>dst_pix->x_chroma_shift,
                                dst_height>>dst_pix->y_chroma_shift,
                                c_table);
        }
        return 0;
    }
no_chroma_filter:
    /* try to use an intermediate format */
    if (src_pix_fmt == PIX_FMT_YUYV422 ||
        dst_pix_fmt == PIX_FMT_YUYV422) {
        /* specific case: convert to YUV422P first */
        int_pix_fmt = PIX_FMT_YUV422P;
    } else if (src_pix_fmt == PIX_FMT_UYVY422 ||
               dst_pix_fmt == PIX_FMT_UYVY422) {
        /* specific case: convert to YUV422P first */
        int_pix_fmt = PIX_FMT_YUV422P;
    } else if (src_pix_fmt == PIX_FMT_UYYVYY411 ||
               dst_pix_fmt == PIX_FMT_UYYVYY411) {
        /* specific case: convert to YUV411P first */
        int_pix_fmt = PIX_FMT_YUV411P;
    } else if ((src_pix->color_type == FF_COLOR_GRAY &&
                src_pix_fmt != PIX_FMT_GRAY8) ||
               (dst_pix->color_type == FF_COLOR_GRAY &&
                dst_pix_fmt != PIX_FMT_GRAY8)) {
        /* gray8 is the normalized format */
        int_pix_fmt = PIX_FMT_GRAY8;
    } else if ((is_yuv_planar(src_pix) &&
                src_pix_fmt != PIX_FMT_YUV444P &&
                src_pix_fmt != PIX_FMT_YUVJ444P)) {
        /* yuv444 is the normalized format */
        if (src_pix->color_type == FF_COLOR_YUV_JPEG)
            int_pix_fmt = PIX_FMT_YUVJ444P;
        else
            int_pix_fmt = PIX_FMT_YUV444P;
    } else if ((is_yuv_planar(dst_pix) &&
                dst_pix_fmt != PIX_FMT_YUV444P &&
                dst_pix_fmt != PIX_FMT_YUVJ444P)) {
        /* yuv444 is the normalized format */
        if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
            int_pix_fmt = PIX_FMT_YUVJ444P;
        else
            int_pix_fmt = PIX_FMT_YUV444P;
    } else {
        /* the two formats are rgb or gray8 or yuv[j]444p */
        if (src_pix->is_alpha && dst_pix->is_alpha)
            int_pix_fmt = PIX_FMT_RGB32;
        else
            int_pix_fmt = PIX_FMT_RGB24;
    }
    /* avoid infinite recursion: the intermediate step must differ
       from the source format */
    if (src_pix_fmt == int_pix_fmt)
        return -1;
    if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
        return -1;
    ret = -1;
    /* two recursive conversions through the intermediate picture */
    if (img_convert(tmp, int_pix_fmt,
                    src, src_pix_fmt, src_width, src_height) < 0)
        goto fail1;
    if (img_convert(dst, dst_pix_fmt,
                    tmp, int_pix_fmt, dst_width, dst_height) < 0)
        goto fail1;
    ret = 0;
 fail1:
    avpicture_free(tmp);
    return ret;
}
  2366. #endif
  2367. /* NOTE: we scan all the pixels to have an exact information */
  2368. static int get_alpha_info_pal8(const AVPicture *src, int width, int height)
  2369. {
  2370. const unsigned char *p;
  2371. int src_wrap, ret, x, y;
  2372. unsigned int a;
  2373. uint32_t *palette = (uint32_t *)src->data[1];
  2374. p = src->data[0];
  2375. src_wrap = src->linesize[0] - width;
  2376. ret = 0;
  2377. for(y=0;y<height;y++) {
  2378. for(x=0;x<width;x++) {
  2379. a = palette[p[0]] >> 24;
  2380. if (a == 0x00) {
  2381. ret |= FF_ALPHA_TRANSP;
  2382. } else if (a != 0xff) {
  2383. ret |= FF_ALPHA_SEMI_TRANSP;
  2384. }
  2385. p++;
  2386. }
  2387. p += src_wrap;
  2388. }
  2389. return ret;
  2390. }
  2391. int img_get_alpha_info(const AVPicture *src,
  2392. int pix_fmt, int width, int height)
  2393. {
  2394. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  2395. int ret;
  2396. pf = &pix_fmt_info[pix_fmt];
  2397. /* no alpha can be represented in format */
  2398. if (!pf->is_alpha)
  2399. return 0;
  2400. switch(pix_fmt) {
  2401. case PIX_FMT_RGB32:
  2402. ret = get_alpha_info_rgb32(src, width, height);
  2403. break;
  2404. case PIX_FMT_PAL8:
  2405. ret = get_alpha_info_pal8(src, width, height);
  2406. break;
  2407. default:
  2408. /* we do not know, so everything is indicated */
  2409. ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
  2410. break;
  2411. }
  2412. return ret;
  2413. }
#ifdef HAVE_MMX

/* MMX helpers for the [-1 4 2 4 -1] / 8 vertical deinterlacing filter.
   Both macros process 4 luma pixels per expansion (movd loads 4 bytes),
   widen them to 16 bits against the zeroed mm7, accumulate the weighted
   taps with the rounding constant pre-loaded in mm6, then pack the
   result back to 8 bits.  Callers must set mm7 = 0 and mm6 = rounder
   beforehand (see deinterlace_line / deinterlace_line_inplace). */

/* In-place variant: additionally saves the pre-filter lum_m2 line into
   lum_m4 (the caller's rolling history buffer) before overwriting
   lum_m2 with the filtered output. */
#define DEINT_INPLACE_LINE_LUM \
                    movd_m2r(lum_m4[0],mm0);\
                    movd_m2r(lum_m3[0],mm1);\
                    movd_m2r(lum_m2[0],mm2);\
                    movd_m2r(lum_m1[0],mm3);\
                    movd_m2r(lum[0],mm4);\
                    punpcklbw_r2r(mm7,mm0);\
                    movd_r2m(mm2,lum_m4[0]);\
                    punpcklbw_r2r(mm7,mm1);\
                    punpcklbw_r2r(mm7,mm2);\
                    punpcklbw_r2r(mm7,mm3);\
                    punpcklbw_r2r(mm7,mm4);\
                    paddw_r2r(mm3,mm1);\
                    psllw_i2r(1,mm2);\
                    paddw_r2r(mm4,mm0);\
                    psllw_i2r(2,mm1);\
                    paddw_r2r(mm6,mm2);\
                    paddw_r2r(mm2,mm1);\
                    psubusw_r2r(mm0,mm1);\
                    psrlw_i2r(3,mm1);\
                    packuswb_r2r(mm7,mm1);\
                    movd_r2m(mm1,lum_m2[0]);

/* Out-of-place variant: writes the filtered 4 pixels to dst[0]. */
#define DEINT_LINE_LUM \
                    movd_m2r(lum_m4[0],mm0);\
                    movd_m2r(lum_m3[0],mm1);\
                    movd_m2r(lum_m2[0],mm2);\
                    movd_m2r(lum_m1[0],mm3);\
                    movd_m2r(lum[0],mm4);\
                    punpcklbw_r2r(mm7,mm0);\
                    punpcklbw_r2r(mm7,mm1);\
                    punpcklbw_r2r(mm7,mm2);\
                    punpcklbw_r2r(mm7,mm3);\
                    punpcklbw_r2r(mm7,mm4);\
                    paddw_r2r(mm3,mm1);\
                    psllw_i2r(1,mm2);\
                    paddw_r2r(mm4,mm0);\
                    psllw_i2r(2,mm1);\
                    paddw_r2r(mm6,mm2);\
                    paddw_r2r(mm2,mm1);\
                    psubusw_r2r(mm0,mm1);\
                    psrlw_i2r(3,mm1);\
                    packuswb_r2r(mm7,mm1);\
                    movd_r2m(mm1,dst[0]);
#endif
/* filter parameters: [-1 4 2 4 -1] // 8 */
/* Filter one output line from five consecutive input lines using the
   weights above; results are rounded (+4) and clamped to [0,255] via
   the crop table (scalar path) or unsigned saturation (MMX path).
   NOTE(review): the MMX path only handles size in multiples of 4 —
   callers guarantee this via the (width & 3) check in
   avpicture_deinterlace; confirm if reused elsewhere. */
static void deinterlace_line(uint8_t *dst,
                             const uint8_t *lum_m4, const uint8_t *lum_m3,
                             const uint8_t *lum_m2, const uint8_t *lum_m1,
                             const uint8_t *lum,
                             int size)
{
#ifndef HAVE_MMX
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    int sum;

    for(;size > 0;size--) {
        /* weighted 5-tap sum: -1, +4, +2, +4, -1 */
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        /* round and divide by 8, clamping through the crop table */
        dst[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
        dst++;
    }
#else
    {
        /* load the per-word rounding constant (4) into mm6, zero mm7 */
        mmx_t rounder;
        rounder.uw[0]=4;
        rounder.uw[1]=4;
        rounder.uw[2]=4;
        rounder.uw[3]=4;
        pxor_r2r(mm7,mm7);
        movq_m2r(rounder,mm6);
    }
    for (;size > 3; size-=4) {
        DEINT_LINE_LUM
        lum_m4+=4;
        lum_m3+=4;
        lum_m2+=4;
        lum_m1+=4;
        lum+=4;
        dst+=4;
    }
#endif
}
/* In-place variant of deinterlace_line: writes the filtered result into
   lum_m2 and, before overwriting it, saves the original lum_m2 pixels
   into lum_m4 so the caller can keep a rolling two-line history.
   Same [-1 4 2 4 -1]/8 filter and same multiple-of-4 size requirement
   on the MMX path as deinterlace_line. */
static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
                                     int size)
{
#ifndef HAVE_MMX
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    int sum;

    for(;size > 0;size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        /* preserve the pre-filter lum_m2 pixel in the history buffer
           before it is overwritten below */
        lum_m4[0]=lum_m2[0];
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        lum_m2[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
    }
#else
    {
        /* load the per-word rounding constant (4) into mm6, zero mm7 */
        mmx_t rounder;
        rounder.uw[0]=4;
        rounder.uw[1]=4;
        rounder.uw[2]=4;
        rounder.uw[3]=4;
        pxor_r2r(mm7,mm7);
        movq_m2r(rounder,mm6);
    }
    for (;size > 3; size-=4) {
        DEINT_INPLACE_LINE_LUM
        lum_m4+=4;
        lum_m3+=4;
        lum_m2+=4;
        lum_m1+=4;
        lum+=4;
    }
#endif
}
/* deinterlacing : 2 temporal taps, 3 spatial taps linear filter. The
   top field is copied as is, but the bottom field is deinterlaced
   against the top field. */
/* Out-of-place plane deinterlacer: each even (top-field) line of src1 is
   copied to dst verbatim; each odd (bottom-field) line is replaced by
   the 5-tap filtered combination of surrounding lines.  The five rolling
   pointers (src_m2..src_p2) track the filter window down the plane; at
   the top and bottom edges the window is clamped by pointer duplication.
   NOTE(review): assumes height >= 2 and even — guaranteed by the
   (height & 3) check in avpicture_deinterlace. */
static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
                                     const uint8_t *src1, int src_wrap,
                                     int width, int height)
{
    const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
    int y;

    /* clamp the window at the top edge: lines -2 and -1 alias line 0 */
    src_m2 = src1;
    src_m1 = src1;
    src_0=&src_m1[src_wrap];
    src_p1=&src_0[src_wrap];
    src_p2=&src_p1[src_wrap];
    for(y=0;y<(height-2);y+=2) {
        /* top-field line copied untouched */
        memcpy(dst,src_m1,width);
        dst += dst_wrap;
        /* bottom-field line filtered from the 5-line window */
        deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
        /* slide the window down two source lines */
        src_m2 = src_0;
        src_m1 = src_p1;
        src_0 = src_p2;
        src_p1 += 2*src_wrap;
        src_p2 += 2*src_wrap;
        dst += dst_wrap;
    }
    memcpy(dst,src_m1,width);
    dst += dst_wrap;
    /* do last line: clamp the window at the bottom edge */
    deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
}
  2574. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  2575. int width, int height)
  2576. {
  2577. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  2578. int y;
  2579. uint8_t *buf;
  2580. buf = (uint8_t*)av_malloc(width);
  2581. src_m1 = src1;
  2582. memcpy(buf,src_m1,width);
  2583. src_0=&src_m1[src_wrap];
  2584. src_p1=&src_0[src_wrap];
  2585. src_p2=&src_p1[src_wrap];
  2586. for(y=0;y<(height-2);y+=2) {
  2587. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  2588. src_m1 = src_p1;
  2589. src_0 = src_p2;
  2590. src_p1 += 2*src_wrap;
  2591. src_p2 += 2*src_wrap;
  2592. }
  2593. /* do last line */
  2594. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  2595. av_free(buf);
  2596. }
/**
 * Deinterlace a picture, plane by plane, either in place (src == dst)
 * or into a separate destination.
 *
 * Only planar YUV 420/422/444/411 and GRAY8 are supported, and both
 * dimensions must be multiples of 4 (the MMX line filter processes
 * 4 pixels at a time).
 *
 * @return 0 on success, -1 on unsupported format or dimensions.
 */
int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
                          int pix_fmt, int width, int height)
{
    int i;

    if (pix_fmt != PIX_FMT_YUV420P &&
        pix_fmt != PIX_FMT_YUV422P &&
        pix_fmt != PIX_FMT_YUV444P &&
        pix_fmt != PIX_FMT_YUV411P &&
        pix_fmt != PIX_FMT_GRAY8)
        return -1;
    if ((width & 3) != 0 || (height & 3) != 0)
        return -1;

    for(i=0;i<3;i++) {
        if (i == 1) {
            /* entering the chroma planes: shrink width/height to the
               format's chroma plane size (YUV444P needs no change) */
            switch(pix_fmt) {
            case PIX_FMT_YUV420P:
                width >>= 1;
                height >>= 1;
                break;
            case PIX_FMT_YUV422P:
                width >>= 1;
                break;
            case PIX_FMT_YUV411P:
                width >>= 2;
                break;
            default:
                break;
            }
            /* gray has a single plane: stop after the luma pass */
            if (pix_fmt == PIX_FMT_GRAY8) {
                break;
            }
        }
        if (src == dst) {
            deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i],
                                             width, height);
        } else {
            deinterlace_bottom_field(dst->data[i],dst->linesize[i],
                                     src->data[i], src->linesize[i],
                                     width, height);
        }
    }
    /* clear MMX state for subsequent FPU code */
    emms_c();
    return 0;
}