  1. /*
  2. * Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file libavcodec/imgconvert.c
  23. * misc image conversion routines
  24. */
  25. /* TODO:
  26. * - write 'ffimg' program to test all the image related stuff
  27. * - move all api to slice based system
  28. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  29. */
  30. #include "avcodec.h"
  31. #include "dsputil.h"
  32. #include "colorspace.h"
  33. #if HAVE_MMX
  34. #include "x86/mmx.h"
  35. #include "x86/dsputil_mmx.h"
  36. #endif
  37. #define xglue(x, y) x ## y
  38. #define glue(x, y) xglue(x, y)
  39. #define FF_COLOR_RGB 0 /**< RGB color space */
  40. #define FF_COLOR_GRAY 1 /**< gray color space */
  41. #define FF_COLOR_YUV 2 /**< YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
  42. #define FF_COLOR_YUV_JPEG 3 /**< YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
  43. #define FF_PIXEL_PLANAR 0 /**< each channel has one component in AVPicture */
44. #define FF_PIXEL_PACKED 1 /**< only one component containing all the channels */
45. #define FF_PIXEL_PALETTE 2 /**< one component containing indexes into a palette */
  46. typedef struct PixFmtInfo {
  47. const char *name;
  48. uint8_t nb_channels; /**< number of channels (including alpha) */
  49. uint8_t color_type; /**< color type (see FF_COLOR_xxx constants) */
  50. uint8_t pixel_type; /**< pixel storage type (see FF_PIXEL_xxx constants) */
  51. uint8_t is_alpha : 1; /**< true if alpha can be specified */
  52. uint8_t x_chroma_shift; /**< X chroma subsampling factor is 2 ^ shift */
  53. uint8_t y_chroma_shift; /**< Y chroma subsampling factor is 2 ^ shift */
  54. uint8_t depth; /**< bit depth of the color components */
  55. } PixFmtInfo;
  56. /* this table gives more information about formats */
  57. static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
  58. /* YUV formats */
  59. [PIX_FMT_YUV420P] = {
  60. .name = "yuv420p",
  61. .nb_channels = 3,
  62. .color_type = FF_COLOR_YUV,
  63. .pixel_type = FF_PIXEL_PLANAR,
  64. .depth = 8,
  65. .x_chroma_shift = 1, .y_chroma_shift = 1,
  66. },
  67. [PIX_FMT_YUV422P] = {
  68. .name = "yuv422p",
  69. .nb_channels = 3,
  70. .color_type = FF_COLOR_YUV,
  71. .pixel_type = FF_PIXEL_PLANAR,
  72. .depth = 8,
  73. .x_chroma_shift = 1, .y_chroma_shift = 0,
  74. },
  75. [PIX_FMT_YUV444P] = {
  76. .name = "yuv444p",
  77. .nb_channels = 3,
  78. .color_type = FF_COLOR_YUV,
  79. .pixel_type = FF_PIXEL_PLANAR,
  80. .depth = 8,
  81. .x_chroma_shift = 0, .y_chroma_shift = 0,
  82. },
  83. [PIX_FMT_YUYV422] = {
  84. .name = "yuyv422",
  85. .nb_channels = 1,
  86. .color_type = FF_COLOR_YUV,
  87. .pixel_type = FF_PIXEL_PACKED,
  88. .depth = 8,
  89. .x_chroma_shift = 1, .y_chroma_shift = 0,
  90. },
  91. [PIX_FMT_UYVY422] = {
  92. .name = "uyvy422",
  93. .nb_channels = 1,
  94. .color_type = FF_COLOR_YUV,
  95. .pixel_type = FF_PIXEL_PACKED,
  96. .depth = 8,
  97. .x_chroma_shift = 1, .y_chroma_shift = 0,
  98. },
  99. [PIX_FMT_YUV410P] = {
  100. .name = "yuv410p",
  101. .nb_channels = 3,
  102. .color_type = FF_COLOR_YUV,
  103. .pixel_type = FF_PIXEL_PLANAR,
  104. .depth = 8,
  105. .x_chroma_shift = 2, .y_chroma_shift = 2,
  106. },
  107. [PIX_FMT_YUV411P] = {
  108. .name = "yuv411p",
  109. .nb_channels = 3,
  110. .color_type = FF_COLOR_YUV,
  111. .pixel_type = FF_PIXEL_PLANAR,
  112. .depth = 8,
  113. .x_chroma_shift = 2, .y_chroma_shift = 0,
  114. },
  115. [PIX_FMT_YUV440P] = {
  116. .name = "yuv440p",
  117. .nb_channels = 3,
  118. .color_type = FF_COLOR_YUV,
  119. .pixel_type = FF_PIXEL_PLANAR,
  120. .depth = 8,
  121. .x_chroma_shift = 0, .y_chroma_shift = 1,
  122. },
  123. /* YUV formats with alpha plane */
  124. [PIX_FMT_YUVA420P] = {
  125. .name = "yuva420p",
  126. .nb_channels = 4,
  127. .color_type = FF_COLOR_YUV,
  128. .pixel_type = FF_PIXEL_PLANAR,
  129. .depth = 8,
  130. .x_chroma_shift = 1, .y_chroma_shift = 1,
  131. },
  132. /* JPEG YUV */
  133. [PIX_FMT_YUVJ420P] = {
  134. .name = "yuvj420p",
  135. .nb_channels = 3,
  136. .color_type = FF_COLOR_YUV_JPEG,
  137. .pixel_type = FF_PIXEL_PLANAR,
  138. .depth = 8,
  139. .x_chroma_shift = 1, .y_chroma_shift = 1,
  140. },
  141. [PIX_FMT_YUVJ422P] = {
  142. .name = "yuvj422p",
  143. .nb_channels = 3,
  144. .color_type = FF_COLOR_YUV_JPEG,
  145. .pixel_type = FF_PIXEL_PLANAR,
  146. .depth = 8,
  147. .x_chroma_shift = 1, .y_chroma_shift = 0,
  148. },
  149. [PIX_FMT_YUVJ444P] = {
  150. .name = "yuvj444p",
  151. .nb_channels = 3,
  152. .color_type = FF_COLOR_YUV_JPEG,
  153. .pixel_type = FF_PIXEL_PLANAR,
  154. .depth = 8,
  155. .x_chroma_shift = 0, .y_chroma_shift = 0,
  156. },
  157. [PIX_FMT_YUVJ440P] = {
  158. .name = "yuvj440p",
  159. .nb_channels = 3,
  160. .color_type = FF_COLOR_YUV_JPEG,
  161. .pixel_type = FF_PIXEL_PLANAR,
  162. .depth = 8,
  163. .x_chroma_shift = 0, .y_chroma_shift = 1,
  164. },
  165. /* RGB formats */
  166. [PIX_FMT_RGB24] = {
  167. .name = "rgb24",
  168. .nb_channels = 3,
  169. .color_type = FF_COLOR_RGB,
  170. .pixel_type = FF_PIXEL_PACKED,
  171. .depth = 8,
  172. .x_chroma_shift = 0, .y_chroma_shift = 0,
  173. },
  174. [PIX_FMT_BGR24] = {
  175. .name = "bgr24",
  176. .nb_channels = 3,
  177. .color_type = FF_COLOR_RGB,
  178. .pixel_type = FF_PIXEL_PACKED,
  179. .depth = 8,
  180. .x_chroma_shift = 0, .y_chroma_shift = 0,
  181. },
  182. [PIX_FMT_RGB32] = {
  183. .name = "rgb32",
  184. .nb_channels = 4, .is_alpha = 1,
  185. .color_type = FF_COLOR_RGB,
  186. .pixel_type = FF_PIXEL_PACKED,
  187. .depth = 8,
  188. .x_chroma_shift = 0, .y_chroma_shift = 0,
  189. },
  190. [PIX_FMT_RGB48BE] = {
  191. .name = "rgb48be",
  192. .nb_channels = 3,
  193. .color_type = FF_COLOR_RGB,
  194. .pixel_type = FF_PIXEL_PACKED,
  195. .depth = 16,
  196. .x_chroma_shift = 0, .y_chroma_shift = 0,
  197. },
  198. [PIX_FMT_RGB48LE] = {
  199. .name = "rgb48le",
  200. .nb_channels = 3,
  201. .color_type = FF_COLOR_RGB,
  202. .pixel_type = FF_PIXEL_PACKED,
  203. .depth = 16,
  204. .x_chroma_shift = 0, .y_chroma_shift = 0,
  205. },
  206. [PIX_FMT_RGB565] = {
  207. .name = "rgb565",
  208. .nb_channels = 3,
  209. .color_type = FF_COLOR_RGB,
  210. .pixel_type = FF_PIXEL_PACKED,
  211. .depth = 5,
  212. .x_chroma_shift = 0, .y_chroma_shift = 0,
  213. },
  214. [PIX_FMT_RGB555] = {
  215. .name = "rgb555",
  216. .nb_channels = 3,
  217. .color_type = FF_COLOR_RGB,
  218. .pixel_type = FF_PIXEL_PACKED,
  219. .depth = 5,
  220. .x_chroma_shift = 0, .y_chroma_shift = 0,
  221. },
  222. /* gray / mono formats */
  223. [PIX_FMT_GRAY16BE] = {
  224. .name = "gray16be",
  225. .nb_channels = 1,
  226. .color_type = FF_COLOR_GRAY,
  227. .pixel_type = FF_PIXEL_PLANAR,
  228. .depth = 16,
  229. },
  230. [PIX_FMT_GRAY16LE] = {
  231. .name = "gray16le",
  232. .nb_channels = 1,
  233. .color_type = FF_COLOR_GRAY,
  234. .pixel_type = FF_PIXEL_PLANAR,
  235. .depth = 16,
  236. },
  237. [PIX_FMT_GRAY8] = {
  238. .name = "gray",
  239. .nb_channels = 1,
  240. .color_type = FF_COLOR_GRAY,
  241. .pixel_type = FF_PIXEL_PLANAR,
  242. .depth = 8,
  243. },
  244. [PIX_FMT_MONOWHITE] = {
  245. .name = "monow",
  246. .nb_channels = 1,
  247. .color_type = FF_COLOR_GRAY,
  248. .pixel_type = FF_PIXEL_PLANAR,
  249. .depth = 1,
  250. },
  251. [PIX_FMT_MONOBLACK] = {
  252. .name = "monob",
  253. .nb_channels = 1,
  254. .color_type = FF_COLOR_GRAY,
  255. .pixel_type = FF_PIXEL_PLANAR,
  256. .depth = 1,
  257. },
  258. /* paletted formats */
  259. [PIX_FMT_PAL8] = {
  260. .name = "pal8",
  261. .nb_channels = 4, .is_alpha = 1,
  262. .color_type = FF_COLOR_RGB,
  263. .pixel_type = FF_PIXEL_PALETTE,
  264. .depth = 8,
  265. },
  266. [PIX_FMT_XVMC_MPEG2_MC] = {
  267. .name = "xvmcmc",
  268. },
  269. [PIX_FMT_XVMC_MPEG2_IDCT] = {
  270. .name = "xvmcidct",
  271. },
  272. [PIX_FMT_VDPAU_MPEG1] = {
  273. .name = "vdpau_mpeg1",
  274. },
  275. [PIX_FMT_VDPAU_MPEG2] = {
  276. .name = "vdpau_mpeg2",
  277. },
  278. [PIX_FMT_VDPAU_H264] = {
  279. .name = "vdpau_h264",
  280. },
  281. [PIX_FMT_VDPAU_WMV3] = {
  282. .name = "vdpau_wmv3",
  283. },
  284. [PIX_FMT_VDPAU_VC1] = {
  285. .name = "vdpau_vc1",
  286. },
  287. [PIX_FMT_UYYVYY411] = {
  288. .name = "uyyvyy411",
  289. .nb_channels = 1,
  290. .color_type = FF_COLOR_YUV,
  291. .pixel_type = FF_PIXEL_PACKED,
  292. .depth = 8,
  293. .x_chroma_shift = 2, .y_chroma_shift = 0,
  294. },
  295. [PIX_FMT_BGR32] = {
  296. .name = "bgr32",
  297. .nb_channels = 4, .is_alpha = 1,
  298. .color_type = FF_COLOR_RGB,
  299. .pixel_type = FF_PIXEL_PACKED,
  300. .depth = 8,
  301. .x_chroma_shift = 0, .y_chroma_shift = 0,
  302. },
  303. [PIX_FMT_BGR565] = {
  304. .name = "bgr565",
  305. .nb_channels = 3,
  306. .color_type = FF_COLOR_RGB,
  307. .pixel_type = FF_PIXEL_PACKED,
  308. .depth = 5,
  309. .x_chroma_shift = 0, .y_chroma_shift = 0,
  310. },
  311. [PIX_FMT_BGR555] = {
  312. .name = "bgr555",
  313. .nb_channels = 3,
  314. .color_type = FF_COLOR_RGB,
  315. .pixel_type = FF_PIXEL_PACKED,
  316. .depth = 5,
  317. .x_chroma_shift = 0, .y_chroma_shift = 0,
  318. },
  319. [PIX_FMT_RGB8] = {
  320. .name = "rgb8",
  321. .nb_channels = 1,
  322. .color_type = FF_COLOR_RGB,
  323. .pixel_type = FF_PIXEL_PACKED,
  324. .depth = 8,
  325. .x_chroma_shift = 0, .y_chroma_shift = 0,
  326. },
  327. [PIX_FMT_RGB4] = {
  328. .name = "rgb4",
  329. .nb_channels = 1,
  330. .color_type = FF_COLOR_RGB,
  331. .pixel_type = FF_PIXEL_PACKED,
  332. .depth = 4,
  333. .x_chroma_shift = 0, .y_chroma_shift = 0,
  334. },
  335. [PIX_FMT_RGB4_BYTE] = {
  336. .name = "rgb4_byte",
  337. .nb_channels = 1,
  338. .color_type = FF_COLOR_RGB,
  339. .pixel_type = FF_PIXEL_PACKED,
  340. .depth = 8,
  341. .x_chroma_shift = 0, .y_chroma_shift = 0,
  342. },
  343. [PIX_FMT_BGR8] = {
  344. .name = "bgr8",
  345. .nb_channels = 1,
  346. .color_type = FF_COLOR_RGB,
  347. .pixel_type = FF_PIXEL_PACKED,
  348. .depth = 8,
  349. .x_chroma_shift = 0, .y_chroma_shift = 0,
  350. },
  351. [PIX_FMT_BGR4] = {
  352. .name = "bgr4",
  353. .nb_channels = 1,
  354. .color_type = FF_COLOR_RGB,
  355. .pixel_type = FF_PIXEL_PACKED,
  356. .depth = 4,
  357. .x_chroma_shift = 0, .y_chroma_shift = 0,
  358. },
  359. [PIX_FMT_BGR4_BYTE] = {
  360. .name = "bgr4_byte",
  361. .nb_channels = 1,
  362. .color_type = FF_COLOR_RGB,
  363. .pixel_type = FF_PIXEL_PACKED,
  364. .depth = 8,
  365. .x_chroma_shift = 0, .y_chroma_shift = 0,
  366. },
  367. [PIX_FMT_NV12] = {
  368. .name = "nv12",
  369. .nb_channels = 2,
  370. .color_type = FF_COLOR_YUV,
  371. .pixel_type = FF_PIXEL_PLANAR,
  372. .depth = 8,
  373. .x_chroma_shift = 1, .y_chroma_shift = 1,
  374. },
  375. [PIX_FMT_NV21] = {
376. .name = "nv21",
  377. .nb_channels = 2,
  378. .color_type = FF_COLOR_YUV,
  379. .pixel_type = FF_PIXEL_PLANAR,
  380. .depth = 8,
  381. .x_chroma_shift = 1, .y_chroma_shift = 1,
  382. },
  383. [PIX_FMT_BGR32_1] = {
  384. .name = "bgr32_1",
  385. .nb_channels = 4, .is_alpha = 1,
  386. .color_type = FF_COLOR_RGB,
  387. .pixel_type = FF_PIXEL_PACKED,
  388. .depth = 8,
  389. .x_chroma_shift = 0, .y_chroma_shift = 0,
  390. },
  391. [PIX_FMT_RGB32_1] = {
  392. .name = "rgb32_1",
  393. .nb_channels = 4, .is_alpha = 1,
  394. .color_type = FF_COLOR_RGB,
  395. .pixel_type = FF_PIXEL_PACKED,
  396. .depth = 8,
  397. .x_chroma_shift = 0, .y_chroma_shift = 0,
  398. },
  399. };
  400. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  401. {
  402. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  403. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  404. }
  405. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  406. {
  407. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  408. return NULL;
  409. else
  410. return pix_fmt_info[pix_fmt].name;
  411. }
  412. enum PixelFormat avcodec_get_pix_fmt(const char* name)
  413. {
  414. int i;
  415. for (i=0; i < PIX_FMT_NB; i++)
  416. if (!strcmp(pix_fmt_info[i].name, name))
  417. return i;
  418. return PIX_FMT_NONE;
  419. }
  420. void avcodec_pix_fmt_string (char *buf, int buf_size, int pix_fmt)
  421. {
  422. /* print header */
  423. if (pix_fmt < 0)
  424. snprintf (buf, buf_size,
  425. "name " " nb_channels" " depth" " is_alpha"
  426. );
  427. else{
  428. PixFmtInfo info= pix_fmt_info[pix_fmt];
  429. char is_alpha_char= info.is_alpha ? 'y' : 'n';
  430. snprintf (buf, buf_size,
  431. "%-10s" " %1d " " %2d " " %c ",
  432. info.name,
  433. info.nb_channels,
  434. info.depth,
  435. is_alpha_char
  436. );
  437. }
  438. }
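/* Illustrative usage sketch, kept out of the build with #if 0: how the query
 * helpers above can be combined. Only the public avcodec API is used; the
 * format name and the logging call are example values. */
#if 0
static void example_query_format(void)
{
    int h_shift, v_shift;
    enum PixelFormat fmt = avcodec_get_pix_fmt("yuv420p");
    if (fmt == PIX_FMT_NONE)
        return;
    /* yuv420p halves chroma resolution in both directions: shifts are 1,1 */
    avcodec_get_chroma_sub_sample(fmt, &h_shift, &v_shift);
    av_log(NULL, AV_LOG_INFO, "%s: chroma shift %d,%d\n",
           avcodec_get_pix_fmt_name(fmt), h_shift, v_shift);
}
#endif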
  439. int ff_set_systematic_pal(uint32_t pal[256], enum PixelFormat pix_fmt){
  440. int i;
  441. for(i=0; i<256; i++){
  442. int r,g,b;
  443. switch(pix_fmt) {
  444. case PIX_FMT_RGB8:
  445. r= (i>>5 )*36;
  446. g= ((i>>2)&7)*36;
  447. b= (i&3 )*85;
  448. break;
  449. case PIX_FMT_BGR8:
  450. b= (i>>6 )*85;
  451. g= ((i>>3)&7)*36;
  452. r= (i&7 )*36;
  453. break;
  454. case PIX_FMT_RGB4_BYTE:
  455. r= (i>>3 )*255;
  456. g= ((i>>1)&3)*85;
  457. b= (i&1 )*255;
  458. break;
  459. case PIX_FMT_BGR4_BYTE:
  460. b= (i>>3 )*255;
  461. g= ((i>>1)&3)*85;
  462. r= (i&1 )*255;
  463. break;
  464. case PIX_FMT_GRAY8:
  465. r=b=g= i;
  466. break;
  467. default:
  468. return -1;
  469. }
  470. pal[i] = b + (g<<8) + (r<<16);
  471. }
  472. return 0;
  473. }
  474. int ff_fill_linesize(AVPicture *picture, int pix_fmt, int width)
  475. {
  476. int w2;
  477. const PixFmtInfo *pinfo;
  478. memset(picture->linesize, 0, sizeof(picture->linesize));
  479. pinfo = &pix_fmt_info[pix_fmt];
  480. switch(pix_fmt) {
  481. case PIX_FMT_YUV420P:
  482. case PIX_FMT_YUV422P:
  483. case PIX_FMT_YUV444P:
  484. case PIX_FMT_YUV410P:
  485. case PIX_FMT_YUV411P:
  486. case PIX_FMT_YUV440P:
  487. case PIX_FMT_YUVJ420P:
  488. case PIX_FMT_YUVJ422P:
  489. case PIX_FMT_YUVJ444P:
  490. case PIX_FMT_YUVJ440P:
  491. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  492. picture->linesize[0] = width;
  493. picture->linesize[1] = w2;
  494. picture->linesize[2] = w2;
  495. break;
  496. case PIX_FMT_YUVA420P:
  497. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  498. picture->linesize[0] = width;
  499. picture->linesize[1] = w2;
  500. picture->linesize[2] = w2;
  501. picture->linesize[3] = width;
  502. break;
  503. case PIX_FMT_NV12:
  504. case PIX_FMT_NV21:
  505. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  506. picture->linesize[0] = width;
  507. picture->linesize[1] = w2;
  508. break;
  509. case PIX_FMT_RGB24:
  510. case PIX_FMT_BGR24:
  511. picture->linesize[0] = width * 3;
  512. break;
  513. case PIX_FMT_RGB32:
  514. case PIX_FMT_BGR32:
  515. case PIX_FMT_RGB32_1:
  516. case PIX_FMT_BGR32_1:
  517. picture->linesize[0] = width * 4;
  518. break;
  519. case PIX_FMT_RGB48BE:
  520. case PIX_FMT_RGB48LE:
  521. picture->linesize[0] = width * 6;
  522. break;
  523. case PIX_FMT_GRAY16BE:
  524. case PIX_FMT_GRAY16LE:
  525. case PIX_FMT_BGR555:
  526. case PIX_FMT_BGR565:
  527. case PIX_FMT_RGB555:
  528. case PIX_FMT_RGB565:
  529. case PIX_FMT_YUYV422:
  530. picture->linesize[0] = width * 2;
  531. break;
  532. case PIX_FMT_UYVY422:
  533. picture->linesize[0] = width * 2;
  534. break;
  535. case PIX_FMT_UYYVYY411:
  536. picture->linesize[0] = width + width/2;
  537. break;
  538. case PIX_FMT_RGB4:
  539. case PIX_FMT_BGR4:
  540. picture->linesize[0] = width / 2;
  541. break;
  542. case PIX_FMT_MONOWHITE:
  543. case PIX_FMT_MONOBLACK:
  544. picture->linesize[0] = (width + 7) >> 3;
  545. break;
  546. case PIX_FMT_PAL8:
  547. case PIX_FMT_RGB8:
  548. case PIX_FMT_BGR8:
  549. case PIX_FMT_RGB4_BYTE:
  550. case PIX_FMT_BGR4_BYTE:
  551. case PIX_FMT_GRAY8:
  552. picture->linesize[0] = width;
  553. picture->linesize[1] = 4;
  554. break;
  555. default:
  556. return -1;
  557. }
  558. return 0;
  559. }
  560. int ff_fill_pointer(AVPicture *picture, uint8_t *ptr, int pix_fmt,
  561. int height)
  562. {
  563. int size, h2, size2;
  564. const PixFmtInfo *pinfo;
  565. pinfo = &pix_fmt_info[pix_fmt];
  566. size = picture->linesize[0] * height;
  567. switch(pix_fmt) {
  568. case PIX_FMT_YUV420P:
  569. case PIX_FMT_YUV422P:
  570. case PIX_FMT_YUV444P:
  571. case PIX_FMT_YUV410P:
  572. case PIX_FMT_YUV411P:
  573. case PIX_FMT_YUV440P:
  574. case PIX_FMT_YUVJ420P:
  575. case PIX_FMT_YUVJ422P:
  576. case PIX_FMT_YUVJ444P:
  577. case PIX_FMT_YUVJ440P:
  578. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  579. size2 = picture->linesize[1] * h2;
  580. picture->data[0] = ptr;
  581. picture->data[1] = picture->data[0] + size;
  582. picture->data[2] = picture->data[1] + size2;
  583. picture->data[3] = NULL;
  584. return size + 2 * size2;
  585. case PIX_FMT_YUVA420P:
  586. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  587. size2 = picture->linesize[1] * h2;
  588. picture->data[0] = ptr;
  589. picture->data[1] = picture->data[0] + size;
  590. picture->data[2] = picture->data[1] + size2;
  591. picture->data[3] = picture->data[1] + size2 + size2;
  592. return 2 * size + 2 * size2;
  593. case PIX_FMT_NV12:
  594. case PIX_FMT_NV21:
  595. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  596. size2 = picture->linesize[1] * h2 * 2;
  597. picture->data[0] = ptr;
  598. picture->data[1] = picture->data[0] + size;
  599. picture->data[2] = NULL;
  600. picture->data[3] = NULL;
  601. return size + 2 * size2;
  602. case PIX_FMT_RGB24:
  603. case PIX_FMT_BGR24:
  604. case PIX_FMT_RGB32:
  605. case PIX_FMT_BGR32:
  606. case PIX_FMT_RGB32_1:
  607. case PIX_FMT_BGR32_1:
  608. case PIX_FMT_RGB48BE:
  609. case PIX_FMT_RGB48LE:
  610. case PIX_FMT_GRAY16BE:
  611. case PIX_FMT_GRAY16LE:
  612. case PIX_FMT_BGR555:
  613. case PIX_FMT_BGR565:
  614. case PIX_FMT_RGB555:
  615. case PIX_FMT_RGB565:
  616. case PIX_FMT_YUYV422:
  617. case PIX_FMT_UYVY422:
  618. case PIX_FMT_UYYVYY411:
  619. case PIX_FMT_RGB4:
  620. case PIX_FMT_BGR4:
  621. case PIX_FMT_MONOWHITE:
  622. case PIX_FMT_MONOBLACK:
  623. picture->data[0] = ptr;
  624. picture->data[1] = NULL;
  625. picture->data[2] = NULL;
  626. picture->data[3] = NULL;
  627. return size;
  628. case PIX_FMT_PAL8:
  629. case PIX_FMT_RGB8:
  630. case PIX_FMT_BGR8:
  631. case PIX_FMT_RGB4_BYTE:
  632. case PIX_FMT_BGR4_BYTE:
  633. case PIX_FMT_GRAY8:
  634. size2 = (size + 3) & ~3;
  635. picture->data[0] = ptr;
  636. picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
  637. picture->data[2] = NULL;
  638. picture->data[3] = NULL;
  639. return size2 + 256 * 4;
  640. default:
  641. picture->data[0] = NULL;
  642. picture->data[1] = NULL;
  643. picture->data[2] = NULL;
  644. picture->data[3] = NULL;
  645. return -1;
  646. }
  647. }
  648. int avpicture_fill(AVPicture *picture, uint8_t *ptr,
  649. int pix_fmt, int width, int height)
  650. {
  651. if(avcodec_check_dimensions(NULL, width, height))
  652. return -1;
  653. if (ff_fill_linesize(picture, pix_fmt, width))
  654. return -1;
  655. return ff_fill_pointer(picture, ptr, pix_fmt, height);
  656. }
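/* Illustrative usage sketch, kept out of the build with #if 0: wrapping a
 * caller-allocated buffer as an AVPicture with the helpers above. The pixel
 * format is an example value; avpicture_get_size() is defined further below
 * and declared in avcodec.h. */
#if 0
static int example_wrap_buffer(AVPicture *pic, int width, int height)
{
    int size = avpicture_get_size(PIX_FMT_YUV420P, width, height);
    uint8_t *buf;
    if (size < 0)
        return -1;
    buf = av_malloc(size);
    if (!buf)
        return -1;
    /* pic->data[] and pic->linesize[] now describe planes inside buf;
       the caller must av_free(buf) once the picture is no longer needed */
    return avpicture_fill(pic, buf, PIX_FMT_YUV420P, width, height);
}
#endif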
  657. int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
  658. unsigned char *dest, int dest_size)
  659. {
  660. const PixFmtInfo* pf = &pix_fmt_info[pix_fmt];
  661. int i, j, w, h, data_planes;
  662. const unsigned char* s;
  663. int size = avpicture_get_size(pix_fmt, width, height);
  664. if (size > dest_size || size < 0)
  665. return -1;
  666. if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
  667. if (pix_fmt == PIX_FMT_YUYV422 ||
  668. pix_fmt == PIX_FMT_UYVY422 ||
  669. pix_fmt == PIX_FMT_BGR565 ||
  670. pix_fmt == PIX_FMT_BGR555 ||
  671. pix_fmt == PIX_FMT_RGB565 ||
  672. pix_fmt == PIX_FMT_RGB555)
  673. w = width * 2;
  674. else if (pix_fmt == PIX_FMT_UYYVYY411)
  675. w = width + width/2;
  676. else if (pix_fmt == PIX_FMT_PAL8)
  677. w = width;
  678. else
  679. w = width * (pf->depth * pf->nb_channels / 8);
  680. data_planes = 1;
  681. h = height;
  682. } else {
  683. data_planes = pf->nb_channels;
  684. w = (width*pf->depth + 7)/8;
  685. h = height;
  686. }
  687. for (i=0; i<data_planes; i++) {
  688. if (i == 1) {
  689. w = width >> pf->x_chroma_shift;
  690. h = height >> pf->y_chroma_shift;
  691. }
  692. s = src->data[i];
  693. for(j=0; j<h; j++) {
  694. memcpy(dest, s, w);
  695. dest += w;
  696. s += src->linesize[i];
  697. }
  698. }
  699. if (pf->pixel_type == FF_PIXEL_PALETTE)
  700. memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);
  701. return size;
  702. }
  703. int avpicture_get_size(int pix_fmt, int width, int height)
  704. {
  705. AVPicture dummy_pict;
  706. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  707. }
  708. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  709. int has_alpha)
  710. {
  711. const PixFmtInfo *pf, *ps;
  712. int loss;
  713. ps = &pix_fmt_info[src_pix_fmt];
  714. pf = &pix_fmt_info[dst_pix_fmt];
  715. /* compute loss */
  716. loss = 0;
  717. pf = &pix_fmt_info[dst_pix_fmt];
  718. if (pf->depth < ps->depth ||
  719. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  720. loss |= FF_LOSS_DEPTH;
  721. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  722. pf->y_chroma_shift > ps->y_chroma_shift)
  723. loss |= FF_LOSS_RESOLUTION;
  724. switch(pf->color_type) {
  725. case FF_COLOR_RGB:
  726. if (ps->color_type != FF_COLOR_RGB &&
  727. ps->color_type != FF_COLOR_GRAY)
  728. loss |= FF_LOSS_COLORSPACE;
  729. break;
  730. case FF_COLOR_GRAY:
  731. if (ps->color_type != FF_COLOR_GRAY)
  732. loss |= FF_LOSS_COLORSPACE;
  733. break;
  734. case FF_COLOR_YUV:
  735. if (ps->color_type != FF_COLOR_YUV)
  736. loss |= FF_LOSS_COLORSPACE;
  737. break;
  738. case FF_COLOR_YUV_JPEG:
  739. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  740. ps->color_type != FF_COLOR_YUV &&
  741. ps->color_type != FF_COLOR_GRAY)
  742. loss |= FF_LOSS_COLORSPACE;
  743. break;
  744. default:
  745. /* fail safe test */
  746. if (ps->color_type != pf->color_type)
  747. loss |= FF_LOSS_COLORSPACE;
  748. break;
  749. }
  750. if (pf->color_type == FF_COLOR_GRAY &&
  751. ps->color_type != FF_COLOR_GRAY)
  752. loss |= FF_LOSS_CHROMA;
  753. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  754. loss |= FF_LOSS_ALPHA;
  755. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  756. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  757. loss |= FF_LOSS_COLORQUANT;
  758. return loss;
  759. }
  760. static int avg_bits_per_pixel(int pix_fmt)
  761. {
  762. int bits;
  763. const PixFmtInfo *pf;
  764. pf = &pix_fmt_info[pix_fmt];
  765. switch(pf->pixel_type) {
  766. case FF_PIXEL_PACKED:
  767. switch(pix_fmt) {
  768. case PIX_FMT_YUYV422:
  769. case PIX_FMT_UYVY422:
  770. case PIX_FMT_RGB565:
  771. case PIX_FMT_RGB555:
  772. case PIX_FMT_BGR565:
  773. case PIX_FMT_BGR555:
  774. bits = 16;
  775. break;
  776. case PIX_FMT_UYYVYY411:
  777. bits = 12;
  778. break;
  779. default:
  780. bits = pf->depth * pf->nb_channels;
  781. break;
  782. }
  783. break;
  784. case FF_PIXEL_PLANAR:
  785. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  786. bits = pf->depth * pf->nb_channels;
  787. } else {
  788. bits = pf->depth + ((2 * pf->depth) >>
  789. (pf->x_chroma_shift + pf->y_chroma_shift));
  790. }
  791. break;
  792. case FF_PIXEL_PALETTE:
  793. bits = 8;
  794. break;
  795. default:
  796. bits = -1;
  797. break;
  798. }
  799. return bits;
  800. }
  801. static int avcodec_find_best_pix_fmt1(int64_t pix_fmt_mask,
  802. int src_pix_fmt,
  803. int has_alpha,
  804. int loss_mask)
  805. {
  806. int dist, i, loss, min_dist, dst_pix_fmt;
  807. /* find exact color match with smallest size */
  808. dst_pix_fmt = -1;
  809. min_dist = 0x7fffffff;
  810. for(i = 0;i < PIX_FMT_NB; i++) {
  811. if (pix_fmt_mask & (1ULL << i)) {
  812. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  813. if (loss == 0) {
  814. dist = avg_bits_per_pixel(i);
  815. if (dist < min_dist) {
  816. min_dist = dist;
  817. dst_pix_fmt = i;
  818. }
  819. }
  820. }
  821. }
  822. return dst_pix_fmt;
  823. }
  824. int avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, int src_pix_fmt,
  825. int has_alpha, int *loss_ptr)
  826. {
  827. int dst_pix_fmt, loss_mask, i;
  828. static const int loss_mask_order[] = {
  829. ~0, /* no loss first */
  830. ~FF_LOSS_ALPHA,
  831. ~FF_LOSS_RESOLUTION,
  832. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  833. ~FF_LOSS_COLORQUANT,
  834. ~FF_LOSS_DEPTH,
  835. 0,
  836. };
  837. /* try with successive loss */
  838. i = 0;
  839. for(;;) {
  840. loss_mask = loss_mask_order[i++];
  841. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  842. has_alpha, loss_mask);
  843. if (dst_pix_fmt >= 0)
  844. goto found;
  845. if (loss_mask == 0)
  846. break;
  847. }
  848. return -1;
  849. found:
  850. if (loss_ptr)
  851. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  852. return dst_pix_fmt;
  853. }
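/* Illustrative usage sketch, kept out of the build with #if 0: selecting a
 * destination format from a mask of supported formats and inspecting the
 * reported loss. The mask and source format are example values only. */
#if 0
static int example_pick_format(void)
{
    int loss;
    int64_t mask = (1ULL << PIX_FMT_YUV420P) | (1ULL << PIX_FMT_RGB24);
    int dst = avcodec_find_best_pix_fmt(mask, PIX_FMT_YUV444P, 0, &loss);
    if (dst >= 0 && (loss & FF_LOSS_RESOLUTION))
        av_log(NULL, AV_LOG_INFO, "chroma resolution will be reduced\n");
    return dst;
}
#endif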
  854. void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
  855. const uint8_t *src, int src_wrap,
  856. int width, int height)
  857. {
  858. if((!dst) || (!src))
  859. return;
  860. for(;height > 0; height--) {
  861. memcpy(dst, src, width);
  862. dst += dst_wrap;
  863. src += src_wrap;
  864. }
  865. }
  866. int ff_get_plane_bytewidth(enum PixelFormat pix_fmt, int width, int plane)
  867. {
  868. int bits;
  869. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  870. pf = &pix_fmt_info[pix_fmt];
  871. switch(pf->pixel_type) {
  872. case FF_PIXEL_PACKED:
  873. switch(pix_fmt) {
  874. case PIX_FMT_YUYV422:
  875. case PIX_FMT_UYVY422:
  876. case PIX_FMT_RGB565:
  877. case PIX_FMT_RGB555:
  878. case PIX_FMT_BGR565:
  879. case PIX_FMT_BGR555:
  880. bits = 16;
  881. break;
  882. case PIX_FMT_UYYVYY411:
  883. bits = 12;
  884. break;
  885. default:
  886. bits = pf->depth * pf->nb_channels;
  887. break;
  888. }
  889. return (width * bits + 7) >> 3;
  890. break;
  891. case FF_PIXEL_PLANAR:
  892. if (plane == 1 || plane == 2)
  893. width= -((-width)>>pf->x_chroma_shift);
  894. return (width * pf->depth + 7) >> 3;
  895. break;
  896. case FF_PIXEL_PALETTE:
  897. if (plane == 0)
  898. return width;
  899. break;
  900. }
  901. return -1;
  902. }
  903. void av_picture_copy(AVPicture *dst, const AVPicture *src,
  904. int pix_fmt, int width, int height)
  905. {
  906. int i;
  907. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  908. pf = &pix_fmt_info[pix_fmt];
  909. switch(pf->pixel_type) {
  910. case FF_PIXEL_PACKED:
  911. case FF_PIXEL_PLANAR:
  912. for(i = 0; i < pf->nb_channels; i++) {
  913. int h;
  914. int bwidth = ff_get_plane_bytewidth(pix_fmt, width, i);
  915. h = height;
  916. if (i == 1 || i == 2) {
  917. h= -((-height)>>pf->y_chroma_shift);
  918. }
  919. ff_img_copy_plane(dst->data[i], dst->linesize[i],
  920. src->data[i], src->linesize[i],
  921. bwidth, h);
  922. }
  923. break;
  924. case FF_PIXEL_PALETTE:
  925. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  926. src->data[0], src->linesize[0],
  927. width, height);
  928. /* copy the palette */
  929. ff_img_copy_plane(dst->data[1], dst->linesize[1],
  930. src->data[1], src->linesize[1],
  931. 4, 256);
  932. break;
  933. }
  934. }
  935. /* XXX: totally non optimized */
  936. static void yuyv422_to_yuv420p(AVPicture *dst, const AVPicture *src,
  937. int width, int height)
  938. {
  939. const uint8_t *p, *p1;
  940. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  941. int w;
  942. p1 = src->data[0];
  943. lum1 = dst->data[0];
  944. cb1 = dst->data[1];
  945. cr1 = dst->data[2];
  946. for(;height >= 1; height -= 2) {
  947. p = p1;
  948. lum = lum1;
  949. cb = cb1;
  950. cr = cr1;
  951. for(w = width; w >= 2; w -= 2) {
  952. lum[0] = p[0];
  953. cb[0] = p[1];
  954. lum[1] = p[2];
  955. cr[0] = p[3];
  956. p += 4;
  957. lum += 2;
  958. cb++;
  959. cr++;
  960. }
  961. if (w) {
  962. lum[0] = p[0];
  963. cb[0] = p[1];
  964. cr[0] = p[3];
  965. cb++;
  966. cr++;
  967. }
  968. p1 += src->linesize[0];
  969. lum1 += dst->linesize[0];
  970. if (height>1) {
  971. p = p1;
  972. lum = lum1;
  973. for(w = width; w >= 2; w -= 2) {
  974. lum[0] = p[0];
  975. lum[1] = p[2];
  976. p += 4;
  977. lum += 2;
  978. }
  979. if (w) {
  980. lum[0] = p[0];
  981. }
  982. p1 += src->linesize[0];
  983. lum1 += dst->linesize[0];
  984. }
  985. cb1 += dst->linesize[1];
  986. cr1 += dst->linesize[2];
  987. }
  988. }
  989. static void uyvy422_to_yuv420p(AVPicture *dst, const AVPicture *src,
  990. int width, int height)
  991. {
  992. const uint8_t *p, *p1;
  993. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  994. int w;
  995. p1 = src->data[0];
  996. lum1 = dst->data[0];
  997. cb1 = dst->data[1];
  998. cr1 = dst->data[2];
  999. for(;height >= 1; height -= 2) {
  1000. p = p1;
  1001. lum = lum1;
  1002. cb = cb1;
  1003. cr = cr1;
  1004. for(w = width; w >= 2; w -= 2) {
  1005. lum[0] = p[1];
  1006. cb[0] = p[0];
  1007. lum[1] = p[3];
  1008. cr[0] = p[2];
  1009. p += 4;
  1010. lum += 2;
  1011. cb++;
  1012. cr++;
  1013. }
  1014. if (w) {
  1015. lum[0] = p[1];
  1016. cb[0] = p[0];
  1017. cr[0] = p[2];
  1018. cb++;
  1019. cr++;
  1020. }
  1021. p1 += src->linesize[0];
  1022. lum1 += dst->linesize[0];
  1023. if (height>1) {
  1024. p = p1;
  1025. lum = lum1;
  1026. for(w = width; w >= 2; w -= 2) {
  1027. lum[0] = p[1];
  1028. lum[1] = p[3];
  1029. p += 4;
  1030. lum += 2;
  1031. }
  1032. if (w) {
  1033. lum[0] = p[1];
  1034. }
  1035. p1 += src->linesize[0];
  1036. lum1 += dst->linesize[0];
  1037. }
  1038. cb1 += dst->linesize[1];
  1039. cr1 += dst->linesize[2];
  1040. }
  1041. }
  1042. static void uyvy422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  1043. int width, int height)
  1044. {
  1045. const uint8_t *p, *p1;
  1046. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1047. int w;
  1048. p1 = src->data[0];
  1049. lum1 = dst->data[0];
  1050. cb1 = dst->data[1];
  1051. cr1 = dst->data[2];
  1052. for(;height > 0; height--) {
  1053. p = p1;
  1054. lum = lum1;
  1055. cb = cb1;
  1056. cr = cr1;
  1057. for(w = width; w >= 2; w -= 2) {
  1058. lum[0] = p[1];
  1059. cb[0] = p[0];
  1060. lum[1] = p[3];
  1061. cr[0] = p[2];
  1062. p += 4;
  1063. lum += 2;
  1064. cb++;
  1065. cr++;
  1066. }
  1067. p1 += src->linesize[0];
  1068. lum1 += dst->linesize[0];
  1069. cb1 += dst->linesize[1];
  1070. cr1 += dst->linesize[2];
  1071. }
  1072. }
  1073. static void yuyv422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  1074. int width, int height)
  1075. {
  1076. const uint8_t *p, *p1;
  1077. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1078. int w;
  1079. p1 = src->data[0];
  1080. lum1 = dst->data[0];
  1081. cb1 = dst->data[1];
  1082. cr1 = dst->data[2];
  1083. for(;height > 0; height--) {
  1084. p = p1;
  1085. lum = lum1;
  1086. cb = cb1;
  1087. cr = cr1;
  1088. for(w = width; w >= 2; w -= 2) {
  1089. lum[0] = p[0];
  1090. cb[0] = p[1];
  1091. lum[1] = p[2];
  1092. cr[0] = p[3];
  1093. p += 4;
  1094. lum += 2;
  1095. cb++;
  1096. cr++;
  1097. }
  1098. p1 += src->linesize[0];
  1099. lum1 += dst->linesize[0];
  1100. cb1 += dst->linesize[1];
  1101. cr1 += dst->linesize[2];
  1102. }
  1103. }
  1104. static void yuv422p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  1105. int width, int height)
  1106. {
  1107. uint8_t *p, *p1;
  1108. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1109. int w;
  1110. p1 = dst->data[0];
  1111. lum1 = src->data[0];
  1112. cb1 = src->data[1];
  1113. cr1 = src->data[2];
  1114. for(;height > 0; height--) {
  1115. p = p1;
  1116. lum = lum1;
  1117. cb = cb1;
  1118. cr = cr1;
  1119. for(w = width; w >= 2; w -= 2) {
  1120. p[0] = lum[0];
  1121. p[1] = cb[0];
  1122. p[2] = lum[1];
  1123. p[3] = cr[0];
  1124. p += 4;
  1125. lum += 2;
  1126. cb++;
  1127. cr++;
  1128. }
  1129. p1 += dst->linesize[0];
  1130. lum1 += src->linesize[0];
  1131. cb1 += src->linesize[1];
  1132. cr1 += src->linesize[2];
  1133. }
  1134. }
  1135. static void yuv422p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1136. int width, int height)
  1137. {
  1138. uint8_t *p, *p1;
  1139. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1140. int w;
  1141. p1 = dst->data[0];
  1142. lum1 = src->data[0];
  1143. cb1 = src->data[1];
  1144. cr1 = src->data[2];
  1145. for(;height > 0; height--) {
  1146. p = p1;
  1147. lum = lum1;
  1148. cb = cb1;
  1149. cr = cr1;
  1150. for(w = width; w >= 2; w -= 2) {
  1151. p[1] = lum[0];
  1152. p[0] = cb[0];
  1153. p[3] = lum[1];
  1154. p[2] = cr[0];
  1155. p += 4;
  1156. lum += 2;
  1157. cb++;
  1158. cr++;
  1159. }
  1160. p1 += dst->linesize[0];
  1161. lum1 += src->linesize[0];
  1162. cb1 += src->linesize[1];
  1163. cr1 += src->linesize[2];
  1164. }
  1165. }
  1166. static void uyyvyy411_to_yuv411p(AVPicture *dst, const AVPicture *src,
  1167. int width, int height)
  1168. {
  1169. const uint8_t *p, *p1;
  1170. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1171. int w;
  1172. p1 = src->data[0];
  1173. lum1 = dst->data[0];
  1174. cb1 = dst->data[1];
  1175. cr1 = dst->data[2];
  1176. for(;height > 0; height--) {
  1177. p = p1;
  1178. lum = lum1;
  1179. cb = cb1;
  1180. cr = cr1;
  1181. for(w = width; w >= 4; w -= 4) {
  1182. cb[0] = p[0];
  1183. lum[0] = p[1];
  1184. lum[1] = p[2];
  1185. cr[0] = p[3];
  1186. lum[2] = p[4];
  1187. lum[3] = p[5];
  1188. p += 6;
  1189. lum += 4;
  1190. cb++;
  1191. cr++;
  1192. }
  1193. p1 += src->linesize[0];
  1194. lum1 += dst->linesize[0];
  1195. cb1 += dst->linesize[1];
  1196. cr1 += dst->linesize[2];
  1197. }
  1198. }
  1199. static void yuv420p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  1200. int width, int height)
  1201. {
  1202. int w, h;
  1203. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1204. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1205. uint8_t *cb1, *cb2 = src->data[1];
  1206. uint8_t *cr1, *cr2 = src->data[2];
  1207. for(h = height / 2; h--;) {
  1208. line1 = linesrc;
  1209. line2 = linesrc + dst->linesize[0];
  1210. lum1 = lumsrc;
  1211. lum2 = lumsrc + src->linesize[0];
  1212. cb1 = cb2;
  1213. cr1 = cr2;
  1214. for(w = width / 2; w--;) {
  1215. *line1++ = *lum1++; *line2++ = *lum2++;
  1216. *line1++ = *line2++ = *cb1++;
  1217. *line1++ = *lum1++; *line2++ = *lum2++;
  1218. *line1++ = *line2++ = *cr1++;
  1219. }
  1220. linesrc += dst->linesize[0] * 2;
  1221. lumsrc += src->linesize[0] * 2;
  1222. cb2 += src->linesize[1];
  1223. cr2 += src->linesize[2];
  1224. }
  1225. }
  1226. static void yuv420p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1227. int width, int height)
  1228. {
  1229. int w, h;
  1230. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1231. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1232. uint8_t *cb1, *cb2 = src->data[1];
  1233. uint8_t *cr1, *cr2 = src->data[2];
  1234. for(h = height / 2; h--;) {
  1235. line1 = linesrc;
  1236. line2 = linesrc + dst->linesize[0];
  1237. lum1 = lumsrc;
  1238. lum2 = lumsrc + src->linesize[0];
  1239. cb1 = cb2;
  1240. cr1 = cr2;
  1241. for(w = width / 2; w--;) {
  1242. *line1++ = *line2++ = *cb1++;
  1243. *line1++ = *lum1++; *line2++ = *lum2++;
  1244. *line1++ = *line2++ = *cr1++;
  1245. *line1++ = *lum1++; *line2++ = *lum2++;
  1246. }
  1247. linesrc += dst->linesize[0] * 2;
  1248. lumsrc += src->linesize[0] * 2;
  1249. cb2 += src->linesize[1];
  1250. cr2 += src->linesize[2];
  1251. }
  1252. }
  1253. /* 2x2 -> 1x1 */
  1254. void ff_shrink22(uint8_t *dst, int dst_wrap,
  1255. const uint8_t *src, int src_wrap,
  1256. int width, int height)
  1257. {
  1258. int w;
  1259. const uint8_t *s1, *s2;
  1260. uint8_t *d;
  1261. for(;height > 0; height--) {
  1262. s1 = src;
  1263. s2 = s1 + src_wrap;
  1264. d = dst;
  1265. for(w = width;w >= 4; w-=4) {
  1266. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1267. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  1268. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  1269. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  1270. s1 += 8;
  1271. s2 += 8;
  1272. d += 4;
  1273. }
  1274. for(;w > 0; w--) {
  1275. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1276. s1 += 2;
  1277. s2 += 2;
  1278. d++;
  1279. }
  1280. src += 2 * src_wrap;
  1281. dst += dst_wrap;
  1282. }
  1283. }
  1284. /* 4x4 -> 1x1 */
  1285. void ff_shrink44(uint8_t *dst, int dst_wrap,
  1286. const uint8_t *src, int src_wrap,
  1287. int width, int height)
  1288. {
  1289. int w;
  1290. const uint8_t *s1, *s2, *s3, *s4;
  1291. uint8_t *d;
  1292. for(;height > 0; height--) {
  1293. s1 = src;
  1294. s2 = s1 + src_wrap;
  1295. s3 = s2 + src_wrap;
  1296. s4 = s3 + src_wrap;
  1297. d = dst;
  1298. for(w = width;w > 0; w--) {
  1299. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  1300. s2[0] + s2[1] + s2[2] + s2[3] +
  1301. s3[0] + s3[1] + s3[2] + s3[3] +
  1302. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  1303. s1 += 4;
  1304. s2 += 4;
  1305. s3 += 4;
  1306. s4 += 4;
  1307. d++;
  1308. }
  1309. src += 4 * src_wrap;
  1310. dst += dst_wrap;
  1311. }
  1312. }
  1313. /* 8x8 -> 1x1 */
  1314. void ff_shrink88(uint8_t *dst, int dst_wrap,
  1315. const uint8_t *src, int src_wrap,
  1316. int width, int height)
  1317. {
  1318. int w, i;
  1319. for(;height > 0; height--) {
  1320. for(w = width;w > 0; w--) {
  1321. int tmp=0;
  1322. for(i=0; i<8; i++){
  1323. tmp += src[0] + src[1] + src[2] + src[3] + src[4] + src[5] + src[6] + src[7];
  1324. src += src_wrap;
  1325. }
  1326. *(dst++) = (tmp + 32)>>6;
  1327. src += 8 - 8*src_wrap;
  1328. }
  1329. src += 8*src_wrap - 8*width;
  1330. dst += dst_wrap - width;
  1331. }
  1332. }
  1333. /* XXX: add jpeg quantize code */
  1334. #define TRANSP_INDEX (6*6*6)
1335. /* this may be slow, but it allows for extensions */
  1336. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1337. {
  1338. return (((r) / 47) % 6) * 6 * 6 + (((g) / 47) % 6) * 6 + (((b) / 47) % 6);
  1339. }
  1340. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  1341. {
  1342. uint32_t *pal;
  1343. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  1344. int i, r, g, b;
  1345. pal = (uint32_t *)palette;
  1346. i = 0;
  1347. for(r = 0; r < 6; r++) {
  1348. for(g = 0; g < 6; g++) {
  1349. for(b = 0; b < 6; b++) {
  1350. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  1351. (pal_value[g] << 8) | pal_value[b];
  1352. }
  1353. }
  1354. }
  1355. if (has_alpha)
  1356. pal[i++] = 0;
  1357. while (i < 256)
  1358. pal[i++] = 0xff000000;
  1359. }
  1360. /* copy bit n to bits 0 ... n - 1 */
  1361. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1362. {
  1363. int mask;
  1364. mask = (1 << n) - 1;
  1365. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1366. }
  1367. /* rgb555 handling */
  1368. #define RGB_NAME rgb555
  1369. #define RGB_IN(r, g, b, s)\
  1370. {\
  1371. unsigned int v = ((const uint16_t *)(s))[0];\
  1372. r = bitcopy_n(v >> (10 - 3), 3);\
  1373. g = bitcopy_n(v >> (5 - 3), 3);\
  1374. b = bitcopy_n(v << 3, 3);\
  1375. }
  1376. #define RGB_OUT(d, r, g, b)\
  1377. {\
  1378. ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3);\
  1379. }
  1380. #define BPP 2
  1381. #include "imgconvert_template.c"
  1382. /* rgb565 handling */
  1383. #define RGB_NAME rgb565
  1384. #define RGB_IN(r, g, b, s)\
  1385. {\
  1386. unsigned int v = ((const uint16_t *)(s))[0];\
  1387. r = bitcopy_n(v >> (11 - 3), 3);\
  1388. g = bitcopy_n(v >> (5 - 2), 2);\
  1389. b = bitcopy_n(v << 3, 3);\
  1390. }
  1391. #define RGB_OUT(d, r, g, b)\
  1392. {\
  1393. ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
  1394. }
  1395. #define BPP 2
  1396. #include "imgconvert_template.c"
  1397. /* bgr24 handling */
  1398. #define RGB_NAME bgr24
  1399. #define RGB_IN(r, g, b, s)\
  1400. {\
  1401. b = (s)[0];\
  1402. g = (s)[1];\
  1403. r = (s)[2];\
  1404. }
  1405. #define RGB_OUT(d, r, g, b)\
  1406. {\
  1407. (d)[0] = b;\
  1408. (d)[1] = g;\
  1409. (d)[2] = r;\
  1410. }
  1411. #define BPP 3
  1412. #include "imgconvert_template.c"
  1413. #undef RGB_IN
  1414. #undef RGB_OUT
  1415. #undef BPP
  1416. /* rgb24 handling */
  1417. #define RGB_NAME rgb24
  1418. #define FMT_RGB24
  1419. #define RGB_IN(r, g, b, s)\
  1420. {\
  1421. r = (s)[0];\
  1422. g = (s)[1];\
  1423. b = (s)[2];\
  1424. }
  1425. #define RGB_OUT(d, r, g, b)\
  1426. {\
  1427. (d)[0] = r;\
  1428. (d)[1] = g;\
  1429. (d)[2] = b;\
  1430. }
  1431. #define BPP 3
  1432. #include "imgconvert_template.c"
  1433. /* rgb32 handling */
  1434. #define RGB_NAME rgb32
  1435. #define FMT_RGB32
  1436. #define RGB_IN(r, g, b, s)\
  1437. {\
  1438. unsigned int v = ((const uint32_t *)(s))[0];\
  1439. r = (v >> 16) & 0xff;\
  1440. g = (v >> 8) & 0xff;\
  1441. b = v & 0xff;\
  1442. }
  1443. #define RGBA_IN(r, g, b, a, s)\
  1444. {\
  1445. unsigned int v = ((const uint32_t *)(s))[0];\
  1446. a = (v >> 24) & 0xff;\
  1447. r = (v >> 16) & 0xff;\
  1448. g = (v >> 8) & 0xff;\
  1449. b = v & 0xff;\
  1450. }
  1451. #define RGBA_OUT(d, r, g, b, a)\
  1452. {\
  1453. ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
  1454. }
  1455. #define BPP 4
  1456. #include "imgconvert_template.c"
  1457. static void mono_to_gray(AVPicture *dst, const AVPicture *src,
  1458. int width, int height, int xor_mask)
  1459. {
  1460. const unsigned char *p;
  1461. unsigned char *q;
  1462. int v, dst_wrap, src_wrap;
  1463. int y, w;
  1464. p = src->data[0];
  1465. src_wrap = src->linesize[0] - ((width + 7) >> 3);
  1466. q = dst->data[0];
  1467. dst_wrap = dst->linesize[0] - width;
  1468. for(y=0;y<height;y++) {
  1469. w = width;
  1470. while (w >= 8) {
  1471. v = *p++ ^ xor_mask;
  1472. q[0] = -(v >> 7);
  1473. q[1] = -((v >> 6) & 1);
  1474. q[2] = -((v >> 5) & 1);
  1475. q[3] = -((v >> 4) & 1);
  1476. q[4] = -((v >> 3) & 1);
  1477. q[5] = -((v >> 2) & 1);
  1478. q[6] = -((v >> 1) & 1);
  1479. q[7] = -((v >> 0) & 1);
  1480. w -= 8;
  1481. q += 8;
  1482. }
  1483. if (w > 0) {
  1484. v = *p++ ^ xor_mask;
  1485. do {
  1486. q[0] = -((v >> 7) & 1);
  1487. q++;
  1488. v <<= 1;
  1489. } while (--w);
  1490. }
  1491. p += src_wrap;
  1492. q += dst_wrap;
  1493. }
  1494. }
  1495. static void monowhite_to_gray(AVPicture *dst, const AVPicture *src,
  1496. int width, int height)
  1497. {
  1498. mono_to_gray(dst, src, width, height, 0xff);
  1499. }
  1500. static void monoblack_to_gray(AVPicture *dst, const AVPicture *src,
  1501. int width, int height)
  1502. {
  1503. mono_to_gray(dst, src, width, height, 0x00);
  1504. }
  1505. static void gray_to_mono(AVPicture *dst, const AVPicture *src,
  1506. int width, int height, int xor_mask)
  1507. {
  1508. int n;
  1509. const uint8_t *s;
  1510. uint8_t *d;
  1511. int j, b, v, n1, src_wrap, dst_wrap, y;
  1512. s = src->data[0];
  1513. src_wrap = src->linesize[0] - width;
  1514. d = dst->data[0];
  1515. dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
  1516. for(y=0;y<height;y++) {
  1517. n = width;
  1518. while (n >= 8) {
  1519. v = 0;
  1520. for(j=0;j<8;j++) {
  1521. b = s[0];
  1522. s++;
  1523. v = (v << 1) | (b >> 7);
  1524. }
  1525. d[0] = v ^ xor_mask;
  1526. d++;
  1527. n -= 8;
  1528. }
  1529. if (n > 0) {
  1530. n1 = n;
  1531. v = 0;
  1532. while (n > 0) {
  1533. b = s[0];
  1534. s++;
  1535. v = (v << 1) | (b >> 7);
  1536. n--;
  1537. }
  1538. d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
  1539. d++;
  1540. }
  1541. s += src_wrap;
  1542. d += dst_wrap;
  1543. }
  1544. }
  1545. static void gray_to_monowhite(AVPicture *dst, const AVPicture *src,
  1546. int width, int height)
  1547. {
  1548. gray_to_mono(dst, src, width, height, 0xff);
  1549. }
  1550. static void gray_to_monoblack(AVPicture *dst, const AVPicture *src,
  1551. int width, int height)
  1552. {
  1553. gray_to_mono(dst, src, width, height, 0x00);
  1554. }
  1555. static void gray_to_gray16(AVPicture *dst, const AVPicture *src,
  1556. int width, int height)
  1557. {
  1558. int x, y, src_wrap, dst_wrap;
  1559. uint8_t *s, *d;
  1560. s = src->data[0];
  1561. src_wrap = src->linesize[0] - width;
  1562. d = dst->data[0];
  1563. dst_wrap = dst->linesize[0] - width * 2;
  1564. for(y=0; y<height; y++){
  1565. for(x=0; x<width; x++){
  1566. *d++ = *s;
  1567. *d++ = *s++;
  1568. }
  1569. s += src_wrap;
  1570. d += dst_wrap;
  1571. }
  1572. }
  1573. static void gray16_to_gray(AVPicture *dst, const AVPicture *src,
  1574. int width, int height)
  1575. {
  1576. int x, y, src_wrap, dst_wrap;
  1577. uint8_t *s, *d;
  1578. s = src->data[0];
  1579. src_wrap = src->linesize[0] - width * 2;
  1580. d = dst->data[0];
  1581. dst_wrap = dst->linesize[0] - width;
  1582. for(y=0; y<height; y++){
  1583. for(x=0; x<width; x++){
  1584. *d++ = *s;
  1585. s += 2;
  1586. }
  1587. s += src_wrap;
  1588. d += dst_wrap;
  1589. }
  1590. }
  1591. static void gray16be_to_gray(AVPicture *dst, const AVPicture *src,
  1592. int width, int height)
  1593. {
  1594. gray16_to_gray(dst, src, width, height);
  1595. }
  1596. static void gray16le_to_gray(AVPicture *dst, const AVPicture *src,
  1597. int width, int height)
  1598. {
  1599. AVPicture tmpsrc = *src;
  1600. tmpsrc.data[0]++;
  1601. gray16_to_gray(dst, &tmpsrc, width, height);
  1602. }
  1603. static void gray16_to_gray16(AVPicture *dst, const AVPicture *src,
  1604. int width, int height)
  1605. {
  1606. int x, y, src_wrap, dst_wrap;
  1607. uint16_t *s, *d;
  1608. s = (uint16_t*)src->data[0];
  1609. src_wrap = (src->linesize[0] - width * 2)/2;
  1610. d = (uint16_t*)dst->data[0];
  1611. dst_wrap = (dst->linesize[0] - width * 2)/2;
  1612. for(y=0; y<height; y++){
  1613. for(x=0; x<width; x++){
  1614. *d++ = bswap_16(*s++);
  1615. }
  1616. s += src_wrap;
  1617. d += dst_wrap;
  1618. }
  1619. }
  1620. typedef struct ConvertEntry {
  1621. void (*convert)(AVPicture *dst,
  1622. const AVPicture *src, int width, int height);
  1623. } ConvertEntry;
  1624. /* Add each new conversion function in this table. In order to be able
  1625. to convert from any format to any format, the following constraints
  1626. must be satisfied:
  1627. - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
  1628. - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
  1629. - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGB32
  1630. - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
  1631. PIX_FMT_RGB24.
1632. - PIX_FMT_YUYV422 must convert to and from PIX_FMT_YUV422P.
  1633. The other conversion functions are just optimizations for common cases.
  1634. */
  1635. static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
  1636. [PIX_FMT_YUV420P] = {
  1637. [PIX_FMT_YUYV422] = {
  1638. .convert = yuv420p_to_yuyv422,
  1639. },
  1640. [PIX_FMT_RGB555] = {
  1641. .convert = yuv420p_to_rgb555
  1642. },
  1643. [PIX_FMT_RGB565] = {
  1644. .convert = yuv420p_to_rgb565
  1645. },
  1646. [PIX_FMT_BGR24] = {
  1647. .convert = yuv420p_to_bgr24
  1648. },
  1649. [PIX_FMT_RGB24] = {
  1650. .convert = yuv420p_to_rgb24
  1651. },
  1652. [PIX_FMT_RGB32] = {
  1653. .convert = yuv420p_to_rgb32
  1654. },
  1655. [PIX_FMT_UYVY422] = {
  1656. .convert = yuv420p_to_uyvy422,
  1657. },
  1658. },
  1659. [PIX_FMT_YUV422P] = {
  1660. [PIX_FMT_YUYV422] = {
  1661. .convert = yuv422p_to_yuyv422,
  1662. },
  1663. [PIX_FMT_UYVY422] = {
  1664. .convert = yuv422p_to_uyvy422,
  1665. },
  1666. },
  1667. [PIX_FMT_YUV444P] = {
  1668. [PIX_FMT_RGB24] = {
  1669. .convert = yuv444p_to_rgb24
  1670. },
  1671. },
  1672. [PIX_FMT_YUVJ420P] = {
  1673. [PIX_FMT_RGB555] = {
  1674. .convert = yuvj420p_to_rgb555
  1675. },
  1676. [PIX_FMT_RGB565] = {
  1677. .convert = yuvj420p_to_rgb565
  1678. },
  1679. [PIX_FMT_BGR24] = {
  1680. .convert = yuvj420p_to_bgr24
  1681. },
  1682. [PIX_FMT_RGB24] = {
  1683. .convert = yuvj420p_to_rgb24
  1684. },
  1685. [PIX_FMT_RGB32] = {
  1686. .convert = yuvj420p_to_rgb32
  1687. },
  1688. },
  1689. [PIX_FMT_YUVJ444P] = {
  1690. [PIX_FMT_RGB24] = {
  1691. .convert = yuvj444p_to_rgb24
  1692. },
  1693. },
  1694. [PIX_FMT_YUYV422] = {
  1695. [PIX_FMT_YUV420P] = {
  1696. .convert = yuyv422_to_yuv420p,
  1697. },
  1698. [PIX_FMT_YUV422P] = {
  1699. .convert = yuyv422_to_yuv422p,
  1700. },
  1701. },
  1702. [PIX_FMT_UYVY422] = {
  1703. [PIX_FMT_YUV420P] = {
  1704. .convert = uyvy422_to_yuv420p,
  1705. },
  1706. [PIX_FMT_YUV422P] = {
  1707. .convert = uyvy422_to_yuv422p,
  1708. },
  1709. },
  1710. [PIX_FMT_RGB24] = {
  1711. [PIX_FMT_YUV420P] = {
  1712. .convert = rgb24_to_yuv420p
  1713. },
  1714. [PIX_FMT_RGB565] = {
  1715. .convert = rgb24_to_rgb565
  1716. },
  1717. [PIX_FMT_RGB555] = {
  1718. .convert = rgb24_to_rgb555
  1719. },
  1720. [PIX_FMT_RGB32] = {
  1721. .convert = rgb24_to_rgb32
  1722. },
  1723. [PIX_FMT_BGR24] = {
  1724. .convert = rgb24_to_bgr24
  1725. },
  1726. [PIX_FMT_GRAY8] = {
  1727. .convert = rgb24_to_gray
  1728. },
  1729. [PIX_FMT_PAL8] = {
  1730. .convert = rgb24_to_pal8
  1731. },
  1732. [PIX_FMT_YUV444P] = {
  1733. .convert = rgb24_to_yuv444p
  1734. },
  1735. [PIX_FMT_YUVJ420P] = {
  1736. .convert = rgb24_to_yuvj420p
  1737. },
  1738. [PIX_FMT_YUVJ444P] = {
  1739. .convert = rgb24_to_yuvj444p
  1740. },
  1741. },
  1742. [PIX_FMT_RGB32] = {
  1743. [PIX_FMT_RGB24] = {
  1744. .convert = rgb32_to_rgb24
  1745. },
  1746. [PIX_FMT_BGR24] = {
  1747. .convert = rgb32_to_bgr24
  1748. },
  1749. [PIX_FMT_RGB565] = {
  1750. .convert = rgb32_to_rgb565
  1751. },
  1752. [PIX_FMT_RGB555] = {
  1753. .convert = rgb32_to_rgb555
  1754. },
  1755. [PIX_FMT_PAL8] = {
  1756. .convert = rgb32_to_pal8
  1757. },
  1758. [PIX_FMT_YUV420P] = {
  1759. .convert = rgb32_to_yuv420p
  1760. },
  1761. [PIX_FMT_GRAY8] = {
  1762. .convert = rgb32_to_gray
  1763. },
  1764. },
  1765. [PIX_FMT_BGR24] = {
  1766. [PIX_FMT_RGB32] = {
  1767. .convert = bgr24_to_rgb32
  1768. },
  1769. [PIX_FMT_RGB24] = {
  1770. .convert = bgr24_to_rgb24
  1771. },
  1772. [PIX_FMT_YUV420P] = {
  1773. .convert = bgr24_to_yuv420p
  1774. },
  1775. [PIX_FMT_GRAY8] = {
  1776. .convert = bgr24_to_gray
  1777. },
  1778. },
  1779. [PIX_FMT_RGB555] = {
  1780. [PIX_FMT_RGB24] = {
  1781. .convert = rgb555_to_rgb24
  1782. },
  1783. [PIX_FMT_RGB32] = {
  1784. .convert = rgb555_to_rgb32
  1785. },
  1786. [PIX_FMT_YUV420P] = {
  1787. .convert = rgb555_to_yuv420p
  1788. },
  1789. [PIX_FMT_GRAY8] = {
  1790. .convert = rgb555_to_gray
  1791. },
  1792. },
  1793. [PIX_FMT_RGB565] = {
  1794. [PIX_FMT_RGB32] = {
  1795. .convert = rgb565_to_rgb32
  1796. },
  1797. [PIX_FMT_RGB24] = {
  1798. .convert = rgb565_to_rgb24
  1799. },
  1800. [PIX_FMT_YUV420P] = {
  1801. .convert = rgb565_to_yuv420p
  1802. },
  1803. [PIX_FMT_GRAY8] = {
  1804. .convert = rgb565_to_gray
  1805. },
  1806. },
  1807. [PIX_FMT_GRAY16BE] = {
  1808. [PIX_FMT_GRAY8] = {
  1809. .convert = gray16be_to_gray
  1810. },
  1811. [PIX_FMT_GRAY16LE] = {
  1812. .convert = gray16_to_gray16
  1813. },
  1814. },
  1815. [PIX_FMT_GRAY16LE] = {
  1816. [PIX_FMT_GRAY8] = {
  1817. .convert = gray16le_to_gray
  1818. },
  1819. [PIX_FMT_GRAY16BE] = {
  1820. .convert = gray16_to_gray16
  1821. },
  1822. },
  1823. [PIX_FMT_GRAY8] = {
  1824. [PIX_FMT_RGB555] = {
  1825. .convert = gray_to_rgb555
  1826. },
  1827. [PIX_FMT_RGB565] = {
  1828. .convert = gray_to_rgb565
  1829. },
  1830. [PIX_FMT_RGB24] = {
  1831. .convert = gray_to_rgb24
  1832. },
  1833. [PIX_FMT_BGR24] = {
  1834. .convert = gray_to_bgr24
  1835. },
  1836. [PIX_FMT_RGB32] = {
  1837. .convert = gray_to_rgb32
  1838. },
  1839. [PIX_FMT_MONOWHITE] = {
  1840. .convert = gray_to_monowhite
  1841. },
  1842. [PIX_FMT_MONOBLACK] = {
  1843. .convert = gray_to_monoblack
  1844. },
  1845. [PIX_FMT_GRAY16LE] = {
  1846. .convert = gray_to_gray16
  1847. },
  1848. [PIX_FMT_GRAY16BE] = {
  1849. .convert = gray_to_gray16
  1850. },
  1851. },
  1852. [PIX_FMT_MONOWHITE] = {
  1853. [PIX_FMT_GRAY8] = {
  1854. .convert = monowhite_to_gray
  1855. },
  1856. },
  1857. [PIX_FMT_MONOBLACK] = {
  1858. [PIX_FMT_GRAY8] = {
  1859. .convert = monoblack_to_gray
  1860. },
  1861. },
  1862. [PIX_FMT_PAL8] = {
  1863. [PIX_FMT_RGB555] = {
  1864. .convert = pal8_to_rgb555
  1865. },
  1866. [PIX_FMT_RGB565] = {
  1867. .convert = pal8_to_rgb565
  1868. },
  1869. [PIX_FMT_BGR24] = {
  1870. .convert = pal8_to_bgr24
  1871. },
  1872. [PIX_FMT_RGB24] = {
  1873. .convert = pal8_to_rgb24
  1874. },
  1875. [PIX_FMT_RGB32] = {
  1876. .convert = pal8_to_rgb32
  1877. },
  1878. },
  1879. [PIX_FMT_UYYVYY411] = {
  1880. [PIX_FMT_YUV411P] = {
  1881. .convert = uyyvyy411_to_yuv411p,
  1882. },
  1883. },
  1884. };
  1885. int avpicture_alloc(AVPicture *picture,
  1886. int pix_fmt, int width, int height)
  1887. {
  1888. int size;
  1889. void *ptr;
  1890. size = avpicture_get_size(pix_fmt, width, height);
  1891. if(size<0)
  1892. goto fail;
  1893. ptr = av_malloc(size);
  1894. if (!ptr)
  1895. goto fail;
  1896. avpicture_fill(picture, ptr, pix_fmt, width, height);
  1897. if(picture->data[1] && !picture->data[2])
  1898. ff_set_systematic_pal((uint32_t*)picture->data[1], pix_fmt);
  1899. return 0;
  1900. fail:
  1901. memset(picture, 0, sizeof(AVPicture));
  1902. return -1;
  1903. }
  1904. void avpicture_free(AVPicture *picture)
  1905. {
  1906. av_free(picture->data[0]);
  1907. }
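/* Illustrative usage sketch, kept out of the build with #if 0: allocating a
 * destination picture and copying plane data into it with the helpers above.
 * The pixel format is an example value. */
#if 0
static int example_copy_picture(AVPicture *dst, const AVPicture *src,
                                int width, int height)
{
    if (avpicture_alloc(dst, PIX_FMT_YUV420P, width, height) < 0)
        return -1;
    av_picture_copy(dst, src, PIX_FMT_YUV420P, width, height);
    /* release the allocated planes with avpicture_free(dst) when done */
    return 0;
}
#endif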
  1908. /* return true if yuv planar */
  1909. static inline int is_yuv_planar(const PixFmtInfo *ps)
  1910. {
  1911. return (ps->color_type == FF_COLOR_YUV ||
  1912. ps->color_type == FF_COLOR_YUV_JPEG) &&
  1913. ps->pixel_type == FF_PIXEL_PLANAR;
  1914. }
  1915. int av_picture_crop(AVPicture *dst, const AVPicture *src,
  1916. int pix_fmt, int top_band, int left_band)
  1917. {
  1918. int y_shift;
  1919. int x_shift;
  1920. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || !is_yuv_planar(&pix_fmt_info[pix_fmt]))
  1921. return -1;
  1922. y_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  1923. x_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  1924. dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
  1925. dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift);
  1926. dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift);
  1927. dst->linesize[0] = src->linesize[0];
  1928. dst->linesize[1] = src->linesize[1];
  1929. dst->linesize[2] = src->linesize[2];
  1930. return 0;
  1931. }
  1932. int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
  1933. int pix_fmt, int padtop, int padbottom, int padleft, int padright,
  1934. int *color)
  1935. {
  1936. uint8_t *optr;
  1937. int y_shift;
  1938. int x_shift;
  1939. int yheight;
  1940. int i, y;
  1941. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB ||
  1942. !is_yuv_planar(&pix_fmt_info[pix_fmt])) return -1;
  1943. for (i = 0; i < 3; i++) {
  1944. x_shift = i ? pix_fmt_info[pix_fmt].x_chroma_shift : 0;
  1945. y_shift = i ? pix_fmt_info[pix_fmt].y_chroma_shift : 0;
  1946. if (padtop || padleft) {
  1947. memset(dst->data[i], color[i],
  1948. dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift));
  1949. }
  1950. if (padleft || padright) {
  1951. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  1952. (dst->linesize[i] - (padright >> x_shift));
  1953. yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
  1954. for (y = 0; y < yheight; y++) {
  1955. memset(optr, color[i], (padleft + padright) >> x_shift);
  1956. optr += dst->linesize[i];
  1957. }
  1958. }
  1959. if (src) { /* first line */
  1960. uint8_t *iptr = src->data[i];
  1961. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  1962. (padleft >> x_shift);
  1963. memcpy(optr, iptr, (width - padleft - padright) >> x_shift);
  1964. iptr += src->linesize[i];
  1965. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  1966. (dst->linesize[i] - (padright >> x_shift));
  1967. yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
  1968. for (y = 0; y < yheight; y++) {
  1969. memset(optr, color[i], (padleft + padright) >> x_shift);
  1970. memcpy(optr + ((padleft + padright) >> x_shift), iptr,
  1971. (width - padleft - padright) >> x_shift);
  1972. iptr += src->linesize[i];
  1973. optr += dst->linesize[i];
  1974. }
  1975. }
  1976. if (padbottom || padright) {
  1977. optr = dst->data[i] + dst->linesize[i] *
  1978. ((height - padbottom) >> y_shift) - (padright >> x_shift);
  1979. memset(optr, color[i],dst->linesize[i] *
  1980. (padbottom >> y_shift) + (padright >> x_shift));
  1981. }
  1982. }
  1983. return 0;
  1984. }
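/*
 * av_picture_pad() paints the top band, the left/right margins of every
 * remaining line and the bottom band of dst with color[plane] and, when
 * src is non-NULL, copies the source picture into the window left in the
 * middle.  Example call (the "black" levels are illustrative): add 8
 * padding lines above and below a yuv420p frame:
 *
 *   int black[3] = { 16, 128, 128 };
 *   av_picture_pad(&padded, &src, src_height + 16, src_width,
 *                  PIX_FMT_YUV420P, 8, 8, 0, 0, black);
 */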
  1985. #if !CONFIG_SWSCALE
  1986. static uint8_t y_ccir_to_jpeg[256];
  1987. static uint8_t y_jpeg_to_ccir[256];
  1988. static uint8_t c_ccir_to_jpeg[256];
  1989. static uint8_t c_jpeg_to_ccir[256];
  1990. /* init various conversion tables */
  1991. static void img_convert_init(void)
  1992. {
  1993. int i;
  1994. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  1995. for(i = 0;i < 256; i++) {
  1996. y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
  1997. y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
  1998. c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
  1999. c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
  2000. }
  2001. }
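/*
 * These tables convert between full-range "JPEG" YUV levels (0..255) and
 * limited-range CCIR-601 levels (16..235 for luma, 16..240 for chroma),
 * so a color-space change reduces to a single table lookup per sample in
 * img_apply_table() below.
 */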
2002. /* apply the given lookup table to each pixel */
  2003. static void img_apply_table(uint8_t *dst, int dst_wrap,
  2004. const uint8_t *src, int src_wrap,
  2005. int width, int height, const uint8_t *table1)
  2006. {
  2007. int n;
  2008. const uint8_t *s;
  2009. uint8_t *d;
  2010. const uint8_t *table;
  2011. table = table1;
  2012. for(;height > 0; height--) {
  2013. s = src;
  2014. d = dst;
  2015. n = width;
  2016. while (n >= 4) {
  2017. d[0] = table[s[0]];
  2018. d[1] = table[s[1]];
  2019. d[2] = table[s[2]];
  2020. d[3] = table[s[3]];
  2021. d += 4;
  2022. s += 4;
  2023. n -= 4;
  2024. }
  2025. while (n > 0) {
  2026. d[0] = table[s[0]];
  2027. d++;
  2028. s++;
  2029. n--;
  2030. }
  2031. dst += dst_wrap;
  2032. src += src_wrap;
  2033. }
  2034. }
  2035. /* XXX: use generic filter ? */
  2036. /* XXX: in most cases, the sampling position is incorrect */
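/*
 * Naming convention for the plane resizers below: shrinkXY averages an
 * X (horizontal) by Y (vertical) block of source samples into one output
 * sample, growXY replicates one source sample into an X by Y block, and
 * the width/height arguments are always the dimensions of the
 * destination plane.
 */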
  2037. /* 4x1 -> 1x1 */
  2038. static void shrink41(uint8_t *dst, int dst_wrap,
  2039. const uint8_t *src, int src_wrap,
  2040. int width, int height)
  2041. {
  2042. int w;
  2043. const uint8_t *s;
  2044. uint8_t *d;
  2045. for(;height > 0; height--) {
  2046. s = src;
  2047. d = dst;
  2048. for(w = width;w > 0; w--) {
  2049. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  2050. s += 4;
  2051. d++;
  2052. }
  2053. src += src_wrap;
  2054. dst += dst_wrap;
  2055. }
  2056. }
  2057. /* 2x1 -> 1x1 */
  2058. static void shrink21(uint8_t *dst, int dst_wrap,
  2059. const uint8_t *src, int src_wrap,
  2060. int width, int height)
  2061. {
  2062. int w;
  2063. const uint8_t *s;
  2064. uint8_t *d;
  2065. for(;height > 0; height--) {
  2066. s = src;
  2067. d = dst;
  2068. for(w = width;w > 0; w--) {
  2069. d[0] = (s[0] + s[1]) >> 1;
  2070. s += 2;
  2071. d++;
  2072. }
  2073. src += src_wrap;
  2074. dst += dst_wrap;
  2075. }
  2076. }
  2077. /* 1x2 -> 1x1 */
  2078. static void shrink12(uint8_t *dst, int dst_wrap,
  2079. const uint8_t *src, int src_wrap,
  2080. int width, int height)
  2081. {
  2082. int w;
  2083. uint8_t *d;
  2084. const uint8_t *s1, *s2;
  2085. for(;height > 0; height--) {
  2086. s1 = src;
  2087. s2 = s1 + src_wrap;
  2088. d = dst;
  2089. for(w = width;w >= 4; w-=4) {
  2090. d[0] = (s1[0] + s2[0]) >> 1;
  2091. d[1] = (s1[1] + s2[1]) >> 1;
  2092. d[2] = (s1[2] + s2[2]) >> 1;
  2093. d[3] = (s1[3] + s2[3]) >> 1;
  2094. s1 += 4;
  2095. s2 += 4;
  2096. d += 4;
  2097. }
  2098. for(;w > 0; w--) {
  2099. d[0] = (s1[0] + s2[0]) >> 1;
  2100. s1++;
  2101. s2++;
  2102. d++;
  2103. }
  2104. src += 2 * src_wrap;
  2105. dst += dst_wrap;
  2106. }
  2107. }
  2108. static void grow21_line(uint8_t *dst, const uint8_t *src,
  2109. int width)
  2110. {
  2111. int w;
  2112. const uint8_t *s1;
  2113. uint8_t *d;
  2114. s1 = src;
  2115. d = dst;
  2116. for(w = width;w >= 4; w-=4) {
  2117. d[1] = d[0] = s1[0];
  2118. d[3] = d[2] = s1[1];
  2119. s1 += 2;
  2120. d += 4;
  2121. }
  2122. for(;w >= 2; w -= 2) {
  2123. d[1] = d[0] = s1[0];
  2124. s1 ++;
  2125. d += 2;
  2126. }
2127. /* only needed if width is not a multiple of two */
2128. /* XXX: verify that */
  2129. if (w) {
  2130. d[0] = s1[0];
  2131. }
  2132. }
  2133. static void grow41_line(uint8_t *dst, const uint8_t *src,
  2134. int width)
  2135. {
  2136. int w, v;
  2137. const uint8_t *s1;
  2138. uint8_t *d;
  2139. s1 = src;
  2140. d = dst;
  2141. for(w = width;w >= 4; w-=4) {
  2142. v = s1[0];
  2143. d[0] = v;
  2144. d[1] = v;
  2145. d[2] = v;
  2146. d[3] = v;
  2147. s1 ++;
  2148. d += 4;
  2149. }
  2150. }
  2151. /* 1x1 -> 2x1 */
  2152. static void grow21(uint8_t *dst, int dst_wrap,
  2153. const uint8_t *src, int src_wrap,
  2154. int width, int height)
  2155. {
  2156. for(;height > 0; height--) {
  2157. grow21_line(dst, src, width);
  2158. src += src_wrap;
  2159. dst += dst_wrap;
  2160. }
  2161. }
  2162. /* 1x1 -> 1x2 */
  2163. static void grow12(uint8_t *dst, int dst_wrap,
  2164. const uint8_t *src, int src_wrap,
  2165. int width, int height)
  2166. {
  2167. for(;height > 0; height-=2) {
  2168. memcpy(dst, src, width);
  2169. dst += dst_wrap;
  2170. memcpy(dst, src, width);
  2171. dst += dst_wrap;
  2172. src += src_wrap;
  2173. }
  2174. }
  2175. /* 1x1 -> 2x2 */
  2176. static void grow22(uint8_t *dst, int dst_wrap,
  2177. const uint8_t *src, int src_wrap,
  2178. int width, int height)
  2179. {
  2180. for(;height > 0; height--) {
  2181. grow21_line(dst, src, width);
  2182. if (height%2)
  2183. src += src_wrap;
  2184. dst += dst_wrap;
  2185. }
  2186. }
  2187. /* 1x1 -> 4x1 */
  2188. static void grow41(uint8_t *dst, int dst_wrap,
  2189. const uint8_t *src, int src_wrap,
  2190. int width, int height)
  2191. {
  2192. for(;height > 0; height--) {
  2193. grow41_line(dst, src, width);
  2194. src += src_wrap;
  2195. dst += dst_wrap;
  2196. }
  2197. }
  2198. /* 1x1 -> 4x4 */
  2199. static void grow44(uint8_t *dst, int dst_wrap,
  2200. const uint8_t *src, int src_wrap,
  2201. int width, int height)
  2202. {
  2203. for(;height > 0; height--) {
  2204. grow41_line(dst, src, width);
  2205. if ((height & 3) == 1)
  2206. src += src_wrap;
  2207. dst += dst_wrap;
  2208. }
  2209. }
  2210. /* 1x2 -> 2x1 */
  2211. static void conv411(uint8_t *dst, int dst_wrap,
  2212. const uint8_t *src, int src_wrap,
  2213. int width, int height)
  2214. {
  2215. int w, c;
  2216. const uint8_t *s1, *s2;
  2217. uint8_t *d;
  2218. width>>=1;
  2219. for(;height > 0; height--) {
  2220. s1 = src;
  2221. s2 = src + src_wrap;
  2222. d = dst;
  2223. for(w = width;w > 0; w--) {
  2224. c = (s1[0] + s2[0]) >> 1;
  2225. d[0] = c;
  2226. d[1] = c;
  2227. s1++;
  2228. s2++;
  2229. d += 2;
  2230. }
  2231. src += src_wrap * 2;
  2232. dst += dst_wrap;
  2233. }
  2234. }
  2235. /* XXX: always use linesize. Return -1 if not supported */
  2236. int img_convert(AVPicture *dst, int dst_pix_fmt,
  2237. const AVPicture *src, int src_pix_fmt,
  2238. int src_width, int src_height)
  2239. {
  2240. static int initialized;
  2241. int i, ret, dst_width, dst_height, int_pix_fmt;
  2242. const PixFmtInfo *src_pix, *dst_pix;
  2243. const ConvertEntry *ce;
  2244. AVPicture tmp1, *tmp = &tmp1;
  2245. if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
  2246. dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
  2247. return -1;
  2248. if (src_width <= 0 || src_height <= 0)
  2249. return 0;
  2250. if (!initialized) {
  2251. initialized = 1;
  2252. img_convert_init();
  2253. }
  2254. dst_width = src_width;
  2255. dst_height = src_height;
  2256. dst_pix = &pix_fmt_info[dst_pix_fmt];
  2257. src_pix = &pix_fmt_info[src_pix_fmt];
  2258. if (src_pix_fmt == dst_pix_fmt) {
  2259. /* no conversion needed: just copy */
  2260. av_picture_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
  2261. return 0;
  2262. }
  2263. ce = &convert_table[src_pix_fmt][dst_pix_fmt];
  2264. if (ce->convert) {
  2265. /* specific conversion routine */
  2266. ce->convert(dst, src, dst_width, dst_height);
  2267. return 0;
  2268. }
  2269. /* gray to YUV */
  2270. if (is_yuv_planar(dst_pix) &&
  2271. src_pix_fmt == PIX_FMT_GRAY8) {
  2272. int w, h, y;
  2273. uint8_t *d;
  2274. if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
  2275. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2276. src->data[0], src->linesize[0],
  2277. dst_width, dst_height);
  2278. } else {
  2279. img_apply_table(dst->data[0], dst->linesize[0],
  2280. src->data[0], src->linesize[0],
  2281. dst_width, dst_height,
  2282. y_jpeg_to_ccir);
  2283. }
  2284. /* fill U and V with 128 */
  2285. w = dst_width;
  2286. h = dst_height;
  2287. w >>= dst_pix->x_chroma_shift;
  2288. h >>= dst_pix->y_chroma_shift;
  2289. for(i = 1; i <= 2; i++) {
  2290. d = dst->data[i];
  2291. for(y = 0; y< h; y++) {
  2292. memset(d, 128, w);
  2293. d += dst->linesize[i];
  2294. }
  2295. }
  2296. return 0;
  2297. }
  2298. /* YUV to gray */
  2299. if (is_yuv_planar(src_pix) &&
  2300. dst_pix_fmt == PIX_FMT_GRAY8) {
  2301. if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
  2302. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2303. src->data[0], src->linesize[0],
  2304. dst_width, dst_height);
  2305. } else {
  2306. img_apply_table(dst->data[0], dst->linesize[0],
  2307. src->data[0], src->linesize[0],
  2308. dst_width, dst_height,
  2309. y_ccir_to_jpeg);
  2310. }
  2311. return 0;
  2312. }
  2313. /* YUV to YUV planar */
  2314. if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
  2315. int x_shift, y_shift, w, h, xy_shift;
  2316. void (*resize_func)(uint8_t *dst, int dst_wrap,
  2317. const uint8_t *src, int src_wrap,
  2318. int width, int height);
  2319. /* compute chroma size of the smallest dimensions */
  2320. w = dst_width;
  2321. h = dst_height;
  2322. if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
  2323. w >>= dst_pix->x_chroma_shift;
  2324. else
  2325. w >>= src_pix->x_chroma_shift;
  2326. if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
  2327. h >>= dst_pix->y_chroma_shift;
  2328. else
  2329. h >>= src_pix->y_chroma_shift;
  2330. x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
  2331. y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
  2332. xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
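/*
 * xy_shift packs the two signed shift differences into one byte so the
 * switch below can dispatch on both at once.  Worked example
 * (illustrative): yuv420p (chroma shifts 1,1) to yuv444p (0,0) gives
 * x_shift = y_shift = -1, hence xy_shift = 0xff, which selects grow22,
 * i.e. every chroma sample is duplicated into a 2x2 block.
 */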
2333. /* filters must exist at least for conversion to and from the
2334. YUV444 format */
  2335. switch(xy_shift) {
  2336. case 0x00:
  2337. resize_func = ff_img_copy_plane;
  2338. break;
  2339. case 0x10:
  2340. resize_func = shrink21;
  2341. break;
  2342. case 0x20:
  2343. resize_func = shrink41;
  2344. break;
  2345. case 0x01:
  2346. resize_func = shrink12;
  2347. break;
  2348. case 0x11:
  2349. resize_func = ff_shrink22;
  2350. break;
  2351. case 0x22:
  2352. resize_func = ff_shrink44;
  2353. break;
  2354. case 0xf0:
  2355. resize_func = grow21;
  2356. break;
  2357. case 0x0f:
  2358. resize_func = grow12;
  2359. break;
  2360. case 0xe0:
  2361. resize_func = grow41;
  2362. break;
  2363. case 0xff:
  2364. resize_func = grow22;
  2365. break;
  2366. case 0xee:
  2367. resize_func = grow44;
  2368. break;
  2369. case 0xf1:
  2370. resize_func = conv411;
  2371. break;
  2372. default:
  2373. /* currently not handled */
  2374. goto no_chroma_filter;
  2375. }
  2376. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2377. src->data[0], src->linesize[0],
  2378. dst_width, dst_height);
  2379. for(i = 1;i <= 2; i++)
  2380. resize_func(dst->data[i], dst->linesize[i],
  2381. src->data[i], src->linesize[i],
  2382. dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
  2383. /* if yuv color space conversion is needed, we do it here on
  2384. the destination image */
  2385. if (dst_pix->color_type != src_pix->color_type) {
  2386. const uint8_t *y_table, *c_table;
  2387. if (dst_pix->color_type == FF_COLOR_YUV) {
  2388. y_table = y_jpeg_to_ccir;
  2389. c_table = c_jpeg_to_ccir;
  2390. } else {
  2391. y_table = y_ccir_to_jpeg;
  2392. c_table = c_ccir_to_jpeg;
  2393. }
  2394. img_apply_table(dst->data[0], dst->linesize[0],
  2395. dst->data[0], dst->linesize[0],
  2396. dst_width, dst_height,
  2397. y_table);
  2398. for(i = 1;i <= 2; i++)
  2399. img_apply_table(dst->data[i], dst->linesize[i],
  2400. dst->data[i], dst->linesize[i],
  2401. dst_width>>dst_pix->x_chroma_shift,
  2402. dst_height>>dst_pix->y_chroma_shift,
  2403. c_table);
  2404. }
  2405. return 0;
  2406. }
  2407. no_chroma_filter:
  2408. /* try to use an intermediate format */
  2409. if (src_pix_fmt == PIX_FMT_YUYV422 ||
  2410. dst_pix_fmt == PIX_FMT_YUYV422) {
  2411. /* specific case: convert to YUV422P first */
  2412. int_pix_fmt = PIX_FMT_YUV422P;
  2413. } else if (src_pix_fmt == PIX_FMT_UYVY422 ||
  2414. dst_pix_fmt == PIX_FMT_UYVY422) {
  2415. /* specific case: convert to YUV422P first */
  2416. int_pix_fmt = PIX_FMT_YUV422P;
  2417. } else if (src_pix_fmt == PIX_FMT_UYYVYY411 ||
  2418. dst_pix_fmt == PIX_FMT_UYYVYY411) {
  2419. /* specific case: convert to YUV411P first */
  2420. int_pix_fmt = PIX_FMT_YUV411P;
  2421. } else if ((src_pix->color_type == FF_COLOR_GRAY &&
  2422. src_pix_fmt != PIX_FMT_GRAY8) ||
  2423. (dst_pix->color_type == FF_COLOR_GRAY &&
  2424. dst_pix_fmt != PIX_FMT_GRAY8)) {
  2425. /* gray8 is the normalized format */
  2426. int_pix_fmt = PIX_FMT_GRAY8;
  2427. } else if ((is_yuv_planar(src_pix) &&
  2428. src_pix_fmt != PIX_FMT_YUV444P &&
  2429. src_pix_fmt != PIX_FMT_YUVJ444P)) {
  2430. /* yuv444 is the normalized format */
  2431. if (src_pix->color_type == FF_COLOR_YUV_JPEG)
  2432. int_pix_fmt = PIX_FMT_YUVJ444P;
  2433. else
  2434. int_pix_fmt = PIX_FMT_YUV444P;
  2435. } else if ((is_yuv_planar(dst_pix) &&
  2436. dst_pix_fmt != PIX_FMT_YUV444P &&
  2437. dst_pix_fmt != PIX_FMT_YUVJ444P)) {
  2438. /* yuv444 is the normalized format */
  2439. if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
  2440. int_pix_fmt = PIX_FMT_YUVJ444P;
  2441. else
  2442. int_pix_fmt = PIX_FMT_YUV444P;
  2443. } else {
  2444. /* the two formats are rgb or gray8 or yuv[j]444p */
  2445. if (src_pix->is_alpha && dst_pix->is_alpha)
  2446. int_pix_fmt = PIX_FMT_RGB32;
  2447. else
  2448. int_pix_fmt = PIX_FMT_RGB24;
  2449. }
  2450. if (src_pix_fmt == int_pix_fmt)
  2451. return -1;
  2452. if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2453. return -1;
  2454. ret = -1;
  2455. if (img_convert(tmp, int_pix_fmt,
  2456. src, src_pix_fmt, src_width, src_height) < 0)
  2457. goto fail1;
  2458. if (img_convert(dst, dst_pix_fmt,
  2459. tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2460. goto fail1;
  2461. ret = 0;
  2462. fail1:
  2463. avpicture_free(tmp);
  2464. return ret;
  2465. }
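/*
 * Illustrative use of img_convert() (only compiled when swscale is not
 * available); the allocation shown is just an example:
 *
 *   AVPicture rgb;
 *   if (avpicture_alloc(&rgb, PIX_FMT_RGB24, w, h) < 0 ||
 *       img_convert(&rgb, PIX_FMT_RGB24, &yuv, PIX_FMT_YUV420P, w, h) < 0)
 *       ...handle the error or the unsupported conversion...
 */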
  2466. #endif
2467. /* NOTE: we scan all the pixels to get exact information */
  2468. static int get_alpha_info_pal8(const AVPicture *src, int width, int height)
  2469. {
  2470. const unsigned char *p;
  2471. int src_wrap, ret, x, y;
  2472. unsigned int a;
  2473. uint32_t *palette = (uint32_t *)src->data[1];
  2474. p = src->data[0];
  2475. src_wrap = src->linesize[0] - width;
  2476. ret = 0;
  2477. for(y=0;y<height;y++) {
  2478. for(x=0;x<width;x++) {
  2479. a = palette[p[0]] >> 24;
  2480. if (a == 0x00) {
  2481. ret |= FF_ALPHA_TRANSP;
  2482. } else if (a != 0xff) {
  2483. ret |= FF_ALPHA_SEMI_TRANSP;
  2484. }
  2485. p++;
  2486. }
  2487. p += src_wrap;
  2488. }
  2489. return ret;
  2490. }
  2491. int img_get_alpha_info(const AVPicture *src,
  2492. int pix_fmt, int width, int height)
  2493. {
2494. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
2495. int ret;
  2497. /* no alpha can be represented in format */
  2498. if (!pf->is_alpha)
  2499. return 0;
  2500. switch(pix_fmt) {
  2501. case PIX_FMT_RGB32:
  2502. ret = get_alpha_info_rgb32(src, width, height);
  2503. break;
  2504. case PIX_FMT_PAL8:
  2505. ret = get_alpha_info_pal8(src, width, height);
  2506. break;
  2507. default:
2508. /* unknown format: conservatively report both transparency flags */
  2509. ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
  2510. break;
  2511. }
  2512. return ret;
  2513. }
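/*
 * Example of interpreting the returned mask (flag names as used above):
 *
 *   int info = img_get_alpha_info(&pic, PIX_FMT_RGB32, w, h);
 *   if (info & FF_ALPHA_TRANSP)
 *       ...at least one fully transparent pixel...
 *   if (info & FF_ALPHA_SEMI_TRANSP)
 *       ...at least one partially transparent pixel...
 */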
  2514. #if HAVE_MMX
  2515. #define DEINT_INPLACE_LINE_LUM \
  2516. movd_m2r(lum_m4[0],mm0);\
  2517. movd_m2r(lum_m3[0],mm1);\
  2518. movd_m2r(lum_m2[0],mm2);\
  2519. movd_m2r(lum_m1[0],mm3);\
  2520. movd_m2r(lum[0],mm4);\
  2521. punpcklbw_r2r(mm7,mm0);\
  2522. movd_r2m(mm2,lum_m4[0]);\
  2523. punpcklbw_r2r(mm7,mm1);\
  2524. punpcklbw_r2r(mm7,mm2);\
  2525. punpcklbw_r2r(mm7,mm3);\
  2526. punpcklbw_r2r(mm7,mm4);\
  2527. paddw_r2r(mm3,mm1);\
  2528. psllw_i2r(1,mm2);\
  2529. paddw_r2r(mm4,mm0);\
  2530. psllw_i2r(2,mm1);\
  2531. paddw_r2r(mm6,mm2);\
  2532. paddw_r2r(mm2,mm1);\
  2533. psubusw_r2r(mm0,mm1);\
  2534. psrlw_i2r(3,mm1);\
  2535. packuswb_r2r(mm7,mm1);\
  2536. movd_r2m(mm1,lum_m2[0]);
  2537. #define DEINT_LINE_LUM \
  2538. movd_m2r(lum_m4[0],mm0);\
  2539. movd_m2r(lum_m3[0],mm1);\
  2540. movd_m2r(lum_m2[0],mm2);\
  2541. movd_m2r(lum_m1[0],mm3);\
  2542. movd_m2r(lum[0],mm4);\
  2543. punpcklbw_r2r(mm7,mm0);\
  2544. punpcklbw_r2r(mm7,mm1);\
  2545. punpcklbw_r2r(mm7,mm2);\
  2546. punpcklbw_r2r(mm7,mm3);\
  2547. punpcklbw_r2r(mm7,mm4);\
  2548. paddw_r2r(mm3,mm1);\
  2549. psllw_i2r(1,mm2);\
  2550. paddw_r2r(mm4,mm0);\
  2551. psllw_i2r(2,mm1);\
  2552. paddw_r2r(mm6,mm2);\
  2553. paddw_r2r(mm2,mm1);\
  2554. psubusw_r2r(mm0,mm1);\
  2555. psrlw_i2r(3,mm1);\
  2556. packuswb_r2r(mm7,mm1);\
  2557. movd_r2m(mm1,dst[0]);
  2558. #endif
  2559. /* filter parameters: [-1 4 2 4 -1] // 8 */
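/*
 * i.e. every output sample is clip((-a + 4*b + 2*c + 4*d - e + 4) >> 3),
 * with a..e the five vertically neighbouring input samples.  Worked
 * example on a flat area (a = b = c = d = e = 100, an arbitrary value):
 * (-100 + 400 + 200 + 400 - 100 + 4) >> 3 = 804 >> 3 = 100, so constant
 * regions pass through unchanged.
 */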
  2560. static void deinterlace_line(uint8_t *dst,
  2561. const uint8_t *lum_m4, const uint8_t *lum_m3,
  2562. const uint8_t *lum_m2, const uint8_t *lum_m1,
  2563. const uint8_t *lum,
  2564. int size)
  2565. {
  2566. #if !HAVE_MMX
  2567. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  2568. int sum;
  2569. for(;size > 0;size--) {
  2570. sum = -lum_m4[0];
  2571. sum += lum_m3[0] << 2;
  2572. sum += lum_m2[0] << 1;
  2573. sum += lum_m1[0] << 2;
  2574. sum += -lum[0];
  2575. dst[0] = cm[(sum + 4) >> 3];
  2576. lum_m4++;
  2577. lum_m3++;
  2578. lum_m2++;
  2579. lum_m1++;
  2580. lum++;
  2581. dst++;
  2582. }
  2583. #else
  2584. {
  2585. pxor_r2r(mm7,mm7);
  2586. movq_m2r(ff_pw_4,mm6);
  2587. }
  2588. for (;size > 3; size-=4) {
  2589. DEINT_LINE_LUM
  2590. lum_m4+=4;
  2591. lum_m3+=4;
  2592. lum_m2+=4;
  2593. lum_m1+=4;
  2594. lum+=4;
  2595. dst+=4;
  2596. }
  2597. #endif
  2598. }
  2599. static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
  2600. int size)
  2601. {
  2602. #if !HAVE_MMX
  2603. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  2604. int sum;
  2605. for(;size > 0;size--) {
  2606. sum = -lum_m4[0];
  2607. sum += lum_m3[0] << 2;
  2608. sum += lum_m2[0] << 1;
  2609. lum_m4[0]=lum_m2[0];
  2610. sum += lum_m1[0] << 2;
  2611. sum += -lum[0];
  2612. lum_m2[0] = cm[(sum + 4) >> 3];
  2613. lum_m4++;
  2614. lum_m3++;
  2615. lum_m2++;
  2616. lum_m1++;
  2617. lum++;
  2618. }
  2619. #else
  2620. {
  2621. pxor_r2r(mm7,mm7);
  2622. movq_m2r(ff_pw_4,mm6);
  2623. }
  2624. for (;size > 3; size-=4) {
  2625. DEINT_INPLACE_LINE_LUM
  2626. lum_m4+=4;
  2627. lum_m3+=4;
  2628. lum_m2+=4;
  2629. lum_m1+=4;
  2630. lum+=4;
  2631. }
  2632. #endif
  2633. }
2634. /* deinterlacing: 2 temporal taps, 3 spatial taps linear filter. The
2635. top field is copied as is, but the bottom field is deinterlaced
2636. against the top field. */
  2637. static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
  2638. const uint8_t *src1, int src_wrap,
  2639. int width, int height)
  2640. {
  2641. const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
  2642. int y;
  2643. src_m2 = src1;
  2644. src_m1 = src1;
  2645. src_0=&src_m1[src_wrap];
  2646. src_p1=&src_0[src_wrap];
  2647. src_p2=&src_p1[src_wrap];
  2648. for(y=0;y<(height-2);y+=2) {
  2649. memcpy(dst,src_m1,width);
  2650. dst += dst_wrap;
  2651. deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
  2652. src_m2 = src_0;
  2653. src_m1 = src_p1;
  2654. src_0 = src_p2;
  2655. src_p1 += 2*src_wrap;
  2656. src_p2 += 2*src_wrap;
  2657. dst += dst_wrap;
  2658. }
  2659. memcpy(dst,src_m1,width);
  2660. dst += dst_wrap;
  2661. /* do last line */
  2662. deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
  2663. }
  2664. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  2665. int width, int height)
  2666. {
  2667. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  2668. int y;
  2669. uint8_t *buf;
2670. buf = (uint8_t*)av_malloc(width);
if (!buf)
return;
  2671. src_m1 = src1;
  2672. memcpy(buf,src_m1,width);
  2673. src_0=&src_m1[src_wrap];
  2674. src_p1=&src_0[src_wrap];
  2675. src_p2=&src_p1[src_wrap];
  2676. for(y=0;y<(height-2);y+=2) {
  2677. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  2678. src_m1 = src_p1;
  2679. src_0 = src_p2;
  2680. src_p1 += 2*src_wrap;
  2681. src_p2 += 2*src_wrap;
  2682. }
  2683. /* do last line */
  2684. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  2685. av_free(buf);
  2686. }
  2687. int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
  2688. int pix_fmt, int width, int height)
  2689. {
  2690. int i;
  2691. if (pix_fmt != PIX_FMT_YUV420P &&
  2692. pix_fmt != PIX_FMT_YUV422P &&
  2693. pix_fmt != PIX_FMT_YUV444P &&
  2694. pix_fmt != PIX_FMT_YUV411P &&
  2695. pix_fmt != PIX_FMT_GRAY8)
  2696. return -1;
  2697. if ((width & 3) != 0 || (height & 3) != 0)
  2698. return -1;
  2699. for(i=0;i<3;i++) {
  2700. if (i == 1) {
  2701. switch(pix_fmt) {
  2702. case PIX_FMT_YUV420P:
  2703. width >>= 1;
  2704. height >>= 1;
  2705. break;
  2706. case PIX_FMT_YUV422P:
  2707. width >>= 1;
  2708. break;
  2709. case PIX_FMT_YUV411P:
  2710. width >>= 2;
  2711. break;
  2712. default:
  2713. break;
  2714. }
  2715. if (pix_fmt == PIX_FMT_GRAY8) {
  2716. break;
  2717. }
  2718. }
  2719. if (src == dst) {
  2720. deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i],
  2721. width, height);
  2722. } else {
  2723. deinterlace_bottom_field(dst->data[i],dst->linesize[i],
  2724. src->data[i], src->linesize[i],
  2725. width, height);
  2726. }
  2727. }
  2728. emms_c();
  2729. return 0;
  2730. }
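/*
 * Illustrative call: deinterlace a yuv420p frame in place (src == dst is
 * explicitly supported); width and height must both be multiples of 4 or
 * the function returns -1.
 *
 *   avpicture_deinterlace(&pic, &pic, PIX_FMT_YUV420P, width, height);
 */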