You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

3011 lines
80KB

  1. /*
  2. * Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file libavcodec/imgconvert.c
  23. * misc image conversion routines
  24. */
  25. /* TODO:
  26. * - write 'ffimg' program to test all the image related stuff
  27. * - move all api to slice based system
  28. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  29. */
  30. #include "avcodec.h"
  31. #include "dsputil.h"
  32. #include "colorspace.h"
  33. #if HAVE_MMX
  34. #include "x86/mmx.h"
  35. #include "x86/dsputil_mmx.h"
  36. #endif
/* token-pasting helpers: glue() expands its arguments before pasting */
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)

/* color-space classes stored in PixFmtInfo.color_type */
#define FF_COLOR_RGB 0 /**< RGB color space */
#define FF_COLOR_GRAY 1 /**< gray color space */
#define FF_COLOR_YUV 2 /**< YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
#define FF_COLOR_YUV_JPEG 3 /**< YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */

/* pixel-storage classes stored in PixFmtInfo.pixel_type */
#define FF_PIXEL_PLANAR 0 /**< each channel has one component in AVPicture */
#define FF_PIXEL_PACKED 1 /**< only one components containing all the channels */
#define FF_PIXEL_PALETTE 2 /**< one components containing indexes for a palette */
/**
 * Static description of one pixel format: channel count, color space,
 * storage layout and chroma subsampling.  One entry per PixelFormat
 * lives in the pix_fmt_info[] table below.
 */
typedef struct PixFmtInfo {
    const char *name;
    uint8_t nb_channels; /**< number of channels (including alpha) */
    uint8_t color_type; /**< color type (see FF_COLOR_xxx constants) */
    uint8_t pixel_type; /**< pixel storage type (see FF_PIXEL_xxx constants) */
    uint8_t is_alpha : 1; /**< true if alpha can be specified */
    uint8_t is_hwaccel : 1; /**< true if this is an HW accelerated format */
    uint8_t x_chroma_shift; /**< X chroma subsampling factor is 2 ^ shift */
    uint8_t y_chroma_shift; /**< Y chroma subsampling factor is 2 ^ shift */
    uint8_t depth; /**< bit depth of the color components */
} PixFmtInfo;
  57. /* this table gives more information about formats */
  58. static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
  59. /* YUV formats */
  60. [PIX_FMT_YUV420P] = {
  61. .name = "yuv420p",
  62. .nb_channels = 3,
  63. .color_type = FF_COLOR_YUV,
  64. .pixel_type = FF_PIXEL_PLANAR,
  65. .depth = 8,
  66. .x_chroma_shift = 1, .y_chroma_shift = 1,
  67. },
  68. [PIX_FMT_YUV422P] = {
  69. .name = "yuv422p",
  70. .nb_channels = 3,
  71. .color_type = FF_COLOR_YUV,
  72. .pixel_type = FF_PIXEL_PLANAR,
  73. .depth = 8,
  74. .x_chroma_shift = 1, .y_chroma_shift = 0,
  75. },
  76. [PIX_FMT_YUV444P] = {
  77. .name = "yuv444p",
  78. .nb_channels = 3,
  79. .color_type = FF_COLOR_YUV,
  80. .pixel_type = FF_PIXEL_PLANAR,
  81. .depth = 8,
  82. .x_chroma_shift = 0, .y_chroma_shift = 0,
  83. },
  84. [PIX_FMT_YUYV422] = {
  85. .name = "yuyv422",
  86. .nb_channels = 1,
  87. .color_type = FF_COLOR_YUV,
  88. .pixel_type = FF_PIXEL_PACKED,
  89. .depth = 8,
  90. .x_chroma_shift = 1, .y_chroma_shift = 0,
  91. },
  92. [PIX_FMT_UYVY422] = {
  93. .name = "uyvy422",
  94. .nb_channels = 1,
  95. .color_type = FF_COLOR_YUV,
  96. .pixel_type = FF_PIXEL_PACKED,
  97. .depth = 8,
  98. .x_chroma_shift = 1, .y_chroma_shift = 0,
  99. },
  100. [PIX_FMT_YUV410P] = {
  101. .name = "yuv410p",
  102. .nb_channels = 3,
  103. .color_type = FF_COLOR_YUV,
  104. .pixel_type = FF_PIXEL_PLANAR,
  105. .depth = 8,
  106. .x_chroma_shift = 2, .y_chroma_shift = 2,
  107. },
  108. [PIX_FMT_YUV411P] = {
  109. .name = "yuv411p",
  110. .nb_channels = 3,
  111. .color_type = FF_COLOR_YUV,
  112. .pixel_type = FF_PIXEL_PLANAR,
  113. .depth = 8,
  114. .x_chroma_shift = 2, .y_chroma_shift = 0,
  115. },
  116. [PIX_FMT_YUV440P] = {
  117. .name = "yuv440p",
  118. .nb_channels = 3,
  119. .color_type = FF_COLOR_YUV,
  120. .pixel_type = FF_PIXEL_PLANAR,
  121. .depth = 8,
  122. .x_chroma_shift = 0, .y_chroma_shift = 1,
  123. },
  124. /* YUV formats with alpha plane */
  125. [PIX_FMT_YUVA420P] = {
  126. .name = "yuva420p",
  127. .nb_channels = 4,
  128. .color_type = FF_COLOR_YUV,
  129. .pixel_type = FF_PIXEL_PLANAR,
  130. .depth = 8,
  131. .x_chroma_shift = 1, .y_chroma_shift = 1,
  132. },
  133. /* JPEG YUV */
  134. [PIX_FMT_YUVJ420P] = {
  135. .name = "yuvj420p",
  136. .nb_channels = 3,
  137. .color_type = FF_COLOR_YUV_JPEG,
  138. .pixel_type = FF_PIXEL_PLANAR,
  139. .depth = 8,
  140. .x_chroma_shift = 1, .y_chroma_shift = 1,
  141. },
  142. [PIX_FMT_YUVJ422P] = {
  143. .name = "yuvj422p",
  144. .nb_channels = 3,
  145. .color_type = FF_COLOR_YUV_JPEG,
  146. .pixel_type = FF_PIXEL_PLANAR,
  147. .depth = 8,
  148. .x_chroma_shift = 1, .y_chroma_shift = 0,
  149. },
  150. [PIX_FMT_YUVJ444P] = {
  151. .name = "yuvj444p",
  152. .nb_channels = 3,
  153. .color_type = FF_COLOR_YUV_JPEG,
  154. .pixel_type = FF_PIXEL_PLANAR,
  155. .depth = 8,
  156. .x_chroma_shift = 0, .y_chroma_shift = 0,
  157. },
  158. [PIX_FMT_YUVJ440P] = {
  159. .name = "yuvj440p",
  160. .nb_channels = 3,
  161. .color_type = FF_COLOR_YUV_JPEG,
  162. .pixel_type = FF_PIXEL_PLANAR,
  163. .depth = 8,
  164. .x_chroma_shift = 0, .y_chroma_shift = 1,
  165. },
  166. /* RGB formats */
  167. [PIX_FMT_RGB24] = {
  168. .name = "rgb24",
  169. .nb_channels = 3,
  170. .color_type = FF_COLOR_RGB,
  171. .pixel_type = FF_PIXEL_PACKED,
  172. .depth = 8,
  173. .x_chroma_shift = 0, .y_chroma_shift = 0,
  174. },
  175. [PIX_FMT_BGR24] = {
  176. .name = "bgr24",
  177. .nb_channels = 3,
  178. .color_type = FF_COLOR_RGB,
  179. .pixel_type = FF_PIXEL_PACKED,
  180. .depth = 8,
  181. .x_chroma_shift = 0, .y_chroma_shift = 0,
  182. },
  183. [PIX_FMT_RGB32] = {
  184. .name = "rgb32",
  185. .nb_channels = 4, .is_alpha = 1,
  186. .color_type = FF_COLOR_RGB,
  187. .pixel_type = FF_PIXEL_PACKED,
  188. .depth = 8,
  189. .x_chroma_shift = 0, .y_chroma_shift = 0,
  190. },
  191. [PIX_FMT_RGB48BE] = {
  192. .name = "rgb48be",
  193. .nb_channels = 3,
  194. .color_type = FF_COLOR_RGB,
  195. .pixel_type = FF_PIXEL_PACKED,
  196. .depth = 16,
  197. .x_chroma_shift = 0, .y_chroma_shift = 0,
  198. },
  199. [PIX_FMT_RGB48LE] = {
  200. .name = "rgb48le",
  201. .nb_channels = 3,
  202. .color_type = FF_COLOR_RGB,
  203. .pixel_type = FF_PIXEL_PACKED,
  204. .depth = 16,
  205. .x_chroma_shift = 0, .y_chroma_shift = 0,
  206. },
  207. [PIX_FMT_RGB565] = {
  208. .name = "rgb565",
  209. .nb_channels = 3,
  210. .color_type = FF_COLOR_RGB,
  211. .pixel_type = FF_PIXEL_PACKED,
  212. .depth = 5,
  213. .x_chroma_shift = 0, .y_chroma_shift = 0,
  214. },
  215. [PIX_FMT_RGB555] = {
  216. .name = "rgb555",
  217. .nb_channels = 3,
  218. .color_type = FF_COLOR_RGB,
  219. .pixel_type = FF_PIXEL_PACKED,
  220. .depth = 5,
  221. .x_chroma_shift = 0, .y_chroma_shift = 0,
  222. },
  223. /* gray / mono formats */
  224. [PIX_FMT_GRAY16BE] = {
  225. .name = "gray16be",
  226. .nb_channels = 1,
  227. .color_type = FF_COLOR_GRAY,
  228. .pixel_type = FF_PIXEL_PLANAR,
  229. .depth = 16,
  230. },
  231. [PIX_FMT_GRAY16LE] = {
  232. .name = "gray16le",
  233. .nb_channels = 1,
  234. .color_type = FF_COLOR_GRAY,
  235. .pixel_type = FF_PIXEL_PLANAR,
  236. .depth = 16,
  237. },
  238. [PIX_FMT_GRAY8] = {
  239. .name = "gray",
  240. .nb_channels = 1,
  241. .color_type = FF_COLOR_GRAY,
  242. .pixel_type = FF_PIXEL_PLANAR,
  243. .depth = 8,
  244. },
  245. [PIX_FMT_MONOWHITE] = {
  246. .name = "monow",
  247. .nb_channels = 1,
  248. .color_type = FF_COLOR_GRAY,
  249. .pixel_type = FF_PIXEL_PLANAR,
  250. .depth = 1,
  251. },
  252. [PIX_FMT_MONOBLACK] = {
  253. .name = "monob",
  254. .nb_channels = 1,
  255. .color_type = FF_COLOR_GRAY,
  256. .pixel_type = FF_PIXEL_PLANAR,
  257. .depth = 1,
  258. },
  259. /* paletted formats */
  260. [PIX_FMT_PAL8] = {
  261. .name = "pal8",
  262. .nb_channels = 4, .is_alpha = 1,
  263. .color_type = FF_COLOR_RGB,
  264. .pixel_type = FF_PIXEL_PALETTE,
  265. .depth = 8,
  266. },
  267. [PIX_FMT_XVMC_MPEG2_MC] = {
  268. .name = "xvmcmc",
  269. .is_hwaccel = 1,
  270. },
  271. [PIX_FMT_XVMC_MPEG2_IDCT] = {
  272. .name = "xvmcidct",
  273. .is_hwaccel = 1,
  274. },
  275. [PIX_FMT_VDPAU_MPEG1] = {
  276. .name = "vdpau_mpeg1",
  277. .is_hwaccel = 1,
  278. .x_chroma_shift = 1, .y_chroma_shift = 1,
  279. },
  280. [PIX_FMT_VDPAU_MPEG2] = {
  281. .name = "vdpau_mpeg2",
  282. .is_hwaccel = 1,
  283. .x_chroma_shift = 1, .y_chroma_shift = 1,
  284. },
  285. [PIX_FMT_VDPAU_H264] = {
  286. .name = "vdpau_h264",
  287. .is_hwaccel = 1,
  288. .x_chroma_shift = 1, .y_chroma_shift = 1,
  289. },
  290. [PIX_FMT_VDPAU_WMV3] = {
  291. .name = "vdpau_wmv3",
  292. .is_hwaccel = 1,
  293. .x_chroma_shift = 1, .y_chroma_shift = 1,
  294. },
  295. [PIX_FMT_VDPAU_VC1] = {
  296. .name = "vdpau_vc1",
  297. .is_hwaccel = 1,
  298. .x_chroma_shift = 1, .y_chroma_shift = 1,
  299. },
  300. [PIX_FMT_UYYVYY411] = {
  301. .name = "uyyvyy411",
  302. .nb_channels = 1,
  303. .color_type = FF_COLOR_YUV,
  304. .pixel_type = FF_PIXEL_PACKED,
  305. .depth = 8,
  306. .x_chroma_shift = 2, .y_chroma_shift = 0,
  307. },
  308. [PIX_FMT_BGR32] = {
  309. .name = "bgr32",
  310. .nb_channels = 4, .is_alpha = 1,
  311. .color_type = FF_COLOR_RGB,
  312. .pixel_type = FF_PIXEL_PACKED,
  313. .depth = 8,
  314. .x_chroma_shift = 0, .y_chroma_shift = 0,
  315. },
  316. [PIX_FMT_BGR565] = {
  317. .name = "bgr565",
  318. .nb_channels = 3,
  319. .color_type = FF_COLOR_RGB,
  320. .pixel_type = FF_PIXEL_PACKED,
  321. .depth = 5,
  322. .x_chroma_shift = 0, .y_chroma_shift = 0,
  323. },
  324. [PIX_FMT_BGR555] = {
  325. .name = "bgr555",
  326. .nb_channels = 3,
  327. .color_type = FF_COLOR_RGB,
  328. .pixel_type = FF_PIXEL_PACKED,
  329. .depth = 5,
  330. .x_chroma_shift = 0, .y_chroma_shift = 0,
  331. },
  332. [PIX_FMT_RGB8] = {
  333. .name = "rgb8",
  334. .nb_channels = 1,
  335. .color_type = FF_COLOR_RGB,
  336. .pixel_type = FF_PIXEL_PACKED,
  337. .depth = 8,
  338. .x_chroma_shift = 0, .y_chroma_shift = 0,
  339. },
  340. [PIX_FMT_RGB4] = {
  341. .name = "rgb4",
  342. .nb_channels = 1,
  343. .color_type = FF_COLOR_RGB,
  344. .pixel_type = FF_PIXEL_PACKED,
  345. .depth = 4,
  346. .x_chroma_shift = 0, .y_chroma_shift = 0,
  347. },
  348. [PIX_FMT_RGB4_BYTE] = {
  349. .name = "rgb4_byte",
  350. .nb_channels = 1,
  351. .color_type = FF_COLOR_RGB,
  352. .pixel_type = FF_PIXEL_PACKED,
  353. .depth = 8,
  354. .x_chroma_shift = 0, .y_chroma_shift = 0,
  355. },
  356. [PIX_FMT_BGR8] = {
  357. .name = "bgr8",
  358. .nb_channels = 1,
  359. .color_type = FF_COLOR_RGB,
  360. .pixel_type = FF_PIXEL_PACKED,
  361. .depth = 8,
  362. .x_chroma_shift = 0, .y_chroma_shift = 0,
  363. },
  364. [PIX_FMT_BGR4] = {
  365. .name = "bgr4",
  366. .nb_channels = 1,
  367. .color_type = FF_COLOR_RGB,
  368. .pixel_type = FF_PIXEL_PACKED,
  369. .depth = 4,
  370. .x_chroma_shift = 0, .y_chroma_shift = 0,
  371. },
  372. [PIX_FMT_BGR4_BYTE] = {
  373. .name = "bgr4_byte",
  374. .nb_channels = 1,
  375. .color_type = FF_COLOR_RGB,
  376. .pixel_type = FF_PIXEL_PACKED,
  377. .depth = 8,
  378. .x_chroma_shift = 0, .y_chroma_shift = 0,
  379. },
  380. [PIX_FMT_NV12] = {
  381. .name = "nv12",
  382. .nb_channels = 2,
  383. .color_type = FF_COLOR_YUV,
  384. .pixel_type = FF_PIXEL_PLANAR,
  385. .depth = 8,
  386. .x_chroma_shift = 1, .y_chroma_shift = 1,
  387. },
  388. [PIX_FMT_NV21] = {
  389. .name = "nv12",
  390. .nb_channels = 2,
  391. .color_type = FF_COLOR_YUV,
  392. .pixel_type = FF_PIXEL_PLANAR,
  393. .depth = 8,
  394. .x_chroma_shift = 1, .y_chroma_shift = 1,
  395. },
  396. [PIX_FMT_BGR32_1] = {
  397. .name = "bgr32_1",
  398. .nb_channels = 4, .is_alpha = 1,
  399. .color_type = FF_COLOR_RGB,
  400. .pixel_type = FF_PIXEL_PACKED,
  401. .depth = 8,
  402. .x_chroma_shift = 0, .y_chroma_shift = 0,
  403. },
  404. [PIX_FMT_RGB32_1] = {
  405. .name = "rgb32_1",
  406. .nb_channels = 4, .is_alpha = 1,
  407. .color_type = FF_COLOR_RGB,
  408. .pixel_type = FF_PIXEL_PACKED,
  409. .depth = 8,
  410. .x_chroma_shift = 0, .y_chroma_shift = 0,
  411. },
  412. /* VA API formats */
  413. [PIX_FMT_VAAPI_MOCO] = {
  414. .name = "vaapi_moco",
  415. .is_hwaccel = 1,
  416. .x_chroma_shift = 1, .y_chroma_shift = 1,
  417. },
  418. [PIX_FMT_VAAPI_IDCT] = {
  419. .name = "vaapi_idct",
  420. .is_hwaccel = 1,
  421. .x_chroma_shift = 1, .y_chroma_shift = 1,
  422. },
  423. [PIX_FMT_VAAPI_VLD] = {
  424. .name = "vaapi_vld",
  425. .is_hwaccel = 1,
  426. .x_chroma_shift = 1, .y_chroma_shift = 1,
  427. },
  428. };
  429. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  430. {
  431. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  432. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  433. }
  434. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  435. {
  436. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  437. return NULL;
  438. else
  439. return pix_fmt_info[pix_fmt].name;
  440. }
  441. enum PixelFormat avcodec_get_pix_fmt(const char* name)
  442. {
  443. int i;
  444. for (i=0; i < PIX_FMT_NB; i++)
  445. if (!strcmp(pix_fmt_info[i].name, name))
  446. return i;
  447. return PIX_FMT_NONE;
  448. }
  449. void avcodec_pix_fmt_string (char *buf, int buf_size, int pix_fmt)
  450. {
  451. /* print header */
  452. if (pix_fmt < 0)
  453. snprintf (buf, buf_size,
  454. "name " " nb_channels" " depth" " is_alpha"
  455. );
  456. else{
  457. PixFmtInfo info= pix_fmt_info[pix_fmt];
  458. char is_alpha_char= info.is_alpha ? 'y' : 'n';
  459. snprintf (buf, buf_size,
  460. "%-10s" " %1d " " %2d " " %c ",
  461. info.name,
  462. info.nb_channels,
  463. info.depth,
  464. is_alpha_char
  465. );
  466. }
  467. }
  468. int ff_is_hwaccel_pix_fmt(enum PixelFormat pix_fmt)
  469. {
  470. return pix_fmt_info[pix_fmt].is_hwaccel;
  471. }
  472. int ff_set_systematic_pal(uint32_t pal[256], enum PixelFormat pix_fmt){
  473. int i;
  474. for(i=0; i<256; i++){
  475. int r,g,b;
  476. switch(pix_fmt) {
  477. case PIX_FMT_RGB8:
  478. r= (i>>5 )*36;
  479. g= ((i>>2)&7)*36;
  480. b= (i&3 )*85;
  481. break;
  482. case PIX_FMT_BGR8:
  483. b= (i>>6 )*85;
  484. g= ((i>>3)&7)*36;
  485. r= (i&7 )*36;
  486. break;
  487. case PIX_FMT_RGB4_BYTE:
  488. r= (i>>3 )*255;
  489. g= ((i>>1)&3)*85;
  490. b= (i&1 )*255;
  491. break;
  492. case PIX_FMT_BGR4_BYTE:
  493. b= (i>>3 )*255;
  494. g= ((i>>1)&3)*85;
  495. r= (i&1 )*255;
  496. break;
  497. case PIX_FMT_GRAY8:
  498. r=b=g= i;
  499. break;
  500. default:
  501. return -1;
  502. }
  503. pal[i] = b + (g<<8) + (r<<16);
  504. }
  505. return 0;
  506. }
/**
 * Fill picture->linesize[] (bytes per row for each plane) for the given
 * pixel format and width.  Unused planes are zeroed first.
 *
 * @return 0 on success, -1 for a pixel format this switch does not handle
 */
int ff_fill_linesize(AVPicture *picture, int pix_fmt, int width)
{
    int w2;
    const PixFmtInfo *pinfo;

    memset(picture->linesize, 0, sizeof(picture->linesize));

    pinfo = &pix_fmt_info[pix_fmt];
    switch(pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
    case PIX_FMT_YUV440P:
    case PIX_FMT_YUVJ420P:
    case PIX_FMT_YUVJ422P:
    case PIX_FMT_YUVJ444P:
    case PIX_FMT_YUVJ440P:
        /* chroma width rounded up to the subsampling grid */
        w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
        picture->linesize[0] = width;
        picture->linesize[1] = w2;
        picture->linesize[2] = w2;
        break;
    case PIX_FMT_YUVA420P:
        w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
        picture->linesize[0] = width;
        picture->linesize[1] = w2;
        picture->linesize[2] = w2;
        picture->linesize[3] = width; /* full-resolution alpha plane */
        break;
    case PIX_FMT_NV12:
    case PIX_FMT_NV21:
        /* NOTE(review): linesize[1] is set to w2 although the interleaved
           UV plane occupies 2*w2 bytes per row; ff_fill_pointer()
           compensates with extra factors of 2 — verify the pair stays
           consistent if either side is changed. */
        w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
        picture->linesize[0] = width;
        picture->linesize[1] = w2;
        break;
    case PIX_FMT_RGB24:
    case PIX_FMT_BGR24:
        picture->linesize[0] = width * 3;
        break;
    case PIX_FMT_RGB32:
    case PIX_FMT_BGR32:
    case PIX_FMT_RGB32_1:
    case PIX_FMT_BGR32_1:
        picture->linesize[0] = width * 4;
        break;
    case PIX_FMT_RGB48BE:
    case PIX_FMT_RGB48LE:
        picture->linesize[0] = width * 6; /* 3 channels x 16 bits */
        break;
    case PIX_FMT_GRAY16BE:
    case PIX_FMT_GRAY16LE:
    case PIX_FMT_BGR555:
    case PIX_FMT_BGR565:
    case PIX_FMT_RGB555:
    case PIX_FMT_RGB565:
    case PIX_FMT_YUYV422:
        picture->linesize[0] = width * 2;
        break;
    case PIX_FMT_UYVY422:
        picture->linesize[0] = width * 2;
        break;
    case PIX_FMT_UYYVYY411:
        picture->linesize[0] = width + width/2; /* 12 bits per pixel, packed */
        break;
    case PIX_FMT_RGB4:
    case PIX_FMT_BGR4:
        /* NOTE(review): truncates for odd widths (no +1 rounding) — confirm
           callers only use even widths for 4-bit packed formats */
        picture->linesize[0] = width / 2;
        break;
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        picture->linesize[0] = (width + 7) >> 3; /* 1 bit/pixel, rounded up */
        break;
    case PIX_FMT_PAL8:
    case PIX_FMT_RGB8:
    case PIX_FMT_BGR8:
    case PIX_FMT_RGB4_BYTE:
    case PIX_FMT_BGR4_BYTE:
    case PIX_FMT_GRAY8:
        picture->linesize[0] = width;
        picture->linesize[1] = 4; /* palette plane: 256 entries of 4 bytes */
        break;
    default:
        return -1;
    }
    return 0;
}
/**
 * Fill picture->data[] with plane pointers into the buffer at ptr,
 * using the linesize[] values already set (see ff_fill_linesize()).
 * ptr may be NULL, in which case only the total size is computed.
 *
 * @return total number of bytes required for the image, or -1 for an
 *         unhandled pixel format
 */
int ff_fill_pointer(AVPicture *picture, uint8_t *ptr, int pix_fmt,
                    int height)
{
    int size, h2, size2;
    const PixFmtInfo *pinfo;

    pinfo = &pix_fmt_info[pix_fmt];
    size = picture->linesize[0] * height; /* luma / packed plane size */
    switch(pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
    case PIX_FMT_YUV440P:
    case PIX_FMT_YUVJ420P:
    case PIX_FMT_YUVJ422P:
    case PIX_FMT_YUVJ444P:
    case PIX_FMT_YUVJ440P:
        /* chroma height rounded up to the subsampling grid */
        h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
        size2 = picture->linesize[1] * h2;
        picture->data[0] = ptr;
        picture->data[1] = picture->data[0] + size;
        picture->data[2] = picture->data[1] + size2;
        picture->data[3] = NULL;
        return size + 2 * size2; /* Y + U + V */
    case PIX_FMT_YUVA420P:
        h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
        size2 = picture->linesize[1] * h2;
        picture->data[0] = ptr;
        picture->data[1] = picture->data[0] + size;
        picture->data[2] = picture->data[1] + size2;
        /* alpha plane follows V; data[1] + 2*size2 == data[2] + size2 */
        picture->data[3] = picture->data[1] + size2 + size2;
        return 2 * size + 2 * size2; /* Y + U + V + full-size alpha */
    case PIX_FMT_NV12:
    case PIX_FMT_NV21:
        h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
        /* linesize[1] is w2 (see ff_fill_linesize); the interleaved UV
           plane holds 2*w2 bytes per row, hence the *2 here.
           NOTE(review): the return value then adds 2*size2 on top, which
           looks like it counts the chroma plane twice — confirm against
           callers before changing either factor. */
        size2 = picture->linesize[1] * h2 * 2;
        picture->data[0] = ptr;
        picture->data[1] = picture->data[0] + size;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        return size + 2 * size2;
    case PIX_FMT_RGB24:
    case PIX_FMT_BGR24:
    case PIX_FMT_RGB32:
    case PIX_FMT_BGR32:
    case PIX_FMT_RGB32_1:
    case PIX_FMT_BGR32_1:
    case PIX_FMT_RGB48BE:
    case PIX_FMT_RGB48LE:
    case PIX_FMT_GRAY16BE:
    case PIX_FMT_GRAY16LE:
    case PIX_FMT_BGR555:
    case PIX_FMT_BGR565:
    case PIX_FMT_RGB555:
    case PIX_FMT_RGB565:
    case PIX_FMT_YUYV422:
    case PIX_FMT_UYVY422:
    case PIX_FMT_UYYVYY411:
    case PIX_FMT_RGB4:
    case PIX_FMT_BGR4:
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        /* single packed plane */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        return size;
    case PIX_FMT_PAL8:
    case PIX_FMT_RGB8:
    case PIX_FMT_BGR8:
    case PIX_FMT_RGB4_BYTE:
    case PIX_FMT_BGR4_BYTE:
    case PIX_FMT_GRAY8:
        size2 = (size + 3) & ~3; /* align palette start to 4 bytes */
        picture->data[0] = ptr;
        picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        return size2 + 256 * 4;
    default:
        picture->data[0] = NULL;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        return -1;
    }
}
  681. int avpicture_fill(AVPicture *picture, uint8_t *ptr,
  682. int pix_fmt, int width, int height)
  683. {
  684. if(avcodec_check_dimensions(NULL, width, height))
  685. return -1;
  686. if (ff_fill_linesize(picture, pix_fmt, width))
  687. return -1;
  688. return ff_fill_pointer(picture, ptr, pix_fmt, height);
  689. }
/**
 * Serialize the planes of src into the flat buffer dest, plane after
 * plane with no padding between rows.
 *
 * @param dest_size capacity of dest in bytes
 * @return the number of bytes written (the avpicture_get_size() of the
 *         image), or -1 if dest is too small or the size is invalid
 */
int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
                     unsigned char *dest, int dest_size)
{
    const PixFmtInfo* pf = &pix_fmt_info[pix_fmt];
    int i, j, w, ow, h, oh, data_planes;
    const unsigned char* s;
    int size = avpicture_get_size(pix_fmt, width, height);

    if (size > dest_size || size < 0)
        return -1;

    if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
        /* single data plane; compute its bytes-per-row */
        if (pix_fmt == PIX_FMT_YUYV422 ||
            pix_fmt == PIX_FMT_UYVY422 ||
            pix_fmt == PIX_FMT_BGR565 ||
            pix_fmt == PIX_FMT_BGR555 ||
            pix_fmt == PIX_FMT_RGB565 ||
            pix_fmt == PIX_FMT_RGB555)
            w = width * 2;
        else if (pix_fmt == PIX_FMT_UYYVYY411)
            w = width + width/2;
        else if (pix_fmt == PIX_FMT_PAL8)
            w = width;
        else
            w = width * (pf->depth * pf->nb_channels / 8);

        data_planes = 1;
        h = height;
    } else {
        data_planes = pf->nb_channels;
        w = (width*pf->depth + 7)/8;
        h = height;
    }

    /* remember luma-plane dimensions so plane 3 (alpha) can restore them */
    ow = w;
    oh = h;

    for (i=0; i<data_planes; i++) {
        if (i == 1) {
            /* planes 1 and 2 are subsampled chroma.
               NOTE(review): width is shifted directly (truncating), while
               ff_fill_linesize rounds the chroma width up — confirm for
               odd dimensions. */
            w = width >> pf->x_chroma_shift;
            h = height >> pf->y_chroma_shift;
        } else if (i == 3) {
            /* alpha plane is full resolution again */
            w = ow;
            h = oh;
        }
        s = src->data[i];
        for(j=0; j<h; j++) {
            memcpy(dest, s, w);
            dest += w;
            s += src->linesize[i];
        }
    }

    if (pf->pixel_type == FF_PIXEL_PALETTE)
        /* NOTE(review): aligns the palette to a 4-byte *absolute* address,
           which only matches the offset ff_fill_pointer computes if dest
           itself is 4-byte aligned — verify callers guarantee that. */
        memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);

    return size;
}
  741. int avpicture_get_size(int pix_fmt, int width, int height)
  742. {
  743. AVPicture dummy_pict;
  744. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  745. }
  746. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  747. int has_alpha)
  748. {
  749. const PixFmtInfo *pf, *ps;
  750. int loss;
  751. ps = &pix_fmt_info[src_pix_fmt];
  752. pf = &pix_fmt_info[dst_pix_fmt];
  753. /* compute loss */
  754. loss = 0;
  755. pf = &pix_fmt_info[dst_pix_fmt];
  756. if (pf->depth < ps->depth ||
  757. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  758. loss |= FF_LOSS_DEPTH;
  759. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  760. pf->y_chroma_shift > ps->y_chroma_shift)
  761. loss |= FF_LOSS_RESOLUTION;
  762. switch(pf->color_type) {
  763. case FF_COLOR_RGB:
  764. if (ps->color_type != FF_COLOR_RGB &&
  765. ps->color_type != FF_COLOR_GRAY)
  766. loss |= FF_LOSS_COLORSPACE;
  767. break;
  768. case FF_COLOR_GRAY:
  769. if (ps->color_type != FF_COLOR_GRAY)
  770. loss |= FF_LOSS_COLORSPACE;
  771. break;
  772. case FF_COLOR_YUV:
  773. if (ps->color_type != FF_COLOR_YUV)
  774. loss |= FF_LOSS_COLORSPACE;
  775. break;
  776. case FF_COLOR_YUV_JPEG:
  777. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  778. ps->color_type != FF_COLOR_YUV &&
  779. ps->color_type != FF_COLOR_GRAY)
  780. loss |= FF_LOSS_COLORSPACE;
  781. break;
  782. default:
  783. /* fail safe test */
  784. if (ps->color_type != pf->color_type)
  785. loss |= FF_LOSS_COLORSPACE;
  786. break;
  787. }
  788. if (pf->color_type == FF_COLOR_GRAY &&
  789. ps->color_type != FF_COLOR_GRAY)
  790. loss |= FF_LOSS_CHROMA;
  791. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  792. loss |= FF_LOSS_ALPHA;
  793. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  794. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  795. loss |= FF_LOSS_COLORQUANT;
  796. return loss;
  797. }
  798. static int avg_bits_per_pixel(int pix_fmt)
  799. {
  800. int bits;
  801. const PixFmtInfo *pf;
  802. pf = &pix_fmt_info[pix_fmt];
  803. switch(pf->pixel_type) {
  804. case FF_PIXEL_PACKED:
  805. switch(pix_fmt) {
  806. case PIX_FMT_YUYV422:
  807. case PIX_FMT_UYVY422:
  808. case PIX_FMT_RGB565:
  809. case PIX_FMT_RGB555:
  810. case PIX_FMT_BGR565:
  811. case PIX_FMT_BGR555:
  812. bits = 16;
  813. break;
  814. case PIX_FMT_UYYVYY411:
  815. bits = 12;
  816. break;
  817. default:
  818. bits = pf->depth * pf->nb_channels;
  819. break;
  820. }
  821. break;
  822. case FF_PIXEL_PLANAR:
  823. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  824. bits = pf->depth * pf->nb_channels;
  825. } else {
  826. bits = pf->depth + ((2 * pf->depth) >>
  827. (pf->x_chroma_shift + pf->y_chroma_shift));
  828. }
  829. break;
  830. case FF_PIXEL_PALETTE:
  831. bits = 8;
  832. break;
  833. default:
  834. bits = -1;
  835. break;
  836. }
  837. return bits;
  838. }
  839. static int avcodec_find_best_pix_fmt1(int64_t pix_fmt_mask,
  840. int src_pix_fmt,
  841. int has_alpha,
  842. int loss_mask)
  843. {
  844. int dist, i, loss, min_dist, dst_pix_fmt;
  845. /* find exact color match with smallest size */
  846. dst_pix_fmt = -1;
  847. min_dist = 0x7fffffff;
  848. for(i = 0;i < PIX_FMT_NB; i++) {
  849. if (pix_fmt_mask & (1ULL << i)) {
  850. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  851. if (loss == 0) {
  852. dist = avg_bits_per_pixel(i);
  853. if (dist < min_dist) {
  854. min_dist = dist;
  855. dst_pix_fmt = i;
  856. }
  857. }
  858. }
  859. }
  860. return dst_pix_fmt;
  861. }
  862. int avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, int src_pix_fmt,
  863. int has_alpha, int *loss_ptr)
  864. {
  865. int dst_pix_fmt, loss_mask, i;
  866. static const int loss_mask_order[] = {
  867. ~0, /* no loss first */
  868. ~FF_LOSS_ALPHA,
  869. ~FF_LOSS_RESOLUTION,
  870. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  871. ~FF_LOSS_COLORQUANT,
  872. ~FF_LOSS_DEPTH,
  873. 0,
  874. };
  875. /* try with successive loss */
  876. i = 0;
  877. for(;;) {
  878. loss_mask = loss_mask_order[i++];
  879. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  880. has_alpha, loss_mask);
  881. if (dst_pix_fmt >= 0)
  882. goto found;
  883. if (loss_mask == 0)
  884. break;
  885. }
  886. return -1;
  887. found:
  888. if (loss_ptr)
  889. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  890. return dst_pix_fmt;
  891. }
  892. void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
  893. const uint8_t *src, int src_wrap,
  894. int width, int height)
  895. {
  896. if((!dst) || (!src))
  897. return;
  898. for(;height > 0; height--) {
  899. memcpy(dst, src, width);
  900. dst += dst_wrap;
  901. src += src_wrap;
  902. }
  903. }
  904. int ff_get_plane_bytewidth(enum PixelFormat pix_fmt, int width, int plane)
  905. {
  906. int bits;
  907. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  908. pf = &pix_fmt_info[pix_fmt];
  909. switch(pf->pixel_type) {
  910. case FF_PIXEL_PACKED:
  911. switch(pix_fmt) {
  912. case PIX_FMT_YUYV422:
  913. case PIX_FMT_UYVY422:
  914. case PIX_FMT_RGB565:
  915. case PIX_FMT_RGB555:
  916. case PIX_FMT_BGR565:
  917. case PIX_FMT_BGR555:
  918. bits = 16;
  919. break;
  920. case PIX_FMT_UYYVYY411:
  921. bits = 12;
  922. break;
  923. default:
  924. bits = pf->depth * pf->nb_channels;
  925. break;
  926. }
  927. return (width * bits + 7) >> 3;
  928. break;
  929. case FF_PIXEL_PLANAR:
  930. if (plane == 1 || plane == 2)
  931. width= -((-width)>>pf->x_chroma_shift);
  932. return (width * pf->depth + 7) >> 3;
  933. break;
  934. case FF_PIXEL_PALETTE:
  935. if (plane == 0)
  936. return width;
  937. break;
  938. }
  939. return -1;
  940. }
  941. void av_picture_copy(AVPicture *dst, const AVPicture *src,
  942. int pix_fmt, int width, int height)
  943. {
  944. int i;
  945. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  946. pf = &pix_fmt_info[pix_fmt];
  947. switch(pf->pixel_type) {
  948. case FF_PIXEL_PACKED:
  949. case FF_PIXEL_PLANAR:
  950. for(i = 0; i < pf->nb_channels; i++) {
  951. int h;
  952. int bwidth = ff_get_plane_bytewidth(pix_fmt, width, i);
  953. h = height;
  954. if (i == 1 || i == 2) {
  955. h= -((-height)>>pf->y_chroma_shift);
  956. }
  957. ff_img_copy_plane(dst->data[i], dst->linesize[i],
  958. src->data[i], src->linesize[i],
  959. bwidth, h);
  960. }
  961. break;
  962. case FF_PIXEL_PALETTE:
  963. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  964. src->data[0], src->linesize[0],
  965. width, height);
  966. /* copy the palette */
  967. ff_img_copy_plane(dst->data[1], dst->linesize[1],
  968. src->data[1], src->linesize[1],
  969. 4, 256);
  970. break;
  971. }
  972. }
/* XXX: totally non optimized */

/**
 * Convert packed YUYV 4:2:2 (bytes Y0 U Y1 V) to planar YUV 4:2:0.
 * Rows are processed in pairs: the even row supplies both luma and
 * chroma, the following odd row supplies luma only (chroma of odd rows
 * is dropped, not averaged).  Odd widths keep a final single pixel;
 * odd heights leave a final unpaired row.
 */
static void yuyv422_to_yuv420p(AVPicture *dst, const AVPicture *src,
                               int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;

    p1 = src->data[0];
    lum1 = dst->data[0];
    cb1 = dst->data[1];
    cr1 = dst->data[2];

    for(;height >= 1; height -= 2) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        /* even row: 2 pixels -> 2 luma, 1 Cb, 1 Cr per iteration */
        for(w = width; w >= 2; w -= 2) {
            lum[0] = p[0];
            cb[0] = p[1];
            lum[1] = p[2];
            cr[0] = p[3];
            p += 4;
            lum += 2;
            cb++;
            cr++;
        }
        /* trailing pixel of an odd-width row */
        if (w) {
            lum[0] = p[0];
            cb[0] = p[1];
            cr[0] = p[3];
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        /* odd row (if any): luma only, chroma bytes are skipped */
        if (height>1) {
            p = p1;
            lum = lum1;
            for(w = width; w >= 2; w -= 2) {
                lum[0] = p[0];
                lum[1] = p[2];
                p += 4;
                lum += 2;
            }
            if (w) {
                lum[0] = p[0];
            }
            p1 += src->linesize[0];
            lum1 += dst->linesize[0];
        }
        /* one chroma row consumed per pair of luma rows */
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
/**
 * Convert packed UYVY 4:2:2 (bytes U Y0 V Y1) to planar YUV 4:2:0.
 * Same row-pairing scheme as yuyv422_to_yuv420p(): chroma is taken from
 * even rows only, odd rows contribute just luma.  Handles odd widths
 * (trailing pixel) and odd heights (final unpaired row).
 */
static void uyvy422_to_yuv420p(AVPicture *dst, const AVPicture *src,
                               int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;

    p1 = src->data[0];
    lum1 = dst->data[0];
    cb1 = dst->data[1];
    cr1 = dst->data[2];

    for(;height >= 1; height -= 2) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        /* even row: note the U Y V Y byte order vs. YUYV */
        for(w = width; w >= 2; w -= 2) {
            lum[0] = p[1];
            cb[0] = p[0];
            lum[1] = p[3];
            cr[0] = p[2];
            p += 4;
            lum += 2;
            cb++;
            cr++;
        }
        /* trailing pixel of an odd-width row */
        if (w) {
            lum[0] = p[1];
            cb[0] = p[0];
            cr[0] = p[2];
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        /* odd row (if any): luma only */
        if (height>1) {
            p = p1;
            lum = lum1;
            for(w = width; w >= 2; w -= 2) {
                lum[0] = p[1];
                lum[1] = p[3];
                p += 4;
                lum += 2;
            }
            if (w) {
                lum[0] = p[1];
            }
            p1 += src->linesize[0];
            lum1 += dst->linesize[0];
        }
        /* one chroma row per pair of luma rows */
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
  1080. static void uyvy422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  1081. int width, int height)
  1082. {
  1083. const uint8_t *p, *p1;
  1084. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1085. int w;
  1086. p1 = src->data[0];
  1087. lum1 = dst->data[0];
  1088. cb1 = dst->data[1];
  1089. cr1 = dst->data[2];
  1090. for(;height > 0; height--) {
  1091. p = p1;
  1092. lum = lum1;
  1093. cb = cb1;
  1094. cr = cr1;
  1095. for(w = width; w >= 2; w -= 2) {
  1096. lum[0] = p[1];
  1097. cb[0] = p[0];
  1098. lum[1] = p[3];
  1099. cr[0] = p[2];
  1100. p += 4;
  1101. lum += 2;
  1102. cb++;
  1103. cr++;
  1104. }
  1105. p1 += src->linesize[0];
  1106. lum1 += dst->linesize[0];
  1107. cb1 += dst->linesize[1];
  1108. cr1 += dst->linesize[2];
  1109. }
  1110. }
  1111. static void yuyv422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  1112. int width, int height)
  1113. {
  1114. const uint8_t *p, *p1;
  1115. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1116. int w;
  1117. p1 = src->data[0];
  1118. lum1 = dst->data[0];
  1119. cb1 = dst->data[1];
  1120. cr1 = dst->data[2];
  1121. for(;height > 0; height--) {
  1122. p = p1;
  1123. lum = lum1;
  1124. cb = cb1;
  1125. cr = cr1;
  1126. for(w = width; w >= 2; w -= 2) {
  1127. lum[0] = p[0];
  1128. cb[0] = p[1];
  1129. lum[1] = p[2];
  1130. cr[0] = p[3];
  1131. p += 4;
  1132. lum += 2;
  1133. cb++;
  1134. cr++;
  1135. }
  1136. p1 += src->linesize[0];
  1137. lum1 += dst->linesize[0];
  1138. cb1 += dst->linesize[1];
  1139. cr1 += dst->linesize[2];
  1140. }
  1141. }
  1142. static void yuv422p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  1143. int width, int height)
  1144. {
  1145. uint8_t *p, *p1;
  1146. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1147. int w;
  1148. p1 = dst->data[0];
  1149. lum1 = src->data[0];
  1150. cb1 = src->data[1];
  1151. cr1 = src->data[2];
  1152. for(;height > 0; height--) {
  1153. p = p1;
  1154. lum = lum1;
  1155. cb = cb1;
  1156. cr = cr1;
  1157. for(w = width; w >= 2; w -= 2) {
  1158. p[0] = lum[0];
  1159. p[1] = cb[0];
  1160. p[2] = lum[1];
  1161. p[3] = cr[0];
  1162. p += 4;
  1163. lum += 2;
  1164. cb++;
  1165. cr++;
  1166. }
  1167. p1 += dst->linesize[0];
  1168. lum1 += src->linesize[0];
  1169. cb1 += src->linesize[1];
  1170. cr1 += src->linesize[2];
  1171. }
  1172. }
  1173. static void yuv422p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1174. int width, int height)
  1175. {
  1176. uint8_t *p, *p1;
  1177. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1178. int w;
  1179. p1 = dst->data[0];
  1180. lum1 = src->data[0];
  1181. cb1 = src->data[1];
  1182. cr1 = src->data[2];
  1183. for(;height > 0; height--) {
  1184. p = p1;
  1185. lum = lum1;
  1186. cb = cb1;
  1187. cr = cr1;
  1188. for(w = width; w >= 2; w -= 2) {
  1189. p[1] = lum[0];
  1190. p[0] = cb[0];
  1191. p[3] = lum[1];
  1192. p[2] = cr[0];
  1193. p += 4;
  1194. lum += 2;
  1195. cb++;
  1196. cr++;
  1197. }
  1198. p1 += dst->linesize[0];
  1199. lum1 += src->linesize[0];
  1200. cb1 += src->linesize[1];
  1201. cr1 += src->linesize[2];
  1202. }
  1203. }
/* Convert packed UYYVYY 4:1:1 (6 bytes per 4 pixels) to planar 4:1:1.
 * Widths that are not a multiple of 4 leave the tail pixels unwritten. */
static void uyyvyy411_to_yuv411p(AVPicture *dst, const AVPicture *src,
                                 int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;

    p1 = src->data[0];
    lum1 = dst->data[0];
    cb1 = dst->data[1];
    cr1 = dst->data[2];
    for(;height > 0; height--) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        /* one U, four Y and one V per 4-pixel group */
        for(w = width; w >= 4; w -= 4) {
            cb[0] = p[0];
            lum[0] = p[1];
            lum[1] = p[2];
            cr[0] = p[3];
            lum[2] = p[4];
            lum[3] = p[5];
            p += 6;
            lum += 4;
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
/* Convert planar 4:2:0 to packed YUYV 4:2:2. Each pair of output lines
 * shares one chroma row. Odd trailing columns/rows are not converted
 * (loops run width/2 and height/2 times).
 * NOTE(review): "linesrc" actually walks the *destination* -- the names
 * are historical, not a bug. */
static void yuv420p_to_yuyv422(AVPicture *dst, const AVPicture *src,
                               int width, int height)
{
    int w, h;
    uint8_t *line1, *line2, *linesrc = dst->data[0];
    uint8_t *lum1, *lum2, *lumsrc = src->data[0];
    uint8_t *cb1, *cb2 = src->data[1];
    uint8_t *cr1, *cr2 = src->data[2];

    for(h = height / 2; h--;) {
        /* two packed output lines per iteration */
        line1 = linesrc;
        line2 = linesrc + dst->linesize[0];
        lum1 = lumsrc;
        lum2 = lumsrc + src->linesize[0];
        cb1 = cb2;
        cr1 = cr2;
        for(w = width / 2; w--;) {
            *line1++ = *lum1++; *line2++ = *lum2++;
            *line1++ = *line2++ = *cb1++;
            *line1++ = *lum1++; *line2++ = *lum2++;
            *line1++ = *line2++ = *cr1++;
        }
        linesrc += dst->linesize[0] * 2;
        lumsrc += src->linesize[0] * 2;
        cb2 += src->linesize[1];
        cr2 += src->linesize[2];
    }
}
/* Convert planar 4:2:0 to packed UYVY 4:2:2. Same structure as
 * yuv420p_to_yuyv422() with chroma bytes leading each sample pair.
 * Odd trailing columns/rows are not converted.
 * NOTE(review): "linesrc" walks the destination; names are historical. */
static void yuv420p_to_uyvy422(AVPicture *dst, const AVPicture *src,
                               int width, int height)
{
    int w, h;
    uint8_t *line1, *line2, *linesrc = dst->data[0];
    uint8_t *lum1, *lum2, *lumsrc = src->data[0];
    uint8_t *cb1, *cb2 = src->data[1];
    uint8_t *cr1, *cr2 = src->data[2];

    for(h = height / 2; h--;) {
        /* two packed output lines per iteration, sharing one chroma row */
        line1 = linesrc;
        line2 = linesrc + dst->linesize[0];
        lum1 = lumsrc;
        lum2 = lumsrc + src->linesize[0];
        cb1 = cb2;
        cr1 = cr2;
        for(w = width / 2; w--;) {
            *line1++ = *line2++ = *cb1++;
            *line1++ = *lum1++; *line2++ = *lum2++;
            *line1++ = *line2++ = *cr1++;
            *line1++ = *lum1++; *line2++ = *lum2++;
        }
        linesrc += dst->linesize[0] * 2;
        lumsrc += src->linesize[0] * 2;
        cb2 += src->linesize[1];
        cr2 += src->linesize[2];
    }
}
  1291. /* 2x2 -> 1x1 */
  1292. void ff_shrink22(uint8_t *dst, int dst_wrap,
  1293. const uint8_t *src, int src_wrap,
  1294. int width, int height)
  1295. {
  1296. int w;
  1297. const uint8_t *s1, *s2;
  1298. uint8_t *d;
  1299. for(;height > 0; height--) {
  1300. s1 = src;
  1301. s2 = s1 + src_wrap;
  1302. d = dst;
  1303. for(w = width;w >= 4; w-=4) {
  1304. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1305. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  1306. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  1307. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  1308. s1 += 8;
  1309. s2 += 8;
  1310. d += 4;
  1311. }
  1312. for(;w > 0; w--) {
  1313. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1314. s1 += 2;
  1315. s2 += 2;
  1316. d++;
  1317. }
  1318. src += 2 * src_wrap;
  1319. dst += dst_wrap;
  1320. }
  1321. }
  1322. /* 4x4 -> 1x1 */
  1323. void ff_shrink44(uint8_t *dst, int dst_wrap,
  1324. const uint8_t *src, int src_wrap,
  1325. int width, int height)
  1326. {
  1327. int w;
  1328. const uint8_t *s1, *s2, *s3, *s4;
  1329. uint8_t *d;
  1330. for(;height > 0; height--) {
  1331. s1 = src;
  1332. s2 = s1 + src_wrap;
  1333. s3 = s2 + src_wrap;
  1334. s4 = s3 + src_wrap;
  1335. d = dst;
  1336. for(w = width;w > 0; w--) {
  1337. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  1338. s2[0] + s2[1] + s2[2] + s2[3] +
  1339. s3[0] + s3[1] + s3[2] + s3[3] +
  1340. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  1341. s1 += 4;
  1342. s2 += 4;
  1343. s3 += 4;
  1344. s4 += 4;
  1345. d++;
  1346. }
  1347. src += 4 * src_wrap;
  1348. dst += dst_wrap;
  1349. }
  1350. }
  1351. /* 8x8 -> 1x1 */
  1352. void ff_shrink88(uint8_t *dst, int dst_wrap,
  1353. const uint8_t *src, int src_wrap,
  1354. int width, int height)
  1355. {
  1356. int w, i;
  1357. for(;height > 0; height--) {
  1358. for(w = width;w > 0; w--) {
  1359. int tmp=0;
  1360. for(i=0; i<8; i++){
  1361. tmp += src[0] + src[1] + src[2] + src[3] + src[4] + src[5] + src[6] + src[7];
  1362. src += src_wrap;
  1363. }
  1364. *(dst++) = (tmp + 32)>>6;
  1365. src += 8 - 8*src_wrap;
  1366. }
  1367. src += 8*src_wrap - 8*width;
  1368. dst += dst_wrap - width;
  1369. }
  1370. }
  1371. /* XXX: add jpeg quantize code */
  1372. #define TRANSP_INDEX (6*6*6)
  1373. /* this is maybe slow, but allows for extensions */
  1374. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1375. {
  1376. return (((r) / 47) % 6) * 6 * 6 + (((g) / 47) % 6) * 6 + (((b) / 47) % 6);
  1377. }
  1378. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  1379. {
  1380. uint32_t *pal;
  1381. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  1382. int i, r, g, b;
  1383. pal = (uint32_t *)palette;
  1384. i = 0;
  1385. for(r = 0; r < 6; r++) {
  1386. for(g = 0; g < 6; g++) {
  1387. for(b = 0; b < 6; b++) {
  1388. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  1389. (pal_value[g] << 8) | pal_value[b];
  1390. }
  1391. }
  1392. }
  1393. if (has_alpha)
  1394. pal[i++] = 0;
  1395. while (i < 256)
  1396. pal[i++] = 0xff000000;
  1397. }
  1398. /* copy bit n to bits 0 ... n - 1 */
  1399. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1400. {
  1401. int mask;
  1402. mask = (1 << n) - 1;
  1403. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1404. }
/* rgb555 handling */
#define RGB_NAME rgb555
/* Unpack a 1:5:5:5 pixel; bitcopy_n() replicates the top bits into the
 * low bits so the 5-bit components expand to full 8-bit range. */
#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (10 - 3), 3);\
    g = bitcopy_n(v >> (5 - 3), 3);\
    b = bitcopy_n(v << 3, 3);\
}
#define RGB_OUT(d, r, g, b)\
{\
    ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3);\
}
#define BPP 2
/* instantiates the generic rgb555 converters (template undefines the
 * RGB_* macros it consumed) */
#include "imgconvert_template.c"
/* rgb565 handling */
#define RGB_NAME rgb565
#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (11 - 3), 3);\
    g = bitcopy_n(v >> (5 - 2), 2);\
    b = bitcopy_n(v << 3, 3);\
}
#define RGB_OUT(d, r, g, b)\
{\
    ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
}
#define BPP 2
#include "imgconvert_template.c"
/* bgr24 handling */
#define RGB_NAME bgr24
#define RGB_IN(r, g, b, s)\
{\
    b = (s)[0];\
    g = (s)[1];\
    r = (s)[2];\
}
#define RGB_OUT(d, r, g, b)\
{\
    (d)[0] = b;\
    (d)[1] = g;\
    (d)[2] = r;\
}
#define BPP 3
#include "imgconvert_template.c"
#undef RGB_IN
#undef RGB_OUT
#undef BPP
/* rgb24 handling */
#define RGB_NAME rgb24
#define FMT_RGB24
#define RGB_IN(r, g, b, s)\
{\
    r = (s)[0];\
    g = (s)[1];\
    b = (s)[2];\
}
#define RGB_OUT(d, r, g, b)\
{\
    (d)[0] = r;\
    (d)[1] = g;\
    (d)[2] = b;\
}
#define BPP 3
#include "imgconvert_template.c"
/* rgb32 handling (packed ARGB in native word order) */
#define RGB_NAME rgb32
#define FMT_RGB32
#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}
#define RGBA_OUT(d, r, g, b, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
}
#define BPP 4
#include "imgconvert_template.c"
/* Expand a 1 bpp monochrome plane to 8-bit gray: each bit becomes 0x00
 * or 0xff (the -(bit) trick turns 1 into all-ones). xor_mask flips the
 * input first: 0xff for white-is-zero sources, 0x00 for black-is-zero. */
static void mono_to_gray(AVPicture *dst, const AVPicture *src,
                         int width, int height, int xor_mask)
{
    const unsigned char *p;
    unsigned char *q;
    int v, dst_wrap, src_wrap;
    int y, w;

    p = src->data[0];
    /* source lines hold (width+7)/8 packed bytes */
    src_wrap = src->linesize[0] - ((width + 7) >> 3);
    q = dst->data[0];
    dst_wrap = dst->linesize[0] - width;
    for(y=0;y<height;y++) {
        w = width;
        /* full input bytes: 8 output pixels at a time, MSB first */
        while (w >= 8) {
            v = *p++ ^ xor_mask;
            q[0] = -(v >> 7);
            q[1] = -((v >> 6) & 1);
            q[2] = -((v >> 5) & 1);
            q[3] = -((v >> 4) & 1);
            q[4] = -((v >> 3) & 1);
            q[5] = -((v >> 2) & 1);
            q[6] = -((v >> 1) & 1);
            q[7] = -((v >> 0) & 1);
            w -= 8;
            q += 8;
        }
        /* partial final byte: shift bits out from the top */
        if (w > 0) {
            v = *p++ ^ xor_mask;
            do {
                q[0] = -((v >> 7) & 1);
                q++;
                v <<= 1;
            } while (--w);
        }
        p += src_wrap;
        q += dst_wrap;
    }
}
  1533. static void monowhite_to_gray(AVPicture *dst, const AVPicture *src,
  1534. int width, int height)
  1535. {
  1536. mono_to_gray(dst, src, width, height, 0xff);
  1537. }
  1538. static void monoblack_to_gray(AVPicture *dst, const AVPicture *src,
  1539. int width, int height)
  1540. {
  1541. mono_to_gray(dst, src, width, height, 0x00);
  1542. }
/* Pack 8-bit gray down to 1 bpp, keeping only the MSB of each pixel.
 * xor_mask inverts the output byte: 0xff produces white-is-zero packing. */
static void gray_to_mono(AVPicture *dst, const AVPicture *src,
                         int width, int height, int xor_mask)
{
    int n;
    const uint8_t *s;
    uint8_t *d;
    int j, b, v, n1, src_wrap, dst_wrap, y;

    s = src->data[0];
    src_wrap = src->linesize[0] - width;
    d = dst->data[0];
    /* destination lines hold (width+7)/8 packed bytes */
    dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
    for(y=0;y<height;y++) {
        n = width;
        /* full groups of 8 pixels -> one output byte, MSB first */
        while (n >= 8) {
            v = 0;
            for(j=0;j<8;j++) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
            }
            d[0] = v ^ xor_mask;
            d++;
            n -= 8;
        }
        /* trailing 1..7 pixels: left-align the bits in the final byte */
        if (n > 0) {
            n1 = n;
            v = 0;
            while (n > 0) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
                n--;
            }
            d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
            d++;
        }
        s += src_wrap;
        d += dst_wrap;
    }
}
  1583. static void gray_to_monowhite(AVPicture *dst, const AVPicture *src,
  1584. int width, int height)
  1585. {
  1586. gray_to_mono(dst, src, width, height, 0xff);
  1587. }
  1588. static void gray_to_monoblack(AVPicture *dst, const AVPicture *src,
  1589. int width, int height)
  1590. {
  1591. gray_to_mono(dst, src, width, height, 0x00);
  1592. }
  1593. static void gray_to_gray16(AVPicture *dst, const AVPicture *src,
  1594. int width, int height)
  1595. {
  1596. int x, y, src_wrap, dst_wrap;
  1597. uint8_t *s, *d;
  1598. s = src->data[0];
  1599. src_wrap = src->linesize[0] - width;
  1600. d = dst->data[0];
  1601. dst_wrap = dst->linesize[0] - width * 2;
  1602. for(y=0; y<height; y++){
  1603. for(x=0; x<width; x++){
  1604. *d++ = *s;
  1605. *d++ = *s++;
  1606. }
  1607. s += src_wrap;
  1608. d += dst_wrap;
  1609. }
  1610. }
  1611. static void gray16_to_gray(AVPicture *dst, const AVPicture *src,
  1612. int width, int height)
  1613. {
  1614. int x, y, src_wrap, dst_wrap;
  1615. uint8_t *s, *d;
  1616. s = src->data[0];
  1617. src_wrap = src->linesize[0] - width * 2;
  1618. d = dst->data[0];
  1619. dst_wrap = dst->linesize[0] - width;
  1620. for(y=0; y<height; y++){
  1621. for(x=0; x<width; x++){
  1622. *d++ = *s;
  1623. s += 2;
  1624. }
  1625. s += src_wrap;
  1626. d += dst_wrap;
  1627. }
  1628. }
  1629. static void gray16be_to_gray(AVPicture *dst, const AVPicture *src,
  1630. int width, int height)
  1631. {
  1632. gray16_to_gray(dst, src, width, height);
  1633. }
  1634. static void gray16le_to_gray(AVPicture *dst, const AVPicture *src,
  1635. int width, int height)
  1636. {
  1637. AVPicture tmpsrc = *src;
  1638. tmpsrc.data[0]++;
  1639. gray16_to_gray(dst, &tmpsrc, width, height);
  1640. }
  1641. static void gray16_to_gray16(AVPicture *dst, const AVPicture *src,
  1642. int width, int height)
  1643. {
  1644. int x, y, src_wrap, dst_wrap;
  1645. uint16_t *s, *d;
  1646. s = (uint16_t*)src->data[0];
  1647. src_wrap = (src->linesize[0] - width * 2)/2;
  1648. d = (uint16_t*)dst->data[0];
  1649. dst_wrap = (dst->linesize[0] - width * 2)/2;
  1650. for(y=0; y<height; y++){
  1651. for(x=0; x<width; x++){
  1652. *d++ = bswap_16(*s++);
  1653. }
  1654. s += src_wrap;
  1655. d += dst_wrap;
  1656. }
  1657. }
/* One entry of the conversion dispatch table: the function converting a
 * full picture from one pixel format to another. */
typedef struct ConvertEntry {
    void (*convert)(AVPicture *dst,
                    const AVPicture *src, int width, int height);
} ConvertEntry;
/* Add each new conversion function in this table. In order to be able
   to convert from any format to any format, the following constraints
   must be satisfied:
   - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
   - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
   - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGB32
   - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
     PIX_FMT_RGB24.
   - PIX_FMT_422 must convert to and from PIX_FMT_422P.
   The other conversion functions are just optimizations for common cases.
*/
/* Indexed as convert_table[src_fmt][dst_fmt]; a NULL .convert means the
 * pair has no direct converter and needs an intermediate format. */
static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
    [PIX_FMT_YUV420P] = {
        [PIX_FMT_YUYV422] = {
            .convert = yuv420p_to_yuyv422,
        },
        [PIX_FMT_RGB555] = {
            .convert = yuv420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuv420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuv420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuv420p_to_rgb24
        },
        [PIX_FMT_RGB32] = {
            .convert = yuv420p_to_rgb32
        },
        [PIX_FMT_UYVY422] = {
            .convert = yuv420p_to_uyvy422,
        },
    },
    [PIX_FMT_YUV422P] = {
        [PIX_FMT_YUYV422] = {
            .convert = yuv422p_to_yuyv422,
        },
        [PIX_FMT_UYVY422] = {
            .convert = yuv422p_to_uyvy422,
        },
    },
    [PIX_FMT_YUV444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuv444p_to_rgb24
        },
    },
    [PIX_FMT_YUVJ420P] = {
        [PIX_FMT_RGB555] = {
            .convert = yuvj420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuvj420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuvj420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuvj420p_to_rgb24
        },
        [PIX_FMT_RGB32] = {
            .convert = yuvj420p_to_rgb32
        },
    },
    [PIX_FMT_YUVJ444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuvj444p_to_rgb24
        },
    },
    [PIX_FMT_YUYV422] = {
        [PIX_FMT_YUV420P] = {
            .convert = yuyv422_to_yuv420p,
        },
        [PIX_FMT_YUV422P] = {
            .convert = yuyv422_to_yuv422p,
        },
    },
    [PIX_FMT_UYVY422] = {
        [PIX_FMT_YUV420P] = {
            .convert = uyvy422_to_yuv420p,
        },
        [PIX_FMT_YUV422P] = {
            .convert = uyvy422_to_yuv422p,
        },
    },
    [PIX_FMT_RGB24] = {
        [PIX_FMT_YUV420P] = {
            .convert = rgb24_to_yuv420p
        },
        [PIX_FMT_RGB565] = {
            .convert = rgb24_to_rgb565
        },
        [PIX_FMT_RGB555] = {
            .convert = rgb24_to_rgb555
        },
        [PIX_FMT_RGB32] = {
            .convert = rgb24_to_rgb32
        },
        [PIX_FMT_BGR24] = {
            .convert = rgb24_to_bgr24
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb24_to_gray
        },
        [PIX_FMT_PAL8] = {
            .convert = rgb24_to_pal8
        },
        [PIX_FMT_YUV444P] = {
            .convert = rgb24_to_yuv444p
        },
        [PIX_FMT_YUVJ420P] = {
            .convert = rgb24_to_yuvj420p
        },
        [PIX_FMT_YUVJ444P] = {
            .convert = rgb24_to_yuvj444p
        },
    },
    [PIX_FMT_RGB32] = {
        [PIX_FMT_RGB24] = {
            .convert = rgb32_to_rgb24
        },
        [PIX_FMT_BGR24] = {
            .convert = rgb32_to_bgr24
        },
        [PIX_FMT_RGB565] = {
            .convert = rgb32_to_rgb565
        },
        [PIX_FMT_RGB555] = {
            .convert = rgb32_to_rgb555
        },
        [PIX_FMT_PAL8] = {
            .convert = rgb32_to_pal8
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb32_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb32_to_gray
        },
    },
    [PIX_FMT_BGR24] = {
        [PIX_FMT_RGB32] = {
            .convert = bgr24_to_rgb32
        },
        [PIX_FMT_RGB24] = {
            .convert = bgr24_to_rgb24
        },
        [PIX_FMT_YUV420P] = {
            .convert = bgr24_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = bgr24_to_gray
        },
    },
    [PIX_FMT_RGB555] = {
        [PIX_FMT_RGB24] = {
            .convert = rgb555_to_rgb24
        },
        [PIX_FMT_RGB32] = {
            .convert = rgb555_to_rgb32
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb555_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb555_to_gray
        },
    },
    [PIX_FMT_RGB565] = {
        [PIX_FMT_RGB32] = {
            .convert = rgb565_to_rgb32
        },
        [PIX_FMT_RGB24] = {
            .convert = rgb565_to_rgb24
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb565_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb565_to_gray
        },
    },
    [PIX_FMT_GRAY16BE] = {
        [PIX_FMT_GRAY8] = {
            .convert = gray16be_to_gray
        },
        [PIX_FMT_GRAY16LE] = {
            .convert = gray16_to_gray16
        },
    },
    [PIX_FMT_GRAY16LE] = {
        [PIX_FMT_GRAY8] = {
            .convert = gray16le_to_gray
        },
        [PIX_FMT_GRAY16BE] = {
            .convert = gray16_to_gray16
        },
    },
    [PIX_FMT_GRAY8] = {
        [PIX_FMT_RGB555] = {
            .convert = gray_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = gray_to_rgb565
        },
        [PIX_FMT_RGB24] = {
            .convert = gray_to_rgb24
        },
        [PIX_FMT_BGR24] = {
            .convert = gray_to_bgr24
        },
        [PIX_FMT_RGB32] = {
            .convert = gray_to_rgb32
        },
        [PIX_FMT_MONOWHITE] = {
            .convert = gray_to_monowhite
        },
        [PIX_FMT_MONOBLACK] = {
            .convert = gray_to_monoblack
        },
        /* gray_to_gray16 replicates each byte, so it serves both
         * endiannesses */
        [PIX_FMT_GRAY16LE] = {
            .convert = gray_to_gray16
        },
        [PIX_FMT_GRAY16BE] = {
            .convert = gray_to_gray16
        },
    },
    [PIX_FMT_MONOWHITE] = {
        [PIX_FMT_GRAY8] = {
            .convert = monowhite_to_gray
        },
    },
    [PIX_FMT_MONOBLACK] = {
        [PIX_FMT_GRAY8] = {
            .convert = monoblack_to_gray
        },
    },
    [PIX_FMT_PAL8] = {
        [PIX_FMT_RGB555] = {
            .convert = pal8_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = pal8_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = pal8_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = pal8_to_rgb24
        },
        [PIX_FMT_RGB32] = {
            .convert = pal8_to_rgb32
        },
    },
    [PIX_FMT_UYYVYY411] = {
        [PIX_FMT_YUV411P] = {
            .convert = uyyvyy411_to_yuv411p,
        },
    },
};
  1923. int avpicture_alloc(AVPicture *picture,
  1924. int pix_fmt, int width, int height)
  1925. {
  1926. int size;
  1927. void *ptr;
  1928. size = avpicture_get_size(pix_fmt, width, height);
  1929. if(size<0)
  1930. goto fail;
  1931. ptr = av_malloc(size);
  1932. if (!ptr)
  1933. goto fail;
  1934. avpicture_fill(picture, ptr, pix_fmt, width, height);
  1935. if(picture->data[1] && !picture->data[2])
  1936. ff_set_systematic_pal((uint32_t*)picture->data[1], pix_fmt);
  1937. return 0;
  1938. fail:
  1939. memset(picture, 0, sizeof(AVPicture));
  1940. return -1;
  1941. }
  1942. void avpicture_free(AVPicture *picture)
  1943. {
  1944. av_free(picture->data[0]);
  1945. }
  1946. /* return true if yuv planar */
  1947. static inline int is_yuv_planar(const PixFmtInfo *ps)
  1948. {
  1949. return (ps->color_type == FF_COLOR_YUV ||
  1950. ps->color_type == FF_COLOR_YUV_JPEG) &&
  1951. ps->pixel_type == FF_PIXEL_PLANAR;
  1952. }
/* Return in dst a view of src cropped by top_band/left_band pixels.
 * No pixels are copied: dst's plane pointers alias src's buffers.
 * Only planar YUV formats are accepted; returns -1 otherwise.
 * NOTE(review): band values are not validated, and an odd left_band on a
 * chroma-subsampled format shifts chroma relative to luma -- callers are
 * expected to pass suitably aligned values; confirm. */
int av_picture_crop(AVPicture *dst, const AVPicture *src,
                    int pix_fmt, int top_band, int left_band)
{
    int y_shift;
    int x_shift;

    if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || !is_yuv_planar(&pix_fmt_info[pix_fmt]))
        return -1;

    y_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
    x_shift = pix_fmt_info[pix_fmt].x_chroma_shift;

    dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
    /* chroma planes advance by the subsampled band sizes */
    dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift);
    dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift);

    dst->linesize[0] = src->linesize[0];
    dst->linesize[1] = src->linesize[1];
    dst->linesize[2] = src->linesize[2];
    return 0;
}
/* Pad src into dst with the given border widths, painting the borders
 * with color[] (one value per plane). width/height are the padded
 * destination dimensions. If src is NULL only the borders are painted.
 * Only planar YUV formats are supported; returns 0 on success, -1 on
 * unsupported format. */
int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
            int pix_fmt, int padtop, int padbottom, int padleft, int padright,
            int *color)
{
    uint8_t *optr;
    int y_shift;
    int x_shift;
    int yheight;
    int i, y;

    if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB ||
        !is_yuv_planar(&pix_fmt_info[pix_fmt])) return -1;

    for (i = 0; i < 3; i++) {
        /* luma plane is full resolution; chroma planes are subsampled */
        x_shift = i ? pix_fmt_info[pix_fmt].x_chroma_shift : 0;
        y_shift = i ? pix_fmt_info[pix_fmt].y_chroma_shift : 0;

        if (padtop || padleft) {
            /* top border plus the left border of the first interior line,
             * painted in one contiguous memset */
            memset(dst->data[i], color[i],
                dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift));
        }

        if (padleft || padright) {
            /* each memset covers the right border of line y and the left
             * border of line y+1, which are adjacent in memory */
            optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
                (dst->linesize[i] - (padright >> x_shift));
            yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
            for (y = 0; y < yheight; y++) {
                memset(optr, color[i], (padleft + padright) >> x_shift);
                optr += dst->linesize[i];
            }
        }

        if (src) { /* first line */
            uint8_t *iptr = src->data[i];
            optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
                    (padleft >> x_shift);
            memcpy(optr, iptr, (width - padleft - padright) >> x_shift);
            iptr += src->linesize[i];
            /* remaining lines: border fill followed by the picture data */
            optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
                (dst->linesize[i] - (padright >> x_shift));
            yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
            for (y = 0; y < yheight; y++) {
                memset(optr, color[i], (padleft + padright) >> x_shift);
                memcpy(optr + ((padleft + padright) >> x_shift), iptr,
                       (width - padleft - padright) >> x_shift);
                iptr += src->linesize[i];
                optr += dst->linesize[i];
            }
        }

        if (padbottom || padright) {
            /* right border of the last interior line plus the whole
             * bottom border, painted in one contiguous memset */
            optr = dst->data[i] + dst->linesize[i] *
                ((height - padbottom) >> y_shift) - (padright >> x_shift);
            memset(optr, color[i],dst->linesize[i] *
                (padbottom >> y_shift) + (padright >> x_shift));
        }
    }
    return 0;
}
  2023. #if !CONFIG_SWSCALE
  2024. static uint8_t y_ccir_to_jpeg[256];
  2025. static uint8_t y_jpeg_to_ccir[256];
  2026. static uint8_t c_ccir_to_jpeg[256];
  2027. static uint8_t c_jpeg_to_ccir[256];
  2028. /* init various conversion tables */
  2029. static av_cold void img_convert_init(void)
  2030. {
  2031. int i;
  2032. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  2033. for(i = 0;i < 256; i++) {
  2034. y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
  2035. y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
  2036. c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
  2037. c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
  2038. }
  2039. }
  2040. /* apply to each pixel the given table */
  2041. static void img_apply_table(uint8_t *dst, int dst_wrap,
  2042. const uint8_t *src, int src_wrap,
  2043. int width, int height, const uint8_t *table1)
  2044. {
  2045. int n;
  2046. const uint8_t *s;
  2047. uint8_t *d;
  2048. const uint8_t *table;
  2049. table = table1;
  2050. for(;height > 0; height--) {
  2051. s = src;
  2052. d = dst;
  2053. n = width;
  2054. while (n >= 4) {
  2055. d[0] = table[s[0]];
  2056. d[1] = table[s[1]];
  2057. d[2] = table[s[2]];
  2058. d[3] = table[s[3]];
  2059. d += 4;
  2060. s += 4;
  2061. n -= 4;
  2062. }
  2063. while (n > 0) {
  2064. d[0] = table[s[0]];
  2065. d++;
  2066. s++;
  2067. n--;
  2068. }
  2069. dst += dst_wrap;
  2070. src += src_wrap;
  2071. }
  2072. }
  2073. /* XXX: use generic filter ? */
  2074. /* XXX: in most cases, the sampling position is incorrect */
  2075. /* 4x1 -> 1x1 */
  2076. static void shrink41(uint8_t *dst, int dst_wrap,
  2077. const uint8_t *src, int src_wrap,
  2078. int width, int height)
  2079. {
  2080. int w;
  2081. const uint8_t *s;
  2082. uint8_t *d;
  2083. for(;height > 0; height--) {
  2084. s = src;
  2085. d = dst;
  2086. for(w = width;w > 0; w--) {
  2087. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  2088. s += 4;
  2089. d++;
  2090. }
  2091. src += src_wrap;
  2092. dst += dst_wrap;
  2093. }
  2094. }
  2095. /* 2x1 -> 1x1 */
  2096. static void shrink21(uint8_t *dst, int dst_wrap,
  2097. const uint8_t *src, int src_wrap,
  2098. int width, int height)
  2099. {
  2100. int w;
  2101. const uint8_t *s;
  2102. uint8_t *d;
  2103. for(;height > 0; height--) {
  2104. s = src;
  2105. d = dst;
  2106. for(w = width;w > 0; w--) {
  2107. d[0] = (s[0] + s[1]) >> 1;
  2108. s += 2;
  2109. d++;
  2110. }
  2111. src += src_wrap;
  2112. dst += dst_wrap;
  2113. }
  2114. }
  2115. /* 1x2 -> 1x1 */
  2116. static void shrink12(uint8_t *dst, int dst_wrap,
  2117. const uint8_t *src, int src_wrap,
  2118. int width, int height)
  2119. {
  2120. int w;
  2121. uint8_t *d;
  2122. const uint8_t *s1, *s2;
  2123. for(;height > 0; height--) {
  2124. s1 = src;
  2125. s2 = s1 + src_wrap;
  2126. d = dst;
  2127. for(w = width;w >= 4; w-=4) {
  2128. d[0] = (s1[0] + s2[0]) >> 1;
  2129. d[1] = (s1[1] + s2[1]) >> 1;
  2130. d[2] = (s1[2] + s2[2]) >> 1;
  2131. d[3] = (s1[3] + s2[3]) >> 1;
  2132. s1 += 4;
  2133. s2 += 4;
  2134. d += 4;
  2135. }
  2136. for(;w > 0; w--) {
  2137. d[0] = (s1[0] + s2[0]) >> 1;
  2138. s1++;
  2139. s2++;
  2140. d++;
  2141. }
  2142. src += 2 * src_wrap;
  2143. dst += dst_wrap;
  2144. }
  2145. }
  2146. static void grow21_line(uint8_t *dst, const uint8_t *src,
  2147. int width)
  2148. {
  2149. int w;
  2150. const uint8_t *s1;
  2151. uint8_t *d;
  2152. s1 = src;
  2153. d = dst;
  2154. for(w = width;w >= 4; w-=4) {
  2155. d[1] = d[0] = s1[0];
  2156. d[3] = d[2] = s1[1];
  2157. s1 += 2;
  2158. d += 4;
  2159. }
  2160. for(;w >= 2; w -= 2) {
  2161. d[1] = d[0] = s1[0];
  2162. s1 ++;
  2163. d += 2;
  2164. }
  2165. /* only needed if width is not a multiple of two */
  2166. /* XXX: veryfy that */
  2167. if (w) {
  2168. d[0] = s1[0];
  2169. }
  2170. }
  2171. static void grow41_line(uint8_t *dst, const uint8_t *src,
  2172. int width)
  2173. {
  2174. int w, v;
  2175. const uint8_t *s1;
  2176. uint8_t *d;
  2177. s1 = src;
  2178. d = dst;
  2179. for(w = width;w >= 4; w-=4) {
  2180. v = s1[0];
  2181. d[0] = v;
  2182. d[1] = v;
  2183. d[2] = v;
  2184. d[3] = v;
  2185. s1 ++;
  2186. d += 4;
  2187. }
  2188. }
  2189. /* 1x1 -> 2x1 */
  2190. static void grow21(uint8_t *dst, int dst_wrap,
  2191. const uint8_t *src, int src_wrap,
  2192. int width, int height)
  2193. {
  2194. for(;height > 0; height--) {
  2195. grow21_line(dst, src, width);
  2196. src += src_wrap;
  2197. dst += dst_wrap;
  2198. }
  2199. }
  2200. /* 1x1 -> 1x2 */
  2201. static void grow12(uint8_t *dst, int dst_wrap,
  2202. const uint8_t *src, int src_wrap,
  2203. int width, int height)
  2204. {
  2205. for(;height > 0; height-=2) {
  2206. memcpy(dst, src, width);
  2207. dst += dst_wrap;
  2208. memcpy(dst, src, width);
  2209. dst += dst_wrap;
  2210. src += src_wrap;
  2211. }
  2212. }
  2213. /* 1x1 -> 2x2 */
  2214. static void grow22(uint8_t *dst, int dst_wrap,
  2215. const uint8_t *src, int src_wrap,
  2216. int width, int height)
  2217. {
  2218. for(;height > 0; height--) {
  2219. grow21_line(dst, src, width);
  2220. if (height%2)
  2221. src += src_wrap;
  2222. dst += dst_wrap;
  2223. }
  2224. }
  2225. /* 1x1 -> 4x1 */
  2226. static void grow41(uint8_t *dst, int dst_wrap,
  2227. const uint8_t *src, int src_wrap,
  2228. int width, int height)
  2229. {
  2230. for(;height > 0; height--) {
  2231. grow41_line(dst, src, width);
  2232. src += src_wrap;
  2233. dst += dst_wrap;
  2234. }
  2235. }
  2236. /* 1x1 -> 4x4 */
  2237. static void grow44(uint8_t *dst, int dst_wrap,
  2238. const uint8_t *src, int src_wrap,
  2239. int width, int height)
  2240. {
  2241. for(;height > 0; height--) {
  2242. grow41_line(dst, src, width);
  2243. if ((height & 3) == 1)
  2244. src += src_wrap;
  2245. dst += dst_wrap;
  2246. }
  2247. }
  2248. /* 1x2 -> 2x1 */
  2249. static void conv411(uint8_t *dst, int dst_wrap,
  2250. const uint8_t *src, int src_wrap,
  2251. int width, int height)
  2252. {
  2253. int w, c;
  2254. const uint8_t *s1, *s2;
  2255. uint8_t *d;
  2256. width>>=1;
  2257. for(;height > 0; height--) {
  2258. s1 = src;
  2259. s2 = src + src_wrap;
  2260. d = dst;
  2261. for(w = width;w > 0; w--) {
  2262. c = (s1[0] + s2[0]) >> 1;
  2263. d[0] = c;
  2264. d[1] = c;
  2265. s1++;
  2266. s2++;
  2267. d += 2;
  2268. }
  2269. src += src_wrap * 2;
  2270. dst += dst_wrap;
  2271. }
  2272. }
/* XXX: always use linesize. Return -1 if not supported */
/**
 * Convert an image from one pixel format to another.
 *
 * Strategy, tried in order:
 *   1. same src/dst format        -> plain picture copy
 *   2. dedicated table entry      -> specific conversion routine
 *   3. gray8 <-> planar YUV       -> copy/scale luma, fill/drop chroma
 *   4. planar YUV -> planar YUV   -> per-plane chroma resize filter
 *   5. anything else              -> recurse via an intermediate format
 *
 * @return 0 on success, -1 if the conversion is not supported.
 */
int img_convert(AVPicture *dst, int dst_pix_fmt,
                const AVPicture *src, int src_pix_fmt,
                int src_width, int src_height)
{
    static int initialized;
    int i, ret, dst_width, dst_height, int_pix_fmt;
    const PixFmtInfo *src_pix, *dst_pix;
    const ConvertEntry *ce;
    AVPicture tmp1, *tmp = &tmp1;

    if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
        dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
        return -1;
    if (src_width <= 0 || src_height <= 0)
        return 0;

    /* one-time initialization of the conversion dispatch table */
    if (!initialized) {
        initialized = 1;
        img_convert_init();
    }

    dst_width = src_width;
    dst_height = src_height;

    dst_pix = &pix_fmt_info[dst_pix_fmt];
    src_pix = &pix_fmt_info[src_pix_fmt];
    if (src_pix_fmt == dst_pix_fmt) {
        /* no conversion needed: just copy */
        av_picture_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
        return 0;
    }

    ce = &convert_table[src_pix_fmt][dst_pix_fmt];
    if (ce->convert) {
        /* specific conversion routine */
        ce->convert(dst, src, dst_width, dst_height);
        return 0;
    }

    /* gray to YUV */
    if (is_yuv_planar(dst_pix) &&
        src_pix_fmt == PIX_FMT_GRAY8) {
        int w, h, y;
        uint8_t *d;

        if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
            /* full-range target: luma can be copied verbatim */
            ff_img_copy_plane(dst->data[0], dst->linesize[0],
                              src->data[0], src->linesize[0],
                              dst_width, dst_height);
        } else {
            /* gray8 is full range, so rescale luma to CCIR levels */
            img_apply_table(dst->data[0], dst->linesize[0],
                            src->data[0], src->linesize[0],
                            dst_width, dst_height,
                            y_jpeg_to_ccir);
        }
        /* fill U and V with 128 (neutral chroma) */
        w = dst_width;
        h = dst_height;
        w >>= dst_pix->x_chroma_shift;
        h >>= dst_pix->y_chroma_shift;
        for(i = 1; i <= 2; i++) {
            d = dst->data[i];
            for(y = 0; y< h; y++) {
                memset(d, 128, w);
                d += dst->linesize[i];
            }
        }
        return 0;
    }

    /* YUV to gray */
    if (is_yuv_planar(src_pix) &&
        dst_pix_fmt == PIX_FMT_GRAY8) {
        if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
            /* full-range source: luma can be copied verbatim */
            ff_img_copy_plane(dst->data[0], dst->linesize[0],
                              src->data[0], src->linesize[0],
                              dst_width, dst_height);
        } else {
            /* rescale CCIR-range luma to full range; chroma is dropped */
            img_apply_table(dst->data[0], dst->linesize[0],
                            src->data[0], src->linesize[0],
                            dst_width, dst_height,
                            y_ccir_to_jpeg);
        }
        return 0;
    }

    /* YUV to YUV planar */
    if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
        int x_shift, y_shift, w, h, xy_shift;
        void (*resize_func)(uint8_t *dst, int dst_wrap,
                            const uint8_t *src, int src_wrap,
                            int width, int height);

        /* compute chroma size of the smallest dimensions */
        w = dst_width;
        h = dst_height;
        if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
            w >>= dst_pix->x_chroma_shift;
        else
            w >>= src_pix->x_chroma_shift;
        if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
            h >>= dst_pix->y_chroma_shift;
        else
            h >>= src_pix->y_chroma_shift;

        x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
        y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
        /* pack both (possibly negative) shift deltas into one nibble
           each, so a single switch selects the chroma resize filter;
           0xf/0xe are -1/-2 masked to 4 bits */
        xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
        /* there must be filters for conversion at least from and to
           YUV444 format */
        switch(xy_shift) {
        case 0x00:
            resize_func = ff_img_copy_plane;
            break;
        case 0x10:
            resize_func = shrink21;
            break;
        case 0x20:
            resize_func = shrink41;
            break;
        case 0x01:
            resize_func = shrink12;
            break;
        case 0x11:
            resize_func = ff_shrink22;
            break;
        case 0x22:
            resize_func = ff_shrink44;
            break;
        case 0xf0:
            resize_func = grow21;
            break;
        case 0x0f:
            resize_func = grow12;
            break;
        case 0xe0:
            resize_func = grow41;
            break;
        case 0xff:
            resize_func = grow22;
            break;
        case 0xee:
            resize_func = grow44;
            break;
        case 0xf1:
            resize_func = conv411;
            break;
        default:
            /* currently not handled */
            goto no_chroma_filter;
        }

        ff_img_copy_plane(dst->data[0], dst->linesize[0],
                          src->data[0], src->linesize[0],
                          dst_width, dst_height);

        for(i = 1;i <= 2; i++)
            resize_func(dst->data[i], dst->linesize[i],
                        src->data[i], src->linesize[i],
                        dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
        /* if yuv color space conversion is needed, we do it here on
           the destination image */
        if (dst_pix->color_type != src_pix->color_type) {
            const uint8_t *y_table, *c_table;
            if (dst_pix->color_type == FF_COLOR_YUV) {
                y_table = y_jpeg_to_ccir;
                c_table = c_jpeg_to_ccir;
            } else {
                y_table = y_ccir_to_jpeg;
                c_table = c_ccir_to_jpeg;
            }
            img_apply_table(dst->data[0], dst->linesize[0],
                            dst->data[0], dst->linesize[0],
                            dst_width, dst_height,
                            y_table);

            for(i = 1;i <= 2; i++)
                img_apply_table(dst->data[i], dst->linesize[i],
                                dst->data[i], dst->linesize[i],
                                dst_width>>dst_pix->x_chroma_shift,
                                dst_height>>dst_pix->y_chroma_shift,
                                c_table);
        }
        return 0;
    }
 no_chroma_filter:
    /* try to use an intermediate format */
    if (src_pix_fmt == PIX_FMT_YUYV422 ||
        dst_pix_fmt == PIX_FMT_YUYV422) {
        /* specific case: convert to YUV422P first */
        int_pix_fmt = PIX_FMT_YUV422P;
    } else if (src_pix_fmt == PIX_FMT_UYVY422 ||
               dst_pix_fmt == PIX_FMT_UYVY422) {
        /* specific case: convert to YUV422P first */
        int_pix_fmt = PIX_FMT_YUV422P;
    } else if (src_pix_fmt == PIX_FMT_UYYVYY411 ||
               dst_pix_fmt == PIX_FMT_UYYVYY411) {
        /* specific case: convert to YUV411P first */
        int_pix_fmt = PIX_FMT_YUV411P;
    } else if ((src_pix->color_type == FF_COLOR_GRAY &&
                src_pix_fmt != PIX_FMT_GRAY8) ||
               (dst_pix->color_type == FF_COLOR_GRAY &&
                dst_pix_fmt != PIX_FMT_GRAY8)) {
        /* gray8 is the normalized format */
        int_pix_fmt = PIX_FMT_GRAY8;
    } else if ((is_yuv_planar(src_pix) &&
                src_pix_fmt != PIX_FMT_YUV444P &&
                src_pix_fmt != PIX_FMT_YUVJ444P)) {
        /* yuv444 is the normalized format */
        if (src_pix->color_type == FF_COLOR_YUV_JPEG)
            int_pix_fmt = PIX_FMT_YUVJ444P;
        else
            int_pix_fmt = PIX_FMT_YUV444P;
    } else if ((is_yuv_planar(dst_pix) &&
                dst_pix_fmt != PIX_FMT_YUV444P &&
                dst_pix_fmt != PIX_FMT_YUVJ444P)) {
        /* yuv444 is the normalized format */
        if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
            int_pix_fmt = PIX_FMT_YUVJ444P;
        else
            int_pix_fmt = PIX_FMT_YUV444P;
    } else {
        /* the two formats are rgb or gray8 or yuv[j]444p */
        if (src_pix->is_alpha && dst_pix->is_alpha)
            int_pix_fmt = PIX_FMT_RGB32;
        else
            int_pix_fmt = PIX_FMT_RGB24;
    }
    /* guard against recursing with no progress */
    if (src_pix_fmt == int_pix_fmt)
        return -1;
    if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
        return -1;
    ret = -1;
    /* two-step conversion through the intermediate picture */
    if (img_convert(tmp, int_pix_fmt,
                    src, src_pix_fmt, src_width, src_height) < 0)
        goto fail1;
    if (img_convert(dst, dst_pix_fmt,
                    tmp, int_pix_fmt, dst_width, dst_height) < 0)
        goto fail1;
    ret = 0;
 fail1:
    avpicture_free(tmp);
    return ret;
}
  2504. #endif
  2505. /* NOTE: we scan all the pixels to have an exact information */
  2506. static int get_alpha_info_pal8(const AVPicture *src, int width, int height)
  2507. {
  2508. const unsigned char *p;
  2509. int src_wrap, ret, x, y;
  2510. unsigned int a;
  2511. uint32_t *palette = (uint32_t *)src->data[1];
  2512. p = src->data[0];
  2513. src_wrap = src->linesize[0] - width;
  2514. ret = 0;
  2515. for(y=0;y<height;y++) {
  2516. for(x=0;x<width;x++) {
  2517. a = palette[p[0]] >> 24;
  2518. if (a == 0x00) {
  2519. ret |= FF_ALPHA_TRANSP;
  2520. } else if (a != 0xff) {
  2521. ret |= FF_ALPHA_SEMI_TRANSP;
  2522. }
  2523. p++;
  2524. }
  2525. p += src_wrap;
  2526. }
  2527. return ret;
  2528. }
  2529. int img_get_alpha_info(const AVPicture *src,
  2530. int pix_fmt, int width, int height)
  2531. {
  2532. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  2533. int ret;
  2534. pf = &pix_fmt_info[pix_fmt];
  2535. /* no alpha can be represented in format */
  2536. if (!pf->is_alpha)
  2537. return 0;
  2538. switch(pix_fmt) {
  2539. case PIX_FMT_RGB32:
  2540. ret = get_alpha_info_rgb32(src, width, height);
  2541. break;
  2542. case PIX_FMT_PAL8:
  2543. ret = get_alpha_info_pal8(src, width, height);
  2544. break;
  2545. default:
  2546. /* we do not know, so everything is indicated */
  2547. ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
  2548. break;
  2549. }
  2550. return ret;
  2551. }
#if HAVE_MMX
/* MMX implementations of the [-1 4 2 4 -1]/8 vertical deinterlacing
   filter used below.  Each macro invocation filters 4 pixels.
   Preconditions set up by the callers: mm7 == 0 (zero register for
   byte->word unpacking) and mm6 == ff_pw_4 (the +4 rounding term). */

/* In-place variant: additionally saves the original lum_m2 pixels into
   lum_m4 (line rotation, matching deinterlace_line_inplace) before
   overwriting lum_m2 with the filtered result. */
#define DEINT_INPLACE_LINE_LUM \
                    movd_m2r(lum_m4[0],mm0);\
                    movd_m2r(lum_m3[0],mm1);\
                    movd_m2r(lum_m2[0],mm2);\
                    movd_m2r(lum_m1[0],mm3);\
                    movd_m2r(lum[0],mm4);\
                    punpcklbw_r2r(mm7,mm0);\
                    movd_r2m(mm2,lum_m4[0]);\
                    punpcklbw_r2r(mm7,mm1);\
                    punpcklbw_r2r(mm7,mm2);\
                    punpcklbw_r2r(mm7,mm3);\
                    punpcklbw_r2r(mm7,mm4);\
                    paddw_r2r(mm3,mm1);\
                    psllw_i2r(1,mm2);\
                    paddw_r2r(mm4,mm0);\
                    psllw_i2r(2,mm1);\
                    paddw_r2r(mm6,mm2);\
                    paddw_r2r(mm2,mm1);\
                    psubusw_r2r(mm0,mm1);\
                    psrlw_i2r(3,mm1);\
                    packuswb_r2r(mm7,mm1);\
                    movd_r2m(mm1,lum_m2[0]);

/* Out-of-place variant: writes the 4 filtered pixels to dst. */
#define DEINT_LINE_LUM \
                    movd_m2r(lum_m4[0],mm0);\
                    movd_m2r(lum_m3[0],mm1);\
                    movd_m2r(lum_m2[0],mm2);\
                    movd_m2r(lum_m1[0],mm3);\
                    movd_m2r(lum[0],mm4);\
                    punpcklbw_r2r(mm7,mm0);\
                    punpcklbw_r2r(mm7,mm1);\
                    punpcklbw_r2r(mm7,mm2);\
                    punpcklbw_r2r(mm7,mm3);\
                    punpcklbw_r2r(mm7,mm4);\
                    paddw_r2r(mm3,mm1);\
                    psllw_i2r(1,mm2);\
                    paddw_r2r(mm4,mm0);\
                    psllw_i2r(2,mm1);\
                    paddw_r2r(mm6,mm2);\
                    paddw_r2r(mm2,mm1);\
                    psubusw_r2r(mm0,mm1);\
                    psrlw_i2r(3,mm1);\
                    packuswb_r2r(mm7,mm1);\
                    movd_r2m(mm1,dst[0]);
#endif
/* filter parameters: [-1 4 2 4 -1] // 8 */
/* Filter one line with the 5-tap vertical kernel above:
   dst[i] = clip((-lum_m4[i] + 4*lum_m3[i] + 2*lum_m2[i]
                  + 4*lum_m1[i] - lum[i] + 4) >> 3)
   lum_m4..lum are the five source lines, top to bottom. */
static void deinterlace_line(uint8_t *dst,
                             const uint8_t *lum_m4, const uint8_t *lum_m3,
                             const uint8_t *lum_m2, const uint8_t *lum_m1,
                             const uint8_t *lum,
                             int size)
{
#if !HAVE_MMX
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    int sum;

    for(;size > 0;size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        /* cm[] clamps the rounded result to 0..255 */
        dst[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
        dst++;
    }
#else
    {
        /* set up the registers DEINT_LINE_LUM requires:
           mm7 = 0, mm6 = rounding constant 4 */
        pxor_r2r(mm7,mm7);
        movq_m2r(ff_pw_4,mm6);
    }
    /* NOTE(review): only size & ~3 pixels are processed; the trailing
       size % 4 pixels stay untouched.  avpicture_deinterlace rejects
       widths that are not multiples of 4, so this seems intentional —
       confirm before reusing with other callers. */
    for (;size > 3; size-=4) {
        DEINT_LINE_LUM
        lum_m4+=4;
        lum_m3+=4;
        lum_m2+=4;
        lum_m1+=4;
        lum+=4;
        dst+=4;
    }
#endif
}
/* Same 5-tap filter as deinterlace_line(), but operating in place:
   the filtered result is stored into lum_m2, and the ORIGINAL lum_m2
   pixels are first saved into lum_m4 so the caller can rotate its
   line pointers without an extra buffer per line. */
static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
                                     int size)
{
#if !HAVE_MMX
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    int sum;

    for(;size > 0;size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        /* save the unfiltered center line before overwriting it */
        lum_m4[0]=lum_m2[0];
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        /* cm[] clamps the rounded result to 0..255 */
        lum_m2[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
    }
#else
    {
        /* set up the registers DEINT_INPLACE_LINE_LUM requires:
           mm7 = 0, mm6 = rounding constant 4 */
        pxor_r2r(mm7,mm7);
        movq_m2r(ff_pw_4,mm6);
    }
    /* NOTE(review): only size & ~3 pixels are processed; trailing
       size % 4 pixels stay untouched (widths are multiples of 4 via
       avpicture_deinterlace) — confirm before reusing elsewhere. */
    for (;size > 3; size-=4) {
        DEINT_INPLACE_LINE_LUM
        lum_m4+=4;
        lum_m3+=4;
        lum_m2+=4;
        lum_m1+=4;
        lum+=4;
    }
#endif
}
/* deinterlacing : 2 temporal taps, 3 spatial taps linear filter. The
   top field is copied as is, but the bottom field is deinterlaced
   against the top field. */
/* Out-of-place plane deinterlace: even output lines are copied from
   the source, odd output lines are filtered from the five surrounding
   source lines.  The first filtered line reuses src line 0 twice
   (src_m2 == src_m1 at start) and the last one clamps at the bottom.
   NOTE(review): height is assumed even and >= 2 — the public entry
   point avpicture_deinterlace enforces (height & 3) == 0. */
static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
                                     const uint8_t *src1, int src_wrap,
                                     int width, int height)
{
    const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
    int y;

    src_m2 = src1;
    src_m1 = src1;
    src_0=&src_m1[src_wrap];
    src_p1=&src_0[src_wrap];
    src_p2=&src_p1[src_wrap];
    for(y=0;y<(height-2);y+=2) {
        /* copy the top-field line unchanged */
        memcpy(dst,src_m1,width);
        dst += dst_wrap;
        /* synthesize the bottom-field line from 5 neighbours */
        deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
        /* rotate the sliding window down by two source lines */
        src_m2 = src_0;
        src_m1 = src_p1;
        src_0 = src_p2;
        src_p1 += 2*src_wrap;
        src_p2 += 2*src_wrap;
        dst += dst_wrap;
    }
    memcpy(dst,src_m1,width);
    dst += dst_wrap;
    /* do last line */
    deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
}
  2702. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  2703. int width, int height)
  2704. {
  2705. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  2706. int y;
  2707. uint8_t *buf;
  2708. buf = (uint8_t*)av_malloc(width);
  2709. src_m1 = src1;
  2710. memcpy(buf,src_m1,width);
  2711. src_0=&src_m1[src_wrap];
  2712. src_p1=&src_0[src_wrap];
  2713. src_p2=&src_p1[src_wrap];
  2714. for(y=0;y<(height-2);y+=2) {
  2715. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  2716. src_m1 = src_p1;
  2717. src_0 = src_p2;
  2718. src_p1 += 2*src_wrap;
  2719. src_p2 += 2*src_wrap;
  2720. }
  2721. /* do last line */
  2722. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  2723. av_free(buf);
  2724. }
/**
 * Deinterlace a picture, either in place (src == dst) or into dst.
 * Supported formats: YUV420P, YUV422P, YUV444P, YUV411P, GRAY8.
 * Both width and height must be multiples of 4.
 *
 * @return 0 on success, -1 on unsupported format or dimensions.
 */
int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
                          int pix_fmt, int width, int height)
{
    int i;

    if (pix_fmt != PIX_FMT_YUV420P &&
        pix_fmt != PIX_FMT_YUV422P &&
        pix_fmt != PIX_FMT_YUV444P &&
        pix_fmt != PIX_FMT_YUV411P &&
        pix_fmt != PIX_FMT_GRAY8)
        return -1;
    /* the line filters process 4 pixels at a time (MMX path) */
    if ((width & 3) != 0 || (height & 3) != 0)
        return -1;

    for(i=0;i<3;i++) {
        if (i == 1) {
            /* shrink to chroma plane dimensions before processing
               planes 1 and 2 */
            switch(pix_fmt) {
            case PIX_FMT_YUV420P:
                width >>= 1;
                height >>= 1;
                break;
            case PIX_FMT_YUV422P:
                width >>= 1;
                break;
            case PIX_FMT_YUV411P:
                width >>= 2;
                break;
            default:
                break;
            }
            /* gray has a single plane: stop after plane 0 */
            if (pix_fmt == PIX_FMT_GRAY8) {
                break;
            }
        }
        if (src == dst) {
            deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i],
                                             width, height);
        } else {
            deinterlace_bottom_field(dst->data[i],dst->linesize[i],
                                     src->data[i], src->linesize[i],
                                     width, height);
        }
    }
    /* reset the FPU/MMX state after the MMX line filters */
    emms_c();
    return 0;
}