You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2997 lines
80KB

  1. /*
  2. * Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file libavcodec/imgconvert.c
  23. * misc image conversion routines
  24. */
  25. /* TODO:
  26. * - write 'ffimg' program to test all the image related stuff
  27. * - move all api to slice based system
  28. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  29. */
  30. #include "avcodec.h"
  31. #include "dsputil.h"
  32. #include "colorspace.h"
  33. #if HAVE_MMX
  34. #include "x86/mmx.h"
  35. #include "x86/dsputil_mmx.h"
  36. #endif
  37. #define xglue(x, y) x ## y
  38. #define glue(x, y) xglue(x, y)
  39. #define FF_COLOR_RGB 0 /**< RGB color space */
  40. #define FF_COLOR_GRAY 1 /**< gray color space */
  41. #define FF_COLOR_YUV 2 /**< YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
  42. #define FF_COLOR_YUV_JPEG 3 /**< YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
  43. #define FF_PIXEL_PLANAR 0 /**< each channel has one component in AVPicture */
  44. #define FF_PIXEL_PACKED 1 /**< only one components containing all the channels */
  45. #define FF_PIXEL_PALETTE 2 /**< one components containing indexes for a palette */
  46. typedef struct PixFmtInfo {
  47. const char *name;
  48. uint8_t nb_channels; /**< number of channels (including alpha) */
  49. uint8_t color_type; /**< color type (see FF_COLOR_xxx constants) */
  50. uint8_t pixel_type; /**< pixel storage type (see FF_PIXEL_xxx constants) */
  51. uint8_t is_alpha : 1; /**< true if alpha can be specified */
  52. uint8_t is_hwaccel : 1; /**< true if this is an HW accelerated format */
  53. uint8_t x_chroma_shift; /**< X chroma subsampling factor is 2 ^ shift */
  54. uint8_t y_chroma_shift; /**< Y chroma subsampling factor is 2 ^ shift */
  55. uint8_t depth; /**< bit depth of the color components */
  56. } PixFmtInfo;
  57. /* this table gives more information about formats */
  58. static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
  59. /* YUV formats */
  60. [PIX_FMT_YUV420P] = {
  61. .name = "yuv420p",
  62. .nb_channels = 3,
  63. .color_type = FF_COLOR_YUV,
  64. .pixel_type = FF_PIXEL_PLANAR,
  65. .depth = 8,
  66. .x_chroma_shift = 1, .y_chroma_shift = 1,
  67. },
  68. [PIX_FMT_YUV422P] = {
  69. .name = "yuv422p",
  70. .nb_channels = 3,
  71. .color_type = FF_COLOR_YUV,
  72. .pixel_type = FF_PIXEL_PLANAR,
  73. .depth = 8,
  74. .x_chroma_shift = 1, .y_chroma_shift = 0,
  75. },
  76. [PIX_FMT_YUV444P] = {
  77. .name = "yuv444p",
  78. .nb_channels = 3,
  79. .color_type = FF_COLOR_YUV,
  80. .pixel_type = FF_PIXEL_PLANAR,
  81. .depth = 8,
  82. .x_chroma_shift = 0, .y_chroma_shift = 0,
  83. },
  84. [PIX_FMT_YUYV422] = {
  85. .name = "yuyv422",
  86. .nb_channels = 1,
  87. .color_type = FF_COLOR_YUV,
  88. .pixel_type = FF_PIXEL_PACKED,
  89. .depth = 8,
  90. .x_chroma_shift = 1, .y_chroma_shift = 0,
  91. },
  92. [PIX_FMT_UYVY422] = {
  93. .name = "uyvy422",
  94. .nb_channels = 1,
  95. .color_type = FF_COLOR_YUV,
  96. .pixel_type = FF_PIXEL_PACKED,
  97. .depth = 8,
  98. .x_chroma_shift = 1, .y_chroma_shift = 0,
  99. },
  100. [PIX_FMT_YUV410P] = {
  101. .name = "yuv410p",
  102. .nb_channels = 3,
  103. .color_type = FF_COLOR_YUV,
  104. .pixel_type = FF_PIXEL_PLANAR,
  105. .depth = 8,
  106. .x_chroma_shift = 2, .y_chroma_shift = 2,
  107. },
  108. [PIX_FMT_YUV411P] = {
  109. .name = "yuv411p",
  110. .nb_channels = 3,
  111. .color_type = FF_COLOR_YUV,
  112. .pixel_type = FF_PIXEL_PLANAR,
  113. .depth = 8,
  114. .x_chroma_shift = 2, .y_chroma_shift = 0,
  115. },
  116. [PIX_FMT_YUV440P] = {
  117. .name = "yuv440p",
  118. .nb_channels = 3,
  119. .color_type = FF_COLOR_YUV,
  120. .pixel_type = FF_PIXEL_PLANAR,
  121. .depth = 8,
  122. .x_chroma_shift = 0, .y_chroma_shift = 1,
  123. },
  124. /* YUV formats with alpha plane */
  125. [PIX_FMT_YUVA420P] = {
  126. .name = "yuva420p",
  127. .nb_channels = 4,
  128. .color_type = FF_COLOR_YUV,
  129. .pixel_type = FF_PIXEL_PLANAR,
  130. .depth = 8,
  131. .x_chroma_shift = 1, .y_chroma_shift = 1,
  132. },
  133. /* JPEG YUV */
  134. [PIX_FMT_YUVJ420P] = {
  135. .name = "yuvj420p",
  136. .nb_channels = 3,
  137. .color_type = FF_COLOR_YUV_JPEG,
  138. .pixel_type = FF_PIXEL_PLANAR,
  139. .depth = 8,
  140. .x_chroma_shift = 1, .y_chroma_shift = 1,
  141. },
  142. [PIX_FMT_YUVJ422P] = {
  143. .name = "yuvj422p",
  144. .nb_channels = 3,
  145. .color_type = FF_COLOR_YUV_JPEG,
  146. .pixel_type = FF_PIXEL_PLANAR,
  147. .depth = 8,
  148. .x_chroma_shift = 1, .y_chroma_shift = 0,
  149. },
  150. [PIX_FMT_YUVJ444P] = {
  151. .name = "yuvj444p",
  152. .nb_channels = 3,
  153. .color_type = FF_COLOR_YUV_JPEG,
  154. .pixel_type = FF_PIXEL_PLANAR,
  155. .depth = 8,
  156. .x_chroma_shift = 0, .y_chroma_shift = 0,
  157. },
  158. [PIX_FMT_YUVJ440P] = {
  159. .name = "yuvj440p",
  160. .nb_channels = 3,
  161. .color_type = FF_COLOR_YUV_JPEG,
  162. .pixel_type = FF_PIXEL_PLANAR,
  163. .depth = 8,
  164. .x_chroma_shift = 0, .y_chroma_shift = 1,
  165. },
  166. /* RGB formats */
  167. [PIX_FMT_RGB24] = {
  168. .name = "rgb24",
  169. .nb_channels = 3,
  170. .color_type = FF_COLOR_RGB,
  171. .pixel_type = FF_PIXEL_PACKED,
  172. .depth = 8,
  173. .x_chroma_shift = 0, .y_chroma_shift = 0,
  174. },
  175. [PIX_FMT_BGR24] = {
  176. .name = "bgr24",
  177. .nb_channels = 3,
  178. .color_type = FF_COLOR_RGB,
  179. .pixel_type = FF_PIXEL_PACKED,
  180. .depth = 8,
  181. .x_chroma_shift = 0, .y_chroma_shift = 0,
  182. },
  183. [PIX_FMT_RGB32] = {
  184. .name = "rgb32",
  185. .nb_channels = 4, .is_alpha = 1,
  186. .color_type = FF_COLOR_RGB,
  187. .pixel_type = FF_PIXEL_PACKED,
  188. .depth = 8,
  189. .x_chroma_shift = 0, .y_chroma_shift = 0,
  190. },
  191. [PIX_FMT_RGB48BE] = {
  192. .name = "rgb48be",
  193. .nb_channels = 3,
  194. .color_type = FF_COLOR_RGB,
  195. .pixel_type = FF_PIXEL_PACKED,
  196. .depth = 16,
  197. .x_chroma_shift = 0, .y_chroma_shift = 0,
  198. },
  199. [PIX_FMT_RGB48LE] = {
  200. .name = "rgb48le",
  201. .nb_channels = 3,
  202. .color_type = FF_COLOR_RGB,
  203. .pixel_type = FF_PIXEL_PACKED,
  204. .depth = 16,
  205. .x_chroma_shift = 0, .y_chroma_shift = 0,
  206. },
  207. [PIX_FMT_RGB565] = {
  208. .name = "rgb565",
  209. .nb_channels = 3,
  210. .color_type = FF_COLOR_RGB,
  211. .pixel_type = FF_PIXEL_PACKED,
  212. .depth = 5,
  213. .x_chroma_shift = 0, .y_chroma_shift = 0,
  214. },
  215. [PIX_FMT_RGB555] = {
  216. .name = "rgb555",
  217. .nb_channels = 3,
  218. .color_type = FF_COLOR_RGB,
  219. .pixel_type = FF_PIXEL_PACKED,
  220. .depth = 5,
  221. .x_chroma_shift = 0, .y_chroma_shift = 0,
  222. },
  223. /* gray / mono formats */
  224. [PIX_FMT_GRAY16BE] = {
  225. .name = "gray16be",
  226. .nb_channels = 1,
  227. .color_type = FF_COLOR_GRAY,
  228. .pixel_type = FF_PIXEL_PLANAR,
  229. .depth = 16,
  230. },
  231. [PIX_FMT_GRAY16LE] = {
  232. .name = "gray16le",
  233. .nb_channels = 1,
  234. .color_type = FF_COLOR_GRAY,
  235. .pixel_type = FF_PIXEL_PLANAR,
  236. .depth = 16,
  237. },
  238. [PIX_FMT_GRAY8] = {
  239. .name = "gray",
  240. .nb_channels = 1,
  241. .color_type = FF_COLOR_GRAY,
  242. .pixel_type = FF_PIXEL_PLANAR,
  243. .depth = 8,
  244. },
  245. [PIX_FMT_MONOWHITE] = {
  246. .name = "monow",
  247. .nb_channels = 1,
  248. .color_type = FF_COLOR_GRAY,
  249. .pixel_type = FF_PIXEL_PLANAR,
  250. .depth = 1,
  251. },
  252. [PIX_FMT_MONOBLACK] = {
  253. .name = "monob",
  254. .nb_channels = 1,
  255. .color_type = FF_COLOR_GRAY,
  256. .pixel_type = FF_PIXEL_PLANAR,
  257. .depth = 1,
  258. },
  259. /* paletted formats */
  260. [PIX_FMT_PAL8] = {
  261. .name = "pal8",
  262. .nb_channels = 4, .is_alpha = 1,
  263. .color_type = FF_COLOR_RGB,
  264. .pixel_type = FF_PIXEL_PALETTE,
  265. .depth = 8,
  266. },
  267. [PIX_FMT_XVMC_MPEG2_MC] = {
  268. .name = "xvmcmc",
  269. .is_hwaccel = 1,
  270. },
  271. [PIX_FMT_XVMC_MPEG2_IDCT] = {
  272. .name = "xvmcidct",
  273. .is_hwaccel = 1,
  274. },
  275. [PIX_FMT_VDPAU_MPEG1] = {
  276. .name = "vdpau_mpeg1",
  277. .is_hwaccel = 1,
  278. },
  279. [PIX_FMT_VDPAU_MPEG2] = {
  280. .name = "vdpau_mpeg2",
  281. .is_hwaccel = 1,
  282. },
  283. [PIX_FMT_VDPAU_H264] = {
  284. .name = "vdpau_h264",
  285. .is_hwaccel = 1,
  286. },
  287. [PIX_FMT_VDPAU_WMV3] = {
  288. .name = "vdpau_wmv3",
  289. .is_hwaccel = 1,
  290. },
  291. [PIX_FMT_VDPAU_VC1] = {
  292. .name = "vdpau_vc1",
  293. .is_hwaccel = 1,
  294. },
  295. [PIX_FMT_UYYVYY411] = {
  296. .name = "uyyvyy411",
  297. .nb_channels = 1,
  298. .color_type = FF_COLOR_YUV,
  299. .pixel_type = FF_PIXEL_PACKED,
  300. .depth = 8,
  301. .x_chroma_shift = 2, .y_chroma_shift = 0,
  302. },
  303. [PIX_FMT_BGR32] = {
  304. .name = "bgr32",
  305. .nb_channels = 4, .is_alpha = 1,
  306. .color_type = FF_COLOR_RGB,
  307. .pixel_type = FF_PIXEL_PACKED,
  308. .depth = 8,
  309. .x_chroma_shift = 0, .y_chroma_shift = 0,
  310. },
  311. [PIX_FMT_BGR565] = {
  312. .name = "bgr565",
  313. .nb_channels = 3,
  314. .color_type = FF_COLOR_RGB,
  315. .pixel_type = FF_PIXEL_PACKED,
  316. .depth = 5,
  317. .x_chroma_shift = 0, .y_chroma_shift = 0,
  318. },
  319. [PIX_FMT_BGR555] = {
  320. .name = "bgr555",
  321. .nb_channels = 3,
  322. .color_type = FF_COLOR_RGB,
  323. .pixel_type = FF_PIXEL_PACKED,
  324. .depth = 5,
  325. .x_chroma_shift = 0, .y_chroma_shift = 0,
  326. },
  327. [PIX_FMT_RGB8] = {
  328. .name = "rgb8",
  329. .nb_channels = 1,
  330. .color_type = FF_COLOR_RGB,
  331. .pixel_type = FF_PIXEL_PACKED,
  332. .depth = 8,
  333. .x_chroma_shift = 0, .y_chroma_shift = 0,
  334. },
  335. [PIX_FMT_RGB4] = {
  336. .name = "rgb4",
  337. .nb_channels = 1,
  338. .color_type = FF_COLOR_RGB,
  339. .pixel_type = FF_PIXEL_PACKED,
  340. .depth = 4,
  341. .x_chroma_shift = 0, .y_chroma_shift = 0,
  342. },
  343. [PIX_FMT_RGB4_BYTE] = {
  344. .name = "rgb4_byte",
  345. .nb_channels = 1,
  346. .color_type = FF_COLOR_RGB,
  347. .pixel_type = FF_PIXEL_PACKED,
  348. .depth = 8,
  349. .x_chroma_shift = 0, .y_chroma_shift = 0,
  350. },
  351. [PIX_FMT_BGR8] = {
  352. .name = "bgr8",
  353. .nb_channels = 1,
  354. .color_type = FF_COLOR_RGB,
  355. .pixel_type = FF_PIXEL_PACKED,
  356. .depth = 8,
  357. .x_chroma_shift = 0, .y_chroma_shift = 0,
  358. },
  359. [PIX_FMT_BGR4] = {
  360. .name = "bgr4",
  361. .nb_channels = 1,
  362. .color_type = FF_COLOR_RGB,
  363. .pixel_type = FF_PIXEL_PACKED,
  364. .depth = 4,
  365. .x_chroma_shift = 0, .y_chroma_shift = 0,
  366. },
  367. [PIX_FMT_BGR4_BYTE] = {
  368. .name = "bgr4_byte",
  369. .nb_channels = 1,
  370. .color_type = FF_COLOR_RGB,
  371. .pixel_type = FF_PIXEL_PACKED,
  372. .depth = 8,
  373. .x_chroma_shift = 0, .y_chroma_shift = 0,
  374. },
  375. [PIX_FMT_NV12] = {
  376. .name = "nv12",
  377. .nb_channels = 2,
  378. .color_type = FF_COLOR_YUV,
  379. .pixel_type = FF_PIXEL_PLANAR,
  380. .depth = 8,
  381. .x_chroma_shift = 1, .y_chroma_shift = 1,
  382. },
  383. [PIX_FMT_NV21] = {
  384. .name = "nv12",
  385. .nb_channels = 2,
  386. .color_type = FF_COLOR_YUV,
  387. .pixel_type = FF_PIXEL_PLANAR,
  388. .depth = 8,
  389. .x_chroma_shift = 1, .y_chroma_shift = 1,
  390. },
  391. [PIX_FMT_BGR32_1] = {
  392. .name = "bgr32_1",
  393. .nb_channels = 4, .is_alpha = 1,
  394. .color_type = FF_COLOR_RGB,
  395. .pixel_type = FF_PIXEL_PACKED,
  396. .depth = 8,
  397. .x_chroma_shift = 0, .y_chroma_shift = 0,
  398. },
  399. [PIX_FMT_RGB32_1] = {
  400. .name = "rgb32_1",
  401. .nb_channels = 4, .is_alpha = 1,
  402. .color_type = FF_COLOR_RGB,
  403. .pixel_type = FF_PIXEL_PACKED,
  404. .depth = 8,
  405. .x_chroma_shift = 0, .y_chroma_shift = 0,
  406. },
  407. /* VA API formats */
  408. [PIX_FMT_VAAPI_MOCO] = {
  409. .name = "vaapi_moco",
  410. .is_hwaccel = 1,
  411. },
  412. [PIX_FMT_VAAPI_IDCT] = {
  413. .name = "vaapi_idct",
  414. .is_hwaccel = 1,
  415. },
  416. [PIX_FMT_VAAPI_VLD] = {
  417. .name = "vaapi_vld",
  418. .is_hwaccel = 1,
  419. },
  420. };
  421. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  422. {
  423. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  424. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  425. }
  426. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  427. {
  428. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  429. return NULL;
  430. else
  431. return pix_fmt_info[pix_fmt].name;
  432. }
  433. enum PixelFormat avcodec_get_pix_fmt(const char* name)
  434. {
  435. int i;
  436. for (i=0; i < PIX_FMT_NB; i++)
  437. if (!strcmp(pix_fmt_info[i].name, name))
  438. return i;
  439. return PIX_FMT_NONE;
  440. }
  441. void avcodec_pix_fmt_string (char *buf, int buf_size, int pix_fmt)
  442. {
  443. /* print header */
  444. if (pix_fmt < 0)
  445. snprintf (buf, buf_size,
  446. "name " " nb_channels" " depth" " is_alpha"
  447. );
  448. else{
  449. PixFmtInfo info= pix_fmt_info[pix_fmt];
  450. char is_alpha_char= info.is_alpha ? 'y' : 'n';
  451. snprintf (buf, buf_size,
  452. "%-10s" " %1d " " %2d " " %c ",
  453. info.name,
  454. info.nb_channels,
  455. info.depth,
  456. is_alpha_char
  457. );
  458. }
  459. }
  460. int ff_is_hwaccel_pix_fmt(enum PixelFormat pix_fmt)
  461. {
  462. return pix_fmt_info[pix_fmt].is_hwaccel;
  463. }
  464. int ff_set_systematic_pal(uint32_t pal[256], enum PixelFormat pix_fmt){
  465. int i;
  466. for(i=0; i<256; i++){
  467. int r,g,b;
  468. switch(pix_fmt) {
  469. case PIX_FMT_RGB8:
  470. r= (i>>5 )*36;
  471. g= ((i>>2)&7)*36;
  472. b= (i&3 )*85;
  473. break;
  474. case PIX_FMT_BGR8:
  475. b= (i>>6 )*85;
  476. g= ((i>>3)&7)*36;
  477. r= (i&7 )*36;
  478. break;
  479. case PIX_FMT_RGB4_BYTE:
  480. r= (i>>3 )*255;
  481. g= ((i>>1)&3)*85;
  482. b= (i&1 )*255;
  483. break;
  484. case PIX_FMT_BGR4_BYTE:
  485. b= (i>>3 )*255;
  486. g= ((i>>1)&3)*85;
  487. r= (i&1 )*255;
  488. break;
  489. case PIX_FMT_GRAY8:
  490. r=b=g= i;
  491. break;
  492. default:
  493. return -1;
  494. }
  495. pal[i] = b + (g<<8) + (r<<16);
  496. }
  497. return 0;
  498. }
  499. int ff_fill_linesize(AVPicture *picture, int pix_fmt, int width)
  500. {
  501. int w2;
  502. const PixFmtInfo *pinfo;
  503. memset(picture->linesize, 0, sizeof(picture->linesize));
  504. pinfo = &pix_fmt_info[pix_fmt];
  505. switch(pix_fmt) {
  506. case PIX_FMT_YUV420P:
  507. case PIX_FMT_YUV422P:
  508. case PIX_FMT_YUV444P:
  509. case PIX_FMT_YUV410P:
  510. case PIX_FMT_YUV411P:
  511. case PIX_FMT_YUV440P:
  512. case PIX_FMT_YUVJ420P:
  513. case PIX_FMT_YUVJ422P:
  514. case PIX_FMT_YUVJ444P:
  515. case PIX_FMT_YUVJ440P:
  516. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  517. picture->linesize[0] = width;
  518. picture->linesize[1] = w2;
  519. picture->linesize[2] = w2;
  520. break;
  521. case PIX_FMT_YUVA420P:
  522. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  523. picture->linesize[0] = width;
  524. picture->linesize[1] = w2;
  525. picture->linesize[2] = w2;
  526. picture->linesize[3] = width;
  527. break;
  528. case PIX_FMT_NV12:
  529. case PIX_FMT_NV21:
  530. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  531. picture->linesize[0] = width;
  532. picture->linesize[1] = w2;
  533. break;
  534. case PIX_FMT_RGB24:
  535. case PIX_FMT_BGR24:
  536. picture->linesize[0] = width * 3;
  537. break;
  538. case PIX_FMT_RGB32:
  539. case PIX_FMT_BGR32:
  540. case PIX_FMT_RGB32_1:
  541. case PIX_FMT_BGR32_1:
  542. picture->linesize[0] = width * 4;
  543. break;
  544. case PIX_FMT_RGB48BE:
  545. case PIX_FMT_RGB48LE:
  546. picture->linesize[0] = width * 6;
  547. break;
  548. case PIX_FMT_GRAY16BE:
  549. case PIX_FMT_GRAY16LE:
  550. case PIX_FMT_BGR555:
  551. case PIX_FMT_BGR565:
  552. case PIX_FMT_RGB555:
  553. case PIX_FMT_RGB565:
  554. case PIX_FMT_YUYV422:
  555. picture->linesize[0] = width * 2;
  556. break;
  557. case PIX_FMT_UYVY422:
  558. picture->linesize[0] = width * 2;
  559. break;
  560. case PIX_FMT_UYYVYY411:
  561. picture->linesize[0] = width + width/2;
  562. break;
  563. case PIX_FMT_RGB4:
  564. case PIX_FMT_BGR4:
  565. picture->linesize[0] = width / 2;
  566. break;
  567. case PIX_FMT_MONOWHITE:
  568. case PIX_FMT_MONOBLACK:
  569. picture->linesize[0] = (width + 7) >> 3;
  570. break;
  571. case PIX_FMT_PAL8:
  572. case PIX_FMT_RGB8:
  573. case PIX_FMT_BGR8:
  574. case PIX_FMT_RGB4_BYTE:
  575. case PIX_FMT_BGR4_BYTE:
  576. case PIX_FMT_GRAY8:
  577. picture->linesize[0] = width;
  578. picture->linesize[1] = 4;
  579. break;
  580. default:
  581. return -1;
  582. }
  583. return 0;
  584. }
  585. int ff_fill_pointer(AVPicture *picture, uint8_t *ptr, int pix_fmt,
  586. int height)
  587. {
  588. int size, h2, size2;
  589. const PixFmtInfo *pinfo;
  590. pinfo = &pix_fmt_info[pix_fmt];
  591. size = picture->linesize[0] * height;
  592. switch(pix_fmt) {
  593. case PIX_FMT_YUV420P:
  594. case PIX_FMT_YUV422P:
  595. case PIX_FMT_YUV444P:
  596. case PIX_FMT_YUV410P:
  597. case PIX_FMT_YUV411P:
  598. case PIX_FMT_YUV440P:
  599. case PIX_FMT_YUVJ420P:
  600. case PIX_FMT_YUVJ422P:
  601. case PIX_FMT_YUVJ444P:
  602. case PIX_FMT_YUVJ440P:
  603. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  604. size2 = picture->linesize[1] * h2;
  605. picture->data[0] = ptr;
  606. picture->data[1] = picture->data[0] + size;
  607. picture->data[2] = picture->data[1] + size2;
  608. picture->data[3] = NULL;
  609. return size + 2 * size2;
  610. case PIX_FMT_YUVA420P:
  611. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  612. size2 = picture->linesize[1] * h2;
  613. picture->data[0] = ptr;
  614. picture->data[1] = picture->data[0] + size;
  615. picture->data[2] = picture->data[1] + size2;
  616. picture->data[3] = picture->data[1] + size2 + size2;
  617. return 2 * size + 2 * size2;
  618. case PIX_FMT_NV12:
  619. case PIX_FMT_NV21:
  620. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  621. size2 = picture->linesize[1] * h2 * 2;
  622. picture->data[0] = ptr;
  623. picture->data[1] = picture->data[0] + size;
  624. picture->data[2] = NULL;
  625. picture->data[3] = NULL;
  626. return size + 2 * size2;
  627. case PIX_FMT_RGB24:
  628. case PIX_FMT_BGR24:
  629. case PIX_FMT_RGB32:
  630. case PIX_FMT_BGR32:
  631. case PIX_FMT_RGB32_1:
  632. case PIX_FMT_BGR32_1:
  633. case PIX_FMT_RGB48BE:
  634. case PIX_FMT_RGB48LE:
  635. case PIX_FMT_GRAY16BE:
  636. case PIX_FMT_GRAY16LE:
  637. case PIX_FMT_BGR555:
  638. case PIX_FMT_BGR565:
  639. case PIX_FMT_RGB555:
  640. case PIX_FMT_RGB565:
  641. case PIX_FMT_YUYV422:
  642. case PIX_FMT_UYVY422:
  643. case PIX_FMT_UYYVYY411:
  644. case PIX_FMT_RGB4:
  645. case PIX_FMT_BGR4:
  646. case PIX_FMT_MONOWHITE:
  647. case PIX_FMT_MONOBLACK:
  648. picture->data[0] = ptr;
  649. picture->data[1] = NULL;
  650. picture->data[2] = NULL;
  651. picture->data[3] = NULL;
  652. return size;
  653. case PIX_FMT_PAL8:
  654. case PIX_FMT_RGB8:
  655. case PIX_FMT_BGR8:
  656. case PIX_FMT_RGB4_BYTE:
  657. case PIX_FMT_BGR4_BYTE:
  658. case PIX_FMT_GRAY8:
  659. size2 = (size + 3) & ~3;
  660. picture->data[0] = ptr;
  661. picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
  662. picture->data[2] = NULL;
  663. picture->data[3] = NULL;
  664. return size2 + 256 * 4;
  665. default:
  666. picture->data[0] = NULL;
  667. picture->data[1] = NULL;
  668. picture->data[2] = NULL;
  669. picture->data[3] = NULL;
  670. return -1;
  671. }
  672. }
  673. int avpicture_fill(AVPicture *picture, uint8_t *ptr,
  674. int pix_fmt, int width, int height)
  675. {
  676. if(avcodec_check_dimensions(NULL, width, height))
  677. return -1;
  678. if (ff_fill_linesize(picture, pix_fmt, width))
  679. return -1;
  680. return ff_fill_pointer(picture, ptr, pix_fmt, height);
  681. }
  682. int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
  683. unsigned char *dest, int dest_size)
  684. {
  685. const PixFmtInfo* pf = &pix_fmt_info[pix_fmt];
  686. int i, j, w, h, data_planes;
  687. const unsigned char* s;
  688. int size = avpicture_get_size(pix_fmt, width, height);
  689. if (size > dest_size || size < 0)
  690. return -1;
  691. if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
  692. if (pix_fmt == PIX_FMT_YUYV422 ||
  693. pix_fmt == PIX_FMT_UYVY422 ||
  694. pix_fmt == PIX_FMT_BGR565 ||
  695. pix_fmt == PIX_FMT_BGR555 ||
  696. pix_fmt == PIX_FMT_RGB565 ||
  697. pix_fmt == PIX_FMT_RGB555)
  698. w = width * 2;
  699. else if (pix_fmt == PIX_FMT_UYYVYY411)
  700. w = width + width/2;
  701. else if (pix_fmt == PIX_FMT_PAL8)
  702. w = width;
  703. else
  704. w = width * (pf->depth * pf->nb_channels / 8);
  705. data_planes = 1;
  706. h = height;
  707. } else {
  708. data_planes = pf->nb_channels;
  709. w = (width*pf->depth + 7)/8;
  710. h = height;
  711. }
  712. for (i=0; i<data_planes; i++) {
  713. if (i == 1) {
  714. w = width >> pf->x_chroma_shift;
  715. h = height >> pf->y_chroma_shift;
  716. }
  717. s = src->data[i];
  718. for(j=0; j<h; j++) {
  719. memcpy(dest, s, w);
  720. dest += w;
  721. s += src->linesize[i];
  722. }
  723. }
  724. if (pf->pixel_type == FF_PIXEL_PALETTE)
  725. memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);
  726. return size;
  727. }
  728. int avpicture_get_size(int pix_fmt, int width, int height)
  729. {
  730. AVPicture dummy_pict;
  731. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  732. }
  733. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  734. int has_alpha)
  735. {
  736. const PixFmtInfo *pf, *ps;
  737. int loss;
  738. ps = &pix_fmt_info[src_pix_fmt];
  739. pf = &pix_fmt_info[dst_pix_fmt];
  740. /* compute loss */
  741. loss = 0;
  742. pf = &pix_fmt_info[dst_pix_fmt];
  743. if (pf->depth < ps->depth ||
  744. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  745. loss |= FF_LOSS_DEPTH;
  746. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  747. pf->y_chroma_shift > ps->y_chroma_shift)
  748. loss |= FF_LOSS_RESOLUTION;
  749. switch(pf->color_type) {
  750. case FF_COLOR_RGB:
  751. if (ps->color_type != FF_COLOR_RGB &&
  752. ps->color_type != FF_COLOR_GRAY)
  753. loss |= FF_LOSS_COLORSPACE;
  754. break;
  755. case FF_COLOR_GRAY:
  756. if (ps->color_type != FF_COLOR_GRAY)
  757. loss |= FF_LOSS_COLORSPACE;
  758. break;
  759. case FF_COLOR_YUV:
  760. if (ps->color_type != FF_COLOR_YUV)
  761. loss |= FF_LOSS_COLORSPACE;
  762. break;
  763. case FF_COLOR_YUV_JPEG:
  764. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  765. ps->color_type != FF_COLOR_YUV &&
  766. ps->color_type != FF_COLOR_GRAY)
  767. loss |= FF_LOSS_COLORSPACE;
  768. break;
  769. default:
  770. /* fail safe test */
  771. if (ps->color_type != pf->color_type)
  772. loss |= FF_LOSS_COLORSPACE;
  773. break;
  774. }
  775. if (pf->color_type == FF_COLOR_GRAY &&
  776. ps->color_type != FF_COLOR_GRAY)
  777. loss |= FF_LOSS_CHROMA;
  778. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  779. loss |= FF_LOSS_ALPHA;
  780. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  781. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  782. loss |= FF_LOSS_COLORQUANT;
  783. return loss;
  784. }
  785. static int avg_bits_per_pixel(int pix_fmt)
  786. {
  787. int bits;
  788. const PixFmtInfo *pf;
  789. pf = &pix_fmt_info[pix_fmt];
  790. switch(pf->pixel_type) {
  791. case FF_PIXEL_PACKED:
  792. switch(pix_fmt) {
  793. case PIX_FMT_YUYV422:
  794. case PIX_FMT_UYVY422:
  795. case PIX_FMT_RGB565:
  796. case PIX_FMT_RGB555:
  797. case PIX_FMT_BGR565:
  798. case PIX_FMT_BGR555:
  799. bits = 16;
  800. break;
  801. case PIX_FMT_UYYVYY411:
  802. bits = 12;
  803. break;
  804. default:
  805. bits = pf->depth * pf->nb_channels;
  806. break;
  807. }
  808. break;
  809. case FF_PIXEL_PLANAR:
  810. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  811. bits = pf->depth * pf->nb_channels;
  812. } else {
  813. bits = pf->depth + ((2 * pf->depth) >>
  814. (pf->x_chroma_shift + pf->y_chroma_shift));
  815. }
  816. break;
  817. case FF_PIXEL_PALETTE:
  818. bits = 8;
  819. break;
  820. default:
  821. bits = -1;
  822. break;
  823. }
  824. return bits;
  825. }
  826. static int avcodec_find_best_pix_fmt1(int64_t pix_fmt_mask,
  827. int src_pix_fmt,
  828. int has_alpha,
  829. int loss_mask)
  830. {
  831. int dist, i, loss, min_dist, dst_pix_fmt;
  832. /* find exact color match with smallest size */
  833. dst_pix_fmt = -1;
  834. min_dist = 0x7fffffff;
  835. for(i = 0;i < PIX_FMT_NB; i++) {
  836. if (pix_fmt_mask & (1ULL << i)) {
  837. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  838. if (loss == 0) {
  839. dist = avg_bits_per_pixel(i);
  840. if (dist < min_dist) {
  841. min_dist = dist;
  842. dst_pix_fmt = i;
  843. }
  844. }
  845. }
  846. }
  847. return dst_pix_fmt;
  848. }
  849. int avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, int src_pix_fmt,
  850. int has_alpha, int *loss_ptr)
  851. {
  852. int dst_pix_fmt, loss_mask, i;
  853. static const int loss_mask_order[] = {
  854. ~0, /* no loss first */
  855. ~FF_LOSS_ALPHA,
  856. ~FF_LOSS_RESOLUTION,
  857. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  858. ~FF_LOSS_COLORQUANT,
  859. ~FF_LOSS_DEPTH,
  860. 0,
  861. };
  862. /* try with successive loss */
  863. i = 0;
  864. for(;;) {
  865. loss_mask = loss_mask_order[i++];
  866. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  867. has_alpha, loss_mask);
  868. if (dst_pix_fmt >= 0)
  869. goto found;
  870. if (loss_mask == 0)
  871. break;
  872. }
  873. return -1;
  874. found:
  875. if (loss_ptr)
  876. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  877. return dst_pix_fmt;
  878. }
  879. void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
  880. const uint8_t *src, int src_wrap,
  881. int width, int height)
  882. {
  883. if((!dst) || (!src))
  884. return;
  885. for(;height > 0; height--) {
  886. memcpy(dst, src, width);
  887. dst += dst_wrap;
  888. src += src_wrap;
  889. }
  890. }
  891. int ff_get_plane_bytewidth(enum PixelFormat pix_fmt, int width, int plane)
  892. {
  893. int bits;
  894. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  895. pf = &pix_fmt_info[pix_fmt];
  896. switch(pf->pixel_type) {
  897. case FF_PIXEL_PACKED:
  898. switch(pix_fmt) {
  899. case PIX_FMT_YUYV422:
  900. case PIX_FMT_UYVY422:
  901. case PIX_FMT_RGB565:
  902. case PIX_FMT_RGB555:
  903. case PIX_FMT_BGR565:
  904. case PIX_FMT_BGR555:
  905. bits = 16;
  906. break;
  907. case PIX_FMT_UYYVYY411:
  908. bits = 12;
  909. break;
  910. default:
  911. bits = pf->depth * pf->nb_channels;
  912. break;
  913. }
  914. return (width * bits + 7) >> 3;
  915. break;
  916. case FF_PIXEL_PLANAR:
  917. if (plane == 1 || plane == 2)
  918. width= -((-width)>>pf->x_chroma_shift);
  919. return (width * pf->depth + 7) >> 3;
  920. break;
  921. case FF_PIXEL_PALETTE:
  922. if (plane == 0)
  923. return width;
  924. break;
  925. }
  926. return -1;
  927. }
  928. void av_picture_copy(AVPicture *dst, const AVPicture *src,
  929. int pix_fmt, int width, int height)
  930. {
  931. int i;
  932. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  933. pf = &pix_fmt_info[pix_fmt];
  934. switch(pf->pixel_type) {
  935. case FF_PIXEL_PACKED:
  936. case FF_PIXEL_PLANAR:
  937. for(i = 0; i < pf->nb_channels; i++) {
  938. int h;
  939. int bwidth = ff_get_plane_bytewidth(pix_fmt, width, i);
  940. h = height;
  941. if (i == 1 || i == 2) {
  942. h= -((-height)>>pf->y_chroma_shift);
  943. }
  944. ff_img_copy_plane(dst->data[i], dst->linesize[i],
  945. src->data[i], src->linesize[i],
  946. bwidth, h);
  947. }
  948. break;
  949. case FF_PIXEL_PALETTE:
  950. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  951. src->data[0], src->linesize[0],
  952. width, height);
  953. /* copy the palette */
  954. ff_img_copy_plane(dst->data[1], dst->linesize[1],
  955. src->data[1], src->linesize[1],
  956. 4, 256);
  957. break;
  958. }
  959. }
  960. /* XXX: totally non optimized */
  961. static void yuyv422_to_yuv420p(AVPicture *dst, const AVPicture *src,
  962. int width, int height)
  963. {
  964. const uint8_t *p, *p1;
  965. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  966. int w;
  967. p1 = src->data[0];
  968. lum1 = dst->data[0];
  969. cb1 = dst->data[1];
  970. cr1 = dst->data[2];
  971. for(;height >= 1; height -= 2) {
  972. p = p1;
  973. lum = lum1;
  974. cb = cb1;
  975. cr = cr1;
  976. for(w = width; w >= 2; w -= 2) {
  977. lum[0] = p[0];
  978. cb[0] = p[1];
  979. lum[1] = p[2];
  980. cr[0] = p[3];
  981. p += 4;
  982. lum += 2;
  983. cb++;
  984. cr++;
  985. }
  986. if (w) {
  987. lum[0] = p[0];
  988. cb[0] = p[1];
  989. cr[0] = p[3];
  990. cb++;
  991. cr++;
  992. }
  993. p1 += src->linesize[0];
  994. lum1 += dst->linesize[0];
  995. if (height>1) {
  996. p = p1;
  997. lum = lum1;
  998. for(w = width; w >= 2; w -= 2) {
  999. lum[0] = p[0];
  1000. lum[1] = p[2];
  1001. p += 4;
  1002. lum += 2;
  1003. }
  1004. if (w) {
  1005. lum[0] = p[0];
  1006. }
  1007. p1 += src->linesize[0];
  1008. lum1 += dst->linesize[0];
  1009. }
  1010. cb1 += dst->linesize[1];
  1011. cr1 += dst->linesize[2];
  1012. }
  1013. }
  1014. static void uyvy422_to_yuv420p(AVPicture *dst, const AVPicture *src,
  1015. int width, int height)
  1016. {
  1017. const uint8_t *p, *p1;
  1018. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1019. int w;
  1020. p1 = src->data[0];
  1021. lum1 = dst->data[0];
  1022. cb1 = dst->data[1];
  1023. cr1 = dst->data[2];
  1024. for(;height >= 1; height -= 2) {
  1025. p = p1;
  1026. lum = lum1;
  1027. cb = cb1;
  1028. cr = cr1;
  1029. for(w = width; w >= 2; w -= 2) {
  1030. lum[0] = p[1];
  1031. cb[0] = p[0];
  1032. lum[1] = p[3];
  1033. cr[0] = p[2];
  1034. p += 4;
  1035. lum += 2;
  1036. cb++;
  1037. cr++;
  1038. }
  1039. if (w) {
  1040. lum[0] = p[1];
  1041. cb[0] = p[0];
  1042. cr[0] = p[2];
  1043. cb++;
  1044. cr++;
  1045. }
  1046. p1 += src->linesize[0];
  1047. lum1 += dst->linesize[0];
  1048. if (height>1) {
  1049. p = p1;
  1050. lum = lum1;
  1051. for(w = width; w >= 2; w -= 2) {
  1052. lum[0] = p[1];
  1053. lum[1] = p[3];
  1054. p += 4;
  1055. lum += 2;
  1056. }
  1057. if (w) {
  1058. lum[0] = p[1];
  1059. }
  1060. p1 += src->linesize[0];
  1061. lum1 += dst->linesize[0];
  1062. }
  1063. cb1 += dst->linesize[1];
  1064. cr1 += dst->linesize[2];
  1065. }
  1066. }
  1067. static void uyvy422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  1068. int width, int height)
  1069. {
  1070. const uint8_t *p, *p1;
  1071. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1072. int w;
  1073. p1 = src->data[0];
  1074. lum1 = dst->data[0];
  1075. cb1 = dst->data[1];
  1076. cr1 = dst->data[2];
  1077. for(;height > 0; height--) {
  1078. p = p1;
  1079. lum = lum1;
  1080. cb = cb1;
  1081. cr = cr1;
  1082. for(w = width; w >= 2; w -= 2) {
  1083. lum[0] = p[1];
  1084. cb[0] = p[0];
  1085. lum[1] = p[3];
  1086. cr[0] = p[2];
  1087. p += 4;
  1088. lum += 2;
  1089. cb++;
  1090. cr++;
  1091. }
  1092. p1 += src->linesize[0];
  1093. lum1 += dst->linesize[0];
  1094. cb1 += dst->linesize[1];
  1095. cr1 += dst->linesize[2];
  1096. }
  1097. }
  1098. static void yuyv422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  1099. int width, int height)
  1100. {
  1101. const uint8_t *p, *p1;
  1102. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1103. int w;
  1104. p1 = src->data[0];
  1105. lum1 = dst->data[0];
  1106. cb1 = dst->data[1];
  1107. cr1 = dst->data[2];
  1108. for(;height > 0; height--) {
  1109. p = p1;
  1110. lum = lum1;
  1111. cb = cb1;
  1112. cr = cr1;
  1113. for(w = width; w >= 2; w -= 2) {
  1114. lum[0] = p[0];
  1115. cb[0] = p[1];
  1116. lum[1] = p[2];
  1117. cr[0] = p[3];
  1118. p += 4;
  1119. lum += 2;
  1120. cb++;
  1121. cr++;
  1122. }
  1123. p1 += src->linesize[0];
  1124. lum1 += dst->linesize[0];
  1125. cb1 += dst->linesize[1];
  1126. cr1 += dst->linesize[2];
  1127. }
  1128. }
  1129. static void yuv422p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  1130. int width, int height)
  1131. {
  1132. uint8_t *p, *p1;
  1133. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1134. int w;
  1135. p1 = dst->data[0];
  1136. lum1 = src->data[0];
  1137. cb1 = src->data[1];
  1138. cr1 = src->data[2];
  1139. for(;height > 0; height--) {
  1140. p = p1;
  1141. lum = lum1;
  1142. cb = cb1;
  1143. cr = cr1;
  1144. for(w = width; w >= 2; w -= 2) {
  1145. p[0] = lum[0];
  1146. p[1] = cb[0];
  1147. p[2] = lum[1];
  1148. p[3] = cr[0];
  1149. p += 4;
  1150. lum += 2;
  1151. cb++;
  1152. cr++;
  1153. }
  1154. p1 += dst->linesize[0];
  1155. lum1 += src->linesize[0];
  1156. cb1 += src->linesize[1];
  1157. cr1 += src->linesize[2];
  1158. }
  1159. }
  1160. static void yuv422p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1161. int width, int height)
  1162. {
  1163. uint8_t *p, *p1;
  1164. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1165. int w;
  1166. p1 = dst->data[0];
  1167. lum1 = src->data[0];
  1168. cb1 = src->data[1];
  1169. cr1 = src->data[2];
  1170. for(;height > 0; height--) {
  1171. p = p1;
  1172. lum = lum1;
  1173. cb = cb1;
  1174. cr = cr1;
  1175. for(w = width; w >= 2; w -= 2) {
  1176. p[1] = lum[0];
  1177. p[0] = cb[0];
  1178. p[3] = lum[1];
  1179. p[2] = cr[0];
  1180. p += 4;
  1181. lum += 2;
  1182. cb++;
  1183. cr++;
  1184. }
  1185. p1 += dst->linesize[0];
  1186. lum1 += src->linesize[0];
  1187. cb1 += src->linesize[1];
  1188. cr1 += src->linesize[2];
  1189. }
  1190. }
  1191. static void uyyvyy411_to_yuv411p(AVPicture *dst, const AVPicture *src,
  1192. int width, int height)
  1193. {
  1194. const uint8_t *p, *p1;
  1195. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1196. int w;
  1197. p1 = src->data[0];
  1198. lum1 = dst->data[0];
  1199. cb1 = dst->data[1];
  1200. cr1 = dst->data[2];
  1201. for(;height > 0; height--) {
  1202. p = p1;
  1203. lum = lum1;
  1204. cb = cb1;
  1205. cr = cr1;
  1206. for(w = width; w >= 4; w -= 4) {
  1207. cb[0] = p[0];
  1208. lum[0] = p[1];
  1209. lum[1] = p[2];
  1210. cr[0] = p[3];
  1211. lum[2] = p[4];
  1212. lum[3] = p[5];
  1213. p += 6;
  1214. lum += 4;
  1215. cb++;
  1216. cr++;
  1217. }
  1218. p1 += src->linesize[0];
  1219. lum1 += dst->linesize[0];
  1220. cb1 += dst->linesize[1];
  1221. cr1 += dst->linesize[2];
  1222. }
  1223. }
  1224. static void yuv420p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  1225. int width, int height)
  1226. {
  1227. int w, h;
  1228. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1229. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1230. uint8_t *cb1, *cb2 = src->data[1];
  1231. uint8_t *cr1, *cr2 = src->data[2];
  1232. for(h = height / 2; h--;) {
  1233. line1 = linesrc;
  1234. line2 = linesrc + dst->linesize[0];
  1235. lum1 = lumsrc;
  1236. lum2 = lumsrc + src->linesize[0];
  1237. cb1 = cb2;
  1238. cr1 = cr2;
  1239. for(w = width / 2; w--;) {
  1240. *line1++ = *lum1++; *line2++ = *lum2++;
  1241. *line1++ = *line2++ = *cb1++;
  1242. *line1++ = *lum1++; *line2++ = *lum2++;
  1243. *line1++ = *line2++ = *cr1++;
  1244. }
  1245. linesrc += dst->linesize[0] * 2;
  1246. lumsrc += src->linesize[0] * 2;
  1247. cb2 += src->linesize[1];
  1248. cr2 += src->linesize[2];
  1249. }
  1250. }
  1251. static void yuv420p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1252. int width, int height)
  1253. {
  1254. int w, h;
  1255. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1256. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1257. uint8_t *cb1, *cb2 = src->data[1];
  1258. uint8_t *cr1, *cr2 = src->data[2];
  1259. for(h = height / 2; h--;) {
  1260. line1 = linesrc;
  1261. line2 = linesrc + dst->linesize[0];
  1262. lum1 = lumsrc;
  1263. lum2 = lumsrc + src->linesize[0];
  1264. cb1 = cb2;
  1265. cr1 = cr2;
  1266. for(w = width / 2; w--;) {
  1267. *line1++ = *line2++ = *cb1++;
  1268. *line1++ = *lum1++; *line2++ = *lum2++;
  1269. *line1++ = *line2++ = *cr1++;
  1270. *line1++ = *lum1++; *line2++ = *lum2++;
  1271. }
  1272. linesrc += dst->linesize[0] * 2;
  1273. lumsrc += src->linesize[0] * 2;
  1274. cb2 += src->linesize[1];
  1275. cr2 += src->linesize[2];
  1276. }
  1277. }
  1278. /* 2x2 -> 1x1 */
  1279. void ff_shrink22(uint8_t *dst, int dst_wrap,
  1280. const uint8_t *src, int src_wrap,
  1281. int width, int height)
  1282. {
  1283. int w;
  1284. const uint8_t *s1, *s2;
  1285. uint8_t *d;
  1286. for(;height > 0; height--) {
  1287. s1 = src;
  1288. s2 = s1 + src_wrap;
  1289. d = dst;
  1290. for(w = width;w >= 4; w-=4) {
  1291. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1292. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  1293. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  1294. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  1295. s1 += 8;
  1296. s2 += 8;
  1297. d += 4;
  1298. }
  1299. for(;w > 0; w--) {
  1300. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1301. s1 += 2;
  1302. s2 += 2;
  1303. d++;
  1304. }
  1305. src += 2 * src_wrap;
  1306. dst += dst_wrap;
  1307. }
  1308. }
  1309. /* 4x4 -> 1x1 */
  1310. void ff_shrink44(uint8_t *dst, int dst_wrap,
  1311. const uint8_t *src, int src_wrap,
  1312. int width, int height)
  1313. {
  1314. int w;
  1315. const uint8_t *s1, *s2, *s3, *s4;
  1316. uint8_t *d;
  1317. for(;height > 0; height--) {
  1318. s1 = src;
  1319. s2 = s1 + src_wrap;
  1320. s3 = s2 + src_wrap;
  1321. s4 = s3 + src_wrap;
  1322. d = dst;
  1323. for(w = width;w > 0; w--) {
  1324. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  1325. s2[0] + s2[1] + s2[2] + s2[3] +
  1326. s3[0] + s3[1] + s3[2] + s3[3] +
  1327. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  1328. s1 += 4;
  1329. s2 += 4;
  1330. s3 += 4;
  1331. s4 += 4;
  1332. d++;
  1333. }
  1334. src += 4 * src_wrap;
  1335. dst += dst_wrap;
  1336. }
  1337. }
  1338. /* 8x8 -> 1x1 */
  1339. void ff_shrink88(uint8_t *dst, int dst_wrap,
  1340. const uint8_t *src, int src_wrap,
  1341. int width, int height)
  1342. {
  1343. int w, i;
  1344. for(;height > 0; height--) {
  1345. for(w = width;w > 0; w--) {
  1346. int tmp=0;
  1347. for(i=0; i<8; i++){
  1348. tmp += src[0] + src[1] + src[2] + src[3] + src[4] + src[5] + src[6] + src[7];
  1349. src += src_wrap;
  1350. }
  1351. *(dst++) = (tmp + 32)>>6;
  1352. src += 8 - 8*src_wrap;
  1353. }
  1354. src += 8*src_wrap - 8*width;
  1355. dst += dst_wrap - width;
  1356. }
  1357. }
  1358. /* XXX: add jpeg quantize code */
  1359. #define TRANSP_INDEX (6*6*6)
  1360. /* this is maybe slow, but allows for extensions */
  1361. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1362. {
  1363. return (((r) / 47) % 6) * 6 * 6 + (((g) / 47) % 6) * 6 + (((b) / 47) % 6);
  1364. }
  1365. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  1366. {
  1367. uint32_t *pal;
  1368. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  1369. int i, r, g, b;
  1370. pal = (uint32_t *)palette;
  1371. i = 0;
  1372. for(r = 0; r < 6; r++) {
  1373. for(g = 0; g < 6; g++) {
  1374. for(b = 0; b < 6; b++) {
  1375. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  1376. (pal_value[g] << 8) | pal_value[b];
  1377. }
  1378. }
  1379. }
  1380. if (has_alpha)
  1381. pal[i++] = 0;
  1382. while (i < 256)
  1383. pal[i++] = 0xff000000;
  1384. }
  1385. /* copy bit n to bits 0 ... n - 1 */
  1386. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1387. {
  1388. int mask;
  1389. mask = (1 << n) - 1;
  1390. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1391. }
  1392. /* rgb555 handling */
  1393. #define RGB_NAME rgb555
  1394. #define RGB_IN(r, g, b, s)\
  1395. {\
  1396. unsigned int v = ((const uint16_t *)(s))[0];\
  1397. r = bitcopy_n(v >> (10 - 3), 3);\
  1398. g = bitcopy_n(v >> (5 - 3), 3);\
  1399. b = bitcopy_n(v << 3, 3);\
  1400. }
  1401. #define RGB_OUT(d, r, g, b)\
  1402. {\
  1403. ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3);\
  1404. }
  1405. #define BPP 2
  1406. #include "imgconvert_template.c"
  1407. /* rgb565 handling */
  1408. #define RGB_NAME rgb565
  1409. #define RGB_IN(r, g, b, s)\
  1410. {\
  1411. unsigned int v = ((const uint16_t *)(s))[0];\
  1412. r = bitcopy_n(v >> (11 - 3), 3);\
  1413. g = bitcopy_n(v >> (5 - 2), 2);\
  1414. b = bitcopy_n(v << 3, 3);\
  1415. }
  1416. #define RGB_OUT(d, r, g, b)\
  1417. {\
  1418. ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
  1419. }
  1420. #define BPP 2
  1421. #include "imgconvert_template.c"
  1422. /* bgr24 handling */
  1423. #define RGB_NAME bgr24
  1424. #define RGB_IN(r, g, b, s)\
  1425. {\
  1426. b = (s)[0];\
  1427. g = (s)[1];\
  1428. r = (s)[2];\
  1429. }
  1430. #define RGB_OUT(d, r, g, b)\
  1431. {\
  1432. (d)[0] = b;\
  1433. (d)[1] = g;\
  1434. (d)[2] = r;\
  1435. }
  1436. #define BPP 3
  1437. #include "imgconvert_template.c"
  1438. #undef RGB_IN
  1439. #undef RGB_OUT
  1440. #undef BPP
  1441. /* rgb24 handling */
  1442. #define RGB_NAME rgb24
  1443. #define FMT_RGB24
  1444. #define RGB_IN(r, g, b, s)\
  1445. {\
  1446. r = (s)[0];\
  1447. g = (s)[1];\
  1448. b = (s)[2];\
  1449. }
  1450. #define RGB_OUT(d, r, g, b)\
  1451. {\
  1452. (d)[0] = r;\
  1453. (d)[1] = g;\
  1454. (d)[2] = b;\
  1455. }
  1456. #define BPP 3
  1457. #include "imgconvert_template.c"
  1458. /* rgb32 handling */
  1459. #define RGB_NAME rgb32
  1460. #define FMT_RGB32
  1461. #define RGB_IN(r, g, b, s)\
  1462. {\
  1463. unsigned int v = ((const uint32_t *)(s))[0];\
  1464. r = (v >> 16) & 0xff;\
  1465. g = (v >> 8) & 0xff;\
  1466. b = v & 0xff;\
  1467. }
  1468. #define RGBA_IN(r, g, b, a, s)\
  1469. {\
  1470. unsigned int v = ((const uint32_t *)(s))[0];\
  1471. a = (v >> 24) & 0xff;\
  1472. r = (v >> 16) & 0xff;\
  1473. g = (v >> 8) & 0xff;\
  1474. b = v & 0xff;\
  1475. }
  1476. #define RGBA_OUT(d, r, g, b, a)\
  1477. {\
  1478. ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
  1479. }
  1480. #define BPP 4
  1481. #include "imgconvert_template.c"
  1482. static void mono_to_gray(AVPicture *dst, const AVPicture *src,
  1483. int width, int height, int xor_mask)
  1484. {
  1485. const unsigned char *p;
  1486. unsigned char *q;
  1487. int v, dst_wrap, src_wrap;
  1488. int y, w;
  1489. p = src->data[0];
  1490. src_wrap = src->linesize[0] - ((width + 7) >> 3);
  1491. q = dst->data[0];
  1492. dst_wrap = dst->linesize[0] - width;
  1493. for(y=0;y<height;y++) {
  1494. w = width;
  1495. while (w >= 8) {
  1496. v = *p++ ^ xor_mask;
  1497. q[0] = -(v >> 7);
  1498. q[1] = -((v >> 6) & 1);
  1499. q[2] = -((v >> 5) & 1);
  1500. q[3] = -((v >> 4) & 1);
  1501. q[4] = -((v >> 3) & 1);
  1502. q[5] = -((v >> 2) & 1);
  1503. q[6] = -((v >> 1) & 1);
  1504. q[7] = -((v >> 0) & 1);
  1505. w -= 8;
  1506. q += 8;
  1507. }
  1508. if (w > 0) {
  1509. v = *p++ ^ xor_mask;
  1510. do {
  1511. q[0] = -((v >> 7) & 1);
  1512. q++;
  1513. v <<= 1;
  1514. } while (--w);
  1515. }
  1516. p += src_wrap;
  1517. q += dst_wrap;
  1518. }
  1519. }
  1520. static void monowhite_to_gray(AVPicture *dst, const AVPicture *src,
  1521. int width, int height)
  1522. {
  1523. mono_to_gray(dst, src, width, height, 0xff);
  1524. }
  1525. static void monoblack_to_gray(AVPicture *dst, const AVPicture *src,
  1526. int width, int height)
  1527. {
  1528. mono_to_gray(dst, src, width, height, 0x00);
  1529. }
  1530. static void gray_to_mono(AVPicture *dst, const AVPicture *src,
  1531. int width, int height, int xor_mask)
  1532. {
  1533. int n;
  1534. const uint8_t *s;
  1535. uint8_t *d;
  1536. int j, b, v, n1, src_wrap, dst_wrap, y;
  1537. s = src->data[0];
  1538. src_wrap = src->linesize[0] - width;
  1539. d = dst->data[0];
  1540. dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
  1541. for(y=0;y<height;y++) {
  1542. n = width;
  1543. while (n >= 8) {
  1544. v = 0;
  1545. for(j=0;j<8;j++) {
  1546. b = s[0];
  1547. s++;
  1548. v = (v << 1) | (b >> 7);
  1549. }
  1550. d[0] = v ^ xor_mask;
  1551. d++;
  1552. n -= 8;
  1553. }
  1554. if (n > 0) {
  1555. n1 = n;
  1556. v = 0;
  1557. while (n > 0) {
  1558. b = s[0];
  1559. s++;
  1560. v = (v << 1) | (b >> 7);
  1561. n--;
  1562. }
  1563. d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
  1564. d++;
  1565. }
  1566. s += src_wrap;
  1567. d += dst_wrap;
  1568. }
  1569. }
  1570. static void gray_to_monowhite(AVPicture *dst, const AVPicture *src,
  1571. int width, int height)
  1572. {
  1573. gray_to_mono(dst, src, width, height, 0xff);
  1574. }
  1575. static void gray_to_monoblack(AVPicture *dst, const AVPicture *src,
  1576. int width, int height)
  1577. {
  1578. gray_to_mono(dst, src, width, height, 0x00);
  1579. }
  1580. static void gray_to_gray16(AVPicture *dst, const AVPicture *src,
  1581. int width, int height)
  1582. {
  1583. int x, y, src_wrap, dst_wrap;
  1584. uint8_t *s, *d;
  1585. s = src->data[0];
  1586. src_wrap = src->linesize[0] - width;
  1587. d = dst->data[0];
  1588. dst_wrap = dst->linesize[0] - width * 2;
  1589. for(y=0; y<height; y++){
  1590. for(x=0; x<width; x++){
  1591. *d++ = *s;
  1592. *d++ = *s++;
  1593. }
  1594. s += src_wrap;
  1595. d += dst_wrap;
  1596. }
  1597. }
  1598. static void gray16_to_gray(AVPicture *dst, const AVPicture *src,
  1599. int width, int height)
  1600. {
  1601. int x, y, src_wrap, dst_wrap;
  1602. uint8_t *s, *d;
  1603. s = src->data[0];
  1604. src_wrap = src->linesize[0] - width * 2;
  1605. d = dst->data[0];
  1606. dst_wrap = dst->linesize[0] - width;
  1607. for(y=0; y<height; y++){
  1608. for(x=0; x<width; x++){
  1609. *d++ = *s;
  1610. s += 2;
  1611. }
  1612. s += src_wrap;
  1613. d += dst_wrap;
  1614. }
  1615. }
  1616. static void gray16be_to_gray(AVPicture *dst, const AVPicture *src,
  1617. int width, int height)
  1618. {
  1619. gray16_to_gray(dst, src, width, height);
  1620. }
  1621. static void gray16le_to_gray(AVPicture *dst, const AVPicture *src,
  1622. int width, int height)
  1623. {
  1624. AVPicture tmpsrc = *src;
  1625. tmpsrc.data[0]++;
  1626. gray16_to_gray(dst, &tmpsrc, width, height);
  1627. }
  1628. static void gray16_to_gray16(AVPicture *dst, const AVPicture *src,
  1629. int width, int height)
  1630. {
  1631. int x, y, src_wrap, dst_wrap;
  1632. uint16_t *s, *d;
  1633. s = (uint16_t*)src->data[0];
  1634. src_wrap = (src->linesize[0] - width * 2)/2;
  1635. d = (uint16_t*)dst->data[0];
  1636. dst_wrap = (dst->linesize[0] - width * 2)/2;
  1637. for(y=0; y<height; y++){
  1638. for(x=0; x<width; x++){
  1639. *d++ = bswap_16(*s++);
  1640. }
  1641. s += src_wrap;
  1642. d += dst_wrap;
  1643. }
  1644. }
  1645. typedef struct ConvertEntry {
  1646. void (*convert)(AVPicture *dst,
  1647. const AVPicture *src, int width, int height);
  1648. } ConvertEntry;
  1649. /* Add each new conversion function in this table. In order to be able
  1650. to convert from any format to any format, the following constraints
  1651. must be satisfied:
  1652. - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
  1653. - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
  1654. - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGB32
  1655. - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
  1656. PIX_FMT_RGB24.
  1657. - PIX_FMT_422 must convert to and from PIX_FMT_422P.
  1658. The other conversion functions are just optimizations for common cases.
  1659. */
  1660. static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
  1661. [PIX_FMT_YUV420P] = {
  1662. [PIX_FMT_YUYV422] = {
  1663. .convert = yuv420p_to_yuyv422,
  1664. },
  1665. [PIX_FMT_RGB555] = {
  1666. .convert = yuv420p_to_rgb555
  1667. },
  1668. [PIX_FMT_RGB565] = {
  1669. .convert = yuv420p_to_rgb565
  1670. },
  1671. [PIX_FMT_BGR24] = {
  1672. .convert = yuv420p_to_bgr24
  1673. },
  1674. [PIX_FMT_RGB24] = {
  1675. .convert = yuv420p_to_rgb24
  1676. },
  1677. [PIX_FMT_RGB32] = {
  1678. .convert = yuv420p_to_rgb32
  1679. },
  1680. [PIX_FMT_UYVY422] = {
  1681. .convert = yuv420p_to_uyvy422,
  1682. },
  1683. },
  1684. [PIX_FMT_YUV422P] = {
  1685. [PIX_FMT_YUYV422] = {
  1686. .convert = yuv422p_to_yuyv422,
  1687. },
  1688. [PIX_FMT_UYVY422] = {
  1689. .convert = yuv422p_to_uyvy422,
  1690. },
  1691. },
  1692. [PIX_FMT_YUV444P] = {
  1693. [PIX_FMT_RGB24] = {
  1694. .convert = yuv444p_to_rgb24
  1695. },
  1696. },
  1697. [PIX_FMT_YUVJ420P] = {
  1698. [PIX_FMT_RGB555] = {
  1699. .convert = yuvj420p_to_rgb555
  1700. },
  1701. [PIX_FMT_RGB565] = {
  1702. .convert = yuvj420p_to_rgb565
  1703. },
  1704. [PIX_FMT_BGR24] = {
  1705. .convert = yuvj420p_to_bgr24
  1706. },
  1707. [PIX_FMT_RGB24] = {
  1708. .convert = yuvj420p_to_rgb24
  1709. },
  1710. [PIX_FMT_RGB32] = {
  1711. .convert = yuvj420p_to_rgb32
  1712. },
  1713. },
  1714. [PIX_FMT_YUVJ444P] = {
  1715. [PIX_FMT_RGB24] = {
  1716. .convert = yuvj444p_to_rgb24
  1717. },
  1718. },
  1719. [PIX_FMT_YUYV422] = {
  1720. [PIX_FMT_YUV420P] = {
  1721. .convert = yuyv422_to_yuv420p,
  1722. },
  1723. [PIX_FMT_YUV422P] = {
  1724. .convert = yuyv422_to_yuv422p,
  1725. },
  1726. },
  1727. [PIX_FMT_UYVY422] = {
  1728. [PIX_FMT_YUV420P] = {
  1729. .convert = uyvy422_to_yuv420p,
  1730. },
  1731. [PIX_FMT_YUV422P] = {
  1732. .convert = uyvy422_to_yuv422p,
  1733. },
  1734. },
  1735. [PIX_FMT_RGB24] = {
  1736. [PIX_FMT_YUV420P] = {
  1737. .convert = rgb24_to_yuv420p
  1738. },
  1739. [PIX_FMT_RGB565] = {
  1740. .convert = rgb24_to_rgb565
  1741. },
  1742. [PIX_FMT_RGB555] = {
  1743. .convert = rgb24_to_rgb555
  1744. },
  1745. [PIX_FMT_RGB32] = {
  1746. .convert = rgb24_to_rgb32
  1747. },
  1748. [PIX_FMT_BGR24] = {
  1749. .convert = rgb24_to_bgr24
  1750. },
  1751. [PIX_FMT_GRAY8] = {
  1752. .convert = rgb24_to_gray
  1753. },
  1754. [PIX_FMT_PAL8] = {
  1755. .convert = rgb24_to_pal8
  1756. },
  1757. [PIX_FMT_YUV444P] = {
  1758. .convert = rgb24_to_yuv444p
  1759. },
  1760. [PIX_FMT_YUVJ420P] = {
  1761. .convert = rgb24_to_yuvj420p
  1762. },
  1763. [PIX_FMT_YUVJ444P] = {
  1764. .convert = rgb24_to_yuvj444p
  1765. },
  1766. },
  1767. [PIX_FMT_RGB32] = {
  1768. [PIX_FMT_RGB24] = {
  1769. .convert = rgb32_to_rgb24
  1770. },
  1771. [PIX_FMT_BGR24] = {
  1772. .convert = rgb32_to_bgr24
  1773. },
  1774. [PIX_FMT_RGB565] = {
  1775. .convert = rgb32_to_rgb565
  1776. },
  1777. [PIX_FMT_RGB555] = {
  1778. .convert = rgb32_to_rgb555
  1779. },
  1780. [PIX_FMT_PAL8] = {
  1781. .convert = rgb32_to_pal8
  1782. },
  1783. [PIX_FMT_YUV420P] = {
  1784. .convert = rgb32_to_yuv420p
  1785. },
  1786. [PIX_FMT_GRAY8] = {
  1787. .convert = rgb32_to_gray
  1788. },
  1789. },
  1790. [PIX_FMT_BGR24] = {
  1791. [PIX_FMT_RGB32] = {
  1792. .convert = bgr24_to_rgb32
  1793. },
  1794. [PIX_FMT_RGB24] = {
  1795. .convert = bgr24_to_rgb24
  1796. },
  1797. [PIX_FMT_YUV420P] = {
  1798. .convert = bgr24_to_yuv420p
  1799. },
  1800. [PIX_FMT_GRAY8] = {
  1801. .convert = bgr24_to_gray
  1802. },
  1803. },
  1804. [PIX_FMT_RGB555] = {
  1805. [PIX_FMT_RGB24] = {
  1806. .convert = rgb555_to_rgb24
  1807. },
  1808. [PIX_FMT_RGB32] = {
  1809. .convert = rgb555_to_rgb32
  1810. },
  1811. [PIX_FMT_YUV420P] = {
  1812. .convert = rgb555_to_yuv420p
  1813. },
  1814. [PIX_FMT_GRAY8] = {
  1815. .convert = rgb555_to_gray
  1816. },
  1817. },
  1818. [PIX_FMT_RGB565] = {
  1819. [PIX_FMT_RGB32] = {
  1820. .convert = rgb565_to_rgb32
  1821. },
  1822. [PIX_FMT_RGB24] = {
  1823. .convert = rgb565_to_rgb24
  1824. },
  1825. [PIX_FMT_YUV420P] = {
  1826. .convert = rgb565_to_yuv420p
  1827. },
  1828. [PIX_FMT_GRAY8] = {
  1829. .convert = rgb565_to_gray
  1830. },
  1831. },
  1832. [PIX_FMT_GRAY16BE] = {
  1833. [PIX_FMT_GRAY8] = {
  1834. .convert = gray16be_to_gray
  1835. },
  1836. [PIX_FMT_GRAY16LE] = {
  1837. .convert = gray16_to_gray16
  1838. },
  1839. },
  1840. [PIX_FMT_GRAY16LE] = {
  1841. [PIX_FMT_GRAY8] = {
  1842. .convert = gray16le_to_gray
  1843. },
  1844. [PIX_FMT_GRAY16BE] = {
  1845. .convert = gray16_to_gray16
  1846. },
  1847. },
  1848. [PIX_FMT_GRAY8] = {
  1849. [PIX_FMT_RGB555] = {
  1850. .convert = gray_to_rgb555
  1851. },
  1852. [PIX_FMT_RGB565] = {
  1853. .convert = gray_to_rgb565
  1854. },
  1855. [PIX_FMT_RGB24] = {
  1856. .convert = gray_to_rgb24
  1857. },
  1858. [PIX_FMT_BGR24] = {
  1859. .convert = gray_to_bgr24
  1860. },
  1861. [PIX_FMT_RGB32] = {
  1862. .convert = gray_to_rgb32
  1863. },
  1864. [PIX_FMT_MONOWHITE] = {
  1865. .convert = gray_to_monowhite
  1866. },
  1867. [PIX_FMT_MONOBLACK] = {
  1868. .convert = gray_to_monoblack
  1869. },
  1870. [PIX_FMT_GRAY16LE] = {
  1871. .convert = gray_to_gray16
  1872. },
  1873. [PIX_FMT_GRAY16BE] = {
  1874. .convert = gray_to_gray16
  1875. },
  1876. },
  1877. [PIX_FMT_MONOWHITE] = {
  1878. [PIX_FMT_GRAY8] = {
  1879. .convert = monowhite_to_gray
  1880. },
  1881. },
  1882. [PIX_FMT_MONOBLACK] = {
  1883. [PIX_FMT_GRAY8] = {
  1884. .convert = monoblack_to_gray
  1885. },
  1886. },
  1887. [PIX_FMT_PAL8] = {
  1888. [PIX_FMT_RGB555] = {
  1889. .convert = pal8_to_rgb555
  1890. },
  1891. [PIX_FMT_RGB565] = {
  1892. .convert = pal8_to_rgb565
  1893. },
  1894. [PIX_FMT_BGR24] = {
  1895. .convert = pal8_to_bgr24
  1896. },
  1897. [PIX_FMT_RGB24] = {
  1898. .convert = pal8_to_rgb24
  1899. },
  1900. [PIX_FMT_RGB32] = {
  1901. .convert = pal8_to_rgb32
  1902. },
  1903. },
  1904. [PIX_FMT_UYYVYY411] = {
  1905. [PIX_FMT_YUV411P] = {
  1906. .convert = uyyvyy411_to_yuv411p,
  1907. },
  1908. },
  1909. };
  1910. int avpicture_alloc(AVPicture *picture,
  1911. int pix_fmt, int width, int height)
  1912. {
  1913. int size;
  1914. void *ptr;
  1915. size = avpicture_get_size(pix_fmt, width, height);
  1916. if(size<0)
  1917. goto fail;
  1918. ptr = av_malloc(size);
  1919. if (!ptr)
  1920. goto fail;
  1921. avpicture_fill(picture, ptr, pix_fmt, width, height);
  1922. if(picture->data[1] && !picture->data[2])
  1923. ff_set_systematic_pal((uint32_t*)picture->data[1], pix_fmt);
  1924. return 0;
  1925. fail:
  1926. memset(picture, 0, sizeof(AVPicture));
  1927. return -1;
  1928. }
  1929. void avpicture_free(AVPicture *picture)
  1930. {
  1931. av_free(picture->data[0]);
  1932. }
  1933. /* return true if yuv planar */
  1934. static inline int is_yuv_planar(const PixFmtInfo *ps)
  1935. {
  1936. return (ps->color_type == FF_COLOR_YUV ||
  1937. ps->color_type == FF_COLOR_YUV_JPEG) &&
  1938. ps->pixel_type == FF_PIXEL_PLANAR;
  1939. }
  1940. int av_picture_crop(AVPicture *dst, const AVPicture *src,
  1941. int pix_fmt, int top_band, int left_band)
  1942. {
  1943. int y_shift;
  1944. int x_shift;
  1945. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || !is_yuv_planar(&pix_fmt_info[pix_fmt]))
  1946. return -1;
  1947. y_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  1948. x_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  1949. dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
  1950. dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift);
  1951. dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift);
  1952. dst->linesize[0] = src->linesize[0];
  1953. dst->linesize[1] = src->linesize[1];
  1954. dst->linesize[2] = src->linesize[2];
  1955. return 0;
  1956. }
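/* Added annotation (hedged): av_picture_crop() copies no pixels; the dst
 * pointers simply reference src's buffer with the requested bands skipped,
 * so dst must not be freed separately and src must outlive it. For
 * subsampled formats the bands should be multiples of the subsampling
 * factor. A sketch, assuming 'src_pic' is an already filled AVPicture:
 *
 * @code
 * AVPicture view;
 * // skip 16 lines at the top and 32 pixels at the left (both even)
 * if (av_picture_crop(&view, &src_pic, PIX_FMT_YUV420P, 16, 32) == 0) {
 *     // 'view' is a window into src_pic's planes
 * }
 * @endcode
 */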
  1957. int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
  1958. int pix_fmt, int padtop, int padbottom, int padleft, int padright,
  1959. int *color)
  1960. {
  1961. uint8_t *optr;
  1962. int y_shift;
  1963. int x_shift;
  1964. int yheight;
  1965. int i, y;
  1966. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB ||
  1967. !is_yuv_planar(&pix_fmt_info[pix_fmt])) return -1;
  1968. for (i = 0; i < 3; i++) {
  1969. x_shift = i ? pix_fmt_info[pix_fmt].x_chroma_shift : 0;
  1970. y_shift = i ? pix_fmt_info[pix_fmt].y_chroma_shift : 0;
  1971. if (padtop || padleft) {
  1972. memset(dst->data[i], color[i],
  1973. dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift));
  1974. }
  1975. if (padleft || padright) {
  1976. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  1977. (dst->linesize[i] - (padright >> x_shift));
  1978. yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
  1979. for (y = 0; y < yheight; y++) {
  1980. memset(optr, color[i], (padleft + padright) >> x_shift);
  1981. optr += dst->linesize[i];
  1982. }
  1983. }
  1984. if (src) { /* first line */
  1985. uint8_t *iptr = src->data[i];
  1986. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  1987. (padleft >> x_shift);
  1988. memcpy(optr, iptr, (width - padleft - padright) >> x_shift);
  1989. iptr += src->linesize[i];
  1990. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  1991. (dst->linesize[i] - (padright >> x_shift));
  1992. yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
  1993. for (y = 0; y < yheight; y++) {
  1994. memset(optr, color[i], (padleft + padright) >> x_shift);
  1995. memcpy(optr + ((padleft + padright) >> x_shift), iptr,
  1996. (width - padleft - padright) >> x_shift);
  1997. iptr += src->linesize[i];
  1998. optr += dst->linesize[i];
  1999. }
  2000. }
  2001. if (padbottom || padright) {
  2002. optr = dst->data[i] + dst->linesize[i] *
  2003. ((height - padbottom) >> y_shift) - (padright >> x_shift);
  2004. memset(optr, color[i],dst->linesize[i] *
  2005. (padbottom >> y_shift) + (padright >> x_shift));
  2006. }
  2007. }
  2008. return 0;
  2009. }
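/* Illustrative usage sketch (added annotation): 'width' and 'height' are the
 * dimensions of the padded destination picture, 'src' is the smaller input,
 * and 'color' gives one fill value per plane. 'padded' and 'small' are
 * hypothetical, already allocated AVPictures.
 *
 * @code
 * int black[3] = { 16, 128, 128 };   // CCIR-range black for Y, U, V
 * // pad a 704x560 picture to 720x576 with an 8 pixel border on each side
 * av_picture_pad(&padded, &small, 576, 720, PIX_FMT_YUV420P,
 *                8, 8, 8, 8, black);
 * @endcode
 */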
  2010. #if !CONFIG_SWSCALE
  2011. static uint8_t y_ccir_to_jpeg[256];
  2012. static uint8_t y_jpeg_to_ccir[256];
  2013. static uint8_t c_ccir_to_jpeg[256];
  2014. static uint8_t c_jpeg_to_ccir[256];
  2015. /* init various conversion tables */
  2016. static av_cold void img_convert_init(void)
  2017. {
  2018. int i;
  2019. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  2020. for(i = 0;i < 256; i++) {
  2021. y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
  2022. y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
  2023. c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
  2024. c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
  2025. }
  2026. }
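/* Added annotation (hedged): the tables above remap samples between the
 * full-range "JPEG" representation (0..255) and the limited-range CCIR 601
 * representation (luma about 16..235, chroma about 16..240). For luma the
 * mapping is approximately
 *
 *     y_ccir = y_jpeg * 219/255 + 16
 *     y_jpeg = clip((y_ccir - 16) * 255/219)
 *
 * with the chroma tables applying the analogous scaling around 128. */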
2027. /* apply the given lookup table to each pixel */
  2028. static void img_apply_table(uint8_t *dst, int dst_wrap,
  2029. const uint8_t *src, int src_wrap,
  2030. int width, int height, const uint8_t *table1)
  2031. {
  2032. int n;
  2033. const uint8_t *s;
  2034. uint8_t *d;
  2035. const uint8_t *table;
  2036. table = table1;
  2037. for(;height > 0; height--) {
  2038. s = src;
  2039. d = dst;
  2040. n = width;
  2041. while (n >= 4) {
  2042. d[0] = table[s[0]];
  2043. d[1] = table[s[1]];
  2044. d[2] = table[s[2]];
  2045. d[3] = table[s[3]];
  2046. d += 4;
  2047. s += 4;
  2048. n -= 4;
  2049. }
  2050. while (n > 0) {
  2051. d[0] = table[s[0]];
  2052. d++;
  2053. s++;
  2054. n--;
  2055. }
  2056. dst += dst_wrap;
  2057. src += src_wrap;
  2058. }
  2059. }
2060. /* XXX: use a generic filter? */
  2061. /* XXX: in most cases, the sampling position is incorrect */
  2062. /* 4x1 -> 1x1 */
  2063. static void shrink41(uint8_t *dst, int dst_wrap,
  2064. const uint8_t *src, int src_wrap,
  2065. int width, int height)
  2066. {
  2067. int w;
  2068. const uint8_t *s;
  2069. uint8_t *d;
  2070. for(;height > 0; height--) {
  2071. s = src;
  2072. d = dst;
  2073. for(w = width;w > 0; w--) {
  2074. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  2075. s += 4;
  2076. d++;
  2077. }
  2078. src += src_wrap;
  2079. dst += dst_wrap;
  2080. }
  2081. }
  2082. /* 2x1 -> 1x1 */
  2083. static void shrink21(uint8_t *dst, int dst_wrap,
  2084. const uint8_t *src, int src_wrap,
  2085. int width, int height)
  2086. {
  2087. int w;
  2088. const uint8_t *s;
  2089. uint8_t *d;
  2090. for(;height > 0; height--) {
  2091. s = src;
  2092. d = dst;
  2093. for(w = width;w > 0; w--) {
  2094. d[0] = (s[0] + s[1]) >> 1;
  2095. s += 2;
  2096. d++;
  2097. }
  2098. src += src_wrap;
  2099. dst += dst_wrap;
  2100. }
  2101. }
  2102. /* 1x2 -> 1x1 */
  2103. static void shrink12(uint8_t *dst, int dst_wrap,
  2104. const uint8_t *src, int src_wrap,
  2105. int width, int height)
  2106. {
  2107. int w;
  2108. uint8_t *d;
  2109. const uint8_t *s1, *s2;
  2110. for(;height > 0; height--) {
  2111. s1 = src;
  2112. s2 = s1 + src_wrap;
  2113. d = dst;
  2114. for(w = width;w >= 4; w-=4) {
  2115. d[0] = (s1[0] + s2[0]) >> 1;
  2116. d[1] = (s1[1] + s2[1]) >> 1;
  2117. d[2] = (s1[2] + s2[2]) >> 1;
  2118. d[3] = (s1[3] + s2[3]) >> 1;
  2119. s1 += 4;
  2120. s2 += 4;
  2121. d += 4;
  2122. }
  2123. for(;w > 0; w--) {
  2124. d[0] = (s1[0] + s2[0]) >> 1;
  2125. s1++;
  2126. s2++;
  2127. d++;
  2128. }
  2129. src += 2 * src_wrap;
  2130. dst += dst_wrap;
  2131. }
  2132. }
  2133. static void grow21_line(uint8_t *dst, const uint8_t *src,
  2134. int width)
  2135. {
  2136. int w;
  2137. const uint8_t *s1;
  2138. uint8_t *d;
  2139. s1 = src;
  2140. d = dst;
  2141. for(w = width;w >= 4; w-=4) {
  2142. d[1] = d[0] = s1[0];
  2143. d[3] = d[2] = s1[1];
  2144. s1 += 2;
  2145. d += 4;
  2146. }
  2147. for(;w >= 2; w -= 2) {
  2148. d[1] = d[0] = s1[0];
  2149. s1 ++;
  2150. d += 2;
  2151. }
  2152. /* only needed if width is not a multiple of two */
2153. /* XXX: verify that */
  2154. if (w) {
  2155. d[0] = s1[0];
  2156. }
  2157. }
  2158. static void grow41_line(uint8_t *dst, const uint8_t *src,
  2159. int width)
  2160. {
  2161. int w, v;
  2162. const uint8_t *s1;
  2163. uint8_t *d;
  2164. s1 = src;
  2165. d = dst;
  2166. for(w = width;w >= 4; w-=4) {
  2167. v = s1[0];
  2168. d[0] = v;
  2169. d[1] = v;
  2170. d[2] = v;
  2171. d[3] = v;
  2172. s1 ++;
  2173. d += 4;
  2174. }
  2175. }
  2176. /* 1x1 -> 2x1 */
  2177. static void grow21(uint8_t *dst, int dst_wrap,
  2178. const uint8_t *src, int src_wrap,
  2179. int width, int height)
  2180. {
  2181. for(;height > 0; height--) {
  2182. grow21_line(dst, src, width);
  2183. src += src_wrap;
  2184. dst += dst_wrap;
  2185. }
  2186. }
  2187. /* 1x1 -> 1x2 */
  2188. static void grow12(uint8_t *dst, int dst_wrap,
  2189. const uint8_t *src, int src_wrap,
  2190. int width, int height)
  2191. {
  2192. for(;height > 0; height-=2) {
  2193. memcpy(dst, src, width);
  2194. dst += dst_wrap;
  2195. memcpy(dst, src, width);
  2196. dst += dst_wrap;
  2197. src += src_wrap;
  2198. }
  2199. }
  2200. /* 1x1 -> 2x2 */
  2201. static void grow22(uint8_t *dst, int dst_wrap,
  2202. const uint8_t *src, int src_wrap,
  2203. int width, int height)
  2204. {
  2205. for(;height > 0; height--) {
  2206. grow21_line(dst, src, width);
  2207. if (height%2)
  2208. src += src_wrap;
  2209. dst += dst_wrap;
  2210. }
  2211. }
  2212. /* 1x1 -> 4x1 */
  2213. static void grow41(uint8_t *dst, int dst_wrap,
  2214. const uint8_t *src, int src_wrap,
  2215. int width, int height)
  2216. {
  2217. for(;height > 0; height--) {
  2218. grow41_line(dst, src, width);
  2219. src += src_wrap;
  2220. dst += dst_wrap;
  2221. }
  2222. }
  2223. /* 1x1 -> 4x4 */
  2224. static void grow44(uint8_t *dst, int dst_wrap,
  2225. const uint8_t *src, int src_wrap,
  2226. int width, int height)
  2227. {
  2228. for(;height > 0; height--) {
  2229. grow41_line(dst, src, width);
  2230. if ((height & 3) == 1)
  2231. src += src_wrap;
  2232. dst += dst_wrap;
  2233. }
  2234. }
  2235. /* 1x2 -> 2x1 */
  2236. static void conv411(uint8_t *dst, int dst_wrap,
  2237. const uint8_t *src, int src_wrap,
  2238. int width, int height)
  2239. {
  2240. int w, c;
  2241. const uint8_t *s1, *s2;
  2242. uint8_t *d;
  2243. width>>=1;
  2244. for(;height > 0; height--) {
  2245. s1 = src;
  2246. s2 = src + src_wrap;
  2247. d = dst;
  2248. for(w = width;w > 0; w--) {
  2249. c = (s1[0] + s2[0]) >> 1;
  2250. d[0] = c;
  2251. d[1] = c;
  2252. s1++;
  2253. s2++;
  2254. d += 2;
  2255. }
  2256. src += src_wrap * 2;
  2257. dst += dst_wrap;
  2258. }
  2259. }
  2260. /* XXX: always use linesize. Return -1 if not supported */
  2261. int img_convert(AVPicture *dst, int dst_pix_fmt,
  2262. const AVPicture *src, int src_pix_fmt,
  2263. int src_width, int src_height)
  2264. {
  2265. static int initialized;
  2266. int i, ret, dst_width, dst_height, int_pix_fmt;
  2267. const PixFmtInfo *src_pix, *dst_pix;
  2268. const ConvertEntry *ce;
  2269. AVPicture tmp1, *tmp = &tmp1;
  2270. if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
  2271. dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
  2272. return -1;
  2273. if (src_width <= 0 || src_height <= 0)
  2274. return 0;
  2275. if (!initialized) {
  2276. initialized = 1;
  2277. img_convert_init();
  2278. }
  2279. dst_width = src_width;
  2280. dst_height = src_height;
  2281. dst_pix = &pix_fmt_info[dst_pix_fmt];
  2282. src_pix = &pix_fmt_info[src_pix_fmt];
  2283. if (src_pix_fmt == dst_pix_fmt) {
  2284. /* no conversion needed: just copy */
  2285. av_picture_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
  2286. return 0;
  2287. }
  2288. ce = &convert_table[src_pix_fmt][dst_pix_fmt];
  2289. if (ce->convert) {
  2290. /* specific conversion routine */
  2291. ce->convert(dst, src, dst_width, dst_height);
  2292. return 0;
  2293. }
  2294. /* gray to YUV */
  2295. if (is_yuv_planar(dst_pix) &&
  2296. src_pix_fmt == PIX_FMT_GRAY8) {
  2297. int w, h, y;
  2298. uint8_t *d;
  2299. if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
  2300. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2301. src->data[0], src->linesize[0],
  2302. dst_width, dst_height);
  2303. } else {
  2304. img_apply_table(dst->data[0], dst->linesize[0],
  2305. src->data[0], src->linesize[0],
  2306. dst_width, dst_height,
  2307. y_jpeg_to_ccir);
  2308. }
  2309. /* fill U and V with 128 */
  2310. w = dst_width;
  2311. h = dst_height;
  2312. w >>= dst_pix->x_chroma_shift;
  2313. h >>= dst_pix->y_chroma_shift;
  2314. for(i = 1; i <= 2; i++) {
  2315. d = dst->data[i];
  2316. for(y = 0; y< h; y++) {
  2317. memset(d, 128, w);
  2318. d += dst->linesize[i];
  2319. }
  2320. }
  2321. return 0;
  2322. }
  2323. /* YUV to gray */
  2324. if (is_yuv_planar(src_pix) &&
  2325. dst_pix_fmt == PIX_FMT_GRAY8) {
  2326. if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
  2327. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2328. src->data[0], src->linesize[0],
  2329. dst_width, dst_height);
  2330. } else {
  2331. img_apply_table(dst->data[0], dst->linesize[0],
  2332. src->data[0], src->linesize[0],
  2333. dst_width, dst_height,
  2334. y_ccir_to_jpeg);
  2335. }
  2336. return 0;
  2337. }
  2338. /* YUV to YUV planar */
  2339. if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
  2340. int x_shift, y_shift, w, h, xy_shift;
  2341. void (*resize_func)(uint8_t *dst, int dst_wrap,
  2342. const uint8_t *src, int src_wrap,
  2343. int width, int height);
2344. /* compute the chroma plane size of the more subsampled of the two formats */
  2345. w = dst_width;
  2346. h = dst_height;
  2347. if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
  2348. w >>= dst_pix->x_chroma_shift;
  2349. else
  2350. w >>= src_pix->x_chroma_shift;
  2351. if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
  2352. h >>= dst_pix->y_chroma_shift;
  2353. else
  2354. h >>= src_pix->y_chroma_shift;
  2355. x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
  2356. y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
  2357. xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
2358. /* the switch below must provide resize filters at least for conversions
2359. from and to the YUV444 format */
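/* Worked example of the xy_shift encoding above (added annotation):
   converting YUV420P (chroma shifts 1,1) to YUV422P (chroma shifts 1,0)
   gives x_shift = 1 - 1 = 0 and y_shift = 0 - 1 = -1, hence
   xy_shift = ((0 & 0xf) << 4) | (-1 & 0xf) = 0x0f, which selects grow12()
   to duplicate every chroma line vertically. */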
  2360. switch(xy_shift) {
  2361. case 0x00:
  2362. resize_func = ff_img_copy_plane;
  2363. break;
  2364. case 0x10:
  2365. resize_func = shrink21;
  2366. break;
  2367. case 0x20:
  2368. resize_func = shrink41;
  2369. break;
  2370. case 0x01:
  2371. resize_func = shrink12;
  2372. break;
  2373. case 0x11:
  2374. resize_func = ff_shrink22;
  2375. break;
  2376. case 0x22:
  2377. resize_func = ff_shrink44;
  2378. break;
  2379. case 0xf0:
  2380. resize_func = grow21;
  2381. break;
  2382. case 0x0f:
  2383. resize_func = grow12;
  2384. break;
  2385. case 0xe0:
  2386. resize_func = grow41;
  2387. break;
  2388. case 0xff:
  2389. resize_func = grow22;
  2390. break;
  2391. case 0xee:
  2392. resize_func = grow44;
  2393. break;
  2394. case 0xf1:
  2395. resize_func = conv411;
  2396. break;
  2397. default:
  2398. /* currently not handled */
  2399. goto no_chroma_filter;
  2400. }
  2401. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2402. src->data[0], src->linesize[0],
  2403. dst_width, dst_height);
  2404. for(i = 1;i <= 2; i++)
  2405. resize_func(dst->data[i], dst->linesize[i],
  2406. src->data[i], src->linesize[i],
  2407. dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
  2408. /* if yuv color space conversion is needed, we do it here on
  2409. the destination image */
  2410. if (dst_pix->color_type != src_pix->color_type) {
  2411. const uint8_t *y_table, *c_table;
  2412. if (dst_pix->color_type == FF_COLOR_YUV) {
  2413. y_table = y_jpeg_to_ccir;
  2414. c_table = c_jpeg_to_ccir;
  2415. } else {
  2416. y_table = y_ccir_to_jpeg;
  2417. c_table = c_ccir_to_jpeg;
  2418. }
  2419. img_apply_table(dst->data[0], dst->linesize[0],
  2420. dst->data[0], dst->linesize[0],
  2421. dst_width, dst_height,
  2422. y_table);
  2423. for(i = 1;i <= 2; i++)
  2424. img_apply_table(dst->data[i], dst->linesize[i],
  2425. dst->data[i], dst->linesize[i],
  2426. dst_width>>dst_pix->x_chroma_shift,
  2427. dst_height>>dst_pix->y_chroma_shift,
  2428. c_table);
  2429. }
  2430. return 0;
  2431. }
  2432. no_chroma_filter:
  2433. /* try to use an intermediate format */
  2434. if (src_pix_fmt == PIX_FMT_YUYV422 ||
  2435. dst_pix_fmt == PIX_FMT_YUYV422) {
  2436. /* specific case: convert to YUV422P first */
  2437. int_pix_fmt = PIX_FMT_YUV422P;
  2438. } else if (src_pix_fmt == PIX_FMT_UYVY422 ||
  2439. dst_pix_fmt == PIX_FMT_UYVY422) {
  2440. /* specific case: convert to YUV422P first */
  2441. int_pix_fmt = PIX_FMT_YUV422P;
  2442. } else if (src_pix_fmt == PIX_FMT_UYYVYY411 ||
  2443. dst_pix_fmt == PIX_FMT_UYYVYY411) {
  2444. /* specific case: convert to YUV411P first */
  2445. int_pix_fmt = PIX_FMT_YUV411P;
  2446. } else if ((src_pix->color_type == FF_COLOR_GRAY &&
  2447. src_pix_fmt != PIX_FMT_GRAY8) ||
  2448. (dst_pix->color_type == FF_COLOR_GRAY &&
  2449. dst_pix_fmt != PIX_FMT_GRAY8)) {
  2450. /* gray8 is the normalized format */
  2451. int_pix_fmt = PIX_FMT_GRAY8;
  2452. } else if ((is_yuv_planar(src_pix) &&
  2453. src_pix_fmt != PIX_FMT_YUV444P &&
  2454. src_pix_fmt != PIX_FMT_YUVJ444P)) {
  2455. /* yuv444 is the normalized format */
  2456. if (src_pix->color_type == FF_COLOR_YUV_JPEG)
  2457. int_pix_fmt = PIX_FMT_YUVJ444P;
  2458. else
  2459. int_pix_fmt = PIX_FMT_YUV444P;
  2460. } else if ((is_yuv_planar(dst_pix) &&
  2461. dst_pix_fmt != PIX_FMT_YUV444P &&
  2462. dst_pix_fmt != PIX_FMT_YUVJ444P)) {
  2463. /* yuv444 is the normalized format */
  2464. if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
  2465. int_pix_fmt = PIX_FMT_YUVJ444P;
  2466. else
  2467. int_pix_fmt = PIX_FMT_YUV444P;
  2468. } else {
  2469. /* the two formats are rgb or gray8 or yuv[j]444p */
  2470. if (src_pix->is_alpha && dst_pix->is_alpha)
  2471. int_pix_fmt = PIX_FMT_RGB32;
  2472. else
  2473. int_pix_fmt = PIX_FMT_RGB24;
  2474. }
  2475. if (src_pix_fmt == int_pix_fmt)
  2476. return -1;
  2477. if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2478. return -1;
  2479. ret = -1;
  2480. if (img_convert(tmp, int_pix_fmt,
  2481. src, src_pix_fmt, src_width, src_height) < 0)
  2482. goto fail1;
  2483. if (img_convert(dst, dst_pix_fmt,
  2484. tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2485. goto fail1;
  2486. ret = 0;
  2487. fail1:
  2488. avpicture_free(tmp);
  2489. return ret;
  2490. }
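/* Illustrative usage sketch (added annotation): img_convert() is only built
 * when swscale is not configured; with libswscale available, sws_scale() is
 * the intended replacement. A minimal call, assuming 'src_pic' and 'dst_pic'
 * are already allocated AVPictures of the same 640x480 size:
 *
 * @code
 * if (img_convert(&dst_pic, PIX_FMT_RGB24,
 *                 &src_pic, PIX_FMT_YUV420P, 640, 480) < 0) {
 *     // conversion between these two formats is not supported
 * }
 * @endcode
 */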
  2491. #endif
2492. /* NOTE: we scan all the pixels to have exact information */
  2493. static int get_alpha_info_pal8(const AVPicture *src, int width, int height)
  2494. {
  2495. const unsigned char *p;
  2496. int src_wrap, ret, x, y;
  2497. unsigned int a;
  2498. uint32_t *palette = (uint32_t *)src->data[1];
  2499. p = src->data[0];
  2500. src_wrap = src->linesize[0] - width;
  2501. ret = 0;
  2502. for(y=0;y<height;y++) {
  2503. for(x=0;x<width;x++) {
  2504. a = palette[p[0]] >> 24;
  2505. if (a == 0x00) {
  2506. ret |= FF_ALPHA_TRANSP;
  2507. } else if (a != 0xff) {
  2508. ret |= FF_ALPHA_SEMI_TRANSP;
  2509. }
  2510. p++;
  2511. }
  2512. p += src_wrap;
  2513. }
  2514. return ret;
  2515. }
  2516. int img_get_alpha_info(const AVPicture *src,
  2517. int pix_fmt, int width, int height)
  2518. {
  2519. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  2520. int ret;
  2522. /* no alpha can be represented in format */
  2523. if (!pf->is_alpha)
  2524. return 0;
  2525. switch(pix_fmt) {
  2526. case PIX_FMT_RGB32:
  2527. ret = get_alpha_info_rgb32(src, width, height);
  2528. break;
  2529. case PIX_FMT_PAL8:
  2530. ret = get_alpha_info_pal8(src, width, height);
  2531. break;
  2532. default:
2533. /* we do not know, so report all alpha flags */
  2534. ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
  2535. break;
  2536. }
  2537. return ret;
  2538. }
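/* Added annotation (hedged): the return value is a bit mask, so both flags
 * can be tested independently:
 *
 * @code
 * int info = img_get_alpha_info(&pic, PIX_FMT_PAL8, width, height);
 * if (info & FF_ALPHA_TRANSP) {
 *     // at least one fully transparent pixel
 * }
 * if (info & FF_ALPHA_SEMI_TRANSP) {
 *     // at least one partially transparent pixel
 * }
 * @endcode
 */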
  2539. #if HAVE_MMX
  2540. #define DEINT_INPLACE_LINE_LUM \
  2541. movd_m2r(lum_m4[0],mm0);\
  2542. movd_m2r(lum_m3[0],mm1);\
  2543. movd_m2r(lum_m2[0],mm2);\
  2544. movd_m2r(lum_m1[0],mm3);\
  2545. movd_m2r(lum[0],mm4);\
  2546. punpcklbw_r2r(mm7,mm0);\
  2547. movd_r2m(mm2,lum_m4[0]);\
  2548. punpcklbw_r2r(mm7,mm1);\
  2549. punpcklbw_r2r(mm7,mm2);\
  2550. punpcklbw_r2r(mm7,mm3);\
  2551. punpcklbw_r2r(mm7,mm4);\
  2552. paddw_r2r(mm3,mm1);\
  2553. psllw_i2r(1,mm2);\
  2554. paddw_r2r(mm4,mm0);\
  2555. psllw_i2r(2,mm1);\
  2556. paddw_r2r(mm6,mm2);\
  2557. paddw_r2r(mm2,mm1);\
  2558. psubusw_r2r(mm0,mm1);\
  2559. psrlw_i2r(3,mm1);\
  2560. packuswb_r2r(mm7,mm1);\
  2561. movd_r2m(mm1,lum_m2[0]);
  2562. #define DEINT_LINE_LUM \
  2563. movd_m2r(lum_m4[0],mm0);\
  2564. movd_m2r(lum_m3[0],mm1);\
  2565. movd_m2r(lum_m2[0],mm2);\
  2566. movd_m2r(lum_m1[0],mm3);\
  2567. movd_m2r(lum[0],mm4);\
  2568. punpcklbw_r2r(mm7,mm0);\
  2569. punpcklbw_r2r(mm7,mm1);\
  2570. punpcklbw_r2r(mm7,mm2);\
  2571. punpcklbw_r2r(mm7,mm3);\
  2572. punpcklbw_r2r(mm7,mm4);\
  2573. paddw_r2r(mm3,mm1);\
  2574. psllw_i2r(1,mm2);\
  2575. paddw_r2r(mm4,mm0);\
  2576. psllw_i2r(2,mm1);\
  2577. paddw_r2r(mm6,mm2);\
  2578. paddw_r2r(mm2,mm1);\
  2579. psubusw_r2r(mm0,mm1);\
  2580. psrlw_i2r(3,mm1);\
  2581. packuswb_r2r(mm7,mm1);\
  2582. movd_r2m(mm1,dst[0]);
  2583. #endif
  2584. /* filter parameters: [-1 4 2 4 -1] // 8 */
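/* Added annotation: the taps above are applied per sample as
 *     out = clip((-lum_m4 + 4*lum_m3 + 2*lum_m2 + 4*lum_m1 - lum + 4) >> 3)
 * so a flat area where every tap equals v gives (8*v + 4) >> 3 = v and is
 * left unchanged; the rounding constant 4 is ff_pw_4 in the MMX path. */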
  2585. static void deinterlace_line(uint8_t *dst,
  2586. const uint8_t *lum_m4, const uint8_t *lum_m3,
  2587. const uint8_t *lum_m2, const uint8_t *lum_m1,
  2588. const uint8_t *lum,
  2589. int size)
  2590. {
  2591. #if !HAVE_MMX
  2592. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  2593. int sum;
  2594. for(;size > 0;size--) {
  2595. sum = -lum_m4[0];
  2596. sum += lum_m3[0] << 2;
  2597. sum += lum_m2[0] << 1;
  2598. sum += lum_m1[0] << 2;
  2599. sum += -lum[0];
  2600. dst[0] = cm[(sum + 4) >> 3];
  2601. lum_m4++;
  2602. lum_m3++;
  2603. lum_m2++;
  2604. lum_m1++;
  2605. lum++;
  2606. dst++;
  2607. }
  2608. #else
  2609. {
  2610. pxor_r2r(mm7,mm7);
  2611. movq_m2r(ff_pw_4,mm6);
  2612. }
  2613. for (;size > 3; size-=4) {
  2614. DEINT_LINE_LUM
  2615. lum_m4+=4;
  2616. lum_m3+=4;
  2617. lum_m2+=4;
  2618. lum_m1+=4;
  2619. lum+=4;
  2620. dst+=4;
  2621. }
  2622. #endif
  2623. }
  2624. static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
  2625. int size)
  2626. {
  2627. #if !HAVE_MMX
  2628. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  2629. int sum;
  2630. for(;size > 0;size--) {
  2631. sum = -lum_m4[0];
  2632. sum += lum_m3[0] << 2;
  2633. sum += lum_m2[0] << 1;
  2634. lum_m4[0]=lum_m2[0];
  2635. sum += lum_m1[0] << 2;
  2636. sum += -lum[0];
  2637. lum_m2[0] = cm[(sum + 4) >> 3];
  2638. lum_m4++;
  2639. lum_m3++;
  2640. lum_m2++;
  2641. lum_m1++;
  2642. lum++;
  2643. }
  2644. #else
  2645. {
  2646. pxor_r2r(mm7,mm7);
  2647. movq_m2r(ff_pw_4,mm6);
  2648. }
  2649. for (;size > 3; size-=4) {
  2650. DEINT_INPLACE_LINE_LUM
  2651. lum_m4+=4;
  2652. lum_m3+=4;
  2653. lum_m2+=4;
  2654. lum_m1+=4;
  2655. lum+=4;
  2656. }
  2657. #endif
  2658. }
2659. /* deinterlacing: linear filter with 2 temporal taps and 3 spatial taps.
2660. The top field is copied as is, but the bottom field is deinterlaced
2661. against the top field. */
  2662. static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
  2663. const uint8_t *src1, int src_wrap,
  2664. int width, int height)
  2665. {
  2666. const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
  2667. int y;
  2668. src_m2 = src1;
  2669. src_m1 = src1;
  2670. src_0=&src_m1[src_wrap];
  2671. src_p1=&src_0[src_wrap];
  2672. src_p2=&src_p1[src_wrap];
  2673. for(y=0;y<(height-2);y+=2) {
  2674. memcpy(dst,src_m1,width);
  2675. dst += dst_wrap;
  2676. deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
  2677. src_m2 = src_0;
  2678. src_m1 = src_p1;
  2679. src_0 = src_p2;
  2680. src_p1 += 2*src_wrap;
  2681. src_p2 += 2*src_wrap;
  2682. dst += dst_wrap;
  2683. }
  2684. memcpy(dst,src_m1,width);
  2685. dst += dst_wrap;
  2686. /* do last line */
  2687. deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
  2688. }
  2689. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  2690. int width, int height)
  2691. {
  2692. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  2693. int y;
  2694. uint8_t *buf;
2695. buf = (uint8_t*)av_malloc(width);
if (!buf)
    return;
2696. src_m1 = src1;
2697. memcpy(buf,src_m1,width);
  2698. src_0=&src_m1[src_wrap];
  2699. src_p1=&src_0[src_wrap];
  2700. src_p2=&src_p1[src_wrap];
  2701. for(y=0;y<(height-2);y+=2) {
  2702. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  2703. src_m1 = src_p1;
  2704. src_0 = src_p2;
  2705. src_p1 += 2*src_wrap;
  2706. src_p2 += 2*src_wrap;
  2707. }
  2708. /* do last line */
  2709. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  2710. av_free(buf);
  2711. }
  2712. int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
  2713. int pix_fmt, int width, int height)
  2714. {
  2715. int i;
  2716. if (pix_fmt != PIX_FMT_YUV420P &&
  2717. pix_fmt != PIX_FMT_YUV422P &&
  2718. pix_fmt != PIX_FMT_YUV444P &&
  2719. pix_fmt != PIX_FMT_YUV411P &&
  2720. pix_fmt != PIX_FMT_GRAY8)
  2721. return -1;
  2722. if ((width & 3) != 0 || (height & 3) != 0)
  2723. return -1;
  2724. for(i=0;i<3;i++) {
  2725. if (i == 1) {
  2726. switch(pix_fmt) {
  2727. case PIX_FMT_YUV420P:
  2728. width >>= 1;
  2729. height >>= 1;
  2730. break;
  2731. case PIX_FMT_YUV422P:
  2732. width >>= 1;
  2733. break;
  2734. case PIX_FMT_YUV411P:
  2735. width >>= 2;
  2736. break;
  2737. default:
  2738. break;
  2739. }
  2740. if (pix_fmt == PIX_FMT_GRAY8) {
  2741. break;
  2742. }
  2743. }
  2744. if (src == dst) {
  2745. deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i],
  2746. width, height);
  2747. } else {
  2748. deinterlace_bottom_field(dst->data[i],dst->linesize[i],
  2749. src->data[i], src->linesize[i],
  2750. width, height);
  2751. }
  2752. }
  2753. emms_c();
  2754. return 0;
  2755. }
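/* Illustrative usage sketch (added annotation): in-place deinterlacing of a
 * YUV420P picture; the call fails with -1 unless the format is one of the
 * supported planar/gray formats and width and height are multiples of 4.
 *
 * @code
 * if (avpicture_deinterlace(&pic, &pic, PIX_FMT_YUV420P, 640, 480) < 0) {
 *     // unsupported pixel format or dimensions not a multiple of 4
 * }
 * @endcode
 */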