  1. /*
  2. * Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file imgconvert.c
  23. * misc image conversion routines
  24. */
  25. /* TODO:
  26. * - write 'ffimg' program to test all the image related stuff
  27. * - move all api to slice based system
  28. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  29. */
  30. #include "avcodec.h"
  31. #include "dsputil.h"
  32. #include "colorspace.h"
  33. #if HAVE_MMX
  34. #include "x86/mmx.h"
  35. #include "x86/dsputil_mmx.h"
  36. #endif
  37. #define xglue(x, y) x ## y
  38. #define glue(x, y) xglue(x, y)
  39. #define FF_COLOR_RGB 0 /**< RGB color space */
  40. #define FF_COLOR_GRAY 1 /**< gray color space */
  41. #define FF_COLOR_YUV 2 /**< YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
  42. #define FF_COLOR_YUV_JPEG 3 /**< YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
  43. #define FF_PIXEL_PLANAR 0 /**< each channel has one component in AVPicture */
  44. #define FF_PIXEL_PACKED 1 /**< only one component containing all the channels */
  45. #define FF_PIXEL_PALETTE 2 /**< one component containing indices for a palette */
  46. typedef struct PixFmtInfo {
  47. const char *name;
  48. uint8_t nb_channels; /**< number of channels (including alpha) */
  49. uint8_t color_type; /**< color type (see FF_COLOR_xxx constants) */
  50. uint8_t pixel_type; /**< pixel storage type (see FF_PIXEL_xxx constants) */
  51. uint8_t is_alpha : 1; /**< true if alpha can be specified */
  52. uint8_t x_chroma_shift; /**< X chroma subsampling factor is 2 ^ shift */
  53. uint8_t y_chroma_shift; /**< Y chroma subsampling factor is 2 ^ shift */
  54. uint8_t depth; /**< bit depth of the color components */
  55. } PixFmtInfo;
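/*
 * Editor's illustration (not part of the original file): x_chroma_shift and
 * y_chroma_shift give the chroma plane dimensions by a power-of-two division,
 * rounded up. Assuming 'info' points at the entry for the format in use:
 *
 *   // chroma_w = ceil(width  / (1 << info->x_chroma_shift))
 *   // chroma_h = ceil(height / (1 << info->y_chroma_shift))
 *   int chroma_w = -((-width)  >> info->x_chroma_shift);
 *   int chroma_h = -((-height) >> info->y_chroma_shift);
 *
 * e.g. yuv420p (shifts 1,1) at 175x144 has 88x72 chroma planes; this is the
 * same rounding idiom used by ff_get_plane_bytewidth() below.
 */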
  56. /* this table gives more information about formats */
  57. static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
  58. /* YUV formats */
  59. [PIX_FMT_YUV420P] = {
  60. .name = "yuv420p",
  61. .nb_channels = 3,
  62. .color_type = FF_COLOR_YUV,
  63. .pixel_type = FF_PIXEL_PLANAR,
  64. .depth = 8,
  65. .x_chroma_shift = 1, .y_chroma_shift = 1,
  66. },
  67. [PIX_FMT_YUV422P] = {
  68. .name = "yuv422p",
  69. .nb_channels = 3,
  70. .color_type = FF_COLOR_YUV,
  71. .pixel_type = FF_PIXEL_PLANAR,
  72. .depth = 8,
  73. .x_chroma_shift = 1, .y_chroma_shift = 0,
  74. },
  75. [PIX_FMT_YUV444P] = {
  76. .name = "yuv444p",
  77. .nb_channels = 3,
  78. .color_type = FF_COLOR_YUV,
  79. .pixel_type = FF_PIXEL_PLANAR,
  80. .depth = 8,
  81. .x_chroma_shift = 0, .y_chroma_shift = 0,
  82. },
  83. [PIX_FMT_YUYV422] = {
  84. .name = "yuyv422",
  85. .nb_channels = 1,
  86. .color_type = FF_COLOR_YUV,
  87. .pixel_type = FF_PIXEL_PACKED,
  88. .depth = 8,
  89. .x_chroma_shift = 1, .y_chroma_shift = 0,
  90. },
  91. [PIX_FMT_UYVY422] = {
  92. .name = "uyvy422",
  93. .nb_channels = 1,
  94. .color_type = FF_COLOR_YUV,
  95. .pixel_type = FF_PIXEL_PACKED,
  96. .depth = 8,
  97. .x_chroma_shift = 1, .y_chroma_shift = 0,
  98. },
  99. [PIX_FMT_YUV410P] = {
  100. .name = "yuv410p",
  101. .nb_channels = 3,
  102. .color_type = FF_COLOR_YUV,
  103. .pixel_type = FF_PIXEL_PLANAR,
  104. .depth = 8,
  105. .x_chroma_shift = 2, .y_chroma_shift = 2,
  106. },
  107. [PIX_FMT_YUV411P] = {
  108. .name = "yuv411p",
  109. .nb_channels = 3,
  110. .color_type = FF_COLOR_YUV,
  111. .pixel_type = FF_PIXEL_PLANAR,
  112. .depth = 8,
  113. .x_chroma_shift = 2, .y_chroma_shift = 0,
  114. },
  115. [PIX_FMT_YUV440P] = {
  116. .name = "yuv440p",
  117. .nb_channels = 3,
  118. .color_type = FF_COLOR_YUV,
  119. .pixel_type = FF_PIXEL_PLANAR,
  120. .depth = 8,
  121. .x_chroma_shift = 0, .y_chroma_shift = 1,
  122. },
  123. /* YUV formats with alpha plane */
  124. [PIX_FMT_YUVA420P] = {
  125. .name = "yuva420p",
  126. .nb_channels = 4,
  127. .color_type = FF_COLOR_YUV,
  128. .pixel_type = FF_PIXEL_PLANAR,
  129. .depth = 8,
  130. .x_chroma_shift = 1, .y_chroma_shift = 1,
  131. },
  132. /* JPEG YUV */
  133. [PIX_FMT_YUVJ420P] = {
  134. .name = "yuvj420p",
  135. .nb_channels = 3,
  136. .color_type = FF_COLOR_YUV_JPEG,
  137. .pixel_type = FF_PIXEL_PLANAR,
  138. .depth = 8,
  139. .x_chroma_shift = 1, .y_chroma_shift = 1,
  140. },
  141. [PIX_FMT_YUVJ422P] = {
  142. .name = "yuvj422p",
  143. .nb_channels = 3,
  144. .color_type = FF_COLOR_YUV_JPEG,
  145. .pixel_type = FF_PIXEL_PLANAR,
  146. .depth = 8,
  147. .x_chroma_shift = 1, .y_chroma_shift = 0,
  148. },
  149. [PIX_FMT_YUVJ444P] = {
  150. .name = "yuvj444p",
  151. .nb_channels = 3,
  152. .color_type = FF_COLOR_YUV_JPEG,
  153. .pixel_type = FF_PIXEL_PLANAR,
  154. .depth = 8,
  155. .x_chroma_shift = 0, .y_chroma_shift = 0,
  156. },
  157. [PIX_FMT_YUVJ440P] = {
  158. .name = "yuvj440p",
  159. .nb_channels = 3,
  160. .color_type = FF_COLOR_YUV_JPEG,
  161. .pixel_type = FF_PIXEL_PLANAR,
  162. .depth = 8,
  163. .x_chroma_shift = 0, .y_chroma_shift = 1,
  164. },
  165. /* RGB formats */
  166. [PIX_FMT_RGB24] = {
  167. .name = "rgb24",
  168. .nb_channels = 3,
  169. .color_type = FF_COLOR_RGB,
  170. .pixel_type = FF_PIXEL_PACKED,
  171. .depth = 8,
  172. .x_chroma_shift = 0, .y_chroma_shift = 0,
  173. },
  174. [PIX_FMT_BGR24] = {
  175. .name = "bgr24",
  176. .nb_channels = 3,
  177. .color_type = FF_COLOR_RGB,
  178. .pixel_type = FF_PIXEL_PACKED,
  179. .depth = 8,
  180. .x_chroma_shift = 0, .y_chroma_shift = 0,
  181. },
  182. [PIX_FMT_RGB32] = {
  183. .name = "rgb32",
  184. .nb_channels = 4, .is_alpha = 1,
  185. .color_type = FF_COLOR_RGB,
  186. .pixel_type = FF_PIXEL_PACKED,
  187. .depth = 8,
  188. .x_chroma_shift = 0, .y_chroma_shift = 0,
  189. },
  190. [PIX_FMT_RGB565] = {
  191. .name = "rgb565",
  192. .nb_channels = 3,
  193. .color_type = FF_COLOR_RGB,
  194. .pixel_type = FF_PIXEL_PACKED,
  195. .depth = 5,
  196. .x_chroma_shift = 0, .y_chroma_shift = 0,
  197. },
  198. [PIX_FMT_RGB555] = {
  199. .name = "rgb555",
  200. .nb_channels = 3,
  201. .color_type = FF_COLOR_RGB,
  202. .pixel_type = FF_PIXEL_PACKED,
  203. .depth = 5,
  204. .x_chroma_shift = 0, .y_chroma_shift = 0,
  205. },
  206. /* gray / mono formats */
  207. [PIX_FMT_GRAY16BE] = {
  208. .name = "gray16be",
  209. .nb_channels = 1,
  210. .color_type = FF_COLOR_GRAY,
  211. .pixel_type = FF_PIXEL_PLANAR,
  212. .depth = 16,
  213. },
  214. [PIX_FMT_GRAY16LE] = {
  215. .name = "gray16le",
  216. .nb_channels = 1,
  217. .color_type = FF_COLOR_GRAY,
  218. .pixel_type = FF_PIXEL_PLANAR,
  219. .depth = 16,
  220. },
  221. [PIX_FMT_GRAY8] = {
  222. .name = "gray",
  223. .nb_channels = 1,
  224. .color_type = FF_COLOR_GRAY,
  225. .pixel_type = FF_PIXEL_PLANAR,
  226. .depth = 8,
  227. },
  228. [PIX_FMT_MONOWHITE] = {
  229. .name = "monow",
  230. .nb_channels = 1,
  231. .color_type = FF_COLOR_GRAY,
  232. .pixel_type = FF_PIXEL_PLANAR,
  233. .depth = 1,
  234. },
  235. [PIX_FMT_MONOBLACK] = {
  236. .name = "monob",
  237. .nb_channels = 1,
  238. .color_type = FF_COLOR_GRAY,
  239. .pixel_type = FF_PIXEL_PLANAR,
  240. .depth = 1,
  241. },
  242. /* paletted formats */
  243. [PIX_FMT_PAL8] = {
  244. .name = "pal8",
  245. .nb_channels = 4, .is_alpha = 1,
  246. .color_type = FF_COLOR_RGB,
  247. .pixel_type = FF_PIXEL_PALETTE,
  248. .depth = 8,
  249. },
  250. [PIX_FMT_XVMC_MPEG2_MC] = {
  251. .name = "xvmcmc",
  252. },
  253. [PIX_FMT_XVMC_MPEG2_IDCT] = {
  254. .name = "xvmcidct",
  255. },
  256. [PIX_FMT_VDPAU_MPEG1] = {
  257. .name = "vdpau_mpeg1",
  258. },
  259. [PIX_FMT_VDPAU_MPEG2] = {
  260. .name = "vdpau_mpeg2",
  261. },
  262. [PIX_FMT_VDPAU_H264] = {
  263. .name = "vdpau_h264",
  264. },
  265. [PIX_FMT_UYYVYY411] = {
  266. .name = "uyyvyy411",
  267. .nb_channels = 1,
  268. .color_type = FF_COLOR_YUV,
  269. .pixel_type = FF_PIXEL_PACKED,
  270. .depth = 8,
  271. .x_chroma_shift = 2, .y_chroma_shift = 0,
  272. },
  273. [PIX_FMT_BGR32] = {
  274. .name = "bgr32",
  275. .nb_channels = 4, .is_alpha = 1,
  276. .color_type = FF_COLOR_RGB,
  277. .pixel_type = FF_PIXEL_PACKED,
  278. .depth = 8,
  279. .x_chroma_shift = 0, .y_chroma_shift = 0,
  280. },
  281. [PIX_FMT_BGR565] = {
  282. .name = "bgr565",
  283. .nb_channels = 3,
  284. .color_type = FF_COLOR_RGB,
  285. .pixel_type = FF_PIXEL_PACKED,
  286. .depth = 5,
  287. .x_chroma_shift = 0, .y_chroma_shift = 0,
  288. },
  289. [PIX_FMT_BGR555] = {
  290. .name = "bgr555",
  291. .nb_channels = 3,
  292. .color_type = FF_COLOR_RGB,
  293. .pixel_type = FF_PIXEL_PACKED,
  294. .depth = 5,
  295. .x_chroma_shift = 0, .y_chroma_shift = 0,
  296. },
  297. [PIX_FMT_RGB8] = {
  298. .name = "rgb8",
  299. .nb_channels = 1,
  300. .color_type = FF_COLOR_RGB,
  301. .pixel_type = FF_PIXEL_PACKED,
  302. .depth = 8,
  303. .x_chroma_shift = 0, .y_chroma_shift = 0,
  304. },
  305. [PIX_FMT_RGB4] = {
  306. .name = "rgb4",
  307. .nb_channels = 1,
  308. .color_type = FF_COLOR_RGB,
  309. .pixel_type = FF_PIXEL_PACKED,
  310. .depth = 4,
  311. .x_chroma_shift = 0, .y_chroma_shift = 0,
  312. },
  313. [PIX_FMT_RGB4_BYTE] = {
  314. .name = "rgb4_byte",
  315. .nb_channels = 1,
  316. .color_type = FF_COLOR_RGB,
  317. .pixel_type = FF_PIXEL_PACKED,
  318. .depth = 8,
  319. .x_chroma_shift = 0, .y_chroma_shift = 0,
  320. },
  321. [PIX_FMT_BGR8] = {
  322. .name = "bgr8",
  323. .nb_channels = 1,
  324. .color_type = FF_COLOR_RGB,
  325. .pixel_type = FF_PIXEL_PACKED,
  326. .depth = 8,
  327. .x_chroma_shift = 0, .y_chroma_shift = 0,
  328. },
  329. [PIX_FMT_BGR4] = {
  330. .name = "bgr4",
  331. .nb_channels = 1,
  332. .color_type = FF_COLOR_RGB,
  333. .pixel_type = FF_PIXEL_PACKED,
  334. .depth = 4,
  335. .x_chroma_shift = 0, .y_chroma_shift = 0,
  336. },
  337. [PIX_FMT_BGR4_BYTE] = {
  338. .name = "bgr4_byte",
  339. .nb_channels = 1,
  340. .color_type = FF_COLOR_RGB,
  341. .pixel_type = FF_PIXEL_PACKED,
  342. .depth = 8,
  343. .x_chroma_shift = 0, .y_chroma_shift = 0,
  344. },
  345. [PIX_FMT_NV12] = {
  346. .name = "nv12",
  347. .nb_channels = 2,
  348. .color_type = FF_COLOR_YUV,
  349. .pixel_type = FF_PIXEL_PLANAR,
  350. .depth = 8,
  351. .x_chroma_shift = 1, .y_chroma_shift = 1,
  352. },
  353. [PIX_FMT_NV21] = {
  354. .name = "nv21",
  355. .nb_channels = 2,
  356. .color_type = FF_COLOR_YUV,
  357. .pixel_type = FF_PIXEL_PLANAR,
  358. .depth = 8,
  359. .x_chroma_shift = 1, .y_chroma_shift = 1,
  360. },
  361. [PIX_FMT_BGR32_1] = {
  362. .name = "bgr32_1",
  363. .nb_channels = 4, .is_alpha = 1,
  364. .color_type = FF_COLOR_RGB,
  365. .pixel_type = FF_PIXEL_PACKED,
  366. .depth = 8,
  367. .x_chroma_shift = 0, .y_chroma_shift = 0,
  368. },
  369. [PIX_FMT_RGB32_1] = {
  370. .name = "rgb32_1",
  371. .nb_channels = 4, .is_alpha = 1,
  372. .color_type = FF_COLOR_RGB,
  373. .pixel_type = FF_PIXEL_PACKED,
  374. .depth = 8,
  375. .x_chroma_shift = 0, .y_chroma_shift = 0,
  376. },
  377. };
  378. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  379. {
  380. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  381. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  382. }
  383. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  384. {
  385. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  386. return NULL;
  387. else
  388. return pix_fmt_info[pix_fmt].name;
  389. }
  390. enum PixelFormat avcodec_get_pix_fmt(const char* name)
  391. {
  392. int i;
  393. for (i=0; i < PIX_FMT_NB; i++)
  394. if (!strcmp(pix_fmt_info[i].name, name))
  395. return i;
  396. return PIX_FMT_NONE;
  397. }
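/*
 * Usage sketch (editor's note, hypothetical caller): the two lookups are
 * inverses over the names registered in pix_fmt_info.
 *
 *   enum PixelFormat fmt = avcodec_get_pix_fmt("yuv420p");
 *   const char *name    = (fmt == PIX_FMT_NONE) ? "unknown"
 *                                               : avcodec_get_pix_fmt_name(fmt);
 */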
  398. void avcodec_pix_fmt_string (char *buf, int buf_size, int pix_fmt)
  399. {
  400. /* print header */
  401. if (pix_fmt < 0)
  402. snprintf (buf, buf_size,
  403. "name " " nb_channels" " depth" " is_alpha"
  404. );
  405. else{
  406. PixFmtInfo info= pix_fmt_info[pix_fmt];
  407. char is_alpha_char= info.is_alpha ? 'y' : 'n';
  408. snprintf (buf, buf_size,
  409. "%-10s" " %1d " " %2d " " %c ",
  410. info.name,
  411. info.nb_channels,
  412. info.depth,
  413. is_alpha_char
  414. );
  415. }
  416. }
  417. int ff_fill_linesize(AVPicture *picture, int pix_fmt, int width)
  418. {
  419. int w2;
  420. const PixFmtInfo *pinfo;
  421. memset(picture->linesize, 0, sizeof(picture->linesize));
  422. pinfo = &pix_fmt_info[pix_fmt];
  423. switch(pix_fmt) {
  424. case PIX_FMT_YUV420P:
  425. case PIX_FMT_YUV422P:
  426. case PIX_FMT_YUV444P:
  427. case PIX_FMT_YUV410P:
  428. case PIX_FMT_YUV411P:
  429. case PIX_FMT_YUV440P:
  430. case PIX_FMT_YUVJ420P:
  431. case PIX_FMT_YUVJ422P:
  432. case PIX_FMT_YUVJ444P:
  433. case PIX_FMT_YUVJ440P:
  434. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  435. picture->linesize[0] = width;
  436. picture->linesize[1] = w2;
  437. picture->linesize[2] = w2;
  438. break;
  439. case PIX_FMT_YUVA420P:
  440. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  441. picture->linesize[0] = width;
  442. picture->linesize[1] = w2;
  443. picture->linesize[2] = w2;
  444. picture->linesize[3] = width;
  445. break;
  446. case PIX_FMT_NV12:
  447. case PIX_FMT_NV21:
  448. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  449. picture->linesize[0] = width;
  450. picture->linesize[1] = w2;
  451. break;
  452. case PIX_FMT_RGB24:
  453. case PIX_FMT_BGR24:
  454. picture->linesize[0] = width * 3;
  455. break;
  456. case PIX_FMT_RGB32:
  457. case PIX_FMT_BGR32:
  458. case PIX_FMT_RGB32_1:
  459. case PIX_FMT_BGR32_1:
  460. picture->linesize[0] = width * 4;
  461. break;
  462. case PIX_FMT_GRAY16BE:
  463. case PIX_FMT_GRAY16LE:
  464. case PIX_FMT_BGR555:
  465. case PIX_FMT_BGR565:
  466. case PIX_FMT_RGB555:
  467. case PIX_FMT_RGB565:
  468. case PIX_FMT_YUYV422:
  469. picture->linesize[0] = width * 2;
  470. break;
  471. case PIX_FMT_UYVY422:
  472. picture->linesize[0] = width * 2;
  473. break;
  474. case PIX_FMT_UYYVYY411:
  475. picture->linesize[0] = width + width/2;
  476. break;
  477. case PIX_FMT_RGB8:
  478. case PIX_FMT_BGR8:
  479. case PIX_FMT_RGB4_BYTE:
  480. case PIX_FMT_BGR4_BYTE:
  481. case PIX_FMT_GRAY8:
  482. picture->linesize[0] = width;
  483. break;
  484. case PIX_FMT_RGB4:
  485. case PIX_FMT_BGR4:
  486. picture->linesize[0] = width / 2;
  487. break;
  488. case PIX_FMT_MONOWHITE:
  489. case PIX_FMT_MONOBLACK:
  490. picture->linesize[0] = (width + 7) >> 3;
  491. break;
  492. case PIX_FMT_PAL8:
  493. picture->linesize[0] = width;
  494. picture->linesize[1] = 4;
  495. break;
  496. default:
  497. return -1;
  498. }
  499. return 0;
  500. }
  501. int ff_fill_pointer(AVPicture *picture, uint8_t *ptr, int pix_fmt,
  502. int height)
  503. {
  504. int size, h2, size2;
  505. const PixFmtInfo *pinfo;
  506. pinfo = &pix_fmt_info[pix_fmt];
  507. size = picture->linesize[0] * height;
  508. switch(pix_fmt) {
  509. case PIX_FMT_YUV420P:
  510. case PIX_FMT_YUV422P:
  511. case PIX_FMT_YUV444P:
  512. case PIX_FMT_YUV410P:
  513. case PIX_FMT_YUV411P:
  514. case PIX_FMT_YUV440P:
  515. case PIX_FMT_YUVJ420P:
  516. case PIX_FMT_YUVJ422P:
  517. case PIX_FMT_YUVJ444P:
  518. case PIX_FMT_YUVJ440P:
  519. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  520. size2 = picture->linesize[1] * h2;
  521. picture->data[0] = ptr;
  522. picture->data[1] = picture->data[0] + size;
  523. picture->data[2] = picture->data[1] + size2;
  524. picture->data[3] = NULL;
  525. return size + 2 * size2;
  526. case PIX_FMT_YUVA420P:
  527. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  528. size2 = picture->linesize[1] * h2;
  529. picture->data[0] = ptr;
  530. picture->data[1] = picture->data[0] + size;
  531. picture->data[2] = picture->data[1] + size2;
  532. picture->data[3] = picture->data[1] + size2 + size2;
  533. return 2 * size + 2 * size2;
  534. case PIX_FMT_NV12:
  535. case PIX_FMT_NV21:
  536. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  537. size2 = picture->linesize[1] * h2 * 2;
  538. picture->data[0] = ptr;
  539. picture->data[1] = picture->data[0] + size;
  540. picture->data[2] = NULL;
  541. picture->data[3] = NULL;
  542. return size + 2 * size2;
  543. case PIX_FMT_RGB24:
  544. case PIX_FMT_BGR24:
  545. case PIX_FMT_RGB32:
  546. case PIX_FMT_BGR32:
  547. case PIX_FMT_RGB32_1:
  548. case PIX_FMT_BGR32_1:
  549. case PIX_FMT_GRAY16BE:
  550. case PIX_FMT_GRAY16LE:
  551. case PIX_FMT_BGR555:
  552. case PIX_FMT_BGR565:
  553. case PIX_FMT_RGB555:
  554. case PIX_FMT_RGB565:
  555. case PIX_FMT_YUYV422:
  556. case PIX_FMT_UYVY422:
  557. case PIX_FMT_UYYVYY411:
  558. case PIX_FMT_RGB8:
  559. case PIX_FMT_BGR8:
  560. case PIX_FMT_RGB4_BYTE:
  561. case PIX_FMT_BGR4_BYTE:
  562. case PIX_FMT_GRAY8:
  563. case PIX_FMT_RGB4:
  564. case PIX_FMT_BGR4:
  565. case PIX_FMT_MONOWHITE:
  566. case PIX_FMT_MONOBLACK:
  567. picture->data[0] = ptr;
  568. picture->data[1] = NULL;
  569. picture->data[2] = NULL;
  570. picture->data[3] = NULL;
  571. return size;
  572. case PIX_FMT_PAL8:
  573. size2 = (size + 3) & ~3;
  574. picture->data[0] = ptr;
  575. picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
  576. picture->data[2] = NULL;
  577. picture->data[3] = NULL;
  578. return size2 + 256 * 4;
  579. default:
  580. picture->data[0] = NULL;
  581. picture->data[1] = NULL;
  582. picture->data[2] = NULL;
  583. picture->data[3] = NULL;
  584. return -1;
  585. }
  586. }
  587. int avpicture_fill(AVPicture *picture, uint8_t *ptr,
  588. int pix_fmt, int width, int height)
  589. {
  590. if(avcodec_check_dimensions(NULL, width, height))
  591. return -1;
  592. if (ff_fill_linesize(picture, pix_fmt, width))
  593. return -1;
  594. return ff_fill_pointer(picture, ptr, pix_fmt, height);
  595. }
  596. int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
  597. unsigned char *dest, int dest_size)
  598. {
  599. const PixFmtInfo* pf = &pix_fmt_info[pix_fmt];
  600. int i, j, w, h, data_planes;
  601. const unsigned char* s;
  602. int size = avpicture_get_size(pix_fmt, width, height);
  603. if (size > dest_size || size < 0)
  604. return -1;
  605. if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
  606. if (pix_fmt == PIX_FMT_YUYV422 ||
  607. pix_fmt == PIX_FMT_UYVY422 ||
  608. pix_fmt == PIX_FMT_BGR565 ||
  609. pix_fmt == PIX_FMT_BGR555 ||
  610. pix_fmt == PIX_FMT_RGB565 ||
  611. pix_fmt == PIX_FMT_RGB555)
  612. w = width * 2;
  613. else if (pix_fmt == PIX_FMT_UYYVYY411)
  614. w = width + width/2;
  615. else if (pix_fmt == PIX_FMT_PAL8)
  616. w = width;
  617. else
  618. w = width * (pf->depth * pf->nb_channels / 8);
  619. data_planes = 1;
  620. h = height;
  621. } else {
  622. data_planes = pf->nb_channels;
  623. w = (width*pf->depth + 7)/8;
  624. h = height;
  625. }
  626. for (i=0; i<data_planes; i++) {
  627. if (i == 1) {
  628. w = width >> pf->x_chroma_shift;
  629. h = height >> pf->y_chroma_shift;
  630. }
  631. s = src->data[i];
  632. for(j=0; j<h; j++) {
  633. memcpy(dest, s, w);
  634. dest += w;
  635. s += src->linesize[i];
  636. }
  637. }
  638. if (pf->pixel_type == FF_PIXEL_PALETTE)
  639. memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);
  640. return size;
  641. }
  642. int avpicture_get_size(int pix_fmt, int width, int height)
  643. {
  644. AVPicture dummy_pict;
  645. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  646. }
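/*
 * Worked example (editor's note): for PIX_FMT_YUV420P at 640x480 the dummy
 * fill above yields linesize[] = {640, 320, 320, 0}, so
 * avpicture_get_size() returns 640*480 + 2 * (320*240) = 460800 bytes.
 */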
  647. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  648. int has_alpha)
  649. {
  650. const PixFmtInfo *pf, *ps;
  651. int loss;
  652. ps = &pix_fmt_info[src_pix_fmt];
  653. pf = &pix_fmt_info[dst_pix_fmt];
  654. /* compute loss */
  655. loss = 0;
  656. pf = &pix_fmt_info[dst_pix_fmt];
  657. if (pf->depth < ps->depth ||
  658. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  659. loss |= FF_LOSS_DEPTH;
  660. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  661. pf->y_chroma_shift > ps->y_chroma_shift)
  662. loss |= FF_LOSS_RESOLUTION;
  663. switch(pf->color_type) {
  664. case FF_COLOR_RGB:
  665. if (ps->color_type != FF_COLOR_RGB &&
  666. ps->color_type != FF_COLOR_GRAY)
  667. loss |= FF_LOSS_COLORSPACE;
  668. break;
  669. case FF_COLOR_GRAY:
  670. if (ps->color_type != FF_COLOR_GRAY)
  671. loss |= FF_LOSS_COLORSPACE;
  672. break;
  673. case FF_COLOR_YUV:
  674. if (ps->color_type != FF_COLOR_YUV)
  675. loss |= FF_LOSS_COLORSPACE;
  676. break;
  677. case FF_COLOR_YUV_JPEG:
  678. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  679. ps->color_type != FF_COLOR_YUV &&
  680. ps->color_type != FF_COLOR_GRAY)
  681. loss |= FF_LOSS_COLORSPACE;
  682. break;
  683. default:
  684. /* fail safe test */
  685. if (ps->color_type != pf->color_type)
  686. loss |= FF_LOSS_COLORSPACE;
  687. break;
  688. }
  689. if (pf->color_type == FF_COLOR_GRAY &&
  690. ps->color_type != FF_COLOR_GRAY)
  691. loss |= FF_LOSS_CHROMA;
  692. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  693. loss |= FF_LOSS_ALPHA;
  694. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  695. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  696. loss |= FF_LOSS_COLORQUANT;
  697. return loss;
  698. }
  699. static int avg_bits_per_pixel(int pix_fmt)
  700. {
  701. int bits;
  702. const PixFmtInfo *pf;
  703. pf = &pix_fmt_info[pix_fmt];
  704. switch(pf->pixel_type) {
  705. case FF_PIXEL_PACKED:
  706. switch(pix_fmt) {
  707. case PIX_FMT_YUYV422:
  708. case PIX_FMT_UYVY422:
  709. case PIX_FMT_RGB565:
  710. case PIX_FMT_RGB555:
  711. case PIX_FMT_BGR565:
  712. case PIX_FMT_BGR555:
  713. bits = 16;
  714. break;
  715. case PIX_FMT_UYYVYY411:
  716. bits = 12;
  717. break;
  718. default:
  719. bits = pf->depth * pf->nb_channels;
  720. break;
  721. }
  722. break;
  723. case FF_PIXEL_PLANAR:
  724. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  725. bits = pf->depth * pf->nb_channels;
  726. } else {
  727. bits = pf->depth + ((2 * pf->depth) >>
  728. (pf->x_chroma_shift + pf->y_chroma_shift));
  729. }
  730. break;
  731. case FF_PIXEL_PALETTE:
  732. bits = 8;
  733. break;
  734. default:
  735. bits = -1;
  736. break;
  737. }
  738. return bits;
  739. }
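/*
 * Worked example (editor's note): planar yuv420p (depth 8, shifts 1,1) gives
 * 8 + ((2*8) >> 2) == 12 bits per pixel, while yuv444p (shifts 0,0) takes the
 * first branch and gives 8*3 == 24.
 */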
  740. static int avcodec_find_best_pix_fmt1(int64_t pix_fmt_mask,
  741. int src_pix_fmt,
  742. int has_alpha,
  743. int loss_mask)
  744. {
  745. int dist, i, loss, min_dist, dst_pix_fmt;
  746. /* find exact color match with smallest size */
  747. dst_pix_fmt = -1;
  748. min_dist = 0x7fffffff;
  749. for(i = 0;i < PIX_FMT_NB; i++) {
  750. if (pix_fmt_mask & (1ULL << i)) {
  751. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  752. if (loss == 0) {
  753. dist = avg_bits_per_pixel(i);
  754. if (dist < min_dist) {
  755. min_dist = dist;
  756. dst_pix_fmt = i;
  757. }
  758. }
  759. }
  760. }
  761. return dst_pix_fmt;
  762. }
  763. int avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, int src_pix_fmt,
  764. int has_alpha, int *loss_ptr)
  765. {
  766. int dst_pix_fmt, loss_mask, i;
  767. static const int loss_mask_order[] = {
  768. ~0, /* no loss first */
  769. ~FF_LOSS_ALPHA,
  770. ~FF_LOSS_RESOLUTION,
  771. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  772. ~FF_LOSS_COLORQUANT,
  773. ~FF_LOSS_DEPTH,
  774. 0,
  775. };
  776. /* try with successive loss */
  777. i = 0;
  778. for(;;) {
  779. loss_mask = loss_mask_order[i++];
  780. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  781. has_alpha, loss_mask);
  782. if (dst_pix_fmt >= 0)
  783. goto found;
  784. if (loss_mask == 0)
  785. break;
  786. }
  787. return -1;
  788. found:
  789. if (loss_ptr)
  790. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  791. return dst_pix_fmt;
  792. }
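/*
 * Usage sketch (editor's note, hypothetical caller): choose between two
 * candidate output formats for a yuv422p source.
 *
 *   int loss;
 *   int64_t mask = (1ULL << PIX_FMT_YUV420P) | (1ULL << PIX_FMT_RGB24);
 *   int best = avcodec_find_best_pix_fmt(mask, PIX_FMT_YUV422P, 0, &loss);
 *   // best == PIX_FMT_YUV420P: rgb24 always carries FF_LOSS_COLORSPACE,
 *   // while 420p qualifies as soon as FF_LOSS_RESOLUTION is tolerated,
 *   // so 'loss' comes back with FF_LOSS_RESOLUTION set.
 */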
  793. void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
  794. const uint8_t *src, int src_wrap,
  795. int width, int height)
  796. {
  797. if((!dst) || (!src))
  798. return;
  799. for(;height > 0; height--) {
  800. memcpy(dst, src, width);
  801. dst += dst_wrap;
  802. src += src_wrap;
  803. }
  804. }
  805. int ff_get_plane_bytewidth(enum PixelFormat pix_fmt, int width, int plane)
  806. {
  807. int bits;
  808. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  809. pf = &pix_fmt_info[pix_fmt];
  810. switch(pf->pixel_type) {
  811. case FF_PIXEL_PACKED:
  812. switch(pix_fmt) {
  813. case PIX_FMT_YUYV422:
  814. case PIX_FMT_UYVY422:
  815. case PIX_FMT_RGB565:
  816. case PIX_FMT_RGB555:
  817. case PIX_FMT_BGR565:
  818. case PIX_FMT_BGR555:
  819. bits = 16;
  820. break;
  821. case PIX_FMT_UYYVYY411:
  822. bits = 12;
  823. break;
  824. default:
  825. bits = pf->depth * pf->nb_channels;
  826. break;
  827. }
  828. return (width * bits + 7) >> 3;
  829. break;
  830. case FF_PIXEL_PLANAR:
  831. if (plane == 1 || plane == 2)
  832. width= -((-width)>>pf->x_chroma_shift);
  833. return (width * pf->depth + 7) >> 3;
  834. break;
  835. case FF_PIXEL_PALETTE:
  836. if (plane == 0)
  837. return width;
  838. break;
  839. }
  840. return -1;
  841. }
  842. void av_picture_copy(AVPicture *dst, const AVPicture *src,
  843. int pix_fmt, int width, int height)
  844. {
  845. int i;
  846. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  847. pf = &pix_fmt_info[pix_fmt];
  848. switch(pf->pixel_type) {
  849. case FF_PIXEL_PACKED:
  850. case FF_PIXEL_PLANAR:
  851. for(i = 0; i < pf->nb_channels; i++) {
  852. int h;
  853. int bwidth = ff_get_plane_bytewidth(pix_fmt, width, i);
  854. h = height;
  855. if (i == 1 || i == 2) {
  856. h= -((-height)>>pf->y_chroma_shift);
  857. }
  858. ff_img_copy_plane(dst->data[i], dst->linesize[i],
  859. src->data[i], src->linesize[i],
  860. bwidth, h);
  861. }
  862. break;
  863. case FF_PIXEL_PALETTE:
  864. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  865. src->data[0], src->linesize[0],
  866. width, height);
  867. /* copy the palette */
  868. ff_img_copy_plane(dst->data[1], dst->linesize[1],
  869. src->data[1], src->linesize[1],
  870. 4, 256);
  871. break;
  872. }
  873. }
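/*
 * Usage sketch (editor's note, hypothetical names, error handling trimmed):
 * duplicate a picture into separately owned storage of the same geometry.
 *
 *   AVPicture copy;
 *   if (avpicture_alloc(&copy, PIX_FMT_YUV420P, width, height) == 0)
 *       av_picture_copy(&copy, &decoded, PIX_FMT_YUV420P, width, height);
 */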
  874. /* XXX: totally non optimized */
  875. static void yuyv422_to_yuv420p(AVPicture *dst, const AVPicture *src,
  876. int width, int height)
  877. {
  878. const uint8_t *p, *p1;
  879. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  880. int w;
  881. p1 = src->data[0];
  882. lum1 = dst->data[0];
  883. cb1 = dst->data[1];
  884. cr1 = dst->data[2];
  885. for(;height >= 1; height -= 2) {
  886. p = p1;
  887. lum = lum1;
  888. cb = cb1;
  889. cr = cr1;
  890. for(w = width; w >= 2; w -= 2) {
  891. lum[0] = p[0];
  892. cb[0] = p[1];
  893. lum[1] = p[2];
  894. cr[0] = p[3];
  895. p += 4;
  896. lum += 2;
  897. cb++;
  898. cr++;
  899. }
  900. if (w) {
  901. lum[0] = p[0];
  902. cb[0] = p[1];
  903. cr[0] = p[3];
  904. cb++;
  905. cr++;
  906. }
  907. p1 += src->linesize[0];
  908. lum1 += dst->linesize[0];
  909. if (height>1) {
  910. p = p1;
  911. lum = lum1;
  912. for(w = width; w >= 2; w -= 2) {
  913. lum[0] = p[0];
  914. lum[1] = p[2];
  915. p += 4;
  916. lum += 2;
  917. }
  918. if (w) {
  919. lum[0] = p[0];
  920. }
  921. p1 += src->linesize[0];
  922. lum1 += dst->linesize[0];
  923. }
  924. cb1 += dst->linesize[1];
  925. cr1 += dst->linesize[2];
  926. }
  927. }
  928. static void uyvy422_to_yuv420p(AVPicture *dst, const AVPicture *src,
  929. int width, int height)
  930. {
  931. const uint8_t *p, *p1;
  932. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  933. int w;
  934. p1 = src->data[0];
  935. lum1 = dst->data[0];
  936. cb1 = dst->data[1];
  937. cr1 = dst->data[2];
  938. for(;height >= 1; height -= 2) {
  939. p = p1;
  940. lum = lum1;
  941. cb = cb1;
  942. cr = cr1;
  943. for(w = width; w >= 2; w -= 2) {
  944. lum[0] = p[1];
  945. cb[0] = p[0];
  946. lum[1] = p[3];
  947. cr[0] = p[2];
  948. p += 4;
  949. lum += 2;
  950. cb++;
  951. cr++;
  952. }
  953. if (w) {
  954. lum[0] = p[1];
  955. cb[0] = p[0];
  956. cr[0] = p[2];
  957. cb++;
  958. cr++;
  959. }
  960. p1 += src->linesize[0];
  961. lum1 += dst->linesize[0];
  962. if (height>1) {
  963. p = p1;
  964. lum = lum1;
  965. for(w = width; w >= 2; w -= 2) {
  966. lum[0] = p[1];
  967. lum[1] = p[3];
  968. p += 4;
  969. lum += 2;
  970. }
  971. if (w) {
  972. lum[0] = p[1];
  973. }
  974. p1 += src->linesize[0];
  975. lum1 += dst->linesize[0];
  976. }
  977. cb1 += dst->linesize[1];
  978. cr1 += dst->linesize[2];
  979. }
  980. }
  981. static void uyvy422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  982. int width, int height)
  983. {
  984. const uint8_t *p, *p1;
  985. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  986. int w;
  987. p1 = src->data[0];
  988. lum1 = dst->data[0];
  989. cb1 = dst->data[1];
  990. cr1 = dst->data[2];
  991. for(;height > 0; height--) {
  992. p = p1;
  993. lum = lum1;
  994. cb = cb1;
  995. cr = cr1;
  996. for(w = width; w >= 2; w -= 2) {
  997. lum[0] = p[1];
  998. cb[0] = p[0];
  999. lum[1] = p[3];
  1000. cr[0] = p[2];
  1001. p += 4;
  1002. lum += 2;
  1003. cb++;
  1004. cr++;
  1005. }
  1006. p1 += src->linesize[0];
  1007. lum1 += dst->linesize[0];
  1008. cb1 += dst->linesize[1];
  1009. cr1 += dst->linesize[2];
  1010. }
  1011. }
  1012. static void yuyv422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  1013. int width, int height)
  1014. {
  1015. const uint8_t *p, *p1;
  1016. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1017. int w;
  1018. p1 = src->data[0];
  1019. lum1 = dst->data[0];
  1020. cb1 = dst->data[1];
  1021. cr1 = dst->data[2];
  1022. for(;height > 0; height--) {
  1023. p = p1;
  1024. lum = lum1;
  1025. cb = cb1;
  1026. cr = cr1;
  1027. for(w = width; w >= 2; w -= 2) {
  1028. lum[0] = p[0];
  1029. cb[0] = p[1];
  1030. lum[1] = p[2];
  1031. cr[0] = p[3];
  1032. p += 4;
  1033. lum += 2;
  1034. cb++;
  1035. cr++;
  1036. }
  1037. p1 += src->linesize[0];
  1038. lum1 += dst->linesize[0];
  1039. cb1 += dst->linesize[1];
  1040. cr1 += dst->linesize[2];
  1041. }
  1042. }
  1043. static void yuv422p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  1044. int width, int height)
  1045. {
  1046. uint8_t *p, *p1;
  1047. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1048. int w;
  1049. p1 = dst->data[0];
  1050. lum1 = src->data[0];
  1051. cb1 = src->data[1];
  1052. cr1 = src->data[2];
  1053. for(;height > 0; height--) {
  1054. p = p1;
  1055. lum = lum1;
  1056. cb = cb1;
  1057. cr = cr1;
  1058. for(w = width; w >= 2; w -= 2) {
  1059. p[0] = lum[0];
  1060. p[1] = cb[0];
  1061. p[2] = lum[1];
  1062. p[3] = cr[0];
  1063. p += 4;
  1064. lum += 2;
  1065. cb++;
  1066. cr++;
  1067. }
  1068. p1 += dst->linesize[0];
  1069. lum1 += src->linesize[0];
  1070. cb1 += src->linesize[1];
  1071. cr1 += src->linesize[2];
  1072. }
  1073. }
  1074. static void yuv422p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1075. int width, int height)
  1076. {
  1077. uint8_t *p, *p1;
  1078. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1079. int w;
  1080. p1 = dst->data[0];
  1081. lum1 = src->data[0];
  1082. cb1 = src->data[1];
  1083. cr1 = src->data[2];
  1084. for(;height > 0; height--) {
  1085. p = p1;
  1086. lum = lum1;
  1087. cb = cb1;
  1088. cr = cr1;
  1089. for(w = width; w >= 2; w -= 2) {
  1090. p[1] = lum[0];
  1091. p[0] = cb[0];
  1092. p[3] = lum[1];
  1093. p[2] = cr[0];
  1094. p += 4;
  1095. lum += 2;
  1096. cb++;
  1097. cr++;
  1098. }
  1099. p1 += dst->linesize[0];
  1100. lum1 += src->linesize[0];
  1101. cb1 += src->linesize[1];
  1102. cr1 += src->linesize[2];
  1103. }
  1104. }
  1105. static void uyyvyy411_to_yuv411p(AVPicture *dst, const AVPicture *src,
  1106. int width, int height)
  1107. {
  1108. const uint8_t *p, *p1;
  1109. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1110. int w;
  1111. p1 = src->data[0];
  1112. lum1 = dst->data[0];
  1113. cb1 = dst->data[1];
  1114. cr1 = dst->data[2];
  1115. for(;height > 0; height--) {
  1116. p = p1;
  1117. lum = lum1;
  1118. cb = cb1;
  1119. cr = cr1;
  1120. for(w = width; w >= 4; w -= 4) {
  1121. cb[0] = p[0];
  1122. lum[0] = p[1];
  1123. lum[1] = p[2];
  1124. cr[0] = p[3];
  1125. lum[2] = p[4];
  1126. lum[3] = p[5];
  1127. p += 6;
  1128. lum += 4;
  1129. cb++;
  1130. cr++;
  1131. }
  1132. p1 += src->linesize[0];
  1133. lum1 += dst->linesize[0];
  1134. cb1 += dst->linesize[1];
  1135. cr1 += dst->linesize[2];
  1136. }
  1137. }
  1138. static void yuv420p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  1139. int width, int height)
  1140. {
  1141. int w, h;
  1142. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1143. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1144. uint8_t *cb1, *cb2 = src->data[1];
  1145. uint8_t *cr1, *cr2 = src->data[2];
  1146. for(h = height / 2; h--;) {
  1147. line1 = linesrc;
  1148. line2 = linesrc + dst->linesize[0];
  1149. lum1 = lumsrc;
  1150. lum2 = lumsrc + src->linesize[0];
  1151. cb1 = cb2;
  1152. cr1 = cr2;
  1153. for(w = width / 2; w--;) {
  1154. *line1++ = *lum1++; *line2++ = *lum2++;
  1155. *line1++ = *line2++ = *cb1++;
  1156. *line1++ = *lum1++; *line2++ = *lum2++;
  1157. *line1++ = *line2++ = *cr1++;
  1158. }
  1159. linesrc += dst->linesize[0] * 2;
  1160. lumsrc += src->linesize[0] * 2;
  1161. cb2 += src->linesize[1];
  1162. cr2 += src->linesize[2];
  1163. }
  1164. }
  1165. static void yuv420p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1166. int width, int height)
  1167. {
  1168. int w, h;
  1169. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1170. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1171. uint8_t *cb1, *cb2 = src->data[1];
  1172. uint8_t *cr1, *cr2 = src->data[2];
  1173. for(h = height / 2; h--;) {
  1174. line1 = linesrc;
  1175. line2 = linesrc + dst->linesize[0];
  1176. lum1 = lumsrc;
  1177. lum2 = lumsrc + src->linesize[0];
  1178. cb1 = cb2;
  1179. cr1 = cr2;
  1180. for(w = width / 2; w--;) {
  1181. *line1++ = *line2++ = *cb1++;
  1182. *line1++ = *lum1++; *line2++ = *lum2++;
  1183. *line1++ = *line2++ = *cr1++;
  1184. *line1++ = *lum1++; *line2++ = *lum2++;
  1185. }
  1186. linesrc += dst->linesize[0] * 2;
  1187. lumsrc += src->linesize[0] * 2;
  1188. cb2 += src->linesize[1];
  1189. cr2 += src->linesize[2];
  1190. }
  1191. }
  1192. /* 2x2 -> 1x1 */
  1193. void ff_shrink22(uint8_t *dst, int dst_wrap,
  1194. const uint8_t *src, int src_wrap,
  1195. int width, int height)
  1196. {
  1197. int w;
  1198. const uint8_t *s1, *s2;
  1199. uint8_t *d;
  1200. for(;height > 0; height--) {
  1201. s1 = src;
  1202. s2 = s1 + src_wrap;
  1203. d = dst;
  1204. for(w = width;w >= 4; w-=4) {
  1205. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1206. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  1207. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  1208. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  1209. s1 += 8;
  1210. s2 += 8;
  1211. d += 4;
  1212. }
  1213. for(;w > 0; w--) {
  1214. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1215. s1 += 2;
  1216. s2 += 2;
  1217. d++;
  1218. }
  1219. src += 2 * src_wrap;
  1220. dst += dst_wrap;
  1221. }
  1222. }
  1223. /* 4x4 -> 1x1 */
  1224. void ff_shrink44(uint8_t *dst, int dst_wrap,
  1225. const uint8_t *src, int src_wrap,
  1226. int width, int height)
  1227. {
  1228. int w;
  1229. const uint8_t *s1, *s2, *s3, *s4;
  1230. uint8_t *d;
  1231. for(;height > 0; height--) {
  1232. s1 = src;
  1233. s2 = s1 + src_wrap;
  1234. s3 = s2 + src_wrap;
  1235. s4 = s3 + src_wrap;
  1236. d = dst;
  1237. for(w = width;w > 0; w--) {
  1238. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  1239. s2[0] + s2[1] + s2[2] + s2[3] +
  1240. s3[0] + s3[1] + s3[2] + s3[3] +
  1241. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  1242. s1 += 4;
  1243. s2 += 4;
  1244. s3 += 4;
  1245. s4 += 4;
  1246. d++;
  1247. }
  1248. src += 4 * src_wrap;
  1249. dst += dst_wrap;
  1250. }
  1251. }
  1252. /* 8x8 -> 1x1 */
  1253. void ff_shrink88(uint8_t *dst, int dst_wrap,
  1254. const uint8_t *src, int src_wrap,
  1255. int width, int height)
  1256. {
  1257. int w, i;
  1258. for(;height > 0; height--) {
  1259. for(w = width;w > 0; w--) {
  1260. int tmp=0;
  1261. for(i=0; i<8; i++){
  1262. tmp += src[0] + src[1] + src[2] + src[3] + src[4] + src[5] + src[6] + src[7];
  1263. src += src_wrap;
  1264. }
  1265. *(dst++) = (tmp + 32)>>6;
  1266. src += 8 - 8*src_wrap;
  1267. }
  1268. src += 8*src_wrap - 8*width;
  1269. dst += dst_wrap - width;
  1270. }
  1271. }
  1272. /* XXX: add jpeg quantize code */
  1273. #define TRANSP_INDEX (6*6*6)
  1274. /* this may be slow, but it allows for extensions */
  1275. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1276. {
  1277. return (((r) / 47) % 6) * 6 * 6 + (((g) / 47) % 6) * 6 + (((b) / 47) % 6);
  1278. }
  1279. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  1280. {
  1281. uint32_t *pal;
  1282. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  1283. int i, r, g, b;
  1284. pal = (uint32_t *)palette;
  1285. i = 0;
  1286. for(r = 0; r < 6; r++) {
  1287. for(g = 0; g < 6; g++) {
  1288. for(b = 0; b < 6; b++) {
  1289. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  1290. (pal_value[g] << 8) | pal_value[b];
  1291. }
  1292. }
  1293. }
  1294. if (has_alpha)
  1295. pal[i++] = 0;
  1296. while (i < 256)
  1297. pal[i++] = 0xff000000;
  1298. }
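/*
 * Worked example (editor's note): the palette is a 6x6x6 colour cube with
 * component levels {0x00,0x33,0x66,0x99,0xcc,0xff}, indexed as r*36 + g*6 + b.
 * gif_clut_index() quantises each component with /47, so
 * gif_clut_index(255, 0, 0) == 180, and pal[180] built above is 0xffff0000
 * (opaque pure red).
 */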
  1299. /* copy bit n to bits 0 ... n - 1 */
  1300. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1301. {
  1302. int mask;
  1303. mask = (1 << n) - 1;
  1304. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1305. }
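/*
 * Worked example (editor's note): bitcopy_n() replicates bit n into the n low
 * bits so a truncated component reaches the full 0..255 range at the ends.
 * In the rgb555 reader below, a 5-bit red of 0x1f arrives as 0xf8 and
 * bitcopy_n(0xf8, 3) == 0xff, while 0x00 stays 0x00; intermediate values only
 * get their own low bit copied (5-bit 0x10 -> 0x80, not 0x84), a cheap
 * approximation of full bit replication.
 */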
  1306. /* rgb555 handling */
  1307. #define RGB_NAME rgb555
  1308. #define RGB_IN(r, g, b, s)\
  1309. {\
  1310. unsigned int v = ((const uint16_t *)(s))[0];\
  1311. r = bitcopy_n(v >> (10 - 3), 3);\
  1312. g = bitcopy_n(v >> (5 - 3), 3);\
  1313. b = bitcopy_n(v << 3, 3);\
  1314. }
  1315. #define RGB_OUT(d, r, g, b)\
  1316. {\
  1317. ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3);\
  1318. }
  1319. #define BPP 2
  1320. #include "imgconvert_template.c"
  1321. /* rgb565 handling */
  1322. #define RGB_NAME rgb565
  1323. #define RGB_IN(r, g, b, s)\
  1324. {\
  1325. unsigned int v = ((const uint16_t *)(s))[0];\
  1326. r = bitcopy_n(v >> (11 - 3), 3);\
  1327. g = bitcopy_n(v >> (5 - 2), 2);\
  1328. b = bitcopy_n(v << 3, 3);\
  1329. }
  1330. #define RGB_OUT(d, r, g, b)\
  1331. {\
  1332. ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
  1333. }
  1334. #define BPP 2
  1335. #include "imgconvert_template.c"
  1336. /* bgr24 handling */
  1337. #define RGB_NAME bgr24
  1338. #define RGB_IN(r, g, b, s)\
  1339. {\
  1340. b = (s)[0];\
  1341. g = (s)[1];\
  1342. r = (s)[2];\
  1343. }
  1344. #define RGB_OUT(d, r, g, b)\
  1345. {\
  1346. (d)[0] = b;\
  1347. (d)[1] = g;\
  1348. (d)[2] = r;\
  1349. }
  1350. #define BPP 3
  1351. #include "imgconvert_template.c"
  1352. #undef RGB_IN
  1353. #undef RGB_OUT
  1354. #undef BPP
  1355. /* rgb24 handling */
  1356. #define RGB_NAME rgb24
  1357. #define FMT_RGB24
  1358. #define RGB_IN(r, g, b, s)\
  1359. {\
  1360. r = (s)[0];\
  1361. g = (s)[1];\
  1362. b = (s)[2];\
  1363. }
  1364. #define RGB_OUT(d, r, g, b)\
  1365. {\
  1366. (d)[0] = r;\
  1367. (d)[1] = g;\
  1368. (d)[2] = b;\
  1369. }
  1370. #define BPP 3
  1371. #include "imgconvert_template.c"
  1372. /* rgb32 handling */
  1373. #define RGB_NAME rgb32
  1374. #define FMT_RGB32
  1375. #define RGB_IN(r, g, b, s)\
  1376. {\
  1377. unsigned int v = ((const uint32_t *)(s))[0];\
  1378. r = (v >> 16) & 0xff;\
  1379. g = (v >> 8) & 0xff;\
  1380. b = v & 0xff;\
  1381. }
  1382. #define RGBA_IN(r, g, b, a, s)\
  1383. {\
  1384. unsigned int v = ((const uint32_t *)(s))[0];\
  1385. a = (v >> 24) & 0xff;\
  1386. r = (v >> 16) & 0xff;\
  1387. g = (v >> 8) & 0xff;\
  1388. b = v & 0xff;\
  1389. }
  1390. #define RGBA_OUT(d, r, g, b, a)\
  1391. {\
  1392. ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
  1393. }
  1394. #define BPP 4
  1395. #include "imgconvert_template.c"
  1396. static void mono_to_gray(AVPicture *dst, const AVPicture *src,
  1397. int width, int height, int xor_mask)
  1398. {
  1399. const unsigned char *p;
  1400. unsigned char *q;
  1401. int v, dst_wrap, src_wrap;
  1402. int y, w;
  1403. p = src->data[0];
  1404. src_wrap = src->linesize[0] - ((width + 7) >> 3);
  1405. q = dst->data[0];
  1406. dst_wrap = dst->linesize[0] - width;
  1407. for(y=0;y<height;y++) {
  1408. w = width;
  1409. while (w >= 8) {
  1410. v = *p++ ^ xor_mask;
  1411. q[0] = -(v >> 7);
  1412. q[1] = -((v >> 6) & 1);
  1413. q[2] = -((v >> 5) & 1);
  1414. q[3] = -((v >> 4) & 1);
  1415. q[4] = -((v >> 3) & 1);
  1416. q[5] = -((v >> 2) & 1);
  1417. q[6] = -((v >> 1) & 1);
  1418. q[7] = -((v >> 0) & 1);
  1419. w -= 8;
  1420. q += 8;
  1421. }
  1422. if (w > 0) {
  1423. v = *p++ ^ xor_mask;
  1424. do {
  1425. q[0] = -((v >> 7) & 1);
  1426. q++;
  1427. v <<= 1;
  1428. } while (--w);
  1429. }
  1430. p += src_wrap;
  1431. q += dst_wrap;
  1432. }
  1433. }
  1434. static void monowhite_to_gray(AVPicture *dst, const AVPicture *src,
  1435. int width, int height)
  1436. {
  1437. mono_to_gray(dst, src, width, height, 0xff);
  1438. }
  1439. static void monoblack_to_gray(AVPicture *dst, const AVPicture *src,
  1440. int width, int height)
  1441. {
  1442. mono_to_gray(dst, src, width, height, 0x00);
  1443. }
  1444. static void gray_to_mono(AVPicture *dst, const AVPicture *src,
  1445. int width, int height, int xor_mask)
  1446. {
  1447. int n;
  1448. const uint8_t *s;
  1449. uint8_t *d;
  1450. int j, b, v, n1, src_wrap, dst_wrap, y;
  1451. s = src->data[0];
  1452. src_wrap = src->linesize[0] - width;
  1453. d = dst->data[0];
  1454. dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
  1455. for(y=0;y<height;y++) {
  1456. n = width;
  1457. while (n >= 8) {
  1458. v = 0;
  1459. for(j=0;j<8;j++) {
  1460. b = s[0];
  1461. s++;
  1462. v = (v << 1) | (b >> 7);
  1463. }
  1464. d[0] = v ^ xor_mask;
  1465. d++;
  1466. n -= 8;
  1467. }
  1468. if (n > 0) {
  1469. n1 = n;
  1470. v = 0;
  1471. while (n > 0) {
  1472. b = s[0];
  1473. s++;
  1474. v = (v << 1) | (b >> 7);
  1475. n--;
  1476. }
  1477. d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
  1478. d++;
  1479. }
  1480. s += src_wrap;
  1481. d += dst_wrap;
  1482. }
  1483. }
  1484. static void gray_to_monowhite(AVPicture *dst, const AVPicture *src,
  1485. int width, int height)
  1486. {
  1487. gray_to_mono(dst, src, width, height, 0xff);
  1488. }
  1489. static void gray_to_monoblack(AVPicture *dst, const AVPicture *src,
  1490. int width, int height)
  1491. {
  1492. gray_to_mono(dst, src, width, height, 0x00);
  1493. }
  1494. static void gray_to_gray16(AVPicture *dst, const AVPicture *src,
  1495. int width, int height)
  1496. {
  1497. int x, y, src_wrap, dst_wrap;
  1498. uint8_t *s, *d;
  1499. s = src->data[0];
  1500. src_wrap = src->linesize[0] - width;
  1501. d = dst->data[0];
  1502. dst_wrap = dst->linesize[0] - width * 2;
  1503. for(y=0; y<height; y++){
  1504. for(x=0; x<width; x++){
  1505. *d++ = *s;
  1506. *d++ = *s++;
  1507. }
  1508. s += src_wrap;
  1509. d += dst_wrap;
  1510. }
  1511. }
  1512. static void gray16_to_gray(AVPicture *dst, const AVPicture *src,
  1513. int width, int height)
  1514. {
  1515. int x, y, src_wrap, dst_wrap;
  1516. uint8_t *s, *d;
  1517. s = src->data[0];
  1518. src_wrap = src->linesize[0] - width * 2;
  1519. d = dst->data[0];
  1520. dst_wrap = dst->linesize[0] - width;
  1521. for(y=0; y<height; y++){
  1522. for(x=0; x<width; x++){
  1523. *d++ = *s;
  1524. s += 2;
  1525. }
  1526. s += src_wrap;
  1527. d += dst_wrap;
  1528. }
  1529. }
  1530. static void gray16be_to_gray(AVPicture *dst, const AVPicture *src,
  1531. int width, int height)
  1532. {
  1533. gray16_to_gray(dst, src, width, height);
  1534. }
  1535. static void gray16le_to_gray(AVPicture *dst, const AVPicture *src,
  1536. int width, int height)
  1537. {
  1538. AVPicture tmpsrc = *src;
  1539. tmpsrc.data[0]++;
  1540. gray16_to_gray(dst, &tmpsrc, width, height);
  1541. }
  1542. static void gray16_to_gray16(AVPicture *dst, const AVPicture *src,
  1543. int width, int height)
  1544. {
  1545. int x, y, src_wrap, dst_wrap;
  1546. uint16_t *s, *d;
  1547. s = (uint16_t*)src->data[0];
  1548. src_wrap = (src->linesize[0] - width * 2)/2;
  1549. d = (uint16_t*)dst->data[0];
  1550. dst_wrap = (dst->linesize[0] - width * 2)/2;
  1551. for(y=0; y<height; y++){
  1552. for(x=0; x<width; x++){
  1553. *d++ = bswap_16(*s++);
  1554. }
  1555. s += src_wrap;
  1556. d += dst_wrap;
  1557. }
  1558. }
  1559. typedef struct ConvertEntry {
  1560. void (*convert)(AVPicture *dst,
  1561. const AVPicture *src, int width, int height);
  1562. } ConvertEntry;
  1563. /* Add each new conversion function in this table. In order to be able
  1564. to convert from any format to any format, the following constraints
  1565. must be satisfied:
  1566. - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
  1567. - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
  1568. - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGB32
  1569. - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
  1570. PIX_FMT_RGB24.
  1571. - PIX_FMT_YUYV422 must convert to and from PIX_FMT_YUV422P.
  1572. The other conversion functions are just optimizations for common cases.
  1573. */
  1574. static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
  1575. [PIX_FMT_YUV420P] = {
  1576. [PIX_FMT_YUYV422] = {
  1577. .convert = yuv420p_to_yuyv422,
  1578. },
  1579. [PIX_FMT_RGB555] = {
  1580. .convert = yuv420p_to_rgb555
  1581. },
  1582. [PIX_FMT_RGB565] = {
  1583. .convert = yuv420p_to_rgb565
  1584. },
  1585. [PIX_FMT_BGR24] = {
  1586. .convert = yuv420p_to_bgr24
  1587. },
  1588. [PIX_FMT_RGB24] = {
  1589. .convert = yuv420p_to_rgb24
  1590. },
  1591. [PIX_FMT_RGB32] = {
  1592. .convert = yuv420p_to_rgb32
  1593. },
  1594. [PIX_FMT_UYVY422] = {
  1595. .convert = yuv420p_to_uyvy422,
  1596. },
  1597. },
  1598. [PIX_FMT_YUV422P] = {
  1599. [PIX_FMT_YUYV422] = {
  1600. .convert = yuv422p_to_yuyv422,
  1601. },
  1602. [PIX_FMT_UYVY422] = {
  1603. .convert = yuv422p_to_uyvy422,
  1604. },
  1605. },
  1606. [PIX_FMT_YUV444P] = {
  1607. [PIX_FMT_RGB24] = {
  1608. .convert = yuv444p_to_rgb24
  1609. },
  1610. },
  1611. [PIX_FMT_YUVJ420P] = {
  1612. [PIX_FMT_RGB555] = {
  1613. .convert = yuvj420p_to_rgb555
  1614. },
  1615. [PIX_FMT_RGB565] = {
  1616. .convert = yuvj420p_to_rgb565
  1617. },
  1618. [PIX_FMT_BGR24] = {
  1619. .convert = yuvj420p_to_bgr24
  1620. },
  1621. [PIX_FMT_RGB24] = {
  1622. .convert = yuvj420p_to_rgb24
  1623. },
  1624. [PIX_FMT_RGB32] = {
  1625. .convert = yuvj420p_to_rgb32
  1626. },
  1627. },
  1628. [PIX_FMT_YUVJ444P] = {
  1629. [PIX_FMT_RGB24] = {
  1630. .convert = yuvj444p_to_rgb24
  1631. },
  1632. },
  1633. [PIX_FMT_YUYV422] = {
  1634. [PIX_FMT_YUV420P] = {
  1635. .convert = yuyv422_to_yuv420p,
  1636. },
  1637. [PIX_FMT_YUV422P] = {
  1638. .convert = yuyv422_to_yuv422p,
  1639. },
  1640. },
  1641. [PIX_FMT_UYVY422] = {
  1642. [PIX_FMT_YUV420P] = {
  1643. .convert = uyvy422_to_yuv420p,
  1644. },
  1645. [PIX_FMT_YUV422P] = {
  1646. .convert = uyvy422_to_yuv422p,
  1647. },
  1648. },
  1649. [PIX_FMT_RGB24] = {
  1650. [PIX_FMT_YUV420P] = {
  1651. .convert = rgb24_to_yuv420p
  1652. },
  1653. [PIX_FMT_RGB565] = {
  1654. .convert = rgb24_to_rgb565
  1655. },
  1656. [PIX_FMT_RGB555] = {
  1657. .convert = rgb24_to_rgb555
  1658. },
  1659. [PIX_FMT_RGB32] = {
  1660. .convert = rgb24_to_rgb32
  1661. },
  1662. [PIX_FMT_BGR24] = {
  1663. .convert = rgb24_to_bgr24
  1664. },
  1665. [PIX_FMT_GRAY8] = {
  1666. .convert = rgb24_to_gray
  1667. },
  1668. [PIX_FMT_PAL8] = {
  1669. .convert = rgb24_to_pal8
  1670. },
  1671. [PIX_FMT_YUV444P] = {
  1672. .convert = rgb24_to_yuv444p
  1673. },
  1674. [PIX_FMT_YUVJ420P] = {
  1675. .convert = rgb24_to_yuvj420p
  1676. },
  1677. [PIX_FMT_YUVJ444P] = {
  1678. .convert = rgb24_to_yuvj444p
  1679. },
  1680. },
  1681. [PIX_FMT_RGB32] = {
  1682. [PIX_FMT_RGB24] = {
  1683. .convert = rgb32_to_rgb24
  1684. },
  1685. [PIX_FMT_BGR24] = {
  1686. .convert = rgb32_to_bgr24
  1687. },
  1688. [PIX_FMT_RGB565] = {
  1689. .convert = rgb32_to_rgb565
  1690. },
  1691. [PIX_FMT_RGB555] = {
  1692. .convert = rgb32_to_rgb555
  1693. },
  1694. [PIX_FMT_PAL8] = {
  1695. .convert = rgb32_to_pal8
  1696. },
  1697. [PIX_FMT_YUV420P] = {
  1698. .convert = rgb32_to_yuv420p
  1699. },
  1700. [PIX_FMT_GRAY8] = {
  1701. .convert = rgb32_to_gray
  1702. },
  1703. },
  1704. [PIX_FMT_BGR24] = {
  1705. [PIX_FMT_RGB32] = {
  1706. .convert = bgr24_to_rgb32
  1707. },
  1708. [PIX_FMT_RGB24] = {
  1709. .convert = bgr24_to_rgb24
  1710. },
  1711. [PIX_FMT_YUV420P] = {
  1712. .convert = bgr24_to_yuv420p
  1713. },
  1714. [PIX_FMT_GRAY8] = {
  1715. .convert = bgr24_to_gray
  1716. },
  1717. },
  1718. [PIX_FMT_RGB555] = {
  1719. [PIX_FMT_RGB24] = {
  1720. .convert = rgb555_to_rgb24
  1721. },
  1722. [PIX_FMT_RGB32] = {
  1723. .convert = rgb555_to_rgb32
  1724. },
  1725. [PIX_FMT_YUV420P] = {
  1726. .convert = rgb555_to_yuv420p
  1727. },
  1728. [PIX_FMT_GRAY8] = {
  1729. .convert = rgb555_to_gray
  1730. },
  1731. },
  1732. [PIX_FMT_RGB565] = {
  1733. [PIX_FMT_RGB32] = {
  1734. .convert = rgb565_to_rgb32
  1735. },
  1736. [PIX_FMT_RGB24] = {
  1737. .convert = rgb565_to_rgb24
  1738. },
  1739. [PIX_FMT_YUV420P] = {
  1740. .convert = rgb565_to_yuv420p
  1741. },
  1742. [PIX_FMT_GRAY8] = {
  1743. .convert = rgb565_to_gray
  1744. },
  1745. },
  1746. [PIX_FMT_GRAY16BE] = {
  1747. [PIX_FMT_GRAY8] = {
  1748. .convert = gray16be_to_gray
  1749. },
  1750. [PIX_FMT_GRAY16LE] = {
  1751. .convert = gray16_to_gray16
  1752. },
  1753. },
  1754. [PIX_FMT_GRAY16LE] = {
  1755. [PIX_FMT_GRAY8] = {
  1756. .convert = gray16le_to_gray
  1757. },
  1758. [PIX_FMT_GRAY16BE] = {
  1759. .convert = gray16_to_gray16
  1760. },
  1761. },
  1762. [PIX_FMT_GRAY8] = {
  1763. [PIX_FMT_RGB555] = {
  1764. .convert = gray_to_rgb555
  1765. },
  1766. [PIX_FMT_RGB565] = {
  1767. .convert = gray_to_rgb565
  1768. },
  1769. [PIX_FMT_RGB24] = {
  1770. .convert = gray_to_rgb24
  1771. },
  1772. [PIX_FMT_BGR24] = {
  1773. .convert = gray_to_bgr24
  1774. },
  1775. [PIX_FMT_RGB32] = {
  1776. .convert = gray_to_rgb32
  1777. },
  1778. [PIX_FMT_MONOWHITE] = {
  1779. .convert = gray_to_monowhite
  1780. },
  1781. [PIX_FMT_MONOBLACK] = {
  1782. .convert = gray_to_monoblack
  1783. },
  1784. [PIX_FMT_GRAY16LE] = {
  1785. .convert = gray_to_gray16
  1786. },
  1787. [PIX_FMT_GRAY16BE] = {
  1788. .convert = gray_to_gray16
  1789. },
  1790. },
  1791. [PIX_FMT_MONOWHITE] = {
  1792. [PIX_FMT_GRAY8] = {
  1793. .convert = monowhite_to_gray
  1794. },
  1795. },
  1796. [PIX_FMT_MONOBLACK] = {
  1797. [PIX_FMT_GRAY8] = {
  1798. .convert = monoblack_to_gray
  1799. },
  1800. },
  1801. [PIX_FMT_PAL8] = {
  1802. [PIX_FMT_RGB555] = {
  1803. .convert = pal8_to_rgb555
  1804. },
  1805. [PIX_FMT_RGB565] = {
  1806. .convert = pal8_to_rgb565
  1807. },
  1808. [PIX_FMT_BGR24] = {
  1809. .convert = pal8_to_bgr24
  1810. },
  1811. [PIX_FMT_RGB24] = {
  1812. .convert = pal8_to_rgb24
  1813. },
  1814. [PIX_FMT_RGB32] = {
  1815. .convert = pal8_to_rgb32
  1816. },
  1817. },
  1818. [PIX_FMT_UYYVYY411] = {
  1819. [PIX_FMT_YUV411P] = {
  1820. .convert = uyyvyy411_to_yuv411p,
  1821. },
  1822. },
  1823. };
  1824. int avpicture_alloc(AVPicture *picture,
  1825. int pix_fmt, int width, int height)
  1826. {
  1827. int size;
  1828. void *ptr;
  1829. size = avpicture_get_size(pix_fmt, width, height);
  1830. if(size<0)
  1831. goto fail;
  1832. ptr = av_malloc(size);
  1833. if (!ptr)
  1834. goto fail;
  1835. avpicture_fill(picture, ptr, pix_fmt, width, height);
  1836. return 0;
  1837. fail:
  1838. memset(picture, 0, sizeof(AVPicture));
  1839. return -1;
  1840. }
  1841. void avpicture_free(AVPicture *picture)
  1842. {
  1843. av_free(picture->data[0]);
  1844. }
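/*
 * Usage sketch (editor's note, hypothetical caller): each successful
 * avpicture_alloc() owns one buffer backing every plane, so it is paired with
 * a single avpicture_free().
 *
 *   AVPicture pic;
 *   if (avpicture_alloc(&pic, PIX_FMT_RGB24, 320, 240) < 0)
 *       return;                    // pic has been zeroed on failure
 *   // ... use pic.data[0] / pic.linesize[0] ...
 *   avpicture_free(&pic);
 */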
  1845. /* return true if yuv planar */
  1846. static inline int is_yuv_planar(const PixFmtInfo *ps)
  1847. {
  1848. return (ps->color_type == FF_COLOR_YUV ||
  1849. ps->color_type == FF_COLOR_YUV_JPEG) &&
  1850. ps->pixel_type == FF_PIXEL_PLANAR;
  1851. }
  1852. int av_picture_crop(AVPicture *dst, const AVPicture *src,
  1853. int pix_fmt, int top_band, int left_band)
  1854. {
  1855. int y_shift;
  1856. int x_shift;
  1857. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || !is_yuv_planar(&pix_fmt_info[pix_fmt]))
  1858. return -1;
  1859. y_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  1860. x_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  1861. dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
  1862. dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift);
  1863. dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift);
  1864. dst->linesize[0] = src->linesize[0];
  1865. dst->linesize[1] = src->linesize[1];
  1866. dst->linesize[2] = src->linesize[2];
  1867. return 0;
  1868. }
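/*
 * Worked example (editor's note, hypothetical names): cropping is a pointer
 * adjustment, not a copy, and the band offsets are scaled by the chroma shifts.
 *
 *   AVPicture view;
 *   av_picture_crop(&view, &full, PIX_FMT_YUV420P, 32, 16);
 *   // view.data[0] == full.data[0] + 32*full.linesize[0] + 16
 *   // view.data[1] == full.data[1] + 16*full.linesize[1] + 8   (shifts are 1)
 *   // linesizes are copied unchanged, so 'view' aliases 'full'.
 */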
  1869. int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
  1870. int pix_fmt, int padtop, int padbottom, int padleft, int padright,
  1871. int *color)
  1872. {
  1873. uint8_t *optr;
  1874. int y_shift;
  1875. int x_shift;
  1876. int yheight;
  1877. int i, y;
  1878. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB ||
  1879. !is_yuv_planar(&pix_fmt_info[pix_fmt])) return -1;
  1880. for (i = 0; i < 3; i++) {
  1881. x_shift = i ? pix_fmt_info[pix_fmt].x_chroma_shift : 0;
  1882. y_shift = i ? pix_fmt_info[pix_fmt].y_chroma_shift : 0;
  1883. if (padtop || padleft) {
  1884. memset(dst->data[i], color[i],
  1885. dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift));
  1886. }
  1887. if (padleft || padright) {
  1888. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  1889. (dst->linesize[i] - (padright >> x_shift));
  1890. yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
  1891. for (y = 0; y < yheight; y++) {
  1892. memset(optr, color[i], (padleft + padright) >> x_shift);
  1893. optr += dst->linesize[i];
  1894. }
  1895. }
  1896. if (src) { /* first line */
  1897. uint8_t *iptr = src->data[i];
  1898. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  1899. (padleft >> x_shift);
  1900. memcpy(optr, iptr, (width - padleft - padright) >> x_shift);
  1901. iptr += src->linesize[i];
  1902. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  1903. (dst->linesize[i] - (padright >> x_shift));
  1904. yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
  1905. for (y = 0; y < yheight; y++) {
  1906. memset(optr, color[i], (padleft + padright) >> x_shift);
  1907. memcpy(optr + ((padleft + padright) >> x_shift), iptr,
  1908. (width - padleft - padright) >> x_shift);
  1909. iptr += src->linesize[i];
  1910. optr += dst->linesize[i];
  1911. }
  1912. }
  1913. if (padbottom || padright) {
  1914. optr = dst->data[i] + dst->linesize[i] *
  1915. ((height - padbottom) >> y_shift) - (padright >> x_shift);
  1916. memset(optr, color[i],dst->linesize[i] *
  1917. (padbottom >> y_shift) + (padright >> x_shift));
  1918. }
  1919. }
  1920. return 0;
  1921. }
  1922. #if !CONFIG_SWSCALE
  1923. static uint8_t y_ccir_to_jpeg[256];
  1924. static uint8_t y_jpeg_to_ccir[256];
  1925. static uint8_t c_ccir_to_jpeg[256];
  1926. static uint8_t c_jpeg_to_ccir[256];
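/* These tables remap samples between JPEG (full) range and CCIR 601 (studio)
   range: luma 0..255 <-> 16..235, chroma 0..255 <-> 16..240, i.e. roughly
   y_ccir = 16 + 219 * y_jpeg / 255 and c_ccir = 128 + 224 * (c_jpeg - 128) / 255.
   The exact rounded integer macros come from colorspace.h. */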
  1927. /* init various conversion tables */
  1928. static void img_convert_init(void)
  1929. {
  1930. int i;
  1931. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  1932. for(i = 0;i < 256; i++) {
  1933. y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
  1934. y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
  1935. c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
  1936. c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
  1937. }
  1938. }
1939. /* apply the given lookup table to each pixel */
  1940. static void img_apply_table(uint8_t *dst, int dst_wrap,
  1941. const uint8_t *src, int src_wrap,
  1942. int width, int height, const uint8_t *table1)
  1943. {
  1944. int n;
  1945. const uint8_t *s;
  1946. uint8_t *d;
  1947. const uint8_t *table;
  1948. table = table1;
  1949. for(;height > 0; height--) {
  1950. s = src;
  1951. d = dst;
  1952. n = width;
  1953. while (n >= 4) {
  1954. d[0] = table[s[0]];
  1955. d[1] = table[s[1]];
  1956. d[2] = table[s[2]];
  1957. d[3] = table[s[3]];
  1958. d += 4;
  1959. s += 4;
  1960. n -= 4;
  1961. }
  1962. while (n > 0) {
  1963. d[0] = table[s[0]];
  1964. d++;
  1965. s++;
  1966. n--;
  1967. }
  1968. dst += dst_wrap;
  1969. src += src_wrap;
  1970. }
  1971. }
  1972. /* XXX: use generic filter ? */
  1973. /* XXX: in most cases, the sampling position is incorrect */
  1974. /* 4x1 -> 1x1 */
  1975. static void shrink41(uint8_t *dst, int dst_wrap,
  1976. const uint8_t *src, int src_wrap,
  1977. int width, int height)
  1978. {
  1979. int w;
  1980. const uint8_t *s;
  1981. uint8_t *d;
  1982. for(;height > 0; height--) {
  1983. s = src;
  1984. d = dst;
  1985. for(w = width;w > 0; w--) {
  1986. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  1987. s += 4;
  1988. d++;
  1989. }
  1990. src += src_wrap;
  1991. dst += dst_wrap;
  1992. }
  1993. }
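/* e.g. the four horizontal samples 10, 11, 12, 13 are averaged with rounding:
   (10 + 11 + 12 + 13 + 2) >> 2 = 12 */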
  1994. /* 2x1 -> 1x1 */
  1995. static void shrink21(uint8_t *dst, int dst_wrap,
  1996. const uint8_t *src, int src_wrap,
  1997. int width, int height)
  1998. {
  1999. int w;
  2000. const uint8_t *s;
  2001. uint8_t *d;
  2002. for(;height > 0; height--) {
  2003. s = src;
  2004. d = dst;
  2005. for(w = width;w > 0; w--) {
  2006. d[0] = (s[0] + s[1]) >> 1;
  2007. s += 2;
  2008. d++;
  2009. }
  2010. src += src_wrap;
  2011. dst += dst_wrap;
  2012. }
  2013. }
  2014. /* 1x2 -> 1x1 */
  2015. static void shrink12(uint8_t *dst, int dst_wrap,
  2016. const uint8_t *src, int src_wrap,
  2017. int width, int height)
  2018. {
  2019. int w;
  2020. uint8_t *d;
  2021. const uint8_t *s1, *s2;
  2022. for(;height > 0; height--) {
  2023. s1 = src;
  2024. s2 = s1 + src_wrap;
  2025. d = dst;
  2026. for(w = width;w >= 4; w-=4) {
  2027. d[0] = (s1[0] + s2[0]) >> 1;
  2028. d[1] = (s1[1] + s2[1]) >> 1;
  2029. d[2] = (s1[2] + s2[2]) >> 1;
  2030. d[3] = (s1[3] + s2[3]) >> 1;
  2031. s1 += 4;
  2032. s2 += 4;
  2033. d += 4;
  2034. }
  2035. for(;w > 0; w--) {
  2036. d[0] = (s1[0] + s2[0]) >> 1;
  2037. s1++;
  2038. s2++;
  2039. d++;
  2040. }
  2041. src += 2 * src_wrap;
  2042. dst += dst_wrap;
  2043. }
  2044. }
  2045. static void grow21_line(uint8_t *dst, const uint8_t *src,
  2046. int width)
  2047. {
  2048. int w;
  2049. const uint8_t *s1;
  2050. uint8_t *d;
  2051. s1 = src;
  2052. d = dst;
  2053. for(w = width;w >= 4; w-=4) {
  2054. d[1] = d[0] = s1[0];
  2055. d[3] = d[2] = s1[1];
  2056. s1 += 2;
  2057. d += 4;
  2058. }
  2059. for(;w >= 2; w -= 2) {
  2060. d[1] = d[0] = s1[0];
  2061. s1 ++;
  2062. d += 2;
  2063. }
2064. /* only needed if width is not a multiple of two */
2065. /* XXX: verify that */
  2066. if (w) {
  2067. d[0] = s1[0];
  2068. }
  2069. }
  2070. static void grow41_line(uint8_t *dst, const uint8_t *src,
  2071. int width)
  2072. {
  2073. int w, v;
  2074. const uint8_t *s1;
  2075. uint8_t *d;
  2076. s1 = src;
  2077. d = dst;
  2078. for(w = width;w >= 4; w-=4) {
  2079. v = s1[0];
  2080. d[0] = v;
  2081. d[1] = v;
  2082. d[2] = v;
  2083. d[3] = v;
  2084. s1 ++;
  2085. d += 4;
  2086. }
  2087. }
  2088. /* 1x1 -> 2x1 */
  2089. static void grow21(uint8_t *dst, int dst_wrap,
  2090. const uint8_t *src, int src_wrap,
  2091. int width, int height)
  2092. {
  2093. for(;height > 0; height--) {
  2094. grow21_line(dst, src, width);
  2095. src += src_wrap;
  2096. dst += dst_wrap;
  2097. }
  2098. }
  2099. /* 1x1 -> 1x2 */
  2100. static void grow12(uint8_t *dst, int dst_wrap,
  2101. const uint8_t *src, int src_wrap,
  2102. int width, int height)
  2103. {
  2104. for(;height > 0; height-=2) {
  2105. memcpy(dst, src, width);
  2106. dst += dst_wrap;
  2107. memcpy(dst, src, width);
  2108. dst += dst_wrap;
  2109. src += src_wrap;
  2110. }
  2111. }
  2112. /* 1x1 -> 2x2 */
  2113. static void grow22(uint8_t *dst, int dst_wrap,
  2114. const uint8_t *src, int src_wrap,
  2115. int width, int height)
  2116. {
  2117. for(;height > 0; height--) {
  2118. grow21_line(dst, src, width);
  2119. if (height%2)
  2120. src += src_wrap;
  2121. dst += dst_wrap;
  2122. }
  2123. }
  2124. /* 1x1 -> 4x1 */
  2125. static void grow41(uint8_t *dst, int dst_wrap,
  2126. const uint8_t *src, int src_wrap,
  2127. int width, int height)
  2128. {
  2129. for(;height > 0; height--) {
  2130. grow41_line(dst, src, width);
  2131. src += src_wrap;
  2132. dst += dst_wrap;
  2133. }
  2134. }
  2135. /* 1x1 -> 4x4 */
  2136. static void grow44(uint8_t *dst, int dst_wrap,
  2137. const uint8_t *src, int src_wrap,
  2138. int width, int height)
  2139. {
  2140. for(;height > 0; height--) {
  2141. grow41_line(dst, src, width);
  2142. if ((height & 3) == 1)
  2143. src += src_wrap;
  2144. dst += dst_wrap;
  2145. }
  2146. }
  2147. /* 1x2 -> 2x1 */
  2148. static void conv411(uint8_t *dst, int dst_wrap,
  2149. const uint8_t *src, int src_wrap,
  2150. int width, int height)
  2151. {
  2152. int w, c;
  2153. const uint8_t *s1, *s2;
  2154. uint8_t *d;
  2155. width>>=1;
  2156. for(;height > 0; height--) {
  2157. s1 = src;
  2158. s2 = src + src_wrap;
  2159. d = dst;
  2160. for(w = width;w > 0; w--) {
  2161. c = (s1[0] + s2[0]) >> 1;
  2162. d[0] = c;
  2163. d[1] = c;
  2164. s1++;
  2165. s2++;
  2166. d += 2;
  2167. }
  2168. src += src_wrap * 2;
  2169. dst += dst_wrap;
  2170. }
  2171. }
  2172. /* XXX: always use linesize. Return -1 if not supported */
  2173. int img_convert(AVPicture *dst, int dst_pix_fmt,
  2174. const AVPicture *src, int src_pix_fmt,
  2175. int src_width, int src_height)
  2176. {
  2177. static int initialized;
  2178. int i, ret, dst_width, dst_height, int_pix_fmt;
  2179. const PixFmtInfo *src_pix, *dst_pix;
  2180. const ConvertEntry *ce;
  2181. AVPicture tmp1, *tmp = &tmp1;
  2182. if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
  2183. dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
  2184. return -1;
  2185. if (src_width <= 0 || src_height <= 0)
  2186. return 0;
  2187. if (!initialized) {
  2188. initialized = 1;
  2189. img_convert_init();
  2190. }
  2191. dst_width = src_width;
  2192. dst_height = src_height;
  2193. dst_pix = &pix_fmt_info[dst_pix_fmt];
  2194. src_pix = &pix_fmt_info[src_pix_fmt];
  2195. if (src_pix_fmt == dst_pix_fmt) {
  2196. /* no conversion needed: just copy */
  2197. av_picture_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
  2198. return 0;
  2199. }
  2200. ce = &convert_table[src_pix_fmt][dst_pix_fmt];
  2201. if (ce->convert) {
  2202. /* specific conversion routine */
  2203. ce->convert(dst, src, dst_width, dst_height);
  2204. return 0;
  2205. }
  2206. /* gray to YUV */
  2207. if (is_yuv_planar(dst_pix) &&
  2208. src_pix_fmt == PIX_FMT_GRAY8) {
  2209. int w, h, y;
  2210. uint8_t *d;
  2211. if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
  2212. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2213. src->data[0], src->linesize[0],
  2214. dst_width, dst_height);
  2215. } else {
  2216. img_apply_table(dst->data[0], dst->linesize[0],
  2217. src->data[0], src->linesize[0],
  2218. dst_width, dst_height,
  2219. y_jpeg_to_ccir);
  2220. }
  2221. /* fill U and V with 128 */
  2222. w = dst_width;
  2223. h = dst_height;
  2224. w >>= dst_pix->x_chroma_shift;
  2225. h >>= dst_pix->y_chroma_shift;
  2226. for(i = 1; i <= 2; i++) {
  2227. d = dst->data[i];
  2228. for(y = 0; y< h; y++) {
  2229. memset(d, 128, w);
  2230. d += dst->linesize[i];
  2231. }
  2232. }
  2233. return 0;
  2234. }
  2235. /* YUV to gray */
  2236. if (is_yuv_planar(src_pix) &&
  2237. dst_pix_fmt == PIX_FMT_GRAY8) {
  2238. if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
  2239. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2240. src->data[0], src->linesize[0],
  2241. dst_width, dst_height);
  2242. } else {
  2243. img_apply_table(dst->data[0], dst->linesize[0],
  2244. src->data[0], src->linesize[0],
  2245. dst_width, dst_height,
  2246. y_ccir_to_jpeg);
  2247. }
  2248. return 0;
  2249. }
  2250. /* YUV to YUV planar */
  2251. if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
  2252. int x_shift, y_shift, w, h, xy_shift;
  2253. void (*resize_func)(uint8_t *dst, int dst_wrap,
  2254. const uint8_t *src, int src_wrap,
  2255. int width, int height);
2256. /* compute the chroma size using the larger (more subsampled) of the two chroma shifts */
  2257. w = dst_width;
  2258. h = dst_height;
  2259. if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
  2260. w >>= dst_pix->x_chroma_shift;
  2261. else
  2262. w >>= src_pix->x_chroma_shift;
  2263. if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
  2264. h >>= dst_pix->y_chroma_shift;
  2265. else
  2266. h >>= src_pix->y_chroma_shift;
  2267. x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
  2268. y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
  2269. xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
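/* xy_shift packs the two signed shift deltas into one byte (one nibble each),
   e.g. YUV420P -> YUV444P gives x_shift = y_shift = -1, i.e. 0xff -> grow22,
   and YUV444P -> YUV420P gives x_shift = y_shift = 1, i.e. 0x11 -> ff_shrink22. */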
2270. /* at minimum, filters must exist for conversion from and to the
2271. YUV444 format */
  2272. switch(xy_shift) {
  2273. case 0x00:
  2274. resize_func = ff_img_copy_plane;
  2275. break;
  2276. case 0x10:
  2277. resize_func = shrink21;
  2278. break;
  2279. case 0x20:
  2280. resize_func = shrink41;
  2281. break;
  2282. case 0x01:
  2283. resize_func = shrink12;
  2284. break;
  2285. case 0x11:
  2286. resize_func = ff_shrink22;
  2287. break;
  2288. case 0x22:
  2289. resize_func = ff_shrink44;
  2290. break;
  2291. case 0xf0:
  2292. resize_func = grow21;
  2293. break;
  2294. case 0x0f:
  2295. resize_func = grow12;
  2296. break;
  2297. case 0xe0:
  2298. resize_func = grow41;
  2299. break;
  2300. case 0xff:
  2301. resize_func = grow22;
  2302. break;
  2303. case 0xee:
  2304. resize_func = grow44;
  2305. break;
  2306. case 0xf1:
  2307. resize_func = conv411;
  2308. break;
  2309. default:
  2310. /* currently not handled */
  2311. goto no_chroma_filter;
  2312. }
  2313. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2314. src->data[0], src->linesize[0],
  2315. dst_width, dst_height);
  2316. for(i = 1;i <= 2; i++)
  2317. resize_func(dst->data[i], dst->linesize[i],
  2318. src->data[i], src->linesize[i],
  2319. dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
  2320. /* if yuv color space conversion is needed, we do it here on
  2321. the destination image */
  2322. if (dst_pix->color_type != src_pix->color_type) {
  2323. const uint8_t *y_table, *c_table;
  2324. if (dst_pix->color_type == FF_COLOR_YUV) {
  2325. y_table = y_jpeg_to_ccir;
  2326. c_table = c_jpeg_to_ccir;
  2327. } else {
  2328. y_table = y_ccir_to_jpeg;
  2329. c_table = c_ccir_to_jpeg;
  2330. }
  2331. img_apply_table(dst->data[0], dst->linesize[0],
  2332. dst->data[0], dst->linesize[0],
  2333. dst_width, dst_height,
  2334. y_table);
  2335. for(i = 1;i <= 2; i++)
  2336. img_apply_table(dst->data[i], dst->linesize[i],
  2337. dst->data[i], dst->linesize[i],
  2338. dst_width>>dst_pix->x_chroma_shift,
  2339. dst_height>>dst_pix->y_chroma_shift,
  2340. c_table);
  2341. }
  2342. return 0;
  2343. }
  2344. no_chroma_filter:
  2345. /* try to use an intermediate format */
  2346. if (src_pix_fmt == PIX_FMT_YUYV422 ||
  2347. dst_pix_fmt == PIX_FMT_YUYV422) {
  2348. /* specific case: convert to YUV422P first */
  2349. int_pix_fmt = PIX_FMT_YUV422P;
  2350. } else if (src_pix_fmt == PIX_FMT_UYVY422 ||
  2351. dst_pix_fmt == PIX_FMT_UYVY422) {
  2352. /* specific case: convert to YUV422P first */
  2353. int_pix_fmt = PIX_FMT_YUV422P;
  2354. } else if (src_pix_fmt == PIX_FMT_UYYVYY411 ||
  2355. dst_pix_fmt == PIX_FMT_UYYVYY411) {
  2356. /* specific case: convert to YUV411P first */
  2357. int_pix_fmt = PIX_FMT_YUV411P;
  2358. } else if ((src_pix->color_type == FF_COLOR_GRAY &&
  2359. src_pix_fmt != PIX_FMT_GRAY8) ||
  2360. (dst_pix->color_type == FF_COLOR_GRAY &&
  2361. dst_pix_fmt != PIX_FMT_GRAY8)) {
  2362. /* gray8 is the normalized format */
  2363. int_pix_fmt = PIX_FMT_GRAY8;
  2364. } else if ((is_yuv_planar(src_pix) &&
  2365. src_pix_fmt != PIX_FMT_YUV444P &&
  2366. src_pix_fmt != PIX_FMT_YUVJ444P)) {
  2367. /* yuv444 is the normalized format */
  2368. if (src_pix->color_type == FF_COLOR_YUV_JPEG)
  2369. int_pix_fmt = PIX_FMT_YUVJ444P;
  2370. else
  2371. int_pix_fmt = PIX_FMT_YUV444P;
  2372. } else if ((is_yuv_planar(dst_pix) &&
  2373. dst_pix_fmt != PIX_FMT_YUV444P &&
  2374. dst_pix_fmt != PIX_FMT_YUVJ444P)) {
  2375. /* yuv444 is the normalized format */
  2376. if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
  2377. int_pix_fmt = PIX_FMT_YUVJ444P;
  2378. else
  2379. int_pix_fmt = PIX_FMT_YUV444P;
  2380. } else {
  2381. /* the two formats are rgb or gray8 or yuv[j]444p */
  2382. if (src_pix->is_alpha && dst_pix->is_alpha)
  2383. int_pix_fmt = PIX_FMT_RGB32;
  2384. else
  2385. int_pix_fmt = PIX_FMT_RGB24;
  2386. }
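/* e.g. converting a packed RGB format to YUV420P first goes through a
   YUV444P temporary here; the recursive call below may itself pick a second
   intermediate (such as RGB24) if no direct routine exists for the source,
   so one img_convert() call can cascade through several temporary pictures. */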
  2387. if (src_pix_fmt == int_pix_fmt)
  2388. return -1;
  2389. if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2390. return -1;
  2391. ret = -1;
  2392. if (img_convert(tmp, int_pix_fmt,
  2393. src, src_pix_fmt, src_width, src_height) < 0)
  2394. goto fail1;
  2395. if (img_convert(dst, dst_pix_fmt,
  2396. tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2397. goto fail1;
  2398. ret = 0;
  2399. fail1:
  2400. avpicture_free(tmp);
  2401. return ret;
  2402. }
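/* Typical call (sketch), with both pictures already set up via
   avpicture_alloc()/avpicture_fill() at the same dimensions:

       if (img_convert(&dst_pic, PIX_FMT_YUV420P,
                       &src_pic, PIX_FMT_RGB24, width, height) < 0)
           return -1;   // conversion not supported

   Note that this whole fallback converter is only compiled when FFmpeg is
   built without libswscale (!CONFIG_SWSCALE). */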
  2403. #endif
2404. /* NOTE: we scan all the pixels to get exact information */
  2405. static int get_alpha_info_pal8(const AVPicture *src, int width, int height)
  2406. {
  2407. const unsigned char *p;
  2408. int src_wrap, ret, x, y;
  2409. unsigned int a;
  2410. uint32_t *palette = (uint32_t *)src->data[1];
  2411. p = src->data[0];
  2412. src_wrap = src->linesize[0] - width;
  2413. ret = 0;
  2414. for(y=0;y<height;y++) {
  2415. for(x=0;x<width;x++) {
  2416. a = palette[p[0]] >> 24;
  2417. if (a == 0x00) {
  2418. ret |= FF_ALPHA_TRANSP;
  2419. } else if (a != 0xff) {
  2420. ret |= FF_ALPHA_SEMI_TRANSP;
  2421. }
  2422. p++;
  2423. }
  2424. p += src_wrap;
  2425. }
  2426. return ret;
  2427. }
  2428. int img_get_alpha_info(const AVPicture *src,
  2429. int pix_fmt, int width, int height)
  2430. {
  2431. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  2432. int ret;
  2434. /* no alpha can be represented in format */
  2435. if (!pf->is_alpha)
  2436. return 0;
  2437. switch(pix_fmt) {
  2438. case PIX_FMT_RGB32:
  2439. ret = get_alpha_info_rgb32(src, width, height);
  2440. break;
  2441. case PIX_FMT_PAL8:
  2442. ret = get_alpha_info_pal8(src, width, height);
  2443. break;
  2444. default:
2445. /* we cannot tell, so report both kinds of alpha as present */
  2446. ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
  2447. break;
  2448. }
  2449. return ret;
  2450. }
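/* Example: for a PAL8 image whose palette holds only opaque (0xff) and fully
   transparent (0x00) alpha values, the return value is FF_ALPHA_TRANSP;
   any other alpha value also sets FF_ALPHA_SEMI_TRANSP. */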
  2451. #if HAVE_MMX
  2452. #define DEINT_INPLACE_LINE_LUM \
  2453. movd_m2r(lum_m4[0],mm0);\
  2454. movd_m2r(lum_m3[0],mm1);\
  2455. movd_m2r(lum_m2[0],mm2);\
  2456. movd_m2r(lum_m1[0],mm3);\
  2457. movd_m2r(lum[0],mm4);\
  2458. punpcklbw_r2r(mm7,mm0);\
  2459. movd_r2m(mm2,lum_m4[0]);\
  2460. punpcklbw_r2r(mm7,mm1);\
  2461. punpcklbw_r2r(mm7,mm2);\
  2462. punpcklbw_r2r(mm7,mm3);\
  2463. punpcklbw_r2r(mm7,mm4);\
  2464. paddw_r2r(mm3,mm1);\
  2465. psllw_i2r(1,mm2);\
  2466. paddw_r2r(mm4,mm0);\
  2467. psllw_i2r(2,mm1);\
  2468. paddw_r2r(mm6,mm2);\
  2469. paddw_r2r(mm2,mm1);\
  2470. psubusw_r2r(mm0,mm1);\
  2471. psrlw_i2r(3,mm1);\
  2472. packuswb_r2r(mm7,mm1);\
  2473. movd_r2m(mm1,lum_m2[0]);
  2474. #define DEINT_LINE_LUM \
  2475. movd_m2r(lum_m4[0],mm0);\
  2476. movd_m2r(lum_m3[0],mm1);\
  2477. movd_m2r(lum_m2[0],mm2);\
  2478. movd_m2r(lum_m1[0],mm3);\
  2479. movd_m2r(lum[0],mm4);\
  2480. punpcklbw_r2r(mm7,mm0);\
  2481. punpcklbw_r2r(mm7,mm1);\
  2482. punpcklbw_r2r(mm7,mm2);\
  2483. punpcklbw_r2r(mm7,mm3);\
  2484. punpcklbw_r2r(mm7,mm4);\
  2485. paddw_r2r(mm3,mm1);\
  2486. psllw_i2r(1,mm2);\
  2487. paddw_r2r(mm4,mm0);\
  2488. psllw_i2r(2,mm1);\
  2489. paddw_r2r(mm6,mm2);\
  2490. paddw_r2r(mm2,mm1);\
  2491. psubusw_r2r(mm0,mm1);\
  2492. psrlw_i2r(3,mm1);\
  2493. packuswb_r2r(mm7,mm1);\
  2494. movd_r2m(mm1,dst[0]);
  2495. #endif
  2496. /* filter parameters: [-1 4 2 4 -1] // 8 */
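/* The taps sum to 8, so flat areas are preserved exactly; e.g. with all five
   samples equal to 100: (-100 + 400 + 200 + 400 - 100 + 4) >> 3 = 100. */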
  2497. static void deinterlace_line(uint8_t *dst,
  2498. const uint8_t *lum_m4, const uint8_t *lum_m3,
  2499. const uint8_t *lum_m2, const uint8_t *lum_m1,
  2500. const uint8_t *lum,
  2501. int size)
  2502. {
  2503. #if !HAVE_MMX
  2504. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  2505. int sum;
  2506. for(;size > 0;size--) {
  2507. sum = -lum_m4[0];
  2508. sum += lum_m3[0] << 2;
  2509. sum += lum_m2[0] << 1;
  2510. sum += lum_m1[0] << 2;
  2511. sum += -lum[0];
  2512. dst[0] = cm[(sum + 4) >> 3];
  2513. lum_m4++;
  2514. lum_m3++;
  2515. lum_m2++;
  2516. lum_m1++;
  2517. lum++;
  2518. dst++;
  2519. }
  2520. #else
  2521. {
  2522. pxor_r2r(mm7,mm7);
  2523. movq_m2r(ff_pw_4,mm6);
  2524. }
  2525. for (;size > 3; size-=4) {
  2526. DEINT_LINE_LUM
  2527. lum_m4+=4;
  2528. lum_m3+=4;
  2529. lum_m2+=4;
  2530. lum_m1+=4;
  2531. lum+=4;
  2532. dst+=4;
  2533. }
  2534. #endif
  2535. }
  2536. static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
  2537. int size)
  2538. {
  2539. #if !HAVE_MMX
  2540. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  2541. int sum;
  2542. for(;size > 0;size--) {
  2543. sum = -lum_m4[0];
  2544. sum += lum_m3[0] << 2;
  2545. sum += lum_m2[0] << 1;
  2546. lum_m4[0]=lum_m2[0];
  2547. sum += lum_m1[0] << 2;
  2548. sum += -lum[0];
  2549. lum_m2[0] = cm[(sum + 4) >> 3];
  2550. lum_m4++;
  2551. lum_m3++;
  2552. lum_m2++;
  2553. lum_m1++;
  2554. lum++;
  2555. }
  2556. #else
  2557. {
  2558. pxor_r2r(mm7,mm7);
  2559. movq_m2r(ff_pw_4,mm6);
  2560. }
  2561. for (;size > 3; size-=4) {
  2562. DEINT_INPLACE_LINE_LUM
  2563. lum_m4+=4;
  2564. lum_m3+=4;
  2565. lum_m2+=4;
  2566. lum_m1+=4;
  2567. lum+=4;
  2568. }
  2569. #endif
  2570. }
2571. /* deinterlacing: linear filter with 2 temporal taps and 3 spatial taps.
2572. The top field is copied as is, but the bottom field is deinterlaced
2573. against the top field. */
  2574. static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
  2575. const uint8_t *src1, int src_wrap,
  2576. int width, int height)
  2577. {
  2578. const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
  2579. int y;
  2580. src_m2 = src1;
  2581. src_m1 = src1;
  2582. src_0=&src_m1[src_wrap];
  2583. src_p1=&src_0[src_wrap];
  2584. src_p2=&src_p1[src_wrap];
  2585. for(y=0;y<(height-2);y+=2) {
  2586. memcpy(dst,src_m1,width);
  2587. dst += dst_wrap;
  2588. deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
  2589. src_m2 = src_0;
  2590. src_m1 = src_p1;
  2591. src_0 = src_p2;
  2592. src_p1 += 2*src_wrap;
  2593. src_p2 += 2*src_wrap;
  2594. dst += dst_wrap;
  2595. }
  2596. memcpy(dst,src_m1,width);
  2597. dst += dst_wrap;
  2598. /* do last line */
  2599. deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
  2600. }
  2601. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  2602. int width, int height)
  2603. {
  2604. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  2605. int y;
  2606. uint8_t *buf;
2607. buf = (uint8_t*)av_malloc(width);
if (!buf)
    return; /* allocation failure: leave the picture untouched */
  2608. src_m1 = src1;
  2609. memcpy(buf,src_m1,width);
  2610. src_0=&src_m1[src_wrap];
  2611. src_p1=&src_0[src_wrap];
  2612. src_p2=&src_p1[src_wrap];
  2613. for(y=0;y<(height-2);y+=2) {
  2614. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  2615. src_m1 = src_p1;
  2616. src_0 = src_p2;
  2617. src_p1 += 2*src_wrap;
  2618. src_p2 += 2*src_wrap;
  2619. }
  2620. /* do last line */
  2621. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  2622. av_free(buf);
  2623. }
  2624. int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
  2625. int pix_fmt, int width, int height)
  2626. {
  2627. int i;
  2628. if (pix_fmt != PIX_FMT_YUV420P &&
  2629. pix_fmt != PIX_FMT_YUV422P &&
  2630. pix_fmt != PIX_FMT_YUV444P &&
  2631. pix_fmt != PIX_FMT_YUV411P &&
  2632. pix_fmt != PIX_FMT_GRAY8)
  2633. return -1;
  2634. if ((width & 3) != 0 || (height & 3) != 0)
  2635. return -1;
  2636. for(i=0;i<3;i++) {
  2637. if (i == 1) {
  2638. switch(pix_fmt) {
  2639. case PIX_FMT_YUV420P:
  2640. width >>= 1;
  2641. height >>= 1;
  2642. break;
  2643. case PIX_FMT_YUV422P:
  2644. width >>= 1;
  2645. break;
  2646. case PIX_FMT_YUV411P:
  2647. width >>= 2;
  2648. break;
  2649. default:
  2650. break;
  2651. }
  2652. if (pix_fmt == PIX_FMT_GRAY8) {
  2653. break;
  2654. }
  2655. }
  2656. if (src == dst) {
  2657. deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i],
  2658. width, height);
  2659. } else {
  2660. deinterlace_bottom_field(dst->data[i],dst->linesize[i],
  2661. src->data[i], src->linesize[i],
  2662. width, height);
  2663. }
  2664. }
  2665. emms_c();
  2666. return 0;
  2667. }
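/* Illustrative in-place use (sketch): deinterlace a decoded YUV420P frame
   whose width and height are multiples of 4, as required by the checks above:

       avpicture_deinterlace(&pic, &pic, PIX_FMT_YUV420P, width, height);

   Passing the same picture as src and dst selects the in-place code path. */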