  1. /*
  2. * Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file imgconvert.c
  23. * misc image conversion routines
  24. */
  25. /* TODO:
  26. * - write 'ffimg' program to test all the image related stuff
  27. * - move all api to slice based system
  28. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  29. */
  30. #include "avcodec.h"
  31. #include "dsputil.h"
  32. #include "colorspace.h"
  33. #ifdef HAVE_MMX
  34. #include "i386/mmx.h"
  35. #endif
  36. #define xglue(x, y) x ## y
  37. #define glue(x, y) xglue(x, y)
  38. #define FF_COLOR_RGB 0 /**< RGB color space */
  39. #define FF_COLOR_GRAY 1 /**< gray color space */
  40. #define FF_COLOR_YUV 2 /**< YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
  41. #define FF_COLOR_YUV_JPEG 3 /**< YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
  42. #define FF_PIXEL_PLANAR 0 /**< each channel has one component in AVPicture */
  43. #define FF_PIXEL_PACKED 1 /**< only one component containing all the channels */
  44. #define FF_PIXEL_PALETTE 2 /**< one component containing indices into a palette */
  45. typedef struct PixFmtInfo {
  46. const char *name;
  47. uint8_t nb_channels; /**< number of channels (including alpha) */
  48. uint8_t color_type; /**< color type (see FF_COLOR_xxx constants) */
  49. uint8_t pixel_type; /**< pixel storage type (see FF_PIXEL_xxx constants) */
  50. uint8_t is_alpha : 1; /**< true if alpha can be specified */
  51. uint8_t x_chroma_shift; /**< X chroma subsampling factor is 2 ^ shift */
  52. uint8_t y_chroma_shift; /**< Y chroma subsampling factor is 2 ^ shift */
  53. uint8_t depth; /**< bit depth of the color components */
  54. } PixFmtInfo;
  55. /* this table gives more information about formats */
  56. static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
  57. /* YUV formats */
  58. [PIX_FMT_YUV420P] = {
  59. .name = "yuv420p",
  60. .nb_channels = 3,
  61. .color_type = FF_COLOR_YUV,
  62. .pixel_type = FF_PIXEL_PLANAR,
  63. .depth = 8,
  64. .x_chroma_shift = 1, .y_chroma_shift = 1,
  65. },
  66. [PIX_FMT_YUV422P] = {
  67. .name = "yuv422p",
  68. .nb_channels = 3,
  69. .color_type = FF_COLOR_YUV,
  70. .pixel_type = FF_PIXEL_PLANAR,
  71. .depth = 8,
  72. .x_chroma_shift = 1, .y_chroma_shift = 0,
  73. },
  74. [PIX_FMT_YUV444P] = {
  75. .name = "yuv444p",
  76. .nb_channels = 3,
  77. .color_type = FF_COLOR_YUV,
  78. .pixel_type = FF_PIXEL_PLANAR,
  79. .depth = 8,
  80. .x_chroma_shift = 0, .y_chroma_shift = 0,
  81. },
  82. [PIX_FMT_YUYV422] = {
  83. .name = "yuyv422",
  84. .nb_channels = 1,
  85. .color_type = FF_COLOR_YUV,
  86. .pixel_type = FF_PIXEL_PACKED,
  87. .depth = 8,
  88. .x_chroma_shift = 1, .y_chroma_shift = 0,
  89. },
  90. [PIX_FMT_UYVY422] = {
  91. .name = "uyvy422",
  92. .nb_channels = 1,
  93. .color_type = FF_COLOR_YUV,
  94. .pixel_type = FF_PIXEL_PACKED,
  95. .depth = 8,
  96. .x_chroma_shift = 1, .y_chroma_shift = 0,
  97. },
  98. [PIX_FMT_YUV410P] = {
  99. .name = "yuv410p",
  100. .nb_channels = 3,
  101. .color_type = FF_COLOR_YUV,
  102. .pixel_type = FF_PIXEL_PLANAR,
  103. .depth = 8,
  104. .x_chroma_shift = 2, .y_chroma_shift = 2,
  105. },
  106. [PIX_FMT_YUV411P] = {
  107. .name = "yuv411p",
  108. .nb_channels = 3,
  109. .color_type = FF_COLOR_YUV,
  110. .pixel_type = FF_PIXEL_PLANAR,
  111. .depth = 8,
  112. .x_chroma_shift = 2, .y_chroma_shift = 0,
  113. },
  114. [PIX_FMT_YUV440P] = {
  115. .name = "yuv440p",
  116. .nb_channels = 3,
  117. .color_type = FF_COLOR_YUV,
  118. .pixel_type = FF_PIXEL_PLANAR,
  119. .depth = 8,
  120. .x_chroma_shift = 0, .y_chroma_shift = 1,
  121. },
  122. /* YUV formats with alpha plane */
  123. [PIX_FMT_YUVA420P] = {
  124. .name = "yuva420p",
  125. .nb_channels = 4,
  126. .color_type = FF_COLOR_YUV,
  127. .pixel_type = FF_PIXEL_PLANAR,
  128. .depth = 8,
  129. .x_chroma_shift = 1, .y_chroma_shift = 1,
  130. },
  131. /* JPEG YUV */
  132. [PIX_FMT_YUVJ420P] = {
  133. .name = "yuvj420p",
  134. .nb_channels = 3,
  135. .color_type = FF_COLOR_YUV_JPEG,
  136. .pixel_type = FF_PIXEL_PLANAR,
  137. .depth = 8,
  138. .x_chroma_shift = 1, .y_chroma_shift = 1,
  139. },
  140. [PIX_FMT_YUVJ422P] = {
  141. .name = "yuvj422p",
  142. .nb_channels = 3,
  143. .color_type = FF_COLOR_YUV_JPEG,
  144. .pixel_type = FF_PIXEL_PLANAR,
  145. .depth = 8,
  146. .x_chroma_shift = 1, .y_chroma_shift = 0,
  147. },
  148. [PIX_FMT_YUVJ444P] = {
  149. .name = "yuvj444p",
  150. .nb_channels = 3,
  151. .color_type = FF_COLOR_YUV_JPEG,
  152. .pixel_type = FF_PIXEL_PLANAR,
  153. .depth = 8,
  154. .x_chroma_shift = 0, .y_chroma_shift = 0,
  155. },
  156. [PIX_FMT_YUVJ440P] = {
  157. .name = "yuvj440p",
  158. .nb_channels = 3,
  159. .color_type = FF_COLOR_YUV_JPEG,
  160. .pixel_type = FF_PIXEL_PLANAR,
  161. .depth = 8,
  162. .x_chroma_shift = 0, .y_chroma_shift = 1,
  163. },
  164. /* RGB formats */
  165. [PIX_FMT_RGB24] = {
  166. .name = "rgb24",
  167. .nb_channels = 3,
  168. .color_type = FF_COLOR_RGB,
  169. .pixel_type = FF_PIXEL_PACKED,
  170. .depth = 8,
  171. .x_chroma_shift = 0, .y_chroma_shift = 0,
  172. },
  173. [PIX_FMT_BGR24] = {
  174. .name = "bgr24",
  175. .nb_channels = 3,
  176. .color_type = FF_COLOR_RGB,
  177. .pixel_type = FF_PIXEL_PACKED,
  178. .depth = 8,
  179. .x_chroma_shift = 0, .y_chroma_shift = 0,
  180. },
  181. [PIX_FMT_RGB32] = {
  182. .name = "rgb32",
  183. .nb_channels = 4, .is_alpha = 1,
  184. .color_type = FF_COLOR_RGB,
  185. .pixel_type = FF_PIXEL_PACKED,
  186. .depth = 8,
  187. .x_chroma_shift = 0, .y_chroma_shift = 0,
  188. },
  189. [PIX_FMT_RGB565] = {
  190. .name = "rgb565",
  191. .nb_channels = 3,
  192. .color_type = FF_COLOR_RGB,
  193. .pixel_type = FF_PIXEL_PACKED,
  194. .depth = 5,
  195. .x_chroma_shift = 0, .y_chroma_shift = 0,
  196. },
  197. [PIX_FMT_RGB555] = {
  198. .name = "rgb555",
  199. .nb_channels = 3,
  200. .color_type = FF_COLOR_RGB,
  201. .pixel_type = FF_PIXEL_PACKED,
  202. .depth = 5,
  203. .x_chroma_shift = 0, .y_chroma_shift = 0,
  204. },
  205. /* gray / mono formats */
  206. [PIX_FMT_GRAY16BE] = {
  207. .name = "gray16be",
  208. .nb_channels = 1,
  209. .color_type = FF_COLOR_GRAY,
  210. .pixel_type = FF_PIXEL_PLANAR,
  211. .depth = 16,
  212. },
  213. [PIX_FMT_GRAY16LE] = {
  214. .name = "gray16le",
  215. .nb_channels = 1,
  216. .color_type = FF_COLOR_GRAY,
  217. .pixel_type = FF_PIXEL_PLANAR,
  218. .depth = 16,
  219. },
  220. [PIX_FMT_GRAY8] = {
  221. .name = "gray",
  222. .nb_channels = 1,
  223. .color_type = FF_COLOR_GRAY,
  224. .pixel_type = FF_PIXEL_PLANAR,
  225. .depth = 8,
  226. },
  227. [PIX_FMT_MONOWHITE] = {
  228. .name = "monow",
  229. .nb_channels = 1,
  230. .color_type = FF_COLOR_GRAY,
  231. .pixel_type = FF_PIXEL_PLANAR,
  232. .depth = 1,
  233. },
  234. [PIX_FMT_MONOBLACK] = {
  235. .name = "monob",
  236. .nb_channels = 1,
  237. .color_type = FF_COLOR_GRAY,
  238. .pixel_type = FF_PIXEL_PLANAR,
  239. .depth = 1,
  240. },
  241. /* paletted formats */
  242. [PIX_FMT_PAL8] = {
  243. .name = "pal8",
  244. .nb_channels = 4, .is_alpha = 1,
  245. .color_type = FF_COLOR_RGB,
  246. .pixel_type = FF_PIXEL_PALETTE,
  247. .depth = 8,
  248. },
  249. [PIX_FMT_XVMC_MPEG2_MC] = {
  250. .name = "xvmcmc",
  251. },
  252. [PIX_FMT_XVMC_MPEG2_IDCT] = {
  253. .name = "xvmcidct",
  254. },
  255. [PIX_FMT_UYYVYY411] = {
  256. .name = "uyyvyy411",
  257. .nb_channels = 1,
  258. .color_type = FF_COLOR_YUV,
  259. .pixel_type = FF_PIXEL_PACKED,
  260. .depth = 8,
  261. .x_chroma_shift = 2, .y_chroma_shift = 0,
  262. },
  263. [PIX_FMT_BGR32] = {
  264. .name = "bgr32",
  265. .nb_channels = 4, .is_alpha = 1,
  266. .color_type = FF_COLOR_RGB,
  267. .pixel_type = FF_PIXEL_PACKED,
  268. .depth = 8,
  269. .x_chroma_shift = 0, .y_chroma_shift = 0,
  270. },
  271. [PIX_FMT_BGR565] = {
  272. .name = "bgr565",
  273. .nb_channels = 3,
  274. .color_type = FF_COLOR_RGB,
  275. .pixel_type = FF_PIXEL_PACKED,
  276. .depth = 5,
  277. .x_chroma_shift = 0, .y_chroma_shift = 0,
  278. },
  279. [PIX_FMT_BGR555] = {
  280. .name = "bgr555",
  281. .nb_channels = 3,
  282. .color_type = FF_COLOR_RGB,
  283. .pixel_type = FF_PIXEL_PACKED,
  284. .depth = 5,
  285. .x_chroma_shift = 0, .y_chroma_shift = 0,
  286. },
  287. [PIX_FMT_RGB8] = {
  288. .name = "rgb8",
  289. .nb_channels = 1,
  290. .color_type = FF_COLOR_RGB,
  291. .pixel_type = FF_PIXEL_PACKED,
  292. .depth = 8,
  293. .x_chroma_shift = 0, .y_chroma_shift = 0,
  294. },
  295. [PIX_FMT_RGB4] = {
  296. .name = "rgb4",
  297. .nb_channels = 1,
  298. .color_type = FF_COLOR_RGB,
  299. .pixel_type = FF_PIXEL_PACKED,
  300. .depth = 4,
  301. .x_chroma_shift = 0, .y_chroma_shift = 0,
  302. },
  303. [PIX_FMT_RGB4_BYTE] = {
  304. .name = "rgb4_byte",
  305. .nb_channels = 1,
  306. .color_type = FF_COLOR_RGB,
  307. .pixel_type = FF_PIXEL_PACKED,
  308. .depth = 8,
  309. .x_chroma_shift = 0, .y_chroma_shift = 0,
  310. },
  311. [PIX_FMT_BGR8] = {
  312. .name = "bgr8",
  313. .nb_channels = 1,
  314. .color_type = FF_COLOR_RGB,
  315. .pixel_type = FF_PIXEL_PACKED,
  316. .depth = 8,
  317. .x_chroma_shift = 0, .y_chroma_shift = 0,
  318. },
  319. [PIX_FMT_BGR4] = {
  320. .name = "bgr4",
  321. .nb_channels = 1,
  322. .color_type = FF_COLOR_RGB,
  323. .pixel_type = FF_PIXEL_PACKED,
  324. .depth = 4,
  325. .x_chroma_shift = 0, .y_chroma_shift = 0,
  326. },
  327. [PIX_FMT_BGR4_BYTE] = {
  328. .name = "bgr4_byte",
  329. .nb_channels = 1,
  330. .color_type = FF_COLOR_RGB,
  331. .pixel_type = FF_PIXEL_PACKED,
  332. .depth = 8,
  333. .x_chroma_shift = 0, .y_chroma_shift = 0,
  334. },
  335. [PIX_FMT_NV12] = {
  336. .name = "nv12",
  337. .nb_channels = 2,
  338. .color_type = FF_COLOR_YUV,
  339. .pixel_type = FF_PIXEL_PLANAR,
  340. .depth = 8,
  341. .x_chroma_shift = 1, .y_chroma_shift = 1,
  342. },
  343. [PIX_FMT_NV21] = {
  344. .name = "nv12",
  345. .nb_channels = 2,
  346. .color_type = FF_COLOR_YUV,
  347. .pixel_type = FF_PIXEL_PLANAR,
  348. .depth = 8,
  349. .x_chroma_shift = 1, .y_chroma_shift = 1,
  350. },
  351. [PIX_FMT_BGR32_1] = {
  352. .name = "bgr32_1",
  353. .nb_channels = 4, .is_alpha = 1,
  354. .color_type = FF_COLOR_RGB,
  355. .pixel_type = FF_PIXEL_PACKED,
  356. .depth = 8,
  357. .x_chroma_shift = 0, .y_chroma_shift = 0,
  358. },
  359. [PIX_FMT_RGB32_1] = {
  360. .name = "rgb32_1",
  361. .nb_channels = 4, .is_alpha = 1,
  362. .color_type = FF_COLOR_RGB,
  363. .pixel_type = FF_PIXEL_PACKED,
  364. .depth = 8,
  365. .x_chroma_shift = 0, .y_chroma_shift = 0,
  366. },
  367. };
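/**
 * Return the chroma subsampling shifts for the given pixel format:
 * the chroma planes are subsampled by 2^(*h_shift) horizontally and
 * 2^(*v_shift) vertically relative to the luma plane.
 */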
  368. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  369. {
  370. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  371. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  372. }
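/**
 * Return the canonical name of the given pixel format, or NULL if
 * pix_fmt is out of range.
 */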
  373. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  374. {
  375. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  376. return NULL;
  377. else
  378. return pix_fmt_info[pix_fmt].name;
  379. }
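/**
 * Return the pixel format corresponding to the given name, or
 * PIX_FMT_NONE if no format with that name is known.
 */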
  380. enum PixelFormat avcodec_get_pix_fmt(const char* name)
  381. {
  382. int i;
  383. for (i=0; i < PIX_FMT_NB; i++)
  384. if (!strcmp(pix_fmt_info[i].name, name))
  385. return i;
  386. return PIX_FMT_NONE;
  387. }
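/**
 * Write into buf a one-line description of the given pixel format;
 * if pix_fmt is negative, a column header line is written instead.
 */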
  388. void avcodec_pix_fmt_string (char *buf, int buf_size, int pix_fmt)
  389. {
  390. /* print header */
  391. if (pix_fmt < 0)
  392. snprintf (buf, buf_size,
  393. "name " " nb_channels" " depth" " is_alpha"
  394. );
  395. else{
  396. PixFmtInfo info= pix_fmt_info[pix_fmt];
  397. char is_alpha_char= info.is_alpha ? 'y' : 'n';
  398. snprintf (buf, buf_size,
  399. "%-10s" " %1d " " %2d " " %c ",
  400. info.name,
  401. info.nb_channels,
  402. info.depth,
  403. is_alpha_char
  404. );
  405. }
  406. }
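/**
 * Fill picture->linesize[] for an image of the given pixel format and
 * width. Return 0 on success, -1 for unsupported pixel formats.
 */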
  407. int ff_fill_linesize(AVPicture *picture, int pix_fmt, int width)
  408. {
  409. int w2;
  410. const PixFmtInfo *pinfo;
  411. memset(picture->linesize, 0, sizeof(picture->linesize));
  412. pinfo = &pix_fmt_info[pix_fmt];
  413. switch(pix_fmt) {
  414. case PIX_FMT_YUV420P:
  415. case PIX_FMT_YUV422P:
  416. case PIX_FMT_YUV444P:
  417. case PIX_FMT_YUV410P:
  418. case PIX_FMT_YUV411P:
  419. case PIX_FMT_YUV440P:
  420. case PIX_FMT_YUVJ420P:
  421. case PIX_FMT_YUVJ422P:
  422. case PIX_FMT_YUVJ444P:
  423. case PIX_FMT_YUVJ440P:
  424. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  425. picture->linesize[0] = width;
  426. picture->linesize[1] = w2;
  427. picture->linesize[2] = w2;
  428. break;
  429. case PIX_FMT_YUVA420P:
  430. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  431. picture->linesize[0] = width;
  432. picture->linesize[1] = w2;
  433. picture->linesize[2] = w2;
  434. picture->linesize[3] = width;
  435. break;
  436. case PIX_FMT_NV12:
  437. case PIX_FMT_NV21:
  438. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  439. picture->linesize[0] = width;
  440. picture->linesize[1] = w2;
  441. break;
  442. case PIX_FMT_RGB24:
  443. case PIX_FMT_BGR24:
  444. picture->linesize[0] = width * 3;
  445. break;
  446. case PIX_FMT_RGB32:
  447. case PIX_FMT_BGR32:
  448. case PIX_FMT_RGB32_1:
  449. case PIX_FMT_BGR32_1:
  450. picture->linesize[0] = width * 4;
  451. break;
  452. case PIX_FMT_GRAY16BE:
  453. case PIX_FMT_GRAY16LE:
  454. case PIX_FMT_BGR555:
  455. case PIX_FMT_BGR565:
  456. case PIX_FMT_RGB555:
  457. case PIX_FMT_RGB565:
  458. case PIX_FMT_YUYV422:
  459. picture->linesize[0] = width * 2;
  460. break;
  461. case PIX_FMT_UYVY422:
  462. picture->linesize[0] = width * 2;
  463. break;
  464. case PIX_FMT_UYYVYY411:
  465. picture->linesize[0] = width + width/2;
  466. break;
  467. case PIX_FMT_RGB8:
  468. case PIX_FMT_BGR8:
  469. case PIX_FMT_RGB4_BYTE:
  470. case PIX_FMT_BGR4_BYTE:
  471. case PIX_FMT_GRAY8:
  472. picture->linesize[0] = width;
  473. break;
  474. case PIX_FMT_RGB4:
  475. case PIX_FMT_BGR4:
  476. picture->linesize[0] = width / 2;
  477. break;
  478. case PIX_FMT_MONOWHITE:
  479. case PIX_FMT_MONOBLACK:
  480. picture->linesize[0] = (width + 7) >> 3;
  481. break;
  482. case PIX_FMT_PAL8:
  483. picture->linesize[0] = width;
  484. picture->linesize[1] = 4;
  485. break;
  486. default:
  487. return -1;
  488. }
  489. return 0;
  490. }
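/**
 * Fill picture->data[] so that all planes point into the buffer at ptr,
 * using the linesizes already stored in picture. Return the size in
 * bytes of the buffer needed, or -1 for unsupported pixel formats.
 */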
  491. int ff_fill_pointer(AVPicture *picture, uint8_t *ptr, int pix_fmt,
  492. int height)
  493. {
  494. int size, h2, size2;
  495. const PixFmtInfo *pinfo;
  496. pinfo = &pix_fmt_info[pix_fmt];
  497. size = picture->linesize[0] * height;
  498. switch(pix_fmt) {
  499. case PIX_FMT_YUV420P:
  500. case PIX_FMT_YUV422P:
  501. case PIX_FMT_YUV444P:
  502. case PIX_FMT_YUV410P:
  503. case PIX_FMT_YUV411P:
  504. case PIX_FMT_YUV440P:
  505. case PIX_FMT_YUVJ420P:
  506. case PIX_FMT_YUVJ422P:
  507. case PIX_FMT_YUVJ444P:
  508. case PIX_FMT_YUVJ440P:
  509. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  510. size2 = picture->linesize[1] * h2;
  511. picture->data[0] = ptr;
  512. picture->data[1] = picture->data[0] + size;
  513. picture->data[2] = picture->data[1] + size2;
  514. picture->data[3] = NULL;
  515. return size + 2 * size2;
  516. case PIX_FMT_YUVA420P:
  517. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  518. size2 = picture->linesize[1] * h2;
  519. picture->data[0] = ptr;
  520. picture->data[1] = picture->data[0] + size;
  521. picture->data[2] = picture->data[1] + size2;
  522. picture->data[3] = picture->data[1] + size2 + size2;
  523. return 2 * size + 2 * size2;
  524. case PIX_FMT_NV12:
  525. case PIX_FMT_NV21:
  526. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  527. size2 = picture->linesize[1] * h2 * 2;
  528. picture->data[0] = ptr;
  529. picture->data[1] = picture->data[0] + size;
  530. picture->data[2] = NULL;
  531. picture->data[3] = NULL;
  532. return size + 2 * size2;
  533. case PIX_FMT_RGB24:
  534. case PIX_FMT_BGR24:
  535. case PIX_FMT_RGB32:
  536. case PIX_FMT_BGR32:
  537. case PIX_FMT_RGB32_1:
  538. case PIX_FMT_BGR32_1:
  539. case PIX_FMT_GRAY16BE:
  540. case PIX_FMT_GRAY16LE:
  541. case PIX_FMT_BGR555:
  542. case PIX_FMT_BGR565:
  543. case PIX_FMT_RGB555:
  544. case PIX_FMT_RGB565:
  545. case PIX_FMT_YUYV422:
  546. case PIX_FMT_UYVY422:
  547. case PIX_FMT_UYYVYY411:
  548. case PIX_FMT_RGB8:
  549. case PIX_FMT_BGR8:
  550. case PIX_FMT_RGB4_BYTE:
  551. case PIX_FMT_BGR4_BYTE:
  552. case PIX_FMT_GRAY8:
  553. case PIX_FMT_RGB4:
  554. case PIX_FMT_BGR4:
  555. case PIX_FMT_MONOWHITE:
  556. case PIX_FMT_MONOBLACK:
  557. picture->data[0] = ptr;
  558. picture->data[1] = NULL;
  559. picture->data[2] = NULL;
  560. picture->data[3] = NULL;
  561. return size;
  562. case PIX_FMT_PAL8:
  563. size2 = (size + 3) & ~3;
  564. picture->data[0] = ptr;
  565. picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
  566. picture->data[2] = NULL;
  567. picture->data[3] = NULL;
  568. return size2 + 256 * 4;
  569. default:
  570. picture->data[0] = NULL;
  571. picture->data[1] = NULL;
  572. picture->data[2] = NULL;
  573. picture->data[3] = NULL;
  574. return -1;
  575. }
  576. }
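/**
 * Set up the data pointers and linesizes of picture so that it describes
 * an image of the given pixel format and size stored in ptr. ptr may be
 * NULL when only the required size is of interest (see
 * avpicture_get_size()). Return the required buffer size, or -1 on error.
 */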
  577. int avpicture_fill(AVPicture *picture, uint8_t *ptr,
  578. int pix_fmt, int width, int height)
  579. {
  580. if(avcodec_check_dimensions(NULL, width, height))
  581. return -1;
  582. if (ff_fill_linesize(picture, pix_fmt, width))
  583. return -1;
  584. return ff_fill_pointer(picture, ptr, pix_fmt, height);
  585. }
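/**
 * Copy the image described by src into dest as one contiguous buffer,
 * plane after plane (plus the palette for PIX_FMT_PAL8). Return the
 * number of bytes written, or -1 if dest_size is too small.
 */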
  586. int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
  587. unsigned char *dest, int dest_size)
  588. {
  589. const PixFmtInfo* pf = &pix_fmt_info[pix_fmt];
  590. int i, j, w, h, data_planes;
  591. const unsigned char* s;
  592. int size = avpicture_get_size(pix_fmt, width, height);
  593. if (size > dest_size || size < 0)
  594. return -1;
  595. if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
  596. if (pix_fmt == PIX_FMT_YUYV422 ||
  597. pix_fmt == PIX_FMT_UYVY422 ||
  598. pix_fmt == PIX_FMT_BGR565 ||
  599. pix_fmt == PIX_FMT_BGR555 ||
  600. pix_fmt == PIX_FMT_RGB565 ||
  601. pix_fmt == PIX_FMT_RGB555)
  602. w = width * 2;
  603. else if (pix_fmt == PIX_FMT_UYYVYY411)
  604. w = width + width/2;
  605. else if (pix_fmt == PIX_FMT_PAL8)
  606. w = width;
  607. else
  608. w = width * (pf->depth * pf->nb_channels / 8);
  609. data_planes = 1;
  610. h = height;
  611. } else {
  612. data_planes = pf->nb_channels;
  613. w = (width*pf->depth + 7)/8;
  614. h = height;
  615. }
  616. for (i=0; i<data_planes; i++) {
  617. if (i == 1) {
  618. w = width >> pf->x_chroma_shift;
  619. h = height >> pf->y_chroma_shift;
  620. }
  621. s = src->data[i];
  622. for(j=0; j<h; j++) {
  623. memcpy(dest, s, w);
  624. dest += w;
  625. s += src->linesize[i];
  626. }
  627. }
  628. if (pf->pixel_type == FF_PIXEL_PALETTE)
  629. memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);
  630. return size;
  631. }
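/**
 * Return the size in bytes of the buffer needed to hold an image of the
 * given pixel format and dimensions, or a negative value on error.
 */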
  632. int avpicture_get_size(int pix_fmt, int width, int height)
  633. {
  634. AVPicture dummy_pict;
  635. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  636. }
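/**
 * Return a bitmask of FF_LOSS_* flags describing what would be lost when
 * converting from src_pix_fmt to dst_pix_fmt. has_alpha indicates whether
 * the alpha channel of the source is actually used.
 */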
  637. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  638. int has_alpha)
  639. {
  640. const PixFmtInfo *pf, *ps;
  641. int loss;
  642. ps = &pix_fmt_info[src_pix_fmt];
  643. pf = &pix_fmt_info[dst_pix_fmt];
  644. /* compute loss */
  645. loss = 0;
  646. pf = &pix_fmt_info[dst_pix_fmt];
  647. if (pf->depth < ps->depth ||
  648. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  649. loss |= FF_LOSS_DEPTH;
  650. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  651. pf->y_chroma_shift > ps->y_chroma_shift)
  652. loss |= FF_LOSS_RESOLUTION;
  653. switch(pf->color_type) {
  654. case FF_COLOR_RGB:
  655. if (ps->color_type != FF_COLOR_RGB &&
  656. ps->color_type != FF_COLOR_GRAY)
  657. loss |= FF_LOSS_COLORSPACE;
  658. break;
  659. case FF_COLOR_GRAY:
  660. if (ps->color_type != FF_COLOR_GRAY)
  661. loss |= FF_LOSS_COLORSPACE;
  662. break;
  663. case FF_COLOR_YUV:
  664. if (ps->color_type != FF_COLOR_YUV)
  665. loss |= FF_LOSS_COLORSPACE;
  666. break;
  667. case FF_COLOR_YUV_JPEG:
  668. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  669. ps->color_type != FF_COLOR_YUV &&
  670. ps->color_type != FF_COLOR_GRAY)
  671. loss |= FF_LOSS_COLORSPACE;
  672. break;
  673. default:
  674. /* fail safe test */
  675. if (ps->color_type != pf->color_type)
  676. loss |= FF_LOSS_COLORSPACE;
  677. break;
  678. }
  679. if (pf->color_type == FF_COLOR_GRAY &&
  680. ps->color_type != FF_COLOR_GRAY)
  681. loss |= FF_LOSS_CHROMA;
  682. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  683. loss |= FF_LOSS_ALPHA;
  684. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  685. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  686. loss |= FF_LOSS_COLORQUANT;
  687. return loss;
  688. }
  689. static int avg_bits_per_pixel(int pix_fmt)
  690. {
  691. int bits;
  692. const PixFmtInfo *pf;
  693. pf = &pix_fmt_info[pix_fmt];
  694. switch(pf->pixel_type) {
  695. case FF_PIXEL_PACKED:
  696. switch(pix_fmt) {
  697. case PIX_FMT_YUYV422:
  698. case PIX_FMT_UYVY422:
  699. case PIX_FMT_RGB565:
  700. case PIX_FMT_RGB555:
  701. case PIX_FMT_BGR565:
  702. case PIX_FMT_BGR555:
  703. bits = 16;
  704. break;
  705. case PIX_FMT_UYYVYY411:
  706. bits = 12;
  707. break;
  708. default:
  709. bits = pf->depth * pf->nb_channels;
  710. break;
  711. }
  712. break;
  713. case FF_PIXEL_PLANAR:
  714. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  715. bits = pf->depth * pf->nb_channels;
  716. } else {
  717. bits = pf->depth + ((2 * pf->depth) >>
  718. (pf->x_chroma_shift + pf->y_chroma_shift));
  719. }
  720. break;
  721. case FF_PIXEL_PALETTE:
  722. bits = 8;
  723. break;
  724. default:
  725. bits = -1;
  726. break;
  727. }
  728. return bits;
  729. }
  730. static int avcodec_find_best_pix_fmt1(int64_t pix_fmt_mask,
  731. int src_pix_fmt,
  732. int has_alpha,
  733. int loss_mask)
  734. {
  735. int dist, i, loss, min_dist, dst_pix_fmt;
  736. /* find exact color match with smallest size */
  737. dst_pix_fmt = -1;
  738. min_dist = 0x7fffffff;
  739. for(i = 0;i < PIX_FMT_NB; i++) {
  740. if (pix_fmt_mask & (1 << i)) {
  741. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  742. if (loss == 0) {
  743. dist = avg_bits_per_pixel(i);
  744. if (dist < min_dist) {
  745. min_dist = dist;
  746. dst_pix_fmt = i;
  747. }
  748. }
  749. }
  750. }
  751. return dst_pix_fmt;
  752. }
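/**
 * Find the pixel format in pix_fmt_mask that best approximates
 * src_pix_fmt, retrying with progressively more permissive loss masks
 * until a candidate is found. If loss_ptr is non-NULL it receives the
 * FF_LOSS_* flags of the chosen format. Illustrative call (not part of
 * this file):
 *
 *   int loss;
 *   int dst = avcodec_find_best_pix_fmt((1ULL << PIX_FMT_YUV420P) |
 *                                       (1ULL << PIX_FMT_RGB565),
 *                                       PIX_FMT_RGB24, 0, &loss);
 */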
  753. int avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, int src_pix_fmt,
  754. int has_alpha, int *loss_ptr)
  755. {
  756. int dst_pix_fmt, loss_mask, i;
  757. static const int loss_mask_order[] = {
  758. ~0, /* no loss first */
  759. ~FF_LOSS_ALPHA,
  760. ~FF_LOSS_RESOLUTION,
  761. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  762. ~FF_LOSS_COLORQUANT,
  763. ~FF_LOSS_DEPTH,
  764. 0,
  765. };
  766. /* try with successive loss */
  767. i = 0;
  768. for(;;) {
  769. loss_mask = loss_mask_order[i++];
  770. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  771. has_alpha, loss_mask);
  772. if (dst_pix_fmt >= 0)
  773. goto found;
  774. if (loss_mask == 0)
  775. break;
  776. }
  777. return -1;
  778. found:
  779. if (loss_ptr)
  780. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  781. return dst_pix_fmt;
  782. }
  783. void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
  784. const uint8_t *src, int src_wrap,
  785. int width, int height)
  786. {
  787. if((!dst) || (!src))
  788. return;
  789. for(;height > 0; height--) {
  790. memcpy(dst, src, width);
  791. dst += dst_wrap;
  792. src += src_wrap;
  793. }
  794. }
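/**
 * Return the number of bytes per line of the given plane for an image
 * of width pixels in the given pixel format, or -1 on error.
 */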
  795. int ff_get_plane_bytewidth(enum PixelFormat pix_fmt, int width, int plane)
  796. {
  797. int bits;
  798. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  799. pf = &pix_fmt_info[pix_fmt];
  800. switch(pf->pixel_type) {
  801. case FF_PIXEL_PACKED:
  802. switch(pix_fmt) {
  803. case PIX_FMT_YUYV422:
  804. case PIX_FMT_UYVY422:
  805. case PIX_FMT_RGB565:
  806. case PIX_FMT_RGB555:
  807. case PIX_FMT_BGR565:
  808. case PIX_FMT_BGR555:
  809. bits = 16;
  810. break;
  811. case PIX_FMT_UYYVYY411:
  812. bits = 12;
  813. break;
  814. default:
  815. bits = pf->depth * pf->nb_channels;
  816. break;
  817. }
  818. return (width * bits + 7) >> 3;
  819. break;
  820. case FF_PIXEL_PLANAR:
  821. if (plane == 1 || plane == 2)
  822. width= -((-width)>>pf->x_chroma_shift);
  823. return (width * pf->depth + 7) >> 3;
  824. break;
  825. case FF_PIXEL_PALETTE:
  826. if (plane == 0)
  827. return width;
  828. break;
  829. }
  830. return -1;
  831. }
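/**
 * Copy the image data (and, for PIX_FMT_PAL8, the palette) from src to
 * dst. Both pictures must already describe allocated images of the same
 * pixel format and dimensions.
 */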
  832. void av_picture_copy(AVPicture *dst, const AVPicture *src,
  833. int pix_fmt, int width, int height)
  834. {
  835. int i;
  836. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  837. pf = &pix_fmt_info[pix_fmt];
  838. switch(pf->pixel_type) {
  839. case FF_PIXEL_PACKED:
  840. case FF_PIXEL_PLANAR:
  841. for(i = 0; i < pf->nb_channels; i++) {
  842. int h;
  843. int bwidth = ff_get_plane_bytewidth(pix_fmt, width, i);
  844. h = height;
  845. if (i == 1 || i == 2) {
  846. h= -((-height)>>pf->y_chroma_shift);
  847. }
  848. ff_img_copy_plane(dst->data[i], dst->linesize[i],
  849. src->data[i], src->linesize[i],
  850. bwidth, h);
  851. }
  852. break;
  853. case FF_PIXEL_PALETTE:
  854. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  855. src->data[0], src->linesize[0],
  856. width, height);
  857. /* copy the palette */
  858. ff_img_copy_plane(dst->data[1], dst->linesize[1],
  859. src->data[1], src->linesize[1],
  860. 4, 256);
  861. break;
  862. }
  863. }
  864. /* XXX: totally unoptimized */
  865. static void yuyv422_to_yuv420p(AVPicture *dst, const AVPicture *src,
  866. int width, int height)
  867. {
  868. const uint8_t *p, *p1;
  869. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  870. int w;
  871. p1 = src->data[0];
  872. lum1 = dst->data[0];
  873. cb1 = dst->data[1];
  874. cr1 = dst->data[2];
  875. for(;height >= 1; height -= 2) {
  876. p = p1;
  877. lum = lum1;
  878. cb = cb1;
  879. cr = cr1;
  880. for(w = width; w >= 2; w -= 2) {
  881. lum[0] = p[0];
  882. cb[0] = p[1];
  883. lum[1] = p[2];
  884. cr[0] = p[3];
  885. p += 4;
  886. lum += 2;
  887. cb++;
  888. cr++;
  889. }
  890. if (w) {
  891. lum[0] = p[0];
  892. cb[0] = p[1];
  893. cr[0] = p[3];
  894. cb++;
  895. cr++;
  896. }
  897. p1 += src->linesize[0];
  898. lum1 += dst->linesize[0];
  899. if (height>1) {
  900. p = p1;
  901. lum = lum1;
  902. for(w = width; w >= 2; w -= 2) {
  903. lum[0] = p[0];
  904. lum[1] = p[2];
  905. p += 4;
  906. lum += 2;
  907. }
  908. if (w) {
  909. lum[0] = p[0];
  910. }
  911. p1 += src->linesize[0];
  912. lum1 += dst->linesize[0];
  913. }
  914. cb1 += dst->linesize[1];
  915. cr1 += dst->linesize[2];
  916. }
  917. }
  918. static void uyvy422_to_yuv420p(AVPicture *dst, const AVPicture *src,
  919. int width, int height)
  920. {
  921. const uint8_t *p, *p1;
  922. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  923. int w;
  924. p1 = src->data[0];
  925. lum1 = dst->data[0];
  926. cb1 = dst->data[1];
  927. cr1 = dst->data[2];
  928. for(;height >= 1; height -= 2) {
  929. p = p1;
  930. lum = lum1;
  931. cb = cb1;
  932. cr = cr1;
  933. for(w = width; w >= 2; w -= 2) {
  934. lum[0] = p[1];
  935. cb[0] = p[0];
  936. lum[1] = p[3];
  937. cr[0] = p[2];
  938. p += 4;
  939. lum += 2;
  940. cb++;
  941. cr++;
  942. }
  943. if (w) {
  944. lum[0] = p[1];
  945. cb[0] = p[0];
  946. cr[0] = p[2];
  947. cb++;
  948. cr++;
  949. }
  950. p1 += src->linesize[0];
  951. lum1 += dst->linesize[0];
  952. if (height>1) {
  953. p = p1;
  954. lum = lum1;
  955. for(w = width; w >= 2; w -= 2) {
  956. lum[0] = p[1];
  957. lum[1] = p[3];
  958. p += 4;
  959. lum += 2;
  960. }
  961. if (w) {
  962. lum[0] = p[1];
  963. }
  964. p1 += src->linesize[0];
  965. lum1 += dst->linesize[0];
  966. }
  967. cb1 += dst->linesize[1];
  968. cr1 += dst->linesize[2];
  969. }
  970. }
  971. static void uyvy422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  972. int width, int height)
  973. {
  974. const uint8_t *p, *p1;
  975. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  976. int w;
  977. p1 = src->data[0];
  978. lum1 = dst->data[0];
  979. cb1 = dst->data[1];
  980. cr1 = dst->data[2];
  981. for(;height > 0; height--) {
  982. p = p1;
  983. lum = lum1;
  984. cb = cb1;
  985. cr = cr1;
  986. for(w = width; w >= 2; w -= 2) {
  987. lum[0] = p[1];
  988. cb[0] = p[0];
  989. lum[1] = p[3];
  990. cr[0] = p[2];
  991. p += 4;
  992. lum += 2;
  993. cb++;
  994. cr++;
  995. }
  996. p1 += src->linesize[0];
  997. lum1 += dst->linesize[0];
  998. cb1 += dst->linesize[1];
  999. cr1 += dst->linesize[2];
  1000. }
  1001. }
  1002. static void yuyv422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  1003. int width, int height)
  1004. {
  1005. const uint8_t *p, *p1;
  1006. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1007. int w;
  1008. p1 = src->data[0];
  1009. lum1 = dst->data[0];
  1010. cb1 = dst->data[1];
  1011. cr1 = dst->data[2];
  1012. for(;height > 0; height--) {
  1013. p = p1;
  1014. lum = lum1;
  1015. cb = cb1;
  1016. cr = cr1;
  1017. for(w = width; w >= 2; w -= 2) {
  1018. lum[0] = p[0];
  1019. cb[0] = p[1];
  1020. lum[1] = p[2];
  1021. cr[0] = p[3];
  1022. p += 4;
  1023. lum += 2;
  1024. cb++;
  1025. cr++;
  1026. }
  1027. p1 += src->linesize[0];
  1028. lum1 += dst->linesize[0];
  1029. cb1 += dst->linesize[1];
  1030. cr1 += dst->linesize[2];
  1031. }
  1032. }
  1033. static void yuv422p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  1034. int width, int height)
  1035. {
  1036. uint8_t *p, *p1;
  1037. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1038. int w;
  1039. p1 = dst->data[0];
  1040. lum1 = src->data[0];
  1041. cb1 = src->data[1];
  1042. cr1 = src->data[2];
  1043. for(;height > 0; height--) {
  1044. p = p1;
  1045. lum = lum1;
  1046. cb = cb1;
  1047. cr = cr1;
  1048. for(w = width; w >= 2; w -= 2) {
  1049. p[0] = lum[0];
  1050. p[1] = cb[0];
  1051. p[2] = lum[1];
  1052. p[3] = cr[0];
  1053. p += 4;
  1054. lum += 2;
  1055. cb++;
  1056. cr++;
  1057. }
  1058. p1 += dst->linesize[0];
  1059. lum1 += src->linesize[0];
  1060. cb1 += src->linesize[1];
  1061. cr1 += src->linesize[2];
  1062. }
  1063. }
  1064. static void yuv422p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1065. int width, int height)
  1066. {
  1067. uint8_t *p, *p1;
  1068. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1069. int w;
  1070. p1 = dst->data[0];
  1071. lum1 = src->data[0];
  1072. cb1 = src->data[1];
  1073. cr1 = src->data[2];
  1074. for(;height > 0; height--) {
  1075. p = p1;
  1076. lum = lum1;
  1077. cb = cb1;
  1078. cr = cr1;
  1079. for(w = width; w >= 2; w -= 2) {
  1080. p[1] = lum[0];
  1081. p[0] = cb[0];
  1082. p[3] = lum[1];
  1083. p[2] = cr[0];
  1084. p += 4;
  1085. lum += 2;
  1086. cb++;
  1087. cr++;
  1088. }
  1089. p1 += dst->linesize[0];
  1090. lum1 += src->linesize[0];
  1091. cb1 += src->linesize[1];
  1092. cr1 += src->linesize[2];
  1093. }
  1094. }
  1095. static void uyyvyy411_to_yuv411p(AVPicture *dst, const AVPicture *src,
  1096. int width, int height)
  1097. {
  1098. const uint8_t *p, *p1;
  1099. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1100. int w;
  1101. p1 = src->data[0];
  1102. lum1 = dst->data[0];
  1103. cb1 = dst->data[1];
  1104. cr1 = dst->data[2];
  1105. for(;height > 0; height--) {
  1106. p = p1;
  1107. lum = lum1;
  1108. cb = cb1;
  1109. cr = cr1;
  1110. for(w = width; w >= 4; w -= 4) {
  1111. cb[0] = p[0];
  1112. lum[0] = p[1];
  1113. lum[1] = p[2];
  1114. cr[0] = p[3];
  1115. lum[2] = p[4];
  1116. lum[3] = p[5];
  1117. p += 6;
  1118. lum += 4;
  1119. cb++;
  1120. cr++;
  1121. }
  1122. p1 += src->linesize[0];
  1123. lum1 += dst->linesize[0];
  1124. cb1 += dst->linesize[1];
  1125. cr1 += dst->linesize[2];
  1126. }
  1127. }
  1128. static void yuv420p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  1129. int width, int height)
  1130. {
  1131. int w, h;
  1132. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1133. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1134. uint8_t *cb1, *cb2 = src->data[1];
  1135. uint8_t *cr1, *cr2 = src->data[2];
  1136. for(h = height / 2; h--;) {
  1137. line1 = linesrc;
  1138. line2 = linesrc + dst->linesize[0];
  1139. lum1 = lumsrc;
  1140. lum2 = lumsrc + src->linesize[0];
  1141. cb1 = cb2;
  1142. cr1 = cr2;
  1143. for(w = width / 2; w--;) {
  1144. *line1++ = *lum1++; *line2++ = *lum2++;
  1145. *line1++ = *line2++ = *cb1++;
  1146. *line1++ = *lum1++; *line2++ = *lum2++;
  1147. *line1++ = *line2++ = *cr1++;
  1148. }
  1149. linesrc += dst->linesize[0] * 2;
  1150. lumsrc += src->linesize[0] * 2;
  1151. cb2 += src->linesize[1];
  1152. cr2 += src->linesize[2];
  1153. }
  1154. }
  1155. static void yuv420p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1156. int width, int height)
  1157. {
  1158. int w, h;
  1159. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1160. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1161. uint8_t *cb1, *cb2 = src->data[1];
  1162. uint8_t *cr1, *cr2 = src->data[2];
  1163. for(h = height / 2; h--;) {
  1164. line1 = linesrc;
  1165. line2 = linesrc + dst->linesize[0];
  1166. lum1 = lumsrc;
  1167. lum2 = lumsrc + src->linesize[0];
  1168. cb1 = cb2;
  1169. cr1 = cr2;
  1170. for(w = width / 2; w--;) {
  1171. *line1++ = *line2++ = *cb1++;
  1172. *line1++ = *lum1++; *line2++ = *lum2++;
  1173. *line1++ = *line2++ = *cr1++;
  1174. *line1++ = *lum1++; *line2++ = *lum2++;
  1175. }
  1176. linesrc += dst->linesize[0] * 2;
  1177. lumsrc += src->linesize[0] * 2;
  1178. cb2 += src->linesize[1];
  1179. cr2 += src->linesize[2];
  1180. }
  1181. }
  1182. /* 2x2 -> 1x1 */
  1183. void ff_shrink22(uint8_t *dst, int dst_wrap,
  1184. const uint8_t *src, int src_wrap,
  1185. int width, int height)
  1186. {
  1187. int w;
  1188. const uint8_t *s1, *s2;
  1189. uint8_t *d;
  1190. for(;height > 0; height--) {
  1191. s1 = src;
  1192. s2 = s1 + src_wrap;
  1193. d = dst;
  1194. for(w = width;w >= 4; w-=4) {
  1195. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1196. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  1197. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  1198. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  1199. s1 += 8;
  1200. s2 += 8;
  1201. d += 4;
  1202. }
  1203. for(;w > 0; w--) {
  1204. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1205. s1 += 2;
  1206. s2 += 2;
  1207. d++;
  1208. }
  1209. src += 2 * src_wrap;
  1210. dst += dst_wrap;
  1211. }
  1212. }
  1213. /* 4x4 -> 1x1 */
  1214. void ff_shrink44(uint8_t *dst, int dst_wrap,
  1215. const uint8_t *src, int src_wrap,
  1216. int width, int height)
  1217. {
  1218. int w;
  1219. const uint8_t *s1, *s2, *s3, *s4;
  1220. uint8_t *d;
  1221. for(;height > 0; height--) {
  1222. s1 = src;
  1223. s2 = s1 + src_wrap;
  1224. s3 = s2 + src_wrap;
  1225. s4 = s3 + src_wrap;
  1226. d = dst;
  1227. for(w = width;w > 0; w--) {
  1228. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  1229. s2[0] + s2[1] + s2[2] + s2[3] +
  1230. s3[0] + s3[1] + s3[2] + s3[3] +
  1231. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  1232. s1 += 4;
  1233. s2 += 4;
  1234. s3 += 4;
  1235. s4 += 4;
  1236. d++;
  1237. }
  1238. src += 4 * src_wrap;
  1239. dst += dst_wrap;
  1240. }
  1241. }
  1242. /* 8x8 -> 1x1 */
  1243. void ff_shrink88(uint8_t *dst, int dst_wrap,
  1244. const uint8_t *src, int src_wrap,
  1245. int width, int height)
  1246. {
  1247. int w, i;
  1248. for(;height > 0; height--) {
  1249. for(w = width;w > 0; w--) {
  1250. int tmp=0;
  1251. for(i=0; i<8; i++){
  1252. tmp += src[0] + src[1] + src[2] + src[3] + src[4] + src[5] + src[6] + src[7];
  1253. src += src_wrap;
  1254. }
  1255. *(dst++) = (tmp + 32)>>6;
  1256. src += 8 - 8*src_wrap;
  1257. }
  1258. src += 8*src_wrap - 8*width;
  1259. dst += dst_wrap - width;
  1260. }
  1261. }
  1262. /* XXX: add jpeg quantize code */
  1263. #define TRANSP_INDEX (6*6*6)
  1264. /* this may be slow, but it allows for extensions */
  1265. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1266. {
  1267. return (((r) / 47) % 6) * 6 * 6 + (((g) / 47) % 6) * 6 + (((b) / 47) % 6);
  1268. }
  1269. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  1270. {
  1271. uint32_t *pal;
  1272. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  1273. int i, r, g, b;
  1274. pal = (uint32_t *)palette;
  1275. i = 0;
  1276. for(r = 0; r < 6; r++) {
  1277. for(g = 0; g < 6; g++) {
  1278. for(b = 0; b < 6; b++) {
  1279. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  1280. (pal_value[g] << 8) | pal_value[b];
  1281. }
  1282. }
  1283. }
  1284. if (has_alpha)
  1285. pal[i++] = 0;
  1286. while (i < 256)
  1287. pal[i++] = 0xff000000;
  1288. }
  1289. /* copy bit n to bits 0 ... n - 1 */
  1290. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1291. {
  1292. int mask;
  1293. mask = (1 << n) - 1;
  1294. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1295. }
  1296. /* rgb555 handling */
  1297. #define RGB_NAME rgb555
  1298. #define RGB_IN(r, g, b, s)\
  1299. {\
  1300. unsigned int v = ((const uint16_t *)(s))[0];\
  1301. r = bitcopy_n(v >> (10 - 3), 3);\
  1302. g = bitcopy_n(v >> (5 - 3), 3);\
  1303. b = bitcopy_n(v << 3, 3);\
  1304. }
  1305. #define RGB_OUT(d, r, g, b)\
  1306. {\
  1307. ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3);\
  1308. }
  1309. #define BPP 2
  1310. #include "imgconvert_template.c"
  1311. /* rgb565 handling */
  1312. #define RGB_NAME rgb565
  1313. #define RGB_IN(r, g, b, s)\
  1314. {\
  1315. unsigned int v = ((const uint16_t *)(s))[0];\
  1316. r = bitcopy_n(v >> (11 - 3), 3);\
  1317. g = bitcopy_n(v >> (5 - 2), 2);\
  1318. b = bitcopy_n(v << 3, 3);\
  1319. }
  1320. #define RGB_OUT(d, r, g, b)\
  1321. {\
  1322. ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
  1323. }
  1324. #define BPP 2
  1325. #include "imgconvert_template.c"
  1326. /* bgr24 handling */
  1327. #define RGB_NAME bgr24
  1328. #define RGB_IN(r, g, b, s)\
  1329. {\
  1330. b = (s)[0];\
  1331. g = (s)[1];\
  1332. r = (s)[2];\
  1333. }
  1334. #define RGB_OUT(d, r, g, b)\
  1335. {\
  1336. (d)[0] = b;\
  1337. (d)[1] = g;\
  1338. (d)[2] = r;\
  1339. }
  1340. #define BPP 3
  1341. #include "imgconvert_template.c"
  1342. #undef RGB_IN
  1343. #undef RGB_OUT
  1344. #undef BPP
  1345. /* rgb24 handling */
  1346. #define RGB_NAME rgb24
  1347. #define FMT_RGB24
  1348. #define RGB_IN(r, g, b, s)\
  1349. {\
  1350. r = (s)[0];\
  1351. g = (s)[1];\
  1352. b = (s)[2];\
  1353. }
  1354. #define RGB_OUT(d, r, g, b)\
  1355. {\
  1356. (d)[0] = r;\
  1357. (d)[1] = g;\
  1358. (d)[2] = b;\
  1359. }
  1360. #define BPP 3
  1361. #include "imgconvert_template.c"
  1362. /* rgb32 handling */
  1363. #define RGB_NAME rgb32
  1364. #define FMT_RGB32
  1365. #define RGB_IN(r, g, b, s)\
  1366. {\
  1367. unsigned int v = ((const uint32_t *)(s))[0];\
  1368. r = (v >> 16) & 0xff;\
  1369. g = (v >> 8) & 0xff;\
  1370. b = v & 0xff;\
  1371. }
  1372. #define RGBA_IN(r, g, b, a, s)\
  1373. {\
  1374. unsigned int v = ((const uint32_t *)(s))[0];\
  1375. a = (v >> 24) & 0xff;\
  1376. r = (v >> 16) & 0xff;\
  1377. g = (v >> 8) & 0xff;\
  1378. b = v & 0xff;\
  1379. }
  1380. #define RGBA_OUT(d, r, g, b, a)\
  1381. {\
  1382. ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
  1383. }
  1384. #define BPP 4
  1385. #include "imgconvert_template.c"
  1386. static void mono_to_gray(AVPicture *dst, const AVPicture *src,
  1387. int width, int height, int xor_mask)
  1388. {
  1389. const unsigned char *p;
  1390. unsigned char *q;
  1391. int v, dst_wrap, src_wrap;
  1392. int y, w;
  1393. p = src->data[0];
  1394. src_wrap = src->linesize[0] - ((width + 7) >> 3);
  1395. q = dst->data[0];
  1396. dst_wrap = dst->linesize[0] - width;
  1397. for(y=0;y<height;y++) {
  1398. w = width;
  1399. while (w >= 8) {
  1400. v = *p++ ^ xor_mask;
  1401. q[0] = -(v >> 7);
  1402. q[1] = -((v >> 6) & 1);
  1403. q[2] = -((v >> 5) & 1);
  1404. q[3] = -((v >> 4) & 1);
  1405. q[4] = -((v >> 3) & 1);
  1406. q[5] = -((v >> 2) & 1);
  1407. q[6] = -((v >> 1) & 1);
  1408. q[7] = -((v >> 0) & 1);
  1409. w -= 8;
  1410. q += 8;
  1411. }
  1412. if (w > 0) {
  1413. v = *p++ ^ xor_mask;
  1414. do {
  1415. q[0] = -((v >> 7) & 1);
  1416. q++;
  1417. v <<= 1;
  1418. } while (--w);
  1419. }
  1420. p += src_wrap;
  1421. q += dst_wrap;
  1422. }
  1423. }
  1424. static void monowhite_to_gray(AVPicture *dst, const AVPicture *src,
  1425. int width, int height)
  1426. {
  1427. mono_to_gray(dst, src, width, height, 0xff);
  1428. }
  1429. static void monoblack_to_gray(AVPicture *dst, const AVPicture *src,
  1430. int width, int height)
  1431. {
  1432. mono_to_gray(dst, src, width, height, 0x00);
  1433. }
  1434. static void gray_to_mono(AVPicture *dst, const AVPicture *src,
  1435. int width, int height, int xor_mask)
  1436. {
  1437. int n;
  1438. const uint8_t *s;
  1439. uint8_t *d;
  1440. int j, b, v, n1, src_wrap, dst_wrap, y;
  1441. s = src->data[0];
  1442. src_wrap = src->linesize[0] - width;
  1443. d = dst->data[0];
  1444. dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
  1445. for(y=0;y<height;y++) {
  1446. n = width;
  1447. while (n >= 8) {
  1448. v = 0;
  1449. for(j=0;j<8;j++) {
  1450. b = s[0];
  1451. s++;
  1452. v = (v << 1) | (b >> 7);
  1453. }
  1454. d[0] = v ^ xor_mask;
  1455. d++;
  1456. n -= 8;
  1457. }
  1458. if (n > 0) {
  1459. n1 = n;
  1460. v = 0;
  1461. while (n > 0) {
  1462. b = s[0];
  1463. s++;
  1464. v = (v << 1) | (b >> 7);
  1465. n--;
  1466. }
  1467. d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
  1468. d++;
  1469. }
  1470. s += src_wrap;
  1471. d += dst_wrap;
  1472. }
  1473. }
  1474. static void gray_to_monowhite(AVPicture *dst, const AVPicture *src,
  1475. int width, int height)
  1476. {
  1477. gray_to_mono(dst, src, width, height, 0xff);
  1478. }
  1479. static void gray_to_monoblack(AVPicture *dst, const AVPicture *src,
  1480. int width, int height)
  1481. {
  1482. gray_to_mono(dst, src, width, height, 0x00);
  1483. }
  1484. static void gray_to_gray16(AVPicture *dst, const AVPicture *src,
  1485. int width, int height)
  1486. {
  1487. int x, y, src_wrap, dst_wrap;
  1488. uint8_t *s, *d;
  1489. s = src->data[0];
  1490. src_wrap = src->linesize[0] - width;
  1491. d = dst->data[0];
  1492. dst_wrap = dst->linesize[0] - width * 2;
  1493. for(y=0; y<height; y++){
  1494. for(x=0; x<width; x++){
  1495. *d++ = *s;
  1496. *d++ = *s++;
  1497. }
  1498. s += src_wrap;
  1499. d += dst_wrap;
  1500. }
  1501. }
  1502. static void gray16_to_gray(AVPicture *dst, const AVPicture *src,
  1503. int width, int height)
  1504. {
  1505. int x, y, src_wrap, dst_wrap;
  1506. uint8_t *s, *d;
  1507. s = src->data[0];
  1508. src_wrap = src->linesize[0] - width * 2;
  1509. d = dst->data[0];
  1510. dst_wrap = dst->linesize[0] - width;
  1511. for(y=0; y<height; y++){
  1512. for(x=0; x<width; x++){
  1513. *d++ = *s;
  1514. s += 2;
  1515. }
  1516. s += src_wrap;
  1517. d += dst_wrap;
  1518. }
  1519. }
  1520. static void gray16be_to_gray(AVPicture *dst, const AVPicture *src,
  1521. int width, int height)
  1522. {
  1523. gray16_to_gray(dst, src, width, height);
  1524. }
  1525. static void gray16le_to_gray(AVPicture *dst, const AVPicture *src,
  1526. int width, int height)
  1527. {
  1528. AVPicture tmpsrc = *src;
  1529. tmpsrc.data[0]++;
  1530. gray16_to_gray(dst, &tmpsrc, width, height);
  1531. }
  1532. static void gray16_to_gray16(AVPicture *dst, const AVPicture *src,
  1533. int width, int height)
  1534. {
  1535. int x, y, src_wrap, dst_wrap;
  1536. uint16_t *s, *d;
  1537. s = (uint16_t*)src->data[0];
  1538. src_wrap = (src->linesize[0] - width * 2)/2;
  1539. d = (uint16_t*)dst->data[0];
  1540. dst_wrap = (dst->linesize[0] - width * 2)/2;
  1541. for(y=0; y<height; y++){
  1542. for(x=0; x<width; x++){
  1543. *d++ = bswap_16(*s++);
  1544. }
  1545. s += src_wrap;
  1546. d += dst_wrap;
  1547. }
  1548. }
  1549. typedef struct ConvertEntry {
  1550. void (*convert)(AVPicture *dst,
  1551. const AVPicture *src, int width, int height);
  1552. } ConvertEntry;
  1553. /* Add each new conversion function in this table. In order to be able
  1554. to convert from any format to any format, the following constraints
  1555. must be satisfied:
  1556. - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
  1557. - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
  1558. - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGB32
  1559. - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
  1560. PIX_FMT_RGB24.
  1561. - PIX_FMT_YUYV422 must convert to and from PIX_FMT_YUV422P.
  1562. The other conversion functions are just optimizations for common cases.
  1563. */
  1564. static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
  1565. [PIX_FMT_YUV420P] = {
  1566. [PIX_FMT_YUYV422] = {
  1567. .convert = yuv420p_to_yuyv422,
  1568. },
  1569. [PIX_FMT_RGB555] = {
  1570. .convert = yuv420p_to_rgb555
  1571. },
  1572. [PIX_FMT_RGB565] = {
  1573. .convert = yuv420p_to_rgb565
  1574. },
  1575. [PIX_FMT_BGR24] = {
  1576. .convert = yuv420p_to_bgr24
  1577. },
  1578. [PIX_FMT_RGB24] = {
  1579. .convert = yuv420p_to_rgb24
  1580. },
  1581. [PIX_FMT_RGB32] = {
  1582. .convert = yuv420p_to_rgb32
  1583. },
  1584. [PIX_FMT_UYVY422] = {
  1585. .convert = yuv420p_to_uyvy422,
  1586. },
  1587. },
  1588. [PIX_FMT_YUV422P] = {
  1589. [PIX_FMT_YUYV422] = {
  1590. .convert = yuv422p_to_yuyv422,
  1591. },
  1592. [PIX_FMT_UYVY422] = {
  1593. .convert = yuv422p_to_uyvy422,
  1594. },
  1595. },
  1596. [PIX_FMT_YUV444P] = {
  1597. [PIX_FMT_RGB24] = {
  1598. .convert = yuv444p_to_rgb24
  1599. },
  1600. },
  1601. [PIX_FMT_YUVJ420P] = {
  1602. [PIX_FMT_RGB555] = {
  1603. .convert = yuvj420p_to_rgb555
  1604. },
  1605. [PIX_FMT_RGB565] = {
  1606. .convert = yuvj420p_to_rgb565
  1607. },
  1608. [PIX_FMT_BGR24] = {
  1609. .convert = yuvj420p_to_bgr24
  1610. },
  1611. [PIX_FMT_RGB24] = {
  1612. .convert = yuvj420p_to_rgb24
  1613. },
  1614. [PIX_FMT_RGB32] = {
  1615. .convert = yuvj420p_to_rgb32
  1616. },
  1617. },
  1618. [PIX_FMT_YUVJ444P] = {
  1619. [PIX_FMT_RGB24] = {
  1620. .convert = yuvj444p_to_rgb24
  1621. },
  1622. },
  1623. [PIX_FMT_YUYV422] = {
  1624. [PIX_FMT_YUV420P] = {
  1625. .convert = yuyv422_to_yuv420p,
  1626. },
  1627. [PIX_FMT_YUV422P] = {
  1628. .convert = yuyv422_to_yuv422p,
  1629. },
  1630. },
  1631. [PIX_FMT_UYVY422] = {
  1632. [PIX_FMT_YUV420P] = {
  1633. .convert = uyvy422_to_yuv420p,
  1634. },
  1635. [PIX_FMT_YUV422P] = {
  1636. .convert = uyvy422_to_yuv422p,
  1637. },
  1638. },
  1639. [PIX_FMT_RGB24] = {
  1640. [PIX_FMT_YUV420P] = {
  1641. .convert = rgb24_to_yuv420p
  1642. },
  1643. [PIX_FMT_RGB565] = {
  1644. .convert = rgb24_to_rgb565
  1645. },
  1646. [PIX_FMT_RGB555] = {
  1647. .convert = rgb24_to_rgb555
  1648. },
  1649. [PIX_FMT_RGB32] = {
  1650. .convert = rgb24_to_rgb32
  1651. },
  1652. [PIX_FMT_BGR24] = {
  1653. .convert = rgb24_to_bgr24
  1654. },
  1655. [PIX_FMT_GRAY8] = {
  1656. .convert = rgb24_to_gray
  1657. },
  1658. [PIX_FMT_PAL8] = {
  1659. .convert = rgb24_to_pal8
  1660. },
  1661. [PIX_FMT_YUV444P] = {
  1662. .convert = rgb24_to_yuv444p
  1663. },
  1664. [PIX_FMT_YUVJ420P] = {
  1665. .convert = rgb24_to_yuvj420p
  1666. },
  1667. [PIX_FMT_YUVJ444P] = {
  1668. .convert = rgb24_to_yuvj444p
  1669. },
  1670. },
  1671. [PIX_FMT_RGB32] = {
  1672. [PIX_FMT_RGB24] = {
  1673. .convert = rgb32_to_rgb24
  1674. },
  1675. [PIX_FMT_BGR24] = {
  1676. .convert = rgb32_to_bgr24
  1677. },
  1678. [PIX_FMT_RGB565] = {
  1679. .convert = rgb32_to_rgb565
  1680. },
  1681. [PIX_FMT_RGB555] = {
  1682. .convert = rgb32_to_rgb555
  1683. },
  1684. [PIX_FMT_PAL8] = {
  1685. .convert = rgb32_to_pal8
  1686. },
  1687. [PIX_FMT_YUV420P] = {
  1688. .convert = rgb32_to_yuv420p
  1689. },
  1690. [PIX_FMT_GRAY8] = {
  1691. .convert = rgb32_to_gray
  1692. },
  1693. },
  1694. [PIX_FMT_BGR24] = {
  1695. [PIX_FMT_RGB32] = {
  1696. .convert = bgr24_to_rgb32
  1697. },
  1698. [PIX_FMT_RGB24] = {
  1699. .convert = bgr24_to_rgb24
  1700. },
  1701. [PIX_FMT_YUV420P] = {
  1702. .convert = bgr24_to_yuv420p
  1703. },
  1704. [PIX_FMT_GRAY8] = {
  1705. .convert = bgr24_to_gray
  1706. },
  1707. },
  1708. [PIX_FMT_RGB555] = {
  1709. [PIX_FMT_RGB24] = {
  1710. .convert = rgb555_to_rgb24
  1711. },
  1712. [PIX_FMT_RGB32] = {
  1713. .convert = rgb555_to_rgb32
  1714. },
  1715. [PIX_FMT_YUV420P] = {
  1716. .convert = rgb555_to_yuv420p
  1717. },
  1718. [PIX_FMT_GRAY8] = {
  1719. .convert = rgb555_to_gray
  1720. },
  1721. },
  1722. [PIX_FMT_RGB565] = {
  1723. [PIX_FMT_RGB32] = {
  1724. .convert = rgb565_to_rgb32
  1725. },
  1726. [PIX_FMT_RGB24] = {
  1727. .convert = rgb565_to_rgb24
  1728. },
  1729. [PIX_FMT_YUV420P] = {
  1730. .convert = rgb565_to_yuv420p
  1731. },
  1732. [PIX_FMT_GRAY8] = {
  1733. .convert = rgb565_to_gray
  1734. },
  1735. },
  1736. [PIX_FMT_GRAY16BE] = {
  1737. [PIX_FMT_GRAY8] = {
  1738. .convert = gray16be_to_gray
  1739. },
  1740. [PIX_FMT_GRAY16LE] = {
  1741. .convert = gray16_to_gray16
  1742. },
  1743. },
  1744. [PIX_FMT_GRAY16LE] = {
  1745. [PIX_FMT_GRAY8] = {
  1746. .convert = gray16le_to_gray
  1747. },
  1748. [PIX_FMT_GRAY16BE] = {
  1749. .convert = gray16_to_gray16
  1750. },
  1751. },
  1752. [PIX_FMT_GRAY8] = {
  1753. [PIX_FMT_RGB555] = {
  1754. .convert = gray_to_rgb555
  1755. },
  1756. [PIX_FMT_RGB565] = {
  1757. .convert = gray_to_rgb565
  1758. },
  1759. [PIX_FMT_RGB24] = {
  1760. .convert = gray_to_rgb24
  1761. },
  1762. [PIX_FMT_BGR24] = {
  1763. .convert = gray_to_bgr24
  1764. },
  1765. [PIX_FMT_RGB32] = {
  1766. .convert = gray_to_rgb32
  1767. },
  1768. [PIX_FMT_MONOWHITE] = {
  1769. .convert = gray_to_monowhite
  1770. },
  1771. [PIX_FMT_MONOBLACK] = {
  1772. .convert = gray_to_monoblack
  1773. },
  1774. [PIX_FMT_GRAY16LE] = {
  1775. .convert = gray_to_gray16
  1776. },
  1777. [PIX_FMT_GRAY16BE] = {
  1778. .convert = gray_to_gray16
  1779. },
  1780. },
  1781. [PIX_FMT_MONOWHITE] = {
  1782. [PIX_FMT_GRAY8] = {
  1783. .convert = monowhite_to_gray
  1784. },
  1785. },
  1786. [PIX_FMT_MONOBLACK] = {
  1787. [PIX_FMT_GRAY8] = {
  1788. .convert = monoblack_to_gray
  1789. },
  1790. },
  1791. [PIX_FMT_PAL8] = {
  1792. [PIX_FMT_RGB555] = {
  1793. .convert = pal8_to_rgb555
  1794. },
  1795. [PIX_FMT_RGB565] = {
  1796. .convert = pal8_to_rgb565
  1797. },
  1798. [PIX_FMT_BGR24] = {
  1799. .convert = pal8_to_bgr24
  1800. },
  1801. [PIX_FMT_RGB24] = {
  1802. .convert = pal8_to_rgb24
  1803. },
  1804. [PIX_FMT_RGB32] = {
  1805. .convert = pal8_to_rgb32
  1806. },
  1807. },
  1808. [PIX_FMT_UYYVYY411] = {
  1809. [PIX_FMT_YUV411P] = {
  1810. .convert = uyyvyy411_to_yuv411p,
  1811. },
  1812. },
  1813. };
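/**
 * Allocate a buffer large enough for an image of the given pixel format
 * and size and set up picture to point into it. Return 0 on success,
 * -1 on failure (picture is zeroed in that case). Free the buffer with
 * avpicture_free().
 */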
  1814. int avpicture_alloc(AVPicture *picture,
  1815. int pix_fmt, int width, int height)
  1816. {
  1817. int size;
  1818. void *ptr;
  1819. size = avpicture_get_size(pix_fmt, width, height);
  1820. if(size<0)
  1821. goto fail;
  1822. ptr = av_malloc(size);
  1823. if (!ptr)
  1824. goto fail;
  1825. avpicture_fill(picture, ptr, pix_fmt, width, height);
  1826. return 0;
  1827. fail:
  1828. memset(picture, 0, sizeof(AVPicture));
  1829. return -1;
  1830. }
  1831. void avpicture_free(AVPicture *picture)
  1832. {
  1833. av_free(picture->data[0]);
  1834. }
  1835. /* return true if yuv planar */
  1836. static inline int is_yuv_planar(const PixFmtInfo *ps)
  1837. {
  1838. return (ps->color_type == FF_COLOR_YUV ||
  1839. ps->color_type == FF_COLOR_YUV_JPEG) &&
  1840. ps->pixel_type == FF_PIXEL_PLANAR;
  1841. }
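/**
 * Crop top_band lines and left_band columns from src by adjusting the
 * data pointers stored in dst; no pixels are copied. Only planar YUV
 * formats are supported. Return 0 on success, -1 otherwise.
 */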
  1842. int av_picture_crop(AVPicture *dst, const AVPicture *src,
  1843. int pix_fmt, int top_band, int left_band)
  1844. {
  1845. int y_shift;
  1846. int x_shift;
  1847. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || !is_yuv_planar(&pix_fmt_info[pix_fmt]))
  1848. return -1;
  1849. y_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  1850. x_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  1851. dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
  1852. dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift);
  1853. dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift);
  1854. dst->linesize[0] = src->linesize[0];
  1855. dst->linesize[1] = src->linesize[1];
  1856. dst->linesize[2] = src->linesize[2];
  1857. return 0;
  1858. }
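/**
 * Copy src into dst, adding borders of padtop/padbottom/padleft/padright
 * pixels filled with the per-plane values in color[]. width and height
 * are the dimensions of dst including the padding. If src is NULL only
 * the borders are written. Only planar YUV formats are supported.
 * Return 0 on success, -1 otherwise.
 */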
  1859. int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
  1860. int pix_fmt, int padtop, int padbottom, int padleft, int padright,
  1861. int *color)
  1862. {
  1863. uint8_t *optr;
  1864. int y_shift;
  1865. int x_shift;
  1866. int yheight;
  1867. int i, y;
  1868. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB ||
  1869. !is_yuv_planar(&pix_fmt_info[pix_fmt])) return -1;
  1870. for (i = 0; i < 3; i++) {
  1871. x_shift = i ? pix_fmt_info[pix_fmt].x_chroma_shift : 0;
  1872. y_shift = i ? pix_fmt_info[pix_fmt].y_chroma_shift : 0;
  1873. if (padtop || padleft) {
  1874. memset(dst->data[i], color[i],
  1875. dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift));
  1876. }
  1877. if (padleft || padright) {
  1878. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  1879. (dst->linesize[i] - (padright >> x_shift));
  1880. yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
  1881. for (y = 0; y < yheight; y++) {
  1882. memset(optr, color[i], (padleft + padright) >> x_shift);
  1883. optr += dst->linesize[i];
  1884. }
  1885. }
  1886. if (src) { /* first line */
  1887. uint8_t *iptr = src->data[i];
  1888. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  1889. (padleft >> x_shift);
  1890. memcpy(optr, iptr, (width - padleft - padright) >> x_shift);
  1891. iptr += src->linesize[i];
  1892. optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
  1893. (dst->linesize[i] - (padright >> x_shift));
  1894. yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
  1895. for (y = 0; y < yheight; y++) {
  1896. memset(optr, color[i], (padleft + padright) >> x_shift);
  1897. memcpy(optr + ((padleft + padright) >> x_shift), iptr,
  1898. (width - padleft - padright) >> x_shift);
  1899. iptr += src->linesize[i];
  1900. optr += dst->linesize[i];
  1901. }
  1902. }
  1903. if (padbottom || padright) {
  1904. optr = dst->data[i] + dst->linesize[i] *
  1905. ((height - padbottom) >> y_shift) - (padright >> x_shift);
  1906. memset(optr, color[i],dst->linesize[i] *
  1907. (padbottom >> y_shift) + (padright >> x_shift));
  1908. }
  1909. }
  1910. return 0;
  1911. }
  1912. #ifndef CONFIG_SWSCALE
  1913. static uint8_t y_ccir_to_jpeg[256];
  1914. static uint8_t y_jpeg_to_ccir[256];
  1915. static uint8_t c_ccir_to_jpeg[256];
  1916. static uint8_t c_jpeg_to_ccir[256];
  1917. /* init various conversion tables */
  1918. static void img_convert_init(void)
  1919. {
  1920. int i;
  1921. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  1922. for(i = 0;i < 256; i++) {
  1923. y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
  1924. y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
  1925. c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
  1926. c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
  1927. }
  1928. }
1929. /* apply the given table to each pixel */
  1930. static void img_apply_table(uint8_t *dst, int dst_wrap,
  1931. const uint8_t *src, int src_wrap,
  1932. int width, int height, const uint8_t *table1)
  1933. {
  1934. int n;
  1935. const uint8_t *s;
  1936. uint8_t *d;
  1937. const uint8_t *table;
  1938. table = table1;
  1939. for(;height > 0; height--) {
  1940. s = src;
  1941. d = dst;
  1942. n = width;
  1943. while (n >= 4) {
  1944. d[0] = table[s[0]];
  1945. d[1] = table[s[1]];
  1946. d[2] = table[s[2]];
  1947. d[3] = table[s[3]];
  1948. d += 4;
  1949. s += 4;
  1950. n -= 4;
  1951. }
  1952. while (n > 0) {
  1953. d[0] = table[s[0]];
  1954. d++;
  1955. s++;
  1956. n--;
  1957. }
  1958. dst += dst_wrap;
  1959. src += src_wrap;
  1960. }
  1961. }
  1962. /* XXX: use generic filter ? */
  1963. /* XXX: in most cases, the sampling position is incorrect */
  1964. /* 4x1 -> 1x1 */
  1965. static void shrink41(uint8_t *dst, int dst_wrap,
  1966. const uint8_t *src, int src_wrap,
  1967. int width, int height)
  1968. {
  1969. int w;
  1970. const uint8_t *s;
  1971. uint8_t *d;
  1972. for(;height > 0; height--) {
  1973. s = src;
  1974. d = dst;
  1975. for(w = width;w > 0; w--) {
  1976. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  1977. s += 4;
  1978. d++;
  1979. }
  1980. src += src_wrap;
  1981. dst += dst_wrap;
  1982. }
  1983. }
  1984. /* 2x1 -> 1x1 */
  1985. static void shrink21(uint8_t *dst, int dst_wrap,
  1986. const uint8_t *src, int src_wrap,
  1987. int width, int height)
  1988. {
  1989. int w;
  1990. const uint8_t *s;
  1991. uint8_t *d;
  1992. for(;height > 0; height--) {
  1993. s = src;
  1994. d = dst;
  1995. for(w = width;w > 0; w--) {
  1996. d[0] = (s[0] + s[1]) >> 1;
  1997. s += 2;
  1998. d++;
  1999. }
  2000. src += src_wrap;
  2001. dst += dst_wrap;
  2002. }
  2003. }
  2004. /* 1x2 -> 1x1 */
  2005. static void shrink12(uint8_t *dst, int dst_wrap,
  2006. const uint8_t *src, int src_wrap,
  2007. int width, int height)
  2008. {
  2009. int w;
  2010. uint8_t *d;
  2011. const uint8_t *s1, *s2;
  2012. for(;height > 0; height--) {
  2013. s1 = src;
  2014. s2 = s1 + src_wrap;
  2015. d = dst;
  2016. for(w = width;w >= 4; w-=4) {
  2017. d[0] = (s1[0] + s2[0]) >> 1;
  2018. d[1] = (s1[1] + s2[1]) >> 1;
  2019. d[2] = (s1[2] + s2[2]) >> 1;
  2020. d[3] = (s1[3] + s2[3]) >> 1;
  2021. s1 += 4;
  2022. s2 += 4;
  2023. d += 4;
  2024. }
  2025. for(;w > 0; w--) {
  2026. d[0] = (s1[0] + s2[0]) >> 1;
  2027. s1++;
  2028. s2++;
  2029. d++;
  2030. }
  2031. src += 2 * src_wrap;
  2032. dst += dst_wrap;
  2033. }
  2034. }
  2035. static void grow21_line(uint8_t *dst, const uint8_t *src,
  2036. int width)
  2037. {
  2038. int w;
  2039. const uint8_t *s1;
  2040. uint8_t *d;
  2041. s1 = src;
  2042. d = dst;
  2043. for(w = width;w >= 4; w-=4) {
  2044. d[1] = d[0] = s1[0];
  2045. d[3] = d[2] = s1[1];
  2046. s1 += 2;
  2047. d += 4;
  2048. }
  2049. for(;w >= 2; w -= 2) {
  2050. d[1] = d[0] = s1[0];
  2051. s1 ++;
  2052. d += 2;
  2053. }
  2054. /* only needed if width is not a multiple of two */
2055. /* XXX: verify that */
  2056. if (w) {
  2057. d[0] = s1[0];
  2058. }
  2059. }
  2060. static void grow41_line(uint8_t *dst, const uint8_t *src,
  2061. int width)
  2062. {
  2063. int w, v;
  2064. const uint8_t *s1;
  2065. uint8_t *d;
  2066. s1 = src;
  2067. d = dst;
  2068. for(w = width;w >= 4; w-=4) {
  2069. v = s1[0];
  2070. d[0] = v;
  2071. d[1] = v;
  2072. d[2] = v;
  2073. d[3] = v;
  2074. s1 ++;
  2075. d += 4;
  2076. }
  2077. }
  2078. /* 1x1 -> 2x1 */
  2079. static void grow21(uint8_t *dst, int dst_wrap,
  2080. const uint8_t *src, int src_wrap,
  2081. int width, int height)
  2082. {
  2083. for(;height > 0; height--) {
  2084. grow21_line(dst, src, width);
  2085. src += src_wrap;
  2086. dst += dst_wrap;
  2087. }
  2088. }
  2089. /* 1x1 -> 1x2 */
  2090. static void grow12(uint8_t *dst, int dst_wrap,
  2091. const uint8_t *src, int src_wrap,
  2092. int width, int height)
  2093. {
  2094. for(;height > 0; height-=2) {
  2095. memcpy(dst, src, width);
  2096. dst += dst_wrap;
  2097. memcpy(dst, src, width);
  2098. dst += dst_wrap;
  2099. src += src_wrap;
  2100. }
  2101. }
  2102. /* 1x1 -> 2x2 */
  2103. static void grow22(uint8_t *dst, int dst_wrap,
  2104. const uint8_t *src, int src_wrap,
  2105. int width, int height)
  2106. {
  2107. for(;height > 0; height--) {
  2108. grow21_line(dst, src, width);
  2109. if (height%2)
  2110. src += src_wrap;
  2111. dst += dst_wrap;
  2112. }
  2113. }
  2114. /* 1x1 -> 4x1 */
  2115. static void grow41(uint8_t *dst, int dst_wrap,
  2116. const uint8_t *src, int src_wrap,
  2117. int width, int height)
  2118. {
  2119. for(;height > 0; height--) {
  2120. grow41_line(dst, src, width);
  2121. src += src_wrap;
  2122. dst += dst_wrap;
  2123. }
  2124. }
  2125. /* 1x1 -> 4x4 */
  2126. static void grow44(uint8_t *dst, int dst_wrap,
  2127. const uint8_t *src, int src_wrap,
  2128. int width, int height)
  2129. {
  2130. for(;height > 0; height--) {
  2131. grow41_line(dst, src, width);
  2132. if ((height & 3) == 1)
  2133. src += src_wrap;
  2134. dst += dst_wrap;
  2135. }
  2136. }
  2137. /* 1x2 -> 2x1 */
  2138. static void conv411(uint8_t *dst, int dst_wrap,
  2139. const uint8_t *src, int src_wrap,
  2140. int width, int height)
  2141. {
  2142. int w, c;
  2143. const uint8_t *s1, *s2;
  2144. uint8_t *d;
  2145. width>>=1;
  2146. for(;height > 0; height--) {
  2147. s1 = src;
  2148. s2 = src + src_wrap;
  2149. d = dst;
  2150. for(w = width;w > 0; w--) {
  2151. c = (s1[0] + s2[0]) >> 1;
  2152. d[0] = c;
  2153. d[1] = c;
  2154. s1++;
  2155. s2++;
  2156. d += 2;
  2157. }
  2158. src += src_wrap * 2;
  2159. dst += dst_wrap;
  2160. }
  2161. }
  2162. /* XXX: always use linesize. Return -1 if not supported */
  2163. int img_convert(AVPicture *dst, int dst_pix_fmt,
  2164. const AVPicture *src, int src_pix_fmt,
  2165. int src_width, int src_height)
  2166. {
  2167. static int initialized;
  2168. int i, ret, dst_width, dst_height, int_pix_fmt;
  2169. const PixFmtInfo *src_pix, *dst_pix;
  2170. const ConvertEntry *ce;
  2171. AVPicture tmp1, *tmp = &tmp1;
  2172. if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
  2173. dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
  2174. return -1;
  2175. if (src_width <= 0 || src_height <= 0)
  2176. return 0;
  2177. if (!initialized) {
  2178. initialized = 1;
  2179. img_convert_init();
  2180. }
  2181. dst_width = src_width;
  2182. dst_height = src_height;
  2183. dst_pix = &pix_fmt_info[dst_pix_fmt];
  2184. src_pix = &pix_fmt_info[src_pix_fmt];
  2185. if (src_pix_fmt == dst_pix_fmt) {
  2186. /* no conversion needed: just copy */
  2187. av_picture_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
  2188. return 0;
  2189. }
  2190. ce = &convert_table[src_pix_fmt][dst_pix_fmt];
  2191. if (ce->convert) {
  2192. /* specific conversion routine */
  2193. ce->convert(dst, src, dst_width, dst_height);
  2194. return 0;
  2195. }
  2196. /* gray to YUV */
  2197. if (is_yuv_planar(dst_pix) &&
  2198. src_pix_fmt == PIX_FMT_GRAY8) {
  2199. int w, h, y;
  2200. uint8_t *d;
  2201. if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
  2202. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2203. src->data[0], src->linesize[0],
  2204. dst_width, dst_height);
  2205. } else {
  2206. img_apply_table(dst->data[0], dst->linesize[0],
  2207. src->data[0], src->linesize[0],
  2208. dst_width, dst_height,
  2209. y_jpeg_to_ccir);
  2210. }
  2211. /* fill U and V with 128 */
  2212. w = dst_width;
  2213. h = dst_height;
  2214. w >>= dst_pix->x_chroma_shift;
  2215. h >>= dst_pix->y_chroma_shift;
  2216. for(i = 1; i <= 2; i++) {
  2217. d = dst->data[i];
  2218. for(y = 0; y< h; y++) {
  2219. memset(d, 128, w);
  2220. d += dst->linesize[i];
  2221. }
  2222. }
  2223. return 0;
  2224. }
  2225. /* YUV to gray */
  2226. if (is_yuv_planar(src_pix) &&
  2227. dst_pix_fmt == PIX_FMT_GRAY8) {
  2228. if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
  2229. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2230. src->data[0], src->linesize[0],
  2231. dst_width, dst_height);
  2232. } else {
  2233. img_apply_table(dst->data[0], dst->linesize[0],
  2234. src->data[0], src->linesize[0],
  2235. dst_width, dst_height,
  2236. y_ccir_to_jpeg);
  2237. }
  2238. return 0;
  2239. }
  2240. /* YUV to YUV planar */
  2241. if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
  2242. int x_shift, y_shift, w, h, xy_shift;
  2243. void (*resize_func)(uint8_t *dst, int dst_wrap,
  2244. const uint8_t *src, int src_wrap,
  2245. int width, int height);
  2246. /* compute chroma size of the smallest dimensions */
  2247. w = dst_width;
  2248. h = dst_height;
  2249. if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
  2250. w >>= dst_pix->x_chroma_shift;
  2251. else
  2252. w >>= src_pix->x_chroma_shift;
  2253. if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
  2254. h >>= dst_pix->y_chroma_shift;
  2255. else
  2256. h >>= src_pix->y_chroma_shift;
  2257. x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
  2258. y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
  2259. xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
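/* xy_shift packs the signed horizontal and vertical chroma shift
   differences into one byte, 4 bits each, so a single switch can pick
   the plane resampler; e.g. YUV420P -> YUV444P gives x_shift = y_shift
   = -1, i.e. xy_shift = 0xff, which selects grow22 below. */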
2260. /* filters must exist at least for conversion to and from the
2261. YUV444 format */
  2262. switch(xy_shift) {
  2263. case 0x00:
  2264. resize_func = ff_img_copy_plane;
  2265. break;
  2266. case 0x10:
  2267. resize_func = shrink21;
  2268. break;
  2269. case 0x20:
  2270. resize_func = shrink41;
  2271. break;
  2272. case 0x01:
  2273. resize_func = shrink12;
  2274. break;
  2275. case 0x11:
  2276. resize_func = ff_shrink22;
  2277. break;
  2278. case 0x22:
  2279. resize_func = ff_shrink44;
  2280. break;
  2281. case 0xf0:
  2282. resize_func = grow21;
  2283. break;
  2284. case 0x0f:
  2285. resize_func = grow12;
  2286. break;
  2287. case 0xe0:
  2288. resize_func = grow41;
  2289. break;
  2290. case 0xff:
  2291. resize_func = grow22;
  2292. break;
  2293. case 0xee:
  2294. resize_func = grow44;
  2295. break;
  2296. case 0xf1:
  2297. resize_func = conv411;
  2298. break;
  2299. default:
  2300. /* currently not handled */
  2301. goto no_chroma_filter;
  2302. }
  2303. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2304. src->data[0], src->linesize[0],
  2305. dst_width, dst_height);
  2306. for(i = 1;i <= 2; i++)
  2307. resize_func(dst->data[i], dst->linesize[i],
  2308. src->data[i], src->linesize[i],
  2309. dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
  2310. /* if yuv color space conversion is needed, we do it here on
  2311. the destination image */
  2312. if (dst_pix->color_type != src_pix->color_type) {
  2313. const uint8_t *y_table, *c_table;
  2314. if (dst_pix->color_type == FF_COLOR_YUV) {
  2315. y_table = y_jpeg_to_ccir;
  2316. c_table = c_jpeg_to_ccir;
  2317. } else {
  2318. y_table = y_ccir_to_jpeg;
  2319. c_table = c_ccir_to_jpeg;
  2320. }
  2321. img_apply_table(dst->data[0], dst->linesize[0],
  2322. dst->data[0], dst->linesize[0],
  2323. dst_width, dst_height,
  2324. y_table);
  2325. for(i = 1;i <= 2; i++)
  2326. img_apply_table(dst->data[i], dst->linesize[i],
  2327. dst->data[i], dst->linesize[i],
  2328. dst_width>>dst_pix->x_chroma_shift,
  2329. dst_height>>dst_pix->y_chroma_shift,
  2330. c_table);
  2331. }
  2332. return 0;
  2333. }
  2334. no_chroma_filter:
  2335. /* try to use an intermediate format */
  2336. if (src_pix_fmt == PIX_FMT_YUYV422 ||
  2337. dst_pix_fmt == PIX_FMT_YUYV422) {
  2338. /* specific case: convert to YUV422P first */
  2339. int_pix_fmt = PIX_FMT_YUV422P;
  2340. } else if (src_pix_fmt == PIX_FMT_UYVY422 ||
  2341. dst_pix_fmt == PIX_FMT_UYVY422) {
  2342. /* specific case: convert to YUV422P first */
  2343. int_pix_fmt = PIX_FMT_YUV422P;
  2344. } else if (src_pix_fmt == PIX_FMT_UYYVYY411 ||
  2345. dst_pix_fmt == PIX_FMT_UYYVYY411) {
  2346. /* specific case: convert to YUV411P first */
  2347. int_pix_fmt = PIX_FMT_YUV411P;
  2348. } else if ((src_pix->color_type == FF_COLOR_GRAY &&
  2349. src_pix_fmt != PIX_FMT_GRAY8) ||
  2350. (dst_pix->color_type == FF_COLOR_GRAY &&
  2351. dst_pix_fmt != PIX_FMT_GRAY8)) {
  2352. /* gray8 is the normalized format */
  2353. int_pix_fmt = PIX_FMT_GRAY8;
  2354. } else if ((is_yuv_planar(src_pix) &&
  2355. src_pix_fmt != PIX_FMT_YUV444P &&
  2356. src_pix_fmt != PIX_FMT_YUVJ444P)) {
  2357. /* yuv444 is the normalized format */
  2358. if (src_pix->color_type == FF_COLOR_YUV_JPEG)
  2359. int_pix_fmt = PIX_FMT_YUVJ444P;
  2360. else
  2361. int_pix_fmt = PIX_FMT_YUV444P;
  2362. } else if ((is_yuv_planar(dst_pix) &&
  2363. dst_pix_fmt != PIX_FMT_YUV444P &&
  2364. dst_pix_fmt != PIX_FMT_YUVJ444P)) {
  2365. /* yuv444 is the normalized format */
  2366. if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
  2367. int_pix_fmt = PIX_FMT_YUVJ444P;
  2368. else
  2369. int_pix_fmt = PIX_FMT_YUV444P;
  2370. } else {
  2371. /* the two formats are rgb or gray8 or yuv[j]444p */
  2372. if (src_pix->is_alpha && dst_pix->is_alpha)
  2373. int_pix_fmt = PIX_FMT_RGB32;
  2374. else
  2375. int_pix_fmt = PIX_FMT_RGB24;
  2376. }
  2377. if (src_pix_fmt == int_pix_fmt)
  2378. return -1;
  2379. if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2380. return -1;
  2381. ret = -1;
  2382. if (img_convert(tmp, int_pix_fmt,
  2383. src, src_pix_fmt, src_width, src_height) < 0)
  2384. goto fail1;
  2385. if (img_convert(dst, dst_pix_fmt,
  2386. tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2387. goto fail1;
  2388. ret = 0;
  2389. fail1:
  2390. avpicture_free(tmp);
  2391. return ret;
  2392. }
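/* Usage sketch (illustrative; this pre-swscale path is only compiled when
 * CONFIG_SWSCALE is not set): convert a decoded YUV420P frame to RGB24.
 * Assumes 'out_pic' was allocated with
 * avpicture_alloc(&out_pic, PIX_FMT_RGB24, w, h) beforehand.
 *
 *     if (img_convert(&out_pic, PIX_FMT_RGB24,
 *                     &in_pic, PIX_FMT_YUV420P, w, h) < 0) {
 *         // no direct or intermediate conversion path was found
 *     }
 */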
  2393. #endif
2394. /* NOTE: we scan all the pixels to obtain exact information */
  2395. static int get_alpha_info_pal8(const AVPicture *src, int width, int height)
  2396. {
  2397. const unsigned char *p;
  2398. int src_wrap, ret, x, y;
  2399. unsigned int a;
  2400. uint32_t *palette = (uint32_t *)src->data[1];
  2401. p = src->data[0];
  2402. src_wrap = src->linesize[0] - width;
  2403. ret = 0;
  2404. for(y=0;y<height;y++) {
  2405. for(x=0;x<width;x++) {
  2406. a = palette[p[0]] >> 24;
  2407. if (a == 0x00) {
  2408. ret |= FF_ALPHA_TRANSP;
  2409. } else if (a != 0xff) {
  2410. ret |= FF_ALPHA_SEMI_TRANSP;
  2411. }
  2412. p++;
  2413. }
  2414. p += src_wrap;
  2415. }
  2416. return ret;
  2417. }
  2418. int img_get_alpha_info(const AVPicture *src,
  2419. int pix_fmt, int width, int height)
  2420. {
2421. const PixFmtInfo *pf;
2422. int ret;
2423. pf = &pix_fmt_info[pix_fmt];
  2424. /* no alpha can be represented in format */
  2425. if (!pf->is_alpha)
  2426. return 0;
  2427. switch(pix_fmt) {
  2428. case PIX_FMT_RGB32:
  2429. ret = get_alpha_info_rgb32(src, width, height);
  2430. break;
  2431. case PIX_FMT_PAL8:
  2432. ret = get_alpha_info_pal8(src, width, height);
  2433. break;
  2434. default:
2435. /* we do not know, so report both kinds of transparency */
  2436. ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
  2437. break;
  2438. }
  2439. return ret;
  2440. }
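/* Usage sketch (illustrative): probe a PAL8 picture for transparency.
 * Assumes 'pic' holds valid PAL8 data with its palette in data[1].
 *
 *     int flags = img_get_alpha_info(&pic, PIX_FMT_PAL8, w, h);
 *     if (flags & FF_ALPHA_SEMI_TRANSP) {
 *         // at least one visible pixel uses a partially transparent entry
 *     }
 */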
  2441. #ifdef HAVE_MMX
  2442. #define DEINT_INPLACE_LINE_LUM \
  2443. movd_m2r(lum_m4[0],mm0);\
  2444. movd_m2r(lum_m3[0],mm1);\
  2445. movd_m2r(lum_m2[0],mm2);\
  2446. movd_m2r(lum_m1[0],mm3);\
  2447. movd_m2r(lum[0],mm4);\
  2448. punpcklbw_r2r(mm7,mm0);\
  2449. movd_r2m(mm2,lum_m4[0]);\
  2450. punpcklbw_r2r(mm7,mm1);\
  2451. punpcklbw_r2r(mm7,mm2);\
  2452. punpcklbw_r2r(mm7,mm3);\
  2453. punpcklbw_r2r(mm7,mm4);\
  2454. paddw_r2r(mm3,mm1);\
  2455. psllw_i2r(1,mm2);\
  2456. paddw_r2r(mm4,mm0);\
  2457. psllw_i2r(2,mm1);\
  2458. paddw_r2r(mm6,mm2);\
  2459. paddw_r2r(mm2,mm1);\
  2460. psubusw_r2r(mm0,mm1);\
  2461. psrlw_i2r(3,mm1);\
  2462. packuswb_r2r(mm7,mm1);\
  2463. movd_r2m(mm1,lum_m2[0]);
  2464. #define DEINT_LINE_LUM \
  2465. movd_m2r(lum_m4[0],mm0);\
  2466. movd_m2r(lum_m3[0],mm1);\
  2467. movd_m2r(lum_m2[0],mm2);\
  2468. movd_m2r(lum_m1[0],mm3);\
  2469. movd_m2r(lum[0],mm4);\
  2470. punpcklbw_r2r(mm7,mm0);\
  2471. punpcklbw_r2r(mm7,mm1);\
  2472. punpcklbw_r2r(mm7,mm2);\
  2473. punpcklbw_r2r(mm7,mm3);\
  2474. punpcklbw_r2r(mm7,mm4);\
  2475. paddw_r2r(mm3,mm1);\
  2476. psllw_i2r(1,mm2);\
  2477. paddw_r2r(mm4,mm0);\
  2478. psllw_i2r(2,mm1);\
  2479. paddw_r2r(mm6,mm2);\
  2480. paddw_r2r(mm2,mm1);\
  2481. psubusw_r2r(mm0,mm1);\
  2482. psrlw_i2r(3,mm1);\
  2483. packuswb_r2r(mm7,mm1);\
  2484. movd_r2m(mm1,dst[0]);
  2485. #endif
  2486. /* filter parameters: [-1 4 2 4 -1] // 8 */
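/* Worked example of the tap weights above (illustrative): for the five
   input lines holding 10, 20, 30, 40, 50 at some column, the output is
   clip((-10 + 4*20 + 2*30 + 4*40 - 50 + 4) >> 3) = clip(244 >> 3) = 30. */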
  2487. static void deinterlace_line(uint8_t *dst,
  2488. const uint8_t *lum_m4, const uint8_t *lum_m3,
  2489. const uint8_t *lum_m2, const uint8_t *lum_m1,
  2490. const uint8_t *lum,
  2491. int size)
  2492. {
  2493. #ifndef HAVE_MMX
  2494. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  2495. int sum;
  2496. for(;size > 0;size--) {
  2497. sum = -lum_m4[0];
  2498. sum += lum_m3[0] << 2;
  2499. sum += lum_m2[0] << 1;
  2500. sum += lum_m1[0] << 2;
  2501. sum += -lum[0];
  2502. dst[0] = cm[(sum + 4) >> 3];
  2503. lum_m4++;
  2504. lum_m3++;
  2505. lum_m2++;
  2506. lum_m1++;
  2507. lum++;
  2508. dst++;
  2509. }
  2510. #else
  2511. {
  2512. mmx_t rounder;
  2513. rounder.uw[0]=4;
  2514. rounder.uw[1]=4;
  2515. rounder.uw[2]=4;
  2516. rounder.uw[3]=4;
  2517. pxor_r2r(mm7,mm7);
  2518. movq_m2r(rounder,mm6);
  2519. }
  2520. for (;size > 3; size-=4) {
  2521. DEINT_LINE_LUM
  2522. lum_m4+=4;
  2523. lum_m3+=4;
  2524. lum_m2+=4;
  2525. lum_m1+=4;
  2526. lum+=4;
  2527. dst+=4;
  2528. }
  2529. #endif
  2530. }
  2531. static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
  2532. int size)
  2533. {
  2534. #ifndef HAVE_MMX
  2535. uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
  2536. int sum;
  2537. for(;size > 0;size--) {
  2538. sum = -lum_m4[0];
  2539. sum += lum_m3[0] << 2;
  2540. sum += lum_m2[0] << 1;
  2541. lum_m4[0]=lum_m2[0];
  2542. sum += lum_m1[0] << 2;
  2543. sum += -lum[0];
  2544. lum_m2[0] = cm[(sum + 4) >> 3];
  2545. lum_m4++;
  2546. lum_m3++;
  2547. lum_m2++;
  2548. lum_m1++;
  2549. lum++;
  2550. }
  2551. #else
  2552. {
  2553. mmx_t rounder;
  2554. rounder.uw[0]=4;
  2555. rounder.uw[1]=4;
  2556. rounder.uw[2]=4;
  2557. rounder.uw[3]=4;
  2558. pxor_r2r(mm7,mm7);
  2559. movq_m2r(rounder,mm6);
  2560. }
  2561. for (;size > 3; size-=4) {
  2562. DEINT_INPLACE_LINE_LUM
  2563. lum_m4+=4;
  2564. lum_m3+=4;
  2565. lum_m2+=4;
  2566. lum_m1+=4;
  2567. lum+=4;
  2568. }
  2569. #endif
  2570. }
2571. /* deinterlacing: linear filter with 2 temporal taps and 3 spatial taps.
2572. The top field is copied as is, but the bottom field is deinterlaced
2573. against the top field. */
  2574. static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
  2575. const uint8_t *src1, int src_wrap,
  2576. int width, int height)
  2577. {
  2578. const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
  2579. int y;
  2580. src_m2 = src1;
  2581. src_m1 = src1;
  2582. src_0=&src_m1[src_wrap];
  2583. src_p1=&src_0[src_wrap];
  2584. src_p2=&src_p1[src_wrap];
  2585. for(y=0;y<(height-2);y+=2) {
  2586. memcpy(dst,src_m1,width);
  2587. dst += dst_wrap;
  2588. deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
  2589. src_m2 = src_0;
  2590. src_m1 = src_p1;
  2591. src_0 = src_p2;
  2592. src_p1 += 2*src_wrap;
  2593. src_p2 += 2*src_wrap;
  2594. dst += dst_wrap;
  2595. }
  2596. memcpy(dst,src_m1,width);
  2597. dst += dst_wrap;
  2598. /* do last line */
  2599. deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
  2600. }
  2601. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  2602. int width, int height)
  2603. {
  2604. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  2605. int y;
  2606. uint8_t *buf;
2607. buf = (uint8_t*)av_malloc(width);
if (!buf)
return;
  2608. src_m1 = src1;
  2609. memcpy(buf,src_m1,width);
  2610. src_0=&src_m1[src_wrap];
  2611. src_p1=&src_0[src_wrap];
  2612. src_p2=&src_p1[src_wrap];
  2613. for(y=0;y<(height-2);y+=2) {
  2614. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  2615. src_m1 = src_p1;
  2616. src_0 = src_p2;
  2617. src_p1 += 2*src_wrap;
  2618. src_p2 += 2*src_wrap;
  2619. }
  2620. /* do last line */
  2621. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  2622. av_free(buf);
  2623. }
  2624. int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
  2625. int pix_fmt, int width, int height)
  2626. {
  2627. int i;
  2628. if (pix_fmt != PIX_FMT_YUV420P &&
  2629. pix_fmt != PIX_FMT_YUV422P &&
  2630. pix_fmt != PIX_FMT_YUV444P &&
  2631. pix_fmt != PIX_FMT_YUV411P &&
  2632. pix_fmt != PIX_FMT_GRAY8)
  2633. return -1;
  2634. if ((width & 3) != 0 || (height & 3) != 0)
  2635. return -1;
  2636. for(i=0;i<3;i++) {
  2637. if (i == 1) {
  2638. switch(pix_fmt) {
  2639. case PIX_FMT_YUV420P:
  2640. width >>= 1;
  2641. height >>= 1;
  2642. break;
  2643. case PIX_FMT_YUV422P:
  2644. width >>= 1;
  2645. break;
  2646. case PIX_FMT_YUV411P:
  2647. width >>= 2;
  2648. break;
  2649. default:
  2650. break;
  2651. }
  2652. if (pix_fmt == PIX_FMT_GRAY8) {
  2653. break;
  2654. }
  2655. }
  2656. if (src == dst) {
  2657. deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i],
  2658. width, height);
  2659. } else {
  2660. deinterlace_bottom_field(dst->data[i],dst->linesize[i],
  2661. src->data[i], src->linesize[i],
  2662. width, height);
  2663. }
  2664. }
  2665. emms_c();
  2666. return 0;
  2667. }
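/* Usage sketch (illustrative): deinterlace a YUV420P frame in place.
 * Both width and height must be multiples of 4 or the call fails.
 *
 *     if (avpicture_deinterlace(&pic, &pic, PIX_FMT_YUV420P, w, h) < 0) {
 *         // unsupported pixel format or bad dimensions
 *     }
 */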