You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2734 lines
73KB

  1. /*
2. * Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
  4. *
  5. * This library is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU Lesser General Public
  7. * License as published by the Free Software Foundation; either
  8. * version 2 of the License, or (at your option) any later version.
  9. *
  10. * This library is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * Lesser General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU Lesser General Public
  16. * License along with this library; if not, write to the Free Software
  17. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  18. */
  19. /**
  20. * @file imgconvert.c
21. * Misc image conversion routines.
  22. */
  23. /* TODO:
  24. * - write 'ffimg' program to test all the image related stuff
  25. * - move all api to slice based system
  26. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  27. */
  28. #include "avcodec.h"
  29. #include "dsputil.h"
  30. #ifdef USE_FASTMEMCPY
  31. #include "libvo/fastmemcpy.h"
  32. #endif
  33. #ifdef HAVE_MMX
  34. #include "i386/mmx.h"
  35. #endif
  36. #define xglue(x, y) x ## y
  37. #define glue(x, y) xglue(x, y)
  38. #define FF_COLOR_RGB 0 /* RGB color space */
  39. #define FF_COLOR_GRAY 1 /* gray color space */
  40. #define FF_COLOR_YUV 2 /* YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
  41. #define FF_COLOR_YUV_JPEG 3 /* YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
  42. #define FF_PIXEL_PLANAR 0 /* each channel has one component in AVPicture */
  43. #define FF_PIXEL_PACKED 1 /* only one components containing all the channels */
  44. #define FF_PIXEL_PALETTE 2 /* one components containing indexes for a palette */
  45. typedef struct PixFmtInfo {
  46. const char *name;
  47. uint8_t nb_channels; /* number of channels (including alpha) */
  48. uint8_t color_type; /* color type (see FF_COLOR_xxx constants) */
  49. uint8_t pixel_type; /* pixel storage type (see FF_PIXEL_xxx constants) */
  50. uint8_t is_alpha : 1; /* true if alpha can be specified */
  51. uint8_t x_chroma_shift; /* X chroma subsampling factor is 2 ^ shift */
  52. uint8_t y_chroma_shift; /* Y chroma subsampling factor is 2 ^ shift */
  53. uint8_t depth; /* bit depth of the color components */
  54. } PixFmtInfo;
  55. /* this table gives more information about formats */
  56. static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
  57. /* YUV formats */
  58. [PIX_FMT_YUV420P] = {
  59. .name = "yuv420p",
  60. .nb_channels = 3,
  61. .color_type = FF_COLOR_YUV,
  62. .pixel_type = FF_PIXEL_PLANAR,
  63. .depth = 8,
  64. .x_chroma_shift = 1, .y_chroma_shift = 1,
  65. },
  66. [PIX_FMT_YUV422P] = {
  67. .name = "yuv422p",
  68. .nb_channels = 3,
  69. .color_type = FF_COLOR_YUV,
  70. .pixel_type = FF_PIXEL_PLANAR,
  71. .depth = 8,
  72. .x_chroma_shift = 1, .y_chroma_shift = 0,
  73. },
  74. [PIX_FMT_YUV444P] = {
  75. .name = "yuv444p",
  76. .nb_channels = 3,
  77. .color_type = FF_COLOR_YUV,
  78. .pixel_type = FF_PIXEL_PLANAR,
  79. .depth = 8,
  80. .x_chroma_shift = 0, .y_chroma_shift = 0,
  81. },
  82. [PIX_FMT_YUV422] = {
  83. .name = "yuv422",
  84. .nb_channels = 1,
  85. .color_type = FF_COLOR_YUV,
  86. .pixel_type = FF_PIXEL_PACKED,
  87. .depth = 8,
  88. .x_chroma_shift = 1, .y_chroma_shift = 0,
  89. },
  90. [PIX_FMT_UYVY422] = {
  91. .name = "uyvy422",
  92. .nb_channels = 1,
  93. .color_type = FF_COLOR_YUV,
  94. .pixel_type = FF_PIXEL_PACKED,
  95. .depth = 8,
  96. .x_chroma_shift = 1, .y_chroma_shift = 0,
  97. },
  98. [PIX_FMT_YUV410P] = {
  99. .name = "yuv410p",
  100. .nb_channels = 3,
  101. .color_type = FF_COLOR_YUV,
  102. .pixel_type = FF_PIXEL_PLANAR,
  103. .depth = 8,
  104. .x_chroma_shift = 2, .y_chroma_shift = 2,
  105. },
  106. [PIX_FMT_YUV411P] = {
  107. .name = "yuv411p",
  108. .nb_channels = 3,
  109. .color_type = FF_COLOR_YUV,
  110. .pixel_type = FF_PIXEL_PLANAR,
  111. .depth = 8,
  112. .x_chroma_shift = 2, .y_chroma_shift = 0,
  113. },
  114. /* JPEG YUV */
  115. [PIX_FMT_YUVJ420P] = {
  116. .name = "yuvj420p",
  117. .nb_channels = 3,
  118. .color_type = FF_COLOR_YUV_JPEG,
  119. .pixel_type = FF_PIXEL_PLANAR,
  120. .depth = 8,
  121. .x_chroma_shift = 1, .y_chroma_shift = 1,
  122. },
  123. [PIX_FMT_YUVJ422P] = {
  124. .name = "yuvj422p",
  125. .nb_channels = 3,
  126. .color_type = FF_COLOR_YUV_JPEG,
  127. .pixel_type = FF_PIXEL_PLANAR,
  128. .depth = 8,
  129. .x_chroma_shift = 1, .y_chroma_shift = 0,
  130. },
  131. [PIX_FMT_YUVJ444P] = {
  132. .name = "yuvj444p",
  133. .nb_channels = 3,
  134. .color_type = FF_COLOR_YUV_JPEG,
  135. .pixel_type = FF_PIXEL_PLANAR,
  136. .depth = 8,
  137. .x_chroma_shift = 0, .y_chroma_shift = 0,
  138. },
  139. /* RGB formats */
  140. [PIX_FMT_RGB24] = {
  141. .name = "rgb24",
  142. .nb_channels = 3,
  143. .color_type = FF_COLOR_RGB,
  144. .pixel_type = FF_PIXEL_PACKED,
  145. .depth = 8,
  146. .x_chroma_shift = 0, .y_chroma_shift = 0,
  147. },
  148. [PIX_FMT_BGR24] = {
  149. .name = "bgr24",
  150. .nb_channels = 3,
  151. .color_type = FF_COLOR_RGB,
  152. .pixel_type = FF_PIXEL_PACKED,
  153. .depth = 8,
  154. .x_chroma_shift = 0, .y_chroma_shift = 0,
  155. },
  156. [PIX_FMT_RGBA32] = {
  157. .name = "rgba32",
  158. .nb_channels = 4, .is_alpha = 1,
  159. .color_type = FF_COLOR_RGB,
  160. .pixel_type = FF_PIXEL_PACKED,
  161. .depth = 8,
  162. .x_chroma_shift = 0, .y_chroma_shift = 0,
  163. },
  164. [PIX_FMT_RGB565] = {
  165. .name = "rgb565",
  166. .nb_channels = 3,
  167. .color_type = FF_COLOR_RGB,
  168. .pixel_type = FF_PIXEL_PACKED,
  169. .depth = 5,
  170. .x_chroma_shift = 0, .y_chroma_shift = 0,
  171. },
  172. [PIX_FMT_RGB555] = {
  173. .name = "rgb555",
  174. .nb_channels = 4, .is_alpha = 1,
  175. .color_type = FF_COLOR_RGB,
  176. .pixel_type = FF_PIXEL_PACKED,
  177. .depth = 5,
  178. .x_chroma_shift = 0, .y_chroma_shift = 0,
  179. },
  180. /* gray / mono formats */
  181. [PIX_FMT_GRAY8] = {
  182. .name = "gray",
  183. .nb_channels = 1,
  184. .color_type = FF_COLOR_GRAY,
  185. .pixel_type = FF_PIXEL_PLANAR,
  186. .depth = 8,
  187. },
  188. [PIX_FMT_MONOWHITE] = {
  189. .name = "monow",
  190. .nb_channels = 1,
  191. .color_type = FF_COLOR_GRAY,
  192. .pixel_type = FF_PIXEL_PLANAR,
  193. .depth = 1,
  194. },
  195. [PIX_FMT_MONOBLACK] = {
  196. .name = "monob",
  197. .nb_channels = 1,
  198. .color_type = FF_COLOR_GRAY,
  199. .pixel_type = FF_PIXEL_PLANAR,
  200. .depth = 1,
  201. },
  202. /* paletted formats */
  203. [PIX_FMT_PAL8] = {
  204. .name = "pal8",
  205. .nb_channels = 4, .is_alpha = 1,
  206. .color_type = FF_COLOR_RGB,
  207. .pixel_type = FF_PIXEL_PALETTE,
  208. .depth = 8,
  209. },
  210. [PIX_FMT_XVMC_MPEG2_MC] = {
  211. .name = "xvmcmc",
  212. },
  213. [PIX_FMT_XVMC_MPEG2_IDCT] = {
  214. .name = "xvmcidct",
  215. },
  216. [PIX_FMT_UYVY411] = {
  217. .name = "uyvy411",
  218. .nb_channels = 1,
  219. .color_type = FF_COLOR_YUV,
  220. .pixel_type = FF_PIXEL_PACKED,
  221. .depth = 8,
  222. .x_chroma_shift = 2, .y_chroma_shift = 0,
  223. },
  224. [PIX_FMT_BGR32] = {
  225. .name = "bgr32",
  226. .nb_channels = 4, .is_alpha = 1,
  227. .color_type = FF_COLOR_RGB,
  228. .pixel_type = FF_PIXEL_PACKED,
  229. .depth = 8,
  230. .x_chroma_shift = 0, .y_chroma_shift = 0,
  231. },
  232. [PIX_FMT_BGR565] = {
  233. .name = "bgr565",
  234. .nb_channels = 3,
  235. .color_type = FF_COLOR_RGB,
  236. .pixel_type = FF_PIXEL_PACKED,
  237. .depth = 5,
  238. .x_chroma_shift = 0, .y_chroma_shift = 0,
  239. },
  240. [PIX_FMT_BGR555] = {
  241. .name = "bgr555",
  242. .nb_channels = 4, .is_alpha = 1,
  243. .color_type = FF_COLOR_RGB,
  244. .pixel_type = FF_PIXEL_PACKED,
  245. .depth = 5,
  246. .x_chroma_shift = 0, .y_chroma_shift = 0,
  247. },
  248. [PIX_FMT_RGB8] = {
  249. .name = "rgb8",
  250. .nb_channels = 1,
  251. .color_type = FF_COLOR_RGB,
  252. .pixel_type = FF_PIXEL_PACKED,
  253. .depth = 8,
  254. .x_chroma_shift = 0, .y_chroma_shift = 0,
  255. },
  256. [PIX_FMT_RGB4] = {
  257. .name = "rgb4",
  258. .nb_channels = 1,
  259. .color_type = FF_COLOR_RGB,
  260. .pixel_type = FF_PIXEL_PACKED,
  261. .depth = 4,
  262. .x_chroma_shift = 0, .y_chroma_shift = 0,
  263. },
  264. [PIX_FMT_RGB4_BYTE] = {
  265. .name = "rgb4_byte",
  266. .nb_channels = 1,
  267. .color_type = FF_COLOR_RGB,
  268. .pixel_type = FF_PIXEL_PACKED,
  269. .depth = 8,
  270. .x_chroma_shift = 0, .y_chroma_shift = 0,
  271. },
  272. [PIX_FMT_BGR8] = {
  273. .name = "bgr8",
  274. .nb_channels = 1,
  275. .color_type = FF_COLOR_RGB,
  276. .pixel_type = FF_PIXEL_PACKED,
  277. .depth = 8,
  278. .x_chroma_shift = 0, .y_chroma_shift = 0,
  279. },
  280. [PIX_FMT_BGR4] = {
  281. .name = "bgr4",
  282. .nb_channels = 1,
  283. .color_type = FF_COLOR_RGB,
  284. .pixel_type = FF_PIXEL_PACKED,
  285. .depth = 4,
  286. .x_chroma_shift = 0, .y_chroma_shift = 0,
  287. },
  288. [PIX_FMT_BGR4_BYTE] = {
  289. .name = "bgr4_byte",
  290. .nb_channels = 1,
  291. .color_type = FF_COLOR_RGB,
  292. .pixel_type = FF_PIXEL_PACKED,
  293. .depth = 8,
  294. .x_chroma_shift = 0, .y_chroma_shift = 0,
  295. },
  296. [PIX_FMT_NV12] = {
  297. .name = "nv12",
  298. .nb_channels = 2,
  299. .color_type = FF_COLOR_YUV,
  300. .pixel_type = FF_PIXEL_PLANAR,
  301. .depth = 8,
  302. .x_chroma_shift = 1, .y_chroma_shift = 1,
  303. },
  304. [PIX_FMT_NV21] = {
  305. .name = "nv12",
  306. .nb_channels = 2,
  307. .color_type = FF_COLOR_YUV,
  308. .pixel_type = FF_PIXEL_PLANAR,
  309. .depth = 8,
  310. .x_chroma_shift = 1, .y_chroma_shift = 1,
  311. },
  312. [PIX_FMT_BGR32_1] = {
  313. .name = "bgr32_1",
  314. .nb_channels = 4, .is_alpha = 1,
  315. .color_type = FF_COLOR_RGB,
  316. .pixel_type = FF_PIXEL_PACKED,
  317. .depth = 8,
  318. .x_chroma_shift = 0, .y_chroma_shift = 0,
  319. },
  320. [PIX_FMT_RGB32_1] = {
  321. .name = "rgb32_1",
  322. .nb_channels = 4, .is_alpha = 1,
  323. .color_type = FF_COLOR_RGB,
  324. .pixel_type = FF_PIXEL_PACKED,
  325. .depth = 8,
  326. .x_chroma_shift = 0, .y_chroma_shift = 0,
  327. },
  328. };
  329. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  330. {
  331. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  332. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  333. }
  334. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  335. {
  336. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  337. return "???";
  338. else
  339. return pix_fmt_info[pix_fmt].name;
  340. }
  341. enum PixelFormat avcodec_get_pix_fmt(const char* name)
  342. {
  343. int i;
  344. for (i=0; i < PIX_FMT_NB; i++)
  345. if (!strcmp(pix_fmt_info[i].name, name))
  346. break;
  347. return i;
  348. }
  349. /* Picture field are filled with 'ptr' addresses. Also return size */
  350. int avpicture_fill(AVPicture *picture, uint8_t *ptr,
  351. int pix_fmt, int width, int height)
  352. {
  353. int size, w2, h2, size2;
  354. const PixFmtInfo *pinfo;
  355. if(avcodec_check_dimensions(NULL, width, height))
  356. goto fail;
  357. pinfo = &pix_fmt_info[pix_fmt];
  358. size = width * height;
  359. switch(pix_fmt) {
  360. case PIX_FMT_YUV420P:
  361. case PIX_FMT_YUV422P:
  362. case PIX_FMT_YUV444P:
  363. case PIX_FMT_YUV410P:
  364. case PIX_FMT_YUV411P:
  365. case PIX_FMT_YUVJ420P:
  366. case PIX_FMT_YUVJ422P:
  367. case PIX_FMT_YUVJ444P:
  368. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  369. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  370. size2 = w2 * h2;
  371. picture->data[0] = ptr;
  372. picture->data[1] = picture->data[0] + size;
  373. picture->data[2] = picture->data[1] + size2;
  374. picture->linesize[0] = width;
  375. picture->linesize[1] = w2;
  376. picture->linesize[2] = w2;
  377. return size + 2 * size2;
  378. case PIX_FMT_NV12:
  379. case PIX_FMT_NV21:
  380. w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
  381. h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
  382. size2 = w2 * h2 * 2;
  383. picture->data[0] = ptr;
  384. picture->data[1] = picture->data[0] + size;
  385. picture->data[2] = NULL;
  386. picture->linesize[0] = width;
  387. picture->linesize[1] = w2;
  388. picture->linesize[2] = 0;
  389. return size + 2 * size2;
  390. case PIX_FMT_RGB24:
  391. case PIX_FMT_BGR24:
  392. picture->data[0] = ptr;
  393. picture->data[1] = NULL;
  394. picture->data[2] = NULL;
  395. picture->linesize[0] = width * 3;
  396. return size * 3;
  397. case PIX_FMT_RGBA32:
  398. case PIX_FMT_BGR32:
  399. case PIX_FMT_RGB32_1:
  400. case PIX_FMT_BGR32_1:
  401. picture->data[0] = ptr;
  402. picture->data[1] = NULL;
  403. picture->data[2] = NULL;
  404. picture->linesize[0] = width * 4;
  405. return size * 4;
  406. case PIX_FMT_BGR555:
  407. case PIX_FMT_BGR565:
  408. case PIX_FMT_RGB555:
  409. case PIX_FMT_RGB565:
  410. case PIX_FMT_YUV422:
  411. picture->data[0] = ptr;
  412. picture->data[1] = NULL;
  413. picture->data[2] = NULL;
  414. picture->linesize[0] = width * 2;
  415. return size * 2;
  416. case PIX_FMT_UYVY422:
  417. picture->data[0] = ptr;
  418. picture->data[1] = NULL;
  419. picture->data[2] = NULL;
  420. picture->linesize[0] = width * 2;
  421. return size * 2;
  422. case PIX_FMT_UYVY411:
  423. picture->data[0] = ptr;
  424. picture->data[1] = NULL;
  425. picture->data[2] = NULL;
  426. picture->linesize[0] = width + width/2;
  427. return size + size/2;
  428. case PIX_FMT_RGB8:
  429. case PIX_FMT_BGR8:
  430. case PIX_FMT_RGB4_BYTE:
  431. case PIX_FMT_BGR4_BYTE:
  432. case PIX_FMT_GRAY8:
  433. picture->data[0] = ptr;
  434. picture->data[1] = NULL;
  435. picture->data[2] = NULL;
  436. picture->linesize[0] = width;
  437. return size;
  438. case PIX_FMT_RGB4:
  439. case PIX_FMT_BGR4:
  440. picture->data[0] = ptr;
  441. picture->data[1] = NULL;
  442. picture->data[2] = NULL;
  443. picture->linesize[0] = width / 2;
  444. return size / 2;
  445. case PIX_FMT_MONOWHITE:
  446. case PIX_FMT_MONOBLACK:
  447. picture->data[0] = ptr;
  448. picture->data[1] = NULL;
  449. picture->data[2] = NULL;
  450. picture->linesize[0] = (width + 7) >> 3;
  451. return picture->linesize[0] * height;
  452. case PIX_FMT_PAL8:
  453. size2 = (size + 3) & ~3;
  454. picture->data[0] = ptr;
  455. picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
  456. picture->data[2] = NULL;
  457. picture->linesize[0] = width;
  458. picture->linesize[1] = 4;
  459. return size2 + 256 * 4;
  460. default:
  461. fail:
  462. picture->data[0] = NULL;
  463. picture->data[1] = NULL;
  464. picture->data[2] = NULL;
  465. picture->data[3] = NULL;
  466. return -1;
  467. }
  468. }
  469. int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
  470. unsigned char *dest, int dest_size)
  471. {
  472. const PixFmtInfo* pf = &pix_fmt_info[pix_fmt];
  473. int i, j, w, h, data_planes;
  474. const unsigned char* s;
  475. int size = avpicture_get_size(pix_fmt, width, height);
  476. if (size > dest_size || size < 0)
  477. return -1;
  478. if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
  479. if (pix_fmt == PIX_FMT_YUV422 ||
  480. pix_fmt == PIX_FMT_UYVY422 ||
  481. pix_fmt == PIX_FMT_BGR565 ||
  482. pix_fmt == PIX_FMT_BGR565 ||
  483. pix_fmt == PIX_FMT_RGB565 ||
  484. pix_fmt == PIX_FMT_RGB555)
  485. w = width * 2;
  486. else if (pix_fmt == PIX_FMT_UYVY411)
  487. w = width + width/2;
  488. else if (pix_fmt == PIX_FMT_PAL8)
  489. w = width;
  490. else
  491. w = width * (pf->depth * pf->nb_channels / 8);
  492. data_planes = 1;
  493. h = height;
  494. } else {
  495. data_planes = pf->nb_channels;
  496. w = (width*pf->depth + 7)/8;
  497. h = height;
  498. }
  499. for (i=0; i<data_planes; i++) {
  500. if (i == 1) {
  501. w = width >> pf->x_chroma_shift;
  502. h = height >> pf->y_chroma_shift;
  503. }
  504. s = src->data[i];
  505. for(j=0; j<h; j++) {
  506. memcpy(dest, s, w);
  507. dest += w;
  508. s += src->linesize[i];
  509. }
  510. }
  511. if (pf->pixel_type == FF_PIXEL_PALETTE)
  512. memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);
  513. return size;
  514. }
  515. int avpicture_get_size(int pix_fmt, int width, int height)
  516. {
  517. AVPicture dummy_pict;
  518. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  519. }
  520. /**
  521. * compute the loss when converting from a pixel format to another
  522. */
  523. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  524. int has_alpha)
  525. {
  526. const PixFmtInfo *pf, *ps;
  527. int loss;
  528. ps = &pix_fmt_info[src_pix_fmt];
  529. pf = &pix_fmt_info[dst_pix_fmt];
  530. /* compute loss */
  531. loss = 0;
  532. pf = &pix_fmt_info[dst_pix_fmt];
  533. if (pf->depth < ps->depth ||
  534. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  535. loss |= FF_LOSS_DEPTH;
  536. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  537. pf->y_chroma_shift > ps->y_chroma_shift)
  538. loss |= FF_LOSS_RESOLUTION;
  539. switch(pf->color_type) {
  540. case FF_COLOR_RGB:
  541. if (ps->color_type != FF_COLOR_RGB &&
  542. ps->color_type != FF_COLOR_GRAY)
  543. loss |= FF_LOSS_COLORSPACE;
  544. break;
  545. case FF_COLOR_GRAY:
  546. if (ps->color_type != FF_COLOR_GRAY)
  547. loss |= FF_LOSS_COLORSPACE;
  548. break;
  549. case FF_COLOR_YUV:
  550. if (ps->color_type != FF_COLOR_YUV)
  551. loss |= FF_LOSS_COLORSPACE;
  552. break;
  553. case FF_COLOR_YUV_JPEG:
  554. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  555. ps->color_type != FF_COLOR_YUV &&
  556. ps->color_type != FF_COLOR_GRAY)
  557. loss |= FF_LOSS_COLORSPACE;
  558. break;
  559. default:
  560. /* fail safe test */
  561. if (ps->color_type != pf->color_type)
  562. loss |= FF_LOSS_COLORSPACE;
  563. break;
  564. }
  565. if (pf->color_type == FF_COLOR_GRAY &&
  566. ps->color_type != FF_COLOR_GRAY)
  567. loss |= FF_LOSS_CHROMA;
  568. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  569. loss |= FF_LOSS_ALPHA;
  570. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  571. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  572. loss |= FF_LOSS_COLORQUANT;
  573. return loss;
  574. }
  575. static int avg_bits_per_pixel(int pix_fmt)
  576. {
  577. int bits;
  578. const PixFmtInfo *pf;
  579. pf = &pix_fmt_info[pix_fmt];
  580. switch(pf->pixel_type) {
  581. case FF_PIXEL_PACKED:
  582. switch(pix_fmt) {
  583. case PIX_FMT_YUV422:
  584. case PIX_FMT_UYVY422:
  585. case PIX_FMT_RGB565:
  586. case PIX_FMT_RGB555:
  587. case PIX_FMT_BGR565:
  588. case PIX_FMT_BGR555:
  589. bits = 16;
  590. break;
  591. case PIX_FMT_UYVY411:
  592. bits = 12;
  593. break;
  594. default:
  595. bits = pf->depth * pf->nb_channels;
  596. break;
  597. }
  598. break;
  599. case FF_PIXEL_PLANAR:
  600. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  601. bits = pf->depth * pf->nb_channels;
  602. } else {
  603. bits = pf->depth + ((2 * pf->depth) >>
  604. (pf->x_chroma_shift + pf->y_chroma_shift));
  605. }
  606. break;
  607. case FF_PIXEL_PALETTE:
  608. bits = 8;
  609. break;
  610. default:
  611. bits = -1;
  612. break;
  613. }
  614. return bits;
  615. }
  616. static int avcodec_find_best_pix_fmt1(int pix_fmt_mask,
  617. int src_pix_fmt,
  618. int has_alpha,
  619. int loss_mask)
  620. {
  621. int dist, i, loss, min_dist, dst_pix_fmt;
  622. /* find exact color match with smallest size */
  623. dst_pix_fmt = -1;
  624. min_dist = 0x7fffffff;
  625. for(i = 0;i < PIX_FMT_NB; i++) {
  626. if (pix_fmt_mask & (1 << i)) {
  627. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  628. if (loss == 0) {
  629. dist = avg_bits_per_pixel(i);
  630. if (dist < min_dist) {
  631. min_dist = dist;
  632. dst_pix_fmt = i;
  633. }
  634. }
  635. }
  636. }
  637. return dst_pix_fmt;
  638. }
  639. /**
  640. * find best pixel format to convert to. Return -1 if none found
  641. */
  642. int avcodec_find_best_pix_fmt(int pix_fmt_mask, int src_pix_fmt,
  643. int has_alpha, int *loss_ptr)
  644. {
  645. int dst_pix_fmt, loss_mask, i;
  646. static const int loss_mask_order[] = {
  647. ~0, /* no loss first */
  648. ~FF_LOSS_ALPHA,
  649. ~FF_LOSS_RESOLUTION,
  650. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  651. ~FF_LOSS_COLORQUANT,
  652. ~FF_LOSS_DEPTH,
  653. 0,
  654. };
  655. /* try with successive loss */
  656. i = 0;
  657. for(;;) {
  658. loss_mask = loss_mask_order[i++];
  659. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  660. has_alpha, loss_mask);
  661. if (dst_pix_fmt >= 0)
  662. goto found;
  663. if (loss_mask == 0)
  664. break;
  665. }
  666. return -1;
  667. found:
  668. if (loss_ptr)
  669. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  670. return dst_pix_fmt;
  671. }
  672. void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
  673. const uint8_t *src, int src_wrap,
  674. int width, int height)
  675. {
  676. if((!dst) || (!src))
  677. return;
  678. for(;height > 0; height--) {
  679. memcpy(dst, src, width);
  680. dst += dst_wrap;
  681. src += src_wrap;
  682. }
  683. }
  684. /**
  685. * Copy image 'src' to 'dst'.
  686. */
  687. void img_copy(AVPicture *dst, const AVPicture *src,
  688. int pix_fmt, int width, int height)
  689. {
  690. int bwidth, bits, i;
  691. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  692. pf = &pix_fmt_info[pix_fmt];
  693. switch(pf->pixel_type) {
  694. case FF_PIXEL_PACKED:
  695. switch(pix_fmt) {
  696. case PIX_FMT_YUV422:
  697. case PIX_FMT_UYVY422:
  698. case PIX_FMT_RGB565:
  699. case PIX_FMT_RGB555:
  700. case PIX_FMT_BGR565:
  701. case PIX_FMT_BGR555:
  702. bits = 16;
  703. break;
  704. case PIX_FMT_UYVY411:
  705. bits = 12;
  706. break;
  707. default:
  708. bits = pf->depth * pf->nb_channels;
  709. break;
  710. }
  711. bwidth = (width * bits + 7) >> 3;
  712. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  713. src->data[0], src->linesize[0],
  714. bwidth, height);
  715. break;
  716. case FF_PIXEL_PLANAR:
  717. for(i = 0; i < pf->nb_channels; i++) {
  718. int w, h;
  719. w = width;
  720. h = height;
  721. if (i == 1 || i == 2) {
  722. w >>= pf->x_chroma_shift;
  723. h >>= pf->y_chroma_shift;
  724. }
  725. bwidth = (w * pf->depth + 7) >> 3;
  726. ff_img_copy_plane(dst->data[i], dst->linesize[i],
  727. src->data[i], src->linesize[i],
  728. bwidth, h);
  729. }
  730. break;
  731. case FF_PIXEL_PALETTE:
  732. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  733. src->data[0], src->linesize[0],
  734. width, height);
  735. /* copy the palette */
  736. ff_img_copy_plane(dst->data[1], dst->linesize[1],
  737. src->data[1], src->linesize[1],
  738. 4, 256);
  739. break;
  740. }
  741. }
/* XXX: totally non optimized */

/* Convert packed YUYV 4:2:2 to planar YUV 4:2:0.
 * Chroma is taken from the even source rows only (no vertical averaging).
 * Odd widths and heights are handled by the explicit tail code below.
 * NOTE(review): the odd-width tail reads p[3] (Cr of a pixel pair that is
 * only half present), i.e. one byte past the last full pixel's data —
 * presumably the source rows are padded; confirm against callers. */
static void yuv422_to_yuv420p(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;

    p1 = src->data[0];     /* packed source: Y0 Cb Y1 Cr ... */
    lum1 = dst->data[0];
    cb1 = dst->data[1];
    cr1 = dst->data[2];
    /* each iteration consumes up to two source rows */
    for(;height >= 1; height -= 2) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        /* even row: extract both luma and chroma */
        for(w = width; w >= 2; w -= 2) {
            lum[0] = p[0];
            cb[0] = p[1];
            lum[1] = p[2];
            cr[0] = p[3];
            p += 4;
            lum += 2;
            cb++;
            cr++;
        }
        /* odd width: one trailing luma sample plus its chroma */
        if (w) {
            lum[0] = p[0];
            cb[0] = p[1];
            cr[0] = p[3];
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        /* odd row (if present): luma only, chroma is dropped */
        if (height>1) {
            p = p1;
            lum = lum1;
            for(w = width; w >= 2; w -= 2) {
                lum[0] = p[0];
                lum[1] = p[2];
                p += 4;
                lum += 2;
            }
            if (w) {
                lum[0] = p[0];
            }
            p1 += src->linesize[0];
            lum1 += dst->linesize[0];
        }
        /* chroma planes advance once per pair of source rows */
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
/* Convert packed UYVY 4:2:2 to planar YUV 4:2:0.
 * Identical structure to yuv422_to_yuv420p() but with the UYVY byte
 * order (Cb Y0 Cr Y1). Chroma comes from even source rows only. */
static void uyvy422_to_yuv420p(AVPicture *dst, const AVPicture *src,
                               int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;

    p1 = src->data[0];     /* packed source: Cb Y0 Cr Y1 ... */
    lum1 = dst->data[0];
    cb1 = dst->data[1];
    cr1 = dst->data[2];
    /* each iteration consumes up to two source rows */
    for(;height >= 1; height -= 2) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        /* even row: extract both luma and chroma */
        for(w = width; w >= 2; w -= 2) {
            lum[0] = p[1];
            cb[0] = p[0];
            lum[1] = p[3];
            cr[0] = p[2];
            p += 4;
            lum += 2;
            cb++;
            cr++;
        }
        /* odd width: one trailing luma sample plus its chroma */
        if (w) {
            lum[0] = p[1];
            cb[0] = p[0];
            cr[0] = p[2];
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        /* odd row (if present): luma only, chroma is dropped */
        if (height>1) {
            p = p1;
            lum = lum1;
            for(w = width; w >= 2; w -= 2) {
                lum[0] = p[1];
                lum[1] = p[3];
                p += 4;
                lum += 2;
            }
            if (w) {
                lum[0] = p[1];
            }
            p1 += src->linesize[0];
            lum1 += dst->linesize[0];
        }
        /* chroma planes advance once per pair of source rows */
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
  849. static void uyvy422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  850. int width, int height)
  851. {
  852. const uint8_t *p, *p1;
  853. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  854. int w;
  855. p1 = src->data[0];
  856. lum1 = dst->data[0];
  857. cb1 = dst->data[1];
  858. cr1 = dst->data[2];
  859. for(;height > 0; height--) {
  860. p = p1;
  861. lum = lum1;
  862. cb = cb1;
  863. cr = cr1;
  864. for(w = width; w >= 2; w -= 2) {
  865. lum[0] = p[1];
  866. cb[0] = p[0];
  867. lum[1] = p[3];
  868. cr[0] = p[2];
  869. p += 4;
  870. lum += 2;
  871. cb++;
  872. cr++;
  873. }
  874. p1 += src->linesize[0];
  875. lum1 += dst->linesize[0];
  876. cb1 += dst->linesize[1];
  877. cr1 += dst->linesize[2];
  878. }
  879. }
  880. static void yuv422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  881. int width, int height)
  882. {
  883. const uint8_t *p, *p1;
  884. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  885. int w;
  886. p1 = src->data[0];
  887. lum1 = dst->data[0];
  888. cb1 = dst->data[1];
  889. cr1 = dst->data[2];
  890. for(;height > 0; height--) {
  891. p = p1;
  892. lum = lum1;
  893. cb = cb1;
  894. cr = cr1;
  895. for(w = width; w >= 2; w -= 2) {
  896. lum[0] = p[0];
  897. cb[0] = p[1];
  898. lum[1] = p[2];
  899. cr[0] = p[3];
  900. p += 4;
  901. lum += 2;
  902. cb++;
  903. cr++;
  904. }
  905. p1 += src->linesize[0];
  906. lum1 += dst->linesize[0];
  907. cb1 += dst->linesize[1];
  908. cr1 += dst->linesize[2];
  909. }
  910. }
  911. static void yuv422p_to_yuv422(AVPicture *dst, const AVPicture *src,
  912. int width, int height)
  913. {
  914. uint8_t *p, *p1;
  915. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  916. int w;
  917. p1 = dst->data[0];
  918. lum1 = src->data[0];
  919. cb1 = src->data[1];
  920. cr1 = src->data[2];
  921. for(;height > 0; height--) {
  922. p = p1;
  923. lum = lum1;
  924. cb = cb1;
  925. cr = cr1;
  926. for(w = width; w >= 2; w -= 2) {
  927. p[0] = lum[0];
  928. p[1] = cb[0];
  929. p[2] = lum[1];
  930. p[3] = cr[0];
  931. p += 4;
  932. lum += 2;
  933. cb++;
  934. cr++;
  935. }
  936. p1 += dst->linesize[0];
  937. lum1 += src->linesize[0];
  938. cb1 += src->linesize[1];
  939. cr1 += src->linesize[2];
  940. }
  941. }
  942. static void yuv422p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  943. int width, int height)
  944. {
  945. uint8_t *p, *p1;
  946. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  947. int w;
  948. p1 = dst->data[0];
  949. lum1 = src->data[0];
  950. cb1 = src->data[1];
  951. cr1 = src->data[2];
  952. for(;height > 0; height--) {
  953. p = p1;
  954. lum = lum1;
  955. cb = cb1;
  956. cr = cr1;
  957. for(w = width; w >= 2; w -= 2) {
  958. p[1] = lum[0];
  959. p[0] = cb[0];
  960. p[3] = lum[1];
  961. p[2] = cr[0];
  962. p += 4;
  963. lum += 2;
  964. cb++;
  965. cr++;
  966. }
  967. p1 += dst->linesize[0];
  968. lum1 += src->linesize[0];
  969. cb1 += src->linesize[1];
  970. cr1 += src->linesize[2];
  971. }
  972. }
  973. static void uyvy411_to_yuv411p(AVPicture *dst, const AVPicture *src,
  974. int width, int height)
  975. {
  976. const uint8_t *p, *p1;
  977. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  978. int w;
  979. p1 = src->data[0];
  980. lum1 = dst->data[0];
  981. cb1 = dst->data[1];
  982. cr1 = dst->data[2];
  983. for(;height > 0; height--) {
  984. p = p1;
  985. lum = lum1;
  986. cb = cb1;
  987. cr = cr1;
  988. for(w = width; w >= 4; w -= 4) {
  989. cb[0] = p[0];
  990. lum[0] = p[1];
  991. lum[1] = p[2];
  992. cr[0] = p[3];
  993. lum[2] = p[4];
  994. lum[3] = p[5];
  995. p += 6;
  996. lum += 4;
  997. cb++;
  998. cr++;
  999. }
  1000. p1 += src->linesize[0];
  1001. lum1 += dst->linesize[0];
  1002. cb1 += dst->linesize[1];
  1003. cr1 += dst->linesize[2];
  1004. }
  1005. }
  1006. static void yuv420p_to_yuv422(AVPicture *dst, const AVPicture *src,
  1007. int width, int height)
  1008. {
  1009. int w, h;
  1010. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1011. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1012. uint8_t *cb1, *cb2 = src->data[1];
  1013. uint8_t *cr1, *cr2 = src->data[2];
  1014. for(h = height / 2; h--;) {
  1015. line1 = linesrc;
  1016. line2 = linesrc + dst->linesize[0];
  1017. lum1 = lumsrc;
  1018. lum2 = lumsrc + src->linesize[0];
  1019. cb1 = cb2;
  1020. cr1 = cr2;
  1021. for(w = width / 2; w--;) {
  1022. *line1++ = *lum1++; *line2++ = *lum2++;
  1023. *line1++ = *line2++ = *cb1++;
  1024. *line1++ = *lum1++; *line2++ = *lum2++;
  1025. *line1++ = *line2++ = *cr1++;
  1026. }
  1027. linesrc += dst->linesize[0] * 2;
  1028. lumsrc += src->linesize[0] * 2;
  1029. cb2 += src->linesize[1];
  1030. cr2 += src->linesize[2];
  1031. }
  1032. }
  1033. static void yuv420p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1034. int width, int height)
  1035. {
  1036. int w, h;
  1037. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1038. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1039. uint8_t *cb1, *cb2 = src->data[1];
  1040. uint8_t *cr1, *cr2 = src->data[2];
  1041. for(h = height / 2; h--;) {
  1042. line1 = linesrc;
  1043. line2 = linesrc + dst->linesize[0];
  1044. lum1 = lumsrc;
  1045. lum2 = lumsrc + src->linesize[0];
  1046. cb1 = cb2;
  1047. cr1 = cr2;
  1048. for(w = width / 2; w--;) {
  1049. *line1++ = *line2++ = *cb1++;
  1050. *line1++ = *lum1++; *line2++ = *lum2++;
  1051. *line1++ = *line2++ = *cr1++;
  1052. *line1++ = *lum1++; *line2++ = *lum2++;
  1053. }
  1054. linesrc += dst->linesize[0] * 2;
  1055. lumsrc += src->linesize[0] * 2;
  1056. cb2 += src->linesize[1];
  1057. cr2 += src->linesize[2];
  1058. }
  1059. }
/* Fixed-point colorspace arithmetic: coefficients are scaled by
   2^SCALEBITS and rounded through ONE_HALF before the final shift. */
#define SCALEBITS 10
#define ONE_HALF (1 << (SCALEBITS - 1))
#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
/* Step 1 of CCIR601 (video range, C 16..240) YUV -> RGB: compute the
   chroma contributions r_add/g_add/b_add, shared by neighboring lumas. */
#define YUV_TO_RGB1_CCIR(cb1, cr1)\
{\
    cb = (cb1) - 128;\
    cr = (cr1) - 128;\
    r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
    g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
            ONE_HALF;\
    b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
}
/* Step 2 of CCIR601 YUV -> RGB: expand luma 16..235 to 0..255, add the
   chroma terms and clamp via the cm[] crop table. */
#define YUV_TO_RGB2_CCIR(r, g, b, y1)\
{\
    y = ((y1) - 16) * FIX(255.0/219.0);\
    r = cm[(y + r_add) >> SCALEBITS];\
    g = cm[(y + g_add) >> SCALEBITS];\
    b = cm[(y + b_add) >> SCALEBITS];\
}
/* Same two steps for full-range (JPEG) YUV, no level expansion needed. */
#define YUV_TO_RGB1(cb1, cr1)\
{\
    cb = (cb1) - 128;\
    cr = (cr1) - 128;\
    r_add = FIX(1.40200) * cr + ONE_HALF;\
    g_add = - FIX(0.34414) * cb - FIX(0.71414) * cr + ONE_HALF;\
    b_add = FIX(1.77200) * cb + ONE_HALF;\
}
#define YUV_TO_RGB2(r, g, b, y1)\
{\
    y = (y1) << SCALEBITS;\
    r = cm[(y + r_add) >> SCALEBITS];\
    g = cm[(y + g_add) >> SCALEBITS];\
    b = cm[(y + b_add) >> SCALEBITS];\
}
/* Level conversions between CCIR601 video range (Y 16..235) and
   full-range JPEG levels (0..255). */
#define Y_CCIR_TO_JPEG(y)\
cm[((y) * FIX(255.0/219.0) + (ONE_HALF - 16 * FIX(255.0/219.0))) >> SCALEBITS]
#define Y_JPEG_TO_CCIR(y)\
(((y) * FIX(219.0/255.0) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
#define C_CCIR_TO_JPEG(y)\
cm[(((y) - 128) * FIX(127.0/112.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS]
/* NOTE: the clamp is really necessary! */
  1101. static inline int C_JPEG_TO_CCIR(int y) {
  1102. y = (((y - 128) * FIX(112.0/127.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS);
  1103. if (y < 16)
  1104. y = 16;
  1105. return y;
  1106. }
/* RGB -> YUV, full range (JPEG). The shift parameter of the chroma
   macros lets callers sum several pixels before converting. */
#define RGB_TO_Y(r, g, b) \
((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
  FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)
#define RGB_TO_U(r1, g1, b1, shift)\
(((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
     FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
#define RGB_TO_V(r1, g1, b1, shift)\
(((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
    FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
/* RGB -> YUV, CCIR601 video range (Y 16..235, C 16..240). */
#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
    FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
/* 256-entry level-conversion tables, filled by img_convert_init(). */
static uint8_t y_ccir_to_jpeg[256];
static uint8_t y_jpeg_to_ccir[256];
static uint8_t c_ccir_to_jpeg[256];
static uint8_t c_jpeg_to_ccir[256];
/* init various conversion tables */
static void img_convert_init(void)
{
    int i;
    /* clamping table, offset so that out-of-range indices are safe */
    uint8_t *cm = cropTbl + MAX_NEG_CROP;
    /* precompute the four CCIR<->JPEG level-conversion tables */
    for(i = 0;i < 256; i++) {
        y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
        y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
        c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
        c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
    }
}
  1141. /* apply to each pixel the given table */
  1142. static void img_apply_table(uint8_t *dst, int dst_wrap,
  1143. const uint8_t *src, int src_wrap,
  1144. int width, int height, const uint8_t *table1)
  1145. {
  1146. int n;
  1147. const uint8_t *s;
  1148. uint8_t *d;
  1149. const uint8_t *table;
  1150. table = table1;
  1151. for(;height > 0; height--) {
  1152. s = src;
  1153. d = dst;
  1154. n = width;
  1155. while (n >= 4) {
  1156. d[0] = table[s[0]];
  1157. d[1] = table[s[1]];
  1158. d[2] = table[s[2]];
  1159. d[3] = table[s[3]];
  1160. d += 4;
  1161. s += 4;
  1162. n -= 4;
  1163. }
  1164. while (n > 0) {
  1165. d[0] = table[s[0]];
  1166. d++;
  1167. s++;
  1168. n--;
  1169. }
  1170. dst += dst_wrap;
  1171. src += src_wrap;
  1172. }
  1173. }
  1174. /* XXX: use generic filter ? */
  1175. /* XXX: in most cases, the sampling position is incorrect */
  1176. /* 4x1 -> 1x1 */
  1177. static void shrink41(uint8_t *dst, int dst_wrap,
  1178. const uint8_t *src, int src_wrap,
  1179. int width, int height)
  1180. {
  1181. int w;
  1182. const uint8_t *s;
  1183. uint8_t *d;
  1184. for(;height > 0; height--) {
  1185. s = src;
  1186. d = dst;
  1187. for(w = width;w > 0; w--) {
  1188. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  1189. s += 4;
  1190. d++;
  1191. }
  1192. src += src_wrap;
  1193. dst += dst_wrap;
  1194. }
  1195. }
  1196. /* 2x1 -> 1x1 */
  1197. static void shrink21(uint8_t *dst, int dst_wrap,
  1198. const uint8_t *src, int src_wrap,
  1199. int width, int height)
  1200. {
  1201. int w;
  1202. const uint8_t *s;
  1203. uint8_t *d;
  1204. for(;height > 0; height--) {
  1205. s = src;
  1206. d = dst;
  1207. for(w = width;w > 0; w--) {
  1208. d[0] = (s[0] + s[1]) >> 1;
  1209. s += 2;
  1210. d++;
  1211. }
  1212. src += src_wrap;
  1213. dst += dst_wrap;
  1214. }
  1215. }
  1216. /* 1x2 -> 1x1 */
  1217. static void shrink12(uint8_t *dst, int dst_wrap,
  1218. const uint8_t *src, int src_wrap,
  1219. int width, int height)
  1220. {
  1221. int w;
  1222. uint8_t *d;
  1223. const uint8_t *s1, *s2;
  1224. for(;height > 0; height--) {
  1225. s1 = src;
  1226. s2 = s1 + src_wrap;
  1227. d = dst;
  1228. for(w = width;w >= 4; w-=4) {
  1229. d[0] = (s1[0] + s2[0]) >> 1;
  1230. d[1] = (s1[1] + s2[1]) >> 1;
  1231. d[2] = (s1[2] + s2[2]) >> 1;
  1232. d[3] = (s1[3] + s2[3]) >> 1;
  1233. s1 += 4;
  1234. s2 += 4;
  1235. d += 4;
  1236. }
  1237. for(;w > 0; w--) {
  1238. d[0] = (s1[0] + s2[0]) >> 1;
  1239. s1++;
  1240. s2++;
  1241. d++;
  1242. }
  1243. src += 2 * src_wrap;
  1244. dst += dst_wrap;
  1245. }
  1246. }
  1247. /* 2x2 -> 1x1 */
  1248. void ff_shrink22(uint8_t *dst, int dst_wrap,
  1249. const uint8_t *src, int src_wrap,
  1250. int width, int height)
  1251. {
  1252. int w;
  1253. const uint8_t *s1, *s2;
  1254. uint8_t *d;
  1255. for(;height > 0; height--) {
  1256. s1 = src;
  1257. s2 = s1 + src_wrap;
  1258. d = dst;
  1259. for(w = width;w >= 4; w-=4) {
  1260. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1261. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  1262. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  1263. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  1264. s1 += 8;
  1265. s2 += 8;
  1266. d += 4;
  1267. }
  1268. for(;w > 0; w--) {
  1269. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1270. s1 += 2;
  1271. s2 += 2;
  1272. d++;
  1273. }
  1274. src += 2 * src_wrap;
  1275. dst += dst_wrap;
  1276. }
  1277. }
  1278. /* 4x4 -> 1x1 */
  1279. void ff_shrink44(uint8_t *dst, int dst_wrap,
  1280. const uint8_t *src, int src_wrap,
  1281. int width, int height)
  1282. {
  1283. int w;
  1284. const uint8_t *s1, *s2, *s3, *s4;
  1285. uint8_t *d;
  1286. for(;height > 0; height--) {
  1287. s1 = src;
  1288. s2 = s1 + src_wrap;
  1289. s3 = s2 + src_wrap;
  1290. s4 = s3 + src_wrap;
  1291. d = dst;
  1292. for(w = width;w > 0; w--) {
  1293. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  1294. s2[0] + s2[1] + s2[2] + s2[3] +
  1295. s3[0] + s3[1] + s3[2] + s3[3] +
  1296. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  1297. s1 += 4;
  1298. s2 += 4;
  1299. s3 += 4;
  1300. s4 += 4;
  1301. d++;
  1302. }
  1303. src += 4 * src_wrap;
  1304. dst += dst_wrap;
  1305. }
  1306. }
  1307. /* 8x8 -> 1x1 */
  1308. void ff_shrink88(uint8_t *dst, int dst_wrap,
  1309. const uint8_t *src, int src_wrap,
  1310. int width, int height)
  1311. {
  1312. int w, i;
  1313. for(;height > 0; height--) {
  1314. for(w = width;w > 0; w--) {
  1315. int tmp=0;
  1316. for(i=0; i<8; i++){
  1317. tmp += src[0] + src[1] + src[2] + src[3] + src[4] + src[5] + src[6] + src[7];
  1318. src += src_wrap;
  1319. }
  1320. *(dst++) = (tmp + 32)>>6;
  1321. src += 8 - 8*src_wrap;
  1322. }
  1323. src += 8*src_wrap - 8*width;
  1324. dst += dst_wrap - width;
  1325. }
  1326. }
  1327. static void grow21_line(uint8_t *dst, const uint8_t *src,
  1328. int width)
  1329. {
  1330. int w;
  1331. const uint8_t *s1;
  1332. uint8_t *d;
  1333. s1 = src;
  1334. d = dst;
  1335. for(w = width;w >= 4; w-=4) {
  1336. d[1] = d[0] = s1[0];
  1337. d[3] = d[2] = s1[1];
  1338. s1 += 2;
  1339. d += 4;
  1340. }
  1341. for(;w >= 2; w -= 2) {
  1342. d[1] = d[0] = s1[0];
  1343. s1 ++;
  1344. d += 2;
  1345. }
  1346. /* only needed if width is not a multiple of two */
  1347. /* XXX: veryfy that */
  1348. if (w) {
  1349. d[0] = s1[0];
  1350. }
  1351. }
  1352. static void grow41_line(uint8_t *dst, const uint8_t *src,
  1353. int width)
  1354. {
  1355. int w, v;
  1356. const uint8_t *s1;
  1357. uint8_t *d;
  1358. s1 = src;
  1359. d = dst;
  1360. for(w = width;w >= 4; w-=4) {
  1361. v = s1[0];
  1362. d[0] = v;
  1363. d[1] = v;
  1364. d[2] = v;
  1365. d[3] = v;
  1366. s1 ++;
  1367. d += 4;
  1368. }
  1369. }
  1370. /* 1x1 -> 2x1 */
  1371. static void grow21(uint8_t *dst, int dst_wrap,
  1372. const uint8_t *src, int src_wrap,
  1373. int width, int height)
  1374. {
  1375. for(;height > 0; height--) {
  1376. grow21_line(dst, src, width);
  1377. src += src_wrap;
  1378. dst += dst_wrap;
  1379. }
  1380. }
  1381. /* 1x1 -> 2x2 */
  1382. static void grow22(uint8_t *dst, int dst_wrap,
  1383. const uint8_t *src, int src_wrap,
  1384. int width, int height)
  1385. {
  1386. for(;height > 0; height--) {
  1387. grow21_line(dst, src, width);
  1388. if (height%2)
  1389. src += src_wrap;
  1390. dst += dst_wrap;
  1391. }
  1392. }
  1393. /* 1x1 -> 4x1 */
  1394. static void grow41(uint8_t *dst, int dst_wrap,
  1395. const uint8_t *src, int src_wrap,
  1396. int width, int height)
  1397. {
  1398. for(;height > 0; height--) {
  1399. grow41_line(dst, src, width);
  1400. src += src_wrap;
  1401. dst += dst_wrap;
  1402. }
  1403. }
  1404. /* 1x1 -> 4x4 */
  1405. static void grow44(uint8_t *dst, int dst_wrap,
  1406. const uint8_t *src, int src_wrap,
  1407. int width, int height)
  1408. {
  1409. for(;height > 0; height--) {
  1410. grow41_line(dst, src, width);
  1411. if ((height & 3) == 1)
  1412. src += src_wrap;
  1413. dst += dst_wrap;
  1414. }
  1415. }
  1416. /* 1x2 -> 2x1 */
  1417. static void conv411(uint8_t *dst, int dst_wrap,
  1418. const uint8_t *src, int src_wrap,
  1419. int width, int height)
  1420. {
  1421. int w, c;
  1422. const uint8_t *s1, *s2;
  1423. uint8_t *d;
  1424. width>>=1;
  1425. for(;height > 0; height--) {
  1426. s1 = src;
  1427. s2 = src + src_wrap;
  1428. d = dst;
  1429. for(w = width;w > 0; w--) {
  1430. c = (s1[0] + s2[0]) >> 1;
  1431. d[0] = c;
  1432. d[1] = c;
  1433. s1++;
  1434. s2++;
  1435. d += 2;
  1436. }
  1437. src += src_wrap * 2;
  1438. dst += dst_wrap;
  1439. }
  1440. }
  1441. /* XXX: add jpeg quantize code */
  1442. #define TRANSP_INDEX (6*6*6)
  1443. /* this is maybe slow, but allows for extensions */
  1444. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1445. {
  1446. return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
  1447. }
  1448. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  1449. {
  1450. uint32_t *pal;
  1451. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  1452. int i, r, g, b;
  1453. pal = (uint32_t *)palette;
  1454. i = 0;
  1455. for(r = 0; r < 6; r++) {
  1456. for(g = 0; g < 6; g++) {
  1457. for(b = 0; b < 6; b++) {
  1458. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  1459. (pal_value[g] << 8) | pal_value[b];
  1460. }
  1461. }
  1462. }
  1463. if (has_alpha)
  1464. pal[i++] = 0;
  1465. while (i < 256)
  1466. pal[i++] = 0xff000000;
  1467. }
  1468. /* copy bit n to bits 0 ... n - 1 */
  1469. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1470. {
  1471. int mask;
  1472. mask = (1 << n) - 1;
  1473. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1474. }
/* Each section below defines the accessor macros (RGB_IN / RGB_OUT and,
   for formats with alpha, RGBA_IN / RGBA_OUT), the bytes-per-pixel BPP
   and the format name RGB_NAME, then includes imgconvert_template.h to
   instantiate the <fmt>_to_* / *_to_<fmt> conversion routines. */
/* rgb555 handling */
#define RGB_NAME rgb555
#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (10 - 3), 3);\
    g = bitcopy_n(v >> (5 - 3), 3);\
    b = bitcopy_n(v << 3, 3);\
}
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (10 - 3), 3);\
    g = bitcopy_n(v >> (5 - 3), 3);\
    b = bitcopy_n(v << 3, 3);\
    a = (-(v >> 15)) & 0xff;\
}
#define RGBA_OUT(d, r, g, b, a)\
{\
    ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3) | \
    ((a << 8) & 0x8000);\
}
#define BPP 2
#include "imgconvert_template.h"
/* rgb565 handling */
#define RGB_NAME rgb565
#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (11 - 3), 3);\
    g = bitcopy_n(v >> (5 - 2), 2);\
    b = bitcopy_n(v << 3, 3);\
}
#define RGB_OUT(d, r, g, b)\
{\
    ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
}
#define BPP 2
#include "imgconvert_template.h"
/* bgr24 handling */
#define RGB_NAME bgr24
#define RGB_IN(r, g, b, s)\
{\
    b = (s)[0];\
    g = (s)[1];\
    r = (s)[2];\
}
#define RGB_OUT(d, r, g, b)\
{\
    (d)[0] = b;\
    (d)[1] = g;\
    (d)[2] = r;\
}
#define BPP 3
#include "imgconvert_template.h"
#undef RGB_IN
#undef RGB_OUT
#undef BPP
/* rgb24 handling */
#define RGB_NAME rgb24
#define FMT_RGB24
#define RGB_IN(r, g, b, s)\
{\
    r = (s)[0];\
    g = (s)[1];\
    b = (s)[2];\
}
#define RGB_OUT(d, r, g, b)\
{\
    (d)[0] = r;\
    (d)[1] = g;\
    (d)[2] = b;\
}
#define BPP 3
#include "imgconvert_template.h"
/* rgba32 handling; pixels are packed 32-bit ARGB words */
#define RGB_NAME rgba32
#define FMT_RGBA32
#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}
#define RGBA_OUT(d, r, g, b, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
}
#define BPP 4
#include "imgconvert_template.h"
/* Expand a 1 bit-per-pixel plane to 8-bit gray. Bits are consumed MSB
 * first; each bit b becomes -(b) (0x00 or 0xff). xor_mask selects the
 * polarity of the source bits (0xff inverts, 0x00 keeps them). */
static void mono_to_gray(AVPicture *dst, const AVPicture *src,
                         int width, int height, int xor_mask)
{
    const unsigned char *p;
    unsigned char *q;
    int v, dst_wrap, src_wrap;
    int y, w;
    p = src->data[0];
    /* source rows are packed 8 pixels per byte */
    src_wrap = src->linesize[0] - ((width + 7) >> 3);
    q = dst->data[0];
    dst_wrap = dst->linesize[0] - width;
    for(y=0;y<height;y++) {
        w = width;
        /* unpack full bytes, 8 pixels at a time */
        while (w >= 8) {
            v = *p++ ^ xor_mask;
            q[0] = -(v >> 7);
            q[1] = -((v >> 6) & 1);
            q[2] = -((v >> 5) & 1);
            q[3] = -((v >> 4) & 1);
            q[4] = -((v >> 3) & 1);
            q[5] = -((v >> 2) & 1);
            q[6] = -((v >> 1) & 1);
            q[7] = -((v >> 0) & 1);
            w -= 8;
            q += 8;
        }
        /* trailing partial byte: shift bits up through the MSB */
        if (w > 0) {
            v = *p++ ^ xor_mask;
            do {
                q[0] = -((v >> 7) & 1);
                q++;
                v <<= 1;
            } while (--w);
        }
        p += src_wrap;
        q += dst_wrap;
    }
}
/* 1bpp -> gray8 where a 0 source bit is white: the 0xff xor inverts the
   bits so 0 expands to 255 and 1 to 0. */
static void monowhite_to_gray(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    mono_to_gray(dst, src, width, height, 0xff);
}
/* 1bpp -> gray8 where a 1 source bit is white: bits are expanded
   unchanged (1 -> 255, 0 -> 0). */
static void monoblack_to_gray(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    mono_to_gray(dst, src, width, height, 0x00);
}
/* Pack 8-bit gray to 1 bit per pixel, MSB first. Each output bit is the
 * top bit of the gray sample (threshold at 128); xor_mask flips the
 * packed byte (0xff for monowhite, 0x00 for monoblack). */
static void gray_to_mono(AVPicture *dst, const AVPicture *src,
                         int width, int height, int xor_mask)
{
    int n;
    const uint8_t *s;
    uint8_t *d;
    int j, b, v, n1, src_wrap, dst_wrap, y;
    s = src->data[0];
    src_wrap = src->linesize[0] - width;
    d = dst->data[0];
    /* destination rows are packed 8 pixels per byte */
    dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
    for(y=0;y<height;y++) {
        n = width;
        /* full output bytes: 8 input pixels each */
        while (n >= 8) {
            v = 0;
            for(j=0;j<8;j++) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
            }
            d[0] = v ^ xor_mask;
            d++;
            n -= 8;
        }
        /* trailing 1..7 pixels: left-align them in the last byte */
        if (n > 0) {
            n1 = n;
            v = 0;
            while (n > 0) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
                n--;
            }
            d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
            d++;
        }
        s += src_wrap;
        d += dst_wrap;
    }
}
/* gray8 -> 1bpp where a 0 bit means white (xor_mask 0xff inverts the
   packed bits). */
static void gray_to_monowhite(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    gray_to_mono(dst, src, width, height, 0xff);
}
/* gray8 -> 1bpp where a 1 bit means white (bits packed unchanged). */
static void gray_to_monoblack(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    gray_to_mono(dst, src, width, height, 0x00);
}
/* One cell of convert_table: a direct conversion routine between two
   pixel formats. A NULL .convert means no direct path exists and
   img_convert() must go through an intermediate format. */
typedef struct ConvertEntry {
    void (*convert)(AVPicture *dst,
                    const AVPicture *src, int width, int height);
} ConvertEntry;
/* Add each new conversion function in this table. In order to be able
   to convert from any format to any format, the following constraints
   must be satisfied:
   - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
   - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
   - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGBA32
   - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
     PIX_FMT_RGB24.
   - PIX_FMT_422 must convert to and from PIX_FMT_422P.
   The other conversion functions are just optimisations for common cases.
*/
/* Indexed as convert_table[src_pix_fmt][dst_pix_fmt]. Most rgb/yuv
   routines referenced here are generated by imgconvert_template.h. */
static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
    [PIX_FMT_YUV420P] = {
        [PIX_FMT_YUV422] = {
            .convert = yuv420p_to_yuv422,
        },
        [PIX_FMT_RGB555] = {
            .convert = yuv420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuv420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuv420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuv420p_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = yuv420p_to_rgba32
        },
        [PIX_FMT_UYVY422] = {
            .convert = yuv420p_to_uyvy422,
        },
    },
    [PIX_FMT_YUV422P] = {
        [PIX_FMT_YUV422] = {
            .convert = yuv422p_to_yuv422,
        },
        [PIX_FMT_UYVY422] = {
            .convert = yuv422p_to_uyvy422,
        },
    },
    [PIX_FMT_YUV444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuv444p_to_rgb24
        },
    },
    [PIX_FMT_YUVJ420P] = {
        [PIX_FMT_RGB555] = {
            .convert = yuvj420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuvj420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuvj420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuvj420p_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = yuvj420p_to_rgba32
        },
    },
    [PIX_FMT_YUVJ444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuvj444p_to_rgb24
        },
    },
    [PIX_FMT_YUV422] = {
        [PIX_FMT_YUV420P] = {
            .convert = yuv422_to_yuv420p,
        },
        [PIX_FMT_YUV422P] = {
            .convert = yuv422_to_yuv422p,
        },
    },
    [PIX_FMT_UYVY422] = {
        [PIX_FMT_YUV420P] = {
            .convert = uyvy422_to_yuv420p,
        },
        [PIX_FMT_YUV422P] = {
            .convert = uyvy422_to_yuv422p,
        },
    },
    [PIX_FMT_RGB24] = {
        [PIX_FMT_YUV420P] = {
            .convert = rgb24_to_yuv420p
        },
        [PIX_FMT_RGB565] = {
            .convert = rgb24_to_rgb565
        },
        [PIX_FMT_RGB555] = {
            .convert = rgb24_to_rgb555
        },
        [PIX_FMT_RGBA32] = {
            .convert = rgb24_to_rgba32
        },
        [PIX_FMT_BGR24] = {
            .convert = rgb24_to_bgr24
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb24_to_gray
        },
        [PIX_FMT_PAL8] = {
            .convert = rgb24_to_pal8
        },
        [PIX_FMT_YUV444P] = {
            .convert = rgb24_to_yuv444p
        },
        [PIX_FMT_YUVJ420P] = {
            .convert = rgb24_to_yuvj420p
        },
        [PIX_FMT_YUVJ444P] = {
            .convert = rgb24_to_yuvj444p
        },
    },
    [PIX_FMT_RGBA32] = {
        [PIX_FMT_RGB24] = {
            .convert = rgba32_to_rgb24
        },
        [PIX_FMT_RGB555] = {
            .convert = rgba32_to_rgb555
        },
        [PIX_FMT_PAL8] = {
            .convert = rgba32_to_pal8
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgba32_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgba32_to_gray
        },
    },
    [PIX_FMT_BGR24] = {
        [PIX_FMT_RGB24] = {
            .convert = bgr24_to_rgb24
        },
        [PIX_FMT_YUV420P] = {
            .convert = bgr24_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = bgr24_to_gray
        },
    },
    [PIX_FMT_RGB555] = {
        [PIX_FMT_RGB24] = {
            .convert = rgb555_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = rgb555_to_rgba32
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb555_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb555_to_gray
        },
    },
    [PIX_FMT_RGB565] = {
        [PIX_FMT_RGB24] = {
            .convert = rgb565_to_rgb24
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb565_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb565_to_gray
        },
    },
    [PIX_FMT_GRAY8] = {
        [PIX_FMT_RGB555] = {
            .convert = gray_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = gray_to_rgb565
        },
        [PIX_FMT_RGB24] = {
            .convert = gray_to_rgb24
        },
        [PIX_FMT_BGR24] = {
            .convert = gray_to_bgr24
        },
        [PIX_FMT_RGBA32] = {
            .convert = gray_to_rgba32
        },
        [PIX_FMT_MONOWHITE] = {
            .convert = gray_to_monowhite
        },
        [PIX_FMT_MONOBLACK] = {
            .convert = gray_to_monoblack
        },
    },
    [PIX_FMT_MONOWHITE] = {
        [PIX_FMT_GRAY8] = {
            .convert = monowhite_to_gray
        },
    },
    [PIX_FMT_MONOBLACK] = {
        [PIX_FMT_GRAY8] = {
            .convert = monoblack_to_gray
        },
    },
    [PIX_FMT_PAL8] = {
        [PIX_FMT_RGB555] = {
            .convert = pal8_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = pal8_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = pal8_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = pal8_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = pal8_to_rgba32
        },
    },
    [PIX_FMT_UYVY411] = {
        [PIX_FMT_YUV411P] = {
            .convert = uyvy411_to_yuv411p,
        },
    },
};
  1903. int avpicture_alloc(AVPicture *picture,
  1904. int pix_fmt, int width, int height)
  1905. {
  1906. int size;
  1907. void *ptr;
  1908. size = avpicture_get_size(pix_fmt, width, height);
  1909. if(size<0)
  1910. goto fail;
  1911. ptr = av_malloc(size);
  1912. if (!ptr)
  1913. goto fail;
  1914. avpicture_fill(picture, ptr, pix_fmt, width, height);
  1915. return 0;
  1916. fail:
  1917. memset(picture, 0, sizeof(AVPicture));
  1918. return -1;
  1919. }
/* Release the buffer allocated by avpicture_alloc(); all planes share
   the single allocation anchored at data[0]. */
void avpicture_free(AVPicture *picture)
{
    av_free(picture->data[0]);
}
  1924. /* return true if yuv planar */
  1925. static inline int is_yuv_planar(const PixFmtInfo *ps)
  1926. {
  1927. return (ps->color_type == FF_COLOR_YUV ||
  1928. ps->color_type == FF_COLOR_YUV_JPEG) &&
  1929. ps->pixel_type == FF_PIXEL_PLANAR;
  1930. }
  1931. /**
  1932. * Crop image top and left side
  1933. */
  1934. int img_crop(AVPicture *dst, const AVPicture *src,
  1935. int pix_fmt, int top_band, int left_band)
  1936. {
  1937. int y_shift;
  1938. int x_shift;
  1939. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || !is_yuv_planar(&pix_fmt_info[pix_fmt]))
  1940. return -1;
  1941. y_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  1942. x_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  1943. dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
  1944. dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift);
  1945. dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift);
  1946. dst->linesize[0] = src->linesize[0];
  1947. dst->linesize[1] = src->linesize[1];
  1948. dst->linesize[2] = src->linesize[2];
  1949. return 0;
  1950. }
/**
 * Pad image
 *
 * Copies src into dst surrounded by borders of padtop/padbottom/
 * padleft/padright pixels filled with the per-plane values color[0..2].
 * Only planar YUV formats are accepted; border sizes are scaled by the
 * chroma shifts for planes 1 and 2. A NULL src paints borders only.
 */
int img_pad(AVPicture *dst, const AVPicture *src, int height, int width, int pix_fmt,
            int padtop, int padbottom, int padleft, int padright, int *color)
{
    uint8_t *optr, *iptr;
    int y_shift;
    int x_shift;
    int yheight;
    int i, y;
    if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || !is_yuv_planar(&pix_fmt_info[pix_fmt]))
        return -1;
    for (i = 0; i < 3; i++) {
        /* luma plane (i == 0) is never chroma-subsampled */
        x_shift = i ? pix_fmt_info[pix_fmt].x_chroma_shift : 0;
        y_shift = i ? pix_fmt_info[pix_fmt].y_chroma_shift : 0;
        if (padtop || padleft) {
            /* whole top border plus the left border of the first row,
               done as one contiguous memset */
            memset(dst->data[i], color[i], dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift));
        }
        if (padleft || padright || src) {
            if (src) { /* first line */
                iptr = src->data[i];
                optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift);
                /* NOTE(review): copies linesize bytes, not the visible
                   width — assumes linesize equals the useful row width;
                   verify with callers */
                memcpy(optr, iptr, src->linesize[i]);
                iptr += src->linesize[i];
            }
            /* optr sits at the right border of the current row; each
               memset below covers this row's right border plus the next
               row's left border in one run */
            optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) + (dst->linesize[i] - (padright >> x_shift));
            yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
            for (y = 0; y < yheight; y++) {
                memset(optr, color[i], (padleft + padright) >> x_shift);
                if (src) {
                    memcpy(optr + ((padleft + padright) >> x_shift), iptr, src->linesize[i]);
                    iptr += src->linesize[i];
                }
                optr += dst->linesize[i];
            }
        }
        if (padbottom || padright) {
            /* last row's right border plus the whole bottom border */
            optr = dst->data[i] + dst->linesize[i] * ((height - padbottom) >> y_shift) - (padright >> x_shift);
            memset(optr, color[i], dst->linesize[i] * (padbottom >> y_shift) + (padright >> x_shift));
        }
    }
    return 0;
}
  1995. #ifndef CONFIG_SWSCALER
  1996. /* XXX: always use linesize. Return -1 if not supported */
  1997. int img_convert(AVPicture *dst, int dst_pix_fmt,
  1998. const AVPicture *src, int src_pix_fmt,
  1999. int src_width, int src_height)
  2000. {
  2001. static int inited;
  2002. int i, ret, dst_width, dst_height, int_pix_fmt;
  2003. const PixFmtInfo *src_pix, *dst_pix;
  2004. const ConvertEntry *ce;
  2005. AVPicture tmp1, *tmp = &tmp1;
  2006. if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
  2007. dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
  2008. return -1;
  2009. if (src_width <= 0 || src_height <= 0)
  2010. return 0;
  2011. if (!inited) {
  2012. inited = 1;
  2013. img_convert_init();
  2014. }
  2015. dst_width = src_width;
  2016. dst_height = src_height;
  2017. dst_pix = &pix_fmt_info[dst_pix_fmt];
  2018. src_pix = &pix_fmt_info[src_pix_fmt];
  2019. if (src_pix_fmt == dst_pix_fmt) {
  2020. /* no conversion needed: just copy */
  2021. img_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
  2022. return 0;
  2023. }
  2024. ce = &convert_table[src_pix_fmt][dst_pix_fmt];
  2025. if (ce->convert) {
  2026. /* specific conversion routine */
  2027. ce->convert(dst, src, dst_width, dst_height);
  2028. return 0;
  2029. }
  2030. /* gray to YUV */
  2031. if (is_yuv_planar(dst_pix) &&
  2032. src_pix_fmt == PIX_FMT_GRAY8) {
  2033. int w, h, y;
  2034. uint8_t *d;
  2035. if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
  2036. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2037. src->data[0], src->linesize[0],
  2038. dst_width, dst_height);
  2039. } else {
  2040. img_apply_table(dst->data[0], dst->linesize[0],
  2041. src->data[0], src->linesize[0],
  2042. dst_width, dst_height,
  2043. y_jpeg_to_ccir);
  2044. }
  2045. /* fill U and V with 128 */
  2046. w = dst_width;
  2047. h = dst_height;
  2048. w >>= dst_pix->x_chroma_shift;
  2049. h >>= dst_pix->y_chroma_shift;
  2050. for(i = 1; i <= 2; i++) {
  2051. d = dst->data[i];
  2052. for(y = 0; y< h; y++) {
  2053. memset(d, 128, w);
  2054. d += dst->linesize[i];
  2055. }
  2056. }
  2057. return 0;
  2058. }
  2059. /* YUV to gray */
  2060. if (is_yuv_planar(src_pix) &&
  2061. dst_pix_fmt == PIX_FMT_GRAY8) {
  2062. if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
  2063. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2064. src->data[0], src->linesize[0],
  2065. dst_width, dst_height);
  2066. } else {
  2067. img_apply_table(dst->data[0], dst->linesize[0],
  2068. src->data[0], src->linesize[0],
  2069. dst_width, dst_height,
  2070. y_ccir_to_jpeg);
  2071. }
  2072. return 0;
  2073. }
  2074. /* YUV to YUV planar */
  2075. if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
  2076. int x_shift, y_shift, w, h, xy_shift;
  2077. void (*resize_func)(uint8_t *dst, int dst_wrap,
  2078. const uint8_t *src, int src_wrap,
  2079. int width, int height);
  2080. /* compute chroma size of the smallest dimensions */
  2081. w = dst_width;
  2082. h = dst_height;
  2083. if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
  2084. w >>= dst_pix->x_chroma_shift;
  2085. else
  2086. w >>= src_pix->x_chroma_shift;
  2087. if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
  2088. h >>= dst_pix->y_chroma_shift;
  2089. else
  2090. h >>= src_pix->y_chroma_shift;
  2091. x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
  2092. y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
  2093. xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
  2094. /* there must be filters for conversion at least from and to
  2095. YUV444 format */
  2096. switch(xy_shift) {
  2097. case 0x00:
  2098. resize_func = ff_img_copy_plane;
  2099. break;
  2100. case 0x10:
  2101. resize_func = shrink21;
  2102. break;
  2103. case 0x20:
  2104. resize_func = shrink41;
  2105. break;
  2106. case 0x01:
  2107. resize_func = shrink12;
  2108. break;
  2109. case 0x11:
  2110. resize_func = ff_shrink22;
  2111. break;
  2112. case 0x22:
  2113. resize_func = ff_shrink44;
  2114. break;
  2115. case 0xf0:
  2116. resize_func = grow21;
  2117. break;
  2118. case 0xe0:
  2119. resize_func = grow41;
  2120. break;
  2121. case 0xff:
  2122. resize_func = grow22;
  2123. break;
  2124. case 0xee:
  2125. resize_func = grow44;
  2126. break;
  2127. case 0xf1:
  2128. resize_func = conv411;
  2129. break;
  2130. default:
  2131. /* currently not handled */
  2132. goto no_chroma_filter;
  2133. }
  2134. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  2135. src->data[0], src->linesize[0],
  2136. dst_width, dst_height);
  2137. for(i = 1;i <= 2; i++)
  2138. resize_func(dst->data[i], dst->linesize[i],
  2139. src->data[i], src->linesize[i],
  2140. dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
  2141. /* if yuv color space conversion is needed, we do it here on
  2142. the destination image */
  2143. if (dst_pix->color_type != src_pix->color_type) {
  2144. const uint8_t *y_table, *c_table;
  2145. if (dst_pix->color_type == FF_COLOR_YUV) {
  2146. y_table = y_jpeg_to_ccir;
  2147. c_table = c_jpeg_to_ccir;
  2148. } else {
  2149. y_table = y_ccir_to_jpeg;
  2150. c_table = c_ccir_to_jpeg;
  2151. }
  2152. img_apply_table(dst->data[0], dst->linesize[0],
  2153. dst->data[0], dst->linesize[0],
  2154. dst_width, dst_height,
  2155. y_table);
  2156. for(i = 1;i <= 2; i++)
  2157. img_apply_table(dst->data[i], dst->linesize[i],
  2158. dst->data[i], dst->linesize[i],
  2159. dst_width>>dst_pix->x_chroma_shift,
  2160. dst_height>>dst_pix->y_chroma_shift,
  2161. c_table);
  2162. }
  2163. return 0;
  2164. }
  2165. no_chroma_filter:
  2166. /* try to use an intermediate format */
  2167. if (src_pix_fmt == PIX_FMT_YUV422 ||
  2168. dst_pix_fmt == PIX_FMT_YUV422) {
  2169. /* specific case: convert to YUV422P first */
  2170. int_pix_fmt = PIX_FMT_YUV422P;
  2171. } else if (src_pix_fmt == PIX_FMT_UYVY422 ||
  2172. dst_pix_fmt == PIX_FMT_UYVY422) {
  2173. /* specific case: convert to YUV422P first */
  2174. int_pix_fmt = PIX_FMT_YUV422P;
  2175. } else if (src_pix_fmt == PIX_FMT_UYVY411 ||
  2176. dst_pix_fmt == PIX_FMT_UYVY411) {
  2177. /* specific case: convert to YUV411P first */
  2178. int_pix_fmt = PIX_FMT_YUV411P;
  2179. } else if ((src_pix->color_type == FF_COLOR_GRAY &&
  2180. src_pix_fmt != PIX_FMT_GRAY8) ||
  2181. (dst_pix->color_type == FF_COLOR_GRAY &&
  2182. dst_pix_fmt != PIX_FMT_GRAY8)) {
  2183. /* gray8 is the normalized format */
  2184. int_pix_fmt = PIX_FMT_GRAY8;
  2185. } else if ((is_yuv_planar(src_pix) &&
  2186. src_pix_fmt != PIX_FMT_YUV444P &&
  2187. src_pix_fmt != PIX_FMT_YUVJ444P)) {
  2188. /* yuv444 is the normalized format */
  2189. if (src_pix->color_type == FF_COLOR_YUV_JPEG)
  2190. int_pix_fmt = PIX_FMT_YUVJ444P;
  2191. else
  2192. int_pix_fmt = PIX_FMT_YUV444P;
  2193. } else if ((is_yuv_planar(dst_pix) &&
  2194. dst_pix_fmt != PIX_FMT_YUV444P &&
  2195. dst_pix_fmt != PIX_FMT_YUVJ444P)) {
  2196. /* yuv444 is the normalized format */
  2197. if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
  2198. int_pix_fmt = PIX_FMT_YUVJ444P;
  2199. else
  2200. int_pix_fmt = PIX_FMT_YUV444P;
  2201. } else {
  2202. /* the two formats are rgb or gray8 or yuv[j]444p */
  2203. if (src_pix->is_alpha && dst_pix->is_alpha)
  2204. int_pix_fmt = PIX_FMT_RGBA32;
  2205. else
  2206. int_pix_fmt = PIX_FMT_RGB24;
  2207. }
  2208. if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2209. return -1;
  2210. ret = -1;
  2211. if (img_convert(tmp, int_pix_fmt,
  2212. src, src_pix_fmt, src_width, src_height) < 0)
  2213. goto fail1;
  2214. if (img_convert(dst, dst_pix_fmt,
  2215. tmp, int_pix_fmt, dst_width, dst_height) < 0)
  2216. goto fail1;
  2217. ret = 0;
  2218. fail1:
  2219. avpicture_free(tmp);
  2220. return ret;
  2221. }
  2222. #endif
  2223. /* NOTE: we scan all the pixels to have an exact information */
  2224. static int get_alpha_info_pal8(const AVPicture *src, int width, int height)
  2225. {
  2226. const unsigned char *p;
  2227. int src_wrap, ret, x, y;
  2228. unsigned int a;
  2229. uint32_t *palette = (uint32_t *)src->data[1];
  2230. p = src->data[0];
  2231. src_wrap = src->linesize[0] - width;
  2232. ret = 0;
  2233. for(y=0;y<height;y++) {
  2234. for(x=0;x<width;x++) {
  2235. a = palette[p[0]] >> 24;
  2236. if (a == 0x00) {
  2237. ret |= FF_ALPHA_TRANSP;
  2238. } else if (a != 0xff) {
  2239. ret |= FF_ALPHA_SEMI_TRANSP;
  2240. }
  2241. p++;
  2242. }
  2243. p += src_wrap;
  2244. }
  2245. return ret;
  2246. }
  2247. /**
  2248. * Tell if an image really has transparent alpha values.
  2249. * @return ored mask of FF_ALPHA_xxx constants
  2250. */
  2251. int img_get_alpha_info(const AVPicture *src,
  2252. int pix_fmt, int width, int height)
  2253. {
  2254. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  2255. int ret;
  2256. pf = &pix_fmt_info[pix_fmt];
  2257. /* no alpha can be represented in format */
  2258. if (!pf->is_alpha)
  2259. return 0;
  2260. switch(pix_fmt) {
  2261. case PIX_FMT_RGBA32:
  2262. ret = get_alpha_info_rgba32(src, width, height);
  2263. break;
  2264. case PIX_FMT_RGB555:
  2265. ret = get_alpha_info_rgb555(src, width, height);
  2266. break;
  2267. case PIX_FMT_PAL8:
  2268. ret = get_alpha_info_pal8(src, width, height);
  2269. break;
  2270. default:
  2271. /* we do not know, so everything is indicated */
  2272. ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
  2273. break;
  2274. }
  2275. return ret;
  2276. }
#ifdef HAVE_MMX
/* MMX helpers for deinterlace_line()/deinterlace_line_inplace().
   Each macro filters 4 luma pixels with the [-1 4 2 4 -1]/8 tap set:
   mm1 = 4*(m3+m1) + 2*m2 + rounder, then saturating-subtract (m4+cur)
   and shift right by 3.  Both macros assume the caller has set
   mm7 = 0 (for byte->word unpacking) and mm6 = rounder of four 4s.
   The in-place variant additionally stores the original m2 samples
   into the m4 row (movd_r2m(mm2,lum_m4[0])) before m2 is overwritten,
   mirroring the scalar in-place code path. */
#define DEINT_INPLACE_LINE_LUM \
movd_m2r(lum_m4[0],mm0);\
movd_m2r(lum_m3[0],mm1);\
movd_m2r(lum_m2[0],mm2);\
movd_m2r(lum_m1[0],mm3);\
movd_m2r(lum[0],mm4);\
punpcklbw_r2r(mm7,mm0);\
movd_r2m(mm2,lum_m4[0]);\
punpcklbw_r2r(mm7,mm1);\
punpcklbw_r2r(mm7,mm2);\
punpcklbw_r2r(mm7,mm3);\
punpcklbw_r2r(mm7,mm4);\
paddw_r2r(mm3,mm1);\
psllw_i2r(1,mm2);\
paddw_r2r(mm4,mm0);\
psllw_i2r(2,mm1);\
paddw_r2r(mm6,mm2);\
paddw_r2r(mm2,mm1);\
psubusw_r2r(mm0,mm1);\
psrlw_i2r(3,mm1);\
packuswb_r2r(mm7,mm1);\
movd_r2m(mm1,lum_m2[0]);
/* Same filter, but the result goes to dst[0] instead of back into the
   source rows (out-of-place variant used by deinterlace_line()). */
#define DEINT_LINE_LUM \
movd_m2r(lum_m4[0],mm0);\
movd_m2r(lum_m3[0],mm1);\
movd_m2r(lum_m2[0],mm2);\
movd_m2r(lum_m1[0],mm3);\
movd_m2r(lum[0],mm4);\
punpcklbw_r2r(mm7,mm0);\
punpcklbw_r2r(mm7,mm1);\
punpcklbw_r2r(mm7,mm2);\
punpcklbw_r2r(mm7,mm3);\
punpcklbw_r2r(mm7,mm4);\
paddw_r2r(mm3,mm1);\
psllw_i2r(1,mm2);\
paddw_r2r(mm4,mm0);\
psllw_i2r(2,mm1);\
paddw_r2r(mm6,mm2);\
paddw_r2r(mm2,mm1);\
psubusw_r2r(mm0,mm1);\
psrlw_i2r(3,mm1);\
packuswb_r2r(mm7,mm1);\
movd_r2m(mm1,dst[0]);
#endif
  2322. /* filter parameters: [-1 4 2 4 -1] // 8 */
  2323. static void deinterlace_line(uint8_t *dst,
  2324. const uint8_t *lum_m4, const uint8_t *lum_m3,
  2325. const uint8_t *lum_m2, const uint8_t *lum_m1,
  2326. const uint8_t *lum,
  2327. int size)
  2328. {
  2329. #ifndef HAVE_MMX
  2330. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  2331. int sum;
  2332. for(;size > 0;size--) {
  2333. sum = -lum_m4[0];
  2334. sum += lum_m3[0] << 2;
  2335. sum += lum_m2[0] << 1;
  2336. sum += lum_m1[0] << 2;
  2337. sum += -lum[0];
  2338. dst[0] = cm[(sum + 4) >> 3];
  2339. lum_m4++;
  2340. lum_m3++;
  2341. lum_m2++;
  2342. lum_m1++;
  2343. lum++;
  2344. dst++;
  2345. }
  2346. #else
  2347. {
  2348. mmx_t rounder;
  2349. rounder.uw[0]=4;
  2350. rounder.uw[1]=4;
  2351. rounder.uw[2]=4;
  2352. rounder.uw[3]=4;
  2353. pxor_r2r(mm7,mm7);
  2354. movq_m2r(rounder,mm6);
  2355. }
  2356. for (;size > 3; size-=4) {
  2357. DEINT_LINE_LUM
  2358. lum_m4+=4;
  2359. lum_m3+=4;
  2360. lum_m2+=4;
  2361. lum_m1+=4;
  2362. lum+=4;
  2363. dst+=4;
  2364. }
  2365. #endif
  2366. }
  2367. static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
  2368. int size)
  2369. {
  2370. #ifndef HAVE_MMX
  2371. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  2372. int sum;
  2373. for(;size > 0;size--) {
  2374. sum = -lum_m4[0];
  2375. sum += lum_m3[0] << 2;
  2376. sum += lum_m2[0] << 1;
  2377. lum_m4[0]=lum_m2[0];
  2378. sum += lum_m1[0] << 2;
  2379. sum += -lum[0];
  2380. lum_m2[0] = cm[(sum + 4) >> 3];
  2381. lum_m4++;
  2382. lum_m3++;
  2383. lum_m2++;
  2384. lum_m1++;
  2385. lum++;
  2386. }
  2387. #else
  2388. {
  2389. mmx_t rounder;
  2390. rounder.uw[0]=4;
  2391. rounder.uw[1]=4;
  2392. rounder.uw[2]=4;
  2393. rounder.uw[3]=4;
  2394. pxor_r2r(mm7,mm7);
  2395. movq_m2r(rounder,mm6);
  2396. }
  2397. for (;size > 3; size-=4) {
  2398. DEINT_INPLACE_LINE_LUM
  2399. lum_m4+=4;
  2400. lum_m3+=4;
  2401. lum_m2+=4;
  2402. lum_m1+=4;
  2403. lum+=4;
  2404. }
  2405. #endif
  2406. }
  2407. /* deinterlacing : 2 temporal taps, 3 spatial taps linear filter. The
  2408. top field is copied as is, but the bottom field is deinterlaced
  2409. against the top field. */
  2410. static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
  2411. const uint8_t *src1, int src_wrap,
  2412. int width, int height)
  2413. {
  2414. const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
  2415. int y;
  2416. src_m2 = src1;
  2417. src_m1 = src1;
  2418. src_0=&src_m1[src_wrap];
  2419. src_p1=&src_0[src_wrap];
  2420. src_p2=&src_p1[src_wrap];
  2421. for(y=0;y<(height-2);y+=2) {
  2422. memcpy(dst,src_m1,width);
  2423. dst += dst_wrap;
  2424. deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
  2425. src_m2 = src_0;
  2426. src_m1 = src_p1;
  2427. src_0 = src_p2;
  2428. src_p1 += 2*src_wrap;
  2429. src_p2 += 2*src_wrap;
  2430. dst += dst_wrap;
  2431. }
  2432. memcpy(dst,src_m1,width);
  2433. dst += dst_wrap;
  2434. /* do last line */
  2435. deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
  2436. }
  2437. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  2438. int width, int height)
  2439. {
  2440. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  2441. int y;
  2442. uint8_t *buf;
  2443. buf = (uint8_t*)av_malloc(width);
  2444. src_m1 = src1;
  2445. memcpy(buf,src_m1,width);
  2446. src_0=&src_m1[src_wrap];
  2447. src_p1=&src_0[src_wrap];
  2448. src_p2=&src_p1[src_wrap];
  2449. for(y=0;y<(height-2);y+=2) {
  2450. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  2451. src_m1 = src_p1;
  2452. src_0 = src_p2;
  2453. src_p1 += 2*src_wrap;
  2454. src_p2 += 2*src_wrap;
  2455. }
  2456. /* do last line */
  2457. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  2458. av_free(buf);
  2459. }
  2460. /* deinterlace - if not supported return -1 */
  2461. int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
  2462. int pix_fmt, int width, int height)
  2463. {
  2464. int i;
  2465. if (pix_fmt != PIX_FMT_YUV420P &&
  2466. pix_fmt != PIX_FMT_YUV422P &&
  2467. pix_fmt != PIX_FMT_YUV444P &&
  2468. pix_fmt != PIX_FMT_YUV411P)
  2469. return -1;
  2470. if ((width & 3) != 0 || (height & 3) != 0)
  2471. return -1;
  2472. for(i=0;i<3;i++) {
  2473. if (i == 1) {
  2474. switch(pix_fmt) {
  2475. case PIX_FMT_YUV420P:
  2476. width >>= 1;
  2477. height >>= 1;
  2478. break;
  2479. case PIX_FMT_YUV422P:
  2480. width >>= 1;
  2481. break;
  2482. case PIX_FMT_YUV411P:
  2483. width >>= 2;
  2484. break;
  2485. default:
  2486. break;
  2487. }
  2488. }
  2489. if (src == dst) {
  2490. deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i],
  2491. width, height);
  2492. } else {
  2493. deinterlace_bottom_field(dst->data[i],dst->linesize[i],
  2494. src->data[i], src->linesize[i],
  2495. width, height);
  2496. }
  2497. }
  2498. #ifdef HAVE_MMX
  2499. emms();
  2500. #endif
  2501. return 0;
  2502. }
  2503. #undef FIX