You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2595 lines
69KB

  1. /*
 * Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
  4. *
  5. * This library is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU Lesser General Public
  7. * License as published by the Free Software Foundation; either
  8. * version 2 of the License, or (at your option) any later version.
  9. *
  10. * This library is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * Lesser General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU Lesser General Public
  16. * License along with this library; if not, write to the Free Software
  17. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  18. */
  19. /**
  20. * @file imgconvert.c
 * Misc image conversion routines.
  22. */
  23. /* TODO:
  24. * - write 'ffimg' program to test all the image related stuff
  25. * - move all api to slice based system
  26. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  27. */
  28. #include "avcodec.h"
  29. #include "dsputil.h"
  30. #ifdef USE_FASTMEMCPY
  31. #include "fastmemcpy.h"
  32. #endif
  33. #ifdef HAVE_MMX
  34. #include "i386/mmx.h"
  35. #endif
/* token-pasting helpers: glue() expands its arguments before concatenation */
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)

/* color space of a pixel format (PixFmtInfo.color_type) */
#define FF_COLOR_RGB 0 /* RGB color space */
#define FF_COLOR_GRAY 1 /* gray color space */
#define FF_COLOR_YUV 2 /* YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
#define FF_COLOR_YUV_JPEG 3 /* YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */

/* storage layout of a pixel format (PixFmtInfo.pixel_type) */
#define FF_PIXEL_PLANAR 0 /* each channel has one component in AVPicture */
#define FF_PIXEL_PACKED 1 /* only one components containing all the channels */
#define FF_PIXEL_PALETTE 2 /* one components containing indexes for a palette */
/* static description of one pixel format; one instance per PIX_FMT_* value
   lives in the pix_fmt_info[] table below */
typedef struct PixFmtInfo {
    const char *name;        /* canonical format name, e.g. "yuv420p" */
    uint8_t nb_channels;     /* number of channels (including alpha) */
    uint8_t color_type;      /* color type (see FF_COLOR_xxx constants) */
    uint8_t pixel_type;      /* pixel storage type (see FF_PIXEL_xxx constants) */
    uint8_t is_alpha : 1;    /* true if alpha can be specified */
    uint8_t x_chroma_shift;  /* X chroma subsampling factor is 2 ^ shift */
    uint8_t y_chroma_shift;  /* Y chroma subsampling factor is 2 ^ shift */
    uint8_t depth;           /* bit depth of the color components */
} PixFmtInfo;
/* this table gives more information about formats */
/* NOTE: PIX_FMT_* values without an entry here are left zero-initialized,
   so their .name is NULL — callers iterating this table must cope with that */
static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
    /* YUV formats */
    [PIX_FMT_YUV420P] = {
        .name = "yuv420p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 1,
    },
    [PIX_FMT_YUV422P] = {
        .name = "yuv422p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_YUV444P] = {
        .name = "yuv444p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    /* packed YUYV: nb_channels is 1 because all samples live in data[0] */
    [PIX_FMT_YUV422] = {
        .name = "yuv422",
        .nb_channels = 1,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_UYVY422] = {
        .name = "uyvy422",
        .nb_channels = 1,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_YUV410P] = {
        .name = "yuv410p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 2, .y_chroma_shift = 2,
    },
    [PIX_FMT_YUV411P] = {
        .name = "yuv411p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 2, .y_chroma_shift = 0,
    },
    /* JPEG YUV (full-range samples, see FF_COLOR_YUV_JPEG) */
    [PIX_FMT_YUVJ420P] = {
        .name = "yuvj420p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV_JPEG,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 1,
    },
    [PIX_FMT_YUVJ422P] = {
        .name = "yuvj422p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV_JPEG,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 1, .y_chroma_shift = 0,
    },
    [PIX_FMT_YUVJ444P] = {
        .name = "yuvj444p",
        .nb_channels = 3,
        .color_type = FF_COLOR_YUV_JPEG,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    /* RGB formats */
    [PIX_FMT_RGB24] = {
        .name = "rgb24",
        .nb_channels = 3,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    [PIX_FMT_BGR24] = {
        .name = "bgr24",
        .nb_channels = 3,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    [PIX_FMT_RGBA32] = {
        .name = "rgba32",
        .nb_channels = 4, .is_alpha = 1,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    [PIX_FMT_RGB565] = {
        .name = "rgb565",
        .nb_channels = 3,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 5,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    /* NOTE(review): rgb555 declares 4 channels with alpha — presumably the
       unused top bit is treated as alpha; confirm against users of is_alpha */
    [PIX_FMT_RGB555] = {
        .name = "rgb555",
        .nb_channels = 4, .is_alpha = 1,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 5,
        .x_chroma_shift = 0, .y_chroma_shift = 0,
    },
    /* gray / mono formats */
    [PIX_FMT_GRAY8] = {
        .name = "gray",
        .nb_channels = 1,
        .color_type = FF_COLOR_GRAY,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 8,
    },
    [PIX_FMT_MONOWHITE] = {
        .name = "monow",
        .nb_channels = 1,
        .color_type = FF_COLOR_GRAY,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 1,
    },
    [PIX_FMT_MONOBLACK] = {
        .name = "monob",
        .nb_channels = 1,
        .color_type = FF_COLOR_GRAY,
        .pixel_type = FF_PIXEL_PLANAR,
        .depth = 1,
    },
    /* paletted formats */
    [PIX_FMT_PAL8] = {
        .name = "pal8",
        .nb_channels = 4, .is_alpha = 1,
        .color_type = FF_COLOR_RGB,
        .pixel_type = FF_PIXEL_PALETTE,
        .depth = 8,
    },
    /* hardware (XvMC) formats: name only, no software layout */
    [PIX_FMT_XVMC_MPEG2_MC] = {
        .name = "xvmcmc",
    },
    [PIX_FMT_XVMC_MPEG2_IDCT] = {
        .name = "xvmcidct",
    },
    [PIX_FMT_UYVY411] = {
        .name = "uyvy411",
        .nb_channels = 1,
        .color_type = FF_COLOR_YUV,
        .pixel_type = FF_PIXEL_PACKED,
        .depth = 8,
        .x_chroma_shift = 2, .y_chroma_shift = 0,
    },
};
  225. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  226. {
  227. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  228. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  229. }
  230. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  231. {
  232. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  233. return "???";
  234. else
  235. return pix_fmt_info[pix_fmt].name;
  236. }
  237. enum PixelFormat avcodec_get_pix_fmt(const char* name)
  238. {
  239. int i;
  240. for (i=0; i < PIX_FMT_NB; i++)
  241. if (!strcmp(pix_fmt_info[i].name, name))
  242. break;
  243. return i;
  244. }
/* Picture field are filled with 'ptr' addresses. Also return size */
/* Fills picture->data[] and picture->linesize[] for a tightly packed image
   of the given format/size stored at 'ptr' (ptr may be NULL to just compute
   the byte size). Returns the total byte size, or -1 on error.
   NOTE(review): data[3]/linesize entries beyond those used by a format are
   left untouched on success — callers appear to rely only on the set ones. */
int avpicture_fill(AVPicture *picture, uint8_t *ptr,
                   int pix_fmt, int width, int height)
{
    int size, w2, h2, size2;
    const PixFmtInfo *pinfo;

    if(avcodec_check_dimensions(NULL, width, height))
        goto fail;

    pinfo = &pix_fmt_info[pix_fmt];
    size = width * height;
    switch(pix_fmt) {
    /* planar YUV: luma plane followed by two (possibly subsampled) chroma planes */
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
    case PIX_FMT_YUVJ420P:
    case PIX_FMT_YUVJ422P:
    case PIX_FMT_YUVJ444P:
        /* chroma dimensions, rounded up for odd sizes */
        w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
        h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
        size2 = w2 * h2;
        picture->data[0] = ptr;
        picture->data[1] = picture->data[0] + size;
        picture->data[2] = picture->data[1] + size2;
        picture->linesize[0] = width;
        picture->linesize[1] = w2;
        picture->linesize[2] = w2;
        return size + 2 * size2;
    /* packed 24 bits/pixel */
    case PIX_FMT_RGB24:
    case PIX_FMT_BGR24:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 3;
        return size * 3;
    /* packed 32 bits/pixel */
    case PIX_FMT_RGBA32:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 4;
        return size * 4;
    /* packed 16 bits/pixel */
    case PIX_FMT_RGB555:
    case PIX_FMT_RGB565:
    case PIX_FMT_YUV422:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 2;
        return size * 2;
    case PIX_FMT_UYVY422:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width * 2;
        return size * 2;
    /* packed 4:1:1 — 12 bits/pixel on average */
    case PIX_FMT_UYVY411:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width + width/2;
        return size + size/2;
    case PIX_FMT_GRAY8:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = width;
        return size;
    /* 1 bit/pixel, rows padded to a whole byte */
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->linesize[0] = (width + 7) >> 3;
        return picture->linesize[0] * height;
    case PIX_FMT_PAL8:
        /* round the index plane up to a 4-byte boundary so the palette is aligned */
        size2 = (size + 3) & ~3;
        picture->data[0] = ptr;
        picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
        picture->data[2] = NULL;
        picture->linesize[0] = width;
        picture->linesize[1] = 4;
        return size2 + 256 * 4;
    default:
fail:
        picture->data[0] = NULL;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        return -1;
    }
}
/* Serialize the planes of 'src' into the contiguous buffer 'dest'.
   Returns the number of bytes written, or -1 if dest_size is too small or
   the format/size is invalid. For PAL8, the 256-entry palette is appended
   after the (4-byte-aligned) index plane. */
int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
                     unsigned char *dest, int dest_size)
{
    const PixFmtInfo* pf = &pix_fmt_info[pix_fmt];
    int i, j, w, h, data_planes;
    const unsigned char* s;
    int size = avpicture_get_size(pix_fmt, width, height);

    if (size > dest_size || size < 0)
        return -1;

    if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
        /* single interleaved plane: compute the row width in bytes */
        if (pix_fmt == PIX_FMT_YUV422 ||
            pix_fmt == PIX_FMT_UYVY422 ||
            pix_fmt == PIX_FMT_RGB565 ||
            pix_fmt == PIX_FMT_RGB555)
            w = width * 2;
        else if (pix_fmt == PIX_FMT_UYVY411)
            w = width + width/2;
        else if (pix_fmt == PIX_FMT_PAL8)
            w = width;
        else
            w = width * (pf->depth * pf->nb_channels / 8);

        data_planes = 1;
        h = height;
    } else {
        data_planes = pf->nb_channels;
        w = (width*pf->depth + 7)/8;
        h = height;
    }

    for (i=0; i<data_planes; i++) {
        /* from the second plane on, use the (shared) chroma dimensions */
        if (i == 1) {
            w = width >> pf->x_chroma_shift;
            h = height >> pf->y_chroma_shift;
        }
        s = src->data[i];
        for(j=0; j<h; j++) {
            memcpy(dest, s, w);
            dest += w;
            s += src->linesize[i];
        }
    }

    if (pf->pixel_type == FF_PIXEL_PALETTE)
        /* the palette goes at the next 4-byte boundary after the indexes */
        memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);

    return size;
}
  381. int avpicture_get_size(int pix_fmt, int width, int height)
  382. {
  383. AVPicture dummy_pict;
  384. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  385. }
  386. /**
  387. * compute the loss when converting from a pixel format to another
  388. */
  389. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  390. int has_alpha)
  391. {
  392. const PixFmtInfo *pf, *ps;
  393. int loss;
  394. ps = &pix_fmt_info[src_pix_fmt];
  395. pf = &pix_fmt_info[dst_pix_fmt];
  396. /* compute loss */
  397. loss = 0;
  398. pf = &pix_fmt_info[dst_pix_fmt];
  399. if (pf->depth < ps->depth ||
  400. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  401. loss |= FF_LOSS_DEPTH;
  402. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  403. pf->y_chroma_shift > ps->y_chroma_shift)
  404. loss |= FF_LOSS_RESOLUTION;
  405. switch(pf->color_type) {
  406. case FF_COLOR_RGB:
  407. if (ps->color_type != FF_COLOR_RGB &&
  408. ps->color_type != FF_COLOR_GRAY)
  409. loss |= FF_LOSS_COLORSPACE;
  410. break;
  411. case FF_COLOR_GRAY:
  412. if (ps->color_type != FF_COLOR_GRAY)
  413. loss |= FF_LOSS_COLORSPACE;
  414. break;
  415. case FF_COLOR_YUV:
  416. if (ps->color_type != FF_COLOR_YUV)
  417. loss |= FF_LOSS_COLORSPACE;
  418. break;
  419. case FF_COLOR_YUV_JPEG:
  420. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  421. ps->color_type != FF_COLOR_YUV &&
  422. ps->color_type != FF_COLOR_GRAY)
  423. loss |= FF_LOSS_COLORSPACE;
  424. break;
  425. default:
  426. /* fail safe test */
  427. if (ps->color_type != pf->color_type)
  428. loss |= FF_LOSS_COLORSPACE;
  429. break;
  430. }
  431. if (pf->color_type == FF_COLOR_GRAY &&
  432. ps->color_type != FF_COLOR_GRAY)
  433. loss |= FF_LOSS_CHROMA;
  434. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  435. loss |= FF_LOSS_ALPHA;
  436. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  437. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  438. loss |= FF_LOSS_COLORQUANT;
  439. return loss;
  440. }
  441. static int avg_bits_per_pixel(int pix_fmt)
  442. {
  443. int bits;
  444. const PixFmtInfo *pf;
  445. pf = &pix_fmt_info[pix_fmt];
  446. switch(pf->pixel_type) {
  447. case FF_PIXEL_PACKED:
  448. switch(pix_fmt) {
  449. case PIX_FMT_YUV422:
  450. case PIX_FMT_UYVY422:
  451. case PIX_FMT_RGB565:
  452. case PIX_FMT_RGB555:
  453. bits = 16;
  454. break;
  455. case PIX_FMT_UYVY411:
  456. bits = 12;
  457. break;
  458. default:
  459. bits = pf->depth * pf->nb_channels;
  460. break;
  461. }
  462. break;
  463. case FF_PIXEL_PLANAR:
  464. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  465. bits = pf->depth * pf->nb_channels;
  466. } else {
  467. bits = pf->depth + ((2 * pf->depth) >>
  468. (pf->x_chroma_shift + pf->y_chroma_shift));
  469. }
  470. break;
  471. case FF_PIXEL_PALETTE:
  472. bits = 8;
  473. break;
  474. default:
  475. bits = -1;
  476. break;
  477. }
  478. return bits;
  479. }
  480. static int avcodec_find_best_pix_fmt1(int pix_fmt_mask,
  481. int src_pix_fmt,
  482. int has_alpha,
  483. int loss_mask)
  484. {
  485. int dist, i, loss, min_dist, dst_pix_fmt;
  486. /* find exact color match with smallest size */
  487. dst_pix_fmt = -1;
  488. min_dist = 0x7fffffff;
  489. for(i = 0;i < PIX_FMT_NB; i++) {
  490. if (pix_fmt_mask & (1 << i)) {
  491. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  492. if (loss == 0) {
  493. dist = avg_bits_per_pixel(i);
  494. if (dist < min_dist) {
  495. min_dist = dist;
  496. dst_pix_fmt = i;
  497. }
  498. }
  499. }
  500. }
  501. return dst_pix_fmt;
  502. }
  503. /**
  504. * find best pixel format to convert to. Return -1 if none found
  505. */
  506. int avcodec_find_best_pix_fmt(int pix_fmt_mask, int src_pix_fmt,
  507. int has_alpha, int *loss_ptr)
  508. {
  509. int dst_pix_fmt, loss_mask, i;
  510. static const int loss_mask_order[] = {
  511. ~0, /* no loss first */
  512. ~FF_LOSS_ALPHA,
  513. ~FF_LOSS_RESOLUTION,
  514. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  515. ~FF_LOSS_COLORQUANT,
  516. ~FF_LOSS_DEPTH,
  517. 0,
  518. };
  519. /* try with successive loss */
  520. i = 0;
  521. for(;;) {
  522. loss_mask = loss_mask_order[i++];
  523. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  524. has_alpha, loss_mask);
  525. if (dst_pix_fmt >= 0)
  526. goto found;
  527. if (loss_mask == 0)
  528. break;
  529. }
  530. return -1;
  531. found:
  532. if (loss_ptr)
  533. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  534. return dst_pix_fmt;
  535. }
  536. void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
  537. const uint8_t *src, int src_wrap,
  538. int width, int height)
  539. {
  540. if((!dst) || (!src))
  541. return;
  542. for(;height > 0; height--) {
  543. memcpy(dst, src, width);
  544. dst += dst_wrap;
  545. src += src_wrap;
  546. }
  547. }
  548. /**
  549. * Copy image 'src' to 'dst'.
  550. */
  551. void img_copy(AVPicture *dst, const AVPicture *src,
  552. int pix_fmt, int width, int height)
  553. {
  554. int bwidth, bits, i;
  555. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  556. pf = &pix_fmt_info[pix_fmt];
  557. switch(pf->pixel_type) {
  558. case FF_PIXEL_PACKED:
  559. switch(pix_fmt) {
  560. case PIX_FMT_YUV422:
  561. case PIX_FMT_UYVY422:
  562. case PIX_FMT_RGB565:
  563. case PIX_FMT_RGB555:
  564. bits = 16;
  565. break;
  566. case PIX_FMT_UYVY411:
  567. bits = 12;
  568. break;
  569. default:
  570. bits = pf->depth * pf->nb_channels;
  571. break;
  572. }
  573. bwidth = (width * bits + 7) >> 3;
  574. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  575. src->data[0], src->linesize[0],
  576. bwidth, height);
  577. break;
  578. case FF_PIXEL_PLANAR:
  579. for(i = 0; i < pf->nb_channels; i++) {
  580. int w, h;
  581. w = width;
  582. h = height;
  583. if (i == 1 || i == 2) {
  584. w >>= pf->x_chroma_shift;
  585. h >>= pf->y_chroma_shift;
  586. }
  587. bwidth = (w * pf->depth + 7) >> 3;
  588. ff_img_copy_plane(dst->data[i], dst->linesize[i],
  589. src->data[i], src->linesize[i],
  590. bwidth, h);
  591. }
  592. break;
  593. case FF_PIXEL_PALETTE:
  594. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  595. src->data[0], src->linesize[0],
  596. width, height);
  597. /* copy the palette */
  598. ff_img_copy_plane(dst->data[1], dst->linesize[1],
  599. src->data[1], src->linesize[1],
  600. 4, 256);
  601. break;
  602. }
  603. }
/* XXX: totally non optimized */
/* Convert packed YUYV 4:2:2 to planar 4:2:0. Chroma is taken from the even
   source lines only (no vertical averaging). Processes two lines per outer
   iteration; a trailing odd line gets its chroma from that line alone.
   NOTE(review): the odd-width tail reads p[3], one byte past the last
   (Y,Cb) pair of the row — looks like a potential over-read; confirm that
   callers always pass even widths. */
static void yuv422_to_yuv420p(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;

    p1 = src->data[0];
    lum1 = dst->data[0];
    cb1 = dst->data[1];
    cr1 = dst->data[2];
    for(;height >= 1; height -= 2) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        /* first line of the pair: copy luma and both chroma samples */
        for(w = width; w >= 2; w -= 2) {
            lum[0] = p[0];
            cb[0] = p[1];
            lum[1] = p[2];
            cr[0] = p[3];
            p += 4;
            lum += 2;
            cb++;
            cr++;
        }
        /* odd-width tail: one luma plus the chroma of the partial pair */
        if (w) {
            lum[0] = p[0];
            cb[0] = p[1];
            cr[0] = p[3];
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        /* second line of the pair: luma only (4:2:0 shares the chroma above) */
        if (height>1) {
            p = p1;
            lum = lum1;
            for(w = width; w >= 2; w -= 2) {
                lum[0] = p[0];
                lum[1] = p[2];
                p += 4;
                lum += 2;
            }
            if (w) {
                lum[0] = p[0];
            }
            p1 += src->linesize[0];
            lum1 += dst->linesize[0];
        }
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
/* Convert packed UYVY 4:2:2 (byte order U Y0 V Y1) to planar 4:2:0.
   Same structure as yuv422_to_yuv420p, only the packed byte offsets differ.
   NOTE(review): the odd-width tail reads p[2], inside the partial group, so
   unlike the YUYV variant it stays within the current 4-byte group — but
   confirm callers pass even widths anyway for symmetric behavior. */
static void uyvy422_to_yuv420p(AVPicture *dst, const AVPicture *src,
                               int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;

    p1 = src->data[0];
    lum1 = dst->data[0];
    cb1 = dst->data[1];
    cr1 = dst->data[2];
    for(;height >= 1; height -= 2) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        /* first line of the pair: copy luma and both chroma samples */
        for(w = width; w >= 2; w -= 2) {
            lum[0] = p[1];
            cb[0] = p[0];
            lum[1] = p[3];
            cr[0] = p[2];
            p += 4;
            lum += 2;
            cb++;
            cr++;
        }
        /* odd-width tail */
        if (w) {
            lum[0] = p[1];
            cb[0] = p[0];
            cr[0] = p[2];
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        /* second line of the pair: luma only (chroma shared with line above) */
        if (height>1) {
            p = p1;
            lum = lum1;
            for(w = width; w >= 2; w -= 2) {
                lum[0] = p[1];
                lum[1] = p[3];
                p += 4;
                lum += 2;
            }
            if (w) {
                lum[0] = p[1];
            }
            p1 += src->linesize[0];
            lum1 += dst->linesize[0];
        }
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
  711. static void uyvy422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  712. int width, int height)
  713. {
  714. const uint8_t *p, *p1;
  715. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  716. int w;
  717. p1 = src->data[0];
  718. lum1 = dst->data[0];
  719. cb1 = dst->data[1];
  720. cr1 = dst->data[2];
  721. for(;height > 0; height--) {
  722. p = p1;
  723. lum = lum1;
  724. cb = cb1;
  725. cr = cr1;
  726. for(w = width; w >= 2; w -= 2) {
  727. lum[0] = p[1];
  728. cb[0] = p[0];
  729. lum[1] = p[3];
  730. cr[0] = p[2];
  731. p += 4;
  732. lum += 2;
  733. cb++;
  734. cr++;
  735. }
  736. p1 += src->linesize[0];
  737. lum1 += dst->linesize[0];
  738. cb1 += dst->linesize[1];
  739. cr1 += dst->linesize[2];
  740. }
  741. }
  742. static void yuv422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  743. int width, int height)
  744. {
  745. const uint8_t *p, *p1;
  746. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  747. int w;
  748. p1 = src->data[0];
  749. lum1 = dst->data[0];
  750. cb1 = dst->data[1];
  751. cr1 = dst->data[2];
  752. for(;height > 0; height--) {
  753. p = p1;
  754. lum = lum1;
  755. cb = cb1;
  756. cr = cr1;
  757. for(w = width; w >= 2; w -= 2) {
  758. lum[0] = p[0];
  759. cb[0] = p[1];
  760. lum[1] = p[2];
  761. cr[0] = p[3];
  762. p += 4;
  763. lum += 2;
  764. cb++;
  765. cr++;
  766. }
  767. p1 += src->linesize[0];
  768. lum1 += dst->linesize[0];
  769. cb1 += dst->linesize[1];
  770. cr1 += dst->linesize[2];
  771. }
  772. }
  773. static void yuv422p_to_yuv422(AVPicture *dst, const AVPicture *src,
  774. int width, int height)
  775. {
  776. uint8_t *p, *p1;
  777. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  778. int w;
  779. p1 = dst->data[0];
  780. lum1 = src->data[0];
  781. cb1 = src->data[1];
  782. cr1 = src->data[2];
  783. for(;height > 0; height--) {
  784. p = p1;
  785. lum = lum1;
  786. cb = cb1;
  787. cr = cr1;
  788. for(w = width; w >= 2; w -= 2) {
  789. p[0] = lum[0];
  790. p[1] = cb[0];
  791. p[2] = lum[1];
  792. p[3] = cr[0];
  793. p += 4;
  794. lum += 2;
  795. cb++;
  796. cr++;
  797. }
  798. p1 += dst->linesize[0];
  799. lum1 += src->linesize[0];
  800. cb1 += src->linesize[1];
  801. cr1 += src->linesize[2];
  802. }
  803. }
  804. static void yuv422p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  805. int width, int height)
  806. {
  807. uint8_t *p, *p1;
  808. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  809. int w;
  810. p1 = dst->data[0];
  811. lum1 = src->data[0];
  812. cb1 = src->data[1];
  813. cr1 = src->data[2];
  814. for(;height > 0; height--) {
  815. p = p1;
  816. lum = lum1;
  817. cb = cb1;
  818. cr = cr1;
  819. for(w = width; w >= 2; w -= 2) {
  820. p[1] = lum[0];
  821. p[0] = cb[0];
  822. p[3] = lum[1];
  823. p[2] = cr[0];
  824. p += 4;
  825. lum += 2;
  826. cb++;
  827. cr++;
  828. }
  829. p1 += dst->linesize[0];
  830. lum1 += src->linesize[0];
  831. cb1 += src->linesize[1];
  832. cr1 += src->linesize[2];
  833. }
  834. }
  835. static void uyvy411_to_yuv411p(AVPicture *dst, const AVPicture *src,
  836. int width, int height)
  837. {
  838. const uint8_t *p, *p1;
  839. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  840. int w;
  841. p1 = src->data[0];
  842. lum1 = dst->data[0];
  843. cb1 = dst->data[1];
  844. cr1 = dst->data[2];
  845. for(;height > 0; height--) {
  846. p = p1;
  847. lum = lum1;
  848. cb = cb1;
  849. cr = cr1;
  850. for(w = width; w >= 4; w -= 4) {
  851. cb[0] = p[0];
  852. lum[0] = p[1];
  853. lum[1] = p[2];
  854. cr[0] = p[3];
  855. lum[2] = p[4];
  856. lum[3] = p[5];
  857. p += 6;
  858. lum += 4;
  859. cb++;
  860. cr++;
  861. }
  862. p1 += src->linesize[0];
  863. lum1 += dst->linesize[0];
  864. cb1 += dst->linesize[1];
  865. cr1 += dst->linesize[2];
  866. }
  867. }
  868. static void yuv420p_to_yuv422(AVPicture *dst, const AVPicture *src,
  869. int width, int height)
  870. {
  871. int w, h;
  872. uint8_t *line1, *line2, *linesrc = dst->data[0];
  873. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  874. uint8_t *cb1, *cb2 = src->data[1];
  875. uint8_t *cr1, *cr2 = src->data[2];
  876. for(h = height / 2; h--;) {
  877. line1 = linesrc;
  878. line2 = linesrc + dst->linesize[0];
  879. lum1 = lumsrc;
  880. lum2 = lumsrc + src->linesize[0];
  881. cb1 = cb2;
  882. cr1 = cr2;
  883. for(w = width / 2; w--;) {
  884. *line1++ = *lum1++; *line2++ = *lum2++;
  885. *line1++ = *line2++ = *cb1++;
  886. *line1++ = *lum1++; *line2++ = *lum2++;
  887. *line1++ = *line2++ = *cr1++;
  888. }
  889. linesrc += dst->linesize[0] * 2;
  890. lumsrc += src->linesize[0] * 2;
  891. cb2 += src->linesize[1];
  892. cr2 += src->linesize[2];
  893. }
  894. }
  895. static void yuv420p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  896. int width, int height)
  897. {
  898. int w, h;
  899. uint8_t *line1, *line2, *linesrc = dst->data[0];
  900. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  901. uint8_t *cb1, *cb2 = src->data[1];
  902. uint8_t *cr1, *cr2 = src->data[2];
  903. for(h = height / 2; h--;) {
  904. line1 = linesrc;
  905. line2 = linesrc + dst->linesize[0];
  906. lum1 = lumsrc;
  907. lum2 = lumsrc + src->linesize[0];
  908. cb1 = cb2;
  909. cr1 = cr2;
  910. for(w = width / 2; w--;) {
  911. *line1++ = *line2++ = *cb1++;
  912. *line1++ = *lum1++; *line2++ = *lum2++;
  913. *line1++ = *line2++ = *cr1++;
  914. *line1++ = *lum1++; *line2++ = *lum2++;
  915. }
  916. linesrc += dst->linesize[0] * 2;
  917. lumsrc += src->linesize[0] * 2;
  918. cb2 += src->linesize[1];
  919. cr2 += src->linesize[2];
  920. }
  921. }
/* fixed-point scale used by all conversion macros below */
#define SCALEBITS 10
#define ONE_HALF (1 << (SCALEBITS - 1))
/* convert a floating point coefficient to SCALEBITS fixed point, rounded */
#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))

/* CCIR(601)-range YUV -> RGB: compute the per-pixel chroma contributions
   once (cb/cr are 128-centered); coefficients rescaled for 16..240 chroma */
#define YUV_TO_RGB1_CCIR(cb1, cr1)\
{\
    cb = (cb1) - 128;\
    cr = (cr1) - 128;\
    r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
    g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
            ONE_HALF;\
    b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
}

/* expand one CCIR-range luma sample into clamped r/g/b via the 'cm' table */
#define YUV_TO_RGB2_CCIR(r, g, b, y1)\
{\
    y = ((y1) - 16) * FIX(255.0/219.0);\
    r = cm[(y + r_add) >> SCALEBITS];\
    g = cm[(y + g_add) >> SCALEBITS];\
    b = cm[(y + b_add) >> SCALEBITS];\
}

/* full-range (JPEG) YUV -> RGB variants of the two macros above */
#define YUV_TO_RGB1(cb1, cr1)\
{\
    cb = (cb1) - 128;\
    cr = (cr1) - 128;\
    r_add = FIX(1.40200) * cr + ONE_HALF;\
    g_add = - FIX(0.34414) * cb - FIX(0.71414) * cr + ONE_HALF;\
    b_add = FIX(1.77200) * cb + ONE_HALF;\
}

#define YUV_TO_RGB2(r, g, b, y1)\
{\
    y = (y1) << SCALEBITS;\
    r = cm[(y + r_add) >> SCALEBITS];\
    g = cm[(y + g_add) >> SCALEBITS];\
    b = cm[(y + b_add) >> SCALEBITS];\
}

/* range remapping between CCIR (16..235 luma) and JPEG (0..255) levels */
#define Y_CCIR_TO_JPEG(y)\
 cm[((y) * FIX(255.0/219.0) + (ONE_HALF - 16 * FIX(255.0/219.0))) >> SCALEBITS]

#define Y_JPEG_TO_CCIR(y)\
 (((y) * FIX(219.0/255.0) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define C_CCIR_TO_JPEG(y)\
 cm[(((y) - 128) * FIX(127.0/112.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS]

/* NOTE: the clamp is really necessary! */
/* chroma JPEG -> CCIR: the linear remap can go below the legal CCIR
   minimum of 16, hence the explicit lower clamp */
static inline int C_JPEG_TO_CCIR(int y) {
    y = (((y - 128) * FIX(112.0/127.0) + (ONE_HALF + (128 << SCALEBITS))) >> SCALEBITS);
    if (y < 16)
        y = 16;
    return y;
}
/* full-range (JPEG) RGB -> YUV, SCALEBITS fixed point with rounding;
   'shift' lets callers feed pre-summed values (e.g. 2x2 averages) */
#define RGB_TO_Y(r, g, b) \
((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
  FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)

#define RGB_TO_U(r1, g1, b1, shift)\
(((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
     FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V(r1, g1, b1, shift)\
(((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
   FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

/* CCIR(601)-range RGB -> YUV variants (coefficients rescaled, +16 offset on Y) */
#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

/* per-sample lookup tables filled in by img_convert_init() */
static uint8_t y_ccir_to_jpeg[256];
static uint8_t y_jpeg_to_ccir[256];
static uint8_t c_ccir_to_jpeg[256];
static uint8_t c_jpeg_to_ccir[256];
  991. /* init various conversion tables */
  992. static void img_convert_init(void)
  993. {
  994. int i;
  995. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  996. for(i = 0;i < 256; i++) {
  997. y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
  998. y_jpeg_to_ccir[i] = Y_JPEG_TO_CCIR(i);
  999. c_ccir_to_jpeg[i] = C_CCIR_TO_JPEG(i);
  1000. c_jpeg_to_ccir[i] = C_JPEG_TO_CCIR(i);
  1001. }
  1002. }
  1003. /* apply to each pixel the given table */
  1004. static void img_apply_table(uint8_t *dst, int dst_wrap,
  1005. const uint8_t *src, int src_wrap,
  1006. int width, int height, const uint8_t *table1)
  1007. {
  1008. int n;
  1009. const uint8_t *s;
  1010. uint8_t *d;
  1011. const uint8_t *table;
  1012. table = table1;
  1013. for(;height > 0; height--) {
  1014. s = src;
  1015. d = dst;
  1016. n = width;
  1017. while (n >= 4) {
  1018. d[0] = table[s[0]];
  1019. d[1] = table[s[1]];
  1020. d[2] = table[s[2]];
  1021. d[3] = table[s[3]];
  1022. d += 4;
  1023. s += 4;
  1024. n -= 4;
  1025. }
  1026. while (n > 0) {
  1027. d[0] = table[s[0]];
  1028. d++;
  1029. s++;
  1030. n--;
  1031. }
  1032. dst += dst_wrap;
  1033. src += src_wrap;
  1034. }
  1035. }
  1036. /* XXX: use generic filter ? */
  1037. /* XXX: in most cases, the sampling position is incorrect */
  1038. /* 4x1 -> 1x1 */
  1039. static void shrink41(uint8_t *dst, int dst_wrap,
  1040. const uint8_t *src, int src_wrap,
  1041. int width, int height)
  1042. {
  1043. int w;
  1044. const uint8_t *s;
  1045. uint8_t *d;
  1046. for(;height > 0; height--) {
  1047. s = src;
  1048. d = dst;
  1049. for(w = width;w > 0; w--) {
  1050. d[0] = (s[0] + s[1] + s[2] + s[3] + 2) >> 2;
  1051. s += 4;
  1052. d++;
  1053. }
  1054. src += src_wrap;
  1055. dst += dst_wrap;
  1056. }
  1057. }
  1058. /* 2x1 -> 1x1 */
  1059. static void shrink21(uint8_t *dst, int dst_wrap,
  1060. const uint8_t *src, int src_wrap,
  1061. int width, int height)
  1062. {
  1063. int w;
  1064. const uint8_t *s;
  1065. uint8_t *d;
  1066. for(;height > 0; height--) {
  1067. s = src;
  1068. d = dst;
  1069. for(w = width;w > 0; w--) {
  1070. d[0] = (s[0] + s[1]) >> 1;
  1071. s += 2;
  1072. d++;
  1073. }
  1074. src += src_wrap;
  1075. dst += dst_wrap;
  1076. }
  1077. }
  1078. /* 1x2 -> 1x1 */
  1079. static void shrink12(uint8_t *dst, int dst_wrap,
  1080. const uint8_t *src, int src_wrap,
  1081. int width, int height)
  1082. {
  1083. int w;
  1084. uint8_t *d;
  1085. const uint8_t *s1, *s2;
  1086. for(;height > 0; height--) {
  1087. s1 = src;
  1088. s2 = s1 + src_wrap;
  1089. d = dst;
  1090. for(w = width;w >= 4; w-=4) {
  1091. d[0] = (s1[0] + s2[0]) >> 1;
  1092. d[1] = (s1[1] + s2[1]) >> 1;
  1093. d[2] = (s1[2] + s2[2]) >> 1;
  1094. d[3] = (s1[3] + s2[3]) >> 1;
  1095. s1 += 4;
  1096. s2 += 4;
  1097. d += 4;
  1098. }
  1099. for(;w > 0; w--) {
  1100. d[0] = (s1[0] + s2[0]) >> 1;
  1101. s1++;
  1102. s2++;
  1103. d++;
  1104. }
  1105. src += 2 * src_wrap;
  1106. dst += dst_wrap;
  1107. }
  1108. }
  1109. /* 2x2 -> 1x1 */
  1110. void ff_shrink22(uint8_t *dst, int dst_wrap,
  1111. const uint8_t *src, int src_wrap,
  1112. int width, int height)
  1113. {
  1114. int w;
  1115. const uint8_t *s1, *s2;
  1116. uint8_t *d;
  1117. for(;height > 0; height--) {
  1118. s1 = src;
  1119. s2 = s1 + src_wrap;
  1120. d = dst;
  1121. for(w = width;w >= 4; w-=4) {
  1122. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1123. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  1124. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  1125. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  1126. s1 += 8;
  1127. s2 += 8;
  1128. d += 4;
  1129. }
  1130. for(;w > 0; w--) {
  1131. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1132. s1 += 2;
  1133. s2 += 2;
  1134. d++;
  1135. }
  1136. src += 2 * src_wrap;
  1137. dst += dst_wrap;
  1138. }
  1139. }
  1140. /* 4x4 -> 1x1 */
  1141. void ff_shrink44(uint8_t *dst, int dst_wrap,
  1142. const uint8_t *src, int src_wrap,
  1143. int width, int height)
  1144. {
  1145. int w;
  1146. const uint8_t *s1, *s2, *s3, *s4;
  1147. uint8_t *d;
  1148. for(;height > 0; height--) {
  1149. s1 = src;
  1150. s2 = s1 + src_wrap;
  1151. s3 = s2 + src_wrap;
  1152. s4 = s3 + src_wrap;
  1153. d = dst;
  1154. for(w = width;w > 0; w--) {
  1155. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  1156. s2[0] + s2[1] + s2[2] + s2[3] +
  1157. s3[0] + s3[1] + s3[2] + s3[3] +
  1158. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  1159. s1 += 4;
  1160. s2 += 4;
  1161. s3 += 4;
  1162. s4 += 4;
  1163. d++;
  1164. }
  1165. src += 4 * src_wrap;
  1166. dst += dst_wrap;
  1167. }
  1168. }
  1169. /* 8x8 -> 1x1 */
  1170. void ff_shrink88(uint8_t *dst, int dst_wrap,
  1171. const uint8_t *src, int src_wrap,
  1172. int width, int height)
  1173. {
  1174. int w, i;
  1175. for(;height > 0; height--) {
  1176. for(w = width;w > 0; w--) {
  1177. int tmp=0;
  1178. for(i=0; i<8; i++){
  1179. tmp += src[0] + src[1] + src[2] + src[3] + src[4] + src[5] + src[6] + src[7];
  1180. src += src_wrap;
  1181. }
  1182. *(dst++) = (tmp + 32)>>6;
  1183. src += 8 - 8*src_wrap;
  1184. }
  1185. src += 8*src_wrap - 8*width;
  1186. dst += dst_wrap - width;
  1187. }
  1188. }
  1189. static void grow21_line(uint8_t *dst, const uint8_t *src,
  1190. int width)
  1191. {
  1192. int w;
  1193. const uint8_t *s1;
  1194. uint8_t *d;
  1195. s1 = src;
  1196. d = dst;
  1197. for(w = width;w >= 4; w-=4) {
  1198. d[1] = d[0] = s1[0];
  1199. d[3] = d[2] = s1[1];
  1200. s1 += 2;
  1201. d += 4;
  1202. }
  1203. for(;w >= 2; w -= 2) {
  1204. d[1] = d[0] = s1[0];
  1205. s1 ++;
  1206. d += 2;
  1207. }
  1208. /* only needed if width is not a multiple of two */
  1209. /* XXX: veryfy that */
  1210. if (w) {
  1211. d[0] = s1[0];
  1212. }
  1213. }
  1214. static void grow41_line(uint8_t *dst, const uint8_t *src,
  1215. int width)
  1216. {
  1217. int w, v;
  1218. const uint8_t *s1;
  1219. uint8_t *d;
  1220. s1 = src;
  1221. d = dst;
  1222. for(w = width;w >= 4; w-=4) {
  1223. v = s1[0];
  1224. d[0] = v;
  1225. d[1] = v;
  1226. d[2] = v;
  1227. d[3] = v;
  1228. s1 ++;
  1229. d += 4;
  1230. }
  1231. }
  1232. /* 1x1 -> 2x1 */
  1233. static void grow21(uint8_t *dst, int dst_wrap,
  1234. const uint8_t *src, int src_wrap,
  1235. int width, int height)
  1236. {
  1237. for(;height > 0; height--) {
  1238. grow21_line(dst, src, width);
  1239. src += src_wrap;
  1240. dst += dst_wrap;
  1241. }
  1242. }
  1243. /* 1x1 -> 2x2 */
  1244. static void grow22(uint8_t *dst, int dst_wrap,
  1245. const uint8_t *src, int src_wrap,
  1246. int width, int height)
  1247. {
  1248. for(;height > 0; height--) {
  1249. grow21_line(dst, src, width);
  1250. if (height%2)
  1251. src += src_wrap;
  1252. dst += dst_wrap;
  1253. }
  1254. }
  1255. /* 1x1 -> 4x1 */
  1256. static void grow41(uint8_t *dst, int dst_wrap,
  1257. const uint8_t *src, int src_wrap,
  1258. int width, int height)
  1259. {
  1260. for(;height > 0; height--) {
  1261. grow41_line(dst, src, width);
  1262. src += src_wrap;
  1263. dst += dst_wrap;
  1264. }
  1265. }
  1266. /* 1x1 -> 4x4 */
  1267. static void grow44(uint8_t *dst, int dst_wrap,
  1268. const uint8_t *src, int src_wrap,
  1269. int width, int height)
  1270. {
  1271. for(;height > 0; height--) {
  1272. grow41_line(dst, src, width);
  1273. if ((height & 3) == 1)
  1274. src += src_wrap;
  1275. dst += dst_wrap;
  1276. }
  1277. }
  1278. /* 1x2 -> 2x1 */
  1279. static void conv411(uint8_t *dst, int dst_wrap,
  1280. const uint8_t *src, int src_wrap,
  1281. int width, int height)
  1282. {
  1283. int w, c;
  1284. const uint8_t *s1, *s2;
  1285. uint8_t *d;
  1286. width>>=1;
  1287. for(;height > 0; height--) {
  1288. s1 = src;
  1289. s2 = src + src_wrap;
  1290. d = dst;
  1291. for(w = width;w > 0; w--) {
  1292. c = (s1[0] + s2[0]) >> 1;
  1293. d[0] = c;
  1294. d[1] = c;
  1295. s1++;
  1296. s2++;
  1297. d += 2;
  1298. }
  1299. src += src_wrap * 2;
  1300. dst += dst_wrap;
  1301. }
  1302. }
  1303. /* XXX: add jpeg quantize code */
  1304. #define TRANSP_INDEX (6*6*6)
  1305. /* this is maybe slow, but allows for extensions */
  1306. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1307. {
  1308. return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
  1309. }
  1310. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  1311. {
  1312. uint32_t *pal;
  1313. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  1314. int i, r, g, b;
  1315. pal = (uint32_t *)palette;
  1316. i = 0;
  1317. for(r = 0; r < 6; r++) {
  1318. for(g = 0; g < 6; g++) {
  1319. for(b = 0; b < 6; b++) {
  1320. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  1321. (pal_value[g] << 8) | pal_value[b];
  1322. }
  1323. }
  1324. }
  1325. if (has_alpha)
  1326. pal[i++] = 0;
  1327. while (i < 256)
  1328. pal[i++] = 0xff000000;
  1329. }
  1330. /* copy bit n to bits 0 ... n - 1 */
  1331. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1332. {
  1333. int mask;
  1334. mask = (1 << n) - 1;
  1335. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1336. }
/* The following sections instantiate imgconvert_template.h once per
   packed RGB format. Each instantiation defines RGB_NAME plus the
   per-pixel accessor macros (RGB_IN/RGB_OUT and, where the format has
   alpha, RGBA_IN/RGBA_OUT) and BPP before including the template.
   NOTE(review): only the bgr24 section explicitly #undef's its macros;
   presumably imgconvert_template.h undefines its parameters at the end
   of each inclusion — verify against that header. */

/* rgb555 handling */

#define RGB_NAME rgb555

#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (10 - 3), 3);\
    g = bitcopy_n(v >> (5 - 3), 3);\
    b = bitcopy_n(v << 3, 3);\
}

#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (10 - 3), 3);\
    g = bitcopy_n(v >> (5 - 3), 3);\
    b = bitcopy_n(v << 3, 3);\
    a = (-(v >> 15)) & 0xff;\
}

#define RGBA_OUT(d, r, g, b, a)\
{\
    ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3) | \
                           ((a << 8) & 0x8000);\
}

#define BPP 2

#include "imgconvert_template.h"

/* rgb565 handling */

#define RGB_NAME rgb565

#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (11 - 3), 3);\
    g = bitcopy_n(v >> (5 - 2), 2);\
    b = bitcopy_n(v << 3, 3);\
}

#define RGB_OUT(d, r, g, b)\
{\
    ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
}

#define BPP 2

#include "imgconvert_template.h"

/* bgr24 handling */

#define RGB_NAME bgr24

#define RGB_IN(r, g, b, s)\
{\
    b = (s)[0];\
    g = (s)[1];\
    r = (s)[2];\
}

#define RGB_OUT(d, r, g, b)\
{\
    (d)[0] = b;\
    (d)[1] = g;\
    (d)[2] = r;\
}

#define BPP 3

#include "imgconvert_template.h"

#undef RGB_IN
#undef RGB_OUT
#undef BPP

/* rgb24 handling */

#define RGB_NAME rgb24
#define FMT_RGB24

#define RGB_IN(r, g, b, s)\
{\
    r = (s)[0];\
    g = (s)[1];\
    b = (s)[2];\
}

#define RGB_OUT(d, r, g, b)\
{\
    (d)[0] = r;\
    (d)[1] = g;\
    (d)[2] = b;\
}

#define BPP 3

#include "imgconvert_template.h"

/* rgba32 handling */

#define RGB_NAME rgba32
#define FMT_RGBA32

#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define RGBA_OUT(d, r, g, b, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
}

#define BPP 4

#include "imgconvert_template.h"
/* Expand a 1 bit-per-pixel monochrome plane into 8-bit gray.
 * Each source bit becomes 0x00 or 0xff in the destination: the -(bit)
 * trick yields 0 or -1, which the uint8_t store truncates to 0x00/0xff.
 * xor_mask selects polarity: 0xff inverts the source bits ("white is
 * zero" formats), 0x00 keeps them as-is. */
static void mono_to_gray(AVPicture *dst, const AVPicture *src,
                         int width, int height, int xor_mask)
{
    const unsigned char *p;
    unsigned char *q;
    int v, dst_wrap, src_wrap;
    int y, w;

    p = src->data[0];
    /* source rows hold (width+7)/8 packed bytes */
    src_wrap = src->linesize[0] - ((width + 7) >> 3);

    q = dst->data[0];
    dst_wrap = dst->linesize[0] - width;
    for(y=0;y<height;y++) {
        w = width;

        /* full bytes: unpack 8 pixels at once, MSB first */
        while (w >= 8) {
            v = *p++ ^ xor_mask;
            q[0] = -(v >> 7);
            q[1] = -((v >> 6) & 1);
            q[2] = -((v >> 5) & 1);
            q[3] = -((v >> 4) & 1);
            q[4] = -((v >> 3) & 1);
            q[5] = -((v >> 2) & 1);
            q[6] = -((v >> 1) & 1);
            q[7] = -((v >> 0) & 1);
            w -= 8;
            q += 8;
        }
        /* trailing partial byte: shift bits out from the MSB */
        if (w > 0) {
            v = *p++ ^ xor_mask;
            do {
                q[0] = -((v >> 7) & 1);
                q++;
                v <<= 1;
            } while (--w);
        }
        p += src_wrap;
        q += dst_wrap;
    }
}
/* 1bpp "white is zero" -> 8-bit gray: invert bits while expanding */
static void monowhite_to_gray(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    mono_to_gray(dst, src, width, height, 0xff);
}
/* 1bpp "black is zero" -> 8-bit gray: expand bits unchanged */
static void monoblack_to_gray(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    mono_to_gray(dst, src, width, height, 0x00);
}
/* Pack an 8-bit gray plane into 1 bit per pixel by keeping only the
 * top bit of each sample (threshold at 128), MSB first within each
 * byte. xor_mask selects the output polarity (0xff inverts). */
static void gray_to_mono(AVPicture *dst, const AVPicture *src,
                         int width, int height, int xor_mask)
{
    int n;
    const uint8_t *s;
    uint8_t *d;
    int j, b, v, n1, src_wrap, dst_wrap, y;

    s = src->data[0];
    src_wrap = src->linesize[0] - width;
    d = dst->data[0];
    /* destination rows hold (width+7)/8 packed bytes */
    dst_wrap = dst->linesize[0] - ((width + 7) >> 3);

    for(y=0;y<height;y++) {
        n = width;
        /* full output bytes: gather 8 threshold bits */
        while (n >= 8) {
            v = 0;
            for(j=0;j<8;j++) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
            }
            d[0] = v ^ xor_mask;
            d++;
            n -= 8;
        }
        /* trailing partial byte: left-align the n1 collected bits */
        if (n > 0) {
            n1 = n;
            v = 0;
            while (n > 0) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
                n--;
            }
            d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
            d++;
        }
        s += src_wrap;
        d += dst_wrap;
    }
}
/* 8-bit gray -> 1bpp "white is zero": pack and invert */
static void gray_to_monowhite(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    gray_to_mono(dst, src, width, height, 0xff);
}
/* 8-bit gray -> 1bpp "black is zero": pack without inversion */
static void gray_to_monoblack(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    gray_to_mono(dst, src, width, height, 0x00);
}
/* One entry of the direct-conversion dispatch table: a function that
   converts a whole picture from one fixed format to another. A NULL
   convert pointer means no direct routine exists and img_convert()
   must go through an intermediate format. */
typedef struct ConvertEntry {
    void (*convert)(AVPicture *dst,
                    const AVPicture *src, int width, int height);
} ConvertEntry;

/* Add each new convertion function in this table. In order to be able
   to convert from any format to any format, the following constraints
   must be satisfied:

   - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24

   - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8

   - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGBA32

   - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
     PIX_FMT_RGB24.

   - PIX_FMT_422 must convert to and from PIX_FMT_422P.

   The other conversion functions are just optimisations for common cases.
*/
static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
    [PIX_FMT_YUV420P] = {
        [PIX_FMT_YUV422] = {
            .convert = yuv420p_to_yuv422,
        },
        [PIX_FMT_RGB555] = {
            .convert = yuv420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuv420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuv420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuv420p_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = yuv420p_to_rgba32
        },
        [PIX_FMT_UYVY422] = {
            .convert = yuv420p_to_uyvy422,
        },
    },
    [PIX_FMT_YUV422P] = {
        [PIX_FMT_YUV422] = {
            .convert = yuv422p_to_yuv422,
        },
        [PIX_FMT_UYVY422] = {
            .convert = yuv422p_to_uyvy422,
        },
    },
    [PIX_FMT_YUV444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuv444p_to_rgb24
        },
    },
    [PIX_FMT_YUVJ420P] = {
        [PIX_FMT_RGB555] = {
            .convert = yuvj420p_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = yuvj420p_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = yuvj420p_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = yuvj420p_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = yuvj420p_to_rgba32
        },
    },
    [PIX_FMT_YUVJ444P] = {
        [PIX_FMT_RGB24] = {
            .convert = yuvj444p_to_rgb24
        },
    },
    [PIX_FMT_YUV422] = {
        [PIX_FMT_YUV420P] = {
            .convert = yuv422_to_yuv420p,
        },
        [PIX_FMT_YUV422P] = {
            .convert = yuv422_to_yuv422p,
        },
    },
    [PIX_FMT_UYVY422] = {
        [PIX_FMT_YUV420P] = {
            .convert = uyvy422_to_yuv420p,
        },
        [PIX_FMT_YUV422P] = {
            .convert = uyvy422_to_yuv422p,
        },
    },
    [PIX_FMT_RGB24] = {
        [PIX_FMT_YUV420P] = {
            .convert = rgb24_to_yuv420p
        },
        [PIX_FMT_RGB565] = {
            .convert = rgb24_to_rgb565
        },
        [PIX_FMT_RGB555] = {
            .convert = rgb24_to_rgb555
        },
        [PIX_FMT_RGBA32] = {
            .convert = rgb24_to_rgba32
        },
        [PIX_FMT_BGR24] = {
            .convert = rgb24_to_bgr24
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb24_to_gray
        },
        [PIX_FMT_PAL8] = {
            .convert = rgb24_to_pal8
        },
        [PIX_FMT_YUV444P] = {
            .convert = rgb24_to_yuv444p
        },
        [PIX_FMT_YUVJ420P] = {
            .convert = rgb24_to_yuvj420p
        },
        [PIX_FMT_YUVJ444P] = {
            .convert = rgb24_to_yuvj444p
        },
    },
    [PIX_FMT_RGBA32] = {
        [PIX_FMT_RGB24] = {
            .convert = rgba32_to_rgb24
        },
        [PIX_FMT_RGB555] = {
            .convert = rgba32_to_rgb555
        },
        [PIX_FMT_PAL8] = {
            .convert = rgba32_to_pal8
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgba32_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgba32_to_gray
        },
    },
    [PIX_FMT_BGR24] = {
        [PIX_FMT_RGB24] = {
            .convert = bgr24_to_rgb24
        },
        [PIX_FMT_YUV420P] = {
            .convert = bgr24_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = bgr24_to_gray
        },
    },
    [PIX_FMT_RGB555] = {
        [PIX_FMT_RGB24] = {
            .convert = rgb555_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = rgb555_to_rgba32
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb555_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb555_to_gray
        },
    },
    [PIX_FMT_RGB565] = {
        [PIX_FMT_RGB24] = {
            .convert = rgb565_to_rgb24
        },
        [PIX_FMT_YUV420P] = {
            .convert = rgb565_to_yuv420p
        },
        [PIX_FMT_GRAY8] = {
            .convert = rgb565_to_gray
        },
    },
    [PIX_FMT_GRAY8] = {
        [PIX_FMT_RGB555] = {
            .convert = gray_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = gray_to_rgb565
        },
        [PIX_FMT_RGB24] = {
            .convert = gray_to_rgb24
        },
        [PIX_FMT_BGR24] = {
            .convert = gray_to_bgr24
        },
        [PIX_FMT_RGBA32] = {
            .convert = gray_to_rgba32
        },
        [PIX_FMT_MONOWHITE] = {
            .convert = gray_to_monowhite
        },
        [PIX_FMT_MONOBLACK] = {
            .convert = gray_to_monoblack
        },
    },
    [PIX_FMT_MONOWHITE] = {
        [PIX_FMT_GRAY8] = {
            .convert = monowhite_to_gray
        },
    },
    [PIX_FMT_MONOBLACK] = {
        [PIX_FMT_GRAY8] = {
            .convert = monoblack_to_gray
        },
    },
    [PIX_FMT_PAL8] = {
        [PIX_FMT_RGB555] = {
            .convert = pal8_to_rgb555
        },
        [PIX_FMT_RGB565] = {
            .convert = pal8_to_rgb565
        },
        [PIX_FMT_BGR24] = {
            .convert = pal8_to_bgr24
        },
        [PIX_FMT_RGB24] = {
            .convert = pal8_to_rgb24
        },
        [PIX_FMT_RGBA32] = {
            .convert = pal8_to_rgba32
        },
    },
    [PIX_FMT_UYVY411] = {
        [PIX_FMT_YUV411P] = {
            .convert = uyvy411_to_yuv411p,
        },
    },
};
  1765. int avpicture_alloc(AVPicture *picture,
  1766. int pix_fmt, int width, int height)
  1767. {
  1768. int size;
  1769. void *ptr;
  1770. size = avpicture_get_size(pix_fmt, width, height);
  1771. if(size<0)
  1772. goto fail;
  1773. ptr = av_malloc(size);
  1774. if (!ptr)
  1775. goto fail;
  1776. avpicture_fill(picture, ptr, pix_fmt, width, height);
  1777. return 0;
  1778. fail:
  1779. memset(picture, 0, sizeof(AVPicture));
  1780. return -1;
  1781. }
/* Free the pixel buffer allocated by avpicture_alloc(); the AVPicture
   struct itself belongs to the caller. */
void avpicture_free(AVPicture *picture)
{
    av_free(picture->data[0]);
}
  1786. /* return true if yuv planar */
  1787. static inline int is_yuv_planar(const PixFmtInfo *ps)
  1788. {
  1789. return (ps->color_type == FF_COLOR_YUV ||
  1790. ps->color_type == FF_COLOR_YUV_JPEG) &&
  1791. ps->pixel_type == FF_PIXEL_PLANAR;
  1792. }
  1793. /**
  1794. * Crop image top and left side
  1795. */
  1796. int img_crop(AVPicture *dst, const AVPicture *src,
  1797. int pix_fmt, int top_band, int left_band)
  1798. {
  1799. int y_shift;
  1800. int x_shift;
  1801. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || !is_yuv_planar(&pix_fmt_info[pix_fmt]))
  1802. return -1;
  1803. y_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  1804. x_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  1805. dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
  1806. dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift);
  1807. dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift);
  1808. dst->linesize[0] = src->linesize[0];
  1809. dst->linesize[1] = src->linesize[1];
  1810. dst->linesize[2] = src->linesize[2];
  1811. return 0;
  1812. }
/**
 * Pad image.
 * Writes 'color' borders of padtop/padbottom/padleft/padright pixels
 * around src into dst; only planar YUV formats are supported.
 * 'color' holds one value per plane (Y, U, V).
 * Returns 0 on success, -1 on unsupported format.
 */
int img_pad(AVPicture *dst, const AVPicture *src, int height, int width, int pix_fmt,
            int padtop, int padbottom, int padleft, int padright, int *color)
{
    uint8_t *optr, *iptr;
    int y_shift;
    int x_shift;
    int yheight;
    int i, y;

    if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB || !is_yuv_planar(&pix_fmt_info[pix_fmt]))
        return -1;

    for (i = 0; i < 3; i++) {
        /* plane 0 is luma (no subsampling); chroma planes scale by the shifts */
        x_shift = i ? pix_fmt_info[pix_fmt].x_chroma_shift : 0;
        y_shift = i ? pix_fmt_info[pix_fmt].y_chroma_shift : 0;

        /* top border plus the left border of the first payload row,
           written as a single contiguous run (relies on linesize padding) */
        if (padtop || padleft) {
            memset(dst->data[i], color[i], dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift));
        }

        if (padleft || padright || src) {
            if (src) { /* first line */
                iptr = src->data[i];
                optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift);
                /* NOTE(review): copies src->linesize[i] bytes, i.e. the full
                   stride rather than the visible width — only correct when
                   linesize equals the plane width; verify callers. */
                memcpy(optr, iptr, src->linesize[i]);
                iptr += src->linesize[i];
            }
            /* optr starts at the right border of each row; the memset below
               wraps around to cover right+left borders contiguously */
            optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) + (dst->linesize[i] - (padright >> x_shift));
            yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
            for (y = 0; y < yheight; y++) {
                memset(optr, color[i], (padleft + padright) >> x_shift);
                if (src) {
                    memcpy(optr + ((padleft + padright) >> x_shift), iptr, src->linesize[i]);
                    iptr += src->linesize[i];
                }
                optr += dst->linesize[i];
            }
        }

        /* right border of the last payload row plus the bottom border,
           again as one contiguous run */
        if (padbottom || padright) {
            optr = dst->data[i] + dst->linesize[i] * ((height - padbottom) >> y_shift) - (padright >> x_shift);
            memset(optr, color[i], dst->linesize[i] * (padbottom >> y_shift) + (padright >> x_shift));
        }
    }
    return 0;
}
#ifndef CONFIG_SWSCALER
/* XXX: always use linesize. Return -1 if not supported */
/* Convert src (src_pix_fmt, src_width x src_height) into dst
 * (dst_pix_fmt, same dimensions). Strategy, in order:
 *   1. same format -> plain copy;
 *   2. direct routine from convert_table;
 *   3. special cases gray<->planar YUV handled inline;
 *   4. planar YUV -> planar YUV via per-plane resample filters;
 *   5. otherwise recurse through an intermediate "normalized" format.
 * Returns 0 on success, -1 on failure.
 * NOTE: the lazy one-time init below is unsynchronized; concurrent
 * first calls from several threads could race. */
int img_convert(AVPicture *dst, int dst_pix_fmt,
                const AVPicture *src, int src_pix_fmt,
                int src_width, int src_height)
{
    static int inited;
    int i, ret, dst_width, dst_height, int_pix_fmt;
    const PixFmtInfo *src_pix, *dst_pix;
    const ConvertEntry *ce;
    AVPicture tmp1, *tmp = &tmp1;

    if (src_pix_fmt < 0 || src_pix_fmt >= PIX_FMT_NB ||
        dst_pix_fmt < 0 || dst_pix_fmt >= PIX_FMT_NB)
        return -1;
    if (src_width <= 0 || src_height <= 0)
        return 0;

    if (!inited) {
        inited = 1;
        img_convert_init();
    }

    dst_width = src_width;
    dst_height = src_height;

    dst_pix = &pix_fmt_info[dst_pix_fmt];
    src_pix = &pix_fmt_info[src_pix_fmt];
    if (src_pix_fmt == dst_pix_fmt) {
        /* no conversion needed: just copy */
        img_copy(dst, src, dst_pix_fmt, dst_width, dst_height);
        return 0;
    }

    ce = &convert_table[src_pix_fmt][dst_pix_fmt];
    if (ce->convert) {
        /* specific conversion routine */
        ce->convert(dst, src, dst_width, dst_height);
        return 0;
    }

    /* gray to YUV */
    if (is_yuv_planar(dst_pix) &&
        src_pix_fmt == PIX_FMT_GRAY8) {
        int w, h, y;
        uint8_t *d;

        if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
            /* JPEG range matches gray8 directly */
            ff_img_copy_plane(dst->data[0], dst->linesize[0],
                              src->data[0], src->linesize[0],
                              dst_width, dst_height);
        } else {
            /* compress full range into CCIR601 luma range */
            img_apply_table(dst->data[0], dst->linesize[0],
                            src->data[0], src->linesize[0],
                            dst_width, dst_height,
                            y_jpeg_to_ccir);
        }
        /* fill U and V with 128 */
        w = dst_width;
        h = dst_height;
        w >>= dst_pix->x_chroma_shift;
        h >>= dst_pix->y_chroma_shift;
        for(i = 1; i <= 2; i++) {
            d = dst->data[i];
            for(y = 0; y< h; y++) {
                memset(d, 128, w);
                d += dst->linesize[i];
            }
        }
        return 0;
    }

    /* YUV to gray */
    if (is_yuv_planar(src_pix) &&
        dst_pix_fmt == PIX_FMT_GRAY8) {
        if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
            ff_img_copy_plane(dst->data[0], dst->linesize[0],
                              src->data[0], src->linesize[0],
                              dst_width, dst_height);
        } else {
            img_apply_table(dst->data[0], dst->linesize[0],
                            src->data[0], src->linesize[0],
                            dst_width, dst_height,
                            y_ccir_to_jpeg);
        }
        return 0;
    }

    /* YUV to YUV planar */
    if (is_yuv_planar(dst_pix) && is_yuv_planar(src_pix)) {
        int x_shift, y_shift, w, h, xy_shift;
        void (*resize_func)(uint8_t *dst, int dst_wrap,
                            const uint8_t *src, int src_wrap,
                            int width, int height);

        /* compute chroma size of the smallest dimensions */
        w = dst_width;
        h = dst_height;
        if (dst_pix->x_chroma_shift >= src_pix->x_chroma_shift)
            w >>= dst_pix->x_chroma_shift;
        else
            w >>= src_pix->x_chroma_shift;
        if (dst_pix->y_chroma_shift >= src_pix->y_chroma_shift)
            h >>= dst_pix->y_chroma_shift;
        else
            h >>= src_pix->y_chroma_shift;

        /* xy_shift packs the signed horizontal/vertical shift deltas
           into two nibbles; negative deltas wrap to 0xf/0xe */
        x_shift = (dst_pix->x_chroma_shift - src_pix->x_chroma_shift);
        y_shift = (dst_pix->y_chroma_shift - src_pix->y_chroma_shift);
        xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);
        /* there must be filters for conversion at least from and to
           YUV444 format */
        switch(xy_shift) {
        case 0x00:
            resize_func = ff_img_copy_plane;
            break;
        case 0x10:
            resize_func = shrink21;
            break;
        case 0x20:
            resize_func = shrink41;
            break;
        case 0x01:
            resize_func = shrink12;
            break;
        case 0x11:
            resize_func = ff_shrink22;
            break;
        case 0x22:
            resize_func = ff_shrink44;
            break;
        case 0xf0:
            resize_func = grow21;
            break;
        case 0xe0:
            resize_func = grow41;
            break;
        case 0xff:
            resize_func = grow22;
            break;
        case 0xee:
            resize_func = grow44;
            break;
        case 0xf1:
            resize_func = conv411;
            break;
        default:
            /* currently not handled */
            goto no_chroma_filter;
        }

        ff_img_copy_plane(dst->data[0], dst->linesize[0],
                          src->data[0], src->linesize[0],
                          dst_width, dst_height);

        for(i = 1;i <= 2; i++)
            resize_func(dst->data[i], dst->linesize[i],
                        src->data[i], src->linesize[i],
                        dst_width>>dst_pix->x_chroma_shift, dst_height>>dst_pix->y_chroma_shift);
        /* if yuv color space conversion is needed, we do it here on
           the destination image */
        if (dst_pix->color_type != src_pix->color_type) {
            const uint8_t *y_table, *c_table;
            if (dst_pix->color_type == FF_COLOR_YUV) {
                y_table = y_jpeg_to_ccir;
                c_table = c_jpeg_to_ccir;
            } else {
                y_table = y_ccir_to_jpeg;
                c_table = c_ccir_to_jpeg;
            }
            /* in-place range remap: src and dst planes alias */
            img_apply_table(dst->data[0], dst->linesize[0],
                            dst->data[0], dst->linesize[0],
                            dst_width, dst_height,
                            y_table);

            for(i = 1;i <= 2; i++)
                img_apply_table(dst->data[i], dst->linesize[i],
                                dst->data[i], dst->linesize[i],
                                dst_width>>dst_pix->x_chroma_shift,
                                dst_height>>dst_pix->y_chroma_shift,
                                c_table);
        }
        return 0;
    }
 no_chroma_filter:

    /* try to use an intermediate format */
    if (src_pix_fmt == PIX_FMT_YUV422 ||
        dst_pix_fmt == PIX_FMT_YUV422) {
        /* specific case: convert to YUV422P first */
        int_pix_fmt = PIX_FMT_YUV422P;
    } else if (src_pix_fmt == PIX_FMT_UYVY422 ||
               dst_pix_fmt == PIX_FMT_UYVY422) {
        /* specific case: convert to YUV422P first */
        int_pix_fmt = PIX_FMT_YUV422P;
    } else if (src_pix_fmt == PIX_FMT_UYVY411 ||
               dst_pix_fmt == PIX_FMT_UYVY411) {
        /* specific case: convert to YUV411P first */
        int_pix_fmt = PIX_FMT_YUV411P;
    } else if ((src_pix->color_type == FF_COLOR_GRAY &&
                src_pix_fmt != PIX_FMT_GRAY8) ||
               (dst_pix->color_type == FF_COLOR_GRAY &&
                dst_pix_fmt != PIX_FMT_GRAY8)) {
        /* gray8 is the normalized format */
        int_pix_fmt = PIX_FMT_GRAY8;
    } else if ((is_yuv_planar(src_pix) &&
                src_pix_fmt != PIX_FMT_YUV444P &&
                src_pix_fmt != PIX_FMT_YUVJ444P)) {
        /* yuv444 is the normalized format */
        if (src_pix->color_type == FF_COLOR_YUV_JPEG)
            int_pix_fmt = PIX_FMT_YUVJ444P;
        else
            int_pix_fmt = PIX_FMT_YUV444P;
    } else if ((is_yuv_planar(dst_pix) &&
                dst_pix_fmt != PIX_FMT_YUV444P &&
                dst_pix_fmt != PIX_FMT_YUVJ444P)) {
        /* yuv444 is the normalized format */
        if (dst_pix->color_type == FF_COLOR_YUV_JPEG)
            int_pix_fmt = PIX_FMT_YUVJ444P;
        else
            int_pix_fmt = PIX_FMT_YUV444P;
    } else {
        /* the two formats are rgb or gray8 or yuv[j]444p */
        if (src_pix->is_alpha && dst_pix->is_alpha)
            int_pix_fmt = PIX_FMT_RGBA32;
        else
            int_pix_fmt = PIX_FMT_RGB24;
    }
    /* two-step conversion through a temporary picture; recursion depth
       is bounded because the intermediate formats are "normalized" */
    if (avpicture_alloc(tmp, int_pix_fmt, dst_width, dst_height) < 0)
        return -1;
    ret = -1;
    if (img_convert(tmp, int_pix_fmt,
                    src, src_pix_fmt, src_width, src_height) < 0)
        goto fail1;
    if (img_convert(dst, dst_pix_fmt,
                    tmp, int_pix_fmt, dst_width, dst_height) < 0)
        goto fail1;
    ret = 0;
 fail1:
    avpicture_free(tmp);
    return ret;
}
#endif
  2085. /* NOTE: we scan all the pixels to have an exact information */
  2086. static int get_alpha_info_pal8(const AVPicture *src, int width, int height)
  2087. {
  2088. const unsigned char *p;
  2089. int src_wrap, ret, x, y;
  2090. unsigned int a;
  2091. uint32_t *palette = (uint32_t *)src->data[1];
  2092. p = src->data[0];
  2093. src_wrap = src->linesize[0] - width;
  2094. ret = 0;
  2095. for(y=0;y<height;y++) {
  2096. for(x=0;x<width;x++) {
  2097. a = palette[p[0]] >> 24;
  2098. if (a == 0x00) {
  2099. ret |= FF_ALPHA_TRANSP;
  2100. } else if (a != 0xff) {
  2101. ret |= FF_ALPHA_SEMI_TRANSP;
  2102. }
  2103. p++;
  2104. }
  2105. p += src_wrap;
  2106. }
  2107. return ret;
  2108. }
  2109. /**
  2110. * Tell if an image really has transparent alpha values.
  2111. * @return ored mask of FF_ALPHA_xxx constants
  2112. */
  2113. int img_get_alpha_info(const AVPicture *src,
  2114. int pix_fmt, int width, int height)
  2115. {
  2116. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  2117. int ret;
  2118. pf = &pix_fmt_info[pix_fmt];
  2119. /* no alpha can be represented in format */
  2120. if (!pf->is_alpha)
  2121. return 0;
  2122. switch(pix_fmt) {
  2123. case PIX_FMT_RGBA32:
  2124. ret = get_alpha_info_rgba32(src, width, height);
  2125. break;
  2126. case PIX_FMT_RGB555:
  2127. ret = get_alpha_info_rgb555(src, width, height);
  2128. break;
  2129. case PIX_FMT_PAL8:
  2130. ret = get_alpha_info_pal8(src, width, height);
  2131. break;
  2132. default:
  2133. /* we do not know, so everything is indicated */
  2134. ret = FF_ALPHA_TRANSP | FF_ALPHA_SEMI_TRANSP;
  2135. break;
  2136. }
  2137. return ret;
  2138. }
#ifdef HAVE_MMX

/* MMX kernel for the in-place [-1 4 2 4 -1]/8 vertical filter,
   4 pixels per invocation.  Preconditions: mm7 == 0 (for the
   byte<->word unpacks) and mm6 == rounding constant 4 in each
   16-bit word.  Saves the raw lum_m2 bytes into lum_m4 (so the
   caller's sliding window keeps an unmodified copy) and stores
   the filtered, saturated result into lum_m2. */
#define DEINT_INPLACE_LINE_LUM \
                    movd_m2r(lum_m4[0],mm0);\
                    movd_m2r(lum_m3[0],mm1);\
                    movd_m2r(lum_m2[0],mm2);\
                    movd_m2r(lum_m1[0],mm3);\
                    movd_m2r(lum[0],mm4);\
                    punpcklbw_r2r(mm7,mm0);\
                    movd_r2m(mm2,lum_m4[0]);\
                    punpcklbw_r2r(mm7,mm1);\
                    punpcklbw_r2r(mm7,mm2);\
                    punpcklbw_r2r(mm7,mm3);\
                    punpcklbw_r2r(mm7,mm4);\
                    paddw_r2r(mm3,mm1);\
                    psllw_i2r(1,mm2);\
                    paddw_r2r(mm4,mm0);\
                    psllw_i2r(2,mm1);\
                    paddw_r2r(mm6,mm2);\
                    paddw_r2r(mm2,mm1);\
                    psubusw_r2r(mm0,mm1);\
                    psrlw_i2r(3,mm1);\
                    packuswb_r2r(mm7,mm1);\
                    movd_r2m(mm1,lum_m2[0]);

/* Same filter as above (result = (4*(lum_m3+lum_m1) + 2*lum_m2
   - lum_m4 - lum + 4) >> 3, unsigned-saturated), but writes the
   4 filtered pixels to a separate destination 'dst' instead of
   overwriting lum_m2.  Same mm6/mm7 preconditions. */
#define DEINT_LINE_LUM \
                    movd_m2r(lum_m4[0],mm0);\
                    movd_m2r(lum_m3[0],mm1);\
                    movd_m2r(lum_m2[0],mm2);\
                    movd_m2r(lum_m1[0],mm3);\
                    movd_m2r(lum[0],mm4);\
                    punpcklbw_r2r(mm7,mm0);\
                    punpcklbw_r2r(mm7,mm1);\
                    punpcklbw_r2r(mm7,mm2);\
                    punpcklbw_r2r(mm7,mm3);\
                    punpcklbw_r2r(mm7,mm4);\
                    paddw_r2r(mm3,mm1);\
                    psllw_i2r(1,mm2);\
                    paddw_r2r(mm4,mm0);\
                    psllw_i2r(2,mm1);\
                    paddw_r2r(mm6,mm2);\
                    paddw_r2r(mm2,mm1);\
                    psubusw_r2r(mm0,mm1);\
                    psrlw_i2r(3,mm1);\
                    packuswb_r2r(mm7,mm1);\
                    movd_r2m(mm1,dst[0]);
#endif
/* filter parameters: [-1 4 2 4 -1] // 8 */
/* Vertically filter one line: for each of 'size' pixels, dst gets
   (-lum_m4 + 4*lum_m3 + 2*lum_m2 + 4*lum_m1 - lum + 4) >> 3,
   clamped to [0,255].  The five inputs are successive rows of the
   same plane.  NOTE(review): the MMX path works in groups of 4 and
   leaves a size%4 tail unwritten — callers appear to guarantee
   4-pixel alignment; confirm. */
static void deinterlace_line(uint8_t *dst,
                             const uint8_t *lum_m4, const uint8_t *lum_m3,
                             const uint8_t *lum_m2, const uint8_t *lum_m1,
                             const uint8_t *lum,
                             int size)
{
#ifndef HAVE_MMX
    uint8_t *cm = cropTbl + MAX_NEG_CROP;  /* clamping lookup table */
    int sum;

    for(;size > 0;size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        /* round, divide by 8 and clamp to [0,255] */
        dst[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
        dst++;
    }
#else
    {
        mmx_t rounder;
        /* rounding constant: 4 replicated in each 16-bit word */
        rounder.uw[0]=4;
        rounder.uw[1]=4;
        rounder.uw[2]=4;
        rounder.uw[3]=4;
        pxor_r2r(mm7,mm7);     /* mm7 = 0, required by DEINT_LINE_LUM */
        movq_m2r(rounder,mm6); /* mm6 = rounder, required by DEINT_LINE_LUM */
    }
    for (;size > 3; size-=4) {
        DEINT_LINE_LUM
        lum_m4+=4;
        lum_m3+=4;
        lum_m2+=4;
        lum_m1+=4;
        lum+=4;
        dst+=4;
    }
#endif
}
/* In-place variant of deinterlace_line(): applies the same
   [-1 4 2 4 -1]/8 filter but writes the result over lum_m2, after
   first saving lum_m2's original bytes into lum_m4 so the caller's
   sliding window keeps an unmodified copy of that row.  Same MMX
   tail limitation as deinterlace_line (size%4 pixels skipped). */
static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *lum_m2, uint8_t *lum_m1, uint8_t *lum,
                                     int size)
{
#ifndef HAVE_MMX
    uint8_t *cm = cropTbl + MAX_NEG_CROP;  /* clamping lookup table */
    int sum;

    for(;size > 0;size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        /* preserve the pre-filter value of this row for the caller */
        lum_m4[0]=lum_m2[0];
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        /* round, divide by 8 and clamp to [0,255] */
        lum_m2[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
    }
#else
    {
        mmx_t rounder;
        /* rounding constant: 4 replicated in each 16-bit word */
        rounder.uw[0]=4;
        rounder.uw[1]=4;
        rounder.uw[2]=4;
        rounder.uw[3]=4;
        pxor_r2r(mm7,mm7);     /* mm7 = 0, required by the macro */
        movq_m2r(rounder,mm6); /* mm6 = rounder, required by the macro */
    }
    for (;size > 3; size-=4) {
        DEINT_INPLACE_LINE_LUM
        lum_m4+=4;
        lum_m3+=4;
        lum_m2+=4;
        lum_m1+=4;
        lum+=4;
    }
#endif
}
/* deinterlacing : 2 temporal taps, 3 spatial taps linear filter. The
   top field is copied as is, but the bottom field is deinterlaced
   against the top field. */
/* Deinterlace one width x height plane from 'src1' into 'dst':
   even (top-field) lines are copied verbatim, and each odd
   (bottom-field) line is rebuilt by deinterlace_line() from the five
   surrounding source rows.  The top edge is handled by replicating
   the first row (src_m2 == src_m1 == src1 initially) and the bottom
   edge by replicating src_0 in the final call. */
static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
                                     const uint8_t *src1, int src_wrap,
                                     int width, int height)
{
    const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
    int y;

    /* sliding window of 5 source rows, replicated at the top edge */
    src_m2 = src1;
    src_m1 = src1;
    src_0=&src_m1[src_wrap];
    src_p1=&src_0[src_wrap];
    src_p2=&src_p1[src_wrap];
    for(y=0;y<(height-2);y+=2) {
        /* copy the top-field line unchanged */
        memcpy(dst,src_m1,width);
        dst += dst_wrap;
        /* synthesize the bottom-field line from the window */
        deinterlace_line(dst,src_m2,src_m1,src_0,src_p1,src_p2,width);
        /* advance the window by two rows */
        src_m2 = src_0;
        src_m1 = src_p1;
        src_0 = src_p2;
        src_p1 += 2*src_wrap;
        src_p2 += 2*src_wrap;
        dst += dst_wrap;
    }
    memcpy(dst,src_m1,width);
    dst += dst_wrap;
    /* do last line */
    deinterlace_line(dst,src_m2,src_m1,src_0,src_0,src_0,width);
}
  2299. static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
  2300. int width, int height)
  2301. {
  2302. uint8_t *src_m1, *src_0, *src_p1, *src_p2;
  2303. int y;
  2304. uint8_t *buf;
  2305. buf = (uint8_t*)av_malloc(width);
  2306. src_m1 = src1;
  2307. memcpy(buf,src_m1,width);
  2308. src_0=&src_m1[src_wrap];
  2309. src_p1=&src_0[src_wrap];
  2310. src_p2=&src_p1[src_wrap];
  2311. for(y=0;y<(height-2);y+=2) {
  2312. deinterlace_line_inplace(buf,src_m1,src_0,src_p1,src_p2,width);
  2313. src_m1 = src_p1;
  2314. src_0 = src_p2;
  2315. src_p1 += 2*src_wrap;
  2316. src_p2 += 2*src_wrap;
  2317. }
  2318. /* do last line */
  2319. deinterlace_line_inplace(buf,src_m1,src_0,src_0,src_0,width);
  2320. av_free(buf);
  2321. }
  2322. /* deinterlace - if not supported return -1 */
  2323. int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
  2324. int pix_fmt, int width, int height)
  2325. {
  2326. int i;
  2327. if (pix_fmt != PIX_FMT_YUV420P &&
  2328. pix_fmt != PIX_FMT_YUV422P &&
  2329. pix_fmt != PIX_FMT_YUV444P &&
  2330. pix_fmt != PIX_FMT_YUV411P)
  2331. return -1;
  2332. if ((width & 3) != 0 || (height & 3) != 0)
  2333. return -1;
  2334. for(i=0;i<3;i++) {
  2335. if (i == 1) {
  2336. switch(pix_fmt) {
  2337. case PIX_FMT_YUV420P:
  2338. width >>= 1;
  2339. height >>= 1;
  2340. break;
  2341. case PIX_FMT_YUV422P:
  2342. width >>= 1;
  2343. break;
  2344. case PIX_FMT_YUV411P:
  2345. width >>= 2;
  2346. break;
  2347. default:
  2348. break;
  2349. }
  2350. }
  2351. if (src == dst) {
  2352. deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i],
  2353. width, height);
  2354. } else {
  2355. deinterlace_bottom_field(dst->data[i],dst->linesize[i],
  2356. src->data[i], src->linesize[i],
  2357. width, height);
  2358. }
  2359. }
  2360. #ifdef HAVE_MMX
  2361. emms();
  2362. #endif
  2363. return 0;
  2364. }
  2365. #undef FIX