/*
 * Misc image conversion routines
 * Copyright (c) 2001, 2002, 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * misc image conversion routines
 */

/* TODO:
 * - write 'ffimg' program to test all the image related stuff
 * - move all api to slice based system
 * - integrate deinterlacing, postprocessing and scaling in the conversion process
 */

#include "avcodec.h"
#include "dsputil.h"
#include "imgconvert.h"
#include "internal.h"
#include "libavutil/avassert.h"
#include "libavutil/colorspace.h"
#include "libavutil/common.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"

#if HAVE_MMX_EXTERNAL
#include "x86/dsputil_mmx.h"
#endif

#define FF_COLOR_NA       -1
#define FF_COLOR_RGB       0 /**< RGB color space */
#define FF_COLOR_GRAY      1 /**< gray color space */
#define FF_COLOR_YUV       2 /**< YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
#define FF_COLOR_YUV_JPEG  3 /**< YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */

#if HAVE_MMX_EXTERNAL
#define deinterlace_line_inplace ff_deinterlace_line_inplace_mmx
#define deinterlace_line         ff_deinterlace_line_mmx
#else
#define deinterlace_line_inplace deinterlace_line_inplace_c
#define deinterlace_line         deinterlace_line_c
#endif

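/* Treat a format as carrying alpha if it has 2 components (gray + alpha),
   4 components (e.g. RGBA/YUVA), or a palette (palette entries store alpha). */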
#define pixdesc_has_alpha(pixdesc) \
    ((pixdesc)->nb_components == 2 || (pixdesc)->nb_components == 4 || (pixdesc)->flags & PIX_FMT_PAL)

void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    av_assert0(desc);
    *h_shift = desc->log2_chroma_w;
    *v_shift = desc->log2_chroma_h;
}

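/* Classify a pixel format into one of the FF_COLOR_* categories above:
   1- and 2-component formats count as gray, formats whose name starts with
   "yuvj" as full-range JPEG YUV, RGB-flagged formats as RGB, formats with no
   components as N/A, and everything else as limited-range YUV. */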
static int get_color_type(const AVPixFmtDescriptor *desc)
{
    if (desc->nb_components == 1 || desc->nb_components == 2)
        return FF_COLOR_GRAY;

    if (desc->name && !strncmp(desc->name, "yuvj", 4))
        return FF_COLOR_YUV_JPEG;

    if (desc->flags & PIX_FMT_RGB)
        return FF_COLOR_RGB;

    if (desc->nb_components == 0)
        return FF_COLOR_NA;

    return FF_COLOR_YUV;
}

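/* Store the smallest and largest per-component bit depth of pix_fmt in *min
   and *max; returns AVERROR(EINVAL) for an invalid or opaque format. */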
static int get_pix_fmt_depth(int *min, int *max, enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    int i;

    if (!desc || !desc->nb_components) {
        *min = *max = 0;
        return AVERROR(EINVAL);
    }

    *min = INT_MAX, *max = -INT_MAX;
    for (i = 0; i < desc->nb_components; i++) {
        *min = FFMIN(desc->comp[i].depth_minus1 + 1, *min);
        *max = FFMAX(desc->comp[i].depth_minus1 + 1, *max);
    }
    return 0;
}

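/* Score how well src_pix_fmt converts to dst_pix_fmt: start from INT_MAX and
   subtract a penalty for each kind of loss (bit depth, chroma resolution,
   colorspace, chroma, alpha, color quantization) that is both incurred and
   selected in 'consider'. The incurred FF_LOSS_* flags are returned through
   *lossp; a negative return value indicates an invalid destination format or
   a descriptor error from get_pix_fmt_depth(). */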
static int get_pix_fmt_score(enum AVPixelFormat dst_pix_fmt,
                             enum AVPixelFormat src_pix_fmt,
                             unsigned *lossp, unsigned consider)
{
    const AVPixFmtDescriptor *src_desc = av_pix_fmt_desc_get(src_pix_fmt);
    const AVPixFmtDescriptor *dst_desc = av_pix_fmt_desc_get(dst_pix_fmt);
    int src_color, dst_color;
    int src_min_depth, src_max_depth, dst_min_depth, dst_max_depth;
    int ret, loss, i, nb_components;
    int score = INT_MAX;

    if (dst_pix_fmt >= AV_PIX_FMT_NB || dst_pix_fmt <= AV_PIX_FMT_NONE)
        return ~0;

    /* compute loss */
    *lossp = loss = 0;

    if (dst_pix_fmt == src_pix_fmt)
        return INT_MAX;

    if ((ret = get_pix_fmt_depth(&src_min_depth, &src_max_depth, src_pix_fmt)) < 0)
        return ret;
    if ((ret = get_pix_fmt_depth(&dst_min_depth, &dst_max_depth, dst_pix_fmt)) < 0)
        return ret;

    src_color = get_color_type(src_desc);
    dst_color = get_color_type(dst_desc);
    nb_components = FFMIN(src_desc->nb_components, dst_desc->nb_components);

    for (i = 0; i < nb_components; i++)
        if (src_desc->comp[i].depth_minus1 > dst_desc->comp[i].depth_minus1 && (consider & FF_LOSS_DEPTH)) {
            loss |= FF_LOSS_DEPTH;
            score -= 65536 >> dst_desc->comp[i].depth_minus1;
        }

    if (consider & FF_LOSS_RESOLUTION) {
        if (dst_desc->log2_chroma_w > src_desc->log2_chroma_w) {
            loss |= FF_LOSS_RESOLUTION;
            score -= 256 << dst_desc->log2_chroma_w;
        }
        if (dst_desc->log2_chroma_h > src_desc->log2_chroma_h) {
            loss |= FF_LOSS_RESOLUTION;
            score -= 256 << dst_desc->log2_chroma_h;
        }
        // don't favor 422 over 420 if downsampling is needed, because 420 has much better support on the decoder side
        if (dst_desc->log2_chroma_w == 1 && src_desc->log2_chroma_w == 0 &&
            dst_desc->log2_chroma_h == 1 && src_desc->log2_chroma_h == 0) {
            score += 512;
        }
    }

    if (consider & FF_LOSS_COLORSPACE)
        switch (dst_color) {
        case FF_COLOR_RGB:
            if (src_color != FF_COLOR_RGB &&
                src_color != FF_COLOR_GRAY)
                loss |= FF_LOSS_COLORSPACE;
            break;
        case FF_COLOR_GRAY:
            if (src_color != FF_COLOR_GRAY)
                loss |= FF_LOSS_COLORSPACE;
            break;
        case FF_COLOR_YUV:
            if (src_color != FF_COLOR_YUV)
                loss |= FF_LOSS_COLORSPACE;
            break;
        case FF_COLOR_YUV_JPEG:
            if (src_color != FF_COLOR_YUV_JPEG &&
                src_color != FF_COLOR_YUV &&
                src_color != FF_COLOR_GRAY)
                loss |= FF_LOSS_COLORSPACE;
            break;
        default:
            /* fail safe test */
            if (src_color != dst_color)
                loss |= FF_LOSS_COLORSPACE;
            break;
        }
    if (loss & FF_LOSS_COLORSPACE)
        score -= (nb_components * 65536) >> FFMIN(dst_desc->comp[0].depth_minus1, src_desc->comp[0].depth_minus1);

    if (dst_color == FF_COLOR_GRAY &&
        src_color != FF_COLOR_GRAY && (consider & FF_LOSS_CHROMA)) {
        loss |= FF_LOSS_CHROMA;
        score -= 2 * 65536;
    }
    if (!pixdesc_has_alpha(dst_desc) && (pixdesc_has_alpha(src_desc) && (consider & FF_LOSS_ALPHA))) {
        loss |= FF_LOSS_ALPHA;
        score -= 65536;
    }
    if (dst_pix_fmt == AV_PIX_FMT_PAL8 && (consider & FF_LOSS_COLORQUANT) &&
        (src_pix_fmt != AV_PIX_FMT_PAL8 && (src_color != FF_COLOR_GRAY || (pixdesc_has_alpha(src_desc) && (consider & FF_LOSS_ALPHA))))) {
        loss |= FF_LOSS_COLORQUANT;
        score -= 65536;
    }

    *lossp = loss;

    return score;
}

int avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt,
                             enum AVPixelFormat src_pix_fmt,
                             int has_alpha)
{
    int loss;
    int ret = get_pix_fmt_score(dst_pix_fmt, src_pix_fmt, &loss, has_alpha ? ~0 : ~FF_LOSS_ALPHA);
    if (ret < 0)
        return ret;
    return loss;
}

#if FF_API_FIND_BEST_PIX_FMT
enum AVPixelFormat avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, enum AVPixelFormat src_pix_fmt,
                                             int has_alpha, int *loss_ptr)
{
    enum AVPixelFormat dst_pix_fmt;
    int i;

    if (loss_ptr) /* all losses count (for backward compatibility) */
        *loss_ptr = 0;

    dst_pix_fmt = AV_PIX_FMT_NONE; /* so first iteration doesn't have to be treated special */
    for (i = 0; i < FFMIN(AV_PIX_FMT_NB, 64); i++) {
        if (pix_fmt_mask & (1ULL << i))
            dst_pix_fmt = avcodec_find_best_pix_fmt_of_2(dst_pix_fmt, i, src_pix_fmt, has_alpha, loss_ptr);
    }
    return dst_pix_fmt;
}
#endif /* FF_API_FIND_BEST_PIX_FMT */

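/* Of the two candidate destination formats, pick the one with the higher
   conversion score for src_pix_fmt. On a tie, prefer the candidate with fewer
   padded bits per pixel, then the one with fewer components. The losses of the
   chosen format are reported back through loss_ptr. */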
enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
                                                  enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr)
{
    enum AVPixelFormat dst_pix_fmt;
    int loss1, loss2, loss_mask;
    const AVPixFmtDescriptor *desc1 = av_pix_fmt_desc_get(dst_pix_fmt1);
    const AVPixFmtDescriptor *desc2 = av_pix_fmt_desc_get(dst_pix_fmt2);
    int score1, score2;

    loss_mask = loss_ptr ? ~*loss_ptr : ~0; /* use loss mask if provided */
    if (!has_alpha)
        loss_mask &= ~FF_LOSS_ALPHA;

    dst_pix_fmt = AV_PIX_FMT_NONE;
    score1 = get_pix_fmt_score(dst_pix_fmt1, src_pix_fmt, &loss1, loss_mask);
    score2 = get_pix_fmt_score(dst_pix_fmt2, src_pix_fmt, &loss2, loss_mask);

    if (score1 == score2) {
        if (av_get_padded_bits_per_pixel(desc2) != av_get_padded_bits_per_pixel(desc1)) {
            dst_pix_fmt = av_get_padded_bits_per_pixel(desc2) < av_get_padded_bits_per_pixel(desc1) ? dst_pix_fmt2 : dst_pix_fmt1;
        } else {
            dst_pix_fmt = desc2->nb_components < desc1->nb_components ? dst_pix_fmt2 : dst_pix_fmt1;
        }
    } else {
        dst_pix_fmt = score1 < score2 ? dst_pix_fmt2 : dst_pix_fmt1;
    }

    if (loss_ptr)
        *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
    return dst_pix_fmt;
}

#if AV_HAVE_INCOMPATIBLE_FORK_ABI
enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat *pix_fmt_list,
                                              enum AVPixelFormat src_pix_fmt,
                                              int has_alpha, int *loss_ptr)
{
    return avcodec_find_best_pix_fmt_of_list(pix_fmt_list, src_pix_fmt, has_alpha, loss_ptr);
}
#else
enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
                                              enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr)
{
    return avcodec_find_best_pix_fmt_of_2(dst_pix_fmt1, dst_pix_fmt2, src_pix_fmt, has_alpha, loss_ptr);
}
#endif

enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(enum AVPixelFormat *pix_fmt_list,
                                                     enum AVPixelFormat src_pix_fmt,
                                                     int has_alpha, int *loss_ptr)
{
    int i;
    enum AVPixelFormat best = AV_PIX_FMT_NONE;

    for (i = 0; pix_fmt_list[i] != AV_PIX_FMT_NONE; i++)
        best = avcodec_find_best_pix_fmt_of_2(best, pix_fmt_list[i], src_pix_fmt, has_alpha, loss_ptr);

    return best;
}

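/* Illustrative usage sketch (not part of this file): a caller could pick the
 * closest supported format from a candidate list roughly like this. The list
 * and source format below are arbitrary example values.
 *
 *     enum AVPixelFormat candidates[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
 *                                         AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE };
 *     int loss = 0; // in: FF_LOSS_* flags to ignore; out: losses actually incurred
 *     enum AVPixelFormat best =
 *         avcodec_find_best_pix_fmt_of_list(candidates, AV_PIX_FMT_YUVA420P, 1, &loss);
 */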
/* 2x2 -> 1x1 */
void ff_shrink22(uint8_t *dst, int dst_wrap,
                 const uint8_t *src, int src_wrap,
                 int width, int height)
{
    int w;
    const uint8_t *s1, *s2;
    uint8_t *d;

    for (; height > 0; height--) {
        s1 = src;
        s2 = s1 + src_wrap;
        d = dst;
        for (w = width; w >= 4; w -= 4) {
            d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
            d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
            d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
            d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
            s1 += 8;
            s2 += 8;
            d += 4;
        }
        for (; w > 0; w--) {
            d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
            s1 += 2;
            s2 += 2;
            d++;
        }
        src += 2 * src_wrap;
        dst += dst_wrap;
    }
}

/* 4x4 -> 1x1 */
void ff_shrink44(uint8_t *dst, int dst_wrap,
                 const uint8_t *src, int src_wrap,
                 int width, int height)
{
    int w;
    const uint8_t *s1, *s2, *s3, *s4;
    uint8_t *d;

    for (; height > 0; height--) {
        s1 = src;
        s2 = s1 + src_wrap;
        s3 = s2 + src_wrap;
        s4 = s3 + src_wrap;
        d = dst;
        for (w = width; w > 0; w--) {
            d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
                    s2[0] + s2[1] + s2[2] + s2[3] +
                    s3[0] + s3[1] + s3[2] + s3[3] +
                    s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
            s1 += 4;
            s2 += 4;
            s3 += 4;
            s4 += 4;
            d++;
        }
        src += 4 * src_wrap;
        dst += dst_wrap;
    }
}

/* 8x8 -> 1x1 */
void ff_shrink88(uint8_t *dst, int dst_wrap,
                 const uint8_t *src, int src_wrap,
                 int width, int height)
{
    int w, i;

    for (; height > 0; height--) {
        for (w = width; w > 0; w--) {
            int tmp = 0;
            for (i = 0; i < 8; i++) {
                tmp += src[0] + src[1] + src[2] + src[3] + src[4] + src[5] + src[6] + src[7];
                src += src_wrap;
            }
            *(dst++) = (tmp + 32) >> 6;
            src += 8 - 8 * src_wrap;
        }
        src += 8 * src_wrap - 8 * width;
        dst += dst_wrap - width;
    }
}

/* return true if yuv planar */
static inline int is_yuv_planar(const AVPixFmtDescriptor *desc)
{
    int i;
    int planes[4] = { 0 };

    if (     desc->flags & PIX_FMT_RGB
        || !(desc->flags & PIX_FMT_PLANAR))
        return 0;

    /* set the used planes */
    for (i = 0; i < desc->nb_components; i++)
        planes[desc->comp[i].plane] = 1;

    /* if there is an unused plane, the format is not planar */
    for (i = 0; i < desc->nb_components; i++)
        if (!planes[i])
            return 0;
    return 1;
}

int av_picture_crop(AVPicture *dst, const AVPicture *src,
                    enum AVPixelFormat pix_fmt, int top_band, int left_band)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    int y_shift;
    int x_shift;

    if (pix_fmt < 0 || pix_fmt >= AV_PIX_FMT_NB)
        return -1;

    y_shift = desc->log2_chroma_h;
    x_shift = desc->log2_chroma_w;

    if (is_yuv_planar(desc)) {
        dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
        dst->data[1] = src->data[1] + ((top_band >> y_shift) * src->linesize[1]) + (left_band >> x_shift);
        dst->data[2] = src->data[2] + ((top_band >> y_shift) * src->linesize[2]) + (left_band >> x_shift);
    } else {
        if (top_band % (1 << y_shift) || left_band % (1 << x_shift))
            return -1;
        if (left_band) //FIXME add support for this too
            return -1;
        dst->data[0] = src->data[0] + (top_band * src->linesize[0]) + left_band;
    }

    dst->linesize[0] = src->linesize[0];
    dst->linesize[1] = src->linesize[1];
    dst->linesize[2] = src->linesize[2];
    return 0;
}

int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width,
                   enum AVPixelFormat pix_fmt, int padtop, int padbottom, int padleft, int padright,
                   int *color)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    uint8_t *optr;
    int y_shift;
    int x_shift;
    int yheight;
    int i, y;

    if (pix_fmt < 0 || pix_fmt >= AV_PIX_FMT_NB ||
        !is_yuv_planar(desc)) return -1;

    for (i = 0; i < 3; i++) {
        x_shift = i ? desc->log2_chroma_w : 0;
        y_shift = i ? desc->log2_chroma_h : 0;

        if (padtop || padleft) {
            memset(dst->data[i], color[i],
                   dst->linesize[i] * (padtop >> y_shift) + (padleft >> x_shift));
        }

        if (padleft || padright) {
            optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
                   (dst->linesize[i] - (padright >> x_shift));
            yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
            for (y = 0; y < yheight; y++) {
                memset(optr, color[i], (padleft + padright) >> x_shift);
                optr += dst->linesize[i];
            }
        }

        if (src) { /* first line */
            uint8_t *iptr = src->data[i];
            optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
                   (padleft >> x_shift);
            memcpy(optr, iptr, (width - padleft - padright) >> x_shift);
            iptr += src->linesize[i];
            optr = dst->data[i] + dst->linesize[i] * (padtop >> y_shift) +
                   (dst->linesize[i] - (padright >> x_shift));
            yheight = (height - 1 - (padtop + padbottom)) >> y_shift;
            for (y = 0; y < yheight; y++) {
                memset(optr, color[i], (padleft + padright) >> x_shift);
                memcpy(optr + ((padleft + padright) >> x_shift), iptr,
                       (width - padleft - padright) >> x_shift);
                iptr += src->linesize[i];
                optr += dst->linesize[i];
            }
        }

        if (padbottom || padright) {
            optr = dst->data[i] + dst->linesize[i] *
                   ((height - padbottom) >> y_shift) - (padright >> x_shift);
            memset(optr, color[i], dst->linesize[i] *
                   (padbottom >> y_shift) + (padright >> x_shift));
        }
    }
    return 0;
}

#if FF_API_DEINTERLACE

#if !HAVE_MMX_EXTERNAL

/* filter parameters: [-1 4 2 4 -1] // 8 */
static void deinterlace_line_c(uint8_t *dst,
                               const uint8_t *lum_m4, const uint8_t *lum_m3,
                               const uint8_t *lum_m2, const uint8_t *lum_m1,
                               const uint8_t *lum,
                               int size)
{
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    int sum;

    for (; size > 0; size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        dst[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
        dst++;
    }
}

static void deinterlace_line_inplace_c(uint8_t *lum_m4, uint8_t *lum_m3,
                                       uint8_t *lum_m2, uint8_t *lum_m1,
                                       uint8_t *lum, int size)
{
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
    int sum;

    for (; size > 0; size--) {
        sum = -lum_m4[0];
        sum += lum_m3[0] << 2;
        sum += lum_m2[0] << 1;
        lum_m4[0] = lum_m2[0];
        sum += lum_m1[0] << 2;
        sum += -lum[0];
        lum_m2[0] = cm[(sum + 4) >> 3];
        lum_m4++;
        lum_m3++;
        lum_m2++;
        lum_m1++;
        lum++;
    }
}
#endif /* !HAVE_MMX_EXTERNAL */

/* deinterlacing : 2 temporal taps, 3 spatial taps linear filter. The
   top field is copied as is, but the bottom field is deinterlaced
   against the top field. */
static void deinterlace_bottom_field(uint8_t *dst, int dst_wrap,
                                     const uint8_t *src1, int src_wrap,
                                     int width, int height)
{
    const uint8_t *src_m2, *src_m1, *src_0, *src_p1, *src_p2;
    int y;

    src_m2 = src1;
    src_m1 = src1;
    src_0  = &src_m1[src_wrap];
    src_p1 = &src_0[src_wrap];
    src_p2 = &src_p1[src_wrap];
    for (y = 0; y < (height - 2); y += 2) {
        memcpy(dst, src_m1, width);
        dst += dst_wrap;
        deinterlace_line(dst, src_m2, src_m1, src_0, src_p1, src_p2, width);
        src_m2 = src_0;
        src_m1 = src_p1;
        src_0  = src_p2;
        src_p1 += 2 * src_wrap;
        src_p2 += 2 * src_wrap;
        dst += dst_wrap;
    }
    memcpy(dst, src_m1, width);
    dst += dst_wrap;
    /* do last line */
    deinterlace_line(dst, src_m2, src_m1, src_0, src_0, src_0, width);
}

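/* In-place variant of deinterlace_bottom_field(): each filtered line
   overwrites its source, so the original content of the just-overwritten line
   is saved in a scratch buffer and reused as the topmost ("m4") filter tap
   for the next output line. */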
static void deinterlace_bottom_field_inplace(uint8_t *src1, int src_wrap,
                                             int width, int height)
{
    uint8_t *src_m1, *src_0, *src_p1, *src_p2;
    int y;
    uint8_t *buf;

    buf = av_malloc(width);
    if (!buf)
        return; /* allocation failed; leave the picture untouched */

    src_m1 = src1;
    memcpy(buf, src_m1, width);
    src_0  = &src_m1[src_wrap];
    src_p1 = &src_0[src_wrap];
    src_p2 = &src_p1[src_wrap];
    for (y = 0; y < (height - 2); y += 2) {
        deinterlace_line_inplace(buf, src_m1, src_0, src_p1, src_p2, width);
        src_m1 = src_p1;
        src_0  = src_p2;
        src_p1 += 2 * src_wrap;
        src_p2 += 2 * src_wrap;
    }
    /* do last line */
    deinterlace_line_inplace(buf, src_m1, src_0, src_0, src_0, width);
    av_free(buf);
}

int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
                          enum AVPixelFormat pix_fmt, int width, int height)
{
    int i;

    if (pix_fmt != AV_PIX_FMT_YUV420P &&
        pix_fmt != AV_PIX_FMT_YUVJ420P &&
        pix_fmt != AV_PIX_FMT_YUV422P &&
        pix_fmt != AV_PIX_FMT_YUVJ422P &&
        pix_fmt != AV_PIX_FMT_YUV444P &&
        pix_fmt != AV_PIX_FMT_YUV411P &&
        pix_fmt != AV_PIX_FMT_GRAY8)
        return -1;
    if ((width & 3) != 0 || (height & 3) != 0)
        return -1;

    for (i = 0; i < 3; i++) {
        if (i == 1) {
            switch (pix_fmt) {
            case AV_PIX_FMT_YUVJ420P:
            case AV_PIX_FMT_YUV420P:
                width >>= 1;
                height >>= 1;
                break;
            case AV_PIX_FMT_YUV422P:
            case AV_PIX_FMT_YUVJ422P:
                width >>= 1;
                break;
            case AV_PIX_FMT_YUV411P:
                width >>= 2;
                break;
            default:
                break;
            }
            if (pix_fmt == AV_PIX_FMT_GRAY8) {
                break;
            }
        }
        if (src == dst) {
            deinterlace_bottom_field_inplace(dst->data[i], dst->linesize[i],
                                             width, height);
        } else {
            deinterlace_bottom_field(dst->data[i], dst->linesize[i],
                                     src->data[i], src->linesize[i],
                                     width, height);
        }
    }
    emms_c();
    return 0;
}
#endif /* FF_API_DEINTERLACE */

#ifdef TEST

int main(void)
{
    int i;
    int err = 0;
    int skip = 0;

    for (i = 0; i < AV_PIX_FMT_NB * 2; i++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
        if (!desc || !desc->name) {
            skip++;
            continue;
        }
        if (skip) {
            av_log(NULL, AV_LOG_INFO, "%3d unused pixel format values\n", skip);
            skip = 0;
        }
        av_log(NULL, AV_LOG_INFO, "pix fmt %s yuv_plan:%d avg_bpp:%d colortype:%d\n", desc->name, is_yuv_planar(desc), av_get_padded_bits_per_pixel(desc), get_color_type(desc));
        if ((!(desc->flags & PIX_FMT_ALPHA)) != (desc->nb_components != 2 && desc->nb_components != 4)) {
            av_log(NULL, AV_LOG_ERROR, "Alpha flag mismatch\n");
            err = 1;
        }
    }
    return err;
}

#endif