/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * The simplest mpeg encoder (well, it was the simplest!).
 */

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/motion_vector.h"
#include "libavutil/video_enc_params.h"

#include "avcodec.h"
#include "blockdsp.h"
#include "h264chroma.h"
#include "idctdsp.h"
#include "internal.h"
#include "mathops.h"
#include "mpeg_er.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "mpegvideodata.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include "qpeldsp.h"
#include "thread.h"
#include "wmv2.h"
#include <limits.h>
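
/* The dct_unquantize_*_c() functions below are the reference C
 * implementations of inverse quantization: each rebuilds a DCT
 * coefficient from its quantized level, the current qscale and the
 * intra/inter quantization matrix. dct_init() installs these defaults
 * and then lets the per-architecture init functions override them
 * with optimized versions. */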
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs = s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    /* XXX: only MPEG-1 */
    quant_matrix = s->intra_matrix;
    for (i = 1; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}

static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs = s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for (i = 0; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}
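
/* The MPEG-2 variants below differ from MPEG-1 in three ways: qscale may
 * come from the non-linear table when q_scale_type is set, the scan covers
 * all 64 coefficients when alternate_scan is set, and instead of forcing
 * each reconstructed level odd they apply mismatch control: the bitexact
 * intra and the inter versions toggle the LSB of block[63] with the parity
 * of the coefficient sum (block[63] ^= sum & 1). */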
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 qscale <<= 1;

    if (s->alternate_scan) nCoeffs = 63;
    else                   nCoeffs = s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for (i = 1; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
            }
            block[j] = level;
        }
    }
}

static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum = -1;

    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 qscale <<= 1;

    if (s->alternate_scan) nCoeffs = 63;
    else                   nCoeffs = s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    sum += block[0];
    quant_matrix = s->intra_matrix;
    for (i = 1; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
            }
            block[j] = level;
            sum += level;
        }
    }
    block[63] ^= sum & 1;
}

static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum = -1;

    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 qscale <<= 1;

    if (s->alternate_scan) nCoeffs = 63;
    else                   nCoeffs = s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for (i = 0; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 5;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 5;
            }
            block[j] = level;
            sum += level;
        }
    }
    block[63] ^= sum & 1;
}
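
/* H.263-style dequantization is a plain affine reconstruction, without a
 * quantization matrix: level' = sign(level) * (2*qscale*|level| + qadd),
 * with qadd = (qscale - 1) | 1 forced odd (and qadd = 0 for the intra DC
 * path in AIC mode). */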
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    av_assert2(s->block_last_index[n] >= 0 || s->h263_aic);

    qmul = qscale << 1;

    if (!s->h263_aic) {
        block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
        qadd = (qscale - 1) | 1;
    } else {
        qadd = 0;
    }
    if (s->ac_pred)
        nCoeffs = 63;
    else
        nCoeffs = s->intra_scantable.raster_end[s->block_last_index[n]];

    for (i = 1; i <= nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}

static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    av_assert2(s->block_last_index[n] >= 0);

    qadd = (qscale - 1) | 1;
    qmul = qscale << 1;

    nCoeffs = s->inter_scantable.raster_end[s->block_last_index[n]];

    for (i = 0; i <= nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}
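
/* gray16()/gray8() are debug stubs: when motion compensation is disabled
 * via FF_DEBUG_NOMC, dct_init() installs them in place of the hpel
 * copy/average functions so every 16x16 or 8x8 destination block is just
 * painted mid-gray (128). */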
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    while (h--)
        memset(dst + h * linesize, 128, 16);
}

static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    while (h--)
        memset(dst + h * linesize, 128, 8);
}

/* init common dct for both encoder and decoder */
static av_cold int dct_init(MpegEncContext *s)
{
    ff_blockdsp_init(&s->bdsp, s->avctx);
    ff_h264chroma_init(&s->h264chroma, 8); //for lowres
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_mpegvideodsp_init(&s->mdsp);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    if (s->avctx->debug & FF_DEBUG_NOMC) {
        int i;
        for (i = 0; i < 4; i++) {
            s->hdsp.avg_pixels_tab[0][i] = gray16;
            s->hdsp.put_pixels_tab[0][i] = gray16;
            s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;

            s->hdsp.avg_pixels_tab[1][i] = gray8;
            s->hdsp.put_pixels_tab[1][i] = gray8;
            s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
        }
    }

    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    if (HAVE_INTRINSICS_NEON)
        ff_mpv_common_init_neon(s);

    if (ARCH_ALPHA)
        ff_mpv_common_init_axp(s);
    if (ARCH_ARM)
        ff_mpv_common_init_arm(s);
    if (ARCH_PPC)
        ff_mpv_common_init_ppc(s);
    if (ARCH_X86)
        ff_mpv_common_init_x86(s);
    if (ARCH_MIPS)
        ff_mpv_common_init_mips(s);

    return 0;
}
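
/* ff_mpv_idct_init() selects the IDCT implementation and then builds the
 * scantables, permuted through idsp.idct_permutation so that coefficient
 * order matches the input layout the chosen IDCT expects. */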
av_cold void ff_mpv_idct_init(MpegEncContext *s)
{
    if (s->codec_id == AV_CODEC_ID_MPEG4)
        s->idsp.mpeg4_studio_profile = s->studio_profile;
    ff_idctdsp_init(&s->idsp, s->avctx);

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    }
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
}

static int alloc_picture(MpegEncContext *s, Picture *pic)
{
    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, 0, 0,
                            s->chroma_x_shift, s->chroma_y_shift, s->out_format,
                            s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
                            &s->linesize, &s->uvlinesize);
}

static int init_duplicate_context(MpegEncContext *s)
{
    int y_size  = s->b8_stride * (2 * s->mb_height + 1);
    int c_size  = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    if (s->mb_height & 1)
        yc_size += 2 * s->b8_stride + 2 * s->mb_stride;

    if (s->encoding) {
        if (!FF_ALLOCZ_TYPED_ARRAY(s->me.map,       ME_MAP_SIZE) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->me.score_map, ME_MAP_SIZE))
            return AVERROR(ENOMEM);

        if (s->noise_reduction) {
            if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_error_sum, 2))
                return AVERROR(ENOMEM);
        }
    }
    if (!FF_ALLOCZ_TYPED_ARRAY(s->blocks, 2))
        return AVERROR(ENOMEM);
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    if (!(s->block32         = av_mallocz(sizeof(*s->block32))) ||
        !(s->dpcm_macroblock = av_mallocz(sizeof(*s->dpcm_macroblock))))
        return AVERROR(ENOMEM);
    s->dpcm_direction = 0;

    if (s->avctx->codec_tag == AV_RL32("VCR2")) {
        // exchange uv
        FFSWAP(void *, s->pblocks[4], s->pblocks[5]);
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        if (!FF_ALLOCZ_TYPED_ARRAY(s->ac_val_base, yc_size))
            return AVERROR(ENOMEM);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
}

/**
 * Initialize an MpegEncContext's thread contexts. Presumes that
 * slice_context_count is already set and that all the fields
 * that are freed/reset in free_duplicate_context() are NULL.
 */
static int init_duplicate_contexts(MpegEncContext *s)
{
    int nb_slices = s->slice_context_count, ret;

    /* We initialize the copies before the original so that
     * fields allocated in init_duplicate_context are NULL after
     * copying. This prevents double-frees upon allocation error. */
    for (int i = 1; i < nb_slices; i++) {
        s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
        if (!s->thread_context[i])
            return AVERROR(ENOMEM);
        if ((ret = init_duplicate_context(s->thread_context[i])) < 0)
            return ret;
        s->thread_context[i]->start_mb_y =
            (s->mb_height * (i    ) + nb_slices / 2) / nb_slices;
        s->thread_context[i]->end_mb_y   =
            (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
    }
    s->start_mb_y = 0;
    s->end_mb_y   = nb_slices > 1 ? (s->mb_height + nb_slices / 2) / nb_slices
                                  : s->mb_height;
    return init_duplicate_context(s);
}

static void free_duplicate_context(MpegEncContext *s)
{
    if (!s)
        return;

    av_freep(&s->sc.edge_emu_buffer);
    av_freep(&s->me.scratchpad);
    s->me.temp =
    s->sc.rd_scratchpad =
    s->sc.b_scratchpad =
    s->sc.obmc_scratchpad = NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->block32);
    av_freep(&s->dpcm_macroblock);
    av_freep(&s->ac_val_base);
    s->block = NULL;
}

static void free_duplicate_contexts(MpegEncContext *s)
{
    for (int i = 1; i < s->slice_context_count; i++) {
        free_duplicate_context(s->thread_context[i]);
        av_freep(&s->thread_context[i]);
    }
    free_duplicate_context(s);
}

static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(sc.edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(sc.rd_scratchpad);
    COPY(sc.b_scratchpad);
    COPY(sc.obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(block32);
    COPY(dpcm_macroblock);
    COPY(dpcm_direction);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
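
/* ff_update_duplicate_context() copies the master context into a slice
 * thread context wholesale, but backs up and restores the per-thread
 * fields listed in backup_duplicate_context() (scratch buffers, block
 * pointers, MB range) around the memcpy, so each slice thread keeps its
 * own allocations. */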
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
        // exchange uv
        FFSWAP(void *, dst->pblocks[4], dst->pblocks[5]);
    }
    if (!dst->sc.edge_emu_buffer &&
        (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me,
                                       &dst->sc, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    return 0;
}

int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    int i, ret;
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src)
        return 0;

    av_assert0(s != s1);

    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit
    if (!s->context_initialized) {
        int err;
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx                 = dst;
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        if (s1->context_initialized) {
//             s->picture_range_start += MAX_PICTURE_COUNT;
//             s->picture_range_end   += MAX_PICTURE_COUNT;
            ff_mpv_idct_init(s);
            if ((err = ff_mpv_common_init(s)) < 0) {
                memset(s, 0, sizeof(MpegEncContext));
                s->avctx = dst;
                return err;
            }
        }
    }

    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        s->context_reinit = 0;
        s->height = s1->height;
        s->width  = s1->width;
        if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
            return ret;
    }

    s->avctx->coded_height  = s1->avctx->coded_height;
    s->avctx->coded_width   = s1->avctx->coded_width;
    s->avctx->width         = s1->avctx->width;
    s->avctx->height        = s1->avctx->height;

    s->quarter_sample       = s1->quarter_sample;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;

    av_assert0(!s->picture || s->picture != s1->picture);
    if (s->picture)
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
            if (s1->picture && s1->picture[i].f->buf[0] &&
                (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
                return ret;
        }

#define UPDATE_PICTURE(pic)\
do {\
    ff_mpeg_unref_picture(s->avctx, &s->pic);\
    if (s1->pic.f && s1->pic.f->buf[0])\
        ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
    else\
        ret = ff_update_picture_tables(&s->pic, &s1->pic);\
    if (ret < 0)\
        return ret;\
} while (0)

    UPDATE_PICTURE(current_picture);
    UPDATE_PICTURE(last_picture);
    UPDATE_PICTURE(next_picture);

#define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
    ((pic && pic >= old_ctx->picture &&                   \
      pic < old_ctx->picture + MAX_PICTURE_COUNT) ?       \
        &new_ctx->picture[pic - old_ctx->picture] : NULL)

    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    // Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;
    s->padding_bug_score    = s1->padding_bug_score;

    // MPEG-4 timing info
    memcpy(&s->last_time_base, &s1->last_time_base,
           (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
           (char *) &s1->last_time_base);

    // B-frame info
    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->droppable    = s1->droppable;

    // DivX handling (doesn't work)
    s->divx_packed  = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            AV_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size) {
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
            if (!s->bitstream_buffer) {
                s->bitstream_buffer_size = 0;
                return AVERROR(ENOMEM);
            }
        }
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               AV_INPUT_BUFFER_PADDING_SIZE);
    }

    // linesize-dependent scratch buffer allocation
    if (!s->sc.edge_emu_buffer)
        if (s1->linesize) {
            if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
                                        &s->sc, s1->linesize) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                       "scratch buffers.\n");
                return AVERROR(ENOMEM);
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
                   "be allocated due to unknown size.\n");
        }

    // MPEG-2/interlacing info
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
    }

    return 0;
}
/**
 * Set the given MpegEncContext to common defaults
 * (same for encoding and decoding).
 * The changed fields will not depend upon the
 * prior state of the MpegEncContext.
 */
void ff_mpv_common_defaults(MpegEncContext *s)
{
    s->y_dc_scale_table     =
    s->c_dc_scale_table     = ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table  = ff_default_chroma_qscale_table;
    s->progressive_frame    = 1;
    s->progressive_sequence = 1;
    s->picture_structure    = PICT_FRAME;

    s->coded_picture_number = 0;
    s->picture_number       = 0;

    s->f_code               = 1;
    s->b_code               = 1;

    s->slice_context_count  = 1;
}

/**
 * Initialize the given MpegEncContext for decoding.
 * The changed fields will not depend upon
 * the prior state of the MpegEncContext.
 */
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
{
    ff_mpv_common_defaults(s);

    s->avctx           = avctx;
    s->width           = avctx->coded_width;
    s->height          = avctx->coded_height;
    s->codec_id        = avctx->codec->id;
    s->workaround_bugs = avctx->workaround_bugs;

    /* convert fourcc to upper case */
    s->codec_tag       = avpriv_toupper4(avctx->codec_tag);
}
/**
 * Initialize and allocate MpegEncContext fields dependent on the resolution.
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overridden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num     = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    if (s->mb_height & 1)
        yc_size += 2 * s->b8_stride + 2 * s->mb_stride;

    if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_index2xy, s->mb_num + 1))
        return AVERROR(ENOMEM);
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables */
        if (!FF_ALLOCZ_TYPED_ARRAY(s->p_mv_table_base,            mv_table_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->b_forw_mv_table_base,       mv_table_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->b_back_mv_table_base,       mv_table_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_forw_mv_table_base, mv_table_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_back_mv_table_base, mv_table_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->b_direct_mv_table_base,     mv_table_size))
            return AVERROR(ENOMEM);
        s->p_mv_table            = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base     + s->mb_stride + 1;

        /* Allocate MB type table */
        if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_type,      mb_array_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size) ||
            !FF_ALLOC_TYPED_ARRAY (s->cplx_tab,     mb_array_size) ||
            !FF_ALLOC_TYPED_ARRAY (s->bits_tab,     mb_array_size))
            return AVERROR(ENOMEM);
    }

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    if (!FF_ALLOCZ_TYPED_ARRAY(s->b_field_mv_table_base[i][j][k], mv_table_size))
                        return AVERROR(ENOMEM);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                if (!FF_ALLOCZ_TYPED_ARRAY(s->b_field_select_table[i][j], mv_table_size * 2) ||
                    !FF_ALLOCZ_TYPED_ARRAY(s->p_field_mv_table_base[i][j], mv_table_size))
                    return AVERROR(ENOMEM);
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
            }
            if (!FF_ALLOCZ_TYPED_ARRAY(s->p_field_select_table[i], mv_table_size * 2))
                return AVERROR(ENOMEM);
        }
    }
    if (s->out_format == FMT_H263) {
        /* cbp values, cbp, ac_pred, pred_dir */
        if (!FF_ALLOCZ_TYPED_ARRAY(s->coded_block_base, y_size + (s->mb_height & 1) * 2 * s->b8_stride) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->cbp_table,        mb_array_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->pred_dir_table,   mb_array_size))
            return AVERROR(ENOMEM);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for error resilience of intra-frames
        if (!FF_ALLOCZ_TYPED_ARRAY(s->dc_val_base, yc_size))
            return AVERROR(ENOMEM);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is an intra block, init macroblock skip table */
    if (!FF_ALLOC_TYPED_ARRAY(s->mbintra_table, mb_array_size) ||
        // Note the + 1 is for a quicker MPEG-4 slice_end detection
        !FF_ALLOCZ_TYPED_ARRAY(s->mbskip_table, mb_array_size + 2))
        return AVERROR(ENOMEM);
    memset(s->mbintra_table, 1, mb_array_size);

    return ff_mpeg_er_init(s);
}
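
/* Note on the table geometry in init_context_frame() above: mb_stride is
 * mb_width + 1 and the *_mv_table pointers are offset by mb_stride + 1
 * into their _base allocations, presumably so predictors can read the row
 * above and the column to the left of any macroblock without per-edge
 * special cases. */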
static void clear_context(MpegEncContext *s)
{
    int i, j, k;

    memset(&s->next_picture,    0, sizeof(s->next_picture));
    memset(&s->last_picture,    0, sizeof(s->last_picture));
    memset(&s->current_picture, 0, sizeof(s->current_picture));
    memset(&s->new_picture,     0, sizeof(s->new_picture));

    memset(s->thread_context, 0, sizeof(s->thread_context));

    s->me.map          = NULL;
    s->me.score_map    = NULL;
    s->dct_error_sum   = NULL;
    s->block           = NULL;
    s->blocks          = NULL;
    s->block32         = NULL;
    memset(s->pblocks, 0, sizeof(s->pblocks));
    s->dpcm_direction  = 0;
    s->dpcm_macroblock = NULL;
    s->ac_val_base     = NULL;
    s->ac_val[0]       =
    s->ac_val[1]       =
    s->ac_val[2]       = NULL;

    s->sc.edge_emu_buffer = NULL;
    s->me.scratchpad      = NULL;
    s->me.temp            =
    s->sc.rd_scratchpad   =
    s->sc.b_scratchpad    =
    s->sc.obmc_scratchpad = NULL;

    s->bitstream_buffer                = NULL;
    s->allocated_bitstream_buffer_size = 0;
    s->picture                    = NULL;
    s->mb_type                    = NULL;
    s->p_mv_table_base            = NULL;
    s->b_forw_mv_table_base       = NULL;
    s->b_back_mv_table_base       = NULL;
    s->b_bidir_forw_mv_table_base = NULL;
    s->b_bidir_back_mv_table_base = NULL;
    s->b_direct_mv_table_base     = NULL;
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                s->b_field_mv_table_base[i][j][k] = NULL;
                s->b_field_mv_table[i][j][k]      = NULL;
            }
            s->b_field_select_table[i][j]  = NULL;
            s->p_field_mv_table_base[i][j] = NULL;
            s->p_field_mv_table[i][j]      = NULL;
        }
        s->p_field_select_table[i] = NULL;
    }

    s->dc_val_base      = NULL;
    s->coded_block_base = NULL;
    s->mbintra_table    = NULL;
    s->cbp_table        = NULL;
    s->pred_dir_table   = NULL;

    s->mbskip_table = NULL;

    s->er.error_status_table = NULL;
    s->er.er_temp_buffer     = NULL;
    s->mb_index2xy           = NULL;
    s->lambda_table          = NULL;

    s->cplx_tab = NULL;
    s->bits_tab = NULL;
}
/**
 * Init common structure for both encoder and decoder.
 * This assumes that some variables like width/height are already set.
 */
av_cold int ff_mpv_common_init(MpegEncContext *s)
{
    int i, ret;
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    clear_context(s);

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
        return AVERROR(EINVAL);
    }

    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return AVERROR(EINVAL);

    dct_init(s);

    /* set chroma shifts */
    ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                           &s->chroma_x_shift,
                                           &s->chroma_y_shift);
    if (ret)
        return ret;

    if (!FF_ALLOCZ_TYPED_ARRAY(s->picture, MAX_PICTURE_COUNT))
        return AVERROR(ENOMEM);
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        s->picture[i].f = av_frame_alloc();
        if (!s->picture[i].f)
            goto fail_nomem;
    }

    if (!(s->next_picture.f    = av_frame_alloc()) ||
        !(s->last_picture.f    = av_frame_alloc()) ||
        !(s->current_picture.f = av_frame_alloc()) ||
        !(s->new_picture.f     = av_frame_alloc()))
        goto fail_nomem;

    if ((ret = init_context_frame(s)))
        goto fail;

    s->parse_context.state = -1;

    s->context_initialized = 1;
    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0]   = s;
    s->slice_context_count = nb_slices;

//     if (s->width && s->height) {
    ret = init_duplicate_contexts(s);
    if (ret < 0)
        goto fail;
//     }

    return 0;
 fail_nomem:
    ret = AVERROR(ENOMEM);
 fail:
    ff_mpv_common_end(s);
    return ret;
}
/**
 * Frees and resets MpegEncContext fields depending on the resolution,
 * as well as the slice thread contexts.
 * It is used during resolution changes to avoid a full reinitialization
 * of the codec.
 */
static void free_context_frame(MpegEncContext *s)
{
    int i, j, k;

    free_duplicate_contexts(s);

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);

    av_freep(&s->er.error_status_table);
    av_freep(&s->er.er_temp_buffer);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);

    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);

    s->linesize = s->uvlinesize = 0;
}
int ff_mpv_common_frame_size_change(MpegEncContext *s)
{
    int i, err = 0;

    if (!s->context_initialized)
        return AVERROR(EINVAL);

    free_context_frame(s);

    if (s->picture)
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            s->picture[i].needs_realloc = 1;
        }

    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;

    // init
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if ((s->width || s->height) &&
        (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
        goto fail;

    /* set chroma shifts */
    err = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                           &s->chroma_x_shift,
                                           &s->chroma_y_shift);
    if (err < 0)
        goto fail;

    if ((err = init_context_frame(s)))
        goto fail;

    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0] = s;

    if (s->width && s->height) {
        err = init_duplicate_contexts(s);
        if (err < 0)
            goto fail;
    }
    return 0;
 fail:
    ff_mpv_common_end(s);
    return err;
}

/* free common structure for both encoder and decoder */
void ff_mpv_common_end(MpegEncContext *s)
{
    int i;

    if (!s)
        return;

    free_context_frame(s);
    if (s->slice_context_count > 1)
        s->slice_context_count = 1;

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    if (!s->avctx)
        return;

    if (s->picture) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            ff_free_picture_tables(&s->picture[i]);
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
            av_frame_free(&s->picture[i].f);
        }
    }
    av_freep(&s->picture);
    ff_free_picture_tables(&s->last_picture);
    ff_mpeg_unref_picture(s->avctx, &s->last_picture);
    av_frame_free(&s->last_picture.f);
    ff_free_picture_tables(&s->current_picture);
    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    av_frame_free(&s->current_picture.f);
    ff_free_picture_tables(&s->next_picture);
    ff_mpeg_unref_picture(s->avctx, &s->next_picture);
    av_frame_free(&s->next_picture.f);
    ff_free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(s->avctx, &s->new_picture);
    av_frame_free(&s->new_picture.f);

    s->context_initialized      = 0;
    s->last_picture_ptr         =
    s->next_picture_ptr         =
    s->current_picture_ptr      = NULL;
    s->linesize = s->uvlinesize = 0;
}
static void gray_frame(AVFrame *frame)
{
    int i, h_chroma_shift, v_chroma_shift;

    av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);

    for (i = 0; i < frame->height; i++)
        memset(frame->data[0] + frame->linesize[0] * i, 0x80, frame->width);
    for (i = 0; i < AV_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
        memset(frame->data[1] + frame->linesize[1] * i,
               0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
        memset(frame->data[2] + frame->linesize[2] * i,
               0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
    }
}
/**
 * Generic function called after decoding
 * the header and before a frame is decoded.
 */
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i, ret;
    Picture *pic;
    s->mb_skipped = 0;

    if (!ff_thread_can_start_frame(avctx)) {
        av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
        return -1;
    }

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
    }

    /* release forgotten pictures */
    /* if (MPEG-1/2/4 / H.263) */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (&s->picture[i] != s->last_picture_ptr &&
            &s->picture[i] != s->next_picture_ptr &&
            s->picture[i].reference && !s->picture[i].needs_realloc) {
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
        }
    }

    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    ff_mpeg_unref_picture(s->avctx, &s->last_picture);
    ff_mpeg_unref_picture(s->avctx, &s->next_picture);

    /* release non reference frames */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].reference)
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
    }

    if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
        // we already have an unused image
        // (maybe it was set before reading the header)
        pic = s->current_picture_ptr;
    } else {
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        pic = &s->picture[i];
    }

    pic->reference = 0;
    if (!s->droppable) {
        if (s->pict_type != AV_PICTURE_TYPE_B)
            pic->reference = 3;
    }

    pic->f->coded_picture_number = s->coded_picture_number++;

    if (alloc_picture(s, pic) < 0)
        return -1;

    s->current_picture_ptr = pic;
    // FIXME use only the vars from current_pic
    s->current_picture_ptr->f->top_field_first = s->top_field_first;
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
        s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        if (s->picture_structure != PICT_FRAME)
            s->current_picture_ptr->f->top_field_first =
                (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
    }
    s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
                                                  !s->progressive_sequence;
    s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;

    s->current_picture_ptr->f->pict_type = s->pict_type;
    // if (s->avctx->flags && AV_CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }
    ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr, s->current_picture_ptr,
            s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
            s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
            s->pict_type, s->droppable);

    if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
        (s->pict_type != AV_PICTURE_TYPE_I)) {
        int h_chroma_shift, v_chroma_shift;
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                         &h_chroma_shift, &v_chroma_shift);
        if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
            av_log(avctx, AV_LOG_DEBUG,
                   "allocating dummy last picture for B frame\n");
        else if (s->pict_type != AV_PICTURE_TYPE_I)
            av_log(avctx, AV_LOG_ERROR,
                   "warning: first frame is not a keyframe\n");

        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->last_picture_ptr = &s->picture[i];

        s->last_picture_ptr->reference    = 3;
        s->last_picture_ptr->f->key_frame = 0;
        s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;

        if (alloc_picture(s, s->last_picture_ptr) < 0) {
            s->last_picture_ptr = NULL;
            return -1;
        }

        if (!avctx->hwaccel) {
            for (i = 0; i < avctx->height; i++)
                memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0] * i,
                       0x80, avctx->width);
            if (s->last_picture_ptr->f->data[2]) {
                for (i = 0; i < AV_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
                    memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1] * i,
                           0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
                    memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2] * i,
                           0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
                }
            }

            if (s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263) {
                for (i = 0; i < avctx->height; i++)
                    memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0] * i,
                           16, avctx->width);
            }
        }

        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
    }
    if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
        s->pict_type == AV_PICTURE_TYPE_B) {
        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->next_picture_ptr = &s->picture[i];

        s->next_picture_ptr->reference    = 3;
        s->next_picture_ptr->f->key_frame = 0;
        s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;

        if (alloc_picture(s, s->next_picture_ptr) < 0) {
            s->next_picture_ptr = NULL;
            return -1;
        }
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
    }

#if 0 // BUFREF-FIXME
    memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
    memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
#endif
    if (s->last_picture_ptr) {
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                     s->last_picture_ptr->f->buf[0]));

    if (s->picture_structure != PICT_FRAME) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f->data[i] +=
                    s->current_picture.f->linesize[i];
            }
            s->current_picture.f->linesize[i] *= 2;
            s->last_picture.f->linesize[i]    *= 2;
            s->next_picture.f->linesize[i]    *= 2;
        }
    }

    /* set dequantizer, we can't do it during init as
     * it might change for MPEG-4 and we can't do it in the header
     * decode as init is not called for MPEG-4 there yet */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->avctx->debug & FF_DEBUG_NOMC) {
        gray_frame(s->current_picture_ptr->f);
    }

    return 0;
}
/* called after a frame has been decoded */
void ff_mpv_frame_end(MpegEncContext *s)
{
    emms_c();

    if (s->current_picture.reference)
        ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
}

void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
{
    ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
                         p->qscale_table, p->motion_val, &s->low_delay,
                         s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
}

int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
{
    AVVideoEncParams *par;
    int mult = (qp_type == FF_QSCALE_TYPE_MPEG1) ? 2 : 1;
    unsigned int nb_mb = p->alloc_mb_height * p->alloc_mb_width;
    unsigned int x, y;

    if (!(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS))
        return 0;

    par = av_video_enc_params_create_side_data(f, AV_VIDEO_ENC_PARAMS_MPEG2, nb_mb);
    if (!par)
        return AVERROR(ENOMEM);

    for (y = 0; y < p->alloc_mb_height; y++)
        for (x = 0; x < p->alloc_mb_width; x++) {
            const unsigned int block_idx = y * p->alloc_mb_width + x;
            const unsigned int     mb_xy = y * p->alloc_mb_stride + x;
            AVVideoBlockParams *b = av_video_enc_params_block(par, block_idx);

            b->src_x = x * 16;
            b->src_y = y * 16;
            b->w     = 16;
            b->h     = 16;

            b->delta_qp = p->qscale_table[mb_xy] * mult;
        }

    return 0;
}
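
/* The *_lowres helpers below implement motion compensation at reduced
 * resolution (avctx->lowres): coordinates and block sizes shrink by
 * 2^lowres, the fractional part of each motion vector is kept via the
 * (2 << lowres) - 1 mask, and the H.264 chroma MC functions (initialized
 * in dct_init() "for lowres") serve as generic bilinear interpolators
 * for all planes, luma included. */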
static inline int hpel_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest, uint8_t *src,
                                     int field_based, int field_select,
                                     int src_x, int src_y,
                                     int width, int height, ptrdiff_t stride,
                                     int h_edge_pos, int v_edge_pos,
                                     int w, int h, h264_chroma_mc_func *pix_op,
                                     int motion_x, int motion_y)
{
    const int lowres   = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 3);
    const int s_mask   = (2 << lowres) - 1;
    int emu = 0;
    int sx, sy;

    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;

    src += src_y * stride + src_x;

    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w,                 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 w + 1, (h + 1) << field_based,
                                 src_x, src_y << field_based,
                                 h_edge_pos, v_edge_pos);
        src = s->sc.edge_emu_buffer;
        emu = 1;
    }

    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    if (field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
/* apply one mpeg motion vector to the three components */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                                                uint8_t *dest_y,
                                                uint8_t *dest_cb,
                                                uint8_t *dest_cr,
                                                int field_based,
                                                int bottom_field,
                                                int field_select,
                                                uint8_t **ref_picture,
                                                h264_chroma_mc_func *pix_op,
                                                int motion_x, int motion_y,
                                                int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
    ptrdiff_t uvlinesize, linesize;
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres - 1 + s->chroma_x_shift, 3);
    const int block_s    = 8 >> lowres;
    const int s_mask     = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.f->linesize[0] << field_based;
    uvlinesize = s->current_picture.f->linesize[1] << field_based;

    // FIXME obviously not perfect but qpel will not work in lowres anyway
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if (field_based) {
        motion_y += (bottom_field - field_select) * ((1 << lowres) - 1);
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        // even chroma MVs are full-pel in H.261
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvsx    = (2 * mx) & s_mask;
        uvsy    = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y =    mb_y * block_s + (my >> lowres);
    } else {
        if (s->chroma_y_shift) {
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvsx    = mx & s_mask;
            uvsy    = my & s_mask;
            uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
            uvsrc_y =   (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if (s->chroma_x_shift) {
                // Chroma422
                mx      = motion_x / 2;
                uvsx    = mx & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x * block_s + (mx >> (lowres + 1));
            } else {
                // Chroma444
                uvsx    = motion_x & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y < 0 ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 linesize >> field_based, linesize >> field_based,
                                 17, 17 + field_based,
                                 src_x, src_y << field_based, h_edge_pos,
                                 v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        int hc = s->chroma_y_shift ? (h + 1 - bottom_field) >> 1 : h;
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (hc) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
        }
    }
    // FIXME h261 lowres loop filter
}
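
/* chroma_4mv_motion_lowres() handles the 8x8 (4MV) case: the caller has
 * already summed the four luma vectors, and the sum is reduced to a
 * single chroma vector with H.263 rounding before both chroma planes
 * are compensated. */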
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                            uint8_t *dest_cb, uint8_t *dest_cr,
                                            uint8_t **ref_picture,
                                            h264_chroma_mc_func *pix_op,
                                            int mx, int my)
{
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres, 3);
    const int block_s    = 8 >> lowres;
    const int s_mask     = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, sx, sy;
    ptrdiff_t offset;
    uint8_t *ptr;

    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
        (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    }
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres  = s->avctx->lowres;
    const int block_s = 8 >> lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                                            s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f->data;
            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y  += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
/**
 * find the lowest MB row referenced in the MVs
 */
static int lowest_referenced_row(MpegEncContext *s, int dir)
{
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int my, off, i, mvs;

    if (s->picture_structure != PICT_FRAME || s->mcsel)
        goto unhandled;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mvs = 1;
        break;
    case MV_TYPE_16X8:
        mvs = 2;
        break;
    case MV_TYPE_8X8:
        mvs = 4;
        break;
    default:
        goto unhandled;
    }

    for (i = 0; i < mvs; i++) {
        my = s->mv[dir][i][1];
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    }

    off = ((FFMAX(-my_min, my_max) << qpel_shift) + 63) >> 6;

    return av_clip(s->mb_y + off, 0, s->mb_height - 1);
unhandled:
    return s->mb_height - 1;
}
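
/* Worked example (a sketch): with half-pel MVs (qpel_shift = 1) a vertical
 * MV of +35 half-pel units is 70 quarter-pel units; one MB row is 16 pixels
 * = 64 quarter-pel units, so off = (70 + 63) >> 6 = 2 rows below the
 * current one, rounding up. Unhandled cases (field pictures, GMC) fall back
 * to the conservative answer, the last MB row of the frame. */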
/* put block[] to dest[] */
static inline void put_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->idsp.idct_put(dest, line_size, block);
}

/* add block[] to dest[] */
static inline void add_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size)
{
    if (s->block_last_index[i] >= 0) {
        s->idsp.idct_add(dest, line_size, block);
    }
}

static inline void add_dequant_dct(MpegEncContext *s,
                                   int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    if (s->block_last_index[i] >= 0) {
        s->dct_unquantize_inter(s, block, i, qscale);
        s->idsp.idct_add(dest, line_size, block);
    }
}
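
/* The three helpers divide the work as follows: put_dct() is the intra path
 * (dequantize, then idct_put() overwrites the destination); add_dct() is the
 * inter path for codecs whose coefficients were already dequantized while
 * parsing the bitstream; add_dequant_dct() is the inter path where
 * dequantization is deferred to reconstruction. block_last_index < 0 means
 * the block has no coded coefficients, so the add paths skip it entirely. */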
/**
 * Clean dc, ac, coded_block for the current non-intra MB.
 */
void ff_clean_intra_table_entries(MpegEncContext *s)
{
    int wrap = s->b8_stride;
    int xy = s->block_index[0];

    s->dc_val[0][xy           ] =
    s->dc_val[0][xy + 1       ] =
    s->dc_val[0][xy + wrap    ] =
    s->dc_val[0][xy + 1 + wrap] = 1024;
    /* ac pred */
    memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
    if (s->msmpeg4_version >= 3) {
        s->coded_block[xy           ] =
        s->coded_block[xy + 1       ] =
        s->coded_block[xy + wrap    ] =
        s->coded_block[xy + 1 + wrap] = 0;
    }
    /* chroma */
    wrap = s->mb_stride;
    xy = s->mb_x + s->mb_y * wrap;
    s->dc_val[1][xy] =
    s->dc_val[2][xy] = 1024;
    /* ac pred */
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));

    s->mbintra_table[xy] = 0;
}
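
/* Why 1024 (a hedged note): H.263/MPEG-4-style DC prediction stores DC
 * values scaled by the DC quantizer, and 1024 is roughly 128 * 8, i.e.
 * mid-grey at that scale, the neutral value a predictor should see when its
 * neighbour was not intra coded. Compare the 128 << intra_dc_precision
 * reset used for last_dc in the MPEG-1/2 path below. */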
/* generic function called after a macroblock has been parsed by the
   decoder or after it has been encoded by the encoder.

   Important variables used:
   s->mb_intra : true if intra macroblock
   s->mv_dir   : motion vector direction
   s->mv_type  : motion vector type
   s->mv       : motion vector
   s->interlaced_dct : true if interlaced dct used (mpeg2)
 */
static av_always_inline
void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64],
                                 int lowres_flag, int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

    if (CONFIG_XVMC &&
        s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
        s->avctx->hwaccel->decode_mb(s); // xvmc uses pblocks
        return;
    }

    if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
        int i, j;
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        for (i = 0; i < 6; i++) {
            for (j = 0; j < 64; j++) {
                av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                       block[i][s->idsp.idct_permutation[j]]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    s->current_picture.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if (s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    } else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy] = 1;

    if ((s->avctx->flags & AV_CODEC_FLAG_PSNR) || s->frame_skip_threshold || s->frame_skip_factor ||
        !(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
          s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f->linesize[0]; // not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f->linesize[1];
        const int readable   = s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size = lowres_flag ? 8 >> s->avctx->lowres : 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if (!s->encoding) {
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped = 0;
                av_assert2(s->pict_type != AV_PICTURE_TYPE_I);
                *mbskip_ptr = 1;
            } else if (!s->current_picture.reference) {
                *mbskip_ptr = 1;
            } else {
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;
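
        /* Interlaced-DCT addressing (a descriptive note): when
         * interlaced_dct is set, the blocks hold alternate field lines, so
         * each block is sampled at twice the frame stride (dct_linesize) and
         * the lower block pair starts only one frame line down
         * (dct_offset = linesize) instead of block_size lines down. */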
        if (readable) {
            dest_y  = s->dest[0];
            dest_cb = s->dest[1];
            dest_cr = s->dest[2];
        } else {
            dest_y  = s->sc.b_scratchpad;
            dest_cb = s->sc.b_scratchpad + 16 * linesize;
            dest_cr = s->sc.b_scratchpad + 32 * linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if (!s->encoding) {
                if (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_FRAME) {
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress(&s->last_picture_ptr->tf,
                                                 lowest_referenced_row(s, 0),
                                                 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress(&s->next_picture_ptr->tf,
                                                 lowest_referenced_row(s, 1),
                                                 0);
                    }
                }
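
                /* Frame-threading note: a decoder thread may run ahead of
                 * the thread reconstructing its reference frame, so before
                 * doing MC it blocks until the reference has progressed past
                 * the lowest MB row any of this macroblock's MVs can reach;
                 * lowest_referenced_row() above provides that conservative
                 * bound. */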
                if (lowres_flag) {
                    h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
                        op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
                    }
                } else {
                    op_qpix = s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
                        op_pix = s->hdsp.put_pixels_tab;
                    } else {
                        op_pix = s->hdsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
                        op_pix  = s->hdsp.avg_pixels_tab;
                        op_qpix = s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
                    }
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if (s->avctx->skip_idct) {
                if (   (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                    || (s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                    ||  s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }
            /* add dct residue */
            if (s->encoding || !(s->msmpeg4_version || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO
                                 || (s->codec_id == AV_CODEC_ID_MPEG4 && !s->mpeg_quant))) {
                add_dequant_dct(s, block[0], 0, dest_y,                           dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y + block_size,              dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset,              dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                    if (s->chroma_y_shift) {
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    } else {
                        dct_linesize >>= 1;
                        dct_offset   >>= 1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if (is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)) {
                add_dct(s, block[0], 0, dest_y,                           dct_linesize);
                add_dct(s, block[1], 1, dest_y + block_size,              dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset,              dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                    if (s->chroma_y_shift) { // Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    } else {
                        // chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * block_size;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize);
                        if (!s->chroma_x_shift) { // Chroma444
                            add_dct(s, block[8],  8,  dest_cb + block_size,              dct_linesize);
                            add_dct(s, block[9],  9,  dest_cr + block_size,              dct_linesize);
                            add_dct(s, block[10], 10, dest_cb + block_size + dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr + block_size + dct_offset, dct_linesize);
                        }
                    }
                } // gray
            } else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* Only MPEG-4 Simple Studio Profile is supported in > 8-bit mode.
               TODO: Integrate 10-bit properly into mpegvideo.c so that ER works properly */
            if (s->avctx->bits_per_raw_sample > 8) {
                const int act_block_size = block_size * 2;

                if (s->dpcm_direction == 0) {
                    s->idsp.idct_put(dest_y,                               dct_linesize, (int16_t*)(*s->block32)[0]);
                    s->idsp.idct_put(dest_y + act_block_size,              dct_linesize, (int16_t*)(*s->block32)[1]);
                    s->idsp.idct_put(dest_y + dct_offset,                  dct_linesize, (int16_t*)(*s->block32)[2]);
                    s->idsp.idct_put(dest_y + dct_offset + act_block_size, dct_linesize, (int16_t*)(*s->block32)[3]);

                    dct_linesize = uvlinesize << s->interlaced_dct;
                    dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * block_size;

                    s->idsp.idct_put(dest_cb,              dct_linesize, (int16_t*)(*s->block32)[4]);
                    s->idsp.idct_put(dest_cr,              dct_linesize, (int16_t*)(*s->block32)[5]);
                    s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, (int16_t*)(*s->block32)[6]);
                    s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, (int16_t*)(*s->block32)[7]);
                    if (!s->chroma_x_shift) { // Chroma444
                        s->idsp.idct_put(dest_cb + act_block_size,              dct_linesize, (int16_t*)(*s->block32)[8]);
                        s->idsp.idct_put(dest_cr + act_block_size,              dct_linesize, (int16_t*)(*s->block32)[9]);
                        s->idsp.idct_put(dest_cb + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[10]);
                        s->idsp.idct_put(dest_cr + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[11]);
                    }
                } else if (s->dpcm_direction == 1) {
                    int i, w, h;
                    uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
                    int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
                    for (i = 0; i < 3; i++) {
                        int idx = 0;
                        int vsub = i ? s->chroma_y_shift : 0;
                        int hsub = i ? s->chroma_x_shift : 0;
                        for (h = 0; h < (16 >> vsub); h++) {
                            for (w = 0; w < (16 >> hsub); w++)
                                dest_pcm[i][w] = (*s->dpcm_macroblock)[i][idx++];
                            dest_pcm[i] += linesize[i] / 2;
                        }
                    }
                } else if (s->dpcm_direction == -1) {
                    int i, w, h;
                    uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
                    int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
                    for (i = 0; i < 3; i++) {
                        int idx = 0;
                        int vsub = i ? s->chroma_y_shift : 0;
                        int hsub = i ? s->chroma_x_shift : 0;
                        dest_pcm[i] += (linesize[i] / 2) * ((16 >> vsub) - 1);
                        for (h = (16 >> vsub) - 1; h >= 1; h--) {
                            for (w = (16 >> hsub) - 1; w >= 1; w--)
                                dest_pcm[i][w] = (*s->dpcm_macroblock)[i][idx++];
                            dest_pcm[i] -= linesize[i] / 2;
                        }
                    }
                }
            }
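
            /* DPCM note (descriptive, inferred from the loops above):
             * dpcm_direction == 0 is the normal transform path using the
             * 32-bit coefficient blocks; == 1 writes the raw DPCM samples in
             * forward raster order from the top-left; == -1 writes them in
             * reverse raster order starting at the bottom-right corner and,
             * as written, does not touch row 0 / column 0 of each plane. */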
            /* dct only in intra block */
            else if (s->encoding || !(s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO)) {
                put_dct(s, block[0], 0, dest_y,                           dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y + block_size,              dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset,              dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                    if (s->chroma_y_shift) {
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    } else {
                        dct_offset   >>= 1;
                        dct_linesize >>= 1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else {
                s->idsp.idct_put(dest_y,                           dct_linesize, block[0]);
                s->idsp.idct_put(dest_y + block_size,              dct_linesize, block[1]);
                s->idsp.idct_put(dest_y + dct_offset,              dct_linesize, block[2]);
                s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                    if (s->chroma_y_shift) {
                        s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
                    } else {
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * block_size;

                        s->idsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->idsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if (!s->chroma_x_shift) { // Chroma444
                            s->idsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
                            s->idsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
                            s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
                            s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
                        }
                    }
                } // gray
            }
        }
skip_idct:
        if (!readable) {
            s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y, linesize, 16);
            if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize, 16 >> s->chroma_y_shift);
                s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize, 16 >> s->chroma_y_shift);
            }
        }
    }
}
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1) {
        if (s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 1);
        else                  mpv_reconstruct_mb_internal(s, block, 0, 1);
    } else
#endif
    if (s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 0);
    else                  mpv_reconstruct_mb_internal(s, block, 0, 0);
}
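
/* Because mpv_reconstruct_mb_internal() is av_always_inline and both
 * lowres_flag and is_mpeg12 are compile-time constants at each call site,
 * the compiler emits specialized copies of the function here; under
 * CONFIG_SMALL the MPEG-1/2 specializations are dropped to save code size. */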
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
{
    ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
                       s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
                       s->first_field, s->low_delay);
}
void ff_init_block_index(MpegEncContext *s) // FIXME maybe rename
{
    const int linesize     = s->current_picture.f->linesize[0]; // not s->linesize as this would be wrong for field pics
    const int uvlinesize   = s->current_picture.f->linesize[1];
    const int width_of_mb  = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
    const int height_of_mb = 4 - s->avctx->lowres;

    s->block_index[0] = s->b8_stride * (s->mb_y * 2)     - 2 + s->mb_x * 2;
    s->block_index[1] = s->b8_stride * (s->mb_y * 2)     - 1 + s->mb_x * 2;
    s->block_index[2] = s->b8_stride * (s->mb_y * 2 + 1) - 2 + s->mb_x * 2;
    s->block_index[3] = s->b8_stride * (s->mb_y * 2 + 1) - 1 + s->mb_x * 2;
    s->block_index[4] = s->mb_stride * (s->mb_y + 1)                + s->b8_stride * s->mb_height * 2 + s->mb_x - 1;
    s->block_index[5] = s->mb_stride * (s->mb_y + s->mb_height + 2) + s->b8_stride * s->mb_height * 2 + s->mb_x - 1;
    // block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) << width_of_mb);
    s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));

    if (!(s->pict_type == AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure == PICT_FRAME))
    {
        if (s->picture_structure == PICT_FRAME) {
            s->dest[0] += s->mb_y * linesize << height_of_mb;
            s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
        } else {
            s->dest[0] += (s->mb_y >> 1) * linesize << height_of_mb;
            s->dest[1] += (s->mb_y >> 1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
            s->dest[2] += (s->mb_y >> 1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
            av_assert1((s->mb_y & 1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
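
/* Worked example (a sketch): width_of_mb is log2 of a macroblock's width in
 * bytes -- 4 for 16 one-byte samples, 5 when samples take 2 bytes
 * (bits_per_raw_sample > 8), each reduced by lowres as the MB shrinks. At
 * 8-bit with lowres 0, dest[0] therefore advances by 16 bytes per mb_x; the
 * "- 1U" leaves the pointers one macroblock to the left, to be stepped right
 * (see ff_update_block_index() in mpegvideo.h) as the row is processed. */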
void ff_mpeg_flush(AVCodecContext *avctx)
{
    int i;
    MpegEncContext *s = avctx->priv_data;

    if (!s || !s->picture)
        return;

    for (i = 0; i < MAX_PICTURE_COUNT; i++)
        ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
    s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;

    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    ff_mpeg_unref_picture(s->avctx, &s->last_picture);
    ff_mpeg_unref_picture(s->avctx, &s->next_picture);

    s->mb_x = s->mb_y = 0;

    s->closed_gop = 0;

    s->parse_context.state             = -1;
    s->parse_context.frame_start_found = 0;
    s->parse_context.overread          = 0;
    s->parse_context.overread_index    = 0;
    s->parse_context.index             = 0;
    s->parse_context.last_index        = 0;
    s->bitstream_buffer_size = 0;
    s->pp_time = 0;
}
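
/* Called when the caller flushes the codec (avcodec_flush_buffers(), e.g.
 * on seek): every reference picture is unreferenced and the bitstream
 * parsing state is reset, so decoding can restart cleanly at the next
 * keyframe. */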
/**
 * set qscale and update qscale dependent variables.
 */
void ff_set_qscale(MpegEncContext *s, int qscale)
{
    if (qscale < 1)
        qscale = 1;
    else if (qscale > 31)
        qscale = 31;

    s->qscale = qscale;
    s->chroma_qscale = s->chroma_qscale_table[qscale];

    s->y_dc_scale = s->y_dc_scale_table[qscale];
    s->c_dc_scale = s->c_dc_scale_table[s->chroma_qscale];
}
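
/* A hedged note: the 1..31 clamp matches the legal quantizer range of the
 * MPEG-1/2/4 and H.263 family. chroma_qscale_table maps the luma quantizer
 * to the chroma one (identity by default; codecs that quantize chroma
 * differently install their own table), and the DC scale tables are indexed
 * by the resulting quantizers. */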
void ff_mpv_report_decode_progress(MpegEncContext *s)
{
    if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
        ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
}