  1. /*
  2. * The simplest mpeg encoder (well, it was the simplest!)
  3. * Copyright (c) 2000,2001 Fabrice Bellard
  4. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
  7. *
  8. * This file is part of FFmpeg.
  9. *
  10. * FFmpeg is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU Lesser General Public
  12. * License as published by the Free Software Foundation; either
  13. * version 2.1 of the License, or (at your option) any later version.
  14. *
  15. * FFmpeg is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * Lesser General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU Lesser General Public
  21. * License along with FFmpeg; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  23. */
  24. /**
  25. * @file
  26. * The simplest mpeg encoder (well, it was the simplest!).
  27. */
  28. #include "libavutil/attributes.h"
  29. #include "libavutil/avassert.h"
  30. #include "libavutil/imgutils.h"
  31. #include "libavutil/internal.h"
  32. #include "libavutil/motion_vector.h"
  33. #include "libavutil/timer.h"
  34. #include "avcodec.h"
  35. #include "blockdsp.h"
  36. #include "h264chroma.h"
  37. #include "idctdsp.h"
  38. #include "internal.h"
  39. #include "mathops.h"
  40. #include "mpegutils.h"
  41. #include "mpegvideo.h"
  42. #include "mjpegenc.h"
  43. #include "msmpeg4.h"
  44. #include "qpeldsp.h"
  45. #include "thread.h"
  46. #include <limits.h>
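/* Identity mapping: by default the chroma quantizer equals the luma
 * quantizer; codec-specific code may install a non-linear table over
 * s->chroma_qscale_table instead. */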
  47. static const uint8_t ff_default_chroma_qscale_table[32] = {
  48. // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  49. 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
  50. 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
  51. };
  52. const uint8_t ff_mpeg1_dc_scale_table[128] = {
  53. // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  54. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  55. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  56. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  57. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  58. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  59. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  60. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  61. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  62. };
  63. static const uint8_t mpeg2_dc_scale_table1[128] = {
  64. // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  65. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  66. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  67. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  68. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  69. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  70. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  71. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  72. 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  73. };
  74. static const uint8_t mpeg2_dc_scale_table2[128] = {
  75. // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  76. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  77. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  78. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  79. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  80. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  81. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  82. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  83. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  84. };
  85. static const uint8_t mpeg2_dc_scale_table3[128] = {
  86. // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
  87. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  88. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  89. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  90. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  91. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  92. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  93. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  94. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  95. };
  96. const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
  97. ff_mpeg1_dc_scale_table,
  98. mpeg2_dc_scale_table1,
  99. mpeg2_dc_scale_table2,
  100. mpeg2_dc_scale_table3,
  101. };
  102. const uint8_t ff_alternate_horizontal_scan[64] = {
  103. 0, 1, 2, 3, 8, 9, 16, 17,
  104. 10, 11, 4, 5, 6, 7, 15, 14,
  105. 13, 12, 19, 18, 24, 25, 32, 33,
  106. 26, 27, 20, 21, 22, 23, 28, 29,
  107. 30, 31, 34, 35, 40, 41, 48, 49,
  108. 42, 43, 36, 37, 38, 39, 44, 45,
  109. 46, 47, 50, 51, 56, 57, 58, 59,
  110. 52, 53, 54, 55, 60, 61, 62, 63,
  111. };
  112. const uint8_t ff_alternate_vertical_scan[64] = {
  113. 0, 8, 16, 24, 1, 9, 2, 10,
  114. 17, 25, 32, 40, 48, 56, 57, 49,
  115. 41, 33, 26, 18, 3, 11, 4, 12,
  116. 19, 27, 34, 42, 50, 58, 35, 43,
  117. 51, 59, 20, 28, 5, 13, 6, 14,
  118. 21, 29, 36, 44, 52, 60, 37, 45,
  119. 53, 61, 22, 30, 7, 15, 23, 31,
  120. 38, 46, 54, 62, 39, 47, 55, 63,
  121. };
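/* MPEG-1 intra dequantization (the inverse of the encoder's quantization):
 * the DC coefficient is scaled by y_dc_scale/c_dc_scale, each AC level is
 * reconstructed as (level * qscale * quant_matrix[j]) >> 3, and the
 * "(level - 1) | 1" step forces an odd result (MPEG-1 mismatch control).
 * Worked example: level = 3, qscale = 8, quant_matrix[j] = 16
 *   -> (3 * 8 * 16) >> 3 = 48 -> (48 - 1) | 1 = 47. */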
  122. static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
  123. int16_t *block, int n, int qscale)
  124. {
  125. int i, level, nCoeffs;
  126. const uint16_t *quant_matrix;
  127. nCoeffs= s->block_last_index[n];
  128. block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
  129. /* XXX: only mpeg1 */
  130. quant_matrix = s->intra_matrix;
  131. for(i=1;i<=nCoeffs;i++) {
  132. int j= s->intra_scantable.permutated[i];
  133. level = block[j];
  134. if (level) {
  135. if (level < 0) {
  136. level = -level;
  137. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  138. level = (level - 1) | 1;
  139. level = -level;
  140. } else {
  141. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  142. level = (level - 1) | 1;
  143. }
  144. block[j] = level;
  145. }
  146. }
  147. }
  148. static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
  149. int16_t *block, int n, int qscale)
  150. {
  151. int i, level, nCoeffs;
  152. const uint16_t *quant_matrix;
  153. nCoeffs= s->block_last_index[n];
  154. quant_matrix = s->inter_matrix;
  155. for(i=0; i<=nCoeffs; i++) {
  156. int j= s->intra_scantable.permutated[i];
  157. level = block[j];
  158. if (level) {
  159. if (level < 0) {
  160. level = -level;
  161. level = (((level << 1) + 1) * qscale *
  162. ((int) (quant_matrix[j]))) >> 4;
  163. level = (level - 1) | 1;
  164. level = -level;
  165. } else {
  166. level = (((level << 1) + 1) * qscale *
  167. ((int) (quant_matrix[j]))) >> 4;
  168. level = (level - 1) | 1;
  169. }
  170. block[j] = level;
  171. }
  172. }
  173. }
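/* The MPEG-2 variants below differ from MPEG-1 in two ways: with
 * alternate_scan all 64 coefficients are dequantized (nCoeffs = 63), and
 * there is no per-coefficient oddification; instead mismatch control is
 * done by toggling the parity of block[63], which is why the bitexact and
 * inter versions keep a running sum of the levels. */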
  174. static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
  175. int16_t *block, int n, int qscale)
  176. {
  177. int i, level, nCoeffs;
  178. const uint16_t *quant_matrix;
  179. if(s->alternate_scan) nCoeffs= 63;
  180. else nCoeffs= s->block_last_index[n];
  181. block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
  182. quant_matrix = s->intra_matrix;
  183. for(i=1;i<=nCoeffs;i++) {
  184. int j= s->intra_scantable.permutated[i];
  185. level = block[j];
  186. if (level) {
  187. if (level < 0) {
  188. level = -level;
  189. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  190. level = -level;
  191. } else {
  192. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  193. }
  194. block[j] = level;
  195. }
  196. }
  197. }
  198. static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
  199. int16_t *block, int n, int qscale)
  200. {
  201. int i, level, nCoeffs;
  202. const uint16_t *quant_matrix;
  203. int sum=-1;
  204. if(s->alternate_scan) nCoeffs= 63;
  205. else nCoeffs= s->block_last_index[n];
  206. block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
  207. sum += block[0];
  208. quant_matrix = s->intra_matrix;
  209. for(i=1;i<=nCoeffs;i++) {
  210. int j= s->intra_scantable.permutated[i];
  211. level = block[j];
  212. if (level) {
  213. if (level < 0) {
  214. level = -level;
  215. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  216. level = -level;
  217. } else {
  218. level = (int)(level * qscale * quant_matrix[j]) >> 3;
  219. }
  220. block[j] = level;
  221. sum+=level;
  222. }
  223. }
  224. block[63]^=sum&1;
  225. }
  226. static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
  227. int16_t *block, int n, int qscale)
  228. {
  229. int i, level, nCoeffs;
  230. const uint16_t *quant_matrix;
  231. int sum=-1;
  232. if(s->alternate_scan) nCoeffs= 63;
  233. else nCoeffs= s->block_last_index[n];
  234. quant_matrix = s->inter_matrix;
  235. for(i=0; i<=nCoeffs; i++) {
  236. int j= s->intra_scantable.permutated[i];
  237. level = block[j];
  238. if (level) {
  239. if (level < 0) {
  240. level = -level;
  241. level = (((level << 1) + 1) * qscale *
  242. ((int) (quant_matrix[j]))) >> 4;
  243. level = -level;
  244. } else {
  245. level = (((level << 1) + 1) * qscale *
  246. ((int) (quant_matrix[j]))) >> 4;
  247. }
  248. block[j] = level;
  249. sum+=level;
  250. }
  251. }
  252. block[63]^=sum&1;
  253. }
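/* H.263 dequantization: reconstructed = level * qmul +/- qadd with
 * qmul = 2 * qscale and qadd = (qscale - 1) | 1 (qadd is 0 when h263_aic,
 * advanced intra coding, is active).
 * Worked example: qscale = 5 -> qmul = 10, qadd = 5;
 *   level = 3 -> 35, level = -3 -> -35. */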
  254. static void dct_unquantize_h263_intra_c(MpegEncContext *s,
  255. int16_t *block, int n, int qscale)
  256. {
  257. int i, level, qmul, qadd;
  258. int nCoeffs;
  259. av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
  260. qmul = qscale << 1;
  261. if (!s->h263_aic) {
  262. block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
  263. qadd = (qscale - 1) | 1;
  264. }else{
  265. qadd = 0;
  266. }
  267. if(s->ac_pred)
  268. nCoeffs=63;
  269. else
  270. nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
  271. for(i=1; i<=nCoeffs; i++) {
  272. level = block[i];
  273. if (level) {
  274. if (level < 0) {
  275. level = level * qmul - qadd;
  276. } else {
  277. level = level * qmul + qadd;
  278. }
  279. block[i] = level;
  280. }
  281. }
  282. }
  283. static void dct_unquantize_h263_inter_c(MpegEncContext *s,
  284. int16_t *block, int n, int qscale)
  285. {
  286. int i, level, qmul, qadd;
  287. int nCoeffs;
  288. av_assert2(s->block_last_index[n]>=0);
  289. qadd = (qscale - 1) | 1;
  290. qmul = qscale << 1;
  291. nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
  292. for(i=0; i<=nCoeffs; i++) {
  293. level = block[i];
  294. if (level) {
  295. if (level < 0) {
  296. level = level * qmul - qadd;
  297. } else {
  298. level = level * qmul + qadd;
  299. }
  300. block[i] = level;
  301. }
  302. }
  303. }
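/* Callback installed as ERContext.decode_mb (see init_er() below): the
 * error-resilience code uses it to re-decode/conceal a single macroblock
 * with the motion vectors and mode it has guessed. */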
  304. static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
  305. int (*mv)[2][4][2],
  306. int mb_x, int mb_y, int mb_intra, int mb_skipped)
  307. {
  308. MpegEncContext *s = opaque;
  309. s->mv_dir = mv_dir;
  310. s->mv_type = mv_type;
  311. s->mb_intra = mb_intra;
  312. s->mb_skipped = mb_skipped;
  313. s->mb_x = mb_x;
  314. s->mb_y = mb_y;
  315. memcpy(s->mv, mv, sizeof(*mv));
  316. ff_init_block_index(s);
  317. ff_update_block_index(s);
  318. s->bdsp.clear_blocks(s->block[0]);
  319. s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
  320. s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
  321. s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
  322. if (ref)
  323. av_log(s->avctx, AV_LOG_DEBUG,
  324. "Interlaced error concealment is not fully implemented\n");
  325. ff_mpv_decode_mb(s, s->block);
  326. }
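/* gray16()/gray8() are debug stubs: when FF_DEBUG_NOMC is set they replace
 * the hpel put/avg functions in dct_init() below, filling the destination
 * block with mid-grey (0x80) instead of doing motion compensation. */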
  327. static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
  328. {
  329. while(h--)
  330. memset(dst + h*linesize, 128, 16);
  331. }
  332. static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
  333. {
  334. while(h--)
  335. memset(dst + h*linesize, 128, 8);
  336. }
  337. /* init common dct for both encoder and decoder */
  338. static av_cold int dct_init(MpegEncContext *s)
  339. {
  340. ff_blockdsp_init(&s->bdsp, s->avctx);
  341. ff_h264chroma_init(&s->h264chroma, 8); //for lowres
  342. ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
  343. ff_mpegvideodsp_init(&s->mdsp);
  344. ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
  345. if (s->avctx->debug & FF_DEBUG_NOMC) {
  346. int i;
  347. for (i=0; i<4; i++) {
  348. s->hdsp.avg_pixels_tab[0][i] = gray16;
  349. s->hdsp.put_pixels_tab[0][i] = gray16;
  350. s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
  351. s->hdsp.avg_pixels_tab[1][i] = gray8;
  352. s->hdsp.put_pixels_tab[1][i] = gray8;
  353. s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
  354. }
  355. }
  356. s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
  357. s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
  358. s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
  359. s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
  360. s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
  361. if (s->avctx->flags & CODEC_FLAG_BITEXACT)
  362. s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
  363. s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
  364. if (HAVE_INTRINSICS_NEON)
  365. ff_mpv_common_init_neon(s);
  366. if (ARCH_ALPHA)
  367. ff_mpv_common_init_axp(s);
  368. if (ARCH_ARM)
  369. ff_mpv_common_init_arm(s);
  370. if (ARCH_PPC)
  371. ff_mpv_common_init_ppc(s);
  372. if (ARCH_X86)
  373. ff_mpv_common_init_x86(s);
  374. return 0;
  375. }
  376. av_cold void ff_mpv_idct_init(MpegEncContext *s)
  377. {
  378. ff_idctdsp_init(&s->idsp, s->avctx);
379. /* load & permute scantables
  380. * note: only wmv uses different ones
  381. */
  382. if (s->alternate_scan) {
  383. ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
  384. ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
  385. } else {
  386. ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
  387. ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
  388. }
  389. ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
  390. ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
  391. }
  392. static int frame_size_alloc(MpegEncContext *s, int linesize)
  393. {
  394. int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
  395. if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
  396. return 0;
  397. if (linesize < 24) {
  398. av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
  399. return AVERROR_PATCHWELCOME;
  400. }
  401. // edge emu needs blocksize + filter length - 1
  402. // (= 17x17 for halfpel / 21x21 for h264)
403. // VC1 computes luma and chroma simultaneously and needs 19x19 + 9x9
  404. // at uvlinesize. It supports only YUV420 so 24x24 is enough
  405. // linesize * interlaced * MBsize
406. // we also use this buffer for encoding in encode_mb_internal(), needing an additional 32 lines
  407. FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size, 4 * 68,
  408. fail);
  409. FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size, 4 * 16 * 2,
  410. fail)
  411. s->me.temp = s->me.scratchpad;
  412. s->rd_scratchpad = s->me.scratchpad;
  413. s->b_scratchpad = s->me.scratchpad;
  414. s->obmc_scratchpad = s->me.scratchpad + 16;
  415. return 0;
  416. fail:
  417. av_freep(&s->edge_emu_buffer);
  418. return AVERROR(ENOMEM);
  419. }
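/* Note: me.temp, rd_scratchpad, b_scratchpad and obmc_scratchpad all alias
 * the same me.scratchpad allocation above; freeing me.scratchpad releases
 * them all (see free_duplicate_context()). */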
  420. /**
  421. * Allocate a frame buffer
  422. */
  423. static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
  424. {
  425. int edges_needed = av_codec_is_encoder(s->avctx->codec);
  426. int r, ret;
  427. pic->tf.f = pic->f;
  428. if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
  429. s->codec_id != AV_CODEC_ID_VC1IMAGE &&
  430. s->codec_id != AV_CODEC_ID_MSS2) {
  431. if (edges_needed) {
  432. pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
  433. pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
  434. }
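/* For encoders the frame is allocated EDGE_WIDTH pixels larger on every
 * side so that motion estimation can read outside the visible area; the
 * data pointers are shifted back to the visible frame further below. */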
  435. r = ff_thread_get_buffer(s->avctx, &pic->tf,
  436. pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
  437. } else {
  438. pic->f->width = s->avctx->width;
  439. pic->f->height = s->avctx->height;
  440. pic->f->format = s->avctx->pix_fmt;
  441. r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
  442. }
  443. if (r < 0 || !pic->f->buf[0]) {
  444. av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
  445. r, pic->f->data[0]);
  446. return -1;
  447. }
  448. if (edges_needed) {
  449. int i;
  450. for (i = 0; pic->f->data[i]; i++) {
  451. int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
  452. pic->f->linesize[i] +
  453. (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
  454. pic->f->data[i] += offset;
  455. }
  456. pic->f->width = s->avctx->width;
  457. pic->f->height = s->avctx->height;
  458. }
  459. if (s->avctx->hwaccel) {
  460. assert(!pic->hwaccel_picture_private);
  461. if (s->avctx->hwaccel->frame_priv_data_size) {
  462. pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
  463. if (!pic->hwaccel_priv_buf) {
  464. av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
  465. return -1;
  466. }
  467. pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
  468. }
  469. }
  470. if (s->linesize && (s->linesize != pic->f->linesize[0] ||
  471. s->uvlinesize != pic->f->linesize[1])) {
  472. av_log(s->avctx, AV_LOG_ERROR,
  473. "get_buffer() failed (stride changed)\n");
  474. ff_mpeg_unref_picture(s->avctx, pic);
  475. return -1;
  476. }
  477. if (pic->f->linesize[1] != pic->f->linesize[2]) {
  478. av_log(s->avctx, AV_LOG_ERROR,
  479. "get_buffer() failed (uv stride mismatch)\n");
  480. ff_mpeg_unref_picture(s->avctx, pic);
  481. return -1;
  482. }
  483. if (!s->edge_emu_buffer &&
  484. (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
  485. av_log(s->avctx, AV_LOG_ERROR,
  486. "get_buffer() failed to allocate context scratch buffers.\n");
  487. ff_mpeg_unref_picture(s->avctx, pic);
  488. return ret;
  489. }
  490. return 0;
  491. }
  492. void ff_free_picture_tables(Picture *pic)
  493. {
  494. int i;
  495. pic->alloc_mb_width =
  496. pic->alloc_mb_height = 0;
  497. av_buffer_unref(&pic->mb_var_buf);
  498. av_buffer_unref(&pic->mc_mb_var_buf);
  499. av_buffer_unref(&pic->mb_mean_buf);
  500. av_buffer_unref(&pic->mbskip_table_buf);
  501. av_buffer_unref(&pic->qscale_table_buf);
  502. av_buffer_unref(&pic->mb_type_buf);
  503. for (i = 0; i < 2; i++) {
  504. av_buffer_unref(&pic->motion_val_buf[i]);
  505. av_buffer_unref(&pic->ref_index_buf[i]);
  506. }
  507. }
  508. static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
  509. {
  510. const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
  511. const int mb_array_size = s->mb_stride * s->mb_height;
  512. const int b8_array_size = s->b8_stride * s->mb_height * 2;
  513. int i;
  514. pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
  515. pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
  516. pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
  517. sizeof(uint32_t));
  518. if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
  519. return AVERROR(ENOMEM);
  520. if (s->encoding) {
  521. pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
  522. pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
  523. pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
  524. if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
  525. return AVERROR(ENOMEM);
  526. }
  527. if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv ||
  528. (s->avctx->flags2 & CODEC_FLAG2_EXPORT_MVS)) {
  529. int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
  530. int ref_index_size = 4 * mb_array_size;
  531. for (i = 0; mv_size && i < 2; i++) {
  532. pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
  533. pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
  534. if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
  535. return AVERROR(ENOMEM);
  536. }
  537. }
  538. pic->alloc_mb_width = s->mb_width;
  539. pic->alloc_mb_height = s->mb_height;
  540. return 0;
  541. }
  542. static int make_tables_writable(Picture *pic)
  543. {
  544. int ret, i;
  545. #define MAKE_WRITABLE(table) \
  546. do {\
  547. if (pic->table &&\
  548. (ret = av_buffer_make_writable(&pic->table)) < 0)\
  549. return ret;\
  550. } while (0)
  551. MAKE_WRITABLE(mb_var_buf);
  552. MAKE_WRITABLE(mc_mb_var_buf);
  553. MAKE_WRITABLE(mb_mean_buf);
  554. MAKE_WRITABLE(mbskip_table_buf);
  555. MAKE_WRITABLE(qscale_table_buf);
  556. MAKE_WRITABLE(mb_type_buf);
  557. for (i = 0; i < 2; i++) {
  558. MAKE_WRITABLE(motion_val_buf[i]);
  559. MAKE_WRITABLE(ref_index_buf[i]);
  560. }
  561. return 0;
  562. }
  563. /**
  564. * Allocate a Picture.
  565. * The pixels are allocated/set by calling get_buffer() if shared = 0
  566. */
  567. int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
  568. {
  569. int i, ret;
  570. if (pic->qscale_table_buf)
  571. if ( pic->alloc_mb_width != s->mb_width
  572. || pic->alloc_mb_height != s->mb_height)
  573. ff_free_picture_tables(pic);
  574. if (shared) {
  575. av_assert0(pic->f->data[0]);
  576. pic->shared = 1;
  577. } else {
  578. av_assert0(!pic->f->buf[0]);
  579. if (alloc_frame_buffer(s, pic) < 0)
  580. return -1;
  581. s->linesize = pic->f->linesize[0];
  582. s->uvlinesize = pic->f->linesize[1];
  583. }
  584. if (!pic->qscale_table_buf)
  585. ret = alloc_picture_tables(s, pic);
  586. else
  587. ret = make_tables_writable(pic);
  588. if (ret < 0)
  589. goto fail;
  590. if (s->encoding) {
  591. pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
  592. pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
  593. pic->mb_mean = pic->mb_mean_buf->data;
  594. }
  595. pic->mbskip_table = pic->mbskip_table_buf->data;
  596. pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
  597. pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
  598. if (pic->motion_val_buf[0]) {
  599. for (i = 0; i < 2; i++) {
  600. pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
  601. pic->ref_index[i] = pic->ref_index_buf[i]->data;
  602. }
  603. }
  604. return 0;
  605. fail:
  606. av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
  607. ff_mpeg_unref_picture(s->avctx, pic);
  608. ff_free_picture_tables(pic);
  609. return AVERROR(ENOMEM);
  610. }
  611. /**
  612. * Deallocate a picture.
  613. */
  614. void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
  615. {
  616. int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
  617. pic->tf.f = pic->f;
  618. /* WM Image / Screen codecs allocate internal buffers with different
  619. * dimensions / colorspaces; ignore user-defined callbacks for these. */
  620. if (avctx->codec->id != AV_CODEC_ID_WMV3IMAGE &&
  621. avctx->codec->id != AV_CODEC_ID_VC1IMAGE &&
  622. avctx->codec->id != AV_CODEC_ID_MSS2)
  623. ff_thread_release_buffer(avctx, &pic->tf);
  624. else if (pic->f)
  625. av_frame_unref(pic->f);
  626. av_buffer_unref(&pic->hwaccel_priv_buf);
  627. if (pic->needs_realloc)
  628. ff_free_picture_tables(pic);
  629. memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
  630. }
  631. static int update_picture_tables(Picture *dst, Picture *src)
  632. {
  633. int i;
  634. #define UPDATE_TABLE(table)\
  635. do {\
  636. if (src->table &&\
  637. (!dst->table || dst->table->buffer != src->table->buffer)) {\
  638. av_buffer_unref(&dst->table);\
  639. dst->table = av_buffer_ref(src->table);\
  640. if (!dst->table) {\
  641. ff_free_picture_tables(dst);\
  642. return AVERROR(ENOMEM);\
  643. }\
  644. }\
  645. } while (0)
  646. UPDATE_TABLE(mb_var_buf);
  647. UPDATE_TABLE(mc_mb_var_buf);
  648. UPDATE_TABLE(mb_mean_buf);
  649. UPDATE_TABLE(mbskip_table_buf);
  650. UPDATE_TABLE(qscale_table_buf);
  651. UPDATE_TABLE(mb_type_buf);
  652. for (i = 0; i < 2; i++) {
  653. UPDATE_TABLE(motion_val_buf[i]);
  654. UPDATE_TABLE(ref_index_buf[i]);
  655. }
  656. dst->mb_var = src->mb_var;
  657. dst->mc_mb_var = src->mc_mb_var;
  658. dst->mb_mean = src->mb_mean;
  659. dst->mbskip_table = src->mbskip_table;
  660. dst->qscale_table = src->qscale_table;
  661. dst->mb_type = src->mb_type;
  662. for (i = 0; i < 2; i++) {
  663. dst->motion_val[i] = src->motion_val[i];
  664. dst->ref_index[i] = src->ref_index[i];
  665. }
  666. dst->alloc_mb_width = src->alloc_mb_width;
  667. dst->alloc_mb_height = src->alloc_mb_height;
  668. return 0;
  669. }
  670. int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
  671. {
  672. int ret;
  673. av_assert0(!dst->f->buf[0]);
  674. av_assert0(src->f->buf[0]);
  675. src->tf.f = src->f;
  676. dst->tf.f = dst->f;
  677. ret = ff_thread_ref_frame(&dst->tf, &src->tf);
  678. if (ret < 0)
  679. goto fail;
  680. ret = update_picture_tables(dst, src);
  681. if (ret < 0)
  682. goto fail;
  683. if (src->hwaccel_picture_private) {
  684. dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
  685. if (!dst->hwaccel_priv_buf)
  686. goto fail;
  687. dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
  688. }
  689. dst->field_picture = src->field_picture;
  690. dst->mb_var_sum = src->mb_var_sum;
  691. dst->mc_mb_var_sum = src->mc_mb_var_sum;
  692. dst->b_frame_score = src->b_frame_score;
  693. dst->needs_realloc = src->needs_realloc;
  694. dst->reference = src->reference;
  695. dst->shared = src->shared;
  696. return 0;
  697. fail:
  698. ff_mpeg_unref_picture(avctx, dst);
  699. return ret;
  700. }
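/* init_duplicate_context() allocates the per-slice-context (thread-local)
 * data: the motion-estimation maps (encoder only), the 12 DCT blocks and,
 * for FMT_H263, the AC prediction values. The scratch buffers themselves
 * are allocated lazily by frame_size_alloc() once the linesize is known. */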
  701. static int init_duplicate_context(MpegEncContext *s)
  702. {
  703. int y_size = s->b8_stride * (2 * s->mb_height + 1);
  704. int c_size = s->mb_stride * (s->mb_height + 1);
  705. int yc_size = y_size + 2 * c_size;
  706. int i;
  707. if (s->mb_height & 1)
  708. yc_size += 2*s->b8_stride + 2*s->mb_stride;
  709. s->edge_emu_buffer =
  710. s->me.scratchpad =
  711. s->me.temp =
  712. s->rd_scratchpad =
  713. s->b_scratchpad =
  714. s->obmc_scratchpad = NULL;
  715. if (s->encoding) {
  716. FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
  717. ME_MAP_SIZE * sizeof(uint32_t), fail)
  718. FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
  719. ME_MAP_SIZE * sizeof(uint32_t), fail)
  720. if (s->avctx->noise_reduction) {
  721. FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
  722. 2 * 64 * sizeof(int), fail)
  723. }
  724. }
  725. FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
  726. s->block = s->blocks[0];
  727. for (i = 0; i < 12; i++) {
  728. s->pblocks[i] = &s->block[i];
  729. }
  730. if (s->avctx->codec_tag == AV_RL32("VCR2")) {
  731. // exchange uv
  732. FFSWAP(void *, s->pblocks[4], s->pblocks[5]);
  733. }
  734. if (s->out_format == FMT_H263) {
  735. /* ac values */
  736. FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
  737. yc_size * sizeof(int16_t) * 16, fail);
  738. s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
  739. s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
  740. s->ac_val[2] = s->ac_val[1] + c_size;
  741. }
  742. return 0;
  743. fail:
  744. return -1; // free() through ff_mpv_common_end()
  745. }
  746. static void free_duplicate_context(MpegEncContext *s)
  747. {
  748. if (!s)
  749. return;
  750. av_freep(&s->edge_emu_buffer);
  751. av_freep(&s->me.scratchpad);
  752. s->me.temp =
  753. s->rd_scratchpad =
  754. s->b_scratchpad =
  755. s->obmc_scratchpad = NULL;
  756. av_freep(&s->dct_error_sum);
  757. av_freep(&s->me.map);
  758. av_freep(&s->me.score_map);
  759. av_freep(&s->blocks);
  760. av_freep(&s->ac_val_base);
  761. s->block = NULL;
  762. }
  763. static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
  764. {
  765. #define COPY(a) bak->a = src->a
  766. COPY(edge_emu_buffer);
  767. COPY(me.scratchpad);
  768. COPY(me.temp);
  769. COPY(rd_scratchpad);
  770. COPY(b_scratchpad);
  771. COPY(obmc_scratchpad);
  772. COPY(me.map);
  773. COPY(me.score_map);
  774. COPY(blocks);
  775. COPY(block);
  776. COPY(start_mb_y);
  777. COPY(end_mb_y);
  778. COPY(me.map_generation);
  779. COPY(pb);
  780. COPY(dct_error_sum);
  781. COPY(dct_count[0]);
  782. COPY(dct_count[1]);
  783. COPY(ac_val_base);
  784. COPY(ac_val[0]);
  785. COPY(ac_val[1]);
  786. COPY(ac_val[2]);
  787. #undef COPY
  788. }
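/* ff_update_duplicate_context() copies the main context into a slice
 * context while preserving the destination's own thread-local pointers:
 * backup_duplicate_context() saves them, the whole struct is memcpy()'d,
 * and the saved pointers are restored on top. */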
  789. int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
  790. {
  791. MpegEncContext bak;
  792. int i, ret;
  793. // FIXME copy only needed parts
  794. // START_TIMER
  795. backup_duplicate_context(&bak, dst);
  796. memcpy(dst, src, sizeof(MpegEncContext));
  797. backup_duplicate_context(dst, &bak);
  798. for (i = 0; i < 12; i++) {
  799. dst->pblocks[i] = &dst->block[i];
  800. }
  801. if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
  802. // exchange uv
  803. FFSWAP(void *, dst->pblocks[4], dst->pblocks[5]);
  804. }
  805. if (!dst->edge_emu_buffer &&
  806. (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
  807. av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
  808. "scratch buffers.\n");
  809. return ret;
  810. }
  811. // STOP_TIMER("update_duplicate_context")
812. // about 10k cycles / 0.01 sec for 1000 frames on 1GHz with 2 threads
  813. return 0;
  814. }
  815. int ff_mpeg_update_thread_context(AVCodecContext *dst,
  816. const AVCodecContext *src)
  817. {
  818. int i, ret;
  819. MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
  820. if (dst == src)
  821. return 0;
  822. av_assert0(s != s1);
  823. // FIXME can parameters change on I-frames?
  824. // in that case dst may need a reinit
  825. if (!s->context_initialized) {
  826. int err;
  827. memcpy(s, s1, sizeof(MpegEncContext));
  828. s->avctx = dst;
  829. s->bitstream_buffer = NULL;
  830. s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
  831. if (s1->context_initialized){
  832. // s->picture_range_start += MAX_PICTURE_COUNT;
  833. // s->picture_range_end += MAX_PICTURE_COUNT;
  834. ff_mpv_idct_init(s);
  835. if((err = ff_mpv_common_init(s)) < 0){
  836. memset(s, 0, sizeof(MpegEncContext));
  837. s->avctx = dst;
  838. return err;
  839. }
  840. }
  841. }
  842. if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
  843. s->context_reinit = 0;
  844. s->height = s1->height;
  845. s->width = s1->width;
  846. if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
  847. return ret;
  848. }
  849. s->avctx->coded_height = s1->avctx->coded_height;
  850. s->avctx->coded_width = s1->avctx->coded_width;
  851. s->avctx->width = s1->avctx->width;
  852. s->avctx->height = s1->avctx->height;
  853. s->coded_picture_number = s1->coded_picture_number;
  854. s->picture_number = s1->picture_number;
  855. av_assert0(!s->picture || s->picture != s1->picture);
  856. if(s->picture)
  857. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  858. ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
  859. if (s1->picture[i].f->buf[0] &&
  860. (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
  861. return ret;
  862. }
  863. #define UPDATE_PICTURE(pic)\
  864. do {\
  865. ff_mpeg_unref_picture(s->avctx, &s->pic);\
  866. if (s1->pic.f && s1->pic.f->buf[0])\
  867. ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
  868. else\
  869. ret = update_picture_tables(&s->pic, &s1->pic);\
  870. if (ret < 0)\
  871. return ret;\
  872. } while (0)
  873. UPDATE_PICTURE(current_picture);
  874. UPDATE_PICTURE(last_picture);
  875. UPDATE_PICTURE(next_picture);
  876. #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
  877. ((pic && pic >= old_ctx->picture && \
  878. pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
  879. &new_ctx->picture[pic - old_ctx->picture] : NULL)
  880. s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
  881. s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
  882. s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
  883. // Error/bug resilience
  884. s->next_p_frame_damaged = s1->next_p_frame_damaged;
  885. s->workaround_bugs = s1->workaround_bugs;
  886. s->padding_bug_score = s1->padding_bug_score;
  887. // MPEG4 timing info
  888. memcpy(&s->last_time_base, &s1->last_time_base,
  889. (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
  890. (char *) &s1->last_time_base);
  891. // B-frame info
  892. s->max_b_frames = s1->max_b_frames;
  893. s->low_delay = s1->low_delay;
  894. s->droppable = s1->droppable;
  895. // DivX handling (doesn't work)
  896. s->divx_packed = s1->divx_packed;
  897. if (s1->bitstream_buffer) {
  898. if (s1->bitstream_buffer_size +
  899. FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size) {
  900. av_fast_malloc(&s->bitstream_buffer,
  901. &s->allocated_bitstream_buffer_size,
  902. s1->allocated_bitstream_buffer_size);
  903. if (!s->bitstream_buffer) {
  904. s->bitstream_buffer_size = 0;
  905. return AVERROR(ENOMEM);
  906. }
  907. }
  908. s->bitstream_buffer_size = s1->bitstream_buffer_size;
  909. memcpy(s->bitstream_buffer, s1->bitstream_buffer,
  910. s1->bitstream_buffer_size);
  911. memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
  912. FF_INPUT_BUFFER_PADDING_SIZE);
  913. }
914. // linesize-dependent scratch buffer allocation
  915. if (!s->edge_emu_buffer)
  916. if (s1->linesize) {
  917. if (frame_size_alloc(s, s1->linesize) < 0) {
  918. av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
  919. "scratch buffers.\n");
  920. return AVERROR(ENOMEM);
  921. }
  922. } else {
  923. av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
  924. "be allocated due to unknown size.\n");
  925. }
  926. // MPEG2/interlacing info
  927. memcpy(&s->progressive_sequence, &s1->progressive_sequence,
  928. (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
  929. if (!s1->first_field) {
  930. s->last_pict_type = s1->pict_type;
  931. if (s1->current_picture_ptr)
  932. s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
  933. }
  934. return 0;
  935. }
  936. /**
  937. * Set the given MpegEncContext to common defaults
  938. * (same for encoding and decoding).
  939. * The changed fields will not depend upon the
  940. * prior state of the MpegEncContext.
  941. */
  942. void ff_mpv_common_defaults(MpegEncContext *s)
  943. {
  944. s->y_dc_scale_table =
  945. s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
  946. s->chroma_qscale_table = ff_default_chroma_qscale_table;
  947. s->progressive_frame = 1;
  948. s->progressive_sequence = 1;
  949. s->picture_structure = PICT_FRAME;
  950. s->coded_picture_number = 0;
  951. s->picture_number = 0;
  952. s->f_code = 1;
  953. s->b_code = 1;
  954. s->slice_context_count = 1;
  955. }
  956. /**
  957. * Set the given MpegEncContext to defaults for decoding.
958. * The changed fields will not depend upon
  959. * the prior state of the MpegEncContext.
  960. */
  961. void ff_mpv_decode_defaults(MpegEncContext *s)
  962. {
  963. ff_mpv_common_defaults(s);
  964. }
  965. void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
  966. {
  967. s->avctx = avctx;
  968. s->width = avctx->coded_width;
  969. s->height = avctx->coded_height;
  970. s->codec_id = avctx->codec->id;
  971. s->workaround_bugs = avctx->workaround_bugs;
  972. /* convert fourcc to upper case */
  973. s->codec_tag = avpriv_toupper4(avctx->codec_tag);
  974. }
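/* ff_mpv_decode_init() only copies the basic stream parameters from the
 * AVCodecContext; the actual allocations happen later in
 * ff_mpv_common_init() once width/height are known. */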
  975. static int init_er(MpegEncContext *s)
  976. {
  977. ERContext *er = &s->er;
  978. int mb_array_size = s->mb_height * s->mb_stride;
  979. int i;
  980. er->avctx = s->avctx;
  981. er->mb_index2xy = s->mb_index2xy;
  982. er->mb_num = s->mb_num;
  983. er->mb_width = s->mb_width;
  984. er->mb_height = s->mb_height;
  985. er->mb_stride = s->mb_stride;
  986. er->b8_stride = s->b8_stride;
  987. er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
  988. er->error_status_table = av_mallocz(mb_array_size);
  989. if (!er->er_temp_buffer || !er->error_status_table)
  990. goto fail;
  991. er->mbskip_table = s->mbskip_table;
  992. er->mbintra_table = s->mbintra_table;
  993. for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
  994. er->dc_val[i] = s->dc_val[i];
  995. er->decode_mb = mpeg_er_decode_mb;
  996. er->opaque = s;
  997. return 0;
  998. fail:
  999. av_freep(&er->er_temp_buffer);
  1000. av_freep(&er->error_status_table);
  1001. return AVERROR(ENOMEM);
  1002. }
  1003. /**
1004. * Initialize and allocate MpegEncContext fields dependent on the resolution.
  1005. */
  1006. static int init_context_frame(MpegEncContext *s)
  1007. {
  1008. int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
  1009. s->mb_width = (s->width + 15) / 16;
  1010. s->mb_stride = s->mb_width + 1;
  1011. s->b8_stride = s->mb_width * 2 + 1;
  1012. mb_array_size = s->mb_height * s->mb_stride;
  1013. mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
  1014. /* set default edge pos, will be overridden
  1015. * in decode_header if needed */
  1016. s->h_edge_pos = s->mb_width * 16;
  1017. s->v_edge_pos = s->mb_height * 16;
  1018. s->mb_num = s->mb_width * s->mb_height;
  1019. s->block_wrap[0] =
  1020. s->block_wrap[1] =
  1021. s->block_wrap[2] =
  1022. s->block_wrap[3] = s->b8_stride;
  1023. s->block_wrap[4] =
  1024. s->block_wrap[5] = s->mb_stride;
  1025. y_size = s->b8_stride * (2 * s->mb_height + 1);
  1026. c_size = s->mb_stride * (s->mb_height + 1);
  1027. yc_size = y_size + 2 * c_size;
  1028. if (s->mb_height & 1)
  1029. yc_size += 2*s->b8_stride + 2*s->mb_stride;
1030. FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error resilience code looks cleaner with this
  1031. for (y = 0; y < s->mb_height; y++)
  1032. for (x = 0; x < s->mb_width; x++)
  1033. s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
  1034. s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
  1035. if (s->encoding) {
  1036. /* Allocate MV tables */
  1037. FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
  1038. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
  1039. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
  1040. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
  1041. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
  1042. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
  1043. s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
  1044. s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
  1045. s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
  1046. s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
  1047. s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
  1048. s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
  1049. /* Allocate MB type table */
  1050. FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
  1051. FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
  1052. FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
  1053. mb_array_size * sizeof(float), fail);
  1054. FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
  1055. mb_array_size * sizeof(float), fail);
  1056. }
  1057. if (s->codec_id == AV_CODEC_ID_MPEG4 ||
  1058. (s->avctx->flags & CODEC_FLAG_INTERLACED_ME)) {
  1059. /* interlaced direct mode decoding tables */
  1060. for (i = 0; i < 2; i++) {
  1061. int j, k;
  1062. for (j = 0; j < 2; j++) {
  1063. for (k = 0; k < 2; k++) {
  1064. FF_ALLOCZ_OR_GOTO(s->avctx,
  1065. s->b_field_mv_table_base[i][j][k],
  1066. mv_table_size * 2 * sizeof(int16_t),
  1067. fail);
  1068. s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
  1069. s->mb_stride + 1;
  1070. }
  1071. FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
  1072. FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
  1073. s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
  1074. }
  1075. FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
  1076. }
  1077. }
  1078. if (s->out_format == FMT_H263) {
  1079. /* cbp values */
  1080. FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
  1081. s->coded_block = s->coded_block_base + s->b8_stride + 1;
  1082. /* cbp, ac_pred, pred_dir */
  1083. FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
  1084. FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
  1085. }
  1086. if (s->h263_pred || s->h263_plus || !s->encoding) {
  1087. /* dc values */
  1088. // MN: we need these for error resilience of intra-frames
  1089. FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
  1090. s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
  1091. s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
  1092. s->dc_val[2] = s->dc_val[1] + c_size;
  1093. for (i = 0; i < yc_size; i++)
  1094. s->dc_val_base[i] = 1024;
  1095. }
1096. /* which mb is an intra block */
  1097. FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
  1098. memset(s->mbintra_table, 1, mb_array_size);
  1099. /* init macroblock skip table */
  1100. FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
  1101. // Note the + 1 is for a quicker mpeg4 slice_end detection
  1102. return init_er(s);
  1103. fail:
  1104. return AVERROR(ENOMEM);
  1105. }
  1106. /**
1107. * Initialize the common structure for both encoder and decoder.
1108. * This assumes that some variables like width/height are already set.
  1109. */
  1110. av_cold int ff_mpv_common_init(MpegEncContext *s)
  1111. {
  1112. int i;
  1113. int nb_slices = (HAVE_THREADS &&
  1114. s->avctx->active_thread_type & FF_THREAD_SLICE) ?
  1115. s->avctx->thread_count : 1;
  1116. if (s->encoding && s->avctx->slices)
  1117. nb_slices = s->avctx->slices;
  1118. if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
  1119. s->mb_height = (s->height + 31) / 32 * 2;
  1120. else
  1121. s->mb_height = (s->height + 15) / 16;
  1122. if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
  1123. av_log(s->avctx, AV_LOG_ERROR,
  1124. "decoding to AV_PIX_FMT_NONE is not supported.\n");
  1125. return -1;
  1126. }
  1127. if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
  1128. int max_slices;
  1129. if (s->mb_height)
  1130. max_slices = FFMIN(MAX_THREADS, s->mb_height);
  1131. else
  1132. max_slices = MAX_THREADS;
  1133. av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
  1134. " reducing to %d\n", nb_slices, max_slices);
  1135. nb_slices = max_slices;
  1136. }
  1137. if ((s->width || s->height) &&
  1138. av_image_check_size(s->width, s->height, 0, s->avctx))
  1139. return -1;
  1140. dct_init(s);
  1141. /* set chroma shifts */
  1142. avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
  1143. &s->chroma_x_shift,
  1144. &s->chroma_y_shift);
  1145. FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
  1146. MAX_PICTURE_COUNT * sizeof(Picture), fail);
  1147. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  1148. s->picture[i].f = av_frame_alloc();
  1149. if (!s->picture[i].f)
  1150. goto fail;
  1151. }
  1152. memset(&s->next_picture, 0, sizeof(s->next_picture));
  1153. memset(&s->last_picture, 0, sizeof(s->last_picture));
  1154. memset(&s->current_picture, 0, sizeof(s->current_picture));
  1155. memset(&s->new_picture, 0, sizeof(s->new_picture));
  1156. s->next_picture.f = av_frame_alloc();
  1157. if (!s->next_picture.f)
  1158. goto fail;
  1159. s->last_picture.f = av_frame_alloc();
  1160. if (!s->last_picture.f)
  1161. goto fail;
  1162. s->current_picture.f = av_frame_alloc();
  1163. if (!s->current_picture.f)
  1164. goto fail;
  1165. s->new_picture.f = av_frame_alloc();
  1166. if (!s->new_picture.f)
  1167. goto fail;
  1168. if (init_context_frame(s))
  1169. goto fail;
  1170. s->parse_context.state = -1;
  1171. s->context_initialized = 1;
  1172. s->thread_context[0] = s;
  1173. // if (s->width && s->height) {
  1174. if (nb_slices > 1) {
  1175. for (i = 1; i < nb_slices; i++) {
  1176. s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
  1177. memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
  1178. }
  1179. for (i = 0; i < nb_slices; i++) {
  1180. if (init_duplicate_context(s->thread_context[i]) < 0)
  1181. goto fail;
  1182. s->thread_context[i]->start_mb_y =
  1183. (s->mb_height * (i) + nb_slices / 2) / nb_slices;
  1184. s->thread_context[i]->end_mb_y =
  1185. (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
  1186. }
  1187. } else {
  1188. if (init_duplicate_context(s) < 0)
  1189. goto fail;
  1190. s->start_mb_y = 0;
  1191. s->end_mb_y = s->mb_height;
  1192. }
  1193. s->slice_context_count = nb_slices;
  1194. // }
  1195. return 0;
  1196. fail:
  1197. ff_mpv_common_end(s);
  1198. return -1;
  1199. }
  1200. /**
  1201. * Frees and resets MpegEncContext fields depending on the resolution.
1202. * It is used during resolution changes to avoid a full reinitialization of the
  1203. * codec.
  1204. */
  1205. static void free_context_frame(MpegEncContext *s)
  1206. {
  1207. int i, j, k;
  1208. av_freep(&s->mb_type);
  1209. av_freep(&s->p_mv_table_base);
  1210. av_freep(&s->b_forw_mv_table_base);
  1211. av_freep(&s->b_back_mv_table_base);
  1212. av_freep(&s->b_bidir_forw_mv_table_base);
  1213. av_freep(&s->b_bidir_back_mv_table_base);
  1214. av_freep(&s->b_direct_mv_table_base);
  1215. s->p_mv_table = NULL;
  1216. s->b_forw_mv_table = NULL;
  1217. s->b_back_mv_table = NULL;
  1218. s->b_bidir_forw_mv_table = NULL;
  1219. s->b_bidir_back_mv_table = NULL;
  1220. s->b_direct_mv_table = NULL;
  1221. for (i = 0; i < 2; i++) {
  1222. for (j = 0; j < 2; j++) {
  1223. for (k = 0; k < 2; k++) {
  1224. av_freep(&s->b_field_mv_table_base[i][j][k]);
  1225. s->b_field_mv_table[i][j][k] = NULL;
  1226. }
  1227. av_freep(&s->b_field_select_table[i][j]);
  1228. av_freep(&s->p_field_mv_table_base[i][j]);
  1229. s->p_field_mv_table[i][j] = NULL;
  1230. }
  1231. av_freep(&s->p_field_select_table[i]);
  1232. }
  1233. av_freep(&s->dc_val_base);
  1234. av_freep(&s->coded_block_base);
  1235. av_freep(&s->mbintra_table);
  1236. av_freep(&s->cbp_table);
  1237. av_freep(&s->pred_dir_table);
  1238. av_freep(&s->mbskip_table);
  1239. av_freep(&s->er.error_status_table);
  1240. av_freep(&s->er.er_temp_buffer);
  1241. av_freep(&s->mb_index2xy);
  1242. av_freep(&s->lambda_table);
  1243. av_freep(&s->cplx_tab);
  1244. av_freep(&s->bits_tab);
  1245. s->linesize = s->uvlinesize = 0;
  1246. }
  1247. int ff_mpv_common_frame_size_change(MpegEncContext *s)
  1248. {
  1249. int i, err = 0;
  1250. if (!s->context_initialized)
  1251. return AVERROR(EINVAL);
  1252. if (s->slice_context_count > 1) {
  1253. for (i = 0; i < s->slice_context_count; i++) {
  1254. free_duplicate_context(s->thread_context[i]);
  1255. }
  1256. for (i = 1; i < s->slice_context_count; i++) {
  1257. av_freep(&s->thread_context[i]);
  1258. }
  1259. } else
  1260. free_duplicate_context(s);
  1261. free_context_frame(s);
  1262. if (s->picture)
  1263. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  1264. s->picture[i].needs_realloc = 1;
  1265. }
  1266. s->last_picture_ptr =
  1267. s->next_picture_ptr =
  1268. s->current_picture_ptr = NULL;
  1269. // init
  1270. if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
  1271. s->mb_height = (s->height + 31) / 32 * 2;
  1272. else
  1273. s->mb_height = (s->height + 15) / 16;
  1274. if ((s->width || s->height) &&
  1275. (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
  1276. goto fail;
  1277. if ((err = init_context_frame(s)))
  1278. goto fail;
  1279. s->thread_context[0] = s;
  1280. if (s->width && s->height) {
  1281. int nb_slices = s->slice_context_count;
  1282. if (nb_slices > 1) {
  1283. for (i = 1; i < nb_slices; i++) {
  1284. s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
  1285. memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
  1286. }
  1287. for (i = 0; i < nb_slices; i++) {
  1288. if ((err = init_duplicate_context(s->thread_context[i])) < 0)
  1289. goto fail;
  1290. s->thread_context[i]->start_mb_y =
  1291. (s->mb_height * (i) + nb_slices / 2) / nb_slices;
  1292. s->thread_context[i]->end_mb_y =
  1293. (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
  1294. }
  1295. } else {
  1296. err = init_duplicate_context(s);
  1297. if (err < 0)
  1298. goto fail;
  1299. s->start_mb_y = 0;
  1300. s->end_mb_y = s->mb_height;
  1301. }
  1302. s->slice_context_count = nb_slices;
  1303. }
  1304. return 0;
  1305. fail:
  1306. ff_mpv_common_end(s);
  1307. return err;
  1308. }
1309. /* free common structure for both encoder and decoder */
  1310. void ff_mpv_common_end(MpegEncContext *s)
  1311. {
  1312. int i;
  1313. if (s->slice_context_count > 1) {
  1314. for (i = 0; i < s->slice_context_count; i++) {
  1315. free_duplicate_context(s->thread_context[i]);
  1316. }
  1317. for (i = 1; i < s->slice_context_count; i++) {
  1318. av_freep(&s->thread_context[i]);
  1319. }
  1320. s->slice_context_count = 1;
  1321. } else free_duplicate_context(s);
  1322. av_freep(&s->parse_context.buffer);
  1323. s->parse_context.buffer_size = 0;
  1324. av_freep(&s->bitstream_buffer);
  1325. s->allocated_bitstream_buffer_size = 0;
  1326. if (s->picture) {
  1327. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  1328. ff_free_picture_tables(&s->picture[i]);
  1329. ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
  1330. av_frame_free(&s->picture[i].f);
  1331. }
  1332. }
  1333. av_freep(&s->picture);
  1334. ff_free_picture_tables(&s->last_picture);
  1335. ff_mpeg_unref_picture(s->avctx, &s->last_picture);
  1336. av_frame_free(&s->last_picture.f);
  1337. ff_free_picture_tables(&s->current_picture);
  1338. ff_mpeg_unref_picture(s->avctx, &s->current_picture);
  1339. av_frame_free(&s->current_picture.f);
  1340. ff_free_picture_tables(&s->next_picture);
  1341. ff_mpeg_unref_picture(s->avctx, &s->next_picture);
  1342. av_frame_free(&s->next_picture.f);
  1343. ff_free_picture_tables(&s->new_picture);
  1344. ff_mpeg_unref_picture(s->avctx, &s->new_picture);
  1345. av_frame_free(&s->new_picture.f);
  1346. free_context_frame(s);
  1347. s->context_initialized = 0;
  1348. s->last_picture_ptr =
  1349. s->next_picture_ptr =
  1350. s->current_picture_ptr = NULL;
  1351. s->linesize = s->uvlinesize = 0;
  1352. }
  1353. static void release_unused_pictures(AVCodecContext *avctx, Picture *picture)
  1354. {
  1355. int i;
1356. /* release non-reference frames */
  1357. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  1358. if (!picture[i].reference)
  1359. ff_mpeg_unref_picture(avctx, &picture[i]);
  1360. }
  1361. }
  1362. static inline int pic_is_unused(Picture *pic)
  1363. {
  1364. if (!pic->f->buf[0])
  1365. return 1;
  1366. if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
  1367. return 1;
  1368. return 0;
  1369. }
  1370. static int find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
  1371. {
  1372. int i;
  1373. if (shared) {
  1374. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  1375. if (!picture[i].f->buf[0])
  1376. return i;
  1377. }
  1378. } else {
  1379. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  1380. if (pic_is_unused(&picture[i]))
  1381. return i;
  1382. }
  1383. }
  1384. av_log(avctx, AV_LOG_FATAL,
  1385. "Internal error, picture buffer overflow\n");
  1386. /* We could return -1, but the codec would crash trying to draw into a
  1387. * non-existing frame anyway. This is safer than waiting for a random crash.
  1388. * Also the return of this is never useful, an encoder must only allocate
  1389. * as much as allowed in the specification. This has no relationship to how
  1390. * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
  1391. * enough for such valid streams).
  1392. * Plus, a decoder has to check stream validity and remove frames if too
  1393. * many reference frames are around. Waiting for "OOM" is not correct at
  1394. * all. Similarly, missing reference frames have to be replaced by
  1395. * interpolated/MC frames, anything else is a bug in the codec ...
  1396. */
  1397. abort();
  1398. return -1;
  1399. }
  1400. int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
  1401. {
  1402. int ret = find_unused_picture(avctx, picture, shared);
  1403. if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
  1404. if (picture[ret].needs_realloc) {
  1405. picture[ret].needs_realloc = 0;
  1406. ff_free_picture_tables(&picture[ret]);
  1407. ff_mpeg_unref_picture(avctx, &picture[ret]);
  1408. }
  1409. }
  1410. return ret;
  1411. }
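/* gray_frame() fills all three planes of a frame with 0x80 (neutral grey),
 * analogous to the gray8()/gray16() debug helpers above. */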
  1412. static void gray_frame(AVFrame *frame)
  1413. {
  1414. int i, h_chroma_shift, v_chroma_shift;
  1415. av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
  1416. for(i=0; i<frame->height; i++)
  1417. memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
  1418. for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
  1419. memset(frame->data[1] + frame->linesize[1]*i,
  1420. 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
  1421. memset(frame->data[2] + frame->linesize[2]*i,
  1422. 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
  1423. }
  1424. }
  1425. /**
  1426. * generic function called after decoding
  1427. * the header and before a frame is decoded.
  1428. */
  1429. int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
  1430. {
  1431. int i, ret;
  1432. Picture *pic;
  1433. s->mb_skipped = 0;
  1434. if (!ff_thread_can_start_frame(avctx)) {
  1435. av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
  1436. return -1;
  1437. }
  1438. /* mark & release old frames */
  1439. if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
  1440. s->last_picture_ptr != s->next_picture_ptr &&
  1441. s->last_picture_ptr->f->buf[0]) {
  1442. ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
  1443. }
  1444. /* release forgotten pictures */
  1445. /* if (mpeg124/h263) */
  1446. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  1447. if (&s->picture[i] != s->last_picture_ptr &&
  1448. &s->picture[i] != s->next_picture_ptr &&
  1449. s->picture[i].reference && !s->picture[i].needs_realloc) {
  1450. if (!(avctx->active_thread_type & FF_THREAD_FRAME))
  1451. av_log(avctx, AV_LOG_ERROR,
  1452. "releasing zombie picture\n");
  1453. ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
  1454. }
  1455. }
  1456. ff_mpeg_unref_picture(s->avctx, &s->current_picture);
  1457. release_unused_pictures(s->avctx, s->picture);
  1458. if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1459. // we already have an unused image
  1460. // (maybe it was set before reading the header)
  1461. pic = s->current_picture_ptr;
  1462. } else {
  1463. i = ff_find_unused_picture(s->avctx, s->picture, 0);
  1464. if (i < 0) {
  1465. av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
  1466. return i;
  1467. }
  1468. pic = &s->picture[i];
  1469. }
  1470. pic->reference = 0;
  1471. if (!s->droppable) {
  1472. if (s->pict_type != AV_PICTURE_TYPE_B)
  1473. pic->reference = 3;
  1474. }
  1475. pic->f->coded_picture_number = s->coded_picture_number++;
  1476. if (ff_alloc_picture(s, pic, 0) < 0)
  1477. return -1;
  1478. s->current_picture_ptr = pic;
  1479. // FIXME use only the vars from current_pic
  1480. s->current_picture_ptr->f->top_field_first = s->top_field_first;
  1481. if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
  1482. s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
  1483. if (s->picture_structure != PICT_FRAME)
  1484. s->current_picture_ptr->f->top_field_first =
  1485. (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
  1486. }
  1487. s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
  1488. !s->progressive_sequence;
  1489. s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
  1490. s->current_picture_ptr->f->pict_type = s->pict_type;
1491. // if (s->avctx->flags & CODEC_FLAG_QSCALE)
  1492. // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
  1493. s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
  1494. if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
  1495. s->current_picture_ptr)) < 0)
  1496. return ret;
  1497. if (s->pict_type != AV_PICTURE_TYPE_B) {
  1498. s->last_picture_ptr = s->next_picture_ptr;
  1499. if (!s->droppable)
  1500. s->next_picture_ptr = s->current_picture_ptr;
  1501. }
  1502. ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
  1503. s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
  1504. s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
  1505. s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
  1506. s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
  1507. s->pict_type, s->droppable);
  1508. if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
  1509. (s->pict_type != AV_PICTURE_TYPE_I ||
  1510. s->picture_structure != PICT_FRAME)) {
  1511. int h_chroma_shift, v_chroma_shift;
  1512. av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
  1513. &h_chroma_shift, &v_chroma_shift);
  1514. if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
  1515. av_log(avctx, AV_LOG_DEBUG,
  1516. "allocating dummy last picture for B frame\n");
  1517. else if (s->pict_type != AV_PICTURE_TYPE_I)
  1518. av_log(avctx, AV_LOG_ERROR,
  1519. "warning: first frame is no keyframe\n");
  1520. else if (s->picture_structure != PICT_FRAME)
  1521. av_log(avctx, AV_LOG_DEBUG,
  1522. "allocate dummy last picture for field based first keyframe\n");
  1523. /* Allocate a dummy frame */
  1524. i = ff_find_unused_picture(s->avctx, s->picture, 0);
  1525. if (i < 0) {
  1526. av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
  1527. return i;
  1528. }
  1529. s->last_picture_ptr = &s->picture[i];
  1530. s->last_picture_ptr->reference = 3;
  1531. s->last_picture_ptr->f->key_frame = 0;
  1532. s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
  1533. if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
  1534. s->last_picture_ptr = NULL;
  1535. return -1;
  1536. }
  1537. if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
  1538. for(i=0; i<avctx->height; i++)
  1539. memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
  1540. 0x80, avctx->width);
  1541. if (s->last_picture_ptr->f->data[2]) {
  1542. for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
  1543. memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
  1544. 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
  1545. memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
  1546. 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
  1547. }
  1548. }
  1549. if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
  1550. for(i=0; i<avctx->height; i++)
  1551. memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
  1552. }
  1553. }
  1554. ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
  1555. ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
  1556. }
  1557. if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
  1558. s->pict_type == AV_PICTURE_TYPE_B) {
  1559. /* Allocate a dummy frame */
  1560. i = ff_find_unused_picture(s->avctx, s->picture, 0);
  1561. if (i < 0) {
  1562. av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
  1563. return i;
  1564. }
  1565. s->next_picture_ptr = &s->picture[i];
  1566. s->next_picture_ptr->reference = 3;
  1567. s->next_picture_ptr->f->key_frame = 0;
  1568. s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
  1569. if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
  1570. s->next_picture_ptr = NULL;
  1571. return -1;
  1572. }
  1573. ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
  1574. ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
  1575. }
  1576. #if 0 // BUFREF-FIXME
  1577. memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
  1578. memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
  1579. #endif
  1580. if (s->last_picture_ptr) {
  1581. ff_mpeg_unref_picture(s->avctx, &s->last_picture);
  1582. if (s->last_picture_ptr->f->buf[0] &&
  1583. (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
  1584. s->last_picture_ptr)) < 0)
  1585. return ret;
  1586. }
  1587. if (s->next_picture_ptr) {
  1588. ff_mpeg_unref_picture(s->avctx, &s->next_picture);
  1589. if (s->next_picture_ptr->f->buf[0] &&
  1590. (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
  1591. s->next_picture_ptr)) < 0)
  1592. return ret;
  1593. }
  1594. av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
  1595. s->last_picture_ptr->f->buf[0]));
  1596. if (s->picture_structure!= PICT_FRAME) {
  1597. int i;
  1598. for (i = 0; i < 4; i++) {
  1599. if (s->picture_structure == PICT_BOTTOM_FIELD) {
  1600. s->current_picture.f->data[i] +=
  1601. s->current_picture.f->linesize[i];
  1602. }
  1603. s->current_picture.f->linesize[i] *= 2;
  1604. s->last_picture.f->linesize[i] *= 2;
  1605. s->next_picture.f->linesize[i] *= 2;
  1606. }
  1607. }
1608. /* set the dequantizer; we can't do it during init, as it may change
1609. * for MPEG-4, and we can't do it in the header decode, as init is
1610. * not called for MPEG-4 at that point yet */
  1611. if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
  1612. s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
  1613. s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
  1614. } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
  1615. s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
  1616. s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
  1617. } else {
  1618. s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
  1619. s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
  1620. }
  1621. if (s->avctx->debug & FF_DEBUG_NOMC) {
  1622. gray_frame(s->current_picture_ptr->f);
  1623. }
  1624. return 0;
  1625. }
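/* Rough call-order sketch (illustrative only; the real loop lives in the
 * per-codec decoders and error handling is omitted):
 *
 *     if (ff_mpv_frame_start(s, avctx) < 0)
 *         return -1;
 *     while (more_macroblocks) {            // hypothetical loop condition
 *         ff_init_block_index(s);
 *         // bitstream parsing fills s->mv, s->mv_type, s->mb_intra, block[]
 *         ff_mpv_decode_mb(s, block);
 *     }
 *     ff_mpv_frame_end(s);
 */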
  1626. /* called after a frame has been decoded. */
  1627. void ff_mpv_frame_end(MpegEncContext *s)
  1628. {
  1629. emms_c();
  1630. if (s->current_picture.reference)
  1631. ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
  1632. }
  1633. #if FF_API_VISMV
  1634. static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
  1635. {
  1636. if(*sx > *ex)
  1637. return clip_line(ex, ey, sx, sy, maxx);
  1638. if (*sx < 0) {
  1639. if (*ex < 0)
  1640. return 1;
  1641. *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
  1642. *sx = 0;
  1643. }
  1644. if (*ex > maxx) {
  1645. if (*sx > maxx)
  1646. return 1;
  1647. *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
  1648. *ex = maxx;
  1649. }
  1650. return 0;
  1651. }
  1652. /**
  1653. * Draw a line from (ex, ey) -> (sx, sy).
  1654. * @param w width of the image
  1655. * @param h height of the image
  1656. * @param stride stride/linesize of the image
1657. * @param color color of the line
  1658. */
  1659. static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
  1660. int w, int h, int stride, int color)
  1661. {
  1662. int x, y, fr, f;
  1663. if (clip_line(&sx, &sy, &ex, &ey, w - 1))
  1664. return;
  1665. if (clip_line(&sy, &sx, &ey, &ex, h - 1))
  1666. return;
  1667. sx = av_clip(sx, 0, w - 1);
  1668. sy = av_clip(sy, 0, h - 1);
  1669. ex = av_clip(ex, 0, w - 1);
  1670. ey = av_clip(ey, 0, h - 1);
  1671. buf[sy * stride + sx] += color;
  1672. if (FFABS(ex - sx) > FFABS(ey - sy)) {
  1673. if (sx > ex) {
  1674. FFSWAP(int, sx, ex);
  1675. FFSWAP(int, sy, ey);
  1676. }
  1677. buf += sx + sy * stride;
  1678. ex -= sx;
  1679. f = ((ey - sy) << 16) / ex;
  1680. for (x = 0; x <= ex; x++) {
  1681. y = (x * f) >> 16;
  1682. fr = (x * f) & 0xFFFF;
  1683. buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
  1684. if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
  1685. }
  1686. } else {
  1687. if (sy > ey) {
  1688. FFSWAP(int, sx, ex);
  1689. FFSWAP(int, sy, ey);
  1690. }
  1691. buf += sx + sy * stride;
  1692. ey -= sy;
  1693. if (ey)
  1694. f = ((ex - sx) << 16) / ey;
  1695. else
  1696. f = 0;
  1697. for(y= 0; y <= ey; y++){
  1698. x = (y*f) >> 16;
  1699. fr = (y*f) & 0xFFFF;
  1700. buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
  1701. if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
  1702. }
  1703. }
  1704. }
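/* Note on the fixed-point math above (worked example): f is a 16.16
 * fixed-point slope. For a mostly horizontal line from (0,0) to (10,3),
 * f = (3 << 16) / 10 = 19660; at x = 5, y = (5 * f) >> 16 = 1 and
 * fr = (5 * f) & 0xFFFF = 32764, so the "color" weight is split between
 * rows y and y + 1 in proportion to fr, which anti-aliases the line. */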
  1705. /**
  1706. * Draw an arrow from (ex, ey) -> (sx, sy).
  1707. * @param w width of the image
  1708. * @param h height of the image
  1709. * @param stride stride/linesize of the image
  1710. * @param color color of the arrow
  1711. */
  1712. static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
  1713. int ey, int w, int h, int stride, int color, int tail, int direction)
  1714. {
  1715. int dx,dy;
  1716. if (direction) {
  1717. FFSWAP(int, sx, ex);
  1718. FFSWAP(int, sy, ey);
  1719. }
  1720. sx = av_clip(sx, -100, w + 100);
  1721. sy = av_clip(sy, -100, h + 100);
  1722. ex = av_clip(ex, -100, w + 100);
  1723. ey = av_clip(ey, -100, h + 100);
  1724. dx = ex - sx;
  1725. dy = ey - sy;
  1726. if (dx * dx + dy * dy > 3 * 3) {
  1727. int rx = dx + dy;
  1728. int ry = -dx + dy;
  1729. int length = ff_sqrt((rx * rx + ry * ry) << 8);
  1730. // FIXME subpixel accuracy
  1731. rx = ROUNDED_DIV(rx * 3 << 4, length);
  1732. ry = ROUNDED_DIV(ry * 3 << 4, length);
  1733. if (tail) {
  1734. rx = -rx;
  1735. ry = -ry;
  1736. }
  1737. draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
  1738. draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
  1739. }
  1740. draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
  1741. }
  1742. #endif
  1743. static int add_mb(AVMotionVector *mb, uint32_t mb_type,
  1744. int dst_x, int dst_y,
  1745. int src_x, int src_y,
  1746. int direction)
  1747. {
  1748. mb->w = IS_8X8(mb_type) || IS_8X16(mb_type) ? 8 : 16;
  1749. mb->h = IS_8X8(mb_type) || IS_16X8(mb_type) ? 8 : 16;
  1750. mb->src_x = src_x;
  1751. mb->src_y = src_y;
  1752. mb->dst_x = dst_x;
  1753. mb->dst_y = dst_y;
  1754. mb->source = direction ? 1 : -1;
  1755. mb->flags = 0; // XXX: does mb_type contain extra information that could be exported here?
  1756. return 1;
  1757. }
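/* Example of the exported AVMotionVector (illustrative, assuming
 * quarter_sample = 0 so shift = 1 in the caller below): a forward-predicted
 * 16x16 MB at MB position (2, 1) with a half-pel vector of (-8, +4) gives
 * w = h = 16, dst_x/dst_y = 40/24 (the block centre), src_x/src_y = 36/26,
 * and source = -1, i.e. the reference is a past frame; direction = 1
 * (backward prediction) would give source = +1 instead. */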
  1758. /**
  1759. * Print debugging info for the given picture.
  1760. */
  1761. void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
  1762. uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
  1763. int *low_delay,
  1764. int mb_width, int mb_height, int mb_stride, int quarter_sample)
  1765. {
  1766. if ((avctx->flags2 & CODEC_FLAG2_EXPORT_MVS) && mbtype_table && motion_val[0]) {
  1767. const int shift = 1 + quarter_sample;
  1768. const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
  1769. const int mv_stride = (mb_width << mv_sample_log2) +
  1770. (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
  1771. int mb_x, mb_y, mbcount = 0;
1772. /* size is width * height * 2 * 4 where 2 is for directions and 4 is
1773. * for the maximum number of MVs per MB (4 MVs in the IS_8x8 case) */
  1774. AVMotionVector *mvs = av_malloc_array(mb_width * mb_height, 2 * 4 * sizeof(AVMotionVector));
  1775. if (!mvs)
  1776. return;
  1777. for (mb_y = 0; mb_y < mb_height; mb_y++) {
  1778. for (mb_x = 0; mb_x < mb_width; mb_x++) {
  1779. int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride];
  1780. for (direction = 0; direction < 2; direction++) {
  1781. if (!USES_LIST(mb_type, direction))
  1782. continue;
  1783. if (IS_8X8(mb_type)) {
  1784. for (i = 0; i < 4; i++) {
  1785. int sx = mb_x * 16 + 4 + 8 * (i & 1);
  1786. int sy = mb_y * 16 + 4 + 8 * (i >> 1);
  1787. int xy = (mb_x * 2 + (i & 1) +
  1788. (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
  1789. int mx = (motion_val[direction][xy][0] >> shift) + sx;
  1790. int my = (motion_val[direction][xy][1] >> shift) + sy;
  1791. mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
  1792. }
  1793. } else if (IS_16X8(mb_type)) {
  1794. for (i = 0; i < 2; i++) {
  1795. int sx = mb_x * 16 + 8;
  1796. int sy = mb_y * 16 + 4 + 8 * i;
  1797. int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
  1798. int mx = (motion_val[direction][xy][0] >> shift);
  1799. int my = (motion_val[direction][xy][1] >> shift);
  1800. if (IS_INTERLACED(mb_type))
  1801. my *= 2;
  1802. mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
  1803. }
  1804. } else if (IS_8X16(mb_type)) {
  1805. for (i = 0; i < 2; i++) {
  1806. int sx = mb_x * 16 + 4 + 8 * i;
  1807. int sy = mb_y * 16 + 8;
  1808. int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
  1809. int mx = motion_val[direction][xy][0] >> shift;
  1810. int my = motion_val[direction][xy][1] >> shift;
  1811. if (IS_INTERLACED(mb_type))
  1812. my *= 2;
  1813. mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
  1814. }
  1815. } else {
  1816. int sx = mb_x * 16 + 8;
  1817. int sy = mb_y * 16 + 8;
  1818. int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
  1819. int mx = (motion_val[direction][xy][0]>>shift) + sx;
  1820. int my = (motion_val[direction][xy][1]>>shift) + sy;
  1821. mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
  1822. }
  1823. }
  1824. }
  1825. }
  1826. if (mbcount) {
  1827. AVFrameSideData *sd;
  1828. av_log(avctx, AV_LOG_DEBUG, "Adding %d MVs info to frame %d\n", mbcount, avctx->frame_number);
  1829. sd = av_frame_new_side_data(pict, AV_FRAME_DATA_MOTION_VECTORS, mbcount * sizeof(AVMotionVector));
  1830. if (!sd) {
  1831. av_freep(&mvs);
  1832. return;
  1833. }
  1834. memcpy(sd->data, mvs, mbcount * sizeof(AVMotionVector));
  1835. }
  1836. av_freep(&mvs);
  1837. }
  1838. /* TODO: export all the following to make them accessible for users (and filters) */
  1839. if (avctx->hwaccel || !mbtype_table
  1840. || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
  1841. return;
  1842. if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
  1843. int x,y;
  1844. av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
  1845. av_get_picture_type_char(pict->pict_type));
  1846. for (y = 0; y < mb_height; y++) {
  1847. for (x = 0; x < mb_width; x++) {
  1848. if (avctx->debug & FF_DEBUG_SKIP) {
  1849. int count = mbskip_table ? mbskip_table[x + y * mb_stride] : 0;
  1850. if (count > 9)
  1851. count = 9;
  1852. av_log(avctx, AV_LOG_DEBUG, "%1d", count);
  1853. }
  1854. if (avctx->debug & FF_DEBUG_QP) {
  1855. av_log(avctx, AV_LOG_DEBUG, "%2d",
  1856. qscale_table[x + y * mb_stride]);
  1857. }
  1858. if (avctx->debug & FF_DEBUG_MB_TYPE) {
  1859. int mb_type = mbtype_table[x + y * mb_stride];
  1860. // Type & MV direction
  1861. if (IS_PCM(mb_type))
  1862. av_log(avctx, AV_LOG_DEBUG, "P");
  1863. else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
  1864. av_log(avctx, AV_LOG_DEBUG, "A");
  1865. else if (IS_INTRA4x4(mb_type))
  1866. av_log(avctx, AV_LOG_DEBUG, "i");
  1867. else if (IS_INTRA16x16(mb_type))
  1868. av_log(avctx, AV_LOG_DEBUG, "I");
  1869. else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
  1870. av_log(avctx, AV_LOG_DEBUG, "d");
  1871. else if (IS_DIRECT(mb_type))
  1872. av_log(avctx, AV_LOG_DEBUG, "D");
  1873. else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
  1874. av_log(avctx, AV_LOG_DEBUG, "g");
  1875. else if (IS_GMC(mb_type))
  1876. av_log(avctx, AV_LOG_DEBUG, "G");
  1877. else if (IS_SKIP(mb_type))
  1878. av_log(avctx, AV_LOG_DEBUG, "S");
  1879. else if (!USES_LIST(mb_type, 1))
  1880. av_log(avctx, AV_LOG_DEBUG, ">");
  1881. else if (!USES_LIST(mb_type, 0))
  1882. av_log(avctx, AV_LOG_DEBUG, "<");
  1883. else {
  1884. av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
  1885. av_log(avctx, AV_LOG_DEBUG, "X");
  1886. }
  1887. // segmentation
  1888. if (IS_8X8(mb_type))
  1889. av_log(avctx, AV_LOG_DEBUG, "+");
  1890. else if (IS_16X8(mb_type))
  1891. av_log(avctx, AV_LOG_DEBUG, "-");
  1892. else if (IS_8X16(mb_type))
  1893. av_log(avctx, AV_LOG_DEBUG, "|");
  1894. else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
  1895. av_log(avctx, AV_LOG_DEBUG, " ");
  1896. else
  1897. av_log(avctx, AV_LOG_DEBUG, "?");
  1898. if (IS_INTERLACED(mb_type))
  1899. av_log(avctx, AV_LOG_DEBUG, "=");
  1900. else
  1901. av_log(avctx, AV_LOG_DEBUG, " ");
  1902. }
  1903. }
  1904. av_log(avctx, AV_LOG_DEBUG, "\n");
  1905. }
  1906. }
  1907. if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
  1908. (avctx->debug_mv)) {
  1909. int mb_y;
  1910. int i;
  1911. int h_chroma_shift, v_chroma_shift, block_height;
  1912. #if FF_API_VISMV
  1913. const int shift = 1 + quarter_sample;
  1914. uint8_t *ptr;
  1915. const int width = avctx->width;
  1916. const int height = avctx->height;
  1917. #endif
  1918. const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
  1919. const int mv_stride = (mb_width << mv_sample_log2) +
  1920. (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
  1921. *low_delay = 0; // needed to see the vectors without trashing the buffers
  1922. avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
  1923. av_frame_make_writable(pict);
  1924. pict->opaque = NULL;
  1925. #if FF_API_VISMV
  1926. ptr = pict->data[0];
  1927. #endif
  1928. block_height = 16 >> v_chroma_shift;
  1929. for (mb_y = 0; mb_y < mb_height; mb_y++) {
  1930. int mb_x;
  1931. for (mb_x = 0; mb_x < mb_width; mb_x++) {
  1932. const int mb_index = mb_x + mb_y * mb_stride;
  1933. #if FF_API_VISMV
  1934. if ((avctx->debug_mv) && motion_val[0]) {
  1935. int type;
  1936. for (type = 0; type < 3; type++) {
  1937. int direction = 0;
  1938. switch (type) {
  1939. case 0:
  1940. if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
  1941. (pict->pict_type!= AV_PICTURE_TYPE_P))
  1942. continue;
  1943. direction = 0;
  1944. break;
  1945. case 1:
  1946. if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
  1947. (pict->pict_type!= AV_PICTURE_TYPE_B))
  1948. continue;
  1949. direction = 0;
  1950. break;
  1951. case 2:
  1952. if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
  1953. (pict->pict_type!= AV_PICTURE_TYPE_B))
  1954. continue;
  1955. direction = 1;
  1956. break;
  1957. }
  1958. if (!USES_LIST(mbtype_table[mb_index], direction))
  1959. continue;
  1960. if (IS_8X8(mbtype_table[mb_index])) {
  1961. int i;
  1962. for (i = 0; i < 4; i++) {
  1963. int sx = mb_x * 16 + 4 + 8 * (i & 1);
  1964. int sy = mb_y * 16 + 4 + 8 * (i >> 1);
  1965. int xy = (mb_x * 2 + (i & 1) +
  1966. (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
  1967. int mx = (motion_val[direction][xy][0] >> shift) + sx;
  1968. int my = (motion_val[direction][xy][1] >> shift) + sy;
  1969. draw_arrow(ptr, sx, sy, mx, my, width,
  1970. height, pict->linesize[0], 100, 0, direction);
  1971. }
  1972. } else if (IS_16X8(mbtype_table[mb_index])) {
  1973. int i;
  1974. for (i = 0; i < 2; i++) {
  1975. int sx = mb_x * 16 + 8;
  1976. int sy = mb_y * 16 + 4 + 8 * i;
  1977. int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
  1978. int mx = (motion_val[direction][xy][0] >> shift);
  1979. int my = (motion_val[direction][xy][1] >> shift);
  1980. if (IS_INTERLACED(mbtype_table[mb_index]))
  1981. my *= 2;
  1982. draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
  1983. height, pict->linesize[0], 100, 0, direction);
  1984. }
  1985. } else if (IS_8X16(mbtype_table[mb_index])) {
  1986. int i;
  1987. for (i = 0; i < 2; i++) {
  1988. int sx = mb_x * 16 + 4 + 8 * i;
  1989. int sy = mb_y * 16 + 8;
  1990. int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
  1991. int mx = motion_val[direction][xy][0] >> shift;
  1992. int my = motion_val[direction][xy][1] >> shift;
  1993. if (IS_INTERLACED(mbtype_table[mb_index]))
  1994. my *= 2;
  1995. draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
  1996. height, pict->linesize[0], 100, 0, direction);
  1997. }
  1998. } else {
  1999. int sx= mb_x * 16 + 8;
  2000. int sy= mb_y * 16 + 8;
  2001. int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
  2002. int mx= (motion_val[direction][xy][0]>>shift) + sx;
  2003. int my= (motion_val[direction][xy][1]>>shift) + sy;
  2004. draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
  2005. }
  2006. }
  2007. }
  2008. #endif
  2009. if ((avctx->debug & FF_DEBUG_VIS_QP)) {
  2010. uint64_t c = (qscale_table[mb_index] * 128 / 31) *
  2011. 0x0101010101010101ULL;
  2012. int y;
  2013. for (y = 0; y < block_height; y++) {
  2014. *(uint64_t *)(pict->data[1] + 8 * mb_x +
  2015. (block_height * mb_y + y) *
  2016. pict->linesize[1]) = c;
  2017. *(uint64_t *)(pict->data[2] + 8 * mb_x +
  2018. (block_height * mb_y + y) *
  2019. pict->linesize[2]) = c;
  2020. }
  2021. }
  2022. if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
  2023. motion_val[0]) {
  2024. int mb_type = mbtype_table[mb_index];
  2025. uint64_t u,v;
  2026. int y;
  2027. #define COLOR(theta, r) \
  2028. u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
  2029. v = (int)(128 + r * sin(theta * 3.141592 / 180));
  2030. u = v = 128;
  2031. if (IS_PCM(mb_type)) {
  2032. COLOR(120, 48)
  2033. } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
  2034. IS_INTRA16x16(mb_type)) {
  2035. COLOR(30, 48)
  2036. } else if (IS_INTRA4x4(mb_type)) {
  2037. COLOR(90, 48)
  2038. } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
  2039. // COLOR(120, 48)
  2040. } else if (IS_DIRECT(mb_type)) {
  2041. COLOR(150, 48)
  2042. } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
  2043. COLOR(170, 48)
  2044. } else if (IS_GMC(mb_type)) {
  2045. COLOR(190, 48)
  2046. } else if (IS_SKIP(mb_type)) {
  2047. // COLOR(180, 48)
  2048. } else if (!USES_LIST(mb_type, 1)) {
  2049. COLOR(240, 48)
  2050. } else if (!USES_LIST(mb_type, 0)) {
  2051. COLOR(0, 48)
  2052. } else {
  2053. av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
  2054. COLOR(300,48)
  2055. }
  2056. u *= 0x0101010101010101ULL;
  2057. v *= 0x0101010101010101ULL;
  2058. for (y = 0; y < block_height; y++) {
  2059. *(uint64_t *)(pict->data[1] + 8 * mb_x +
  2060. (block_height * mb_y + y) * pict->linesize[1]) = u;
  2061. *(uint64_t *)(pict->data[2] + 8 * mb_x +
  2062. (block_height * mb_y + y) * pict->linesize[2]) = v;
  2063. }
  2064. // segmentation
  2065. if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
  2066. *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
  2067. (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
  2068. *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
  2069. (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
  2070. }
  2071. if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
  2072. for (y = 0; y < 16; y++)
  2073. pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
  2074. pict->linesize[0]] ^= 0x80;
  2075. }
  2076. if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
  2077. int dm = 1 << (mv_sample_log2 - 2);
  2078. for (i = 0; i < 4; i++) {
  2079. int sx = mb_x * 16 + 8 * (i & 1);
  2080. int sy = mb_y * 16 + 8 * (i >> 1);
  2081. int xy = (mb_x * 2 + (i & 1) +
  2082. (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
  2083. // FIXME bidir
  2084. int32_t *mv = (int32_t *) &motion_val[0][xy];
  2085. if (mv[0] != mv[dm] ||
  2086. mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
  2087. for (y = 0; y < 8; y++)
  2088. pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
  2089. if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
  2090. *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
  2091. pict->linesize[0]) ^= 0x8080808080808080ULL;
  2092. }
  2093. }
  2094. if (IS_INTERLACED(mb_type) &&
  2095. avctx->codec->id == AV_CODEC_ID_H264) {
  2096. // hmm
  2097. }
  2098. }
  2099. if (mbskip_table)
  2100. mbskip_table[mb_index] = 0;
  2101. }
  2102. }
  2103. }
  2104. }
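/* Consumer-side sketch (illustrative, error handling omitted): with
 * AVCodecContext.flags2 |= CODEC_FLAG2_EXPORT_MVS set before opening the
 * decoder, the vectors added above can be read back from each decoded frame:
 *
 *     AVFrameSideData *sd =
 *         av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
 *     if (sd) {
 *         const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
 *         int i, nb_mvs = sd->size / sizeof(*mvs);
 *         for (i = 0; i < nb_mvs; i++)
 *             printf("%d,%d -> %d,%d (source %d)\n",
 *                    mvs[i].src_x, mvs[i].src_y,
 *                    mvs[i].dst_x, mvs[i].dst_y, mvs[i].source);
 *     }
 */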
  2105. void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
  2106. {
  2107. ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
  2108. p->qscale_table, p->motion_val, &s->low_delay,
  2109. s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
  2110. }
  2111. int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
  2112. {
  2113. AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
  2114. int offset = 2*s->mb_stride + 1;
  2115. if(!ref)
  2116. return AVERROR(ENOMEM);
  2117. av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
  2118. ref->size -= offset;
  2119. ref->data += offset;
  2120. return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
  2121. }
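/* Caller-side sketch (illustrative): decoders typically call this once per
 * output frame, e.g.
 *
 *     ff_mpv_export_qp_table(s, frame, s->current_picture_ptr,
 *                            FF_QSCALE_TYPE_MPEG2);
 *
 * The FF_QSCALE_TYPE_* value tells consumers how to interpret the table;
 * the 2*mb_stride + 1 offset presumably skips the buffer's edge/padding
 * rows so the exported table starts at the first real macroblock. */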
  2122. static inline int hpel_motion_lowres(MpegEncContext *s,
  2123. uint8_t *dest, uint8_t *src,
  2124. int field_based, int field_select,
  2125. int src_x, int src_y,
  2126. int width, int height, ptrdiff_t stride,
  2127. int h_edge_pos, int v_edge_pos,
  2128. int w, int h, h264_chroma_mc_func *pix_op,
  2129. int motion_x, int motion_y)
  2130. {
  2131. const int lowres = s->avctx->lowres;
  2132. const int op_index = FFMIN(lowres, 3);
  2133. const int s_mask = (2 << lowres) - 1;
  2134. int emu = 0;
  2135. int sx, sy;
  2136. if (s->quarter_sample) {
  2137. motion_x /= 2;
  2138. motion_y /= 2;
  2139. }
  2140. sx = motion_x & s_mask;
  2141. sy = motion_y & s_mask;
  2142. src_x += motion_x >> lowres + 1;
  2143. src_y += motion_y >> lowres + 1;
  2144. src += src_y * stride + src_x;
  2145. if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
  2146. (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
  2147. s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
  2148. s->linesize, s->linesize,
  2149. w + 1, (h + 1) << field_based,
  2150. src_x, src_y << field_based,
  2151. h_edge_pos, v_edge_pos);
  2152. src = s->edge_emu_buffer;
  2153. emu = 1;
  2154. }
  2155. sx = (sx << 2) >> lowres;
  2156. sy = (sy << 2) >> lowres;
  2157. if (field_select)
  2158. src += s->linesize;
  2159. pix_op[op_index](dest, src, stride, h, sx, sy);
  2160. return emu;
  2161. }
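/* Worked example (assuming lowres = 1, i.e. half-size output, and a
 * half-pel motion_x of 5): s_mask = (2 << 1) - 1 = 3, so sx = 5 & 3 = 1
 * keeps the sub-pel phase while src_x advances by 5 >> 2 = 1 lowres pixel.
 * The later sx = (sx << 2) >> lowres = 2 rescales that phase to the
 * 1/8-pel units expected by the h264_chroma_mc_func used as pix_op here. */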
  2162. /* apply one mpeg motion vector to the three components */
  2163. static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
  2164. uint8_t *dest_y,
  2165. uint8_t *dest_cb,
  2166. uint8_t *dest_cr,
  2167. int field_based,
  2168. int bottom_field,
  2169. int field_select,
  2170. uint8_t **ref_picture,
  2171. h264_chroma_mc_func *pix_op,
  2172. int motion_x, int motion_y,
  2173. int h, int mb_y)
  2174. {
  2175. uint8_t *ptr_y, *ptr_cb, *ptr_cr;
  2176. int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
  2177. ptrdiff_t uvlinesize, linesize;
  2178. const int lowres = s->avctx->lowres;
  2179. const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
  2180. const int block_s = 8>>lowres;
  2181. const int s_mask = (2 << lowres) - 1;
  2182. const int h_edge_pos = s->h_edge_pos >> lowres;
  2183. const int v_edge_pos = s->v_edge_pos >> lowres;
  2184. linesize = s->current_picture.f->linesize[0] << field_based;
  2185. uvlinesize = s->current_picture.f->linesize[1] << field_based;
  2186. // FIXME obviously not perfect but qpel will not work in lowres anyway
  2187. if (s->quarter_sample) {
  2188. motion_x /= 2;
  2189. motion_y /= 2;
  2190. }
  2191. if(field_based){
  2192. motion_y += (bottom_field - field_select)*((1 << lowres)-1);
  2193. }
  2194. sx = motion_x & s_mask;
  2195. sy = motion_y & s_mask;
  2196. src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
  2197. src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
  2198. if (s->out_format == FMT_H263) {
  2199. uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
  2200. uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
  2201. uvsrc_x = src_x >> 1;
  2202. uvsrc_y = src_y >> 1;
  2203. } else if (s->out_format == FMT_H261) {
2204. // even chroma MVs are full-pel in H.261
  2205. mx = motion_x / 4;
  2206. my = motion_y / 4;
  2207. uvsx = (2 * mx) & s_mask;
  2208. uvsy = (2 * my) & s_mask;
  2209. uvsrc_x = s->mb_x * block_s + (mx >> lowres);
  2210. uvsrc_y = mb_y * block_s + (my >> lowres);
  2211. } else {
  2212. if(s->chroma_y_shift){
  2213. mx = motion_x / 2;
  2214. my = motion_y / 2;
  2215. uvsx = mx & s_mask;
  2216. uvsy = my & s_mask;
  2217. uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
  2218. uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
  2219. } else {
  2220. if(s->chroma_x_shift){
  2221. //Chroma422
  2222. mx = motion_x / 2;
  2223. uvsx = mx & s_mask;
  2224. uvsy = motion_y & s_mask;
  2225. uvsrc_y = src_y;
  2226. uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
  2227. } else {
  2228. //Chroma444
  2229. uvsx = motion_x & s_mask;
  2230. uvsy = motion_y & s_mask;
  2231. uvsrc_x = src_x;
  2232. uvsrc_y = src_y;
  2233. }
  2234. }
  2235. }
  2236. ptr_y = ref_picture[0] + src_y * linesize + src_x;
  2237. ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
  2238. ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
  2239. if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
  2240. (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
  2241. s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
  2242. linesize >> field_based, linesize >> field_based,
  2243. 17, 17 + field_based,
  2244. src_x, src_y << field_based, h_edge_pos,
  2245. v_edge_pos);
  2246. ptr_y = s->edge_emu_buffer;
  2247. if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
  2248. uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
  2249. uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
  2250. s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
  2251. uvlinesize >> field_based, uvlinesize >> field_based,
  2252. 9, 9 + field_based,
  2253. uvsrc_x, uvsrc_y << field_based,
  2254. h_edge_pos >> 1, v_edge_pos >> 1);
  2255. s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
  2256. uvlinesize >> field_based,uvlinesize >> field_based,
  2257. 9, 9 + field_based,
  2258. uvsrc_x, uvsrc_y << field_based,
  2259. h_edge_pos >> 1, v_edge_pos >> 1);
  2260. ptr_cb = ubuf;
  2261. ptr_cr = vbuf;
  2262. }
  2263. }
  2264. // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
  2265. if (bottom_field) {
  2266. dest_y += s->linesize;
  2267. dest_cb += s->uvlinesize;
  2268. dest_cr += s->uvlinesize;
  2269. }
  2270. if (field_select) {
  2271. ptr_y += s->linesize;
  2272. ptr_cb += s->uvlinesize;
  2273. ptr_cr += s->uvlinesize;
  2274. }
  2275. sx = (sx << 2) >> lowres;
  2276. sy = (sy << 2) >> lowres;
  2277. pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
  2278. if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
  2279. int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
  2280. uvsx = (uvsx << 2) >> lowres;
  2281. uvsy = (uvsy << 2) >> lowres;
  2282. if (hc) {
  2283. pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
  2284. pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
  2285. }
  2286. }
  2287. // FIXME h261 lowres loop filter
  2288. }
  2289. static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
  2290. uint8_t *dest_cb, uint8_t *dest_cr,
  2291. uint8_t **ref_picture,
  2292. h264_chroma_mc_func * pix_op,
  2293. int mx, int my)
  2294. {
  2295. const int lowres = s->avctx->lowres;
  2296. const int op_index = FFMIN(lowres, 3);
  2297. const int block_s = 8 >> lowres;
  2298. const int s_mask = (2 << lowres) - 1;
  2299. const int h_edge_pos = s->h_edge_pos >> lowres + 1;
  2300. const int v_edge_pos = s->v_edge_pos >> lowres + 1;
  2301. int emu = 0, src_x, src_y, sx, sy;
  2302. ptrdiff_t offset;
  2303. uint8_t *ptr;
  2304. if (s->quarter_sample) {
  2305. mx /= 2;
  2306. my /= 2;
  2307. }
  2308. /* In case of 8X8, we construct a single chroma motion vector
  2309. with a special rounding */
  2310. mx = ff_h263_round_chroma(mx);
  2311. my = ff_h263_round_chroma(my);
  2312. sx = mx & s_mask;
  2313. sy = my & s_mask;
  2314. src_x = s->mb_x * block_s + (mx >> lowres + 1);
  2315. src_y = s->mb_y * block_s + (my >> lowres + 1);
  2316. offset = src_y * s->uvlinesize + src_x;
  2317. ptr = ref_picture[1] + offset;
  2318. if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
  2319. (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
  2320. s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
  2321. s->uvlinesize, s->uvlinesize,
  2322. 9, 9,
  2323. src_x, src_y, h_edge_pos, v_edge_pos);
  2324. ptr = s->edge_emu_buffer;
  2325. emu = 1;
  2326. }
  2327. sx = (sx << 2) >> lowres;
  2328. sy = (sy << 2) >> lowres;
  2329. pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
  2330. ptr = ref_picture[2] + offset;
  2331. if (emu) {
  2332. s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
  2333. s->uvlinesize, s->uvlinesize,
  2334. 9, 9,
  2335. src_x, src_y, h_edge_pos, v_edge_pos);
  2336. ptr = s->edge_emu_buffer;
  2337. }
  2338. pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
  2339. }
  2340. /**
  2341. * motion compensation of a single macroblock
  2342. * @param s context
  2343. * @param dest_y luma destination pointer
  2344. * @param dest_cb chroma cb/u destination pointer
  2345. * @param dest_cr chroma cr/v destination pointer
  2346. * @param dir direction (0->forward, 1->backward)
  2347. * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
  2348. * @param pix_op halfpel motion compensation function (average or put normally)
  2349. * the motion vectors are taken from s->mv and the MV type from s->mv_type
  2350. */
  2351. static inline void MPV_motion_lowres(MpegEncContext *s,
  2352. uint8_t *dest_y, uint8_t *dest_cb,
  2353. uint8_t *dest_cr,
  2354. int dir, uint8_t **ref_picture,
  2355. h264_chroma_mc_func *pix_op)
  2356. {
  2357. int mx, my;
  2358. int mb_x, mb_y, i;
  2359. const int lowres = s->avctx->lowres;
  2360. const int block_s = 8 >>lowres;
  2361. mb_x = s->mb_x;
  2362. mb_y = s->mb_y;
  2363. switch (s->mv_type) {
  2364. case MV_TYPE_16X16:
  2365. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2366. 0, 0, 0,
  2367. ref_picture, pix_op,
  2368. s->mv[dir][0][0], s->mv[dir][0][1],
  2369. 2 * block_s, mb_y);
  2370. break;
  2371. case MV_TYPE_8X8:
  2372. mx = 0;
  2373. my = 0;
  2374. for (i = 0; i < 4; i++) {
  2375. hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
  2376. s->linesize) * block_s,
  2377. ref_picture[0], 0, 0,
  2378. (2 * mb_x + (i & 1)) * block_s,
  2379. (2 * mb_y + (i >> 1)) * block_s,
  2380. s->width, s->height, s->linesize,
  2381. s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
  2382. block_s, block_s, pix_op,
  2383. s->mv[dir][i][0], s->mv[dir][i][1]);
  2384. mx += s->mv[dir][i][0];
  2385. my += s->mv[dir][i][1];
  2386. }
  2387. if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY))
  2388. chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
  2389. pix_op, mx, my);
  2390. break;
  2391. case MV_TYPE_FIELD:
  2392. if (s->picture_structure == PICT_FRAME) {
  2393. /* top field */
  2394. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2395. 1, 0, s->field_select[dir][0],
  2396. ref_picture, pix_op,
  2397. s->mv[dir][0][0], s->mv[dir][0][1],
  2398. block_s, mb_y);
  2399. /* bottom field */
  2400. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2401. 1, 1, s->field_select[dir][1],
  2402. ref_picture, pix_op,
  2403. s->mv[dir][1][0], s->mv[dir][1][1],
  2404. block_s, mb_y);
  2405. } else {
  2406. if (s->picture_structure != s->field_select[dir][0] + 1 &&
  2407. s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
  2408. ref_picture = s->current_picture_ptr->f->data;
  2409. }
  2410. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2411. 0, 0, s->field_select[dir][0],
  2412. ref_picture, pix_op,
  2413. s->mv[dir][0][0],
  2414. s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
  2415. }
  2416. break;
  2417. case MV_TYPE_16X8:
  2418. for (i = 0; i < 2; i++) {
  2419. uint8_t **ref2picture;
  2420. if (s->picture_structure == s->field_select[dir][i] + 1 ||
  2421. s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
  2422. ref2picture = ref_picture;
  2423. } else {
  2424. ref2picture = s->current_picture_ptr->f->data;
  2425. }
  2426. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2427. 0, 0, s->field_select[dir][i],
  2428. ref2picture, pix_op,
  2429. s->mv[dir][i][0], s->mv[dir][i][1] +
  2430. 2 * block_s * i, block_s, mb_y >> 1);
  2431. dest_y += 2 * block_s * s->linesize;
  2432. dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
  2433. dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
  2434. }
  2435. break;
  2436. case MV_TYPE_DMV:
  2437. if (s->picture_structure == PICT_FRAME) {
  2438. for (i = 0; i < 2; i++) {
  2439. int j;
  2440. for (j = 0; j < 2; j++) {
  2441. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2442. 1, j, j ^ i,
  2443. ref_picture, pix_op,
  2444. s->mv[dir][2 * i + j][0],
  2445. s->mv[dir][2 * i + j][1],
  2446. block_s, mb_y);
  2447. }
  2448. pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
  2449. }
  2450. } else {
  2451. for (i = 0; i < 2; i++) {
  2452. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2453. 0, 0, s->picture_structure != i + 1,
  2454. ref_picture, pix_op,
  2455. s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
  2456. 2 * block_s, mb_y >> 1);
2457. // after the put, subsequent calls average into the same block
  2458. pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2459. // the opposite parity field is always in the same
2460. // frame if this is the second field
  2461. if (!s->first_field) {
  2462. ref_picture = s->current_picture_ptr->f->data;
  2463. }
  2464. }
  2465. }
  2466. break;
  2467. default:
  2468. av_assert2(0);
  2469. }
  2470. }
  2471. /**
  2472. * find the lowest MB row referenced in the MVs
  2473. */
  2474. int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
  2475. {
  2476. int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
  2477. int my, off, i, mvs;
  2478. if (s->picture_structure != PICT_FRAME || s->mcsel)
  2479. goto unhandled;
  2480. switch (s->mv_type) {
  2481. case MV_TYPE_16X16:
  2482. mvs = 1;
  2483. break;
  2484. case MV_TYPE_16X8:
  2485. mvs = 2;
  2486. break;
  2487. case MV_TYPE_8X8:
  2488. mvs = 4;
  2489. break;
  2490. default:
  2491. goto unhandled;
  2492. }
  2493. for (i = 0; i < mvs; i++) {
  2494. my = s->mv[dir][i][1];
  2495. my_max = FFMAX(my_max, my);
  2496. my_min = FFMIN(my_min, my);
  2497. }
  2498. off = ((FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;
  2499. return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
  2500. unhandled:
  2501. return s->mb_height-1;
  2502. }
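/* Example (illustrative): with half-pel vectors (quarter_sample = 0, so
 * qpel_shift = 1) and a largest vertical MV component of 35 half-pel units
 * (17.5 pixels), off = ((35 << 1) + 63) >> 6 = 2, i.e. motion compensation
 * for this MB may read up to two macroblock rows below the current one,
 * which is what the frame-threading code below has to wait for. */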
  2503. /* put block[] to dest[] */
  2504. static inline void put_dct(MpegEncContext *s,
  2505. int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
  2506. {
  2507. s->dct_unquantize_intra(s, block, i, qscale);
  2508. s->idsp.idct_put(dest, line_size, block);
  2509. }
  2510. /* add block[] to dest[] */
  2511. static inline void add_dct(MpegEncContext *s,
  2512. int16_t *block, int i, uint8_t *dest, int line_size)
  2513. {
  2514. if (s->block_last_index[i] >= 0) {
  2515. s->idsp.idct_add(dest, line_size, block);
  2516. }
  2517. }
  2518. static inline void add_dequant_dct(MpegEncContext *s,
  2519. int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
  2520. {
  2521. if (s->block_last_index[i] >= 0) {
  2522. s->dct_unquantize_inter(s, block, i, qscale);
  2523. s->idsp.idct_add(dest, line_size, block);
  2524. }
  2525. }
  2526. /**
  2527. * Clean dc, ac, coded_block for the current non-intra MB.
  2528. */
  2529. void ff_clean_intra_table_entries(MpegEncContext *s)
  2530. {
  2531. int wrap = s->b8_stride;
  2532. int xy = s->block_index[0];
  2533. s->dc_val[0][xy ] =
  2534. s->dc_val[0][xy + 1 ] =
  2535. s->dc_val[0][xy + wrap] =
  2536. s->dc_val[0][xy + 1 + wrap] = 1024;
  2537. /* ac pred */
  2538. memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
  2539. memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
  2540. if (s->msmpeg4_version>=3) {
  2541. s->coded_block[xy ] =
  2542. s->coded_block[xy + 1 ] =
  2543. s->coded_block[xy + wrap] =
  2544. s->coded_block[xy + 1 + wrap] = 0;
  2545. }
  2546. /* chroma */
  2547. wrap = s->mb_stride;
  2548. xy = s->mb_x + s->mb_y * wrap;
  2549. s->dc_val[1][xy] =
  2550. s->dc_val[2][xy] = 1024;
  2551. /* ac pred */
  2552. memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
  2553. memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
  2554. s->mbintra_table[xy]= 0;
  2555. }
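/* Background note: 1024 is the usual "no prediction yet" reset value for
 * the H.263/MPEG-4 style DC prediction, and each 8x8 block keeps 16
 * predictor values (its top row and left column of AC coefficients), so
 * the 32-element memsets above clear two horizontally adjacent luma
 * blocks at once while the chroma blocks are cleared individually. */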
  2556. /* generic function called after a macroblock has been parsed by the
  2557. decoder or after it has been encoded by the encoder.
  2558. Important variables used:
  2559. s->mb_intra : true if intra macroblock
  2560. s->mv_dir : motion vector direction
  2561. s->mv_type : motion vector type
  2562. s->mv : motion vector
  2563. s->interlaced_dct : true if interlaced dct used (mpeg2)
  2564. */
  2565. static av_always_inline
  2566. void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
  2567. int lowres_flag, int is_mpeg12)
  2568. {
  2569. const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
  2570. if (CONFIG_XVMC &&
  2571. s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
  2572. s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
  2573. return;
  2574. }
  2575. if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
  2576. /* print DCT coefficients */
  2577. int i,j;
  2578. av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
  2579. for(i=0; i<6; i++){
  2580. for(j=0; j<64; j++){
  2581. av_log(s->avctx, AV_LOG_DEBUG, "%5d",
  2582. block[i][s->idsp.idct_permutation[j]]);
  2583. }
  2584. av_log(s->avctx, AV_LOG_DEBUG, "\n");
  2585. }
  2586. }
  2587. s->current_picture.qscale_table[mb_xy] = s->qscale;
  2588. /* update DC predictors for P macroblocks */
  2589. if (!s->mb_intra) {
  2590. if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
  2591. if(s->mbintra_table[mb_xy])
  2592. ff_clean_intra_table_entries(s);
  2593. } else {
  2594. s->last_dc[0] =
  2595. s->last_dc[1] =
  2596. s->last_dc[2] = 128 << s->intra_dc_precision;
  2597. }
  2598. }
  2599. else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
  2600. s->mbintra_table[mb_xy]=1;
  2601. if ((s->avctx->flags & CODEC_FLAG_PSNR) || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor ||
  2602. !(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
  2603. s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
  2604. uint8_t *dest_y, *dest_cb, *dest_cr;
  2605. int dct_linesize, dct_offset;
  2606. op_pixels_func (*op_pix)[4];
  2607. qpel_mc_func (*op_qpix)[16];
  2608. const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
  2609. const int uvlinesize = s->current_picture.f->linesize[1];
  2610. const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
  2611. const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
  2612. /* avoid copy if macroblock skipped in last frame too */
2613. /* skip only during decoding, as during encoding we might trash the buffers a bit */
  2614. if(!s->encoding){
  2615. uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
  2616. if (s->mb_skipped) {
  2617. s->mb_skipped= 0;
  2618. av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
  2619. *mbskip_ptr = 1;
  2620. } else if(!s->current_picture.reference) {
  2621. *mbskip_ptr = 1;
  2622. } else{
  2623. *mbskip_ptr = 0; /* not skipped */
  2624. }
  2625. }
  2626. dct_linesize = linesize << s->interlaced_dct;
  2627. dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
  2628. if(readable){
  2629. dest_y= s->dest[0];
  2630. dest_cb= s->dest[1];
  2631. dest_cr= s->dest[2];
  2632. }else{
  2633. dest_y = s->b_scratchpad;
  2634. dest_cb= s->b_scratchpad+16*linesize;
  2635. dest_cr= s->b_scratchpad+32*linesize;
  2636. }
  2637. if (!s->mb_intra) {
  2638. /* motion handling */
  2639. /* decoding or more than one mb_type (MC was already done otherwise) */
  2640. if(!s->encoding){
  2641. if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
  2642. if (s->mv_dir & MV_DIR_FORWARD) {
  2643. ff_thread_await_progress(&s->last_picture_ptr->tf,
  2644. ff_mpv_lowest_referenced_row(s, 0),
  2645. 0);
  2646. }
  2647. if (s->mv_dir & MV_DIR_BACKWARD) {
  2648. ff_thread_await_progress(&s->next_picture_ptr->tf,
  2649. ff_mpv_lowest_referenced_row(s, 1),
  2650. 0);
  2651. }
  2652. }
  2653. if(lowres_flag){
  2654. h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
  2655. if (s->mv_dir & MV_DIR_FORWARD) {
  2656. MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
  2657. op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
  2658. }
  2659. if (s->mv_dir & MV_DIR_BACKWARD) {
  2660. MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
  2661. }
  2662. }else{
  2663. op_qpix = s->me.qpel_put;
  2664. if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
  2665. op_pix = s->hdsp.put_pixels_tab;
  2666. }else{
  2667. op_pix = s->hdsp.put_no_rnd_pixels_tab;
  2668. }
  2669. if (s->mv_dir & MV_DIR_FORWARD) {
  2670. ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
  2671. op_pix = s->hdsp.avg_pixels_tab;
  2672. op_qpix= s->me.qpel_avg;
  2673. }
  2674. if (s->mv_dir & MV_DIR_BACKWARD) {
  2675. ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
  2676. }
  2677. }
  2678. }
  2679. /* skip dequant / idct if we are really late ;) */
  2680. if(s->avctx->skip_idct){
  2681. if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
  2682. ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
  2683. || s->avctx->skip_idct >= AVDISCARD_ALL)
  2684. goto skip_idct;
  2685. }
  2686. /* add dct residue */
  2687. if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
  2688. || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
  2689. add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
  2690. add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
  2691. add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
  2692. add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
  2693. if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
  2694. if (s->chroma_y_shift){
  2695. add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
  2696. add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
  2697. }else{
  2698. dct_linesize >>= 1;
  2699. dct_offset >>=1;
  2700. add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
  2701. add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
  2702. add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
  2703. add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
  2704. }
  2705. }
  2706. } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
  2707. add_dct(s, block[0], 0, dest_y , dct_linesize);
  2708. add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
  2709. add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
  2710. add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
  2711. if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
  2712. if(s->chroma_y_shift){//Chroma420
  2713. add_dct(s, block[4], 4, dest_cb, uvlinesize);
  2714. add_dct(s, block[5], 5, dest_cr, uvlinesize);
  2715. }else{
  2716. //chroma422
  2717. dct_linesize = uvlinesize << s->interlaced_dct;
  2718. dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
  2719. add_dct(s, block[4], 4, dest_cb, dct_linesize);
  2720. add_dct(s, block[5], 5, dest_cr, dct_linesize);
  2721. add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
  2722. add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
  2723. if(!s->chroma_x_shift){//Chroma444
  2724. add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
  2725. add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
  2726. add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
  2727. add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
  2728. }
  2729. }
  2730. }//fi gray
  2731. }
  2732. else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
  2733. ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
  2734. }
  2735. } else {
  2736. /* dct only in intra block */
  2737. if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
  2738. put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
  2739. put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
  2740. put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
  2741. put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
  2742. if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
  2743. if(s->chroma_y_shift){
  2744. put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
  2745. put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
  2746. }else{
  2747. dct_offset >>=1;
  2748. dct_linesize >>=1;
  2749. put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
  2750. put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
  2751. put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
  2752. put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
  2753. }
  2754. }
  2755. }else{
  2756. s->idsp.idct_put(dest_y, dct_linesize, block[0]);
  2757. s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
  2758. s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
  2759. s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
  2760. if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
  2761. if(s->chroma_y_shift){
  2762. s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
  2763. s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
  2764. }else{
  2765. dct_linesize = uvlinesize << s->interlaced_dct;
  2766. dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
  2767. s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
  2768. s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
  2769. s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
  2770. s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
  2771. if(!s->chroma_x_shift){//Chroma444
  2772. s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
  2773. s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
  2774. s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
  2775. s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
  2776. }
  2777. }
  2778. }//gray
  2779. }
  2780. }
  2781. skip_idct:
  2782. if(!readable){
  2783. s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
  2784. if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
  2785. s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
  2786. s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
  2787. }
  2788. }
  2789. }
  2790. }
  2791. void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
  2792. {
  2793. #if !CONFIG_SMALL
  2794. if(s->out_format == FMT_MPEG1) {
  2795. if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 1);
  2796. else mpv_decode_mb_internal(s, block, 0, 1);
  2797. } else
  2798. #endif
  2799. if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 0);
  2800. else mpv_decode_mb_internal(s, block, 0, 0);
  2801. }
  2802. void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
  2803. {
  2804. ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
  2805. s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
  2806. s->first_field, s->low_delay);
  2807. }
  2808. void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
  2809. const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
  2810. const int uvlinesize = s->current_picture.f->linesize[1];
  2811. const int mb_size= 4 - s->avctx->lowres;
  2812. s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
  2813. s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
  2814. s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
  2815. s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
  2816. s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
  2817. s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
  2818. //block_index is not used by mpeg2, so it is not affected by chroma_format
  2819. s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) << mb_size);
  2820. s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (mb_size - s->chroma_x_shift));
  2821. s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (mb_size - s->chroma_x_shift));
  2822. if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
  2823. {
  2824. if(s->picture_structure==PICT_FRAME){
  2825. s->dest[0] += s->mb_y * linesize << mb_size;
  2826. s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
  2827. s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
  2828. }else{
  2829. s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
  2830. s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
  2831. s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
  2832. av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
  2833. }
  2834. }
  2835. }
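/* Example: with lowres = 0, mb_size = 4, so s->dest[0] advances by
 * (1 << 4) = 16 luma pixels per macroblock column and by linesize << 4,
 * i.e. 16 lines, per macroblock row; with lowres = 1 every step is halved
 * to 8, matching the reduced-resolution planes. */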
  2836. /**
  2837. * Permute an 8x8 block.
  2838. * @param block the block which will be permuted according to the given permutation vector
  2839. * @param permutation the permutation vector
2840. * @param last the last non-zero coefficient in scantable order, used to speed the permutation up
2841. * @param scantable the scantable used; this is only used to speed the permutation up, the block is not
2842. * (inverse) permuted to scantable order!
  2843. */
  2844. void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
  2845. {
  2846. int i;
  2847. int16_t temp[64];
  2848. if(last<=0) return;
  2849. //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
  2850. for(i=0; i<=last; i++){
  2851. const int j= scantable[i];
  2852. temp[j]= block[j];
  2853. block[j]=0;
  2854. }
  2855. for(i=0; i<=last; i++){
  2856. const int j= scantable[i];
  2857. const int perm_j= permutation[j];
  2858. block[perm_j]= temp[j];
  2859. }
  2860. }
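/* Minimal usage sketch (arguments are illustrative): permuting the first
 * three scan-order coefficients of a block into the IDCT's native layout:
 *
 *     ff_block_permute(block, s->idsp.idct_permutation,
 *                      s->intra_scantable.scantable, 2);
 *
 * Only the coefficients at scantable[0..last] are touched: they are zeroed
 * and rewritten at their permuted positions; all other positions are left
 * as they were. */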
  2861. void ff_mpeg_flush(AVCodecContext *avctx){
  2862. int i;
  2863. MpegEncContext *s = avctx->priv_data;
  2864. if (!s || !s->picture)
  2865. return;
  2866. for (i = 0; i < MAX_PICTURE_COUNT; i++)
  2867. ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
  2868. s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
  2869. ff_mpeg_unref_picture(s->avctx, &s->current_picture);
  2870. ff_mpeg_unref_picture(s->avctx, &s->last_picture);
  2871. ff_mpeg_unref_picture(s->avctx, &s->next_picture);
  2872. s->mb_x= s->mb_y= 0;
  2873. s->closed_gop= 0;
  2874. s->parse_context.state= -1;
  2875. s->parse_context.frame_start_found= 0;
  2876. s->parse_context.overread= 0;
  2877. s->parse_context.overread_index= 0;
  2878. s->parse_context.index= 0;
  2879. s->parse_context.last_index= 0;
  2880. s->bitstream_buffer_size=0;
  2881. s->pp_time=0;
  2882. }
  2883. /**
2884. * Set qscale and update the qscale-dependent variables.
  2885. */
  2886. void ff_set_qscale(MpegEncContext * s, int qscale)
  2887. {
  2888. if (qscale < 1)
  2889. qscale = 1;
  2890. else if (qscale > 31)
  2891. qscale = 31;
  2892. s->qscale = qscale;
  2893. s->chroma_qscale= s->chroma_qscale_table[qscale];
  2894. s->y_dc_scale= s->y_dc_scale_table[ qscale ];
  2895. s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
  2896. }
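/* Example: ff_set_qscale(s, 40) clamps the value to s->qscale = 31 and then
 * refreshes chroma_qscale, y_dc_scale and c_dc_scale from the per-codec
 * tables, so callers never need to range-check the quantizer themselves. */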
  2897. void ff_mpv_report_decode_progress(MpegEncContext *s)
  2898. {
  2899. if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
  2900. ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
  2901. }