/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * The simplest mpeg encoder (well, it was the simplest!).
 */

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/timer.h"
#include "avcodec.h"
#include "blockdsp.h"
#include "h264chroma.h"
#include "idctdsp.h"
#include "internal.h"
#include "mathops.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include "qpeldsp.h"
#include "thread.h"
#include <limits.h>

static const uint8_t ff_default_chroma_qscale_table[32] = {
//   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};

const uint8_t ff_mpeg1_dc_scale_table[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};

static const uint8_t mpeg2_dc_scale_table1[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};

static const uint8_t mpeg2_dc_scale_table2[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};

static const uint8_t mpeg2_dc_scale_table3[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};

const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
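
/* The four tables above correspond to the MPEG-2 intra_dc_precision values
 * 0..3: the DC coefficient of an intra block is scaled by 8, 4, 2 or 1
 * respectively, while MPEG-1 always uses the constant scale of 8 from
 * ff_mpeg1_dc_scale_table. */
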
const uint8_t ff_alternate_horizontal_scan[64] = {
     0,  1,  2,  3,  8,  9, 16, 17,
    10, 11,  4,  5,  6,  7, 15, 14,
    13, 12, 19, 18, 24, 25, 32, 33,
    26, 27, 20, 21, 22, 23, 28, 29,
    30, 31, 34, 35, 40, 41, 48, 49,
    42, 43, 36, 37, 38, 39, 44, 45,
    46, 47, 50, 51, 56, 57, 58, 59,
    52, 53, 54, 55, 60, 61, 62, 63,
};

const uint8_t ff_alternate_vertical_scan[64] = {
     0,  8, 16, 24,  1,  9,  2, 10,
    17, 25, 32, 40, 48, 56, 57, 49,
    41, 33, 26, 18,  3, 11,  4, 12,
    19, 27, 34, 42, 50, 58, 35, 43,
    51, 59, 20, 28,  5, 13,  6, 14,
    21, 29, 36, 44, 52, 60, 37, 45,
    53, 61, 22, 30,  7, 15, 23, 31,
    38, 46, 54, 62, 39, 47, 55, 63,
};
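
/* These tables give, for each position in coded (scan) order, the raster
 * index inside the 8x8 block. For example, ff_alternate_vertical_scan[1] == 8,
 * i.e. the second coefficient read from the bitstream lands at row 1,
 * column 0; the default zigzag order is ff_zigzag_direct. */
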
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs = s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    /* XXX: only mpeg1 */
    quant_matrix = s->intra_matrix;
    for (i = 1; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}
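
/* A quick numeric sketch of the intra reconstruction above (values are
 * illustrative): with qscale = 8, quant_matrix[j] = 16 and a coded level of 2,
 * (2 * 8 * 16) >> 3 = 32, and the "(level - 1) | 1" step forces the result
 * odd, giving 31 (the oddification is MPEG-1's mismatch control). */
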
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs = s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for (i = 0; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    if (s->alternate_scan)
        nCoeffs = 63;
    else
        nCoeffs = s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for (i = 1; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
            }
            block[j] = level;
        }
    }
}

static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum = -1;

    if (s->alternate_scan)
        nCoeffs = 63;
    else
        nCoeffs = s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    sum += block[0];
    quant_matrix = s->intra_matrix;
    for (i = 1; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
            }
            block[j] = level;
            sum += level;
        }
    }
    block[63] ^= sum & 1;
}
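
/* MPEG-2 mismatch control: the sum of all dequantized coefficients must be
 * odd; if it is even, the LSB of the last coefficient (block[63]) is toggled.
 * The bitexact variant above tracks the sum explicitly: it starts at -1, so
 * (sum & 1) is 1 exactly when the true coefficient sum is even, which is when
 * block[63] gets its LSB flipped. */
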
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum = -1;

    if (s->alternate_scan)
        nCoeffs = 63;
    else
        nCoeffs = s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for (i = 0; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
            }
            block[j] = level;
            sum += level;
        }
    }
    block[63] ^= sum & 1;
}
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    av_assert2(s->block_last_index[n] >= 0 || s->h263_aic);

    qmul = qscale << 1;

    if (!s->h263_aic) {
        block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
        qadd = (qscale - 1) | 1;
    } else {
        qadd = 0;
    }
    if (s->ac_pred)
        nCoeffs = 63;
    else
        nCoeffs = s->inter_scantable.raster_end[s->block_last_index[n]];

    for (i = 1; i <= nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}

static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    av_assert2(s->block_last_index[n] >= 0);

    qadd = (qscale - 1) | 1;
    qmul = qscale << 1;

    nCoeffs = s->inter_scantable.raster_end[s->block_last_index[n]];

    for (i = 0; i <= nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}
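
/* H.263 reconstruction in the two functions above is simply
 *     |level'| = 2 * qscale * |level| + qadd,   qadd = (qscale - 1) | 1
 * (qadd forced odd, and qadd = 0 for AIC intra blocks). For example,
 * qscale = 10 and level = -3 give -(3 * 20 + 9) = -69. */
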
static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                              int (*mv)[2][4][2],
                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
{
    MpegEncContext *s = opaque;

    s->mv_dir     = mv_dir;
    s->mv_type    = mv_type;
    s->mb_intra   = mb_intra;
    s->mb_skipped = mb_skipped;
    s->mb_x       = mb_x;
    s->mb_y       = mb_y;
    memcpy(s->mv, mv, sizeof(*mv));

    ff_init_block_index(s);
    ff_update_block_index(s);

    s->bdsp.clear_blocks(s->block[0]);

    s->dest[0] = s->current_picture.f->data[0] +
                 (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
    s->dest[1] = s->current_picture.f->data[1] +
                 (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) +
                 s->mb_x * (16 >> s->chroma_x_shift);
    s->dest[2] = s->current_picture.f->data[2] +
                 (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) +
                 s->mb_x * (16 >> s->chroma_x_shift);

    if (ref)
        av_log(s->avctx, AV_LOG_DEBUG,
               "Interlaced error concealment is not fully implemented\n");
    ff_MPV_decode_mb(s, s->block);
}

static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    while (h--)
        memset(dst + h * linesize, 128, 16);
}

static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    while (h--)
        memset(dst + h * linesize, 128, 8);
}
/* init common dct for both encoder and decoder */
static av_cold int dct_init(MpegEncContext *s)
{
    ff_blockdsp_init(&s->bdsp, s->avctx);
    ff_h264chroma_init(&s->h264chroma, 8); //for lowres
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_me_cmp_init(&s->mecc, s->avctx);
    ff_mpegvideodsp_init(&s->mdsp);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    if (s->avctx->debug & FF_DEBUG_NOMC) {
        int i;
        for (i = 0; i < 4; i++) {
            s->hdsp.avg_pixels_tab[0][i] = gray16;
            s->hdsp.put_pixels_tab[0][i] = gray16;
            s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;

            s->hdsp.avg_pixels_tab[1][i] = gray8;
            s->hdsp.put_pixels_tab[1][i] = gray8;
            s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
        }
    }

    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    if (HAVE_INTRINSICS_NEON)
        ff_MPV_common_init_neon(s);

    if (ARCH_ALPHA)
        ff_MPV_common_init_axp(s);
    if (ARCH_ARM)
        ff_MPV_common_init_arm(s);
    if (ARCH_PPC)
        ff_MPV_common_init_ppc(s);
    if (ARCH_X86)
        ff_MPV_common_init_x86(s);

    return 0;
}
av_cold void ff_mpv_idct_init(MpegEncContext *s)
{
    ff_idctdsp_init(&s->idsp, s->avctx);

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    }
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
}
static int frame_size_alloc(MpegEncContext *s, int linesize)
{
    int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);

    if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
        return 0;

    if (linesize < 24) {
        av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
        return AVERROR_PATCHWELCOME;
    }

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for halfpel / 21x21 for H.264)
    // VC-1 computes luma and chroma simultaneously and needs 19x19 + 9x9
    // at uvlinesize. It supports only YUV420, so 24x24 is enough
    // linesize * interlaced * MBsize
    // we also use this buffer for encoding in encode_mb_internal(), needing an additional 32 lines
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 68,
                      fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
                      fail)
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;

    return 0;
fail:
    av_freep(&s->edge_emu_buffer);
    return AVERROR(ENOMEM);
}
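
/* Rough sizing example for the scratch buffers above (illustrative numbers):
 * for a 704-pixel-wide luma plane, alloc_size = FFALIGN(704 + 64, 32) = 768,
 * so edge_emu_buffer gets 768 * 4 * 68 bytes (~204 KiB) and me.scratchpad
 * 768 * 4 * 16 * 2 bytes (96 KiB); the rd/b/obmc scratchpads alias the same
 * allocation. */
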
/**
 * Allocate a frame buffer
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int edges_needed = av_codec_is_encoder(s->avctx->codec);
    int r, ret;

    pic->tf.f = pic->f;
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2) {
        if (edges_needed) {
            pic->f->width  = s->avctx->width  + 2 * EDGE_WIDTH;
            pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
        }

        r = ff_thread_get_buffer(s->avctx, &pic->tf,
                                 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
    } else {
        pic->f->width  = s->avctx->width;
        pic->f->height = s->avctx->height;
        pic->f->format = s->avctx->pix_fmt;
        r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
    }

    if (r < 0 || !pic->f->buf[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
               r, pic->f->data[0]);
        return -1;
    }

    if (edges_needed) {
        int i;
        for (i = 0; pic->f->data[i]; i++) {
            int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
                         pic->f->linesize[i] +
                         (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
            pic->f->data[i] += offset;
        }
        pic->f->width  = s->avctx->width;
        pic->f->height = s->avctx->height;
    }

    if (s->avctx->hwaccel) {
        assert(!pic->hwaccel_picture_private);
        if (s->avctx->hwaccel->frame_priv_data_size) {
            pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
            if (!pic->hwaccel_priv_buf) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
            pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
        }
    }

    if (s->linesize && (s->linesize   != pic->f->linesize[0] ||
                        s->uvlinesize != pic->f->linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        ff_mpeg_unref_picture(s, pic);
        return -1;
    }

    if (pic->f->linesize[1] != pic->f->linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        ff_mpeg_unref_picture(s, pic);
        return -1;
    }

    if (!s->edge_emu_buffer &&
        (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed to allocate context scratch buffers.\n");
        ff_mpeg_unref_picture(s, pic);
        return ret;
    }

    return 0;
}
void ff_free_picture_tables(Picture *pic)
{
    int i;

    pic->alloc_mb_width  =
    pic->alloc_mb_height = 0;

    av_buffer_unref(&pic->mb_var_buf);
    av_buffer_unref(&pic->mc_mb_var_buf);
    av_buffer_unref(&pic->mb_mean_buf);
    av_buffer_unref(&pic->mbskip_table_buf);
    av_buffer_unref(&pic->qscale_table_buf);
    av_buffer_unref(&pic->mb_type_buf);

    for (i = 0; i < 2; i++) {
        av_buffer_unref(&pic->motion_val_buf[i]);
        av_buffer_unref(&pic->ref_index_buf[i]);
    }
}

static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
{
    const int big_mb_num    = s->mb_stride * (s->mb_height + 1) + 1;
    const int mb_array_size = s->mb_stride * s->mb_height;
    const int b8_array_size = s->b8_stride * s->mb_height * 2;
    int i;

    pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
    pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
    pic->mb_type_buf      = av_buffer_allocz((big_mb_num + s->mb_stride) *
                                             sizeof(uint32_t));
    if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
        return AVERROR(ENOMEM);

    if (s->encoding) {
        pic->mb_var_buf    = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mb_mean_buf   = av_buffer_allocz(mb_array_size);
        if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
            return AVERROR(ENOMEM);
    }

    if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
        int ref_index_size = 4 * mb_array_size;

        for (i = 0; mv_size && i < 2; i++) {
            pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
            pic->ref_index_buf[i]  = av_buffer_allocz(ref_index_size);
            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
                return AVERROR(ENOMEM);
        }
    }

    pic->alloc_mb_width  = s->mb_width;
    pic->alloc_mb_height = s->mb_height;

    return 0;
}

static int make_tables_writable(Picture *pic)
{
    int ret, i;
#define MAKE_WRITABLE(table) \
do {\
    if (pic->table &&\
        (ret = av_buffer_make_writable(&pic->table)) < 0)\
        return ret;\
} while (0)

    MAKE_WRITABLE(mb_var_buf);
    MAKE_WRITABLE(mc_mb_var_buf);
    MAKE_WRITABLE(mb_mean_buf);
    MAKE_WRITABLE(mbskip_table_buf);
    MAKE_WRITABLE(qscale_table_buf);
    MAKE_WRITABLE(mb_type_buf);

    for (i = 0; i < 2; i++) {
        MAKE_WRITABLE(motion_val_buf[i]);
        MAKE_WRITABLE(ref_index_buf[i]);
    }

    return 0;
}
/**
 * Allocate a Picture.
 * The pixels are allocated/set by calling get_buffer() if shared = 0
 */
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    int i, ret;

    if (pic->qscale_table_buf)
        if (   pic->alloc_mb_width  != s->mb_width
            || pic->alloc_mb_height != s->mb_height)
            ff_free_picture_tables(pic);

    if (shared) {
        av_assert0(pic->f->data[0]);
        pic->shared = 1;
    } else {
        av_assert0(!pic->f->buf[0]);

        if (alloc_frame_buffer(s, pic) < 0)
            return -1;

        s->linesize   = pic->f->linesize[0];
        s->uvlinesize = pic->f->linesize[1];
    }

    if (!pic->qscale_table_buf)
        ret = alloc_picture_tables(s, pic);
    else
        ret = make_tables_writable(pic);
    if (ret < 0)
        goto fail;

    if (s->encoding) {
        pic->mb_var    = (uint16_t*)pic->mb_var_buf->data;
        pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
        pic->mb_mean   = pic->mb_mean_buf->data;
    }

    pic->mbskip_table = pic->mbskip_table_buf->data;
    pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
    pic->mb_type      = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;

    if (pic->motion_val_buf[0]) {
        for (i = 0; i < 2; i++) {
            pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
            pic->ref_index[i]  = pic->ref_index_buf[i]->data;
        }
    }

    return 0;
fail:
    av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
    ff_mpeg_unref_picture(s, pic);
    ff_free_picture_tables(pic);
    return AVERROR(ENOMEM);
}
/**
 * Deallocate a picture.
 */
void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
{
    int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);

    pic->tf.f = pic->f;
    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        ff_thread_release_buffer(s->avctx, &pic->tf);
    else if (pic->f)
        av_frame_unref(pic->f);

    av_buffer_unref(&pic->hwaccel_priv_buf);

    if (pic->needs_realloc)
        ff_free_picture_tables(pic);

    memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
}
static int update_picture_tables(Picture *dst, Picture *src)
{
    int i;

#define UPDATE_TABLE(table)\
do {\
    if (src->table &&\
        (!dst->table || dst->table->buffer != src->table->buffer)) {\
        av_buffer_unref(&dst->table);\
        dst->table = av_buffer_ref(src->table);\
        if (!dst->table) {\
            ff_free_picture_tables(dst);\
            return AVERROR(ENOMEM);\
        }\
    }\
} while (0)

    UPDATE_TABLE(mb_var_buf);
    UPDATE_TABLE(mc_mb_var_buf);
    UPDATE_TABLE(mb_mean_buf);
    UPDATE_TABLE(mbskip_table_buf);
    UPDATE_TABLE(qscale_table_buf);
    UPDATE_TABLE(mb_type_buf);
    for (i = 0; i < 2; i++) {
        UPDATE_TABLE(motion_val_buf[i]);
        UPDATE_TABLE(ref_index_buf[i]);
    }

    dst->mb_var       = src->mb_var;
    dst->mc_mb_var    = src->mc_mb_var;
    dst->mb_mean      = src->mb_mean;
    dst->mbskip_table = src->mbskip_table;
    dst->qscale_table = src->qscale_table;
    dst->mb_type      = src->mb_type;
    for (i = 0; i < 2; i++) {
        dst->motion_val[i] = src->motion_val[i];
        dst->ref_index[i]  = src->ref_index[i];
    }

    dst->alloc_mb_width  = src->alloc_mb_width;
    dst->alloc_mb_height = src->alloc_mb_height;

    return 0;
}
int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
{
    int ret;

    av_assert0(!dst->f->buf[0]);
    av_assert0(src->f->buf[0]);

    src->tf.f = src->f;
    dst->tf.f = dst->f;
    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (ret < 0)
        goto fail;

    ret = update_picture_tables(dst, src);
    if (ret < 0)
        goto fail;

    if (src->hwaccel_picture_private) {
        dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
        if (!dst->hwaccel_priv_buf)
            goto fail;
        dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
    }

    dst->field_picture = src->field_picture;
    dst->mb_var_sum    = src->mb_var_sum;
    dst->mc_mb_var_sum = src->mc_mb_var_sum;
    dst->b_frame_score = src->b_frame_score;
    dst->needs_realloc = src->needs_realloc;
    dst->reference     = src->reference;
    dst->shared        = src->shared;

    return 0;
fail:
    ff_mpeg_unref_picture(s, dst);
    return ret;
}
static void exchange_uv(MpegEncContext *s)
{
    int16_t (*tmp)[64];

    tmp           = s->pblocks[4];
    s->pblocks[4] = s->pblocks[5];
    s->pblocks[5] = tmp;
}

static int init_duplicate_context(MpegEncContext *s)
{
    int y_size  = s->b8_stride * (2 * s->mb_height + 1);
    int c_size  = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    if (s->mb_height & 1)
        yc_size += 2 * s->b8_stride + 2 * s->mb_stride;

    s->edge_emu_buffer =
    s->me.scratchpad   =
    s->me.temp         =
    s->rd_scratchpad   =
    s->b_scratchpad    =
    s->obmc_scratchpad = NULL;

    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }
    if (s->avctx->codec_tag == AV_RL32("VCR2"))
        exchange_uv(s);

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through ff_MPV_common_end()
}
static void free_duplicate_context(MpegEncContext *s)
{
    if (s == NULL)
        return;

    av_freep(&s->edge_emu_buffer);
    av_freep(&s->me.scratchpad);
    s->me.temp         =
    s->rd_scratchpad   =
    s->b_scratchpad    =
    s->obmc_scratchpad = NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->ac_val_base);
    s->block = NULL;
}

static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    // START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (dst->avctx->codec_tag == AV_RL32("VCR2"))
        exchange_uv(dst);
    if (!dst->edge_emu_buffer &&
        (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
    return 0;
}
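
/* Sketch of how slice threading typically uses this (illustrative; the real
 * callers are the slice decode/encode loops):
 *
 *     for (i = 1; i < s->slice_context_count; i++)
 *         ff_update_duplicate_context(s->thread_context[i], s);
 *
 * i.e. every per-slice context is refreshed from the main context before the
 * slice threads run, while its private scratch buffers are preserved by
 * backup_duplicate_context(). */
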
int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    int i, ret;
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src)
        return 0;

    av_assert0(s != s1);

    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit
    if (!s->context_initialized) {
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx                 = dst;
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        if (s1->context_initialized) {
//             s->picture_range_start += MAX_PICTURE_COUNT;
//             s->picture_range_end   += MAX_PICTURE_COUNT;
            ff_mpv_idct_init(s);
            if ((ret = ff_MPV_common_init(s)) < 0) {
                memset(s, 0, sizeof(MpegEncContext));
                s->avctx = dst;
                return ret;
            }
        }
    }

    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        s->context_reinit = 0;
        s->height = s1->height;
        s->width  = s1->width;
        if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
            return ret;
    }

    s->avctx->coded_height  = s1->avctx->coded_height;
    s->avctx->coded_width   = s1->avctx->coded_width;
    s->avctx->width         = s1->avctx->width;
    s->avctx->height        = s1->avctx->height;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;

    av_assert0(!s->picture || s->picture != s1->picture);
    if (s->picture)
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        ff_mpeg_unref_picture(s, &s->picture[i]);
        if (s1->picture[i].f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
            return ret;
    }

#define UPDATE_PICTURE(pic)\
do {\
    ff_mpeg_unref_picture(s, &s->pic);\
    if (s1->pic.f && s1->pic.f->buf[0])\
        ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
    else\
        ret = update_picture_tables(&s->pic, &s1->pic);\
    if (ret < 0)\
        return ret;\
} while (0)

    UPDATE_PICTURE(current_picture);
    UPDATE_PICTURE(last_picture);
    UPDATE_PICTURE(next_picture);

    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    // Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;
    s->padding_bug_score    = s1->padding_bug_score;

    // MPEG4 timing info
    memcpy(&s->last_time_base, &s1->last_time_base,
           (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
           (char *) &s1->last_time_base);

    // B-frame info
    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->droppable    = s1->droppable;

    // DivX handling (doesn't work)
    s->divx_packed  = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               FF_INPUT_BUFFER_PADDING_SIZE);
    }

    // linesize-dependent scratch buffer allocation
    if (!s->edge_emu_buffer)
        if (s1->linesize) {
            if (frame_size_alloc(s, s1->linesize) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                       "scratch buffers.\n");
                return AVERROR(ENOMEM);
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
                   "be allocated due to unknown size.\n");
        }

    // MPEG2/interlacing info
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
    }

    return 0;
}
/**
 * Set the given MpegEncContext to common defaults
 * (same for encoding and decoding).
 * The changed fields will not depend upon the
 * prior state of the MpegEncContext.
 */
void ff_MPV_common_defaults(MpegEncContext *s)
{
    s->y_dc_scale_table     =
    s->c_dc_scale_table     = ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table  = ff_default_chroma_qscale_table;
    s->progressive_frame    = 1;
    s->progressive_sequence = 1;
    s->picture_structure    = PICT_FRAME;

    s->coded_picture_number = 0;
    s->picture_number       = 0;

    s->f_code               = 1;
    s->b_code               = 1;

    s->slice_context_count  = 1;
}

/**
 * Set the given MpegEncContext to defaults for decoding.
 * The changed fields will not depend upon
 * the prior state of the MpegEncContext.
 */
void ff_MPV_decode_defaults(MpegEncContext *s)
{
    ff_MPV_common_defaults(s);
}
static int init_er(MpegEncContext *s)
{
    ERContext *er = &s->er;
    int mb_array_size = s->mb_height * s->mb_stride;
    int i;

    er->avctx       = s->avctx;
    er->mecc        = &s->mecc;

    er->mb_index2xy = s->mb_index2xy;
    er->mb_num      = s->mb_num;
    er->mb_width    = s->mb_width;
    er->mb_height   = s->mb_height;
    er->mb_stride   = s->mb_stride;
    er->b8_stride   = s->b8_stride;

    er->er_temp_buffer     = av_malloc(s->mb_height * s->mb_stride);
    er->error_status_table = av_mallocz(mb_array_size);
    if (!er->er_temp_buffer || !er->error_status_table)
        goto fail;

    er->mbskip_table  = s->mbskip_table;
    er->mbintra_table = s->mbintra_table;

    for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
        er->dc_val[i] = s->dc_val[i];

    er->decode_mb = mpeg_er_decode_mb;
    er->opaque    = s;

    return 0;
fail:
    av_freep(&er->er_temp_buffer);
    av_freep(&er->error_status_table);
    return AVERROR(ENOMEM);
}
/**
 * Initialize and allocate MpegEncContext fields dependent on the resolution.
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overridden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num     = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    if (s->mb_height & 1)
        yc_size += 2 * s->b8_stride + 2 * s->mb_stride;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error resilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,            mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,     mv_table_size * 2 * sizeof(int16_t), fail)
        s->p_mv_table            = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base     + s->mb_stride + 1;

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                         mb_array_size * sizeof(float), fail);
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                         mb_array_size * sizeof(float), fail);
    }

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->flags & CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
        }
    }

    if (s->out_format == FMT_H263) {
        /* cbp values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is an intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // Note the + 1 is for a quicker MPEG4 slice_end detection

    return init_er(s);
fail:
    return AVERROR(ENOMEM);
}
/**
 * Init common structure for both encoder and decoder.
 * This assumes that some variables like width/height are already set.
 */
av_cold int ff_MPV_common_init(MpegEncContext *s)
{
    int i;
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    dct_init(s);

    s->flags  = s->avctx->flags;
    s->flags2 = s->avctx->flags2;

    /* set chroma shifts */
    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
                                  &s->chroma_x_shift,
                                  &s->chroma_y_shift);

    /* convert fourcc to upper case */
    s->codec_tag        = avpriv_toupper4(s->avctx->codec_tag);
    s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      MAX_PICTURE_COUNT * sizeof(Picture), fail);
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        s->picture[i].f = av_frame_alloc();
        if (!s->picture[i].f)
            goto fail;
    }
    memset(&s->next_picture, 0, sizeof(s->next_picture));
    memset(&s->last_picture, 0, sizeof(s->last_picture));
    memset(&s->current_picture, 0, sizeof(s->current_picture));
    memset(&s->new_picture, 0, sizeof(s->new_picture));
    s->next_picture.f = av_frame_alloc();
    if (!s->next_picture.f)
        goto fail;
    s->last_picture.f = av_frame_alloc();
    if (!s->last_picture.f)
        goto fail;
    s->current_picture.f = av_frame_alloc();
    if (!s->current_picture.f)
        goto fail;
    s->new_picture.f = av_frame_alloc();
    if (!s->new_picture.f)
        goto fail;

    if (init_context_frame(s))
        goto fail;

    s->parse_context.state = -1;

    s->context_initialized = 1;
    s->thread_context[0]   = s;

//     if (s->width && s->height) {
    if (nb_slices > 1) {
        for (i = 1; i < nb_slices; i++) {
            s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
            memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
        }

        for (i = 0; i < nb_slices; i++) {
            if (init_duplicate_context(s->thread_context[i]) < 0)
                goto fail;
            s->thread_context[i]->start_mb_y =
                (s->mb_height * (i) + nb_slices / 2) / nb_slices;
            s->thread_context[i]->end_mb_y   =
                (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        }
    } else {
        if (init_duplicate_context(s) < 0)
            goto fail;
        s->start_mb_y = 0;
        s->end_mb_y   = s->mb_height;
    }
    s->slice_context_count = nb_slices;
//     }

    return 0;
fail:
    ff_MPV_common_end(s);
    return -1;
}
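
/* A minimal sketch of the expected call order from a decoder's init function
 * (illustrative only, error handling omitted):
 *
 *     MpegEncContext *s = avctx->priv_data;
 *     ff_MPV_decode_defaults(s);
 *     s->avctx    = avctx;
 *     s->width    = avctx->coded_width;
 *     s->height   = avctx->coded_height;
 *     s->codec_id = avctx->codec->id;
 *     ff_mpv_idct_init(s);
 *     if (ff_MPV_common_init(s) < 0)
 *         return -1;
 *
 * i.e. width/height and codec_id must be set before ff_MPV_common_init(),
 * as the doxygen comment above states. */
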
/**
 * Frees and resets MpegEncContext fields depending on the resolution.
 * Is used during resolution changes to avoid a full reinitialization of the
 * codec.
 */
static int free_context_frame(MpegEncContext *s)
{
    int i, j, k;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);

    av_freep(&s->er.error_status_table);
    av_freep(&s->er.er_temp_buffer);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);

    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);

    s->linesize = s->uvlinesize = 0;

    return 0;
}
int ff_MPV_common_frame_size_change(MpegEncContext *s)
{
    int i, err = 0;

    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
    } else
        free_duplicate_context(s);

    if ((err = free_context_frame(s)) < 0)
        return err;

    if (s->picture)
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            s->picture[i].needs_realloc = 1;
        }

    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;

    // init
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return AVERROR_INVALIDDATA;

    if ((err = init_context_frame(s)))
        goto fail;

    s->thread_context[0] = s;

    if (s->width && s->height) {
        int nb_slices = s->slice_context_count;
        if (nb_slices > 1) {
            for (i = 1; i < nb_slices; i++) {
                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
            }

            for (i = 0; i < nb_slices; i++) {
                if (init_duplicate_context(s->thread_context[i]) < 0)
                    goto fail;
                s->thread_context[i]->start_mb_y =
                    (s->mb_height * (i) + nb_slices / 2) / nb_slices;
                s->thread_context[i]->end_mb_y   =
                    (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
            }
        } else {
            err = init_duplicate_context(s);
            if (err < 0)
                goto fail;
            s->start_mb_y = 0;
            s->end_mb_y   = s->mb_height;
        }
        s->slice_context_count = nb_slices;
    }

    return 0;
fail:
    ff_MPV_common_end(s);
    return err;
}
/* free common structure for both encoder and decoder */
void ff_MPV_common_end(MpegEncContext *s)
{
    int i;

    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    if (s->picture) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            ff_free_picture_tables(&s->picture[i]);
            ff_mpeg_unref_picture(s, &s->picture[i]);
            av_frame_free(&s->picture[i].f);
        }
    }
    av_freep(&s->picture);
    ff_free_picture_tables(&s->last_picture);
    ff_mpeg_unref_picture(s, &s->last_picture);
    av_frame_free(&s->last_picture.f);
    ff_free_picture_tables(&s->current_picture);
    ff_mpeg_unref_picture(s, &s->current_picture);
    av_frame_free(&s->current_picture.f);
    ff_free_picture_tables(&s->next_picture);
    ff_mpeg_unref_picture(s, &s->next_picture);
    av_frame_free(&s->next_picture.f);
    ff_free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(s, &s->new_picture);
    av_frame_free(&s->new_picture.f);

    free_context_frame(s);

    s->context_initialized = 0;
    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;
    s->linesize = s->uvlinesize = 0;
}
av_cold void ff_init_rl(RLTable *rl,
                        uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
    int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if (static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for (last = 0; last < 2; last++) {
        if (last == 0) {
            start = 0;
            end   = rl->last;
        } else {
            start = rl->last;
            end   = rl->n;
        }

        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for (i = start; i < end; i++) {
            run   = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        if (static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if (static_store)
            rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if (static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
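
/* Typical usage sketch (illustrative; "some_rl_table" is a placeholder):
 * callers either pass NULL so the max_level/max_run/index_run arrays are
 * malloc()ed, or hand in a static store sized exactly as the prototype asks:
 *
 *     static uint8_t rl_store[2][2 * MAX_RUN + MAX_LEVEL + 3];
 *     ff_init_rl(&some_rl_table, rl_store);
 *
 * The three arrays for each "last" half are packed back to back inside
 * static_store[last], which is why the offsets MAX_RUN + 1 and
 * MAX_RUN + MAX_LEVEL + 2 appear above. */
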
av_cold void ff_init_vlc_rl(RLTable *rl)
{
    int i, q;

    for (q = 0; q < 32; q++) {
        int qmul = q * 2;
        int qadd = (q - 1) | 1;

        if (q == 0) {
            qmul = 1;
            qadd = 0;
        }
        for (i = 0; i < rl->vlc.table_size; i++) {
            int code = rl->vlc.table[i][0];
            int len  = rl->vlc.table[i][1];
            int level, run;

            if (len == 0) { // illegal code
                run   = 66;
                level = MAX_LEVEL;
            } else if (len < 0) { // more bits needed
                run   = 0;
                level = code;
            } else {
                if (code == rl->n) { // esc
                    run   = 66;
                    level = 0;
                } else {
                    run   = rl->table_run[code] + 1;
                    level = rl->table_level[code] * qmul + qadd;
                    if (code >= rl->last) run += 192;
                }
            }
            rl->rl_vlc[q][i].len   = len;
            rl->rl_vlc[q][i].level = level;
            rl->rl_vlc[q][i].run   = run;
        }
    }
}
static void release_unused_pictures(MpegEncContext *s)
{
    int i;

    /* release non reference frames */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].reference)
            ff_mpeg_unref_picture(s, &s->picture[i]);
    }
}

static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
{
    if (pic == s->last_picture_ptr)
        return 0;
    if (pic->f->buf[0] == NULL)
        return 1;
    if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
        return 1;
    return 0;
}

static int find_unused_picture(MpegEncContext *s, int shared)
{
    int i;

    if (shared) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            if (s->picture[i].f->buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
                return i;
        }
    } else {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            if (pic_is_unused(s, &s->picture[i]))
                return i;
        }
    }

    av_log(s->avctx, AV_LOG_FATAL,
           "Internal error, picture buffer overflow\n");
    /* We could return -1, but the codec would crash trying to draw into a
     * non-existing frame anyway. This is safer than waiting for a random crash.
     * Also the return of this is never useful, an encoder must only allocate
     * as much as allowed in the specification. This has no relationship to how
     * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
     * enough for such valid streams).
     * Plus, a decoder has to check stream validity and remove frames if too
     * many reference frames are around. Waiting for "OOM" is not correct at
     * all. Similarly, missing reference frames have to be replaced by
     * interpolated/MC frames, anything else is a bug in the codec ...
     */
    abort();
    return -1;
}
int ff_find_unused_picture(MpegEncContext *s, int shared)
{
    int ret = find_unused_picture(s, shared);

    if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
        if (s->picture[ret].needs_realloc) {
            s->picture[ret].needs_realloc = 0;
            ff_free_picture_tables(&s->picture[ret]);
            ff_mpeg_unref_picture(s, &s->picture[ret]);
        }
    }
    return ret;
}

static void gray_frame(AVFrame *frame)
{
    int i, h_chroma_shift, v_chroma_shift;

    av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);

    for (i = 0; i < frame->height; i++)
        memset(frame->data[0] + frame->linesize[0] * i, 0x80, frame->width);
    for (i = 0; i < FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
        memset(frame->data[1] + frame->linesize[1] * i,
               0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
        memset(frame->data[2] + frame->linesize[2] * i,
               0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
    }
}
  1498. /**
  1499. * generic function called after decoding
  1500. * the header and before a frame is decoded.
  1501. */
  1502. int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
  1503. {
  1504. int i, ret;
  1505. Picture *pic;
  1506. s->mb_skipped = 0;
  1507. if (!ff_thread_can_start_frame(avctx)) {
  1508. av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
  1509. return -1;
  1510. }
  1511. /* mark & release old frames */
  1512. if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
  1513. s->last_picture_ptr != s->next_picture_ptr &&
  1514. s->last_picture_ptr->f->buf[0]) {
  1515. ff_mpeg_unref_picture(s, s->last_picture_ptr);
  1516. }
  1517. /* release forgotten pictures */
  1518. /* if (mpeg124/h263) */
  1519. for (i = 0; i < MAX_PICTURE_COUNT; i++) {
  1520. if (&s->picture[i] != s->last_picture_ptr &&
  1521. &s->picture[i] != s->next_picture_ptr &&
  1522. s->picture[i].reference && !s->picture[i].needs_realloc) {
  1523. if (!(avctx->active_thread_type & FF_THREAD_FRAME))
  1524. av_log(avctx, AV_LOG_ERROR,
  1525. "releasing zombie picture\n");
  1526. ff_mpeg_unref_picture(s, &s->picture[i]);
  1527. }
  1528. }
  1529. ff_mpeg_unref_picture(s, &s->current_picture);
  1530. release_unused_pictures(s);
  1531. if (s->current_picture_ptr &&
  1532. s->current_picture_ptr->f->buf[0] == NULL) {
1533. // we already have an unused image
  1534. // (maybe it was set before reading the header)
  1535. pic = s->current_picture_ptr;
  1536. } else {
  1537. i = ff_find_unused_picture(s, 0);
  1538. if (i < 0) {
  1539. av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
  1540. return i;
  1541. }
  1542. pic = &s->picture[i];
  1543. }
  1544. pic->reference = 0;
  1545. if (!s->droppable) {
  1546. if (s->pict_type != AV_PICTURE_TYPE_B)
  1547. pic->reference = 3;
  1548. }
  1549. pic->f->coded_picture_number = s->coded_picture_number++;
  1550. if (ff_alloc_picture(s, pic, 0) < 0)
  1551. return -1;
  1552. s->current_picture_ptr = pic;
  1553. // FIXME use only the vars from current_pic
  1554. s->current_picture_ptr->f->top_field_first = s->top_field_first;
  1555. if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
  1556. s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
  1557. if (s->picture_structure != PICT_FRAME)
  1558. s->current_picture_ptr->f->top_field_first =
  1559. (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
  1560. }
  1561. s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
  1562. !s->progressive_sequence;
  1563. s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
  1564. s->current_picture_ptr->f->pict_type = s->pict_type;
  1565. // if (s->flags && CODEC_FLAG_QSCALE)
  1566. // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
  1567. s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
  1568. if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
  1569. s->current_picture_ptr)) < 0)
  1570. return ret;
  1571. if (s->pict_type != AV_PICTURE_TYPE_B) {
  1572. s->last_picture_ptr = s->next_picture_ptr;
  1573. if (!s->droppable)
  1574. s->next_picture_ptr = s->current_picture_ptr;
  1575. }
  1576. av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
  1577. s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
  1578. s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
  1579. s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
  1580. s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
  1581. s->pict_type, s->droppable);
  1582. if ((s->last_picture_ptr == NULL ||
  1583. s->last_picture_ptr->f->buf[0] == NULL) &&
  1584. (s->pict_type != AV_PICTURE_TYPE_I ||
  1585. s->picture_structure != PICT_FRAME)) {
  1586. int h_chroma_shift, v_chroma_shift;
  1587. av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
  1588. &h_chroma_shift, &v_chroma_shift);
  1589. if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
  1590. av_log(avctx, AV_LOG_DEBUG,
  1591. "allocating dummy last picture for B frame\n");
  1592. else if (s->pict_type != AV_PICTURE_TYPE_I)
  1593. av_log(avctx, AV_LOG_ERROR,
  1594. "warning: first frame is no keyframe\n");
  1595. else if (s->picture_structure != PICT_FRAME)
  1596. av_log(avctx, AV_LOG_DEBUG,
  1597. "allocate dummy last picture for field based first keyframe\n");
  1598. /* Allocate a dummy frame */
  1599. i = ff_find_unused_picture(s, 0);
  1600. if (i < 0) {
  1601. av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
  1602. return i;
  1603. }
  1604. s->last_picture_ptr = &s->picture[i];
  1605. s->last_picture_ptr->reference = 3;
  1606. s->last_picture_ptr->f->key_frame = 0;
  1607. s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
  1608. if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
  1609. s->last_picture_ptr = NULL;
  1610. return -1;
  1611. }
  1612. if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
  1613. for(i=0; i<avctx->height; i++)
  1614. memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
  1615. 0x80, avctx->width);
  1616. for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
  1617. memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
  1618. 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
  1619. memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
  1620. 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
  1621. }
  1622. if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
  1623. for(i=0; i<avctx->height; i++)
  1624. memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
  1625. }
  1626. }
  1627. ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
  1628. ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
  1629. }
  1630. if ((s->next_picture_ptr == NULL ||
  1631. s->next_picture_ptr->f->buf[0] == NULL) &&
  1632. s->pict_type == AV_PICTURE_TYPE_B) {
  1633. /* Allocate a dummy frame */
  1634. i = ff_find_unused_picture(s, 0);
  1635. if (i < 0) {
  1636. av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
  1637. return i;
  1638. }
  1639. s->next_picture_ptr = &s->picture[i];
  1640. s->next_picture_ptr->reference = 3;
  1641. s->next_picture_ptr->f->key_frame = 0;
  1642. s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
  1643. if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
  1644. s->next_picture_ptr = NULL;
  1645. return -1;
  1646. }
  1647. ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
  1648. ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
  1649. }
  1650. #if 0 // BUFREF-FIXME
  1651. memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
  1652. memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
  1653. #endif
  1654. if (s->last_picture_ptr) {
  1655. ff_mpeg_unref_picture(s, &s->last_picture);
  1656. if (s->last_picture_ptr->f->buf[0] &&
  1657. (ret = ff_mpeg_ref_picture(s, &s->last_picture,
  1658. s->last_picture_ptr)) < 0)
  1659. return ret;
  1660. }
  1661. if (s->next_picture_ptr) {
  1662. ff_mpeg_unref_picture(s, &s->next_picture);
  1663. if (s->next_picture_ptr->f->buf[0] &&
  1664. (ret = ff_mpeg_ref_picture(s, &s->next_picture,
  1665. s->next_picture_ptr)) < 0)
  1666. return ret;
  1667. }
  1668. av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
  1669. s->last_picture_ptr->f->buf[0]));
  1670. if (s->picture_structure!= PICT_FRAME) {
  1671. int i;
  1672. for (i = 0; i < 4; i++) {
  1673. if (s->picture_structure == PICT_BOTTOM_FIELD) {
  1674. s->current_picture.f->data[i] +=
  1675. s->current_picture.f->linesize[i];
  1676. }
  1677. s->current_picture.f->linesize[i] *= 2;
  1678. s->last_picture.f->linesize[i] *= 2;
  1679. s->next_picture.f->linesize[i] *= 2;
  1680. }
  1681. }
  1682. s->err_recognition = avctx->err_recognition;
  1683. /* set dequantizer, we can't do it during init as
  1684. * it might change for mpeg4 and we can't do it in the header
  1685. * decode as init is not called for mpeg4 there yet */
  1686. if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
  1687. s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
  1688. s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
  1689. } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
  1690. s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
  1691. s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
  1692. } else {
  1693. s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
  1694. s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
  1695. }
  1696. if (s->avctx->debug & FF_DEBUG_NOMC) {
  1697. gray_frame(s->current_picture_ptr->f);
  1698. }
  1699. return 0;
  1700. }
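/* Rough call-order sketch for code built on these helpers (inferred from the
 * comments above and below, not a formal API contract):
 *
 *     ff_MPV_frame_start(s, avctx);   // after the picture header is parsed
 *     // ... decode every macroblock, calling ff_MPV_decode_mb() per MB ...
 *     ff_MPV_frame_end(s);            // report progress / emms
 */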
  1701. /* called after a frame has been decoded. */
  1702. void ff_MPV_frame_end(MpegEncContext *s)
  1703. {
  1704. emms_c();
  1705. if (s->current_picture.reference)
  1706. ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
  1707. }
  1708. static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
  1709. {
  1710. if(*sx > *ex)
  1711. return clip_line(ex, ey, sx, sy, maxx);
  1712. if (*sx < 0) {
  1713. if (*ex < 0)
  1714. return 1;
  1715. *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
  1716. *sx = 0;
  1717. }
  1718. if (*ex > maxx) {
  1719. if (*sx > maxx)
  1720. return 1;
  1721. *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
  1722. *ex = maxx;
  1723. }
  1724. return 0;
  1725. }
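/* clip_line() moves an off-screen endpoint onto the x == 0 or x == maxx border
 * by linear interpolation. Worked example: for a segment from (sx,sy) = (-4,0)
 * to (ex,ey) = (6,10) with maxx = 15, sy becomes 10 + (0 - 10) * 6 / 10 = 4 and
 * sx becomes 0, i.e. the segment is cut exactly where it crosses x == 0. A
 * return value of 1 means the segment lies entirely outside and nothing should
 * be drawn. */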
  1726. /**
  1727. * Draw a line from (ex, ey) -> (sx, sy).
  1728. * @param w width of the image
  1729. * @param h height of the image
  1730. * @param stride stride/linesize of the image
1731. * @param color color of the line
  1732. */
  1733. static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
  1734. int w, int h, int stride, int color)
  1735. {
  1736. int x, y, fr, f;
  1737. if (clip_line(&sx, &sy, &ex, &ey, w - 1))
  1738. return;
  1739. if (clip_line(&sy, &sx, &ey, &ex, h - 1))
  1740. return;
  1741. sx = av_clip(sx, 0, w - 1);
  1742. sy = av_clip(sy, 0, h - 1);
  1743. ex = av_clip(ex, 0, w - 1);
  1744. ey = av_clip(ey, 0, h - 1);
  1745. buf[sy * stride + sx] += color;
  1746. if (FFABS(ex - sx) > FFABS(ey - sy)) {
  1747. if (sx > ex) {
  1748. FFSWAP(int, sx, ex);
  1749. FFSWAP(int, sy, ey);
  1750. }
  1751. buf += sx + sy * stride;
  1752. ex -= sx;
  1753. f = ((ey - sy) << 16) / ex;
  1754. for (x = 0; x <= ex; x++) {
  1755. y = (x * f) >> 16;
  1756. fr = (x * f) & 0xFFFF;
  1757. buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
  1758. if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
  1759. }
  1760. } else {
  1761. if (sy > ey) {
  1762. FFSWAP(int, sx, ex);
  1763. FFSWAP(int, sy, ey);
  1764. }
  1765. buf += sx + sy * stride;
  1766. ey -= sy;
  1767. if (ey)
  1768. f = ((ex - sx) << 16) / ey;
  1769. else
  1770. f = 0;
  1771. for(y= 0; y <= ey; y++){
  1772. x = (y*f) >> 16;
  1773. fr = (y*f) & 0xFFFF;
  1774. buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
  1775. if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
  1776. }
  1777. }
  1778. }
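/* The loops above work in 16.16 fixed point: f is the slope and fr the
 * fractional part of the ideal coordinate. Each step splits the color between
 * the two neighbouring pixels with weights (0x10000 - fr) and fr, which sum to
 * 0x10000, giving a cheap anti-aliased line. */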
  1779. /**
  1780. * Draw an arrow from (ex, ey) -> (sx, sy).
  1781. * @param w width of the image
  1782. * @param h height of the image
  1783. * @param stride stride/linesize of the image
  1784. * @param color color of the arrow
  1785. */
  1786. static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
  1787. int ey, int w, int h, int stride, int color, int tail, int direction)
  1788. {
  1789. int dx,dy;
  1790. if (direction) {
  1791. FFSWAP(int, sx, ex);
  1792. FFSWAP(int, sy, ey);
  1793. }
  1794. sx = av_clip(sx, -100, w + 100);
  1795. sy = av_clip(sy, -100, h + 100);
  1796. ex = av_clip(ex, -100, w + 100);
  1797. ey = av_clip(ey, -100, h + 100);
  1798. dx = ex - sx;
  1799. dy = ey - sy;
  1800. if (dx * dx + dy * dy > 3 * 3) {
  1801. int rx = dx + dy;
  1802. int ry = -dx + dy;
  1803. int length = ff_sqrt((rx * rx + ry * ry) << 8);
  1804. // FIXME subpixel accuracy
  1805. rx = ROUNDED_DIV(rx * 3 << 4, length);
  1806. ry = ROUNDED_DIV(ry * 3 << 4, length);
  1807. if (tail) {
  1808. rx = -rx;
  1809. ry = -ry;
  1810. }
  1811. draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
  1812. draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
  1813. }
  1814. draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
  1815. }
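/* Head geometry sketch: (rx, ry) = (dx + dy, -dx + dy) is the shaft direction
 * rotated by 45 degrees; ff_sqrt() of the squared length (kept in fixed point)
 * rescales it to roughly 3 pixels, and the two head strokes are drawn along
 * (rx, ry) and its perpendicular (-ry, rx). The head is only drawn when the
 * shaft is longer than 3 pixels (dx*dx + dy*dy > 9). */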
  1816. /**
  1817. * Print debugging info for the given picture.
  1818. */
  1819. void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
  1820. uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
  1821. int *low_delay,
  1822. int mb_width, int mb_height, int mb_stride, int quarter_sample)
  1823. {
  1824. if (avctx->hwaccel || !mbtype_table
  1825. || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
  1826. return;
  1827. if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
  1828. int x,y;
  1829. av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
  1830. av_get_picture_type_char(pict->pict_type));
  1831. for (y = 0; y < mb_height; y++) {
  1832. for (x = 0; x < mb_width; x++) {
  1833. if (avctx->debug & FF_DEBUG_SKIP) {
  1834. int count = mbskip_table[x + y * mb_stride];
  1835. if (count > 9)
  1836. count = 9;
  1837. av_log(avctx, AV_LOG_DEBUG, "%1d", count);
  1838. }
  1839. if (avctx->debug & FF_DEBUG_QP) {
  1840. av_log(avctx, AV_LOG_DEBUG, "%2d",
  1841. qscale_table[x + y * mb_stride]);
  1842. }
  1843. if (avctx->debug & FF_DEBUG_MB_TYPE) {
  1844. int mb_type = mbtype_table[x + y * mb_stride];
  1845. // Type & MV direction
  1846. if (IS_PCM(mb_type))
  1847. av_log(avctx, AV_LOG_DEBUG, "P");
  1848. else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
  1849. av_log(avctx, AV_LOG_DEBUG, "A");
  1850. else if (IS_INTRA4x4(mb_type))
  1851. av_log(avctx, AV_LOG_DEBUG, "i");
  1852. else if (IS_INTRA16x16(mb_type))
  1853. av_log(avctx, AV_LOG_DEBUG, "I");
  1854. else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
  1855. av_log(avctx, AV_LOG_DEBUG, "d");
  1856. else if (IS_DIRECT(mb_type))
  1857. av_log(avctx, AV_LOG_DEBUG, "D");
  1858. else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
  1859. av_log(avctx, AV_LOG_DEBUG, "g");
  1860. else if (IS_GMC(mb_type))
  1861. av_log(avctx, AV_LOG_DEBUG, "G");
  1862. else if (IS_SKIP(mb_type))
  1863. av_log(avctx, AV_LOG_DEBUG, "S");
  1864. else if (!USES_LIST(mb_type, 1))
  1865. av_log(avctx, AV_LOG_DEBUG, ">");
  1866. else if (!USES_LIST(mb_type, 0))
  1867. av_log(avctx, AV_LOG_DEBUG, "<");
  1868. else {
  1869. av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
  1870. av_log(avctx, AV_LOG_DEBUG, "X");
  1871. }
  1872. // segmentation
  1873. if (IS_8X8(mb_type))
  1874. av_log(avctx, AV_LOG_DEBUG, "+");
  1875. else if (IS_16X8(mb_type))
  1876. av_log(avctx, AV_LOG_DEBUG, "-");
  1877. else if (IS_8X16(mb_type))
  1878. av_log(avctx, AV_LOG_DEBUG, "|");
  1879. else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
  1880. av_log(avctx, AV_LOG_DEBUG, " ");
  1881. else
  1882. av_log(avctx, AV_LOG_DEBUG, "?");
  1883. if (IS_INTERLACED(mb_type))
  1884. av_log(avctx, AV_LOG_DEBUG, "=");
  1885. else
  1886. av_log(avctx, AV_LOG_DEBUG, " ");
  1887. }
  1888. }
  1889. av_log(avctx, AV_LOG_DEBUG, "\n");
  1890. }
  1891. }
  1892. if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
  1893. (avctx->debug_mv)) {
  1894. const int shift = 1 + quarter_sample;
  1895. int mb_y;
  1896. uint8_t *ptr;
  1897. int i;
  1898. int h_chroma_shift, v_chroma_shift, block_height;
  1899. const int width = avctx->width;
  1900. const int height = avctx->height;
  1901. const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
  1902. const int mv_stride = (mb_width << mv_sample_log2) +
  1903. (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
  1904. *low_delay = 0; // needed to see the vectors without trashing the buffers
  1905. avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
  1906. av_frame_make_writable(pict);
  1907. pict->opaque = NULL;
  1908. ptr = pict->data[0];
  1909. block_height = 16 >> v_chroma_shift;
  1910. for (mb_y = 0; mb_y < mb_height; mb_y++) {
  1911. int mb_x;
  1912. for (mb_x = 0; mb_x < mb_width; mb_x++) {
  1913. const int mb_index = mb_x + mb_y * mb_stride;
  1914. if ((avctx->debug_mv) && motion_val[0]) {
  1915. int type;
  1916. for (type = 0; type < 3; type++) {
  1917. int direction = 0;
  1918. switch (type) {
  1919. case 0:
  1920. if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
  1921. (pict->pict_type!= AV_PICTURE_TYPE_P))
  1922. continue;
  1923. direction = 0;
  1924. break;
  1925. case 1:
  1926. if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
  1927. (pict->pict_type!= AV_PICTURE_TYPE_B))
  1928. continue;
  1929. direction = 0;
  1930. break;
  1931. case 2:
  1932. if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
  1933. (pict->pict_type!= AV_PICTURE_TYPE_B))
  1934. continue;
  1935. direction = 1;
  1936. break;
  1937. }
  1938. if (!USES_LIST(mbtype_table[mb_index], direction))
  1939. continue;
  1940. if (IS_8X8(mbtype_table[mb_index])) {
  1941. int i;
  1942. for (i = 0; i < 4; i++) {
  1943. int sx = mb_x * 16 + 4 + 8 * (i & 1);
  1944. int sy = mb_y * 16 + 4 + 8 * (i >> 1);
  1945. int xy = (mb_x * 2 + (i & 1) +
  1946. (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
  1947. int mx = (motion_val[direction][xy][0] >> shift) + sx;
  1948. int my = (motion_val[direction][xy][1] >> shift) + sy;
  1949. draw_arrow(ptr, sx, sy, mx, my, width,
  1950. height, pict->linesize[0], 100, 0, direction);
  1951. }
  1952. } else if (IS_16X8(mbtype_table[mb_index])) {
  1953. int i;
  1954. for (i = 0; i < 2; i++) {
  1955. int sx = mb_x * 16 + 8;
  1956. int sy = mb_y * 16 + 4 + 8 * i;
  1957. int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
  1958. int mx = (motion_val[direction][xy][0] >> shift);
  1959. int my = (motion_val[direction][xy][1] >> shift);
  1960. if (IS_INTERLACED(mbtype_table[mb_index]))
  1961. my *= 2;
  1962. draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
  1963. height, pict->linesize[0], 100, 0, direction);
  1964. }
  1965. } else if (IS_8X16(mbtype_table[mb_index])) {
  1966. int i;
  1967. for (i = 0; i < 2; i++) {
  1968. int sx = mb_x * 16 + 4 + 8 * i;
  1969. int sy = mb_y * 16 + 8;
  1970. int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
  1971. int mx = motion_val[direction][xy][0] >> shift;
  1972. int my = motion_val[direction][xy][1] >> shift;
  1973. if (IS_INTERLACED(mbtype_table[mb_index]))
  1974. my *= 2;
  1975. draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
  1976. height, pict->linesize[0], 100, 0, direction);
  1977. }
  1978. } else {
  1979. int sx= mb_x * 16 + 8;
  1980. int sy= mb_y * 16 + 8;
  1981. int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
  1982. int mx= (motion_val[direction][xy][0]>>shift) + sx;
  1983. int my= (motion_val[direction][xy][1]>>shift) + sy;
  1984. draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
  1985. }
  1986. }
  1987. }
  1988. if ((avctx->debug & FF_DEBUG_VIS_QP)) {
  1989. uint64_t c = (qscale_table[mb_index] * 128 / 31) *
  1990. 0x0101010101010101ULL;
  1991. int y;
  1992. for (y = 0; y < block_height; y++) {
  1993. *(uint64_t *)(pict->data[1] + 8 * mb_x +
  1994. (block_height * mb_y + y) *
  1995. pict->linesize[1]) = c;
  1996. *(uint64_t *)(pict->data[2] + 8 * mb_x +
  1997. (block_height * mb_y + y) *
  1998. pict->linesize[2]) = c;
  1999. }
  2000. }
  2001. if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
  2002. motion_val[0]) {
  2003. int mb_type = mbtype_table[mb_index];
  2004. uint64_t u,v;
  2005. int y;
  2006. #define COLOR(theta, r) \
  2007. u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
  2008. v = (int)(128 + r * sin(theta * 3.141592 / 180));
  2009. u = v = 128;
  2010. if (IS_PCM(mb_type)) {
  2011. COLOR(120, 48)
  2012. } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
  2013. IS_INTRA16x16(mb_type)) {
  2014. COLOR(30, 48)
  2015. } else if (IS_INTRA4x4(mb_type)) {
  2016. COLOR(90, 48)
  2017. } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
  2018. // COLOR(120, 48)
  2019. } else if (IS_DIRECT(mb_type)) {
  2020. COLOR(150, 48)
  2021. } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
  2022. COLOR(170, 48)
  2023. } else if (IS_GMC(mb_type)) {
  2024. COLOR(190, 48)
  2025. } else if (IS_SKIP(mb_type)) {
  2026. // COLOR(180, 48)
  2027. } else if (!USES_LIST(mb_type, 1)) {
  2028. COLOR(240, 48)
  2029. } else if (!USES_LIST(mb_type, 0)) {
  2030. COLOR(0, 48)
  2031. } else {
  2032. av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
  2033. COLOR(300,48)
  2034. }
  2035. u *= 0x0101010101010101ULL;
  2036. v *= 0x0101010101010101ULL;
  2037. for (y = 0; y < block_height; y++) {
  2038. *(uint64_t *)(pict->data[1] + 8 * mb_x +
  2039. (block_height * mb_y + y) * pict->linesize[1]) = u;
  2040. *(uint64_t *)(pict->data[2] + 8 * mb_x +
  2041. (block_height * mb_y + y) * pict->linesize[2]) = v;
  2042. }
  2043. // segmentation
  2044. if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
  2045. *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
  2046. (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
  2047. *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
  2048. (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
  2049. }
  2050. if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
  2051. for (y = 0; y < 16; y++)
  2052. pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
  2053. pict->linesize[0]] ^= 0x80;
  2054. }
  2055. if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
  2056. int dm = 1 << (mv_sample_log2 - 2);
  2057. for (i = 0; i < 4; i++) {
  2058. int sx = mb_x * 16 + 8 * (i & 1);
  2059. int sy = mb_y * 16 + 8 * (i >> 1);
  2060. int xy = (mb_x * 2 + (i & 1) +
  2061. (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
  2062. // FIXME bidir
  2063. int32_t *mv = (int32_t *) &motion_val[0][xy];
  2064. if (mv[0] != mv[dm] ||
  2065. mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
  2066. for (y = 0; y < 8; y++)
  2067. pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
  2068. if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
  2069. *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
  2070. pict->linesize[0]) ^= 0x8080808080808080ULL;
  2071. }
  2072. }
  2073. if (IS_INTERLACED(mb_type) &&
  2074. avctx->codec->id == AV_CODEC_ID_H264) {
  2075. // hmm
  2076. }
  2077. }
  2078. mbskip_table[mb_index] = 0;
  2079. }
  2080. }
  2081. }
  2082. }
  2083. void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
  2084. {
  2085. ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
  2086. p->qscale_table, p->motion_val, &s->low_delay,
  2087. s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
  2088. }
  2089. int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
  2090. {
  2091. AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
  2092. int offset = 2*s->mb_stride + 1;
  2093. if(!ref)
  2094. return AVERROR(ENOMEM);
  2095. av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
  2096. ref->size -= offset;
  2097. ref->data += offset;
  2098. return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
  2099. }
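/* Offset sketch (assuming p->qscale_table itself points 2*mb_stride + 1
 * entries into qscale_table_buf, i.e. past the edge padding before MB (0,0)):
 * advancing the exported reference by the same amount makes entry
 * [mb_x + mb_y * mb_stride] of the side data the qscale of that macroblock,
 * and the assert checks that (height + 15) / 16 MB rows still fit after the
 * shift. */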
  2100. static inline int hpel_motion_lowres(MpegEncContext *s,
  2101. uint8_t *dest, uint8_t *src,
  2102. int field_based, int field_select,
  2103. int src_x, int src_y,
  2104. int width, int height, ptrdiff_t stride,
  2105. int h_edge_pos, int v_edge_pos,
  2106. int w, int h, h264_chroma_mc_func *pix_op,
  2107. int motion_x, int motion_y)
  2108. {
  2109. const int lowres = s->avctx->lowres;
  2110. const int op_index = FFMIN(lowres, 3);
  2111. const int s_mask = (2 << lowres) - 1;
  2112. int emu = 0;
  2113. int sx, sy;
  2114. if (s->quarter_sample) {
  2115. motion_x /= 2;
  2116. motion_y /= 2;
  2117. }
  2118. sx = motion_x & s_mask;
  2119. sy = motion_y & s_mask;
  2120. src_x += motion_x >> lowres + 1;
  2121. src_y += motion_y >> lowres + 1;
  2122. src += src_y * stride + src_x;
  2123. if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
  2124. (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
  2125. s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
  2126. s->linesize, s->linesize,
  2127. w + 1, (h + 1) << field_based,
  2128. src_x, src_y << field_based,
  2129. h_edge_pos, v_edge_pos);
  2130. src = s->edge_emu_buffer;
  2131. emu = 1;
  2132. }
  2133. sx = (sx << 2) >> lowres;
  2134. sy = (sy << 2) >> lowres;
  2135. if (field_select)
  2136. src += s->linesize;
  2137. pix_op[op_index](dest, src, stride, h, sx, sy);
  2138. return emu;
  2139. }
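/* Sub-pel bookkeeping sketch: with lowres = s->avctx->lowres the motion vector
 * keeps (lowres + 1) fractional bits (s_mask), the integer part advances src,
 * and (sx << 2) >> lowres rescales the fraction into the 0..7 range the
 * h264_chroma MC functions expect (eighths of a pixel). E.g. at lowres 0 a
 * half-pel offset (sx == 1) becomes weight 4/8, at lowres 1 a quarter step
 * becomes 2/8. */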
  2140. /* apply one mpeg motion vector to the three components */
  2141. static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
  2142. uint8_t *dest_y,
  2143. uint8_t *dest_cb,
  2144. uint8_t *dest_cr,
  2145. int field_based,
  2146. int bottom_field,
  2147. int field_select,
  2148. uint8_t **ref_picture,
  2149. h264_chroma_mc_func *pix_op,
  2150. int motion_x, int motion_y,
  2151. int h, int mb_y)
  2152. {
  2153. uint8_t *ptr_y, *ptr_cb, *ptr_cr;
  2154. int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
  2155. ptrdiff_t uvlinesize, linesize;
  2156. const int lowres = s->avctx->lowres;
  2157. const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
  2158. const int block_s = 8>>lowres;
  2159. const int s_mask = (2 << lowres) - 1;
  2160. const int h_edge_pos = s->h_edge_pos >> lowres;
  2161. const int v_edge_pos = s->v_edge_pos >> lowres;
  2162. linesize = s->current_picture.f->linesize[0] << field_based;
  2163. uvlinesize = s->current_picture.f->linesize[1] << field_based;
  2164. // FIXME obviously not perfect but qpel will not work in lowres anyway
  2165. if (s->quarter_sample) {
  2166. motion_x /= 2;
  2167. motion_y /= 2;
  2168. }
  2169. if(field_based){
  2170. motion_y += (bottom_field - field_select)*((1 << lowres)-1);
  2171. }
  2172. sx = motion_x & s_mask;
  2173. sy = motion_y & s_mask;
  2174. src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
  2175. src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
  2176. if (s->out_format == FMT_H263) {
  2177. uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
  2178. uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
  2179. uvsrc_x = src_x >> 1;
  2180. uvsrc_y = src_y >> 1;
  2181. } else if (s->out_format == FMT_H261) {
2182. // chroma MVs (always even) are full-pel in H.261
  2183. mx = motion_x / 4;
  2184. my = motion_y / 4;
  2185. uvsx = (2 * mx) & s_mask;
  2186. uvsy = (2 * my) & s_mask;
  2187. uvsrc_x = s->mb_x * block_s + (mx >> lowres);
  2188. uvsrc_y = mb_y * block_s + (my >> lowres);
  2189. } else {
  2190. if(s->chroma_y_shift){
  2191. mx = motion_x / 2;
  2192. my = motion_y / 2;
  2193. uvsx = mx & s_mask;
  2194. uvsy = my & s_mask;
  2195. uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
  2196. uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
  2197. } else {
  2198. if(s->chroma_x_shift){
  2199. //Chroma422
  2200. mx = motion_x / 2;
  2201. uvsx = mx & s_mask;
  2202. uvsy = motion_y & s_mask;
  2203. uvsrc_y = src_y;
  2204. uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
  2205. } else {
  2206. //Chroma444
  2207. uvsx = motion_x & s_mask;
  2208. uvsy = motion_y & s_mask;
  2209. uvsrc_x = src_x;
  2210. uvsrc_y = src_y;
  2211. }
  2212. }
  2213. }
  2214. ptr_y = ref_picture[0] + src_y * linesize + src_x;
  2215. ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
  2216. ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
  2217. if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
  2218. (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
  2219. s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
  2220. linesize >> field_based, linesize >> field_based,
  2221. 17, 17 + field_based,
  2222. src_x, src_y << field_based, h_edge_pos,
  2223. v_edge_pos);
  2224. ptr_y = s->edge_emu_buffer;
  2225. if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
  2226. uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
  2227. uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
  2228. s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
  2229. uvlinesize >> field_based, uvlinesize >> field_based,
  2230. 9, 9 + field_based,
  2231. uvsrc_x, uvsrc_y << field_based,
  2232. h_edge_pos >> 1, v_edge_pos >> 1);
  2233. s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
  2234. uvlinesize >> field_based,uvlinesize >> field_based,
  2235. 9, 9 + field_based,
  2236. uvsrc_x, uvsrc_y << field_based,
  2237. h_edge_pos >> 1, v_edge_pos >> 1);
  2238. ptr_cb = ubuf;
  2239. ptr_cr = vbuf;
  2240. }
  2241. }
  2242. // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
  2243. if (bottom_field) {
  2244. dest_y += s->linesize;
  2245. dest_cb += s->uvlinesize;
  2246. dest_cr += s->uvlinesize;
  2247. }
  2248. if (field_select) {
  2249. ptr_y += s->linesize;
  2250. ptr_cb += s->uvlinesize;
  2251. ptr_cr += s->uvlinesize;
  2252. }
  2253. sx = (sx << 2) >> lowres;
  2254. sy = (sy << 2) >> lowres;
  2255. pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
  2256. if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
  2257. int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
  2258. uvsx = (uvsx << 2) >> lowres;
  2259. uvsy = (uvsy << 2) >> lowres;
  2260. if (hc) {
  2261. pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
  2262. pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
  2263. }
  2264. }
  2265. // FIXME h261 lowres loop filter
  2266. }
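/* Summary of the chroma MV branches above: FMT_H263 keeps the extra fractional
 * bit (uvsx |= sx & 1), H.261 forces full-pel chroma, and the generic case
 * halves the MV only along the subsampled axes (4:2:0 both, 4:2:2 horizontal
 * only, 4:4:4 none). */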
  2267. static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
  2268. uint8_t *dest_cb, uint8_t *dest_cr,
  2269. uint8_t **ref_picture,
  2270. h264_chroma_mc_func * pix_op,
  2271. int mx, int my)
  2272. {
  2273. const int lowres = s->avctx->lowres;
  2274. const int op_index = FFMIN(lowres, 3);
  2275. const int block_s = 8 >> lowres;
  2276. const int s_mask = (2 << lowres) - 1;
  2277. const int h_edge_pos = s->h_edge_pos >> lowres + 1;
  2278. const int v_edge_pos = s->v_edge_pos >> lowres + 1;
  2279. int emu = 0, src_x, src_y, sx, sy;
  2280. ptrdiff_t offset;
  2281. uint8_t *ptr;
  2282. if (s->quarter_sample) {
  2283. mx /= 2;
  2284. my /= 2;
  2285. }
  2286. /* In case of 8X8, we construct a single chroma motion vector
  2287. with a special rounding */
  2288. mx = ff_h263_round_chroma(mx);
  2289. my = ff_h263_round_chroma(my);
  2290. sx = mx & s_mask;
  2291. sy = my & s_mask;
  2292. src_x = s->mb_x * block_s + (mx >> lowres + 1);
  2293. src_y = s->mb_y * block_s + (my >> lowres + 1);
  2294. offset = src_y * s->uvlinesize + src_x;
  2295. ptr = ref_picture[1] + offset;
  2296. if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
  2297. (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
  2298. s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
  2299. s->uvlinesize, s->uvlinesize,
  2300. 9, 9,
  2301. src_x, src_y, h_edge_pos, v_edge_pos);
  2302. ptr = s->edge_emu_buffer;
  2303. emu = 1;
  2304. }
  2305. sx = (sx << 2) >> lowres;
  2306. sy = (sy << 2) >> lowres;
  2307. pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
  2308. ptr = ref_picture[2] + offset;
  2309. if (emu) {
  2310. s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
  2311. s->uvlinesize, s->uvlinesize,
  2312. 9, 9,
  2313. src_x, src_y, h_edge_pos, v_edge_pos);
  2314. ptr = s->edge_emu_buffer;
  2315. }
  2316. pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
  2317. }
  2318. /**
  2319. * motion compensation of a single macroblock
  2320. * @param s context
  2321. * @param dest_y luma destination pointer
  2322. * @param dest_cb chroma cb/u destination pointer
  2323. * @param dest_cr chroma cr/v destination pointer
  2324. * @param dir direction (0->forward, 1->backward)
  2325. * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
  2326. * @param pix_op halfpel motion compensation function (average or put normally)
  2327. * the motion vectors are taken from s->mv and the MV type from s->mv_type
  2328. */
  2329. static inline void MPV_motion_lowres(MpegEncContext *s,
  2330. uint8_t *dest_y, uint8_t *dest_cb,
  2331. uint8_t *dest_cr,
  2332. int dir, uint8_t **ref_picture,
  2333. h264_chroma_mc_func *pix_op)
  2334. {
  2335. int mx, my;
  2336. int mb_x, mb_y, i;
  2337. const int lowres = s->avctx->lowres;
  2338. const int block_s = 8 >>lowres;
  2339. mb_x = s->mb_x;
  2340. mb_y = s->mb_y;
  2341. switch (s->mv_type) {
  2342. case MV_TYPE_16X16:
  2343. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2344. 0, 0, 0,
  2345. ref_picture, pix_op,
  2346. s->mv[dir][0][0], s->mv[dir][0][1],
  2347. 2 * block_s, mb_y);
  2348. break;
  2349. case MV_TYPE_8X8:
  2350. mx = 0;
  2351. my = 0;
  2352. for (i = 0; i < 4; i++) {
  2353. hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
  2354. s->linesize) * block_s,
  2355. ref_picture[0], 0, 0,
  2356. (2 * mb_x + (i & 1)) * block_s,
  2357. (2 * mb_y + (i >> 1)) * block_s,
  2358. s->width, s->height, s->linesize,
  2359. s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
  2360. block_s, block_s, pix_op,
  2361. s->mv[dir][i][0], s->mv[dir][i][1]);
  2362. mx += s->mv[dir][i][0];
  2363. my += s->mv[dir][i][1];
  2364. }
  2365. if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
  2366. chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
  2367. pix_op, mx, my);
  2368. break;
  2369. case MV_TYPE_FIELD:
  2370. if (s->picture_structure == PICT_FRAME) {
  2371. /* top field */
  2372. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2373. 1, 0, s->field_select[dir][0],
  2374. ref_picture, pix_op,
  2375. s->mv[dir][0][0], s->mv[dir][0][1],
  2376. block_s, mb_y);
  2377. /* bottom field */
  2378. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2379. 1, 1, s->field_select[dir][1],
  2380. ref_picture, pix_op,
  2381. s->mv[dir][1][0], s->mv[dir][1][1],
  2382. block_s, mb_y);
  2383. } else {
  2384. if (s->picture_structure != s->field_select[dir][0] + 1 &&
  2385. s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
  2386. ref_picture = s->current_picture_ptr->f->data;
  2387. }
  2388. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2389. 0, 0, s->field_select[dir][0],
  2390. ref_picture, pix_op,
  2391. s->mv[dir][0][0],
  2392. s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
  2393. }
  2394. break;
  2395. case MV_TYPE_16X8:
  2396. for (i = 0; i < 2; i++) {
  2397. uint8_t **ref2picture;
  2398. if (s->picture_structure == s->field_select[dir][i] + 1 ||
  2399. s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
  2400. ref2picture = ref_picture;
  2401. } else {
  2402. ref2picture = s->current_picture_ptr->f->data;
  2403. }
  2404. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2405. 0, 0, s->field_select[dir][i],
  2406. ref2picture, pix_op,
  2407. s->mv[dir][i][0], s->mv[dir][i][1] +
  2408. 2 * block_s * i, block_s, mb_y >> 1);
  2409. dest_y += 2 * block_s * s->linesize;
  2410. dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
  2411. dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
  2412. }
  2413. break;
  2414. case MV_TYPE_DMV:
  2415. if (s->picture_structure == PICT_FRAME) {
  2416. for (i = 0; i < 2; i++) {
  2417. int j;
  2418. for (j = 0; j < 2; j++) {
  2419. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2420. 1, j, j ^ i,
  2421. ref_picture, pix_op,
  2422. s->mv[dir][2 * i + j][0],
  2423. s->mv[dir][2 * i + j][1],
  2424. block_s, mb_y);
  2425. }
  2426. pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
  2427. }
  2428. } else {
  2429. for (i = 0; i < 2; i++) {
  2430. mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
  2431. 0, 0, s->picture_structure != i + 1,
  2432. ref_picture, pix_op,
  2433. s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
  2434. 2 * block_s, mb_y >> 1);
2435. // after the put we average into the same block
  2436. pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2437. // the opposite parity field is always in the same
2438. // frame if this is the second field
  2439. if (!s->first_field) {
  2440. ref_picture = s->current_picture_ptr->f->data;
  2441. }
  2442. }
  2443. }
  2444. break;
  2445. default:
  2446. av_assert2(0);
  2447. }
  2448. }
  2449. /**
  2450. * find the lowest MB row referenced in the MVs
  2451. */
  2452. int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
  2453. {
  2454. int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
  2455. int my, off, i, mvs;
  2456. if (s->picture_structure != PICT_FRAME || s->mcsel)
  2457. goto unhandled;
  2458. switch (s->mv_type) {
  2459. case MV_TYPE_16X16:
  2460. mvs = 1;
  2461. break;
  2462. case MV_TYPE_16X8:
  2463. mvs = 2;
  2464. break;
  2465. case MV_TYPE_8X8:
  2466. mvs = 4;
  2467. break;
  2468. default:
  2469. goto unhandled;
  2470. }
  2471. for (i = 0; i < mvs; i++) {
  2472. my = s->mv[dir][i][1]<<qpel_shift;
  2473. my_max = FFMAX(my_max, my);
  2474. my_min = FFMIN(my_min, my);
  2475. }
  2476. off = (FFMAX(-my_min, my_max) + 63) >> 6;
  2477. return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
  2478. unhandled:
  2479. return s->mb_height-1;
  2480. }
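/* The arithmetic above works in quarter-pel units (half-pel MVs are shifted
 * left by one), so 64 units equal one 16-pixel MB row and
 * off = (max|my| + 63) >> 6 is a ceiling division. Example: a vertical MV of
 * 72 quarter-pel (18 pixels) gives off = (72 + 63) >> 6 = 2 extra MB rows. */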
  2481. /* put block[] to dest[] */
  2482. static inline void put_dct(MpegEncContext *s,
  2483. int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
  2484. {
  2485. s->dct_unquantize_intra(s, block, i, qscale);
  2486. s->idsp.idct_put(dest, line_size, block);
  2487. }
  2488. /* add block[] to dest[] */
  2489. static inline void add_dct(MpegEncContext *s,
  2490. int16_t *block, int i, uint8_t *dest, int line_size)
  2491. {
  2492. if (s->block_last_index[i] >= 0) {
  2493. s->idsp.idct_add(dest, line_size, block);
  2494. }
  2495. }
  2496. static inline void add_dequant_dct(MpegEncContext *s,
  2497. int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
  2498. {
  2499. if (s->block_last_index[i] >= 0) {
  2500. s->dct_unquantize_inter(s, block, i, qscale);
  2501. s->idsp.idct_add(dest, line_size, block);
  2502. }
  2503. }
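/* The three helpers above cover every combination used below: put_dct() writes
 * an intra block (dequantize + idct_put), add_dct() adds an already
 * dequantized residual (idct_add), and add_dequant_dct() dequantizes and adds
 * an inter residual; the add_* variants skip empty blocks via
 * block_last_index. */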
  2504. /**
  2505. * Clean dc, ac, coded_block for the current non-intra MB.
  2506. */
  2507. void ff_clean_intra_table_entries(MpegEncContext *s)
  2508. {
  2509. int wrap = s->b8_stride;
  2510. int xy = s->block_index[0];
  2511. s->dc_val[0][xy ] =
  2512. s->dc_val[0][xy + 1 ] =
  2513. s->dc_val[0][xy + wrap] =
  2514. s->dc_val[0][xy + 1 + wrap] = 1024;
  2515. /* ac pred */
  2516. memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
  2517. memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
  2518. if (s->msmpeg4_version>=3) {
  2519. s->coded_block[xy ] =
  2520. s->coded_block[xy + 1 ] =
  2521. s->coded_block[xy + wrap] =
  2522. s->coded_block[xy + 1 + wrap] = 0;
  2523. }
  2524. /* chroma */
  2525. wrap = s->mb_stride;
  2526. xy = s->mb_x + s->mb_y * wrap;
  2527. s->dc_val[1][xy] =
  2528. s->dc_val[2][xy] = 1024;
  2529. /* ac pred */
  2530. memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
  2531. memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
  2532. s->mbintra_table[xy]= 0;
  2533. }
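/* Resetting dc_val to 1024 (the standard "no prediction" DC value) and zeroing
 * the ac_val rows makes the next intra macroblock start its DC/AC prediction
 * from neutral defaults instead of stale data; coded_block is only reset for
 * msmpeg4_version >= 3, matching the check above. */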
  2534. /* generic function called after a macroblock has been parsed by the
  2535. decoder or after it has been encoded by the encoder.
  2536. Important variables used:
  2537. s->mb_intra : true if intra macroblock
  2538. s->mv_dir : motion vector direction
  2539. s->mv_type : motion vector type
  2540. s->mv : motion vector
  2541. s->interlaced_dct : true if interlaced dct used (mpeg2)
  2542. */
  2543. static av_always_inline
  2544. void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
  2545. int lowres_flag, int is_mpeg12)
  2546. {
  2547. const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
  2548. if (CONFIG_XVMC &&
  2549. s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
  2550. s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
  2551. return;
  2552. }
  2553. if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
  2554. /* print DCT coefficients */
  2555. int i,j;
  2556. av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
  2557. for(i=0; i<6; i++){
  2558. for(j=0; j<64; j++){
  2559. av_log(s->avctx, AV_LOG_DEBUG, "%5d",
  2560. block[i][s->idsp.idct_permutation[j]]);
  2561. }
  2562. av_log(s->avctx, AV_LOG_DEBUG, "\n");
  2563. }
  2564. }
  2565. s->current_picture.qscale_table[mb_xy] = s->qscale;
  2566. /* update DC predictors for P macroblocks */
  2567. if (!s->mb_intra) {
  2568. if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
  2569. if(s->mbintra_table[mb_xy])
  2570. ff_clean_intra_table_entries(s);
  2571. } else {
  2572. s->last_dc[0] =
  2573. s->last_dc[1] =
  2574. s->last_dc[2] = 128 << s->intra_dc_precision;
  2575. }
  2576. }
  2577. else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
  2578. s->mbintra_table[mb_xy]=1;
  2579. if ( (s->flags&CODEC_FLAG_PSNR)
  2580. || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor
  2581. || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
  2582. uint8_t *dest_y, *dest_cb, *dest_cr;
  2583. int dct_linesize, dct_offset;
  2584. op_pixels_func (*op_pix)[4];
  2585. qpel_mc_func (*op_qpix)[16];
  2586. const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
  2587. const int uvlinesize = s->current_picture.f->linesize[1];
  2588. const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
  2589. const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
  2590. /* avoid copy if macroblock skipped in last frame too */
  2591. /* skip only during decoding as we might trash the buffers during encoding a bit */
  2592. if(!s->encoding){
  2593. uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
  2594. if (s->mb_skipped) {
  2595. s->mb_skipped= 0;
  2596. av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
  2597. *mbskip_ptr = 1;
  2598. } else if(!s->current_picture.reference) {
  2599. *mbskip_ptr = 1;
  2600. } else{
  2601. *mbskip_ptr = 0; /* not skipped */
  2602. }
  2603. }
  2604. dct_linesize = linesize << s->interlaced_dct;
  2605. dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
  2606. if(readable){
  2607. dest_y= s->dest[0];
  2608. dest_cb= s->dest[1];
  2609. dest_cr= s->dest[2];
  2610. }else{
  2611. dest_y = s->b_scratchpad;
  2612. dest_cb= s->b_scratchpad+16*linesize;
  2613. dest_cr= s->b_scratchpad+32*linesize;
  2614. }
  2615. if (!s->mb_intra) {
  2616. /* motion handling */
  2617. /* decoding or more than one mb_type (MC was already done otherwise) */
  2618. if(!s->encoding){
  2619. if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
  2620. if (s->mv_dir & MV_DIR_FORWARD) {
  2621. ff_thread_await_progress(&s->last_picture_ptr->tf,
  2622. ff_MPV_lowest_referenced_row(s, 0),
  2623. 0);
  2624. }
  2625. if (s->mv_dir & MV_DIR_BACKWARD) {
  2626. ff_thread_await_progress(&s->next_picture_ptr->tf,
  2627. ff_MPV_lowest_referenced_row(s, 1),
  2628. 0);
  2629. }
  2630. }
  2631. if(lowres_flag){
  2632. h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
  2633. if (s->mv_dir & MV_DIR_FORWARD) {
  2634. MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
  2635. op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
  2636. }
  2637. if (s->mv_dir & MV_DIR_BACKWARD) {
  2638. MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
  2639. }
  2640. }else{
  2641. op_qpix = s->me.qpel_put;
  2642. if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
  2643. op_pix = s->hdsp.put_pixels_tab;
  2644. }else{
  2645. op_pix = s->hdsp.put_no_rnd_pixels_tab;
  2646. }
  2647. if (s->mv_dir & MV_DIR_FORWARD) {
  2648. ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
  2649. op_pix = s->hdsp.avg_pixels_tab;
  2650. op_qpix= s->me.qpel_avg;
  2651. }
  2652. if (s->mv_dir & MV_DIR_BACKWARD) {
  2653. ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
  2654. }
  2655. }
  2656. }
  2657. /* skip dequant / idct if we are really late ;) */
  2658. if(s->avctx->skip_idct){
  2659. if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
  2660. ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
  2661. || s->avctx->skip_idct >= AVDISCARD_ALL)
  2662. goto skip_idct;
  2663. }
  2664. /* add dct residue */
  2665. if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
  2666. || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
  2667. add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
  2668. add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
  2669. add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
  2670. add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
  2671. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2672. if (s->chroma_y_shift){
  2673. add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
  2674. add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
  2675. }else{
  2676. dct_linesize >>= 1;
  2677. dct_offset >>=1;
  2678. add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
  2679. add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
  2680. add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
  2681. add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
  2682. }
  2683. }
  2684. } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
  2685. add_dct(s, block[0], 0, dest_y , dct_linesize);
  2686. add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
  2687. add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
  2688. add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
  2689. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2690. if(s->chroma_y_shift){//Chroma420
  2691. add_dct(s, block[4], 4, dest_cb, uvlinesize);
  2692. add_dct(s, block[5], 5, dest_cr, uvlinesize);
  2693. }else{
  2694. //chroma422
  2695. dct_linesize = uvlinesize << s->interlaced_dct;
  2696. dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
  2697. add_dct(s, block[4], 4, dest_cb, dct_linesize);
  2698. add_dct(s, block[5], 5, dest_cr, dct_linesize);
  2699. add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
  2700. add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
  2701. if(!s->chroma_x_shift){//Chroma444
  2702. add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
  2703. add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
  2704. add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
  2705. add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
  2706. }
  2707. }
2708. } // end of !CODEC_FLAG_GRAY
  2709. }
  2710. else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
  2711. ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
  2712. }
  2713. } else {
  2714. /* dct only in intra block */
  2715. if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
  2716. put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
  2717. put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
  2718. put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
  2719. put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
  2720. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2721. if(s->chroma_y_shift){
  2722. put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
  2723. put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
  2724. }else{
  2725. dct_offset >>=1;
  2726. dct_linesize >>=1;
  2727. put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
  2728. put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
  2729. put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
  2730. put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
  2731. }
  2732. }
  2733. }else{
  2734. s->idsp.idct_put(dest_y, dct_linesize, block[0]);
  2735. s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
  2736. s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
  2737. s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
  2738. if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
  2739. if(s->chroma_y_shift){
  2740. s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
  2741. s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
  2742. }else{
  2743. dct_linesize = uvlinesize << s->interlaced_dct;
  2744. dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
  2745. s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
  2746. s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
  2747. s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
  2748. s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
  2749. if(!s->chroma_x_shift){//Chroma444
  2750. s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
  2751. s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
  2752. s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
  2753. s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
  2754. }
  2755. }
2756. } // end of !CODEC_FLAG_GRAY
  2757. }
  2758. }
  2759. skip_idct:
  2760. if(!readable){
  2761. s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
  2762. s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
  2763. s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
  2764. }
  2765. }
  2766. }
  2767. void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
  2768. #if !CONFIG_SMALL
  2769. if(s->out_format == FMT_MPEG1) {
  2770. if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
  2771. else MPV_decode_mb_internal(s, block, 0, 1);
  2772. } else
  2773. #endif
  2774. if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
  2775. else MPV_decode_mb_internal(s, block, 0, 0);
  2776. }
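/* Dispatch note: MPV_decode_mb_internal() is av_always_inline, so each call
 * above with constant lowres_flag / is_mpeg12 arguments compiles to its own
 * specialized body (the MPEG-1/2 specializations are dropped when CONFIG_SMALL
 * is set), trading code size for fewer per-macroblock branches. */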
  2777. void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
  2778. {
  2779. ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
  2780. s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
  2781. s->first_field, s->low_delay);
  2782. }
  2783. void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
  2784. const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
  2785. const int uvlinesize = s->current_picture.f->linesize[1];
  2786. const int mb_size= 4 - s->avctx->lowres;
  2787. s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
  2788. s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
  2789. s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
  2790. s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
  2791. s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
  2792. s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
  2793. //block_index is not used by mpeg2, so it is not affected by chroma_format
  2794. s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
  2795. s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
  2796. s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
  2797. if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
  2798. {
  2799. if(s->picture_structure==PICT_FRAME){
  2800. s->dest[0] += s->mb_y * linesize << mb_size;
  2801. s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
  2802. s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
  2803. }else{
  2804. s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
  2805. s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
  2806. s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
  2807. av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
  2808. }
  2809. }
  2810. }
  2811. /**
  2812. * Permute an 8x8 block.
  2813. * @param block the block which will be permuted according to the given permutation vector
  2814. * @param permutation the permutation vector
  2815. * @param last the last non zero coefficient in scantable order, used to speed the permutation up
  2816. * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
  2817. * (inverse) permutated to scantable order!
  2818. */
  2819. void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
  2820. {
  2821. int i;
  2822. int16_t temp[64];
  2823. if(last<=0) return;
  2824. //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
  2825. for(i=0; i<=last; i++){
  2826. const int j= scantable[i];
  2827. temp[j]= block[j];
  2828. block[j]=0;
  2829. }
  2830. for(i=0; i<=last; i++){
  2831. const int j= scantable[i];
  2832. const int perm_j= permutation[j];
  2833. block[perm_j]= temp[j];
  2834. }
  2835. }
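/* Permutation sketch: only coefficients reachable through the first last+1
 * scantable entries are moved. Toy example: with permutation[1] == 8,
 * permutation[8] == 1 and a scantable/last covering both positions, a block
 * with block[1] == 5 and block[8] == -3 ends up with block[8] == 5 and
 * block[1] == -3. */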
  2836. void ff_mpeg_flush(AVCodecContext *avctx){
  2837. int i;
  2838. MpegEncContext *s = avctx->priv_data;
  2839. if(s==NULL || s->picture==NULL)
  2840. return;
  2841. for (i = 0; i < MAX_PICTURE_COUNT; i++)
  2842. ff_mpeg_unref_picture(s, &s->picture[i]);
  2843. s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
  2844. ff_mpeg_unref_picture(s, &s->current_picture);
  2845. ff_mpeg_unref_picture(s, &s->last_picture);
  2846. ff_mpeg_unref_picture(s, &s->next_picture);
  2847. s->mb_x= s->mb_y= 0;
  2848. s->closed_gop= 0;
  2849. s->parse_context.state= -1;
  2850. s->parse_context.frame_start_found= 0;
  2851. s->parse_context.overread= 0;
  2852. s->parse_context.overread_index= 0;
  2853. s->parse_context.index= 0;
  2854. s->parse_context.last_index= 0;
  2855. s->bitstream_buffer_size=0;
  2856. s->pp_time=0;
  2857. }
  2858. /**
  2859. * set qscale and update qscale dependent variables.
  2860. */
  2861. void ff_set_qscale(MpegEncContext * s, int qscale)
  2862. {
  2863. if (qscale < 1)
  2864. qscale = 1;
  2865. else if (qscale > 31)
  2866. qscale = 31;
  2867. s->qscale = qscale;
  2868. s->chroma_qscale= s->chroma_qscale_table[qscale];
  2869. s->y_dc_scale= s->y_dc_scale_table[ qscale ];
  2870. s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
  2871. }
  2872. void ff_MPV_report_decode_progress(MpegEncContext *s)
  2873. {
  2874. if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
  2875. ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
  2876. }