/*
 * Mpeg video formats-related picture management functions
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>

#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"

#include "avcodec.h"
#include "motion_est.h"
#include "mpegpicture.h"
#include "mpegutils.h"

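/**
 * Make the per-picture side-data buffers writable, so that they can be
 * modified without affecting other references to the same buffers.
 */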
static int make_tables_writable(Picture *pic)
{
    int ret, i;
#define MAKE_WRITABLE(table) \
do {\
    if (pic->table &&\
       (ret = av_buffer_make_writable(&pic->table)) < 0)\
        return ret;\
} while (0)

    MAKE_WRITABLE(mb_var_buf);
    MAKE_WRITABLE(mc_mb_var_buf);
    MAKE_WRITABLE(mb_mean_buf);
    MAKE_WRITABLE(mbskip_table_buf);
    MAKE_WRITABLE(qscale_table_buf);
    MAKE_WRITABLE(mb_type_buf);

    for (i = 0; i < 2; i++) {
        MAKE_WRITABLE(motion_val_buf[i]);
        MAKE_WRITABLE(ref_index_buf[i]);
    }

    return 0;
}

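/**
 * Allocate the linesize-dependent scratch buffers (edge emulation buffer
 * and motion-estimation scratchpads). Does nothing when a hwaccel is in use.
 */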
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me,
                            ScratchpadContext *sc, int linesize)
{
#   define EMU_EDGE_HEIGHT (4 * 70)
    int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);

    if (avctx->hwaccel)
        return 0;

    if (linesize < 24) {
        av_log(avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
        return AVERROR_PATCHWELCOME;
    }

    if (av_image_check_size2(alloc_size, EMU_EDGE_HEIGHT, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0)
        return AVERROR(ENOMEM);

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for halfpel / 21x21 for H.264)
    // VC-1 computes luma and chroma simultaneously and needs 19x19 + 9x9
    // at uvlinesize. It supports only YUV420 so 24x24 is enough
    // linesize * interlaced * MBsize
    // we also use this buffer for encoding in encode_mb_internal(), which needs an additional 32 lines
    if (!FF_ALLOCZ_TYPED_ARRAY(sc->edge_emu_buffer, alloc_size * EMU_EDGE_HEIGHT) ||
        !FF_ALLOCZ_TYPED_ARRAY(me->scratchpad,      alloc_size * 4 * 16 * 2)) {
        av_freep(&sc->edge_emu_buffer);
        return AVERROR(ENOMEM);
    }

    me->temp            = me->scratchpad;
    sc->rd_scratchpad   = me->scratchpad;
    sc->b_scratchpad    = me->scratchpad;
    sc->obmc_scratchpad = me->scratchpad + 16;

    return 0;
}

/**
 * Allocate a frame buffer.
 */
static int alloc_frame_buffer(AVCodecContext *avctx, Picture *pic,
                              MotionEstContext *me, ScratchpadContext *sc,
                              int chroma_x_shift, int chroma_y_shift,
                              int linesize, int uvlinesize)
{
    int edges_needed = av_codec_is_encoder(avctx->codec);
    int r, ret;

    pic->tf.f = pic->f;
    if (avctx->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        avctx->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        avctx->codec_id != AV_CODEC_ID_MSS2) {
        if (edges_needed) {
            pic->f->width  = avctx->width  + 2 * EDGE_WIDTH;
            pic->f->height = avctx->height + 2 * EDGE_WIDTH;
        }

        r = ff_thread_get_buffer(avctx, &pic->tf,
                                 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
    } else {
        pic->f->width  = avctx->width;
        pic->f->height = avctx->height;
        pic->f->format = avctx->pix_fmt;
        r = avcodec_default_get_buffer2(avctx, pic->f, 0);
    }

    if (r < 0 || !pic->f->buf[0]) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
               r, pic->f->data[0]);
        return -1;
    }

    if (edges_needed) {
        int i;
        for (i = 0; pic->f->data[i]; i++) {
            int offset = (EDGE_WIDTH >> (i ? chroma_y_shift : 0)) *
                         pic->f->linesize[i] +
                         (EDGE_WIDTH >> (i ? chroma_x_shift : 0));
            pic->f->data[i] += offset;
        }
        pic->f->width  = avctx->width;
        pic->f->height = avctx->height;
    }

    if (avctx->hwaccel) {
        av_assert0(!pic->hwaccel_picture_private);
        if (avctx->hwaccel->frame_priv_data_size) {
            pic->hwaccel_priv_buf = av_buffer_allocz(avctx->hwaccel->frame_priv_data_size);
            if (!pic->hwaccel_priv_buf) {
                av_log(avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
            pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
        }
    }

    if ((linesize   &&   linesize != pic->f->linesize[0]) ||
        (uvlinesize && uvlinesize != pic->f->linesize[1])) {
        av_log(avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed: linesize=%d/%d uvlinesize=%d/%d)\n",
               linesize,   pic->f->linesize[0],
               uvlinesize, pic->f->linesize[1]);
        ff_mpeg_unref_picture(avctx, pic);
        return -1;
    }

    if (av_pix_fmt_count_planes(pic->f->format) > 2 &&
        pic->f->linesize[1] != pic->f->linesize[2]) {
        av_log(avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        ff_mpeg_unref_picture(avctx, pic);
        return -1;
    }

    if (!sc->edge_emu_buffer &&
        (ret = ff_mpeg_framesize_alloc(avctx, me, sc,
                                       pic->f->linesize[0])) < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "get_buffer() failed to allocate context scratch buffers.\n");
        ff_mpeg_unref_picture(avctx, pic);
        return ret;
    }

    return 0;
}

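/**
 * Allocate the per-picture tables: macroblock skip/qscale/type tables,
 * the encoder-only variance and mean tables, and, when needed, the motion
 * vector and reference index tables.
 */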
static int alloc_picture_tables(AVCodecContext *avctx, Picture *pic, int encoding, int out_format,
                                int mb_stride, int mb_width, int mb_height, int b8_stride)
{
    const int big_mb_num    = mb_stride * (mb_height + 1) + 1;
    const int mb_array_size = mb_stride * mb_height;
    const int b8_array_size = b8_stride * mb_height * 2;
    int i;

    pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
    pic->qscale_table_buf = av_buffer_allocz(big_mb_num + mb_stride);
    pic->mb_type_buf      = av_buffer_allocz((big_mb_num + mb_stride) *
                                             sizeof(uint32_t));
    if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
        return AVERROR(ENOMEM);

    if (encoding) {
        pic->mb_var_buf    = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mb_mean_buf   = av_buffer_allocz(mb_array_size);
        if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
            return AVERROR(ENOMEM);
    }

    if (out_format == FMT_H263 || encoding ||
        (avctx->export_side_data & AV_CODEC_EXPORT_DATA_MVS)) {
        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
        int ref_index_size = 4 * mb_array_size;

        for (i = 0; mv_size && i < 2; i++) {
            pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
            pic->ref_index_buf[i]  = av_buffer_allocz(ref_index_size);
            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
                return AVERROR(ENOMEM);
        }
    }

    pic->alloc_mb_width  = mb_width;
    pic->alloc_mb_height = mb_height;
    pic->alloc_mb_stride = mb_stride;

    return 0;
}

/**
 * Allocate a Picture.
 * The pixels are allocated/set by calling get_buffer() if shared = 0.
 */
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me,
                     ScratchpadContext *sc, int shared, int encoding,
                     int chroma_x_shift, int chroma_y_shift, int out_format,
                     int mb_stride, int mb_width, int mb_height, int b8_stride,
                     ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
{
    int i, ret;

    if (pic->qscale_table_buf)
        if (   pic->alloc_mb_width  != mb_width
            || pic->alloc_mb_height != mb_height)
            ff_free_picture_tables(pic);

    if (shared) {
        av_assert0(pic->f->data[0]);
        pic->shared = 1;
    } else {
        av_assert0(!pic->f->buf[0]);
        if (alloc_frame_buffer(avctx, pic, me, sc,
                               chroma_x_shift, chroma_y_shift,
                               *linesize, *uvlinesize) < 0)
            return -1;

        *linesize   = pic->f->linesize[0];
        *uvlinesize = pic->f->linesize[1];
    }

    if (!pic->qscale_table_buf)
        ret = alloc_picture_tables(avctx, pic, encoding, out_format,
                                   mb_stride, mb_width, mb_height, b8_stride);
    else
        ret = make_tables_writable(pic);
    if (ret < 0)
        goto fail;

    if (encoding) {
        pic->mb_var    = (uint16_t*)pic->mb_var_buf->data;
        pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
        pic->mb_mean   = pic->mb_mean_buf->data;
    }

    pic->mbskip_table = pic->mbskip_table_buf->data;
    pic->qscale_table = pic->qscale_table_buf->data + 2 * mb_stride + 1;
    pic->mb_type      = (uint32_t*)pic->mb_type_buf->data + 2 * mb_stride + 1;

    if (pic->motion_val_buf[0]) {
        for (i = 0; i < 2; i++) {
            pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
            pic->ref_index[i]  = pic->ref_index_buf[i]->data;
        }
    }

    return 0;
fail:
    av_log(avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
    ff_mpeg_unref_picture(avctx, pic);
    ff_free_picture_tables(pic);
    return AVERROR(ENOMEM);
}

/**
 * Deallocate a picture.
 */
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
{
    int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);

    pic->tf.f = pic->f;
    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
    if (avctx->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        avctx->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        avctx->codec_id != AV_CODEC_ID_MSS2)
        ff_thread_release_buffer(avctx, &pic->tf);
    else if (pic->f)
        av_frame_unref(pic->f);

    av_buffer_unref(&pic->hwaccel_priv_buf);

    if (pic->needs_realloc)
        ff_free_picture_tables(pic);

    memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
}

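/**
 * Make dst's table buffers reference src's buffers and copy the derived
 * table pointers; on failure dst's tables are freed.
 */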
int ff_update_picture_tables(Picture *dst, Picture *src)
{
    int i, ret;

    ret  = av_buffer_replace(&dst->mb_var_buf,       src->mb_var_buf);
    ret |= av_buffer_replace(&dst->mc_mb_var_buf,    src->mc_mb_var_buf);
    ret |= av_buffer_replace(&dst->mb_mean_buf,      src->mb_mean_buf);
    ret |= av_buffer_replace(&dst->mbskip_table_buf, src->mbskip_table_buf);
    ret |= av_buffer_replace(&dst->qscale_table_buf, src->qscale_table_buf);
    ret |= av_buffer_replace(&dst->mb_type_buf,      src->mb_type_buf);
    for (i = 0; i < 2; i++) {
        ret |= av_buffer_replace(&dst->motion_val_buf[i], src->motion_val_buf[i]);
        ret |= av_buffer_replace(&dst->ref_index_buf[i],  src->ref_index_buf[i]);
    }

    if (ret < 0) {
        ff_free_picture_tables(dst);
        return ret;
    }

    dst->mb_var       = src->mb_var;
    dst->mc_mb_var    = src->mc_mb_var;
    dst->mb_mean      = src->mb_mean;
    dst->mbskip_table = src->mbskip_table;
    dst->qscale_table = src->qscale_table;
    dst->mb_type      = src->mb_type;
    for (i = 0; i < 2; i++) {
        dst->motion_val[i] = src->motion_val[i];
        dst->ref_index[i]  = src->ref_index[i];
    }

    dst->alloc_mb_width  = src->alloc_mb_width;
    dst->alloc_mb_height = src->alloc_mb_height;
    dst->alloc_mb_stride = src->alloc_mb_stride;

    return 0;
}

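/**
 * Make dst a new reference to the frame and tables of src.
 * dst must not hold any frame data on entry.
 */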
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
{
    int ret;

    av_assert0(!dst->f->buf[0]);
    av_assert0(src->f->buf[0]);

    src->tf.f = src->f;
    dst->tf.f = dst->f;
    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (ret < 0)
        goto fail;

    ret = ff_update_picture_tables(dst, src);
    if (ret < 0)
        goto fail;

    if (src->hwaccel_picture_private) {
        dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
        if (!dst->hwaccel_priv_buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
    }

    dst->field_picture = src->field_picture;
    dst->mb_var_sum    = src->mb_var_sum;
    dst->mc_mb_var_sum = src->mc_mb_var_sum;
    dst->b_frame_score = src->b_frame_score;
    dst->needs_realloc = src->needs_realloc;
    dst->reference     = src->reference;
    dst->shared        = src->shared;
    memcpy(dst->encoding_error, src->encoding_error,
           sizeof(dst->encoding_error));

    return 0;
fail:
    ff_mpeg_unref_picture(avctx, dst);
    return ret;
}

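/** Return 1 if the given picture slot may be reused, 0 otherwise. */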
static inline int pic_is_unused(Picture *pic)
{
    if (!pic->f->buf[0])
        return 1;
    if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
        return 1;
    return 0;
}

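/**
 * Find an unused slot in the picture array; aborts if none is available
 * (see the comment inside for the rationale).
 */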
static int find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
{
    int i;

    if (shared) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            if (!picture[i].f->buf[0])
                return i;
        }
    } else {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            if (pic_is_unused(&picture[i]))
                return i;
        }
    }

    av_log(avctx, AV_LOG_FATAL,
           "Internal error, picture buffer overflow\n");
    /* We could return -1, but the codec would crash trying to draw into a
     * non-existing frame anyway. Aborting here is safer than waiting for a
     * random crash.
     * Also, returning an error is never useful here: an encoder must only
     * allocate as much as the specification allows, which has no relationship
     * to how much libavcodec could allocate (and MAX_PICTURE_COUNT is always
     * large enough for such valid streams).
     * A decoder, in turn, has to check stream validity and drop frames when
     * too many reference frames are around; waiting for an "OOM" condition is
     * not correct. Similarly, missing reference frames have to be replaced by
     * interpolated/MC frames; anything else is a bug in the codec.
     */
    abort();
    return -1;
}

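/**
 * Find an unused picture slot; if it is marked for reallocation, release
 * its frame and tables before returning its index.
 */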
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
{
    int ret = find_unused_picture(avctx, picture, shared);

    if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
        if (picture[ret].needs_realloc) {
            picture[ret].needs_realloc = 0;
            ff_free_picture_tables(&picture[ret]);
            ff_mpeg_unref_picture(avctx, &picture[ret]);
        }
    }

    return ret;
}

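/** Free all per-picture table buffers and reset the stored table dimensions. */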
void ff_free_picture_tables(Picture *pic)
{
    int i;

    pic->alloc_mb_width  =
    pic->alloc_mb_height = 0;

    av_buffer_unref(&pic->mb_var_buf);
    av_buffer_unref(&pic->mc_mb_var_buf);
    av_buffer_unref(&pic->mb_mean_buf);
    av_buffer_unref(&pic->mbskip_table_buf);
    av_buffer_unref(&pic->qscale_table_buf);
    av_buffer_unref(&pic->mb_type_buf);

    for (i = 0; i < 2; i++) {
        av_buffer_unref(&pic->motion_val_buf[i]);
        av_buffer_unref(&pic->ref_index_buf[i]);
    }
}