You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

595 lines
17KB

  1. /*
  2. *
  3. * This file is part of FFmpeg.
  4. *
  5. * FFmpeg is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU Lesser General Public
  7. * License as published by the Free Software Foundation; either
  8. * version 2.1 of the License, or (at your option) any later version.
  9. *
  10. * FFmpeg is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * Lesser General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU Lesser General Public
  16. * License along with FFmpeg; if not, write to the Free Software
  17. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  18. */
  19. #include "channel_layout.h"
  20. #include "avassert.h"
  21. #include "buffer.h"
  22. #include "common.h"
  23. #include "dict.h"
  24. #include "frame.h"
  25. #include "imgutils.h"
  26. #include "mem.h"
  27. #include "samplefmt.h"
/* Generated accessor pairs (av_frame_get_<field>() / av_frame_set_<field>())
 * for AVFrame fields that are part of the public accessor API.
 * MAKE_ACCESSORS is presumably defined in a project header (internal.h) and
 * expands to a trivial getter/setter for each (type, field) listed below. */
MAKE_ACCESSORS(AVFrame, frame, int64_t, best_effort_timestamp)
MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_duration)
MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_pos)
MAKE_ACCESSORS(AVFrame, frame, int64_t, channel_layout)
MAKE_ACCESSORS(AVFrame, frame, int, channels)
MAKE_ACCESSORS(AVFrame, frame, int, sample_rate)
MAKE_ACCESSORS(AVFrame, frame, AVDictionary *, metadata)
MAKE_ACCESSORS(AVFrame, frame, int, decode_error_flags)
MAKE_ACCESSORS(AVFrame, frame, int, pkt_size)
MAKE_ACCESSORS(AVFrame, frame, enum AVColorSpace, colorspace)
MAKE_ACCESSORS(AVFrame, frame, enum AVColorRange, color_range)
/* Debug-level (av_assert2) sanity check: when a channel_layout is set, the
 * explicit channel count must agree with the count implied by the layout.
 * A zero/unset channel_layout is always accepted. */
#define CHECK_CHANNELS_CONSISTENCY(frame) \
    av_assert2(!(frame)->channel_layout || \
               (frame)->channels == \
               av_get_channel_layout_nb_channels((frame)->channel_layout))
  43. AVDictionary **avpriv_frame_get_metadatap(AVFrame *frame) {return &frame->metadata;};
  44. int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
  45. {
  46. av_buffer_unref(&f->qp_table_buf);
  47. f->qp_table_buf = buf;
  48. f->qscale_table = buf->data;
  49. f->qstride = stride;
  50. f->qscale_type = qp_type;
  51. return 0;
  52. }
  53. int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type)
  54. {
  55. *stride = f->qstride;
  56. *type = f->qscale_type;
  57. if (!f->qp_table_buf)
  58. return NULL;
  59. return f->qp_table_buf->data;
  60. }
  61. const char *av_get_colorspace_name(enum AVColorSpace val)
  62. {
  63. static const char *name[] = {
  64. [AVCOL_SPC_RGB] = "GBR",
  65. [AVCOL_SPC_BT709] = "bt709",
  66. [AVCOL_SPC_FCC] = "fcc",
  67. [AVCOL_SPC_BT470BG] = "bt470bg",
  68. [AVCOL_SPC_SMPTE170M] = "smpte170m",
  69. [AVCOL_SPC_SMPTE240M] = "smpte240m",
  70. [AVCOL_SPC_YCOCG] = "YCgCo",
  71. };
  72. if ((unsigned)val >= FF_ARRAY_ELEMS(name))
  73. return NULL;
  74. return name[val];
  75. }
  76. static void get_frame_defaults(AVFrame *frame)
  77. {
  78. if (frame->extended_data != frame->data)
  79. av_freep(&frame->extended_data);
  80. memset(frame, 0, sizeof(*frame));
  81. frame->pts =
  82. frame->pkt_dts =
  83. frame->pkt_pts = AV_NOPTS_VALUE;
  84. av_frame_set_best_effort_timestamp(frame, AV_NOPTS_VALUE);
  85. av_frame_set_pkt_duration (frame, 0);
  86. av_frame_set_pkt_pos (frame, -1);
  87. av_frame_set_pkt_size (frame, -1);
  88. frame->key_frame = 1;
  89. frame->sample_aspect_ratio = (AVRational){ 0, 1 };
  90. frame->format = -1; /* unknown */
  91. frame->colorspace = AVCOL_SPC_UNSPECIFIED;
  92. frame->extended_data = frame->data;
  93. }
  94. AVFrame *av_frame_alloc(void)
  95. {
  96. AVFrame *frame = av_mallocz(sizeof(*frame));
  97. if (!frame)
  98. return NULL;
  99. frame->extended_data = NULL;
  100. get_frame_defaults(frame);
  101. return frame;
  102. }
  103. void av_frame_free(AVFrame **frame)
  104. {
  105. if (!frame || !*frame)
  106. return;
  107. av_frame_unref(*frame);
  108. av_freep(frame);
  109. }
/* Allocate refcounted data buffers for a video frame.
 * Requires frame->format, width and height to be set; fills linesize[],
 * buf[], data[] and points extended_data at data.
 * Returns 0, or a negative AVERROR on bad parameters / allocation failure. */
static int get_video_buffer(AVFrame *frame, int align)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    int ret, i;

    if (!desc)
        return AVERROR(EINVAL);

    if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
        return ret;

    if (!frame->linesize[0]) {
        /* Probe width alignments 1,2,4,... up to 'align' until the computed
         * linesize[0] is itself a multiple of the requested alignment.
         * NOTE(review): assumes 'align' is a power of two -- the mask test
         * below only works then; confirm against callers (they pass 32). */
        for(i=1; i<=align; i+=i) {
            ret = av_image_fill_linesizes(frame->linesize, frame->format,
                                          FFALIGN(frame->width, i));
            if (ret < 0)
                return ret;
            if (!(frame->linesize[0] & (align-1)))
                break;
        }

        /* then round every used plane's linesize up to the alignment */
        for (i = 0; i < 4 && frame->linesize[i]; i++)
            frame->linesize[i] = FFALIGN(frame->linesize[i], align);
    }

    for (i = 0; i < 4 && frame->linesize[i]; i++) {
        /* pad height to 32 lines; chroma planes (1 and 2) are vertically
         * subsampled by log2_chroma_h */
        int h = FFALIGN(frame->height, 32);
        if (i == 1 || i == 2)
            h = FF_CEIL_RSHIFT(h, desc->log2_chroma_h);

        /* extra bytes allow for stride-aligned access past the nominal end */
        frame->buf[i] = av_buffer_alloc(frame->linesize[i] * h + 16 + 16/*STRIDE_ALIGN*/ - 1);
        if (!frame->buf[i])
            goto fail;

        frame->data[i] = frame->buf[i]->data;
    }

    /* paletted and pseudo-paletted formats carry a fixed 1024-byte palette
     * in plane 1, replacing whatever was allocated above */
    if (desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL) {
        av_buffer_unref(&frame->buf[1]);
        frame->buf[1] = av_buffer_alloc(1024);
        if (!frame->buf[1])
            goto fail;
        frame->data[1] = frame->buf[1]->data;
    }

    frame->extended_data = frame->data;

    return 0;
fail:
    /* release any planes already allocated */
    av_frame_unref(frame);
    return AVERROR(ENOMEM);
}
/* Allocate refcounted data buffers for an audio frame.
 * Requires frame->format, nb_samples, and channels or channel_layout set.
 * Planar formats get one plane per channel; planes beyond
 * AV_NUM_DATA_POINTERS spill into extended_data / extended_buf.
 * Returns 0, or a negative AVERROR on failure. */
static int get_audio_buffer(AVFrame *frame, int align)
{
    int channels;
    int planar = av_sample_fmt_is_planar(frame->format);
    int planes;
    int ret, i;

    /* derive the channel count from the layout when not set explicitly */
    if (!frame->channels)
        frame->channels = av_get_channel_layout_nb_channels(frame->channel_layout);

    channels = frame->channels;
    planes = planar ? channels : 1;

    CHECK_CHANNELS_CONSISTENCY(frame);
    if (!frame->linesize[0]) {
        /* linesize[0] = bytes per plane for the requested sample count */
        ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
                                         frame->nb_samples, frame->format,
                                         align);
        if (ret < 0)
            return ret;
    }

    if (planes > AV_NUM_DATA_POINTERS) {
        /* full pointer table in extended_data, plus separate buffer refs for
         * the planes that do not fit in buf[] */
        frame->extended_data = av_mallocz(planes *
                                          sizeof(*frame->extended_data));
        frame->extended_buf = av_mallocz((planes - AV_NUM_DATA_POINTERS) *
                                         sizeof(*frame->extended_buf));
        if (!frame->extended_data || !frame->extended_buf) {
            av_freep(&frame->extended_data);
            av_freep(&frame->extended_buf);
            return AVERROR(ENOMEM);
        }
        frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
    } else
        frame->extended_data = frame->data;

    /* first AV_NUM_DATA_POINTERS planes live in buf[]/data[] and are
     * mirrored into extended_data */
    for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
        frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
        if (!frame->buf[i]) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
    }
    /* remaining planes (if any) only exist in extended_buf/extended_data */
    for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
        frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
        if (!frame->extended_buf[i]) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
    }

    return 0;
}
  201. int av_frame_get_buffer(AVFrame *frame, int align)
  202. {
  203. if (frame->format < 0)
  204. return AVERROR(EINVAL);
  205. if (frame->width > 0 && frame->height > 0)
  206. return get_video_buffer(frame, align);
  207. else if (frame->nb_samples > 0 && (frame->channel_layout || frame->channels > 0))
  208. return get_audio_buffer(frame, align);
  209. return AVERROR(EINVAL);
  210. }
/**
 * Set up dst as a new reference to src.
 * Copies format/geometry and all properties; data buffers are refcounted
 * (shared) when src is refcounted, otherwise the data is copied into newly
 * allocated buffers.  dst is assumed clean (unreferenced) on entry.
 *
 * @return 0 on success, negative AVERROR on failure (dst is unreferenced)
 */
int av_frame_ref(AVFrame *dst, const AVFrame *src)
{
    int i, ret = 0;

    dst->format         = src->format;
    dst->width          = src->width;
    dst->height         = src->height;
    dst->channels       = src->channels;
    dst->channel_layout = src->channel_layout;
    dst->nb_samples     = src->nb_samples;

    ret = av_frame_copy_props(dst, src);
    if (ret < 0)
        return ret;

    /* duplicate the frame data if it's not refcounted */
    if (!src->buf[0]) {
        ret = av_frame_get_buffer(dst, 32);
        if (ret < 0)
            return ret;

        /* audio is detected by nb_samples; otherwise treat as video */
        if (src->nb_samples) {
            int ch = src->channels;
            CHECK_CHANNELS_CONSISTENCY(src);
            av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
                            dst->nb_samples, ch, dst->format);
        } else {
            av_image_copy(dst->data, dst->linesize, src->data, src->linesize,
                          dst->format, dst->width, dst->height);
        }
        return 0;
    }

    /* ref the buffers */
    for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
        if (!src->buf[i])
            continue;
        dst->buf[i] = av_buffer_ref(src->buf[i]);
        if (!dst->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    /* ref the spill-over plane buffers (planar audio with many channels) */
    if (src->extended_buf) {
        dst->extended_buf = av_mallocz(sizeof(*dst->extended_buf) *
                                       src->nb_extended_buf);
        if (!dst->extended_buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        dst->nb_extended_buf = src->nb_extended_buf;

        for (i = 0; i < src->nb_extended_buf; i++) {
            dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
            if (!dst->extended_buf[i]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    /* duplicate extended data (the per-plane pointer table) */
    if (src->extended_data != src->data) {
        int ch = src->channels;

        if (!ch) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
        CHECK_CHANNELS_CONSISTENCY(src);

        dst->extended_data = av_malloc(sizeof(*dst->extended_data) * ch);
        if (!dst->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
    } else
        dst->extended_data = dst->data;

    memcpy(dst->data,     src->data,     sizeof(src->data));
    memcpy(dst->linesize, src->linesize, sizeof(src->linesize));

    return 0;

fail:
    av_frame_unref(dst);
    return ret;
}
  288. AVFrame *av_frame_clone(const AVFrame *src)
  289. {
  290. AVFrame *ret = av_frame_alloc();
  291. if (!ret)
  292. return NULL;
  293. if (av_frame_ref(ret, src) < 0)
  294. av_frame_free(&ret);
  295. return ret;
  296. }
  297. void av_frame_unref(AVFrame *frame)
  298. {
  299. int i;
  300. for (i = 0; i < frame->nb_side_data; i++) {
  301. av_freep(&frame->side_data[i]->data);
  302. av_dict_free(&frame->side_data[i]->metadata);
  303. av_freep(&frame->side_data[i]);
  304. }
  305. av_freep(&frame->side_data);
  306. for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
  307. av_buffer_unref(&frame->buf[i]);
  308. for (i = 0; i < frame->nb_extended_buf; i++)
  309. av_buffer_unref(&frame->extended_buf[i]);
  310. av_freep(&frame->extended_buf);
  311. av_dict_free(&frame->metadata);
  312. av_buffer_unref(&frame->qp_table_buf);
  313. get_frame_defaults(frame);
  314. }
  315. void av_frame_move_ref(AVFrame *dst, AVFrame *src)
  316. {
  317. *dst = *src;
  318. if (src->extended_data == src->data)
  319. dst->extended_data = dst->data;
  320. memset(src, 0, sizeof(*src));
  321. get_frame_defaults(src);
  322. }
  323. int av_frame_is_writable(AVFrame *frame)
  324. {
  325. int i, ret = 1;
  326. /* assume non-refcounted frames are not writable */
  327. if (!frame->buf[0])
  328. return 0;
  329. for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
  330. if (frame->buf[i])
  331. ret &= !!av_buffer_is_writable(frame->buf[i]);
  332. for (i = 0; i < frame->nb_extended_buf; i++)
  333. ret &= !!av_buffer_is_writable(frame->extended_buf[i]);
  334. return ret;
  335. }
/**
 * Ensure the frame's data is writable, copying it into freshly allocated
 * buffers if it is currently shared.  On success the frame's contents are
 * unchanged but all buffers have refcount 1.
 *
 * @return 0 on success, AVERROR(EINVAL) for non-refcounted frames,
 *         negative AVERROR on allocation/copy failure (frame unchanged)
 */
int av_frame_make_writable(AVFrame *frame)
{
    AVFrame tmp;
    int ret;

    if (!frame->buf[0])
        return AVERROR(EINVAL);

    if (av_frame_is_writable(frame))
        return 0;

    /* build a private copy in tmp; only swap it in once everything worked */
    memset(&tmp, 0, sizeof(tmp));
    tmp.format         = frame->format;
    tmp.width          = frame->width;
    tmp.height         = frame->height;
    tmp.channels       = frame->channels;
    tmp.channel_layout = frame->channel_layout;
    tmp.nb_samples     = frame->nb_samples;
    ret = av_frame_get_buffer(&tmp, 32);
    if (ret < 0)
        return ret;

    /* copy the actual samples/pixels */
    if (tmp.nb_samples) {
        int ch = tmp.channels;
        CHECK_CHANNELS_CONSISTENCY(&tmp);
        av_samples_copy(tmp.extended_data, frame->extended_data, 0, 0,
                        frame->nb_samples, ch, frame->format);
    } else {
        av_image_copy(tmp.data, tmp.linesize, frame->data, frame->linesize,
                      frame->format, frame->width, frame->height);
    }

    ret = av_frame_copy_props(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    /* success: release the old (shared) data and adopt the copy */
    av_frame_unref(frame);

    *frame = tmp;
    /* re-point extended_data at the struct's own data[] after the move */
    if (tmp.data == tmp.extended_data)
        frame->extended_data = frame->data;

    return 0;
}
  374. int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
  375. {
  376. int i;
  377. dst->key_frame = src->key_frame;
  378. dst->pict_type = src->pict_type;
  379. dst->sample_aspect_ratio = src->sample_aspect_ratio;
  380. dst->pts = src->pts;
  381. dst->repeat_pict = src->repeat_pict;
  382. dst->interlaced_frame = src->interlaced_frame;
  383. dst->top_field_first = src->top_field_first;
  384. dst->palette_has_changed = src->palette_has_changed;
  385. dst->sample_rate = src->sample_rate;
  386. dst->opaque = src->opaque;
  387. #if FF_API_AVFRAME_LAVC
  388. dst->type = src->type;
  389. #endif
  390. dst->pkt_pts = src->pkt_pts;
  391. dst->pkt_dts = src->pkt_dts;
  392. dst->pkt_pos = src->pkt_pos;
  393. dst->pkt_size = src->pkt_size;
  394. dst->pkt_duration = src->pkt_duration;
  395. dst->reordered_opaque = src->reordered_opaque;
  396. dst->quality = src->quality;
  397. dst->best_effort_timestamp = src->best_effort_timestamp;
  398. dst->coded_picture_number = src->coded_picture_number;
  399. dst->display_picture_number = src->display_picture_number;
  400. dst->flags = src->flags;
  401. dst->decode_error_flags = src->decode_error_flags;
  402. dst->colorspace = src->colorspace;
  403. dst->color_range = src->color_range;
  404. av_dict_copy(&dst->metadata, src->metadata, 0);
  405. memcpy(dst->error, src->error, sizeof(dst->error));
  406. for (i = 0; i < src->nb_side_data; i++) {
  407. const AVFrameSideData *sd_src = src->side_data[i];
  408. AVFrameSideData *sd_dst = av_frame_new_side_data(dst, sd_src->type,
  409. sd_src->size);
  410. if (!sd_dst) {
  411. for (i = 0; i < dst->nb_side_data; i++) {
  412. av_freep(&dst->side_data[i]->data);
  413. av_freep(&dst->side_data[i]);
  414. av_dict_free(&dst->side_data[i]->metadata);
  415. }
  416. av_freep(&dst->side_data);
  417. return AVERROR(ENOMEM);
  418. }
  419. memcpy(sd_dst->data, sd_src->data, sd_src->size);
  420. av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
  421. }
  422. dst->qscale_table = NULL;
  423. dst->qstride = 0;
  424. dst->qscale_type = 0;
  425. if (src->qp_table_buf) {
  426. dst->qp_table_buf = av_buffer_ref(src->qp_table_buf);
  427. if (dst->qp_table_buf) {
  428. dst->qscale_table = dst->qp_table_buf->data;
  429. dst->qstride = src->qstride;
  430. dst->qscale_type = src->qscale_type;
  431. }
  432. }
  433. return 0;
  434. }
  435. AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)
  436. {
  437. uint8_t *data;
  438. int planes, i;
  439. if (frame->nb_samples) {
  440. int channels = frame->channels;
  441. if (!channels)
  442. return NULL;
  443. CHECK_CHANNELS_CONSISTENCY(frame);
  444. planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
  445. } else
  446. planes = 4;
  447. if (plane < 0 || plane >= planes || !frame->extended_data[plane])
  448. return NULL;
  449. data = frame->extended_data[plane];
  450. for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
  451. AVBufferRef *buf = frame->buf[i];
  452. if (data >= buf->data && data < buf->data + buf->size)
  453. return buf;
  454. }
  455. for (i = 0; i < frame->nb_extended_buf; i++) {
  456. AVBufferRef *buf = frame->extended_buf[i];
  457. if (data >= buf->data && data < buf->data + buf->size)
  458. return buf;
  459. }
  460. return NULL;
  461. }
  462. AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
  463. enum AVFrameSideDataType type,
  464. int size)
  465. {
  466. AVFrameSideData *ret, **tmp;
  467. if (frame->nb_side_data > INT_MAX / sizeof(*frame->side_data) - 1)
  468. return NULL;
  469. tmp = av_realloc(frame->side_data,
  470. (frame->nb_side_data + 1) * sizeof(*frame->side_data));
  471. if (!tmp)
  472. return NULL;
  473. frame->side_data = tmp;
  474. ret = av_mallocz(sizeof(*ret));
  475. if (!ret)
  476. return NULL;
  477. ret->data = av_malloc(size);
  478. if (!ret->data) {
  479. av_freep(&ret);
  480. return NULL;
  481. }
  482. ret->size = size;
  483. ret->type = type;
  484. frame->side_data[frame->nb_side_data++] = ret;
  485. return ret;
  486. }
  487. AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
  488. enum AVFrameSideDataType type)
  489. {
  490. int i;
  491. for (i = 0; i < frame->nb_side_data; i++) {
  492. if (frame->side_data[i]->type == type)
  493. return frame->side_data[i];
  494. }
  495. return NULL;
  496. }