/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavformat API example.
 *
 * Output a media file in any supported libavformat format. The default
 * codecs are used.
 * @example muxing.c
 */
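
/*
 * A typical build line for this example, assuming pkg-config can locate
 * FFmpeg development packages matching the older (2.x-era) API used here
 * (AVStream.codec, avcodec_encode_audio2/video2, CODEC_FLAG_GLOBAL_HEADER,
 * AVFMT_RAWPICTURE):
 *
 *   gcc muxing.c -o muxing \
 *       $(pkg-config --cflags --libs libavformat libavcodec \
 *                    libswresample libswscale libavutil) -lm
 *
 * Run it as e.g. `./muxing out.mpg`; the container format is guessed from
 * the file name extension.
 */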
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>

static int audio_is_eof, video_is_eof;

#define STREAM_DURATION   10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

static int sws_flags = SWS_BICUBIC;

// a wrapper around a single output AVStream
typedef struct OutputStream {
    AVStream *st;
    AVFrame *frame;      /* frame handed to the encoder */
    AVFrame *tmp_frame;  /* intermediate frame for pixel-format conversion */
    float t, tincr, tincr2; /* state of the dummy audio signal generator */
    int audio_input_frame_size;
} OutputStream;

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}

static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
    /* rescale output packet timestamp values from codec to stream timebase */
    av_packet_rescale_ts(pkt, *time_base, st->time_base);
    pkt->stream_index = st->index;

    /* Write the compressed frame to the media file. */
    log_packet(fmt_ctx, pkt);
    return av_interleaved_write_frame(fmt_ctx, pkt);
}

/* Add an output stream. */
static void add_stream(OutputStream *ost, AVFormatContext *oc,
                       AVCodec **codec,
                       enum AVCodecID codec_id)
{
    AVCodecContext *c;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    ost->st = avformat_new_stream(oc, *codec);
    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams - 1;
    c = ost->st->codec;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt = (*codec)->sample_fmts ?
            (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        c->bit_rate       = 64000;
        c->sample_rate    = 44100;
        c->channels       = 2;
        c->channel_layout = AV_CH_LAYOUT_STEREO;
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;

        c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        c->width  = 352;
        c->height = 288;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        c->time_base.den = STREAM_FRAME_RATE;
        c->time_base.num = 1;
        c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt       = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
}

/**************************************************************/
/* audio output */

int samples_count;

struct SwrContext *swr_ctx = NULL;

static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost)
{
    AVCodecContext *c;
    int ret;

    c = ost->st->codec;

    /* open it */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* init signal generator */
    ost->t     = 0;
    ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
        ost->audio_input_frame_size = 10000;
    else
        ost->audio_input_frame_size = c->frame_size;

    /* create resampler context */
    if (c->sample_fmt != AV_SAMPLE_FMT_S16) {
        swr_ctx = swr_alloc();
        if (!swr_ctx) {
            fprintf(stderr, "Could not allocate resampler context\n");
            exit(1);
        }

        /* set options */
        av_opt_set_int       (swr_ctx, "in_channel_count",  c->channels,       0);
        av_opt_set_int       (swr_ctx, "in_sample_rate",    c->sample_rate,    0);
        av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt",     AV_SAMPLE_FMT_S16, 0);
        av_opt_set_int       (swr_ctx, "out_channel_count", c->channels,       0);
        av_opt_set_int       (swr_ctx, "out_sample_rate",   c->sample_rate,    0);
        av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt",    c->sample_fmt,     0);

        /* initialize the resampling context */
        if ((ret = swr_init(swr_ctx)) < 0) {
            fprintf(stderr, "Failed to initialize the resampling context\n");
            exit(1);
        }
    }
}

/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
static void get_audio_frame(OutputStream *ost, AVFrame *frame, int nb_channels)
{
    int j, i, v, ret;
    int16_t *q = (int16_t*)frame->data[0];

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally;
     * make sure we do not overwrite it here
     */
    ret = av_frame_make_writable(frame);
    if (ret < 0)
        exit(1);

    for (j = 0; j < frame->nb_samples; j++) {
        v = (int)(sin(ost->t) * 10000);
        for (i = 0; i < nb_channels; i++)
            *q++ = v;
        ost->t     += ost->tincr;
        ost->tincr += ost->tincr2;
    }
}

static void write_audio_frame(AVFormatContext *oc, OutputStream *ost, int flush)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame = av_frame_alloc();
    int got_packet, ret;
    int dst_nb_samples;

    av_init_packet(&pkt);
    c = ost->st->codec;

    if (!flush) {
        frame->sample_rate    = c->sample_rate;
        frame->nb_samples     = ost->audio_input_frame_size;
        frame->format         = AV_SAMPLE_FMT_S16;
        frame->channel_layout = c->channel_layout;
        ret = av_frame_get_buffer(frame, 0);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate an audio frame.\n");
            exit(1);
        }

        get_audio_frame(ost, frame, c->channels);

        /* convert samples from native format to destination codec format, using the resampler */
        if (swr_ctx) {
            AVFrame *tmp_frame = av_frame_alloc();

            /* compute destination number of samples */
            dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + ost->audio_input_frame_size,
                                            c->sample_rate, c->sample_rate, AV_ROUND_UP);

            tmp_frame->sample_rate    = c->sample_rate;
            tmp_frame->nb_samples     = dst_nb_samples;
            tmp_frame->format         = c->sample_fmt;
            tmp_frame->channel_layout = c->channel_layout;
            ret = av_frame_get_buffer(tmp_frame, 0);
            if (ret < 0) {
                fprintf(stderr, "Could not allocate an audio frame.\n");
                exit(1);
            }

            /* convert to destination format */
            ret = swr_convert(swr_ctx,
                              tmp_frame->data, dst_nb_samples,
                              (const uint8_t **)frame->data, ost->audio_input_frame_size);
            if (ret < 0) {
                fprintf(stderr, "Error while converting\n");
                exit(1);
            }
            av_frame_free(&frame);
            frame = tmp_frame;
        } else {
            dst_nb_samples = ost->audio_input_frame_size;
        }

        frame->nb_samples = dst_nb_samples;
        frame->pts = av_rescale_q(samples_count, (AVRational){1, c->sample_rate}, c->time_base);
        samples_count += dst_nb_samples;
    }

    ret = avcodec_encode_audio2(c, &pkt, flush ? NULL : frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }
    /* the frame has been handed to the encoder and is no longer needed;
     * free it here so it is not leaked on every call */
    av_frame_free(&frame);

    if (!got_packet) {
        if (flush)
            audio_is_eof = 1;
        return;
    }

    ret = write_frame(oc, &c->time_base, ost->st, &pkt);
    if (ret < 0) {
        fprintf(stderr, "Error while writing audio frame: %s\n",
                av_err2str(ret));
        exit(1);
    }
}

static void close_audio(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_close(ost->st->codec);
    swr_free(&swr_ctx); /* release the resampler, if one was created */
}

/**************************************************************/
/* video output */

static int frame_count;

static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    int ret;

    picture = av_frame_alloc();
    if (!picture)
        return NULL;

    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;

    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(picture, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }

    return picture;
}

static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost)
{
    int ret;
    AVCodecContext *c = ost->st->codec;

    /* open the codec */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index,
                           int width, int height)
{
    int x, y, i, ret;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally;
     * make sure we do not overwrite it here
     */
    ret = av_frame_make_writable(pict);
    if (ret < 0)
        exit(1);

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

static void write_video_frame(AVFormatContext *oc, OutputStream *ost, int flush)
{
    int ret;
    static struct SwsContext *sws_ctx;
    AVCodecContext *c = ost->st->codec;

    if (!flush) {
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (!sws_ctx) {
                sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
                                         c->width, c->height, c->pix_fmt,
                                         sws_flags, NULL, NULL, NULL);
                if (!sws_ctx) {
                    fprintf(stderr,
                            "Could not initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(ost->tmp_frame, frame_count, c->width, c->height);
            sws_scale(sws_ctx,
                      (const uint8_t * const *)ost->tmp_frame->data, ost->tmp_frame->linesize,
                      0, c->height, ost->frame->data, ost->frame->linesize);
        } else {
            fill_yuv_image(ost->frame, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE && !flush) {
        /* Raw video case - directly store the picture in the packet */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags        |= AV_PKT_FLAG_KEY;
        pkt.stream_index  = ost->st->index;
        pkt.data          = (uint8_t *)ost->frame;
        pkt.size          = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        AVPacket pkt = { 0 };
        int got_packet;
        av_init_packet(&pkt);

        /* encode the image */
        ost->frame->pts = frame_count;
        ret = avcodec_encode_video2(c, &pkt, flush ? NULL : ost->frame, &got_packet);
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
            exit(1);
        }
        /* If size is zero, it means the image was buffered. */
        if (got_packet) {
            ret = write_frame(oc, &c->time_base, ost->st, &pkt);
        } else {
            if (flush)
                video_is_eof = 1;
            ret = 0;
        }
    }

    if (ret < 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }
    frame_count++;
}

static void close_video(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_close(ost->st->codec);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    OutputStream video_st, audio_st;
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    double audio_time, video_time;
    int flush, ret;
    int have_video = 0, have_audio = 0;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;
    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st);
    if (have_audio)
        open_audio(oc, audio_codec, &audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    flush = 0;
    while ((have_video && !video_is_eof) || (have_audio && !audio_is_eof)) {
        /* Compute current audio and video time. */
        audio_time = (have_audio && !audio_is_eof) ? audio_st.st->pts.val * av_q2d(audio_st.st->time_base) : INFINITY;
        video_time = (have_video && !video_is_eof) ? video_st.st->pts.val * av_q2d(video_st.st->time_base) : INFINITY;

        if (!flush &&
            (!have_audio || audio_time >= STREAM_DURATION) &&
            (!have_video || video_time >= STREAM_DURATION)) {
            flush = 1;
        }

        /* write interleaved audio and video frames */
        if (have_audio && !audio_is_eof && audio_time <= video_time) {
            write_audio_frame(oc, &audio_st, flush);
        } else if (have_video && !video_is_eof && video_time < audio_time) {
            write_video_frame(oc, &video_st, flush);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_video(oc, &video_st);
    if (have_audio)
        close_audio(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}