/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavformat API example.
 *
 * Output a media file in any supported libavformat format.
 * The default codecs are used.
 * @example doc/examples/muxing.c
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
/* 200 seconds stream duration */
#define STREAM_DURATION   200.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

static int sws_flags = SWS_BICUBIC;

/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
                            enum AVCodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    st = avformat_new_stream(oc, *codec);
    if (!st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    st->id = oc->nb_streams - 1;
    c = st->codec;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt  = AV_SAMPLE_FMT_FLTP;
        c->bit_rate    = 64000;
        c->sample_rate = 44100;
        c->channels    = 2;
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;

        c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        c->width    = 352;
        c->height   = 288;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identically 1. */
        c->time_base.den = STREAM_FRAME_RATE;
        c->time_base.num = 1;
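        /* e.g. with STREAM_FRAME_RATE 25 the codec time base is 1/25 of a
         * second, i.e. one tick of this time base corresponds to one frame. */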
        c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt       = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
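    /* (Formats such as MP4/MOV and Matroska keep the codec extradata in the
     * container header instead of repeating it in the bitstream.) */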

    return st;
}

/**************************************************************/
/* audio output */

static float t, tincr, tincr2;

static uint8_t **src_samples_data;
static int       src_samples_linesize;
static int       src_nb_samples;

static int max_dst_nb_samples;
uint8_t **dst_samples_data;
int       dst_samples_linesize;
int       dst_samples_size;

struct SwrContext *swr_ctx = NULL;

static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    AVCodecContext *c;
    int ret;

    c = st->codec;

    /* open it */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* init signal generator */
    t     = 0;
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
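
    /* If the encoder cannot accept an arbitrary number of samples per frame
     * (no CODEC_CAP_VARIABLE_FRAME_SIZE), it must be fed exactly
     * c->frame_size samples at a time, which is what we allocate below. */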
    src_nb_samples = c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ?
        10000 : c->frame_size;

    ret = av_samples_alloc_array_and_samples(&src_samples_data, &src_samples_linesize, c->channels,
                                             src_nb_samples, AV_SAMPLE_FMT_S16, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate source samples\n");
        exit(1);
    }

    /* create resampler context */
    if (c->sample_fmt != AV_SAMPLE_FMT_S16) {
        swr_ctx = swr_alloc();
        if (!swr_ctx) {
            fprintf(stderr, "Could not allocate resampler context\n");
            exit(1);
        }

        /* set options */
        av_opt_set_int       (swr_ctx, "in_channel_count",  c->channels,       0);
        av_opt_set_int       (swr_ctx, "in_sample_rate",    c->sample_rate,    0);
        av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt",     AV_SAMPLE_FMT_S16, 0);
        av_opt_set_int       (swr_ctx, "out_channel_count", c->channels,       0);
        av_opt_set_int       (swr_ctx, "out_sample_rate",   c->sample_rate,    0);
        av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt",    c->sample_fmt,     0);

        /* initialize the resampling context */
        if ((ret = swr_init(swr_ctx)) < 0) {
            fprintf(stderr, "Failed to initialize the resampling context\n");
            exit(1);
        }
    }

    /* compute the number of converted samples: buffering is avoided
     * ensuring that the output buffer will contain at least all the
     * converted input samples */
    max_dst_nb_samples = src_nb_samples;
    ret = av_samples_alloc_array_and_samples(&dst_samples_data, &dst_samples_linesize, c->channels,
                                             max_dst_nb_samples, c->sample_fmt, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate destination samples\n");
        exit(1);
    }
    dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, max_dst_nb_samples,
                                                  c->sample_fmt, 0);
}

/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
    int j, i, v;
    int16_t *q;

    q = samples;
    for (j = 0; j < frame_size; j++) {
        v = (int)(sin(t) * 10000);
        for (i = 0; i < nb_channels; i++)
            *q++ = v;
        t     += tincr;
        tincr += tincr2;
    }
}

static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame = av_frame_alloc();
    int got_packet, ret, dst_nb_samples;

    av_init_packet(&pkt);
    c = st->codec;

    get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels);

    /* convert samples from native format to destination codec format, using the resampler */
    if (swr_ctx) {
        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
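        /* swr_get_delay() accounts for samples still buffered inside the
         * resampler; since the input and output rates are equal here, the
         * rescale is effectively a pass-through, but this is the general
         * formula for rate conversion. */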
        if (dst_nb_samples > max_dst_nb_samples) {
            av_free(dst_samples_data[0]);
            ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels,
                                   dst_nb_samples, c->sample_fmt, 0);
            if (ret < 0)
                exit(1);
            max_dst_nb_samples = dst_nb_samples;
            dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples,
                                                          c->sample_fmt, 0);
        }

        /* convert to destination format */
        ret = swr_convert(swr_ctx,
                          dst_samples_data, dst_nb_samples,
                          (const uint8_t **)src_samples_data, src_nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
    } else {
        dst_samples_data[0] = src_samples_data[0];
        dst_nb_samples = src_nb_samples;
    }

    frame->nb_samples = dst_nb_samples;
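    /* Point the frame's data pointers at the converted buffer; no sample
     * data is copied here. */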
    avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                             dst_samples_data[0], dst_samples_size, 0);

    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (!got_packet)
        goto freeframe;

    pkt.stream_index = st->index;

    /* Write the compressed frame to the media file. */
    ret = av_interleaved_write_frame(oc, &pkt);
    if (ret != 0) {
        fprintf(stderr, "Error while writing audio frame: %s\n",
                av_err2str(ret));
        exit(1);
    }

freeframe:
    av_frame_free(&frame);
}

static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);

    av_free(src_samples_data[0]);
    av_free(dst_samples_data[0]);
}

/**************************************************************/
/* video output */

static AVFrame *frame;
static AVPicture src_picture, dst_picture;
static int frame_count;

static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    int ret;
    AVCodecContext *c = st->codec;

    /* open the codec */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* Allocate the encoded raw picture. */
    ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate temporary picture: %s\n",
                    av_err2str(ret));
            exit(1);
        }
    }
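
    /* In this version of the API an AVFrame begins with the same data[] and
     * linesize[] fields as an AVPicture, so the struct assignment below simply
     * copies the dst_picture buffer pointers into the reusable frame; no pixel
     * data is copied. */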
    /* copy data and linesize picture pointers to frame */
    *((AVPicture *)frame) = dst_picture;
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVPicture *pict, int frame_index,
                           int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int ret;
    static struct SwsContext *sws_ctx;
    AVCodecContext *c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* No more frames to compress. The codec has a latency of a few
         * frames if using B-frames, so we get the last frames by
         * passing the same picture again. */
    } else {
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (!sws_ctx) {
                sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
                                         c->width, c->height, c->pix_fmt,
                                         sws_flags, NULL, NULL, NULL);
                if (!sws_ctx) {
                    fprintf(stderr,
                            "Could not initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(&src_picture, frame_count, c->width, c->height);
            sws_scale(sws_ctx,
                      (const uint8_t * const *)src_picture.data, src_picture.linesize,
                      0, c->height, dst_picture.data, dst_picture.linesize);
        } else {
            fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* Raw video case - directly store the picture in the packet */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags        |= AV_PKT_FLAG_KEY;
        pkt.stream_index  = st->index;
        pkt.data          = dst_picture.data[0];
        pkt.size          = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        AVPacket pkt = { 0 };
        int got_packet;
        av_init_packet(&pkt);

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
            exit(1);
        }
        /* If size is zero, it means the image was buffered. */
        if (!ret && got_packet && pkt.size) {
            pkt.stream_index = st->index;

            /* Write the compressed frame to the media file. */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }
    frame_count++;
}

static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    av_free(src_picture.data[0]);
    av_free(dst_picture.data[0]);
    av_frame_free(&frame);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    AVCodec *audio_codec, *video_codec;
    double audio_time, video_time;
    int ret;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc) {
        return 1;
    }
    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;

    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        video_st = add_stream(oc, &video_codec, fmt->video_codec);
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_codec, video_st);
    if (audio_st)
        open_audio(oc, audio_codec, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    if (frame)
        frame->pts = 0;
    for (;;) {
        /* Compute current audio and video time. */
        audio_time = audio_st ? audio_st->pts.val * av_q2d(audio_st->time_base) : 0.0;
        video_time = video_st ? video_st->pts.val * av_q2d(video_st->time_base) : 0.0;

        if ((!audio_st || audio_time >= STREAM_DURATION) &&
            (!video_st || video_time >= STREAM_DURATION))
            break;
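
        /* Feed whichever stream is currently behind, so that audio and video
         * packets end up roughly interleaved in presentation time. */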
        /* write interleaved audio and video frames */
        if (!video_st || (video_st && audio_st && audio_time < video_time)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
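            /* Advance the frame pts by one codec-time-base tick (one frame),
             * expressed in the stream's time base. */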
            frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * avcodec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}