/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavformat API example.
 *
 * Output a media file in any supported libavformat format.
 * The default codecs are used.
 * @example doc/examples/muxing.c
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <libavutil/mathematics.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

/* 200 seconds stream duration (200.0 * 25 fps = 5000 frames) */
#define STREAM_DURATION   200.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

static int sws_flags = SWS_BICUBIC;
/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
                            enum AVCodecID codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    st = avformat_new_stream(oc, *codec);
    if (!st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    st->id = oc->nb_streams - 1;
    c = st->codec;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt  = AV_SAMPLE_FMT_S16;
        c->bit_rate    = 64000;
        c->sample_rate = 44100;
        c->channels    = 2;
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;

        c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        c->width  = 352;
        c->height = 288;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        c->time_base.den = STREAM_FRAME_RATE;
        c->time_base.num = 1;
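        /* For example: with time_base = 1/25, a frame whose pts is n is
         * presented at n/25 seconds. The muxer may use a different stream
         * timebase (MPEG uses a 90 kHz clock), which is why the main loop
         * below rescales pts with av_rescale_q() before writing. */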
        c->gop_size = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt  = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}
/**************************************************************/
/* audio output */

static float t, tincr, tincr2;
static int16_t *samples;
static int audio_input_frame_size;
static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    AVCodecContext *c;
    int ret;

    c = st->codec;

    /* open it */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* init signal generator */
    t     = 0;
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
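    /* Why these values: the per-sample phase increment for a tone of
     * frequency f at sample rate Fs is 2*pi*f/Fs, so tincr starts the tone
     * at 110 Hz. Raising the frequency by 110 Hz every second means raising
     * tincr by 2*pi*110/Fs once per second, i.e. by 2*pi*110/Fs^2 per
     * sample, which is exactly tincr2. */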
    if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
        audio_input_frame_size = 10000;
    else
        audio_input_frame_size = c->frame_size;
    samples = av_malloc(audio_input_frame_size *
                        av_get_bytes_per_sample(c->sample_fmt) *
                        c->channels);
    if (!samples) {
        fprintf(stderr, "Could not allocate audio samples buffer\n");
        exit(1);
    }
}
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
    int j, i, v;
    int16_t *q;

    q = samples;
    for (j = 0; j < frame_size; j++) {
        v = (int)(sin(t) * 10000);
        for (i = 0; i < nb_channels; i++)
            *q++ = v;
        t     += tincr;
        tincr += tincr2;
    }
}
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0
    AVFrame *frame = avcodec_alloc_frame();
    int got_packet, ret;

    av_init_packet(&pkt);
    c = st->codec;

    get_audio_frame(samples, audio_input_frame_size, c->channels);
    frame->nb_samples = audio_input_frame_size;
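    /* The buffer passed below holds interleaved samples, so its size is
     * nb_samples * bytes_per_sample * channels; for stereo S16 that is
     * audio_input_frame_size * 2 * 2 bytes. */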
    avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                             (uint8_t *)samples,
                             audio_input_frame_size *
                             av_get_bytes_per_sample(c->sample_fmt) *
                             c->channels, 1);

    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (got_packet) {
        pkt.stream_index = st->index;

        /* Write the compressed frame to the media file. */
        ret = av_interleaved_write_frame(oc, &pkt);
        if (ret != 0) {
            fprintf(stderr, "Error while writing audio frame: %s\n",
                    av_err2str(ret));
            exit(1);
        }
    }

    /* free the frame whether or not the encoder produced a packet */
    avcodec_free_frame(&frame);
}
static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);

    av_free(samples);
}
/**************************************************************/
/* video output */

static AVFrame *frame;
static AVPicture src_picture, dst_picture;
static int frame_count;

static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
    int ret;
    AVCodecContext *c = st->codec;

    /* open the codec */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* Allocate the encoded raw picture. */
    ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate temporary picture: %s\n",
                    av_err2str(ret));
            exit(1);
        }
    }
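    /* The cast below works because, in this generation of the API, an
     * AVFrame begins with the same data[] and linesize[] members as an
     * AVPicture, so assigning through the cast makes the reusable frame
     * point at the buffers allocated above. */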
    /* copy data and linesize picture pointers to frame */
    *((AVPicture *)frame) = dst_picture;
}
/* Prepare a dummy image: a diagonal luma gradient that shifts by 3 pixels
 * each frame, with chroma ramps animated at different rates. The chroma
 * planes are half-size in each dimension because the picture is YUV420P. */
static void fill_yuv_image(AVPicture *pict, int frame_index,
                           int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int ret;
    static struct SwsContext *sws_ctx;
    AVCodecContext *c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* No more frames to compress. The codec has a latency of a few
         * frames if using B-frames, so we get the last frames by
         * passing the same picture again. */
    } else {
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (!sws_ctx) {
                sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
                                         c->width, c->height, c->pix_fmt,
                                         sws_flags, NULL, NULL, NULL);
                if (!sws_ctx) {
                    fprintf(stderr,
                            "Could not initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(&src_picture, frame_count, c->width, c->height);
            sws_scale(sws_ctx,
                      (const uint8_t * const *)src_picture.data, src_picture.linesize,
                      0, c->height, dst_picture.data, dst_picture.linesize);
        } else {
            fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* Raw video case - directly store the picture in the packet */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags        |= AV_PKT_FLAG_KEY;
        pkt.stream_index  = st->index;
        pkt.data          = dst_picture.data[0];
        pkt.size          = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        AVPacket pkt = { 0 };
        int got_packet;
        av_init_packet(&pkt);

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
            exit(1);
        }

        /* If size is zero, it means the image was buffered. */
        if (!ret && got_packet && pkt.size) {
            pkt.stream_index = st->index;

            /* Write the compressed frame to the media file. */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }
    frame_count++;
}
static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    av_free(src_picture.data[0]);
    av_free(dst_picture.data[0]);
    av_free(frame);
}
/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    AVCodec *audio_codec, *video_codec;
    double audio_pts, video_pts;
    int ret;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }
    filename = argv[1];

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc) {
        return 1;
    }
    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;

    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        video_st = add_stream(oc, &video_codec, fmt->video_codec);
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_codec, video_st);
    if (audio_st)
        open_audio(oc, audio_codec, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    if (frame)
        frame->pts = 0;
    for (;;) {
        /* Compute current audio and video time. */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num /
                        video_st->time_base.den;
        else
            video_pts = 0.0;

        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames */
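        /* The stream whose clock lags is fed next, so audio and video stay
         * roughly interleaved in the output. After each video frame the pts
         * advances by one codec tick rescaled into the stream timebase; for
         * instance, av_rescale_q(1, (AVRational){1, 25}, (AVRational){1, 90000})
         * yields 3600 on an MPEG-style 90 kHz stream clock. */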
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
            frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
        }
    }
    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts opened when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * avcodec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
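
/*
 * A build line of the usual shape (library names are standard; the exact
 * flags depend on how FFmpeg was installed, so pkg-config is the safer
 * route):
 *
 *   gcc muxing.c -o muxing -lavformat -lavcodec -lswscale -lavutil -lm
 *
 * or:
 *
 *   gcc muxing.c -o muxing $(pkg-config --cflags --libs libavformat libavcodec libswscale libavutil) -lm
 */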