/*
 * generic encoding-related code
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/samplefmt.h"

#include "avcodec.h"
#include "encode.h"
#include "frame_thread_encoder.h"
#include "internal.h"

int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
{
    if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n",
               size, INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE);
        return AVERROR(EINVAL);
    }

    av_assert0(!avpkt->data);

    if (avctx && 2 * min_size < size) { // FIXME The factor needs to be finetuned
        av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size);
        avpkt->data = avctx->internal->byte_buffer;
        avpkt->size = size;
    }

    if (!avpkt->data) {
        int ret = av_new_packet(avpkt, size);
        if (ret < 0)
            av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size);
        return ret;
    }

    return 0;
}

int avcodec_default_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int flags)
{
    int ret;

    if (avpkt->size < 0 || avpkt->size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
        return AVERROR(EINVAL);

    if (avpkt->data || avpkt->buf) {
        av_log(avctx, AV_LOG_ERROR, "avpkt->{data,buf} != NULL in avcodec_default_get_encode_buffer()\n");
        return AVERROR(EINVAL);
    }

    ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %d\n", avpkt->size);
        return ret;
    }
    avpkt->data = avpkt->buf->data;
    memset(avpkt->data + avpkt->size, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    return 0;
}

int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
{
    int ret;

    if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
        return AVERROR(EINVAL);

    av_assert0(!avpkt->data && !avpkt->buf);

    avpkt->size = size;
    ret = avctx->get_encode_buffer(avctx, avpkt, flags);
    if (ret < 0)
        goto fail;

    if (!avpkt->data || !avpkt->buf) {
        av_log(avctx, AV_LOG_ERROR, "No buffer returned by get_encode_buffer()\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    ret = 0;
fail:
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_encode_buffer() failed\n");
        av_packet_unref(avpkt);
    }

    return ret;
}
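
/*
 * Illustrative sketch, not part of the original file: an encoder that allocates
 * its own output buffers would typically call ff_get_encode_buffer() from its
 * encode callback roughly like below. The callback name and the worst-case size
 * computation are placeholder assumptions, not real libavcodec code.
 *
 *     static int example_encode(AVCodecContext *avctx, AVPacket *avpkt,
 *                               const AVFrame *frame, int *got_packet)
 *     {
 *         int64_t worst_case_size = ...; // codec-specific bound, assumed known here
 *         int ret = ff_get_encode_buffer(avctx, avpkt, worst_case_size, 0);
 *         if (ret < 0)
 *             return ret;
 *         // ... write the bitstream into avpkt->data and set avpkt->size ...
 *         *got_packet = 1;
 *         return 0;
 *     }
 */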
/**
 * Pad last frame with silence.
 */
static int pad_last_frame(AVCodecContext *s, AVFrame *frame, const AVFrame *src)
{
    int ret;

    frame->format         = src->format;
    frame->channel_layout = src->channel_layout;
    frame->channels       = src->channels;
    frame->nb_samples     = s->frame_size;
    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0)
        goto fail;

    ret = av_frame_copy_props(frame, src);
    if (ret < 0)
        goto fail;

    if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0,
                               src->nb_samples, s->channels, s->sample_fmt)) < 0)
        goto fail;
    if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples,
                                      frame->nb_samples - src->nb_samples,
                                      s->channels, s->sample_fmt)) < 0)
        goto fail;

    return 0;

fail:
    av_frame_unref(frame);
    return ret;
}
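
/*
 * Worked illustration (assumed numbers, not from the original file): with
 * s->frame_size == 1024 and a final input frame of 900 samples, pad_last_frame()
 * copies samples 0..899 of each channel from src and fills samples 900..1023
 * with silence, so the encoder always receives a full-sized last frame.
 */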
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
                            const AVSubtitle *sub)
{
    int ret;
    if (sub->start_display_time) {
        av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
        return -1;
    }

    ret = avctx->codec->encode_sub(avctx, buf, buf_size, sub);
    avctx->frame_number++;
    return ret;
}

int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;

    if (avci->draining)
        return AVERROR_EOF;

    if (!avci->buffer_frame->buf[0])
        return AVERROR(EAGAIN);

    av_frame_move_ref(frame, avci->buffer_frame);

    return 0;
}

static int encode_simple_internal(AVCodecContext *avctx, AVPacket *avpkt)
{
    AVCodecInternal   *avci = avctx->internal;
    EncodeSimpleContext *es = &avci->es;
    AVFrame          *frame = es->in_frame;
    int got_packet;
    int ret;

    if (avci->draining_done)
        return AVERROR_EOF;

    if (!frame->buf[0] && !avci->draining) {
        av_frame_unref(frame);
        ret = ff_encode_get_frame(avctx, frame);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
    }

    if (!frame->buf[0]) {
        if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
              (avci->frame_thread_encoder && avctx->active_thread_type & FF_THREAD_FRAME)))
            return AVERROR_EOF;

        // Flushing is signaled with a NULL frame
        frame = NULL;
    }

    got_packet = 0;

    av_assert0(avctx->codec->encode2);

    if (CONFIG_FRAME_THREAD_ENCODER &&
        avci->frame_thread_encoder && (avctx->active_thread_type & FF_THREAD_FRAME))
        /* This might modify frame, but it doesn't matter, because
         * the frame properties used below are not used for video
         * (due to the delay inherent in frame threaded encoding, it makes
         *  no sense to use the properties of the current frame anyway). */
        ret = ff_thread_video_encode_frame(avctx, avpkt, frame, &got_packet);
    else {
        ret = avctx->codec->encode2(avctx, avpkt, frame, &got_packet);
        if (avctx->codec->type == AVMEDIA_TYPE_VIDEO && !ret && got_packet &&
            !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
            avpkt->pts = avpkt->dts = frame->pts;
    }

    av_assert0(ret <= 0);

    emms_c();

    if (!ret && got_packet) {
        if (avpkt->data) {
            ret = av_packet_make_refcounted(avpkt);
            if (ret < 0)
                goto end;
        }

        if (frame && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
            if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
                if (avpkt->pts == AV_NOPTS_VALUE)
                    avpkt->pts = frame->pts;
                if (!avpkt->duration)
                    avpkt->duration = ff_samples_to_time_base(avctx,
                                                              frame->nb_samples);
            }
        }
        if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
            /* NOTE: if we add any audio encoders which output non-keyframe packets,
             *       this needs to be moved to the encoders, but for now we can do it
             *       here to simplify things */
            avpkt->flags |= AV_PKT_FLAG_KEY;
            avpkt->dts = avpkt->pts;
        }
    }

    if (avci->draining && !got_packet)
        avci->draining_done = 1;

end:
    if (ret < 0 || !got_packet)
        av_packet_unref(avpkt);

    if (frame) {
        if (!ret)
            avctx->frame_number++;
        av_frame_unref(frame);
    }

    if (got_packet)
        // Encoders must always return ref-counted buffers.
        // Side-data only packets have no data and can be non-refcounted.
        av_assert0(!avpkt->data || avpkt->buf);

    return ret;
}

static int encode_simple_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
    int ret;

    while (!avpkt->data && !avpkt->side_data) {
        ret = encode_simple_internal(avctx, avpkt);
        if (ret < 0)
            return ret;
    }

    return 0;
}

static int encode_receive_packet_internal(AVCodecContext *avctx, AVPacket *avpkt)
{
    AVCodecInternal *avci = avctx->internal;
    int ret;

    if (avci->draining_done)
        return AVERROR_EOF;

    av_assert0(!avpkt->data && !avpkt->side_data);

    if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
        if ((avctx->flags & AV_CODEC_FLAG_PASS1) && avctx->stats_out)
            avctx->stats_out[0] = '\0';
        if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
            return AVERROR(EINVAL);
    }

    if (avctx->codec->receive_packet) {
        ret = avctx->codec->receive_packet(avctx, avpkt);
        if (ret < 0)
            av_packet_unref(avpkt);
        else
            // Encoders must always return ref-counted buffers.
            // Side-data only packets have no data and can be non-refcounted.
            av_assert0(!avpkt->data || avpkt->buf);
    } else
        ret = encode_simple_receive_packet(avctx, avpkt);

    if (ret == AVERROR_EOF)
        avci->draining_done = 1;

    return ret;
}

static int encode_send_frame_internal(AVCodecContext *avctx, const AVFrame *src)
{
    AVCodecInternal *avci = avctx->internal;
    AVFrame *dst = avci->buffer_frame;
    int ret;

    if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
        /* extract audio service type metadata */
        AVFrameSideData *sd = av_frame_get_side_data(src, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
        if (sd && sd->size >= sizeof(enum AVAudioServiceType))
            avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;

        /* check for valid frame size */
        if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
            if (src->nb_samples > avctx->frame_size) {
                av_log(avctx, AV_LOG_ERROR, "more samples than frame size\n");
                return AVERROR(EINVAL);
            }
        } else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
            /* if we already got an undersized frame, that must have been the last */
            if (avctx->internal->last_audio_frame) {
                av_log(avctx, AV_LOG_ERROR, "frame_size (%d) was not respected for a non-last frame\n", avctx->frame_size);
                return AVERROR(EINVAL);
            }

            if (src->nb_samples < avctx->frame_size) {
                ret = pad_last_frame(avctx, dst, src);
                if (ret < 0)
                    return ret;

                avctx->internal->last_audio_frame = 1;
            } else if (src->nb_samples > avctx->frame_size) {
                av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d)\n", src->nb_samples, avctx->frame_size);
                return AVERROR(EINVAL);
            }
        }
    }

    if (!dst->data[0]) {
        ret = av_frame_ref(dst, src);
        if (ret < 0)
            return ret;
    }

    return 0;
}

int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;
    int ret;

    if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
        return AVERROR(EINVAL);

    if (avci->draining)
        return AVERROR_EOF;

    if (avci->buffer_frame->data[0])
        return AVERROR(EAGAIN);

    if (!frame) {
        avci->draining = 1;
    } else {
        ret = encode_send_frame_internal(avctx, frame);
        if (ret < 0)
            return ret;
    }

    if (!avci->buffer_pkt->data && !avci->buffer_pkt->side_data) {
        ret = encode_receive_packet_internal(avctx, avci->buffer_pkt);
        if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            return ret;
    }

    return 0;
}

int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
    AVCodecInternal *avci = avctx->internal;
    int ret;

    av_packet_unref(avpkt);

    if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
        return AVERROR(EINVAL);

    if (avci->buffer_pkt->data || avci->buffer_pkt->side_data) {
        av_packet_move_ref(avpkt, avci->buffer_pkt);
    } else {
        ret = encode_receive_packet_internal(avctx, avpkt);
        if (ret < 0)
            return ret;
    }

    return 0;
}
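
/*
 * Illustrative sketch, not part of the original file: from the caller's side,
 * avcodec_send_frame() and avcodec_receive_packet() above are normally driven
 * by a loop of roughly this shape (error handling abbreviated, variable names
 * are placeholders):
 *
 *     // Pass frame == NULL to enter draining mode at the end of the stream.
 *     int ret = avcodec_send_frame(enc_ctx, frame);
 *     if (ret < 0)
 *         return ret;
 *
 *     while (ret >= 0) {
 *         ret = avcodec_receive_packet(enc_ctx, pkt);
 *         if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 *             break;              // need more input, or fully drained
 *         else if (ret < 0)
 *             return ret;         // a real error
 *
 *         // ... write pkt to the output here ...
 *         av_packet_unref(pkt);
 *     }
 */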
#if FF_API_OLD_ENCDEC
static int compat_encode(AVCodecContext *avctx, AVPacket *avpkt,
                         int *got_packet, const AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;
    AVPacket user_pkt;
    int ret;

    *got_packet = 0;

    if (frame && avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
        if (frame->format == AV_PIX_FMT_NONE)
            av_log(avctx, AV_LOG_WARNING, "AVFrame.format is not set\n");
        if (frame->width == 0 || frame->height == 0)
            av_log(avctx, AV_LOG_WARNING, "AVFrame.width or height is not set\n");
    }

    if (avctx->codec->capabilities & AV_CODEC_CAP_DR1) {
        av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_encode_* API does not support "
                                      "AV_CODEC_CAP_DR1 encoders\n");
        return AVERROR(ENOSYS);
    }

    ret = avcodec_send_frame(avctx, frame);
    if (ret == AVERROR_EOF)
        ret = 0;
    else if (ret == AVERROR(EAGAIN)) {
        /* we fully drain all the output in each encode call, so this should not
         * ever happen */
        return AVERROR_BUG;
    } else if (ret < 0)
        return ret;

    av_packet_move_ref(&user_pkt, avpkt);
    while (ret >= 0) {
        ret = avcodec_receive_packet(avctx, avpkt);
        if (ret < 0) {
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            goto finish;
        }

        if (avpkt != avci->compat_encode_packet) {
            if (avpkt->data && user_pkt.data) {
                if (user_pkt.size >= avpkt->size) {
                    memcpy(user_pkt.data, avpkt->data, avpkt->size);
                    av_buffer_unref(&avpkt->buf);
                    avpkt->buf  = user_pkt.buf;
                    avpkt->data = user_pkt.data;
FF_DISABLE_DEPRECATION_WARNINGS
                    av_init_packet(&user_pkt);
FF_ENABLE_DEPRECATION_WARNINGS
                } else {
                    av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
                    av_packet_unref(avpkt);
                    ret = AVERROR(EINVAL);
                    goto finish;
                }
            }

            *got_packet = 1;
            avpkt = avci->compat_encode_packet;
        } else {
            if (!avci->compat_decode_warned) {
                av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_encode_* "
                       "API cannot return all the packets for this encoder. "
                       "Some packets will be dropped. Update your code to the "
                       "new encoding API to fix this.\n");
                avci->compat_decode_warned = 1;
                av_packet_unref(avpkt);
            }
        }

        if (avci->draining)
            break;
    }

finish:
    if (ret < 0)
        av_packet_unref(&user_pkt);

    return ret;
}

int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
                                              AVPacket *avpkt,
                                              const AVFrame *frame,
                                              int *got_packet_ptr)
{
    int ret = compat_encode(avctx, avpkt, got_packet_ptr, frame);

    if (ret < 0)
        av_packet_unref(avpkt);

    return ret;
}

int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
                                              AVPacket *avpkt,
                                              const AVFrame *frame,
                                              int *got_packet_ptr)
{
    int ret = compat_encode(avctx, avpkt, got_packet_ptr, frame);

    if (ret < 0)
        av_packet_unref(avpkt);

    return ret;
}
#endif

int ff_encode_preinit(AVCodecContext *avctx)
{
    int i;
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame) {
        return AVERROR(ENOMEM);
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) {
        av_log(avctx, AV_LOG_ERROR, "The encoder timebase is not set.\n");
        return AVERROR(EINVAL);
    }

    if (avctx->codec->sample_fmts) {
        for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) {
            if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
                break;
            if (avctx->channels == 1 &&
                av_get_planar_sample_fmt(avctx->sample_fmt) ==
                av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) {
                avctx->sample_fmt = avctx->codec->sample_fmts[i];
                break;
            }
        }
        if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
            char buf[128];
            snprintf(buf, sizeof(buf), "%d", avctx->sample_fmt);
            av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is invalid or not supported\n",
                   (char *)av_x_if_null(av_get_sample_fmt_name(avctx->sample_fmt), buf));
            return AVERROR(EINVAL);
        }
    }
    if (avctx->codec->pix_fmts) {
        for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++)
            if (avctx->pix_fmt == avctx->codec->pix_fmts[i])
                break;
        if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE
            && !(avctx->codec_id == AV_CODEC_ID_MJPEG
                 && avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL)) {
            char buf[128];
            snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt);
            av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n",
                   (char *)av_x_if_null(av_get_pix_fmt_name(avctx->pix_fmt), buf));
            return AVERROR(EINVAL);
        }
        if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ420P ||
            avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ411P ||
            avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ422P ||
            avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ440P ||
            avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ444P)
            avctx->color_range = AVCOL_RANGE_JPEG;
    }
    if (avctx->codec->supported_samplerates) {
        for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
            if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
                break;
        if (avctx->codec->supported_samplerates[i] == 0) {
            av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
                   avctx->sample_rate);
            return AVERROR(EINVAL);
        }
    }
    if (avctx->sample_rate < 0) {
        av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
               avctx->sample_rate);
        return AVERROR(EINVAL);
    }
    if (avctx->codec->channel_layouts) {
        if (!avctx->channel_layout) {
            av_log(avctx, AV_LOG_WARNING, "Channel layout not specified\n");
        } else {
            for (i = 0; avctx->codec->channel_layouts[i] != 0; i++)
                if (avctx->channel_layout == avctx->codec->channel_layouts[i])
                    break;
            if (avctx->codec->channel_layouts[i] == 0) {
                char buf[512];
                av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
                av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported\n", buf);
                return AVERROR(EINVAL);
            }
        }
    }
    if (avctx->channel_layout && avctx->channels) {
        int channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
        if (channels != avctx->channels) {
            char buf[512];
            av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
            av_log(avctx, AV_LOG_ERROR,
                   "Channel layout '%s' with %d channels does not match number of specified channels %d\n",
                   buf, channels, avctx->channels);
            return AVERROR(EINVAL);
        }
    } else if (avctx->channel_layout) {
        avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
    }
    if (avctx->channels < 0) {
        av_log(avctx, AV_LOG_ERROR, "Specified number of channels %d is not supported\n",
               avctx->channels);
        return AVERROR(EINVAL);
    }
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->pix_fmt);
        if (    avctx->bits_per_raw_sample < 0
            || (avctx->bits_per_raw_sample > 8 && pixdesc->comp[0].depth <= 8)) {
            av_log(avctx, AV_LOG_WARNING, "Specified bit depth %d not possible with the specified pixel formats depth %d\n",
                   avctx->bits_per_raw_sample, pixdesc->comp[0].depth);
            avctx->bits_per_raw_sample = pixdesc->comp[0].depth;
        }
        if (avctx->width <= 0 || avctx->height <= 0) {
            av_log(avctx, AV_LOG_ERROR, "dimensions not set\n");
            return AVERROR(EINVAL);
        }
    }
    if (   (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
        && avctx->bit_rate > 0 && avctx->bit_rate < 1000) {
        av_log(avctx, AV_LOG_WARNING, "Bitrate %"PRId64" is extremely low, maybe you mean %"PRId64"k\n", avctx->bit_rate, avctx->bit_rate);
    }

    if (!avctx->rc_initial_buffer_occupancy)
        avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3LL / 4;

    if (avctx->ticks_per_frame && avctx->time_base.num &&
        avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) {
        av_log(avctx, AV_LOG_ERROR,
               "ticks_per_frame %d too large for the timebase %d/%d.",
               avctx->ticks_per_frame,
               avctx->time_base.num,
               avctx->time_base.den);
        return AVERROR(EINVAL);
    }

    if (avctx->hw_frames_ctx) {
        AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
        if (frames_ctx->format != avctx->pix_fmt) {
            av_log(avctx, AV_LOG_ERROR,
                   "Mismatching AVCodecContext.pix_fmt and AVHWFramesContext.format\n");
            return AVERROR(EINVAL);
        }
        if (avctx->sw_pix_fmt != AV_PIX_FMT_NONE &&
            avctx->sw_pix_fmt != frames_ctx->sw_format) {
            av_log(avctx, AV_LOG_ERROR,
                   "Mismatching AVCodecContext.sw_pix_fmt (%s) "
                   "and AVHWFramesContext.sw_format (%s)\n",
                   av_get_pix_fmt_name(avctx->sw_pix_fmt),
                   av_get_pix_fmt_name(frames_ctx->sw_format));
            return AVERROR(EINVAL);
        }
        avctx->sw_pix_fmt = frames_ctx->sw_format;
    }

    return 0;
}