You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

701 lines
24KB

  1. /*
  2. * H.26L/H.264/AVC/JVT/14496-10/... parser
  3. * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * H.264 / AVC / MPEG4 part10 parser.
  24. * @author Michael Niedermayer <michaelni@gmx.at>
  25. */
  26. #define UNCHECKED_BITSTREAM_READER 1
  27. #include <assert.h>
  28. #include <stdint.h>
  29. #include "libavutil/avutil.h"
  30. #include "libavutil/error.h"
  31. #include "libavutil/log.h"
  32. #include "libavutil/mem.h"
  33. #include "libavutil/pixfmt.h"
  34. #include "get_bits.h"
  35. #include "golomb.h"
  36. #include "h264.h"
  37. #include "h264_sei.h"
  38. #include "h264data.h"
  39. #include "internal.h"
  40. #include "mpegutils.h"
  41. #include "parser.h"
/* Per-parser private state; lives in AVCodecParserContext.priv_data. */
typedef struct H264ParseContext {
    H264Context h;          // embedded decoder context (avctx, bit reader, NAL state)
    ParseContext pc;        // generic frame-combining state used by ff_combine_frame()
    H264ParamSets ps;       // parser-local SPS/PPS sets (kept separate from h.ps)
    H264DSPContext h264dsp; // only startcode_find_candidate() is used by the parser
    H264POCContext poc;     // picture order count state carried across frames
    H264SEIContext sei;     // last decoded SEI messages (timing, recovery point, ...)
    int got_first;          // set once extradata has been processed on the first parse call
} H264ParseContext;
/**
 * Locate the end of the current coded picture in the byte stream.
 *
 * Runs a resumable state machine (state persists across calls in pc->state)
 * over Annex B start codes or AVC length-prefixed NAL units.
 *
 * State meanings:
 *   7    scanning for a 00 00 01 start code
 *   0-2  number of consecutive zero bytes seen so far
 *   4-5  next byte is a NAL header (just past start code / length prefix)
 *   >7   inside a slice NAL, accumulating slice-header bytes
 *
 * @return offset of the first byte belonging to the next frame, or
 *         END_NOT_FOUND when the frame continues past buf_size (Annex B).
 */
static int h264_find_frame_end(H264ParseContext *p, const uint8_t *buf,
                               int buf_size)
{
    H264Context *h = &p->h;
    int i, j;
    uint32_t state;
    ParseContext *pc = &p->pc;
    /* in AVC (length-prefixed) mode every NAL boundary is known up front;
     * next_avc is the offset of the next length prefix */
    int next_avc = h->is_avc ? 0 : buf_size;

    state = pc->state;
    if (state > 13)
        state = 7;

    if (h->is_avc && !h->nal_length_size)
        av_log(h->avctx, AV_LOG_ERROR, "AVC-parser: nal length size invalid\n");

    for (i = 0; i < buf_size; i++) {
        if (i >= next_avc) {
            /* read the big-endian NAL size prefix (1, 2 or 4 bytes) */
            int nalsize = 0;
            i = next_avc;
            for (j = 0; j < h->nal_length_size; j++)
                nalsize = (nalsize << 8) | buf[i++];
            if (nalsize <= 0 || nalsize > buf_size - i) {
                av_log(h->avctx, AV_LOG_ERROR, "AVC-parser: nal size %d remaining %d\n", nalsize, buf_size - i);
                return buf_size;
            }
            next_avc = i + nalsize;
            state    = 5; /* next byte is the NAL header */
        }

        if (state == 7) {
            /* DSP-accelerated scan for a start code candidate */
            i += p->h264dsp.startcode_find_candidate(buf + i, next_avc - i);
            if (i < next_avc)
                state = 2;
        } else if (state <= 2) {
            if (buf[i] == 1)
                state ^= 5;            // 2->7, 1->4, 0->5
            else if (buf[i])
                state = 7;
            else
                state >>= 1;           // 2->1, 1->0, 0->0
        } else if (state <= 5) {
            int nalu_type = buf[i] & 0x1F;
            if (nalu_type == NAL_SEI || nalu_type == NAL_SPS ||
                nalu_type == NAL_PPS || nalu_type == NAL_AUD) {
                /* a non-VCL NAL after a started frame terminates the frame */
                if (pc->frame_start_found) {
                    i++;
                    goto found;
                }
            } else if (nalu_type == NAL_SLICE || nalu_type == NAL_DPA ||
                       nalu_type == NAL_IDR_SLICE) {
                state += 8; /* start collecting slice-header bytes below */
                continue;
            }
            state = 7;
        } else {
            /* Collect enough bytes to decode first_mb_in_slice; a slice whose
             * first_mb is <= that of the previous slice starts a new picture. */
            h->parse_history[h->parse_history_count++] = buf[i];
            if (h->parse_history_count > 5) {
                unsigned int mb, last_mb = h->parse_last_mb;
                GetBitContext gb;

                init_get_bits(&gb, h->parse_history, 8 * h->parse_history_count);
                h->parse_history_count = 0;
                mb = get_ue_golomb_long(&gb);
                h->parse_last_mb = mb;
                if (pc->frame_start_found) {
                    if (mb <= last_mb)
                        goto found;
                } else
                    pc->frame_start_found = 1;
                state = 7;
            }
        }
    }
    pc->state = state;
    if (h->is_avc)
        return next_avc;
    return END_NOT_FOUND;

found:
    pc->state             = 7;
    pc->frame_start_found = 0;
    if (h->is_avc)
        return next_avc;
    /* back up over the start code (and NAL header byte when state > 7)
     * that already belongs to the following frame */
    return i - (state & 5) - 5 * (state > 7);
}
/**
 * Scan the remainder of a slice header for an MMCO_RESET operation.
 *
 * The bit reader is positioned just after the POC fields; this skips the
 * intervening syntax elements (ref list reordering, pred weight table) in
 * the same order the decoder would read them.
 *
 * @return 1 if MMCO_RESET is present, 0 if not,
 *         a negative AVERROR code on invalid bitstream data.
 */
static int scan_mmco_reset(AVCodecParserContext *s, GetBitContext *gb)
{
    H264PredWeightTable pwt;
    int slice_type_nos = s->pict_type & 3;
    H264ParseContext *p = s->priv_data;
    H264Context      *h = &p->h;
    int list_count, ref_count[2];

    if (p->ps.pps->redundant_pic_cnt_present)
        get_ue_golomb(gb); // redundant_pic_count

    if (slice_type_nos == AV_PICTURE_TYPE_B)
        get_bits1(gb); // direct_spatial_mv_pred

    if (ff_h264_parse_ref_count(&list_count, ref_count, gb, p->ps.pps,
                                slice_type_nos, h->picture_structure, h->avctx) < 0)
        return AVERROR_INVALIDDATA;

    if (slice_type_nos != AV_PICTURE_TYPE_I) {
        int list;
        /* skip ref_pic_list_modification() for each reference list */
        for (list = 0; list < list_count; list++) {
            if (get_bits1(gb)) {
                int index;
                for (index = 0; ; index++) {
                    unsigned int reordering_of_pic_nums_idc = get_ue_golomb_31(gb);

                    if (reordering_of_pic_nums_idc < 3)
                        get_ue_golomb_long(gb);
                    else if (reordering_of_pic_nums_idc > 3) {
                        av_log(h->avctx, AV_LOG_ERROR,
                               "illegal reordering_of_pic_nums_idc %d\n",
                               reordering_of_pic_nums_idc);
                        return AVERROR_INVALIDDATA;
                    } else
                        break; /* idc == 3 terminates the modification list */

                    if (index >= ref_count[list]) {
                        av_log(h->avctx, AV_LOG_ERROR,
                               "reference count %d overflow\n", index);
                        return AVERROR_INVALIDDATA;
                    }
                }
            }
        }
    }

    /* skip pred_weight_table() when the PPS says it is present */
    if ((p->ps.pps->weighted_pred && slice_type_nos == AV_PICTURE_TYPE_P) ||
        (p->ps.pps->weighted_bipred_idc == 1 && slice_type_nos == AV_PICTURE_TYPE_B))
        ff_h264_pred_weight_table(gb, p->ps.sps, ref_count, slice_type_nos,
                                  &pwt);

    if (get_bits1(gb)) { // adaptive_ref_pic_marking_mode_flag
        int i;
        for (i = 0; i < MAX_MMCO_COUNT; i++) {
            MMCOOpcode opcode = get_ue_golomb_31(gb);

            if (opcode > (unsigned) MMCO_LONG) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "illegal memory management control operation %d\n",
                       opcode);
                return AVERROR_INVALIDDATA;
            }
            if (opcode == MMCO_END)
                return 0;
            else if (opcode == MMCO_RESET)
                return 1;

            /* skip the arguments of the remaining opcodes */
            if (opcode == MMCO_SHORT2UNUSED || opcode == MMCO_SHORT2LONG)
                get_ue_golomb_long(gb); // difference_of_pic_nums_minus1
            if (opcode == MMCO_SHORT2LONG || opcode == MMCO_LONG2UNUSED ||
                opcode == MMCO_LONG || opcode == MMCO_SET_MAX_LONG)
                get_ue_golomb_31(gb);
        }
    }

    return 0;
}
  198. /**
  199. * Parse NAL units of found picture and decode some basic information.
  200. *
  201. * @param s parser context.
  202. * @param avctx codec context.
  203. * @param buf buffer with field/frame data.
  204. * @param buf_size size of the buffer.
  205. */
static inline int parse_nal_units(AVCodecParserContext *s,
                                  AVCodecContext *avctx,
                                  const uint8_t * const buf, int buf_size)
{
    H264ParseContext *p = s->priv_data;
    H264Context      *h = &p->h;
    H2645NAL nal = { NULL };
    int buf_index, next_avc;
    unsigned int pps_id;
    unsigned int slice_type;
    int state = -1, got_reset = 0;
    /* "Q264" tags a synthetic test stream that carries no picture data */
    int q264 = buf_size >= 4 && !memcmp("Q264", buf, 4);
    int field_poc[2];
    int ret;

    /* set some sane default values */
    s->pict_type         = AV_PICTURE_TYPE_I;
    s->key_frame         = 0;
    s->picture_structure = AV_PICTURE_STRUCTURE_UNKNOWN;

    h->avctx = avctx;
    ff_h264_sei_uninit(&p->sei);
    h->sei.frame_packing.frame_packing_arrangement_cancel_flag = -1;

    if (!buf_size)
        return 0;

    buf_index = 0;
    next_avc  = h->is_avc ? 0 : buf_size;
    for (;;) {
        const SPS *sps;
        int src_length, consumed, nalsize = 0;

        /* advance to the next NAL: AVC length prefix or Annex B start code */
        if (buf_index >= next_avc) {
            nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
            if (nalsize < 0)
                break;
            next_avc = buf_index + nalsize;
        } else {
            buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
            if (buf_index >= buf_size)
                break;
            if (buf_index >= next_avc)
                continue;
        }
        src_length = next_avc - buf_index;

        state = buf[buf_index];
        switch (state & 0x1f) {
        case NAL_SLICE:
        case NAL_IDR_SLICE:
            // Do not walk the whole buffer just to decode slice header
            if ((state & 0x1f) == NAL_IDR_SLICE || ((state >> 5) & 0x3) == 0) {
                /* IDR or disposable slice
                 * No need to decode many bytes because MMCOs shall not be present. */
                if (src_length > 60)
                    src_length = 60;
            } else {
                /* To decode up to MMCOs */
                if (src_length > 1000)
                    src_length = 1000;
            }
            break;
        }
        consumed = ff_h2645_extract_rbsp(buf + buf_index, src_length, &nal);
        if (consumed < 0)
            break;

        buf_index += consumed;

        ret = init_get_bits8(&nal.gb, nal.data, nal.size);
        if (ret < 0)
            goto fail;
        get_bits1(&nal.gb);             /* forbidden_zero_bit */
        nal.ref_idc = get_bits(&nal.gb, 2);
        nal.type    = get_bits(&nal.gb, 5);

        h->gb            = nal.gb;
        h->nal_ref_idc   = nal.ref_idc;
        h->nal_unit_type = nal.type;

        switch (h->nal_unit_type) {
        case NAL_SPS:
            ff_h264_decode_seq_parameter_set(&nal.gb, avctx, &p->ps, 0);
            break;
        case NAL_PPS:
            ff_h264_decode_picture_parameter_set(&nal.gb, avctx, &p->ps,
                                                 nal.size_bits);
            break;
        case NAL_SEI:
            ff_h264_sei_decode(&p->sei, &nal.gb, &p->ps, avctx);
            break;
        case NAL_IDR_SLICE:
            s->key_frame = 1;

            /* an IDR resets all POC prediction state */
            p->poc.prev_frame_num        = 0;
            p->poc.prev_frame_num_offset = 0;
            p->poc.prev_poc_msb          =
            p->poc.prev_poc_lsb          = 0;
        /* fall through */
        case NAL_SLICE:
            get_ue_golomb_long(&nal.gb);  // skip first_mb_in_slice
            slice_type   = get_ue_golomb_31(&nal.gb);
            s->pict_type = ff_h264_golomb_to_pict_type[slice_type % 5];
            if (p->sei.recovery_point.recovery_frame_cnt >= 0) {
                /* key frame, since recovery_frame_cnt is set */
                s->key_frame = 1;
            }
            pps_id = get_ue_golomb(&nal.gb);
            if (pps_id >= MAX_PPS_COUNT) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "pps_id %u out of range\n", pps_id);
                goto fail;
            }
            if (!p->ps.pps_list[pps_id]) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "non-existing PPS %u referenced\n", pps_id);
                goto fail;
            }
            p->ps.pps = (const PPS *)p->ps.pps_list[pps_id]->data;
            if (!p->ps.sps_list[p->ps.pps->sps_id]) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "non-existing SPS %u referenced\n", p->ps.pps->sps_id);
                goto fail;
            }
            p->ps.sps = (SPS *)p->ps.sps_list[p->ps.pps->sps_id]->data;

            h->ps.sps = p->ps.sps;
            h->ps.pps = p->ps.pps;
            sps       = p->ps.sps;

            // heuristic to detect non marked keyframes
            if (h->ps.sps->ref_frame_count <= 1 && h->ps.pps->ref_count[0] <= 1 && s->pict_type == AV_PICTURE_TYPE_I)
                s->key_frame = 1;

            p->poc.frame_num = get_bits(&nal.gb, sps->log2_max_frame_num);

            s->coded_width  = 16 * sps->mb_width;
            s->coded_height = 16 * sps->mb_height;
            s->width        = s->coded_width  - (sps->crop_right + sps->crop_left);
            s->height       = s->coded_height - (sps->crop_top   + sps->crop_bottom);
            if (s->width <= 0 || s->height <= 0) {
                /* nonsensical cropping: fall back to the coded size */
                s->width  = s->coded_width;
                s->height = s->coded_height;
            }

            /* pixel format from luma bit depth + chroma sampling */
            switch (sps->bit_depth_luma) {
            case 9:
                if (CHROMA444(h))      s->format = AV_PIX_FMT_YUV444P9;
                else if (CHROMA422(h)) s->format = AV_PIX_FMT_YUV422P9;
                else                   s->format = AV_PIX_FMT_YUV420P9;
                break;
            case 10:
                if (CHROMA444(h))      s->format = AV_PIX_FMT_YUV444P10;
                else if (CHROMA422(h)) s->format = AV_PIX_FMT_YUV422P10;
                else                   s->format = AV_PIX_FMT_YUV420P10;
                break;
            case 8:
                if (CHROMA444(h))      s->format = AV_PIX_FMT_YUV444P;
                else if (CHROMA422(h)) s->format = AV_PIX_FMT_YUV422P;
                else                   s->format = AV_PIX_FMT_YUV420P;
                break;
            default:
                s->format = AV_PIX_FMT_NONE;
            }

            avctx->profile = ff_h264_get_profile(sps);
            avctx->level   = sps->level_idc;

            if (sps->frame_mbs_only_flag) {
                h->picture_structure = PICT_FRAME;
            } else {
                if (get_bits1(&nal.gb)) { // field_pic_flag
                    h->picture_structure = PICT_TOP_FIELD + get_bits1(&nal.gb); // bottom_field_flag
                } else {
                    h->picture_structure = PICT_FRAME;
                }
            }

            if (h->nal_unit_type == NAL_IDR_SLICE)
                get_ue_golomb_long(&nal.gb); /* idr_pic_id */
            if (sps->poc_type == 0) {
                p->poc.poc_lsb = get_bits(&nal.gb, sps->log2_max_poc_lsb);

                if (p->ps.pps->pic_order_present == 1 &&
                    h->picture_structure == PICT_FRAME)
                    p->poc.delta_poc_bottom = get_se_golomb(&nal.gb);
            }

            if (sps->poc_type == 1 &&
                !sps->delta_pic_order_always_zero_flag) {
                p->poc.delta_poc[0] = get_se_golomb(&nal.gb);

                if (p->ps.pps->pic_order_present == 1 &&
                    h->picture_structure == PICT_FRAME)
                    p->poc.delta_poc[1] = get_se_golomb(&nal.gb);
            }

            /* Decode POC of this picture.
             * The prev_ values needed for decoding POC of the next picture are not set here. */
            field_poc[0] = field_poc[1] = INT_MAX;
            ff_h264_init_poc(field_poc, &s->output_picture_number, sps,
                             &p->poc, h->picture_structure, nal.ref_idc);

            /* Continue parsing to check if MMCO_RESET is present.
             * FIXME: MMCO_RESET could appear in non-first slice.
             * Maybe, we should parse all undisposable non-IDR slice of this
             * picture until encountering MMCO_RESET in a slice of it. */
            if (h->nal_ref_idc && h->nal_unit_type != NAL_IDR_SLICE) {
                got_reset = scan_mmco_reset(s, &nal.gb);
                if (got_reset < 0)
                    goto fail;
            }

            /* Set up the prev_ values for decoding POC of the next picture. */
            p->poc.prev_frame_num        = got_reset ? 0 : p->poc.frame_num;
            p->poc.prev_frame_num_offset = got_reset ? 0 : p->poc.frame_num_offset;
            if (h->nal_ref_idc != 0) {
                if (!got_reset) {
                    p->poc.prev_poc_msb = p->poc.poc_msb;
                    p->poc.prev_poc_lsb = p->poc.poc_lsb;
                } else {
                    p->poc.prev_poc_msb = 0;
                    p->poc.prev_poc_lsb =
                        h->picture_structure == PICT_BOTTOM_FIELD ? 0 : field_poc[0];
                }
            }

            /* derive repeat_pict from the SEI pic_struct when signalled */
            if (sps->pic_struct_present_flag) {
                switch (p->sei.picture_timing.pic_struct) {
                case SEI_PIC_STRUCT_TOP_FIELD:
                case SEI_PIC_STRUCT_BOTTOM_FIELD:
                    s->repeat_pict = 0;
                    break;
                case SEI_PIC_STRUCT_FRAME:
                case SEI_PIC_STRUCT_TOP_BOTTOM:
                case SEI_PIC_STRUCT_BOTTOM_TOP:
                    s->repeat_pict = 1;
                    break;
                case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
                case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
                    s->repeat_pict = 2;
                    break;
                case SEI_PIC_STRUCT_FRAME_DOUBLING:
                    s->repeat_pict = 3;
                    break;
                case SEI_PIC_STRUCT_FRAME_TRIPLING:
                    s->repeat_pict = 5;
                    break;
                default:
                    s->repeat_pict = h->picture_structure == PICT_FRAME ? 1 : 0;
                    break;
                }
            } else {
                s->repeat_pict = h->picture_structure == PICT_FRAME ? 1 : 0;
            }

            if (h->picture_structure == PICT_FRAME) {
                s->picture_structure = AV_PICTURE_STRUCTURE_FRAME;
                if (sps->pic_struct_present_flag) {
                    /* field order signalled via SEI pic_struct */
                    switch (p->sei.picture_timing.pic_struct) {
                    case SEI_PIC_STRUCT_TOP_BOTTOM:
                    case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
                        s->field_order = AV_FIELD_TT;
                        break;
                    case SEI_PIC_STRUCT_BOTTOM_TOP:
                    case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
                        s->field_order = AV_FIELD_BB;
                        break;
                    default:
                        s->field_order = AV_FIELD_PROGRESSIVE;
                        break;
                    }
                } else {
                    /* fall back to comparing the two field POCs */
                    if (field_poc[0] < field_poc[1])
                        s->field_order = AV_FIELD_TT;
                    else if (field_poc[0] > field_poc[1])
                        s->field_order = AV_FIELD_BB;
                    else
                        s->field_order = AV_FIELD_PROGRESSIVE;
                }
            } else {
                if (h->picture_structure == PICT_TOP_FIELD)
                    s->picture_structure = AV_PICTURE_STRUCTURE_TOP_FIELD;
                else
                    s->picture_structure = AV_PICTURE_STRUCTURE_BOTTOM_FIELD;
                s->field_order = AV_FIELD_UNKNOWN;
            }

            av_freep(&nal.rbsp_buffer);
            return 0; /* no need to evaluate the rest */
        }
    }
    if (q264) {
        av_freep(&nal.rbsp_buffer);
        return 0;
    }
    /* didn't find a picture! */
    av_log(h->avctx, AV_LOG_ERROR, "missing picture in access unit with size %d\n", buf_size);
fail:
    av_freep(&nal.rbsp_buffer);
    return -1;
}
/**
 * Main parser entry point: split the input into complete frames and fill
 * the AVCodecParserContext fields from the slice headers / SEI.
 *
 * @return number of input bytes consumed.
 */
static int h264_parse(AVCodecParserContext *s,
                      AVCodecContext *avctx,
                      const uint8_t **poutbuf, int *poutbuf_size,
                      const uint8_t *buf, int buf_size)
{
    H264ParseContext *p  = s->priv_data;
    H264Context      *h  = &p->h;
    ParseContext     *pc = &p->pc;
    int next;

    if (!p->got_first) {
        /* one-time init: import SPS/PPS from the codec extradata */
        p->got_first = 1;
        if (avctx->extradata_size) {
            int i;

            h->avctx = avctx;
            // must be done like in decoder, otherwise opening the parser,
            // letting it create extradata and then closing and opening again
            // will cause has_b_frames to be always set.
            // Note that estimate_timings_from_pts does exactly this.
            if (!avctx->has_b_frames)
                h->low_delay = 1;
            ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size);

            /* mirror the parameter sets decoded into h into the
             * parser-local lists */
            for (i = 0; i < FF_ARRAY_ELEMS(p->ps.sps_list); i++) {
                av_buffer_unref(&p->ps.sps_list[i]);
                if (h->ps.sps_list[i]) {
                    p->ps.sps_list[i] = av_buffer_ref(h->ps.sps_list[i]);
                    if (!p->ps.sps_list[i])
                        return AVERROR(ENOMEM);
                }
            }
            for (i = 0; i < FF_ARRAY_ELEMS(p->ps.pps_list); i++) {
                av_buffer_unref(&p->ps.pps_list[i]);
                if (h->ps.pps_list[i]) {
                    p->ps.pps_list[i] = av_buffer_ref(h->ps.pps_list[i]);
                    if (!p->ps.pps_list[i])
                        return AVERROR(ENOMEM);
                }
            }

            p->ps.sps = h->ps.sps;
        }
    }

    if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) {
        next = buf_size;
    } else {
        next = h264_find_frame_end(p, buf, buf_size);

        if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
            /* frame incomplete: buffered, ask for more input */
            *poutbuf      = NULL;
            *poutbuf_size = 0;
            return buf_size;
        }

        if (next < 0 && next != END_NOT_FOUND) {
            av_assert1(pc->last_index + next >= 0);
            h264_find_frame_end(p, &pc->buffer[pc->last_index + next], -next); // update state
        }
    }

    parse_nal_units(s, avctx, buf, buf_size);

    if (avctx->framerate.num)
        avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
    if (p->sei.picture_timing.cpb_removal_delay >= 0) {
        /* HRD timing from the buffering period / picture timing SEI */
        s->dts_sync_point    = p->sei.buffering_period.present;
        s->dts_ref_dts_delta = p->sei.picture_timing.cpb_removal_delay;
        s->pts_dts_delta     = p->sei.picture_timing.dpb_output_delay;
    } else {
        s->dts_sync_point    = INT_MIN;
        s->dts_ref_dts_delta = INT_MIN;
        s->pts_dts_delta     = INT_MIN;
    }

    if (s->flags & PARSER_FLAG_ONCE) {
        s->flags &= PARSER_FLAG_COMPLETE_FRAMES;
    }

    *poutbuf      = buf;
    *poutbuf_size = buf_size;
    return next;
}
  554. static int h264_split(AVCodecContext *avctx,
  555. const uint8_t *buf, int buf_size)
  556. {
  557. uint32_t state = -1;
  558. int has_sps = 0;
  559. int has_pps = 0;
  560. const uint8_t *ptr = buf, *end = buf + buf_size;
  561. int nalu_type;
  562. while (ptr < end) {
  563. ptr = avpriv_find_start_code(ptr, end, &state);
  564. if ((state & 0xFFFFFF00) != 0x100)
  565. break;
  566. nalu_type = state & 0x1F;
  567. if (nalu_type == NAL_SPS) {
  568. has_sps = 1;
  569. } else if (nalu_type == NAL_PPS)
  570. has_pps = 1;
  571. /* else if (nalu_type == 0x01 ||
  572. * nalu_type == 0x02 ||
  573. * nalu_type == 0x05) {
  574. * }
  575. */
  576. else if ((nalu_type != NAL_SEI || has_pps) &&
  577. nalu_type != NAL_AUD && nalu_type != NAL_SPS_EXT &&
  578. nalu_type != 0x0f) {
  579. if (has_sps) {
  580. while (ptr - 4 > buf && ptr[-5] == 0)
  581. ptr--;
  582. return ptr - 4 - buf;
  583. }
  584. }
  585. }
  586. return 0;
  587. }
  588. static void h264_close(AVCodecParserContext *s)
  589. {
  590. H264ParseContext *p = s->priv_data;
  591. H264Context *h = &p->h;
  592. ParseContext *pc = &p->pc;
  593. int i;
  594. av_freep(&pc->buffer);
  595. ff_h264_free_context(h);
  596. ff_h264_sei_uninit(&p->sei);
  597. for (i = 0; i < FF_ARRAY_ELEMS(p->ps.sps_list); i++)
  598. av_buffer_unref(&p->ps.sps_list[i]);
  599. for (i = 0; i < FF_ARRAY_ELEMS(p->ps.pps_list); i++)
  600. av_buffer_unref(&p->ps.pps_list[i]);
  601. }
  602. static av_cold int init(AVCodecParserContext *s)
  603. {
  604. H264ParseContext *p = s->priv_data;
  605. H264Context *h = &p->h;
  606. h->slice_ctx = av_mallocz(sizeof(*h->slice_ctx));
  607. if (!h->slice_ctx)
  608. return 0;
  609. h->nb_slice_ctx = 1;
  610. h->slice_context_count = 1;
  611. ff_h264dsp_init(&p->h264dsp, 8, 1);
  612. return 0;
  613. }
/* Parser registration entry; picked up by libavcodec's parser list. */
AVCodecParser ff_h264_parser = {
    .codec_ids      = { AV_CODEC_ID_H264 },
    .priv_data_size = sizeof(H264ParseContext),
    .parser_init    = init,
    .parser_parse   = h264_parse,
    .parser_close   = h264_close,
    .split          = h264_split,   // extradata extraction for stream copy
};