You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

659 lines
23KB

  1. /*
  2. * H.26L/H.264/AVC/JVT/14496-10/... parser
  3. * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * H.264 / AVC / MPEG4 part10 parser.
  24. * @author Michael Niedermayer <michaelni@gmx.at>
  25. */
  26. #define UNCHECKED_BITSTREAM_READER 1
  27. #include <assert.h>
  28. #include <stdint.h>
  29. #include "libavutil/avutil.h"
  30. #include "libavutil/error.h"
  31. #include "libavutil/log.h"
  32. #include "libavutil/mem.h"
  33. #include "libavutil/pixfmt.h"
  34. #include "get_bits.h"
  35. #include "golomb.h"
  36. #include "h264.h"
  37. #include "h264data.h"
  38. #include "internal.h"
  39. #include "mpegutils.h"
  40. #include "parser.h"
/**
 * Private state of the H.264 parser.
 */
typedef struct H264ParseContext {
    H264Context h;    ///< embedded H.264 context used to decode SPS/PPS/SEI and slice headers
    ParseContext pc;  ///< generic frame-combining state (partial-frame buffer, scanner state)
    int got_first;    ///< set after the first parse call, once extradata (if any) was decoded
} H264ParseContext;
/**
 * Find the end of the current frame in the bitstream.
 *
 * Runs a byte-wise state machine over buf. The state encoding (derived from
 * the transitions below):
 *   - state 7      : scanning for a start code
 *   - states 0..2  : counting trailing zero bytes of a candidate start code
 *   - states 4..5  : the next byte is a NAL header (after a start code or an
 *                    AVC length prefix)
 *   - states > 7   : accumulating slice-header bytes in parse_history to
 *                    decode first_mb_in_slice
 *
 * @return the offset of the end of the frame, END_NOT_FOUND if the frame
 *         continues past this buffer, or next_avc for AVC (length-prefixed)
 *         input.
 */
static int h264_find_frame_end(H264ParseContext *p, const uint8_t *buf,
                               int buf_size)
{
    H264Context *h = &p->h;
    int i, j;
    uint32_t state;
    ParseContext *pc = &p->pc;
    int next_avc = h->is_avc ? 0 : buf_size;

//    mb_addr= pc->mb_addr - 1;
    state = pc->state;
    if (state > 13)
        state = 7;

    if (h->is_avc && !h->nal_length_size)
        av_log(h->avctx, AV_LOG_ERROR, "AVC-parser: nal length size invalid\n");

    for (i = 0; i < buf_size; i++) {
        if (i >= next_avc) {
            /* AVC mode: read the big-endian NAL size prefix instead of
             * looking for start codes. */
            int nalsize = 0;
            i = next_avc;
            for (j = 0; j < h->nal_length_size; j++)
                nalsize = (nalsize << 8) | buf[i++];
            if (nalsize <= 0 || nalsize > buf_size - i) {
                av_log(h->avctx, AV_LOG_ERROR, "AVC-parser: nal size %d remaining %d\n", nalsize, buf_size - i);
                return buf_size;
            }
            next_avc = i + nalsize;
            state    = 5; /* next byte is the NAL header */
        }

        if (state == 7) {
            /* DSP-accelerated scan for the next 0x00 byte that could start
             * a start code. */
            i += h->h264dsp.startcode_find_candidate(buf + i, next_avc - i);
            if (i < next_avc)
                state = 2;
        } else if (state <= 2) {
            if (buf[i] == 1)
                state ^= 5;            // 2->7, 1->4, 0->5
            else if (buf[i])
                state = 7;
            else
                state >>= 1;           // 2->1, 1->0, 0->0
        } else if (state <= 5) {
            /* This byte is a NAL header; classify by nal_unit_type. */
            int nalu_type = buf[i] & 0x1F;
            if (nalu_type == NAL_SEI || nalu_type == NAL_SPS ||
                nalu_type == NAL_PPS || nalu_type == NAL_AUD) {
                /* Non-VCL NAL after a started frame terminates the frame. */
                if (pc->frame_start_found) {
                    i++;
                    goto found;
                }
            } else if (nalu_type == NAL_SLICE || nalu_type == NAL_DPA ||
                       nalu_type == NAL_IDR_SLICE) {
                /* Slice data: switch to slice-header accumulation states. */
                state += 8;
                continue;
            }
            state = 7;
        } else {
            /* states 8..13: collect slice-header bytes and decode
             * first_mb_in_slice; a non-increasing value marks a new frame. */
            h->parse_history[h->parse_history_count++] = buf[i];
            if (h->parse_history_count > 5) {
                unsigned int mb, last_mb = h->parse_last_mb;
                GetBitContext gb;

                init_get_bits(&gb, h->parse_history, 8 * h->parse_history_count);
                h->parse_history_count = 0;
                mb = get_ue_golomb_long(&gb);
                h->parse_last_mb = mb;
                if (pc->frame_start_found) {
                    if (mb <= last_mb)
                        goto found;
                } else
                    pc->frame_start_found = 1;
                state = 7;
            }
        }
    }
    pc->state = state;
    if (h->is_avc)
        return next_avc;
    return END_NOT_FOUND;

found:
    pc->state             = 7;
    pc->frame_start_found = 0;
    if (h->is_avc)
        return next_avc;
    /* Back up over the start code / NAL header bytes already consumed. */
    return i - (state & 5) - 5 * (state > 7);
}
/**
 * Scan the remainder of a slice header for an MMCO_RESET memory management
 * control operation.
 *
 * sl->gb is expected to be positioned after the POC-related fields; the
 * reads below mirror the slice-header syntax up to and including
 * dec_ref_pic_marking().
 *
 * @return 1 if an MMCO_RESET was found, 0 otherwise,
 *         a negative AVERROR code on invalid data.
 */
static int scan_mmco_reset(AVCodecParserContext *s)
{
    H264ParseContext *p = s->priv_data;
    H264Context *h = &p->h;
    H264SliceContext *sl = &h->slice_ctx[0];
    int list_count, ref_count[2];

    sl->slice_type_nos = s->pict_type & 3;

    if (h->pps.redundant_pic_cnt_present)
        get_ue_golomb(&sl->gb); // redundant_pic_count

    if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
        get_bits1(&sl->gb); // direct_spatial_mv_pred

    if (ff_h264_parse_ref_count(&list_count, ref_count, &sl->gb, &h->pps,
                                sl->slice_type_nos, h->picture_structure, h->avctx) < 0)
        return AVERROR_INVALIDDATA;

    if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
        int list;
        /* Skip over ref_pic_list_modification() for each list. */
        for (list = 0; list < list_count; list++) {
            if (get_bits1(&sl->gb)) {
                int index;
                for (index = 0; ; index++) {
                    unsigned int reordering_of_pic_nums_idc = get_ue_golomb_31(&sl->gb);

                    if (reordering_of_pic_nums_idc < 3)
                        get_ue_golomb_long(&sl->gb);
                    else if (reordering_of_pic_nums_idc > 3) {
                        av_log(h->avctx, AV_LOG_ERROR,
                               "illegal reordering_of_pic_nums_idc %d\n",
                               reordering_of_pic_nums_idc);
                        return AVERROR_INVALIDDATA;
                    } else
                        break; /* idc == 3 terminates the modification list */

                    if (index >= ref_count[list]) {
                        av_log(h->avctx, AV_LOG_ERROR,
                               "reference count %d overflow\n", index);
                        return AVERROR_INVALIDDATA;
                    }
                }
            }
        }
    }

    /* Skip pred_weight_table() when present for this slice type. */
    if ((h->pps.weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
        (h->pps.weighted_bipred_idc == 1 && sl->slice_type_nos == AV_PICTURE_TYPE_B))
        ff_h264_pred_weight_table(&sl->gb, &h->sps, ref_count, sl->slice_type_nos,
                                  &sl->pwt);

    if (get_bits1(&sl->gb)) { // adaptive_ref_pic_marking_mode_flag
        int i;
        for (i = 0; i < MAX_MMCO_COUNT; i++) {
            MMCOOpcode opcode = get_ue_golomb_31(&sl->gb);

            if (opcode > (unsigned) MMCO_LONG) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "illegal memory management control operation %d\n",
                       opcode);
                return AVERROR_INVALIDDATA;
            }
            if (opcode == MMCO_END)
                return 0;
            else if (opcode == MMCO_RESET)
                return 1;

            /* Skip the arguments of the remaining MMCO opcodes. */
            if (opcode == MMCO_SHORT2UNUSED || opcode == MMCO_SHORT2LONG)
                get_ue_golomb_long(&sl->gb); // difference_of_pic_nums_minus1
            if (opcode == MMCO_SHORT2LONG || opcode == MMCO_LONG2UNUSED ||
                opcode == MMCO_LONG || opcode == MMCO_SET_MAX_LONG)
                get_ue_golomb_31(&sl->gb);
        }
    }

    return 0;
}
/**
 * Parse NAL units of found picture and decode some basic information.
 *
 * Fills in pict_type, key_frame, picture_structure, field_order,
 * repeat_pict, coded dimensions, pixel format, profile/level and
 * output_picture_number on the parser context.
 *
 * @param s parser context.
 * @param avctx codec context.
 * @param buf buffer with field/frame data.
 * @param buf_size size of the buffer.
 * @return 0 on success (or when no picture was needed), -1 on failure.
 */
static inline int parse_nal_units(AVCodecParserContext *s,
                                  AVCodecContext *avctx,
                                  const uint8_t * const buf, int buf_size)
{
    H264ParseContext *p = s->priv_data;
    H264Context *h = &p->h;
    H264SliceContext *sl = &h->slice_ctx[0];
    H2645NAL nal = { NULL };
    int buf_index, next_avc;
    unsigned int pps_id;
    unsigned int slice_type;
    int state = -1, got_reset = 0;
    /* "Q264" marks a stream that intentionally has no picture data. */
    int q264 = buf_size >= 4 && !memcmp("Q264", buf, 4);
    int field_poc[2];
    int ret;

    /* set some sane default values */
    s->pict_type         = AV_PICTURE_TYPE_I;
    s->key_frame         = 0;
    s->picture_structure = AV_PICTURE_STRUCTURE_UNKNOWN;

    h->avctx = avctx;
    ff_h264_reset_sei(h);
    h->sei_fpa.frame_packing_arrangement_cancel_flag = -1;

    if (!buf_size)
        return 0;

    buf_index = 0;
    next_avc  = h->is_avc ? 0 : buf_size;
    for (;;) {
        int src_length, consumed, nalsize = 0;

        /* Advance to the next NAL unit: length prefix (AVC) or start code. */
        if (buf_index >= next_avc) {
            nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
            if (nalsize < 0)
                break;
            next_avc = buf_index + nalsize;
        } else {
            buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
            if (buf_index >= buf_size)
                break;
            if (buf_index >= next_avc)
                continue;
        }
        src_length = next_avc - buf_index;

        state = buf[buf_index];
        switch (state & 0x1f) {
        case NAL_SLICE:
        case NAL_IDR_SLICE:
            // Do not walk the whole buffer just to decode slice header
            if ((state & 0x1f) == NAL_IDR_SLICE || ((state >> 5) & 0x3) == 0) {
                /* IDR or disposable slice
                 * No need to decode many bytes because MMCOs shall not be present. */
                if (src_length > 60)
                    src_length = 60;
            } else {
                /* To decode up to MMCOs */
                if (src_length > 1000)
                    src_length = 1000;
            }
            break;
        }

        /* Un-escape (remove emulation prevention bytes) into nal.data. */
        consumed = ff_h2645_extract_rbsp(buf + buf_index, src_length, &nal);
        if (consumed < 0)
            break;

        buf_index += consumed;

        ret = init_get_bits8(&nal.gb, nal.data, nal.size);
        if (ret < 0)
            goto fail;
        get_bits1(&nal.gb);               /* forbidden_zero_bit */
        nal.ref_idc = get_bits(&nal.gb, 2);
        nal.type    = get_bits(&nal.gb, 5);

        h->gb            = nal.gb;
        h->nal_ref_idc   = nal.ref_idc;
        h->nal_unit_type = nal.type;

        switch (h->nal_unit_type) {
        case NAL_SPS:
            ff_h264_decode_seq_parameter_set(h, 0);
            break;
        case NAL_PPS:
            ff_h264_decode_picture_parameter_set(h, h->gb.size_in_bits);
            break;
        case NAL_SEI:
            ff_h264_decode_sei(h);
            break;
        case NAL_IDR_SLICE:
            s->key_frame = 1;

            /* IDR resets all prev_ POC/frame_num prediction state. */
            h->prev_frame_num        = 0;
            h->prev_frame_num_offset = 0;
            h->prev_poc_msb          =
            h->prev_poc_lsb          = 0;
        /* fall through */
        case NAL_SLICE:
            sl->gb = nal.gb;
            get_ue_golomb_long(&sl->gb);  // skip first_mb_in_slice
            slice_type   = get_ue_golomb_31(&sl->gb);
            s->pict_type = ff_h264_golomb_to_pict_type[slice_type % 5];
            if (h->sei_recovery_frame_cnt >= 0) {
                /* key frame, since recovery_frame_cnt is set */
                s->key_frame = 1;
            }
            pps_id = get_ue_golomb(&sl->gb);
            if (pps_id >= MAX_PPS_COUNT) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "pps_id %u out of range\n", pps_id);
                goto fail;
            }
            if (!h->pps_buffers[pps_id]) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "non-existing PPS %u referenced\n", pps_id);
                goto fail;
            }
            h->pps = *h->pps_buffers[pps_id];
            if (!h->sps_buffers[h->pps.sps_id]) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "non-existing SPS %u referenced\n", h->pps.sps_id);
                goto fail;
            }
            h->sps       = *h->sps_buffers[h->pps.sps_id];
            h->frame_num = get_bits(&sl->gb, h->sps.log2_max_frame_num);

            if (h->sps.ref_frame_count <= 1 && h->pps.ref_count[0] <= 1 && s->pict_type == AV_PICTURE_TYPE_I)
                s->key_frame = 1;

            s->coded_width  = 16 * h->sps.mb_width;
            s->coded_height = 16 * h->sps.mb_height;
            s->width        = s->coded_width  - (h->sps.crop_right + h->sps.crop_left);
            s->height       = s->coded_height - (h->sps.crop_top  + h->sps.crop_bottom);
            if (s->width <= 0 || s->height <= 0) {
                /* Invalid cropping: fall back to uncropped dimensions. */
                s->width  = s->coded_width;
                s->height = s->coded_height;
            }

            /* Derive the pixel format from luma bit depth and chroma format. */
            switch (h->sps.bit_depth_luma) {
            case 9:
                if (CHROMA444(h))      s->format = AV_PIX_FMT_YUV444P9;
                else if (CHROMA422(h)) s->format = AV_PIX_FMT_YUV422P9;
                else                   s->format = AV_PIX_FMT_YUV420P9;
                break;
            case 10:
                if (CHROMA444(h))      s->format = AV_PIX_FMT_YUV444P10;
                else if (CHROMA422(h)) s->format = AV_PIX_FMT_YUV422P10;
                else                   s->format = AV_PIX_FMT_YUV420P10;
                break;
            case 8:
                if (CHROMA444(h))      s->format = AV_PIX_FMT_YUV444P;
                else if (CHROMA422(h)) s->format = AV_PIX_FMT_YUV422P;
                else                   s->format = AV_PIX_FMT_YUV420P;
                break;
            default:
                s->format = AV_PIX_FMT_NONE;
            }

            avctx->profile = ff_h264_get_profile(&h->sps);
            avctx->level   = h->sps.level_idc;

            if (h->sps.frame_mbs_only_flag) {
                h->picture_structure = PICT_FRAME;
            } else {
                if (get_bits1(&sl->gb)) { // field_pic_flag
                    h->picture_structure = PICT_TOP_FIELD + get_bits1(&sl->gb); // bottom_field_flag
                } else {
                    h->picture_structure = PICT_FRAME;
                }
            }

            if (h->nal_unit_type == NAL_IDR_SLICE)
                get_ue_golomb_long(&sl->gb); /* idr_pic_id */
            if (h->sps.poc_type == 0) {
                h->poc_lsb = get_bits(&sl->gb, h->sps.log2_max_poc_lsb);

                if (h->pps.pic_order_present == 1 &&
                    h->picture_structure == PICT_FRAME)
                    h->delta_poc_bottom = get_se_golomb(&sl->gb);
            }

            if (h->sps.poc_type == 1 &&
                !h->sps.delta_pic_order_always_zero_flag) {
                h->delta_poc[0] = get_se_golomb(&sl->gb);

                if (h->pps.pic_order_present == 1 &&
                    h->picture_structure == PICT_FRAME)
                    h->delta_poc[1] = get_se_golomb(&sl->gb);
            }

            /* Decode POC of this picture.
             * The prev_ values needed for decoding POC of the next picture are not set here. */
            field_poc[0] = field_poc[1] = INT_MAX;
            ff_init_poc(h, field_poc, &s->output_picture_number);

            /* Continue parsing to check if MMCO_RESET is present.
             * FIXME: MMCO_RESET could appear in non-first slice.
             *        Maybe, we should parse all undisposable non-IDR slice of this
             *        picture until encountering MMCO_RESET in a slice of it. */
            if (h->nal_ref_idc && h->nal_unit_type != NAL_IDR_SLICE) {
                got_reset = scan_mmco_reset(s);
                if (got_reset < 0)
                    goto fail;
            }

            /* Set up the prev_ values for decoding POC of the next picture. */
            h->prev_frame_num        = got_reset ? 0 : h->frame_num;
            h->prev_frame_num_offset = got_reset ? 0 : h->frame_num_offset;
            if (h->nal_ref_idc != 0) {
                if (!got_reset) {
                    h->prev_poc_msb = h->poc_msb;
                    h->prev_poc_lsb = h->poc_lsb;
                } else {
                    h->prev_poc_msb = 0;
                    h->prev_poc_lsb =
                        h->picture_structure == PICT_BOTTOM_FIELD ? 0 : field_poc[0];
                }
            }

            /* Derive repeat_pict from SEI pic_struct when signalled. */
            if (h->sps.pic_struct_present_flag) {
                switch (h->sei_pic_struct) {
                case SEI_PIC_STRUCT_TOP_FIELD:
                case SEI_PIC_STRUCT_BOTTOM_FIELD:
                    s->repeat_pict = 0;
                    break;
                case SEI_PIC_STRUCT_FRAME:
                case SEI_PIC_STRUCT_TOP_BOTTOM:
                case SEI_PIC_STRUCT_BOTTOM_TOP:
                    s->repeat_pict = 1;
                    break;
                case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
                case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
                    s->repeat_pict = 2;
                    break;
                case SEI_PIC_STRUCT_FRAME_DOUBLING:
                    s->repeat_pict = 3;
                    break;
                case SEI_PIC_STRUCT_FRAME_TRIPLING:
                    s->repeat_pict = 5;
                    break;
                default:
                    s->repeat_pict = h->picture_structure == PICT_FRAME ? 1 : 0;
                    break;
                }
            } else {
                s->repeat_pict = h->picture_structure == PICT_FRAME ? 1 : 0;
            }

            if (h->picture_structure == PICT_FRAME) {
                s->picture_structure = AV_PICTURE_STRUCTURE_FRAME;
                if (h->sps.pic_struct_present_flag) {
                    switch (h->sei_pic_struct) {
                    case SEI_PIC_STRUCT_TOP_BOTTOM:
                    case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
                        s->field_order = AV_FIELD_TT;
                        break;
                    case SEI_PIC_STRUCT_BOTTOM_TOP:
                    case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
                        s->field_order = AV_FIELD_BB;
                        break;
                    default:
                        s->field_order = AV_FIELD_PROGRESSIVE;
                        break;
                    }
                } else {
                    /* No SEI: infer field order from the field POCs. */
                    if (field_poc[0] < field_poc[1])
                        s->field_order = AV_FIELD_TT;
                    else if (field_poc[0] > field_poc[1])
                        s->field_order = AV_FIELD_BB;
                    else
                        s->field_order = AV_FIELD_PROGRESSIVE;
                }
            } else {
                if (h->picture_structure == PICT_TOP_FIELD)
                    s->picture_structure = AV_PICTURE_STRUCTURE_TOP_FIELD;
                else
                    s->picture_structure = AV_PICTURE_STRUCTURE_BOTTOM_FIELD;
                s->field_order = AV_FIELD_UNKNOWN;
            }

            av_freep(&nal.rbsp_buffer);
            return 0; /* no need to evaluate the rest */
        }
    }
    if (q264) {
        av_freep(&nal.rbsp_buffer);
        return 0;
    }
    /* didn't find a picture! */
    av_log(h->avctx, AV_LOG_ERROR, "missing picture in access unit with size %d\n", buf_size);
fail:
    av_freep(&nal.rbsp_buffer);
    return -1;
}
  471. static int h264_parse(AVCodecParserContext *s,
  472. AVCodecContext *avctx,
  473. const uint8_t **poutbuf, int *poutbuf_size,
  474. const uint8_t *buf, int buf_size)
  475. {
  476. H264ParseContext *p = s->priv_data;
  477. H264Context *h = &p->h;
  478. ParseContext *pc = &p->pc;
  479. int next;
  480. if (!p->got_first) {
  481. p->got_first = 1;
  482. if (avctx->extradata_size) {
  483. h->avctx = avctx;
  484. // must be done like in decoder, otherwise opening the parser,
  485. // letting it create extradata and then closing and opening again
  486. // will cause has_b_frames to be always set.
  487. // Note that estimate_timings_from_pts does exactly this.
  488. if (!avctx->has_b_frames)
  489. h->low_delay = 1;
  490. ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size);
  491. }
  492. }
  493. if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) {
  494. next = buf_size;
  495. } else {
  496. next = h264_find_frame_end(p, buf, buf_size);
  497. if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
  498. *poutbuf = NULL;
  499. *poutbuf_size = 0;
  500. return buf_size;
  501. }
  502. if (next < 0 && next != END_NOT_FOUND) {
  503. av_assert1(pc->last_index + next >= 0);
  504. h264_find_frame_end(p, &pc->buffer[pc->last_index + next], -next); // update state
  505. }
  506. }
  507. parse_nal_units(s, avctx, buf, buf_size);
  508. if (avctx->framerate.num)
  509. avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
  510. if (h->sei_cpb_removal_delay >= 0) {
  511. s->dts_sync_point = h->sei_buffering_period_present;
  512. s->dts_ref_dts_delta = h->sei_cpb_removal_delay;
  513. s->pts_dts_delta = h->sei_dpb_output_delay;
  514. } else {
  515. s->dts_sync_point = INT_MIN;
  516. s->dts_ref_dts_delta = INT_MIN;
  517. s->pts_dts_delta = INT_MIN;
  518. }
  519. if (s->flags & PARSER_FLAG_ONCE) {
  520. s->flags &= PARSER_FLAG_COMPLETE_FRAMES;
  521. }
  522. *poutbuf = buf;
  523. *poutbuf_size = buf_size;
  524. return next;
  525. }
  526. static int h264_split(AVCodecContext *avctx,
  527. const uint8_t *buf, int buf_size)
  528. {
  529. uint32_t state = -1;
  530. int has_sps = 0;
  531. int has_pps = 0;
  532. const uint8_t *ptr = buf, *end = buf + buf_size;
  533. int nalu_type;
  534. while (ptr < end) {
  535. ptr = avpriv_find_start_code(ptr, end, &state);
  536. if ((state & 0xFFFFFF00) != 0x100)
  537. break;
  538. nalu_type = state & 0x1F;
  539. if (nalu_type == NAL_SPS) {
  540. has_sps = 1;
  541. } else if (nalu_type == NAL_PPS)
  542. has_pps = 1;
  543. /* else if (nalu_type == 0x01 ||
  544. * nalu_type == 0x02 ||
  545. * nalu_type == 0x05) {
  546. * }
  547. */
  548. else if ((nalu_type != NAL_SEI || has_pps) &&
  549. nalu_type != NAL_AUD && nalu_type != NAL_SPS_EXT &&
  550. nalu_type != 0x0f) {
  551. if (has_sps) {
  552. while (ptr - 4 > buf && ptr[-5] == 0)
  553. ptr--;
  554. return ptr - 4 - buf;
  555. }
  556. }
  557. }
  558. return 0;
  559. }
  560. static void h264_close(AVCodecParserContext *s)
  561. {
  562. H264ParseContext *p = s->priv_data;
  563. H264Context *h = &p->h;
  564. ParseContext *pc = &p->pc;
  565. av_freep(&pc->buffer);
  566. ff_h264_free_context(h);
  567. }
  568. static av_cold int init(AVCodecParserContext *s)
  569. {
  570. H264ParseContext *p = s->priv_data;
  571. H264Context *h = &p->h;
  572. h->slice_ctx = av_mallocz(sizeof(*h->slice_ctx));
  573. if (!h->slice_ctx)
  574. return 0;
  575. h->nb_slice_ctx = 1;
  576. h->slice_context_count = 1;
  577. ff_h264dsp_init(&h->h264dsp, 8, 1);
  578. return 0;
  579. }
/* Parser descriptor registered with libavcodec for AV_CODEC_ID_H264. */
AVCodecParser ff_h264_parser = {
    .codec_ids      = { AV_CODEC_ID_H264 },
    .priv_data_size = sizeof(H264ParseContext),
    .parser_init    = init,
    .parser_parse   = h264_parse,
    .parser_close   = h264_close,
    .split          = h264_split,
};