/*
 * H.26L/H.264/AVC/JVT/14496-10/... parser
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG4 part10 parser.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include <assert.h>
#include <stdint.h>

#include "libavutil/avutil.h"
#include "libavutil/error.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/pixfmt.h"

#include "get_bits.h"
#include "golomb.h"
#include "h264.h"
#include "h264_sei.h"
#include "h264data.h"
#include "internal.h"
#include "mpegutils.h"
#include "parser.h"
typedef struct H264ParseContext {
    H264Context h;
    ParseContext pc;
    H264ParamSets ps;
    H264DSPContext h264dsp;
    H264POCContext poc;
    H264SEIContext sei;
    int got_first;
} H264ParseContext;
static int h264_find_frame_end(H264ParseContext *p, const uint8_t *buf,
                               int buf_size)
{
    int i;
    uint32_t state;
    ParseContext *pc = &p->pc;
//    mb_addr= pc->mb_addr - 1;

    /* pc->state is a small per-byte state machine:
     *   7      scanning for a start code
     *   0..2   inside the zero bytes of a 00 00 (00) 01 start code
     *   4..5   the current byte is a NAL unit header
     *   12..13 a slice of the current picture has been seen; a following
     *          slice header with first_mb_in_slice == 0 ends the picture */
    state = pc->state;
    if (state > 13)
        state = 7;

    for (i = 0; i < buf_size; i++) {
        if (state == 7) {
            i += p->h264dsp.startcode_find_candidate(buf + i, buf_size - i);
            if (i < buf_size)
                state = 2;
        } else if (state <= 2) {
            if (buf[i] == 1)
                state ^= 5;            // 2->7, 1->4, 0->5
            else if (buf[i])
                state = 7;
            else
                state >>= 1;           // 2->1, 1->0, 0->0
        } else if (state <= 5) {
            int nalu_type = buf[i] & 0x1F;
            if (nalu_type == NAL_SEI || nalu_type == NAL_SPS ||
                nalu_type == NAL_PPS || nalu_type == NAL_AUD) {
                if (pc->frame_start_found) {
                    i++;
                    goto found;
                }
            } else if (nalu_type == NAL_SLICE || nalu_type == NAL_DPA ||
                       nalu_type == NAL_IDR_SLICE) {
                if (pc->frame_start_found) {
                    state += 8;
                    continue;
                } else
                    pc->frame_start_found = 1;
            }
            state = 7;
        } else {
            // first_mb_in_slice is 0, probably the first nal of a new slice
            if (buf[i] & 0x80)
                goto found;
            state = 7;
        }
    }
    pc->state = state;
    return END_NOT_FOUND;

found:
    pc->state             = 7;
    pc->frame_start_found = 0;
    return i - (state & 5);
}
static int scan_mmco_reset(AVCodecParserContext *s, GetBitContext *gb)
{
    H264PredWeightTable pwt;
    int slice_type_nos = s->pict_type & 3;
    H264ParseContext *p = s->priv_data;
    H264Context      *h = &p->h;
    int list_count, ref_count[2];

    if (p->ps.pps->redundant_pic_cnt_present)
        get_ue_golomb(gb); // redundant_pic_count

    if (slice_type_nos == AV_PICTURE_TYPE_B)
        get_bits1(gb); // direct_spatial_mv_pred

    if (ff_h264_parse_ref_count(&list_count, ref_count, gb, p->ps.pps,
                                slice_type_nos, h->picture_structure) < 0)
        return AVERROR_INVALIDDATA;

    if (slice_type_nos != AV_PICTURE_TYPE_I) {
        int list;
        for (list = 0; list < list_count; list++) {
            if (get_bits1(gb)) {
                int index;
                for (index = 0; ; index++) {
                    unsigned int reordering_of_pic_nums_idc = get_ue_golomb_31(gb);

                    if (reordering_of_pic_nums_idc < 3)
                        get_ue_golomb(gb);
                    else if (reordering_of_pic_nums_idc > 3) {
                        av_log(h->avctx, AV_LOG_ERROR,
                               "illegal reordering_of_pic_nums_idc %d\n",
                               reordering_of_pic_nums_idc);
                        return AVERROR_INVALIDDATA;
                    } else
                        break;

                    if (index >= ref_count[list]) {
                        av_log(h->avctx, AV_LOG_ERROR,
                               "reference count %d overflow\n", index);
                        return AVERROR_INVALIDDATA;
                    }
                }
            }
        }
    }

    if ((p->ps.pps->weighted_pred && slice_type_nos == AV_PICTURE_TYPE_P) ||
        (p->ps.pps->weighted_bipred_idc == 1 && slice_type_nos == AV_PICTURE_TYPE_B))
        ff_h264_pred_weight_table(gb, p->ps.sps, ref_count, slice_type_nos,
                                  &pwt);

    if (get_bits1(gb)) { // adaptive_ref_pic_marking_mode_flag
        int i;
        for (i = 0; i < MAX_MMCO_COUNT; i++) {
            MMCOOpcode opcode = get_ue_golomb_31(gb);

            if (opcode > (unsigned) MMCO_LONG) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "illegal memory management control operation %d\n",
                       opcode);
                return AVERROR_INVALIDDATA;
            }

            if (opcode == MMCO_END)
                return 0;
            else if (opcode == MMCO_RESET)
                return 1;

            if (opcode == MMCO_SHORT2UNUSED || opcode == MMCO_SHORT2LONG)
                get_ue_golomb(gb);
            if (opcode == MMCO_SHORT2LONG || opcode == MMCO_LONG2UNUSED ||
                opcode == MMCO_LONG || opcode == MMCO_SET_MAX_LONG)
                get_ue_golomb_31(gb);
        }
    }

    return 0;
}
/**
 * Parse NAL units of found picture and decode some basic information.
 *
 * @param s parser context.
 * @param avctx codec context.
 * @param buf buffer with field/frame data.
 * @param buf_size size of the buffer.
 */
static inline int parse_nal_units(AVCodecParserContext *s,
                                  AVCodecContext *avctx,
                                  const uint8_t *buf, int buf_size)
{
    H264ParseContext *p = s->priv_data;
    H264Context      *h = &p->h;
    const uint8_t *buf_end = buf + buf_size;
    H2645NAL nal = { NULL };

    unsigned int pps_id;
    unsigned int slice_type;
    int state = -1, got_reset = 0;
    int field_poc[2];
    int ret;

    /* set some sane default values */
    s->pict_type         = AV_PICTURE_TYPE_I;
    s->key_frame         = 0;
    s->picture_structure = AV_PICTURE_STRUCTURE_UNKNOWN;

    h->avctx = avctx;
    ff_h264_sei_uninit(&p->sei);

    if (!buf_size)
        return 0;

    for (;;) {
        const SPS *sps;
        int src_length, consumed;
        buf = avpriv_find_start_code(buf, buf_end, &state);
        if (buf >= buf_end)
            break;
        --buf;
        src_length = buf_end - buf;
        switch (state & 0x1f) {
        case NAL_SLICE:
        case NAL_IDR_SLICE:
            // Do not walk the whole buffer just to decode slice header
            if ((state & 0x1f) == NAL_IDR_SLICE || ((state >> 5) & 0x3) == 0) {
                /* IDR or disposable slice
                 * No need to decode many bytes because MMCOs shall not be present. */
                if (src_length > 60)
                    src_length = 60;
            } else {
                /* To decode up to MMCOs */
                if (src_length > 1000)
                    src_length = 1000;
            }
            break;
        }
        consumed = ff_h2645_extract_rbsp(buf, src_length, &nal);
        if (consumed < 0)
            break;

        ret = init_get_bits(&nal.gb, nal.data, nal.size * 8);
        if (ret < 0)
            goto fail;

        get_bits1(&nal.gb);
        nal.ref_idc = get_bits(&nal.gb, 2);
        nal.type    = get_bits(&nal.gb, 5);

        h->gb            = nal.gb;
        h->nal_ref_idc   = nal.ref_idc;
        h->nal_unit_type = nal.type;
        switch (h->nal_unit_type) {
        case NAL_SPS:
            ff_h264_decode_seq_parameter_set(&nal.gb, avctx, &p->ps);
            break;
        case NAL_PPS:
            ff_h264_decode_picture_parameter_set(&nal.gb, avctx, &p->ps,
                                                 nal.size_bits);
            break;
        case NAL_SEI:
            ff_h264_sei_decode(&p->sei, &nal.gb, &p->ps, avctx);
            break;
        case NAL_IDR_SLICE:
            s->key_frame = 1;

            p->poc.prev_frame_num        = 0;
            p->poc.prev_frame_num_offset = 0;
            p->poc.prev_poc_msb          =
            p->poc.prev_poc_lsb          = 0;
        /* fall through */
        case NAL_SLICE:
            get_ue_golomb(&nal.gb);  // skip first_mb_in_slice
            slice_type   = get_ue_golomb_31(&nal.gb);
            s->pict_type = ff_h264_golomb_to_pict_type[slice_type % 5];
            if (p->sei.recovery_point.recovery_frame_cnt >= 0) {
                /* key frame, since recovery_frame_cnt is set */
                s->key_frame = 1;
            }
            pps_id = get_ue_golomb(&nal.gb);
            if (pps_id >= MAX_PPS_COUNT) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "pps_id %u out of range\n", pps_id);
                goto fail;
            }
            if (!p->ps.pps_list[pps_id]) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "non-existing PPS %u referenced\n", pps_id);
                goto fail;
            }
            p->ps.pps = (const PPS *)p->ps.pps_list[pps_id]->data;
            if (!p->ps.sps_list[p->ps.pps->sps_id]) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "non-existing SPS %u referenced\n", p->ps.pps->sps_id);
                goto fail;
            }
            p->ps.sps = (SPS *)p->ps.sps_list[p->ps.pps->sps_id]->data;
            h->ps.sps = p->ps.sps;
            h->ps.pps = p->ps.pps;
            sps       = p->ps.sps;

            p->poc.frame_num = get_bits(&nal.gb, sps->log2_max_frame_num);

            s->coded_width  = 16 * sps->mb_width;
            s->coded_height = 16 * sps->mb_height;
            s->width        = s->coded_width  - (sps->crop_right + sps->crop_left);
            s->height       = s->coded_height - (sps->crop_top   + sps->crop_bottom);
            if (s->width <= 0 || s->height <= 0) {
                s->width  = s->coded_width;
                s->height = s->coded_height;
            }

            switch (sps->bit_depth_luma) {
            case 9:
                if (CHROMA444(h))      s->format = AV_PIX_FMT_YUV444P9;
                else if (CHROMA422(h)) s->format = AV_PIX_FMT_YUV422P9;
                else                   s->format = AV_PIX_FMT_YUV420P9;
                break;
            case 10:
                if (CHROMA444(h))      s->format = AV_PIX_FMT_YUV444P10;
                else if (CHROMA422(h)) s->format = AV_PIX_FMT_YUV422P10;
                else                   s->format = AV_PIX_FMT_YUV420P10;
                break;
            case 8:
                if (CHROMA444(h))      s->format = AV_PIX_FMT_YUV444P;
                else if (CHROMA422(h)) s->format = AV_PIX_FMT_YUV422P;
                else                   s->format = AV_PIX_FMT_YUV420P;
                break;
            default:
                s->format = AV_PIX_FMT_NONE;
            }
            avctx->profile = ff_h264_get_profile(sps);
            avctx->level   = sps->level_idc;

            if (sps->frame_mbs_only_flag) {
                h->picture_structure = PICT_FRAME;
            } else {
                if (get_bits1(&nal.gb)) { // field_pic_flag
                    h->picture_structure = PICT_TOP_FIELD + get_bits1(&nal.gb); // bottom_field_flag
                } else {
                    h->picture_structure = PICT_FRAME;
                }
            }

            if (h->nal_unit_type == NAL_IDR_SLICE)
                get_ue_golomb(&nal.gb); /* idr_pic_id */
            if (sps->poc_type == 0) {
                p->poc.poc_lsb = get_bits(&nal.gb, sps->log2_max_poc_lsb);

                if (p->ps.pps->pic_order_present == 1 &&
                    h->picture_structure == PICT_FRAME)
                    p->poc.delta_poc_bottom = get_se_golomb(&nal.gb);
            }

            if (sps->poc_type == 1 &&
                !sps->delta_pic_order_always_zero_flag) {
                p->poc.delta_poc[0] = get_se_golomb(&nal.gb);

                if (p->ps.pps->pic_order_present == 1 &&
                    h->picture_structure == PICT_FRAME)
                    p->poc.delta_poc[1] = get_se_golomb(&nal.gb);
            }

            /* Decode POC of this picture.
             * The prev_ values needed for decoding POC of the next picture are not set here. */
            field_poc[0] = field_poc[1] = INT_MAX;
            ff_h264_init_poc(field_poc, &s->output_picture_number, sps,
                             &p->poc, h->picture_structure, nal.ref_idc);

            /* Continue parsing to check if MMCO_RESET is present.
             * FIXME: MMCO_RESET could appear in non-first slice.
             *        Maybe, we should parse all undisposable non-IDR slice of this
             *        picture until encountering MMCO_RESET in a slice of it. */
            if (h->nal_ref_idc && h->nal_unit_type != NAL_IDR_SLICE) {
                got_reset = scan_mmco_reset(s, &nal.gb);
                if (got_reset < 0)
                    goto fail;
            }

            /* Set up the prev_ values for decoding POC of the next picture. */
            p->poc.prev_frame_num        = got_reset ? 0 : p->poc.frame_num;
            p->poc.prev_frame_num_offset = got_reset ? 0 : p->poc.frame_num_offset;
            if (h->nal_ref_idc != 0) {
                if (!got_reset) {
                    p->poc.prev_poc_msb = p->poc.poc_msb;
                    p->poc.prev_poc_lsb = p->poc.poc_lsb;
                } else {
                    p->poc.prev_poc_msb = 0;
                    p->poc.prev_poc_lsb =
                        h->picture_structure == PICT_BOTTOM_FIELD ? 0 : field_poc[0];
                }
            }

            if (sps->pic_struct_present_flag) {
                switch (p->sei.picture_timing.pic_struct) {
                case SEI_PIC_STRUCT_TOP_FIELD:
                case SEI_PIC_STRUCT_BOTTOM_FIELD:
                    s->repeat_pict = 0;
                    break;
                case SEI_PIC_STRUCT_FRAME:
                case SEI_PIC_STRUCT_TOP_BOTTOM:
                case SEI_PIC_STRUCT_BOTTOM_TOP:
                    s->repeat_pict = 1;
                    break;
                case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
                case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
                    s->repeat_pict = 2;
                    break;
                case SEI_PIC_STRUCT_FRAME_DOUBLING:
                    s->repeat_pict = 3;
                    break;
                case SEI_PIC_STRUCT_FRAME_TRIPLING:
                    s->repeat_pict = 5;
                    break;
                default:
                    s->repeat_pict = h->picture_structure == PICT_FRAME ? 1 : 0;
                    break;
                }
            } else {
                s->repeat_pict = h->picture_structure == PICT_FRAME ? 1 : 0;
            }
            if (h->picture_structure == PICT_FRAME) {
                s->picture_structure = AV_PICTURE_STRUCTURE_FRAME;
                if (sps->pic_struct_present_flag) {
                    switch (p->sei.picture_timing.pic_struct) {
                    case SEI_PIC_STRUCT_TOP_BOTTOM:
                    case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
                        s->field_order = AV_FIELD_TT;
                        break;
                    case SEI_PIC_STRUCT_BOTTOM_TOP:
                    case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
                        s->field_order = AV_FIELD_BB;
                        break;
                    default:
                        s->field_order = AV_FIELD_PROGRESSIVE;
                        break;
                    }
                } else {
                    if (field_poc[0] < field_poc[1])
                        s->field_order = AV_FIELD_TT;
                    else if (field_poc[0] > field_poc[1])
                        s->field_order = AV_FIELD_BB;
                    else
                        s->field_order = AV_FIELD_PROGRESSIVE;
                }
            } else {
                if (h->picture_structure == PICT_TOP_FIELD)
                    s->picture_structure = AV_PICTURE_STRUCTURE_TOP_FIELD;
                else
                    s->picture_structure = AV_PICTURE_STRUCTURE_BOTTOM_FIELD;
                s->field_order = AV_FIELD_UNKNOWN;
            }

            av_freep(&nal.rbsp_buffer);
            return 0; /* no need to evaluate the rest */
        }
        buf += consumed;
    }
    /* didn't find a picture! */
    av_log(h->avctx, AV_LOG_ERROR, "missing picture in access unit\n");

fail:
    av_freep(&nal.rbsp_buffer);
    return -1;
}
static int h264_parse(AVCodecParserContext *s,
                      AVCodecContext *avctx,
                      const uint8_t **poutbuf, int *poutbuf_size,
                      const uint8_t *buf, int buf_size)
{
    H264ParseContext *p = s->priv_data;
    H264Context      *h = &p->h;
    ParseContext *pc = &p->pc;
    int next;

    if (!p->got_first) {
        p->got_first = 1;
        if (avctx->extradata_size) {
            h->avctx = avctx;
            // must be done like in the decoder.
            // otherwise opening the parser, creating extradata,
            // and then closing and opening again
            // will cause has_b_frames to be always set.
            // NB: estimate_timings_from_pts behaves exactly like this.
            if (!avctx->has_b_frames)
                h->low_delay = 1;
            ff_h264_decode_extradata(h);
        }
    }

    if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) {
        next = buf_size;
    } else {
        next = h264_find_frame_end(p, buf, buf_size);

        if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
            *poutbuf      = NULL;
            *poutbuf_size = 0;
            return buf_size;
        }

        if (next < 0 && next != END_NOT_FOUND) {
            assert(pc->last_index + next >= 0);
            h264_find_frame_end(p, &pc->buffer[pc->last_index + next], -next); // update state
        }
    }

    parse_nal_units(s, avctx, buf, buf_size);

    if (p->sei.picture_timing.cpb_removal_delay >= 0) {
        s->dts_sync_point    = p->sei.buffering_period.present;
        s->dts_ref_dts_delta = p->sei.picture_timing.cpb_removal_delay;
        s->pts_dts_delta     = p->sei.picture_timing.dpb_output_delay;
    } else {
        s->dts_sync_point    = INT_MIN;
        s->dts_ref_dts_delta = INT_MIN;
        s->pts_dts_delta     = INT_MIN;
    }

    if (s->flags & PARSER_FLAG_ONCE) {
        s->flags &= PARSER_FLAG_COMPLETE_FRAMES;
    }

    *poutbuf      = buf;
    *poutbuf_size = buf_size;
    return next;
}
static int h264_split(AVCodecContext *avctx,
                      const uint8_t *buf, int buf_size)
{
    int i;
    uint32_t state = -1;
    int has_sps    = 0;

    for (i = 0; i <= buf_size; i++) {
        if ((state & 0xFFFFFF1F) == 0x107)
            has_sps = 1;
        /*  if((state&0xFFFFFF1F) == 0x101 ||
         *     (state&0xFFFFFF1F) == 0x102 ||
         *     (state&0xFFFFFF1F) == 0x105) {
         *  }
         */
        if ((state & 0xFFFFFF00) == 0x100 && (state & 0xFFFFFF1F) != 0x106 &&
            (state & 0xFFFFFF1F) != 0x107 && (state & 0xFFFFFF1F) != 0x108 &&
            (state & 0xFFFFFF1F) != 0x109 && (state & 0xFFFFFF1F) != 0x10d &&
            (state & 0xFFFFFF1F) != 0x10f) {
            if (has_sps) {
                while (i > 4 && buf[i - 5] == 0)
                    i--;
                return i - 4;
            }
        }
        if (i < buf_size)
            state = (state << 8) | buf[i];
    }
    return 0;
}
static void h264_close(AVCodecParserContext *s)
{
    H264ParseContext *p = s->priv_data;
    H264Context      *h = &p->h;
    ParseContext *pc = &p->pc;
    int i;

    av_free(pc->buffer);
    ff_h264_free_context(h);

    ff_h264_sei_uninit(&p->sei);

    for (i = 0; i < FF_ARRAY_ELEMS(p->ps.sps_list); i++)
        av_buffer_unref(&p->ps.sps_list[i]);

    for (i = 0; i < FF_ARRAY_ELEMS(p->ps.pps_list); i++)
        av_buffer_unref(&p->ps.pps_list[i]);
}
static av_cold int init(AVCodecParserContext *s)
{
    H264ParseContext *p = s->priv_data;
    H264Context      *h = &p->h;

    h->slice_ctx = av_mallocz(sizeof(*h->slice_ctx));
    if (!h->slice_ctx)
        return 0;
    h->nb_slice_ctx = 1;

    h->slice_context_count = 1;
    ff_h264dsp_init(&p->h264dsp, 8, 1);

    return 0;
}
AVCodecParser ff_h264_parser = {
    .codec_ids      = { AV_CODEC_ID_H264 },
    .priv_data_size = sizeof(H264ParseContext),
    .parser_init    = init,
    .parser_parse   = h264_parse,
    .parser_close   = h264_close,
    .split          = h264_split,
};
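
For reference, a minimal sketch of how an application might drive this parser through the public libavcodec parser API (av_parser_init, av_parser_parse2, av_parser_close). This is not part of the file above; the input file name and the printed fields are illustrative only, and real code needs full error handling.

/* Feed raw Annex B H.264 bytes through the parser and report how it
 * splits them into pictures. Sketch only, assuming a Libav/FFmpeg build
 * where codecs and parsers are registered via avcodec_register_all(). */
#include <stdio.h>
#include "libavcodec/avcodec.h"

int main(void)
{
    AVCodecContext *avctx;
    AVCodecParserContext *parser;
    FILE *f;
    uint8_t inbuf[4096];
    size_t n;

    avcodec_register_all();

    avctx  = avcodec_alloc_context3(avcodec_find_decoder(AV_CODEC_ID_H264));
    parser = av_parser_init(AV_CODEC_ID_H264);
    f      = fopen("input.h264", "rb");   /* illustrative file name */
    if (!avctx || !parser || !f)
        return 1;

    while ((n = fread(inbuf, 1, sizeof(inbuf), f)) > 0) {
        const uint8_t *data = inbuf;
        int size = (int)n;

        while (size > 0) {
            uint8_t *out;
            int out_size;
            /* The parser buffers input until h264_find_frame_end() sees a
             * complete picture, then returns it in out/out_size. */
            int used = av_parser_parse2(parser, avctx, &out, &out_size,
                                        data, size,
                                        AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
            if (used < 0)
                break;
            data += used;
            size -= used;
            if (out_size > 0)
                printf("picture: %d bytes, key_frame=%d\n",
                       out_size, parser->key_frame);
        }
    }

    fclose(f);
    av_parser_close(parser);
    avcodec_free_context(&avctx);
    return 0;
}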