/*
 * H.26L/H.264/AVC/JVT/14496-10/... parser
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG4 part10 parser.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "parser.h"
#include "h264data.h"
#include "golomb.h"

#include <assert.h>
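
/**
 * Find the end of the current frame in the bitstream.
 *
 * @return the position of the first byte of the next frame,
 *         or END_NOT_FOUND if no frame end was detected
 */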
static int ff_h264_find_frame_end(H264Context *h, const uint8_t *buf, int buf_size)
{
    int i;
    uint32_t state;
    ParseContext *pc = &(h->s.parse_context);
//    mb_addr= pc->mb_addr - 1;
    state = pc->state;
    if (state > 13)
        state = 7;
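
    /*
     * Start code / frame boundary state machine:
     *   7      scanning for a zero byte
     *   2,1,0  one, two, or three (or more) consecutive zero bytes seen
     *   4,5    a 3- or 4(+)-byte start code was just read; the current byte
     *          is the NAL unit header
     *   12,13  inside a slice NAL unit while a frame has already started;
     *          a set first bit (first_mb_in_slice == 0) marks a new frame
     */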
    for (i = 0; i < buf_size; i++) {
        if (state == 7) {
#if HAVE_FAST_UNALIGNED
            /* we check i < buf_size instead of i + 3/7 because it is simpler
             * and there should be FF_INPUT_BUFFER_PADDING_SIZE bytes at the end
             */
# if HAVE_FAST_64BIT
            /* skip 8 bytes at a time as long as none of them is a zero byte
             * (the usual "word contains a zero byte" bit trick) */
            while (i < buf_size &&
                   !((~*(const uint64_t *)(buf + i) & (*(const uint64_t *)(buf + i) - 0x0101010101010101ULL)) & 0x8080808080808080ULL))
                i += 8;
# else
            while (i < buf_size &&
                   !((~*(const uint32_t *)(buf + i) & (*(const uint32_t *)(buf + i) - 0x01010101U)) & 0x80808080U))
                i += 4;
# endif
#endif
            for (; i < buf_size; i++) {
                if (!buf[i]) {
                    state = 2;
                    break;
                }
            }
        } else if (state <= 2) {
            if (buf[i] == 1)
                state ^= 5;    // 2->7, 1->4, 0->5
            else if (buf[i])
                state = 7;
            else
                state >>= 1;   // 2->1, 1->0, 0->0
        } else if (state <= 5) {
            int v = buf[i] & 0x1F;
            if (v == 6 || v == 7 || v == 8 || v == 9) {
                // SEI, SPS, PPS or AUD: if a frame has already started, it ends here
                if (pc->frame_start_found) {
                    i++;
                    goto found;
                }
            } else if (v == 1 || v == 2 || v == 5) {
                // slice NAL unit
                if (pc->frame_start_found) {
                    state += 8;
                    continue;
                } else
                    pc->frame_start_found = 1;
            }
            state = 7;
        } else {
            if (buf[i] & 0x80)
                goto found;
            state = 7;
        }
    }
    pc->state = state;
    return END_NOT_FOUND;

found:
    pc->state             = 7;
    pc->frame_start_found = 0;
    // back up over the NAL unit header and the 3- or 4-byte start code
    return i - (state & 5);
}

/**
 * Parse the NAL units of a found picture and decode some basic information.
 *
 * @param s parser context.
 * @param avctx codec context.
 * @param buf buffer with field/frame data.
 * @param buf_size size of the buffer.
 * @return 0 on success, a negative value on error.
 */
static inline int parse_nal_units(AVCodecParserContext *s,
                                  AVCodecContext *avctx,
                                  const uint8_t *buf, int buf_size)
{
    H264Context *h         = s->priv_data;
    const uint8_t *buf_end = buf + buf_size;
    unsigned int pps_id;
    unsigned int slice_type;
    int state = -1;
    const uint8_t *ptr;

    /* set some sane default values */
    s->pict_type = AV_PICTURE_TYPE_I;
    s->key_frame = 0;

    h->s.avctx                      = avctx;
    h->sei_recovery_frame_cnt       = -1;
    h->sei_dpb_output_delay         = 0;
    h->sei_cpb_removal_delay        = -1;
    h->sei_buffering_period_present = 0;

    if (!buf_size)
        return 0;

    for (;;) {
        int src_length, dst_length, consumed;

        buf = avpriv_mpv_find_start_code(buf, buf_end, &state);
        if (buf >= buf_end)
            break;
        --buf;
        src_length = buf_end - buf;
        switch (state & 0x1f) {
        case NAL_SLICE:
        case NAL_IDR_SLICE:
            // Do not walk the whole buffer just to decode slice header
            if (src_length > 20)
                src_length = 20;
            break;
        }

        ptr = ff_h264_decode_nal(h, buf, &dst_length, &consumed, src_length);
        if (ptr == NULL || dst_length < 0)
            break;

        init_get_bits(&h->s.gb, ptr, 8 * dst_length);
        switch (h->nal_unit_type) {
        case NAL_SPS:
            ff_h264_decode_seq_parameter_set(h);
            break;
        case NAL_PPS:
            ff_h264_decode_picture_parameter_set(h, h->s.gb.size_in_bits);
            break;
        case NAL_SEI:
            ff_h264_decode_sei(h);
            break;
        case NAL_IDR_SLICE:
            s->key_frame = 1;
            /* fall through */
        case NAL_SLICE:
            get_ue_golomb(&h->s.gb);  // skip first_mb_in_slice
            slice_type   = get_ue_golomb_31(&h->s.gb);
            s->pict_type = golomb_to_pict_type[slice_type % 5];
            if (h->sei_recovery_frame_cnt >= 0) {
                /* key frame, since recovery_frame_cnt is set */
                s->key_frame = 1;
            }
            pps_id = get_ue_golomb(&h->s.gb);
            if (pps_id >= MAX_PPS_COUNT) {
                av_log(h->s.avctx, AV_LOG_ERROR, "pps_id out of range\n");
                return -1;
            }
            if (!h->pps_buffers[pps_id]) {
                av_log(h->s.avctx, AV_LOG_ERROR, "non-existing PPS referenced\n");
                return -1;
            }
            h->pps = *h->pps_buffers[pps_id];
            if (!h->sps_buffers[h->pps.sps_id]) {
                av_log(h->s.avctx, AV_LOG_ERROR, "non-existing SPS referenced\n");
                return -1;
            }
            h->sps       = *h->sps_buffers[h->pps.sps_id];
            h->frame_num = get_bits(&h->s.gb, h->sps.log2_max_frame_num);

            avctx->profile = ff_h264_get_profile(&h->sps);
            avctx->level   = h->sps.level_idc;

            if (h->sps.frame_mbs_only_flag) {
                h->s.picture_structure = PICT_FRAME;
            } else {
                if (get_bits1(&h->s.gb)) {  // field_pic_flag
                    h->s.picture_structure = PICT_TOP_FIELD + get_bits1(&h->s.gb);  // bottom_field_flag
                } else {
                    h->s.picture_structure = PICT_FRAME;
                }
            }

            if (h->sps.pic_struct_present_flag) {
                switch (h->sei_pic_struct) {
                case SEI_PIC_STRUCT_TOP_FIELD:
                case SEI_PIC_STRUCT_BOTTOM_FIELD:
                    s->repeat_pict = 0;
                    break;
                case SEI_PIC_STRUCT_FRAME:
                case SEI_PIC_STRUCT_TOP_BOTTOM:
                case SEI_PIC_STRUCT_BOTTOM_TOP:
                    s->repeat_pict = 1;
                    break;
                case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
                case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
                    s->repeat_pict = 2;
                    break;
                case SEI_PIC_STRUCT_FRAME_DOUBLING:
                    s->repeat_pict = 3;
                    break;
                case SEI_PIC_STRUCT_FRAME_TRIPLING:
                    s->repeat_pict = 5;
                    break;
                default:
                    s->repeat_pict = h->s.picture_structure == PICT_FRAME ? 1 : 0;
                    break;
                }
            } else {
                s->repeat_pict = h->s.picture_structure == PICT_FRAME ? 1 : 0;
            }

            return 0; /* no need to evaluate the rest */
        }
        buf += consumed;
    }
    /* didn't find a picture! */
    av_log(h->s.avctx, AV_LOG_ERROR, "missing picture in access unit\n");
    return -1;
}
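
/**
 * Parser callback: combine the input data into complete frames, parse the
 * NAL units of the current picture and pass timing information taken from
 * the SEI messages on to the generic parser code.
 */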
static int h264_parse(AVCodecParserContext *s,
                      AVCodecContext *avctx,
                      const uint8_t **poutbuf, int *poutbuf_size,
                      const uint8_t *buf, int buf_size)
{
    H264Context *h   = s->priv_data;
    ParseContext *pc = &h->s.parse_context;
    int next;

    if (!h->got_first) {
        h->got_first = 1;
        if (avctx->extradata_size) {
            h->s.avctx = avctx;
            // Must be done like in the decoder, otherwise opening the parser,
            // creating extradata, and then closing and opening the parser
            // again would cause has_b_frames to always be set.
            // NB: estimate_timings_from_pts behaves exactly like this.
            if (!avctx->has_b_frames)
                h->s.low_delay = 1;
            ff_h264_decode_extradata(h);
        }
    }

    if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) {
        next = buf_size;
    } else {
        next = ff_h264_find_frame_end(h, buf, buf_size);

        if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
            *poutbuf      = NULL;
            *poutbuf_size = 0;
            return buf_size;
        }

        if (next < 0 && next != END_NOT_FOUND) {
            assert(pc->last_index + next >= 0);
            ff_h264_find_frame_end(h, &pc->buffer[pc->last_index + next], -next); // update state
        }
    }

    parse_nal_units(s, avctx, buf, buf_size);

    if (h->sei_cpb_removal_delay >= 0) {
        s->dts_sync_point    = h->sei_buffering_period_present;
        s->dts_ref_dts_delta = h->sei_cpb_removal_delay;
        s->pts_dts_delta     = h->sei_dpb_output_delay;
    } else {
        s->dts_sync_point    = INT_MIN;
        s->dts_ref_dts_delta = INT_MIN;
        s->pts_dts_delta     = INT_MIN;
    }

    if (s->flags & PARSER_FLAG_ONCE) {
        s->flags &= PARSER_FLAG_COMPLETE_FRAMES;
    }

    *poutbuf      = buf;
    *poutbuf_size = buf_size;
    return next;
}
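
/**
 * Split out in-band extradata: once an SPS has been seen, return the number
 * of bytes up to the first following NAL unit that is neither an SPS, a PPS
 * nor an access unit delimiter, otherwise return 0.
 */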
static int h264_split(AVCodecContext *avctx,
                      const uint8_t *buf, int buf_size)
{
    int i;
    uint32_t state = -1;
    int has_sps    = 0;

    for (i = 0; i <= buf_size; i++) {
        if ((state & 0xFFFFFF1F) == 0x107)
            has_sps = 1;
/*        if ((state & 0xFFFFFF1F) == 0x101 ||
            (state & 0xFFFFFF1F) == 0x102 ||
            (state & 0xFFFFFF1F) == 0x105) {
        }*/
        if ((state & 0xFFFFFF00) == 0x100 &&
            (state & 0xFFFFFF1F) != 0x107 &&
            (state & 0xFFFFFF1F) != 0x108 &&
            (state & 0xFFFFFF1F) != 0x109) {
            if (has_sps) {
                while (i > 4 && buf[i - 5] == 0)
                    i--;
                return i - 4;
            }
        }
        if (i < buf_size)
            state = (state << 8) | buf[i];
    }
    return 0;
}

static void close(AVCodecParserContext *s)
{
    H264Context *h   = s->priv_data;
    ParseContext *pc = &h->s.parse_context;

    av_free(pc->buffer);
    ff_h264_free_context(h);
}

static int init(AVCodecParserContext *s)
{
    H264Context *h = s->priv_data;

    h->thread_context[0]      = h;
    h->s.slice_context_count  = 1;
    return 0;
}

AVCodecParser ff_h264_parser = {
    .codec_ids      = { AV_CODEC_ID_H264 },
    .priv_data_size = sizeof(H264Context),
    .parser_init    = init,
    .parser_parse   = h264_parse,
    .parser_close   = close,
    .split          = h264_split,
};