You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

654 lines
17KB

  1. /*
  2. * FFV1 codec for libavcodec
  3. *
  4. * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, write to the Free Software
  18. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  19. *
  20. */
  21. /**
  22. * @file ffv1.c
  23. * FF Video Codec 1 (an experimental lossless codec)
  24. */
  25. #include "common.h"
  26. #include "avcodec.h"
  27. #include "dsputil.h"
  28. #include "cabac.h"
  29. #define MAX_PLANES 4
  30. #define CONTEXT_SIZE 32
#if 0
/* Disabled alternative: 9-level quantization of a 9-bit difference.
 * Index is diff biased by +256 (0..511). */
#define DEFAULT_QDIFF_COUNT (9)
static const uint8_t default_quant_table[512]={
 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3,
 4,
 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8,
 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
#else
/* Active table: maps a signed prediction difference, biased by +128
 * (index 0..255), to one of DEFAULT_QDIFF_COUNT=16 quantization levels.
 * Differences near zero get fine levels (values 7/8 around index 128);
 * large magnitudes saturate at 0 and 15. Used to build the per-pixel
 * context from neighboring quantized diffs in (en|de)code_plane(). */
#define DEFAULT_QDIFF_COUNT (16)
static const uint8_t default_quant_table[256]={
 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 6, 7,
 8,
 9,10,11,11,12,12,12,12,13,13,13,13,13,13,13,13,
14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,
15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
};
#endif
/* Collapses a 4-bit quantized difference (0..15) to one of 5 coarse
 * classes (0..4); used by the larger context model in (en|de)code_plane()
 * to keep the total context count manageable. */
static const int to5[16]={
 0,0,0,0,
 0,0,0,1,
 2,3,4,4,
 4,4,4,4,
};
/* Per-plane coding state (one per Y/Cb/Cr plane). */
typedef struct PlaneContext{
    uint8_t quant_table[256];       /* diff+128 -> quantized diff level */
    int qdiff_count;                /* number of distinct quant levels */
    int context_count;              /* number of CABAC contexts (e.g. 256) */
    uint8_t (*state)[CONTEXT_SIZE]; /* context_count CABAC state vectors */
    uint8_t interlace_bit_state[2]; /* reserved; only reset in clear_state() */
} PlaneContext;
/* Codec-wide state, shared by encoder and decoder. */
typedef struct FFV1Context{
    AVCodecContext *avctx;
    CABACContext c;                    /* arithmetic coder, shared by all planes */
    int version;                       /* bitstream version stored in the header */
    int width, height;
    int chroma_h_shift, chroma_v_shift; /* chroma subsampling log2 shifts */
    int flags;                         /* copy of avctx->flags */
    int picture_number;                /* frames coded; drives keyframe decision */
    AVFrame picture;
    int plane_count;                   /* currently always 3 (Y, Cb, Cr) */
    PlaneContext plane[MAX_PLANES];
    DSPContext dsp;
}FFV1Context;
  100. //1.774215
  101. static inline int predict(FFV1Context *s, uint8_t *src, int stride, int x, int y){
  102. if(x && y){
  103. // const int RT= src[+1-stride];
  104. const int LT= src[-1-stride];
  105. const int T= src[ -stride];
  106. const int L = src[-1 ];
  107. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  108. const int gradient= cm[L + T - LT];
  109. // return gradient;
  110. return mid_pred(L, gradient, T);
  111. }else{
  112. if(y){
  113. return src[ -stride];
  114. }else if(x){
  115. return src[-1 ];
  116. }else{
  117. return 128;
  118. }
  119. }
  120. }
#if 0
/* Disabled: plain unsigned exp-golomb CABAC symbol coder.
 * NOTE(review): the parameter list lacks a name for the CABACContext, so
 * this branch could never compile if enabled — dead code. */
static inline void put_symbol(CABACContext, uint8_t *state, int v){
    put_cabac_ueg(c, state, v, 32, 1, 4 , 32);
}

static inline int get_symbol(CABACContext, uint8_t *state){
    return get_cabac_ueg(c, state, 32, 1, 4 , 32);
}
#elif 0
/* Disabled: zero-flag + sign bit + unsigned exp-golomb magnitude coder. */
static inline void put_symbol(CABACContext *c, uint8_t *state, int v){
    if(v==0)
        put_cabac(c, state+0, 1);
    else{
        put_cabac(c, state+0, 0);
        put_cabac(c, state+1, v<0);
        if(v<0) state += 64;

        put_cabac_ueg(c, state+2, ABS(v)-1, 32, 0, 4 , 32);
    }
}

static inline int get_symbol(CABACContext *c, uint8_t *state){
    if(get_cabac(c, state+0))
        return 0;
    else{
        int sign= get_cabac(c, state+1);

        if(sign)
            return -1-get_cabac_ueg(c, state+66, 32, 0, 4 , 32);
        else
            return  1+get_cabac_ueg(c, state+2 , 32, 0, 4 , 32);
    }
}
#else
/**
 * Encode one symbol: a zero flag, then for nonzero values the exponent
 * e = av_log2(|v|) (sub-states 1..7), the e mantissa bits MSB-first
 * (sub-states 15..31, via the per-exponent offset table), and finally,
 * if is_signed, the sign bit (sub-states 8..14).
 * Values with e >= 7 code only the exponent escape; the decoder maps
 * that escape to -128, so |v| must stay below 128 for lossless round-trip.
 */
static inline void put_symbol(CABACContext *c, uint8_t *state, int v, int is_signed){
    int i;

    if(v){
        const int a= ABS(v);
        const int e= av_log2(a);

        put_cabac(c, state+0, 0);

        put_cabac_u(c, state+1, e, 7, 6, 1); //1..7
        if(e<7){
            for(i=e-1; i>=0; i--){
                /* per-exponent base index of the mantissa sub-states */
                static const int offset[7]= {15+0, 15+0, 15+1, 15+3, 15+6, 15+10, 15+11};
                put_cabac(c, state+offset[e]+i, (a>>i)&1); //15..31
            }

            if(is_signed)
                put_cabac(c, state+8 + e, v < 0); //8..14
        }
    }else{
        put_cabac(c, state+0, 1);
    }
}
/**
 * Decode one symbol written by put_symbol(): zero flag, exponent,
 * mantissa bits, optional sign.
 * Returns -128 when the exponent escape (e >= 7) is seen — note this
 * sentinel is returned even for is_signed==0, so callers reading
 * unsigned fields must treat negative results as stream corruption.
 */
static inline int get_symbol(CABACContext *c, uint8_t *state, int is_signed){
    int i;

    if(get_cabac(c, state+0))
        return 0;
    else{
        const int e= get_cabac_u(c, state+1, 7, 6, 1); //1..7
        if(e<7){
            int a= 1<<e;   /* implicit leading 1 of the magnitude */
            for(i=e-1; i>=0; i--){
                /* must match the offset table in put_symbol() */
                static const int offset[7]= {15+0, 15+0, 15+1, 15+3, 15+6, 15+10, 15+11};
                a += get_cabac(c, state+offset[e]+i)<<i; //14..31
            }

            if(is_signed && get_cabac(c, state+8 + e)) //8..14
                return -a;
            else
                return  a;
        }else
            return -128;
    }
}
#endif
/*
 * Encode one plane. For each pixel the signed prediction difference is
 * coded with put_symbol() using a context built from the quantized
 * differences of already-coded neighbors. pred_diff is a 4-row ring
 * buffer of quantized diffs (+3 margin on each side so x-2..x+1 indexing
 * never leaves the row). NOTE(review): the row buffers are VLAs sized by
 * w — large frame widths put several KB on the stack.
 */
static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
    PlaneContext * const p= &s->plane[plane_index];
    CABACContext * const c= &s->c;
    int x,y;
    uint8_t pred_diff_buffer[4][w+6];
    uint8_t *pred_diff[4]= {pred_diff_buffer[0]+3, pred_diff_buffer[1]+3, pred_diff_buffer[2]+3, pred_diff_buffer[3]+3};
//    uint8_t temp_buf[3*w], *temp= temp_buf + 3*w;

    memset(pred_diff_buffer, 0, sizeof(pred_diff_buffer));

    for(y=0; y<h; y++){
        /* rotate the ring buffer: row 3 becomes the current row */
        uint8_t *temp= pred_diff[0]; //FIXME try a normal buffer
        pred_diff[0]= pred_diff[1];
        pred_diff[1]= pred_diff[2];
        pred_diff[2]= pred_diff[3];
        pred_diff[3]= temp;

        for(x=0; x<w; x++){
            uint8_t *temp_src= src + x + stride*y;
            int diff, context, qdiff;

            /* small model: left diff + 16 * top diff (256 contexts);
             * large model adds 3 coarse (to5-mapped) neighbor diffs */
            if(p->context_count == 256)
                context= pred_diff[3+0][x-1] + 16*pred_diff[3-1][x+0];
            else
                context= pred_diff[3+0][x-1] + 16*pred_diff[3-1][x+0]
                       + 16*16*to5[pred_diff[3-1][x+1]] + 16*16*5*to5[pred_diff[3-0][x-2]] + 16*16*5*5*to5[pred_diff[3-2][x+0]];

            /* int8_t cast wraps the difference into -128..127 */
            diff = (int8_t)(temp_src[0] - predict(s, temp_src, stride, x, y));
            qdiff= p->quant_table[128+diff];

            put_symbol(c, p->state[context], diff, 1);

            pred_diff[3][x]= qdiff;
        }
    }
}
  223. static void write_quant_table(CABACContext *c, uint8_t *quant_table){
  224. int last=0;
  225. int i;
  226. uint8_t state[CONTEXT_SIZE]={0};
  227. for(i=1; i<256 ; i++){
  228. if(quant_table[i] != quant_table[i-1]){
  229. put_symbol(c, state, i-last-1, 0);
  230. last= i;
  231. }
  232. }
  233. put_symbol(c, state, i-last-1, 0);
  234. }
/*
 * Serialize the stream header: version, colorspace id, chroma-plane flag,
 * chroma shifts, transparency flag, then per plane the context-count log2
 * and the quant table. The exact call order IS the bitstream layout and
 * must mirror read_header().
 */
static void write_header(FFV1Context *f){
    uint8_t state[CONTEXT_SIZE]={0};
    int i;
    CABACContext * const c= &f->c;

    put_symbol(c, state, f->version, 0);
    put_symbol(c, state, 0, 0); //YUV cs type
    put_cabac(c, state, 1); //chroma planes
    put_symbol(c, state, f->chroma_h_shift, 0);
    put_symbol(c, state, f->chroma_v_shift, 0);
    put_cabac(c, state, 0); //no transparency plane

    for(i=0; i<3; i++){ //FIXME chroma & transparency decision
        PlaneContext * const p= &f->plane[i];

        put_symbol(c, state, av_log2(p->context_count), 0);
        write_quant_table(c, p->quant_table);
    }
}
  251. static int common_init(AVCodecContext *avctx){
  252. FFV1Context *s = avctx->priv_data;
  253. int width, height;
  254. s->avctx= avctx;
  255. s->flags= avctx->flags;
  256. dsputil_init(&s->dsp, avctx);
  257. width= s->width= avctx->width;
  258. height= s->height= avctx->height;
  259. assert(width && height);
  260. return 0;
  261. }
  262. static int encode_init(AVCodecContext *avctx)
  263. {
  264. FFV1Context *s = avctx->priv_data;
  265. int i;
  266. common_init(avctx);
  267. s->version=0;
  268. s->plane_count=3;
  269. for(i=0; i<s->plane_count; i++){
  270. PlaneContext * const p= &s->plane[i];
  271. memcpy(p->quant_table, default_quant_table, sizeof(uint8_t)*256);
  272. p->qdiff_count= DEFAULT_QDIFF_COUNT;
  273. #if 1
  274. p->context_count= 256;
  275. p->state= av_malloc(CONTEXT_SIZE*p->context_count*sizeof(uint8_t));
  276. #else
  277. p->context_count= 16*16*128 /*5*5*5*/;
  278. p->state= av_malloc(CONTEXT_SIZE*p->context_count*sizeof(uint8_t));
  279. #endif
  280. }
  281. avctx->coded_frame= &s->picture;
  282. switch(avctx->pix_fmt){
  283. case PIX_FMT_YUV444P:
  284. case PIX_FMT_YUV422P:
  285. case PIX_FMT_YUV420P:
  286. case PIX_FMT_YUV411P:
  287. case PIX_FMT_YUV410P:
  288. avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);
  289. break;
  290. default:
  291. fprintf(stderr, "format not supported\n");
  292. return -1;
  293. }
  294. s->picture_number=0;
  295. return 0;
  296. }
  297. static void clear_state(FFV1Context *f){
  298. int i, j;
  299. for(i=0; i<f->plane_count; i++){
  300. PlaneContext *p= &f->plane[i];
  301. p->interlace_bit_state[0]= 0;
  302. p->interlace_bit_state[1]= 0;
  303. for(j=0; j<p->context_count; j++){
  304. memset(p->state[j], 0, sizeof(uint8_t)*CONTEXT_SIZE);
  305. p->state[j][7] = 2*62;
  306. }
  307. }
  308. }
/*
 * Encode one frame into buf. Writes a keyframe (header + fresh contexts)
 * every gop_size frames (or always if gop_size==0), then the 3 planes.
 * Returns the number of bytes written, or -1 when strict compliance
 * forbids this experimental codec.
 */
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    FFV1Context *f = avctx->priv_data;
    CABACContext * const c= &f->c;
    AVFrame *pict = data;
    const int width= f->width;
    const int height= f->height;
    AVFrame * const p= &f->picture;

    /* experimental bitstream: refuse unless the user opted in */
    if(avctx->strict_std_compliance >= 0){
        printf("this codec is under development, files encoded with it wont be decodeable with future versions!!!\n"
               "use vstrict=-1 to use it anyway\n");
        return -1;
    }

    ff_init_cabac_encoder(c, buf, buf_size);
    ff_init_cabac_states(c, ff_h264_lps_range, ff_h264_mps_state, ff_h264_lps_state, 64);

    *p = *pict;
    p->pict_type= FF_I_TYPE;

    if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){
        put_cabac_bypass(c, 1);   /* keyframe flag */
        p->key_frame= 1;
        write_header(f);
        clear_state(f);
    }else{
        put_cabac_bypass(c, 0);
        p->key_frame= 0;
    }

    if(1){
        /* -((-x)>>s) is ceiling division by 2^s for the chroma dimensions */
        const int chroma_width = -((-width )>>f->chroma_h_shift);
        const int chroma_height= -((-height)>>f->chroma_v_shift);

        encode_plane(f, p->data[0], width, height, p->linesize[0], 0);

        encode_plane(f, p->data[1], chroma_width, chroma_height, p->linesize[1], 1);
        encode_plane(f, p->data[2], chroma_width, chroma_height, p->linesize[2], 2);
    }
    emms_c();

    f->picture_number++;

    return put_cabac_terminate(c, 1);
}
  345. static void common_end(FFV1Context *s){
  346. int i;
  347. for(i=0; i<s->plane_count; i++){
  348. PlaneContext *p= &s->plane[i];
  349. av_freep(&p->state);
  350. }
  351. }
  352. static int encode_end(AVCodecContext *avctx)
  353. {
  354. FFV1Context *s = avctx->priv_data;
  355. common_end(s);
  356. return 0;
  357. }
/*
 * Decode one plane: the exact mirror of encode_plane(). The context is
 * built from the same quantized-diff neighborhood, the coded difference
 * is read with get_symbol() and added to the spatial prediction.
 * pred_diff is the same 4-row ring buffer of quantized diffs (+3 margin
 * per side); NOTE(review): rows are VLAs sized by w.
 */
static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
    PlaneContext * const p= &s->plane[plane_index];
    CABACContext * const c= &s->c;
    int x,y;
    uint8_t pred_diff_buffer[4][w+6];
    uint8_t *pred_diff[4]= {pred_diff_buffer[0]+3, pred_diff_buffer[1]+3, pred_diff_buffer[2]+3, pred_diff_buffer[3]+3};
//    uint8_t temp_buf[3*w], *temp= temp_buf + 3*w;

    memset(pred_diff_buffer, 0, sizeof(pred_diff_buffer));

    for(y=0; y<h; y++){
        /* rotate the ring buffer: row 3 becomes the current row */
        uint8_t *temp= pred_diff[0]; //FIXME try a normal buffer
        pred_diff[0]= pred_diff[1];
        pred_diff[1]= pred_diff[2];
        pred_diff[2]= pred_diff[3];
        pred_diff[3]= temp;

        for(x=0; x<w; x++){
            uint8_t *temp_src= src + x + stride*y;
            int diff, context, qdiff;

            /* must match the context computation in encode_plane() */
            if(p->context_count == 256)
                context= pred_diff[3+0][x-1] + 16*pred_diff[3-1][x+0];
            else
                context= pred_diff[3+0][x-1] + 16*pred_diff[3-1][x+0]
                       + 16*16*to5[pred_diff[3-1][x+1]] + 16*16*5*to5[pred_diff[3-0][x-2]] + 16*16*5*5*to5[pred_diff[3-2][x+0]];

            diff= get_symbol(c, p->state[context], 1);
            temp_src[0] = predict(s, temp_src, stride, x, y) + diff;

            /* NOTE(review): get_symbol() guarantees -128..127 for valid
             * streams; this assert fires only on corrupted input, and
             * only after the pixel was already written above */
            assert(diff>= -128 && diff <= 127);
            qdiff= p->quant_table[128+diff];

            pred_diff[3][x]= qdiff;
        }
    }
}
  388. static int read_quant_table(CABACContext *c, uint8_t *quant_table){
  389. int v;
  390. int i=0;
  391. uint8_t state[CONTEXT_SIZE]={0};
  392. for(v=0; i<256 ; v++){
  393. int len= get_symbol(c, state, 0) + 1;
  394. if(len + i > 256) return -1;
  395. while(len--){
  396. quant_table[i++] = v;
  397. //printf("%2d ",v);
  398. //if(i%16==0) printf("\n");
  399. }
  400. }
  401. return v;
  402. }
  403. static int read_header(FFV1Context *f){
  404. uint8_t state[CONTEXT_SIZE]={0};
  405. int i;
  406. CABACContext * const c= &f->c;
  407. f->version= get_symbol(c, state, 0);
  408. get_symbol(c, state, 0); //YUV cs type
  409. get_cabac(c, state); //no chroma = false
  410. f->chroma_h_shift= get_symbol(c, state, 0);
  411. f->chroma_v_shift= get_symbol(c, state, 0);
  412. get_cabac(c, state); //transparency plane
  413. f->plane_count= 3;
  414. for(i=0; i<f->plane_count; i++){
  415. PlaneContext * const p= &f->plane[i];
  416. p->context_count= 1<<get_symbol(c, state, 0);
  417. p->qdiff_count= read_quant_table(c, p->quant_table);
  418. if(p->qdiff_count < 0) return -1;
  419. if(!p->state)
  420. p->state= av_malloc(CONTEXT_SIZE*p->context_count*sizeof(uint8_t));
  421. }
  422. return 0;
  423. }
  424. static int decode_init(AVCodecContext *avctx)
  425. {
  426. FFV1Context *s = avctx->priv_data;
  427. common_init(avctx);
  428. #if 0
  429. switch(s->bitstream_bpp){
  430. case 12:
  431. avctx->pix_fmt = PIX_FMT_YUV420P;
  432. break;
  433. case 16:
  434. avctx->pix_fmt = PIX_FMT_YUV422P;
  435. break;
  436. case 24:
  437. case 32:
  438. if(s->bgr32){
  439. avctx->pix_fmt = PIX_FMT_RGBA32;
  440. }else{
  441. avctx->pix_fmt = PIX_FMT_BGR24;
  442. }
  443. break;
  444. default:
  445. assert(0);
  446. }
  447. #endif
  448. return 0;
  449. }
  450. static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size){
  451. FFV1Context *f = avctx->priv_data;
  452. CABACContext * const c= &f->c;
  453. const int width= f->width;
  454. const int height= f->height;
  455. AVFrame * const p= &f->picture;
  456. int bytes_read;
  457. AVFrame *picture = data;
  458. *data_size = 0;
  459. /* no supplementary picture */
  460. if (buf_size == 0)
  461. return 0;
  462. ff_init_cabac_decoder(c, buf, buf_size);
  463. ff_init_cabac_states(c, ff_h264_lps_range, ff_h264_mps_state, ff_h264_lps_state, 64);
  464. p->reference= 0;
  465. if(avctx->get_buffer(avctx, p) < 0){
  466. fprintf(stderr, "get_buffer() failed\n");
  467. return -1;
  468. }
  469. p->pict_type= FF_I_TYPE; //FIXME I vs. P
  470. if(get_cabac_bypass(c)){
  471. p->key_frame= 1;
  472. read_header(f);
  473. clear_state(f);
  474. }else{
  475. p->key_frame= 0;
  476. }
  477. if(avctx->debug&FF_DEBUG_PICT_INFO)
  478. printf("keyframe:%d\n", p->key_frame);
  479. if(1){
  480. const int chroma_width = -((-width )>>f->chroma_h_shift);
  481. const int chroma_height= -((-height)>>f->chroma_v_shift);
  482. decode_plane(f, p->data[0], width, height, p->linesize[0], 0);
  483. decode_plane(f, p->data[1], chroma_width, chroma_height, p->linesize[1], 1);
  484. decode_plane(f, p->data[2], chroma_width, chroma_height, p->linesize[2], 2);
  485. }
  486. emms_c();
  487. f->picture_number++;
  488. *picture= *p;
  489. avctx->release_buffer(avctx, p); //FIXME
  490. *data_size = sizeof(AVFrame);
  491. bytes_read= get_cabac_terminate(c);
  492. if(bytes_read ==0) printf("error at end of frame\n");
  493. return bytes_read;
  494. }
  495. static int decode_end(AVCodecContext *avctx)
  496. {
  497. FFV1Context *s = avctx->priv_data;
  498. int i;
  499. if(avctx->get_buffer == avcodec_default_get_buffer){
  500. for(i=0; i<4; i++){
  501. av_freep(&s->picture.base[i]);
  502. s->picture.data[i]= NULL;
  503. }
  504. av_freep(&s->picture.opaque);
  505. }
  506. return 0;
  507. }
/* Decoder registration (positional AVCodec initializer). */
AVCodec ffv1_decoder = {
    "ffv1",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFV1,
    sizeof(FFV1Context),
    decode_init,
    NULL,                   /* encode */
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/,
    NULL                    /* next */
};
/* Encoder registration; trailing AVCodec fields default to zero. */
AVCodec ffv1_encoder = {
    "ffv1",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FFV1,
    sizeof(FFV1Context),
    encode_init,
    encode_frame,
    encode_end,
};