You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

670 lines
18KB

  1. /*
  2. * FFV1 codec for libavcodec
  3. *
  4. * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, write to the Free Software
  18. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  19. *
  20. */
  21. /**
  22. * @file ffv1.c
  23. * FF Video Codec 1 (an experimental lossless codec)
  24. */
  25. #include "common.h"
  26. #include "avcodec.h"
  27. #include "dsputil.h"
  28. #include "cabac.h"
  29. #define MAX_PLANES 4
  30. #if 0
  31. #define DEFAULT_QDIFF_COUNT (9)
  32. static const uint8_t default_quant_table[512]={
  33. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  34. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  35. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  36. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  37. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  38. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  39. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  40. 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3,
  41. 4,
  42. 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8,
  43. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  44. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  45. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  46. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  47. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  48. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  49. 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
  50. };
  51. #else
  52. #define DEFAULT_QDIFF_COUNT (16)
  53. static const uint8_t default_quant_table[256]={
  54. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  55. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  56. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  57. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  58. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  59. 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  60. 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
  61. 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 6, 7,
  62. 8,
  63. 9,10,11,11,12,12,12,12,13,13,13,13,13,13,13,13,
  64. 14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,
  65. 15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
  66. 15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
  67. 15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
  68. 15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
  69. 15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
  70. 15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,
  71. };
  72. #endif
  73. static const int to8[16]={
  74. 0,1,1,1,
  75. 1,2,2,3,
  76. 4,5,6,6,
  77. 7,7,7,7
  78. };
  79. typedef struct PlaneContext{
  80. uint8_t quant_table[256];
  81. int qdiff_count;
  82. int context_count;
  83. uint8_t (*state)[64]; //FIXME 64
  84. uint8_t interlace_bit_state[2];
  85. } PlaneContext;
  86. typedef struct FFV1Context{
  87. AVCodecContext *avctx;
  88. CABACContext c;
  89. int version;
  90. int width, height;
  91. int chroma_h_shift, chroma_v_shift;
  92. int flags;
  93. int picture_number;
  94. AVFrame picture;
  95. int plane_count;
  96. PlaneContext plane[MAX_PLANES];
  97. DSPContext dsp;
  98. }FFV1Context;
  99. //1.774215
  100. static inline int predict(FFV1Context *s, uint8_t *src, int stride, int x, int y){
  101. if(x && y){
  102. // const int RT= src[+1-stride];
  103. const int LT= src[-1-stride];
  104. const int T= src[ -stride];
  105. const int L = src[-1 ];
  106. uint8_t *cm = cropTbl + MAX_NEG_CROP;
  107. const int gradient= cm[L + T - LT];
  108. // return gradient;
  109. return mid_pred(L, gradient, T);
  110. }else{
  111. if(y){
  112. return src[ -stride];
  113. }else if(x){
  114. return src[-1 ];
  115. }else{
  116. return 128;
  117. }
  118. }
  119. }
  120. #if 0
  121. static inline void put_symbol(CABACContext, uint8_t *state, int v){
  122. put_cabac_ueg(c, state, v, 32, 1, 4 , 32);
  123. }
  124. static inline int get_symbol(CABACContext, uint8_t *state){
  125. return get_cabac_ueg(c, state, 32, 1, 4 , 32);
  126. }
  127. #elif 0
  128. static inline void put_symbol(CABACContext *c, uint8_t *state, int v){
  129. if(v==0)
  130. put_cabac(c, state+0, 1);
  131. else{
  132. put_cabac(c, state+0, 0);
  133. put_cabac(c, state+1, v<0);
  134. if(v<0) state += 64;
  135. put_cabac_ueg(c, state+2, ABS(v)-1, 32, 0, 4 , 32);
  136. }
  137. }
  138. static inline int get_symbol(CABACContext *c, uint8_t *state){
  139. if(get_cabac(c, state+0))
  140. return 0;
  141. else{
  142. int sign= get_cabac(c, state+1);
  143. if(sign)
  144. return -1-get_cabac_ueg(c, state+66, 32, 0, 4 , 32);
  145. else
  146. return 1+get_cabac_ueg(c, state+2 , 32, 0, 4 , 32);
  147. }
  148. }
  149. #else
  150. /**
  151. * put
  152. */
  153. static inline void put_symbol(CABACContext *c, uint8_t *state, int v, int is_signed){
  154. int i;
  155. #if 0
  156. const int a= ABS(v);
  157. const int e= av_log2(a+1);
  158. put_cabac_u(c, state+0, e, 7, 6, 1); //0..6
  159. if(e){
  160. put_cabac(c, state+6 + e, v < 0); //7..13
  161. for(i=; i<e; i++){
  162. }
  163. }
  164. #else
  165. // 0 1 2 3 4 5 6
  166. // 0 1 2 3 4 5 6
  167. // 0 0 1 3 6 10 15 21
  168. if(v){
  169. const int a= ABS(v);
  170. const int e= av_log2(a);
  171. put_cabac(c, state+0, 0);
  172. put_cabac_u(c, state+1, e, 7, 6, 1); //1..7
  173. if(e<7){
  174. for(i=e-1; i>=0; i--){
  175. static const int offset[7]= {14+0, 14+0, 14+1, 14+3, 14+6, 14+10, 14+15};
  176. // put_cabac(c, state+14+e-i, (a>>i)&1); //14..20
  177. put_cabac(c, state+offset[e]+i, (a>>i)&1); //14..34
  178. }
  179. if(is_signed)
  180. put_cabac(c, state+8 + e, v < 0); //8..14
  181. }
  182. }else{
  183. put_cabac(c, state+0, 1);
  184. }
  185. #endif
  186. }
  187. static inline int get_symbol(CABACContext *c, uint8_t *state, int is_signed){
  188. int i;
  189. if(get_cabac(c, state+0))
  190. return 0;
  191. else{
  192. const int e= get_cabac_u(c, state+1, 7, 6, 1); //1..7
  193. if(e<7){
  194. int a= 1<<e;
  195. for(i=e-1; i>=0; i--){
  196. static const int offset[7]= {14+0, 14+0, 14+1, 14+3, 14+6, 14+10, 14+15};
  197. a += get_cabac(c, state+offset[e]+i)<<i; //14..34
  198. }
  199. if(is_signed && get_cabac(c, state+8 + e)) //8..14
  200. return -a;
  201. else
  202. return a;
  203. }else
  204. return -128;
  205. }
  206. }
  207. #endif
  208. static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
  209. PlaneContext * const p= &s->plane[plane_index];
  210. CABACContext * const c= &s->c;
  211. int x,y;
  212. uint8_t pred_diff_buffer[4][w+6];
  213. uint8_t *pred_diff[4]= {pred_diff_buffer[0]+3, pred_diff_buffer[1]+3, pred_diff_buffer[2]+3, pred_diff_buffer[3]+3};
  214. // uint8_t temp_buf[3*w], *temp= temp_buf + 3*w;
  215. memset(pred_diff_buffer, 0, sizeof(pred_diff_buffer));
  216. for(y=0; y<h; y++){
  217. uint8_t *temp= pred_diff[0]; //FIXME try a normal buffer
  218. pred_diff[0]= pred_diff[1];
  219. pred_diff[1]= pred_diff[2];
  220. pred_diff[2]= pred_diff[3];
  221. pred_diff[3]= temp;
  222. for(x=0; x<w; x++){
  223. uint8_t *temp_src= src + x + stride*y;
  224. int diff, context, qdiff;
  225. if(p->context_count == 256)
  226. context= pred_diff[3+0][x-1] + 16*pred_diff[3-1][x+0];
  227. else
  228. context= pred_diff[3+0][x-1] + 16*pred_diff[3-1][x+0]
  229. + 16*16*to8[pred_diff[3-1][x+1]] + 16*16*8*to8[pred_diff[3-0][x-2]] + 16*16*8*8*to8[pred_diff[3-2][x+0]];
  230. diff = (int8_t)(temp_src[0] - predict(s, temp_src, stride, x, y));
  231. qdiff= p->quant_table[128+diff];
  232. put_symbol(c, p->state[context], diff, 1);
  233. pred_diff[3][x]= qdiff;
  234. }
  235. }
  236. }
  237. static void write_quant_table(CABACContext *c, uint8_t *quant_table){
  238. int last=0;
  239. int i;
  240. uint8_t state[64]={0};
  241. for(i=1; i<256 ; i++){
  242. if(quant_table[i] != quant_table[i-1]){
  243. put_symbol(c, state, i-last-1, 0);
  244. last= i;
  245. }
  246. }
  247. put_symbol(c, state, i-last-1, 0);
  248. }
  249. static void write_header(FFV1Context *f){
  250. uint8_t state[64]={0};
  251. int i;
  252. CABACContext * const c= &f->c;
  253. put_symbol(c, state, f->version, 0);
  254. put_symbol(c, state, 0, 0); //YUV cs type
  255. put_cabac(c, state, 1); //chroma planes
  256. put_symbol(c, state, f->chroma_h_shift, 0);
  257. put_symbol(c, state, f->chroma_v_shift, 0);
  258. put_cabac(c, state, 0); //no transparency plane
  259. for(i=0; i<3; i++){ //FIXME chroma & trasparency decission
  260. PlaneContext * const p= &f->plane[i];
  261. put_symbol(c, state, av_log2(p->context_count), 0);
  262. write_quant_table(c, p->quant_table);
  263. }
  264. }
  265. static int common_init(AVCodecContext *avctx){
  266. FFV1Context *s = avctx->priv_data;
  267. int i, j, width, height;
  268. s->avctx= avctx;
  269. s->flags= avctx->flags;
  270. dsputil_init(&s->dsp, avctx);
  271. width= s->width= avctx->width;
  272. height= s->height= avctx->height;
  273. assert(width && height);
  274. for(i=0; i<s->plane_count; i++){
  275. PlaneContext *p= &s->plane[i];
  276. }
  277. return 0;
  278. }
  279. static int encode_init(AVCodecContext *avctx)
  280. {
  281. FFV1Context *s = avctx->priv_data;
  282. int i;
  283. common_init(avctx);
  284. s->version=0;
  285. s->plane_count=3;
  286. for(i=0; i<s->plane_count; i++){
  287. PlaneContext * const p= &s->plane[i];
  288. memcpy(p->quant_table, default_quant_table, sizeof(uint8_t)*256);
  289. p->qdiff_count= DEFAULT_QDIFF_COUNT;
  290. #if 1
  291. p->context_count= 256;
  292. p->state= av_malloc(64*p->context_count*sizeof(uint8_t));
  293. #else
  294. p->context_count= 16*16*8*8*8; //256*16;
  295. p->state= av_malloc(64*p->context_count*sizeof(uint8_t));
  296. #endif
  297. }
  298. avctx->coded_frame= &s->picture;
  299. switch(avctx->pix_fmt){
  300. case PIX_FMT_YUV444P:
  301. case PIX_FMT_YUV422P:
  302. case PIX_FMT_YUV420P:
  303. case PIX_FMT_YUV411P:
  304. case PIX_FMT_YUV410P:
  305. avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);
  306. break;
  307. default:
  308. fprintf(stderr, "format not supported\n");
  309. return -1;
  310. }
  311. s->picture_number=0;
  312. return 0;
  313. }
  314. static void clear_state(FFV1Context *f){
  315. int i;
  316. for(i=0; i<f->plane_count; i++){
  317. PlaneContext *p= &f->plane[i];
  318. p->interlace_bit_state[0]= 0;
  319. p->interlace_bit_state[1]= 0;
  320. memset(p->state, 0, p->context_count*sizeof(uint8_t)*64);
  321. }
  322. }
  323. static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
  324. FFV1Context *f = avctx->priv_data;
  325. CABACContext * const c= &f->c;
  326. AVFrame *pict = data;
  327. const int width= f->width;
  328. const int height= f->height;
  329. AVFrame * const p= &f->picture;
  330. if(avctx->strict_std_compliance >= 0){
  331. printf("this codec is under development, files encoded with it wont be decodeable with future versions!!!\n"
  332. "use vstrict=-1 to use it anyway\n");
  333. return -1;
  334. }
  335. ff_init_cabac_encoder(c, buf, buf_size);
  336. ff_init_cabac_states(c, ff_h264_lps_range, ff_h264_mps_state, ff_h264_lps_state, 64);
  337. *p = *pict;
  338. p->pict_type= FF_I_TYPE;
  339. if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){
  340. put_cabac_bypass(c, 1);
  341. p->key_frame= 1;
  342. write_header(f);
  343. clear_state(f);
  344. }else{
  345. put_cabac_bypass(c, 0);
  346. p->key_frame= 0;
  347. }
  348. if(1){
  349. const int chroma_width = -((-width )>>f->chroma_h_shift);
  350. const int chroma_height= -((-height)>>f->chroma_v_shift);
  351. encode_plane(f, p->data[0], width, height, p->linesize[0], 0);
  352. encode_plane(f, p->data[1], chroma_width, chroma_height, p->linesize[1], 1);
  353. encode_plane(f, p->data[2], chroma_width, chroma_height, p->linesize[2], 2);
  354. }
  355. emms_c();
  356. f->picture_number++;
  357. return put_cabac_terminate(c, 1);
  358. }
  359. static void common_end(FFV1Context *s){
  360. int i;
  361. for(i=0; i<s->plane_count; i++){
  362. PlaneContext *p= &s->plane[i];
  363. av_freep(&p->state);
  364. }
  365. }
  366. static int encode_end(AVCodecContext *avctx)
  367. {
  368. FFV1Context *s = avctx->priv_data;
  369. common_end(s);
  370. return 0;
  371. }
  372. static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
  373. PlaneContext * const p= &s->plane[plane_index];
  374. CABACContext * const c= &s->c;
  375. int x,y;
  376. uint8_t pred_diff_buffer[4][w+6];
  377. uint8_t *pred_diff[4]= {pred_diff_buffer[0]+3, pred_diff_buffer[1]+3, pred_diff_buffer[2]+3, pred_diff_buffer[3]+3};
  378. // uint8_t temp_buf[3*w], *temp= temp_buf + 3*w;
  379. memset(pred_diff_buffer, 0, sizeof(pred_diff_buffer));
  380. for(y=0; y<h; y++){
  381. uint8_t *temp= pred_diff[0]; //FIXME try a normal buffer
  382. pred_diff[0]= pred_diff[1];
  383. pred_diff[1]= pred_diff[2];
  384. pred_diff[2]= pred_diff[3];
  385. pred_diff[3]= temp;
  386. for(x=0; x<w; x++){
  387. uint8_t *temp_src= src + x + stride*y;
  388. int diff, context, qdiff;
  389. if(p->context_count == 256)
  390. context= pred_diff[3+0][x-1] + 16*pred_diff[3-1][x+0];
  391. else
  392. context= pred_diff[3+0][x-1] + 16*pred_diff[3-1][x+0]
  393. + 16*16*to8[pred_diff[3-1][x+1]] + 16*16*8*to8[pred_diff[3-0][x-2]] + 16*16*8*8*to8[pred_diff[3-2][x+0]];
  394. diff= get_symbol(c, p->state[context], 1);
  395. temp_src[0] = predict(s, temp_src, stride, x, y) + diff;
  396. assert(diff>= -128 && diff <= 127);
  397. qdiff= p->quant_table[128+diff];
  398. pred_diff[3][x]= qdiff;
  399. }
  400. }
  401. }
  402. static int read_quant_table(CABACContext *c, uint8_t *quant_table){
  403. int v;
  404. int i=0;
  405. uint8_t state[64]={0};
  406. for(v=0; i<256 ; v++){
  407. int len= get_symbol(c, state, 0) + 1;
  408. if(len + i > 256) return -1;
  409. while(len--){
  410. quant_table[i++] = v;
  411. //printf("%2d ",v);
  412. //if(i%16==0) printf("\n");
  413. }
  414. }
  415. return v;
  416. }
  417. static int read_header(FFV1Context *f){
  418. uint8_t state[64]={0};
  419. int i;
  420. CABACContext * const c= &f->c;
  421. f->version= get_symbol(c, state, 0);
  422. get_symbol(c, state, 0); //YUV cs type
  423. get_cabac(c, state); //no chroma = false
  424. f->chroma_h_shift= get_symbol(c, state, 0);
  425. f->chroma_v_shift= get_symbol(c, state, 0);
  426. get_cabac(c, state); //transparency plane
  427. f->plane_count= 3;
  428. for(i=0; i<f->plane_count; i++){
  429. PlaneContext * const p= &f->plane[i];
  430. p->context_count= 1<<get_symbol(c, state, 0);
  431. p->qdiff_count= read_quant_table(c, p->quant_table);
  432. if(p->qdiff_count < 0) return -1;
  433. if(!p->state)
  434. p->state= av_malloc(64*p->context_count*sizeof(uint8_t));
  435. }
  436. return 0;
  437. }
  438. static int decode_init(AVCodecContext *avctx)
  439. {
  440. FFV1Context *s = avctx->priv_data;
  441. common_init(avctx);
  442. #if 0
  443. switch(s->bitstream_bpp){
  444. case 12:
  445. avctx->pix_fmt = PIX_FMT_YUV420P;
  446. break;
  447. case 16:
  448. avctx->pix_fmt = PIX_FMT_YUV422P;
  449. break;
  450. case 24:
  451. case 32:
  452. if(s->bgr32){
  453. avctx->pix_fmt = PIX_FMT_RGBA32;
  454. }else{
  455. avctx->pix_fmt = PIX_FMT_BGR24;
  456. }
  457. break;
  458. default:
  459. assert(0);
  460. }
  461. #endif
  462. return 0;
  463. }
  464. static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size){
  465. FFV1Context *f = avctx->priv_data;
  466. CABACContext * const c= &f->c;
  467. const int width= f->width;
  468. const int height= f->height;
  469. AVFrame * const p= &f->picture;
  470. int bytes_read;
  471. AVFrame *picture = data;
  472. *data_size = 0;
  473. /* no supplementary picture */
  474. if (buf_size == 0)
  475. return 0;
  476. ff_init_cabac_decoder(c, buf, buf_size);
  477. ff_init_cabac_states(c, ff_h264_lps_range, ff_h264_mps_state, ff_h264_lps_state, 64);
  478. p->reference= 0;
  479. if(avctx->get_buffer(avctx, p) < 0){
  480. fprintf(stderr, "get_buffer() failed\n");
  481. return -1;
  482. }
  483. p->pict_type= FF_I_TYPE; //FIXME I vs. P
  484. if(get_cabac_bypass(c)){
  485. p->key_frame= 1;
  486. read_header(f);
  487. clear_state(f);
  488. }else{
  489. p->key_frame= 0;
  490. }
  491. if(avctx->debug&FF_DEBUG_PICT_INFO)
  492. printf("keyframe:%d\n", p->key_frame);
  493. if(1){
  494. const int chroma_width = -((-width )>>f->chroma_h_shift);
  495. const int chroma_height= -((-height)>>f->chroma_v_shift);
  496. decode_plane(f, p->data[0], width, height, p->linesize[0], 0);
  497. decode_plane(f, p->data[1], chroma_width, chroma_height, p->linesize[1], 1);
  498. decode_plane(f, p->data[2], chroma_width, chroma_height, p->linesize[2], 2);
  499. }
  500. emms_c();
  501. f->picture_number++;
  502. *picture= *p;
  503. avctx->release_buffer(avctx, p); //FIXME
  504. *data_size = sizeof(AVFrame);
  505. bytes_read= get_cabac_terminate(c);
  506. if(bytes_read ==0) printf("error at end of frame\n");
  507. return bytes_read;
  508. }
  509. static int decode_end(AVCodecContext *avctx)
  510. {
  511. FFV1Context *s = avctx->priv_data;
  512. int i;
  513. if(avctx->get_buffer == avcodec_default_get_buffer){
  514. for(i=0; i<4; i++){
  515. av_freep(&s->picture.base[i]);
  516. s->picture.data[i]= NULL;
  517. }
  518. av_freep(&s->picture.opaque);
  519. }
  520. return 0;
  521. }
  522. AVCodec ffv1_decoder = {
  523. "ffv1",
  524. CODEC_TYPE_VIDEO,
  525. CODEC_ID_FFV1,
  526. sizeof(FFV1Context),
  527. decode_init,
  528. NULL,
  529. decode_end,
  530. decode_frame,
  531. /*CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND*/ 0,
  532. NULL
  533. };
  534. AVCodec ffv1_encoder = {
  535. "ffv1",
  536. CODEC_TYPE_VIDEO,
  537. CODEC_ID_FFV1,
  538. sizeof(FFV1Context),
  539. encode_init,
  540. encode_frame,
  541. encode_end,
  542. };