You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1102 lines
35KB

  1. /*
  2. * huffyuv codec for libavcodec
  3. *
  4. * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, write to the Free Software
  18. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  19. *
  20. * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
  21. * the algorithm used
  22. */
  23. #include "common.h"
  24. #include "avcodec.h"
  25. #include "dsputil.h"
  26. #ifndef INT64_MAX
  27. #define INT64_MAX 9223372036854775807LL
  28. #endif
  29. #define VLC_BITS 11
/* Prediction modes, taken from the low 6 bits of the extradata method byte. */
typedef enum Predictor{
    LEFT= 0,  // previous pixel on the same line
    PLANE,    // left prediction of the per-line delta against the line above
    MEDIAN,   // median of left, above, and left+above-aboveleft
} Predictor;
/* Per-instance codec state, shared by the encoder and the decoder. */
typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;     // prediction mode (method byte & 63)
    GetBitContext gb;        // bitstream reader (decode side)
    PutBitContext pb;        // bitstream writer (encode side)
    int interlaced;          // set when height > 288; predictors then skip a line
    int decorrelate;         // RGB mode: green subtracted from red/blue (method byte & 64)
    int bitstream_bpp;       // bits per pixel as coded: 12, 16, 24 or 32
    int version;             // 0/1 = pre-extradata huffyuv, 2 = tables in extradata
    int yuy2;                //use yuy2 instead of 422P
    int bgr32;               //use bgr32 instead of bgr24
    int width, height;
    int flags;               // copy of avctx->flags (PASS1, GRAY, ...)
    int picture_number;
    int last_slice_end;      // first row not yet reported through draw_slice()
    uint8_t __align8 temp[3][2500];  // per-plane line scratch; limits width to 2500
    uint64_t stats[3][256];  // symbol frequencies for two-pass encoding
    uint8_t len[3][256];     // Huffman code lengths, one table per plane
    uint32_t bits[3][256];   // Huffman codes matching len[]
    VLC vlc[3];              // decoder lookup tables built from len/bits
    AVFrame picture;
    uint8_t __align8 bitstream_buffer[1024*1024*3]; //FIXME dynamic alloc or some other solution
    DSPContext dsp;
}HYuvContext;
  59. static inline void bswap_buf(uint32_t *dst, uint32_t *src, int w){
  60. int i;
  61. for(i=0; i+8<=w; i+=8){
  62. dst[i+0]= bswap_32(src[i+0]);
  63. dst[i+1]= bswap_32(src[i+1]);
  64. dst[i+2]= bswap_32(src[i+2]);
  65. dst[i+3]= bswap_32(src[i+3]);
  66. dst[i+4]= bswap_32(src[i+4]);
  67. dst[i+5]= bswap_32(src[i+5]);
  68. dst[i+6]= bswap_32(src[i+6]);
  69. dst[i+7]= bswap_32(src[i+7]);
  70. }
  71. for(;i<w; i++){
  72. dst[i+0]= bswap_32(src[i+0]);
  73. }
  74. }
  75. static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
  76. int i;
  77. for(i=0; i<w-1; i++){
  78. acc+= src[i];
  79. dst[i]= acc;
  80. i++;
  81. acc+= src[i];
  82. dst[i]= acc;
  83. }
  84. for(; i<w; i++){
  85. acc+= src[i];
  86. dst[i]= acc;
  87. }
  88. return acc;
  89. }
  90. static inline void add_median_prediction(uint8_t *dst, uint8_t *src1, uint8_t *diff, int w, int *left, int *left_top){
  91. int i;
  92. uint8_t l, lt;
  93. l= *left;
  94. lt= *left_top;
  95. for(i=0; i<w; i++){
  96. l= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF) + diff[i];
  97. lt= src1[i];
  98. dst[i]= l;
  99. }
  100. *left= l;
  101. *left_top= lt;
  102. }
  103. //FIXME optimize
  104. static inline void sub_median_prediction(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
  105. int i;
  106. uint8_t l, lt;
  107. l= *left;
  108. lt= *left_top;
  109. for(i=0; i<w; i++){
  110. const int pred= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF);
  111. lt= src1[i];
  112. l= src2[i];
  113. dst[i]= l - pred;
  114. }
  115. *left= l;
  116. *left_top= lt;
  117. }
  118. static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
  119. int i;
  120. int r,g,b;
  121. r= *red;
  122. g= *green;
  123. b= *blue;
  124. for(i=0; i<w; i++){
  125. b+= src[4*i+0];
  126. g+= src[4*i+1];
  127. r+= src[4*i+2];
  128. dst[4*i+0]= b;
  129. dst[4*i+1]= g;
  130. dst[4*i+2]= r;
  131. }
  132. *red= r;
  133. *green= g;
  134. *blue= b;
  135. }
  136. static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
  137. int i;
  138. if(w<32){
  139. for(i=0; i<w; i++){
  140. const int temp= src[i];
  141. dst[i]= temp - left;
  142. left= temp;
  143. }
  144. return left;
  145. }else{
  146. for(i=0; i<16; i++){
  147. const int temp= src[i];
  148. dst[i]= temp - left;
  149. left= temp;
  150. }
  151. s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
  152. return src[w-1];
  153. }
  154. }
  155. static void read_len_table(uint8_t *dst, GetBitContext *gb){
  156. int i, val, repeat;
  157. for(i=0; i<256;){
  158. repeat= get_bits(gb, 3);
  159. val = get_bits(gb, 5);
  160. if(repeat==0)
  161. repeat= get_bits(gb, 8);
  162. //printf("%d %d\n", val, repeat);
  163. while (repeat--)
  164. dst[i++] = val;
  165. }
  166. }
  167. static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
  168. int len, index;
  169. uint32_t bits=0;
  170. for(len=32; len>0; len--){
  171. int bit= 1<<(32-len);
  172. for(index=0; index<256; index++){
  173. if(len_table[index]==len){
  174. if(bits & (bit-1)){
  175. fprintf(stderr, "Error generating huffman table\n");
  176. return -1;
  177. }
  178. dst[index]= bits>>(32-len);
  179. bits+= bit;
  180. }
  181. }
  182. }
  183. return 0;
  184. }
/* Derive Huffman code lengths for `size` symbols from occurrence counts,
 * enforcing a maximum code length of 32 bits.
 *
 * A standard Huffman tree is built by repeatedly merging the two
 * smallest live nodes; up[] records each node's parent so a symbol's
 * code length can be read off as its depth.  If any symbol ends up
 * deeper than 32 bits, the whole build is retried with `offset`
 * (added to every count, doubled each attempt) to flatten the
 * distribution until all lengths fit.
 *
 * NOTE(review): assumes size >= 2 — with a single symbol up[0] would be
 * read uninitialized.  All callers pass 256. */
static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
    uint64_t counts[2*size];
    int up[2*size];
    int offset, i, next;

    for(offset=1; ; offset<<=1){
        /* leaf weights; "+ offset - 1" flattens the histogram on retries */
        for(i=0; i<size; i++){
            counts[i]= stats[i] + offset - 1;
        }

        /* build internal nodes in counts[size..2*size) */
        for(next=size; next<size*2; next++){
            uint64_t min1, min2;
            int min1_i, min2_i;

            min1=min2= INT64_MAX;
            min1_i= min2_i=-1;

            /* find the two smallest live nodes */
            for(i=0; i<next; i++){
                if(min2 > counts[i]){
                    if(min1 > counts[i]){
                        min2= min1;
                        min2_i= min1_i;
                        min1= counts[i];
                        min1_i= i;
                    }else{
                        min2= counts[i];
                        min2_i= i;
                    }
                }
            }

            if(min2==INT64_MAX) break;  // fewer than two live nodes: tree complete

            counts[next]= min1 + min2;
            counts[min1_i]=
            counts[min2_i]= INT64_MAX;  // mark the merged children as consumed
            up[min1_i]=
            up[min2_i]= next;
            up[next]= -1;               // provisional root
        }

        /* code length of each leaf = its depth in the tree */
        for(i=0; i<size; i++){
            int len;
            int index=i;

            for(len=0; up[index] != -1; len++)
                index= up[index];

            if(len > 32) break;  // too deep for a 32 bit code: retry with bigger offset

            dst[i]= len;
        }
        if(i==size) break;  // every length fit: done
    }
}
/* Parse the three run-length coded length tables (Y, U, V) from the
 * version-2 extradata and build the matching decode VLCs.
 * Returns 0 on success, -1 if a table is not a valid prefix code. */
static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
    GetBitContext gb;
    int i;

    init_get_bits(&gb, src, length);

    for(i=0; i<3; i++){
        read_len_table(s->len[i], &gb);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }
#if 0
for(j=0; j<256; j++){
printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
}
#endif
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4);
    }

    return 0;
}
/* Build the fixed tables used by version-1 (pre-extradata) huffyuv.
 * The implementation is disabled: it depends on the classic_* tables
 * (classic_shift_luma etc.) which are not present in this file, so v1
 * streams are rejected instead. */
static int read_old_huffman_tables(HYuvContext *s){
#if 0
    GetBitContext gb;
    int i;

    init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma));
    read_len_table(s->len[0], &gb);
    init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma));
    read_len_table(s->len[1], &gb);

    for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
    for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];

    if(s->bitstream_bpp >= 24){
        memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
        memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
    }
    memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
    memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));

    for(i=0; i<3; i++)
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4);

    return 0;
#else
    fprintf(stderr, "v1 huffyuv is not supported \n");
    return -1;
#endif
}
/* Decoder init: detect the stream version from extradata, read the
 * method byte / bpp / Huffman tables (v2) or fall back to the legacy
 * per-bpp defaults, then pick the output pixel format.
 * Returns 0 on success, -1 on unsupported/invalid configuration. */
static int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int width, height;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx->dsp_mask);

    width= s->width= avctx->width;
    height= s->height= avctx->height;
    avctx->coded_frame= &s->picture;

    s->bgr32=1;  // always decode RGB streams to RGBA32 (BGR24 output unimplemented)
    assert(width && height);
//if(avctx->extradata)
//  printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
    if(avctx->extradata_size){
        /* a non-multiple-of-8 bits_per_sample (other than 12) marks an old
           v1 file that happens to carry extradata */
        if((avctx->bits_per_sample&7) && avctx->bits_per_sample != 12)
            s->version=1; // do such files exist at all?
        else
            s->version=2;
    }else
        s->version=0;

    if(s->version==2){
        int method;

        /* extradata layout: [0] method byte, [1] coded bpp, [2..3] reserved,
           [4..] run-length coded Huffman length tables */
        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_sample&~7;

        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
            return -1;
    }else{
        /* legacy files: predictor/decorrelate encoded in the low 3 bits of bps */
        switch(avctx->bits_per_sample&7){
        case 1:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        case 2:
            s->predictor= LEFT;
            s->decorrelate= 1;
            break;
        case 3:
            s->predictor= PLANE;
            s->decorrelate= avctx->bits_per_sample >= 24;
            break;
        case 4:
            s->predictor= MEDIAN;
            s->decorrelate= 0;
            break;
        default:
            s->predictor= LEFT; //OLD
            s->decorrelate= 0;
            break;
        }
        s->bitstream_bpp= avctx->bits_per_sample & ~7;

        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    s->interlaced= height > 288;

    switch(s->bitstream_bpp){
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        break;
    case 16:
        if(s->yuy2){
            avctx->pix_fmt = PIX_FMT_YUV422;
        }else{
            avctx->pix_fmt = PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if(s->bgr32){
            avctx->pix_fmt = PIX_FMT_RGBA32;
        }else{
            avctx->pix_fmt = PIX_FMT_BGR24;
        }
        break;
    default:
        assert(0);
    }

//    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced);

    return 0;
}
  356. static void store_table(HYuvContext *s, uint8_t *len){
  357. int i;
  358. int index= s->avctx->extradata_size;
  359. for(i=0; i<256;){
  360. int cur=i;
  361. int val= len[i];
  362. int repeat;
  363. for(; i<256 && len[i]==val; i++);
  364. repeat= i - cur;
  365. if(repeat>7){
  366. ((uint8_t*)s->avctx->extradata)[index++]= val;
  367. ((uint8_t*)s->avctx->extradata)[index++]= repeat;
  368. }else{
  369. ((uint8_t*)s->avctx->extradata)[index++]= val | (repeat<<5);
  370. }
  371. }
  372. s->avctx->extradata_size= index;
  373. }
/* Encoder init: allocate extradata/stats buffers, choose the coded bpp
 * from the input pixel format, build the Huffman tables (from stats_in
 * on a second pass, otherwise from a generic residual distribution) and
 * store them in extradata.  Returns 0 on success, -1 on error. */
static int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j, width, height;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx->dsp_mask);

    width= s->width= avctx->width;
    height= s->height= avctx->height;
    assert(width && height);

    avctx->extradata= av_mallocz(1024*10);
    avctx->stats_out= av_mallocz(1024*10);
    s->version=2;

    avctx->coded_frame= &s->picture;
    s->picture.pict_type= FF_I_TYPE;  // every huffyuv frame is an intra frame
    s->picture.key_frame= 1;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        if(avctx->strict_std_compliance>=0){
            fprintf(stderr, "YV12-huffyuv is experimental, there WILL be no compatbility! (use (v)strict=-1)\n");
            return -1;
        }
        s->bitstream_bpp= 12;
        break;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    default:
        fprintf(stderr, "format not supported\n");
        return -1;
    }
    avctx->bits_per_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;
    s->predictor= avctx->prediction_method;

    /* extradata header: method byte, bpp, two reserved bytes; the
       run-length coded tables are appended below by store_table() */
    ((uint8_t*)avctx->extradata)[0]= s->predictor;
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]=
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        /* second pass: accumulate all per-block symbol counts dumped by pass 1 */
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;
                    p=next;
                }
            }
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        /* no stats: assume small residuals (around 0 mod 256) are most likely */
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i], 256);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        store_table(s, s->len[i]);
    }

    /* reset the stats; they are re-gathered during encoding in pass 1 */
    for(i=0; i<3; i++)
        for(j=0; j<256; j++)
            s->stats[i][j]= 0;

    s->interlaced= height > 288;

//    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced);

    s->picture_number=0;

    return 0;
}
  451. static void decode_422_bitstream(HYuvContext *s, int count){
  452. int i;
  453. count/=2;
  454. for(i=0; i<count; i++){
  455. s->temp[0][2*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  456. s->temp[1][ i ]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
  457. s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  458. s->temp[2][ i ]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
  459. }
  460. }
  461. static void decode_gray_bitstream(HYuvContext *s, int count){
  462. int i;
  463. count/=2;
  464. for(i=0; i<count; i++){
  465. s->temp[0][2*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  466. s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  467. }
  468. }
  469. static void encode_422_bitstream(HYuvContext *s, int count){
  470. int i;
  471. count/=2;
  472. if(s->flags&CODEC_FLAG_PASS1){
  473. for(i=0; i<count; i++){
  474. s->stats[0][ s->temp[0][2*i ] ]++;
  475. s->stats[1][ s->temp[1][ i ] ]++;
  476. s->stats[0][ s->temp[0][2*i+1] ]++;
  477. s->stats[2][ s->temp[2][ i ] ]++;
  478. }
  479. }else{
  480. for(i=0; i<count; i++){
  481. put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
  482. put_bits(&s->pb, s->len[1][ s->temp[1][ i ] ], s->bits[1][ s->temp[1][ i ] ]);
  483. put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
  484. put_bits(&s->pb, s->len[2][ s->temp[2][ i ] ], s->bits[2][ s->temp[2][ i ] ]);
  485. }
  486. }
  487. }
  488. static void encode_gray_bitstream(HYuvContext *s, int count){
  489. int i;
  490. count/=2;
  491. if(s->flags&CODEC_FLAG_PASS1){
  492. for(i=0; i<count; i++){
  493. s->stats[0][ s->temp[0][2*i ] ]++;
  494. s->stats[0][ s->temp[0][2*i+1] ]++;
  495. }
  496. }else{
  497. for(i=0; i<count; i++){
  498. put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
  499. put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
  500. }
  501. }
  502. }
  503. static void decode_bgr_bitstream(HYuvContext *s, int count){
  504. int i;
  505. if(s->decorrelate){
  506. if(s->bitstream_bpp==24){
  507. for(i=0; i<count; i++){
  508. s->temp[0][4*i+1]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
  509. s->temp[0][4*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+1];
  510. s->temp[0][4*i+2]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+1];
  511. }
  512. }else{
  513. for(i=0; i<count; i++){
  514. s->temp[0][4*i+1]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
  515. s->temp[0][4*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+1];
  516. s->temp[0][4*i+2]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+1];
  517. get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
  518. }
  519. }
  520. }else{
  521. if(s->bitstream_bpp==24){
  522. for(i=0; i<count; i++){
  523. s->temp[0][4*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  524. s->temp[0][4*i+1]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
  525. s->temp[0][4*i+2]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
  526. }
  527. }else{
  528. for(i=0; i<count; i++){
  529. s->temp[0][4*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  530. s->temp[0][4*i+1]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
  531. s->temp[0][4*i+2]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
  532. get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
  533. }
  534. }
  535. }
  536. }
/* Report the rows finished since the last call ([last_slice_end, y)) to
 * the application through draw_horiz_band.  No-op when no callback is
 * registered. */
static void draw_slice(HYuvContext *s, int y){
    int h, cy;
    UINT8 *src_ptr[3];

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    y -= h;  // y is now the first row of the slice

    if(s->bitstream_bpp==12){
        cy= y>>1;  // 4:2:0: chroma rows advance at half the luma rate
    }else{
        cy= y;
    }

    src_ptr[0] = s->picture.data[0] + s->picture.linesize[0]*y;
    src_ptr[1] = s->picture.data[1] + s->picture.linesize[1]*cy;
    src_ptr[2] = s->picture.data[2] + s->picture.linesize[2]*cy;
    emms_c();  // leave MMX state clean before calling out

    s->avctx->draw_horiz_band(s->avctx, src_ptr, s->picture.linesize[0], y, s->width, h);

    s->last_slice_end= y + h;
}
  556. static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size){
  557. HYuvContext *s = avctx->priv_data;
  558. const int width= s->width;
  559. const int width2= s->width>>1;
  560. const int height= s->height;
  561. int fake_ystride, fake_ustride, fake_vstride;
  562. AVFrame * const p= &s->picture;
  563. AVFrame *picture = data;
  564. *data_size = 0;
  565. /* no supplementary picture */
  566. if (buf_size == 0)
  567. return 0;
  568. bswap_buf((uint32_t*)s->bitstream_buffer, (uint32_t*)buf, buf_size/4);
  569. init_get_bits(&s->gb, s->bitstream_buffer, buf_size);
  570. p->reference= 0;
  571. if(avctx->get_buffer(avctx, p) < 0){
  572. fprintf(stderr, "get_buffer() failed\n");
  573. return -1;
  574. }
  575. fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
  576. fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
  577. fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
  578. s->last_slice_end= 0;
  579. if(s->bitstream_bpp<24){
  580. int y, cy;
  581. int lefty, leftu, leftv;
  582. int lefttopy, lefttopu, lefttopv;
  583. if(s->yuy2){
  584. p->data[0][3]= get_bits(&s->gb, 8);
  585. p->data[0][2]= get_bits(&s->gb, 8);
  586. p->data[0][1]= get_bits(&s->gb, 8);
  587. p->data[0][0]= get_bits(&s->gb, 8);
  588. fprintf(stderr, "YUY2 output isnt implemenetd yet\n");
  589. return -1;
  590. }else{
  591. leftv= p->data[2][0]= get_bits(&s->gb, 8);
  592. lefty= p->data[0][1]= get_bits(&s->gb, 8);
  593. leftu= p->data[1][0]= get_bits(&s->gb, 8);
  594. p->data[0][0]= get_bits(&s->gb, 8);
  595. switch(s->predictor){
  596. case LEFT:
  597. case PLANE:
  598. decode_422_bitstream(s, width-2);
  599. lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
  600. if(!(s->flags&CODEC_FLAG_GRAY)){
  601. leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
  602. leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
  603. }
  604. for(cy=y=1; y<s->height; y++,cy++){
  605. uint8_t *ydst, *udst, *vdst;
  606. if(s->bitstream_bpp==12){
  607. decode_gray_bitstream(s, width);
  608. ydst= p->data[0] + p->linesize[0]*y;
  609. lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
  610. if(s->predictor == PLANE){
  611. if(y>s->interlaced)
  612. s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
  613. }
  614. y++;
  615. if(y>=s->height) break;
  616. }
  617. draw_slice(s, y);
  618. ydst= p->data[0] + p->linesize[0]*y;
  619. udst= p->data[1] + p->linesize[1]*cy;
  620. vdst= p->data[2] + p->linesize[2]*cy;
  621. decode_422_bitstream(s, width);
  622. lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
  623. if(!(s->flags&CODEC_FLAG_GRAY)){
  624. leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
  625. leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
  626. }
  627. if(s->predictor == PLANE){
  628. if(cy>s->interlaced){
  629. s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
  630. if(!(s->flags&CODEC_FLAG_GRAY)){
  631. s->dsp.add_bytes(udst, udst - fake_ustride, width2);
  632. s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
  633. }
  634. }
  635. }
  636. }
  637. draw_slice(s, height);
  638. break;
  639. case MEDIAN:
  640. /* first line except first 2 pixels is left predicted */
  641. decode_422_bitstream(s, width-2);
  642. lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
  643. if(!(s->flags&CODEC_FLAG_GRAY)){
  644. leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
  645. leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
  646. }
  647. cy=y=1;
  648. /* second line is left predicted for interlaced case */
  649. if(s->interlaced){
  650. decode_422_bitstream(s, width);
  651. lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
  652. if(!(s->flags&CODEC_FLAG_GRAY)){
  653. leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
  654. leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
  655. }
  656. y++; cy++;
  657. }
  658. /* next 4 pixels are left predicted too */
  659. decode_422_bitstream(s, 4);
  660. lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
  661. if(!(s->flags&CODEC_FLAG_GRAY)){
  662. leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
  663. leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
  664. }
  665. /* next line except the first 4 pixels is median predicted */
  666. lefttopy= p->data[0][3];
  667. decode_422_bitstream(s, width-4);
  668. add_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
  669. if(!(s->flags&CODEC_FLAG_GRAY)){
  670. lefttopu= p->data[1][1];
  671. lefttopv= p->data[2][1];
  672. add_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
  673. add_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
  674. }
  675. y++; cy++;
  676. for(; y<height; y++,cy++){
  677. uint8_t *ydst, *udst, *vdst;
  678. if(s->bitstream_bpp==12){
  679. while(2*cy > y){
  680. decode_gray_bitstream(s, width);
  681. ydst= p->data[0] + p->linesize[0]*y;
  682. add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
  683. y++;
  684. }
  685. if(y>=height) break;
  686. }
  687. draw_slice(s, y);
  688. decode_422_bitstream(s, width);
  689. ydst= p->data[0] + p->linesize[0]*y;
  690. udst= p->data[1] + p->linesize[1]*cy;
  691. vdst= p->data[2] + p->linesize[2]*cy;
  692. add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
  693. if(!(s->flags&CODEC_FLAG_GRAY)){
  694. add_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
  695. add_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
  696. }
  697. }
  698. draw_slice(s, height);
  699. break;
  700. }
  701. }
  702. }else{
  703. int y;
  704. int leftr, leftg, leftb;
  705. const int last_line= (height-1)*p->linesize[0];
  706. if(s->bitstream_bpp==32){
  707. p->data[0][last_line+3]= get_bits(&s->gb, 8);
  708. leftr= p->data[0][last_line+2]= get_bits(&s->gb, 8);
  709. leftg= p->data[0][last_line+1]= get_bits(&s->gb, 8);
  710. leftb= p->data[0][last_line+0]= get_bits(&s->gb, 8);
  711. }else{
  712. leftr= p->data[0][last_line+2]= get_bits(&s->gb, 8);
  713. leftg= p->data[0][last_line+1]= get_bits(&s->gb, 8);
  714. leftb= p->data[0][last_line+0]= get_bits(&s->gb, 8);
  715. skip_bits(&s->gb, 8);
  716. }
  717. if(s->bgr32){
  718. switch(s->predictor){
  719. case LEFT:
  720. case PLANE:
  721. decode_bgr_bitstream(s, width-1);
  722. add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);
  723. for(y=s->height-2; y>=0; y--){ //yes its stored upside down
  724. decode_bgr_bitstream(s, width);
  725. add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
  726. if(s->predictor == PLANE){
  727. if((y&s->interlaced)==0){
  728. s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
  729. p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
  730. }
  731. }
  732. }
  733. draw_slice(s, height); // just 1 large slice as this isnt possible in reverse order
  734. break;
  735. default:
  736. fprintf(stderr, "prediction type not supported!\n");
  737. }
  738. }else{
  739. fprintf(stderr, "BGR24 output isnt implemenetd yet\n");
  740. return -1;
  741. }
  742. }
  743. emms_c();
  744. *picture= *p;
  745. avctx->release_buffer(avctx, p);
  746. *data_size = sizeof(AVFrame);
  747. return (get_bits_count(&s->gb)+7)>>3;
  748. }
  749. static int decode_end(AVCodecContext *avctx)
  750. {
  751. HYuvContext *s = avctx->priv_data;
  752. int i;
  753. for(i=0; i<3; i++){
  754. free_vlc(&s->vlc[i]);
  755. }
  756. if(avctx->get_buffer == avcodec_default_get_buffer){
  757. for(i=0; i<4; i++){
  758. av_freep(&s->picture.base[i]);
  759. s->picture.data[i]= NULL;
  760. }
  761. av_freep(&s->picture.opaque);
  762. }
  763. return 0;
  764. }
  765. static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
  766. HYuvContext *s = avctx->priv_data;
  767. AVFrame *pict = data;
  768. const int width= s->width;
  769. const int width2= s->width>>1;
  770. const int height= s->height;
  771. const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
  772. const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
  773. const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
  774. AVFrame * const p= &s->picture;
  775. int i, size;
  776. init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
  777. *p = *pict;
  778. if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
  779. int lefty, leftu, leftv, y, cy;
  780. put_bits(&s->pb, 8, leftv= p->data[2][0]);
  781. put_bits(&s->pb, 8, lefty= p->data[0][1]);
  782. put_bits(&s->pb, 8, leftu= p->data[1][0]);
  783. put_bits(&s->pb, 8, p->data[0][0]);
  784. lefty= sub_left_prediction(s, s->temp[0], p->data[0]+2, width-2 , lefty);
  785. leftu= sub_left_prediction(s, s->temp[1], p->data[1]+1, width2-1, leftu);
  786. leftv= sub_left_prediction(s, s->temp[2], p->data[2]+1, width2-1, leftv);
  787. encode_422_bitstream(s, width-2);
  788. if(s->predictor==MEDIAN){
  789. int lefttopy, lefttopu, lefttopv;
  790. cy=y=1;
  791. if(s->interlaced){
  792. lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
  793. leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
  794. leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
  795. encode_422_bitstream(s, width);
  796. y++; cy++;
  797. }
  798. lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
  799. leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ystride, 2, leftu);
  800. leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_ystride, 2, leftv);
  801. encode_422_bitstream(s, 4);
  802. lefttopy= p->data[0][3];
  803. lefttopu= p->data[1][1];
  804. lefttopv= p->data[2][1];
  805. sub_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
  806. sub_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
  807. sub_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
  808. encode_422_bitstream(s, width-4);
  809. y++; cy++;
  810. for(; y<height; y++,cy++){
  811. uint8_t *ydst, *udst, *vdst;
  812. if(s->bitstream_bpp==12){
  813. while(2*cy > y){
  814. ydst= p->data[0] + p->linesize[0]*y;
  815. sub_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
  816. encode_gray_bitstream(s, width);
  817. y++;
  818. }
  819. if(y>=height) break;
  820. }
  821. ydst= p->data[0] + p->linesize[0]*y;
  822. udst= p->data[1] + p->linesize[1]*cy;
  823. vdst= p->data[2] + p->linesize[2]*cy;
  824. sub_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
  825. sub_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
  826. sub_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
  827. encode_422_bitstream(s, width);
  828. }
  829. }else{
  830. for(cy=y=1; y<height; y++,cy++){
  831. uint8_t *ydst, *udst, *vdst;
  832. /* encode a luma only line & y++ */
  833. if(s->bitstream_bpp==12){
  834. ydst= p->data[0] + p->linesize[0]*y;
  835. if(s->predictor == PLANE && s->interlaced < y){
  836. s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
  837. lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
  838. }else{
  839. lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
  840. }
  841. encode_gray_bitstream(s, width);
  842. y++;
  843. if(y>=height) break;
  844. }
  845. ydst= p->data[0] + p->linesize[0]*y;
  846. udst= p->data[1] + p->linesize[1]*cy;
  847. vdst= p->data[2] + p->linesize[2]*cy;
  848. if(s->predictor == PLANE && s->interlaced < cy){
  849. s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
  850. s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
  851. s->dsp.diff_bytes(s->temp[3], vdst, vdst - fake_vstride, width2);
  852. lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
  853. leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
  854. leftv= sub_left_prediction(s, s->temp[2], s->temp[3], width2, leftv);
  855. }else{
  856. lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
  857. leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
  858. leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
  859. }
  860. encode_422_bitstream(s, width);
  861. }
  862. }
  863. }else{
  864. fprintf(stderr, "Format not supported!\n");
  865. }
  866. emms_c();
  867. size= (get_bit_count(&s->pb)+31)/32;
  868. if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
  869. int j;
  870. char *p= avctx->stats_out;
  871. for(i=0; i<3; i++){
  872. for(j=0; j<256; j++){
  873. sprintf(p, "%Ld ", s->stats[i][j]);
  874. p+= strlen(p);
  875. s->stats[i][j]= 0;
  876. }
  877. sprintf(p, "\n");
  878. p++;
  879. }
  880. }else{
  881. flush_put_bits(&s->pb);
  882. bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
  883. }
  884. s->picture_number++;
  885. return size*4;
  886. }
  887. static int encode_end(AVCodecContext *avctx)
  888. {
  889. // HYuvContext *s = avctx->priv_data;
  890. av_freep(&avctx->extradata);
  891. av_freep(&avctx->stats_out);
  892. return 0;
  893. }
/* Decoder registration (positional AVCodec initializer of this era). */
AVCodec huffyuv_decoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    decode_init,
    NULL,            // no encode callback
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL
};
/* Encoder registration; trailing AVCodec fields default to 0/NULL. */
AVCodec huffyuv_encoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
};