You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1102 lines
35KB

  1. /*
  2. * huffyuv codec for libavcodec
  3. *
  4. * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, write to the Free Software
  18. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  19. *
  20. * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
  21. * the algorithm used
  22. */
  23. #include "common.h"
  24. #include "avcodec.h"
  25. #include "dsputil.h"
  26. #ifndef INT64_MAX
  27. #define INT64_MAX 9223372036854775807LL
  28. #endif
  29. #define VLC_BITS 11
/* Prediction modes; the mode is stored in the low 6 bits of extradata byte 0
 * (version 2 streams) or derived from bits_per_sample (older streams). */
typedef enum Predictor{
    LEFT= 0,   // predict each sample from the previous one on the same line
    PLANE,     // left prediction plus a per-line difference against the line above
    MEDIAN,    // median of left, above and (left + above - aboveleft)
} Predictor;
/* Per-instance state shared by the huffyuv encoder and decoder. */
typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;      // active prediction mode
    GetBitContext gb;         // bitstream reader (decoding)
    PutBitContext pb;         // bitstream writer (encoding)
    int interlaced;           // set when height > 288; predictors then skip a line
    int decorrelate;          // RGB: red/blue are coded as differences against green
    int bitstream_bpp;        // bits per pixel of the coded stream (12/16/24/32)
    int version;              // 0 = old header, 1 = classic tables, 2 = tables in extradata
    int yuy2; //use yuy2 instead of 422P
    int bgr32; //use bgr32 instead of bgr24
    int width, height;
    int flags;                // copy of avctx->flags (CODEC_FLAG_PASS1, _GRAY, ...)
    int picture_number;
    int last_slice_end;       // first row not yet handed to draw_horiz_band
    uint8_t __align8 temp[3][2500];   // per-plane scratch rows; limits width to 2500
    uint64_t stats[3][256];   // symbol frequencies (gathered for 2-pass encoding)
    uint8_t len[3][256];      // huffman code lengths, one table per plane
    uint32_t bits[3][256];    // huffman codes, one table per plane
    VLC vlc[3];               // decoder VLC lookup tables built from len[]/bits[]
    AVFrame picture;
    uint8_t __align8 bitstream_buffer[1024*1024*3]; //FIXME dynamic alloc or some other solution
    DSPContext dsp;
}HYuvContext;
  59. static inline void bswap_buf(uint32_t *dst, uint32_t *src, int w){
  60. int i;
  61. for(i=0; i+8<=w; i+=8){
  62. dst[i+0]= bswap_32(src[i+0]);
  63. dst[i+1]= bswap_32(src[i+1]);
  64. dst[i+2]= bswap_32(src[i+2]);
  65. dst[i+3]= bswap_32(src[i+3]);
  66. dst[i+4]= bswap_32(src[i+4]);
  67. dst[i+5]= bswap_32(src[i+5]);
  68. dst[i+6]= bswap_32(src[i+6]);
  69. dst[i+7]= bswap_32(src[i+7]);
  70. }
  71. for(;i<w; i++){
  72. dst[i+0]= bswap_32(src[i+0]);
  73. }
  74. }
  75. static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
  76. int i;
  77. for(i=0; i<w-1; i++){
  78. acc+= src[i];
  79. dst[i]= acc;
  80. i++;
  81. acc+= src[i];
  82. dst[i]= acc;
  83. }
  84. for(; i<w; i++){
  85. acc+= src[i];
  86. dst[i]= acc;
  87. }
  88. return acc;
  89. }
  90. static inline void add_median_prediction(uint8_t *dst, uint8_t *src1, uint8_t *diff, int w, int *left, int *left_top){
  91. int i;
  92. uint8_t l, lt;
  93. l= *left;
  94. lt= *left_top;
  95. for(i=0; i<w; i++){
  96. l= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF) + diff[i];
  97. lt= src1[i];
  98. dst[i]= l;
  99. }
  100. *left= l;
  101. *left_top= lt;
  102. }
  103. //FIXME optimize
  104. static inline void sub_median_prediction(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
  105. int i;
  106. uint8_t l, lt;
  107. l= *left;
  108. lt= *left_top;
  109. for(i=0; i<w; i++){
  110. const int pred= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF);
  111. lt= src1[i];
  112. l= src2[i];
  113. dst[i]= l - pred;
  114. }
  115. *left= l;
  116. *left_top= lt;
  117. }
  118. static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
  119. int i;
  120. int r,g,b;
  121. r= *red;
  122. g= *green;
  123. b= *blue;
  124. for(i=0; i<w; i++){
  125. b+= src[4*i+0];
  126. g+= src[4*i+1];
  127. r+= src[4*i+2];
  128. dst[4*i+0]= b;
  129. dst[4*i+1]= g;
  130. dst[4*i+2]= r;
  131. }
  132. *red= r;
  133. *green= g;
  134. *blue= b;
  135. }
  136. static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
  137. int i;
  138. if(w<32){
  139. for(i=0; i<w; i++){
  140. const int temp= src[i];
  141. dst[i]= temp - left;
  142. left= temp;
  143. }
  144. return left;
  145. }else{
  146. for(i=0; i<16; i++){
  147. const int temp= src[i];
  148. dst[i]= temp - left;
  149. left= temp;
  150. }
  151. s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
  152. return src[w-1];
  153. }
  154. }
  155. static void read_len_table(uint8_t *dst, GetBitContext *gb){
  156. int i, val, repeat;
  157. for(i=0; i<256;){
  158. repeat= get_bits(gb, 3);
  159. val = get_bits(gb, 5);
  160. if(repeat==0)
  161. repeat= get_bits(gb, 8);
  162. //printf("%d %d\n", val, repeat);
  163. while (repeat--)
  164. dst[i++] = val;
  165. }
  166. }
  167. static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
  168. int len, index;
  169. uint32_t bits=0;
  170. for(len=32; len>0; len--){
  171. int bit= 1<<(32-len);
  172. for(index=0; index<256; index++){
  173. if(len_table[index]==len){
  174. if(bits & (bit-1)){
  175. fprintf(stderr, "Error generating huffman table\n");
  176. return -1;
  177. }
  178. dst[index]= bits>>(32-len);
  179. bits+= bit;
  180. }
  181. }
  182. }
  183. return 0;
  184. }
  185. static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
  186. uint64_t counts[2*size];
  187. int up[2*size];
  188. int offset, i, next;
  189. for(offset=1; ; offset<<=1){
  190. for(i=0; i<size; i++){
  191. counts[i]= stats[i] + offset - 1;
  192. }
  193. for(next=size; next<size*2; next++){
  194. uint64_t min1, min2;
  195. int min1_i, min2_i;
  196. min1=min2= INT64_MAX;
  197. min1_i= min2_i=-1;
  198. for(i=0; i<next; i++){
  199. if(min2 > counts[i]){
  200. if(min1 > counts[i]){
  201. min2= min1;
  202. min2_i= min1_i;
  203. min1= counts[i];
  204. min1_i= i;
  205. }else{
  206. min2= counts[i];
  207. min2_i= i;
  208. }
  209. }
  210. }
  211. if(min2==INT64_MAX) break;
  212. counts[next]= min1 + min2;
  213. counts[min1_i]=
  214. counts[min2_i]= INT64_MAX;
  215. up[min1_i]=
  216. up[min2_i]= next;
  217. up[next]= -1;
  218. }
  219. for(i=0; i<size; i++){
  220. int len;
  221. int index=i;
  222. for(len=0; up[index] != -1; len++)
  223. index= up[index];
  224. if(len > 32) break;
  225. dst[i]= len;
  226. }
  227. if(i==size) break;
  228. }
  229. }
  230. static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
  231. GetBitContext gb;
  232. int i;
  233. init_get_bits(&gb, src, length*8);
  234. for(i=0; i<3; i++){
  235. read_len_table(s->len[i], &gb);
  236. if(generate_bits_table(s->bits[i], s->len[i])<0){
  237. return -1;
  238. }
  239. #if 0
  240. for(j=0; j<256; j++){
  241. printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
  242. }
  243. #endif
  244. init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4);
  245. }
  246. return 0;
  247. }
  248. static int read_old_huffman_tables(HYuvContext *s){
  249. #if 0
  250. GetBitContext gb;
  251. int i;
  252. init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
  253. read_len_table(s->len[0], &gb);
  254. init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
  255. read_len_table(s->len[1], &gb);
  256. for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
  257. for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
  258. if(s->bitstream_bpp >= 24){
  259. memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
  260. memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
  261. }
  262. memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
  263. memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
  264. for(i=0; i<3; i++)
  265. init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4);
  266. return 0;
  267. #else
  268. fprintf(stderr, "v1 huffyuv is not supported \n");
  269. return -1;
  270. #endif
  271. }
  272. static int decode_init(AVCodecContext *avctx)
  273. {
  274. HYuvContext *s = avctx->priv_data;
  275. int width, height;
  276. s->avctx= avctx;
  277. s->flags= avctx->flags;
  278. dsputil_init(&s->dsp, avctx->dsp_mask);
  279. width= s->width= avctx->width;
  280. height= s->height= avctx->height;
  281. avctx->coded_frame= &s->picture;
  282. s->bgr32=1;
  283. assert(width && height);
  284. //if(avctx->extradata)
  285. // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
  286. if(avctx->extradata_size){
  287. if((avctx->bits_per_sample&7) && avctx->bits_per_sample != 12)
  288. s->version=1; // do such files exist at all?
  289. else
  290. s->version=2;
  291. }else
  292. s->version=0;
  293. if(s->version==2){
  294. int method;
  295. method= ((uint8_t*)avctx->extradata)[0];
  296. s->decorrelate= method&64 ? 1 : 0;
  297. s->predictor= method&63;
  298. s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
  299. if(s->bitstream_bpp==0)
  300. s->bitstream_bpp= avctx->bits_per_sample&~7;
  301. if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
  302. return -1;
  303. }else{
  304. switch(avctx->bits_per_sample&7){
  305. case 1:
  306. s->predictor= LEFT;
  307. s->decorrelate= 0;
  308. break;
  309. case 2:
  310. s->predictor= LEFT;
  311. s->decorrelate= 1;
  312. break;
  313. case 3:
  314. s->predictor= PLANE;
  315. s->decorrelate= avctx->bits_per_sample >= 24;
  316. break;
  317. case 4:
  318. s->predictor= MEDIAN;
  319. s->decorrelate= 0;
  320. break;
  321. default:
  322. s->predictor= LEFT; //OLD
  323. s->decorrelate= 0;
  324. break;
  325. }
  326. s->bitstream_bpp= avctx->bits_per_sample & ~7;
  327. if(read_old_huffman_tables(s) < 0)
  328. return -1;
  329. }
  330. s->interlaced= height > 288;
  331. switch(s->bitstream_bpp){
  332. case 12:
  333. avctx->pix_fmt = PIX_FMT_YUV420P;
  334. break;
  335. case 16:
  336. if(s->yuy2){
  337. avctx->pix_fmt = PIX_FMT_YUV422;
  338. }else{
  339. avctx->pix_fmt = PIX_FMT_YUV422P;
  340. }
  341. break;
  342. case 24:
  343. case 32:
  344. if(s->bgr32){
  345. avctx->pix_fmt = PIX_FMT_RGBA32;
  346. }else{
  347. avctx->pix_fmt = PIX_FMT_BGR24;
  348. }
  349. break;
  350. default:
  351. assert(0);
  352. }
  353. // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced);
  354. return 0;
  355. }
  356. static void store_table(HYuvContext *s, uint8_t *len){
  357. int i;
  358. int index= s->avctx->extradata_size;
  359. for(i=0; i<256;){
  360. int cur=i;
  361. int val= len[i];
  362. int repeat;
  363. for(; i<256 && len[i]==val; i++);
  364. repeat= i - cur;
  365. if(repeat>7){
  366. ((uint8_t*)s->avctx->extradata)[index++]= val;
  367. ((uint8_t*)s->avctx->extradata)[index++]= repeat;
  368. }else{
  369. ((uint8_t*)s->avctx->extradata)[index++]= val | (repeat<<5);
  370. }
  371. }
  372. s->avctx->extradata_size= index;
  373. }
/* Encoder setup: choose the stream format from the input pixel format,
 * build the huffman tables (from pass-1 stats if available, otherwise from
 * a generic distribution) and store everything in extradata.
 * Returns 0 on success, -1 on unsupported formats or malformed stats. */
static int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j, width, height;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx->dsp_mask);

    width= s->width= avctx->width;
    height= s->height= avctx->height;
    assert(width && height);

    // NOTE(review): av_mallocz results are not checked for NULL — confirm callers tolerate OOM here
    avctx->extradata= av_mallocz(1024*10);
    avctx->stats_out= av_mallocz(1024*10);
    s->version=2;

    avctx->coded_frame= &s->picture;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        if(avctx->strict_std_compliance>=0){
            fprintf(stderr, "YV12-huffyuv is experimental, there WILL be no compatbility! (use (v)strict=-1)\n");
            return -1;
        }
        s->bitstream_bpp= 12;
        break;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    default:
        fprintf(stderr, "format not supported\n");
        return -1;
    }
    avctx->bits_per_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;
    s->predictor= avctx->prediction_method;

    /* extradata header: byte 0 = predictor, byte 1 = bpp, bytes 2-3 reserved */
    ((uint8_t*)avctx->extradata)[0]= s->predictor;
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]=
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        /* two-pass: accumulate the symbol counts written during pass 1;
         * counts are seeded with 1 so no symbol has zero frequency */
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;   /* not a number: malformed stats */
                    p=next;
                }
            }
            // NOTE(review): presumably detects end-of-input within 3 chars — confirm against the writer in encode_frame
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        /* no stats: assume small residuals (values near 0 or 255) dominate */
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    /* derive lengths and codes from the stats and append the tables to extradata */
    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i], 256);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        store_table(s, s->len[i]);
    }

    /* reset stats; they are regathered during encoding when PASS1 is set */
    for(i=0; i<3; i++)
        for(j=0; j<256; j++)
            s->stats[i][j]= 0;

    s->interlaced= height > 288;

//    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced);

    s->picture_number=0;

    return 0;
}
  449. static void decode_422_bitstream(HYuvContext *s, int count){
  450. int i;
  451. count/=2;
  452. for(i=0; i<count; i++){
  453. s->temp[0][2*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  454. s->temp[1][ i ]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
  455. s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  456. s->temp[2][ i ]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
  457. }
  458. }
  459. static void decode_gray_bitstream(HYuvContext *s, int count){
  460. int i;
  461. count/=2;
  462. for(i=0; i<count; i++){
  463. s->temp[0][2*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  464. s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  465. }
  466. }
  467. static void encode_422_bitstream(HYuvContext *s, int count){
  468. int i;
  469. count/=2;
  470. if(s->flags&CODEC_FLAG_PASS1){
  471. for(i=0; i<count; i++){
  472. s->stats[0][ s->temp[0][2*i ] ]++;
  473. s->stats[1][ s->temp[1][ i ] ]++;
  474. s->stats[0][ s->temp[0][2*i+1] ]++;
  475. s->stats[2][ s->temp[2][ i ] ]++;
  476. }
  477. }else{
  478. for(i=0; i<count; i++){
  479. put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
  480. put_bits(&s->pb, s->len[1][ s->temp[1][ i ] ], s->bits[1][ s->temp[1][ i ] ]);
  481. put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
  482. put_bits(&s->pb, s->len[2][ s->temp[2][ i ] ], s->bits[2][ s->temp[2][ i ] ]);
  483. }
  484. }
  485. }
  486. static void encode_gray_bitstream(HYuvContext *s, int count){
  487. int i;
  488. count/=2;
  489. if(s->flags&CODEC_FLAG_PASS1){
  490. for(i=0; i<count; i++){
  491. s->stats[0][ s->temp[0][2*i ] ]++;
  492. s->stats[0][ s->temp[0][2*i+1] ]++;
  493. }
  494. }else{
  495. for(i=0; i<count; i++){
  496. put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
  497. put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
  498. }
  499. }
  500. }
  501. static void decode_bgr_bitstream(HYuvContext *s, int count){
  502. int i;
  503. if(s->decorrelate){
  504. if(s->bitstream_bpp==24){
  505. for(i=0; i<count; i++){
  506. s->temp[0][4*i+1]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
  507. s->temp[0][4*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+1];
  508. s->temp[0][4*i+2]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+1];
  509. }
  510. }else{
  511. for(i=0; i<count; i++){
  512. s->temp[0][4*i+1]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
  513. s->temp[0][4*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+1];
  514. s->temp[0][4*i+2]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+1];
  515. get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
  516. }
  517. }
  518. }else{
  519. if(s->bitstream_bpp==24){
  520. for(i=0; i<count; i++){
  521. s->temp[0][4*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  522. s->temp[0][4*i+1]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
  523. s->temp[0][4*i+2]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
  524. }
  525. }else{
  526. for(i=0; i<count; i++){
  527. s->temp[0][4*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  528. s->temp[0][4*i+1]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
  529. s->temp[0][4*i+2]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
  530. get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
  531. }
  532. }
  533. }
  534. }
  535. static void draw_slice(HYuvContext *s, int y){
  536. int h, cy;
  537. UINT8 *src_ptr[3];
  538. if(s->avctx->draw_horiz_band==NULL)
  539. return;
  540. h= y - s->last_slice_end;
  541. y -= h;
  542. if(s->bitstream_bpp==12){
  543. cy= y>>1;
  544. }else{
  545. cy= y;
  546. }
  547. src_ptr[0] = s->picture.data[0] + s->picture.linesize[0]*y;
  548. src_ptr[1] = s->picture.data[1] + s->picture.linesize[1]*cy;
  549. src_ptr[2] = s->picture.data[2] + s->picture.linesize[2]*cy;
  550. emms_c();
  551. s->avctx->draw_horiz_band(s->avctx, src_ptr, s->picture.linesize[0], y, s->width, h);
  552. s->last_slice_end= y + h;
  553. }
  554. static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size){
  555. HYuvContext *s = avctx->priv_data;
  556. const int width= s->width;
  557. const int width2= s->width>>1;
  558. const int height= s->height;
  559. int fake_ystride, fake_ustride, fake_vstride;
  560. AVFrame * const p= &s->picture;
  561. AVFrame *picture = data;
  562. *data_size = 0;
  563. /* no supplementary picture */
  564. if (buf_size == 0)
  565. return 0;
  566. bswap_buf((uint32_t*)s->bitstream_buffer, (uint32_t*)buf, buf_size/4);
  567. init_get_bits(&s->gb, s->bitstream_buffer, buf_size*8);
  568. p->reference= 0;
  569. if(avctx->get_buffer(avctx, p) < 0){
  570. fprintf(stderr, "get_buffer() failed\n");
  571. return -1;
  572. }
  573. fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
  574. fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
  575. fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
  576. s->last_slice_end= 0;
  577. if(s->bitstream_bpp<24){
  578. int y, cy;
  579. int lefty, leftu, leftv;
  580. int lefttopy, lefttopu, lefttopv;
  581. if(s->yuy2){
  582. p->data[0][3]= get_bits(&s->gb, 8);
  583. p->data[0][2]= get_bits(&s->gb, 8);
  584. p->data[0][1]= get_bits(&s->gb, 8);
  585. p->data[0][0]= get_bits(&s->gb, 8);
  586. fprintf(stderr, "YUY2 output isnt implemenetd yet\n");
  587. return -1;
  588. }else{
  589. leftv= p->data[2][0]= get_bits(&s->gb, 8);
  590. lefty= p->data[0][1]= get_bits(&s->gb, 8);
  591. leftu= p->data[1][0]= get_bits(&s->gb, 8);
  592. p->data[0][0]= get_bits(&s->gb, 8);
  593. switch(s->predictor){
  594. case LEFT:
  595. case PLANE:
  596. decode_422_bitstream(s, width-2);
  597. lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
  598. if(!(s->flags&CODEC_FLAG_GRAY)){
  599. leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
  600. leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
  601. }
  602. for(cy=y=1; y<s->height; y++,cy++){
  603. uint8_t *ydst, *udst, *vdst;
  604. if(s->bitstream_bpp==12){
  605. decode_gray_bitstream(s, width);
  606. ydst= p->data[0] + p->linesize[0]*y;
  607. lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
  608. if(s->predictor == PLANE){
  609. if(y>s->interlaced)
  610. s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
  611. }
  612. y++;
  613. if(y>=s->height) break;
  614. }
  615. draw_slice(s, y);
  616. ydst= p->data[0] + p->linesize[0]*y;
  617. udst= p->data[1] + p->linesize[1]*cy;
  618. vdst= p->data[2] + p->linesize[2]*cy;
  619. decode_422_bitstream(s, width);
  620. lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
  621. if(!(s->flags&CODEC_FLAG_GRAY)){
  622. leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
  623. leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
  624. }
  625. if(s->predictor == PLANE){
  626. if(cy>s->interlaced){
  627. s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
  628. if(!(s->flags&CODEC_FLAG_GRAY)){
  629. s->dsp.add_bytes(udst, udst - fake_ustride, width2);
  630. s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
  631. }
  632. }
  633. }
  634. }
  635. draw_slice(s, height);
  636. break;
  637. case MEDIAN:
  638. /* first line except first 2 pixels is left predicted */
  639. decode_422_bitstream(s, width-2);
  640. lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
  641. if(!(s->flags&CODEC_FLAG_GRAY)){
  642. leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
  643. leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
  644. }
  645. cy=y=1;
  646. /* second line is left predicted for interlaced case */
  647. if(s->interlaced){
  648. decode_422_bitstream(s, width);
  649. lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
  650. if(!(s->flags&CODEC_FLAG_GRAY)){
  651. leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
  652. leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
  653. }
  654. y++; cy++;
  655. }
  656. /* next 4 pixels are left predicted too */
  657. decode_422_bitstream(s, 4);
  658. lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
  659. if(!(s->flags&CODEC_FLAG_GRAY)){
  660. leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
  661. leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
  662. }
  663. /* next line except the first 4 pixels is median predicted */
  664. lefttopy= p->data[0][3];
  665. decode_422_bitstream(s, width-4);
  666. add_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
  667. if(!(s->flags&CODEC_FLAG_GRAY)){
  668. lefttopu= p->data[1][1];
  669. lefttopv= p->data[2][1];
  670. add_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
  671. add_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
  672. }
  673. y++; cy++;
  674. for(; y<height; y++,cy++){
  675. uint8_t *ydst, *udst, *vdst;
  676. if(s->bitstream_bpp==12){
  677. while(2*cy > y){
  678. decode_gray_bitstream(s, width);
  679. ydst= p->data[0] + p->linesize[0]*y;
  680. add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
  681. y++;
  682. }
  683. if(y>=height) break;
  684. }
  685. draw_slice(s, y);
  686. decode_422_bitstream(s, width);
  687. ydst= p->data[0] + p->linesize[0]*y;
  688. udst= p->data[1] + p->linesize[1]*cy;
  689. vdst= p->data[2] + p->linesize[2]*cy;
  690. add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
  691. if(!(s->flags&CODEC_FLAG_GRAY)){
  692. add_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
  693. add_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
  694. }
  695. }
  696. draw_slice(s, height);
  697. break;
  698. }
  699. }
  700. }else{
  701. int y;
  702. int leftr, leftg, leftb;
  703. const int last_line= (height-1)*p->linesize[0];
  704. if(s->bitstream_bpp==32){
  705. p->data[0][last_line+3]= get_bits(&s->gb, 8);
  706. leftr= p->data[0][last_line+2]= get_bits(&s->gb, 8);
  707. leftg= p->data[0][last_line+1]= get_bits(&s->gb, 8);
  708. leftb= p->data[0][last_line+0]= get_bits(&s->gb, 8);
  709. }else{
  710. leftr= p->data[0][last_line+2]= get_bits(&s->gb, 8);
  711. leftg= p->data[0][last_line+1]= get_bits(&s->gb, 8);
  712. leftb= p->data[0][last_line+0]= get_bits(&s->gb, 8);
  713. skip_bits(&s->gb, 8);
  714. }
  715. if(s->bgr32){
  716. switch(s->predictor){
  717. case LEFT:
  718. case PLANE:
  719. decode_bgr_bitstream(s, width-1);
  720. add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);
  721. for(y=s->height-2; y>=0; y--){ //yes its stored upside down
  722. decode_bgr_bitstream(s, width);
  723. add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
  724. if(s->predictor == PLANE){
  725. if((y&s->interlaced)==0){
  726. s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
  727. p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
  728. }
  729. }
  730. }
  731. draw_slice(s, height); // just 1 large slice as this isnt possible in reverse order
  732. break;
  733. default:
  734. fprintf(stderr, "prediction type not supported!\n");
  735. }
  736. }else{
  737. fprintf(stderr, "BGR24 output isnt implemenetd yet\n");
  738. return -1;
  739. }
  740. }
  741. emms_c();
  742. *picture= *p;
  743. avctx->release_buffer(avctx, p);
  744. *data_size = sizeof(AVFrame);
  745. return (get_bits_count(&s->gb)+7)>>3;
  746. }
  747. static int decode_end(AVCodecContext *avctx)
  748. {
  749. HYuvContext *s = avctx->priv_data;
  750. int i;
  751. for(i=0; i<3; i++){
  752. free_vlc(&s->vlc[i]);
  753. }
  754. if(avctx->get_buffer == avcodec_default_get_buffer){
  755. for(i=0; i<4; i++){
  756. av_freep(&s->picture.base[i]);
  757. s->picture.data[i]= NULL;
  758. }
  759. av_freep(&s->picture.opaque);
  760. }
  761. return 0;
  762. }
  763. static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
  764. HYuvContext *s = avctx->priv_data;
  765. AVFrame *pict = data;
  766. const int width= s->width;
  767. const int width2= s->width>>1;
  768. const int height= s->height;
  769. const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
  770. const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
  771. const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
  772. AVFrame * const p= &s->picture;
  773. int i, size;
  774. init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
  775. *p = *pict;
  776. p->pict_type= FF_I_TYPE;
  777. p->key_frame= 1;
  778. if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
  779. int lefty, leftu, leftv, y, cy;
  780. put_bits(&s->pb, 8, leftv= p->data[2][0]);
  781. put_bits(&s->pb, 8, lefty= p->data[0][1]);
  782. put_bits(&s->pb, 8, leftu= p->data[1][0]);
  783. put_bits(&s->pb, 8, p->data[0][0]);
  784. lefty= sub_left_prediction(s, s->temp[0], p->data[0]+2, width-2 , lefty);
  785. leftu= sub_left_prediction(s, s->temp[1], p->data[1]+1, width2-1, leftu);
  786. leftv= sub_left_prediction(s, s->temp[2], p->data[2]+1, width2-1, leftv);
  787. encode_422_bitstream(s, width-2);
  788. if(s->predictor==MEDIAN){
  789. int lefttopy, lefttopu, lefttopv;
  790. cy=y=1;
  791. if(s->interlaced){
  792. lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
  793. leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
  794. leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
  795. encode_422_bitstream(s, width);
  796. y++; cy++;
  797. }
  798. lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
  799. leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ystride, 2, leftu);
  800. leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_ystride, 2, leftv);
  801. encode_422_bitstream(s, 4);
  802. lefttopy= p->data[0][3];
  803. lefttopu= p->data[1][1];
  804. lefttopv= p->data[2][1];
  805. sub_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
  806. sub_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
  807. sub_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
  808. encode_422_bitstream(s, width-4);
  809. y++; cy++;
  810. for(; y<height; y++,cy++){
  811. uint8_t *ydst, *udst, *vdst;
  812. if(s->bitstream_bpp==12){
  813. while(2*cy > y){
  814. ydst= p->data[0] + p->linesize[0]*y;
  815. sub_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
  816. encode_gray_bitstream(s, width);
  817. y++;
  818. }
  819. if(y>=height) break;
  820. }
  821. ydst= p->data[0] + p->linesize[0]*y;
  822. udst= p->data[1] + p->linesize[1]*cy;
  823. vdst= p->data[2] + p->linesize[2]*cy;
  824. sub_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
  825. sub_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
  826. sub_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
  827. encode_422_bitstream(s, width);
  828. }
  829. }else{
  830. for(cy=y=1; y<height; y++,cy++){
  831. uint8_t *ydst, *udst, *vdst;
  832. /* encode a luma only line & y++ */
  833. if(s->bitstream_bpp==12){
  834. ydst= p->data[0] + p->linesize[0]*y;
  835. if(s->predictor == PLANE && s->interlaced < y){
  836. s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
  837. lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
  838. }else{
  839. lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
  840. }
  841. encode_gray_bitstream(s, width);
  842. y++;
  843. if(y>=height) break;
  844. }
  845. ydst= p->data[0] + p->linesize[0]*y;
  846. udst= p->data[1] + p->linesize[1]*cy;
  847. vdst= p->data[2] + p->linesize[2]*cy;
  848. if(s->predictor == PLANE && s->interlaced < cy){
  849. s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
  850. s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
  851. s->dsp.diff_bytes(s->temp[3], vdst, vdst - fake_vstride, width2);
  852. lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
  853. leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
  854. leftv= sub_left_prediction(s, s->temp[2], s->temp[3], width2, leftv);
  855. }else{
  856. lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
  857. leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
  858. leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
  859. }
  860. encode_422_bitstream(s, width);
  861. }
  862. }
  863. }else{
  864. fprintf(stderr, "Format not supported!\n");
  865. }
  866. emms_c();
  867. size= (get_bit_count(&s->pb)+31)/32;
  868. if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
  869. int j;
  870. char *p= avctx->stats_out;
  871. for(i=0; i<3; i++){
  872. for(j=0; j<256; j++){
  873. sprintf(p, "%Ld ", s->stats[i][j]);
  874. p+= strlen(p);
  875. s->stats[i][j]= 0;
  876. }
  877. sprintf(p, "\n");
  878. p++;
  879. }
  880. }else{
  881. flush_put_bits(&s->pb);
  882. bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
  883. }
  884. s->picture_number++;
  885. return size*4;
  886. }
  887. static int encode_end(AVCodecContext *avctx)
  888. {
  889. // HYuvContext *s = avctx->priv_data;
  890. av_freep(&avctx->extradata);
  891. av_freep(&avctx->stats_out);
  892. return 0;
  893. }
/* Decoder registration (positional AVCodec initializer of this API era). */
AVCodec huffyuv_decoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    decode_init,
    NULL,                 // no encode callback in the decoder entry
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,  // direct rendering + slice output
    NULL
};
/* Encoder registration; trailing members (decode, capabilities, ...) are
 * implicitly zero-initialized. */
AVCodec huffyuv_encoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
};