You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

979 lines
31KB

  1. /*
  2. * huffyuv codec for libavcodec
  3. *
  4. * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, write to the Free Software
  18. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  19. *
  20. * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
  21. * the algorithm used
  22. */
  23. #include "common.h"
  24. #include "avcodec.h"
  25. #include "dsputil.h"
  26. #ifndef MAX_INT64
  27. #define MAX_INT64 9223372036854775807LL
  28. #endif
  29. #define VLC_BITS 11
/* Prediction methods of the huffyuv bitstream. For v2 files the value is
 * stored in the low 6 bits of the extradata "method" byte. */
typedef enum Predictor{
    LEFT= 0,   // predict from the previous sample on the same line
    PLANE,     // left-predict the per-line delta to the line above
    MEDIAN,    // median of left, above and (left + above - aboveleft)
} Predictor;
/* Codec private state, shared by the encoder and the decoder. */
typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;    // LEFT / PLANE / MEDIAN
    GetBitContext gb;       // decoder bit reader
    PutBitContext pb;       // encoder bit writer
    int interlaced;         // set when height > 288; prediction then spans 2 lines
    int decorrelate;        // RGB: B and R are coded as deltas against G
    int bitstream_bpp;      // bits per pixel as coded in the stream (12/16/24/32)
    int version;            // 0/1: fixed "classic" tables, 2: tables in extradata
    int yuy2; //use yuy2 instead of 422P
    int bgr32; //use bgr32 instead of bgr24
    int width, height;
    int flags;              // copy of avctx->flags (PASS1, GRAY, ...)
    int picture_number;
    int linesize[3];        // per-plane stride of picture[]
    uint8_t __align8 temp[3][2500];  // per-plane scratch line of residuals (fixed 2500 byte cap)
    uint64_t stats[3][256]; // per-plane symbol frequencies (two-pass encoding)
    uint8_t len[3][256];    // per-plane huffman code lengths
    uint32_t bits[3][256];  // per-plane huffman codes
    VLC vlc[3];             // decoder VLC lookup tables
    uint8_t __align8 *picture[3];    // decoder-owned output planes
    uint8_t __align8 bitstream_buffer[1024*1024*3]; //FIXME dynamic alloc or some other solution
    DSPContext dsp;
}HYuvContext;
  59. static inline void bswap_buf(uint32_t *dst, uint32_t *src, int w){
  60. int i;
  61. for(i=0; i+8<=w; i+=8){
  62. dst[i+0]= bswap_32(src[i+0]);
  63. dst[i+1]= bswap_32(src[i+1]);
  64. dst[i+2]= bswap_32(src[i+2]);
  65. dst[i+3]= bswap_32(src[i+3]);
  66. dst[i+4]= bswap_32(src[i+4]);
  67. dst[i+5]= bswap_32(src[i+5]);
  68. dst[i+6]= bswap_32(src[i+6]);
  69. dst[i+7]= bswap_32(src[i+7]);
  70. }
  71. for(;i<w; i++){
  72. dst[i+0]= bswap_32(src[i+0]);
  73. }
  74. }
  75. static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
  76. int i;
  77. for(i=0; i<w-1; i++){
  78. acc+= src[i];
  79. dst[i]= acc;
  80. i++;
  81. acc+= src[i];
  82. dst[i]= acc;
  83. }
  84. for(; i<w; i++){
  85. acc+= src[i];
  86. dst[i]= acc;
  87. }
  88. return acc;
  89. }
  90. static inline void add_median_prediction(uint8_t *dst, uint8_t *src1, uint8_t *diff, int w, int *left, int *left_top){
  91. int i;
  92. uint8_t l, lt;
  93. l= *left;
  94. lt= *left_top;
  95. for(i=0; i<w; i++){
  96. l= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF) + diff[i];
  97. lt= src1[i];
  98. dst[i]= l;
  99. }
  100. *left= l;
  101. *left_top= lt;
  102. }
  103. //FIXME optimize
  104. static inline void sub_median_prediction(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
  105. int i;
  106. uint8_t l, lt;
  107. l= *left;
  108. lt= *left_top;
  109. for(i=0; i<w; i++){
  110. const int pred= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF);
  111. lt= src1[i];
  112. l= src2[i];
  113. dst[i]= l - pred;
  114. }
  115. *left= l;
  116. *left_top= lt;
  117. }
  118. static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
  119. int i;
  120. int r,g,b;
  121. r= *red;
  122. g= *green;
  123. b= *blue;
  124. for(i=0; i<w; i++){
  125. b+= src[4*i+0];
  126. g+= src[4*i+1];
  127. r+= src[4*i+2];
  128. dst[4*i+0]= b;
  129. dst[4*i+1]= g;
  130. dst[4*i+2]= r;
  131. }
  132. *red= r;
  133. *green= g;
  134. *blue= b;
  135. }
  136. //FIXME optimize
  137. static inline int sub_left_prediction(uint8_t *dst, uint8_t *src, int w, int left){
  138. int i;
  139. for(i=0; i<w; i++){
  140. const int temp= src[i];
  141. dst[i]= temp - left;
  142. left= temp;
  143. }
  144. return left;
  145. }
  146. static void read_len_table(uint8_t *dst, GetBitContext *gb){
  147. int i, val, repeat;
  148. for(i=0; i<256;){
  149. repeat= get_bits(gb, 3);
  150. val = get_bits(gb, 5);
  151. if(repeat==0)
  152. repeat= get_bits(gb, 8);
  153. //printf("%d %d\n", val, repeat);
  154. while (repeat--)
  155. dst[i++] = val;
  156. }
  157. }
  158. static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
  159. int len, index;
  160. uint32_t bits=0;
  161. for(len=32; len>0; len--){
  162. int bit= 1<<(32-len);
  163. for(index=0; index<256; index++){
  164. if(len_table[index]==len){
  165. if(bits & (bit-1)){
  166. fprintf(stderr, "Error generating huffman table\n");
  167. return -1;
  168. }
  169. dst[index]= bits>>(32-len);
  170. bits+= bit;
  171. }
  172. }
  173. }
  174. return 0;
  175. }
  176. static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
  177. uint64_t counts[2*size];
  178. int up[2*size];
  179. int offset, i, next;
  180. for(offset=1; ; offset<<=1){
  181. for(i=0; i<size; i++){
  182. counts[i]= stats[i] + offset - 1;
  183. }
  184. for(next=size; next<size*2; next++){
  185. uint64_t min1, min2;
  186. int min1_i, min2_i;
  187. min1=min2= INT64_MAX;
  188. min1_i= min2_i=-1;
  189. for(i=0; i<next; i++){
  190. if(min2 > counts[i]){
  191. if(min1 > counts[i]){
  192. min2= min1;
  193. min2_i= min1_i;
  194. min1= counts[i];
  195. min1_i= i;
  196. }else{
  197. min2= counts[i];
  198. min2_i= i;
  199. }
  200. }
  201. }
  202. if(min2==INT64_MAX) break;
  203. counts[next]= min1 + min2;
  204. counts[min1_i]=
  205. counts[min2_i]= MAX_INT64;
  206. up[min1_i]=
  207. up[min2_i]= next;
  208. up[next]= -1;
  209. }
  210. for(i=0; i<size; i++){
  211. int len;
  212. int index=i;
  213. for(len=0; up[index] != -1; len++)
  214. index= up[index];
  215. if(len > 32) break;
  216. dst[i]= len;
  217. }
  218. if(i==size) break;
  219. }
  220. }
  221. static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
  222. GetBitContext gb;
  223. int i;
  224. init_get_bits(&gb, src, length);
  225. for(i=0; i<3; i++){
  226. read_len_table(s->len[i], &gb);
  227. if(generate_bits_table(s->bits[i], s->len[i])<0){
  228. return -1;
  229. }
  230. #if 0
  231. for(j=0; j<256; j++){
  232. printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
  233. }
  234. #endif
  235. init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4);
  236. }
  237. return 0;
  238. }
/* Set up the fixed ("classic") tables used by v0/v1 huffyuv files.
 * The required table data (classic_shift_luma/chroma, classic_add_*) is
 * not present in this file, so the implementation is compiled out and
 * such files are rejected with -1. */
static int read_old_huffman_tables(HYuvContext *s){
#if 0
    GetBitContext gb;
    int i;

    init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma));
    read_len_table(s->len[0], &gb);
    init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma));
    read_len_table(s->len[1], &gb);

    for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
    for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];

    /* RGB modes share the luma table for all three components */
    if(s->bitstream_bpp >= 24){
        memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
        memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
    }
    memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
    memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));

    for(i=0; i<3; i++)
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4);

    return 0;
#else
    fprintf(stderr, "v1 huffyuv is not supported \n");
    return -1;
#endif
}
  263. static int decode_init(AVCodecContext *avctx)
  264. {
  265. HYuvContext *s = avctx->priv_data;
  266. int width, height, y_size, c_size, stride;
  267. s->avctx= avctx;
  268. s->flags= avctx->flags;
  269. dsputil_init(&s->dsp, avctx->dsp_mask);
  270. width= s->width= avctx->width;
  271. height= s->height= avctx->height;
  272. s->bgr32=1;
  273. assert(width && height);
  274. //if(avctx->extradata)
  275. // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
  276. if(avctx->extradata_size){
  277. if(avctx->bits_per_sample&7)
  278. s->version=1; // do such files exist at all?
  279. else
  280. s->version=2;
  281. }else
  282. s->version=0;
  283. if(s->version==2){
  284. int method;
  285. method= ((uint8_t*)avctx->extradata)[0];
  286. s->decorrelate= method&64 ? 1 : 0;
  287. s->predictor= method&63;
  288. s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
  289. if(s->bitstream_bpp==0)
  290. s->bitstream_bpp= avctx->bits_per_sample&~7;
  291. if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
  292. return -1;
  293. }else{
  294. switch(avctx->bits_per_sample&7){
  295. case 1:
  296. s->predictor= LEFT;
  297. s->decorrelate= 0;
  298. break;
  299. case 2:
  300. s->predictor= LEFT;
  301. s->decorrelate= 1;
  302. break;
  303. case 3:
  304. s->predictor= PLANE;
  305. s->decorrelate= avctx->bits_per_sample >= 24;
  306. break;
  307. case 4:
  308. s->predictor= MEDIAN;
  309. s->decorrelate= 0;
  310. break;
  311. default:
  312. s->predictor= LEFT; //OLD
  313. s->decorrelate= 0;
  314. break;
  315. }
  316. s->bitstream_bpp= avctx->bits_per_sample & ~7;
  317. if(read_old_huffman_tables(s) < 0)
  318. return -1;
  319. }
  320. s->interlaced= height > 288;
  321. c_size= 0;
  322. switch(s->bitstream_bpp){
  323. case 12:
  324. avctx->pix_fmt = PIX_FMT_YUV420P;
  325. stride= (width+15)&~15;
  326. c_size= height*stride/4;
  327. break;
  328. case 16:
  329. if(s->yuy2){
  330. avctx->pix_fmt = PIX_FMT_YUV422;
  331. stride= (width*2+15)&~15;
  332. }else{
  333. avctx->pix_fmt = PIX_FMT_YUV422P;
  334. stride= (width+15)&~15;
  335. c_size= height*stride/2;
  336. }
  337. break;
  338. case 24:
  339. case 32:
  340. if(s->bgr32){
  341. avctx->pix_fmt = PIX_FMT_BGRA32;
  342. stride= (width*4+15)&~15;
  343. }else{
  344. avctx->pix_fmt = PIX_FMT_BGR24;
  345. stride= (width*3+15)&~15;
  346. }
  347. break;
  348. default:
  349. assert(0);
  350. stride=0; //gcc fix
  351. }
  352. y_size= height*stride;
  353. s->linesize[0]= stride;
  354. s->picture[0]= av_mallocz(y_size);
  355. if(c_size){
  356. s->picture[1]= av_mallocz(c_size);
  357. s->picture[2]= av_mallocz(c_size);
  358. s->linesize[1]= s->linesize[2]= stride/2;
  359. memset(s->picture[1], 128, c_size);
  360. memset(s->picture[2], 128, c_size);
  361. }
  362. // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced);
  363. return 0;
  364. }
  365. static void store_table(HYuvContext *s, uint8_t *len){
  366. int i;
  367. int index= s->avctx->extradata_size;
  368. for(i=0; i<256;){
  369. int cur=i;
  370. int val= len[i];
  371. int repeat;
  372. for(; i<256 && len[i]==val; i++);
  373. repeat= i - cur;
  374. if(repeat>7){
  375. ((uint8_t*)s->avctx->extradata)[index++]= val;
  376. ((uint8_t*)s->avctx->extradata)[index++]= repeat;
  377. }else{
  378. ((uint8_t*)s->avctx->extradata)[index++]= val | (repeat<<5);
  379. }
  380. }
  381. s->avctx->extradata_size= index;
  382. }
/* Encoder initialization. Only YUV422P input is supported. Builds the
 * huffman tables either from first-pass statistics (avctx->stats_in) or
 * from a fixed model favouring small residuals, and stores them in the
 * extradata. Returns 0 on success, -1 on error. */
static int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j, width, height;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx->dsp_mask);

    width= s->width= avctx->width;
    height= s->height= avctx->height;
    assert(width && height);

    /* NOTE(review): allocation results are not checked before use */
    avctx->extradata= av_mallocz(1024*10);
    avctx->stats_out= av_mallocz(1024*10);
    s->version=2;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    default:
        fprintf(stderr, "format not supported\n");
        return -1;
    }
    avctx->bits_per_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;
    s->predictor= avctx->prediction_method;

    /* extradata header: method byte, bpp, 2 reserved bytes; the
       run-length coded length tables are appended by store_table() */
    ((uint8_t*)avctx->extradata)[0]= s->predictor;
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]=
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        /* two-pass: accumulate first-pass stats (3 planes x 256 counts,
           whitespace separated, possibly several frames' worth) */
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;      // avoid zero-frequency symbols

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;   // malformed stats string
                    p=next;
                }
            }
            /* presumably: stop when too little input remains for another
               block -- TODO confirm the intent of this 3-char check */
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        /* single pass: assume small residuals are the most likely */
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);
                s->stats[i][j]= 100000000/(d+1);
            }
    }

    /* derive lengths and codes per plane and append each table to the
       extradata */
    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i], 256);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        store_table(s, s->len[i]);
    }

    /* reset the stats; they are re-collected while encoding (pass 1) */
    for(i=0; i<3; i++)
        for(j=0; j<256; j++)
            s->stats[i][j]= 0;

    s->interlaced= height > 288;

// printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced);

    s->picture_number=0;

    return 0;
}
  450. static void decode_422_bitstream(HYuvContext *s, int count){
  451. int i;
  452. count/=2;
  453. for(i=0; i<count; i++){
  454. s->temp[0][2*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  455. s->temp[1][ i ]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
  456. s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  457. s->temp[2][ i ]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
  458. }
  459. }
  460. static void encode_422_bitstream(HYuvContext *s, int count){
  461. int i;
  462. count/=2;
  463. if(s->flags&CODEC_FLAG_PASS1){
  464. for(i=0; i<count; i++){
  465. s->stats[0][ s->temp[0][2*i ] ]++;
  466. s->stats[1][ s->temp[1][ i ] ]++;
  467. s->stats[0][ s->temp[0][2*i+1] ]++;
  468. s->stats[2][ s->temp[2][ i ] ]++;
  469. }
  470. }else{
  471. for(i=0; i<count; i++){
  472. put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
  473. put_bits(&s->pb, s->len[1][ s->temp[1][ i ] ], s->bits[1][ s->temp[1][ i ] ]);
  474. put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
  475. put_bits(&s->pb, s->len[2][ s->temp[2][ i ] ], s->bits[2][ s->temp[2][ i ] ]);
  476. }
  477. }
  478. }
  479. static void decode_bgr_bitstream(HYuvContext *s, int count){
  480. int i;
  481. if(s->decorrelate){
  482. if(s->bitstream_bpp==24){
  483. for(i=0; i<count; i++){
  484. s->temp[0][4*i+1]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
  485. s->temp[0][4*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+1];
  486. s->temp[0][4*i+2]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+1];
  487. }
  488. }else{
  489. for(i=0; i<count; i++){
  490. s->temp[0][4*i+1]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
  491. s->temp[0][4*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+1];
  492. s->temp[0][4*i+2]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+1];
  493. get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
  494. }
  495. }
  496. }else{
  497. if(s->bitstream_bpp==24){
  498. for(i=0; i<count; i++){
  499. s->temp[0][4*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  500. s->temp[0][4*i+1]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
  501. s->temp[0][4*i+2]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
  502. }
  503. }else{
  504. for(i=0; i<count; i++){
  505. s->temp[0][4*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  506. s->temp[0][4*i+1]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
  507. s->temp[0][4*i+2]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
  508. get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
  509. }
  510. }
  511. }
  512. }
/* Decode one huffyuv frame. buf holds the compressed frame as
 * byteswapped 32 bit words. On success *data_size is sizeof(AVPicture)
 * and the AVPicture in data points at the decoder-owned planes in
 * s->picture[]. Returns the number of bytes consumed, or -1 on error. */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size){
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;      // chroma width
    const int height= s->height;
    /* interlaced prediction references the previous line of the same
       field, i.e. two lines up */
    const int fake_ystride= s->interlaced ? s->linesize[0]*2 : s->linesize[0];
    const int fake_ustride= s->interlaced ? s->linesize[1]*2 : s->linesize[1];
    const int fake_vstride= s->interlaced ? s->linesize[2]*2 : s->linesize[2];
    int i;
    AVPicture *picture = data;

    *data_size = 0;

    /* no supplementary picture */
    if (buf_size == 0)
        return 0;

    /* NOTE(review): buf_size is not checked against the fixed 3MB
       bitstream_buffer -- confirm callers bound it */
    bswap_buf((uint32_t*)s->bitstream_buffer, (uint32_t*)buf, buf_size/4);

    init_get_bits(&s->gb, s->bitstream_buffer, buf_size);

    if(s->bitstream_bpp<24){    // planar YUV path
        int y;
        int lefty, leftu, leftv;
        int lefttopy, lefttopu, lefttopv;

        if(s->yuy2){
            /* packed YUY2 output: consume the 4 raw first-pixel bytes,
               then bail out -- not implemented */
            s->picture[0][3]= get_bits(&s->gb, 8);
            s->picture[0][2]= get_bits(&s->gb, 8);
            s->picture[0][1]= get_bits(&s->gb, 8);
            s->picture[0][0]= get_bits(&s->gb, 8);

            fprintf(stderr, "YUY2 output isnt implemenetd yet\n");
            return -1;
        }else{
            /* the first 2 pixels (V0 Y1 U0 Y0) are stored raw */
            leftv= s->picture[2][0]= get_bits(&s->gb, 8);
            lefty= s->picture[0][1]= get_bits(&s->gb, 8);
            leftu= s->picture[1][0]= get_bits(&s->gb, 8);
            s->picture[0][0]= get_bits(&s->gb, 8);

            switch(s->predictor){
            case LEFT:
            case PLANE:
                /* first line minus the 2 raw pixels is left predicted */
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(s->picture[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(s->picture[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(s->picture[2] + 1, s->temp[2], width2-1, leftv);
                }

                for(y=1; y<s->height; y++){
                    uint8_t *ydst, *udst, *vdst;

                    decode_422_bitstream(s, width);

                    ydst= s->picture[0] + s->linesize[0]*y;
                    udst= s->picture[1] + s->linesize[1]*y;
                    vdst= s->picture[2] + s->linesize[2]*y;

                    lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
                        leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
                    }
                    if(s->predictor == PLANE){
                        /* PLANE: the left-decoded values are deltas to
                           the line above (same field) */
                        if(y>s->interlaced){
                            s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                            if(!(s->flags&CODEC_FLAG_GRAY)){
                                s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                                s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
                            }
                        }
                    }
                }
                break;
            case MEDIAN:
                /* first line except first 2 pixels is left predicted */
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(s->picture[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(s->picture[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(s->picture[2] + 1, s->temp[2], width2-1, leftv);
                }

                y=1;

                /* second line is left predicted for interlaced case */
                if(s->interlaced){
                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(s->picture[0] + s->linesize[0], s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        /* NOTE(review): linesize[2]/linesize[1] look
                           swapped here; harmless while decode_init keeps
                           them equal, but should be confirmed */
                        leftu= add_left_prediction(s->picture[1] + s->linesize[2], s->temp[1], width2, leftu);
                        leftv= add_left_prediction(s->picture[2] + s->linesize[1], s->temp[2], width2, leftv);
                    }
                    y++;
                }

                /* next 4 pixels are left predicted too */
                decode_422_bitstream(s, 4);
                lefty= add_left_prediction(s->picture[0] + fake_ystride, s->temp[0], 4, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(s->picture[1] + fake_ustride, s->temp[1], 2, leftu);
                    leftv= add_left_prediction(s->picture[2] + fake_vstride, s->temp[2], 2, leftv);
                }

                /* next line except the first 4 pixels is median predicted */
                lefttopy= s->picture[0][3];
                decode_422_bitstream(s, width-4);
                add_median_prediction(s->picture[0] + fake_ystride+4, s->picture[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    lefttopu= s->picture[1][1];
                    lefttopv= s->picture[2][1];
                    add_median_prediction(s->picture[1] + fake_ustride+2, s->picture[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
                    add_median_prediction(s->picture[2] + fake_vstride+2, s->picture[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
                }
                y++;

                /* all remaining lines are fully median predicted */
                for(; y<height; y++){
                    uint8_t *ydst, *udst, *vdst;

                    decode_422_bitstream(s, width);

                    ydst= s->picture[0] + s->linesize[0]*y;
                    udst= s->picture[1] + s->linesize[1]*y;
                    vdst= s->picture[2] + s->linesize[2]*y;

                    add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        add_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                        add_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
                    }
                }
                break;
            }
        }
    }else{    // 24/32 bpp RGB path
        int y;
        int leftr, leftg, leftb;
        const int last_line= (height-1)*s->linesize[0];   // stored bottom-up

        /* the first pixel is stored raw */
        if(s->bitstream_bpp==32){
            s->picture[0][last_line+3]= get_bits(&s->gb, 8);
            leftr= s->picture[0][last_line+2]= get_bits(&s->gb, 8);
            leftg= s->picture[0][last_line+1]= get_bits(&s->gb, 8);
            leftb= s->picture[0][last_line+0]= get_bits(&s->gb, 8);
        }else{
            leftr= s->picture[0][last_line+2]= get_bits(&s->gb, 8);
            leftg= s->picture[0][last_line+1]= get_bits(&s->gb, 8);
            leftb= s->picture[0][last_line+0]= get_bits(&s->gb, 8);
            skip_bits(&s->gb, 8);
        }

        if(s->bgr32){
            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_bgr_bitstream(s, width-1);
                add_left_prediction_bgr32(s->picture[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);

                for(y=s->height-2; y>=0; y--){ //yes its stored upside down
                    decode_bgr_bitstream(s, width);

                    add_left_prediction_bgr32(s->picture[0] + s->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
                    if(s->predictor == PLANE){
                        if((y&s->interlaced)==0){
                            s->dsp.add_bytes(s->picture[0] + s->linesize[0]*y,
                                             s->picture[0] + s->linesize[0]*y + fake_ystride, fake_ystride);
                        }
                    }
                }
                break;
            default:
                fprintf(stderr, "prediction type not supported!\n");
            }
        }else{
            fprintf(stderr, "BGR24 output isnt implemenetd yet\n");
            return -1;
        }
    }
    emms_c();

    /* hand out pointers to the internal planes */
    for(i=0;i<3;i++) {
        picture->data[i] = s->picture[i];
        picture->linesize[i]= s->linesize[i];
    }

    *data_size = sizeof(AVPicture);

    return (get_bits_count(&s->gb)+7)>>3;
}
  676. static int decode_end(AVCodecContext *avctx)
  677. {
  678. HYuvContext *s = avctx->priv_data;
  679. int i;
  680. for(i=0; i<3; i++){
  681. av_freep(&s->picture[i]);
  682. free_vlc(&s->vlc[i]);
  683. }
  684. return 0;
  685. }
  686. static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
  687. HYuvContext *s = avctx->priv_data;
  688. AVPicture *pict = data;
  689. const int width= s->width;
  690. const int width2= s->width>>1;
  691. const int height= s->height;
  692. const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
  693. const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
  694. const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
  695. int i, size;
  696. init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
  697. for(i=0; i<3; i++){
  698. s->picture[i]= pict->data[i];
  699. s->linesize[i]= pict->linesize[i];
  700. }
  701. if(avctx->pix_fmt == PIX_FMT_YUV422P){
  702. int lefty, leftu, leftv, y;
  703. put_bits(&s->pb, 8, leftv= s->picture[2][0]);
  704. put_bits(&s->pb, 8, lefty= s->picture[0][1]);
  705. put_bits(&s->pb, 8, leftu= s->picture[1][0]);
  706. put_bits(&s->pb, 8, s->picture[0][0]);
  707. lefty= sub_left_prediction(s->temp[0], s->picture[0]+2, width-2 , lefty);
  708. leftu= sub_left_prediction(s->temp[1], s->picture[1]+1, width2-1, leftu);
  709. leftv= sub_left_prediction(s->temp[2], s->picture[2]+1, width2-1, leftv);
  710. encode_422_bitstream(s, width-2);
  711. if(s->predictor==MEDIAN){
  712. int lefttopy, lefttopu, lefttopv;
  713. y=1;
  714. if(s->interlaced){
  715. lefty= sub_left_prediction(s->temp[0], s->picture[0]+s->linesize[0], width , lefty);
  716. leftu= sub_left_prediction(s->temp[1], s->picture[1]+s->linesize[1], width2, leftu);
  717. leftv= sub_left_prediction(s->temp[2], s->picture[2]+s->linesize[2], width2, leftv);
  718. encode_422_bitstream(s, width);
  719. y++;
  720. }
  721. lefty= sub_left_prediction(s->temp[0], s->picture[0]+fake_ystride, 4, lefty);
  722. leftu= sub_left_prediction(s->temp[1], s->picture[1]+fake_ystride, 2, leftu);
  723. leftv= sub_left_prediction(s->temp[2], s->picture[2]+fake_ystride, 2, leftv);
  724. encode_422_bitstream(s, 4);
  725. lefttopy= s->picture[0][3];
  726. lefttopu= s->picture[1][1];
  727. lefttopv= s->picture[2][1];
  728. sub_median_prediction(s->temp[0], s->picture[0]+4, s->picture[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
  729. sub_median_prediction(s->temp[1], s->picture[1]+2, s->picture[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
  730. sub_median_prediction(s->temp[2], s->picture[2]+2, s->picture[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
  731. encode_422_bitstream(s, width-4);
  732. y++;
  733. for(; y<height; y++){
  734. uint8_t *ydst, *udst, *vdst;
  735. ydst= s->picture[0] + s->linesize[0]*y;
  736. udst= s->picture[1] + s->linesize[1]*y;
  737. vdst= s->picture[2] + s->linesize[2]*y;
  738. sub_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
  739. sub_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
  740. sub_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
  741. encode_422_bitstream(s, width);
  742. }
  743. }else{
  744. for(y=1; y<height; y++){
  745. uint8_t *ydst, *udst, *vdst;
  746. ydst= s->picture[0] + s->linesize[0]*y;
  747. udst= s->picture[1] + s->linesize[1]*y;
  748. vdst= s->picture[2] + s->linesize[2]*y;
  749. if(s->predictor == PLANE && s->interlaced < y){
  750. s->dsp.diff_bytes(s->temp[0], ydst, ydst - fake_ystride, width);
  751. s->dsp.diff_bytes(s->temp[1], udst, udst - fake_ustride, width2);
  752. s->dsp.diff_bytes(s->temp[2], vdst, vdst - fake_vstride, width2);
  753. lefty= sub_left_prediction(s->temp[0], s->temp[0], width , lefty);
  754. leftu= sub_left_prediction(s->temp[1], s->temp[1], width2, leftu);
  755. leftv= sub_left_prediction(s->temp[2], s->temp[2], width2, leftv);
  756. }else{
  757. lefty= sub_left_prediction(s->temp[0], ydst, width , lefty);
  758. leftu= sub_left_prediction(s->temp[1], udst, width2, leftu);
  759. leftv= sub_left_prediction(s->temp[2], vdst, width2, leftv);
  760. }
  761. encode_422_bitstream(s, width);
  762. }
  763. }
  764. }else{
  765. fprintf(stderr, "Format not supported!\n");
  766. }
  767. emms_c();
  768. size= (get_bit_count(&s->pb)+31)/32;
  769. if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
  770. int j;
  771. char *p= avctx->stats_out;
  772. for(i=0; i<3; i++){
  773. for(j=0; j<256; j++){
  774. sprintf(p, "%Ld ", s->stats[i][j]);
  775. p+= strlen(p);
  776. s->stats[i][j]= 0;
  777. }
  778. sprintf(p, "\n");
  779. p++;
  780. }
  781. }else{
  782. bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
  783. }
  784. avctx->key_frame= 1;
  785. avctx->pict_type= I_TYPE;
  786. s->picture_number++;
  787. return size*4;
  788. }
  789. static int encode_end(AVCodecContext *avctx)
  790. {
  791. // HYuvContext *s = avctx->priv_data;
  792. av_freep(&avctx->extradata);
  793. av_freep(&avctx->stats_out);
  794. return 0;
  795. }
/* Decoder registration. Fields are positional; see avcodec.h for the
 * AVCodec member order of this libavcodec version. */
AVCodec huffyuv_decoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    decode_init,
    NULL,            // no encode callback in the decoder entry
    decode_end,
    decode_frame,
    0,               // NOTE(review): presumably capabilities -- confirm against avcodec.h
    NULL
};
/* Encoder registration (name/type/id shared with the decoder above);
 * trailing AVCodec members are left zero-initialized. */
AVCodec huffyuv_encoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
};