You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1131 lines
36KB

  1. /*
  2. * huffyuv codec for libavcodec
  3. *
  4. * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, write to the Free Software
  18. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  19. *
  20. * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
  21. * the algorithm used
  22. */
  23. #include "common.h"
  24. #include "avcodec.h"
  25. #include "dsputil.h"
  26. #ifndef INT64_MAX
  27. #define INT64_MAX 9223372036854775807LL
  28. #endif
  29. #define VLC_BITS 11
/* Prediction modes, as stored in the low 6 bits of the stream-header 'method' byte. */
typedef enum Predictor{
    LEFT= 0,   /* predict from the previous sample on the same line */
    PLANE,     /* left prediction plus a per-line delta against the line above */
    MEDIAN,    /* median of left, above and (left + above - above-left) */
} Predictor;
/* Codec-wide state shared by the huffyuv encoder and decoder. */
typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;          /* prediction mode from the stream header */
    GetBitContext gb;             /* bitstream reader (decode side) */
    PutBitContext pb;             /* bitstream writer (encode side) */
    int interlaced;               /* set when height > 288; predictors then reference 2 lines up */
    int decorrelate;              /* RGB mode: R and B are coded as differences against G */
    int bitstream_bpp;            /* bits per pixel as coded in the stream (12/16/24/32) */
    int version;                  /* 0: no extradata, 1: old style, 2: current header layout */
    int yuy2; //use yuy2 instead of 422P
    int bgr32; //use bgr32 instead of bgr24
    int width, height;
    int flags;                    /* copy of avctx->flags */
    int picture_number;
    int last_slice_end;           /* first row not yet reported via draw_horiz_band */
    int linesize[3];
    uint8_t __align8 temp[3][2500];        /* per-plane scratch rows; limits width to 2500 */
    uint64_t stats[3][256];       /* symbol frequencies for (two-pass) table building */
    uint8_t len[3][256];          /* Huffman code lengths, one table per plane */
    uint32_t bits[3][256];        /* Huffman codes, one table per plane */
    VLC vlc[3];                   /* decode tables built from len/bits */
    uint8_t __align8 *picture[3];
    uint8_t __align8 bitstream_buffer[1024*1024*3]; //FIXME dynamic alloc or some other solution
    DSPContext dsp;
}HYuvContext;
  60. static inline void bswap_buf(uint32_t *dst, uint32_t *src, int w){
  61. int i;
  62. for(i=0; i+8<=w; i+=8){
  63. dst[i+0]= bswap_32(src[i+0]);
  64. dst[i+1]= bswap_32(src[i+1]);
  65. dst[i+2]= bswap_32(src[i+2]);
  66. dst[i+3]= bswap_32(src[i+3]);
  67. dst[i+4]= bswap_32(src[i+4]);
  68. dst[i+5]= bswap_32(src[i+5]);
  69. dst[i+6]= bswap_32(src[i+6]);
  70. dst[i+7]= bswap_32(src[i+7]);
  71. }
  72. for(;i<w; i++){
  73. dst[i+0]= bswap_32(src[i+0]);
  74. }
  75. }
  76. static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
  77. int i;
  78. for(i=0; i<w-1; i++){
  79. acc+= src[i];
  80. dst[i]= acc;
  81. i++;
  82. acc+= src[i];
  83. dst[i]= acc;
  84. }
  85. for(; i<w; i++){
  86. acc+= src[i];
  87. dst[i]= acc;
  88. }
  89. return acc;
  90. }
  91. static inline void add_median_prediction(uint8_t *dst, uint8_t *src1, uint8_t *diff, int w, int *left, int *left_top){
  92. int i;
  93. uint8_t l, lt;
  94. l= *left;
  95. lt= *left_top;
  96. for(i=0; i<w; i++){
  97. l= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF) + diff[i];
  98. lt= src1[i];
  99. dst[i]= l;
  100. }
  101. *left= l;
  102. *left_top= lt;
  103. }
  104. //FIXME optimize
  105. static inline void sub_median_prediction(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
  106. int i;
  107. uint8_t l, lt;
  108. l= *left;
  109. lt= *left_top;
  110. for(i=0; i<w; i++){
  111. const int pred= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF);
  112. lt= src1[i];
  113. l= src2[i];
  114. dst[i]= l - pred;
  115. }
  116. *left= l;
  117. *left_top= lt;
  118. }
  119. static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
  120. int i;
  121. int r,g,b;
  122. r= *red;
  123. g= *green;
  124. b= *blue;
  125. for(i=0; i<w; i++){
  126. b+= src[4*i+0];
  127. g+= src[4*i+1];
  128. r+= src[4*i+2];
  129. dst[4*i+0]= b;
  130. dst[4*i+1]= g;
  131. dst[4*i+2]= r;
  132. }
  133. *red= r;
  134. *green= g;
  135. *blue= b;
  136. }
  137. static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
  138. int i;
  139. if(w<32){
  140. for(i=0; i<w; i++){
  141. const int temp= src[i];
  142. dst[i]= temp - left;
  143. left= temp;
  144. }
  145. return left;
  146. }else{
  147. for(i=0; i<16; i++){
  148. const int temp= src[i];
  149. dst[i]= temp - left;
  150. left= temp;
  151. }
  152. s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
  153. return src[w-1];
  154. }
  155. }
  156. static void read_len_table(uint8_t *dst, GetBitContext *gb){
  157. int i, val, repeat;
  158. for(i=0; i<256;){
  159. repeat= get_bits(gb, 3);
  160. val = get_bits(gb, 5);
  161. if(repeat==0)
  162. repeat= get_bits(gb, 8);
  163. //printf("%d %d\n", val, repeat);
  164. while (repeat--)
  165. dst[i++] = val;
  166. }
  167. }
  168. static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
  169. int len, index;
  170. uint32_t bits=0;
  171. for(len=32; len>0; len--){
  172. int bit= 1<<(32-len);
  173. for(index=0; index<256; index++){
  174. if(len_table[index]==len){
  175. if(bits & (bit-1)){
  176. fprintf(stderr, "Error generating huffman table\n");
  177. return -1;
  178. }
  179. dst[index]= bits>>(32-len);
  180. bits+= bit;
  181. }
  182. }
  183. }
  184. return 0;
  185. }
/*
 * Build Huffman code lengths (at most 32 bits) from symbol frequencies.
 * A classic Huffman tree is built; if any resulting code would exceed 32
 * bits, the frequencies are flattened with an ever larger additive offset
 * and the tree is rebuilt until everything fits.
 */
static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
    uint64_t counts[2*size];   /* leaf weights followed by internal-node weights */
    int up[2*size];            /* parent links; -1 marks the current root */
    int offset, i, next;

    for(offset=1; ; offset<<=1){
        for(i=0; i<size; i++){
            counts[i]= stats[i] + offset - 1;   /* larger offset -> flatter tree */
        }

        /* Huffman construction: repeatedly merge the two lightest live nodes */
        for(next=size; next<size*2; next++){
            uint64_t min1, min2;
            int min1_i, min2_i;

            min1=min2= INT64_MAX;
            min1_i= min2_i=-1;

            /* linear scan for the two smallest weights among live nodes */
            for(i=0; i<next; i++){
                if(min2 > counts[i]){
                    if(min1 > counts[i]){
                        min2= min1;
                        min2_i= min1_i;
                        min1= counts[i];
                        min1_i= i;
                    }else{
                        min2= counts[i];
                        min2_i= i;
                    }
                }
            }

            if(min2==INT64_MAX) break;   /* fewer than two live nodes: tree complete */

            counts[next]= min1 + min2;
            counts[min1_i]=
            counts[min2_i]= INT64_MAX;   /* mark the merged children as consumed */
            up[min1_i]=
            up[min2_i]= next;
            up[next]= -1;                /* newest node is the root so far */
        }

        /* code length of a symbol = depth of its leaf = number of parent hops */
        for(i=0; i<size; i++){
            int len;
            int index=i;

            for(len=0; up[index] != -1; len++)
                index= up[index];

            if(len > 32) break;   /* too deep for a 32-bit code: flatten and retry */

            dst[i]= len;
        }
        if(i==size) break;   /* every length fit: done */
    }
}
  231. static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
  232. GetBitContext gb;
  233. int i;
  234. init_get_bits(&gb, src, length);
  235. for(i=0; i<3; i++){
  236. read_len_table(s->len[i], &gb);
  237. if(generate_bits_table(s->bits[i], s->len[i])<0){
  238. return -1;
  239. }
  240. #if 0
  241. for(j=0; j<256; j++){
  242. printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
  243. }
  244. #endif
  245. init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4);
  246. }
  247. return 0;
  248. }
/*
 * Set up the fixed tables used by v1 ("classic") huffyuv files.
 * The classic table data (classic_shift_luma, classic_add_luma, ...) is not
 * present in this file, so the whole path is compiled out and v1 files are
 * rejected with an error. Returns 0 on success, -1 otherwise.
 */
static int read_old_huffman_tables(HYuvContext *s){
#if 0
    GetBitContext gb;
    int i;

    init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma));
    read_len_table(s->len[0], &gb);
    init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma));
    read_len_table(s->len[1], &gb);

    for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
    for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];

    /* RGB streams share one table for all planes; chroma copies luma */
    if(s->bitstream_bpp >= 24){
        memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
        memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
    }
    memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
    memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));

    for(i=0; i<3; i++)
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4);

    return 0;
#else
    fprintf(stderr, "v1 huffyuv is not supported \n");
    return -1;
#endif
}
/*
 * Decoder init: parse the stream header / extradata, build the Huffman
 * tables, pick the output pixel format and (unless DR1 direct rendering is
 * used) allocate the output planes. Returns 0 on success, -1 on error.
 */
static int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int width, height, y_size, c_size, stride;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx->dsp_mask);

    width= s->width= avctx->width;
    height= s->height= avctx->height;
    s->bgr32=1;   /* always decode RGB streams to BGR32 (BGR24 path unimplemented) */
    assert(width && height);

    /* version detection: v2 carries its tables in extradata */
    if(avctx->extradata_size){
        if((avctx->bits_per_sample&7) && avctx->bits_per_sample != 12)
            s->version=1; // do such files exist at all?
        else
            s->version=2;
    }else
        s->version=0;

    if(s->version==2){
        int method;

        /* byte 0: predictor in the low 6 bits, decorrelate flag in bit 6 */
        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_sample&~7;

        /* tables start after the 4-byte header
           NOTE(review): the full extradata_size is passed rather than
           extradata_size - 4 -- looks like an over-read of the bit reader's
           window; confirm against the header layout */
        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
            return -1;
    }else{
        /* old files: mode is encoded in the low bits of bits_per_sample */
        switch(avctx->bits_per_sample&7){
        case 1:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        case 2:
            s->predictor= LEFT;
            s->decorrelate= 1;
            break;
        case 3:
            s->predictor= PLANE;
            s->decorrelate= avctx->bits_per_sample >= 24;
            break;
        case 4:
            s->predictor= MEDIAN;
            s->decorrelate= 0;
            break;
        default:
            s->predictor= LEFT; //OLD
            s->decorrelate= 0;
            break;
        }
        s->bitstream_bpp= avctx->bits_per_sample & ~7;

        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    s->interlaced= height > 288;   /* heuristic: tall frames are assumed interlaced */

    /* choose output format and plane sizes; strides are rounded up to 16 */
    c_size= 0;
    switch(s->bitstream_bpp){
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        stride= (width+15)&~15;
        c_size= height*stride/4;
        break;
    case 16:
        if(s->yuy2){
            avctx->pix_fmt = PIX_FMT_YUV422;
            stride= (width*2+15)&~15;
        }else{
            avctx->pix_fmt = PIX_FMT_YUV422P;
            stride= (width+15)&~15;
            c_size= height*stride/2;
        }
        break;
    case 24:
    case 32:
        if(s->bgr32){
            avctx->pix_fmt = PIX_FMT_BGRA32;
            stride= (width*4+15)&~15;
        }else{
            avctx->pix_fmt = PIX_FMT_BGR24;
            stride= (width*3+15)&~15;
        }
        break;
    default:
        assert(0);
        stride=0; //gcc fix
    }

    y_size= height*stride;

    /* with DR1 the app supplies buffers per frame; otherwise allocate here */
    if(!(avctx->flags&CODEC_FLAG_DR1)){
        s->linesize[0]= stride;
        s->picture[0]= av_mallocz(y_size);

        if(c_size){
            s->picture[1]= av_mallocz(c_size);
            s->picture[2]= av_mallocz(c_size);
            s->linesize[1]= s->linesize[2]= stride/2;

            /* neutral chroma so gray-only decoding shows no color cast */
            memset(s->picture[1], 128, c_size);
            memset(s->picture[2], 128, c_size);
        }
    }

    return 0;
}
  377. static void store_table(HYuvContext *s, uint8_t *len){
  378. int i;
  379. int index= s->avctx->extradata_size;
  380. for(i=0; i<256;){
  381. int cur=i;
  382. int val= len[i];
  383. int repeat;
  384. for(; i<256 && len[i]==val; i++);
  385. repeat= i - cur;
  386. if(repeat>7){
  387. ((uint8_t*)s->avctx->extradata)[index++]= val;
  388. ((uint8_t*)s->avctx->extradata)[index++]= repeat;
  389. }else{
  390. ((uint8_t*)s->avctx->extradata)[index++]= val | (repeat<<5);
  391. }
  392. }
  393. s->avctx->extradata_size= index;
  394. }
/*
 * Encoder init: allocate extradata/stats buffers, pick the bitstream format
 * from the input pixel format, seed the symbol statistics (either from a
 * first-pass stats file or from a generic 1/x model), build the Huffman
 * tables and serialize them into extradata. Returns 0 on success, -1 on error.
 */
static int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j, width, height;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx->dsp_mask);

    width= s->width= avctx->width;
    height= s->height= avctx->height;
    assert(width && height);

    /* fixed-size buffers for the serialized tables and the pass-1 stats
       NOTE(review): 10KB for stats_out may be tight for 3*256 "%Ld " entries
       of large counts -- confirm the bound */
    avctx->extradata= av_mallocz(1024*10);
    avctx->stats_out= av_mallocz(1024*10);
    s->version=2;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        if(avctx->strict_std_compliance>=0){
            fprintf(stderr, "YV12-huffyuv is experimental, there WILL be no compatbility! (use (v)strict=-1)\n");
            return -1;
        }
        s->bitstream_bpp= 12;
        break;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    default:
        fprintf(stderr, "format not supported\n");
        return -1;
    }
    avctx->bits_per_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;   /* always 0 for the formats above */
    s->predictor= avctx->prediction_method;

    /* 4-byte header: method byte, bpp, two reserved zero bytes */
    ((uint8_t*)avctx->extradata)[0]= s->predictor;
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]=
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        /* two-pass mode: accumulate the counts written by a previous pass */
        char *p= avctx->stats_in;

        /* start at 1 so no symbol ends up with zero probability */
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;   /* malformed stats file */
                    p=next;
                }
            }
            /* stop when fewer than ~3 chars remain (end of stats data) */
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        /* single pass: generic model favoring residuals near 0 (mod 256) */
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i], 256);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        store_table(s, s->len[i]);
    }

    /* reset the stats; they now collect counts for the NEXT pass */
    for(i=0; i<3; i++)
        for(j=0; j<256; j++)
            s->stats[i][j]= 0;

    s->interlaced= height > 288;

    s->picture_number=0;

    return 0;
}
  469. static void decode_422_bitstream(HYuvContext *s, int count){
  470. int i;
  471. count/=2;
  472. for(i=0; i<count; i++){
  473. s->temp[0][2*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  474. s->temp[1][ i ]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
  475. s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  476. s->temp[2][ i ]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
  477. }
  478. }
  479. static void decode_gray_bitstream(HYuvContext *s, int count){
  480. int i;
  481. count/=2;
  482. for(i=0; i<count; i++){
  483. s->temp[0][2*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  484. s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
  485. }
  486. }
  487. static void encode_422_bitstream(HYuvContext *s, int count){
  488. int i;
  489. count/=2;
  490. if(s->flags&CODEC_FLAG_PASS1){
  491. for(i=0; i<count; i++){
  492. s->stats[0][ s->temp[0][2*i ] ]++;
  493. s->stats[1][ s->temp[1][ i ] ]++;
  494. s->stats[0][ s->temp[0][2*i+1] ]++;
  495. s->stats[2][ s->temp[2][ i ] ]++;
  496. }
  497. }else{
  498. for(i=0; i<count; i++){
  499. put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
  500. put_bits(&s->pb, s->len[1][ s->temp[1][ i ] ], s->bits[1][ s->temp[1][ i ] ]);
  501. put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
  502. put_bits(&s->pb, s->len[2][ s->temp[2][ i ] ], s->bits[2][ s->temp[2][ i ] ]);
  503. }
  504. }
  505. }
  506. static void encode_gray_bitstream(HYuvContext *s, int count){
  507. int i;
  508. count/=2;
  509. if(s->flags&CODEC_FLAG_PASS1){
  510. for(i=0; i<count; i++){
  511. s->stats[0][ s->temp[0][2*i ] ]++;
  512. s->stats[0][ s->temp[0][2*i+1] ]++;
  513. }
  514. }else{
  515. for(i=0; i<count; i++){
  516. put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
  517. put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
  518. }
  519. }
  520. }
/*
 * Decode one row of 'count' BGR(A) pixels into s->temp[0], 4 bytes per pixel
 * in B,G,R order. With decorrelate set, G is coded directly and B/R are
 * coded as offsets from G. For 32bpp streams a fourth symbol per pixel is
 * read and discarded (its value is never stored).
 */
static void decode_bgr_bitstream(HYuvContext *s, int count){
    int i;

    if(s->decorrelate){
        if(s->bitstream_bpp==24){
            for(i=0; i<count; i++){
                /* G first, then B and R relative to it */
                s->temp[0][4*i+1]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
                s->temp[0][4*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+1];
                s->temp[0][4*i+2]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+1];
            }
        }else{
            for(i=0; i<count; i++){
                s->temp[0][4*i+1]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
                s->temp[0][4*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+1];
                s->temp[0][4*i+2]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+1];
                get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?! 4th (alpha) symbol is consumed but discarded
            }
        }
    }else{
        if(s->bitstream_bpp==24){
            for(i=0; i<count; i++){
                s->temp[0][4*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
                s->temp[0][4*i+1]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
                s->temp[0][4*i+2]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
            }
        }else{
            for(i=0; i<count; i++){
                s->temp[0][4*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
                s->temp[0][4*i+1]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
                s->temp[0][4*i+2]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
                get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?! 4th (alpha) symbol is consumed but discarded
            }
        }
    }
}
  555. static void draw_slice(HYuvContext *s, int y){
  556. int h, cy;
  557. UINT8 *src_ptr[3];
  558. if(s->avctx->draw_horiz_band==NULL)
  559. return;
  560. h= y - s->last_slice_end;
  561. y -= h;
  562. if(s->bitstream_bpp==12){
  563. cy= y>>1;
  564. }else{
  565. cy= y;
  566. }
  567. src_ptr[0] = s->picture[0] + s->linesize[0]*y;
  568. src_ptr[1] = s->picture[1] + s->linesize[1]*cy;
  569. src_ptr[2] = s->picture[2] + s->linesize[2]*cy;
  570. emms_c();
  571. s->avctx->draw_horiz_band(s->avctx, src_ptr, s->linesize[0], y, s->width, h);
  572. s->last_slice_end= y + h;
  573. }
/*
 * Decode one frame. The input words are byte-swapped into a scratch buffer,
 * then each row is entropy-decoded into s->temp and reconstructed with the
 * stream's predictor. YUV output is produced top-down with slice callbacks;
 * RGB streams are stored bottom-up and emitted as one slice.
 * Returns the number of bytes consumed, or -1 on error.
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size){
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    int fake_ystride, fake_ustride, fake_vstride;
    int i;
    AVPicture *picture = data;

    *data_size = 0;

    /* no supplementary picture */
    if (buf_size == 0)
        return 0;

    /* NOTE(review): bitstream_buffer is a fixed 3MB; a larger buf_size would
       overflow it, and the last buf_size%4 bytes are never swapped */
    bswap_buf((uint32_t*)s->bitstream_buffer, (uint32_t*)buf, buf_size/4);

    init_get_bits(&s->gb, s->bitstream_buffer, buf_size);

    /* direct rendering: decode straight into application-provided buffers */
    if(avctx->flags&CODEC_FLAG_DR1){
        if(avctx->get_buffer_callback(avctx, s->width, s->height, I_TYPE) < 0){
            fprintf(stderr, "get_buffer() failed\n");
            return -1;
        }
        s->linesize[0]= avctx->dr_stride;
        s->linesize[1]=
        s->linesize[2]= avctx->dr_uvstride;
        for(i=0; i<3;i++)
            s->picture[i]= avctx->dr_buffer[i];
    }

    /* "fake" strides skip one line when interlaced, so each field predicts
       from the previous line of the SAME field */
    fake_ystride= s->interlaced ? s->linesize[0]*2 : s->linesize[0];
    fake_ustride= s->interlaced ? s->linesize[1]*2 : s->linesize[1];
    fake_vstride= s->interlaced ? s->linesize[2]*2 : s->linesize[2];

    s->last_slice_end= 0;

    if(s->bitstream_bpp<24){
        int y, cy;
        int lefty, leftu, leftv;
        int lefttopy, lefttopu, lefttopv;

        if(s->yuy2){
            /* first 4 samples are stored raw, high byte first */
            s->picture[0][3]= get_bits(&s->gb, 8);
            s->picture[0][2]= get_bits(&s->gb, 8);
            s->picture[0][1]= get_bits(&s->gb, 8);
            s->picture[0][0]= get_bits(&s->gb, 8);

            fprintf(stderr, "YUY2 output isnt implemenetd yet\n");
            return -1;
        }else{
            /* raw start samples: V0, Y1, U0, Y0 -- they seed the left predictors */
            leftv= s->picture[2][0]= get_bits(&s->gb, 8);
            lefty= s->picture[0][1]= get_bits(&s->gb, 8);
            leftu= s->picture[1][0]= get_bits(&s->gb, 8);
            s->picture[0][0]= get_bits(&s->gb, 8);

            switch(s->predictor){
            case LEFT:
            case PLANE:
                /* first line minus the raw start samples is left predicted */
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(s->picture[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(s->picture[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(s->picture[2] + 1, s->temp[2], width2-1, leftv);
                }

                for(cy=y=1; y<s->height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    /* 4:2:0: every other line carries luma only */
                    if(s->bitstream_bpp==12){
                        decode_gray_bitstream(s, width);

                        ydst= s->picture[0] + s->linesize[0]*y;

                        lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                        if(s->predictor == PLANE){
                            if(y>s->interlaced)
                                s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                        }
                        y++;
                        if(y>=s->height) break;
                    }

                    draw_slice(s, y);

                    ydst= s->picture[0] + s->linesize[0]*y;
                    udst= s->picture[1] + s->linesize[1]*cy;
                    vdst= s->picture[2] + s->linesize[2]*cy;

                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
                        leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
                    }
                    /* PLANE: additionally add the line above (same field) */
                    if(s->predictor == PLANE){
                        if(cy>s->interlaced){
                            s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                            if(!(s->flags&CODEC_FLAG_GRAY)){
                                s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                                s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
                            }
                        }
                    }
                }
                draw_slice(s, height);

                break;
            case MEDIAN:
                /* first line except first 2 pixels is left predicted */
                decode_422_bitstream(s, width-2);
                lefty= add_left_prediction(s->picture[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(s->picture[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= add_left_prediction(s->picture[2] + 1, s->temp[2], width2-1, leftv);
                }

                cy=y=1;

                /* second line is left predicted for interlaced case */
                if(s->interlaced){
                    decode_422_bitstream(s, width);
                    lefty= add_left_prediction(s->picture[0] + s->linesize[0], s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        /* NOTE(review): linesize[2] / linesize[1] appear swapped
                           here; harmless because both are always equal, but
                           confirm against the encoder's matching code */
                        leftu= add_left_prediction(s->picture[1] + s->linesize[2], s->temp[1], width2, leftu);
                        leftv= add_left_prediction(s->picture[2] + s->linesize[1], s->temp[2], width2, leftv);
                    }
                    y++; cy++;
                }

                /* next 4 pixels are left predicted too */
                decode_422_bitstream(s, 4);
                lefty= add_left_prediction(s->picture[0] + fake_ystride, s->temp[0], 4, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= add_left_prediction(s->picture[1] + fake_ustride, s->temp[1], 2, leftu);
                    leftv= add_left_prediction(s->picture[2] + fake_vstride, s->temp[2], 2, leftv);
                }

                /* next line except the first 4 pixels is median predicted */
                lefttopy= s->picture[0][3];
                decode_422_bitstream(s, width-4);
                add_median_prediction(s->picture[0] + fake_ystride+4, s->picture[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    lefttopu= s->picture[1][1];
                    lefttopv= s->picture[2][1];
                    add_median_prediction(s->picture[1] + fake_ustride+2, s->picture[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
                    add_median_prediction(s->picture[2] + fake_vstride+2, s->picture[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
                }
                y++; cy++;

                for(; y<height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        /* catch up luma-only lines until y is twice cy */
                        while(2*cy > y){
                            decode_gray_bitstream(s, width);
                            ydst= s->picture[0] + s->linesize[0]*y;
                            add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                            y++;
                        }
                        if(y>=height) break;
                    }
                    draw_slice(s, y);

                    decode_422_bitstream(s, width);

                    ydst= s->picture[0] + s->linesize[0]*y;
                    udst= s->picture[1] + s->linesize[1]*cy;
                    vdst= s->picture[2] + s->linesize[2]*cy;

                    add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        add_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                        add_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
                    }
                }

                draw_slice(s, height);

                break;
            }
        }
    }else{
        int y;
        int leftr, leftg, leftb;
        const int last_line= (height-1)*s->linesize[0];   /* RGB frames are stored bottom-up */

        if(s->bitstream_bpp==32){
            /* raw first pixel: A, R, G, B */
            s->picture[0][last_line+3]= get_bits(&s->gb, 8);
            leftr= s->picture[0][last_line+2]= get_bits(&s->gb, 8);
            leftg= s->picture[0][last_line+1]= get_bits(&s->gb, 8);
            leftb= s->picture[0][last_line+0]= get_bits(&s->gb, 8);
        }else{
            leftr= s->picture[0][last_line+2]= get_bits(&s->gb, 8);
            leftg= s->picture[0][last_line+1]= get_bits(&s->gb, 8);
            leftb= s->picture[0][last_line+0]= get_bits(&s->gb, 8);
            skip_bits(&s->gb, 8);
        }

        if(s->bgr32){
            switch(s->predictor){
            case LEFT:
            case PLANE:
                /* bottom line minus the raw first pixel */
                decode_bgr_bitstream(s, width-1);
                add_left_prediction_bgr32(s->picture[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);

                for(y=s->height-2; y>=0; y--){ //yes its stored upside down
                    decode_bgr_bitstream(s, width);

                    add_left_prediction_bgr32(s->picture[0] + s->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
                    if(s->predictor == PLANE){
                        if((y&s->interlaced)==0){
                            s->dsp.add_bytes(s->picture[0] + s->linesize[0]*y,
                                             s->picture[0] + s->linesize[0]*y + fake_ystride, fake_ystride);
                        }
                    }
                }
                draw_slice(s, height); // just 1 large slice as this isnt possible in reverse order
                break;
            default:
                fprintf(stderr, "prediction type not supported!\n");
            }
        }else{
            fprintf(stderr, "BGR24 output isnt implemenetd yet\n");
            return -1;
        }
    }
    emms_c();

    for(i=0;i<3;i++) {
        picture->data[i] = s->picture[i];
        picture->linesize[i]= s->linesize[i];
    }

    *data_size = sizeof(AVPicture);

    return (get_bits_count(&s->gb)+7)>>3;
}
  775. static int decode_end(AVCodecContext *avctx)
  776. {
  777. HYuvContext *s = avctx->priv_data;
  778. int i;
  779. for(i=0; i<3; i++){
  780. if(!(avctx->flags&CODEC_FLAG_DR1))
  781. av_freep(&s->picture[i]);
  782. free_vlc(&s->vlc[i]);
  783. }
  784. return 0;
  785. }
  786. static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
  787. HYuvContext *s = avctx->priv_data;
  788. AVPicture *pict = data;
  789. const int width= s->width;
  790. const int width2= s->width>>1;
  791. const int height= s->height;
  792. const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
  793. const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
  794. const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
  795. int i, size;
  796. init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
  797. for(i=0; i<3; i++){
  798. s->picture[i]= pict->data[i];
  799. s->linesize[i]= pict->linesize[i];
  800. }
  801. if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
  802. int lefty, leftu, leftv, y, cy;
  803. put_bits(&s->pb, 8, leftv= s->picture[2][0]);
  804. put_bits(&s->pb, 8, lefty= s->picture[0][1]);
  805. put_bits(&s->pb, 8, leftu= s->picture[1][0]);
  806. put_bits(&s->pb, 8, s->picture[0][0]);
  807. lefty= sub_left_prediction(s, s->temp[0], s->picture[0]+2, width-2 , lefty);
  808. leftu= sub_left_prediction(s, s->temp[1], s->picture[1]+1, width2-1, leftu);
  809. leftv= sub_left_prediction(s, s->temp[2], s->picture[2]+1, width2-1, leftv);
  810. encode_422_bitstream(s, width-2);
  811. if(s->predictor==MEDIAN){
  812. int lefttopy, lefttopu, lefttopv;
  813. cy=y=1;
  814. if(s->interlaced){
  815. lefty= sub_left_prediction(s, s->temp[0], s->picture[0]+s->linesize[0], width , lefty);
  816. leftu= sub_left_prediction(s, s->temp[1], s->picture[1]+s->linesize[1], width2, leftu);
  817. leftv= sub_left_prediction(s, s->temp[2], s->picture[2]+s->linesize[2], width2, leftv);
  818. encode_422_bitstream(s, width);
  819. y++; cy++;
  820. }
  821. lefty= sub_left_prediction(s, s->temp[0], s->picture[0]+fake_ystride, 4, lefty);
  822. leftu= sub_left_prediction(s, s->temp[1], s->picture[1]+fake_ystride, 2, leftu);
  823. leftv= sub_left_prediction(s, s->temp[2], s->picture[2]+fake_ystride, 2, leftv);
  824. encode_422_bitstream(s, 4);
  825. lefttopy= s->picture[0][3];
  826. lefttopu= s->picture[1][1];
  827. lefttopv= s->picture[2][1];
  828. sub_median_prediction(s->temp[0], s->picture[0]+4, s->picture[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
  829. sub_median_prediction(s->temp[1], s->picture[1]+2, s->picture[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
  830. sub_median_prediction(s->temp[2], s->picture[2]+2, s->picture[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
  831. encode_422_bitstream(s, width-4);
  832. y++; cy++;
  833. for(; y<height; y++,cy++){
  834. uint8_t *ydst, *udst, *vdst;
  835. if(s->bitstream_bpp==12){
  836. while(2*cy > y){
  837. ydst= s->picture[0] + s->linesize[0]*y;
  838. sub_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
  839. encode_gray_bitstream(s, width);
  840. y++;
  841. }
  842. if(y>=height) break;
  843. }
  844. ydst= s->picture[0] + s->linesize[0]*y;
  845. udst= s->picture[1] + s->linesize[1]*cy;
  846. vdst= s->picture[2] + s->linesize[2]*cy;
  847. sub_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
  848. sub_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
  849. sub_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
  850. encode_422_bitstream(s, width);
  851. }
  852. }else{
  853. for(cy=y=1; y<height; y++,cy++){
  854. uint8_t *ydst, *udst, *vdst;
  855. /* encode a luma only line & y++ */
  856. if(s->bitstream_bpp==12){
  857. ydst= s->picture[0] + s->linesize[0]*y;
  858. if(s->predictor == PLANE && s->interlaced < y){
  859. s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
  860. lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
  861. }else{
  862. lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
  863. }
  864. encode_gray_bitstream(s, width);
  865. y++;
  866. if(y>=height) break;
  867. }
  868. ydst= s->picture[0] + s->linesize[0]*y;
  869. udst= s->picture[1] + s->linesize[1]*cy;
  870. vdst= s->picture[2] + s->linesize[2]*cy;
  871. if(s->predictor == PLANE && s->interlaced < cy){
  872. s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
  873. s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
  874. s->dsp.diff_bytes(s->temp[3], vdst, vdst - fake_vstride, width2);
  875. lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
  876. leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
  877. leftv= sub_left_prediction(s, s->temp[2], s->temp[3], width2, leftv);
  878. }else{
  879. lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
  880. leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
  881. leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
  882. }
  883. encode_422_bitstream(s, width);
  884. }
  885. }
  886. }else{
  887. fprintf(stderr, "Format not supported!\n");
  888. }
  889. emms_c();
  890. size= (get_bit_count(&s->pb)+31)/32;
  891. if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
  892. int j;
  893. char *p= avctx->stats_out;
  894. for(i=0; i<3; i++){
  895. for(j=0; j<256; j++){
  896. sprintf(p, "%Ld ", s->stats[i][j]);
  897. p+= strlen(p);
  898. s->stats[i][j]= 0;
  899. }
  900. sprintf(p, "\n");
  901. p++;
  902. }
  903. }else{
  904. flush_put_bits(&s->pb);
  905. bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
  906. }
  907. avctx->key_frame= 1;
  908. avctx->pict_type= I_TYPE;
  909. s->picture_number++;
  910. return size*4;
  911. }
  912. static int encode_end(AVCodecContext *avctx)
  913. {
  914. // HYuvContext *s = avctx->priv_data;
  915. av_freep(&avctx->extradata);
  916. av_freep(&avctx->stats_out);
  917. return 0;
  918. }
/* Decoder registration (positional AVCodec initializer of this API era). */
AVCodec huffyuv_decoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    decode_init,
    NULL,                /* encode callback: none for the decoder entry */
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,   /* supports direct rendering + slice callbacks */
    NULL
};
/* Encoder registration; trailing AVCodec fields (decode, capabilities, ...)
 * are implicitly zero-initialized. */
AVCodec huffyuv_encoder = {
    "huffyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
};