You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1156 lines
48KB

  1. /*
  2. * This file is part of FFmpeg.
  3. *
  4. * FFmpeg is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU Lesser General Public
  6. * License as published by the Free Software Foundation; either
  7. * version 2.1 of the License, or (at your option) any later version.
  8. *
  9. * FFmpeg is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * Lesser General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU Lesser General Public
  15. * License along with FFmpeg; if not, write to the Free Software
  16. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. /**
  19. * @file
  20. * Intel Indeo 3 (IV31, IV32, etc.) video decoder for FFmpeg
  21. * written, produced, and directed by Alan Smithee
  22. *
  23. * For some documentation see:
  24. * http://wiki.multimedia.cx/index.php?title=Indeo_3
  25. */
  26. #include "libavutil/imgutils.h"
  27. #include "avcodec.h"
  28. #include "dsputil.h"
  29. #include "bytestream.h"
  30. #include "indeo3data.h"
/* One set of planar YUV buffers; the decoder keeps two of these
 * (current and reference) and swaps between them each frame. */
typedef struct
{
    uint8_t *Ybuf;           /* luma plane */
    uint8_t *Ubuf;           /* chroma U plane */
    uint8_t *Vbuf;           /* chroma V plane */
    unsigned short y_w, y_h;   /* luma plane width/height (padded, see iv_alloc_frames) */
    unsigned short uv_w, uv_h; /* chroma plane width/height (padded) */
} YUVBufs;
/* Private decoder state, stored in AVCodecContext.priv_data. */
typedef struct Indeo3DecodeContext {
    AVCodecContext *avctx;   /* owning codec context, used for logging */
    int width, height;       /* current coded picture dimensions */
    AVFrame frame;           /* picture handed back to the caller */
    uint8_t *buf;            /* single slab backing both iv_frame[] buffer sets */
    YUVBufs iv_frame[2];     /* double-buffered YUV planes */
    YUVBufs *cur_frame;      /* planes being decoded into this frame */
    YUVBufs *ref_frame;      /* planes of the previously decoded frame */
    uint8_t *ModPred;        /* 8*128 prediction remap table, built in build_modpred() */
    uint8_t *corrector_type; /* 24*256 code-byte classification table, built in build_modpred() */
} Indeo3DecodeContext;
/* Per-table thresholds used by build_modpred(): a code byte below
 * corrector_type_0[i] is classified as type 1 in corrector_type. */
static const uint8_t corrector_type_0[24] = {
    195, 159, 133, 115, 101,  93,  87,  77,
    195, 159, 133, 115, 101,  93,  87,  77,
    128,  79,  79,  79,  79,  79,  79,  79
};
/* Types assigned by build_modpred() to code bytes 248..255. */
static const uint8_t corrector_type_2[8] = { 9, 7, 6, 8, 5, 4, 3, 2 };
  56. static av_cold int build_modpred(Indeo3DecodeContext *s)
  57. {
  58. int i, j;
  59. if (!(s->ModPred = av_malloc(8 * 128)))
  60. return AVERROR(ENOMEM);
  61. for (i=0; i < 128; ++i) {
  62. s->ModPred[i+0*128] = i > 126 ? 254 : 2*(i + 1 - ((i + 1) % 2));
  63. s->ModPred[i+1*128] = i == 7 ? 20 :
  64. i == 119 ||
  65. i == 120 ? 236 : 2*(i + 2 - ((i + 1) % 3));
  66. s->ModPred[i+2*128] = i > 125 ? 248 : 2*(i + 2 - ((i + 2) % 4));
  67. s->ModPred[i+3*128] = 2*(i + 1 - ((i - 3) % 5));
  68. s->ModPred[i+4*128] = i == 8 ? 20 : 2*(i + 1 - ((i - 3) % 6));
  69. s->ModPred[i+5*128] = 2*(i + 4 - ((i + 3) % 7));
  70. s->ModPred[i+6*128] = i > 123 ? 240 : 2*(i + 4 - ((i + 4) % 8));
  71. s->ModPred[i+7*128] = 2*(i + 5 - ((i + 4) % 9));
  72. }
  73. if (!(s->corrector_type = av_malloc(24 * 256)))
  74. return AVERROR(ENOMEM);
  75. for (i=0; i < 24; ++i) {
  76. for (j=0; j < 256; ++j) {
  77. s->corrector_type[i*256+j] = j < corrector_type_0[i] ? 1 :
  78. j < 248 || (i == 16 && j == 248) ? 0 :
  79. corrector_type_2[j - 248];
  80. }
  81. }
  82. return 0;
  83. }
/**
 * (Re)allocate the double-buffered YUV planes as one contiguous slab.
 *
 * Dimensions are rounded up to multiples of 4; chroma planes are 1/4 the
 * luma size in each direction (YUV410).  The slab layout interleaves one
 * guard row of width pixels before/between the planes; guard pixels are
 * initialized to 0x80 (mid grey) so out-of-strip reads by the predictor
 * see neutral values.  Called again from iv_decode_frame() when the
 * coded dimensions change.
 *
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure.
 */
static av_cold int iv_alloc_frames(Indeo3DecodeContext *s)
{
    int luma_width    = (s->width  + 3) & ~3,
        luma_height   = (s->height + 3) & ~3,
        chroma_width  = ((luma_width  >> 2) + 3) & ~3,
        chroma_height = ((luma_height >> 2) + 3) & ~3,
        luma_pixels   = luma_width * luma_height,
        chroma_pixels = chroma_width * chroma_height,
        i;
    /* 2 luma planes + 3 luma guard rows + 4 chroma (plane + guard row) blocks */
    unsigned int bufsize = luma_pixels * 2 + luma_width * 3 +
                           (chroma_pixels + chroma_width) * 4;

    av_freep(&s->buf);
    if(!(s->buf = av_malloc(bufsize)))
        return AVERROR(ENOMEM);

    s->iv_frame[0].y_w  = s->iv_frame[1].y_w  = luma_width;
    s->iv_frame[0].y_h  = s->iv_frame[1].y_h  = luma_height;
    s->iv_frame[0].uv_w = s->iv_frame[1].uv_w = chroma_width;
    s->iv_frame[0].uv_h = s->iv_frame[1].uv_h = chroma_height;

    /* Carve plane pointers out of the slab; each plane is preceded by one
     * guard row (hence the "+ width" gaps). */
    s->iv_frame[0].Ybuf = s->buf + luma_width;
    i = luma_pixels + luma_width * 2;
    s->iv_frame[1].Ybuf = s->buf + i;
    i += (luma_pixels + luma_width);
    s->iv_frame[0].Ubuf = s->buf + i;
    i += (chroma_pixels + chroma_width);
    s->iv_frame[1].Ubuf = s->buf + i;
    i += (chroma_pixels + chroma_width);
    s->iv_frame[0].Vbuf = s->buf + i;
    i += (chroma_pixels + chroma_width);
    s->iv_frame[1].Vbuf = s->buf + i;

    /* Fill the guard rows immediately above each plane with mid grey. */
    for(i = 1; i <= luma_width; i++)
        s->iv_frame[0].Ybuf[-i] = s->iv_frame[1].Ybuf[-i] =
        s->iv_frame[0].Ubuf[-i] = 0x80;
    for(i = 1; i <= chroma_width; i++) {
        s->iv_frame[1].Ubuf[-i] = 0x80;
        s->iv_frame[0].Vbuf[-i] = 0x80;
        s->iv_frame[1].Vbuf[-i] = 0x80;
        s->iv_frame[1].Vbuf[chroma_pixels+i-1] = 0x80;
    }

    return 0;
}
  124. static av_cold void iv_free_func(Indeo3DecodeContext *s)
  125. {
  126. av_freep(&s->buf);
  127. av_freep(&s->ModPred);
  128. av_freep(&s->corrector_type);
  129. }
/* One entry of the strip stack used by iv_Decode_Chunk(): a rectangular
 * region of the plane that is recursively split during decoding. */
struct ustr {
    int xpos;            /* left edge of the strip in pixels */
    int ypos;            /* top edge of the strip in pixels */
    int width;
    int height;
    int split_flag;      /* nonzero while the second half of a split is pending */
    int split_direction; /* 0 = horizontal split, 1 = vertical split */
    int usl7;            /* "reference vectors consumed" latch for cmd 2/3 */
};
/* Escape handling for a "fill with value" code (case 9): if the top bit
 * of lv1 is set, toggle rle_v3 and, when starting a run, rewind buf1 by
 * two bytes.  Always ends the current cell (lp2 = 4).
 * NOTE: deliberately NOT wrapped in do { } while (0) — call sites invoke
 * these macros without a trailing semicolon. */
#define LV1_CHECK(buf1,rle_v3,lv1,lp2) \
    if((lv1 & 0x80) != 0) { \
        if(rle_v3 != 0) \
            rle_v3 = 0; \
        else { \
            rle_v3 = 1; \
            buf1 -= 2; \
        } \
    } \
    lp2 = 4;
/* Start an RLE run if none is pending (rle_v3 == 0): read the run length
 * from *buf1; values above 32 are reduced by 32 and clear rle_v1 (the
 * callers then skip their block copy).  buf1 is stepped back one byte so
 * the run byte is re-read by the main loop.
 * NOTE: not do { } while (0) wrapped — call sites omit the semicolon. */
#define RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3) \
    if(rle_v3 == 0) { \
        rle_v2 = *buf1; \
        rle_v1 = 1; \
        if(rle_v2 > 32) { \
            rle_v2 -= 32; \
            rle_v1 = 0; \
        } \
        rle_v3 = 1; \
    } \
    buf1--;
/* Fall-through helper between cases 5 and 4: if the cell is untouched
 * (lp2 == 0) and a run is pending, consume the pending flag; otherwise
 * rewind buf1 one byte and mark a run pending.
 * NOTE: not do { } while (0) wrapped — call sites omit the semicolon. */
#define LP2_CHECK(buf1,rle_v3,lp2) \
    if(lp2 == 0 && rle_v3 != 0) \
        rle_v3 = 0; \
    else { \
        buf1--; \
        rle_v3 = 1; \
    }
/* Advance an active RLE run: decrement the counter and, when the run is
 * exhausted, clear the pending flag and skip the two stream bytes that
 * RLE_V3_CHECK re-reads.  Always ends the current cell (lp2 = 4).
 * NOTE: not do { } while (0) wrapped — call sites omit the semicolon. */
#define RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2) \
    rle_v2--; \
    if(rle_v2 == 0) { \
        rle_v3 = 0; \
        buf1 += 2; \
    } \
    lp2 = 4;
/**
 * Decode one plane chunk (Y, U or V) of an Indeo 3 frame.
 *
 * @param s             decoder context (tables, logging)
 * @param cur           destination plane (current frame)
 * @param ref           reference plane (previous frame)
 * @param width,height  plane dimensions in pixels
 * @param buf1          cell bitstream (command bits + code bytes)
 * @param cb_offset     codebook offset from the frame header
 * @param hdr           per-frame header bytes indexed by the low code nibble
 * @param buf2          motion-vector table base for cmd 3
 * @param min_width_160 initial strip width (160 for luma, 40 for chroma)
 *
 * The plane is processed as a stack of strips (strip_tbl) that commands
 * 0/1 recursively split; commands 2/3 select intra vs. motion-compensated
 * reference.  Each leaf strip is decoded 4x4 (or 4x8) cells at a time by
 * one of the k-indexed cases below, all of which write 32 bits (4 pixels)
 * per store and rely on deliberate switch fall-through plus the RLE
 * macros, which rewind/advance buf1 behind the main loop's back.
 * Errors simply 'return' — the strip stack and output are left as-is.
 */
static void iv_Decode_Chunk(Indeo3DecodeContext *s,
        uint8_t *cur, uint8_t *ref, int width, int height,
        const uint8_t *buf1, int cb_offset, const uint8_t *hdr,
        const uint8_t *buf2, int min_width_160)
{
    uint8_t bit_buf;
    unsigned int bit_pos, lv, lv1, lv2;
    int *width_tbl, width_tbl_arr[10];
    const signed char *ref_vectors;
    uint8_t *cur_frm_pos, *ref_frm_pos, *cp, *cp2;
    uint32_t *cur_lp, *ref_lp;
    const uint32_t *correction_lp[2], *correctionloworder_lp[2], *correctionhighorder_lp[2];
    uint8_t *correction_type_sp[2];
    struct ustr strip_tbl[20], *strip;
    int i, j, k, lp1, lp2, flag1, cmd, blks_width, blks_height, region_160_width,
        rle_v1, rle_v2, rle_v3;
    unsigned short res;

    bit_buf = 0;
    ref_vectors = NULL;

    /* width_tbl[j] = j rows in 32-bit words; entry [-1] is used by the
     * cases that read one row above the current position. */
    width_tbl = width_tbl_arr + 1;
    i = (width < 0 ? width + 3 : width)/4;
    for(j = -1; j < 8; j++)
        width_tbl[j] = i * j;

    strip = strip_tbl;

    /* Left edge of the last full-width region (used when merging splits). */
    for(region_160_width = 0; region_160_width < (width - min_width_160); region_160_width += min_width_160);

    /* Root strip covers the whole plane, width rounded up to a power-of-two
     * multiple of min_width_160. */
    strip->ypos = strip->xpos = 0;
    for(strip->width = min_width_160; width > strip->width; strip->width *= 2);
    strip->height = height;
    strip->split_direction = 0;
    strip->split_flag = 0;
    strip->usl7 = 0;

    bit_pos = 0;

    rle_v1 = rle_v2 = rle_v3 = 0;

    while(strip >= strip_tbl) {
        /* Fetch the next 2-bit strip command (MSB first). */
        if(bit_pos <= 0) {
            bit_pos = 8;
            bit_buf = *buf1++;
        }

        bit_pos -= 2;
        cmd = (bit_buf >> bit_pos) & 0x03;

        if(cmd == 0) {
            /* Split the strip horizontally; push the top half. */
            strip++;
            if(strip >= strip_tbl + FF_ARRAY_ELEMS(strip_tbl)) {
                av_log(s->avctx, AV_LOG_WARNING, "out of range strip\n");
                break;
            }
            memcpy(strip, strip-1, sizeof(*strip));
            strip->split_flag = 1;
            strip->split_direction = 0;
            strip->height = (strip->height > 8 ? ((strip->height+8)>>4)<<3 : 4);
            continue;
        } else if(cmd == 1) {
            /* Split the strip vertically; push the left half. */
            strip++;
            if(strip >= strip_tbl + FF_ARRAY_ELEMS(strip_tbl)) {
                av_log(s->avctx, AV_LOG_WARNING, "out of range strip\n");
                break;
            }
            memcpy(strip, strip-1, sizeof(*strip));
            strip->split_flag = 1;
            strip->split_direction = 1;
            strip->width = (strip->width > 8 ? ((strip->width+8)>>4)<<3 : 4);
            continue;
        } else if(cmd == 2) {
            /* First cmd 2 on a strip: decode intra (no reference vectors). */
            if(strip->usl7 == 0) {
                strip->usl7 = 1;
                ref_vectors = NULL;
                continue;
            }
        } else if(cmd == 3) {
            /* First cmd 3 on a strip: pick a motion vector pair from buf2. */
            if(strip->usl7 == 0) {
                strip->usl7 = 1;
                ref_vectors = (const signed char*)buf2 + (*buf1 * 2);
                buf1++;
                continue;
            }
        }

        cur_frm_pos = cur + width * strip->ypos + strip->xpos;

        /* Strip width in 4-pixel (32-bit) blocks. */
        if((blks_width = strip->width) < 0)
            blks_width += 3;
        blks_width >>= 2;
        blks_height = strip->height;

        if(ref_vectors != NULL) {
            ref_frm_pos = ref + (ref_vectors[0] + strip->ypos) * width +
                ref_vectors[1] + strip->xpos;
        } else
            ref_frm_pos = cur_frm_pos - width_tbl[4]; /* 4 rows above */

        if(cmd == 2) {
            /* Second command of a cmd-2 strip: 0 = copy from reference,
             * 1 = leave as-is, anything else = bitstream error. */
            if(bit_pos <= 0) {
                bit_pos = 8;
                bit_buf = *buf1++;
            }

            bit_pos -= 2;
            cmd = (bit_buf >> bit_pos) & 0x03;

            if(cmd == 0 || ref_vectors != NULL) {
                for(lp1 = 0; lp1 < blks_width; lp1++) {
                    for(i = 0, j = 0; i < blks_height; i++, j += width_tbl[1])
                        ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)ref_frm_pos)[j];
                    cur_frm_pos += 4;
                    ref_frm_pos += 4;
                }
            } else if(cmd != 1)
                return;
        } else {
            /* Cell header byte: high nibble selects the decode mode (k),
             * low nibble indexes the codebook via cb_offset. */
            k = *buf1 >> 4;
            j = *buf1 & 0x0f;
            buf1++;
            lv = j + cb_offset;

            /* For the "double" codebooks (8..15), pre-remap the reference
             * row through the matching ModPred sub-table. */
            if((lv - 8) <= 7 && (k == 0 || k == 3 || k == 10)) {
                cp2 = s->ModPred + ((lv - 8) << 7);
                cp = ref_frm_pos;
                for(i = 0; i < blks_width << 2; i++) {
                    int v = *cp >> 1;
                    *(cp++) = cp2[v];
                }
            }

            if(k == 1 || k == 4) {
                /* Two interleaved codebooks selected by hdr[j] nibbles. */
                lv = (hdr[j] & 0xf) + cb_offset;
                correction_type_sp[0] = s->corrector_type + (lv << 8);
                correction_lp[0] = correction + (lv << 8);
                lv = (hdr[j] >> 4) + cb_offset;
                correction_lp[1] = correction + (lv << 8);
                correction_type_sp[1] = s->corrector_type + (lv << 8);
            } else {
                /* Single codebook used for both phases. */
                correctionloworder_lp[0] = correctionloworder_lp[1] = correctionloworder + (lv << 8);
                correctionhighorder_lp[0] = correctionhighorder_lp[1] = correctionhighorder + (lv << 8);
                correction_type_sp[0] = correction_type_sp[1] = s->corrector_type + (lv << 8);
                correction_lp[0] = correction_lp[1] = correction + (lv << 8);
            }

            switch(k) {
            case 1:
            case 0: /********** CASE 0 **********/
                /* 4x4 cells corrected row-by-row against the reference. */
                for( ; blks_height > 0; blks_height -= 4) {
                    for(lp1 = 0; lp1 < blks_width; lp1++) {
                        for(lp2 = 0; lp2 < 4; ) {
                            k = *buf1++;
                            cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2];
                            ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2];

                            switch(correction_type_sp[0][k]) {
                            case 0: /* correct 4 pixels at once */
                                *cur_lp = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
                                lp2++;
                                break;
                            case 1: /* two 2-pixel corrections; extra code byte */
                                res = ((av_le2ne16(((unsigned short *)(ref_lp))[0]) >> 1) + correction_lp[lp2 & 0x01][*buf1]) << 1;
                                ((unsigned short *)cur_lp)[0] = av_le2ne16(res);
                                res = ((av_le2ne16(((unsigned short *)(ref_lp))[1]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
                                ((unsigned short *)cur_lp)[1] = av_le2ne16(res);
                                buf1++;
                                lp2++;
                                break;
                            case 2: /* copy first 2 rows from reference */
                                if(lp2 == 0) {
                                    for(i = 0, j = 0; i < 2; i++, j += width_tbl[1])
                                        cur_lp[j] = ref_lp[j];
                                    lp2 += 2;
                                }
                                break;
                            case 3: /* copy up to row 3 from reference */
                                if(lp2 < 2) {
                                    for(i = 0, j = 0; i < (3 - lp2); i++, j += width_tbl[1])
                                        cur_lp[j] = ref_lp[j];
                                    lp2 = 3;
                                }
                                break;
                            case 8: /* RLE run of skipped/copied cells */
                                if(lp2 == 0) {
                                    RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)

                                    if(rle_v1 == 1 || ref_vectors != NULL) {
                                        for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
                                            cur_lp[j] = ref_lp[j];
                                    }

                                    RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
                                    break;
                                } else {
                                    rle_v1 = 1;
                                    rle_v2 = *buf1 - 1;
                                }
                                /* fall through */
                            case 5:
                                LP2_CHECK(buf1,rle_v3,lp2)
                                /* fall through */
                            case 4: /* copy the rest of the cell */
                                for(i = 0, j = 0; i < (4 - lp2); i++, j += width_tbl[1])
                                    cur_lp[j] = ref_lp[j];
                                lp2 = 4;
                                break;
                            case 7:
                                if(rle_v3 != 0)
                                    rle_v3 = 0;
                                else {
                                    buf1--;
                                    rle_v3 = 1;
                                }
                                /* fall through */
                            case 6: /* inter only: copy whole cell */
                                if(ref_vectors != NULL) {
                                    for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
                                        cur_lp[j] = ref_lp[j];
                                }
                                lp2 = 4;
                                break;
                            case 9: /* fill cell with a replicated 7-bit value */
                                lv1 = *buf1++;
                                lv = (lv1 & 0x7F) << 1;
                                lv += (lv << 8);
                                lv += (lv << 16);
                                for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
                                    cur_lp[j] = lv;
                                LV1_CHECK(buf1,rle_v3,lv1,lp2)
                                break;
                            default:
                                return;
                            }
                        }

                        cur_frm_pos += 4;
                        ref_frm_pos += 4;
                    }

                    cur_frm_pos += ((width - blks_width) * 4);
                    ref_frm_pos += ((width - blks_width) * 4);
                }
                break;

            case 4:
            case 3: /********** CASE 3 **********/
                /* Intra-only: 4x8 cells where odd rows are decoded and even
                 * rows are interpolated from their vertical neighbours. */
                if(ref_vectors != NULL)
                    return;

                flag1 = 1; /* set on the first (top) cell row of the strip */

                for( ; blks_height > 0; blks_height -= 8) {
                    for(lp1 = 0; lp1 < blks_width; lp1++) {
                        for(lp2 = 0; lp2 < 4; ) {
                            k = *buf1++;

                            cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
                            ref_lp = ((uint32_t *)cur_frm_pos) + width_tbl[(lp2 * 2) - 1];

                            switch(correction_type_sp[lp2 & 0x01][k]) {
                            case 0:
                                cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
                                if(lp2 > 0 || flag1 == 0 || strip->ypos != 0)
                                    /* average of rows above and below, avoiding carry between bytes */
                                    cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                                else
                                    cur_lp[0] = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
                                lp2++;
                                break;

                            case 1:
                                res = ((av_le2ne16(((unsigned short *)ref_lp)[0]) >> 1) + correction_lp[lp2 & 0x01][*buf1]) << 1;
                                ((unsigned short *)cur_lp)[width_tbl[2]] = av_le2ne16(res);
                                res = ((av_le2ne16(((unsigned short *)ref_lp)[1]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
                                ((unsigned short *)cur_lp)[width_tbl[2]+1] = av_le2ne16(res);
                                if(lp2 > 0 || flag1 == 0 || strip->ypos != 0)
                                    cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                                else
                                    cur_lp[0] = cur_lp[width_tbl[1]];
                                buf1++;
                                lp2++;
                                break;

                            case 2:
                                if(lp2 == 0) {
                                    for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
                                        cur_lp[j] = *ref_lp;
                                    lp2 += 2;
                                }
                                break;

                            case 3:
                                if(lp2 < 2) {
                                    for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1])
                                        cur_lp[j] = *ref_lp;
                                    lp2 = 3;
                                }
                                break;

                            case 6:
                                lp2 = 4;
                                break;

                            case 7:
                                if(rle_v3 != 0)
                                    rle_v3 = 0;
                                else {
                                    buf1--;
                                    rle_v3 = 1;
                                }
                                lp2 = 4;
                                break;

                            case 8:
                                if(lp2 == 0) {
                                    RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)

                                    if(rle_v1 == 1) {
                                        for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
                                            cur_lp[j] = ref_lp[j];
                                    }

                                    RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
                                    break;
                                } else {
                                    rle_v2 = (*buf1) - 1;
                                    rle_v1 = 1;
                                }
                                /* fall through */
                            case 5:
                                LP2_CHECK(buf1,rle_v3,lp2)
                                /* fall through */
                            case 4:
                                for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1])
                                    cur_lp[j] = *ref_lp;
                                lp2 = 4;
                                break;

                            case 9:
                                av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
                                lv1 = *buf1++;
                                lv = (lv1 & 0x7F) << 1;
                                lv += (lv << 8);
                                lv += (lv << 16);
                                for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
                                    cur_lp[j] = lv;
                                LV1_CHECK(buf1,rle_v3,lv1,lp2)
                                break;

                            default:
                                return;
                            }
                        }

                        cur_frm_pos += 4;
                    }

                    cur_frm_pos += (((width * 2) - blks_width) * 4);
                    flag1 = 0;
                }
                break;

            case 10: /********** CASE 10 **********/
                if(ref_vectors == NULL) {
                    /* Intra: 8x8 cells, two 32-bit columns at a time, with
                     * vertical interpolation as in case 3. */
                    flag1 = 1;

                    for( ; blks_height > 0; blks_height -= 8) {
                        for(lp1 = 0; lp1 < blks_width; lp1 += 2) {
                            for(lp2 = 0; lp2 < 4; ) {
                                k = *buf1++;
                                cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
                                ref_lp = ((uint32_t *)cur_frm_pos) + width_tbl[(lp2 * 2) - 1];
                                lv1 = ref_lp[0];
                                lv2 = ref_lp[1];
                                if(lp2 == 0 && flag1 != 0) {
                                    /* First row: duplicate every other byte of
                                     * the guard row into its neighbour. */
#if HAVE_BIGENDIAN
                                    lv1 = lv1 & 0xFF00FF00;
                                    lv1 = (lv1 >> 8) | lv1;
                                    lv2 = lv2 & 0xFF00FF00;
                                    lv2 = (lv2 >> 8) | lv2;
#else
                                    lv1 = lv1 & 0x00FF00FF;
                                    lv1 = (lv1 << 8) | lv1;
                                    lv2 = lv2 & 0x00FF00FF;
                                    lv2 = (lv2 << 8) | lv2;
#endif
                                }

                                switch(correction_type_sp[lp2 & 0x01][k]) {
                                case 0:
                                    cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(lv1) >> 1) + correctionloworder_lp[lp2 & 0x01][k]) << 1);
                                    cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(lv2) >> 1) + correctionhighorder_lp[lp2 & 0x01][k]) << 1);
                                    if(lp2 > 0 || strip->ypos != 0 || flag1 == 0) {
                                        cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                                        cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
                                    } else {
                                        cur_lp[0] = cur_lp[width_tbl[1]];
                                        cur_lp[1] = cur_lp[width_tbl[1]+1];
                                    }
                                    lp2++;
                                    break;

                                case 1:
                                    cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(lv1) >> 1) + correctionloworder_lp[lp2 & 0x01][*buf1]) << 1);
                                    cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(lv2) >> 1) + correctionloworder_lp[lp2 & 0x01][k]) << 1);
                                    if(lp2 > 0 || strip->ypos != 0 || flag1 == 0) {
                                        cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                                        cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
                                    } else {
                                        cur_lp[0] = cur_lp[width_tbl[1]];
                                        cur_lp[1] = cur_lp[width_tbl[1]+1];
                                    }
                                    buf1++;
                                    lp2++;
                                    break;

                                case 2:
                                    if(lp2 == 0) {
                                        if(flag1 != 0) {
                                            for(i = 0, j = width_tbl[1]; i < 3; i++, j += width_tbl[1]) {
                                                cur_lp[j] = lv1;
                                                cur_lp[j+1] = lv2;
                                            }
                                            cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                                            cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
                                        } else {
                                            for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) {
                                                cur_lp[j] = lv1;
                                                cur_lp[j+1] = lv2;
                                            }
                                        }
                                        lp2 += 2;
                                    }
                                    break;

                                case 3:
                                    if(lp2 < 2) {
                                        if(lp2 == 0 && flag1 != 0) {
                                            for(i = 0, j = width_tbl[1]; i < 5; i++, j += width_tbl[1]) {
                                                cur_lp[j] = lv1;
                                                cur_lp[j+1] = lv2;
                                            }
                                            cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                                            cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
                                        } else {
                                            for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) {
                                                cur_lp[j] = lv1;
                                                cur_lp[j+1] = lv2;
                                            }
                                        }
                                        lp2 = 3;
                                    }
                                    break;

                                case 8:
                                    if(lp2 == 0) {
                                        RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)

                                        if(rle_v1 == 1) {
                                            if(flag1 != 0) {
                                                for(i = 0, j = width_tbl[1]; i < 7; i++, j += width_tbl[1]) {
                                                    cur_lp[j] = lv1;
                                                    cur_lp[j+1] = lv2;
                                                }
                                                cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                                                cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
                                            } else {
                                                for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) {
                                                    cur_lp[j] = lv1;
                                                    cur_lp[j+1] = lv2;
                                                }
                                            }
                                        }

                                        RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
                                        break;
                                    } else {
                                        rle_v1 = 1;
                                        rle_v2 = (*buf1) - 1;
                                    }
                                    /* fall through */
                                case 5:
                                    LP2_CHECK(buf1,rle_v3,lp2)
                                    /* fall through */
                                case 4:
                                    if(lp2 == 0 && flag1 != 0) {
                                        for(i = 0, j = width_tbl[1]; i < 7; i++, j += width_tbl[1]) {
                                            cur_lp[j] = lv1;
                                            cur_lp[j+1] = lv2;
                                        }
                                        cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                                        cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
                                    } else {
                                        for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) {
                                            cur_lp[j] = lv1;
                                            cur_lp[j+1] = lv2;
                                        }
                                    }
                                    lp2 = 4;
                                    break;

                                case 6:
                                    lp2 = 4;
                                    break;

                                case 7:
                                    if(lp2 == 0) {
                                        if(rle_v3 != 0)
                                            rle_v3 = 0;
                                        else {
                                            buf1--;
                                            rle_v3 = 1;
                                        }
                                        lp2 = 4;
                                    }
                                    break;

                                case 9:
                                    av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
                                    lv1 = *buf1;
                                    lv = (lv1 & 0x7F) << 1;
                                    lv += (lv << 8);
                                    lv += (lv << 16);
                                    for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
                                        cur_lp[j] = lv;
                                    LV1_CHECK(buf1,rle_v3,lv1,lp2)
                                    break;

                                default:
                                    return;
                                }
                            }

                            cur_frm_pos += 8;
                        }

                        cur_frm_pos += (((width * 2) - blks_width) * 4);
                        flag1 = 0;
                    }
                } else {
                    /* Inter: 8x8 cells corrected against the motion-
                     * compensated reference, no interpolation. */
                    for( ; blks_height > 0; blks_height -= 8) {
                        for(lp1 = 0; lp1 < blks_width; lp1 += 2) {
                            for(lp2 = 0; lp2 < 4; ) {
                                k = *buf1++;
                                cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
                                ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2 * 2];

                                switch(correction_type_sp[lp2 & 0x01][k]) {
                                case 0:
                                    lv1 = correctionloworder_lp[lp2 & 0x01][k];
                                    lv2 = correctionhighorder_lp[lp2 & 0x01][k];
                                    cur_lp[0] = av_le2ne32(((av_le2ne32(ref_lp[0]) >> 1) + lv1) << 1);
                                    cur_lp[1] = av_le2ne32(((av_le2ne32(ref_lp[1]) >> 1) + lv2) << 1);
                                    cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]]) >> 1) + lv1) << 1);
                                    cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]+1]) >> 1) + lv2) << 1);
                                    lp2++;
                                    break;

                                case 1:
                                    lv1 = correctionloworder_lp[lp2 & 0x01][*buf1++];
                                    lv2 = correctionloworder_lp[lp2 & 0x01][k];
                                    cur_lp[0] = av_le2ne32(((av_le2ne32(ref_lp[0]) >> 1) + lv1) << 1);
                                    cur_lp[1] = av_le2ne32(((av_le2ne32(ref_lp[1]) >> 1) + lv2) << 1);
                                    cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]]) >> 1) + lv1) << 1);
                                    cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]+1]) >> 1) + lv2) << 1);
                                    lp2++;
                                    break;

                                case 2:
                                    if(lp2 == 0) {
                                        for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) {
                                            cur_lp[j] = ref_lp[j];
                                            cur_lp[j+1] = ref_lp[j+1];
                                        }
                                        lp2 += 2;
                                    }
                                    break;

                                case 3:
                                    if(lp2 < 2) {
                                        for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) {
                                            cur_lp[j] = ref_lp[j];
                                            cur_lp[j+1] = ref_lp[j+1];
                                        }
                                        lp2 = 3;
                                    }
                                    break;

                                case 8:
                                    if(lp2 == 0) {
                                        RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
                                        for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) {
                                            ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)ref_frm_pos)[j];
                                            ((uint32_t *)cur_frm_pos)[j+1] = ((uint32_t *)ref_frm_pos)[j+1];
                                        }
                                        RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
                                        break;
                                    } else {
                                        rle_v1 = 1;
                                        rle_v2 = (*buf1) - 1;
                                    }
                                    /* fall through */
                                case 5:
                                case 7:
                                    LP2_CHECK(buf1,rle_v3,lp2)
                                    /* fall through */
                                case 6:
                                case 4:
                                    for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) {
                                        cur_lp[j] = ref_lp[j];
                                        cur_lp[j+1] = ref_lp[j+1];
                                    }
                                    lp2 = 4;
                                    break;

                                case 9:
                                    av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
                                    lv1 = *buf1;
                                    lv = (lv1 & 0x7F) << 1;
                                    lv += (lv << 8);
                                    lv += (lv << 16);
                                    for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
                                        ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)cur_frm_pos)[j+1] = lv;
                                    LV1_CHECK(buf1,rle_v3,lv1,lp2)
                                    break;

                                default:
                                    return;
                                }
                            }

                            cur_frm_pos += 8;
                            ref_frm_pos += 8;
                        }

                        cur_frm_pos += (((width * 2) - blks_width) * 4);
                        ref_frm_pos += (((width * 2) - blks_width) * 4);
                    }
                }
                break;

            case 11: /********** CASE 11 **********/
                /* Inter-only: 4x8 cells, each correction applied to two
                 * consecutive rows of the reference. */
                if(ref_vectors == NULL)
                    return;

                for( ; blks_height > 0; blks_height -= 8) {
                    for(lp1 = 0; lp1 < blks_width; lp1++) {
                        for(lp2 = 0; lp2 < 4; ) {
                            k = *buf1++;
                            cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
                            ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2 * 2];

                            switch(correction_type_sp[lp2 & 0x01][k]) {
                            case 0:
                                cur_lp[0] = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
                                cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
                                lp2++;
                                break;

                            case 1:
                                lv1 = (unsigned short)(correction_lp[lp2 & 0x01][*buf1++]);
                                lv2 = (unsigned short)(correction_lp[lp2 & 0x01][k]);
                                res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[0]) >> 1) + lv1) << 1);
                                ((unsigned short *)cur_lp)[0] = av_le2ne16(res);
                                res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[1]) >> 1) + lv2) << 1);
                                ((unsigned short *)cur_lp)[1] = av_le2ne16(res);
                                res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[width_tbl[2]]) >> 1) + lv1) << 1);
                                ((unsigned short *)cur_lp)[width_tbl[2]] = av_le2ne16(res);
                                res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[width_tbl[2]+1]) >> 1) + lv2) << 1);
                                ((unsigned short *)cur_lp)[width_tbl[2]+1] = av_le2ne16(res);
                                lp2++;
                                break;

                            case 2:
                                if(lp2 == 0) {
                                    for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
                                        cur_lp[j] = ref_lp[j];
                                    lp2 += 2;
                                }
                                break;

                            case 3:
                                if(lp2 < 2) {
                                    for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1])
                                        cur_lp[j] = ref_lp[j];
                                    lp2 = 3;
                                }
                                break;

                            case 8:
                                if(lp2 == 0) {
                                    RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)

                                    for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
                                        cur_lp[j] = ref_lp[j];

                                    RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
                                    break;
                                } else {
                                    rle_v1 = 1;
                                    rle_v2 = (*buf1) - 1;
                                }
                                /* fall through */
                            case 5:
                            case 7:
                                LP2_CHECK(buf1,rle_v3,lp2)
                                /* fall through */
                            case 4:
                            case 6:
                                for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1])
                                    cur_lp[j] = ref_lp[j];
                                lp2 = 4;
                                break;

                            case 9:
                                av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
                                lv1 = *buf1++;
                                lv = (lv1 & 0x7F) << 1;
                                lv += (lv << 8);
                                lv += (lv << 16);
                                for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
                                    cur_lp[j] = lv;
                                LV1_CHECK(buf1,rle_v3,lv1,lp2)
                                break;

                            default:
                                return;
                            }
                        }

                        cur_frm_pos += 4;
                        ref_frm_pos += 4;
                    }

                    cur_frm_pos += (((width * 2) - blks_width) * 4);
                    ref_frm_pos += (((width * 2) - blks_width) * 4);
                }
                break;

            default:
                return;
            }
        }

        /* Pop finished strips; when a strip still has a pending split,
         * convert it in place into the second half of that split. */
        for( ; strip >= strip_tbl; strip--) {
            if(strip->split_flag != 0) {
                strip->split_flag = 0;
                strip->usl7 = (strip-1)->usl7;

                if(strip->split_direction) {
                    strip->xpos += strip->width;
                    strip->width = (strip-1)->width - strip->width;
                    if(region_160_width <= strip->xpos && width < strip->width + strip->xpos)
                        strip->width = width - strip->xpos;
                } else {
                    strip->ypos += strip->height;
                    strip->height = (strip-1)->height - strip->height;
                }
                break;
            }
        }
    }
}
  846. static av_cold int indeo3_decode_init(AVCodecContext *avctx)
  847. {
  848. Indeo3DecodeContext *s = avctx->priv_data;
  849. int ret = 0;
  850. s->avctx = avctx;
  851. s->width = avctx->width;
  852. s->height = avctx->height;
  853. avctx->pix_fmt = PIX_FMT_YUV410P;
  854. avcodec_get_frame_defaults(&s->frame);
  855. if (!(ret = build_modpred(s)))
  856. ret = iv_alloc_frames(s);
  857. if (ret)
  858. iv_free_func(s);
  859. return ret;
  860. }
  861. static int iv_decode_frame(AVCodecContext *avctx,
  862. const uint8_t *buf, int buf_size)
  863. {
  864. Indeo3DecodeContext *s = avctx->priv_data;
  865. unsigned int image_width, image_height,
  866. chroma_width, chroma_height;
  867. unsigned int flags, cb_offset, data_size,
  868. y_offset, v_offset, u_offset, mc_vector_count;
  869. const uint8_t *hdr_pos, *buf_pos;
  870. buf_pos = buf;
  871. buf_pos += 18; /* skip OS header (16 bytes) and version number */
  872. flags = bytestream_get_le16(&buf_pos);
  873. data_size = bytestream_get_le32(&buf_pos);
  874. cb_offset = *buf_pos++;
  875. buf_pos += 3; /* skip reserved byte and checksum */
  876. image_height = bytestream_get_le16(&buf_pos);
  877. image_width = bytestream_get_le16(&buf_pos);
  878. if(av_image_check_size(image_width, image_height, 0, avctx))
  879. return -1;
  880. if (image_width != avctx->width || image_height != avctx->height) {
  881. int ret;
  882. avcodec_set_dimensions(avctx, image_width, image_height);
  883. s->width = avctx->width;
  884. s->height = avctx->height;
  885. ret = iv_alloc_frames(s);
  886. if (ret < 0) {
  887. s->width = s->height = 0;
  888. return ret;
  889. }
  890. }
  891. chroma_height = ((image_height >> 2) + 3) & 0x7ffc;
  892. chroma_width = ((image_width >> 2) + 3) & 0x7ffc;
  893. y_offset = bytestream_get_le32(&buf_pos);
  894. v_offset = bytestream_get_le32(&buf_pos);
  895. u_offset = bytestream_get_le32(&buf_pos);
  896. buf_pos += 4; /* reserved */
  897. hdr_pos = buf_pos;
  898. if(data_size == 0x80) return 4;
  899. if(FFMAX3(y_offset, v_offset, u_offset) >= buf_size-16) {
  900. av_log(s->avctx, AV_LOG_ERROR, "y/u/v offset outside buffer\n");
  901. return -1;
  902. }
  903. if(flags & 0x200) {
  904. s->cur_frame = s->iv_frame + 1;
  905. s->ref_frame = s->iv_frame;
  906. } else {
  907. s->cur_frame = s->iv_frame;
  908. s->ref_frame = s->iv_frame + 1;
  909. }
  910. buf_pos = buf + 16 + y_offset;
  911. mc_vector_count = bytestream_get_le32(&buf_pos);
  912. if(2LL*mc_vector_count >= buf_size-16-y_offset) {
  913. av_log(s->avctx, AV_LOG_ERROR, "mc_vector_count too large\n");
  914. return -1;
  915. }
  916. iv_Decode_Chunk(s, s->cur_frame->Ybuf, s->ref_frame->Ybuf, image_width,
  917. image_height, buf_pos + mc_vector_count * 2, cb_offset, hdr_pos, buf_pos,
  918. FFMIN(image_width, 160));
  919. if (!(s->avctx->flags & CODEC_FLAG_GRAY))
  920. {
  921. buf_pos = buf + 16 + v_offset;
  922. mc_vector_count = bytestream_get_le32(&buf_pos);
  923. if(2LL*mc_vector_count >= buf_size-16-v_offset) {
  924. av_log(s->avctx, AV_LOG_ERROR, "mc_vector_count too large\n");
  925. return -1;
  926. }
  927. iv_Decode_Chunk(s, s->cur_frame->Vbuf, s->ref_frame->Vbuf, chroma_width,
  928. chroma_height, buf_pos + mc_vector_count * 2, cb_offset, hdr_pos, buf_pos,
  929. FFMIN(chroma_width, 40));
  930. buf_pos = buf + 16 + u_offset;
  931. mc_vector_count = bytestream_get_le32(&buf_pos);
  932. if(2LL*mc_vector_count >= buf_size-16-u_offset) {
  933. av_log(s->avctx, AV_LOG_ERROR, "mc_vector_count too large\n");
  934. return -1;
  935. }
  936. iv_Decode_Chunk(s, s->cur_frame->Ubuf, s->ref_frame->Ubuf, chroma_width,
  937. chroma_height, buf_pos + mc_vector_count * 2, cb_offset, hdr_pos, buf_pos,
  938. FFMIN(chroma_width, 40));
  939. }
  940. return 8;
  941. }
/**
 * Codec decode hook: decode one packet into the internal buffers via
 * iv_decode_frame(), then copy the selected buffer set into a freshly
 * acquired AVFrame for the caller.
 */
static int indeo3_decode_frame(AVCodecContext *avctx,
                               void *data, int *data_size,
                               AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    Indeo3DecodeContext *s=avctx->priv_data;
    uint8_t *src, *dest;
    int y;

    if (iv_decode_frame(avctx, buf, buf_size) < 0)
        return -1;

    /* Recycle the previous output frame before requesting a new one. */
    if(s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);

    s->frame.reference = 0;
    if(avctx->get_buffer(avctx, &s->frame) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    /* Copy luma row by row; internal pitch is y_w, output pitch is
     * the AVFrame linesize. */
    src = s->cur_frame->Ybuf;
    dest = s->frame.data[0];
    for (y = 0; y < s->height; y++) {
        memcpy(dest, src, s->cur_frame->y_w);
        src += s->cur_frame->y_w;
        dest += s->frame.linesize[0];
    }

    if (!(s->avctx->flags & CODEC_FLAG_GRAY))
    {
        /* Chroma planes are quarter height (YUV410). */
        src = s->cur_frame->Ubuf;
        dest = s->frame.data[1];
        for (y = 0; y < s->height / 4; y++) {
            memcpy(dest, src, s->cur_frame->uv_w);
            src += s->cur_frame->uv_w;
            dest += s->frame.linesize[1];
        }

        src = s->cur_frame->Vbuf;
        dest = s->frame.data[2];
        for (y = 0; y < s->height / 4; y++) {
            memcpy(dest, src, s->cur_frame->uv_w);
            src += s->cur_frame->uv_w;
            dest += s->frame.linesize[2];
        }
    }

    *data_size=sizeof(AVFrame);
    *(AVFrame*)data= s->frame;

    return buf_size;
}
  988. static av_cold int indeo3_decode_end(AVCodecContext *avctx)
  989. {
  990. Indeo3DecodeContext *s = avctx->priv_data;
  991. iv_free_func(s);
  992. return 0;
  993. }
/* Codec registration; positional initializers labelled for clarity
 * (order must match the AVCodec struct definition of this tree). */
AVCodec ff_indeo3_decoder = {
    "indeo3",                    /* name */
    AVMEDIA_TYPE_VIDEO,          /* type */
    CODEC_ID_INDEO3,             /* id */
    sizeof(Indeo3DecodeContext), /* priv_data_size */
    indeo3_decode_init,          /* init */
    NULL,                        /* encode */
    indeo3_decode_end,           /* close */
    indeo3_decode_frame,         /* decode */
    CODEC_CAP_DR1,               /* capabilities */
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Intel Indeo 3"),
};