/*
 * Duck TrueMotion 1.0 Decoder
 * Copyright (C) 2003 Alex Beregszaszi & Mike Melanson
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file truemotion1.c
 * Duck TrueMotion v1 Video Decoder by
 * Alex Beregszaszi (alex@fsn.hu) and
 * Mike Melanson (melanson@pcisys.net)
 *
 * The TrueMotion v1 decoder outputs RGB555 (or RGB565) data for 16-bit
 * TM1 streams; 24-bit TM1 data is handled by the RGB24H code path and
 * output as PIX_FMT_RGBA32.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "common.h"
#include "avcodec.h"
#include "dsputil.h"

#include "truemotion1data.h"

typedef struct TrueMotion1Context {
    AVCodecContext *avctx;
    AVFrame frame;
    AVFrame prev_frame;

    uint8_t *buf;
    int size;

    uint8_t *mb_change_bits;
    int mb_change_bits_row_size;
    uint8_t *index_stream;
    int index_stream_size;

    int flags;
    int x, y, w, h;

    uint32_t y_predictor_table[1024];
    uint32_t c_predictor_table[1024];
    uint32_t fat_y_predictor_table[1024];
    uint32_t fat_c_predictor_table[1024];

    int compression;
    int block_type;
    int block_width;
    int block_height;

    int16_t ydt[8];
    int16_t cdt[8];
    int16_t fat_ydt[8];
    int16_t fat_cdt[8];

    int last_deltaset, last_vectable;

    unsigned int *vert_pred;
} TrueMotion1Context;

#define FLAG_SPRITE        32
#define FLAG_KEYFRAME      16
#define FLAG_INTERFRAME     8
#define FLAG_INTERPOLATED   4

struct frame_header {
    uint8_t header_size;
    uint8_t compression;
    uint8_t deltaset;
    uint8_t vectable;
    uint16_t ysize;
    uint16_t xsize;
    uint16_t checksum;
    uint8_t version;
    uint8_t header_type;
    uint8_t flags;
    uint8_t control;
    uint16_t xoffset;
    uint16_t yoffset;
    uint16_t width;
    uint16_t height;
};

#define ALGO_NOP    0
#define ALGO_RGB16V 1
#define ALGO_RGB16H 2
#define ALGO_RGB24H 3

/* these are the various block sizes that can occupy a 4x4 block */
#define BLOCK_2x2 0
#define BLOCK_2x4 1
#define BLOCK_4x2 2
#define BLOCK_4x4 3

typedef struct comp_types {
    int algorithm;
    int block_width;  // vres
    int block_height; // hres
    int block_type;
} comp_types;

/* { valid for metatype }, algorithm, num of deltas, vert res, horiz res */
static comp_types compression_types[17] = {
    { ALGO_NOP,    0, 0, 0 },

    { ALGO_RGB16V, 4, 4, BLOCK_4x4 },
    { ALGO_RGB16H, 4, 4, BLOCK_4x4 },
    { ALGO_RGB16V, 4, 2, BLOCK_4x2 },
    { ALGO_RGB16H, 4, 2, BLOCK_4x2 },

    { ALGO_RGB16V, 2, 4, BLOCK_2x4 },
    { ALGO_RGB16H, 2, 4, BLOCK_2x4 },
    { ALGO_RGB16V, 2, 2, BLOCK_2x2 },
    { ALGO_RGB16H, 2, 2, BLOCK_2x2 },

    { ALGO_NOP,    4, 4, BLOCK_4x4 },
    { ALGO_RGB24H, 4, 4, BLOCK_4x4 },
    { ALGO_NOP,    4, 2, BLOCK_4x2 },
    { ALGO_RGB24H, 4, 2, BLOCK_4x2 },

    { ALGO_NOP,    2, 4, BLOCK_2x4 },
    { ALGO_RGB24H, 2, 4, BLOCK_2x4 },
    { ALGO_NOP,    2, 2, BLOCK_2x2 },
    { ALGO_RGB24H, 2, 2, BLOCK_2x2 }
};
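
/* The compression byte from the frame header indexes this table directly;
 * the selected row supplies both the differencing algorithm and the block
 * geometry used for the rest of the frame. */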

static void select_delta_tables(TrueMotion1Context *s, int delta_table_index)
{
    int i;

    if (delta_table_index > 3)
        return;

    memcpy(s->ydt, ydts[delta_table_index], 8 * sizeof(int16_t));
    memcpy(s->cdt, cdts[delta_table_index], 8 * sizeof(int16_t));
    memcpy(s->fat_ydt, fat_ydts[delta_table_index], 8 * sizeof(int16_t));
    memcpy(s->fat_cdt, fat_cdts[delta_table_index], 8 * sizeof(int16_t));

    /* Y skinny deltas need to be halved for some reason; maybe the
     * skinny Y deltas should be modified */
    for (i = 0; i < 8; i++)
    {
        /* drop the lsb before dividing by 2 -- net effect: round down
         * when dividing a negative number (e.g., -3/2 = -2, not -1) */
        s->ydt[i] &= 0xFFFE;
        s->ydt[i] /= 2;
    }
}
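
/* The make_*_entry() helpers below build the 32-bit predictor table entries.
 * Each entry packs the deltas for a pair of 16-bit pixels (15/16-bit modes)
 * or for a single 32-bit pixel (24-bit mode): the luma delta is spread across
 * the color fields while the chroma deltas touch only the red and blue
 * fields.  The packed value is shifted left by one bit so that bit 0 stays
 * free; gen_vector_table*() later uses that bit as a "fetch the next index
 * byte" marker.  Under WORDS_BIGENDIAN the two delta-index arguments are
 * swapped so the entry lands in the frame buffer in the right byte order. */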

#ifdef WORDS_BIGENDIAN
static int make_ydt15_entry(int p2, int p1, int16_t *ydt)
#else
static int make_ydt15_entry(int p1, int p2, int16_t *ydt)
#endif
{
    int lo, hi;

    lo = ydt[p1];
    lo += (lo << 5) + (lo << 10);
    hi = ydt[p2];
    hi += (hi << 5) + (hi << 10);
    return ((lo + (hi << 16)) << 1);
}

#ifdef WORDS_BIGENDIAN
static int make_cdt15_entry(int p2, int p1, int16_t *cdt)
#else
static int make_cdt15_entry(int p1, int p2, int16_t *cdt)
#endif
{
    int r, b, lo;

    b = cdt[p2];
    r = cdt[p1] << 10;
    lo = b + r;
    return ((lo + (lo << 16)) << 1);
}

#ifdef WORDS_BIGENDIAN
static int make_ydt16_entry(int p2, int p1, int16_t *ydt)
#else
static int make_ydt16_entry(int p1, int p2, int16_t *ydt)
#endif
{
    int lo, hi;

    lo = ydt[p1];
    lo += (lo << 6) + (lo << 11);
    hi = ydt[p2];
    hi += (hi << 6) + (hi << 11);
    return ((lo + (hi << 16)) << 1);
}

#ifdef WORDS_BIGENDIAN
static int make_cdt16_entry(int p2, int p1, int16_t *cdt)
#else
static int make_cdt16_entry(int p1, int p2, int16_t *cdt)
#endif
{
    int r, b, lo;

    b = cdt[p2];
    r = cdt[p1] << 11;
    lo = b + r;
    return ((lo + (lo << 16)) << 1);
}

#ifdef WORDS_BIGENDIAN
static int make_ydt24_entry(int p2, int p1, int16_t *ydt)
#else
static int make_ydt24_entry(int p1, int p2, int16_t *ydt)
#endif
{
    int lo, hi;

    lo = ydt[p1];
    hi = ydt[p2];
    return ((lo + (hi << 8) + (hi << 16)) << 1);
}

#ifdef WORDS_BIGENDIAN
static int make_cdt24_entry(int p2, int p1, int16_t *cdt)
#else
static int make_cdt24_entry(int p1, int p2, int16_t *cdt)
#endif
{
    int r, b;

    b = cdt[p2];
    r = cdt[p1] << 16;
    return ((b + r) << 1);
}
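
/* The gen_vector_table*() functions expand the selected vector table into
 * 256 groups of four predictor entries.  For each group, the first byte of
 * sel_vector_table holds twice the number of delta pairs that follow, and
 * each subsequent byte carries two 4-bit delta indices (high and low
 * nibble).  Bit 0 of the last entry written for a group is set so that the
 * APPLY_*_PREDICTOR macros know when to fetch a fresh index byte. */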

static void gen_vector_table15(TrueMotion1Context *s, const uint8_t *sel_vector_table)
{
    int len, i, j;
    unsigned char delta_pair;

    for (i = 0; i < 1024; i += 4)
    {
        len = *sel_vector_table++ / 2;
        for (j = 0; j < len; j++)
        {
            delta_pair = *sel_vector_table++;
            s->y_predictor_table[i+j] = 0xfffffffe &
                make_ydt15_entry(delta_pair >> 4, delta_pair & 0xf, s->ydt);
            s->c_predictor_table[i+j] = 0xfffffffe &
                make_cdt15_entry(delta_pair >> 4, delta_pair & 0xf, s->cdt);
        }
        s->y_predictor_table[i+(j-1)] |= 1;
        s->c_predictor_table[i+(j-1)] |= 1;
    }
}

static void gen_vector_table16(TrueMotion1Context *s, const uint8_t *sel_vector_table)
{
    int len, i, j;
    unsigned char delta_pair;

    for (i = 0; i < 1024; i += 4)
    {
        len = *sel_vector_table++ / 2;
        for (j = 0; j < len; j++)
        {
            delta_pair = *sel_vector_table++;
            s->y_predictor_table[i+j] = 0xfffffffe &
                make_ydt16_entry(delta_pair >> 4, delta_pair & 0xf, s->ydt);
            s->c_predictor_table[i+j] = 0xfffffffe &
                make_cdt16_entry(delta_pair >> 4, delta_pair & 0xf, s->cdt);
        }
        s->y_predictor_table[i+(j-1)] |= 1;
        s->c_predictor_table[i+(j-1)] |= 1;
    }
}

static void gen_vector_table24(TrueMotion1Context *s, const uint8_t *sel_vector_table)
{
    int len, i, j;
    unsigned char delta_pair;

    for (i = 0; i < 1024; i += 4)
    {
        len = *sel_vector_table++ / 2;
        for (j = 0; j < len; j++)
        {
            delta_pair = *sel_vector_table++;
            s->y_predictor_table[i+j] = 0xfffffffe &
                make_ydt24_entry(delta_pair >> 4, delta_pair & 0xf, s->ydt);
            s->c_predictor_table[i+j] = 0xfffffffe &
                make_cdt24_entry(delta_pair >> 4, delta_pair & 0xf, s->cdt);
            s->fat_y_predictor_table[i+j] = 0xfffffffe &
                make_ydt24_entry(delta_pair >> 4, delta_pair & 0xf, s->fat_ydt);
            s->fat_c_predictor_table[i+j] = 0xfffffffe &
                make_cdt24_entry(delta_pair >> 4, delta_pair & 0xf, s->fat_cdt);
        }
        s->y_predictor_table[i+(j-1)] |= 1;
        s->c_predictor_table[i+(j-1)] |= 1;
        s->fat_y_predictor_table[i+(j-1)] |= 1;
        s->fat_c_predictor_table[i+(j-1)] |= 1;
    }
}
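
/* Layout of a TrueMotion 1 frame: a byte-scrambled header comes first; on
 * interframes it is followed by a bitmap with one "changed" bit per 4x4
 * block, and the rest of the buffer is the index stream that drives the
 * predictor tables.  Keyframes omit the change bitmap entirely. */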

/* Returns the number of bytes consumed from the bytestream. Returns -1 if
 * there was an error while decoding the header */
static int truemotion1_decode_header(TrueMotion1Context *s)
{
    int i;
    struct frame_header header;
    uint8_t header_buffer[128];  /* logical maximum size of the header */
    const uint8_t *sel_vector_table;

    /* There is 1 change bit per 4 pixels, so each change byte represents
     * 32 pixels; divide width by 4 to obtain the number of change bits and
     * then round up to the nearest byte. */
    s->mb_change_bits_row_size = ((s->avctx->width >> 2) + 7) >> 3;

    header.header_size = ((s->buf[0] >> 5) | (s->buf[0] << 3)) & 0x7f;
    if (s->buf[0] < 0x10)
    {
        av_log(s->avctx, AV_LOG_ERROR, "invalid header size (%d)\n", s->buf[0]);
        return -1;
    }

    /* unscramble the header bytes with a XOR operation */
    memset(header_buffer, 0, 128);
    for (i = 1; i < header.header_size; i++)
        header_buffer[i - 1] = s->buf[i] ^ s->buf[i + 1];

    header.compression = header_buffer[0];
    header.deltaset = header_buffer[1];
    header.vectable = header_buffer[2];
    header.ysize = AV_RL16(&header_buffer[3]);
    header.xsize = AV_RL16(&header_buffer[5]);
    header.checksum = AV_RL16(&header_buffer[7]);
    header.version = header_buffer[9];
    header.header_type = header_buffer[10];
    header.flags = header_buffer[11];
    header.control = header_buffer[12];

    /* Version 2 */
    if (header.version >= 2)
    {
        if (header.header_type > 3)
        {
            av_log(s->avctx, AV_LOG_ERROR, "invalid header type (%d)\n", header.header_type);
            return -1;
        } else if ((header.header_type == 2) || (header.header_type == 3)) {
            s->flags = header.flags;
            if (!(s->flags & FLAG_INTERFRAME))
                s->flags |= FLAG_KEYFRAME;
        } else
            s->flags = FLAG_KEYFRAME;
    } else /* Version 1 */
        s->flags = FLAG_KEYFRAME;

    if (s->flags & FLAG_SPRITE) {
        av_log(s->avctx, AV_LOG_INFO, "SPRITE frame found, please report the sample to the developers\n");
        s->w = header.width;
        s->h = header.height;
        s->x = header.xoffset;
        s->y = header.yoffset;
    } else {
        s->w = header.xsize;
        s->h = header.ysize;
        if (header.header_type < 2) {
            if ((s->w < 213) && (s->h >= 176))
            {
                s->flags |= FLAG_INTERPOLATED;
                av_log(s->avctx, AV_LOG_INFO, "INTERPOLATION selected, please report the sample to the developers\n");
            }
        }
    }

    if (header.compression >= 17) {
        av_log(s->avctx, AV_LOG_ERROR, "invalid compression type (%d)\n", header.compression);
        return -1;
    }

    if ((header.deltaset != s->last_deltaset) ||
        (header.vectable != s->last_vectable))
        select_delta_tables(s, header.deltaset);

    if ((header.compression & 1) && header.header_type)
        sel_vector_table = pc_tbl2;
    else {
        if ((header.vectable > 0) && (header.vectable < 4))
            sel_vector_table = tables[header.vectable - 1];
        else {
            av_log(s->avctx, AV_LOG_ERROR, "invalid vector table id (%d)\n", header.vectable);
            return -1;
        }
    }

    // FIXME: where to place this ?!?!
    if (compression_types[header.compression].algorithm == ALGO_RGB24H)
        s->avctx->pix_fmt = PIX_FMT_RGBA32;
    else
        s->avctx->pix_fmt = PIX_FMT_RGB555; // RGB565 is supported as well

    if ((header.deltaset != s->last_deltaset) || (header.vectable != s->last_vectable))
    {
        if (compression_types[header.compression].algorithm == ALGO_RGB24H)
            gen_vector_table24(s, sel_vector_table);
        else
        if (s->avctx->pix_fmt == PIX_FMT_RGB555)
            gen_vector_table15(s, sel_vector_table);
        else
            gen_vector_table16(s, sel_vector_table);
    }

    /* set up pointers to the other key data chunks */
    s->mb_change_bits = s->buf + header.header_size;
    if (s->flags & FLAG_KEYFRAME) {
        /* no change bits specified for a keyframe; only index bytes */
        s->index_stream = s->mb_change_bits;
    } else {
        /* one change bit per 4x4 block */
        s->index_stream = s->mb_change_bits +
            (s->mb_change_bits_row_size * (s->avctx->height >> 2));
    }
    s->index_stream_size = s->size - (s->index_stream - s->buf);

    s->last_deltaset = header.deltaset;
    s->last_vectable = header.vectable;
    s->compression = header.compression;
    s->block_width = compression_types[header.compression].block_width;
    s->block_height = compression_types[header.compression].block_height;
    s->block_type = compression_types[header.compression].block_type;

    if (s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_INFO, "tables: %d / %d c:%d %dx%d t:%d %s%s%s%s\n",
            s->last_deltaset, s->last_vectable, s->compression, s->block_width,
            s->block_height, s->block_type,
            s->flags & FLAG_KEYFRAME ? " KEY" : "",
            s->flags & FLAG_INTERFRAME ? " INTER" : "",
            s->flags & FLAG_SPRITE ? " SPRITE" : "",
            s->flags & FLAG_INTERPOLATED ? " INTERPOL" : "");

    return header.header_size;
}

static int truemotion1_decode_init(AVCodecContext *avctx)
{
    TrueMotion1Context *s = (TrueMotion1Context *)avctx->priv_data;

    s->avctx = avctx;

    // FIXME: it may change ?
//    if (avctx->bits_per_sample == 24)
//        avctx->pix_fmt = PIX_FMT_RGB24;
//    else
//        avctx->pix_fmt = PIX_FMT_RGB555;

    avctx->has_b_frames = 0;
    s->frame.data[0] = s->prev_frame.data[0] = NULL;

    /* there is a vertical predictor for each pixel in a line; each vertical
     * predictor is 0 to start with */
    s->vert_pred =
        (unsigned int *)av_malloc(s->avctx->width * sizeof(unsigned int));

    return 0;
}

/*
Block decoding order:

dxi: Y-Y
dxic: Y-C-Y
dxic2: Y-C-Y-C

hres,vres,i,i%vres (0 < i < 4)
2x2 0: 0 dxic2
2x2 1: 1 dxi
2x2 2: 0 dxic2
2x2 3: 1 dxi
2x4 0: 0 dxic2
2x4 1: 1 dxi
2x4 2: 2 dxi
2x4 3: 3 dxi
4x2 0: 0 dxic
4x2 1: 1 dxi
4x2 2: 0 dxic
4x2 3: 1 dxi
4x4 0: 0 dxic
4x4 1: 1 dxi
4x4 2: 2 dxi
4x4 3: 3 dxi
*/
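
/* The macros below walk the index stream.  Each fetched index selects a
 * predictor table entry whose upper 31 bits hold the packed pixel delta and
 * whose bit 0 says whether a new index byte must be read afterwards.  When a
 * freshly fetched index is 0 it acts as an escape: another index is read and
 * its delta is applied scaled by 5 (16-bit path) or taken from the "fat"
 * tables (24-bit path). */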

#define GET_NEXT_INDEX() \
{\
    if (index_stream_index >= s->index_stream_size) { \
        av_log(s->avctx, AV_LOG_INFO, " help! truemotion1 decoder went out of bounds\n"); \
        return; \
    } \
    index = s->index_stream[index_stream_index++] * 4; \
}

#define APPLY_C_PREDICTOR() \
    predictor_pair = s->c_predictor_table[index]; \
    horiz_pred += (predictor_pair >> 1); \
    if (predictor_pair & 1) { \
        GET_NEXT_INDEX() \
        if (!index) { \
            GET_NEXT_INDEX() \
            predictor_pair = s->c_predictor_table[index]; \
            horiz_pred += ((predictor_pair >> 1) * 5); \
            if (predictor_pair & 1) \
                GET_NEXT_INDEX() \
            else \
                index++; \
        } \
    } else \
        index++;

#define APPLY_C_PREDICTOR_24() \
    predictor_pair = s->c_predictor_table[index]; \
    horiz_pred += (predictor_pair >> 1); \
    if (predictor_pair & 1) { \
        GET_NEXT_INDEX() \
        if (!index) { \
            GET_NEXT_INDEX() \
            predictor_pair = s->fat_c_predictor_table[index]; \
            horiz_pred += (predictor_pair >> 1); \
            if (predictor_pair & 1) \
                GET_NEXT_INDEX() \
            else \
                index++; \
        } \
    } else \
        index++;

#define APPLY_Y_PREDICTOR() \
    predictor_pair = s->y_predictor_table[index]; \
    horiz_pred += (predictor_pair >> 1); \
    if (predictor_pair & 1) { \
        GET_NEXT_INDEX() \
        if (!index) { \
            GET_NEXT_INDEX() \
            predictor_pair = s->y_predictor_table[index]; \
            horiz_pred += ((predictor_pair >> 1) * 5); \
            if (predictor_pair & 1) \
                GET_NEXT_INDEX() \
            else \
                index++; \
        } \
    } else \
        index++;

#define APPLY_Y_PREDICTOR_24() \
    predictor_pair = s->y_predictor_table[index]; \
    horiz_pred += (predictor_pair >> 1); \
    if (predictor_pair & 1) { \
        GET_NEXT_INDEX() \
        if (!index) { \
            GET_NEXT_INDEX() \
            predictor_pair = s->fat_y_predictor_table[index]; \
            horiz_pred += (predictor_pair >> 1); \
            if (predictor_pair & 1) \
                GET_NEXT_INDEX() \
            else \
                index++; \
        } \
    } else \
        index++;

#define OUTPUT_PIXEL_PAIR() \
    *current_pixel_pair = *vert_pred + horiz_pred; \
    *vert_pred++ = *current_pixel_pair++; \
    prev_pixel_pair++;
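
/* OUTPUT_PIXEL_PAIR() completes one 32-bit output unit: it adds the
 * accumulated horizontal delta to the column's vertical predictor, writes
 * the sum into the current frame, and stores that value back as the new
 * vertical predictor for the column. */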

static void truemotion1_decode_16bit(TrueMotion1Context *s)
{
    int y;
    int pixels_left;  /* remaining pixels on this line */
    unsigned int predictor_pair;
    unsigned int horiz_pred;
    unsigned int *vert_pred;
    unsigned int *current_pixel_pair;
    unsigned int *prev_pixel_pair;
    unsigned char *current_line = s->frame.data[0];
    unsigned char *prev_line = s->prev_frame.data[0];
    int keyframe = s->flags & FLAG_KEYFRAME;

    /* these variables are for managing the stream of macroblock change bits */
    unsigned char *mb_change_bits = s->mb_change_bits;
    unsigned char mb_change_byte;
    unsigned char mb_change_byte_mask;
    int mb_change_index;

    /* these variables are for managing the main index stream */
    int index_stream_index = 0;  /* yes, the index into the index stream */
    int index;

    /* clean out the line buffer */
    memset(s->vert_pred, 0, s->avctx->width * sizeof(unsigned int));

    GET_NEXT_INDEX();

    for (y = 0; y < s->avctx->height; y++) {

        /* re-init variables for the next line iteration */
        horiz_pred = 0;
        current_pixel_pair = (unsigned int *)current_line;
        prev_pixel_pair = (unsigned int *)prev_line;
        vert_pred = s->vert_pred;
        mb_change_index = 0;
        mb_change_byte = mb_change_bits[mb_change_index++];
        mb_change_byte_mask = 0x01;
        pixels_left = s->avctx->width;

        while (pixels_left > 0) {

            if (keyframe || ((mb_change_byte & mb_change_byte_mask) == 0)) {

                switch (y & 3) {
                case 0:
                    /* if macroblock width is 2, apply C-Y-C-Y; else
                     * apply C-Y-Y */
                    if (s->block_width == 2) {
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    } else {
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    }
                    break;

                case 1:
                case 3:
                    /* always apply 2 Y predictors on these iterations */
                    APPLY_Y_PREDICTOR();
                    OUTPUT_PIXEL_PAIR();
                    APPLY_Y_PREDICTOR();
                    OUTPUT_PIXEL_PAIR();
                    break;

                case 2:
                    /* this iteration might be C-Y-C-Y, Y-Y, or C-Y-Y
                     * depending on the macroblock type */
                    if (s->block_type == BLOCK_2x2) {
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    } else if (s->block_type == BLOCK_4x2) {
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    } else {
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    }
                    break;
                }

            } else {

                /* skip (copy) four pixels, but reassign the horizontal
                 * predictor */
                *current_pixel_pair = *prev_pixel_pair++;
                *vert_pred++ = *current_pixel_pair++;
                *current_pixel_pair = *prev_pixel_pair++;
                horiz_pred = *current_pixel_pair - *vert_pred;
                *vert_pred++ = *current_pixel_pair++;

            }

            if (!keyframe) {
                mb_change_byte_mask <<= 1;

                /* next byte */
                if (!mb_change_byte_mask) {
                    mb_change_byte = mb_change_bits[mb_change_index++];
                    mb_change_byte_mask = 0x01;
                }
            }

            pixels_left -= 4;
        }

        /* next change row */
        if (((y + 1) & 3) == 0)
            mb_change_bits += s->mb_change_bits_row_size;

        current_line += s->frame.linesize[0];
        prev_line += s->prev_frame.linesize[0];
    }
}
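
/* truemotion1_decode_24bit() mirrors the 16-bit routine but uses the *_24
 * predictor macros, which fall back to the fat delta tables when an escape
 * index is encountered. */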

static void truemotion1_decode_24bit(TrueMotion1Context *s)
{
    int y;
    int pixels_left;  /* remaining pixels on this line */
    unsigned int predictor_pair;
    unsigned int horiz_pred;
    unsigned int *vert_pred;
    unsigned int *current_pixel_pair;
    unsigned int *prev_pixel_pair;
    unsigned char *current_line = s->frame.data[0];
    unsigned char *prev_line = s->prev_frame.data[0];
    int keyframe = s->flags & FLAG_KEYFRAME;

    /* these variables are for managing the stream of macroblock change bits */
    unsigned char *mb_change_bits = s->mb_change_bits;
    unsigned char mb_change_byte;
    unsigned char mb_change_byte_mask;
    int mb_change_index;

    /* these variables are for managing the main index stream */
    int index_stream_index = 0;  /* yes, the index into the index stream */
    int index;

    /* clean out the line buffer */
    memset(s->vert_pred, 0, s->avctx->width * sizeof(unsigned int));

    GET_NEXT_INDEX();

    for (y = 0; y < s->avctx->height; y++) {

        /* re-init variables for the next line iteration */
        horiz_pred = 0;
        current_pixel_pair = (unsigned int *)current_line;
        prev_pixel_pair = (unsigned int *)prev_line;
        vert_pred = s->vert_pred;
        mb_change_index = 0;
        mb_change_byte = mb_change_bits[mb_change_index++];
        mb_change_byte_mask = 0x01;
        pixels_left = s->avctx->width;

        while (pixels_left > 0) {

            if (keyframe || ((mb_change_byte & mb_change_byte_mask) == 0)) {

                switch (y & 3) {
                case 0:
                    /* if macroblock width is 2, apply C-Y-C-Y; else
                     * apply C-Y-Y */
                    if (s->block_width == 2) {
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    } else {
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    }
                    break;

                case 1:
                case 3:
                    /* always apply 2 Y predictors on these iterations */
                    APPLY_Y_PREDICTOR_24();
                    OUTPUT_PIXEL_PAIR();
                    APPLY_Y_PREDICTOR_24();
                    OUTPUT_PIXEL_PAIR();
                    break;

                case 2:
                    /* this iteration might be C-Y-C-Y, Y-Y, or C-Y-Y
                     * depending on the macroblock type */
                    if (s->block_type == BLOCK_2x2) {
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    } else if (s->block_type == BLOCK_4x2) {
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    } else {
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    }
                    break;
                }

            } else {

                /* skip (copy) four pixels, but reassign the horizontal
                 * predictor */
                *current_pixel_pair = *prev_pixel_pair++;
                *vert_pred++ = *current_pixel_pair++;
                *current_pixel_pair = *prev_pixel_pair++;
                horiz_pred = *current_pixel_pair - *vert_pred;
                *vert_pred++ = *current_pixel_pair++;

            }

            if (!keyframe) {
                mb_change_byte_mask <<= 1;

                /* next byte */
                if (!mb_change_byte_mask) {
                    mb_change_byte = mb_change_bits[mb_change_index++];
                    mb_change_byte_mask = 0x01;
                }
            }

            pixels_left -= 4;
        }

        /* next change row */
        if (((y + 1) & 3) == 0)
            mb_change_bits += s->mb_change_bits_row_size;

        current_line += s->frame.linesize[0];
        prev_line += s->prev_frame.linesize[0];
    }
}

static int truemotion1_decode_frame(AVCodecContext *avctx,
                                    void *data, int *data_size,
                                    uint8_t *buf, int buf_size)
{
    TrueMotion1Context *s = (TrueMotion1Context *)avctx->priv_data;

    s->buf = buf;
    s->size = buf_size;

    if (truemotion1_decode_header(s) == -1)
        return -1;

    s->frame.reference = 1;
    if (avctx->get_buffer(avctx, &s->frame) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    /* check for a do-nothing frame and copy the previous frame */
    if (compression_types[s->compression].algorithm == ALGO_NOP)
    {
        memcpy(s->frame.data[0], s->prev_frame.data[0],
            s->frame.linesize[0] * s->avctx->height);
    } else if (compression_types[s->compression].algorithm == ALGO_RGB24H) {
        truemotion1_decode_24bit(s);
    } else {
        truemotion1_decode_16bit(s);
    }

    if (s->prev_frame.data[0])
        avctx->release_buffer(avctx, &s->prev_frame);

    /* shuffle frames */
    s->prev_frame = s->frame;

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;

    /* report that the buffer was completely consumed */
    return buf_size;
}

static int truemotion1_decode_end(AVCodecContext *avctx)
{
    TrueMotion1Context *s = (TrueMotion1Context *)avctx->priv_data;

    /* release the last frame */
    if (s->prev_frame.data[0])
        avctx->release_buffer(avctx, &s->prev_frame);

    av_free(s->vert_pred);

    return 0;
}

AVCodec truemotion1_decoder = {
    "truemotion1",
    CODEC_TYPE_VIDEO,
    CODEC_ID_TRUEMOTION1,
    sizeof(TrueMotion1Context),
    truemotion1_decode_init,
    NULL,
    truemotion1_decode_end,
    truemotion1_decode_frame,
    CODEC_CAP_DR1,
};