You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

922 lines
28KB

  1. /*
  2. * Duck TrueMotion 1.0 Decoder
  3. * Copyright (C) 2003 Alex Beregszaszi & Mike Melanson
  4. *
  5. * This library is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU Lesser General Public
  7. * License as published by the Free Software Foundation; either
  8. * version 2 of the License, or (at your option) any later version.
  9. *
  10. * This library is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * Lesser General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU Lesser General Public
  16. * License along with this library; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. /**
  20. * @file truemotion1.c
  21. * Duck TrueMotion v1 Video Decoder by
  22. * Alex Beregszaszi (alex@fsn.hu) and
  23. * Mike Melanson (melanson@pcisys.net)
  24. *
  25. * The TrueMotion v1 decoder presently only decodes 16-bit TM1 data and
  26. * outputs RGB555 (or RGB565) data. 24-bit TM1 data is not supported yet.
  27. */
  28. #include <stdio.h>
  29. #include <stdlib.h>
  30. #include <string.h>
  31. #include <unistd.h>
  32. #include "common.h"
  33. #include "avcodec.h"
  34. #include "dsputil.h"
  35. #include "truemotion1data.h"
/* Persistent decoder state, one instance per codec context. */
typedef struct TrueMotion1Context {
    AVCodecContext *avctx;
    AVFrame frame;                      /* frame currently being decoded */
    AVFrame prev_frame;                 /* previous frame, used as the inter reference */

    uint8_t *buf;                       /* current input packet */
    int size;                           /* size of the current input packet */

    uint8_t *mb_change_bits;            /* per-4x4-block change bitmap (interframes) */
    int mb_change_bits_row_size;        /* bytes per row of change bits */
    uint8_t *index_stream;              /* main predictor-index bytestream */
    int index_stream_size;

    int flags;                          /* FLAG_* bits derived from the frame header */
    int x, y, w, h;                     /* frame (or sprite) geometry from the header */

    /* predictor tables regenerated whenever deltaset/vectable change */
    uint32_t y_predictor_table[1024];
    uint32_t c_predictor_table[1024];
    uint32_t fat_y_predictor_table[1024];   /* 24-bit escape-path tables */
    uint32_t fat_c_predictor_table[1024];

    int compression;                    /* index into compression_types[] */
    int block_type;                     /* BLOCK_* shape code */
    int block_width;
    int block_height;

    /* delta tables selected by the header's deltaset index */
    int16_t ydt[8];
    int16_t cdt[8];
    int16_t fat_ydt[8];
    int16_t fat_cdt[8];

    int last_deltaset, last_vectable;   /* cache to skip table regeneration */

    unsigned int *vert_pred;            /* one vertical predictor per pixel column */
} TrueMotion1Context;
/* frame-header flag bits (stored in TrueMotion1Context.flags) */
#define FLAG_SPRITE 32
#define FLAG_KEYFRAME 16
#define FLAG_INTERFRAME 8
#define FLAG_INTERPOLATED 4
/* On-the-wire frame header, unscrambled from the start of each packet. */
struct frame_header {
    uint8_t header_size;    /* total header bytes, decoded from buf[0] */
    uint8_t compression;    /* index into compression_types[] */
    uint8_t deltaset;       /* which delta table set to load (0..3) */
    uint8_t vectable;       /* which vector table to use */
    uint16_t ysize;
    uint16_t xsize;
    uint16_t checksum;
    uint8_t version;        /* 1 or 2; version 2 adds header_type/flags */
    uint8_t header_type;
    uint8_t flags;
    uint8_t control;
    /* NOTE(review): the four sprite fields below are never filled in by
     * truemotion1_decode_header() in this revision — verify before relying
     * on them in the FLAG_SPRITE path */
    uint16_t xoffset;
    uint16_t yoffset;
    uint16_t width;
    uint16_t height;
};
/* decoding algorithms selected via compression_types[].algorithm */
#define ALGO_NOP 0
#define ALGO_RGB16V 1
#define ALGO_RGB16H 2
#define ALGO_RGB24H 3

/* these are the various block sizes that can occupy a 4x4 block */
#define BLOCK_2x2 0
#define BLOCK_2x4 1
#define BLOCK_4x2 2
#define BLOCK_4x4 3
/* Description of one compression mode (one row of compression_types[]). */
typedef struct comp_types {
    int algorithm;     /* one of the ALGO_* codes */
    int block_width;   // vres
                       /* NOTE(review): the vres/hres tags look swapped —
                        * the table rows read as (width, height), e.g. the
                        * BLOCK_4x2 entries are { ..., 4, 2, ... }; verify */
    int block_height;  // hres
    int block_type;    /* one of the BLOCK_* shape codes */
} comp_types;
  99. /* { valid for metatype }, algorithm, num of deltas, vert res, horiz res */
  100. static comp_types compression_types[17] = {
  101. { ALGO_NOP, 0, 0, 0 },
  102. { ALGO_RGB16V, 4, 4, BLOCK_4x4 },
  103. { ALGO_RGB16H, 4, 4, BLOCK_4x4 },
  104. { ALGO_RGB16V, 4, 2, BLOCK_4x2 },
  105. { ALGO_RGB16H, 4, 2, BLOCK_4x2 },
  106. { ALGO_RGB16V, 2, 4, BLOCK_2x4 },
  107. { ALGO_RGB16H, 2, 4, BLOCK_2x4 },
  108. { ALGO_RGB16V, 2, 2, BLOCK_2x2 },
  109. { ALGO_RGB16H, 2, 2, BLOCK_2x2 },
  110. { ALGO_NOP, 4, 4, BLOCK_4x4 },
  111. { ALGO_RGB24H, 4, 4, BLOCK_4x4 },
  112. { ALGO_NOP, 4, 2, BLOCK_4x2 },
  113. { ALGO_RGB24H, 4, 2, BLOCK_4x2 },
  114. { ALGO_NOP, 2, 4, BLOCK_2x4 },
  115. { ALGO_RGB24H, 2, 4, BLOCK_2x4 },
  116. { ALGO_NOP, 2, 2, BLOCK_2x2 },
  117. { ALGO_RGB24H, 2, 2, BLOCK_2x2 }
  118. };
/* Load the Y/C delta tables (and their "fat" 24-bit variants) selected by
 * the header's deltaset index.  Indices above 3 are silently ignored,
 * leaving whatever tables were loaded previously. */
static void select_delta_tables(TrueMotion1Context *s, int delta_table_index)
{
    int i;

    if (delta_table_index > 3)
        return;

    memcpy(s->ydt, ydts[delta_table_index], 8 * sizeof(int16_t));
    memcpy(s->cdt, cdts[delta_table_index], 8 * sizeof(int16_t));
    memcpy(s->fat_ydt, fat_ydts[delta_table_index], 8 * sizeof(int16_t));
    memcpy(s->fat_cdt, fat_cdts[delta_table_index], 8 * sizeof(int16_t));

    /* Y skinny deltas need to be halved for some reason; maybe the
     * skinny Y deltas should be modified */
    for (i = 0; i < 8; i++)
    {
        /* drop the lsb before dividing by 2-- net effect: round down
         * when dividing a negative number (e.g., -3/2 = -2, not -1) */
        s->ydt[i] &= 0xFFFE;
        s->ydt[i] /= 2;
    }
}
  138. #ifdef WORDS_BIGENDIAN
  139. static int make_ydt15_entry(int p2, int p1, int16_t *ydt)
  140. #else
  141. static int make_ydt15_entry(int p1, int p2, int16_t *ydt)
  142. #endif
  143. {
  144. int lo, hi;
  145. lo = ydt[p1];
  146. lo += (lo << 5) + (lo << 10);
  147. hi = ydt[p2];
  148. hi += (hi << 5) + (hi << 10);
  149. return ((lo + (hi << 16)) << 1);
  150. }
  151. #ifdef WORDS_BIGENDIAN
  152. static int make_cdt15_entry(int p2, int p1, int16_t *cdt)
  153. #else
  154. static int make_cdt15_entry(int p1, int p2, int16_t *cdt)
  155. #endif
  156. {
  157. int r, b, lo;
  158. b = cdt[p2];
  159. r = cdt[p1] << 10;
  160. lo = b + r;
  161. return ((lo + (lo << 16)) << 1);
  162. }
  163. #ifdef WORDS_BIGENDIAN
  164. static int make_ydt16_entry(int p2, int p1, int16_t *ydt)
  165. #else
  166. static int make_ydt16_entry(int p1, int p2, int16_t *ydt)
  167. #endif
  168. {
  169. int lo, hi;
  170. lo = ydt[p1];
  171. lo += (lo << 6) + (lo << 11);
  172. hi = ydt[p2];
  173. hi += (hi << 6) + (hi << 11);
  174. return ((lo + (hi << 16)) << 1);
  175. }
  176. #ifdef WORDS_BIGENDIAN
  177. static int make_cdt16_entry(int p2, int p1, int16_t *cdt)
  178. #else
  179. static int make_cdt16_entry(int p1, int p2, int16_t *cdt)
  180. #endif
  181. {
  182. int r, b, lo;
  183. b = cdt[p2];
  184. r = cdt[p1] << 11;
  185. lo = b + r;
  186. return ((lo + (lo << 16)) << 1);
  187. }
  188. #ifdef WORDS_BIGENDIAN
  189. static int make_ydt24_entry(int p2, int p1, int16_t *ydt)
  190. #else
  191. static int make_ydt24_entry(int p1, int p2, int16_t *ydt)
  192. #endif
  193. {
  194. int lo, hi;
  195. lo = ydt[p1];
  196. hi = ydt[p2];
  197. return ((lo + (hi << 8) + (hi << 16)) << 1);
  198. }
  199. #ifdef WORDS_BIGENDIAN
  200. static int make_cdt24_entry(int p2, int p1, int16_t *cdt)
  201. #else
  202. static int make_cdt24_entry(int p1, int p2, int16_t *cdt)
  203. #endif
  204. {
  205. int r, b;
  206. b = cdt[p2];
  207. r = cdt[p1]<<16;
  208. return ((b+r) << 1);
  209. }
  210. static void gen_vector_table15(TrueMotion1Context *s, const uint8_t *sel_vector_table)
  211. {
  212. int len, i, j;
  213. unsigned char delta_pair;
  214. for (i = 0; i < 1024; i += 4)
  215. {
  216. len = *sel_vector_table++ / 2;
  217. for (j = 0; j < len; j++)
  218. {
  219. delta_pair = *sel_vector_table++;
  220. s->y_predictor_table[i+j] = 0xfffffffe &
  221. make_ydt15_entry(delta_pair >> 4, delta_pair & 0xf, s->ydt);
  222. s->c_predictor_table[i+j] = 0xfffffffe &
  223. make_cdt15_entry(delta_pair >> 4, delta_pair & 0xf, s->cdt);
  224. }
  225. s->y_predictor_table[i+(j-1)] |= 1;
  226. s->c_predictor_table[i+(j-1)] |= 1;
  227. }
  228. }
  229. static void gen_vector_table16(TrueMotion1Context *s, const uint8_t *sel_vector_table)
  230. {
  231. int len, i, j;
  232. unsigned char delta_pair;
  233. for (i = 0; i < 1024; i += 4)
  234. {
  235. len = *sel_vector_table++ / 2;
  236. for (j = 0; j < len; j++)
  237. {
  238. delta_pair = *sel_vector_table++;
  239. s->y_predictor_table[i+j] = 0xfffffffe &
  240. make_ydt16_entry(delta_pair >> 4, delta_pair & 0xf, s->ydt);
  241. s->c_predictor_table[i+j] = 0xfffffffe &
  242. make_cdt16_entry(delta_pair >> 4, delta_pair & 0xf, s->cdt);
  243. }
  244. s->y_predictor_table[i+(j-1)] |= 1;
  245. s->c_predictor_table[i+(j-1)] |= 1;
  246. }
  247. }
  248. static void gen_vector_table24(TrueMotion1Context *s, const uint8_t *sel_vector_table)
  249. {
  250. int len, i, j;
  251. unsigned char delta_pair;
  252. for (i = 0; i < 1024; i += 4)
  253. {
  254. len = *sel_vector_table++ / 2;
  255. for (j = 0; j < len; j++)
  256. {
  257. delta_pair = *sel_vector_table++;
  258. s->y_predictor_table[i+j] = 0xfffffffe &
  259. make_ydt24_entry(delta_pair >> 4, delta_pair & 0xf, s->ydt);
  260. s->c_predictor_table[i+j] = 0xfffffffe &
  261. make_cdt24_entry(delta_pair >> 4, delta_pair & 0xf, s->cdt);
  262. s->fat_y_predictor_table[i+j] = 0xfffffffe &
  263. make_ydt24_entry(delta_pair >> 4, delta_pair & 0xf, s->fat_ydt);
  264. s->fat_c_predictor_table[i+j] = 0xfffffffe &
  265. make_cdt24_entry(delta_pair >> 4, delta_pair & 0xf, s->fat_cdt);
  266. }
  267. s->y_predictor_table[i+(j-1)] |= 1;
  268. s->c_predictor_table[i+(j-1)] |= 1;
  269. s->fat_y_predictor_table[i+(j-1)] |= 1;
  270. s->fat_c_predictor_table[i+(j-1)] |= 1;
  271. }
  272. }
  273. /* Returns the number of bytes consumed from the bytestream. Returns -1 if
  274. * there was an error while decoding the header */
  275. static int truemotion1_decode_header(TrueMotion1Context *s)
  276. {
  277. int i;
  278. struct frame_header header;
  279. uint8_t header_buffer[128]; /* logical maximum size of the header */
  280. const uint8_t *sel_vector_table;
  281. /* There is 1 change bit per 4 pixels, so each change byte represents
  282. * 32 pixels; divide width by 4 to obtain the number of change bits and
  283. * then round up to the nearest byte. */
  284. s->mb_change_bits_row_size = ((s->avctx->width >> 2) + 7) >> 3;
  285. header.header_size = ((s->buf[0] >> 5) | (s->buf[0] << 3)) & 0x7f;
  286. if (s->buf[0] < 0x10)
  287. {
  288. av_log(s->avctx, AV_LOG_ERROR, "invalid header size (%d)\n", s->buf[0]);
  289. return -1;
  290. }
  291. /* unscramble the header bytes with a XOR operation */
  292. memset(header_buffer, 0, 128);
  293. for (i = 1; i < header.header_size; i++)
  294. header_buffer[i - 1] = s->buf[i] ^ s->buf[i + 1];
  295. header.compression = header_buffer[0];
  296. header.deltaset = header_buffer[1];
  297. header.vectable = header_buffer[2];
  298. header.ysize = LE_16(&header_buffer[3]);
  299. header.xsize = LE_16(&header_buffer[5]);
  300. header.checksum = LE_16(&header_buffer[7]);
  301. header.version = header_buffer[9];
  302. header.header_type = header_buffer[10];
  303. header.flags = header_buffer[11];
  304. header.control = header_buffer[12];
  305. /* Version 2 */
  306. if (header.version >= 2)
  307. {
  308. if (header.header_type > 3)
  309. {
  310. av_log(s->avctx, AV_LOG_ERROR, "invalid header type (%d)\n", header.header_type);
  311. return -1;
  312. } else if ((header.header_type == 2) || (header.header_type == 3)) {
  313. s->flags = header.flags;
  314. if (!(s->flags & FLAG_INTERFRAME))
  315. s->flags |= FLAG_KEYFRAME;
  316. } else
  317. s->flags = FLAG_KEYFRAME;
  318. } else /* Version 1 */
  319. s->flags = FLAG_KEYFRAME;
  320. if (s->flags & FLAG_SPRITE) {
  321. av_log(s->avctx, AV_LOG_INFO, "SPRITE frame found, please report the sample to the developers\n");
  322. s->w = header.width;
  323. s->h = header.height;
  324. s->x = header.xoffset;
  325. s->y = header.yoffset;
  326. } else {
  327. s->w = header.xsize;
  328. s->h = header.ysize;
  329. if (header.header_type < 2) {
  330. if ((s->w < 213) && (s->h >= 176))
  331. {
  332. s->flags |= FLAG_INTERPOLATED;
  333. av_log(s->avctx, AV_LOG_INFO, "INTERPOLATION selected, please report the sample to the developers\n");
  334. }
  335. }
  336. }
  337. if (header.compression > 17) {
  338. av_log(s->avctx, AV_LOG_ERROR, "invalid compression type (%d)\n", header.compression);
  339. return -1;
  340. }
  341. if ((header.deltaset != s->last_deltaset) ||
  342. (header.vectable != s->last_vectable))
  343. select_delta_tables(s, header.deltaset);
  344. if ((header.compression & 1) && header.header_type)
  345. sel_vector_table = pc_tbl2;
  346. else {
  347. if (header.vectable < 4)
  348. sel_vector_table = tables[header.vectable - 1];
  349. else {
  350. av_log(s->avctx, AV_LOG_ERROR, "invalid vector table id (%d)\n", header.vectable);
  351. return -1;
  352. }
  353. }
  354. // FIXME: where to place this ?!?!
  355. if (compression_types[header.compression].algorithm == ALGO_RGB24H)
  356. s->avctx->pix_fmt = PIX_FMT_RGBA32;
  357. else
  358. s->avctx->pix_fmt = PIX_FMT_RGB555; // RGB565 is supported aswell
  359. if ((header.deltaset != s->last_deltaset) || (header.vectable != s->last_vectable))
  360. {
  361. if (compression_types[header.compression].algorithm == ALGO_RGB24H)
  362. gen_vector_table24(s, sel_vector_table);
  363. else
  364. if (s->avctx->pix_fmt == PIX_FMT_RGB555)
  365. gen_vector_table15(s, sel_vector_table);
  366. else
  367. gen_vector_table16(s, sel_vector_table);
  368. }
  369. /* set up pointers to the other key data chunks */
  370. s->mb_change_bits = s->buf + header.header_size;
  371. if (s->flags & FLAG_KEYFRAME) {
  372. /* no change bits specified for a keyframe; only index bytes */
  373. s->index_stream = s->mb_change_bits;
  374. } else {
  375. /* one change bit per 4x4 block */
  376. s->index_stream = s->mb_change_bits +
  377. (s->mb_change_bits_row_size * (s->avctx->height >> 2));
  378. }
  379. s->index_stream_size = s->size - (s->index_stream - s->buf);
  380. s->last_deltaset = header.deltaset;
  381. s->last_vectable = header.vectable;
  382. s->compression = header.compression;
  383. s->block_width = compression_types[header.compression].block_width;
  384. s->block_height = compression_types[header.compression].block_height;
  385. s->block_type = compression_types[header.compression].block_type;
  386. if (s->avctx->debug & FF_DEBUG_PICT_INFO)
  387. av_log(s->avctx, AV_LOG_INFO, "tables: %d / %d c:%d %dx%d t:%d %s%s%s%s\n",
  388. s->last_deltaset, s->last_vectable, s->compression, s->block_width,
  389. s->block_height, s->block_type,
  390. s->flags & FLAG_KEYFRAME ? " KEY" : "",
  391. s->flags & FLAG_INTERFRAME ? " INTER" : "",
  392. s->flags & FLAG_SPRITE ? " SPRITE" : "",
  393. s->flags & FLAG_INTERPOLATED ? " INTERPOL" : "");
  394. return header.header_size;
  395. }
  396. static int truemotion1_decode_init(AVCodecContext *avctx)
  397. {
  398. TrueMotion1Context *s = (TrueMotion1Context *)avctx->priv_data;
  399. s->avctx = avctx;
  400. // FIXME: it may change ?
  401. // if (avctx->bits_per_sample == 24)
  402. // avctx->pix_fmt = PIX_FMT_RGB24;
  403. // else
  404. // avctx->pix_fmt = PIX_FMT_RGB555;
  405. avctx->has_b_frames = 0;
  406. s->frame.data[0] = s->prev_frame.data[0] = NULL;
  407. /* there is a vertical predictor for each pixel in a line; each vertical
  408. * predictor is 0 to start with */
  409. s->vert_pred =
  410. (unsigned int *)av_malloc(s->avctx->width * sizeof(unsigned int));
  411. return 0;
  412. }
  413. /*
  414. Block decoding order:
  415. dxi: Y-Y
  416. dxic: Y-C-Y
  417. dxic2: Y-C-Y-C
  418. hres,vres,i,i%vres (0 < i < 4)
  419. 2x2 0: 0 dxic2
  420. 2x2 1: 1 dxi
  421. 2x2 2: 0 dxic2
  422. 2x2 3: 1 dxi
  423. 2x4 0: 0 dxic2
  424. 2x4 1: 1 dxi
  425. 2x4 2: 2 dxi
  426. 2x4 3: 3 dxi
  427. 4x2 0: 0 dxic
  428. 4x2 1: 1 dxi
  429. 4x2 2: 0 dxic
  430. 4x2 3: 1 dxi
  431. 4x4 0: 0 dxic
  432. 4x4 1: 1 dxi
  433. 4x4 2: 2 dxi
  434. 4x4 3: 3 dxi
  435. */
/* Fetch the next byte from the main index stream and scale it by 4 (the
 * predictor tables are laid out in groups of 4 entries).  Returns from
 * the enclosing (void) decode function if the stream is exhausted. */
#define GET_NEXT_INDEX() \
{\
    if (index_stream_index >= s->index_stream_size) { \
        av_log(s->avctx, AV_LOG_INFO, " help! truemotion1 decoder went out of bounds\n"); \
        return; \
    } \
    index = s->index_stream[index_stream_index++] * 4; \
}

/* Add a chroma delta from the C predictor table into horiz_pred.  Bit 0
 * of a table entry is an escape flag: when set, a new index is fetched,
 * and a fetched index of 0 switches to a 5x-scaled delta from a second
 * fetch. */
#define APPLY_C_PREDICTOR() \
    predictor_pair = s->c_predictor_table[index]; \
    horiz_pred += (predictor_pair >> 1); \
    if (predictor_pair & 1) { \
        GET_NEXT_INDEX() \
        if (!index) { \
            GET_NEXT_INDEX() \
            predictor_pair = s->c_predictor_table[index]; \
            horiz_pred += ((predictor_pair >> 1) * 5); \
            if (predictor_pair & 1) \
                GET_NEXT_INDEX() \
            else \
                index++; \
        } \
    } else \
        index++;

/* 24-bit variant of APPLY_C_PREDICTOR: the escape path reads from
 * fat_c_predictor_table instead of scaling by 5. */
#define APPLY_C_PREDICTOR_24() \
    predictor_pair = s->c_predictor_table[index]; \
    horiz_pred += (predictor_pair >> 1); \
    if (predictor_pair & 1) { \
        GET_NEXT_INDEX() \
        if (!index) { \
            GET_NEXT_INDEX() \
            predictor_pair = s->fat_c_predictor_table[index]; \
            horiz_pred += (predictor_pair >> 1); \
            if (predictor_pair & 1) \
                GET_NEXT_INDEX() \
            else \
                index++; \
        } \
    } else \
        index++;

/* Same escape scheme as APPLY_C_PREDICTOR, using the luma table. */
#define APPLY_Y_PREDICTOR() \
    predictor_pair = s->y_predictor_table[index]; \
    horiz_pred += (predictor_pair >> 1); \
    if (predictor_pair & 1) { \
        GET_NEXT_INDEX() \
        if (!index) { \
            GET_NEXT_INDEX() \
            predictor_pair = s->y_predictor_table[index]; \
            horiz_pred += ((predictor_pair >> 1) * 5); \
            if (predictor_pair & 1) \
                GET_NEXT_INDEX() \
            else \
                index++; \
        } \
    } else \
        index++;

/* 24-bit variant of APPLY_Y_PREDICTOR: the escape path reads from
 * fat_y_predictor_table instead of scaling by 5. */
#define APPLY_Y_PREDICTOR_24() \
    predictor_pair = s->y_predictor_table[index]; \
    horiz_pred += (predictor_pair >> 1); \
    if (predictor_pair & 1) { \
        GET_NEXT_INDEX() \
        if (!index) { \
            GET_NEXT_INDEX() \
            predictor_pair = s->fat_y_predictor_table[index]; \
            horiz_pred += (predictor_pair >> 1); \
            if (predictor_pair & 1) \
                GET_NEXT_INDEX() \
            else \
                index++; \
        } \
    } else \
        index++;

/* Write one 32-bit pixel pair (vertical predictor + accumulated
 * horizontal delta); the written value also becomes the new vertical
 * predictor for this column.  prev_pixel_pair is advanced in lockstep so
 * the interframe skip path stays aligned. */
#define OUTPUT_PIXEL_PAIR() \
    *current_pixel_pair = *vert_pred + horiz_pred; \
    *vert_pred++ = *current_pixel_pair++; \
    prev_pixel_pair++;
/* Decode one 16-bit (RGB555/RGB565) frame.  Pixels are handled two at a
 * time as 32-bit words: each output pair is the pixel pair above
 * (vert_pred) plus a running horizontal delta (horiz_pred) driven by the
 * index stream.  On interframes, a set macroblock change bit means the
 * 4-pixel group is copied from the previous frame instead. */
static void truemotion1_decode_16bit(TrueMotion1Context *s)
{
    int y;
    int pixels_left;  /* remaining pixels on this line */
    unsigned int predictor_pair;
    unsigned int horiz_pred;
    unsigned int *vert_pred;
    unsigned int *current_pixel_pair;
    unsigned int *prev_pixel_pair;
    unsigned char *current_line = s->frame.data[0];
    unsigned char *prev_line = s->prev_frame.data[0];
    int keyframe = s->flags & FLAG_KEYFRAME;

    /* these variables are for managing the stream of macroblock change bits */
    unsigned char *mb_change_bits = s->mb_change_bits;
    unsigned char mb_change_byte;
    unsigned char mb_change_byte_mask;
    int mb_change_index;

    /* these variables are for managing the main index stream */
    int index_stream_index = 0;  /* yes, the index into the index stream */
    int index;

    /* clean out the line buffer */
    memset(s->vert_pred, 0, s->avctx->width * sizeof(unsigned int));

    GET_NEXT_INDEX();

    for (y = 0; y < s->avctx->height; y++) {

        /* re-init variables for the next line iteration */
        horiz_pred = 0;
        current_pixel_pair = (unsigned int *)current_line;
        prev_pixel_pair = (unsigned int *)prev_line;
        vert_pred = s->vert_pred;
        mb_change_index = 0;
        mb_change_byte = mb_change_bits[mb_change_index++];
        mb_change_byte_mask = 0x01;
        pixels_left = s->avctx->width;

        while (pixels_left > 0) {

            if (keyframe || ((mb_change_byte & mb_change_byte_mask) == 0)) {

                switch (y & 3) {
                case 0:
                    /* if macroblock width is 2, apply C-Y-C-Y; else
                     * apply C-Y-Y */
                    if (s->block_width == 2) {
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    } else {
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    }
                    break;

                case 1:
                case 3:
                    /* always apply 2 Y predictors on these iterations */
                    APPLY_Y_PREDICTOR();
                    OUTPUT_PIXEL_PAIR();
                    APPLY_Y_PREDICTOR();
                    OUTPUT_PIXEL_PAIR();
                    break;

                case 2:
                    /* this iteration might be C-Y-C-Y, Y-Y, or C-Y-Y
                     * depending on the macroblock type */
                    if (s->block_type == BLOCK_2x2) {
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    } else if (s->block_type == BLOCK_4x2) {
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    } else {
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    }
                    break;
                }

            } else {

                /* skip (copy) four pixels, but reassign the horizontal
                 * predictor */
                *current_pixel_pair = *prev_pixel_pair++;
                *vert_pred++ = *current_pixel_pair++;
                *current_pixel_pair = *prev_pixel_pair++;
                horiz_pred = *current_pixel_pair - *vert_pred;
                *vert_pred++ = *current_pixel_pair++;
            }

            if (!keyframe) {
                mb_change_byte_mask <<= 1;

                /* next byte */
                if (!mb_change_byte_mask) {
                    mb_change_byte = mb_change_bits[mb_change_index++];
                    mb_change_byte_mask = 0x01;
                }
            }

            pixels_left -= 4;
        }

        /* next change row */
        if (((y + 1) & 3) == 0)
            mb_change_bits += s->mb_change_bits_row_size;

        current_line += s->frame.linesize[0];
        prev_line += s->prev_frame.linesize[0];
    }
}
/* Decode one 24-bit (RGBA32) frame.  Structure mirrors
 * truemotion1_decode_16bit exactly, but one 32-bit word holds a single
 * pixel and the _24 predictor macros (with their "fat" escape tables)
 * are used. */
static void truemotion1_decode_24bit(TrueMotion1Context *s)
{
    int y;
    int pixels_left;  /* remaining pixels on this line */
    unsigned int predictor_pair;
    unsigned int horiz_pred;
    unsigned int *vert_pred;
    unsigned int *current_pixel_pair;
    unsigned int *prev_pixel_pair;
    unsigned char *current_line = s->frame.data[0];
    unsigned char *prev_line = s->prev_frame.data[0];
    int keyframe = s->flags & FLAG_KEYFRAME;

    /* these variables are for managing the stream of macroblock change bits */
    unsigned char *mb_change_bits = s->mb_change_bits;
    unsigned char mb_change_byte;
    unsigned char mb_change_byte_mask;
    int mb_change_index;

    /* these variables are for managing the main index stream */
    int index_stream_index = 0;  /* yes, the index into the index stream */
    int index;

    /* clean out the line buffer */
    memset(s->vert_pred, 0, s->avctx->width * sizeof(unsigned int));

    GET_NEXT_INDEX();

    for (y = 0; y < s->avctx->height; y++) {

        /* re-init variables for the next line iteration */
        horiz_pred = 0;
        current_pixel_pair = (unsigned int *)current_line;
        prev_pixel_pair = (unsigned int *)prev_line;
        vert_pred = s->vert_pred;
        mb_change_index = 0;
        mb_change_byte = mb_change_bits[mb_change_index++];
        mb_change_byte_mask = 0x01;
        pixels_left = s->avctx->width;

        while (pixels_left > 0) {

            if (keyframe || ((mb_change_byte & mb_change_byte_mask) == 0)) {

                switch (y & 3) {
                case 0:
                    /* if macroblock width is 2, apply C-Y-C-Y; else
                     * apply C-Y-Y */
                    if (s->block_width == 2) {
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    } else {
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    }
                    break;

                case 1:
                case 3:
                    /* always apply 2 Y predictors on these iterations */
                    APPLY_Y_PREDICTOR_24();
                    OUTPUT_PIXEL_PAIR();
                    APPLY_Y_PREDICTOR_24();
                    OUTPUT_PIXEL_PAIR();
                    break;

                case 2:
                    /* this iteration might be C-Y-C-Y, Y-Y, or C-Y-Y
                     * depending on the macroblock type */
                    if (s->block_type == BLOCK_2x2) {
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    } else if (s->block_type == BLOCK_4x2) {
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    } else {
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    }
                    break;
                }

            } else {

                /* skip (copy) four pixels, but reassign the horizontal
                 * predictor */
                *current_pixel_pair = *prev_pixel_pair++;
                *vert_pred++ = *current_pixel_pair++;
                *current_pixel_pair = *prev_pixel_pair++;
                horiz_pred = *current_pixel_pair - *vert_pred;
                *vert_pred++ = *current_pixel_pair++;
            }

            if (!keyframe) {
                mb_change_byte_mask <<= 1;

                /* next byte */
                if (!mb_change_byte_mask) {
                    mb_change_byte = mb_change_bits[mb_change_index++];
                    mb_change_byte_mask = 0x01;
                }
            }

            pixels_left -= 4;
        }

        /* next change row */
        if (((y + 1) & 3) == 0)
            mb_change_bits += s->mb_change_bits_row_size;

        current_line += s->frame.linesize[0];
        prev_line += s->prev_frame.linesize[0];
    }
}
  736. static int truemotion1_decode_frame(AVCodecContext *avctx,
  737. void *data, int *data_size,
  738. uint8_t *buf, int buf_size)
  739. {
  740. TrueMotion1Context *s = (TrueMotion1Context *)avctx->priv_data;
  741. s->buf = buf;
  742. s->size = buf_size;
  743. if (truemotion1_decode_header(s) == -1)
  744. return -1;
  745. s->frame.reference = 1;
  746. if (avctx->get_buffer(avctx, &s->frame) < 0) {
  747. av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
  748. return -1;
  749. }
  750. /* check for a do-nothing frame and copy the previous frame */
  751. if (compression_types[s->compression].algorithm == ALGO_NOP)
  752. {
  753. memcpy(s->frame.data[0], s->prev_frame.data[0],
  754. s->frame.linesize[0] * s->avctx->height);
  755. } else if (compression_types[s->compression].algorithm == ALGO_RGB24H) {
  756. truemotion1_decode_24bit(s);
  757. } else {
  758. truemotion1_decode_16bit(s);
  759. }
  760. if (s->prev_frame.data[0])
  761. avctx->release_buffer(avctx, &s->prev_frame);
  762. /* shuffle frames */
  763. s->prev_frame = s->frame;
  764. *data_size = sizeof(AVFrame);
  765. *(AVFrame*)data = s->frame;
  766. /* report that the buffer was completely consumed */
  767. return buf_size;
  768. }
  769. static int truemotion1_decode_end(AVCodecContext *avctx)
  770. {
  771. TrueMotion1Context *s = (TrueMotion1Context *)avctx->priv_data;
  772. /* release the last frame */
  773. if (s->prev_frame.data[0])
  774. avctx->release_buffer(avctx, &s->prev_frame);
  775. av_free(s->vert_pred);
  776. return 0;
  777. }
/* Decoder registration entry (positional AVCodec initializer). */
AVCodec truemotion1_decoder = {
    "truemotion1",                /* name */
    CODEC_TYPE_VIDEO,             /* type */
    CODEC_ID_TRUEMOTION1,         /* id */
    sizeof(TrueMotion1Context),   /* priv_data_size */
    truemotion1_decode_init,      /* init */
    NULL,                         /* encode (decoder only) */
    truemotion1_decode_end,       /* close */
    truemotion1_decode_frame,     /* decode */
    CODEC_CAP_DR1,                /* capabilities */
};