/*
 * Duck TrueMotion 1.0 Decoder
 * Copyright (C) 2003 Alex Beregszaszi & Mike Melanson
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file
 * Duck TrueMotion v1 Video Decoder by
 * Alex Beregszaszi and
 * Mike Melanson (melanson@pcisys.net)
 *
 * The TrueMotion v1 decoder decodes 16-bit TM1 data and outputs
 * RGB555 (or RGB565) data; 24-bit TM1 data is decoded to RGB32.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "avcodec.h"
#include "dsputil.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"

#include "truemotion1data.h"
typedef struct TrueMotion1Context {
    AVCodecContext *avctx;
    AVFrame frame;

    const uint8_t *buf;
    int size;

    const uint8_t *mb_change_bits;
    int mb_change_bits_row_size;
    const uint8_t *index_stream;
    int index_stream_size;

    int flags;
    int x, y, w, h;

    uint32_t y_predictor_table[1024];
    uint32_t c_predictor_table[1024];
    uint32_t fat_y_predictor_table[1024];
    uint32_t fat_c_predictor_table[1024];

    int compression;
    int block_type;
    int block_width;
    int block_height;

    int16_t ydt[8];
    int16_t cdt[8];
    int16_t fat_ydt[8];
    int16_t fat_cdt[8];

    int last_deltaset, last_vectable;

    unsigned int *vert_pred;
    int vert_pred_size;
} TrueMotion1Context;
#define FLAG_SPRITE         32
#define FLAG_KEYFRAME       16
#define FLAG_INTERFRAME      8
#define FLAG_INTERPOLATED    4

struct frame_header {
    uint8_t header_size;
    uint8_t compression;
    uint8_t deltaset;
    uint8_t vectable;
    uint16_t ysize;
    uint16_t xsize;
    uint16_t checksum;
    uint8_t version;
    uint8_t header_type;
    uint8_t flags;
    uint8_t control;
    uint16_t xoffset;
    uint16_t yoffset;
    uint16_t width;
    uint16_t height;
};
#define ALGO_NOP        0
#define ALGO_RGB16V     1
#define ALGO_RGB16H     2
#define ALGO_RGB24H     3

/* these are the various block sizes that can occupy a 4x4 block */
#define BLOCK_2x2  0
#define BLOCK_2x4  1
#define BLOCK_4x2  2
#define BLOCK_4x4  3

typedef struct comp_types {
    int algorithm;
    int block_width;  // vres
    int block_height; // hres
    int block_type;
} comp_types;
/* { valid for metatype }, algorithm, num of deltas, vert res, horiz res */
static const comp_types compression_types[17] = {
    { ALGO_NOP,    0, 0, 0 },

    { ALGO_RGB16V, 4, 4, BLOCK_4x4 },
    { ALGO_RGB16H, 4, 4, BLOCK_4x4 },
    { ALGO_RGB16V, 4, 2, BLOCK_4x2 },
    { ALGO_RGB16H, 4, 2, BLOCK_4x2 },

    { ALGO_RGB16V, 2, 4, BLOCK_2x4 },
    { ALGO_RGB16H, 2, 4, BLOCK_2x4 },
    { ALGO_RGB16V, 2, 2, BLOCK_2x2 },
    { ALGO_RGB16H, 2, 2, BLOCK_2x2 },

    { ALGO_NOP,    4, 4, BLOCK_4x4 },
    { ALGO_RGB24H, 4, 4, BLOCK_4x4 },
    { ALGO_NOP,    4, 2, BLOCK_4x2 },
    { ALGO_RGB24H, 4, 2, BLOCK_4x2 },

    { ALGO_NOP,    2, 4, BLOCK_2x4 },
    { ALGO_RGB24H, 2, 4, BLOCK_2x4 },
    { ALGO_NOP,    2, 2, BLOCK_2x2 },
    { ALGO_RGB24H, 2, 2, BLOCK_2x2 }
};
static void select_delta_tables(TrueMotion1Context *s, int delta_table_index)
{
    int i;

    if (delta_table_index > 3)
        return;

    memcpy(s->ydt, ydts[delta_table_index], 8 * sizeof(int16_t));
    memcpy(s->cdt, cdts[delta_table_index], 8 * sizeof(int16_t));
    memcpy(s->fat_ydt, fat_ydts[delta_table_index], 8 * sizeof(int16_t));
    memcpy(s->fat_cdt, fat_cdts[delta_table_index], 8 * sizeof(int16_t));

    /* Y skinny deltas need to be halved for some reason; maybe the
     * skinny Y deltas should be modified */
    for (i = 0; i < 8; i++)
    {
        /* drop the lsb before dividing by 2 -- net effect: round down
         * when dividing a negative number (e.g., -3/2 = -2, not -1) */
        s->ydt[i] &= 0xFFFE;
        s->ydt[i] /= 2;
    }
}
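/* Each make_*dt*_entry() helper below builds one 32-bit predictor-table
 * word from a pair of 4-bit delta indices (p1, p2). For the 15/16-bit
 * modes the word holds two packed RGB555/RGB565 pixel deltas (one per
 * 16-bit half); for the 24-bit mode it holds a single RGB32 pixel delta.
 * The value is shifted left by one bit so that bit 0 stays clear;
 * gen_vector_table*() sets bit 0 on the last entry of a group to tell
 * the APPLY_*_PREDICTOR macros to fetch a new index byte. */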
#if HAVE_BIGENDIAN
static int make_ydt15_entry(int p2, int p1, int16_t *ydt)
#else
static int make_ydt15_entry(int p1, int p2, int16_t *ydt)
#endif
{
    int lo, hi;

    lo = ydt[p1];
    lo += (lo << 5) + (lo << 10);
    hi = ydt[p2];
    hi += (hi << 5) + (hi << 10);
    return (lo + (hi << 16)) << 1;
}

static int make_cdt15_entry(int p1, int p2, int16_t *cdt)
{
    int r, b, lo;

    b = cdt[p2];
    r = cdt[p1] << 10;
    lo = b + r;
    return (lo + (lo << 16)) << 1;
}

#if HAVE_BIGENDIAN
static int make_ydt16_entry(int p2, int p1, int16_t *ydt)
#else
static int make_ydt16_entry(int p1, int p2, int16_t *ydt)
#endif
{
    int lo, hi;

    lo = ydt[p1];
    lo += (lo << 6) + (lo << 11);
    hi = ydt[p2];
    hi += (hi << 6) + (hi << 11);
    return (lo + (hi << 16)) << 1;
}

static int make_cdt16_entry(int p1, int p2, int16_t *cdt)
{
    int r, b, lo;

    b = cdt[p2];
    r = cdt[p1] << 11;
    lo = b + r;
    return (lo + (lo << 16)) << 1;
}

static int make_ydt24_entry(int p1, int p2, int16_t *ydt)
{
    int lo, hi;

    lo = ydt[p1];
    hi = ydt[p2];
    return (lo + (hi << 8) + (hi << 16)) << 1;
}

static int make_cdt24_entry(int p1, int p2, int16_t *cdt)
{
    int r, b;

    b = cdt[p2];
    r = cdt[p1] << 16;
    return (b + r) << 1;
}
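/* The index stream selects groups of four predictor-table entries: each
 * index byte is multiplied by 4 in GET_NEXT_INDEX(), so the 1024-entry
 * tables are really 256 groups of 4. For every group, sel_vector_table
 * supplies a length byte (divided by 2 to get the number of packed
 * pairs) followed by that many bytes, each carrying two 4-bit
 * delta-table indices. The 24-bit variant additionally fills the "fat"
 * tables used by the escape path in APPLY_*_PREDICTOR_24(). */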
static void gen_vector_table15(TrueMotion1Context *s, const uint8_t *sel_vector_table)
{
    int len, i, j;
    unsigned char delta_pair;

    for (i = 0; i < 1024; i += 4)
    {
        len = *sel_vector_table++ / 2;
        for (j = 0; j < len; j++)
        {
            delta_pair = *sel_vector_table++;
            s->y_predictor_table[i+j] = 0xfffffffe &
                make_ydt15_entry(delta_pair >> 4, delta_pair & 0xf, s->ydt);
            s->c_predictor_table[i+j] = 0xfffffffe &
                make_cdt15_entry(delta_pair >> 4, delta_pair & 0xf, s->cdt);
        }
        s->y_predictor_table[i+(j-1)] |= 1;
        s->c_predictor_table[i+(j-1)] |= 1;
    }
}

static void gen_vector_table16(TrueMotion1Context *s, const uint8_t *sel_vector_table)
{
    int len, i, j;
    unsigned char delta_pair;

    for (i = 0; i < 1024; i += 4)
    {
        len = *sel_vector_table++ / 2;
        for (j = 0; j < len; j++)
        {
            delta_pair = *sel_vector_table++;
            s->y_predictor_table[i+j] = 0xfffffffe &
                make_ydt16_entry(delta_pair >> 4, delta_pair & 0xf, s->ydt);
            s->c_predictor_table[i+j] = 0xfffffffe &
                make_cdt16_entry(delta_pair >> 4, delta_pair & 0xf, s->cdt);
        }
        s->y_predictor_table[i+(j-1)] |= 1;
        s->c_predictor_table[i+(j-1)] |= 1;
    }
}

static void gen_vector_table24(TrueMotion1Context *s, const uint8_t *sel_vector_table)
{
    int len, i, j;
    unsigned char delta_pair;

    for (i = 0; i < 1024; i += 4)
    {
        len = *sel_vector_table++ / 2;
        for (j = 0; j < len; j++)
        {
            delta_pair = *sel_vector_table++;
            s->y_predictor_table[i+j] = 0xfffffffe &
                make_ydt24_entry(delta_pair >> 4, delta_pair & 0xf, s->ydt);
            s->c_predictor_table[i+j] = 0xfffffffe &
                make_cdt24_entry(delta_pair >> 4, delta_pair & 0xf, s->cdt);
            s->fat_y_predictor_table[i+j] = 0xfffffffe &
                make_ydt24_entry(delta_pair >> 4, delta_pair & 0xf, s->fat_ydt);
            s->fat_c_predictor_table[i+j] = 0xfffffffe &
                make_cdt24_entry(delta_pair >> 4, delta_pair & 0xf, s->fat_cdt);
        }
        s->y_predictor_table[i+(j-1)] |= 1;
        s->c_predictor_table[i+(j-1)] |= 1;
        s->fat_y_predictor_table[i+(j-1)] |= 1;
        s->fat_c_predictor_table[i+(j-1)] |= 1;
    }
}
/* Returns the number of bytes consumed from the bytestream. Returns -1 if
 * there was an error while decoding the header */
static int truemotion1_decode_header(TrueMotion1Context *s)
{
    int i;
    int width_shift = 0;
    int new_pix_fmt;
    struct frame_header header;
    uint8_t header_buffer[128] = { 0 };  /* logical maximum size of the header */
    const uint8_t *sel_vector_table;

    /* recover the 7-bit header size: rotate the first byte left by 3 and mask */
    header.header_size = ((s->buf[0] >> 5) | (s->buf[0] << 3)) & 0x7f;
    if (s->buf[0] < 0x10 || header.header_size >= s->size)
    {
        av_log(s->avctx, AV_LOG_ERROR, "invalid header size (%d)\n", s->buf[0]);
        return -1;
    }

    /* unscramble the header bytes with an XOR operation */
    for (i = 1; i < header.header_size; i++)
        header_buffer[i - 1] = s->buf[i] ^ s->buf[i + 1];
    header.compression = header_buffer[0];
    header.deltaset = header_buffer[1];
    header.vectable = header_buffer[2];
    header.ysize = AV_RL16(&header_buffer[3]);
    header.xsize = AV_RL16(&header_buffer[5]);
    header.checksum = AV_RL16(&header_buffer[7]);
    header.version = header_buffer[9];
    header.header_type = header_buffer[10];
    header.flags = header_buffer[11];
    header.control = header_buffer[12];

    /* Version 2 */
    if (header.version >= 2)
    {
        if (header.header_type > 3)
        {
            av_log(s->avctx, AV_LOG_ERROR, "invalid header type (%d)\n", header.header_type);
            return -1;
        } else if ((header.header_type == 2) || (header.header_type == 3)) {
            s->flags = header.flags;
            if (!(s->flags & FLAG_INTERFRAME))
                s->flags |= FLAG_KEYFRAME;
        } else
            s->flags = FLAG_KEYFRAME;
    } else /* Version 1 */
        s->flags = FLAG_KEYFRAME;

    if (s->flags & FLAG_SPRITE) {
        av_log_ask_for_sample(s->avctx, "SPRITE frame found.\n");
        /* FIXME header.width, height, xoffset and yoffset aren't initialized */
        return -1;
    } else {
        s->w = header.xsize;
        s->h = header.ysize;
        if (header.header_type < 2) {
            if ((s->w < 213) && (s->h >= 176))
            {
                s->flags |= FLAG_INTERPOLATED;
                av_log_ask_for_sample(s->avctx, "INTERPOLATION selected.\n");
            }
        }
    }
    if (header.compression >= 17) {
        av_log(s->avctx, AV_LOG_ERROR, "invalid compression type (%d)\n", header.compression);
        return -1;
    }

    if ((header.deltaset != s->last_deltaset) ||
        (header.vectable != s->last_vectable))
        select_delta_tables(s, header.deltaset);

    if ((header.compression & 1) && header.header_type)
        sel_vector_table = pc_tbl2;
    else {
        if (header.vectable > 0 && header.vectable < 4)
            sel_vector_table = tables[header.vectable - 1];
        else {
            av_log(s->avctx, AV_LOG_ERROR, "invalid vector table id (%d)\n", header.vectable);
            return -1;
        }
    }

    if (compression_types[header.compression].algorithm == ALGO_RGB24H) {
        new_pix_fmt = PIX_FMT_RGB32;
        width_shift = 1;
    } else
        new_pix_fmt = PIX_FMT_RGB555; // RGB565 is supported as well

    s->w >>= width_shift;
    if (av_image_check_size(s->w, s->h, 0, s->avctx) < 0)
        return -1;

    if (s->w != s->avctx->width || s->h != s->avctx->height ||
        new_pix_fmt != s->avctx->pix_fmt) {
        if (s->frame.data[0])
            s->avctx->release_buffer(s->avctx, &s->frame);
        s->avctx->sample_aspect_ratio = (AVRational){ 1 << width_shift, 1 };
        s->avctx->pix_fmt = new_pix_fmt;
        avcodec_set_dimensions(s->avctx, s->w, s->h);
        av_fast_malloc(&s->vert_pred, &s->vert_pred_size, s->avctx->width * sizeof(unsigned int));
    }
    /* There is 1 change bit per 4 pixels, so each change byte represents
     * 32 pixels; divide width by 4 to obtain the number of change bits and
     * then round up to the nearest byte. */
    s->mb_change_bits_row_size = ((s->avctx->width >> (2 - width_shift)) + 7) >> 3;

    if ((header.deltaset != s->last_deltaset) || (header.vectable != s->last_vectable))
    {
        if (compression_types[header.compression].algorithm == ALGO_RGB24H)
            gen_vector_table24(s, sel_vector_table);
        else
        if (s->avctx->pix_fmt == PIX_FMT_RGB555)
            gen_vector_table15(s, sel_vector_table);
        else
            gen_vector_table16(s, sel_vector_table);
    }

    /* set up pointers to the other key data chunks */
    s->mb_change_bits = s->buf + header.header_size;
    if (s->flags & FLAG_KEYFRAME) {
        /* no change bits specified for a keyframe; only index bytes */
        s->index_stream = s->mb_change_bits;
    } else {
        /* one change bit per 4x4 block */
        s->index_stream = s->mb_change_bits +
            (s->mb_change_bits_row_size * (s->avctx->height >> 2));
    }
    s->index_stream_size = s->size - (s->index_stream - s->buf);

    s->last_deltaset = header.deltaset;
    s->last_vectable = header.vectable;
    s->compression = header.compression;
    s->block_width = compression_types[header.compression].block_width;
    s->block_height = compression_types[header.compression].block_height;
    s->block_type = compression_types[header.compression].block_type;

    if (s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_INFO, "tables: %d / %d c:%d %dx%d t:%d %s%s%s%s\n",
            s->last_deltaset, s->last_vectable, s->compression, s->block_width,
            s->block_height, s->block_type,
            s->flags & FLAG_KEYFRAME ? " KEY" : "",
            s->flags & FLAG_INTERFRAME ? " INTER" : "",
            s->flags & FLAG_SPRITE ? " SPRITE" : "",
            s->flags & FLAG_INTERPOLATED ? " INTERPOL" : "");

    return header.header_size;
}
static av_cold int truemotion1_decode_init(AVCodecContext *avctx)
{
    TrueMotion1Context *s = avctx->priv_data;

    s->avctx = avctx;

    // FIXME: it may change ?
//    if (avctx->bits_per_sample == 24)
//        avctx->pix_fmt = PIX_FMT_RGB24;
//    else
//        avctx->pix_fmt = PIX_FMT_RGB555;

    avcodec_get_frame_defaults(&s->frame);
    s->frame.data[0] = NULL;

    /* there is a vertical predictor for each pixel in a line; each vertical
     * predictor is 0 to start with */
    av_fast_malloc(&s->vert_pred, &s->vert_pred_size, s->avctx->width * sizeof(unsigned int));

    return 0;
}
/*
Block decoding order:

dxi: Y-Y
dxic: Y-C-Y
dxic2: Y-C-Y-C

hres,vres,i,i%vres (0 < i < 4)
2x2 0: 0 dxic2
2x2 1: 1 dxi
2x2 2: 0 dxic2
2x2 3: 1 dxi
2x4 0: 0 dxic2
2x4 1: 1 dxi
2x4 2: 2 dxi
2x4 3: 3 dxi
4x2 0: 0 dxic
4x2 1: 1 dxi
4x2 2: 0 dxic
4x2 3: 1 dxi
4x4 0: 0 dxic
4x4 1: 1 dxi
4x4 2: 2 dxi
4x4 3: 3 dxi
*/
#define GET_NEXT_INDEX() \
{\
    if (index_stream_index >= s->index_stream_size) { \
        av_log(s->avctx, AV_LOG_INFO, " help! truemotion1 decoder went out of bounds\n"); \
        return; \
    } \
    index = s->index_stream[index_stream_index++] * 4; \
}
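/* The APPLY_*_PREDICTOR macros accumulate one delta from the current
 * table entry into horiz_pred. If the entry's low bit is set, the
 * current group is exhausted and a new index byte is fetched; a fetched
 * index of 0 is an escape that pulls yet another index and applies a
 * larger delta (the regular delta scaled by 5 in the 16-bit variants,
 * or a delta from the "fat" tables in the 24-bit variants). */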
#define APPLY_C_PREDICTOR() \
    if (index > 1023) { \
        av_log(s->avctx, AV_LOG_ERROR, " index %d went out of bounds\n", index); \
        return; \
    } \
    predictor_pair = s->c_predictor_table[index]; \
    horiz_pred += (predictor_pair >> 1); \
    if (predictor_pair & 1) { \
        GET_NEXT_INDEX() \
        if (!index) { \
            GET_NEXT_INDEX() \
            predictor_pair = s->c_predictor_table[index]; \
            horiz_pred += ((predictor_pair >> 1) * 5); \
            if (predictor_pair & 1) \
                GET_NEXT_INDEX() \
            else \
                index++; \
        } \
    } else \
        index++;

#define APPLY_C_PREDICTOR_24() \
    if (index > 1023) { \
        av_log(s->avctx, AV_LOG_ERROR, " index %d went out of bounds\n", index); \
        return; \
    } \
    predictor_pair = s->c_predictor_table[index]; \
    horiz_pred += (predictor_pair >> 1); \
    if (predictor_pair & 1) { \
        GET_NEXT_INDEX() \
        if (!index) { \
            GET_NEXT_INDEX() \
            predictor_pair = s->fat_c_predictor_table[index]; \
            horiz_pred += (predictor_pair >> 1); \
            if (predictor_pair & 1) \
                GET_NEXT_INDEX() \
            else \
                index++; \
        } \
    } else \
        index++;

#define APPLY_Y_PREDICTOR() \
    if (index > 1023) { \
        av_log(s->avctx, AV_LOG_ERROR, " index %d went out of bounds\n", index); \
        return; \
    } \
    predictor_pair = s->y_predictor_table[index]; \
    horiz_pred += (predictor_pair >> 1); \
    if (predictor_pair & 1) { \
        GET_NEXT_INDEX() \
        if (!index) { \
            GET_NEXT_INDEX() \
            predictor_pair = s->y_predictor_table[index]; \
            horiz_pred += ((predictor_pair >> 1) * 5); \
            if (predictor_pair & 1) \
                GET_NEXT_INDEX() \
            else \
                index++; \
        } \
    } else \
        index++;

#define APPLY_Y_PREDICTOR_24() \
    if (index > 1023) { \
        av_log(s->avctx, AV_LOG_ERROR, " index %d went out of bounds\n", index); \
        return; \
    } \
    predictor_pair = s->y_predictor_table[index]; \
    horiz_pred += (predictor_pair >> 1); \
    if (predictor_pair & 1) { \
        GET_NEXT_INDEX() \
        if (!index) { \
            GET_NEXT_INDEX() \
            predictor_pair = s->fat_y_predictor_table[index]; \
            horiz_pred += (predictor_pair >> 1); \
            if (predictor_pair & 1) \
                GET_NEXT_INDEX() \
            else \
                index++; \
        } \
    } else \
        index++;
#define OUTPUT_PIXEL_PAIR() \
    *current_pixel_pair = *vert_pred + horiz_pred; \
    *vert_pred++ = *current_pixel_pair++;
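/* Both decoding loops below work on 32-bit units: in the 16-bit modes
 * each OUTPUT_PIXEL_PAIR() stores two packed RGB555/RGB565 pixels, in
 * the 24-bit mode it stores one RGB32 pixel. A stored unit is the
 * vertical predictor (the value written at the same position on the
 * previous line, zero for the first line) plus the running horizontal
 * delta, and it then becomes the vertical predictor for the next line. */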
static void truemotion1_decode_16bit(TrueMotion1Context *s)
{
    int y;
    int pixels_left;  /* remaining pixels on this line */
    unsigned int predictor_pair;
    unsigned int horiz_pred;
    unsigned int *vert_pred;
    unsigned int *current_pixel_pair;
    unsigned char *current_line = s->frame.data[0];
    int keyframe = s->flags & FLAG_KEYFRAME;

    /* these variables are for managing the stream of macroblock change bits */
    const unsigned char *mb_change_bits = s->mb_change_bits;
    unsigned char mb_change_byte;
    unsigned char mb_change_byte_mask;
    int mb_change_index;

    /* these variables are for managing the main index stream */
    int index_stream_index = 0;  /* yes, the index into the index stream */
    int index;

    /* clean out the line buffer */
    memset(s->vert_pred, 0, s->avctx->width * sizeof(unsigned int));

    GET_NEXT_INDEX();

    for (y = 0; y < s->avctx->height; y++) {

        /* re-init variables for the next line iteration */
        horiz_pred = 0;
        current_pixel_pair = (unsigned int *)current_line;
        vert_pred = s->vert_pred;
        mb_change_index = 0;
        mb_change_byte = mb_change_bits[mb_change_index++];
        mb_change_byte_mask = 0x01;
        pixels_left = s->avctx->width;

        while (pixels_left > 0) {

            if (keyframe || ((mb_change_byte & mb_change_byte_mask) == 0)) {

                switch (y & 3) {
                case 0:
                    /* if macroblock width is 2, apply C-Y-C-Y; else
                     * apply C-Y-Y */
                    if (s->block_width == 2) {
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    } else {
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    }
                    break;

                case 1:
                case 3:
                    /* always apply 2 Y predictors on these iterations */
                    APPLY_Y_PREDICTOR();
                    OUTPUT_PIXEL_PAIR();
                    APPLY_Y_PREDICTOR();
                    OUTPUT_PIXEL_PAIR();
                    break;

                case 2:
                    /* this iteration might be C-Y-C-Y, Y-Y, or C-Y-Y
                     * depending on the macroblock type */
                    if (s->block_type == BLOCK_2x2) {
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    } else if (s->block_type == BLOCK_4x2) {
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    } else {
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    }
                    break;
                }

            } else {

                /* skip (copy) four pixels, but reassign the horizontal
                 * predictor */
                *vert_pred++ = *current_pixel_pair++;
                horiz_pred = *current_pixel_pair - *vert_pred;
                *vert_pred++ = *current_pixel_pair++;

            }

            if (!keyframe) {
                mb_change_byte_mask <<= 1;

                /* next byte */
                if (!mb_change_byte_mask) {
                    mb_change_byte = mb_change_bits[mb_change_index++];
                    mb_change_byte_mask = 0x01;
                }
            }

            pixels_left -= 4;
        }

        /* next change row */
        if (((y + 1) & 3) == 0)
            mb_change_bits += s->mb_change_bits_row_size;

        current_line += s->frame.linesize[0];
    }
}
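/* The 24-bit decoder mirrors the 16-bit loop above, but each 32-bit
 * store covers a single RGB32 pixel, so only 2 pixels are consumed per
 * iteration, and the escape path in the _24 predictor macros draws from
 * the fat delta tables instead of scaling the regular delta. */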
static void truemotion1_decode_24bit(TrueMotion1Context *s)
{
    int y;
    int pixels_left;  /* remaining pixels on this line */
    unsigned int predictor_pair;
    unsigned int horiz_pred;
    unsigned int *vert_pred;
    unsigned int *current_pixel_pair;
    unsigned char *current_line = s->frame.data[0];
    int keyframe = s->flags & FLAG_KEYFRAME;

    /* these variables are for managing the stream of macroblock change bits */
    const unsigned char *mb_change_bits = s->mb_change_bits;
    unsigned char mb_change_byte;
    unsigned char mb_change_byte_mask;
    int mb_change_index;

    /* these variables are for managing the main index stream */
    int index_stream_index = 0;  /* yes, the index into the index stream */
    int index;

    /* clean out the line buffer */
    memset(s->vert_pred, 0, s->avctx->width * sizeof(unsigned int));

    GET_NEXT_INDEX();

    for (y = 0; y < s->avctx->height; y++) {

        /* re-init variables for the next line iteration */
        horiz_pred = 0;
        current_pixel_pair = (unsigned int *)current_line;
        vert_pred = s->vert_pred;
        mb_change_index = 0;
        mb_change_byte = mb_change_bits[mb_change_index++];
        mb_change_byte_mask = 0x01;
        pixels_left = s->avctx->width;

        while (pixels_left > 0) {

            if (keyframe || ((mb_change_byte & mb_change_byte_mask) == 0)) {

                switch (y & 3) {
                case 0:
                    /* if macroblock width is 2, apply C-Y-C-Y; else
                     * apply C-Y-Y */
                    if (s->block_width == 2) {
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    } else {
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    }
                    break;

                case 1:
                case 3:
                    /* always apply 2 Y predictors on these iterations */
                    APPLY_Y_PREDICTOR_24();
                    OUTPUT_PIXEL_PAIR();
                    APPLY_Y_PREDICTOR_24();
                    OUTPUT_PIXEL_PAIR();
                    break;

                case 2:
                    /* this iteration might be C-Y-C-Y, Y-Y, or C-Y-Y
                     * depending on the macroblock type */
                    if (s->block_type == BLOCK_2x2) {
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    } else if (s->block_type == BLOCK_4x2) {
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    } else {
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    }
                    break;
                }

            } else {

                /* skip (copy) four pixels, but reassign the horizontal
                 * predictor */
                *vert_pred++ = *current_pixel_pair++;
                horiz_pred = *current_pixel_pair - *vert_pred;
                *vert_pred++ = *current_pixel_pair++;

            }

            if (!keyframe) {
                mb_change_byte_mask <<= 1;

                /* next byte */
                if (!mb_change_byte_mask) {
                    mb_change_byte = mb_change_bits[mb_change_index++];
                    mb_change_byte_mask = 0x01;
                }
            }

            pixels_left -= 2;
        }

        /* next change row */
        if (((y + 1) & 3) == 0)
            mb_change_bits += s->mb_change_bits_row_size;

        current_line += s->frame.linesize[0];
    }
}
static int truemotion1_decode_frame(AVCodecContext *avctx,
                                    void *data, int *data_size,
                                    AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    TrueMotion1Context *s = avctx->priv_data;

    s->buf = buf;
    s->size = buf_size;

    if (truemotion1_decode_header(s) == -1)
        return -1;

    s->frame.reference = 3;
    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
        FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if (avctx->reget_buffer(avctx, &s->frame) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    if (compression_types[s->compression].algorithm == ALGO_RGB24H) {
        truemotion1_decode_24bit(s);
    } else if (compression_types[s->compression].algorithm != ALGO_NOP) {
        truemotion1_decode_16bit(s);
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;

    /* report that the buffer was completely consumed */
    return buf_size;
}
static av_cold int truemotion1_decode_end(AVCodecContext *avctx)
{
    TrueMotion1Context *s = avctx->priv_data;

    if (s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);

    av_free(s->vert_pred);

    return 0;
}
AVCodec ff_truemotion1_decoder = {
    .name           = "truemotion1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_TRUEMOTION1,
    .priv_data_size = sizeof(TrueMotion1Context),
    .init           = truemotion1_decode_init,
    .close          = truemotion1_decode_end,
    .decode         = truemotion1_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Duck TrueMotion 1.0"),
};