/*
 * Duck TrueMotion 1.0 Decoder
 * Copyright (C) 2003 Alex Beregszaszi & Mike Melanson
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Duck TrueMotion v1 Video Decoder by
 * Alex Beregszaszi and
 * Mike Melanson (melanson@pcisys.net)
 *
 * The TrueMotion v1 decoder handles both 16-bit and 24-bit TM1 data:
 * 16-bit data is output as RGB555 (or RGB565), 24-bit data as RGB32.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "avcodec.h"
#include "internal.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mem.h"

#include "truemotion1data.h"

typedef struct TrueMotion1Context {
    AVCodecContext *avctx;
    AVFrame *frame;

    const uint8_t *buf;
    int size;

    const uint8_t *mb_change_bits;
    int mb_change_bits_row_size;
    const uint8_t *index_stream;
    int index_stream_size;

    int flags;
    int x, y, w, h;

    uint32_t y_predictor_table[1024];
    uint32_t c_predictor_table[1024];
    uint32_t fat_y_predictor_table[1024];
    uint32_t fat_c_predictor_table[1024];

    int compression;
    int block_type;
    int block_width;
    int block_height;

    int16_t ydt[8];
    int16_t cdt[8];
    int16_t fat_ydt[8];
    int16_t fat_cdt[8];

    int last_deltaset, last_vectable;

    unsigned int *vert_pred;
    int vert_pred_size;
} TrueMotion1Context;

#define FLAG_SPRITE         32
#define FLAG_KEYFRAME       16
#define FLAG_INTERFRAME      8
#define FLAG_INTERPOLATED    4
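
/* Frame header fields, unpacked from the XOR-descrambled header bytes in
 * truemotion1_decode_header(). */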
struct frame_header {
    uint8_t header_size;
    uint8_t compression;
    uint8_t deltaset;
    uint8_t vectable;
    uint16_t ysize;
    uint16_t xsize;
    uint16_t checksum;
    uint8_t version;
    uint8_t header_type;
    uint8_t flags;
    uint8_t control;
    uint16_t xoffset;
    uint16_t yoffset;
    uint16_t width;
    uint16_t height;
};

#define ALGO_NOP        0
#define ALGO_RGB16V     1
#define ALGO_RGB16H     2
#define ALGO_RGB24H     3

/* these are the various block sizes that can occupy a 4x4 block */
#define BLOCK_2x2  0
#define BLOCK_2x4  1
#define BLOCK_4x2  2
#define BLOCK_4x4  3

typedef struct comp_types {
    int algorithm;
    int block_width; // vres
    int block_height; // hres
    int block_type;
} comp_types;

/* { valid for metatype }, algorithm, num of deltas, vert res, horiz res */
static const comp_types compression_types[17] = {
    { ALGO_NOP,    0, 0, 0 },

    { ALGO_RGB16V, 4, 4, BLOCK_4x4 },
    { ALGO_RGB16H, 4, 4, BLOCK_4x4 },
    { ALGO_RGB16V, 4, 2, BLOCK_4x2 },
    { ALGO_RGB16H, 4, 2, BLOCK_4x2 },

    { ALGO_RGB16V, 2, 4, BLOCK_2x4 },
    { ALGO_RGB16H, 2, 4, BLOCK_2x4 },
    { ALGO_RGB16V, 2, 2, BLOCK_2x2 },
    { ALGO_RGB16H, 2, 2, BLOCK_2x2 },

    { ALGO_NOP,    4, 4, BLOCK_4x4 },
    { ALGO_RGB24H, 4, 4, BLOCK_4x4 },
    { ALGO_NOP,    4, 2, BLOCK_4x2 },
    { ALGO_RGB24H, 4, 2, BLOCK_4x2 },

    { ALGO_NOP,    2, 4, BLOCK_2x4 },
    { ALGO_RGB24H, 2, 4, BLOCK_2x4 },
    { ALGO_NOP,    2, 2, BLOCK_2x2 },
    { ALGO_RGB24H, 2, 2, BLOCK_2x2 }
};
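
/* Load the Y and C delta tables selected by the frame header; the "fat"
 * tables are only used by the 24-bit (RGB24H) decoding path. */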
static void select_delta_tables(TrueMotion1Context *s, int delta_table_index)
{
    int i;

    if (delta_table_index > 3)
        return;

    memcpy(s->ydt, ydts[delta_table_index], 8 * sizeof(int16_t));
    memcpy(s->cdt, cdts[delta_table_index], 8 * sizeof(int16_t));
    memcpy(s->fat_ydt, fat_ydts[delta_table_index], 8 * sizeof(int16_t));
    memcpy(s->fat_cdt, fat_cdts[delta_table_index], 8 * sizeof(int16_t));

    /* Y skinny deltas need to be halved for some reason; maybe the
     * skinny Y deltas should be modified */
    for (i = 0; i < 8; i++)
    {
        /* drop the lsb before dividing by 2-- net effect: round down
         * when dividing a negative number (e.g., -3/2 = -2, not -1) */
        s->ydt[i] &= 0xFFFE;
        s->ydt[i] /= 2;
    }
}
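
/* The make_*_entry() helpers below pack the deltas for one output word into a
 * single 32-bit predictor-table entry: the 15/16-bit variants replicate a
 * delta across the R, G and B fields of two RGB555/RGB565 pixels, while the
 * 24-bit variants combine both deltas into the delta for one RGB32 pixel.
 * The 16-bit Y variants swap their two arguments on big-endian hosts so the
 * packed pixel pair still matches memory order. The low bit is left clear
 * here; gen_vector_table*() uses it as an end-of-vector marker. */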
#if HAVE_BIGENDIAN
static int make_ydt15_entry(int p2, int p1, int16_t *ydt)
#else
static int make_ydt15_entry(int p1, int p2, int16_t *ydt)
#endif
{
    int lo, hi;

    lo = ydt[p1];
    lo += (lo << 5) + (lo << 10);
    hi = ydt[p2];
    hi += (hi << 5) + (hi << 10);
    return (lo + (hi << 16)) << 1;
}

static int make_cdt15_entry(int p1, int p2, int16_t *cdt)
{
    int r, b, lo;

    b = cdt[p2];
    r = cdt[p1] << 10;
    lo = b + r;
    return (lo + (lo << 16)) << 1;
}

#if HAVE_BIGENDIAN
static int make_ydt16_entry(int p2, int p1, int16_t *ydt)
#else
static int make_ydt16_entry(int p1, int p2, int16_t *ydt)
#endif
{
    int lo, hi;

    lo = ydt[p1];
    lo += (lo << 6) + (lo << 11);
    hi = ydt[p2];
    hi += (hi << 6) + (hi << 11);
    return (lo + (hi << 16)) << 1;
}

static int make_cdt16_entry(int p1, int p2, int16_t *cdt)
{
    int r, b, lo;

    b = cdt[p2];
    r = cdt[p1] << 11;
    lo = b + r;
    return (lo + (lo << 16)) << 1;
}

static int make_ydt24_entry(int p1, int p2, int16_t *ydt)
{
    int lo, hi;

    lo = ydt[p1];
    hi = ydt[p2];
    return (lo + (hi << 8) + (hi << 16)) << 1;
}

static int make_cdt24_entry(int p1, int p2, int16_t *cdt)
{
    int r, b;

    b = cdt[p2];
    r = cdt[p1] << 16;
    return (b + r) << 1;
}
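
/* Expand the selected vector table into the 1024-entry predictor tables.
 * Each source vector is a length byte followed by packed 4-bit delta-index
 * pairs; the low bit of the last entry generated for a vector is set so the
 * decode loop knows to fetch a new index byte afterwards. */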
static void gen_vector_table15(TrueMotion1Context *s, const uint8_t *sel_vector_table)
{
    int len, i, j;
    unsigned char delta_pair;

    for (i = 0; i < 1024; i += 4)
    {
        len = *sel_vector_table++ / 2;
        for (j = 0; j < len; j++)
        {
            delta_pair = *sel_vector_table++;
            s->y_predictor_table[i+j] = 0xfffffffe &
                make_ydt15_entry(delta_pair >> 4, delta_pair & 0xf, s->ydt);
            s->c_predictor_table[i+j] = 0xfffffffe &
                make_cdt15_entry(delta_pair >> 4, delta_pair & 0xf, s->cdt);
        }
        s->y_predictor_table[i+(j-1)] |= 1;
        s->c_predictor_table[i+(j-1)] |= 1;
    }
}

static void gen_vector_table16(TrueMotion1Context *s, const uint8_t *sel_vector_table)
{
    int len, i, j;
    unsigned char delta_pair;

    for (i = 0; i < 1024; i += 4)
    {
        len = *sel_vector_table++ / 2;
        for (j = 0; j < len; j++)
        {
            delta_pair = *sel_vector_table++;
            s->y_predictor_table[i+j] = 0xfffffffe &
                make_ydt16_entry(delta_pair >> 4, delta_pair & 0xf, s->ydt);
            s->c_predictor_table[i+j] = 0xfffffffe &
                make_cdt16_entry(delta_pair >> 4, delta_pair & 0xf, s->cdt);
        }
        s->y_predictor_table[i+(j-1)] |= 1;
        s->c_predictor_table[i+(j-1)] |= 1;
    }
}

static void gen_vector_table24(TrueMotion1Context *s, const uint8_t *sel_vector_table)
{
    int len, i, j;
    unsigned char delta_pair;

    for (i = 0; i < 1024; i += 4)
    {
        len = *sel_vector_table++ / 2;
        for (j = 0; j < len; j++)
        {
            delta_pair = *sel_vector_table++;
            s->y_predictor_table[i+j] = 0xfffffffe &
                make_ydt24_entry(delta_pair >> 4, delta_pair & 0xf, s->ydt);
            s->c_predictor_table[i+j] = 0xfffffffe &
                make_cdt24_entry(delta_pair >> 4, delta_pair & 0xf, s->cdt);
            s->fat_y_predictor_table[i+j] = 0xfffffffe &
                make_ydt24_entry(delta_pair >> 4, delta_pair & 0xf, s->fat_ydt);
            s->fat_c_predictor_table[i+j] = 0xfffffffe &
                make_cdt24_entry(delta_pair >> 4, delta_pair & 0xf, s->fat_cdt);
        }
        s->y_predictor_table[i+(j-1)] |= 1;
        s->c_predictor_table[i+(j-1)] |= 1;
        s->fat_y_predictor_table[i+(j-1)] |= 1;
        s->fat_c_predictor_table[i+(j-1)] |= 1;
    }
}

/* Returns the number of bytes consumed from the bytestream, or a negative
 * error code if there was an error while decoding the header. */
static int truemotion1_decode_header(TrueMotion1Context *s)
{
    int i, ret;
    int width_shift = 0;
    int new_pix_fmt;
    struct frame_header header;
    uint8_t header_buffer[128] = { 0 };  /* logical maximum size of the header */
    const uint8_t *sel_vector_table;

    header.header_size = ((s->buf[0] >> 5) | (s->buf[0] << 3)) & 0x7f;
    if (s->buf[0] < 0x10)
    {
        av_log(s->avctx, AV_LOG_ERROR, "invalid header size (%d)\n", s->buf[0]);
        return AVERROR_INVALIDDATA;
    }

    if (header.header_size + 1 > s->size) {
        av_log(s->avctx, AV_LOG_ERROR, "Input packet too small.\n");
        return AVERROR_INVALIDDATA;
    }

    /* unscramble the header bytes with a XOR operation */
    for (i = 1; i < header.header_size; i++)
        header_buffer[i - 1] = s->buf[i] ^ s->buf[i + 1];

    header.compression = header_buffer[0];
    header.deltaset = header_buffer[1];
    header.vectable = header_buffer[2];
    header.ysize = AV_RL16(&header_buffer[3]);
    header.xsize = AV_RL16(&header_buffer[5]);
    header.checksum = AV_RL16(&header_buffer[7]);
    header.version = header_buffer[9];
    header.header_type = header_buffer[10];
    header.flags = header_buffer[11];
    header.control = header_buffer[12];

    /* Version 2 */
    if (header.version >= 2)
    {
        if (header.header_type > 3)
        {
            av_log(s->avctx, AV_LOG_ERROR, "invalid header type (%d)\n", header.header_type);
            return AVERROR_INVALIDDATA;
        } else if ((header.header_type == 2) || (header.header_type == 3)) {
            s->flags = header.flags;
            if (!(s->flags & FLAG_INTERFRAME))
                s->flags |= FLAG_KEYFRAME;
        } else
            s->flags = FLAG_KEYFRAME;
    } else /* Version 1 */
        s->flags = FLAG_KEYFRAME;

    if (s->flags & FLAG_SPRITE) {
        avpriv_request_sample(s->avctx, "Frame with sprite");
        /* FIXME header.width, height, xoffset and yoffset aren't initialized */
        return AVERROR_PATCHWELCOME;
    } else {
        s->w = header.xsize;
        s->h = header.ysize;
        if (header.header_type < 2) {
            if ((s->w < 213) && (s->h >= 176))
            {
                s->flags |= FLAG_INTERPOLATED;
                avpriv_request_sample(s->avctx, "Interpolated frame");
            }
        }
    }

    if (header.compression >= 17) {
        av_log(s->avctx, AV_LOG_ERROR, "invalid compression type (%d)\n", header.compression);
        return AVERROR_INVALIDDATA;
    }

    if ((header.deltaset != s->last_deltaset) ||
        (header.vectable != s->last_vectable))
        select_delta_tables(s, header.deltaset);

    if ((header.compression & 1) && header.header_type)
        sel_vector_table = pc_tbl2;
    else {
        if (header.vectable > 0 && header.vectable < 4)
            sel_vector_table = tables[header.vectable - 1];
        else {
            av_log(s->avctx, AV_LOG_ERROR, "invalid vector table id (%d)\n", header.vectable);
            return AVERROR_INVALIDDATA;
        }
    }

    if (compression_types[header.compression].algorithm == ALGO_RGB24H) {
        new_pix_fmt = AV_PIX_FMT_RGB32;
        width_shift = 1;
    } else
        new_pix_fmt = AV_PIX_FMT_RGB555; // RGB565 is supported as well

    s->w >>= width_shift;

    if (s->w != s->avctx->width || s->h != s->avctx->height ||
        new_pix_fmt != s->avctx->pix_fmt) {
        av_frame_unref(s->frame);
        s->avctx->sample_aspect_ratio = (AVRational){ 1 << width_shift, 1 };
        s->avctx->pix_fmt = new_pix_fmt;

        if ((ret = ff_set_dimensions(s->avctx, s->w, s->h)) < 0)
            return ret;

        ff_set_sar(s->avctx, s->avctx->sample_aspect_ratio);

        av_fast_malloc(&s->vert_pred, &s->vert_pred_size, s->avctx->width * sizeof(unsigned int));
        if (!s->vert_pred)
            return AVERROR(ENOMEM);
    }

    /* There is 1 change bit per 4 pixels, so each change byte represents
     * 32 pixels; divide width by 4 to obtain the number of change bits and
     * then round up to the nearest byte. */
    s->mb_change_bits_row_size = ((s->avctx->width >> (2 - width_shift)) + 7) >> 3;

    if ((header.deltaset != s->last_deltaset) || (header.vectable != s->last_vectable))
    {
        if (compression_types[header.compression].algorithm == ALGO_RGB24H)
            gen_vector_table24(s, sel_vector_table);
        else
        if (s->avctx->pix_fmt == AV_PIX_FMT_RGB555)
            gen_vector_table15(s, sel_vector_table);
        else
            gen_vector_table16(s, sel_vector_table);
    }

    /* set up pointers to the other key data chunks */
    s->mb_change_bits = s->buf + header.header_size;
    if (s->flags & FLAG_KEYFRAME) {
        /* no change bits specified for a keyframe; only index bytes */
        s->index_stream = s->mb_change_bits;
    } else {
        /* one change bit per 4x4 block */
        s->index_stream = s->mb_change_bits +
            (s->mb_change_bits_row_size * (s->avctx->height >> 2));
    }
    s->index_stream_size = s->size - (s->index_stream - s->buf);

    s->last_deltaset = header.deltaset;
    s->last_vectable = header.vectable;
    s->compression = header.compression;
    s->block_width = compression_types[header.compression].block_width;
    s->block_height = compression_types[header.compression].block_height;
    s->block_type = compression_types[header.compression].block_type;

    if (s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_INFO, "tables: %d / %d c:%d %dx%d t:%d %s%s%s%s\n",
            s->last_deltaset, s->last_vectable, s->compression, s->block_width,
            s->block_height, s->block_type,
            s->flags & FLAG_KEYFRAME ? " KEY" : "",
            s->flags & FLAG_INTERFRAME ? " INTER" : "",
            s->flags & FLAG_SPRITE ? " SPRITE" : "",
            s->flags & FLAG_INTERPOLATED ? " INTERPOL" : "");

    return header.header_size;
}

static av_cold int truemotion1_decode_init(AVCodecContext *avctx)
{
    TrueMotion1Context *s = avctx->priv_data;

    s->avctx = avctx;

    // FIXME: it may change ?
//    if (avctx->bits_per_sample == 24)
//        avctx->pix_fmt = AV_PIX_FMT_RGB24;
//    else
//        avctx->pix_fmt = AV_PIX_FMT_RGB555;

    s->frame = av_frame_alloc();
    if (!s->frame)
        return AVERROR(ENOMEM);

    /* there is a vertical predictor for each pixel in a line; each vertical
     * predictor is 0 to start with */
    av_fast_malloc(&s->vert_pred, &s->vert_pred_size, s->avctx->width * sizeof(unsigned int));
    if (!s->vert_pred)
        return AVERROR(ENOMEM);

    return 0;
}

/*
Block decoding order:

dxi: Y-Y
dxic: Y-C-Y
dxic2: Y-C-Y-C

hres,vres,i,i%vres (0 < i < 4)
2x2 0: 0 dxic2
2x2 1: 1 dxi
2x2 2: 0 dxic2
2x2 3: 1 dxi
2x4 0: 0 dxic2
2x4 1: 1 dxi
2x4 2: 2 dxi
2x4 3: 3 dxi
4x2 0: 0 dxic
4x2 1: 1 dxi
4x2 2: 0 dxic
4x2 3: 1 dxi
4x4 0: 0 dxic
4x4 1: 1 dxi
4x4 2: 2 dxi
4x4 3: 3 dxi
*/

#define GET_NEXT_INDEX() \
{\
    if (index_stream_index >= s->index_stream_size) { \
        av_log(s->avctx, AV_LOG_INFO, " help! truemotion1 decoder went out of bounds\n"); \
        return; \
    } \
    index = s->index_stream[index_stream_index++] * 4; \
}

#define INC_INDEX \
do { \
    if (index >= 1023) { \
        av_log(s->avctx, AV_LOG_ERROR, "Invalid index value.\n"); \
        return; \
    } \
    index++; \
} while (0)
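
/* The APPLY_*_PREDICTOR macros accumulate the next predictor-table entry into
 * the running horizontal predictor. A set low bit marks the last delta of the
 * current vector, so a new index byte is fetched; a fetched index of 0 is an
 * escape, after which another index is read and its delta is applied scaled
 * by 5 (16-bit tables) or taken from the "fat" tables (24-bit case). */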
#define APPLY_C_PREDICTOR() \
    predictor_pair = s->c_predictor_table[index]; \
    horiz_pred += (predictor_pair >> 1); \
    if (predictor_pair & 1) { \
        GET_NEXT_INDEX() \
        if (!index) { \
            GET_NEXT_INDEX() \
            predictor_pair = s->c_predictor_table[index]; \
            horiz_pred += ((predictor_pair >> 1) * 5); \
            if (predictor_pair & 1) \
                GET_NEXT_INDEX() \
            else \
                INC_INDEX; \
        } \
    } else \
        INC_INDEX;

#define APPLY_C_PREDICTOR_24() \
    predictor_pair = s->c_predictor_table[index]; \
    horiz_pred += (predictor_pair >> 1); \
    if (predictor_pair & 1) { \
        GET_NEXT_INDEX() \
        if (!index) { \
            GET_NEXT_INDEX() \
            predictor_pair = s->fat_c_predictor_table[index]; \
            horiz_pred += (predictor_pair >> 1); \
            if (predictor_pair & 1) \
                GET_NEXT_INDEX() \
            else \
                INC_INDEX; \
        } \
    } else \
        INC_INDEX;

#define APPLY_Y_PREDICTOR() \
    predictor_pair = s->y_predictor_table[index]; \
    horiz_pred += (predictor_pair >> 1); \
    if (predictor_pair & 1) { \
        GET_NEXT_INDEX() \
        if (!index) { \
            GET_NEXT_INDEX() \
            predictor_pair = s->y_predictor_table[index]; \
            horiz_pred += ((predictor_pair >> 1) * 5); \
            if (predictor_pair & 1) \
                GET_NEXT_INDEX() \
            else \
                INC_INDEX; \
        } \
    } else \
        INC_INDEX;

#define APPLY_Y_PREDICTOR_24() \
    predictor_pair = s->y_predictor_table[index]; \
    horiz_pred += (predictor_pair >> 1); \
    if (predictor_pair & 1) { \
        GET_NEXT_INDEX() \
        if (!index) { \
            GET_NEXT_INDEX() \
            predictor_pair = s->fat_y_predictor_table[index]; \
            horiz_pred += (predictor_pair >> 1); \
            if (predictor_pair & 1) \
                GET_NEXT_INDEX() \
            else \
                INC_INDEX; \
        } \
    } else \
        INC_INDEX;

#define OUTPUT_PIXEL_PAIR() \
    *current_pixel_pair = *vert_pred + horiz_pred; \
    *vert_pred++ = *current_pixel_pair++;
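
/* Decode a 16-bit (RGB555/RGB565) frame. Each OUTPUT_PIXEL_PAIR() writes one
 * 32-bit word (two 16-bit pixels) as the sum of the vertical predictor (the
 * word directly above) and the running horizontal predictor. On inter frames,
 * 4x4 blocks whose change bit is set keep the previous frame's pixels and the
 * predictors are re-seeded from those existing values. */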
static void truemotion1_decode_16bit(TrueMotion1Context *s)
{
    int y;
    int pixels_left;  /* remaining pixels on this line */
    unsigned int predictor_pair;
    unsigned int horiz_pred;
    unsigned int *vert_pred;
    unsigned int *current_pixel_pair;
    unsigned char *current_line = s->frame->data[0];
    int keyframe = s->flags & FLAG_KEYFRAME;

    /* these variables are for managing the stream of macroblock change bits */
    const unsigned char *mb_change_bits = s->mb_change_bits;
    unsigned char mb_change_byte;
    unsigned char mb_change_byte_mask;
    int mb_change_index;

    /* these variables are for managing the main index stream */
    int index_stream_index = 0;  /* yes, the index into the index stream */
    int index;

    /* clean out the line buffer */
    memset(s->vert_pred, 0, s->avctx->width * sizeof(unsigned int));

    GET_NEXT_INDEX();

    for (y = 0; y < s->avctx->height; y++) {

        /* re-init variables for the next line iteration */
        horiz_pred = 0;
        current_pixel_pair = (unsigned int *)current_line;
        vert_pred = s->vert_pred;
        mb_change_index = 0;
        mb_change_byte = mb_change_bits[mb_change_index++];
        mb_change_byte_mask = 0x01;
        pixels_left = s->avctx->width;

        while (pixels_left > 0) {

            if (keyframe || ((mb_change_byte & mb_change_byte_mask) == 0)) {

                switch (y & 3) {
                case 0:
                    /* if macroblock width is 2, apply C-Y-C-Y; else
                     * apply C-Y-Y */
                    if (s->block_width == 2) {
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    } else {
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    }
                    break;

                case 1:
                case 3:
                    /* always apply 2 Y predictors on these iterations */
                    APPLY_Y_PREDICTOR();
                    OUTPUT_PIXEL_PAIR();
                    APPLY_Y_PREDICTOR();
                    OUTPUT_PIXEL_PAIR();
                    break;

                case 2:
                    /* this iteration might be C-Y-C-Y, Y-Y, or C-Y-Y
                     * depending on the macroblock type */
                    if (s->block_type == BLOCK_2x2) {
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    } else if (s->block_type == BLOCK_4x2) {
                        APPLY_C_PREDICTOR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    } else {
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR();
                        OUTPUT_PIXEL_PAIR();
                    }
                    break;
                }

            } else {

                /* skip (copy) four pixels, but reassign the horizontal
                 * predictor */
                *vert_pred++ = *current_pixel_pair++;
                horiz_pred = *current_pixel_pair - *vert_pred;
                *vert_pred++ = *current_pixel_pair++;
            }

            if (!keyframe) {
                mb_change_byte_mask <<= 1;

                /* next byte */
                if (!mb_change_byte_mask) {
                    mb_change_byte = mb_change_bits[mb_change_index++];
                    mb_change_byte_mask = 0x01;
                }
            }

            pixels_left -= 4;
        }

        /* next change row */
        if (((y + 1) & 3) == 0)
            mb_change_bits += s->mb_change_bits_row_size;

        current_line += s->frame->linesize[0];
    }
}
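
/* Decode a 24-bit (RGB32) frame. Same structure as the 16-bit decoder, but
 * each 32-bit word holds a single pixel and the escape path of the predictor
 * macros uses the "fat" delta tables. */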
static void truemotion1_decode_24bit(TrueMotion1Context *s)
{
    int y;
    int pixels_left;  /* remaining pixels on this line */
    unsigned int predictor_pair;
    unsigned int horiz_pred;
    unsigned int *vert_pred;
    unsigned int *current_pixel_pair;
    unsigned char *current_line = s->frame->data[0];
    int keyframe = s->flags & FLAG_KEYFRAME;

    /* these variables are for managing the stream of macroblock change bits */
    const unsigned char *mb_change_bits = s->mb_change_bits;
    unsigned char mb_change_byte;
    unsigned char mb_change_byte_mask;
    int mb_change_index;

    /* these variables are for managing the main index stream */
    int index_stream_index = 0;  /* yes, the index into the index stream */
    int index;

    /* clean out the line buffer */
    memset(s->vert_pred, 0, s->avctx->width * sizeof(unsigned int));

    GET_NEXT_INDEX();

    for (y = 0; y < s->avctx->height; y++) {

        /* re-init variables for the next line iteration */
        horiz_pred = 0;
        current_pixel_pair = (unsigned int *)current_line;
        vert_pred = s->vert_pred;
        mb_change_index = 0;
        mb_change_byte = mb_change_bits[mb_change_index++];
        mb_change_byte_mask = 0x01;
        pixels_left = s->avctx->width;

        while (pixels_left > 0) {

            if (keyframe || ((mb_change_byte & mb_change_byte_mask) == 0)) {

                switch (y & 3) {
                case 0:
                    /* if macroblock width is 2, apply C-Y-C-Y; else
                     * apply C-Y-Y */
                    if (s->block_width == 2) {
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    } else {
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    }
                    break;

                case 1:
                case 3:
                    /* always apply 2 Y predictors on these iterations */
                    APPLY_Y_PREDICTOR_24();
                    OUTPUT_PIXEL_PAIR();
                    APPLY_Y_PREDICTOR_24();
                    OUTPUT_PIXEL_PAIR();
                    break;

                case 2:
                    /* this iteration might be C-Y-C-Y, Y-Y, or C-Y-Y
                     * depending on the macroblock type */
                    if (s->block_type == BLOCK_2x2) {
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    } else if (s->block_type == BLOCK_4x2) {
                        APPLY_C_PREDICTOR_24();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    } else {
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                        APPLY_Y_PREDICTOR_24();
                        OUTPUT_PIXEL_PAIR();
                    }
                    break;
                }

            } else {

                /* skip (copy) two pixels, but reassign the horizontal
                 * predictor */
                *vert_pred++ = *current_pixel_pair++;
                horiz_pred = *current_pixel_pair - *vert_pred;
                *vert_pred++ = *current_pixel_pair++;
            }

            if (!keyframe) {
                mb_change_byte_mask <<= 1;

                /* next byte */
                if (!mb_change_byte_mask) {
                    mb_change_byte = mb_change_bits[mb_change_index++];
                    mb_change_byte_mask = 0x01;
                }
            }

            pixels_left -= 2;
        }

        /* next change row */
        if (((y + 1) & 3) == 0)
            mb_change_bits += s->mb_change_bits_row_size;

        current_line += s->frame->linesize[0];
    }
}
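
/* Top-level decode call: parse and validate the frame header, reuse or
 * reallocate the reference frame, then run the 16-bit or 24-bit decoder
 * selected by the compression type. */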
static int truemotion1_decode_frame(AVCodecContext *avctx,
                                    void *data, int *got_frame,
                                    AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int ret, buf_size = avpkt->size;
    TrueMotion1Context *s = avctx->priv_data;

    s->buf = buf;
    s->size = buf_size;

    if ((ret = truemotion1_decode_header(s)) < 0)
        return ret;

    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
        return ret;

    if (compression_types[s->compression].algorithm == ALGO_RGB24H) {
        truemotion1_decode_24bit(s);
    } else if (compression_types[s->compression].algorithm != ALGO_NOP) {
        truemotion1_decode_16bit(s);
    }

    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;

    *got_frame = 1;

    /* report that the buffer was completely consumed */
    return buf_size;
}

static av_cold int truemotion1_decode_end(AVCodecContext *avctx)
{
    TrueMotion1Context *s = avctx->priv_data;

    av_frame_free(&s->frame);
    av_freep(&s->vert_pred);

    return 0;
}

AVCodec ff_truemotion1_decoder = {
    .name           = "truemotion1",
    .long_name      = NULL_IF_CONFIG_SMALL("Duck TrueMotion 1.0"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_TRUEMOTION1,
    .priv_data_size = sizeof(TrueMotion1Context),
    .init           = truemotion1_decode_init,
    .close          = truemotion1_decode_end,
    .decode         = truemotion1_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
};