You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

1363 lines
43KB

  1. /*
  2. * Interplay MVE Video Decoder
  3. * Copyright (C) 2003 The FFmpeg project
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * Interplay MVE Video Decoder by Mike Melanson (melanson@pcisys.net)
  24. * For more information about the Interplay MVE format, visit:
  25. * http://www.pcisys.net/~melanson/codecs/interplay-mve.txt
  26. * This code is written in such a way that the identifiers match up
  27. * with the encoding descriptions in the document.
  28. *
  29. * This decoder presently only supports a PAL8 output colorspace.
  30. *
  31. * An Interplay video frame consists of 2 parts: The decoding map and
  32. * the video data. A demuxer must load these 2 parts together in a single
  33. * buffer before sending it through the stream to this decoder.
  34. */
  35. #include <stdio.h>
  36. #include <stdlib.h>
  37. #include <string.h>
  38. #include "libavutil/intreadwrite.h"
  39. #define BITSTREAM_READER_LE
  40. #include "avcodec.h"
  41. #include "bytestream.h"
  42. #include "get_bits.h"
  43. #include "hpeldsp.h"
  44. #include "internal.h"
  45. #define PALETTE_COUNT 256
/* Decoder state shared by all block decoders. */
typedef struct IpvideoContext {
    AVCodecContext *avctx;
    HpelDSPContext hdsp;           /* block-copy helpers (put_pixels_tab) */
    AVFrame *second_last_frame;    /* reference frame from two frames ago */
    AVFrame *last_frame;           /* reference frame from one frame ago */

    /* For format 0x10 */
    AVFrame *cur_decode_frame;     /* scratch frame blocks are decoded into */
    AVFrame *prev_decode_frame;    /* previous scratch frame (swapped each frame) */

    const unsigned char *decoding_map;  /* per-block opcodes for the frame */
    int decoding_map_size;
    const unsigned char *skip_map;      /* format-0x10 run-length skip counters */
    int skip_map_size;

    int is_16bpp;                  /* nonzero when pixels are 2 bytes wide */
    GetByteContext stream_ptr, mv_ptr;  /* video data / motion-vector byte readers */
    unsigned char *pixel_ptr;      /* write pointer for the current 8x8 block */
    int line_inc;                  /* stride - 8: step from end of a block row
                                    * to the start of the next */
    int stride;                    /* row stride in pixels */
    int upper_motion_limit_offset; /* max legal motion_offset for copy_from() */

    uint32_t pal[256];             /* current palette, AVPALETTE layout */
} IpvideoContext;
  66. static int copy_from(IpvideoContext *s, AVFrame *src, AVFrame *dst, int delta_x, int delta_y)
  67. {
  68. int current_offset = s->pixel_ptr - dst->data[0];
  69. int motion_offset = current_offset + delta_y * dst->linesize[0]
  70. + delta_x * (1 + s->is_16bpp);
  71. if (motion_offset < 0) {
  72. av_log(s->avctx, AV_LOG_ERROR, "motion offset < 0 (%d)\n", motion_offset);
  73. return AVERROR_INVALIDDATA;
  74. } else if (motion_offset > s->upper_motion_limit_offset) {
  75. av_log(s->avctx, AV_LOG_ERROR, "motion offset above limit (%d >= %d)\n",
  76. motion_offset, s->upper_motion_limit_offset);
  77. return AVERROR_INVALIDDATA;
  78. }
  79. if (!src->data[0]) {
  80. av_log(s->avctx, AV_LOG_ERROR, "Invalid decode type, corrupted header?\n");
  81. return AVERROR(EINVAL);
  82. }
  83. s->hdsp.put_pixels_tab[!s->is_16bpp][0](s->pixel_ptr, src->data[0] + motion_offset,
  84. dst->linesize[0], 8);
  85. return 0;
  86. }
/* Opcode 0x0: block is unchanged from the previous frame. */
static int ipvideo_decode_block_opcode_0x0(IpvideoContext *s, AVFrame *frame)
{
    return copy_from(s, s->last_frame, frame, 0, 0);
}
/* Opcode 0x1: block is unchanged from two frames ago. */
static int ipvideo_decode_block_opcode_0x1(IpvideoContext *s, AVFrame *frame)
{
    return copy_from(s, s->second_last_frame, frame, 0, 0);
}
  95. static int ipvideo_decode_block_opcode_0x2(IpvideoContext *s, AVFrame *frame)
  96. {
  97. unsigned char B;
  98. int x, y;
  99. /* copy block from 2 frames ago using a motion vector; need 1 more byte */
  100. if (!s->is_16bpp) {
  101. B = bytestream2_get_byte(&s->stream_ptr);
  102. } else {
  103. B = bytestream2_get_byte(&s->mv_ptr);
  104. }
  105. if (B < 56) {
  106. x = 8 + (B % 7);
  107. y = B / 7;
  108. } else {
  109. x = -14 + ((B - 56) % 29);
  110. y = 8 + ((B - 56) / 29);
  111. }
  112. ff_tlog(s->avctx, "motion byte = %d, (x, y) = (%d, %d)\n", B, x, y);
  113. return copy_from(s, s->second_last_frame, frame, x, y);
  114. }
  115. static int ipvideo_decode_block_opcode_0x3(IpvideoContext *s, AVFrame *frame)
  116. {
  117. unsigned char B;
  118. int x, y;
  119. /* copy 8x8 block from current frame from an up/left block */
  120. /* need 1 more byte for motion */
  121. if (!s->is_16bpp) {
  122. B = bytestream2_get_byte(&s->stream_ptr);
  123. } else {
  124. B = bytestream2_get_byte(&s->mv_ptr);
  125. }
  126. if (B < 56) {
  127. x = -(8 + (B % 7));
  128. y = -(B / 7);
  129. } else {
  130. x = -(-14 + ((B - 56) % 29));
  131. y = -( 8 + ((B - 56) / 29));
  132. }
  133. ff_tlog(s->avctx, "motion byte = %d, (x, y) = (%d, %d)\n", B, x, y);
  134. return copy_from(s, frame, frame, x, y);
  135. }
  136. static int ipvideo_decode_block_opcode_0x4(IpvideoContext *s, AVFrame *frame)
  137. {
  138. int x, y;
  139. unsigned char B, BL, BH;
  140. /* copy a block from the previous frame; need 1 more byte */
  141. if (!s->is_16bpp) {
  142. B = bytestream2_get_byte(&s->stream_ptr);
  143. } else {
  144. B = bytestream2_get_byte(&s->mv_ptr);
  145. }
  146. BL = B & 0x0F;
  147. BH = (B >> 4) & 0x0F;
  148. x = -8 + BL;
  149. y = -8 + BH;
  150. ff_tlog(s->avctx, "motion byte = %d, (x, y) = (%d, %d)\n", B, x, y);
  151. return copy_from(s, s->last_frame, frame, x, y);
  152. }
/* Opcode 0x5: copy a block from the previous frame using a full signed
 * byte per component (expanded range); consumes 2 more bytes. */
static int ipvideo_decode_block_opcode_0x5(IpvideoContext *s, AVFrame *frame)
{
    signed char x, y;

    /* copy a block from the previous frame using an expanded range;
     * need 2 more bytes */
    x = bytestream2_get_byte(&s->stream_ptr);
    y = bytestream2_get_byte(&s->stream_ptr);

    ff_tlog(s->avctx, "motion bytes = %d, %d\n", x, y);
    return copy_from(s, s->last_frame, frame, x, y);
}
/* Opcode 0x6 (8bpp): not understood; logged and skipped. */
static int ipvideo_decode_block_opcode_0x6(IpvideoContext *s, AVFrame *frame)
{
    /* mystery opcode? skip multiple blocks? */
    av_log(s->avctx, AV_LOG_ERROR, "Help! Mystery opcode 0x6 seen\n");

    /* report success */
    return 0;
}
  170. static int ipvideo_decode_block_opcode_0x7(IpvideoContext *s, AVFrame *frame)
  171. {
  172. int x, y;
  173. unsigned char P[2];
  174. unsigned int flags;
  175. if (bytestream2_get_bytes_left(&s->stream_ptr) < 4) {
  176. av_log(s->avctx, AV_LOG_ERROR, "too little data for opcode 0x7\n");
  177. return AVERROR_INVALIDDATA;
  178. }
  179. /* 2-color encoding */
  180. P[0] = bytestream2_get_byte(&s->stream_ptr);
  181. P[1] = bytestream2_get_byte(&s->stream_ptr);
  182. if (P[0] <= P[1]) {
  183. /* need 8 more bytes from the stream */
  184. for (y = 0; y < 8; y++) {
  185. flags = bytestream2_get_byte(&s->stream_ptr) | 0x100;
  186. for (; flags != 1; flags >>= 1)
  187. *s->pixel_ptr++ = P[flags & 1];
  188. s->pixel_ptr += s->line_inc;
  189. }
  190. } else {
  191. /* need 2 more bytes from the stream */
  192. flags = bytestream2_get_le16(&s->stream_ptr);
  193. for (y = 0; y < 8; y += 2) {
  194. for (x = 0; x < 8; x += 2, flags >>= 1) {
  195. s->pixel_ptr[x ] =
  196. s->pixel_ptr[x + 1 ] =
  197. s->pixel_ptr[x + s->stride] =
  198. s->pixel_ptr[x + 1 + s->stride] = P[flags & 1];
  199. }
  200. s->pixel_ptr += s->stride * 2;
  201. }
  202. }
  203. /* report success */
  204. return 0;
  205. }
/* Opcode 0x8: 2-color encoding for each 4x4 quadrant, or 2-color encoding
 * on either top/bottom or left/right halves; color-pair ordering selects
 * the layout.  Quadrant/half walks traverse the block as two 4-pixel-wide
 * columns of 16 rows, hopping back to the right half at y == 7. */
static int ipvideo_decode_block_opcode_0x8(IpvideoContext *s, AVFrame *frame)
{
    int x, y;
    unsigned char P[4];
    unsigned int flags = 0;

    if (bytestream2_get_bytes_left(&s->stream_ptr) < 12) {
        av_log(s->avctx, AV_LOG_ERROR, "too little data for opcode 0x8\n");
        return AVERROR_INVALIDDATA;
    }

    /* 2-color encoding for each 4x4 quadrant, or 2-color encoding on
     * either top and bottom or left and right halves */
    P[0] = bytestream2_get_byte(&s->stream_ptr);
    P[1] = bytestream2_get_byte(&s->stream_ptr);

    if (P[0] <= P[1]) {
        /* each 4x4 quadrant has its own color pair and 16 flag bits */
        for (y = 0; y < 16; y++) {
            // new values for each 4x4 block
            if (!(y & 3)) {
                if (y) {
                    P[0] = bytestream2_get_byte(&s->stream_ptr);
                    P[1] = bytestream2_get_byte(&s->stream_ptr);
                }
                flags = bytestream2_get_le16(&s->stream_ptr);
            }

            for (x = 0; x < 4; x++, flags >>= 1)
                *s->pixel_ptr++ = P[flags & 1];
            s->pixel_ptr += s->stride - 4;
            // switch to right half
            if (y == 7) s->pixel_ptr -= 8 * s->stride - 4;
        }
    } else {
        flags = bytestream2_get_le32(&s->stream_ptr);
        P[2] = bytestream2_get_byte(&s->stream_ptr);
        P[3] = bytestream2_get_byte(&s->stream_ptr);

        if (P[2] <= P[3]) {
            /* vertical split; left & right halves are 2-color encoded */
            for (y = 0; y < 16; y++) {
                for (x = 0; x < 4; x++, flags >>= 1)
                    *s->pixel_ptr++ = P[flags & 1];
                s->pixel_ptr += s->stride - 4;
                // switch to right half
                if (y == 7) {
                    s->pixel_ptr -= 8 * s->stride - 4;
                    P[0] = P[2];
                    P[1] = P[3];
                    flags = bytestream2_get_le32(&s->stream_ptr);
                }
            }
        } else {
            /* horizontal split; top & bottom halves are 2-color encoded */
            for (y = 0; y < 8; y++) {
                if (y == 4) {
                    P[0] = P[2];
                    P[1] = P[3];
                    flags = bytestream2_get_le32(&s->stream_ptr);
                }

                for (x = 0; x < 8; x++, flags >>= 1)
                    *s->pixel_ptr++ = P[flags & 1];
                s->pixel_ptr += s->line_inc;
            }
        }
    }

    /* report success */
    return 0;
}
/* Opcode 0x9: 4-color encoding.  The orderings of the two color pairs
 * select between per-pixel, 2x2, 2x1 and 1x2 sub-block granularity. */
static int ipvideo_decode_block_opcode_0x9(IpvideoContext *s, AVFrame *frame)
{
    int x, y;
    unsigned char P[4];

    if (bytestream2_get_bytes_left(&s->stream_ptr) < 8) {
        av_log(s->avctx, AV_LOG_ERROR, "too little data for opcode 0x9\n");
        return AVERROR_INVALIDDATA;
    }

    /* 4-color encoding */
    bytestream2_get_buffer(&s->stream_ptr, P, 4);

    if (P[0] <= P[1]) {
        if (P[2] <= P[3]) {
            /* 1 of 4 colors for each pixel, need 16 more bytes */
            for (y = 0; y < 8; y++) {
                /* get the next set of 8 2-bit flags */
                int flags = bytestream2_get_le16(&s->stream_ptr);
                for (x = 0; x < 8; x++, flags >>= 2)
                    *s->pixel_ptr++ = P[flags & 0x03];
                s->pixel_ptr += s->line_inc;
            }
        } else {
            uint32_t flags;

            /* 1 of 4 colors for each 2x2 block, need 4 more bytes */
            flags = bytestream2_get_le32(&s->stream_ptr);

            for (y = 0; y < 8; y += 2) {
                for (x = 0; x < 8; x += 2, flags >>= 2) {
                    s->pixel_ptr[x                ] =
                    s->pixel_ptr[x + 1            ] =
                    s->pixel_ptr[x +     s->stride] =
                    s->pixel_ptr[x + 1 + s->stride] = P[flags & 0x03];
                }
                s->pixel_ptr += s->stride * 2;
            }
        }
    } else {
        uint64_t flags;

        /* 1 of 4 colors for each 2x1 or 1x2 block, need 8 more bytes */
        flags = bytestream2_get_le64(&s->stream_ptr);
        if (P[2] <= P[3]) {
            /* 2x1 (horizontal pairs) */
            for (y = 0; y < 8; y++) {
                for (x = 0; x < 8; x += 2, flags >>= 2) {
                    s->pixel_ptr[x    ] =
                    s->pixel_ptr[x + 1] = P[flags & 0x03];
                }
                s->pixel_ptr += s->stride;
            }
        } else {
            /* 1x2 (vertical pairs) */
            for (y = 0; y < 8; y += 2) {
                for (x = 0; x < 8; x++, flags >>= 2) {
                    s->pixel_ptr[x            ] =
                    s->pixel_ptr[x + s->stride] = P[flags & 0x03];
                }
                s->pixel_ptr += s->stride * 2;
            }
        }
    }

    /* report success */
    return 0;
}
/* Opcode 0xA: 4-color encoding for each 4x4 quadrant, or 4-color encoding
 * on either top/bottom or left/right halves.  Quadrant/vertical walks use
 * the same two-column traversal as opcode 0x8. */
static int ipvideo_decode_block_opcode_0xA(IpvideoContext *s, AVFrame *frame)
{
    int x, y;
    unsigned char P[8];
    int flags = 0;

    if (bytestream2_get_bytes_left(&s->stream_ptr) < 16) {
        av_log(s->avctx, AV_LOG_ERROR, "too little data for opcode 0xA\n");
        return AVERROR_INVALIDDATA;
    }

    bytestream2_get_buffer(&s->stream_ptr, P, 4);

    /* 4-color encoding for each 4x4 quadrant, or 4-color encoding on
     * either top and bottom or left and right halves */
    if (P[0] <= P[1]) {
        /* 4-color encoding for each quadrant; need 32 bytes */
        for (y = 0; y < 16; y++) {
            // new values for each 4x4 block
            if (!(y & 3)) {
                if (y) bytestream2_get_buffer(&s->stream_ptr, P, 4);
                flags = bytestream2_get_le32(&s->stream_ptr);
            }

            for (x = 0; x < 4; x++, flags >>= 2)
                *s->pixel_ptr++ = P[flags & 0x03];

            s->pixel_ptr += s->stride - 4;
            // switch to right half
            if (y == 7) s->pixel_ptr -= 8 * s->stride - 4;
        }
    } else {
        // vertical split?
        int vert;
        uint64_t flags = bytestream2_get_le64(&s->stream_ptr);

        bytestream2_get_buffer(&s->stream_ptr, P + 4, 4);
        vert = P[4] <= P[5];

        /* 4-color encoding for either left and right or top and bottom
         * halves */
        for (y = 0; y < 16; y++) {
            for (x = 0; x < 4; x++, flags >>= 2)
                *s->pixel_ptr++ = P[flags & 0x03];

            if (vert) {
                s->pixel_ptr += s->stride - 4;
                // switch to right half
                if (y == 7) s->pixel_ptr -= 8 * s->stride - 4;
            } else if (y & 1) s->pixel_ptr += s->line_inc;

            // load values for second half
            if (y == 7) {
                memcpy(P, P + 4, 4);
                flags = bytestream2_get_le64(&s->stream_ptr);
            }
        }
    }

    /* report success */
    return 0;
}
  381. static int ipvideo_decode_block_opcode_0xB(IpvideoContext *s, AVFrame *frame)
  382. {
  383. int y;
  384. /* 64-color encoding (each pixel in block is a different color) */
  385. for (y = 0; y < 8; y++) {
  386. bytestream2_get_buffer(&s->stream_ptr, s->pixel_ptr, 8);
  387. s->pixel_ptr += s->stride;
  388. }
  389. /* report success */
  390. return 0;
  391. }
  392. static int ipvideo_decode_block_opcode_0xC(IpvideoContext *s, AVFrame *frame)
  393. {
  394. int x, y;
  395. /* 16-color block encoding: each 2x2 block is a different color */
  396. for (y = 0; y < 8; y += 2) {
  397. for (x = 0; x < 8; x += 2) {
  398. s->pixel_ptr[x ] =
  399. s->pixel_ptr[x + 1 ] =
  400. s->pixel_ptr[x + s->stride] =
  401. s->pixel_ptr[x + 1 + s->stride] = bytestream2_get_byte(&s->stream_ptr);
  402. }
  403. s->pixel_ptr += s->stride * 2;
  404. }
  405. /* report success */
  406. return 0;
  407. }
  408. static int ipvideo_decode_block_opcode_0xD(IpvideoContext *s, AVFrame *frame)
  409. {
  410. int y;
  411. unsigned char P[2];
  412. if (bytestream2_get_bytes_left(&s->stream_ptr) < 4) {
  413. av_log(s->avctx, AV_LOG_ERROR, "too little data for opcode 0xD\n");
  414. return AVERROR_INVALIDDATA;
  415. }
  416. /* 4-color block encoding: each 4x4 block is a different color */
  417. for (y = 0; y < 8; y++) {
  418. if (!(y & 3)) {
  419. P[0] = bytestream2_get_byte(&s->stream_ptr);
  420. P[1] = bytestream2_get_byte(&s->stream_ptr);
  421. }
  422. memset(s->pixel_ptr, P[0], 4);
  423. memset(s->pixel_ptr + 4, P[1], 4);
  424. s->pixel_ptr += s->stride;
  425. }
  426. /* report success */
  427. return 0;
  428. }
  429. static int ipvideo_decode_block_opcode_0xE(IpvideoContext *s, AVFrame *frame)
  430. {
  431. int y;
  432. unsigned char pix;
  433. /* 1-color encoding: the whole block is 1 solid color */
  434. pix = bytestream2_get_byte(&s->stream_ptr);
  435. for (y = 0; y < 8; y++) {
  436. memset(s->pixel_ptr, pix, 8);
  437. s->pixel_ptr += s->stride;
  438. }
  439. /* report success */
  440. return 0;
  441. }
  442. static int ipvideo_decode_block_opcode_0xF(IpvideoContext *s, AVFrame *frame)
  443. {
  444. int x, y;
  445. unsigned char sample[2];
  446. /* dithered encoding */
  447. sample[0] = bytestream2_get_byte(&s->stream_ptr);
  448. sample[1] = bytestream2_get_byte(&s->stream_ptr);
  449. for (y = 0; y < 8; y++) {
  450. for (x = 0; x < 8; x += 2) {
  451. *s->pixel_ptr++ = sample[ y & 1 ];
  452. *s->pixel_ptr++ = sample[!(y & 1)];
  453. }
  454. s->pixel_ptr += s->line_inc;
  455. }
  456. /* report success */
  457. return 0;
  458. }
/* Opcode 0x6 (16bpp): copy a block from two frames ago using a full
 * signed byte per motion component (expanded range). */
static int ipvideo_decode_block_opcode_0x6_16(IpvideoContext *s, AVFrame *frame)
{
    signed char x, y;

    /* copy a block from the second last frame using an expanded range */
    x = bytestream2_get_byte(&s->stream_ptr);
    y = bytestream2_get_byte(&s->stream_ptr);

    ff_tlog(s->avctx, "motion bytes = %d, %d\n", x, y);
    return copy_from(s, s->second_last_frame, frame, x, y);
}
  468. static int ipvideo_decode_block_opcode_0x7_16(IpvideoContext *s, AVFrame *frame)
  469. {
  470. int x, y;
  471. uint16_t P[2];
  472. unsigned int flags;
  473. uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr;
  474. /* 2-color encoding */
  475. P[0] = bytestream2_get_le16(&s->stream_ptr);
  476. P[1] = bytestream2_get_le16(&s->stream_ptr);
  477. if (!(P[0] & 0x8000)) {
  478. for (y = 0; y < 8; y++) {
  479. flags = bytestream2_get_byte(&s->stream_ptr) | 0x100;
  480. for (; flags != 1; flags >>= 1)
  481. *pixel_ptr++ = P[flags & 1];
  482. pixel_ptr += s->line_inc;
  483. }
  484. } else {
  485. flags = bytestream2_get_le16(&s->stream_ptr);
  486. for (y = 0; y < 8; y += 2) {
  487. for (x = 0; x < 8; x += 2, flags >>= 1) {
  488. pixel_ptr[x ] =
  489. pixel_ptr[x + 1 ] =
  490. pixel_ptr[x + s->stride] =
  491. pixel_ptr[x + 1 + s->stride] = P[flags & 1];
  492. }
  493. pixel_ptr += s->stride * 2;
  494. }
  495. }
  496. return 0;
  497. }
/* Opcode 0x8 (16bpp): 2-color encoding for each 4x4 quadrant, or 2-color
 * encoding on either top/bottom or left/right halves.  The top bit of a
 * color selects the layout (8bpp compares color order instead). */
static int ipvideo_decode_block_opcode_0x8_16(IpvideoContext *s, AVFrame *frame)
{
    int x, y;
    uint16_t P[4];
    unsigned int flags = 0;
    uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr;

    /* 2-color encoding for each 4x4 quadrant, or 2-color encoding on
     * either top and bottom or left and right halves */
    P[0] = bytestream2_get_le16(&s->stream_ptr);
    P[1] = bytestream2_get_le16(&s->stream_ptr);

    if (!(P[0] & 0x8000)) {
        /* each 4x4 quadrant has its own color pair and 16 flag bits */
        for (y = 0; y < 16; y++) {
            // new values for each 4x4 block
            if (!(y & 3)) {
                if (y) {
                    P[0] = bytestream2_get_le16(&s->stream_ptr);
                    P[1] = bytestream2_get_le16(&s->stream_ptr);
                }
                flags = bytestream2_get_le16(&s->stream_ptr);
            }

            for (x = 0; x < 4; x++, flags >>= 1)
                *pixel_ptr++ = P[flags & 1];
            pixel_ptr += s->stride - 4;
            // switch to right half
            if (y == 7) pixel_ptr -= 8 * s->stride - 4;
        }
    } else {
        flags = bytestream2_get_le32(&s->stream_ptr);
        P[2] = bytestream2_get_le16(&s->stream_ptr);
        P[3] = bytestream2_get_le16(&s->stream_ptr);

        if (!(P[2] & 0x8000)) {
            /* vertical split; left & right halves are 2-color encoded */
            for (y = 0; y < 16; y++) {
                for (x = 0; x < 4; x++, flags >>= 1)
                    *pixel_ptr++ = P[flags & 1];
                pixel_ptr += s->stride - 4;
                // switch to right half
                if (y == 7) {
                    pixel_ptr -= 8 * s->stride - 4;
                    P[0] = P[2];
                    P[1] = P[3];
                    flags = bytestream2_get_le32(&s->stream_ptr);
                }
            }
        } else {
            /* horizontal split; top & bottom halves are 2-color encoded */
            for (y = 0; y < 8; y++) {
                if (y == 4) {
                    P[0] = P[2];
                    P[1] = P[3];
                    flags = bytestream2_get_le32(&s->stream_ptr);
                }

                for (x = 0; x < 8; x++, flags >>= 1)
                    *pixel_ptr++ = P[flags & 1];
                pixel_ptr += s->line_inc;
            }
        }
    }

    /* report success */
    return 0;
}
/* Opcode 0x9 (16bpp): 4-color encoding.  Top bits of P[0]/P[2] select
 * between per-pixel, 2x2, 2x1 and 1x2 sub-block granularity. */
static int ipvideo_decode_block_opcode_0x9_16(IpvideoContext *s, AVFrame *frame)
{
    int x, y;
    uint16_t P[4];
    uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr;

    /* 4-color encoding */
    for (x = 0; x < 4; x++)
        P[x] = bytestream2_get_le16(&s->stream_ptr);

    if (!(P[0] & 0x8000)) {
        if (!(P[2] & 0x8000)) {
            /* 1 of 4 colors for each pixel */
            for (y = 0; y < 8; y++) {
                /* get the next set of 8 2-bit flags */
                int flags = bytestream2_get_le16(&s->stream_ptr);
                for (x = 0; x < 8; x++, flags >>= 2)
                    *pixel_ptr++ = P[flags & 0x03];
                pixel_ptr += s->line_inc;
            }
        } else {
            uint32_t flags;

            /* 1 of 4 colors for each 2x2 block */
            flags = bytestream2_get_le32(&s->stream_ptr);

            for (y = 0; y < 8; y += 2) {
                for (x = 0; x < 8; x += 2, flags >>= 2) {
                    pixel_ptr[x                ] =
                    pixel_ptr[x + 1            ] =
                    pixel_ptr[x +     s->stride] =
                    pixel_ptr[x + 1 + s->stride] = P[flags & 0x03];
                }
                pixel_ptr += s->stride * 2;
            }
        }
    } else {
        uint64_t flags;

        /* 1 of 4 colors for each 2x1 or 1x2 block */
        flags = bytestream2_get_le64(&s->stream_ptr);
        if (!(P[2] & 0x8000)) {
            /* 2x1 (horizontal pairs) */
            for (y = 0; y < 8; y++) {
                for (x = 0; x < 8; x += 2, flags >>= 2) {
                    pixel_ptr[x    ] =
                    pixel_ptr[x + 1] = P[flags & 0x03];
                }
                pixel_ptr += s->stride;
            }
        } else {
            /* 1x2 (vertical pairs) */
            for (y = 0; y < 8; y += 2) {
                for (x = 0; x < 8; x++, flags >>= 2) {
                    pixel_ptr[x            ] =
                    pixel_ptr[x + s->stride] = P[flags & 0x03];
                }
                pixel_ptr += s->stride * 2;
            }
        }
    }

    /* report success */
    return 0;
}
/* Opcode 0xA (16bpp): 4-color encoding for each 4x4 quadrant, or 4-color
 * encoding on either top/bottom or left/right halves; layout is selected
 * by the top bit of P[0] / P[4]. */
static int ipvideo_decode_block_opcode_0xA_16(IpvideoContext *s, AVFrame *frame)
{
    int x, y;
    uint16_t P[8];
    int flags = 0;
    uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr;

    for (x = 0; x < 4; x++)
        P[x] = bytestream2_get_le16(&s->stream_ptr);

    /* 4-color encoding for each 4x4 quadrant, or 4-color encoding on
     * either top and bottom or left and right halves */
    if (!(P[0] & 0x8000)) {
        /* 4-color encoding for each quadrant */
        for (y = 0; y < 16; y++) {
            // new values for each 4x4 block
            if (!(y & 3)) {
                if (y)
                    for (x = 0; x < 4; x++)
                        P[x] = bytestream2_get_le16(&s->stream_ptr);
                flags = bytestream2_get_le32(&s->stream_ptr);
            }

            for (x = 0; x < 4; x++, flags >>= 2)
                *pixel_ptr++ = P[flags & 0x03];

            pixel_ptr += s->stride - 4;
            // switch to right half
            if (y == 7) pixel_ptr -= 8 * s->stride - 4;
        }
    } else {
        // vertical split?
        int vert;
        uint64_t flags = bytestream2_get_le64(&s->stream_ptr);

        for (x = 4; x < 8; x++)
            P[x] = bytestream2_get_le16(&s->stream_ptr);
        vert = !(P[4] & 0x8000);

        /* 4-color encoding for either left and right or top and bottom
         * halves */
        for (y = 0; y < 16; y++) {
            for (x = 0; x < 4; x++, flags >>= 2)
                *pixel_ptr++ = P[flags & 0x03];

            if (vert) {
                pixel_ptr += s->stride - 4;
                // switch to right half
                if (y == 7) pixel_ptr -= 8 * s->stride - 4;
            } else if (y & 1) pixel_ptr += s->line_inc;

            // load values for second half (8 bytes = four uint16_t colors)
            if (y == 7) {
                memcpy(P, P + 4, 8);
                flags = bytestream2_get_le64(&s->stream_ptr);
            }
        }
    }

    /* report success */
    return 0;
}
  669. static int ipvideo_decode_block_opcode_0xB_16(IpvideoContext *s, AVFrame *frame)
  670. {
  671. int x, y;
  672. uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr;
  673. /* 64-color encoding (each pixel in block is a different color) */
  674. for (y = 0; y < 8; y++) {
  675. for (x = 0; x < 8; x++)
  676. pixel_ptr[x] = bytestream2_get_le16(&s->stream_ptr);
  677. pixel_ptr += s->stride;
  678. }
  679. /* report success */
  680. return 0;
  681. }
  682. static int ipvideo_decode_block_opcode_0xC_16(IpvideoContext *s, AVFrame *frame)
  683. {
  684. int x, y;
  685. uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr;
  686. /* 16-color block encoding: each 2x2 block is a different color */
  687. for (y = 0; y < 8; y += 2) {
  688. for (x = 0; x < 8; x += 2) {
  689. pixel_ptr[x ] =
  690. pixel_ptr[x + 1 ] =
  691. pixel_ptr[x + s->stride] =
  692. pixel_ptr[x + 1 + s->stride] = bytestream2_get_le16(&s->stream_ptr);
  693. }
  694. pixel_ptr += s->stride * 2;
  695. }
  696. /* report success */
  697. return 0;
  698. }
  699. static int ipvideo_decode_block_opcode_0xD_16(IpvideoContext *s, AVFrame *frame)
  700. {
  701. int x, y;
  702. uint16_t P[2];
  703. uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr;
  704. /* 4-color block encoding: each 4x4 block is a different color */
  705. for (y = 0; y < 8; y++) {
  706. if (!(y & 3)) {
  707. P[0] = bytestream2_get_le16(&s->stream_ptr);
  708. P[1] = bytestream2_get_le16(&s->stream_ptr);
  709. }
  710. for (x = 0; x < 8; x++)
  711. pixel_ptr[x] = P[x >> 2];
  712. pixel_ptr += s->stride;
  713. }
  714. /* report success */
  715. return 0;
  716. }
  717. static int ipvideo_decode_block_opcode_0xE_16(IpvideoContext *s, AVFrame *frame)
  718. {
  719. int x, y;
  720. uint16_t pix;
  721. uint16_t *pixel_ptr = (uint16_t*)s->pixel_ptr;
  722. /* 1-color encoding: the whole block is 1 solid color */
  723. pix = bytestream2_get_le16(&s->stream_ptr);
  724. for (y = 0; y < 8; y++) {
  725. for (x = 0; x < 8; x++)
  726. pixel_ptr[x] = pix;
  727. pixel_ptr += s->stride;
  728. }
  729. /* report success */
  730. return 0;
  731. }
/* Dispatch table: block opcode (0x0-0xF) -> decoder, 8bpp (PAL8) mode. */
static int (* const ipvideo_decode_block[])(IpvideoContext *s, AVFrame *frame) = {
    ipvideo_decode_block_opcode_0x0, ipvideo_decode_block_opcode_0x1,
    ipvideo_decode_block_opcode_0x2, ipvideo_decode_block_opcode_0x3,
    ipvideo_decode_block_opcode_0x4, ipvideo_decode_block_opcode_0x5,
    ipvideo_decode_block_opcode_0x6, ipvideo_decode_block_opcode_0x7,
    ipvideo_decode_block_opcode_0x8, ipvideo_decode_block_opcode_0x9,
    ipvideo_decode_block_opcode_0xA, ipvideo_decode_block_opcode_0xB,
    ipvideo_decode_block_opcode_0xC, ipvideo_decode_block_opcode_0xD,
    ipvideo_decode_block_opcode_0xE, ipvideo_decode_block_opcode_0xF,
};
/* Dispatch table: block opcode (0x0-0xF) -> decoder, 16bpp mode.
 * NOTE: slot 0xF deliberately maps to the opcode-0x1 handler — there is
 * no 16bpp dithered decoder in this file. */
static int (* const ipvideo_decode_block16[])(IpvideoContext *s, AVFrame *frame) = {
    ipvideo_decode_block_opcode_0x0,    ipvideo_decode_block_opcode_0x1,
    ipvideo_decode_block_opcode_0x2,    ipvideo_decode_block_opcode_0x3,
    ipvideo_decode_block_opcode_0x4,    ipvideo_decode_block_opcode_0x5,
    ipvideo_decode_block_opcode_0x6_16, ipvideo_decode_block_opcode_0x7_16,
    ipvideo_decode_block_opcode_0x8_16, ipvideo_decode_block_opcode_0x9_16,
    ipvideo_decode_block_opcode_0xA_16, ipvideo_decode_block_opcode_0xB_16,
    ipvideo_decode_block_opcode_0xC_16, ipvideo_decode_block_opcode_0xD_16,
    ipvideo_decode_block_opcode_0xE_16, ipvideo_decode_block_opcode_0x1,
};
/* Format 0x06, pass 1: opcode 0 reads the 8x8 block verbatim from the
 * stream; any other opcode copies the co-located block from two frames
 * ago (motion is applied in the second pass). */
static void ipvideo_format_06_firstpass(IpvideoContext *s, AVFrame *frame, int16_t opcode)
{
    int line;

    if (!opcode) {
        for (line = 0; line < 8; ++line) {
            bytestream2_get_buffer(&s->stream_ptr, s->pixel_ptr, 8);
            s->pixel_ptr += s->stride;
        }
    } else {
        /* Don't try to copy second_last_frame data on the first frames */
        if (s->avctx->frame_number > 2)
            copy_from(s, s->second_last_frame, frame, 0, 0);
    }
}
/* Format 0x06, pass 2: nonzero opcodes are motion vectors.  Negative
 * opcodes (biased by 0xC000) reference the previous frame; positive ones
 * (biased by 0x4000) reference the current frame.  The biased magnitude
 * is split into x/y offsets using the frame's line size.  Opcode 0 means
 * no motion and was fully handled by the first pass. */
static void ipvideo_format_06_secondpass(IpvideoContext *s, AVFrame *frame, int16_t opcode)
{
    int off_x, off_y;

    if (opcode < 0) {
        off_x = ((uint16_t)opcode - 0xC000) % frame->linesize[0];
        off_y = ((uint16_t)opcode - 0xC000) / frame->linesize[0];
        copy_from(s, s->last_frame, frame, off_x, off_y);
    } else if (opcode > 0) {
        off_x = ((uint16_t)opcode - 0x4000) % frame->linesize[0];
        off_y = ((uint16_t)opcode - 0x4000) / frame->linesize[0];
        copy_from(s, frame, frame, off_x, off_y);
    }
}
/* The two decode passes for format 0x06, indexed by pass number. */
static void (* const ipvideo_format_06_passes[])(IpvideoContext *s, AVFrame *frame, int16_t op) = {
    ipvideo_format_06_firstpass, ipvideo_format_06_secondpass,
};
/* Decode one format-0x06 frame: two passes over every 8x8 block, reading
 * a 16-bit opcode per block from the decoding map (rewound between
 * passes).  Pass 1 fills pixel data, pass 2 applies motion. */
static void ipvideo_decode_format_06_opcodes(IpvideoContext *s, AVFrame *frame)
{
    int pass, x, y;
    int16_t opcode;
    GetByteContext decoding_map_ptr;

    /* this is PAL8, so make the palette available */
    memcpy(frame->data[1], s->pal, AVPALETTE_SIZE);
    s->stride = frame->linesize[0];

    s->line_inc = s->stride - 8;
    s->upper_motion_limit_offset = (s->avctx->height - 8) * frame->linesize[0]
                                   + (s->avctx->width - 8) * (1 + s->is_16bpp);

    bytestream2_init(&decoding_map_ptr, s->decoding_map, s->decoding_map_size);

    for (pass = 0; pass < 2; ++pass) {
        bytestream2_seek(&decoding_map_ptr, 0, SEEK_SET);
        for (y = 0; y < s->avctx->height; y += 8) {
            for (x = 0; x < s->avctx->width; x += 8) {
                opcode = bytestream2_get_le16(&decoding_map_ptr);

                ff_tlog(s->avctx,
                        " block @ (%3d, %3d): opcode 0x%X, data ptr offset %d\n",
                        x, y, opcode, bytestream2_tell(&s->stream_ptr));

                s->pixel_ptr = frame->data[0] + x + y * frame->linesize[0];
                ipvideo_format_06_passes[pass](s, frame, opcode);
            }
        }
    }

    if (bytestream2_get_bytes_left(&s->stream_ptr) > 1) {
        av_log(s->avctx, AV_LOG_DEBUG,
               "decode finished with %d bytes left over\n",
               bytestream2_get_bytes_left(&s->stream_ptr));
    }
}
/* Format 0x10, pass 1: opcode 0 reads the 8x8 block verbatim from the
 * stream; other opcodes are left for the second pass. */
static void ipvideo_format_10_firstpass(IpvideoContext *s, AVFrame *frame, int16_t opcode)
{
    int line;

    if (!opcode) {
        for (line = 0; line < 8; ++line) {
            bytestream2_get_buffer(&s->stream_ptr, s->pixel_ptr, 8);
            s->pixel_ptr += s->stride;
        }
    }
}
/* Format 0x10, pass 2: same motion scheme as format 0x06, but operating
 * on the scratch decode frames — negative opcodes (bias 0xC000) copy
 * from prev_decode_frame, positive ones (bias 0x4000) from
 * cur_decode_frame itself. */
static void ipvideo_format_10_secondpass(IpvideoContext *s, AVFrame *frame, int16_t opcode)
{
    int off_x, off_y;

    if (opcode < 0) {
        off_x = ((uint16_t)opcode - 0xC000) % s->cur_decode_frame->linesize[0];
        off_y = ((uint16_t)opcode - 0xC000) / s->cur_decode_frame->linesize[0];
        copy_from(s, s->prev_decode_frame, s->cur_decode_frame, off_x, off_y);
    } else if (opcode > 0) {
        off_x = ((uint16_t)opcode - 0x4000) % s->cur_decode_frame->linesize[0];
        off_y = ((uint16_t)opcode - 0x4000) / s->cur_decode_frame->linesize[0];
        copy_from(s, s->cur_decode_frame, s->cur_decode_frame, off_x, off_y);
    }
}
/* The two decode passes for format 0x10, indexed by pass number. */
static void (* const ipvideo_format_10_passes[])(IpvideoContext *s, AVFrame *frame, int16_t op) = {
    ipvideo_format_10_firstpass, ipvideo_format_10_secondpass,
};
/* Decode one format-0x10 frame.  Two passes decode changed blocks into
 * the scratch frame (cur_decode_frame), then a third walk assembles the
 * output frame: changed blocks come from the scratch frame, unchanged
 * ones from last_frame.  The skip map is a stream of 16-bit counters
 * driving which blocks carry opcodes; NOTE(review): the exact
 * skip/-0x8000/"*= 2" semantics are inferred from the loop structure —
 * confirm against the MVE format description before modifying. */
static void ipvideo_decode_format_10_opcodes(IpvideoContext *s, AVFrame *frame)
{
    int pass, x, y, changed_block;
    int16_t opcode, skip;
    GetByteContext decoding_map_ptr;
    GetByteContext skip_map_ptr;

    bytestream2_skip(&s->stream_ptr, 14); /* data starts 14 bytes in */

    /* this is PAL8, so make the palette available */
    memcpy(frame->data[1], s->pal, AVPALETTE_SIZE);
    s->stride = frame->linesize[0];

    s->line_inc = s->stride - 8;
    s->upper_motion_limit_offset = (s->avctx->height - 8) * frame->linesize[0]
                                   + (s->avctx->width - 8) * (1 + s->is_16bpp);

    bytestream2_init(&decoding_map_ptr, s->decoding_map, s->decoding_map_size);
    bytestream2_init(&skip_map_ptr, s->skip_map, s->skip_map_size);

    for (pass = 0; pass < 2; ++pass) {
        bytestream2_seek(&decoding_map_ptr, 0, SEEK_SET);
        bytestream2_seek(&skip_map_ptr, 0, SEEK_SET);
        skip = bytestream2_get_le16(&skip_map_ptr);

        for (y = 0; y < s->avctx->height; y += 8) {
            for (x = 0; x < s->avctx->width; x += 8) {
                s->pixel_ptr = s->cur_decode_frame->data[0] + x + y * s->cur_decode_frame->linesize[0];

                while (skip <= 0 && bytestream2_get_bytes_left(&skip_map_ptr) > 1) {
                    if (skip != -0x8000 && skip) {
                        /* this block carries an opcode: decode it */
                        opcode = bytestream2_get_le16(&decoding_map_ptr);
                        ipvideo_format_10_passes[pass](s, frame, opcode);
                        break;
                    }
                    skip = bytestream2_get_le16(&skip_map_ptr);
                }
                skip *= 2;
            }
        }
    }

    /* final walk: compose the output frame block by block */
    bytestream2_seek(&skip_map_ptr, 0, SEEK_SET);
    skip = bytestream2_get_le16(&skip_map_ptr);
    for (y = 0; y < s->avctx->height; y += 8) {
        for (x = 0; x < s->avctx->width; x += 8) {
            changed_block = 0;
            s->pixel_ptr = frame->data[0] + x + y*frame->linesize[0];

            while (skip <= 0) {
                if (skip != -0x8000 && skip) {
                    changed_block = 1;
                    break;
                }
                if (bytestream2_get_bytes_left(&skip_map_ptr) < 2)
                    return;
                skip = bytestream2_get_le16(&skip_map_ptr);
            }

            if (changed_block) {
                copy_from(s, s->cur_decode_frame, frame, 0, 0);
            } else {
                /* Don't try to copy last_frame data on the first frame */
                if (s->avctx->frame_number)
                    copy_from(s, s->last_frame, frame, 0, 0);
            }
            skip *= 2;
        }
    }

    /* the scratch frame just decoded becomes next frame's reference */
    FFSWAP(AVFrame*, s->prev_decode_frame, s->cur_decode_frame);

    if (bytestream2_get_bytes_left(&s->stream_ptr) > 1) {
        av_log(s->avctx, AV_LOG_DEBUG,
               "decode finished with %d bytes left over\n",
               bytestream2_get_bytes_left(&s->stream_ptr));
    }
}
  905. static void ipvideo_decode_format_11_opcodes(IpvideoContext *s, AVFrame *frame)
  906. {
  907. int x, y;
  908. unsigned char opcode;
  909. int ret;
  910. GetBitContext gb;
  911. bytestream2_skip(&s->stream_ptr, 14); /* data starts 14 bytes in */
  912. if (!s->is_16bpp) {
  913. /* this is PAL8, so make the palette available */
  914. memcpy(frame->data[1], s->pal, AVPALETTE_SIZE);
  915. s->stride = frame->linesize[0];
  916. } else {
  917. s->stride = frame->linesize[0] >> 1;
  918. s->mv_ptr = s->stream_ptr;
  919. bytestream2_skip(&s->mv_ptr, bytestream2_get_le16(&s->stream_ptr));
  920. }
  921. s->line_inc = s->stride - 8;
  922. s->upper_motion_limit_offset = (s->avctx->height - 8) * frame->linesize[0]
  923. + (s->avctx->width - 8) * (1 + s->is_16bpp);
  924. init_get_bits(&gb, s->decoding_map, s->decoding_map_size * 8);
  925. for (y = 0; y < s->avctx->height; y += 8) {
  926. for (x = 0; x < s->avctx->width; x += 8) {
  927. if (get_bits_left(&gb) < 4)
  928. return;
  929. opcode = get_bits(&gb, 4);
  930. ff_tlog(s->avctx,
  931. " block @ (%3d, %3d): encoding 0x%X, data ptr offset %d\n",
  932. x, y, opcode, bytestream2_tell(&s->stream_ptr));
  933. if (!s->is_16bpp) {
  934. s->pixel_ptr = frame->data[0] + x
  935. + y*frame->linesize[0];
  936. ret = ipvideo_decode_block[opcode](s, frame);
  937. } else {
  938. s->pixel_ptr = frame->data[0] + x*2
  939. + y*frame->linesize[0];
  940. ret = ipvideo_decode_block16[opcode](s, frame);
  941. }
  942. if (ret != 0) {
  943. av_log(s->avctx, AV_LOG_ERROR, "decode problem on frame %d, @ block (%d, %d)\n",
  944. s->avctx->frame_number, x, y);
  945. return;
  946. }
  947. }
  948. }
  949. if (bytestream2_get_bytes_left(&s->stream_ptr) > 1) {
  950. av_log(s->avctx, AV_LOG_DEBUG,
  951. "decode finished with %d bytes left over\n",
  952. bytestream2_get_bytes_left(&s->stream_ptr));
  953. }
  954. }
  955. static av_cold int ipvideo_decode_init(AVCodecContext *avctx)
  956. {
  957. IpvideoContext *s = avctx->priv_data;
  958. s->avctx = avctx;
  959. s->is_16bpp = avctx->bits_per_coded_sample == 16;
  960. avctx->pix_fmt = s->is_16bpp ? AV_PIX_FMT_RGB555 : AV_PIX_FMT_PAL8;
  961. ff_hpeldsp_init(&s->hdsp, avctx->flags);
  962. s->last_frame = av_frame_alloc();
  963. s->second_last_frame = av_frame_alloc();
  964. s->cur_decode_frame = av_frame_alloc();
  965. s->prev_decode_frame = av_frame_alloc();
  966. if (!s->last_frame || !s->second_last_frame ||
  967. !s->cur_decode_frame || !s->prev_decode_frame) {
  968. av_frame_free(&s->last_frame);
  969. av_frame_free(&s->second_last_frame);
  970. av_frame_free(&s->cur_decode_frame);
  971. av_frame_free(&s->prev_decode_frame);
  972. return AVERROR(ENOMEM);
  973. }
  974. s->cur_decode_frame->width = avctx->width;
  975. s->prev_decode_frame->width = avctx->width;
  976. s->cur_decode_frame->height = avctx->height;
  977. s->prev_decode_frame->height = avctx->height;
  978. s->cur_decode_frame->format = avctx->pix_fmt;
  979. s->prev_decode_frame->format = avctx->pix_fmt;
  980. ff_get_buffer(avctx, s->cur_decode_frame, 0);
  981. ff_get_buffer(avctx, s->prev_decode_frame, 0);
  982. return 0;
  983. }
/**
 * Decode one Interplay MVE packet.
 *
 * Packet layout (8-byte header): frame format byte, send-buffer flag byte,
 * LE16 video data size, LE16 decoding map size, LE16 skip map size. The
 * header is followed by the per-format payload areas validated below.
 * Returns buf_size on success (packet fully consumed) or a negative error.
 */
static int ipvideo_decode_frame(AVCodecContext *avctx,
                                void *data, int *got_frame,
                                AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    IpvideoContext *s = avctx->priv_data;
    AVFrame *frame = data;
    int ret;
    int send_buffer;
    int frame_format;
    int video_data_size;

    /* On a parameter change the reference frames no longer match the new
     * geometry; drop them so no stale data is copied from. */
    if (av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, NULL)) {
        av_frame_unref(s->last_frame);
        av_frame_unref(s->second_last_frame);
    }

    if (buf_size < 8)
        return AVERROR_INVALIDDATA;

    frame_format         = AV_RL8(buf);
    send_buffer          = AV_RL8(buf + 1);
    video_data_size      = AV_RL16(buf + 2);
    s->decoding_map_size = AV_RL16(buf + 4);
    s->skip_map_size     = AV_RL16(buf + 6);

    /* Per-format validation and pointer setup for the three payload areas. */
    switch (frame_format) {
    case 0x06:
        /* format 0x06 carries its decoding map inside the pixel data,
         * so the header must not declare separate maps */
        if (s->decoding_map_size) {
            av_log(avctx, AV_LOG_ERROR, "Decoding map for format 0x06\n");
            return AVERROR_INVALIDDATA;
        }

        if (s->skip_map_size) {
            av_log(avctx, AV_LOG_ERROR, "Skip map for format 0x06\n");
            return AVERROR_INVALIDDATA;
        }

        if (s->is_16bpp) {
            av_log(avctx, AV_LOG_ERROR, "Video format 0x06 does not support 16bpp movies\n");
            return AVERROR_INVALIDDATA;
        }

        /* Decoding map for 0x06 frame format is at the top of pixeldata */
        s->decoding_map_size = ((s->avctx->width / 8) * (s->avctx->height / 8)) * 2;
        s->decoding_map = buf + 8 + 14; /* 14 bytes of op data */
        video_data_size -= s->decoding_map_size + 14;
        if (video_data_size <= 0)
            return AVERROR_INVALIDDATA;

        if (buf_size < 8 + s->decoding_map_size + 14 + video_data_size)
            return AVERROR_INVALIDDATA;

        bytestream2_init(&s->stream_ptr, buf + 8 + s->decoding_map_size + 14, video_data_size);

        break;

    case 0x10:
        /* format 0x10 requires both a decoding map and a skip map */
        if (! s->decoding_map_size) {
            av_log(avctx, AV_LOG_ERROR, "Empty decoding map for format 0x10\n");
            return AVERROR_INVALIDDATA;
        }

        if (! s->skip_map_size) {
            av_log(avctx, AV_LOG_ERROR, "Empty skip map for format 0x10\n");
            return AVERROR_INVALIDDATA;
        }

        if (s->is_16bpp) {
            av_log(avctx, AV_LOG_ERROR, "Video format 0x10 does not support 16bpp movies\n");
            return AVERROR_INVALIDDATA;
        }

        if (buf_size < 8 + video_data_size + s->decoding_map_size + s->skip_map_size)
            return AVERROR_INVALIDDATA;

        bytestream2_init(&s->stream_ptr, buf + 8, video_data_size);
        s->decoding_map = buf + 8 + video_data_size;
        s->skip_map = buf + 8 + video_data_size + s->decoding_map_size;

        break;

    case 0x11:
        /* format 0x11 uses only a decoding map */
        if (! s->decoding_map_size) {
            av_log(avctx, AV_LOG_ERROR, "Empty decoding map for format 0x11\n");
            return AVERROR_INVALIDDATA;
        }

        if (s->skip_map_size) {
            av_log(avctx, AV_LOG_ERROR, "Skip map for format 0x11\n");
            return AVERROR_INVALIDDATA;
        }

        if (buf_size < 8 + video_data_size + s->decoding_map_size)
            return AVERROR_INVALIDDATA;

        bytestream2_init(&s->stream_ptr, buf + 8, video_data_size);
        s->decoding_map = buf + 8 + video_data_size;

        break;

    default:
        /* NOTE(review): unsupported formats only log and fall through; the
         * generic size check below still runs but s->stream_ptr is not
         * re-initialized — consider returning AVERROR_INVALIDDATA here. */
        av_log(avctx, AV_LOG_ERROR, "Frame type 0x%02X unsupported\n", frame_format);
    }

    /* ensure we can't overread the packet */
    if (buf_size < 8 + s->decoding_map_size + video_data_size + s->skip_map_size) {
        av_log(avctx, AV_LOG_ERROR, "Invalid IP packet size\n");
        return AVERROR_INVALIDDATA;
    }

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    /* PAL8 path: pick up a new palette from packet side data if present */
    if (!s->is_16bpp) {
        int size;
        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, &size);
        if (pal && size == AVPALETTE_SIZE) {
            frame->palette_has_changed = 1;
            memcpy(s->pal, pal, AVPALETTE_SIZE);
        } else if (pal) {
            av_log(avctx, AV_LOG_ERROR, "Palette size %d is wrong\n", size);
        }
    }

    switch (frame_format) {
    case 0x06:
        ipvideo_decode_format_06_opcodes(s, frame);
        break;
    case 0x10:
        ipvideo_decode_format_10_opcodes(s, frame);
        break;
    case 0x11:
        ipvideo_decode_format_11_opcodes(s, frame);
        break;
    }

    /* the container decides per-frame whether this buffer is displayed */
    *got_frame = send_buffer;

    /* shuffle frames */
    av_frame_unref(s->second_last_frame);
    FFSWAP(AVFrame*, s->second_last_frame, s->last_frame);
    if ((ret = av_frame_ref(s->last_frame, frame)) < 0)
        return ret;

    /* report that the buffer was completely consumed */
    return buf_size;
}
  1104. static av_cold int ipvideo_decode_end(AVCodecContext *avctx)
  1105. {
  1106. IpvideoContext *s = avctx->priv_data;
  1107. av_frame_free(&s->last_frame);
  1108. av_frame_free(&s->second_last_frame);
  1109. av_frame_free(&s->cur_decode_frame);
  1110. av_frame_free(&s->prev_decode_frame);
  1111. return 0;
  1112. }
/* Codec registration entry for the Interplay MVE video decoder.
 * DR1: the decoder uses get_buffer()-allocated frames directly;
 * PARAM_CHANGE: the demuxer may signal mid-stream geometry changes,
 * handled at the top of ipvideo_decode_frame(). */
AVCodec ff_interplay_video_decoder = {
    .name           = "interplayvideo",
    .long_name      = NULL_IF_CONFIG_SMALL("Interplay MVE video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_INTERPLAY_VIDEO,
    .priv_data_size = sizeof(IpvideoContext),
    .init           = ipvideo_decode_init,
    .close          = ipvideo_decode_end,
    .decode         = ipvideo_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_PARAM_CHANGE,
};