You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

741 lines
24KB

  1. /*
  2. * Argonaut Games Video decoder
  3. * Copyright (c) 2020 Paul B Mahol
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include <stdio.h>
  22. #include <stdlib.h>
  23. #include <string.h>
  24. #include "libavutil/imgutils.h"
  25. #include "libavutil/internal.h"
  26. #include "libavutil/intreadwrite.h"
  27. #include "libavutil/mem.h"
  28. #include "avcodec.h"
  29. #include "bytestream.h"
  30. #include "internal.h"
/* Decoder state, persistent across packets. */
typedef struct ArgoContext {
    GetByteContext gb;  // byte reader over the current packet

    int bpp;            // output bytes per pixel: 1 (PAL8) or 4 (BGR0)
    int key;            // nonzero if the last decoded frame was a keyframe
    int mv0[128][2];    // (x,y) offset table for 8-bit motion codes (decode_mad1_24)
    int mv1[16][2];     // (x,y) offset table for 4-bit motion codes (decode_mad1_24)
    uint32_t pal[256];  // current palette, 0xFF<<24 | RGB (opaque alpha)
    AVFrame *frame;     // reference frame reused between packets (inter coding)
} ArgoContext;
  40. static int decode_pal8(AVCodecContext *avctx, uint32_t *pal)
  41. {
  42. ArgoContext *s = avctx->priv_data;
  43. GetByteContext *gb = &s->gb;
  44. int start, count;
  45. start = bytestream2_get_le16(gb);
  46. count = bytestream2_get_le16(gb);
  47. if (start + count > 256)
  48. return AVERROR_INVALIDDATA;
  49. if (bytestream2_get_bytes_left(gb) < 3 * count)
  50. return AVERROR_INVALIDDATA;
  51. for (int i = 0; i < count; i++)
  52. pal[start + i] = (0xFF << 24U) | bytestream2_get_be24u(gb);
  53. return 0;
  54. }
  55. static int decode_avcf(AVCodecContext *avctx, AVFrame *frame)
  56. {
  57. ArgoContext *s = avctx->priv_data;
  58. GetByteContext *gb = &s->gb;
  59. const int l = frame->linesize[0];
  60. const uint8_t *map = gb->buffer;
  61. uint8_t *dst = frame->data[0];
  62. if (bytestream2_get_bytes_left(gb) < 1024 + (frame->width / 2) * (frame->height / 2))
  63. return AVERROR_INVALIDDATA;
  64. bytestream2_skipu(gb, 1024);
  65. for (int y = 0; y < frame->height; y += 2) {
  66. for (int x = 0; x < frame->width; x += 2) {
  67. int index = bytestream2_get_byteu(gb);
  68. const uint8_t *block = map + index * 4;
  69. dst[x+0] = block[0];
  70. dst[x+1] = block[1];
  71. dst[x+l] = block[2];
  72. dst[x+l+1] = block[3];
  73. }
  74. dst += frame->linesize[0] * 2;
  75. }
  76. return 0;
  77. }
/*
 * Decode an ALCD chunk (inter frame): a 1024-byte codebook of 256
 * 2x2-pixel cells, then a bitmap with one bit per 2x2 block (MSB first),
 * then one codebook index per set bit. Blocks with a clear bit keep the
 * previous frame's pixels.
 */
static int decode_alcd(AVCodecContext *avctx, AVFrame *frame)
{
    ArgoContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    GetByteContext sb;                 /* separate reader for the block bitmap */
    const int l = frame->linesize[0];
    const uint8_t *map = gb->buffer;   /* 256-cell codebook, 4 bytes per cell */
    uint8_t *dst = frame->data[0];
    uint8_t codes = 0;                 /* current bitmap byte, consumed MSB first */
    int count = 0;                     /* bits remaining in 'codes' */

    /* codebook + ceil(blocks / 8) bitmap bytes must be present */
    if (bytestream2_get_bytes_left(gb) < 1024 + (((frame->width / 2) * (frame->height / 2) + 7) >> 3))
        return AVERROR_INVALIDDATA;

    bytestream2_skipu(gb, 1024);
    sb = *gb;   /* bitmap starts right after the codebook */
    /* advance the main reader past the bitmap to the index bytes */
    bytestream2_skipu(gb, ((frame->width / 2) * (frame->height / 2) + 7) >> 3);

    for (int y = 0; y < frame->height; y += 2) {
        for (int x = 0; x < frame->width; x += 2) {
            const uint8_t *block;
            int index;

            if (count == 0) {
                codes = bytestream2_get_byteu(&sb);
                count = 8;
            }

            if (codes & 0x80) {
                /* bit set: refresh this 2x2 block from the codebook */
                index = bytestream2_get_byte(gb);
                block = map + index * 4;

                dst[x+0]   = block[0];
                dst[x+1]   = block[1];
                dst[x+l]   = block[2];
                dst[x+l+1] = block[3];
            }

            codes <<= 1;
            count--;
        }

        dst += frame->linesize[0] * 2;
    }

    return 0;
}
/*
 * Decode a MAD1 chunk for 8-bit (PAL8) frames. The chunk is a sequence
 * of typed sub-blocks, terminated by type 0xFF:
 *   8 = 8x8 solid fills, 7 = intra-frame block copies, 6 = raw image,
 *   5 = 2x2 solid fills, 3 = unimplemented, 2 = 2-bit predictive codes.
 */
static int decode_mad1(AVCodecContext *avctx, AVFrame *frame)
{
    ArgoContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    const int w = frame->width;
    const int h = frame->height;
    const int l = frame->linesize[0];

    while (bytestream2_get_bytes_left(gb) > 0) {
        int size, type, pos, dy;
        uint8_t *dst;

        type = bytestream2_get_byte(gb);
        if (type == 0xFF)   /* end-of-chunk marker */
            break;

        switch (type) {
        case 8:
            /* one fill byte per 8x8 block, raster order */
            dst = frame->data[0];
            for (int y = 0; y < h; y += 8) {
                for (int x = 0; x < w; x += 8) {
                    int fill = bytestream2_get_byte(gb);
                    uint8_t *ddst = dst + x;

                    for (int by = 0; by < 8; by++) {
                        memset(ddst, fill, 8);
                        ddst += l;
                    }
                }

                dst += 8 * l;
            }
            break;
        case 7:
            /* block copies within the current frame; groups of blocks
             * share a block size, a zero size ends the sub-block */
            while (bytestream2_get_bytes_left(gb) > 0) {
                int bsize = bytestream2_get_byte(gb);
                uint8_t *src;
                int count;

                if (!bsize)
                    break;

                count = bytestream2_get_be16(gb);
                while (count > 0) {
                    int mvx, mvy, a, b, c, mx, my;
                    int bsize_w, bsize_h;

                    bsize_w = bsize_h = bsize;
                    if (bytestream2_get_bytes_left(gb) < 4)
                        return AVERROR_INVALIDDATA;
                    /* destination block position in units of bsize */
                    mvx = bytestream2_get_byte(gb) * bsize;
                    mvy = bytestream2_get_byte(gb) * bsize;
                    a = bytestream2_get_byte(gb);
                    b = bytestream2_get_byte(gb);
                    /* 14-bit packed source offset relative to destination:
                     * low 7 bits = dx - 64, next 7 bits = dy - 64 */
                    c = ((a & 0x3F) << 8) + b;
                    mx = mvx + (c & 0x7F) - 64;
                    my = mvy + (c >> 7) - 64;

                    if (mvy < 0 || mvy >= h)
                        return AVERROR_INVALIDDATA;

                    if (mvx < 0 || mvx >= w)
                        return AVERROR_INVALIDDATA;

                    if (my < 0 || my >= h)
                        return AVERROR_INVALIDDATA;

                    if (mx < 0 || mx >= w)
                        return AVERROR_INVALIDDATA;

                    dst = frame->data[0] + mvx + l * mvy;
                    src = frame->data[0] + mx + l * my;

                    /* clip the copy so it stays inside the frame */
                    bsize_w = FFMIN3(bsize_w, w - mvx, w - mx);
                    bsize_h = FFMIN3(bsize_h, h - mvy, h - my);

                    /* when dst is at or below src, copy rows bottom-up so
                     * overlapping source rows are read before being clobbered */
                    if (mvy >= my && (mvy != my || mvx >= mx)) {
                        src += (bsize_h - 1) * l;
                        dst += (bsize_h - 1) * l;
                        for (int by = 0; by < bsize_h; by++) {
                            memmove(dst, src, bsize_w);
                            src -= l;
                            dst -= l;
                        }
                    } else {
                        for (int by = 0; by < bsize_h; by++) {
                            memmove(dst, src, bsize_w);
                            src += l;
                            dst += l;
                        }
                    }

                    count--;
                }
            }
            break;
        case 6:
            /* raw full-frame image, w bytes per row */
            dst = frame->data[0];
            if (bytestream2_get_bytes_left(gb) < w * h)
                return AVERROR_INVALIDDATA;
            for (int y = 0; y < h; y++) {
                bytestream2_get_bufferu(gb, dst, w);
                dst += l;
            }
            break;
        case 5:
            /* one fill byte per 2x2 block */
            dst = frame->data[0];
            for (int y = 0; y < h; y += 2) {
                for (int x = 0; x < w; x += 2) {
                    int fill = bytestream2_get_byte(gb);
                    uint8_t *ddst = dst + x;

                    /* duplicate the byte so one 16-bit store fills both pixels */
                    fill = (fill << 8) | fill;
                    for (int by = 0; by < 2; by++) {
                        AV_WN16(ddst, fill);

                        ddst += l;
                    }
                }

                dst += 2 * l;
            }
            break;
        case 3:
            /* not implemented: log the header, then request a sample */
            size = bytestream2_get_le16(gb);
            if (size > 0) {
                int x = bytestream2_get_byte(gb) * 4;
                int y = bytestream2_get_byte(gb) * 4;
                int count = bytestream2_get_byte(gb);
                int fill = bytestream2_get_byte(gb);

                av_log(avctx, AV_LOG_DEBUG, "%d %d %d %d\n", x, y, count, fill);
                for (int i = 0; i < count; i++)
                    ;
                return AVERROR_PATCHWELCOME;
            }
            break;
        case 2:
            /* predictive coder: each control byte packs a pixel skip
             * (low 6 bits) and a code-byte count (high 2 bits); each
             * code byte holds four 2-bit prediction ops */
            dst = frame->data[0];
            pos = 0;   /* x position within the current row */
            dy = 0;    /* current row */
            while (bytestream2_get_bytes_left(gb) > 0) {
                int count = bytestream2_get_byteu(gb);
                int skip = count & 0x3F;

                count = count >> 6;
                if (skip == 0x3F) {
                    /* escape: long skip of 0x3E pixels, no code bytes */
                    pos += 0x3E;
                    while (pos >= w) {
                        pos -= w;
                        dst += l;
                        dy++;
                        if (dy >= h)
                            return 0;
                    }
                } else {
                    pos += skip;
                    while (pos >= w) {
                        pos -= w;
                        dst += l;
                        dy++;
                        if (dy >= h)
                            return 0;
                    }
                    /* count+1 code bytes follow */
                    while (count >= 0) {
                        int bits = bytestream2_get_byte(gb);

                        for (int i = 0; i < 4; i++) {
                            switch (bits & 3) {
                            case 0:   /* keep the existing (previous-frame) pixel */
                                break;
                            case 1:   /* copy left neighbor (wraps to end of row above) */
                                if (dy < 1 && !pos)
                                    return AVERROR_INVALIDDATA;
                                else
                                    dst[pos] = pos ? dst[pos - 1] : dst[-l + w - 1];
                                break;
                            case 2:   /* copy pixel directly above */
                                if (dy < 1)
                                    return AVERROR_INVALIDDATA;
                                dst[pos] = dst[pos - l];
                                break;
                            case 3:   /* literal pixel from the bitstream */
                                dst[pos] = bytestream2_get_byte(gb);
                                break;
                            }

                            pos++;
                            if (pos >= w) {
                                pos -= w;
                                dst += l;
                                dy++;
                                if (dy >= h)
                                    return 0;
                            }
                            bits >>= 2;
                        }
                        count--;
                    }
                }
            }
            break;
        default:
            return AVERROR_INVALIDDATA;
        }
    }

    return 0;
}
/*
 * Decode a MAD1 chunk for 24-bit (BGR0) frames. Sub-block types:
 *   8 = 12x12 solid fills, 7 = intra-frame block copies (32-bit pixels),
 *   12 = coded 4x4 blocks selected by a bitmap, using the mv0/mv1
 *        offset tables built in decode_init(). Terminated by 0xFF.
 */
static int decode_mad1_24(AVCodecContext *avctx, AVFrame *frame)
{
    ArgoContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    const int w = frame->width;
    const int h = frame->height;
    const int l = frame->linesize[0] / 4;   /* stride in 32-bit pixels */

    while (bytestream2_get_bytes_left(gb) > 0) {
        int osize, type, pos, dy, di, bcode, value, v14;
        const uint8_t *bits;
        uint32_t *dst;

        type = bytestream2_get_byte(gb);
        if (type == 0xFF)   /* end-of-chunk marker */
            return 0;

        switch (type) {
        case 8:
            /* one BE24 fill color per complete 12x12 block */
            dst = (uint32_t *)frame->data[0];
            for (int y = 0; y + 12 <= h; y += 12) {
                for (int x = 0; x + 12 <= w; x += 12) {
                    int fill = bytestream2_get_be24(gb);
                    uint32_t *dstp = dst + x;

                    for (int by = 0; by < 12; by++) {
                        for (int bx = 0; bx < 12; bx++)
                            dstp[bx] = fill;
                        dstp += l;
                    }
                }

                dst += 12 * l;
            }
            break;
        case 7:
            /* same block-copy scheme as the 8-bit variant, but with
             * 32-bit pixels (hence the * 4 in the memmove sizes) */
            while (bytestream2_get_bytes_left(gb) > 0) {
                int bsize = bytestream2_get_byte(gb);
                uint32_t *src;
                int count;

                if (!bsize)
                    break;

                count = bytestream2_get_be16(gb);
                while (count > 0) {
                    int mvx, mvy, a, b, c, mx, my;
                    int bsize_w, bsize_h;

                    bsize_w = bsize_h = bsize;
                    if (bytestream2_get_bytes_left(gb) < 4)
                        return AVERROR_INVALIDDATA;
                    mvx = bytestream2_get_byte(gb) * bsize;
                    mvy = bytestream2_get_byte(gb) * bsize;
                    a = bytestream2_get_byte(gb);
                    b = bytestream2_get_byte(gb);
                    /* 14-bit packed source offset relative to destination:
                     * low 7 bits = dx - 64, next 7 bits = dy - 64 */
                    c = ((a & 0x3F) << 8) + b;
                    mx = mvx + (c & 0x7F) - 64;
                    my = mvy + (c >> 7) - 64;

                    if (mvy < 0 || mvy >= h)
                        return AVERROR_INVALIDDATA;

                    if (mvx < 0 || mvx >= w)
                        return AVERROR_INVALIDDATA;

                    if (my < 0 || my >= h)
                        return AVERROR_INVALIDDATA;

                    if (mx < 0 || mx >= w)
                        return AVERROR_INVALIDDATA;

                    dst = (uint32_t *)frame->data[0] + mvx + l * mvy;
                    src = (uint32_t *)frame->data[0] + mx + l * my;

                    /* clip the copy so it stays inside the frame */
                    bsize_w = FFMIN3(bsize_w, w - mvx, w - mx);
                    bsize_h = FFMIN3(bsize_h, h - mvy, h - my);

                    /* copy bottom-up when dst is at or below src so
                     * overlapping rows are read before being clobbered */
                    if (mvy >= my && (mvy != my || mvx >= mx)) {
                        src += (bsize_h - 1) * l;
                        dst += (bsize_h - 1) * l;
                        for (int by = 0; by < bsize_h; by++) {
                            memmove(dst, src, bsize_w * 4);
                            src -= l;
                            dst -= l;
                        }
                    } else {
                        for (int by = 0; by < bsize_h; by++) {
                            memmove(dst, src, bsize_w * 4);
                            src += l;
                            dst += l;
                        }
                    }

                    count--;
                }
            }
            break;
        case 12:
            /* bitmap-selected 4x4 blocks; osize counts blocks (+7 so the
             * >> 3 below rounds the bitmap size up to whole bytes) */
            osize = ((h + 3) / 4) * ((w + 3) / 4) + 7;
            bits = gb->buffer;   /* one bit per 4x4 block, column-major */
            di = 0;              /* bit index into the bitmap */
            bcode = v14 = 0;
            if (bytestream2_get_bytes_left(gb) < osize >> 3)
                return AVERROR_INVALIDDATA;
            bytestream2_skip(gb, osize >> 3);
            /* note: blocks are scanned column by column (x outer, y inner) */
            for (int x = 0; x < w; x += 4) {
                for (int y = 0; y < h; y += 4) {
                    int astate = 0;   /* nibble phase for the 4-bit code stream */

                    if (bits[di >> 3] & (1 << (di & 7))) {
                        /* one 2-bit row code per row of the 4x4 block */
                        int codes = bytestream2_get_byte(gb);

                        for (int count = 0; count < 4; count++) {
                            uint32_t *src = (uint32_t *)frame->data[0];
                            /* highest valid pixel index; av_clip below keeps
                             * motion references inside the frame buffer */
                            size_t src_size = l * (h - 1) + (w - 1);
                            int nv, v, code = codes & 3;

                            pos = x;
                            dy = y + count;
                            dst = (uint32_t *)frame->data[0] + pos + dy * l;
                            if (code & 1)
                                bcode = bytestream2_get_byte(gb);
                            if (code == 3) {
                                /* four 2-bit ops; op 3 takes a 4-bit mv1 index,
                                 * two indices packed per byte (astate toggles) */
                                for (int j = 0; j < 4; j++) {
                                    switch (bcode & 3) {
                                    case 0:   /* keep existing pixel */
                                        break;
                                    case 1:   /* copy left neighbor */
                                        if (dy < 1 && !pos)
                                            return AVERROR_INVALIDDATA;

                                        dst[0] = dst[-1];
                                        break;
                                    case 2:   /* copy pixel above */
                                        if (dy < 1)
                                            return AVERROR_INVALIDDATA;

                                        dst[0] = dst[-l];
                                        break;
                                    case 3:   /* motion-compensated copy via mv1 */
                                        if (astate) {
                                            nv = value >> 4;
                                        } else {
                                            value = bytestream2_get_byte(gb);
                                            nv = value & 0xF;
                                        }
                                        astate ^= 1;
                                        dst[0] = src[av_clip(l * (dy + s->mv1[nv][1]) + pos +
                                                             s->mv1[nv][0], 0, src_size)];
                                        break;
                                    }

                                    bcode >>= 2;
                                    dst++;
                                    pos++;
                                }
                            } else if (code) {
                                /* code 1: remember bcode; code 2: reuse it */
                                if (code == 1)
                                    v14 = bcode;
                                else
                                    bcode = v14;
                                for (int j = 0; j < 4; j++) {
                                    switch (bcode & 3) {
                                    case 0:   /* keep existing pixel */
                                        break;
                                    case 1:   /* copy left neighbor */
                                        if (dy < 1 && !pos)
                                            return AVERROR_INVALIDDATA;

                                        dst[0] = dst[-1];
                                        break;
                                    case 2:   /* copy pixel above */
                                        if (dy < 1)
                                            return AVERROR_INVALIDDATA;

                                        dst[0] = dst[-l];
                                        break;
                                    case 3:
                                        v = bytestream2_get_byte(gb);
                                        if (v < 128) {
                                            /* motion-compensated copy via mv0 */
                                            dst[0] = src[av_clip(l * (dy + s->mv0[v][1]) + pos +
                                                                 s->mv0[v][0], 0, src_size)];
                                        } else {
                                            /* literal: 7 + 16 bits of color */
                                            dst[0] = ((v & 0x7F) << 17) | bytestream2_get_be16(gb);
                                        }
                                        break;
                                    }

                                    bcode >>= 2;
                                    dst++;
                                    pos++;
                                }
                            }

                            codes >>= 2;
                        }
                    }

                    di++;
                }
            }
            break;
        default:
            return AVERROR_INVALIDDATA;
        }
    }

    /* running out of data without seeing the 0xFF terminator is an error */
    return AVERROR_INVALIDDATA;
}
  483. static int decode_rle(AVCodecContext *avctx, AVFrame *frame)
  484. {
  485. ArgoContext *s = avctx->priv_data;
  486. GetByteContext *gb = &s->gb;
  487. const int w = frame->width;
  488. const int h = frame->height;
  489. const int l = frame->linesize[0];
  490. uint8_t *dst = frame->data[0];
  491. int pos = 0, y = 0;
  492. while (bytestream2_get_bytes_left(gb) > 0) {
  493. int count = bytestream2_get_byte(gb);
  494. int pixel = bytestream2_get_byte(gb);
  495. if (!count) {
  496. pos += pixel;
  497. while (pos >= w) {
  498. pos -= w;
  499. y++;
  500. if (y >= h)
  501. return 0;
  502. }
  503. } else {
  504. while (count > 0) {
  505. dst[pos + y * l] = pixel;
  506. count--;
  507. pos++;
  508. if (pos >= w) {
  509. pos = 0;
  510. y++;
  511. if (y >= h)
  512. return 0;
  513. }
  514. }
  515. }
  516. }
  517. return 0;
  518. }
  519. static int decode_frame(AVCodecContext *avctx, void *data,
  520. int *got_frame, AVPacket *avpkt)
  521. {
  522. ArgoContext *s = avctx->priv_data;
  523. GetByteContext *gb = &s->gb;
  524. AVFrame *frame = s->frame;
  525. uint32_t chunk;
  526. int ret;
  527. bytestream2_init(gb, avpkt->data, avpkt->size);
  528. if ((ret = ff_reget_buffer(avctx, frame, 0)) < 0)
  529. return ret;
  530. chunk = bytestream2_get_be32(gb);
  531. switch (chunk) {
  532. case MKBETAG('P', 'A', 'L', '8'):
  533. for (int y = 0; y < frame->height; y++)
  534. memset(frame->data[0] + y * frame->linesize[0], 0, frame->width * s->bpp);
  535. if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
  536. memset(frame->data[1], 0, AVPALETTE_SIZE);
  537. return decode_pal8(avctx, s->pal);
  538. case MKBETAG('M', 'A', 'D', '1'):
  539. if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
  540. ret = decode_mad1(avctx, frame);
  541. else
  542. ret = decode_mad1_24(avctx, frame);
  543. break;
  544. case MKBETAG('A', 'V', 'C', 'F'):
  545. if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
  546. s->key = 1;
  547. ret = decode_avcf(avctx, frame);
  548. break;
  549. }
  550. case MKBETAG('A', 'L', 'C', 'D'):
  551. if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
  552. s->key = 0;
  553. ret = decode_alcd(avctx, frame);
  554. break;
  555. }
  556. case MKBETAG('R', 'L', 'E', 'F'):
  557. if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
  558. s->key = 1;
  559. ret = decode_rle(avctx, frame);
  560. break;
  561. }
  562. case MKBETAG('R', 'L', 'E', 'D'):
  563. if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
  564. s->key = 0;
  565. ret = decode_rle(avctx, frame);
  566. break;
  567. }
  568. default:
  569. av_log(avctx, AV_LOG_DEBUG, "unknown chunk 0x%X\n", chunk);
  570. break;
  571. }
  572. if (ret < 0)
  573. return ret;
  574. if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
  575. memcpy(frame->data[1], s->pal, AVPALETTE_SIZE);
  576. if ((ret = av_frame_ref(data, s->frame)) < 0)
  577. return ret;
  578. frame->pict_type = s->key ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
  579. frame->key_frame = s->key;
  580. *got_frame = 1;
  581. return avpkt->size;
  582. }
  583. static av_cold int decode_init(AVCodecContext *avctx)
  584. {
  585. ArgoContext *s = avctx->priv_data;
  586. switch (avctx->bits_per_raw_sample) {
  587. case 8: s->bpp = 1;
  588. avctx->pix_fmt = AV_PIX_FMT_PAL8; break;
  589. case 24: s->bpp = 4;
  590. avctx->pix_fmt = AV_PIX_FMT_BGR0; break;
  591. default: avpriv_request_sample(s, "depth == %u", avctx->bits_per_raw_sample);
  592. return AVERROR_PATCHWELCOME;
  593. }
  594. s->frame = av_frame_alloc();
  595. if (!s->frame)
  596. return AVERROR(ENOMEM);
  597. for (int n = 0, i = -4; i < 4; i++) {
  598. for (int j = -14; j < 2; j++) {
  599. s->mv0[n][0] = j;
  600. s->mv0[n++][1] = i;
  601. }
  602. }
  603. for (int n = 0, i = -5; i <= 1; i += 2) {
  604. int j = -5;
  605. while (j <= 1) {
  606. s->mv1[n][0] = j;
  607. s->mv1[n++][1] = i;
  608. j += 2;
  609. }
  610. }
  611. return 0;
  612. }
  613. static void decode_flush(AVCodecContext *avctx)
  614. {
  615. ArgoContext *s = avctx->priv_data;
  616. av_frame_unref(s->frame);
  617. }
  618. static av_cold int decode_close(AVCodecContext *avctx)
  619. {
  620. ArgoContext *s = avctx->priv_data;
  621. av_frame_free(&s->frame);
  622. return 0;
  623. }
/* Codec registration entry for the Argonaut Games Video decoder. */
AVCodec ff_argo_decoder = {
    .name           = "argo",
    .long_name      = NULL_IF_CONFIG_SMALL("Argonaut Games Video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_ARGO,
    .priv_data_size = sizeof(ArgoContext),
    .init           = decode_init,
    .decode         = decode_frame,
    .flush          = decode_flush,
    .close          = decode_close,
    .capabilities   = AV_CODEC_CAP_DR1,              /* direct rendering into user buffers */
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,     /* close() is called if init() fails */
};