You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

881 lines
26KB

  1. /*
  2. * Dxtory decoder
  3. *
  4. * Copyright (c) 2011 Konstantin Shishkov
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #include <inttypes.h>
  23. #include "libavutil/common.h"
  24. #include "libavutil/intreadwrite.h"
  25. #define BITSTREAM_READER_LE
  26. #include "avcodec.h"
  27. #include "bytestream.h"
  28. #include "get_bits.h"
  29. #include "internal.h"
  30. #include "unary.h"
  31. static int64_t get_raw_size(enum AVPixelFormat fmt, int width, int height)
  32. {
  33. switch (fmt) {
  34. case AV_PIX_FMT_RGB555LE:
  35. case AV_PIX_FMT_RGB565LE:
  36. return width * height * 2LL;
  37. case AV_PIX_FMT_RGB24:
  38. case AV_PIX_FMT_BGR24:
  39. case AV_PIX_FMT_YUV444P:
  40. return width * height * 3LL;
  41. case AV_PIX_FMT_YUV420P:
  42. return (int64_t)(width * height) + AV_CEIL_RSHIFT(width, 1) * AV_CEIL_RSHIFT(height, 1);
  43. case AV_PIX_FMT_YUV410P:
  44. return (int64_t)(width * height) + AV_CEIL_RSHIFT(width, 2) * AV_CEIL_RSHIFT(height, 2);
  45. }
  46. return 0;
  47. }
/*
 * Flip the picture vertically in-place by pointing each plane pointer at
 * its last line and negating the corresponding line stride.  The operation
 * is an involution: applying it twice restores the original pointers, and
 * the decode functions rely on that by calling it once before and once
 * after writing the frame data.
 */
static void do_vflip(AVCodecContext *avctx, AVFrame *pic, int vflip)
{
    if (!vflip)
        return;

    switch (pic->format) {
    case AV_PIX_FMT_YUV444P:
        /* 4:4:4 chroma planes have full height; flip them here, then fall
         * through to flip plane 0 together with the packed-RGB formats */
        pic->data[1] += (avctx->height - 1) * pic->linesize[1];
        pic->linesize[1] = -pic->linesize[1];
        pic->data[2] += (avctx->height - 1) * pic->linesize[2];
        pic->linesize[2] = -pic->linesize[2];
        /* fallthrough */
    case AV_PIX_FMT_RGB555LE:
    case AV_PIX_FMT_RGB565LE:
    case AV_PIX_FMT_BGR24:
    case AV_PIX_FMT_RGB24:
        pic->data[0] += (avctx->height - 1) * pic->linesize[0];
        pic->linesize[0] = -pic->linesize[0];
        break;
    case AV_PIX_FMT_YUV410P:
        /* chroma planes are subsampled 4x vertically */
        pic->data[0] += (avctx->height - 1) * pic->linesize[0];
        pic->linesize[0] = -pic->linesize[0];
        pic->data[1] += (AV_CEIL_RSHIFT(avctx->height, 2) - 1) * pic->linesize[1];
        pic->linesize[1] = -pic->linesize[1];
        pic->data[2] += (AV_CEIL_RSHIFT(avctx->height, 2) - 1) * pic->linesize[2];
        pic->linesize[2] = -pic->linesize[2];
        break;
    case AV_PIX_FMT_YUV420P:
        /* chroma planes are subsampled 2x vertically */
        pic->data[0] += (avctx->height - 1) * pic->linesize[0];
        pic->linesize[0] = -pic->linesize[0];
        pic->data[1] += (AV_CEIL_RSHIFT(avctx->height, 1) - 1) * pic->linesize[1];
        pic->linesize[1] = -pic->linesize[1];
        pic->data[2] += (AV_CEIL_RSHIFT(avctx->height, 1) - 1) * pic->linesize[2];
        pic->linesize[2] = -pic->linesize[2];
        break;
    }
}
  83. static int dxtory_decode_v1_rgb(AVCodecContext *avctx, AVFrame *pic,
  84. const uint8_t *src, int src_size,
  85. int id, int bpp, uint32_t vflipped)
  86. {
  87. int h;
  88. uint8_t *dst;
  89. int ret;
  90. if (src_size < get_raw_size(id, avctx->width, avctx->height)) {
  91. av_log(avctx, AV_LOG_ERROR, "packet too small\n");
  92. return AVERROR_INVALIDDATA;
  93. }
  94. avctx->pix_fmt = id;
  95. if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
  96. return ret;
  97. do_vflip(avctx, pic, vflipped);
  98. dst = pic->data[0];
  99. for (h = 0; h < avctx->height; h++) {
  100. memcpy(dst, src, avctx->width * bpp);
  101. src += avctx->width * bpp;
  102. dst += pic->linesize[0];
  103. }
  104. do_vflip(avctx, pic, vflipped);
  105. return 0;
  106. }
  107. static int dxtory_decode_v1_410(AVCodecContext *avctx, AVFrame *pic,
  108. const uint8_t *src, int src_size,
  109. uint32_t vflipped)
  110. {
  111. int h, w;
  112. uint8_t *Y1, *Y2, *Y3, *Y4, *U, *V;
  113. int height, width, hmargin, vmargin;
  114. int huvborder;
  115. int ret;
  116. if (src_size < get_raw_size(AV_PIX_FMT_YUV410P, avctx->width, avctx->height)) {
  117. av_log(avctx, AV_LOG_ERROR, "packet too small\n");
  118. return AVERROR_INVALIDDATA;
  119. }
  120. avctx->pix_fmt = AV_PIX_FMT_YUV410P;
  121. if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
  122. return ret;
  123. do_vflip(avctx, pic, vflipped);
  124. height = avctx->height & ~3;
  125. width = avctx->width & ~3;
  126. hmargin = avctx->width - width;
  127. vmargin = avctx->height - height;
  128. huvborder = AV_CEIL_RSHIFT(avctx->width, 2) - 1;
  129. Y1 = pic->data[0];
  130. Y2 = pic->data[0] + pic->linesize[0];
  131. Y3 = pic->data[0] + pic->linesize[0] * 2;
  132. Y4 = pic->data[0] + pic->linesize[0] * 3;
  133. U = pic->data[1];
  134. V = pic->data[2];
  135. for (h = 0; h < height; h += 4) {
  136. for (w = 0; w < width; w += 4) {
  137. AV_COPY32U(Y1 + w, src);
  138. AV_COPY32U(Y2 + w, src + 4);
  139. AV_COPY32U(Y3 + w, src + 8);
  140. AV_COPY32U(Y4 + w, src + 12);
  141. U[w >> 2] = src[16] + 0x80;
  142. V[w >> 2] = src[17] + 0x80;
  143. src += 18;
  144. }
  145. if (hmargin) {
  146. for (w = 0; w < hmargin; w++) {
  147. Y1[width + w] = src[w];
  148. Y2[width + w] = src[w + hmargin * 1];
  149. Y3[width + w] = src[w + hmargin * 2];
  150. Y4[width + w] = src[w + hmargin * 3];
  151. }
  152. src += 4 * hmargin;
  153. U[huvborder] = src[0] + 0x80;
  154. V[huvborder] = src[1] + 0x80;
  155. src += 2;
  156. }
  157. Y1 += pic->linesize[0] << 2;
  158. Y2 += pic->linesize[0] << 2;
  159. Y3 += pic->linesize[0] << 2;
  160. Y4 += pic->linesize[0] << 2;
  161. U += pic->linesize[1];
  162. V += pic->linesize[2];
  163. }
  164. if (vmargin) {
  165. for (w = 0; w < width; w += 4) {
  166. AV_COPY32U(Y1 + w, src);
  167. if (vmargin > 1)
  168. AV_COPY32U(Y2 + w, src + 4);
  169. if (vmargin > 2)
  170. AV_COPY32U(Y3 + w, src + 8);
  171. src += 4 * vmargin;
  172. U[w >> 2] = src[0] + 0x80;
  173. V[w >> 2] = src[1] + 0x80;
  174. src += 2;
  175. }
  176. if (hmargin) {
  177. for (w = 0; w < hmargin; w++) {
  178. AV_COPY32U(Y1 + w, src);
  179. if (vmargin > 1)
  180. AV_COPY32U(Y2 + w, src + 4);
  181. if (vmargin > 2)
  182. AV_COPY32U(Y3 + w, src + 8);
  183. src += 4 * vmargin;
  184. }
  185. U[huvborder] = src[0] + 0x80;
  186. V[huvborder] = src[1] + 0x80;
  187. src += 2;
  188. }
  189. }
  190. do_vflip(avctx, pic, vflipped);
  191. return 0;
  192. }
  193. static int dxtory_decode_v1_420(AVCodecContext *avctx, AVFrame *pic,
  194. const uint8_t *src, int src_size,
  195. uint32_t vflipped)
  196. {
  197. int h, w;
  198. uint8_t *Y1, *Y2, *U, *V;
  199. int height, width, hmargin, vmargin;
  200. int huvborder;
  201. int ret;
  202. if (src_size < get_raw_size(AV_PIX_FMT_YUV420P, avctx->width, avctx->height)) {
  203. av_log(avctx, AV_LOG_ERROR, "packet too small\n");
  204. return AVERROR_INVALIDDATA;
  205. }
  206. avctx->pix_fmt = AV_PIX_FMT_YUV420P;
  207. if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
  208. return ret;
  209. do_vflip(avctx, pic, vflipped);
  210. height = avctx->height & ~1;
  211. width = avctx->width & ~1;
  212. hmargin = avctx->width - width;
  213. vmargin = avctx->height - height;
  214. huvborder = AV_CEIL_RSHIFT(avctx->width, 1) - 1;
  215. Y1 = pic->data[0];
  216. Y2 = pic->data[0] + pic->linesize[0];
  217. U = pic->data[1];
  218. V = pic->data[2];
  219. for (h = 0; h < height; h += 2) {
  220. for (w = 0; w < width; w += 2) {
  221. AV_COPY16(Y1 + w, src);
  222. AV_COPY16(Y2 + w, src + 2);
  223. U[w >> 1] = src[4] + 0x80;
  224. V[w >> 1] = src[5] + 0x80;
  225. src += 6;
  226. }
  227. if (hmargin) {
  228. Y1[width + 1] = src[0];
  229. Y2[width + 1] = src[1];
  230. U[huvborder] = src[2] + 0x80;
  231. V[huvborder] = src[3] + 0x80;
  232. src += 4;
  233. }
  234. Y1 += pic->linesize[0] << 1;
  235. Y2 += pic->linesize[0] << 1;
  236. U += pic->linesize[1];
  237. V += pic->linesize[2];
  238. }
  239. if (vmargin) {
  240. for (w = 0; w < width; w += 2) {
  241. AV_COPY16U(Y1 + w, src);
  242. U[w >> 1] = src[0] + 0x80;
  243. V[w >> 1] = src[1] + 0x80;
  244. src += 4;
  245. }
  246. if (hmargin) {
  247. Y1[w] = src[0];
  248. U[huvborder] = src[1] + 0x80;
  249. V[huvborder] = src[2] + 0x80;
  250. src += 3;
  251. }
  252. }
  253. do_vflip(avctx, pic, vflipped);
  254. return 0;
  255. }
  256. static int dxtory_decode_v1_444(AVCodecContext *avctx, AVFrame *pic,
  257. const uint8_t *src, int src_size,
  258. uint32_t vflipped)
  259. {
  260. int h, w;
  261. uint8_t *Y, *U, *V;
  262. int ret;
  263. if (src_size < get_raw_size(AV_PIX_FMT_YUV444P, avctx->width, avctx->height)) {
  264. av_log(avctx, AV_LOG_ERROR, "packet too small\n");
  265. return AVERROR_INVALIDDATA;
  266. }
  267. avctx->pix_fmt = AV_PIX_FMT_YUV444P;
  268. if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
  269. return ret;
  270. do_vflip(avctx, pic, vflipped);
  271. Y = pic->data[0];
  272. U = pic->data[1];
  273. V = pic->data[2];
  274. for (h = 0; h < avctx->height; h++) {
  275. for (w = 0; w < avctx->width; w++) {
  276. Y[w] = *src++;
  277. U[w] = *src++ ^ 0x80;
  278. V[w] = *src++ ^ 0x80;
  279. }
  280. Y += pic->linesize[0];
  281. U += pic->linesize[1];
  282. V += pic->linesize[2];
  283. }
  284. do_vflip(avctx, pic, vflipped);
  285. return 0;
  286. }
/* Initial contents for the per-channel LRU tables used by the v2 coding.
 * The 555/565 tables are declared with 8 slots but only initialize the
 * first 5/6 entries (the rest are zero); decode_sym_565() maintains only
 * 6 entries. */
static const uint8_t def_lru[8]     = { 0x00, 0x20, 0x40, 0x60, 0x80, 0xA0, 0xC0, 0xFF };
static const uint8_t def_lru_555[8] = { 0x00, 0x08, 0x10, 0x18, 0x1F };
static const uint8_t def_lru_565[8] = { 0x00, 0x08, 0x10, 0x20, 0x30, 0x3F };
  290. static inline uint8_t decode_sym(GetBitContext *gb, uint8_t lru[8])
  291. {
  292. uint8_t c, val;
  293. c = get_unary(gb, 0, 8);
  294. if (!c) {
  295. val = get_bits(gb, 8);
  296. memmove(lru + 1, lru, sizeof(*lru) * (8 - 1));
  297. } else {
  298. val = lru[c - 1];
  299. memmove(lru + 1, lru, sizeof(*lru) * (c - 1));
  300. }
  301. lru[0] = val;
  302. return val;
  303. }
  304. static int check_slice_size(AVCodecContext *avctx,
  305. const uint8_t *src, int src_size,
  306. int slice_size, int off)
  307. {
  308. int cur_slice_size;
  309. if (slice_size > src_size - off) {
  310. av_log(avctx, AV_LOG_ERROR,
  311. "invalid slice size %d (only %d bytes left)\n",
  312. slice_size, src_size - off);
  313. return AVERROR_INVALIDDATA;
  314. }
  315. if (slice_size <= 16) {
  316. av_log(avctx, AV_LOG_ERROR, "invalid slice size %d\n",
  317. slice_size);
  318. return AVERROR_INVALIDDATA;
  319. }
  320. cur_slice_size = AV_RL32(src + off);
  321. if (cur_slice_size != slice_size - 16) {
  322. av_log(avctx, AV_LOG_ERROR,
  323. "Slice sizes mismatch: got %d instead of %d\n",
  324. cur_slice_size, slice_size - 16);
  325. }
  326. return 0;
  327. }
  328. static int load_buffer(AVCodecContext *avctx,
  329. const uint8_t *src, int src_size,
  330. GetByteContext *gb,
  331. int *nslices, int *off)
  332. {
  333. bytestream2_init(gb, src, src_size);
  334. *nslices = bytestream2_get_le16(gb);
  335. *off = FFALIGN(*nslices * 4 + 2, 16);
  336. if (src_size < *off) {
  337. av_log(avctx, AV_LOG_ERROR, "no slice data\n");
  338. return AVERROR_INVALIDDATA;
  339. }
  340. if (!*nslices) {
  341. avpriv_request_sample(avctx, "%d slices for %dx%d", *nslices,
  342. avctx->width, avctx->height);
  343. return AVERROR_PATCHWELCOME;
  344. }
  345. return 0;
  346. }
/*
 * Like decode_sym(), but for 'bits'-wide symbols (5 for the 5-bit
 * channels, 6 for the green channel of RGB565).
 *
 * NOTE(review): the escape path always shifts 6 - 1 entries regardless of
 * 'bits', i.e. only a 6-entry LRU is maintained here - consistent with
 * def_lru_565 initializing 6 values.
 */
static inline uint8_t decode_sym_565(GetBitContext *gb, uint8_t lru[8],
                                     int bits)
{
    uint8_t c, val;

    c = get_unary(gb, 0, bits);
    if (!c) {
        /* escape: literal value follows */
        val = get_bits(gb, bits);
        memmove(lru + 1, lru, sizeof(*lru) * (6 - 1));
    } else {
        /* LRU hit at position c - 1 */
        val = lru[c - 1];
        memmove(lru + 1, lru, sizeof(*lru) * (c - 1));
    }
    lru[0] = val;

    return val;
}
/* Decode up to 'height' lines starting at 'line' from the bit reader into
 * the frame; returns the number of lines actually decoded. */
typedef int (*decode_slice_func)(GetBitContext *gb, AVFrame *frame,
                                 int line, int height, uint8_t lru[3][8]);

/* Reset the three per-channel LRU tables to their format-specific defaults
 * (done once per slice). */
typedef void (*setup_lru_func)(uint8_t lru[3][8]);
/*
 * Common driver for all compressed (v2) variants.  Reads the slice table,
 * sanity-checks the declared slice sizes, then decodes each slice with
 * the format-specific callback, resetting the LRU tables per slice.
 *
 * @param decode_slice  per-format slice decoder
 * @param setup_lru     per-format LRU initializer
 * @param fmt           output pixel format
 * @param vflipped      nonzero to produce a vertically flipped picture
 * @return 0 on success, a negative AVERROR code on failure
 */
static int dxtory_decode_v2(AVCodecContext *avctx, AVFrame *pic,
                            const uint8_t *src, int src_size,
                            decode_slice_func decode_slice,
                            setup_lru_func setup_lru,
                            enum AVPixelFormat fmt,
                            uint32_t vflipped)
{
    GetByteContext gb, gb_check;
    GetBitContext gb2;
    int nslices, slice, line = 0;
    uint32_t off, slice_size;
    uint64_t off_check;
    uint8_t lru[3][8];
    int ret;

    ret = load_buffer(avctx, src, src_size, &gb, &nslices, &off);
    if (ret < 0)
        return ret;

    /* Pre-scan the slice table before allocating the frame: each slice
     * must exceed its 16-byte header plus a minimal plausible payload
     * (1 bit per pixel of its share of the frame). */
    off_check = off;
    gb_check = gb;
    for (slice = 0; slice < nslices; slice++) {
        slice_size = bytestream2_get_le32(&gb_check);

        if (slice_size <= 16 + (avctx->height * avctx->width / (8 * nslices)))
            return AVERROR_INVALIDDATA;
        off_check += slice_size;
    }

    /* Allow the packet to be short by up to discard_damaged_percentage
     * of the declared total; anything shorter is rejected outright. */
    if (off_check - avctx->discard_damaged_percentage*off_check/100 > src_size)
        return AVERROR_INVALIDDATA;

    avctx->pix_fmt = fmt;
    if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
        return ret;

    do_vflip(avctx, pic, vflipped);

    for (slice = 0; slice < nslices; slice++) {
        slice_size = bytestream2_get_le32(&gb);

        setup_lru(lru);

        ret = check_slice_size(avctx, src, src_size, slice_size, off);
        if (ret < 0)
            return ret;

        /* skip the 16-byte slice header, decode the payload */
        if ((ret = init_get_bits8(&gb2, src + off + 16, slice_size - 16)) < 0)
            return ret;

        line += decode_slice(&gb2, pic, line, avctx->height - line, lru);

        off += slice_size;
    }

    /* slices ran out of data before covering the frame: report, but still
     * return the partially decoded picture */
    if (avctx->height - line) {
        avpriv_request_sample(avctx, "Not enough slice data available");
    }

    do_vflip(avctx, pic, vflipped);

    return 0;
}
  413. av_always_inline
  414. static int dx2_decode_slice_5x5(GetBitContext *gb, AVFrame *frame,
  415. int line, int left, uint8_t lru[3][8],
  416. int is_565)
  417. {
  418. int x, y;
  419. int r, g, b;
  420. int width = frame->width;
  421. int stride = frame->linesize[0];
  422. uint8_t *dst = frame->data[0] + stride * line;
  423. for (y = 0; y < left && get_bits_left(gb) >= 3 * width; y++) {
  424. for (x = 0; x < width; x++) {
  425. b = decode_sym_565(gb, lru[0], 5);
  426. g = decode_sym_565(gb, lru[1], is_565 ? 6 : 5);
  427. r = decode_sym_565(gb, lru[2], 5);
  428. dst[x * 3 + 0] = (r << 3) | (r >> 2);
  429. dst[x * 3 + 1] = is_565 ? (g << 2) | (g >> 4) : (g << 3) | (g >> 2);
  430. dst[x * 3 + 2] = (b << 3) | (b >> 2);
  431. }
  432. dst += stride;
  433. }
  434. return y;
  435. }
  436. static void setup_lru_555(uint8_t lru[3][8])
  437. {
  438. memcpy(lru[0], def_lru_555, 8 * sizeof(*def_lru));
  439. memcpy(lru[1], def_lru_555, 8 * sizeof(*def_lru));
  440. memcpy(lru[2], def_lru_555, 8 * sizeof(*def_lru));
  441. }
  442. static void setup_lru_565(uint8_t lru[3][8])
  443. {
  444. memcpy(lru[0], def_lru_555, 8 * sizeof(*def_lru));
  445. memcpy(lru[1], def_lru_565, 8 * sizeof(*def_lru));
  446. memcpy(lru[2], def_lru_555, 8 * sizeof(*def_lru));
  447. }
/* RGB555 slice decoder: 5-bit green path of the shared 5x5 routine. */
static int dx2_decode_slice_555(GetBitContext *gb, AVFrame *frame,
                                int line, int left, uint8_t lru[3][8])
{
    return dx2_decode_slice_5x5(gb, frame, line, left, lru, 0);
}
/* RGB565 slice decoder: 6-bit green path of the shared 5x5 routine. */
static int dx2_decode_slice_565(GetBitContext *gb, AVFrame *frame,
                                int line, int left, uint8_t lru[3][8])
{
    return dx2_decode_slice_5x5(gb, frame, line, left, lru, 1);
}
  458. static int dxtory_decode_v2_565(AVCodecContext *avctx, AVFrame *pic,
  459. const uint8_t *src, int src_size, int is_565,
  460. uint32_t vflipped)
  461. {
  462. enum AVPixelFormat fmt = AV_PIX_FMT_RGB24;
  463. if (is_565)
  464. return dxtory_decode_v2(avctx, pic, src, src_size,
  465. dx2_decode_slice_565,
  466. setup_lru_565,
  467. fmt, vflipped);
  468. else
  469. return dxtory_decode_v2(avctx, pic, src, src_size,
  470. dx2_decode_slice_555,
  471. setup_lru_555,
  472. fmt, vflipped);
  473. }
  474. static int dx2_decode_slice_rgb(GetBitContext *gb, AVFrame *frame,
  475. int line, int left, uint8_t lru[3][8])
  476. {
  477. int x, y;
  478. int width = frame->width;
  479. int stride = frame->linesize[0];
  480. uint8_t *dst = frame->data[0] + stride * line;
  481. for (y = 0; y < left && get_bits_left(gb) >= 3 * width; y++) {
  482. for (x = 0; x < width; x++) {
  483. dst[x * 3 + 0] = decode_sym(gb, lru[0]);
  484. dst[x * 3 + 1] = decode_sym(gb, lru[1]);
  485. dst[x * 3 + 2] = decode_sym(gb, lru[2]);
  486. }
  487. dst += stride;
  488. }
  489. return y;
  490. }
  491. static void default_setup_lru(uint8_t lru[3][8])
  492. {
  493. int i;
  494. for (i = 0; i < 3; i++)
  495. memcpy(lru[i], def_lru, 8 * sizeof(*def_lru));
  496. }
/* Decode a compressed 24-bit RGB frame (output format BGR24). */
static int dxtory_decode_v2_rgb(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    return dxtory_decode_v2(avctx, pic, src, src_size,
                            dx2_decode_slice_rgb,
                            default_setup_lru,
                            AV_PIX_FMT_BGR24, vflipped);
}
  506. static int dx2_decode_slice_410(GetBitContext *gb, AVFrame *frame,
  507. int line, int left,
  508. uint8_t lru[3][8])
  509. {
  510. int x, y, i, j;
  511. int width = frame->width;
  512. int ystride = frame->linesize[0];
  513. int ustride = frame->linesize[1];
  514. int vstride = frame->linesize[2];
  515. uint8_t *Y = frame->data[0] + ystride * line;
  516. uint8_t *U = frame->data[1] + (ustride >> 2) * line;
  517. uint8_t *V = frame->data[2] + (vstride >> 2) * line;
  518. int h, w, hmargin, vmargin;
  519. int huvborder;
  520. h = frame->height & ~3;
  521. w = frame->width & ~3;
  522. hmargin = frame->width - w;
  523. vmargin = frame->height - h;
  524. huvborder = AV_CEIL_RSHIFT(frame->width, 2) - 1;
  525. for (y = 0; y < left - 3 && get_bits_left(gb) >= 18 * w / 4 + hmargin * 4 + (!!hmargin * 2); y += 4) {
  526. for (x = 0; x < w; x += 4) {
  527. for (j = 0; j < 4; j++)
  528. for (i = 0; i < 4; i++)
  529. Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
  530. U[x >> 2] = decode_sym(gb, lru[1]) ^ 0x80;
  531. V[x >> 2] = decode_sym(gb, lru[2]) ^ 0x80;
  532. }
  533. if (hmargin) {
  534. for (j = 0; j < 4; j++)
  535. for (i = 0; i < hmargin; i++)
  536. Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
  537. U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
  538. V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
  539. }
  540. Y += ystride << 2;
  541. U += ustride;
  542. V += vstride;
  543. }
  544. if (vmargin && y + vmargin == left) {
  545. for (x = 0; x < width; x += 4) {
  546. for (j = 0; j < vmargin; j++)
  547. for (i = 0; i < 4; i++)
  548. Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
  549. U[x >> 2] = decode_sym(gb, lru[1]) ^ 0x80;
  550. V[x >> 2] = decode_sym(gb, lru[2]) ^ 0x80;
  551. }
  552. if (hmargin) {
  553. for (j = 0; j < vmargin; j++) {
  554. for (i = 0; i < hmargin; i++)
  555. Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
  556. }
  557. U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
  558. V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
  559. }
  560. y += vmargin;
  561. }
  562. return y;
  563. }
/* Decode a compressed 4:1:0 frame. */
static int dxtory_decode_v2_410(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    return dxtory_decode_v2(avctx, pic, src, src_size,
                            dx2_decode_slice_410,
                            default_setup_lru,
                            AV_PIX_FMT_YUV410P, vflipped);
}
  573. static int dx2_decode_slice_420(GetBitContext *gb, AVFrame *frame,
  574. int line, int left,
  575. uint8_t lru[3][8])
  576. {
  577. int x, y;
  578. int width = frame->width;
  579. int ystride = frame->linesize[0];
  580. int ustride = frame->linesize[1];
  581. int vstride = frame->linesize[2];
  582. uint8_t *Y = frame->data[0] + ystride * line;
  583. uint8_t *U = frame->data[1] + (ustride >> 1) * line;
  584. uint8_t *V = frame->data[2] + (vstride >> 1) * line;
  585. int h, w, hmargin, vmargin;
  586. int huvborder;
  587. h = frame->height & ~1;
  588. w = frame->width & ~1;
  589. hmargin = frame->width - w;
  590. vmargin = frame->height - h;
  591. huvborder = AV_CEIL_RSHIFT(frame->width, 1) - 1;
  592. for (y = 0; y < left - 1 && get_bits_left(gb) >= 3 * w + hmargin * 4; y += 2) {
  593. for (x = 0; x < w; x += 2) {
  594. Y[x + 0 + 0 * ystride] = decode_sym(gb, lru[0]);
  595. Y[x + 1 + 0 * ystride] = decode_sym(gb, lru[0]);
  596. Y[x + 0 + 1 * ystride] = decode_sym(gb, lru[0]);
  597. Y[x + 1 + 1 * ystride] = decode_sym(gb, lru[0]);
  598. U[x >> 1] = decode_sym(gb, lru[1]) ^ 0x80;
  599. V[x >> 1] = decode_sym(gb, lru[2]) ^ 0x80;
  600. }
  601. if (hmargin) {
  602. Y[x + 0 * ystride] = decode_sym(gb, lru[0]);
  603. Y[x + 1 * ystride] = decode_sym(gb, lru[0]);
  604. U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
  605. V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
  606. }
  607. Y += ystride << 1;
  608. U += ustride;
  609. V += vstride;
  610. }
  611. if (vmargin) {
  612. for (x = 0; x < width; x += 2) {
  613. Y[x + 0] = decode_sym(gb, lru[0]);
  614. U[x >> 1] = decode_sym(gb, lru[1]) ^ 0x80;
  615. V[x >> 1] = decode_sym(gb, lru[2]) ^ 0x80;
  616. }
  617. if (hmargin) {
  618. Y[x] = decode_sym(gb, lru[0]);
  619. U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
  620. V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
  621. }
  622. }
  623. return y;
  624. }
/* Decode a compressed 4:2:0 frame. */
static int dxtory_decode_v2_420(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    return dxtory_decode_v2(avctx, pic, src, src_size,
                            dx2_decode_slice_420,
                            default_setup_lru,
                            AV_PIX_FMT_YUV420P, vflipped);
}
  634. static int dx2_decode_slice_444(GetBitContext *gb, AVFrame *frame,
  635. int line, int left,
  636. uint8_t lru[3][8])
  637. {
  638. int x, y;
  639. int width = frame->width;
  640. int ystride = frame->linesize[0];
  641. int ustride = frame->linesize[1];
  642. int vstride = frame->linesize[2];
  643. uint8_t *Y = frame->data[0] + ystride * line;
  644. uint8_t *U = frame->data[1] + ustride * line;
  645. uint8_t *V = frame->data[2] + vstride * line;
  646. for (y = 0; y < left && get_bits_left(gb) >= 3 * width; y++) {
  647. for (x = 0; x < width; x++) {
  648. Y[x] = decode_sym(gb, lru[0]);
  649. U[x] = decode_sym(gb, lru[1]) ^ 0x80;
  650. V[x] = decode_sym(gb, lru[2]) ^ 0x80;
  651. }
  652. Y += ystride;
  653. U += ustride;
  654. V += vstride;
  655. }
  656. return y;
  657. }
/* Decode a compressed 4:4:4 frame. */
static int dxtory_decode_v2_444(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    return dxtory_decode_v2(avctx, pic, src, src_size,
                            dx2_decode_slice_444,
                            default_setup_lru,
                            AV_PIX_FMT_YUV444P, vflipped);
}
/*
 * Decode one Dxtory frame.  The packet starts with a 16-byte header whose
 * big-endian first word selects the coding variant (as seen in the cases
 * below): the top byte picks the format (0x01 BGR24, 0x02 4:2:0,
 * 0x03 4:1:0, 0x04 4:4:4, 0x17 RGB565, 0x18/0x19 RGB555), bit 0x20
 * requests a vertically flipped picture, and type values ending in
 * ...09/...29 use the compressed (v2) coding while ...01/...21 are raw (v1).
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    AVFrame *pic = data;
    const uint8_t *src = avpkt->data;
    uint32_t type;
    int vflipped, ret;

    if (avpkt->size < 16) {
        av_log(avctx, AV_LOG_ERROR, "packet too small\n");
        return AVERROR_INVALIDDATA;
    }

    type = AV_RB32(src);
    vflipped = !!(type & 0x20); /* bit 5: vertical flip */

    switch (type) {
    case 0x01000021:
    case 0x01000001:
        ret = dxtory_decode_v1_rgb(avctx, pic, src + 16, avpkt->size - 16,
                                   AV_PIX_FMT_BGR24, 3, vflipped);
        break;
    case 0x01000029:
    case 0x01000009:
        ret = dxtory_decode_v2_rgb(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x02000021:
    case 0x02000001:
        ret = dxtory_decode_v1_420(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x02000029:
    case 0x02000009:
        ret = dxtory_decode_v2_420(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x03000021:
    case 0x03000001:
        ret = dxtory_decode_v1_410(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x03000029:
    case 0x03000009:
        ret = dxtory_decode_v2_410(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x04000021:
    case 0x04000001:
        ret = dxtory_decode_v1_444(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x04000029:
    case 0x04000009:
        ret = dxtory_decode_v2_444(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x17000021:
    case 0x17000001:
        ret = dxtory_decode_v1_rgb(avctx, pic, src + 16, avpkt->size - 16,
                                   AV_PIX_FMT_RGB565LE, 2, vflipped);
        break;
    case 0x17000029:
    case 0x17000009:
        ret = dxtory_decode_v2_565(avctx, pic, src + 16, avpkt->size - 16, 1, vflipped);
        break;
    case 0x18000021:
    case 0x19000021:
    case 0x18000001:
    case 0x19000001:
        ret = dxtory_decode_v1_rgb(avctx, pic, src + 16, avpkt->size - 16,
                                   AV_PIX_FMT_RGB555LE, 2, vflipped);
        break;
    case 0x18000029:
    case 0x19000029:
    case 0x18000009:
    case 0x19000009:
        ret = dxtory_decode_v2_565(avctx, pic, src + 16, avpkt->size - 16, 0, vflipped);
        break;
    default:
        avpriv_request_sample(avctx, "Frame header %"PRIX32, type);
        return AVERROR_PATCHWELCOME;
    }

    if (ret)
        return ret;

    /* every Dxtory frame is intra-coded */
    pic->pict_type = AV_PICTURE_TYPE_I;
    pic->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
/* Codec registration: intra-only decoder supporting direct rendering. */
AVCodec ff_dxtory_decoder = {
    .name           = "dxtory",
    .long_name      = NULL_IF_CONFIG_SMALL("Dxtory"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_DXTORY,
    .decode         = decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
};