You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

887 lines
27KB

  1. /*
  2. * Dxtory decoder
  3. *
  4. * Copyright (c) 2011 Konstantin Shishkov
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #include <inttypes.h>
  23. #include "libavutil/common.h"
  24. #include "libavutil/intreadwrite.h"
  25. #define BITSTREAM_READER_LE
  26. #include "avcodec.h"
  27. #include "bytestream.h"
  28. #include "get_bits.h"
  29. #include "internal.h"
  30. #include "unary.h"
  31. #include "thread.h"
  32. static int64_t get_raw_size(enum AVPixelFormat fmt, int width, int height)
  33. {
  34. switch (fmt) {
  35. case AV_PIX_FMT_RGB555LE:
  36. case AV_PIX_FMT_RGB565LE:
  37. return width * height * 2LL;
  38. case AV_PIX_FMT_RGB24:
  39. case AV_PIX_FMT_BGR24:
  40. case AV_PIX_FMT_YUV444P:
  41. return width * height * 3LL;
  42. case AV_PIX_FMT_YUV420P:
  43. return (int64_t)(width * height) + 2 * AV_CEIL_RSHIFT(width, 1) * AV_CEIL_RSHIFT(height, 1);
  44. case AV_PIX_FMT_YUV410P:
  45. return (int64_t)(width * height) + 2 * AV_CEIL_RSHIFT(width, 2) * AV_CEIL_RSHIFT(height, 2);
  46. }
  47. return 0;
  48. }
/*
 * Flip the picture vertically in place: point each plane at its last
 * line and negate the corresponding stride.  The operation is an
 * involution — calling it a second time with the same arguments undoes
 * the flip — which is exactly how the decoders below use it
 * (flip, write top-to-bottom, flip back).
 */
static void do_vflip(AVCodecContext *avctx, AVFrame *pic, int vflip)
{
    if (!vflip)
        return;

    switch (pic->format) {
    case AV_PIX_FMT_YUV444P:
        /* full-height chroma planes: flip them here, then fall through
         * so the shared code below flips plane 0 as well */
        pic->data[1] += (avctx->height - 1) * pic->linesize[1];
        pic->linesize[1] = -pic->linesize[1];
        pic->data[2] += (avctx->height - 1) * pic->linesize[2];
        pic->linesize[2] = -pic->linesize[2];
        /* fall through */
    case AV_PIX_FMT_RGB555LE:
    case AV_PIX_FMT_RGB565LE:
    case AV_PIX_FMT_BGR24:
    case AV_PIX_FMT_RGB24:
        /* packed formats only have plane 0 */
        pic->data[0] += (avctx->height - 1) * pic->linesize[0];
        pic->linesize[0] = -pic->linesize[0];
        break;
    case AV_PIX_FMT_YUV410P:
        /* chroma is 4x4 subsampled: last chroma line is ceil(h/4)-1 */
        pic->data[0] += (avctx->height - 1) * pic->linesize[0];
        pic->linesize[0] = -pic->linesize[0];
        pic->data[1] += (AV_CEIL_RSHIFT(avctx->height, 2) - 1) * pic->linesize[1];
        pic->linesize[1] = -pic->linesize[1];
        pic->data[2] += (AV_CEIL_RSHIFT(avctx->height, 2) - 1) * pic->linesize[2];
        pic->linesize[2] = -pic->linesize[2];
        break;
    case AV_PIX_FMT_YUV420P:
        /* chroma is 2x2 subsampled: last chroma line is ceil(h/2)-1 */
        pic->data[0] += (avctx->height - 1) * pic->linesize[0];
        pic->linesize[0] = -pic->linesize[0];
        pic->data[1] += (AV_CEIL_RSHIFT(avctx->height, 1) - 1) * pic->linesize[1];
        pic->linesize[1] = -pic->linesize[1];
        pic->data[2] += (AV_CEIL_RSHIFT(avctx->height, 1) - 1) * pic->linesize[2];
        pic->linesize[2] = -pic->linesize[2];
        break;
    }
}
  84. static int dxtory_decode_v1_rgb(AVCodecContext *avctx, AVFrame *pic,
  85. const uint8_t *src, int src_size,
  86. int id, int bpp, uint32_t vflipped)
  87. {
  88. ThreadFrame frame = { .f = pic };
  89. int h;
  90. uint8_t *dst;
  91. int ret;
  92. if (src_size < get_raw_size(id, avctx->width, avctx->height)) {
  93. av_log(avctx, AV_LOG_ERROR, "packet too small\n");
  94. return AVERROR_INVALIDDATA;
  95. }
  96. avctx->pix_fmt = id;
  97. if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
  98. return ret;
  99. do_vflip(avctx, pic, vflipped);
  100. dst = pic->data[0];
  101. for (h = 0; h < avctx->height; h++) {
  102. memcpy(dst, src, avctx->width * bpp);
  103. src += avctx->width * bpp;
  104. dst += pic->linesize[0];
  105. }
  106. do_vflip(avctx, pic, vflipped);
  107. return 0;
  108. }
/*
 * Decode a raw (uncompressed) YUV410 frame.
 *
 * The payload is organised in 4x4 macropixels: 16 luma bytes followed
 * by one U and one V byte stored with a -0x80 bias (hence the `+ 0x80`
 * below).  Widths/heights that are not multiples of 4 are carried in
 * trailing margin data after each row band and after the last band.
 */
static int dxtory_decode_v1_410(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    ThreadFrame frame = { .f = pic };
    int h, w;
    uint8_t *Y1, *Y2, *Y3, *Y4, *U, *V;
    int height, width, hmargin, vmargin;
    int huvborder;
    int ret;

    if (src_size < get_raw_size(AV_PIX_FMT_YUV410P, avctx->width, avctx->height)) {
        av_log(avctx, AV_LOG_ERROR, "packet too small\n");
        return AVERROR_INVALIDDATA;
    }

    avctx->pix_fmt = AV_PIX_FMT_YUV410P;
    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
        return ret;

    do_vflip(avctx, pic, vflipped);

    /* dimensions rounded down to whole 4x4 blocks, plus the leftovers */
    height = avctx->height & ~3;
    width = avctx->width & ~3;
    hmargin = avctx->width - width;
    vmargin = avctx->height - height;
    huvborder = AV_CEIL_RSHIFT(avctx->width, 2) - 1; /* last chroma column */

    /* four consecutive luma lines are filled per macropixel row */
    Y1 = pic->data[0];
    Y2 = pic->data[0] + pic->linesize[0];
    Y3 = pic->data[0] + pic->linesize[0] * 2;
    Y4 = pic->data[0] + pic->linesize[0] * 3;
    U = pic->data[1];
    V = pic->data[2];
    for (h = 0; h < height; h += 4) {
        for (w = 0; w < width; w += 4) {
            /* 16 luma samples of the 4x4 block, then its chroma pair */
            AV_COPY32U(Y1 + w, src);
            AV_COPY32U(Y2 + w, src + 4);
            AV_COPY32U(Y3 + w, src + 8);
            AV_COPY32U(Y4 + w, src + 12);
            U[w >> 2] = src[16] + 0x80;
            V[w >> 2] = src[17] + 0x80;
            src += 18;
        }
        if (hmargin) {
            /* 1-3 leftover columns; lumas stored grouped per line */
            for (w = 0; w < hmargin; w++) {
                Y1[width + w] = src[w];
                Y2[width + w] = src[w + hmargin * 1];
                Y3[width + w] = src[w + hmargin * 2];
                Y4[width + w] = src[w + hmargin * 3];
            }
            src += 4 * hmargin;
            U[huvborder] = src[0] + 0x80;
            V[huvborder] = src[1] + 0x80;
            src += 2;
        }
        Y1 += pic->linesize[0] * 4;
        Y2 += pic->linesize[0] * 4;
        Y3 += pic->linesize[0] * 4;
        Y4 += pic->linesize[0] * 4;
        U += pic->linesize[1];
        V += pic->linesize[2];
    }

    /* 1-3 leftover rows at the bottom */
    if (vmargin) {
        for (w = 0; w < width; w += 4) {
            AV_COPY32U(Y1 + w, src);
            if (vmargin > 1)
                AV_COPY32U(Y2 + w, src + 4);
            if (vmargin > 2)
                AV_COPY32U(Y3 + w, src + 8);
            src += 4 * vmargin;
            U[w >> 2] = src[0] + 0x80;
            V[w >> 2] = src[1] + 0x80;
            src += 2;
        }
        if (hmargin) {
            /* NOTE(review): unlike the per-row margin above (byte-wise
             * writes at Y1[width + w]), this copies 4 bytes per leftover
             * column starting at Y1[w] with w in [0, hmargin) — looks
             * inconsistent; confirm against reference samples before
             * touching it */
            for (w = 0; w < hmargin; w++) {
                AV_COPY32U(Y1 + w, src);
                if (vmargin > 1)
                    AV_COPY32U(Y2 + w, src + 4);
                if (vmargin > 2)
                    AV_COPY32U(Y3 + w, src + 8);
                src += 4 * vmargin;
            }
            U[huvborder] = src[0] + 0x80;
            V[huvborder] = src[1] + 0x80;
            src += 2;
        }
    }

    do_vflip(avctx, pic, vflipped);

    return 0;
}
/*
 * Decode a raw (uncompressed) YUV420 frame.
 *
 * The payload is organised in 2x2 macropixels: 4 luma bytes followed by
 * one U and one V byte stored with a -0x80 bias.  Odd widths/heights
 * are carried in margin data after each row band and after the last
 * band.
 */
static int dxtory_decode_v1_420(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    ThreadFrame frame = { .f = pic };
    int h, w;
    uint8_t *Y1, *Y2, *U, *V;
    int height, width, hmargin, vmargin;
    int huvborder;
    int ret;

    if (src_size < get_raw_size(AV_PIX_FMT_YUV420P, avctx->width, avctx->height)) {
        av_log(avctx, AV_LOG_ERROR, "packet too small\n");
        return AVERROR_INVALIDDATA;
    }

    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
        return ret;

    do_vflip(avctx, pic, vflipped);

    /* even part of the dimensions plus 0/1 leftover column/row */
    height = avctx->height & ~1;
    width = avctx->width & ~1;
    hmargin = avctx->width - width;
    vmargin = avctx->height - height;
    huvborder = AV_CEIL_RSHIFT(avctx->width, 1) - 1; /* last chroma column */

    Y1 = pic->data[0];
    Y2 = pic->data[0] + pic->linesize[0];
    U = pic->data[1];
    V = pic->data[2];
    for (h = 0; h < height; h += 2) {
        for (w = 0; w < width; w += 2) {
            /* 2x2 luma block, then its chroma pair */
            AV_COPY16(Y1 + w, src);
            AV_COPY16(Y2 + w, src + 2);
            U[w >> 1] = src[4] + 0x80;
            V[w >> 1] = src[5] + 0x80;
            src += 6;
        }
        if (hmargin) {
            /* NOTE(review): the last valid column index is `width`
             * (0-based), yet this writes Y1[width + 1]/Y2[width + 1],
             * while the bottom-margin path below writes Y1[w] == Y1[width];
             * one of the two is presumably off by one — verify against
             * samples before changing */
            Y1[width + 1] = src[0];
            Y2[width + 1] = src[1];
            U[huvborder] = src[2] + 0x80;
            V[huvborder] = src[3] + 0x80;
            src += 4;
        }
        Y1 += pic->linesize[0] << 1;
        Y2 += pic->linesize[0] << 1;
        U += pic->linesize[1];
        V += pic->linesize[2];
    }

    /* single leftover row at the bottom */
    if (vmargin) {
        for (w = 0; w < width; w += 2) {
            /* NOTE(review): chroma is read from src[0]/src[1], the same
             * bytes just copied as luma — confirm this matches the
             * bitstream layout */
            AV_COPY16U(Y1 + w, src);
            U[w >> 1] = src[0] + 0x80;
            V[w >> 1] = src[1] + 0x80;
            src += 4;
        }
        if (hmargin) {
            /* bottom-right corner pixel (w == width here) */
            Y1[w] = src[0];
            U[huvborder] = src[1] + 0x80;
            V[huvborder] = src[2] + 0x80;
            src += 3;
        }
    }

    do_vflip(avctx, pic, vflipped);

    return 0;
}
  260. static int dxtory_decode_v1_444(AVCodecContext *avctx, AVFrame *pic,
  261. const uint8_t *src, int src_size,
  262. uint32_t vflipped)
  263. {
  264. ThreadFrame frame = { .f = pic };
  265. int h, w;
  266. uint8_t *Y, *U, *V;
  267. int ret;
  268. if (src_size < get_raw_size(AV_PIX_FMT_YUV444P, avctx->width, avctx->height)) {
  269. av_log(avctx, AV_LOG_ERROR, "packet too small\n");
  270. return AVERROR_INVALIDDATA;
  271. }
  272. avctx->pix_fmt = AV_PIX_FMT_YUV444P;
  273. if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
  274. return ret;
  275. do_vflip(avctx, pic, vflipped);
  276. Y = pic->data[0];
  277. U = pic->data[1];
  278. V = pic->data[2];
  279. for (h = 0; h < avctx->height; h++) {
  280. for (w = 0; w < avctx->width; w++) {
  281. Y[w] = *src++;
  282. U[w] = *src++ ^ 0x80;
  283. V[w] = *src++ ^ 0x80;
  284. }
  285. Y += pic->linesize[0];
  286. U += pic->linesize[1];
  287. V += pic->linesize[2];
  288. }
  289. do_vflip(avctx, pic, vflipped);
  290. return 0;
  291. }
/* Initial contents for the LRU symbol tables used by decode_sym() /
 * decode_sym_565(): spread-out defaults for 8-bit samples and shorter
 * ramps for 5-bit (555) and 6-bit (565) components; unlisted entries
 * are zero-initialized. */
static const uint8_t def_lru[8] = { 0x00, 0x20, 0x40, 0x60, 0x80, 0xA0, 0xC0, 0xFF };
static const uint8_t def_lru_555[8] = { 0x00, 0x08, 0x10, 0x18, 0x1F };
static const uint8_t def_lru_565[8] = { 0x00, 0x08, 0x10, 0x20, 0x30, 0x3F };
  295. static inline uint8_t decode_sym(GetBitContext *gb, uint8_t lru[8])
  296. {
  297. uint8_t c, val;
  298. c = get_unary(gb, 0, 8);
  299. if (!c) {
  300. val = get_bits(gb, 8);
  301. memmove(lru + 1, lru, sizeof(*lru) * (8 - 1));
  302. } else {
  303. val = lru[c - 1];
  304. memmove(lru + 1, lru, sizeof(*lru) * (c - 1));
  305. }
  306. lru[0] = val;
  307. return val;
  308. }
  309. static int check_slice_size(AVCodecContext *avctx,
  310. const uint8_t *src, int src_size,
  311. int slice_size, int off)
  312. {
  313. int cur_slice_size;
  314. if (slice_size > src_size - off) {
  315. av_log(avctx, AV_LOG_ERROR,
  316. "invalid slice size %d (only %d bytes left)\n",
  317. slice_size, src_size - off);
  318. return AVERROR_INVALIDDATA;
  319. }
  320. if (slice_size <= 16) {
  321. av_log(avctx, AV_LOG_ERROR, "invalid slice size %d\n",
  322. slice_size);
  323. return AVERROR_INVALIDDATA;
  324. }
  325. cur_slice_size = AV_RL32(src + off);
  326. if (cur_slice_size != slice_size - 16) {
  327. av_log(avctx, AV_LOG_ERROR,
  328. "Slice sizes mismatch: got %d instead of %d\n",
  329. cur_slice_size, slice_size - 16);
  330. }
  331. return 0;
  332. }
  333. static int load_buffer(AVCodecContext *avctx,
  334. const uint8_t *src, int src_size,
  335. GetByteContext *gb,
  336. int *nslices, int *off)
  337. {
  338. bytestream2_init(gb, src, src_size);
  339. *nslices = bytestream2_get_le16(gb);
  340. *off = FFALIGN(*nslices * 4 + 2, 16);
  341. if (src_size < *off) {
  342. av_log(avctx, AV_LOG_ERROR, "no slice data\n");
  343. return AVERROR_INVALIDDATA;
  344. }
  345. if (!*nslices) {
  346. avpriv_request_sample(avctx, "%d slices for %dx%d", *nslices,
  347. avctx->width, avctx->height);
  348. return AVERROR_PATCHWELCOME;
  349. }
  350. return 0;
  351. }
  352. static inline uint8_t decode_sym_565(GetBitContext *gb, uint8_t lru[8],
  353. int bits)
  354. {
  355. uint8_t c, val;
  356. c = get_unary(gb, 0, bits);
  357. if (!c) {
  358. val = get_bits(gb, bits);
  359. memmove(lru + 1, lru, sizeof(*lru) * (6 - 1));
  360. } else {
  361. val = lru[c - 1];
  362. memmove(lru + 1, lru, sizeof(*lru) * (c - 1));
  363. }
  364. lru[0] = val;
  365. return val;
  366. }
/* Decodes up to `height` lines of one slice starting at `line`; returns
 * the number of lines actually produced. */
typedef int (*decode_slice_func)(GetBitContext *gb, AVFrame *frame,
                                 int line, int height, uint8_t lru[3][8]);
/* Re-initializes the three per-component LRU tables before a slice. */
typedef void (*setup_lru_func)(uint8_t lru[3][8]);
/*
 * Common driver for all compressed (v2) variants.
 *
 * The payload begins with the slice table parsed by load_buffer(); each
 * slice then starts with a 16-byte header whose first 32-bit word
 * repeats the slice payload size, followed by the bitstream that
 * decode_slice() consumes.
 */
static int dxtory_decode_v2(AVCodecContext *avctx, AVFrame *pic,
                            const uint8_t *src, int src_size,
                            decode_slice_func decode_slice,
                            setup_lru_func setup_lru,
                            enum AVPixelFormat fmt,
                            uint32_t vflipped)
{
    ThreadFrame frame = { .f = pic };
    GetByteContext gb, gb_check;
    GetBitContext gb2;
    int nslices, slice, line = 0;
    uint32_t off, slice_size;
    uint64_t off_check;
    uint8_t lru[3][8];
    int ret;

    ret = load_buffer(avctx, src, src_size, &gb, &nslices, &off);
    if (ret < 0)
        return ret;

    /* first pass over the slice table: reject implausible sizes before
     * allocating the frame (each slice must exceed a minimal share of
     * the picture, i.e. beat an 8:1 worst-case ratio) */
    off_check = off;
    gb_check = gb;
    for (slice = 0; slice < nslices; slice++) {
        slice_size = bytestream2_get_le32(&gb_check);
        if (slice_size <= 16 + (avctx->height * avctx->width / (8 * nslices)))
            return AVERROR_INVALIDDATA;
        off_check += slice_size;
    }

    /* tolerate truncated input up to the user's damage threshold */
    if (off_check - avctx->discard_damaged_percentage*off_check/100 > src_size)
        return AVERROR_INVALIDDATA;

    avctx->pix_fmt = fmt;
    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
        return ret;

    do_vflip(avctx, pic, vflipped);

    /* second pass: decode slice by slice; each slice appends the number
     * of lines it managed to produce */
    for (slice = 0; slice < nslices; slice++) {
        slice_size = bytestream2_get_le32(&gb);
        setup_lru(lru);
        ret = check_slice_size(avctx, src, src_size, slice_size, off);
        if (ret < 0)
            return ret;
        if ((ret = init_get_bits8(&gb2, src + off + 16, slice_size - 16)) < 0)
            return ret;
        line += decode_slice(&gb2, pic, line, avctx->height - line, lru);
        off += slice_size;
    }

    /* short output is reported but not fatal */
    if (avctx->height - line) {
        avpriv_request_sample(avctx, "Not enough slice data available");
    }

    do_vflip(avctx, pic, vflipped);

    return 0;
}
  419. av_always_inline
  420. static int dx2_decode_slice_5x5(GetBitContext *gb, AVFrame *frame,
  421. int line, int left, uint8_t lru[3][8],
  422. int is_565)
  423. {
  424. int x, y;
  425. int r, g, b;
  426. int width = frame->width;
  427. int stride = frame->linesize[0];
  428. uint8_t *dst = frame->data[0] + stride * line;
  429. for (y = 0; y < left && get_bits_left(gb) >= 3 * width; y++) {
  430. for (x = 0; x < width; x++) {
  431. b = decode_sym_565(gb, lru[0], 5);
  432. g = decode_sym_565(gb, lru[1], is_565 ? 6 : 5);
  433. r = decode_sym_565(gb, lru[2], 5);
  434. dst[x * 3 + 0] = (r << 3) | (r >> 2);
  435. dst[x * 3 + 1] = is_565 ? (g << 2) | (g >> 4) : (g << 3) | (g >> 2);
  436. dst[x * 3 + 2] = (b << 3) | (b >> 2);
  437. }
  438. dst += stride;
  439. }
  440. return y;
  441. }
  442. static void setup_lru_555(uint8_t lru[3][8])
  443. {
  444. memcpy(lru[0], def_lru_555, 8 * sizeof(*def_lru));
  445. memcpy(lru[1], def_lru_555, 8 * sizeof(*def_lru));
  446. memcpy(lru[2], def_lru_555, 8 * sizeof(*def_lru));
  447. }
  448. static void setup_lru_565(uint8_t lru[3][8])
  449. {
  450. memcpy(lru[0], def_lru_555, 8 * sizeof(*def_lru));
  451. memcpy(lru[1], def_lru_565, 8 * sizeof(*def_lru));
  452. memcpy(lru[2], def_lru_555, 8 * sizeof(*def_lru));
  453. }
/* RGB555 slice: all three components use 5 bits. */
static int dx2_decode_slice_555(GetBitContext *gb, AVFrame *frame,
                                int line, int left, uint8_t lru[3][8])
{
    return dx2_decode_slice_5x5(gb, frame, line, left, lru, 0);
}
/* RGB565 slice: 6-bit green, 5-bit red and blue. */
static int dx2_decode_slice_565(GetBitContext *gb, AVFrame *frame,
                                int line, int left, uint8_t lru[3][8])
{
    return dx2_decode_slice_5x5(gb, frame, line, left, lru, 1);
}
  464. static int dxtory_decode_v2_565(AVCodecContext *avctx, AVFrame *pic,
  465. const uint8_t *src, int src_size, int is_565,
  466. uint32_t vflipped)
  467. {
  468. enum AVPixelFormat fmt = AV_PIX_FMT_RGB24;
  469. if (is_565)
  470. return dxtory_decode_v2(avctx, pic, src, src_size,
  471. dx2_decode_slice_565,
  472. setup_lru_565,
  473. fmt, vflipped);
  474. else
  475. return dxtory_decode_v2(avctx, pic, src, src_size,
  476. dx2_decode_slice_555,
  477. setup_lru_555,
  478. fmt, vflipped);
  479. }
  480. static int dx2_decode_slice_rgb(GetBitContext *gb, AVFrame *frame,
  481. int line, int left, uint8_t lru[3][8])
  482. {
  483. int x, y;
  484. int width = frame->width;
  485. int stride = frame->linesize[0];
  486. uint8_t *dst = frame->data[0] + stride * line;
  487. for (y = 0; y < left && get_bits_left(gb) >= 3 * width; y++) {
  488. for (x = 0; x < width; x++) {
  489. dst[x * 3 + 0] = decode_sym(gb, lru[0]);
  490. dst[x * 3 + 1] = decode_sym(gb, lru[1]);
  491. dst[x * 3 + 2] = decode_sym(gb, lru[2]);
  492. }
  493. dst += stride;
  494. }
  495. return y;
  496. }
  497. static void default_setup_lru(uint8_t lru[3][8])
  498. {
  499. int i;
  500. for (i = 0; i < 3; i++)
  501. memcpy(lru[i], def_lru, 8 * sizeof(*def_lru));
  502. }
/* Decode a compressed 24-bit frame to packed BGR24. */
static int dxtory_decode_v2_rgb(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    return dxtory_decode_v2(avctx, pic, src, src_size,
                            dx2_decode_slice_rgb,
                            default_setup_lru,
                            AV_PIX_FMT_BGR24, vflipped);
}
/*
 * Decode one slice of compressed YUV410 data: per 4x4 block, 16
 * LRU-coded luma bytes followed by one chroma pair (stored with a 0x80
 * bias).  Returns the number of luma lines produced.
 */
static int dx2_decode_slice_410(GetBitContext *gb, AVFrame *frame,
                                int line, int left,
                                uint8_t lru[3][8])
{
    int x, y, i, j;
    int width = frame->width;
    int ystride = frame->linesize[0];
    int ustride = frame->linesize[1];
    int vstride = frame->linesize[2];
    uint8_t *Y = frame->data[0] + ystride * line;
    /* NOTE(review): chroma offsets scale the *stride* (ustride >> 2)
     * rather than the line count (line >> 2); correct only if strides
     * are exact multiples — verify */
    uint8_t *U = frame->data[1] + (ustride >> 2) * line;
    uint8_t *V = frame->data[2] + (vstride >> 2) * line;
    int h, w, hmargin, vmargin;
    int huvborder;

    /* dimensions rounded down to whole 4x4 blocks, plus leftovers */
    h = frame->height & ~3;
    w = frame->width & ~3;
    hmargin = frame->width - w;
    vmargin = frame->height - h;
    huvborder = AV_CEIL_RSHIFT(frame->width, 2) - 1; /* last chroma column */

    /* full 4-line bands; the bit check is a lower bound on what one
     * band can consume */
    for (y = 0; y < left - 3 && get_bits_left(gb) >= 18 * w / 4 + hmargin * 4 + (!!hmargin * 2); y += 4) {
        for (x = 0; x < w; x += 4) {
            for (j = 0; j < 4; j++)
                for (i = 0; i < 4; i++)
                    Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
            U[x >> 2] = decode_sym(gb, lru[1]) ^ 0x80;
            V[x >> 2] = decode_sym(gb, lru[2]) ^ 0x80;
        }
        if (hmargin) {
            /* 1-3 leftover columns on the right (x == w here) */
            for (j = 0; j < 4; j++)
                for (i = 0; i < hmargin; i++)
                    Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
            U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
            V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
        }
        Y += ystride * 4;
        U += ustride;
        V += vstride;
    }

    /* leftover rows, only when they complete the slice exactly */
    if (vmargin && y + vmargin == left) {
        /* NOTE(review): this loop runs to the full `width`, unlike the
         * aligned `w` used above, so with hmargin the post-loop x
         * exceeds w + 4 — confirm the indexing is intended */
        for (x = 0; x < width; x += 4) {
            for (j = 0; j < vmargin; j++)
                for (i = 0; i < 4; i++)
                    Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
            U[x >> 2] = decode_sym(gb, lru[1]) ^ 0x80;
            V[x >> 2] = decode_sym(gb, lru[2]) ^ 0x80;
        }
        if (hmargin) {
            for (j = 0; j < vmargin; j++) {
                for (i = 0; i < hmargin; i++)
                    Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
            }
            U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
            V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
        }
        y += vmargin;
    }

    return y;
}
/* Decode a compressed YUV410 frame. */
static int dxtory_decode_v2_410(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    return dxtory_decode_v2(avctx, pic, src, src_size,
                            dx2_decode_slice_410,
                            default_setup_lru,
                            AV_PIX_FMT_YUV410P, vflipped);
}
/*
 * Decode one slice of compressed YUV420 data: per 2x2 block, four
 * LRU-coded luma bytes followed by one chroma pair (stored with a 0x80
 * bias).  Returns the number of luma lines produced.
 */
static int dx2_decode_slice_420(GetBitContext *gb, AVFrame *frame,
                                int line, int left,
                                uint8_t lru[3][8])
{
    int x, y;
    int width = frame->width;
    int ystride = frame->linesize[0];
    int ustride = frame->linesize[1];
    int vstride = frame->linesize[2];
    uint8_t *Y = frame->data[0] + ystride * line;
    /* NOTE(review): chroma offsets scale the *stride* (ustride >> 1)
     * rather than the line count (line >> 1); correct only if strides
     * are exact multiples — verify */
    uint8_t *U = frame->data[1] + (ustride >> 1) * line;
    uint8_t *V = frame->data[2] + (vstride >> 1) * line;
    int h, w, hmargin, vmargin;
    int huvborder;

    /* even part of the dimensions plus 0/1 leftover column/row */
    h = frame->height & ~1;
    w = frame->width & ~1;
    hmargin = frame->width - w;
    vmargin = frame->height - h;
    huvborder = AV_CEIL_RSHIFT(frame->width, 1) - 1; /* last chroma column */

    /* full 2-line bands; the bit check is a lower bound on what one
     * band can consume */
    for (y = 0; y < left - 1 && get_bits_left(gb) >= 3 * w + hmargin * 4; y += 2) {
        for (x = 0; x < w; x += 2) {
            Y[x + 0 + 0 * ystride] = decode_sym(gb, lru[0]);
            Y[x + 1 + 0 * ystride] = decode_sym(gb, lru[0]);
            Y[x + 0 + 1 * ystride] = decode_sym(gb, lru[0]);
            Y[x + 1 + 1 * ystride] = decode_sym(gb, lru[0]);
            U[x >> 1] = decode_sym(gb, lru[1]) ^ 0x80;
            V[x >> 1] = decode_sym(gb, lru[2]) ^ 0x80;
        }
        if (hmargin) {
            /* one leftover column (x == w here) */
            Y[x + 0 * ystride] = decode_sym(gb, lru[0]);
            Y[x + 1 * ystride] = decode_sym(gb, lru[0]);
            U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
            V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
        }
        Y += ystride << 1;
        U += ustride;
        V += vstride;
    }

    /* single leftover row at the bottom */
    if (vmargin) {
        /* NOTE(review): runs to the full `width` instead of the aligned
         * `w`, so with hmargin the final corner write below lands at
         * Y[w + 2] — looks off by one relative to the picture width;
         * confirm against samples */
        for (x = 0; x < width; x += 2) {
            Y[x + 0] = decode_sym(gb, lru[0]);
            U[x >> 1] = decode_sym(gb, lru[1]) ^ 0x80;
            V[x >> 1] = decode_sym(gb, lru[2]) ^ 0x80;
        }
        if (hmargin) {
            Y[x] = decode_sym(gb, lru[0]);
            U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
            V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
        }
    }

    return y;
}
/* Decode a compressed YUV420 frame. */
static int dxtory_decode_v2_420(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    return dxtory_decode_v2(avctx, pic, src, src_size,
                            dx2_decode_slice_420,
                            default_setup_lru,
                            AV_PIX_FMT_YUV420P, vflipped);
}
  640. static int dx2_decode_slice_444(GetBitContext *gb, AVFrame *frame,
  641. int line, int left,
  642. uint8_t lru[3][8])
  643. {
  644. int x, y;
  645. int width = frame->width;
  646. int ystride = frame->linesize[0];
  647. int ustride = frame->linesize[1];
  648. int vstride = frame->linesize[2];
  649. uint8_t *Y = frame->data[0] + ystride * line;
  650. uint8_t *U = frame->data[1] + ustride * line;
  651. uint8_t *V = frame->data[2] + vstride * line;
  652. for (y = 0; y < left && get_bits_left(gb) >= 3 * width; y++) {
  653. for (x = 0; x < width; x++) {
  654. Y[x] = decode_sym(gb, lru[0]);
  655. U[x] = decode_sym(gb, lru[1]) ^ 0x80;
  656. V[x] = decode_sym(gb, lru[2]) ^ 0x80;
  657. }
  658. Y += ystride;
  659. U += ustride;
  660. V += vstride;
  661. }
  662. return y;
  663. }
/* Decode a compressed YUV444 frame. */
static int dxtory_decode_v2_444(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    return dxtory_decode_v2(avctx, pic, src, src_size,
                            dx2_decode_slice_444,
                            default_setup_lru,
                            AV_PIX_FMT_YUV444P, vflipped);
}
/*
 * Frame entry point: the packet starts with a 16-byte header whose
 * first word is a big-endian type tag.  Bit 0x20 of the tag marks a
 * vertically flipped frame; judging from the case list, tags ending in
 * ...01/...21 select the raw (v1) paths and ...09/...29 the compressed
 * (v2) paths, with the top byte choosing the pixel layout.
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    AVFrame *pic = data;
    const uint8_t *src = avpkt->data;
    uint32_t type;
    int vflipped, ret;

    if (avpkt->size < 16) {
        av_log(avctx, AV_LOG_ERROR, "packet too small\n");
        return AVERROR_INVALIDDATA;
    }

    type = AV_RB32(src);
    vflipped = !!(type & 0x20);

    switch (type) {
    case 0x01000021:
    case 0x01000001:
        /* raw BGR24 */
        ret = dxtory_decode_v1_rgb(avctx, pic, src + 16, avpkt->size - 16,
                                   AV_PIX_FMT_BGR24, 3, vflipped);
        break;
    case 0x01000029:
    case 0x01000009:
        /* compressed BGR24 */
        ret = dxtory_decode_v2_rgb(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x02000021:
    case 0x02000001:
        /* raw YUV420 */
        ret = dxtory_decode_v1_420(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x02000029:
    case 0x02000009:
        /* compressed YUV420 */
        ret = dxtory_decode_v2_420(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x03000021:
    case 0x03000001:
        /* raw YUV410 */
        ret = dxtory_decode_v1_410(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x03000029:
    case 0x03000009:
        /* compressed YUV410 */
        ret = dxtory_decode_v2_410(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x04000021:
    case 0x04000001:
        /* raw YUV444 */
        ret = dxtory_decode_v1_444(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x04000029:
    case 0x04000009:
        /* compressed YUV444 */
        ret = dxtory_decode_v2_444(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x17000021:
    case 0x17000001:
        /* raw RGB565 */
        ret = dxtory_decode_v1_rgb(avctx, pic, src + 16, avpkt->size - 16,
                                   AV_PIX_FMT_RGB565LE, 2, vflipped);
        break;
    case 0x17000029:
    case 0x17000009:
        /* compressed RGB565 (expanded to RGB24) */
        ret = dxtory_decode_v2_565(avctx, pic, src + 16, avpkt->size - 16, 1, vflipped);
        break;
    case 0x18000021:
    case 0x19000021:
    case 0x18000001:
    case 0x19000001:
        /* raw RGB555 */
        ret = dxtory_decode_v1_rgb(avctx, pic, src + 16, avpkt->size - 16,
                                   AV_PIX_FMT_RGB555LE, 2, vflipped);
        break;
    case 0x18000029:
    case 0x19000029:
    case 0x18000009:
    case 0x19000009:
        /* compressed RGB555 (expanded to RGB24) */
        ret = dxtory_decode_v2_565(avctx, pic, src + 16, avpkt->size - 16, 0, vflipped);
        break;
    default:
        avpriv_request_sample(avctx, "Frame header %"PRIX32, type);
        return AVERROR_PATCHWELCOME;
    }

    if (ret)
        return ret;

    /* every frame is a self-contained intra frame */
    pic->pict_type = AV_PICTURE_TYPE_I;
    pic->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
/* Codec registration: intra-only, so direct rendering and frame-level
 * threading are both safe to advertise. */
AVCodec ff_dxtory_decoder = {
    .name           = "dxtory",
    .long_name      = NULL_IF_CONFIG_SMALL("Dxtory"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_DXTORY,
    .decode         = decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
};