You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

879 lines
26KB

  1. /*
  2. * Dxtory decoder
  3. *
  4. * Copyright (c) 2011 Konstantin Shishkov
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #include <inttypes.h>
  23. #include "libavutil/common.h"
  24. #include "libavutil/intreadwrite.h"
  25. #define BITSTREAM_READER_LE
  26. #include "avcodec.h"
  27. #include "bytestream.h"
  28. #include "get_bits.h"
  29. #include "internal.h"
  30. #include "unary.h"
  31. static int64_t get_raw_size(enum AVPixelFormat fmt, int width, int height)
  32. {
  33. switch (fmt) {
  34. case AV_PIX_FMT_RGB555LE:
  35. case AV_PIX_FMT_RGB565LE:
  36. return width * height * 2LL;
  37. case AV_PIX_FMT_RGB24:
  38. case AV_PIX_FMT_BGR24:
  39. case AV_PIX_FMT_YUV444P:
  40. return width * height * 3LL;
  41. case AV_PIX_FMT_YUV420P:
  42. return (int64_t)(width * height) + AV_CEIL_RSHIFT(width, 1) * AV_CEIL_RSHIFT(height, 1);
  43. case AV_PIX_FMT_YUV410P:
  44. return (int64_t)(width * height) + AV_CEIL_RSHIFT(width, 2) * AV_CEIL_RSHIFT(height, 2);
  45. }
  46. return 0;
  47. }
/* Prepare a frame for bottom-up writing: point each plane at its last row
 * and negate the per-plane stride.  The operation is an involution, so the
 * decode paths call it once before writing rows top-to-bottom and once
 * afterwards to restore the frame's original pointers/strides. */
static void do_vflip(AVCodecContext *avctx, AVFrame *pic, int vflip)
{
    /* no-op unless the frame header requested a flipped frame */
    if (!vflip)
        return;

    switch (pic->format) {
    case AV_PIX_FMT_YUV444P:
        /* full-height chroma planes: flip them here, then fall through to
         * share the plane-0 handling with the packed RGB formats */
        pic->data[1] += (avctx->height - 1) * pic->linesize[1];
        pic->linesize[1] = -pic->linesize[1];
        pic->data[2] += (avctx->height - 1) * pic->linesize[2];
        pic->linesize[2] = -pic->linesize[2];
        /* fall through */
    case AV_PIX_FMT_BGR24:
    case AV_PIX_FMT_RGB24:
        pic->data[0] += (avctx->height - 1) * pic->linesize[0];
        pic->linesize[0] = -pic->linesize[0];
        break;
    case AV_PIX_FMT_YUV410P:
        /* chroma is 4x4 subsampled, so its last row index is ceil(h/4)-1 */
        pic->data[0] += (avctx->height - 1) * pic->linesize[0];
        pic->linesize[0] = -pic->linesize[0];
        pic->data[1] += (AV_CEIL_RSHIFT(avctx->height, 2) - 1) * pic->linesize[1];
        pic->linesize[1] = -pic->linesize[1];
        pic->data[2] += (AV_CEIL_RSHIFT(avctx->height, 2) - 1) * pic->linesize[2];
        pic->linesize[2] = -pic->linesize[2];
        break;
    case AV_PIX_FMT_YUV420P:
        /* chroma is 2x2 subsampled */
        pic->data[0] += (avctx->height - 1) * pic->linesize[0];
        pic->linesize[0] = -pic->linesize[0];
        pic->data[1] += (AV_CEIL_RSHIFT(avctx->height, 1) - 1) * pic->linesize[1];
        pic->linesize[1] = -pic->linesize[1];
        pic->data[2] += (AV_CEIL_RSHIFT(avctx->height, 1) - 1) * pic->linesize[2];
        pic->linesize[2] = -pic->linesize[2];
        break;
    }
}
  81. static int dxtory_decode_v1_rgb(AVCodecContext *avctx, AVFrame *pic,
  82. const uint8_t *src, int src_size,
  83. int id, int bpp, uint32_t vflipped)
  84. {
  85. int h;
  86. uint8_t *dst;
  87. int ret;
  88. if (src_size < get_raw_size(id, avctx->width, avctx->height)) {
  89. av_log(avctx, AV_LOG_ERROR, "packet too small\n");
  90. return AVERROR_INVALIDDATA;
  91. }
  92. avctx->pix_fmt = id;
  93. if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
  94. return ret;
  95. do_vflip(avctx, pic, vflipped);
  96. dst = pic->data[0];
  97. for (h = 0; h < avctx->height; h++) {
  98. memcpy(dst, src, avctx->width * bpp);
  99. src += avctx->width * bpp;
  100. dst += pic->linesize[0];
  101. }
  102. do_vflip(avctx, pic, vflipped);
  103. return 0;
  104. }
/**
 * Decode an uncompressed (v1) YUV410 frame.
 *
 * Pixels are packed in 4x4 tiles: four runs of 4 luma bytes, then one U
 * and one V byte (stored biased; +0x80 recentres them).  Columns/rows that
 * do not fill a whole tile follow as explicit margin data.
 */
static int dxtory_decode_v1_410(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    int h, w;
    uint8_t *Y1, *Y2, *Y3, *Y4, *U, *V;
    int height, width, hmargin, vmargin;
    int huvborder;
    int ret;

    if (src_size < get_raw_size(AV_PIX_FMT_YUV410P, avctx->width, avctx->height)) {
        av_log(avctx, AV_LOG_ERROR, "packet too small\n");
        return AVERROR_INVALIDDATA;
    }

    avctx->pix_fmt = AV_PIX_FMT_YUV410P;
    if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
        return ret;

    do_vflip(avctx, pic, vflipped);

    /* dimensions rounded down to whole 4x4 tiles; margins are the rest */
    height = avctx->height & ~3;
    width = avctx->width & ~3;
    hmargin = avctx->width - width;
    vmargin = avctx->height - height;
    huvborder = AV_CEIL_RSHIFT(avctx->width, 2) - 1; /* last chroma column */

    /* four consecutive luma rows are written per tile row */
    Y1 = pic->data[0];
    Y2 = pic->data[0] + pic->linesize[0];
    Y3 = pic->data[0] + pic->linesize[0] * 2;
    Y4 = pic->data[0] + pic->linesize[0] * 3;
    U = pic->data[1];
    V = pic->data[2];

    for (h = 0; h < height; h += 4) {
        for (w = 0; w < width; w += 4) {
            /* one tile: 16 luma bytes + 1 U + 1 V */
            AV_COPY32U(Y1 + w, src);
            AV_COPY32U(Y2 + w, src + 4);
            AV_COPY32U(Y3 + w, src + 8);
            AV_COPY32U(Y4 + w, src + 12);
            U[w >> 2] = src[16] + 0x80;
            V[w >> 2] = src[17] + 0x80;
            src += 18;
        }
        if (hmargin) {
            /* partial rightmost tile: hmargin bytes per luma row */
            for (w = 0; w < hmargin; w++) {
                Y1[width + w] = src[w];
                Y2[width + w] = src[w + hmargin * 1];
                Y3[width + w] = src[w + hmargin * 2];
                Y4[width + w] = src[w + hmargin * 3];
            }
            src += 4 * hmargin;
            U[huvborder] = src[0] + 0x80;
            V[huvborder] = src[1] + 0x80;
            src += 2;
        }
        Y1 += pic->linesize[0] << 2;
        Y2 += pic->linesize[0] << 2;
        Y3 += pic->linesize[0] << 2;
        Y4 += pic->linesize[0] << 2;
        U += pic->linesize[1];
        V += pic->linesize[2];
    }

    /* bottom margin: up to 3 leftover luma rows */
    if (vmargin) {
        for (w = 0; w < width; w += 4) {
            AV_COPY32U(Y1 + w, src);
            if (vmargin > 1)
                AV_COPY32U(Y2 + w, src + 4);
            if (vmargin > 2)
                AV_COPY32U(Y3 + w, src + 8);
            src += 4 * vmargin;
            U[w >> 2] = src[0] + 0x80;
            V[w >> 2] = src[1] + 0x80;
            src += 2;
        }
        if (hmargin) {
            /* NOTE(review): copies 4 bytes per margin column via AV_COPY32U
             * starting at column 0, unlike the byte-wise `Y1[width + w]`
             * copy used for in-body rows above — verify against a sample
             * whose width and height are both not multiples of 4. */
            for (w = 0; w < hmargin; w++) {
                AV_COPY32U(Y1 + w, src);
                if (vmargin > 1)
                    AV_COPY32U(Y2 + w, src + 4);
                if (vmargin > 2)
                    AV_COPY32U(Y3 + w, src + 8);
                src += 4 * vmargin;
            }
            U[huvborder] = src[0] + 0x80;
            V[huvborder] = src[1] + 0x80;
            src += 2;
        }
    }

    do_vflip(avctx, pic, vflipped);

    return 0;
}
/**
 * Decode an uncompressed (v1) YUV420 frame.
 *
 * Pixels are packed in 2x2 blocks: 2+2 luma bytes followed by one U and
 * one V byte (stored biased; +0x80 recentres them).  An odd final
 * column/row is transmitted afterwards as explicit margin data.
 */
static int dxtory_decode_v1_420(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    int h, w;
    uint8_t *Y1, *Y2, *U, *V;
    int height, width, hmargin, vmargin;
    int huvborder;
    int ret;

    if (src_size < get_raw_size(AV_PIX_FMT_YUV420P, avctx->width, avctx->height)) {
        av_log(avctx, AV_LOG_ERROR, "packet too small\n");
        return AVERROR_INVALIDDATA;
    }

    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
        return ret;

    do_vflip(avctx, pic, vflipped);

    /* dimensions rounded down to whole 2x2 blocks */
    height = avctx->height & ~1;
    width = avctx->width & ~1;
    hmargin = avctx->width - width;
    vmargin = avctx->height - height;
    huvborder = AV_CEIL_RSHIFT(avctx->width, 1) - 1; /* last chroma column */

    /* two consecutive luma rows are written per block row */
    Y1 = pic->data[0];
    Y2 = pic->data[0] + pic->linesize[0];
    U = pic->data[1];
    V = pic->data[2];

    for (h = 0; h < height; h += 2) {
        for (w = 0; w < width; w += 2) {
            /* one block: 2 luma bytes per row + 1 U + 1 V */
            AV_COPY16(Y1 + w, src);
            AV_COPY16(Y2 + w, src + 2);
            U[w >> 1] = src[4] + 0x80;
            V[w >> 1] = src[5] + 0x80;
            src += 6;
        }
        if (hmargin) {
            /* NOTE(review): stores the margin luma at column width + 1,
             * while the bottom-margin path below writes Y1[w] with
             * w == width; looks like an off-by-one — confirm with an
             * odd-width sample. */
            Y1[width + 1] = src[0];
            Y2[width + 1] = src[1];
            U[huvborder] = src[2] + 0x80;
            V[huvborder] = src[3] + 0x80;
            src += 4;
        }
        Y1 += pic->linesize[0] << 1;
        Y2 += pic->linesize[0] << 1;
        U += pic->linesize[1];
        V += pic->linesize[2];
    }

    /* final odd row, if any */
    if (vmargin) {
        for (w = 0; w < width; w += 2) {
            AV_COPY16U(Y1 + w, src);
            /* NOTE(review): chroma is read from src[0]/src[1] — the same
             * bytes just copied as luma — while src advances by 4; the
             * in-body layout would suggest src[2]/src[3]; verify. */
            U[w >> 1] = src[0] + 0x80;
            V[w >> 1] = src[1] + 0x80;
            src += 4;
        }
        if (hmargin) {
            Y1[w] = src[0];
            U[huvborder] = src[1] + 0x80;
            V[huvborder] = src[2] + 0x80;
            src += 3;
        }
    }

    do_vflip(avctx, pic, vflipped);

    return 0;
}
  254. static int dxtory_decode_v1_444(AVCodecContext *avctx, AVFrame *pic,
  255. const uint8_t *src, int src_size,
  256. uint32_t vflipped)
  257. {
  258. int h, w;
  259. uint8_t *Y, *U, *V;
  260. int ret;
  261. if (src_size < get_raw_size(AV_PIX_FMT_YUV444P, avctx->width, avctx->height)) {
  262. av_log(avctx, AV_LOG_ERROR, "packet too small\n");
  263. return AVERROR_INVALIDDATA;
  264. }
  265. avctx->pix_fmt = AV_PIX_FMT_YUV444P;
  266. if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
  267. return ret;
  268. do_vflip(avctx, pic, vflipped);
  269. Y = pic->data[0];
  270. U = pic->data[1];
  271. V = pic->data[2];
  272. for (h = 0; h < avctx->height; h++) {
  273. for (w = 0; w < avctx->width; w++) {
  274. Y[w] = *src++;
  275. U[w] = *src++ ^ 0x80;
  276. V[w] = *src++ ^ 0x80;
  277. }
  278. Y += pic->linesize[0];
  279. U += pic->linesize[1];
  280. V += pic->linesize[2];
  281. }
  282. do_vflip(avctx, pic, vflipped);
  283. return 0;
  284. }
/* Initial contents of the move-to-front LRU tables used by decode_sym()
 * and decode_sym_565().  The 555/565 tables seed only the values a 5- or
 * 6-bit component can take; the remaining entries are zero-initialized. */
static const uint8_t def_lru[8] = { 0x00, 0x20, 0x40, 0x60, 0x80, 0xA0, 0xC0, 0xFF };
static const uint8_t def_lru_555[8] = { 0x00, 0x08, 0x10, 0x18, 0x1F };
static const uint8_t def_lru_565[8] = { 0x00, 0x08, 0x10, 0x20, 0x30, 0x3F };
  288. static inline uint8_t decode_sym(GetBitContext *gb, uint8_t lru[8])
  289. {
  290. uint8_t c, val;
  291. c = get_unary(gb, 0, 8);
  292. if (!c) {
  293. val = get_bits(gb, 8);
  294. memmove(lru + 1, lru, sizeof(*lru) * (8 - 1));
  295. } else {
  296. val = lru[c - 1];
  297. memmove(lru + 1, lru, sizeof(*lru) * (c - 1));
  298. }
  299. lru[0] = val;
  300. return val;
  301. }
  302. static int check_slice_size(AVCodecContext *avctx,
  303. const uint8_t *src, int src_size,
  304. int slice_size, int off)
  305. {
  306. int cur_slice_size;
  307. if (slice_size > src_size - off) {
  308. av_log(avctx, AV_LOG_ERROR,
  309. "invalid slice size %d (only %d bytes left)\n",
  310. slice_size, src_size - off);
  311. return AVERROR_INVALIDDATA;
  312. }
  313. if (slice_size <= 16) {
  314. av_log(avctx, AV_LOG_ERROR, "invalid slice size %d\n",
  315. slice_size);
  316. return AVERROR_INVALIDDATA;
  317. }
  318. cur_slice_size = AV_RL32(src + off);
  319. if (cur_slice_size != slice_size - 16) {
  320. av_log(avctx, AV_LOG_ERROR,
  321. "Slice sizes mismatch: got %d instead of %d\n",
  322. cur_slice_size, slice_size - 16);
  323. }
  324. return 0;
  325. }
  326. static int load_buffer(AVCodecContext *avctx,
  327. const uint8_t *src, int src_size,
  328. GetByteContext *gb,
  329. int *nslices, int *off)
  330. {
  331. bytestream2_init(gb, src, src_size);
  332. *nslices = bytestream2_get_le16(gb);
  333. *off = FFALIGN(*nslices * 4 + 2, 16);
  334. if (src_size < *off) {
  335. av_log(avctx, AV_LOG_ERROR, "no slice data\n");
  336. return AVERROR_INVALIDDATA;
  337. }
  338. if (!*nslices) {
  339. avpriv_request_sample(avctx, "%d slices for %dx%d", *nslices,
  340. avctx->width, avctx->height);
  341. return AVERROR_PATCHWELCOME;
  342. }
  343. return 0;
  344. }
  345. static inline uint8_t decode_sym_565(GetBitContext *gb, uint8_t lru[8],
  346. int bits)
  347. {
  348. uint8_t c, val;
  349. c = get_unary(gb, 0, bits);
  350. if (!c) {
  351. val = get_bits(gb, bits);
  352. memmove(lru + 1, lru, sizeof(*lru) * (6 - 1));
  353. } else {
  354. val = lru[c - 1];
  355. memmove(lru + 1, lru, sizeof(*lru) * (c - 1));
  356. }
  357. lru[0] = val;
  358. return val;
  359. }
/* Per-format slice decoder: expands the bitstream into full picture lines
 * starting at `line`, never past `height` more lines, and returns the
 * number of lines actually produced. */
typedef int (*decode_slice_func)(GetBitContext *gb, AVFrame *frame,
                                 int line, int height, uint8_t lru[3][8]);

/* Re-seeds the three per-component LRU tables before each slice. */
typedef void (*setup_lru_func)(uint8_t lru[3][8]);
/**
 * Common driver for all compressed (v2) variants.
 *
 * The payload begins with a slice table (see load_buffer()); each slice
 * has a 16-byte header whose first word repeats the payload size,
 * followed by a little-endian bitstream that decode_slice() expands into
 * full lines of the picture.
 */
static int dxtory_decode_v2(AVCodecContext *avctx, AVFrame *pic,
                            const uint8_t *src, int src_size,
                            decode_slice_func decode_slice,
                            setup_lru_func setup_lru,
                            enum AVPixelFormat fmt,
                            uint32_t vflipped)
{
    GetByteContext gb, gb_check;
    GetBitContext gb2;
    int nslices, slice, line = 0;
    uint32_t off, slice_size;
    uint64_t off_check;
    uint8_t lru[3][8];
    int ret;

    ret = load_buffer(avctx, src, src_size, &gb, &nslices, &off);
    if (ret < 0)
        return ret;

    /* first pass: sanity-check all slice sizes before allocating a frame */
    off_check = off;
    gb_check = gb;
    for (slice = 0; slice < nslices; slice++) {
        slice_size = bytestream2_get_le32(&gb_check);

        /* reject slices smaller than the minimum plausible coded size */
        if (slice_size <= 16 + (avctx->height * avctx->width / (8 * nslices)))
            return AVERROR_INVALIDDATA;
        off_check += slice_size;
    }

    /* tolerate truncation only within the configured damage percentage */
    if (off_check - avctx->discard_damaged_percentage*off_check/100 > src_size)
        return AVERROR_INVALIDDATA;

    avctx->pix_fmt = fmt;
    if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
        return ret;

    do_vflip(avctx, pic, vflipped);

    /* second pass: decode slice by slice, accumulating output lines */
    for (slice = 0; slice < nslices; slice++) {
        slice_size = bytestream2_get_le32(&gb);

        setup_lru(lru);

        ret = check_slice_size(avctx, src, src_size, slice_size, off);
        if (ret < 0)
            return ret;

        /* the bitstream starts after the 16-byte slice header */
        if ((ret = init_get_bits8(&gb2, src + off + 16, slice_size - 16)) < 0)
            return ret;

        line += decode_slice(&gb2, pic, line, avctx->height - line, lru);

        off += slice_size;
    }
    if (avctx->height - line) {
        avpriv_request_sample(avctx, "Not enough slice data available");
    }

    do_vflip(avctx, pic, vflipped);

    return 0;
}
  411. av_always_inline
  412. static int dx2_decode_slice_5x5(GetBitContext *gb, AVFrame *frame,
  413. int line, int left, uint8_t lru[3][8],
  414. int is_565)
  415. {
  416. int x, y;
  417. int r, g, b;
  418. int width = frame->width;
  419. int stride = frame->linesize[0];
  420. uint8_t *dst = frame->data[0] + stride * line;
  421. for (y = 0; y < left && get_bits_left(gb) >= 3 * width; y++) {
  422. for (x = 0; x < width; x++) {
  423. b = decode_sym_565(gb, lru[0], 5);
  424. g = decode_sym_565(gb, lru[1], is_565 ? 6 : 5);
  425. r = decode_sym_565(gb, lru[2], 5);
  426. dst[x * 3 + 0] = (r << 3) | (r >> 2);
  427. dst[x * 3 + 1] = is_565 ? (g << 2) | (g >> 4) : (g << 3) | (g >> 2);
  428. dst[x * 3 + 2] = (b << 3) | (b >> 2);
  429. }
  430. dst += stride;
  431. }
  432. return y;
  433. }
  434. static void setup_lru_555(uint8_t lru[3][8])
  435. {
  436. memcpy(lru[0], def_lru_555, 8 * sizeof(*def_lru));
  437. memcpy(lru[1], def_lru_555, 8 * sizeof(*def_lru));
  438. memcpy(lru[2], def_lru_555, 8 * sizeof(*def_lru));
  439. }
  440. static void setup_lru_565(uint8_t lru[3][8])
  441. {
  442. memcpy(lru[0], def_lru_555, 8 * sizeof(*def_lru));
  443. memcpy(lru[1], def_lru_565, 8 * sizeof(*def_lru));
  444. memcpy(lru[2], def_lru_555, 8 * sizeof(*def_lru));
  445. }
/* 15-bit (5:5:5) slice decoder: thin wrapper over dx2_decode_slice_5x5. */
static int dx2_decode_slice_555(GetBitContext *gb, AVFrame *frame,
                                int line, int left, uint8_t lru[3][8])
{
    return dx2_decode_slice_5x5(gb, frame, line, left, lru, 0);
}
/* 16-bit (5:6:5) slice decoder: thin wrapper over dx2_decode_slice_5x5. */
static int dx2_decode_slice_565(GetBitContext *gb, AVFrame *frame,
                                int line, int left, uint8_t lru[3][8])
{
    return dx2_decode_slice_5x5(gb, frame, line, left, lru, 1);
}
  456. static int dxtory_decode_v2_565(AVCodecContext *avctx, AVFrame *pic,
  457. const uint8_t *src, int src_size, int is_565,
  458. uint32_t vflipped)
  459. {
  460. enum AVPixelFormat fmt = AV_PIX_FMT_RGB24;
  461. if (is_565)
  462. return dxtory_decode_v2(avctx, pic, src, src_size,
  463. dx2_decode_slice_565,
  464. setup_lru_565,
  465. fmt, vflipped);
  466. else
  467. return dxtory_decode_v2(avctx, pic, src, src_size,
  468. dx2_decode_slice_555,
  469. setup_lru_555,
  470. fmt, vflipped);
  471. }
  472. static int dx2_decode_slice_rgb(GetBitContext *gb, AVFrame *frame,
  473. int line, int left, uint8_t lru[3][8])
  474. {
  475. int x, y;
  476. int width = frame->width;
  477. int stride = frame->linesize[0];
  478. uint8_t *dst = frame->data[0] + stride * line;
  479. for (y = 0; y < left && get_bits_left(gb) >= 3 * width; y++) {
  480. for (x = 0; x < width; x++) {
  481. dst[x * 3 + 0] = decode_sym(gb, lru[0]);
  482. dst[x * 3 + 1] = decode_sym(gb, lru[1]);
  483. dst[x * 3 + 2] = decode_sym(gb, lru[2]);
  484. }
  485. dst += stride;
  486. }
  487. return y;
  488. }
  489. static void default_setup_lru(uint8_t lru[3][8])
  490. {
  491. int i;
  492. for (i = 0; i < 3; i++)
  493. memcpy(lru[i], def_lru, 8 * sizeof(*def_lru));
  494. }
/* Compressed BGR24 frame: generic 8-bit LRU symbols, RGB slice decoder. */
static int dxtory_decode_v2_rgb(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    return dxtory_decode_v2(avctx, pic, src, src_size,
                            dx2_decode_slice_rgb,
                            default_setup_lru,
                            AV_PIX_FMT_BGR24, vflipped);
}
/**
 * Decode one compressed YUV410 slice: per 4x4 tile, 16 LRU-coded luma
 * symbols followed by one U and one V (biased; ^ 0x80 recentres).
 *
 * @return number of luma lines produced
 */
static int dx2_decode_slice_410(GetBitContext *gb, AVFrame *frame,
                                int line, int left,
                                uint8_t lru[3][8])
{
    int x, y, i, j;
    int width = frame->width;
    int ystride = frame->linesize[0];
    int ustride = frame->linesize[1];
    int vstride = frame->linesize[2];
    uint8_t *Y = frame->data[0] + ystride * line;
    /* chroma rows advance at a quarter of the luma rate (4x4 subsampling) */
    uint8_t *U = frame->data[1] + (ustride >> 2) * line;
    uint8_t *V = frame->data[2] + (vstride >> 2) * line;
    int h, w, hmargin, vmargin;
    int huvborder;

    /* tile-aligned size plus leftover margins */
    h = frame->height & ~3;
    w = frame->width & ~3;
    hmargin = frame->width - w;
    vmargin = frame->height - h;
    huvborder = AV_CEIL_RSHIFT(frame->width, 2) - 1; /* last chroma column */

    /* stop early when the bitstream cannot hold another full tile row */
    for (y = 0; y < left - 3 && get_bits_left(gb) >= 18 * w / 4 + hmargin * 4 + (!!hmargin * 2); y += 4) {
        for (x = 0; x < w; x += 4) {
            for (j = 0; j < 4; j++)
                for (i = 0; i < 4; i++)
                    Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
            U[x >> 2] = decode_sym(gb, lru[1]) ^ 0x80;
            V[x >> 2] = decode_sym(gb, lru[2]) ^ 0x80;
        }
        if (hmargin) {
            /* partial rightmost tile: hmargin columns, full 4 rows */
            for (j = 0; j < 4; j++)
                for (i = 0; i < hmargin; i++)
                    Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
            U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
            V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
        }
        Y += ystride << 2;
        U += ustride;
        V += vstride;
    }

    /* bottom margin is only decoded when it is exactly what remains */
    if (vmargin && y + vmargin == left) {
        /* NOTE(review): this loop runs over the full `width` in steps of 4,
         * unlike the body above which uses the tile-aligned `w`; when
         * width % 4 != 0 the last iteration overlaps the horizontal-margin
         * handling below — verify against such a sample. */
        for (x = 0; x < width; x += 4) {
            for (j = 0; j < vmargin; j++)
                for (i = 0; i < 4; i++)
                    Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
            U[x >> 2] = decode_sym(gb, lru[1]) ^ 0x80;
            V[x >> 2] = decode_sym(gb, lru[2]) ^ 0x80;
        }
        if (hmargin) {
            for (j = 0; j < vmargin; j++) {
                for (i = 0; i < hmargin; i++)
                    Y[x + i + j * ystride] = decode_sym(gb, lru[0]);
            }
            U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
            V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
        }
        y += vmargin;
    }

    return y;
}
/* Compressed YUV410 frame: generic 8-bit LRU symbols, 410 slice decoder. */
static int dxtory_decode_v2_410(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    return dxtory_decode_v2(avctx, pic, src, src_size,
                            dx2_decode_slice_410,
                            default_setup_lru,
                            AV_PIX_FMT_YUV410P, vflipped);
}
/**
 * Decode one compressed YUV420 slice: per 2x2 block, four LRU-coded luma
 * symbols followed by one U and one V (biased; ^ 0x80 recentres).
 *
 * @return number of luma lines produced
 */
static int dx2_decode_slice_420(GetBitContext *gb, AVFrame *frame,
                                int line, int left,
                                uint8_t lru[3][8])
{
    int x, y;
    int width = frame->width;
    int ystride = frame->linesize[0];
    int ustride = frame->linesize[1];
    int vstride = frame->linesize[2];
    uint8_t *Y = frame->data[0] + ystride * line;
    /* chroma rows advance at half the luma rate (2x2 subsampling) */
    uint8_t *U = frame->data[1] + (ustride >> 1) * line;
    uint8_t *V = frame->data[2] + (vstride >> 1) * line;
    int h, w, hmargin, vmargin;
    int huvborder;

    /* block-aligned size plus leftover margins */
    h = frame->height & ~1;
    w = frame->width & ~1;
    hmargin = frame->width - w;
    vmargin = frame->height - h;
    huvborder = AV_CEIL_RSHIFT(frame->width, 1) - 1; /* last chroma column */

    /* stop early when the bitstream cannot hold another full block row */
    for (y = 0; y < left - 1 && get_bits_left(gb) >= 3 * w + hmargin * 4; y += 2) {
        for (x = 0; x < w; x += 2) {
            Y[x + 0 + 0 * ystride] = decode_sym(gb, lru[0]);
            Y[x + 1 + 0 * ystride] = decode_sym(gb, lru[0]);
            Y[x + 0 + 1 * ystride] = decode_sym(gb, lru[0]);
            Y[x + 1 + 1 * ystride] = decode_sym(gb, lru[0]);
            U[x >> 1] = decode_sym(gb, lru[1]) ^ 0x80;
            V[x >> 1] = decode_sym(gb, lru[2]) ^ 0x80;
        }
        if (hmargin) {
            /* rightmost odd column: one luma per row plus chroma */
            Y[x + 0 * ystride] = decode_sym(gb, lru[0]);
            Y[x + 1 * ystride] = decode_sym(gb, lru[0]);
            U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
            V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
        }
        Y += ystride << 1;
        U += ustride;
        V += vstride;
    }

    /* final odd row, if any */
    if (vmargin) {
        /* NOTE(review): iterates over the full `width` (not the aligned `w`)
         * and decodes only one luma symbol per column pair — confirm this
         * matches the encoder's bottom-margin layout. */
        for (x = 0; x < width; x += 2) {
            Y[x + 0] = decode_sym(gb, lru[0]);
            U[x >> 1] = decode_sym(gb, lru[1]) ^ 0x80;
            V[x >> 1] = decode_sym(gb, lru[2]) ^ 0x80;
        }
        if (hmargin) {
            Y[x] = decode_sym(gb, lru[0]);
            U[huvborder] = decode_sym(gb, lru[1]) ^ 0x80;
            V[huvborder] = decode_sym(gb, lru[2]) ^ 0x80;
        }
    }

    return y;
}
/* Compressed YUV420 frame: generic 8-bit LRU symbols, 420 slice decoder. */
static int dxtory_decode_v2_420(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    return dxtory_decode_v2(avctx, pic, src, src_size,
                            dx2_decode_slice_420,
                            default_setup_lru,
                            AV_PIX_FMT_YUV420P, vflipped);
}
  632. static int dx2_decode_slice_444(GetBitContext *gb, AVFrame *frame,
  633. int line, int left,
  634. uint8_t lru[3][8])
  635. {
  636. int x, y;
  637. int width = frame->width;
  638. int ystride = frame->linesize[0];
  639. int ustride = frame->linesize[1];
  640. int vstride = frame->linesize[2];
  641. uint8_t *Y = frame->data[0] + ystride * line;
  642. uint8_t *U = frame->data[1] + ustride * line;
  643. uint8_t *V = frame->data[2] + vstride * line;
  644. for (y = 0; y < left && get_bits_left(gb) >= 3 * width; y++) {
  645. for (x = 0; x < width; x++) {
  646. Y[x] = decode_sym(gb, lru[0]);
  647. U[x] = decode_sym(gb, lru[1]) ^ 0x80;
  648. V[x] = decode_sym(gb, lru[2]) ^ 0x80;
  649. }
  650. Y += ystride;
  651. U += ustride;
  652. V += vstride;
  653. }
  654. return y;
  655. }
/* Compressed YUV444 frame: generic 8-bit LRU symbols, 444 slice decoder. */
static int dxtory_decode_v2_444(AVCodecContext *avctx, AVFrame *pic,
                                const uint8_t *src, int src_size,
                                uint32_t vflipped)
{
    return dxtory_decode_v2(avctx, pic, src, src_size,
                            dx2_decode_slice_444,
                            default_setup_lru,
                            AV_PIX_FMT_YUV444P, vflipped);
}
/**
 * Entry point: read the 32-bit (big-endian) frame-type word from the
 * 16-byte header and dispatch to the matching decoder.
 *
 * As enumerated by the cases below, the type word encodes:
 *   high byte  colour format (0x01 BGR24, 0x02 YUV420, 0x03 YUV410,
 *              0x04 YUV444, 0x17 RGB565, 0x18/0x19 RGB555)
 *   bit 0x20   frame is stored bottom-up (vertical flip)
 *   bit 0x08   payload is LRU-compressed (v2) rather than raw (v1)
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    AVFrame *pic = data;
    const uint8_t *src = avpkt->data;
    uint32_t type;
    int vflipped, ret;

    /* every variant carries a 16-byte header */
    if (avpkt->size < 16) {
        av_log(avctx, AV_LOG_ERROR, "packet too small\n");
        return AVERROR_INVALIDDATA;
    }

    type = AV_RB32(src);
    vflipped = !!(type & 0x20);

    switch (type) {
    /* BGR24 */
    case 0x01000021:
    case 0x01000001:
        ret = dxtory_decode_v1_rgb(avctx, pic, src + 16, avpkt->size - 16,
                                   AV_PIX_FMT_BGR24, 3, vflipped);
        break;
    case 0x01000029:
    case 0x01000009:
        ret = dxtory_decode_v2_rgb(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    /* YUV420 */
    case 0x02000021:
    case 0x02000001:
        ret = dxtory_decode_v1_420(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x02000029:
    case 0x02000009:
        ret = dxtory_decode_v2_420(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    /* YUV410 */
    case 0x03000021:
    case 0x03000001:
        ret = dxtory_decode_v1_410(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x03000029:
    case 0x03000009:
        ret = dxtory_decode_v2_410(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    /* YUV444 */
    case 0x04000021:
    case 0x04000001:
        ret = dxtory_decode_v1_444(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    case 0x04000029:
    case 0x04000009:
        ret = dxtory_decode_v2_444(avctx, pic, src + 16, avpkt->size - 16, vflipped);
        break;
    /* RGB565 */
    case 0x17000021:
    case 0x17000001:
        ret = dxtory_decode_v1_rgb(avctx, pic, src + 16, avpkt->size - 16,
                                   AV_PIX_FMT_RGB565LE, 2, vflipped);
        break;
    case 0x17000029:
    case 0x17000009:
        ret = dxtory_decode_v2_565(avctx, pic, src + 16, avpkt->size - 16, 1, vflipped);
        break;
    /* RGB555 (two format ids) */
    case 0x18000021:
    case 0x19000021:
    case 0x18000001:
    case 0x19000001:
        ret = dxtory_decode_v1_rgb(avctx, pic, src + 16, avpkt->size - 16,
                                   AV_PIX_FMT_RGB555LE, 2, vflipped);
        break;
    case 0x18000029:
    case 0x19000029:
    case 0x18000009:
    case 0x19000009:
        ret = dxtory_decode_v2_565(avctx, pic, src + 16, avpkt->size - 16, 0, vflipped);
        break;
    default:
        avpriv_request_sample(avctx, "Frame header %"PRIX32, type);
        return AVERROR_PATCHWELCOME;
    }

    if (ret)
        return ret;

    /* every frame decodes independently, so all frames are keyframes */
    pic->pict_type = AV_PICTURE_TYPE_I;
    pic->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
/* Decoder registration for the Dxtory codec; AV_CODEC_CAP_DR1 allows the
 * generic code to hand us direct-rendering buffers via ff_get_buffer(). */
AVCodec ff_dxtory_decoder = {
    .name = "dxtory",
    .long_name = NULL_IF_CONFIG_SMALL("Dxtory"),
    .type = AVMEDIA_TYPE_VIDEO,
    .id = AV_CODEC_ID_DXTORY,
    .decode = decode_frame,
    .capabilities = AV_CODEC_CAP_DR1,
};