/*
 * LOCO codec
 * Copyright (c) 2005 Konstantin Shishkov
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * LOCO codec.
 */

#include "avcodec.h"
#include "get_bits.h"
#include "golomb.h"
#include "internal.h"
#include "mathops.h"

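/* Pixel layouts signalled in the codec extradata; decode_init() maps each
 * of them to the corresponding AVPixelFormat. */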
enum LOCO_MODE {
    LOCO_UNKN  =  0,
    LOCO_CYUY2 = -1,
    LOCO_CRGB  = -2,
    LOCO_CRGBA = -3,
    LOCO_CYV12 = -4,
    LOCO_YUY2  =  1,
    LOCO_UYVY  =  2,
    LOCO_RGB   =  3,
    LOCO_RGBA  =  4,
    LOCO_YV12  =  5,
};

typedef struct LOCOContext {
    AVCodecContext *avctx;
    AVFrame pic;
    int lossy;
    int mode;
} LOCOContext;

typedef struct RICEContext {
    GetBitContext gb;
    int save, run, run2; /* internal rice decoder state */
    int sum, count;      /* sum and count for getting rice parameter */
    int lossy;
} RICEContext;

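/* Pick the Rice parameter: the smallest k (capped at 9) for which
 * count << k reaches the running sum of coded magnitudes. */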
static int loco_get_rice_param(RICEContext *r)
{
    int cnt = 0;
    int val = r->count;

    while (r->sum > val && cnt < 9) {
        val <<= 1;
        cnt++;
    }

    return cnt;
}

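/* Feed the magnitude of a decoded residual into the running statistics;
 * halving sum and count every 16 samples keeps the estimate adaptive. */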
static inline void loco_update_rice_param(RICEContext *r, int val)
{
    r->sum += val;
    r->count++;

    if (r->count == 16) {
        r->sum   >>= 1;
        r->count >>= 1;
    }
}

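/* Decode one prediction residual: a JPEG-LS style Golomb-Rice code combined
 * with run-length coding of zero residuals. r->run holds zero samples still
 * pending from a decoded run, while r->save and r->run2 steer when run mode
 * is entered again. */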
static inline int loco_get_rice(RICEContext *r)
{
    int v;

    if (r->run > 0) { /* we have zero run */
        r->run--;
        loco_update_rice_param(r, 0);
        return 0;
    }
    v = get_ur_golomb_jpegls(&r->gb, loco_get_rice_param(r), INT_MAX, 0);
    loco_update_rice_param(r, (v + 1) >> 1);
    if (!v) {
        if (r->save >= 0) {
            r->run = get_ur_golomb_jpegls(&r->gb, 2, INT_MAX, 0);
            if (r->run > 1)
                r->save += r->run + 1;
            else
                r->save -= 3;
        } else
            r->run2++;
    } else {
        v = ((v >> 1) + r->lossy) ^ -(v & 1);
        if (r->run2 > 0) {
            if (r->run2 > 2)
                r->save += r->run2;
            else
                r->save -= 3;
            r->run2 = 0;
        }
    }

    return v;
}

/* LOCO main predictor - LOCO-I/JPEG-LS predictor */
static inline int loco_predict(uint8_t *data, int stride, int step)
{
    int a, b, c;

    a = data[-stride];
    b = data[-step];
    c = data[-stride - step];

    return mid_pred(a, a + b - c, b);
}

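/* Decode one plane (or one interleaved component when step > 1): the first
 * pixel is coded as an offset from 128, the top row and left column are
 * predicted from their single neighbour, and all remaining pixels use the
 * median predictor above. Returns the number of bytes consumed from buf. */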
static int loco_decode_plane(LOCOContext *l, uint8_t *data, int width, int height,
                             int stride, const uint8_t *buf, int buf_size, int step)
{
    RICEContext rc;
    int val;
    int i, j;

    init_get_bits(&rc.gb, buf, buf_size * 8);
    rc.save  = 0;
    rc.run   = 0;
    rc.run2  = 0;
    rc.lossy = l->lossy;

    rc.sum   = 8;
    rc.count = 1;

    /* restore top left pixel */
    val     = loco_get_rice(&rc);
    data[0] = 128 + val;
    /* restore top line */
    for (i = 1; i < width; i++) {
        val = loco_get_rice(&rc);
        data[i * step] = data[i * step - step] + val;
    }
    data += stride;
    for (j = 1; j < height; j++) {
        /* restore left column */
        val = loco_get_rice(&rc);
        data[0] = data[-stride] + val;
        /* restore all other pixels */
        for (i = 1; i < width; i++) {
            val = loco_get_rice(&rc);
            data[i * step] = loco_predict(&data[i * step], stride, step) + val;
        }
        data += stride;
    }

    return (get_bits_count(&rc.gb) + 7) >> 3;
}

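/* Each component is coded as an independent Rice stream; the streams are
 * simply concatenated in the packet, so decoding advances buf by however
 * many bytes the previous plane consumed. The RGB(A) variants are packed
 * (step 3/4), and RGB is stored bottom-up, hence the negative stride. */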
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    LOCOContext * const l = avctx->priv_data;
    const uint8_t *buf    = avpkt->data;
    int buf_size          = avpkt->size;
    AVFrame * const p     = &l->pic;
    int decoded, ret;

    if (p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference = 0;
    if ((ret = ff_get_buffer(avctx, p)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    p->key_frame = 1;

    switch (l->mode) {
    case LOCO_CYUY2: case LOCO_YUY2: case LOCO_UYVY:
        decoded = loco_decode_plane(l, p->data[0], avctx->width, avctx->height,
                                    p->linesize[0], buf, buf_size, 1);
        if (decoded >= buf_size)
            goto buf_too_small;
        buf += decoded; buf_size -= decoded;
        decoded = loco_decode_plane(l, p->data[1], avctx->width / 2, avctx->height,
                                    p->linesize[1], buf, buf_size, 1);
        if (decoded >= buf_size)
            goto buf_too_small;
        buf += decoded; buf_size -= decoded;
        decoded = loco_decode_plane(l, p->data[2], avctx->width / 2, avctx->height,
                                    p->linesize[2], buf, buf_size, 1);
        break;
    case LOCO_CYV12: case LOCO_YV12:
        decoded = loco_decode_plane(l, p->data[0], avctx->width, avctx->height,
                                    p->linesize[0], buf, buf_size, 1);
        if (decoded >= buf_size)
            goto buf_too_small;
        buf += decoded; buf_size -= decoded;
        decoded = loco_decode_plane(l, p->data[2], avctx->width / 2, avctx->height / 2,
                                    p->linesize[2], buf, buf_size, 1);
        if (decoded >= buf_size)
            goto buf_too_small;
        buf += decoded; buf_size -= decoded;
        decoded = loco_decode_plane(l, p->data[1], avctx->width / 2, avctx->height / 2,
                                    p->linesize[1], buf, buf_size, 1);
        break;
    case LOCO_CRGB: case LOCO_RGB:
        decoded = loco_decode_plane(l, p->data[0] + p->linesize[0] * (avctx->height - 1),
                                    avctx->width, avctx->height,
                                    -p->linesize[0], buf, buf_size, 3);
        if (decoded >= buf_size)
            goto buf_too_small;
        buf += decoded; buf_size -= decoded;
        decoded = loco_decode_plane(l, p->data[0] + p->linesize[0] * (avctx->height - 1) + 1,
                                    avctx->width, avctx->height,
                                    -p->linesize[0], buf, buf_size, 3);
        if (decoded >= buf_size)
            goto buf_too_small;
        buf += decoded; buf_size -= decoded;
        decoded = loco_decode_plane(l, p->data[0] + p->linesize[0] * (avctx->height - 1) + 2,
                                    avctx->width, avctx->height,
                                    -p->linesize[0], buf, buf_size, 3);
        break;
    case LOCO_CRGBA: case LOCO_RGBA:
        decoded = loco_decode_plane(l, p->data[0], avctx->width, avctx->height,
                                    p->linesize[0], buf, buf_size, 4);
        if (decoded >= buf_size)
            goto buf_too_small;
        buf += decoded; buf_size -= decoded;
        decoded = loco_decode_plane(l, p->data[0] + 1, avctx->width, avctx->height,
                                    p->linesize[0], buf, buf_size, 4);
        if (decoded >= buf_size)
            goto buf_too_small;
        buf += decoded; buf_size -= decoded;
        decoded = loco_decode_plane(l, p->data[0] + 2, avctx->width, avctx->height,
                                    p->linesize[0], buf, buf_size, 4);
        if (decoded >= buf_size)
            goto buf_too_small;
        buf += decoded; buf_size -= decoded;
        decoded = loco_decode_plane(l, p->data[0] + 3, avctx->width, avctx->height,
                                    p->linesize[0], buf, buf_size, 4);
        break;
    }

    *got_frame      = 1;
    *(AVFrame*)data = l->pic;

    return buf_size;

buf_too_small:
    av_log(avctx, AV_LOG_ERROR, "Input data too small.\n");
    return AVERROR(EINVAL);
}

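/* The 12-byte extradata carries three little-endian 32-bit words: the codec
 * version, the pixel layout (LOCO_MODE) and the lossy offset used by the
 * Rice decoder (zero for lossless version 1 streams). */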
static av_cold int decode_init(AVCodecContext *avctx)
{
    LOCOContext * const l = avctx->priv_data;
    int version;

    l->avctx = avctx;
    if (avctx->extradata_size < 12) {
        av_log(avctx, AV_LOG_ERROR, "Extradata size must be >= 12 instead of %i\n",
               avctx->extradata_size);
        return AVERROR_INVALIDDATA;
    }
    version = AV_RL32(avctx->extradata);
    switch (version) {
    case 1:
        l->lossy = 0;
        break;
    case 2:
        l->lossy = AV_RL32(avctx->extradata + 8);
        break;
    default:
        l->lossy = AV_RL32(avctx->extradata + 8);
        av_log_ask_for_sample(avctx, "This is LOCO codec version %i.\n", version);
    }

    l->mode = AV_RL32(avctx->extradata + 4);
    switch (l->mode) {
    case LOCO_CYUY2:
    case LOCO_YUY2:
    case LOCO_UYVY:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        break;
    case LOCO_CRGB:
    case LOCO_RGB:
        avctx->pix_fmt = AV_PIX_FMT_BGR24;
        break;
    case LOCO_CYV12:
    case LOCO_YV12:
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        break;
    case LOCO_CRGBA:
    case LOCO_RGBA:
        avctx->pix_fmt = AV_PIX_FMT_RGB32;
        break;
    default:
        av_log(avctx, AV_LOG_INFO, "Unknown colorspace, index = %i\n", l->mode);
        return AVERROR_INVALIDDATA;
    }
    if (avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(avctx, AV_LOG_INFO, "lossy:%i, version:%i, mode: %i\n",
               l->lossy, version, l->mode);

    return 0;
}

static av_cold int decode_end(AVCodecContext *avctx)
{
    LOCOContext * const l = avctx->priv_data;
    AVFrame *pic = &l->pic;

    if (pic->data[0])
        avctx->release_buffer(avctx, pic);

    return 0;
}

AVCodec ff_loco_decoder = {
    .name           = "loco",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_LOCO,
    .priv_data_size = sizeof(LOCOContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("LOCO"),
};