/*
 * a64 video encoder - multicolor modes
 * Copyright (c) 2009 Tobias Bindhammer
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * a64 video encoder - multicolor modes
 */

#include "a64enc.h"
#include "a64colors.h"
#include "a64tables.h"
#include "elbg.h"
#include "internal.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

#define DITHERSTEPS   8
#define CHARSET_CHARS 256
#define INTERLACED    1
#define CROP_SCREENS  1

/* gray gradient */
static const int mc_colors[5] = {0x0, 0xb, 0xc, 0xf, 0x1};

/* other possible gradients - to be tested */
//static const int mc_colors[5]={0x0,0x8,0xa,0xf,0x7};
//static const int mc_colors[5]={0x0,0x9,0x8,0xa,0x3};
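
/* Convert the source frame into linear 4x8 luma blocks covering the C64
 * screen: horizontal pixel pairs are averaged, since a multicolor pixel is
 * two hires pixels wide, and samples outside the input picture are left
 * untouched. The blocks are laid out one after another so they can be fed
 * directly to elbg. */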
static void to_meta_with_crop(AVCodecContext *avctx, AVFrame *p, int *dest)
{
    int blockx, blocky, x, y;
    int luma = 0;
    int height = FFMIN(avctx->height, C64YRES);
    int width  = FFMIN(avctx->width , C64XRES);
    uint8_t *src = p->data[0];

    for (blocky = 0; blocky < C64YRES; blocky += 8) {
        for (blockx = 0; blockx < C64XRES; blockx += 8) {
            for (y = blocky; y < blocky + 8 && y < C64YRES; y++) {
                for (x = blockx; x < blockx + 8 && x < C64XRES; x += 2) {
                    if (x < width && y < height) {
                        /* build average over 2 pixels */
                        luma = (src[(x + 0 + y * p->linesize[0])] +
                                src[(x + 1 + y * p->linesize[0])]) / 2;
                        /* write blocks as linear data now so they are suitable for elbg */
                        dest[0] = luma;
                    }
                    dest++;
                }
            }
        }
    }
}
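
/* Render the elbg codebook into a C64 charset. A dither table and two
 * palette index tables are built from the precalculated luma values, then
 * every character is dithered between neighbouring palette entries. In
 * 5-color mode, a character that would need both the extra bright and the
 * extra dark color is clamped to one side and re-rendered, and colrammap
 * records which characters use the color-RAM color. */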
static void render_charset(AVCodecContext *avctx, uint8_t *charset,
                           uint8_t *colrammap)
{
    A64Context *c = avctx->priv_data;
    uint8_t row1, row2;
    int charpos, x, y;
    int a, b;
    uint8_t pix;
    int lowdiff, highdiff;
    int *best_cb = c->mc_best_cb;
    static uint8_t index1[256];
    static uint8_t index2[256];
    static uint8_t dither[256];
    int i;
    int distance;

    /* generate lookup-tables for dither and index before looping */
    i = 0;
    for (a = 0; a < 256; a++) {
        if (i < c->mc_pal_size - 1 && a == c->mc_luma_vals[i + 1]) {
            distance = c->mc_luma_vals[i + 1] - c->mc_luma_vals[i];
            for (b = 0; b <= distance; b++) {
                dither[c->mc_luma_vals[i] + b] = b * (DITHERSTEPS - 1) / distance;
            }
            i++;
        }
        if (i >= c->mc_pal_size - 1) dither[a] = 0;
        index1[a] = i;
        index2[a] = FFMIN(i + 1, c->mc_pal_size - 1);
    }

    /* and render charset */
    for (charpos = 0; charpos < CHARSET_CHARS; charpos++) {
        lowdiff  = 0;
        highdiff = 0;
        for (y = 0; y < 8; y++) {
            row1 = 0; row2 = 0;
            for (x = 0; x < 4; x++) {
                pix = best_cb[y * 4 + x];

                /* accumulate error for brightest/darkest color */
                if (index1[pix] >= 3)
                    highdiff += pix - c->mc_luma_vals[3];
                if (index1[pix] < 1)
                    lowdiff += c->mc_luma_vals[1] - pix;

                row1 <<= 2;
                if (INTERLACED) {
                    row2 <<= 2;
                    if (interlaced_dither_patterns[dither[pix]][(y & 3) * 2 + 0][x & 3])
                        row1 |= 3 - (index2[pix] & 3);
                    else
                        row1 |= 3 - (index1[pix] & 3);

                    if (interlaced_dither_patterns[dither[pix]][(y & 3) * 2 + 1][x & 3])
                        row2 |= 3 - (index2[pix] & 3);
                    else
                        row2 |= 3 - (index1[pix] & 3);
                } else {
                    if (multi_dither_patterns[dither[pix]][(y & 3)][x & 3])
                        row1 |= 3 - (index2[pix] & 3);
                    else
                        row1 |= 3 - (index1[pix] & 3);
                }
            }
            charset[y + 0x000] = row1;
            if (INTERLACED) charset[y + 0x800] = row2;
        }

        /* do we need to adjust pixels? */
        if (highdiff > 0 && lowdiff > 0 && c->mc_use_5col) {
            if (lowdiff > highdiff) {
                for (x = 0; x < 32; x++)
                    best_cb[x] = FFMIN(c->mc_luma_vals[3], best_cb[x]);
            } else {
                for (x = 0; x < 32; x++)
                    best_cb[x] = FFMAX(c->mc_luma_vals[1], best_cb[x]);
            }
            charpos--;          /* redo now adjusted char */
        /* no adjustment needed, all fine */
        } else {
            /* advance pointers */
            best_cb += 32;
            charset += 8;
            /* remember colorram value */
            colrammap[charpos] = (highdiff > 0);
        }
    }
}
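
/* Free the coded frame and all scratch buffers allocated in init. */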
static av_cold int a64multi_close_encoder(AVCodecContext *avctx)
{
    A64Context *c = avctx->priv_data;

    av_frame_free(&avctx->coded_frame);

    av_free(c->mc_meta_charset);
    av_free(c->mc_best_cb);
    av_free(c->mc_charset);
    av_free(c->mc_charmap);
    av_free(c->mc_colram);
    return 0;
}
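
/* Init: derive the charset lifetime (frames encoded per charset) from
 * global_quality, precalculate the palette luma values, allocate the work
 * buffers and write the static extradata fields (lifetime, interlacing);
 * the remaining extradata fields are filled in per packet. */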
static av_cold int a64multi_encode_init(AVCodecContext *avctx)
{
    A64Context *c = avctx->priv_data;
    int a;

    av_lfg_init(&c->randctx, 1);

    if (avctx->global_quality < 1) {
        c->mc_lifetime = 4;
    } else {
        c->mc_lifetime = avctx->global_quality /= FF_QP2LAMBDA;
    }

    av_log(avctx, AV_LOG_INFO, "charset lifetime set to %d frame(s)\n", c->mc_lifetime);

    c->mc_frame_counter = 0;
    c->mc_use_5col      = avctx->codec->id == AV_CODEC_ID_A64_MULTI5;
    c->mc_pal_size      = 4 + c->mc_use_5col;

    /* precalc luma values for later use */
    for (a = 0; a < c->mc_pal_size; a++) {
        c->mc_luma_vals[a] = a64_palette[mc_colors[a]][0] * 0.30 +
                             a64_palette[mc_colors[a]][1] * 0.59 +
                             a64_palette[mc_colors[a]][2] * 0.11;
    }

    if (!(c->mc_meta_charset = av_malloc(32000 * c->mc_lifetime * sizeof(int))) ||
        !(c->mc_best_cb      = av_malloc(CHARSET_CHARS * 32 * sizeof(int)))     ||
        !(c->mc_charmap      = av_mallocz(1000 * c->mc_lifetime * sizeof(int))) ||
        !(c->mc_colram       = av_mallocz(CHARSET_CHARS * sizeof(uint8_t)))     ||
        !(c->mc_charset      = av_malloc(0x800 * (INTERLACED + 1) * sizeof(uint8_t)))) {
        av_log(avctx, AV_LOG_ERROR, "Failed to allocate buffer memory.\n");
        return AVERROR(ENOMEM);
    }

    /* set up extradata */
    if (!(avctx->extradata = av_mallocz(8 * 4 + FF_INPUT_BUFFER_PADDING_SIZE))) {
        av_log(avctx, AV_LOG_ERROR, "Failed to allocate memory for extradata.\n");
        return AVERROR(ENOMEM);
    }
    avctx->extradata_size = 8 * 4;
    AV_WB32(avctx->extradata, c->mc_lifetime);
    AV_WB32(avctx->extradata + 16, INTERLACED);

    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame) {
        a64multi_close_encoder(avctx);
        return AVERROR(ENOMEM);
    }

    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;
    if (!avctx->codec_tag)
        avctx->codec_tag = AV_RL32("a64m");

    c->next_pts = AV_NOPTS_VALUE;

    return 0;
}
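
/* Pack the color-RAM bits of one screen into 256 bytes: each output byte
 * collects the colram flags of the characters at charmap positions a,
 * a + 0x100, a + 0x200 and a + 0x300, shifted left by 2. */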
static void a64_compress_colram(unsigned char *buf, int *charmap, uint8_t *colram)
{
    int a;
    uint8_t temp;

    /* only needs to be done in 5col mode */
    /* XXX could be squeezed to 0x80 bytes */
    for (a = 0; a < 256; a++) {
        temp  = colram[charmap[a + 0x000]] << 0;
        temp |= colram[charmap[a + 0x100]] << 1;
        temp |= colram[charmap[a + 0x200]] << 2;
        if (a < 0xe8) temp |= colram[charmap[a + 0x300]] << 3;
        buf[a] = temp << 2;
    }
}
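
/* Buffer incoming frames in mc_meta_charset until mc_lifetime frames have
 * been collected (or the encoder is flushed), then run elbg over all of them
 * to find a shared charset, render it, and emit one packet containing the
 * charset followed by a screen map (and packed color RAM in 5-color mode)
 * for each buffered frame. */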
static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                 const AVFrame *pict, int *got_packet)
{
    A64Context *c = avctx->priv_data;
    AVFrame *const p = avctx->coded_frame;

    int frame;
    int x, y;
    int b_height;
    int b_width;

    int req_size, ret;
    uint8_t *buf;

    int *charmap     = c->mc_charmap;
    uint8_t *colram  = c->mc_colram;
    uint8_t *charset = c->mc_charset;
    int *meta        = c->mc_meta_charset;
    int *best_cb     = c->mc_best_cb;

    int charset_size = 0x800 * (INTERLACED + 1);
    int colram_size  = 0x100 * c->mc_use_5col;
    int screen_size;

    if (CROP_SCREENS) {
        b_height = FFMIN(avctx->height, C64YRES) >> 3;
        b_width  = FFMIN(avctx->width , C64XRES) >> 3;
        screen_size = b_width * b_height;
    } else {
        b_height = C64YRES >> 3;
        b_width  = C64XRES >> 3;
        screen_size = 0x400;
    }

    /* no data, means end encoding asap */
    if (!pict) {
        /* all done, end encoding */
        if (!c->mc_lifetime) return 0;
        /* no more frames in queue, prepare to flush remaining frames */
        if (!c->mc_frame_counter) {
            c->mc_lifetime = 0;
        }
        /* still frames in queue so limit lifetime to remaining frames */
        else c->mc_lifetime = c->mc_frame_counter;
    /* still new data available */
    } else {
        /* fill up mc_meta_charset with data until lifetime exceeds */
        if (c->mc_frame_counter < c->mc_lifetime) {
            *p = *pict;
            p->pict_type = AV_PICTURE_TYPE_I;
            p->key_frame = 1;
            to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
            c->mc_frame_counter++;
            if (c->next_pts == AV_NOPTS_VALUE)
                c->next_pts = pict->pts;
            /* lifetime is not reached so wait for next frame first */
            return 0;
        }
    }

    /* lifetime reached so now convert X frames at once */
    if (c->mc_frame_counter == c->mc_lifetime) {
        req_size = 0;
        /* any frames to encode? */
        if (c->mc_lifetime) {
            req_size = charset_size + c->mc_lifetime * (screen_size + colram_size);
            if ((ret = ff_alloc_packet(pkt, req_size)) < 0) {
                av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", req_size);
                return ret;
            }
            buf = pkt->data;

            /* calc optimal new charset + charmaps */
            ff_init_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);
            ff_do_elbg  (meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);

            /* create colorram map and a c64 readable charset */
            render_charset(avctx, charset, colram);

            /* copy charset to buf */
            memcpy(buf, charset, charset_size);

            /* advance pointers */
            buf     += charset_size;
            charset += charset_size;
        }

        /* write x frames to buf */
        for (frame = 0; frame < c->mc_lifetime; frame++) {
            /* copy charmap to buf. buf is uchar*, charmap is int*, so no memcpy here, sorry */
            for (y = 0; y < b_height; y++) {
                for (x = 0; x < b_width; x++) {
                    buf[y * b_width + x] = charmap[y * b_width + x];
                }
            }
            /* advance output pointer; req_size already accounts for all
             * screens and colram blocks from the allocation above */
            buf += screen_size;

            /* compress and copy colram to buf */
            if (c->mc_use_5col) {
                a64_compress_colram(buf, charmap, colram);
                /* advance pointers */
                buf += colram_size;
            }
            /* advance to next charmap */
            charmap += 1000;
        }

        AV_WB32(avctx->extradata + 4,  c->mc_frame_counter);
        AV_WB32(avctx->extradata + 8,  charset_size);
        AV_WB32(avctx->extradata + 12, screen_size + colram_size);

        /* reset counter */
        c->mc_frame_counter = 0;

        pkt->pts = pkt->dts = c->next_pts;
        c->next_pts         = AV_NOPTS_VALUE;

        pkt->size   = req_size;
        pkt->flags |= AV_PKT_FLAG_KEY;
        *got_packet = !!req_size;
    }
    return 0;
}

AVCodec ff_a64multi_encoder = {
    .name           = "a64multi",
    .long_name      = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_A64_MULTI,
    .priv_data_size = sizeof(A64Context),
    .init           = a64multi_encode_init,
    .encode2        = a64multi_encode_frame,
    .close          = a64multi_close_encoder,
    .pix_fmts       = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
    .capabilities   = CODEC_CAP_DELAY,
};

AVCodec ff_a64multi5_encoder = {
    .name           = "a64multi5",
    .long_name      = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64, extended with 5th color (colram)"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_A64_MULTI5,
    .priv_data_size = sizeof(A64Context),
    .init           = a64multi_encode_init,
    .encode2        = a64multi_encode_frame,
    .close          = a64multi_close_encoder,
    .pix_fmts       = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
    .capabilities   = CODEC_CAP_DELAY,
};