/*
 * a64 video encoder - multicolor modes
 * Copyright (c) 2009 Tobias Bindhammer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * a64 video encoder - multicolor modes
 */
#include "a64enc.h"
#include "a64colors.h"
#include "a64tables.h"
#include "elbg.h"
#include "internal.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

#define DITHERSTEPS   8
#define CHARSET_CHARS 256
#define INTERLACED    1
#define CROP_SCREENS  1

/* gray gradient */
static const int mc_colors[5] = {0x0, 0xb, 0xc, 0xf, 0x1};

/* other possible gradients - to be tested */
//static const int mc_colors[5] = {0x0, 0x8, 0xa, 0xf, 0x7};
//static const int mc_colors[5] = {0x0, 0x9, 0x8, 0xa, 0x3};
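
/* Convert the (cropped) GRAY8 input frame into the linear metadata buffer
 * used by ELBG: the visible area is walked in 8x8 character blocks, each
 * pair of horizontally adjacent pixels is averaged (multicolor pixels are
 * two screen pixels wide), and every character ends up as 32 consecutive
 * luma values. dest always advances, so positions outside the input
 * dimensions are simply left untouched. */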
static void to_meta_with_crop(AVCodecContext *avctx, AVFrame *p, int *dest)
{
    int blockx, blocky, x, y;
    int luma = 0;
    int height = FFMIN(avctx->height, C64YRES);
    int width  = FFMIN(avctx->width , C64XRES);
    uint8_t *src = p->data[0];

    for (blocky = 0; blocky < C64YRES; blocky += 8) {
        for (blockx = 0; blockx < C64XRES; blockx += 8) {
            for (y = blocky; y < blocky + 8 && y < C64YRES; y++) {
                for (x = blockx; x < blockx + 8 && x < C64XRES; x += 2) {
                    if (x < width && y < height) {
                        /* build average over 2 pixels */
                        luma = (src[(x + 0 + y * p->linesize[0])] +
                                src[(x + 1 + y * p->linesize[0])]) / 2;
                        /* write blocks as linear data now so they are suitable for elbg */
                        dest[0] = luma;
                    }
                    dest++;
                }
            }
        }
    }
}

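/* Render the ELBG codebook into a C64 multicolor charset. First build lookup
 * tables that map every possible luma value to its two neighbouring palette
 * entries (index1/index2) plus a dither strength, then emit each character as
 * 2-bit-per-pixel rows (a second, interlaced charset goes to offset 0x800).
 * In 5-color mode a character that drifts towards both the darkest and the
 * brightest end gets its codebook entries clamped and is rendered again;
 * colrammap records which characters need the extra color. */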
static void render_charset(AVCodecContext *avctx, uint8_t *charset,
                           uint8_t *colrammap)
{
    A64Context *c = avctx->priv_data;
    uint8_t row1, row2;
    int charpos, x, y;
    int a, b;
    uint8_t pix;
    int lowdiff, highdiff;
    int *best_cb = c->mc_best_cb;
    static uint8_t index1[256];
    static uint8_t index2[256];
    static uint8_t dither[256];
    int i;
    int distance;

    /* generate lookup-tables for dither and index before looping */
    i = 0;
    for (a = 0; a < 256; a++) {
        if (i < c->mc_pal_size - 1 && a == c->mc_luma_vals[i + 1]) {
            distance = c->mc_luma_vals[i + 1] - c->mc_luma_vals[i];
            for (b = 0; b <= distance; b++) {
                dither[c->mc_luma_vals[i] + b] = b * (DITHERSTEPS - 1) / distance;
            }
            i++;
        }
        if (i >= c->mc_pal_size - 1)
            dither[a] = 0;
        index1[a] = i;
        index2[a] = FFMIN(i + 1, c->mc_pal_size - 1);
    }

    /* and render charset */
    for (charpos = 0; charpos < CHARSET_CHARS; charpos++) {
        lowdiff  = 0;
        highdiff = 0;
        for (y = 0; y < 8; y++) {
            row1 = 0;
            row2 = 0;
            for (x = 0; x < 4; x++) {
                pix = best_cb[y * 4 + x];

                /* accumulate error for brightest/darkest color */
                if (index1[pix] >= 3)
                    highdiff += pix - c->mc_luma_vals[3];
                if (index1[pix] < 1)
                    lowdiff += c->mc_luma_vals[1] - pix;

                row1 <<= 2;

                if (INTERLACED) {
                    row2 <<= 2;
                    if (interlaced_dither_patterns[dither[pix]][(y & 3) * 2 + 0][x & 3])
                        row1 |= 3 - (index2[pix] & 3);
                    else
                        row1 |= 3 - (index1[pix] & 3);

                    if (interlaced_dither_patterns[dither[pix]][(y & 3) * 2 + 1][x & 3])
                        row2 |= 3 - (index2[pix] & 3);
                    else
                        row2 |= 3 - (index1[pix] & 3);
                } else {
                    if (multi_dither_patterns[dither[pix]][(y & 3)][x & 3])
                        row1 |= 3 - (index2[pix] & 3);
                    else
                        row1 |= 3 - (index1[pix] & 3);
                }
            }
            charset[y + 0x000] = row1;
            if (INTERLACED)
                charset[y + 0x800] = row2;
        }

        /* do we need to adjust pixels? */
        if (highdiff > 0 && lowdiff > 0 && c->mc_use_5col) {
            if (lowdiff > highdiff) {
                for (x = 0; x < 32; x++)
                    best_cb[x] = FFMIN(c->mc_luma_vals[3], best_cb[x]);
            } else {
                for (x = 0; x < 32; x++)
                    best_cb[x] = FFMAX(c->mc_luma_vals[1], best_cb[x]);
            }
            charpos--; /* redo now adjusted char */
        /* no adjustment needed, all fine */
        } else {
            /* advance pointers */
            best_cb += 32;
            charset += 8;

            /* remember colorram value */
            colrammap[charpos] = (highdiff > 0);
        }
    }
}

static av_cold int a64multi_close_encoder(AVCodecContext *avctx)
{
    A64Context *c = avctx->priv_data;
    av_free(c->mc_meta_charset);
    av_free(c->mc_best_cb);
    av_free(c->mc_charset);
    av_free(c->mc_charmap);
    av_free(c->mc_colram);
    return 0;
}

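/* Encoder setup: the charset lifetime (number of frames sharing one charset)
 * is derived from global_quality, the palette has 4 or 5 gray levels
 * depending on the codec id, luma values are precomputed with 0.30/0.59/0.11
 * weights, the work buffers are allocated, and the 32-byte extradata block
 * is primed with the lifetime and the interlace flag. */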
static av_cold int a64multi_init_encoder(AVCodecContext *avctx)
{
    A64Context *c = avctx->priv_data;
    int a;
    av_lfg_init(&c->randctx, 1);

    if (avctx->global_quality < 1) {
        c->mc_lifetime = 4;
    } else {
        c->mc_lifetime = avctx->global_quality /= FF_QP2LAMBDA;
    }

    av_log(avctx, AV_LOG_INFO, "charset lifetime set to %d frame(s)\n", c->mc_lifetime);

    c->mc_frame_counter = 0;
    c->mc_use_5col      = avctx->codec->id == AV_CODEC_ID_A64_MULTI5;
    c->mc_pal_size      = 4 + c->mc_use_5col;

    /* precalc luma values for later use */
    for (a = 0; a < c->mc_pal_size; a++) {
        c->mc_luma_vals[a] = a64_palette[mc_colors[a]][0] * 0.30 +
                             a64_palette[mc_colors[a]][1] * 0.59 +
                             a64_palette[mc_colors[a]][2] * 0.11;
    }

    if (!(c->mc_meta_charset = av_malloc(32000 * c->mc_lifetime * sizeof(int))) ||
        !(c->mc_best_cb      = av_malloc(CHARSET_CHARS * 32 * sizeof(int)))     ||
        !(c->mc_charmap      = av_mallocz(1000 * c->mc_lifetime * sizeof(int))) ||
        !(c->mc_colram       = av_mallocz(CHARSET_CHARS * sizeof(uint8_t)))     ||
        !(c->mc_charset      = av_malloc(0x800 * (INTERLACED + 1) * sizeof(uint8_t)))) {
        av_log(avctx, AV_LOG_ERROR, "Failed to allocate buffer memory.\n");
        return AVERROR(ENOMEM);
    }

    /* set up extradata */
    if (!(avctx->extradata = av_mallocz(8 * 4 + FF_INPUT_BUFFER_PADDING_SIZE))) {
        av_log(avctx, AV_LOG_ERROR, "Failed to allocate memory for extradata.\n");
        return AVERROR(ENOMEM);
    }
    avctx->extradata_size = 8 * 4;
    AV_WB32(avctx->extradata, c->mc_lifetime);
    AV_WB32(avctx->extradata + 16, INTERLACED);

    avcodec_get_frame_defaults(&c->picture);
    avctx->coded_frame            = &c->picture;
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;
    if (!avctx->codec_tag)
        avctx->codec_tag = AV_RL32("a64m");

    c->next_pts = AV_NOPTS_VALUE;

    return 0;
}

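/* Pack the per-character colram flags of the 1000 screen positions into 256
 * bytes: each output byte collects the colram flag of the character shown at
 * screen cells a, a + 0x100, a + 0x200 and (while still inside the 1000-char
 * screen) a + 0x300, shifted left by 2 bits. */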
static void a64_compress_colram(unsigned char *buf, int *charmap, uint8_t *colram)
{
    int a;
    uint8_t temp;

    /* only needs to be done in 5col mode */
    /* XXX could be squeezed to 0x80 bytes */
    for (a = 0; a < 256; a++) {
        temp  = colram[charmap[a + 0x000]] << 0;
        temp |= colram[charmap[a + 0x100]] << 1;
        temp |= colram[charmap[a + 0x200]] << 2;
        if (a < 0xe8) temp |= colram[charmap[a + 0x300]] << 3;
        buf[a] = temp << 2;
    }
}

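/* Frames are buffered into mc_meta_charset until mc_lifetime frames have been
 * collected (or the encoder is flushed). Only then is a packet emitted,
 * containing the shared charset followed by, for every buffered frame, a
 * screen (charmap) and, in 5-color mode, a compressed colram block. */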
static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                 const AVFrame *pict, int *got_packet)
{
    A64Context *c = avctx->priv_data;
    AVFrame *const p = &c->picture;

    int frame;
    int x, y;
    int b_height;
    int b_width;

    int req_size, ret;
    uint8_t *buf = NULL;

    int *charmap     = c->mc_charmap;
    uint8_t *colram  = c->mc_colram;
    uint8_t *charset = c->mc_charset;
    int *meta        = c->mc_meta_charset;
    int *best_cb     = c->mc_best_cb;

    int charset_size = 0x800 * (INTERLACED + 1);
    int colram_size  = 0x100 * c->mc_use_5col;
    int screen_size;

    if (CROP_SCREENS) {
        b_height = FFMIN(avctx->height, C64YRES) >> 3;
        b_width  = FFMIN(avctx->width , C64XRES) >> 3;
        screen_size = b_width * b_height;
    } else {
        b_height = C64YRES >> 3;
        b_width  = C64XRES >> 3;
        screen_size = 0x400;
    }

    /* no data means we should end encoding as soon as possible */
    if (!pict) {
        /* all done, end encoding */
        if (!c->mc_lifetime)
            return 0;
        /* no more frames in queue, prepare to flush remaining frames */
        if (!c->mc_frame_counter) {
            c->mc_lifetime = 0;
        /* still frames in queue, so limit lifetime to the remaining frames */
        } else {
            c->mc_lifetime = c->mc_frame_counter;
        }
    /* still new data available */
    } else {
        /* fill up mc_meta_charset with data until the lifetime is reached */
        if (c->mc_frame_counter < c->mc_lifetime) {
            *p = *pict;
            p->pict_type = AV_PICTURE_TYPE_I;
            p->key_frame = 1;
            to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
            c->mc_frame_counter++;
            if (c->next_pts == AV_NOPTS_VALUE)
                c->next_pts = pict->pts;
            /* lifetime not reached yet, so wait for the next frame first */
            return 0;
        }
    }

    /* lifetime reached, so now convert X frames at once */
    if (c->mc_frame_counter == c->mc_lifetime) {
        req_size = 0;
        /* any frames to encode? */
        if (c->mc_lifetime) {
            req_size = charset_size + c->mc_lifetime * (screen_size + colram_size);
            if ((ret = ff_alloc_packet2(avctx, pkt, req_size)) < 0)
                return ret;
            buf = pkt->data;

            /* calc optimal new charset + charmaps */
            ff_init_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);
            ff_do_elbg  (meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);

            /* create colorram map and a c64 readable charset */
            render_charset(avctx, charset, colram);

            /* copy charset to buf */
            memcpy(buf, charset, charset_size);

            /* advance pointers */
            buf     += charset_size;
            charset += charset_size;
        }

        /* write x frames to buf */
        for (frame = 0; frame < c->mc_lifetime; frame++) {
            /* copy charmap to buf; buf is uchar*, charmap is int*, so no memcpy here */
            for (y = 0; y < b_height; y++) {
                for (x = 0; x < b_width; x++) {
                    buf[y * b_width + x] = charmap[y * b_width + x];
                }
            }
            /* advance pointers */
            buf      += screen_size;
            req_size += screen_size;

            /* compress and copy colram to buf */
            if (c->mc_use_5col) {
                a64_compress_colram(buf, charmap, colram);
                /* advance pointers */
                buf      += colram_size;
                req_size += colram_size;
            }

            /* advance to the next charmap */
            charmap += 1000;
        }

        AV_WB32(avctx->extradata + 4,  c->mc_frame_counter);
        AV_WB32(avctx->extradata + 8,  charset_size);
        AV_WB32(avctx->extradata + 12, screen_size + colram_size);

        /* reset counter */
        c->mc_frame_counter = 0;

        pkt->pts = pkt->dts = c->next_pts;
        c->next_pts         = AV_NOPTS_VALUE;

        pkt->size   = req_size;
        pkt->flags |= AV_PKT_FLAG_KEY;
        *got_packet = !!req_size;
    }
    return 0;
}

#if CONFIG_A64MULTI_ENCODER
AVCodec ff_a64multi_encoder = {
    .name           = "a64multi",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_A64_MULTI,
    .priv_data_size = sizeof(A64Context),
    .init           = a64multi_init_encoder,
    .encode2        = a64multi_encode_frame,
    .close          = a64multi_close_encoder,
    .pix_fmts       = (const enum PixelFormat[]) {PIX_FMT_GRAY8, PIX_FMT_NONE},
    .long_name      = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64"),
    .capabilities   = CODEC_CAP_DELAY,
};
#endif

#if CONFIG_A64MULTI5_ENCODER
AVCodec ff_a64multi5_encoder = {
    .name           = "a64multi5",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_A64_MULTI5,
    .priv_data_size = sizeof(A64Context),
    .init           = a64multi_init_encoder,
    .encode2        = a64multi_encode_frame,
    .close          = a64multi_close_encoder,
    .pix_fmts       = (const enum PixelFormat[]) {PIX_FMT_GRAY8, PIX_FMT_NONE},
    .long_name      = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64, extended with 5th color (colram)"),
    .capabilities   = CODEC_CAP_DELAY,
};
#endif