/*
 * a64 video encoder - multicolor modes
 * Copyright (c) 2009 Tobias Bindhammer
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * a64 video encoder - multicolor modes
 */

#include "a64enc.h"
#include "a64colors.h"
#include "a64tables.h"
#include "elbg.h"
#include "internal.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
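/* compile-time settings: number of dither levels, characters per charset,
 * whether to render a second (interlaced) charset and whether to crop the
 * screen to the source dimensions */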
#define DITHERSTEPS   8
#define CHARSET_CHARS 256
#define INTERLACED    1
#define CROP_SCREENS  1

/* gray gradient */
static const int mc_colors[5]={0x0,0xb,0xc,0xf,0x1};

/* other possible gradients - to be tested */
//static const int mc_colors[5]={0x0,0x8,0xa,0xf,0x7};
//static const int mc_colors[5]={0x0,0x9,0x8,0xa,0x3};

static void to_meta_with_crop(AVCodecContext *avctx,
                              const AVFrame *p, int *dest)
{
    int blockx, blocky, x, y;
    int luma = 0;
    int height = FFMIN(avctx->height, C64YRES);
    int width  = FFMIN(avctx->width,  C64XRES);
    uint8_t *src = p->data[0];

    for (blocky = 0; blocky < C64YRES; blocky += 8) {
        for (blockx = 0; blockx < C64XRES; blockx += 8) {
            for (y = blocky; y < blocky + 8 && y < C64YRES; y++) {
                for (x = blockx; x < blockx + 8 && x < C64XRES; x += 2) {
                    if (x < width && y < height) {
                        /* build average over 2 pixels */
                        luma = (src[(x + 0 + y * p->linesize[0])] +
                                src[(x + 1 + y * p->linesize[0])]) / 2;

                        /* write blocks as linear data now so they are suitable for elbg */
                        dest[0] = luma;
                    }
                    dest++;
                }
            }
        }
    }
}

static void render_charset(AVCodecContext *avctx, uint8_t *charset,
                           uint8_t *colrammap)
{
    A64Context *c = avctx->priv_data;
    uint8_t row1, row2;
    int charpos, x, y;
    int a, b;
    uint8_t pix;
    int lowdiff, highdiff;
    int *best_cb = c->mc_best_cb;
    static uint8_t index1[256];
    static uint8_t index2[256];
    static uint8_t dither[256];
    int i;
    int distance;

    /* generate lookup-tables for dither and index before looping */
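    /* index1/index2 map a luma value to the palette entries just below/above
     * it, dither to the blend position between them (0..DITHERSTEPS-1) */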
    i = 0;
    for (a = 0; a < 256; a++) {
        if (i < c->mc_pal_size - 1 && a == c->mc_luma_vals[i + 1]) {
            distance = c->mc_luma_vals[i + 1] - c->mc_luma_vals[i];
            for (b = 0; b <= distance; b++) {
                dither[c->mc_luma_vals[i] + b] = b * (DITHERSTEPS - 1) / distance;
            }
            i++;
        }
        if (i >= c->mc_pal_size - 1)
            dither[a] = 0;
        index1[a] = i;
        index2[a] = FFMIN(i + 1, c->mc_pal_size - 1);
    }

    /* and render charset */
    for (charpos = 0; charpos < CHARSET_CHARS; charpos++) {
        lowdiff  = 0;
        highdiff = 0;
        for (y = 0; y < 8; y++) {
            row1 = 0; row2 = 0;
            for (x = 0; x < 4; x++) {
                pix = best_cb[y * 4 + x];

                /* accumulate error for brightest/darkest color */
                if (index1[pix] >= 3)
                    highdiff += pix - c->mc_luma_vals[3];
                if (index1[pix] < 1)
                    lowdiff += c->mc_luma_vals[1] - pix;

                row1 <<= 2;
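                /* pick the lower or upper of the two neighbouring palette
                 * entries according to the dither pattern; interlaced mode
                 * renders a second row into a second charset at offset 0x800 */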
                if (INTERLACED) {
                    row2 <<= 2;
                    if (interlaced_dither_patterns[dither[pix]][(y & 3) * 2 + 0][x & 3])
                        row1 |= 3 - (index2[pix] & 3);
                    else
                        row1 |= 3 - (index1[pix] & 3);

                    if (interlaced_dither_patterns[dither[pix]][(y & 3) * 2 + 1][x & 3])
                        row2 |= 3 - (index2[pix] & 3);
                    else
                        row2 |= 3 - (index1[pix] & 3);
                } else {
                    if (multi_dither_patterns[dither[pix]][(y & 3)][x & 3])
                        row1 |= 3 - (index2[pix] & 3);
                    else
                        row1 |= 3 - (index1[pix] & 3);
                }
            }
            charset[y + 0x000] = row1;
            if (INTERLACED)
                charset[y + 0x800] = row2;
        }
        /* do we need to adjust pixels? */
        if (highdiff > 0 && lowdiff > 0 && c->mc_use_5col) {
            if (lowdiff > highdiff) {
                for (x = 0; x < 32; x++)
                    best_cb[x] = FFMIN(c->mc_luma_vals[3], best_cb[x]);
            } else {
                for (x = 0; x < 32; x++)
                    best_cb[x] = FFMAX(c->mc_luma_vals[1], best_cb[x]);
            }
            charpos--; /* redo now adjusted char */
        /* no adjustment needed, all fine */
        } else {
            /* advance pointers */
            best_cb += 32;
            charset += 8;

            /* remember colorram value */
            colrammap[charpos] = (highdiff > 0);
        }
    }
}

static av_cold int a64multi_close_encoder(AVCodecContext *avctx)
{
    A64Context *c = avctx->priv_data;
    av_free(c->mc_meta_charset);
    av_free(c->mc_best_cb);
    av_free(c->mc_charset);
    av_free(c->mc_charmap);
    av_free(c->mc_colram);
    return 0;
}

static av_cold int a64multi_encode_init(AVCodecContext *avctx)
{
    A64Context *c = avctx->priv_data;
    int a;
    av_lfg_init(&c->randctx, 1);

    if (avctx->global_quality < 1) {
        c->mc_lifetime = 4;
    } else {
        c->mc_lifetime = avctx->global_quality /= FF_QP2LAMBDA;
    }

    av_log(avctx, AV_LOG_INFO, "charset lifetime set to %d frame(s)\n", c->mc_lifetime);

    c->mc_frame_counter = 0;
    c->mc_use_5col      = avctx->codec->id == AV_CODEC_ID_A64_MULTI5;
    c->mc_pal_size      = 4 + c->mc_use_5col;

    /* precalc luma values for later use */
    for (a = 0; a < c->mc_pal_size; a++) {
        c->mc_luma_vals[a] = a64_palette[mc_colors[a]][0] * 0.30 +
                             a64_palette[mc_colors[a]][1] * 0.59 +
                             a64_palette[mc_colors[a]][2] * 0.11;
    }

    if (!(c->mc_meta_charset = av_malloc(32000 * c->mc_lifetime * sizeof(int)))  ||
        !(c->mc_best_cb      = av_malloc(CHARSET_CHARS * 32 * sizeof(int)))      ||
        !(c->mc_charmap      = av_mallocz(1000 * c->mc_lifetime * sizeof(int)))  ||
        !(c->mc_colram       = av_mallocz(CHARSET_CHARS * sizeof(uint8_t)))      ||
        !(c->mc_charset      = av_malloc(0x800 * (INTERLACED + 1) * sizeof(uint8_t)))) {
        av_log(avctx, AV_LOG_ERROR, "Failed to allocate buffer memory.\n");
        return AVERROR(ENOMEM);
    }

    /* set up extradata */
    if (!(avctx->extradata = av_mallocz(8 * 4 + AV_INPUT_BUFFER_PADDING_SIZE))) {
        av_log(avctx, AV_LOG_ERROR, "Failed to allocate memory for extradata.\n");
        return AVERROR(ENOMEM);
    }
    avctx->extradata_size = 8 * 4;
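    /* extradata holds 8 big-endian 32-bit words: +0 charset lifetime,
     * +4 frame count, +8 charset size, +12 screen + colram size,
     * +16 interlaced flag; +4, +8 and +12 are filled per packet in
     * a64multi_encode_frame */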
    AV_WB32(avctx->extradata, c->mc_lifetime);
    AV_WB32(avctx->extradata + 16, INTERLACED);

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    if (!avctx->codec_tag)
        avctx->codec_tag = AV_RL32("a64m");

    c->next_pts = AV_NOPTS_VALUE;

    return 0;
}

static void a64_compress_colram(unsigned char *buf, int *charmap, uint8_t *colram)
{
    int a;
    uint8_t temp;

    /* only needs to be done in 5col mode */
    /* XXX could be squeezed to 0x80 bytes */
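    /* pack the 1-bit colram flag of four chars (one from each 0x100 block of
     * the charmap) into bits 2-5 of every output byte */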
    for (a = 0; a < 256; a++) {
        temp  = colram[charmap[a + 0x000]] << 0;
        temp |= colram[charmap[a + 0x100]] << 1;
        temp |= colram[charmap[a + 0x200]] << 2;
        if (a < 0xe8)
            temp |= colram[charmap[a + 0x300]] << 3;
        buf[a] = temp << 2;
    }
}

static int a64multi_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                 const AVFrame *pict, int *got_packet)
{
    A64Context *c = avctx->priv_data;

    int frame;
    int x, y;
    int b_height;
    int b_width;

    int req_size, ret;
    uint8_t *buf;

    int *charmap     = c->mc_charmap;
    uint8_t *colram  = c->mc_colram;
    uint8_t *charset = c->mc_charset;
    int *meta        = c->mc_meta_charset;
    int *best_cb     = c->mc_best_cb;

    int charset_size = 0x800 * (INTERLACED + 1);
    int colram_size  = 0x100 * c->mc_use_5col;
    int screen_size;
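    /* screen dimensions in 8x8 char blocks; an uncropped screen is stored
     * padded to 0x400 bytes */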
    if (CROP_SCREENS) {
        b_height = FFMIN(avctx->height, C64YRES) >> 3;
        b_width  = FFMIN(avctx->width,  C64XRES) >> 3;
        screen_size = b_width * b_height;
    } else {
        b_height = C64YRES >> 3;
        b_width  = C64XRES >> 3;
        screen_size = 0x400;
    }

    /* no data means: end encoding asap */
    if (!pict) {
        /* all done, end encoding */
        if (!c->mc_lifetime)
            return 0;
        /* no more frames in queue, prepare to flush remaining frames */
        if (!c->mc_frame_counter) {
            c->mc_lifetime = 0;
        }
        /* still frames in queue, so limit lifetime to the remaining frames */
        else
            c->mc_lifetime = c->mc_frame_counter;
    /* still new data available */
    } else {
        /* fill up mc_meta_charset with data until the lifetime is exceeded */
        if (c->mc_frame_counter < c->mc_lifetime) {
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
            avctx->coded_frame->key_frame = 1;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
            to_meta_with_crop(avctx, pict, meta + 32000 * c->mc_frame_counter);
            c->mc_frame_counter++;
            if (c->next_pts == AV_NOPTS_VALUE)
                c->next_pts = pict->pts;
            /* lifetime is not reached yet, so wait for the next frame first */
            return 0;
        }
    }

    /* lifetime reached, so now convert X frames at once */
    if (c->mc_frame_counter == c->mc_lifetime) {
        req_size = 0;
        /* any frames to encode? */
        if (c->mc_lifetime) {
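            /* packet layout: charset, then per encoded frame a screen
             * (charmap) followed by an optional colram block */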
            req_size = charset_size + c->mc_lifetime * (screen_size + colram_size);

            if ((ret = ff_alloc_packet(pkt, req_size)) < 0) {
                av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", req_size);
                return ret;
            }
            buf = pkt->data;

            /* calc optimal new charset + charmaps */
            ret = ff_init_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb,
                               CHARSET_CHARS, 50, charmap, &c->randctx);
            if (ret < 0)
                return ret;
            ret = ff_do_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb,
                             CHARSET_CHARS, 50, charmap, &c->randctx);
            if (ret < 0)
                return ret;

            /* create colorram map and a c64 readable charset */
            render_charset(avctx, charset, colram);

            /* copy charset to buf */
            memcpy(buf, charset, charset_size);

            /* advance pointers */
            buf     += charset_size;
            charset += charset_size;
        }

        /* write X frames to buf */
        for (frame = 0; frame < c->mc_lifetime; frame++) {
            /* copy charmap to buf. buf is uchar*, charmap is int*, so no memcpy here, sorry */
            for (y = 0; y < b_height; y++) {
                for (x = 0; x < b_width; x++) {
                    buf[y * b_width + x] = charmap[y * b_width + x];
                }
            }
            /* advance pointers */
            buf      += screen_size;
            req_size += screen_size;

            /* compress and copy colram to buf */
            if (c->mc_use_5col) {
                a64_compress_colram(buf, charmap, colram);
                /* advance pointers */
                buf      += colram_size;
                req_size += colram_size;
            }

            /* advance to next charmap */
            charmap += 1000;
        }

        AV_WB32(avctx->extradata + 4,  c->mc_frame_counter);
        AV_WB32(avctx->extradata + 8,  charset_size);
        AV_WB32(avctx->extradata + 12, screen_size + colram_size);

        /* reset counter */
        c->mc_frame_counter = 0;

        pkt->pts    = pkt->dts = c->next_pts;
        c->next_pts = AV_NOPTS_VALUE;

        pkt->size   = req_size;
        pkt->flags |= AV_PKT_FLAG_KEY;
        *got_packet = !!req_size;
    }
    return 0;
}

AVCodec ff_a64multi_encoder = {
    .name           = "a64multi",
    .long_name      = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_A64_MULTI,
    .priv_data_size = sizeof(A64Context),
    .init           = a64multi_encode_init,
    .encode2        = a64multi_encode_frame,
    .close          = a64multi_close_encoder,
    .pix_fmts       = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
    .capabilities   = AV_CODEC_CAP_DELAY,
};

AVCodec ff_a64multi5_encoder = {
    .name           = "a64multi5",
    .long_name      = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64, extended with 5th color (colram)"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_A64_MULTI5,
    .priv_data_size = sizeof(A64Context),
    .init           = a64multi_encode_init,
    .encode2        = a64multi_encode_frame,
    .close          = a64multi_close_encoder,
    .pix_fmts       = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
    .capabilities   = AV_CODEC_CAP_DELAY,
};