/*
 * FFV1 encoder
 *
 * Copyright (c) 2003-2013 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * FF Video Codec 1 (a lossless codec) encoder
 */

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/crc.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timer.h"
#include "avcodec.h"
#include "internal.h"
#include "put_bits.h"
#include "rangecoder.h"
#include "golomb.h"
#include "mathops.h"
#include "ffv1.h"

static const int8_t quant5_10bit[256] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0,
};

static const int8_t quant5[256] = {
    0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1,
};

static const int8_t quant9_10bit[256] = {
    0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
    -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
    -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
    -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
    -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3,
    -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3,
    -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -0, -0, -0, -0,
};

static const int8_t quant11[256] = {
    0, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5,
    -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5,
    -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5,
    -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5,
    -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5,
    -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -4, -4,
    -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
    -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -1,
};

static const uint8_t ver2_state[256] = {
    0,   10,  10,  10,  10,  16,  16,  16,  28,  16,  16,  29,  42,  49,  20,  49,
    59,  25,  26,  26,  27,  31,  33,  33,  33,  34,  34,  37,  67,  38,  39,  39,
    40,  40,  41,  79,  43,  44,  45,  45,  48,  48,  64,  50,  51,  52,  88,  52,
    53,  74,  55,  57,  58,  58,  74,  60,  101, 61,  62,  84,  66,  66,  68,  69,
    87,  82,  71,  97,  73,  73,  82,  75,  111, 77,  94,  78,  87,  81,  83,  97,
    85,  83,  94,  86,  99,  89,  90,  99,  111, 92,  93,  134, 95,  98,  105, 98,
    105, 110, 102, 108, 102, 118, 103, 106, 106, 113, 109, 112, 114, 112, 116, 125,
    115, 116, 117, 117, 126, 119, 125, 121, 121, 123, 145, 124, 126, 131, 127, 129,
    165, 130, 132, 138, 133, 135, 145, 136, 137, 139, 146, 141, 143, 142, 144, 148,
    147, 155, 151, 149, 151, 150, 152, 157, 153, 154, 156, 168, 158, 162, 161, 160,
    172, 163, 169, 164, 166, 184, 167, 170, 177, 174, 171, 173, 182, 176, 180, 178,
    175, 189, 179, 181, 186, 183, 192, 185, 200, 187, 191, 188, 190, 197, 193, 196,
    197, 194, 195, 196, 198, 202, 199, 201, 210, 203, 207, 204, 205, 206, 208, 214,
    209, 211, 221, 212, 213, 215, 224, 216, 217, 218, 219, 220, 222, 228, 223, 225,
    226, 224, 227, 229, 240, 230, 231, 232, 233, 234, 235, 236, 238, 239, 237, 242,
    241, 243, 242, 244, 245, 246, 247, 248, 249, 250, 251, 252, 252, 253, 254, 255,
};

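/**
 * Search for good initial range-coder states (used with two-pass statistics).
 *
 * For every target one-probability p = i/256, candidate start states near i
 * are simulated forward over up to 256 coded bits; best_state[i][k] records
 * the start state with the lowest estimated code length after k bits.
 */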
static void find_best_state(uint8_t best_state[256][256],
                            const uint8_t one_state[256])
{
    int i, j, k, m;
    double l2tab[256];

    for (i = 1; i < 256; i++)
        l2tab[i] = log2(i / 256.0);

    for (i = 0; i < 256; i++) {
        double best_len[256];
        double p = i / 256.0;

        for (j = 0; j < 256; j++)
            best_len[j] = 1 << 30;

        for (j = FFMAX(i - 10, 1); j < FFMIN(i + 11, 256); j++) {
            double occ[256] = { 0 };
            double len     = 0;
            occ[j] = 1.0;

            if (!one_state[j])
                continue;

            for (k = 0; k < 256; k++) {
                double newocc[256] = { 0 };
                for (m = 1; m < 256; m++)
                    if (occ[m]) {
                        len -= occ[m] * (p * l2tab[m] +
                                         (1 - p) * l2tab[256 - m]);
                    }
                if (len < best_len[k]) {
                    best_len[k]      = len;
                    best_state[i][k] = j;
                }
                for (m = 1; m < 256; m++)
                    if (occ[m]) {
                        newocc[one_state[m]]             += occ[m] * p;
                        newocc[256 - one_state[256 - m]] += occ[m] * (1 - p);
                    }
                memcpy(occ, newocc, sizeof(occ));
            }
        }
    }
}

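/**
 * Write one value with the range coder: a zero flag, a unary exponent
 * (contexts 1..10), the mantissa bits MSB-first (contexts 22..31) and, if
 * requested, a sign bit (contexts 11..21). When rc_stat/rc_stat2 are
 * non-NULL (first pass), the wrapped put_rac macro also counts how often
 * each context emits a 0 or a 1.
 */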
static av_always_inline av_flatten void put_symbol_inline(RangeCoder *c,
                                                          uint8_t *state, int v,
                                                          int is_signed,
                                                          uint64_t rc_stat[256][2],
                                                          uint64_t rc_stat2[32][2])
{
    int i;

#define put_rac(C, S, B)                        \
    do {                                        \
        if (rc_stat) {                          \
            rc_stat[*(S)][B]++;                 \
            rc_stat2[(S) - state][B]++;         \
        }                                       \
        put_rac(C, S, B);                       \
    } while (0)

    if (v) {
        const int a = FFABS(v);
        const int e = av_log2(a);
        put_rac(c, state + 0, 0);
        if (e <= 9) {
            for (i = 0; i < e; i++)
                put_rac(c, state + 1 + i, 1); // 1..10
            put_rac(c, state + 1 + i, 0);

            for (i = e - 1; i >= 0; i--)
                put_rac(c, state + 22 + i, (a >> i) & 1); // 22..31

            if (is_signed)
                put_rac(c, state + 11 + e, v < 0); // 11..21
        } else {
            for (i = 0; i < e; i++)
                put_rac(c, state + 1 + FFMIN(i, 9), 1); // 1..10
            put_rac(c, state + 1 + 9, 0);

            for (i = e - 1; i >= 0; i--)
                put_rac(c, state + 22 + FFMIN(i, 9), (a >> i) & 1); // 22..31

            if (is_signed)
                put_rac(c, state + 11 + 10, v < 0); // 11..21
        }
    } else {
        put_rac(c, state + 0, 1);
    }
#undef put_rac
}

static av_noinline void put_symbol(RangeCoder *c, uint8_t *state,
                                   int v, int is_signed)
{
    put_symbol_inline(c, state, v, is_signed, NULL, NULL);
}

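/**
 * Write one residual in Golomb-Rice mode: fold the value into range, derive
 * the Rice parameter k from the per-context count/error_sum, apply the bias
 * correction through the drift term, emit a limited signed Golomb code and
 * update the adaptive VLC context state.
 */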
static inline void put_vlc_symbol(PutBitContext *pb, VlcState *const state,
                                  int v, int bits)
{
    int i, k, code;
    v = fold(v - state->bias, bits);

    i = state->count;
    k = 0;
    while (i < state->error_sum) { // FIXME: optimize
        k++;
        i += i;
    }

    av_assert2(k <= 13);

#if 0 // JPEG LS
    if (k == 0 && 2 * state->drift <= -state->count)
        code = v ^ (-1);
    else
        code = v;
#else
    code = v ^ ((2 * state->drift + state->count) >> 31);
#endif

    ff_dlog(NULL, "v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code,
            state->bias, state->error_sum, state->drift, state->count, k);
    set_sr_golomb(pb, code, k, 12, bits);

    update_vlc_state(state, v);
}

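/**
 * Encode one line of one plane. For each sample the quantized neighbour
 * context and the median-predicted residual are computed; the residual is
 * coded either with the range coder (put_symbol_inline) or, in Golomb-Rice
 * mode, with run coding for the zero context plus put_vlc_symbol.
 * slice_coding_mode == 1 instead stores the samples as raw bits (PCM).
 */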
static av_always_inline int encode_line(FFV1Context *s, int w,
                                        int16_t *sample[3],
                                        int plane_index, int bits)
{
    PlaneContext *const p = &s->plane[plane_index];
    RangeCoder *const c   = &s->c;
    int x;
    int run_index = s->run_index;
    int run_count = 0;
    int run_mode  = 0;

    if (s->ac != AC_GOLOMB_RICE) {
        if (c->bytestream_end - c->bytestream < w * 35) {
            av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
            return AVERROR_INVALIDDATA;
        }
    } else {
        if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < w * 4) {
            av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if (s->slice_coding_mode == 1) {
        for (x = 0; x < w; x++) {
            int i;
            int v = sample[0][x];
            for (i = bits - 1; i >= 0; i--) {
                uint8_t state = 128;
                put_rac(c, &state, (v >> i) & 1);
            }
        }
        return 0;
    }

    for (x = 0; x < w; x++) {
        int diff, context;

        context = get_context(p, sample[0] + x, sample[1] + x, sample[2] + x);
        diff    = sample[0][x] - predict(sample[0] + x, sample[1] + x);

        if (context < 0) {
            context = -context;
            diff    = -diff;
        }

        diff = fold(diff, bits);

        if (s->ac != AC_GOLOMB_RICE) {
            if (s->flags & AV_CODEC_FLAG_PASS1) {
                put_symbol_inline(c, p->state[context], diff, 1, s->rc_stat,
                                  s->rc_stat2[p->quant_table_index][context]);
            } else {
                put_symbol_inline(c, p->state[context], diff, 1, NULL, NULL);
            }
        } else {
            if (context == 0)
                run_mode = 1;

            if (run_mode) {
                if (diff) {
                    while (run_count >= 1 << ff_log2_run[run_index]) {
                        run_count -= 1 << ff_log2_run[run_index];
                        run_index++;
                        put_bits(&s->pb, 1, 1);
                    }

                    put_bits(&s->pb, 1 + ff_log2_run[run_index], run_count);
                    if (run_index)
                        run_index--;
                    run_count = 0;
                    run_mode  = 0;
                    if (diff > 0)
                        diff--;
                } else {
                    run_count++;
                }
            }

            ff_dlog(s->avctx, "count:%d index:%d, mode:%d, x:%d pos:%d\n",
                    run_count, run_index, run_mode, x,
                    (int)put_bits_count(&s->pb));

            if (run_mode == 0)
                put_vlc_symbol(&s->pb, &p->vlc_state[context], diff, bits);
        }
    }
    if (run_mode) {
        while (run_count >= 1 << ff_log2_run[run_index]) {
            run_count -= 1 << ff_log2_run[run_index];
            run_index++;
            put_bits(&s->pb, 1, 1);
        }

        if (run_count)
            put_bits(&s->pb, 1, 1);
    }
    s->run_index = run_index;

    return 0;
}

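/**
 * Encode a single luma/chroma/alpha plane. Each source line is copied into
 * the 16-bit sample ring buffer (2 or 3 lines deep, depending on the context
 * model), handling 8-bit input and LSB- or MSB-packed 16-bit input, and is
 * then passed to encode_line().
 */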
static int encode_plane(FFV1Context *s, uint8_t *src, int w, int h,
                        int stride, int plane_index, int pixel_stride)
{
    int x, y, i, ret;
    const int ring_size = s->avctx->context_model ? 3 : 2;
    int16_t *sample[3];
    s->run_index = 0;

    memset(s->sample_buffer, 0, ring_size * (w + 6) * sizeof(*s->sample_buffer));

    for (y = 0; y < h; y++) {
        for (i = 0; i < ring_size; i++)
            sample[i] = s->sample_buffer + (w + 6) * ((h + i - y) % ring_size) + 3;

        sample[0][-1] = sample[1][0  ];
        sample[1][ w] = sample[1][w - 1];
// { START_TIMER
        if (s->bits_per_raw_sample <= 8) {
            for (x = 0; x < w; x++)
                sample[0][x] = src[x * pixel_stride + stride * y];
            if ((ret = encode_line(s, w, sample, plane_index, 8)) < 0)
                return ret;
        } else {
            if (s->packed_at_lsb) {
                for (x = 0; x < w; x++) {
                    sample[0][x] = ((uint16_t*)(src + stride*y))[x];
                }
            } else {
                for (x = 0; x < w; x++) {
                    sample[0][x] = ((uint16_t*)(src + stride*y))[x] >> (16 - s->bits_per_raw_sample);
                }
            }
            if ((ret = encode_line(s, w, sample, plane_index, s->bits_per_raw_sample)) < 0)
                return ret;
        }
// STOP_TIMER("encode line") }
    }
    return 0;
}

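/**
 * Encode an RGB(A) slice. Unpacks packed 8-bit RGB32 or planar 16-bit GBR
 * input, applies the reversible colour transform with the per-slice RCT
 * coefficients (unless the slice is coded as PCM), and encodes the G, B, R
 * and optional alpha planes line by line with encode_line().
 */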
static int encode_rgb_frame(FFV1Context *s, const uint8_t *src[3],
                            int w, int h, const int stride[3])
{
    int x, y, p, i;
    const int ring_size = s->avctx->context_model ? 3 : 2;
    int16_t *sample[4][3];
    int lbd    = s->bits_per_raw_sample <= 8;
    int bits   = s->bits_per_raw_sample > 0 ? s->bits_per_raw_sample : 8;
    int offset = 1 << bits;

    s->run_index = 0;

    memset(s->sample_buffer, 0, ring_size * MAX_PLANES *
                                (w + 6) * sizeof(*s->sample_buffer));

    for (y = 0; y < h; y++) {
        for (i = 0; i < ring_size; i++)
            for (p = 0; p < MAX_PLANES; p++)
                sample[p][i] = s->sample_buffer + p*ring_size*(w+6) + ((h+i-y)%ring_size)*(w+6) + 3;

        for (x = 0; x < w; x++) {
            int b, g, r, av_uninit(a);
            if (lbd) {
                unsigned v = *((const uint32_t*)(src[0] + x*4 + stride[0]*y));
                b =  v        & 0xFF;
                g = (v >>  8) & 0xFF;
                r = (v >> 16) & 0xFF;
                a =  v >> 24;
            } else {
                b = *((const uint16_t *)(src[0] + x*2 + stride[0]*y));
                g = *((const uint16_t *)(src[1] + x*2 + stride[1]*y));
                r = *((const uint16_t *)(src[2] + x*2 + stride[2]*y));
            }

            if (s->slice_coding_mode != 1) {
                b -= g;
                r -= g;
                g += (b * s->slice_rct_by_coef + r * s->slice_rct_ry_coef) >> 2;
                b += offset;
                r += offset;
            }

            sample[0][0][x] = g;
            sample[1][0][x] = b;
            sample[2][0][x] = r;
            sample[3][0][x] = a;
        }
        for (p = 0; p < 3 + s->transparency; p++) {
            int ret;
            sample[p][0][-1] = sample[p][1][0  ];
            sample[p][1][ w] = sample[p][1][w - 1];
            if (lbd && s->slice_coding_mode == 0)
                ret = encode_line(s, w, sample[p], (p + 1) / 2, 9);
            else
                ret = encode_line(s, w, sample[p], (p + 1) / 2, bits + (s->slice_coding_mode != 1));
            if (ret < 0)
                return ret;
        }
    }
    return 0;
}

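/**
 * Write one quantization table to the range coder: entries 1..127 are
 * scanned and the distance between positions where the value changes is
 * emitted with put_symbol().
 */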
static void write_quant_table(RangeCoder *c, int16_t *quant_table)
{
    int last = 0;
    int i;
    uint8_t state[CONTEXT_SIZE];
    memset(state, 128, sizeof(state));

    for (i = 1; i < 128; i++)
        if (quant_table[i] != quant_table[i - 1]) {
            put_symbol(c, state, i - last - 1, 0);
            last = i;
        }
    put_symbol(c, state, i - last - 1, 0);
}

static void write_quant_tables(RangeCoder *c,
                               int16_t quant_table[MAX_CONTEXT_INPUTS][256])
{
    int i;
    for (i = 0; i < 5; i++)
        write_quant_table(c, quant_table[i]);
}

static void write_header(FFV1Context *f)
{
    uint8_t state[CONTEXT_SIZE];
    int i, j;
    RangeCoder *const c = &f->slice_context[0]->c;

    memset(state, 128, sizeof(state));

    if (f->version < 2) {
        put_symbol(c, state, f->version, 0);
        put_symbol(c, state, f->ac, 0);
        if (f->ac == AC_RANGE_CUSTOM_TAB) {
            for (i = 1; i < 256; i++)
                put_symbol(c, state,
                           f->state_transition[i] - c->one_state[i], 1);
        }
        put_symbol(c, state, f->colorspace, 0); //YUV cs type
        if (f->version > 0)
            put_symbol(c, state, f->bits_per_raw_sample, 0);
        put_rac(c, state, f->chroma_planes);
        put_symbol(c, state, f->chroma_h_shift, 0);
        put_symbol(c, state, f->chroma_v_shift, 0);
        put_rac(c, state, f->transparency);

        write_quant_tables(c, f->quant_table);
    } else if (f->version < 3) {
        put_symbol(c, state, f->slice_count, 0);
        for (i = 0; i < f->slice_count; i++) {
            FFV1Context *fs = f->slice_context[i];
            put_symbol(c, state,
                       (fs->slice_x + 1) * f->num_h_slices / f->width, 0);
            put_symbol(c, state,
                       (fs->slice_y + 1) * f->num_v_slices / f->height, 0);
            put_symbol(c, state,
                       (fs->slice_width + 1) * f->num_h_slices / f->width - 1,
                       0);
            put_symbol(c, state,
                       (fs->slice_height + 1) * f->num_v_slices / f->height - 1,
                       0);
            for (j = 0; j < f->plane_count; j++) {
                put_symbol(c, state, f->plane[j].quant_table_index, 0);
                av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
            }
        }
    }
}

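/**
 * Write the version 2+ global header into avctx->extradata: version and
 * micro_version, coder type and custom state-transition deltas, colorspace,
 * bit depth, chroma subsampling, transparency, the slice grid, all quant
 * tables, optional initial context states, the error-correction mode and the
 * intra flag, followed by a CRC-32 of the header.
 */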
static int write_extradata(FFV1Context *f)
{
    RangeCoder *const c = &f->c;
    uint8_t state[CONTEXT_SIZE];
    int i, j, k;
    uint8_t state2[32][CONTEXT_SIZE];
    unsigned v;

    memset(state2, 128, sizeof(state2));
    memset(state, 128, sizeof(state));

    f->avctx->extradata_size = 10000 + 4 +
                               (11 * 11 * 5 * 5 * 5 + 11 * 11 * 11) * 32;
    f->avctx->extradata = av_malloc(f->avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!f->avctx->extradata)
        return AVERROR(ENOMEM);
    ff_init_range_encoder(c, f->avctx->extradata, f->avctx->extradata_size);
    ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);

    put_symbol(c, state, f->version, 0);
    if (f->version > 2) {
        if (f->version == 3) {
            f->micro_version = 4;
        } else if (f->version == 4)
            f->micro_version = 2;
        put_symbol(c, state, f->micro_version, 0);
    }
    put_symbol(c, state, f->ac, 0);
    if (f->ac == AC_RANGE_CUSTOM_TAB)
        for (i = 1; i < 256; i++)
            put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);

    put_symbol(c, state, f->colorspace, 0); // YUV cs type
    put_symbol(c, state, f->bits_per_raw_sample, 0);
    put_rac(c, state, f->chroma_planes);
    put_symbol(c, state, f->chroma_h_shift, 0);
    put_symbol(c, state, f->chroma_v_shift, 0);
    put_rac(c, state, f->transparency);
    put_symbol(c, state, f->num_h_slices - 1, 0);
    put_symbol(c, state, f->num_v_slices - 1, 0);

    put_symbol(c, state, f->quant_table_count, 0);
    for (i = 0; i < f->quant_table_count; i++)
        write_quant_tables(c, f->quant_tables[i]);

    for (i = 0; i < f->quant_table_count; i++) {
        for (j = 0; j < f->context_count[i] * CONTEXT_SIZE; j++)
            if (f->initial_states[i] && f->initial_states[i][0][j] != 128)
                break;
        if (j < f->context_count[i] * CONTEXT_SIZE) {
            put_rac(c, state, 1);
            for (j = 0; j < f->context_count[i]; j++)
                for (k = 0; k < CONTEXT_SIZE; k++) {
                    int pred = j ? f->initial_states[i][j - 1][k] : 128;
                    put_symbol(c, state2[k],
                               (int8_t)(f->initial_states[i][j][k] - pred), 1);
                }
        } else {
            put_rac(c, state, 0);
        }
    }

    if (f->version > 2) {
        put_symbol(c, state, f->ec, 0);
        put_symbol(c, state, f->intra = (f->avctx->gop_size < 2), 0);
    }

    f->avctx->extradata_size = ff_rac_terminate(c);
    v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, f->avctx->extradata, f->avctx->extradata_size);
    AV_WL32(f->avctx->extradata + f->avctx->extradata_size, v);
    f->avctx->extradata_size += 4;

    return 0;
}

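/**
 * Reorder the custom state-transition table using the collected two-pass
 * statistics: neighbouring states are swapped whenever the estimated code
 * length (COST2) decreases, repeating until no further improvement is found.
 * Returns nonzero if the table was changed.
 */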
static int sort_stt(FFV1Context *s, uint8_t stt[256])
{
    int i, i2, changed, print = 0;

    do {
        changed = 0;
        for (i = 12; i < 244; i++) {
            for (i2 = i + 1; i2 < 245 && i2 < i + 4; i2++) {

#define COST(old, new)                                      \
    s->rc_stat[old][0] * -log2((256 - (new)) / 256.0) +     \
    s->rc_stat[old][1] * -log2((new)         / 256.0)

#define COST2(old, new)                                     \
    COST(old, new) + COST(256 - (old), 256 - (new))

                double size0 = COST2(i,  i) + COST2(i2, i2);
                double sizeX = COST2(i, i2) + COST2(i2,  i);
                if (size0 - sizeX > size0*(1e-14) && i != 128 && i2 != 128) {
                    int j;
                    FFSWAP(int, stt[i], stt[i2]);
                    FFSWAP(int, s->rc_stat[i][0], s->rc_stat[i2][0]);
                    FFSWAP(int, s->rc_stat[i][1], s->rc_stat[i2][1]);
                    if (i != 256 - i2) {
                        FFSWAP(int, stt[256 - i], stt[256 - i2]);
                        FFSWAP(int, s->rc_stat[256 - i][0], s->rc_stat[256 - i2][0]);
                        FFSWAP(int, s->rc_stat[256 - i][1], s->rc_stat[256 - i2][1]);
                    }
                    for (j = 1; j < 256; j++) {
                        if (stt[j] == i)
                            stt[j] = i2;
                        else if (stt[j] == i2)
                            stt[j] = i;
                        if (i != 256 - i2) {
                            if (stt[256 - j] == 256 - i)
                                stt[256 - j] = 256 - i2;
                            else if (stt[256 - j] == 256 - i2)
                                stt[256 - j] = 256 - i;
                        }
                    }
                    print = changed = 1;
                }
            }
        }
    } while (changed);
    return print;
}

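/**
 * Encoder initialization: pick the FFV1 version from the requested level,
 * slices and two-pass flags, select the entropy coder, derive bit depth,
 * colorspace and plane layout from the pixel format, build the quantization
 * tables and context counts, optionally load two-pass statistics to tune the
 * state-transition table and initial states, choose the slice grid and write
 * the global header (extradata) for version 2+.
 */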
static av_cold int encode_init(AVCodecContext *avctx)
{
    FFV1Context *s = avctx->priv_data;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    int i, j, k, m, ret;

    if ((ret = ff_ffv1_common_init(avctx)) < 0)
        return ret;

    s->version = 0;

    if ((avctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) ||
        avctx->slices > 1)
        s->version = FFMAX(s->version, 2);

    // Unspecified level & slices: choose version 1.2+ to ensure multithreaded decodability
    if (avctx->slices == 0 && avctx->level < 0 && avctx->width * avctx->height > 720*576)
        s->version = FFMAX(s->version, 2);

    if (avctx->level <= 0 && s->version == 2) {
        s->version = 3;
    }
    if (avctx->level >= 0 && avctx->level <= 4) {
        if (avctx->level < s->version) {
            av_log(avctx, AV_LOG_ERROR, "Version %d needed for requested features but %d requested\n", s->version, avctx->level);
            return AVERROR(EINVAL);
        }
        s->version = avctx->level;
    }

    if (s->ec < 0) {
        s->ec = (s->version >= 3);
    }

    if ((s->version == 2 || s->version > 3) && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_ERROR, "Version 2 needed for requested features but version 2 is experimental and not enabled\n");
        return AVERROR_INVALIDDATA;
    }

#if FF_API_CODER_TYPE
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->coder_type != -1)
        s->ac = avctx->coder_type > 0 ? AC_RANGE_CUSTOM_TAB : AC_GOLOMB_RICE;
    else
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    if (s->ac == 1) // Compatibility with common command line usage
        s->ac = AC_RANGE_CUSTOM_TAB;
    else if (s->ac == AC_RANGE_DEFAULT_TAB_FORCE)
        s->ac = AC_RANGE_DEFAULT_TAB;

    s->plane_count = 3;
    switch(avctx->pix_fmt) {
    case AV_PIX_FMT_YUV444P9:
    case AV_PIX_FMT_YUV422P9:
    case AV_PIX_FMT_YUV420P9:
    case AV_PIX_FMT_YUVA444P9:
    case AV_PIX_FMT_YUVA422P9:
    case AV_PIX_FMT_YUVA420P9:
        if (!avctx->bits_per_raw_sample)
            s->bits_per_raw_sample = 9;
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUV420P10:
    case AV_PIX_FMT_YUV422P10:
    case AV_PIX_FMT_YUVA444P10:
    case AV_PIX_FMT_YUVA422P10:
    case AV_PIX_FMT_YUVA420P10:
        s->packed_at_lsb = 1;
        if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
            s->bits_per_raw_sample = 10;
    case AV_PIX_FMT_GRAY16:
    case AV_PIX_FMT_YUV444P16:
    case AV_PIX_FMT_YUV422P16:
    case AV_PIX_FMT_YUV420P16:
    case AV_PIX_FMT_YUVA444P16:
    case AV_PIX_FMT_YUVA422P16:
    case AV_PIX_FMT_YUVA420P16:
        if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) {
            s->bits_per_raw_sample = 16;
        } else if (!s->bits_per_raw_sample) {
            s->bits_per_raw_sample = avctx->bits_per_raw_sample;
        }
        if (s->bits_per_raw_sample <= 8) {
            av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample invalid\n");
            return AVERROR_INVALIDDATA;
        }
        if (s->ac == AC_GOLOMB_RICE) {
            av_log(avctx, AV_LOG_INFO,
                   "bits_per_raw_sample > 8, forcing range coder\n");
            s->ac = AC_RANGE_CUSTOM_TAB;
        }
        s->version = FFMAX(s->version, 1);
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_YA8:
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV440P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUVA444P:
    case AV_PIX_FMT_YUVA422P:
    case AV_PIX_FMT_YUVA420P:
        s->chroma_planes = desc->nb_components < 3 ? 0 : 1;
        s->colorspace    = 0;
        s->transparency  = desc->nb_components == 4 || desc->nb_components == 2;
        if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
            s->bits_per_raw_sample = 8;
        else if (!s->bits_per_raw_sample)
            s->bits_per_raw_sample = 8;
        break;
    case AV_PIX_FMT_RGB32:
        s->colorspace    = 1;
        s->transparency  = 1;
        s->chroma_planes = 1;
        if (!avctx->bits_per_raw_sample)
            s->bits_per_raw_sample = 8;
        break;
    case AV_PIX_FMT_0RGB32:
        s->colorspace    = 1;
        s->chroma_planes = 1;
        if (!avctx->bits_per_raw_sample)
            s->bits_per_raw_sample = 8;
        break;
    case AV_PIX_FMT_GBRP9:
        if (!avctx->bits_per_raw_sample)
            s->bits_per_raw_sample = 9;
    case AV_PIX_FMT_GBRP10:
        if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
            s->bits_per_raw_sample = 10;
    case AV_PIX_FMT_GBRP12:
        if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
            s->bits_per_raw_sample = 12;
    case AV_PIX_FMT_GBRP14:
        if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
            s->bits_per_raw_sample = 14;
        else if (!s->bits_per_raw_sample)
            s->bits_per_raw_sample = avctx->bits_per_raw_sample;
        s->colorspace    = 1;
        s->chroma_planes = 1;
        s->version       = FFMAX(s->version, 1);
        if (s->ac == AC_GOLOMB_RICE) {
            av_log(avctx, AV_LOG_INFO,
                   "bits_per_raw_sample > 8, forcing coder 1\n");
            s->ac = AC_RANGE_CUSTOM_TAB;
        }
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return AVERROR(ENOSYS);
    }
    av_assert0(s->bits_per_raw_sample >= 8);

    if (s->transparency) {
        av_log(avctx, AV_LOG_WARNING, "Storing alpha plane, this will require a recent FFV1 decoder to playback!\n");
    }
    if (avctx->context_model > 1U) {
        av_log(avctx, AV_LOG_ERROR, "Invalid context model %d, valid values are 0 and 1\n", avctx->context_model);
        return AVERROR(EINVAL);
    }

    if (s->ac == AC_RANGE_CUSTOM_TAB) {
        for (i = 1; i < 256; i++)
            s->state_transition[i] = ver2_state[i];
    } else {
        RangeCoder c;
        ff_build_rac_states(&c, 0.05 * (1LL << 32), 256 - 8);
        for (i = 1; i < 256; i++)
            s->state_transition[i] = c.one_state[i];
    }

    for (i = 0; i < 256; i++) {
        s->quant_table_count = 2;
        if (s->bits_per_raw_sample <= 8) {
            s->quant_tables[0][0][i] =           quant11[i];
            s->quant_tables[0][1][i] =        11*quant11[i];
            s->quant_tables[0][2][i] =     11*11*quant11[i];
            s->quant_tables[1][0][i] =           quant11[i];
            s->quant_tables[1][1][i] =        11*quant11[i];
            s->quant_tables[1][2][i] =     11*11*quant5 [i];
            s->quant_tables[1][3][i] =   5*11*11*quant5 [i];
            s->quant_tables[1][4][i] = 5*5*11*11*quant5 [i];
        } else {
            s->quant_tables[0][0][i] =           quant9_10bit[i];
            s->quant_tables[0][1][i] =        11*quant9_10bit[i];
            s->quant_tables[0][2][i] =     11*11*quant9_10bit[i];
            s->quant_tables[1][0][i] =           quant9_10bit[i];
            s->quant_tables[1][1][i] =        11*quant9_10bit[i];
            s->quant_tables[1][2][i] =     11*11*quant5_10bit[i];
            s->quant_tables[1][3][i] =   5*11*11*quant5_10bit[i];
            s->quant_tables[1][4][i] = 5*5*11*11*quant5_10bit[i];
        }
    }
    s->context_count[0] = (11 * 11 * 11 + 1) / 2;
    s->context_count[1] = (11 * 11 * 5 * 5 * 5 + 1) / 2;
    memcpy(s->quant_table, s->quant_tables[avctx->context_model],
           sizeof(s->quant_table));

    for (i = 0; i < s->plane_count; i++) {
        PlaneContext *const p = &s->plane[i];

        memcpy(p->quant_table, s->quant_table, sizeof(p->quant_table));
        p->quant_table_index = avctx->context_model;
        p->context_count     = s->context_count[p->quant_table_index];
    }

    if ((ret = ff_ffv1_allocate_initial_states(s)) < 0)
        return ret;

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    if (!s->transparency)
        s->plane_count = 2;
    if (!s->chroma_planes && s->version > 3)
        s->plane_count--;

    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);
    s->picture_number = 0;

    if (avctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) {
        for (i = 0; i < s->quant_table_count; i++) {
            s->rc_stat2[i] = av_mallocz(s->context_count[i] *
                                        sizeof(*s->rc_stat2[i]));
            if (!s->rc_stat2[i])
                return AVERROR(ENOMEM);
        }
    }
    if (avctx->stats_in) {
        char *p = avctx->stats_in;
        uint8_t (*best_state)[256] = av_malloc_array(256, 256);
        int gob_count = 0;
        char *next;
        if (!best_state)
            return AVERROR(ENOMEM);

        av_assert0(s->version >= 2);

        for (;;) {
            for (j = 0; j < 256; j++)
                for (i = 0; i < 2; i++) {
                    s->rc_stat[j][i] = strtol(p, &next, 0);
                    if (next == p) {
                        av_log(avctx, AV_LOG_ERROR,
                               "2Pass file invalid at %d %d [%s]\n", j, i, p);
                        av_freep(&best_state);
                        return AVERROR_INVALIDDATA;
                    }
                    p = next;
                }
            for (i = 0; i < s->quant_table_count; i++)
                for (j = 0; j < s->context_count[i]; j++) {
                    for (k = 0; k < 32; k++)
                        for (m = 0; m < 2; m++) {
                            s->rc_stat2[i][j][k][m] = strtol(p, &next, 0);
                            if (next == p) {
                                av_log(avctx, AV_LOG_ERROR,
                                       "2Pass file invalid at %d %d %d %d [%s]\n",
                                       i, j, k, m, p);
                                av_freep(&best_state);
                                return AVERROR_INVALIDDATA;
                            }
                            p = next;
                        }
                }
            gob_count = strtol(p, &next, 0);
            if (next == p || gob_count <= 0) {
                av_log(avctx, AV_LOG_ERROR, "2Pass file invalid\n");
                av_freep(&best_state);
                return AVERROR_INVALIDDATA;
            }
            p = next;
            while (*p == '\n' || *p == ' ')
                p++;
            if (p[0] == 0)
                break;
        }

        if (s->ac == AC_RANGE_CUSTOM_TAB)
            sort_stt(s, s->state_transition);

        find_best_state(best_state, s->state_transition);

        for (i = 0; i < s->quant_table_count; i++) {
            for (k = 0; k < 32; k++) {
                double a = 0, b = 0;
                int jp = 0;
                for (j = 0; j < s->context_count[i]; j++) {
                    double p = 128;
                    if (s->rc_stat2[i][j][k][0] + s->rc_stat2[i][j][k][1] > 200 && j || a+b > 200) {
                        if (a+b)
                            p = 256.0 * b / (a + b);
                        s->initial_states[i][jp][k] =
                            best_state[av_clip(round(p), 1, 255)][av_clip_uint8((a + b) / gob_count)];
                        for (jp++; jp < j; jp++)
                            s->initial_states[i][jp][k] = s->initial_states[i][jp - 1][k];
                        a = b = 0;
                    }
                    a += s->rc_stat2[i][j][k][0];
                    b += s->rc_stat2[i][j][k][1];
                    if (a+b) {
                        p = 256.0 * b / (a + b);
                    }
                    s->initial_states[i][j][k] =
                        best_state[av_clip(round(p), 1, 255)][av_clip_uint8((a + b) / gob_count)];
                }
            }
        }
        av_freep(&best_state);
    }
    if (s->version > 1) {
        s->num_v_slices = (avctx->width > 352 || avctx->height > 288 || !avctx->slices) ? 2 : 1;
        for (; s->num_v_slices < 9; s->num_v_slices++) {
            for (s->num_h_slices = s->num_v_slices; s->num_h_slices < 2*s->num_v_slices; s->num_h_slices++) {
                if (avctx->slices == s->num_h_slices * s->num_v_slices && avctx->slices <= 64 || !avctx->slices)
                    goto slices_ok;
            }
        }
        av_log(avctx, AV_LOG_ERROR,
               "Unsupported number %d of slices requested, please specify a "
               "supported number with -slices (ex:4,6,9,12,16, ...)\n",
               avctx->slices);
        return AVERROR(ENOSYS);
slices_ok:
        if ((ret = write_extradata(s)) < 0)
            return ret;
    }

    if ((ret = ff_ffv1_init_slice_contexts(s)) < 0)
        return ret;
    s->slice_count = s->max_slice_count;
    if ((ret = ff_ffv1_init_slices_state(s)) < 0)
        return ret;

#define STATS_OUT_SIZE 1024 * 1024 * 6
    if (avctx->flags & AV_CODEC_FLAG_PASS1) {
        avctx->stats_out = av_mallocz(STATS_OUT_SIZE);
        if (!avctx->stats_out)
            return AVERROR(ENOMEM);
        for (i = 0; i < s->quant_table_count; i++)
            for (j = 0; j < s->max_slice_count; j++) {
                FFV1Context *sf = s->slice_context[j];
                av_assert0(!sf->rc_stat2[i]);
                sf->rc_stat2[i] = av_mallocz(s->context_count[i] *
                                             sizeof(*sf->rc_stat2[i]));
                if (!sf->rc_stat2[i])
                    return AVERROR(ENOMEM);
            }
    }

    return 0;
}

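/**
 * Write the per-slice header (version 3+): slice position and size in units
 * of the slice grid, the quant table index for each plane, picture structure,
 * sample aspect ratio and, for version > 3, the slice coding mode and RCT
 * coefficients.
 */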
static void encode_slice_header(FFV1Context *f, FFV1Context *fs)
{
    RangeCoder *c = &fs->c;
    uint8_t state[CONTEXT_SIZE];
    int j;
    memset(state, 128, sizeof(state));

    put_symbol(c, state, (fs->slice_x      + 1) * f->num_h_slices / f->width,      0);
    put_symbol(c, state, (fs->slice_y      + 1) * f->num_v_slices / f->height,     0);
    put_symbol(c, state, (fs->slice_width  + 1) * f->num_h_slices / f->width  - 1, 0);
    put_symbol(c, state, (fs->slice_height + 1) * f->num_v_slices / f->height - 1, 0);
    for (j = 0; j < f->plane_count; j++) {
        put_symbol(c, state, f->plane[j].quant_table_index, 0);
        av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
    }
    if (!f->picture.f->interlaced_frame)
        put_symbol(c, state, 3, 0);
    else
        put_symbol(c, state, 1 + !f->picture.f->top_field_first, 0);
    put_symbol(c, state, f->picture.f->sample_aspect_ratio.num, 0);
    put_symbol(c, state, f->picture.f->sample_aspect_ratio.den, 0);
    if (f->version > 3) {
        put_rac(c, state, fs->slice_coding_mode == 1);
        if (fs->slice_coding_mode == 1)
            ff_ffv1_clear_slice_state(f, fs);
        put_symbol(c, state, fs->slice_coding_mode, 0);
        if (fs->slice_coding_mode != 1) {
            put_symbol(c, state, fs->slice_rct_by_coef, 0);
            put_symbol(c, state, fs->slice_rct_ry_coef, 0);
        }
    }
}

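/**
 * Pick the RCT coefficients for one slice (version > 3, RGB only): for each
 * candidate pair of Y coefficients, accumulate the absolute value of the
 * resulting residuals over the slice and keep the pair with the smallest
 * total in slice_rct_by_coef / slice_rct_ry_coef.
 */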
static void choose_rct_params(FFV1Context *fs, const uint8_t *src[3], const int stride[3], int w, int h)
{
#define NB_Y_COEFF 15
    static const int rct_y_coeff[15][2] = {
        {0, 0}, //      4G
        {1, 1}, //  R + 2G + B
        {2, 2}, // 2R      + 2B
        {0, 2}, //      2G + 2B
        {2, 0}, // 2R + 2G
        {4, 0}, // 4R
        {0, 4}, //           4B
        {0, 3}, //      1G + 3B
        {3, 0}, // 3R + 1G
        {3, 1}, // 3R      +  B
        {1, 3}, //  R      + 3B
        {1, 2}, //  R +  G + 2B
        {2, 1}, // 2R +  G +  B
        {0, 1}, //      3G +  B
        {1, 0}, //  R + 3G
    };

    int stat[NB_Y_COEFF] = {0};
    int x, y, i, p, best;
    int16_t *sample[3];
    int lbd = fs->bits_per_raw_sample <= 8;

    for (y = 0; y < h; y++) {
        int lastr = 0, lastg = 0, lastb = 0;
        for (p = 0; p < 3; p++)
            sample[p] = fs->sample_buffer + p*w;

        for (x = 0; x < w; x++) {
            int b, g, r;
            int ab, ag, ar;
            if (lbd) {
                unsigned v = *((const uint32_t*)(src[0] + x*4 + stride[0]*y));
                b =  v        & 0xFF;
                g = (v >>  8) & 0xFF;
                r = (v >> 16) & 0xFF;
            } else {
                b = *((const uint16_t*)(src[0] + x*2 + stride[0]*y));
                g = *((const uint16_t*)(src[1] + x*2 + stride[1]*y));
                r = *((const uint16_t*)(src[2] + x*2 + stride[2]*y));
            }

            ar = r - lastr;
            ag = g - lastg;
            ab = b - lastb;
            if (x && y) {
                int bg = ag - sample[0][x];
                int bb = ab - sample[1][x];
                int br = ar - sample[2][x];
                br -= bg;
                bb -= bg;

                for (i = 0; i < NB_Y_COEFF; i++) {
                    stat[i] += FFABS(bg + ((br*rct_y_coeff[i][0] + bb*rct_y_coeff[i][1]) >> 2));
                }
            }
            sample[0][x] = ag;
            sample[1][x] = ab;
            sample[2][x] = ar;

            lastr = r;
            lastg = g;
            lastb = b;
        }
    }

    best = 0;
    for (i = 1; i < NB_Y_COEFF; i++) {
        if (stat[i] < stat[best])
            best = i;
    }

    fs->slice_rct_by_coef = rct_y_coeff[best][1];
    fs->slice_rct_ry_coef = rct_y_coeff[best][0];
}

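/**
 * Per-slice worker invoked through avctx->execute(): chooses the RCT
 * parameters (version > 3), clears the slice state on key frames, writes the
 * slice header (version > 2), sets up the bit writer for Golomb-Rice mode and
 * encodes the slice as YUV planes, YA8 or RGB. If the output buffer is too
 * small and the version allows it, the slice is retried in PCM mode.
 */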
static int encode_slice(AVCodecContext *c, void *arg)
{
    FFV1Context *fs  = *(void **)arg;
    FFV1Context *f   = fs->avctx->priv_data;
    int width        = fs->slice_width;
    int height       = fs->slice_height;
    int x            = fs->slice_x;
    int y            = fs->slice_y;
    const AVFrame *const p = f->picture.f;
    const int ps     = av_pix_fmt_desc_get(c->pix_fmt)->comp[0].step;
    int ret;
    RangeCoder c_bak = fs->c;
    const uint8_t *planes[3] = {p->data[0] + ps*x + y*p->linesize[0],
                                p->data[1] + ps*x + y*p->linesize[1],
                                p->data[2] + ps*x + y*p->linesize[2]};

    fs->slice_coding_mode = 0;
    if (f->version > 3) {
        choose_rct_params(fs, planes, p->linesize, width, height);
    } else {
        fs->slice_rct_by_coef = 1;
        fs->slice_rct_ry_coef = 1;
    }

retry:
    if (f->key_frame)
        ff_ffv1_clear_slice_state(f, fs);
    if (f->version > 2) {
        encode_slice_header(f, fs);
    }
    if (fs->ac == AC_GOLOMB_RICE) {
        if (f->version > 2)
            put_rac(&fs->c, (uint8_t[]) { 129 }, 0);
        fs->ac_byte_count = f->version > 2 || (!x && !y) ? ff_rac_terminate(&fs->c) : 0;
        init_put_bits(&fs->pb,
                      fs->c.bytestream_start + fs->ac_byte_count,
                      fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count);
    }

    if (f->colorspace == 0 && c->pix_fmt != AV_PIX_FMT_YA8) {
        const int chroma_width  = AV_CEIL_RSHIFT(width,  f->chroma_h_shift);
        const int chroma_height = AV_CEIL_RSHIFT(height, f->chroma_v_shift);
        const int cx            = x >> f->chroma_h_shift;
        const int cy            = y >> f->chroma_v_shift;

        ret = encode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0, 1);

        if (f->chroma_planes) {
            ret |= encode_plane(fs, p->data[1] + ps*cx + cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1, 1);
            ret |= encode_plane(fs, p->data[2] + ps*cx + cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1, 1);
        }
        if (fs->transparency)
            ret |= encode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2, 1);
    } else if (c->pix_fmt == AV_PIX_FMT_YA8) {
        ret  = encode_plane(fs, p->data[0] +     ps*x + y*p->linesize[0], width, height, p->linesize[0], 0, 2);
        ret |= encode_plane(fs, p->data[0] + 1 + ps*x + y*p->linesize[0], width, height, p->linesize[0], 1, 2);
    } else {
        ret = encode_rgb_frame(fs, planes, width, height, p->linesize);
    }
    emms_c();

    if (ret < 0) {
        av_assert0(fs->slice_coding_mode == 0);
        if (fs->version < 4 || !fs->ac) {
            av_log(c, AV_LOG_ERROR, "Buffer too small\n");
            return ret;
        }
        av_log(c, AV_LOG_DEBUG, "Coding slice as PCM\n");
        fs->slice_coding_mode = 1;
        fs->c = c_bak;
        goto retry;
    }

    return 0;
}

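/**
 * Top-level encode callback. When called with pict == NULL at the end of a
 * first pass, it serializes the accumulated statistics into avctx->stats_out.
 * Otherwise it allocates the packet, codes the key-frame bit (plus the legacy
 * header for version < 3 on key frames), splits the packet between the slice
 * range coders, runs encode_slice() in parallel and concatenates the slices,
 * appending a 3-byte size (and an optional CRC) to each slice except, for
 * version <= 2, the first.
 */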
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    FFV1Context *f      = avctx->priv_data;
    RangeCoder *const c = &f->slice_context[0]->c;
    AVFrame *const p    = f->picture.f;
    int used_count      = 0;
    uint8_t keystate    = 128;
    uint8_t *buf_p;
    int i, ret;
    int64_t maxsize = AV_INPUT_BUFFER_MIN_SIZE
                      + avctx->width*avctx->height*35LL*4;

    if (!pict) {
        if (avctx->flags & AV_CODEC_FLAG_PASS1) {
            int j, k, m;
            char *p   = avctx->stats_out;
            char *end = p + STATS_OUT_SIZE;

            memset(f->rc_stat, 0, sizeof(f->rc_stat));
            for (i = 0; i < f->quant_table_count; i++)
                memset(f->rc_stat2[i], 0, f->context_count[i] * sizeof(*f->rc_stat2[i]));

            av_assert0(f->slice_count == f->max_slice_count);
            for (j = 0; j < f->slice_count; j++) {
                FFV1Context *fs = f->slice_context[j];
                for (i = 0; i < 256; i++) {
                    f->rc_stat[i][0] += fs->rc_stat[i][0];
                    f->rc_stat[i][1] += fs->rc_stat[i][1];
                }
                for (i = 0; i < f->quant_table_count; i++) {
                    for (k = 0; k < f->context_count[i]; k++)
                        for (m = 0; m < 32; m++) {
                            f->rc_stat2[i][k][m][0] += fs->rc_stat2[i][k][m][0];
                            f->rc_stat2[i][k][m][1] += fs->rc_stat2[i][k][m][1];
                        }
                }
            }

            for (j = 0; j < 256; j++) {
                snprintf(p, end - p, "%" PRIu64 " %" PRIu64 " ",
                         f->rc_stat[j][0], f->rc_stat[j][1]);
                p += strlen(p);
            }
            snprintf(p, end - p, "\n");

            for (i = 0; i < f->quant_table_count; i++) {
                for (j = 0; j < f->context_count[i]; j++)
                    for (m = 0; m < 32; m++) {
                        snprintf(p, end - p, "%" PRIu64 " %" PRIu64 " ",
                                 f->rc_stat2[i][j][m][0], f->rc_stat2[i][j][m][1]);
                        p += strlen(p);
                    }
            }
            snprintf(p, end - p, "%d\n", f->gob_count);
        }
        return 0;
    }

    if (f->version > 3)
        maxsize = AV_INPUT_BUFFER_MIN_SIZE + avctx->width*avctx->height*3LL*4;

    if ((ret = ff_alloc_packet2(avctx, pkt, maxsize, 0)) < 0)
        return ret;

    ff_init_range_encoder(c, pkt->data, pkt->size);
    ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);

    av_frame_unref(p);
    if ((ret = av_frame_ref(p, pict)) < 0)
        return ret;
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;

    if (avctx->gop_size == 0 || f->picture_number % avctx->gop_size == 0) {
        put_rac(c, &keystate, 1);
        f->key_frame = 1;
        f->gob_count++;
        write_header(f);
    } else {
        put_rac(c, &keystate, 0);
        f->key_frame = 0;
    }

    if (f->ac == AC_RANGE_CUSTOM_TAB) {
        int i;
        for (i = 1; i < 256; i++) {
            c->one_state[i]        = f->state_transition[i];
            c->zero_state[256 - i] = 256 - c->one_state[i];
        }
    }

    for (i = 1; i < f->slice_count; i++) {
        FFV1Context *fs = f->slice_context[i];
        uint8_t *start  = pkt->data + (pkt->size - used_count) * (int64_t)i / f->slice_count;
        int len         = pkt->size / f->slice_count;
        ff_init_range_encoder(&fs->c, start, len);
    }
    avctx->execute(avctx, encode_slice, &f->slice_context[0], NULL,
                   f->slice_count, sizeof(void *));

    buf_p = pkt->data;
    for (i = 0; i < f->slice_count; i++) {
        FFV1Context *fs = f->slice_context[i];
        int bytes;

        if (fs->ac != AC_GOLOMB_RICE) {
            uint8_t state = 129;
            put_rac(&fs->c, &state, 0);
            bytes = ff_rac_terminate(&fs->c);
        } else {
            flush_put_bits(&fs->pb); // FIXME: nicer padding
            bytes = fs->ac_byte_count + (put_bits_count(&fs->pb) + 7) / 8;
        }
        if (i > 0 || f->version > 2) {
            av_assert0(bytes < pkt->size / f->slice_count);
            memmove(buf_p, fs->c.bytestream_start, bytes);
            av_assert0(bytes < (1 << 24));
            AV_WB24(buf_p + bytes, bytes);
            bytes += 3;
        }
        if (f->ec) {
            unsigned v;
            buf_p[bytes++] = 0;
            v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, buf_p, bytes);
            AV_WL32(buf_p + bytes, v);
            bytes += 4;
        }
        buf_p += bytes;
    }

    if (avctx->flags & AV_CODEC_FLAG_PASS1)
        avctx->stats_out[0] = '\0';

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->key_frame = f->key_frame;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    f->picture_number++;
    pkt->size   = buf_p - pkt->data;
    pkt->pts    =
    pkt->dts    = pict->pts;
    pkt->flags |= AV_PKT_FLAG_KEY * f->key_frame;
    *got_packet = 1;

    return 0;
}

static av_cold int encode_close(AVCodecContext *avctx)
{
    ff_ffv1_close(avctx);
    return 0;
}

#define OFFSET(x) offsetof(FFV1Context, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "slicecrc", "Protect slices with CRCs", OFFSET(ec), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VE },
    { "coder", "Coder type", OFFSET(ac), AV_OPT_TYPE_INT,
            { .i64 = 0 }, -2, 2, VE, "coder" },
        { "rice", "Golomb rice", 0, AV_OPT_TYPE_CONST,
            { .i64 = AC_GOLOMB_RICE }, INT_MIN, INT_MAX, VE, "coder" },
        { "range_def", "Range with default table", 0, AV_OPT_TYPE_CONST,
            { .i64 = AC_RANGE_DEFAULT_TAB_FORCE }, INT_MIN, INT_MAX, VE, "coder" },
        { "range_tab", "Range with custom table", 0, AV_OPT_TYPE_CONST,
            { .i64 = AC_RANGE_CUSTOM_TAB }, INT_MIN, INT_MAX, VE, "coder" },
        { "ac", "Range with custom table (the ac option exists for compatibility and is deprecated)", 0, AV_OPT_TYPE_CONST,
            { .i64 = 1 }, INT_MIN, INT_MAX, VE, "coder" },
    { NULL }
};

static const AVClass ffv1_class = {
    .class_name = "ffv1 encoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

#if FF_API_CODER_TYPE
static const AVCodecDefault ffv1_defaults[] = {
    { "coder", "-1" },
    { NULL },
};
#endif

AVCodec ff_ffv1_encoder = {
    .name           = "ffv1",
    .long_name      = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FFV1,
    .priv_data_size = sizeof(FFV1Context),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_close,
    .capabilities   = AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_DELAY,
    .pix_fmts       = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUV420P,   AV_PIX_FMT_YUVA420P,  AV_PIX_FMT_YUVA422P,  AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUVA444P,  AV_PIX_FMT_YUV440P,   AV_PIX_FMT_YUV422P,   AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV410P,   AV_PIX_FMT_0RGB32,    AV_PIX_FMT_RGB32,     AV_PIX_FMT_YUV420P16,
        AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16, AV_PIX_FMT_YUV444P9,  AV_PIX_FMT_YUV422P9,
        AV_PIX_FMT_YUV420P9,  AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUVA444P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA420P16,
        AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA420P10,
        AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA420P9,
        AV_PIX_FMT_GRAY16,    AV_PIX_FMT_GRAY8,     AV_PIX_FMT_GBRP9,     AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12,    AV_PIX_FMT_GBRP14,
        AV_PIX_FMT_YA8,
        AV_PIX_FMT_NONE
    },
#if FF_API_CODER_TYPE
    .defaults       = ffv1_defaults,
#endif
    .priv_class     = &ffv1_class,
};