/*
 * FFV1 encoder
 *
 * Copyright (c) 2003-2013 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * FF Video Codec 1 (a lossless codec) encoder
 */

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/crc.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timer.h"
#include "avcodec.h"
#include "internal.h"
#include "put_bits.h"
#include "rangecoder.h"
#include "golomb.h"
#include "mathops.h"
#include "ffv1.h"

static const int8_t quant5_10bit[256] = {
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  1,  1,  1,  1,  1,
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
     1,  1,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0,
};

static const int8_t quant5[256] = {
     0,  1,  1,  1,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1,
};

static const int8_t quant9_10bit[256] = {
     0,  0,  0,  0,  0,  1,  1,  1,  1,  1,  1,  1,  1,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  3,  3,  3,  3,  3,
     3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,
     3,  3,  3,  3,  3,  3,  3,  3,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
    -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
    -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
    -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
    -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3,
    -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3,
    -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -0, -0, -0, -0,
};

static const int8_t quant11[256] = {
     0,  1,  2,  2,  2,  3,  3,  3,  3,  3,  3,  3,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,
     5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,
     5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,
     5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,
     5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,
     5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,
    -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5,
    -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5,
    -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5,
    -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5,
    -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5,
    -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -4, -4,
    -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
    -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -1,
};

static const uint8_t ver2_state[256] = {
      0,  10,  10,  10,  10,  16,  16,  16,  28,  16,  16,  29,  42,  49,  20,  49,
     59,  25,  26,  26,  27,  31,  33,  33,  33,  34,  34,  37,  67,  38,  39,  39,
     40,  40,  41,  79,  43,  44,  45,  45,  48,  48,  64,  50,  51,  52,  88,  52,
     53,  74,  55,  57,  58,  58,  74,  60, 101,  61,  62,  84,  66,  66,  68,  69,
     87,  82,  71,  97,  73,  73,  82,  75, 111,  77,  94,  78,  87,  81,  83,  97,
     85,  83,  94,  86,  99,  89,  90,  99, 111,  92,  93, 134,  95,  98, 105,  98,
    105, 110, 102, 108, 102, 118, 103, 106, 106, 113, 109, 112, 114, 112, 116, 125,
    115, 116, 117, 117, 126, 119, 125, 121, 121, 123, 145, 124, 126, 131, 127, 129,
    165, 130, 132, 138, 133, 135, 145, 136, 137, 139, 146, 141, 143, 142, 144, 148,
    147, 155, 151, 149, 151, 150, 152, 157, 153, 154, 156, 168, 158, 162, 161, 160,
    172, 163, 169, 164, 166, 184, 167, 170, 177, 174, 171, 173, 182, 176, 180, 178,
    175, 189, 179, 181, 186, 183, 192, 185, 200, 187, 191, 188, 190, 197, 193, 196,
    197, 194, 195, 196, 198, 202, 199, 201, 210, 203, 207, 204, 205, 206, 208, 214,
    209, 211, 221, 212, 213, 215, 224, 216, 217, 218, 219, 220, 222, 228, 223, 225,
    226, 224, 227, 229, 240, 230, 231, 232, 233, 234, 235, 236, 238, 239, 237, 242,
    241, 243, 242, 244, 245, 246, 247, 248, 249, 250, 251, 252, 252, 253, 254, 255,
};

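/**
 * For every symbol probability p = i/256 and every number of coded symbols k,
 * search the states near i for the starting state that minimizes the expected
 * code length after k steps of the given state-transition table. The result
 * is used to derive initial per-context states from two-pass statistics.
 */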
static void find_best_state(uint8_t best_state[256][256],
                            const uint8_t one_state[256])
{
    int i, j, k, m;
    double l2tab[256];

    for (i = 1; i < 256; i++)
        l2tab[i] = log2(i / 256.0);

    for (i = 0; i < 256; i++) {
        double best_len[256];
        double p = i / 256.0;

        for (j = 0; j < 256; j++)
            best_len[j] = 1 << 30;

        for (j = FFMAX(i - 10, 1); j < FFMIN(i + 11, 256); j++) {
            double occ[256] = { 0 };
            double len      = 0;
            occ[j] = 1.0;

            if (!one_state[j])
                continue;

            for (k = 0; k < 256; k++) {
                double newocc[256] = { 0 };
                for (m = 1; m < 256; m++)
                    if (occ[m]) {
                        len -= occ[m] * (p * l2tab[m] +
                                         (1 - p) * l2tab[256 - m]);
                    }
                if (len < best_len[k]) {
                    best_len[k]      = len;
                    best_state[i][k] = j;
                }
                for (m = 1; m < 256; m++)
                    if (occ[m]) {
                        newocc[one_state[m]]             += occ[m] * p;
                        newocc[256 - one_state[256 - m]] += occ[m] * (1 - p);
                    }
                memcpy(occ, newocc, sizeof(occ));
            }
        }
    }
}

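/**
 * Write a (possibly signed) integer with the range coder: a zero flag, the
 * unary-coded exponent, the mantissa bits and an optional sign bit. The local
 * put_rac() wrapper additionally gathers per-state 0/1 counts into
 * rc_stat/rc_stat2 during the first pass of two-pass encoding.
 */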
static av_always_inline av_flatten void put_symbol_inline(RangeCoder *c,
                                                          uint8_t *state, int v,
                                                          int is_signed,
                                                          uint64_t rc_stat[256][2],
                                                          uint64_t rc_stat2[32][2])
{
    int i;

#define put_rac(C, S, B)                \
    do {                                \
        if (rc_stat) {                  \
            rc_stat[*(S)][B]++;         \
            rc_stat2[(S) - state][B]++; \
        }                               \
        put_rac(C, S, B);               \
    } while (0)

    if (v) {
        const int a = FFABS(v);
        const int e = av_log2(a);
        put_rac(c, state + 0, 0);
        if (e <= 9) {
            for (i = 0; i < e; i++)
                put_rac(c, state + 1 + i, 1); // 1..10
            put_rac(c, state + 1 + i, 0);

            for (i = e - 1; i >= 0; i--)
                put_rac(c, state + 22 + i, (a >> i) & 1); // 22..31

            if (is_signed)
                put_rac(c, state + 11 + e, v < 0); // 11..21
        } else {
            for (i = 0; i < e; i++)
                put_rac(c, state + 1 + FFMIN(i, 9), 1); // 1..10
            put_rac(c, state + 1 + 9, 0);

            for (i = e - 1; i >= 0; i--)
                put_rac(c, state + 22 + FFMIN(i, 9), (a >> i) & 1); // 22..31

            if (is_signed)
                put_rac(c, state + 11 + 10, v < 0); // 11..21
        }
    } else {
        put_rac(c, state + 0, 1);
    }
#undef put_rac
}

static av_noinline void put_symbol(RangeCoder *c, uint8_t *state,
                                   int v, int is_signed)
{
    put_symbol_inline(c, state, v, is_signed, NULL, NULL);
}

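/**
 * Golomb-Rice path: fold the prediction residual, derive the Rice parameter k
 * from the running error statistics, apply the drift-based sign remapping and
 * write a signed Golomb-Rice code via set_sr_golomb(), then update the
 * adaptive VLC state.
 */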
static inline void put_vlc_symbol(PutBitContext *pb, VlcState *const state,
                                  int v, int bits)
{
    int i, k, code;
    v = fold(v - state->bias, bits);

    i = state->count;
    k = 0;
    while (i < state->error_sum) { // FIXME: optimize
        k++;
        i += i;
    }

    av_assert2(k <= 13);

#if 0 // JPEG LS
    if (k == 0 && 2 * state->drift <= -state->count)
        code = v ^ (-1);
    else
        code = v;
#else
    code = v ^ ((2 * state->drift + state->count) >> 31);
#endif

    ff_dlog(NULL, "v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code,
            state->bias, state->error_sum, state->drift, state->count, k);
    set_sr_golomb(pb, code, k, 12, bits);

    update_vlc_state(state, v);
}

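/**
 * Encode one line of prediction residuals for a plane. With the range coder,
 * each residual is coded with the adaptive states selected by the quantized
 * neighbour context; with Golomb-Rice coding, zero residuals in context 0 are
 * run-length coded. slice_coding_mode == 1 writes the raw sample bits instead
 * (PCM fallback).
 */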
static av_always_inline int encode_line(FFV1Context *s, int w,
                                        int16_t *sample[3],
                                        int plane_index, int bits)
{
    PlaneContext *const p = &s->plane[plane_index];
    RangeCoder *const c   = &s->c;
    int x;
    int run_index = s->run_index;
    int run_count = 0;
    int run_mode  = 0;

    if (s->ac != AC_GOLOMB_RICE) {
        if (c->bytestream_end - c->bytestream < w * 35) {
            av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
            return AVERROR_INVALIDDATA;
        }
    } else {
        if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < w * 4) {
            av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if (s->slice_coding_mode == 1) {
        for (x = 0; x < w; x++) {
            int i;
            int v = sample[0][x];
            for (i = bits - 1; i >= 0; i--) {
                uint8_t state = 128;
                put_rac(c, &state, (v >> i) & 1);
            }
        }
        return 0;
    }

    for (x = 0; x < w; x++) {
        int diff, context;

        context = get_context(p, sample[0] + x, sample[1] + x, sample[2] + x);
        diff    = sample[0][x] - predict(sample[0] + x, sample[1] + x);

        if (context < 0) {
            context = -context;
            diff    = -diff;
        }

        diff = fold(diff, bits);

        if (s->ac != AC_GOLOMB_RICE) {
            if (s->flags & AV_CODEC_FLAG_PASS1) {
                put_symbol_inline(c, p->state[context], diff, 1, s->rc_stat,
                                  s->rc_stat2[p->quant_table_index][context]);
            } else {
                put_symbol_inline(c, p->state[context], diff, 1, NULL, NULL);
            }
        } else {
            if (context == 0)
                run_mode = 1;

            if (run_mode) {
                if (diff) {
                    while (run_count >= 1 << ff_log2_run[run_index]) {
                        run_count -= 1 << ff_log2_run[run_index];
                        run_index++;
                        put_bits(&s->pb, 1, 1);
                    }

                    put_bits(&s->pb, 1 + ff_log2_run[run_index], run_count);
                    if (run_index)
                        run_index--;
                    run_count = 0;
                    run_mode  = 0;
                    if (diff > 0)
                        diff--;
                } else {
                    run_count++;
                }
            }

            ff_dlog(s->avctx, "count:%d index:%d, mode:%d, x:%d pos:%d\n",
                    run_count, run_index, run_mode, x,
                    (int)put_bits_count(&s->pb));

            if (run_mode == 0)
                put_vlc_symbol(&s->pb, &p->vlc_state[context], diff, bits);
        }
    }
    if (run_mode) {
        while (run_count >= 1 << ff_log2_run[run_index]) {
            run_count -= 1 << ff_log2_run[run_index];
            run_index++;
            put_bits(&s->pb, 1, 1);
        }

        if (run_count)
            put_bits(&s->pb, 1, 1);
    }
    s->run_index = run_index;

    return 0;
}

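/**
 * Encode a single plane: copy each source row into the 16-bit sample ring
 * buffer (8-bit, LSB-packed or MSB-packed >8-bit input), extend the row
 * borders and hand the row to encode_line().
 */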
static int encode_plane(FFV1Context *s, uint8_t *src, int w, int h,
                        int stride, int plane_index, int pixel_stride)
{
    int x, y, i, ret;
    const int ring_size = s->context_model ? 3 : 2;
    int16_t *sample[3];
    s->run_index = 0;

    memset(s->sample_buffer, 0, ring_size * (w + 6) * sizeof(*s->sample_buffer));

    for (y = 0; y < h; y++) {
        for (i = 0; i < ring_size; i++)
            sample[i] = s->sample_buffer + (w + 6) * ((h + i - y) % ring_size) + 3;

        sample[0][-1] = sample[1][0];
        sample[1][w]  = sample[1][w - 1];

        // { START_TIMER
        if (s->bits_per_raw_sample <= 8) {
            for (x = 0; x < w; x++)
                sample[0][x] = src[x * pixel_stride + stride * y];
            if ((ret = encode_line(s, w, sample, plane_index, 8)) < 0)
                return ret;
        } else {
            if (s->packed_at_lsb) {
                for (x = 0; x < w; x++) {
                    sample[0][x] = ((uint16_t*)(src + stride * y))[x];
                }
            } else {
                for (x = 0; x < w; x++) {
                    sample[0][x] = ((uint16_t*)(src + stride * y))[x] >> (16 - s->bits_per_raw_sample);
                }
            }
            if ((ret = encode_line(s, w, sample, plane_index, s->bits_per_raw_sample)) < 0)
                return ret;
        }
        // STOP_TIMER("encode line") }
    }
    return 0;
}

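/**
 * Encode an RGB(A) slice: unpack packed 8-bit RGB32 or planar 16-bit input,
 * apply the reversible color transform with the per-slice coefficients
 * (skipped when the slice is coded as PCM) and encode the resulting planes
 * line by line.
 */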
static int encode_rgb_frame(FFV1Context *s, const uint8_t *src[3],
                            int w, int h, const int stride[3])
{
    int x, y, p, i;
    const int ring_size = s->context_model ? 3 : 2;
    int16_t *sample[4][3];
    int lbd    = s->bits_per_raw_sample <= 8;
    int bits   = s->bits_per_raw_sample > 0 ? s->bits_per_raw_sample : 8;
    int offset = 1 << bits;

    s->run_index = 0;

    memset(s->sample_buffer, 0, ring_size * MAX_PLANES *
                                (w + 6) * sizeof(*s->sample_buffer));

    for (y = 0; y < h; y++) {
        for (i = 0; i < ring_size; i++)
            for (p = 0; p < MAX_PLANES; p++)
                sample[p][i] = s->sample_buffer + p * ring_size * (w + 6) +
                               ((h + i - y) % ring_size) * (w + 6) + 3;

        for (x = 0; x < w; x++) {
            int b, g, r, av_uninit(a);
            if (lbd) {
                unsigned v = *((const uint32_t*)(src[0] + x * 4 + stride[0] * y));
                b =  v        & 0xFF;
                g = (v >>  8) & 0xFF;
                r = (v >> 16) & 0xFF;
                a =  v >> 24;
            } else {
                b = *((const uint16_t *)(src[0] + x * 2 + stride[0] * y));
                g = *((const uint16_t *)(src[1] + x * 2 + stride[1] * y));
                r = *((const uint16_t *)(src[2] + x * 2 + stride[2] * y));
            }

            if (s->slice_coding_mode != 1) {
                b -= g;
                r -= g;
                g += (b * s->slice_rct_by_coef + r * s->slice_rct_ry_coef) >> 2;
                b += offset;
                r += offset;
            }

            sample[0][0][x] = g;
            sample[1][0][x] = b;
            sample[2][0][x] = r;
            sample[3][0][x] = a;
        }
        for (p = 0; p < 3 + s->transparency; p++) {
            int ret;
            sample[p][0][-1] = sample[p][1][0];
            sample[p][1][w]  = sample[p][1][w - 1];
            if (lbd && s->slice_coding_mode == 0)
                ret = encode_line(s, w, sample[p], (p + 1) / 2, 9);
            else
                ret = encode_line(s, w, sample[p], (p + 1) / 2, bits + (s->slice_coding_mode != 1));
            if (ret < 0)
                return ret;
        }
    }
    return 0;
}

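/**
 * Store one context quantization table as the distances between value
 * changes over its first 128 entries.
 */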
static void write_quant_table(RangeCoder *c, int16_t *quant_table)
{
    int last = 0;
    int i;
    uint8_t state[CONTEXT_SIZE];
    memset(state, 128, sizeof(state));

    for (i = 1; i < 128; i++)
        if (quant_table[i] != quant_table[i - 1]) {
            put_symbol(c, state, i - last - 1, 0);
            last = i;
        }
    put_symbol(c, state, i - last - 1, 0);
}

static void write_quant_tables(RangeCoder *c,
                               int16_t quant_table[MAX_CONTEXT_INPUTS][256])
{
    int i;
    for (i = 0; i < 5; i++)
        write_quant_table(c, quant_table[i]);
}

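/**
 * Write the per-frame header: for version 0/1 the global parameters, for
 * version 2 the slice geometry and per-plane quant table indices. For
 * version >= 3 nothing is written here; global parameters go into the
 * extradata and slice geometry into the slice headers.
 */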
static void write_header(FFV1Context *f)
{
    uint8_t state[CONTEXT_SIZE];
    int i, j;
    RangeCoder *const c = &f->slice_context[0]->c;

    memset(state, 128, sizeof(state));

    if (f->version < 2) {
        put_symbol(c, state, f->version, 0);
        put_symbol(c, state, f->ac, 0);
        if (f->ac == AC_RANGE_CUSTOM_TAB) {
            for (i = 1; i < 256; i++)
                put_symbol(c, state,
                           f->state_transition[i] - c->one_state[i], 1);
        }
        put_symbol(c, state, f->colorspace, 0); // YUV cs type
        if (f->version > 0)
            put_symbol(c, state, f->bits_per_raw_sample, 0);
        put_rac(c, state, f->chroma_planes);
        put_symbol(c, state, f->chroma_h_shift, 0);
        put_symbol(c, state, f->chroma_v_shift, 0);
        put_rac(c, state, f->transparency);

        write_quant_tables(c, f->quant_table);
    } else if (f->version < 3) {
        put_symbol(c, state, f->slice_count, 0);
        for (i = 0; i < f->slice_count; i++) {
            FFV1Context *fs = f->slice_context[i];
            put_symbol(c, state,
                       (fs->slice_x + 1) * f->num_h_slices / f->width, 0);
            put_symbol(c, state,
                       (fs->slice_y + 1) * f->num_v_slices / f->height, 0);
            put_symbol(c, state,
                       (fs->slice_width + 1) * f->num_h_slices / f->width - 1,
                       0);
            put_symbol(c, state,
                       (fs->slice_height + 1) * f->num_v_slices / f->height - 1,
                       0);
            for (j = 0; j < f->plane_count; j++) {
                put_symbol(c, state, f->plane[j].quant_table_index, 0);
                av_assert0(f->plane[j].quant_table_index == f->context_model);
            }
        }
    }
}

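/**
 * Build the version >= 2 global header in avctx->extradata: version and
 * micro_version, coder type (plus custom state-transition deltas),
 * colorspace, bit depth, chroma subsampling, alpha, slice grid, the
 * quantization tables, optional initial context states, the error-correction
 * and intra flags, followed by a CRC-32 of the header.
 */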
static int write_extradata(FFV1Context *f)
{
    RangeCoder *const c = &f->c;
    uint8_t state[CONTEXT_SIZE];
    int i, j, k;
    uint8_t state2[32][CONTEXT_SIZE];
    unsigned v;

    memset(state2, 128, sizeof(state2));
    memset(state, 128, sizeof(state));

    f->avctx->extradata_size = 10000 + 4 +
                               (11 * 11 * 5 * 5 * 5 + 11 * 11 * 11) * 32;
    f->avctx->extradata = av_malloc(f->avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!f->avctx->extradata)
        return AVERROR(ENOMEM);
    ff_init_range_encoder(c, f->avctx->extradata, f->avctx->extradata_size);
    ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);

    put_symbol(c, state, f->version, 0);
    if (f->version > 2) {
        if (f->version == 3) {
            f->micro_version = 4;
        } else if (f->version == 4)
            f->micro_version = 2;
        put_symbol(c, state, f->micro_version, 0);
    }
    put_symbol(c, state, f->ac, 0);
    if (f->ac == AC_RANGE_CUSTOM_TAB)
        for (i = 1; i < 256; i++)
            put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);

    put_symbol(c, state, f->colorspace, 0); // YUV cs type
    put_symbol(c, state, f->bits_per_raw_sample, 0);
    put_rac(c, state, f->chroma_planes);
    put_symbol(c, state, f->chroma_h_shift, 0);
    put_symbol(c, state, f->chroma_v_shift, 0);
    put_rac(c, state, f->transparency);
    put_symbol(c, state, f->num_h_slices - 1, 0);
    put_symbol(c, state, f->num_v_slices - 1, 0);

    put_symbol(c, state, f->quant_table_count, 0);
    for (i = 0; i < f->quant_table_count; i++)
        write_quant_tables(c, f->quant_tables[i]);

    for (i = 0; i < f->quant_table_count; i++) {
        for (j = 0; j < f->context_count[i] * CONTEXT_SIZE; j++)
            if (f->initial_states[i] && f->initial_states[i][0][j] != 128)
                break;
        if (j < f->context_count[i] * CONTEXT_SIZE) {
            put_rac(c, state, 1);
            for (j = 0; j < f->context_count[i]; j++)
                for (k = 0; k < CONTEXT_SIZE; k++) {
                    int pred = j ? f->initial_states[i][j - 1][k] : 128;
                    put_symbol(c, state2[k],
                               (int8_t)(f->initial_states[i][j][k] - pred), 1);
                }
        } else {
            put_rac(c, state, 0);
        }
    }

    if (f->version > 2) {
        put_symbol(c, state, f->ec, 0);
        put_symbol(c, state, f->intra = (f->avctx->gop_size < 2), 0);
    }

    f->avctx->extradata_size = ff_rac_terminate(c);
    v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, f->avctx->extradata, f->avctx->extradata_size);
    AV_WL32(f->avctx->extradata + f->avctx->extradata_size, v);
    f->avctx->extradata_size += 4;

    return 0;
}

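/**
 * Greedily swap nearby entries of the custom state-transition table when the
 * two-pass statistics indicate the swap reduces the estimated coded size;
 * returns nonzero if anything changed.
 */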
static int sort_stt(FFV1Context *s, uint8_t stt[256])
{
    int i, i2, changed, print = 0;

    do {
        changed = 0;
        for (i = 12; i < 244; i++) {
            for (i2 = i + 1; i2 < 245 && i2 < i + 4; i2++) {

#define COST(old, new)                                      \
    s->rc_stat[old][0] * -log2((256 - (new)) / 256.0) +     \
    s->rc_stat[old][1] * -log2((new)         / 256.0)

#define COST2(old, new)                                     \
    COST(old, new) + COST(256 - (old), 256 - (new))

                double size0 = COST2(i,  i) + COST2(i2, i2);
                double sizeX = COST2(i, i2) + COST2(i2,  i);
                if (size0 - sizeX > size0*(1e-14) && i != 128 && i2 != 128) {
                    int j;
                    FFSWAP(int, stt[i], stt[i2]);
                    FFSWAP(int, s->rc_stat[i][0], s->rc_stat[i2][0]);
                    FFSWAP(int, s->rc_stat[i][1], s->rc_stat[i2][1]);
                    if (i != 256 - i2) {
                        FFSWAP(int, stt[256 - i], stt[256 - i2]);
                        FFSWAP(int, s->rc_stat[256 - i][0], s->rc_stat[256 - i2][0]);
                        FFSWAP(int, s->rc_stat[256 - i][1], s->rc_stat[256 - i2][1]);
                    }
                    for (j = 1; j < 256; j++) {
                        if (stt[j] == i)
                            stt[j] = i2;
                        else if (stt[j] == i2)
                            stt[j] = i;
                        if (i != 256 - i2) {
                            if (stt[256 - j] == 256 - i)
                                stt[256 - j] = 256 - i2;
                            else if (stt[256 - j] == 256 - i2)
                                stt[256 - j] = 256 - i;
                        }
                    }
                    print = changed = 1;
                }
            }
        }
    } while (changed);
    return print;
}

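/**
 * Encoder setup: pick the bitstream version from the requested level, slices
 * and two-pass flags, select the coder, map the pixel format to
 * colorspace/bit depth/plane layout, build the quantization and
 * state-transition tables, optionally load two-pass statistics to seed the
 * initial context states, choose a slice grid and write the extradata.
 */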
static av_cold int encode_init(AVCodecContext *avctx)
{
    FFV1Context *s = avctx->priv_data;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    int i, j, k, m, ret;

    if ((ret = ff_ffv1_common_init(avctx)) < 0)
        return ret;

    s->version = 0;

    if ((avctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) ||
        avctx->slices > 1)
        s->version = FFMAX(s->version, 2);

    // Unspecified level & slices, we choose version 1.2+ to ensure multithreaded decodability
    if (avctx->slices == 0 && avctx->level < 0 && avctx->width * avctx->height > 720*576)
        s->version = FFMAX(s->version, 2);

    if (avctx->level <= 0 && s->version == 2) {
        s->version = 3;
    }
    if (avctx->level >= 0 && avctx->level <= 4) {
        if (avctx->level < s->version) {
            av_log(avctx, AV_LOG_ERROR, "Version %d needed for requested features but %d requested\n", s->version, avctx->level);
            return AVERROR(EINVAL);
        }
        s->version = avctx->level;
    }

    if (s->ec < 0) {
        s->ec = (s->version >= 3);
    }

    if ((s->version == 2 || s->version > 3) && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_ERROR, "Version 2 needed for requested features but version 2 is experimental and not enabled\n");
        return AVERROR_INVALIDDATA;
    }

#if FF_API_CODER_TYPE
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->coder_type != -1)
        s->ac = avctx->coder_type > 0 ? AC_RANGE_CUSTOM_TAB : AC_GOLOMB_RICE;
    else
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    if (s->ac == 1) // Compatibility with common command line usage
        s->ac = AC_RANGE_CUSTOM_TAB;
    else if (s->ac == AC_RANGE_DEFAULT_TAB_FORCE)
        s->ac = AC_RANGE_DEFAULT_TAB;

    s->plane_count = 3;
    switch(avctx->pix_fmt) {
    case AV_PIX_FMT_YUV444P9:
    case AV_PIX_FMT_YUV422P9:
    case AV_PIX_FMT_YUV420P9:
    case AV_PIX_FMT_YUVA444P9:
    case AV_PIX_FMT_YUVA422P9:
    case AV_PIX_FMT_YUVA420P9:
        if (!avctx->bits_per_raw_sample)
            s->bits_per_raw_sample = 9;
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUV420P10:
    case AV_PIX_FMT_YUV422P10:
    case AV_PIX_FMT_YUVA444P10:
    case AV_PIX_FMT_YUVA422P10:
    case AV_PIX_FMT_YUVA420P10:
        s->packed_at_lsb = 1;
        if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
            s->bits_per_raw_sample = 10;
    case AV_PIX_FMT_GRAY16:
    case AV_PIX_FMT_YUV444P16:
    case AV_PIX_FMT_YUV422P16:
    case AV_PIX_FMT_YUV420P16:
    case AV_PIX_FMT_YUVA444P16:
    case AV_PIX_FMT_YUVA422P16:
    case AV_PIX_FMT_YUVA420P16:
        if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) {
            s->bits_per_raw_sample = 16;
        } else if (!s->bits_per_raw_sample) {
            s->bits_per_raw_sample = avctx->bits_per_raw_sample;
        }
        if (s->bits_per_raw_sample <= 8) {
            av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample invalid\n");
            return AVERROR_INVALIDDATA;
        }
        if (s->ac == AC_GOLOMB_RICE) {
            av_log(avctx, AV_LOG_INFO,
                   "bits_per_raw_sample > 8, forcing range coder\n");
            s->ac = AC_RANGE_CUSTOM_TAB;
        }
        s->version = FFMAX(s->version, 1);
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_YA8:
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV440P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUVA444P:
    case AV_PIX_FMT_YUVA422P:
    case AV_PIX_FMT_YUVA420P:
        s->chroma_planes = desc->nb_components < 3 ? 0 : 1;
        s->colorspace = 0;
        s->transparency = desc->nb_components == 4 || desc->nb_components == 2;
        if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
            s->bits_per_raw_sample = 8;
        else if (!s->bits_per_raw_sample)
            s->bits_per_raw_sample = 8;
        break;
    case AV_PIX_FMT_RGB32:
        s->colorspace = 1;
        s->transparency = 1;
        s->chroma_planes = 1;
        s->bits_per_raw_sample = 8;
        break;
    case AV_PIX_FMT_0RGB32:
        s->colorspace = 1;
        s->chroma_planes = 1;
        s->bits_per_raw_sample = 8;
        break;
    case AV_PIX_FMT_GBRP9:
        if (!avctx->bits_per_raw_sample)
            s->bits_per_raw_sample = 9;
    case AV_PIX_FMT_GBRP10:
        if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
            s->bits_per_raw_sample = 10;
    case AV_PIX_FMT_GBRP12:
        if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
            s->bits_per_raw_sample = 12;
    case AV_PIX_FMT_GBRP14:
        if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
            s->bits_per_raw_sample = 14;
        else if (!s->bits_per_raw_sample)
            s->bits_per_raw_sample = avctx->bits_per_raw_sample;
        s->colorspace = 1;
        s->chroma_planes = 1;
        s->version = FFMAX(s->version, 1);
        if (s->ac == AC_GOLOMB_RICE) {
            av_log(avctx, AV_LOG_INFO,
                   "bits_per_raw_sample > 8, forcing coder 1\n");
            s->ac = AC_RANGE_CUSTOM_TAB;
        }
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return AVERROR(ENOSYS);
    }
    av_assert0(s->bits_per_raw_sample >= 8);

    if (s->transparency) {
        av_log(avctx, AV_LOG_WARNING, "Storing alpha plane, this will require a recent FFV1 decoder to playback!\n");
    }
#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->context_model)
        s->context_model = avctx->context_model;
    if (avctx->context_model > 1U) {
        av_log(avctx, AV_LOG_ERROR, "Invalid context model %d, valid values are 0 and 1\n", avctx->context_model);
        return AVERROR(EINVAL);
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    if (s->ac == AC_RANGE_CUSTOM_TAB) {
        for (i = 1; i < 256; i++)
            s->state_transition[i] = ver2_state[i];
    } else {
        RangeCoder c;
        ff_build_rac_states(&c, 0.05 * (1LL << 32), 256 - 8);
        for (i = 1; i < 256; i++)
            s->state_transition[i] = c.one_state[i];
    }

    for (i = 0; i < 256; i++) {
        s->quant_table_count = 2;
        if (s->bits_per_raw_sample <= 8) {
            s->quant_tables[0][0][i] =           quant11[i];
            s->quant_tables[0][1][i] =        11*quant11[i];
            s->quant_tables[0][2][i] =     11*11*quant11[i];
            s->quant_tables[1][0][i] =           quant11[i];
            s->quant_tables[1][1][i] =        11*quant11[i];
            s->quant_tables[1][2][i] =     11*11*quant5 [i];
            s->quant_tables[1][3][i] =   5*11*11*quant5 [i];
            s->quant_tables[1][4][i] = 5*5*11*11*quant5 [i];
        } else {
            s->quant_tables[0][0][i] =           quant9_10bit[i];
            s->quant_tables[0][1][i] =        11*quant9_10bit[i];
            s->quant_tables[0][2][i] =     11*11*quant9_10bit[i];
            s->quant_tables[1][0][i] =           quant9_10bit[i];
            s->quant_tables[1][1][i] =        11*quant9_10bit[i];
            s->quant_tables[1][2][i] =     11*11*quant5_10bit[i];
            s->quant_tables[1][3][i] =   5*11*11*quant5_10bit[i];
            s->quant_tables[1][4][i] = 5*5*11*11*quant5_10bit[i];
        }
    }
    s->context_count[0] = (11 * 11 * 11        + 1) / 2;
    s->context_count[1] = (11 * 11 * 5 * 5 * 5 + 1) / 2;
    memcpy(s->quant_table, s->quant_tables[s->context_model],
           sizeof(s->quant_table));

    for (i = 0; i < s->plane_count; i++) {
        PlaneContext *const p = &s->plane[i];

        memcpy(p->quant_table, s->quant_table, sizeof(p->quant_table));
        p->quant_table_index = s->context_model;
        p->context_count     = s->context_count[p->quant_table_index];
    }

    if ((ret = ff_ffv1_allocate_initial_states(s)) < 0)
        return ret;

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    if (!s->transparency)
        s->plane_count = 2;
    if (!s->chroma_planes && s->version > 3)
        s->plane_count--;

    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);
    s->picture_number = 0;

    if (avctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) {
        for (i = 0; i < s->quant_table_count; i++) {
            s->rc_stat2[i] = av_mallocz(s->context_count[i] *
                                        sizeof(*s->rc_stat2[i]));
            if (!s->rc_stat2[i])
                return AVERROR(ENOMEM);
        }
    }
    if (avctx->stats_in) {
        char *p = avctx->stats_in;
        uint8_t (*best_state)[256] = av_malloc_array(256, 256);
        int gob_count = 0;
        char *next;
        if (!best_state)
            return AVERROR(ENOMEM);

        av_assert0(s->version >= 2);

        for (;;) {
            for (j = 0; j < 256; j++)
                for (i = 0; i < 2; i++) {
                    s->rc_stat[j][i] = strtol(p, &next, 0);
                    if (next == p) {
                        av_log(avctx, AV_LOG_ERROR,
                               "2Pass file invalid at %d %d [%s]\n", j, i, p);
                        av_freep(&best_state);
                        return AVERROR_INVALIDDATA;
                    }
                    p = next;
                }
            for (i = 0; i < s->quant_table_count; i++)
                for (j = 0; j < s->context_count[i]; j++) {
                    for (k = 0; k < 32; k++)
                        for (m = 0; m < 2; m++) {
                            s->rc_stat2[i][j][k][m] = strtol(p, &next, 0);
                            if (next == p) {
                                av_log(avctx, AV_LOG_ERROR,
                                       "2Pass file invalid at %d %d %d %d [%s]\n",
                                       i, j, k, m, p);
                                av_freep(&best_state);
                                return AVERROR_INVALIDDATA;
                            }
                            p = next;
                        }
                }
            gob_count = strtol(p, &next, 0);
            if (next == p || gob_count <= 0) {
                av_log(avctx, AV_LOG_ERROR, "2Pass file invalid\n");
                av_freep(&best_state);
                return AVERROR_INVALIDDATA;
            }
            p = next;
            while (*p == '\n' || *p == ' ')
                p++;
            if (p[0] == 0)
                break;
        }

        if (s->ac == AC_RANGE_CUSTOM_TAB)
            sort_stt(s, s->state_transition);

        find_best_state(best_state, s->state_transition);

        for (i = 0; i < s->quant_table_count; i++) {
            for (k = 0; k < 32; k++) {
                double a = 0, b = 0;
                int jp = 0;
                for (j = 0; j < s->context_count[i]; j++) {
                    double p = 128;
                    if (s->rc_stat2[i][j][k][0] + s->rc_stat2[i][j][k][1] > 200 && j || a+b > 200) {
                        if (a+b)
                            p = 256.0 * b / (a + b);
                        s->initial_states[i][jp][k] =
                            best_state[av_clip(round(p), 1, 255)][av_clip_uint8((a + b) / gob_count)];
                        for (jp++; jp < j; jp++)
                            s->initial_states[i][jp][k] = s->initial_states[i][jp - 1][k];
                        a = b = 0;
                    }
                    a += s->rc_stat2[i][j][k][0];
                    b += s->rc_stat2[i][j][k][1];
                    if (a+b) {
                        p = 256.0 * b / (a + b);
                    }
                    s->initial_states[i][j][k] =
                        best_state[av_clip(round(p), 1, 255)][av_clip_uint8((a + b) / gob_count)];
                }
            }
        }
        av_freep(&best_state);
    }

    if (s->version > 1) {
        s->num_v_slices = (avctx->width > 352 || avctx->height > 288 || !avctx->slices) ? 2 : 1;
        for (; s->num_v_slices < 9; s->num_v_slices++) {
            for (s->num_h_slices = s->num_v_slices; s->num_h_slices < 2*s->num_v_slices; s->num_h_slices++) {
                if (avctx->slices == s->num_h_slices * s->num_v_slices && avctx->slices <= 64 || !avctx->slices)
                    goto slices_ok;
            }
        }
        av_log(avctx, AV_LOG_ERROR,
               "Unsupported number %d of slices requested, please specify a "
               "supported number with -slices (ex:4,6,9,12,16, ...)\n",
               avctx->slices);
        return AVERROR(ENOSYS);
slices_ok:
        if ((ret = write_extradata(s)) < 0)
            return ret;
    }

    if ((ret = ff_ffv1_init_slice_contexts(s)) < 0)
        return ret;
    s->slice_count = s->max_slice_count;
    if ((ret = ff_ffv1_init_slices_state(s)) < 0)
        return ret;

#define STATS_OUT_SIZE 1024 * 1024 * 6
    if (avctx->flags & AV_CODEC_FLAG_PASS1) {
        avctx->stats_out = av_mallocz(STATS_OUT_SIZE);
        if (!avctx->stats_out)
            return AVERROR(ENOMEM);
        for (i = 0; i < s->quant_table_count; i++)
            for (j = 0; j < s->max_slice_count; j++) {
                FFV1Context *sf = s->slice_context[j];
                av_assert0(!sf->rc_stat2[i]);
                sf->rc_stat2[i] = av_mallocz(s->context_count[i] *
                                             sizeof(*sf->rc_stat2[i]));
                if (!sf->rc_stat2[i])
                    return AVERROR(ENOMEM);
            }
    }

    return 0;
}

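/**
 * Write the version >= 3 slice header: slice position and size in units of
 * the slice grid, per-plane quant table index, interlacing and sample aspect
 * ratio, and for version > 3 the PCM coding mode and RCT coefficients.
 */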
static void encode_slice_header(FFV1Context *f, FFV1Context *fs)
{
    RangeCoder *c = &fs->c;
    uint8_t state[CONTEXT_SIZE];
    int j;
    memset(state, 128, sizeof(state));

    put_symbol(c, state, (fs->slice_x      + 1) * f->num_h_slices / f->width,      0);
    put_symbol(c, state, (fs->slice_y      + 1) * f->num_v_slices / f->height,     0);
    put_symbol(c, state, (fs->slice_width  + 1) * f->num_h_slices / f->width  - 1, 0);
    put_symbol(c, state, (fs->slice_height + 1) * f->num_v_slices / f->height - 1, 0);
    for (j = 0; j < f->plane_count; j++) {
        put_symbol(c, state, f->plane[j].quant_table_index, 0);
        av_assert0(f->plane[j].quant_table_index == f->context_model);
    }
    if (!f->picture.f->interlaced_frame)
        put_symbol(c, state, 3, 0);
    else
        put_symbol(c, state, 1 + !f->picture.f->top_field_first, 0);
    put_symbol(c, state, f->picture.f->sample_aspect_ratio.num, 0);
    put_symbol(c, state, f->picture.f->sample_aspect_ratio.den, 0);
    if (f->version > 3) {
        put_rac(c, state, fs->slice_coding_mode == 1);
        if (fs->slice_coding_mode == 1)
            ff_ffv1_clear_slice_state(f, fs);
        put_symbol(c, state, fs->slice_coding_mode, 0);
        if (fs->slice_coding_mode != 1) {
            put_symbol(c, state, fs->slice_rct_by_coef, 0);
            put_symbol(c, state, fs->slice_rct_ry_coef, 0);
        }
    }
}

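/**
 * Try each candidate pair of RCT luma coefficients on second-order sample
 * differences of the slice and keep the pair that minimizes the summed
 * absolute "luma" residual.
 */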
static void choose_rct_params(FFV1Context *fs, const uint8_t *src[3], const int stride[3], int w, int h)
{
#define NB_Y_COEFF 15
    static const int rct_y_coeff[15][2] = {
        {0, 0}, // 4G
        {1, 1}, // R + 2G + B
        {2, 2}, // 2R + 2B
        {0, 2}, // 2G + 2B
        {2, 0}, // 2R + 2G
        {4, 0}, // 4R
        {0, 4}, // 4B
        {0, 3}, // 1G + 3B
        {3, 0}, // 3R + 1G
        {3, 1}, // 3R + B
        {1, 3}, // R + 3B
        {1, 2}, // R + G + 2B
        {2, 1}, // 2R + G + B
        {0, 1}, // 3G + B
        {1, 0}, // R + 3G
    };

    int stat[NB_Y_COEFF] = {0};
    int x, y, i, p, best;
    int16_t *sample[3];
    int lbd = fs->bits_per_raw_sample <= 8;

    for (y = 0; y < h; y++) {
        int lastr = 0, lastg = 0, lastb = 0;
        for (p = 0; p < 3; p++)
            sample[p] = fs->sample_buffer + p*w;

        for (x = 0; x < w; x++) {
            int b, g, r;
            int ab, ag, ar;
            if (lbd) {
                unsigned v = *((const uint32_t*)(src[0] + x*4 + stride[0]*y));
                b =  v        & 0xFF;
                g = (v >>  8) & 0xFF;
                r = (v >> 16) & 0xFF;
            } else {
                b = *((const uint16_t*)(src[0] + x*2 + stride[0]*y));
                g = *((const uint16_t*)(src[1] + x*2 + stride[1]*y));
                r = *((const uint16_t*)(src[2] + x*2 + stride[2]*y));
            }

            ar = r - lastr;
            ag = g - lastg;
            ab = b - lastb;
            if (x && y) {
                int bg = ag - sample[0][x];
                int bb = ab - sample[1][x];
                int br = ar - sample[2][x];

                br -= bg;
                bb -= bg;

                for (i = 0; i < NB_Y_COEFF; i++) {
                    stat[i] += FFABS(bg + ((br*rct_y_coeff[i][0] + bb*rct_y_coeff[i][1])>>2));
                }
            }
            sample[0][x] = ag;
            sample[1][x] = ab;
            sample[2][x] = ar;

            lastr = r;
            lastg = g;
            lastb = b;
        }
    }

    best = 0;
    for (i = 1; i < NB_Y_COEFF; i++) {
        if (stat[i] < stat[best])
            best = i;
    }

    fs->slice_rct_by_coef = rct_y_coeff[best][1];
    fs->slice_rct_ry_coef = rct_y_coeff[best][0];
}

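/**
 * Per-slice worker run through avctx->execute(): select RCT coefficients
 * (version > 3), reset the context states on key frames, write the slice
 * header, encode the planes with the selected coder, and if the output
 * buffer overflows retry the slice as PCM where the version allows it.
 */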
static int encode_slice(AVCodecContext *c, void *arg)
{
    FFV1Context *fs  = *(void **)arg;
    FFV1Context *f   = fs->avctx->priv_data;
    int width        = fs->slice_width;
    int height       = fs->slice_height;
    int x            = fs->slice_x;
    int y            = fs->slice_y;
    const AVFrame *const p = f->picture.f;
    const int ps     = av_pix_fmt_desc_get(c->pix_fmt)->comp[0].step;
    int ret;
    RangeCoder c_bak = fs->c;
    const uint8_t *planes[3] = {p->data[0] + ps*x + y*p->linesize[0],
                                p->data[1] + ps*x + y*p->linesize[1],
                                p->data[2] + ps*x + y*p->linesize[2]};

    fs->slice_coding_mode = 0;
    if (f->version > 3) {
        choose_rct_params(fs, planes, p->linesize, width, height);
    } else {
        fs->slice_rct_by_coef = 1;
        fs->slice_rct_ry_coef = 1;
    }

retry:
    if (f->key_frame)
        ff_ffv1_clear_slice_state(f, fs);
    if (f->version > 2) {
        encode_slice_header(f, fs);
    }
    if (fs->ac == AC_GOLOMB_RICE) {
        if (f->version > 2)
            put_rac(&fs->c, (uint8_t[]) { 129 }, 0);
        fs->ac_byte_count = f->version > 2 || (!x && !y) ? ff_rac_terminate(&fs->c) : 0;
        init_put_bits(&fs->pb,
                      fs->c.bytestream_start + fs->ac_byte_count,
                      fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count);
    }

    if (f->colorspace == 0 && c->pix_fmt != AV_PIX_FMT_YA8) {
        const int chroma_width  = AV_CEIL_RSHIFT(width,  f->chroma_h_shift);
        const int chroma_height = AV_CEIL_RSHIFT(height, f->chroma_v_shift);
        const int cx            = x >> f->chroma_h_shift;
        const int cy            = y >> f->chroma_v_shift;

        ret = encode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0, 1);

        if (f->chroma_planes) {
            ret |= encode_plane(fs, p->data[1] + ps*cx + cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1, 1);
            ret |= encode_plane(fs, p->data[2] + ps*cx + cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1, 1);
        }
        if (fs->transparency)
            ret |= encode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2, 1);
    } else if (c->pix_fmt == AV_PIX_FMT_YA8) {
        ret  = encode_plane(fs, p->data[0] +     ps*x + y*p->linesize[0], width, height, p->linesize[0], 0, 2);
        ret |= encode_plane(fs, p->data[0] + 1 + ps*x + y*p->linesize[0], width, height, p->linesize[0], 1, 2);
    } else {
        ret = encode_rgb_frame(fs, planes, width, height, p->linesize);
    }
    emms_c();

    if (ret < 0) {
        av_assert0(fs->slice_coding_mode == 0);
        if (fs->version < 4 || !fs->ac) {
            av_log(c, AV_LOG_ERROR, "Buffer too small\n");
            return ret;
        }
        av_log(c, AV_LOG_DEBUG, "Coding slice as PCM\n");
        fs->slice_coding_mode = 1;
        fs->c = c_bak;
        goto retry;
    }

    return 0;
}

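/**
 * Top-level encode callback. Called with pict == NULL at the end of a first
 * pass to emit the accumulated statistics; otherwise codes the keyframe flag
 * (and the header on key frames), gives each slice its share of the packet,
 * runs the slices in parallel and concatenates their output, appending 3-byte
 * length fields and optional CRCs.
 */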
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    FFV1Context *f      = avctx->priv_data;
    RangeCoder *const c = &f->slice_context[0]->c;
    AVFrame *const p    = f->picture.f;
    int used_count      = 0;
    uint8_t keystate    = 128;
    uint8_t *buf_p;
    int i, ret;
    int64_t maxsize = AV_INPUT_BUFFER_MIN_SIZE
                      + avctx->width*avctx->height*35LL*4;

    if (!pict) {
        if (avctx->flags & AV_CODEC_FLAG_PASS1) {
            int j, k, m;
            char *p   = avctx->stats_out;
            char *end = p + STATS_OUT_SIZE;

            memset(f->rc_stat, 0, sizeof(f->rc_stat));
            for (i = 0; i < f->quant_table_count; i++)
                memset(f->rc_stat2[i], 0, f->context_count[i] * sizeof(*f->rc_stat2[i]));

            av_assert0(f->slice_count == f->max_slice_count);
            for (j = 0; j < f->slice_count; j++) {
                FFV1Context *fs = f->slice_context[j];
                for (i = 0; i < 256; i++) {
                    f->rc_stat[i][0] += fs->rc_stat[i][0];
                    f->rc_stat[i][1] += fs->rc_stat[i][1];
                }
                for (i = 0; i < f->quant_table_count; i++) {
                    for (k = 0; k < f->context_count[i]; k++)
                        for (m = 0; m < 32; m++) {
                            f->rc_stat2[i][k][m][0] += fs->rc_stat2[i][k][m][0];
                            f->rc_stat2[i][k][m][1] += fs->rc_stat2[i][k][m][1];
                        }
                }
            }

            for (j = 0; j < 256; j++) {
                snprintf(p, end - p, "%" PRIu64 " %" PRIu64 " ",
                         f->rc_stat[j][0], f->rc_stat[j][1]);
                p += strlen(p);
            }
            snprintf(p, end - p, "\n");

            for (i = 0; i < f->quant_table_count; i++) {
                for (j = 0; j < f->context_count[i]; j++)
                    for (m = 0; m < 32; m++) {
                        snprintf(p, end - p, "%" PRIu64 " %" PRIu64 " ",
                                 f->rc_stat2[i][j][m][0], f->rc_stat2[i][j][m][1]);
                        p += strlen(p);
                    }
            }
            snprintf(p, end - p, "%d\n", f->gob_count);
        }
        return 0;
    }

    if (f->version > 3)
        maxsize = AV_INPUT_BUFFER_MIN_SIZE + avctx->width*avctx->height*3LL*4;

    if ((ret = ff_alloc_packet2(avctx, pkt, maxsize, 0)) < 0)
        return ret;

    ff_init_range_encoder(c, pkt->data, pkt->size);
    ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);

    av_frame_unref(p);
    if ((ret = av_frame_ref(p, pict)) < 0)
        return ret;
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;

    if (avctx->gop_size == 0 || f->picture_number % avctx->gop_size == 0) {
        put_rac(c, &keystate, 1);
        f->key_frame = 1;
        f->gob_count++;
        write_header(f);
    } else {
        put_rac(c, &keystate, 0);
        f->key_frame = 0;
    }

    if (f->ac == AC_RANGE_CUSTOM_TAB) {
        int i;
        for (i = 1; i < 256; i++) {
            c->one_state[i]        = f->state_transition[i];
            c->zero_state[256 - i] = 256 - c->one_state[i];
        }
    }

    for (i = 1; i < f->slice_count; i++) {
        FFV1Context *fs = f->slice_context[i];
        uint8_t *start  = pkt->data + (pkt->size - used_count) * (int64_t)i / f->slice_count;
        int len         = pkt->size / f->slice_count;
        ff_init_range_encoder(&fs->c, start, len);
    }
    avctx->execute(avctx, encode_slice, &f->slice_context[0], NULL,
                   f->slice_count, sizeof(void *));

    buf_p = pkt->data;
    for (i = 0; i < f->slice_count; i++) {
        FFV1Context *fs = f->slice_context[i];
        int bytes;

        if (fs->ac != AC_GOLOMB_RICE) {
            uint8_t state = 129;
            put_rac(&fs->c, &state, 0);
            bytes = ff_rac_terminate(&fs->c);
        } else {
            flush_put_bits(&fs->pb); // FIXME: nicer padding
            bytes = fs->ac_byte_count + (put_bits_count(&fs->pb) + 7) / 8;
        }
        if (i > 0 || f->version > 2) {
            av_assert0(bytes < pkt->size / f->slice_count);
            memmove(buf_p, fs->c.bytestream_start, bytes);
            av_assert0(bytes < (1 << 24));
            AV_WB24(buf_p + bytes, bytes);
            bytes += 3;
        }
        if (f->ec) {
            unsigned v;
            buf_p[bytes++] = 0;
            v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, buf_p, bytes);
            AV_WL32(buf_p + bytes, v);
            bytes += 4;
        }
        buf_p += bytes;
    }

    if (avctx->flags & AV_CODEC_FLAG_PASS1)
        avctx->stats_out[0] = '\0';

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->key_frame = f->key_frame;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    f->picture_number++;
    pkt->size   = buf_p - pkt->data;
    pkt->pts    =
    pkt->dts    = pict->pts;
    pkt->flags |= AV_PKT_FLAG_KEY * f->key_frame;
    *got_packet = 1;

    return 0;
}

static av_cold int encode_close(AVCodecContext *avctx)
{
    ff_ffv1_close(avctx);
    return 0;
}

#define OFFSET(x) offsetof(FFV1Context, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "slicecrc", "Protect slices with CRCs", OFFSET(ec), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VE },
    { "coder", "Coder type", OFFSET(ac), AV_OPT_TYPE_INT,
      { .i64 = 0 }, -2, 2, VE, "coder" },
    { "rice", "Golomb rice", 0, AV_OPT_TYPE_CONST,
      { .i64 = AC_GOLOMB_RICE }, INT_MIN, INT_MAX, VE, "coder" },
    { "range_def", "Range with default table", 0, AV_OPT_TYPE_CONST,
      { .i64 = AC_RANGE_DEFAULT_TAB_FORCE }, INT_MIN, INT_MAX, VE, "coder" },
    { "range_tab", "Range with custom table", 0, AV_OPT_TYPE_CONST,
      { .i64 = AC_RANGE_CUSTOM_TAB }, INT_MIN, INT_MAX, VE, "coder" },
    { "ac", "Range with custom table (the ac option exists for compatibility and is deprecated)", 0, AV_OPT_TYPE_CONST,
      { .i64 = 1 }, INT_MIN, INT_MAX, VE, "coder" },
    { "context", "Context model", OFFSET(context_model), AV_OPT_TYPE_INT,
      { .i64 = 0 }, 0, 1, VE },
    { NULL }
};

static const AVClass ffv1_class = {
    .class_name = "ffv1 encoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

#if FF_API_CODER_TYPE
static const AVCodecDefault ffv1_defaults[] = {
    { "coder", "-1" },
    { NULL },
};
#endif

AVCodec ff_ffv1_encoder = {
    .name           = "ffv1",
    .long_name      = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FFV1,
    .priv_data_size = sizeof(FFV1Context),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_close,
    .capabilities   = AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_DELAY,
    .pix_fmts       = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUV420P,   AV_PIX_FMT_YUVA420P,  AV_PIX_FMT_YUVA422P,  AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUVA444P,  AV_PIX_FMT_YUV440P,   AV_PIX_FMT_YUV422P,   AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV410P,   AV_PIX_FMT_0RGB32,    AV_PIX_FMT_RGB32,     AV_PIX_FMT_YUV420P16,
        AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16, AV_PIX_FMT_YUV444P9,  AV_PIX_FMT_YUV422P9,
        AV_PIX_FMT_YUV420P9,  AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUVA444P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA420P16,
        AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA420P10,
        AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA420P9,
        AV_PIX_FMT_GRAY16, AV_PIX_FMT_GRAY8, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14,
        AV_PIX_FMT_YA8,
        AV_PIX_FMT_NONE
    },
#if FF_API_CODER_TYPE
    .defaults       = ffv1_defaults,
#endif
    .priv_class     = &ffv1_class,
};