You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1887 lines
62KB

  1. /*
  2. * FFV1 codec for libavcodec
  3. *
  4. * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * This file is part of Libav.
  7. *
  8. * Libav is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * Libav is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with Libav; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * FF Video Codec 1 (a lossless codec)
  25. */
  26. #include "libavutil/avassert.h"
  27. #include "avcodec.h"
  28. #include "get_bits.h"
  29. #include "put_bits.h"
  30. #include "dsputil.h"
  31. #include "rangecoder.h"
  32. #include "golomb.h"
  33. #include "mathops.h"
  34. #define MAX_PLANES 4
  35. #define CONTEXT_SIZE 32
  36. #define MAX_QUANT_TABLES 8
  37. #define MAX_CONTEXT_INPUTS 5
  38. extern const uint8_t ff_log2_run[41];
/* 5-level quantizer of a wrapped 8-bit gradient difference, tuned for
 * >8-bit content: a wide dead zone around 0 absorbs sensor noise. */
static const int8_t quant5_10bit[256] = {
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  1,  1,  1,  1,  1,
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
     1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
     1,  1,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -0, -0, -0, -0, -0, -0, -0, -0, -0, -0,
};
/* 5-level quantizer of a wrapped 8-bit gradient difference (narrow dead
 * zone), used for 8-bit content. */
static const int8_t quant5[256] = {
     0,  1,  1,  1,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1,
};
/* 9-level quantizer of a wrapped 8-bit gradient difference, tuned for
 * >8-bit content (wider dead zone than the plain 9/11-level tables). */
static const int8_t quant9_10bit[256] = {
     0,  0,  0,  0,  0,  1,  1,  1,  1,  1,  1,  1,  1,  2,  2,  2,
     2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  3,  3,  3,  3,  3,
     3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,
     3,  3,  3,  3,  3,  3,  3,  3,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
    -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
    -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
    -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
    -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3,
    -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3,
    -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
    -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -0, -0, -0, -0,
};
/* 11-level quantizer of a wrapped 8-bit gradient difference, used for
 * 8-bit content (the first context input of each quant table set). */
static const int8_t quant11[256] = {
     0,  1,  2,  2,  2,  3,  3,  3,  3,  3,  3,  3,  4,  4,  4,  4,
     4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
     4,  4,  4,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,
     5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,
     5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,
     5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,
     5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,
     5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,
    -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5,
    -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5,
    -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5,
    -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5,
    -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5,
    -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -4, -4,
    -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
    -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -1,
};
/* Tuned range-coder one_state transition table used by the version-2
 * coder (ac > 1) instead of the default ff_build_rac_states() table. */
static const uint8_t ver2_state[256] = {
      0,  10,  10,  10,  10,  16,  16,  16,  28,  16,  16,  29,  42,  49,  20,  49,
     59,  25,  26,  26,  27,  31,  33,  33,  33,  34,  34,  37,  67,  38,  39,  39,
     40,  40,  41,  79,  43,  44,  45,  45,  48,  48,  64,  50,  51,  52,  88,  52,
     53,  74,  55,  57,  58,  58,  74,  60, 101,  61,  62,  84,  66,  66,  68,  69,
     87,  82,  71,  97,  73,  73,  82,  75, 111,  77,  94,  78,  87,  81,  83,  97,
     85,  83,  94,  86,  99,  89,  90,  99, 111,  92,  93, 134,  95,  98, 105,  98,
    105, 110, 102, 108, 102, 118, 103, 106, 106, 113, 109, 112, 114, 112, 116, 125,
    115, 116, 117, 117, 126, 119, 125, 121, 121, 123, 145, 124, 126, 131, 127, 129,
    165, 130, 132, 138, 133, 135, 145, 136, 137, 139, 146, 141, 143, 142, 144, 148,
    147, 155, 151, 149, 151, 150, 152, 157, 153, 154, 156, 168, 158, 162, 161, 160,
    172, 163, 169, 164, 166, 184, 167, 170, 177, 174, 171, 173, 182, 176, 180, 178,
    175, 189, 179, 181, 186, 183, 192, 185, 200, 187, 191, 188, 190, 197, 193, 196,
    197, 194, 195, 196, 198, 202, 199, 201, 210, 203, 207, 204, 205, 206, 208, 214,
    209, 211, 221, 212, 213, 215, 224, 216, 217, 218, 219, 220, 222, 228, 223, 225,
    226, 224, 227, 229, 240, 230, 231, 232, 233, 234, 235, 236, 238, 239, 237, 242,
    241, 243, 242, 244, 245, 246, 247, 248, 249, 250, 251, 252, 252, 253, 254, 255,
};
/* Per-context adaptive state of the Golomb-Rice coder (ac == 0). */
typedef struct VlcState {
    int16_t drift;      /* running sum of residuals, drives bias adaptation */
    uint16_t error_sum; /* running sum of |residual|, selects Rice parameter k */
    int8_t bias;        /* prediction bias subtracted before coding */
    uint8_t count;      /* symbols since the last halving; capped at 128 */
} VlcState;
/* Per-plane coding state. */
typedef struct PlaneContext {
    int16_t quant_table[MAX_CONTEXT_INPUTS][256]; /* quantizers for the neighbour gradients */
    int quant_table_index;                        /* index into FFV1Context.quant_tables */
    int context_count;                            /* number of distinct contexts */
    uint8_t (*state)[CONTEXT_SIZE];               /* range-coder states, one row per context (ac != 0) */
    VlcState *vlc_state;                          /* Rice states, one per context (ac == 0) */
    uint8_t interlace_bit_state[2];
} PlaneContext;
  143. #define MAX_SLICES 256
/* Codec context; for version >= 2 each slice owns a copy of this struct. */
typedef struct FFV1Context {
    AVCodecContext *avctx;
    RangeCoder c;
    GetBitContext gb;
    PutBitContext pb;
    uint64_t rc_stat[256][2];                      /* global (state, bit) counts for 2-pass */
    uint64_t (*rc_stat2[MAX_QUANT_TABLES])[32][2]; /* per-context (state, bit) counts for 2-pass */
    int version;
    int width, height;
    int chroma_h_shift, chroma_v_shift;
    int flags;
    int picture_number;
    AVFrame picture;
    int plane_count;
    int ac;                        // 1 = range coder <-> 0 = golomb rice
    PlaneContext plane[MAX_PLANES];
    int16_t quant_table[MAX_CONTEXT_INPUTS][256];  /* active table (copy of quant_tables[ctx model]) */
    int16_t quant_tables[MAX_QUANT_TABLES][MAX_CONTEXT_INPUTS][256];
    int context_count[MAX_QUANT_TABLES];
    uint8_t state_transition[256]; /* one_state override used when ac > 1 */
    uint8_t (*initial_states[MAX_QUANT_TABLES])[32];
    int run_index;                 /* current ff_log2_run index of the RLE mode */
    int colorspace;                /* 0 = YUV, 1 = RGB with reversible color transform */
    int16_t *sample_buffer;        /* ring buffer of previous sample lines */
    int gob_count;
    int quant_table_count;
    DSPContext dsp;

    /* slicing (version >= 2) */
    struct FFV1Context *slice_context[MAX_SLICES];
    int slice_count;
    int num_v_slices;
    int num_h_slices;
    int slice_width;
    int slice_height;
    int slice_x;
    int slice_y;
} FFV1Context;
  180. static av_always_inline int fold(int diff, int bits)
  181. {
  182. if (bits == 8)
  183. diff = (int8_t)diff;
  184. else {
  185. diff += 1 << (bits - 1);
  186. diff &= (1 << bits) - 1;
  187. diff -= 1 << (bits - 1);
  188. }
  189. return diff;
  190. }
  191. static inline int predict(int16_t *src, int16_t *last)
  192. {
  193. const int LT = last[-1];
  194. const int T = last[0];
  195. const int L = src[-1];
  196. return mid_pred(L, L + T - LT, T);
  197. }
  198. static inline int get_context(PlaneContext *p, int16_t *src,
  199. int16_t *last, int16_t *last2)
  200. {
  201. const int LT = last[-1];
  202. const int T = last[0];
  203. const int RT = last[1];
  204. const int L = src[-1];
  205. if (p->quant_table[3][127]) {
  206. const int TT = last2[0];
  207. const int LL = src[-2];
  208. return p->quant_table[0][(L - LT) & 0xFF] +
  209. p->quant_table[1][(LT - T) & 0xFF] +
  210. p->quant_table[2][(T - RT) & 0xFF] +
  211. p->quant_table[3][(LL - L) & 0xFF] +
  212. p->quant_table[4][(TT - T) & 0xFF];
  213. } else
  214. return p->quant_table[0][(L - LT) & 0xFF] +
  215. p->quant_table[1][(LT - T) & 0xFF] +
  216. p->quant_table[2][(T - RT) & 0xFF];
  217. }
/**
 * For every probability i/256 and every number k of symbols still to
 * code, find the range-coder start state j that minimizes the expected
 * code length, by simulating the state-transition chain one_state[]
 * over a distribution occ[] of reachable states.
 * The winner is stored in best_state[i][k].
 */
static void find_best_state(uint8_t best_state[256][256],
                            const uint8_t one_state[256])
{
    int i, j, k, m;
    double l2tab[256];

    /* l2tab[m] = log2(m / 256); index 0 is left uninitialized */
    for (i = 1; i < 256; i++)
        l2tab[i] = log2(i / 256.0);

    for (i = 0; i < 256; i++) {
        double best_len[256];
        double p = i / 256.0;

        for (j = 0; j < 256; j++)
            best_len[j] = 1 << 30;

        /* only start states within +-10 of i are considered */
        for (j = FFMAX(i - 10, 1); j < FFMIN(i + 11, 256); j++) {
            double occ[256] = { 0 };
            double len = 0;
            occ[j] = 1.0;

            for (k = 0; k < 256; k++) {
                double newocc[256] = { 0 };

                /* entropy of coding one symbol from each occupied state.
                 * NOTE(review): m == 0 would read the uninitialized
                 * l2tab[0] and out-of-bounds l2tab[256]; this relies on
                 * occ[0] remaining zero — confirm one_state[] never maps
                 * an occupied state to 0. */
                for (m = 0; m < 256; m++)
                    if (occ[m]) {
                        len -= occ[m] * (p * l2tab[m] +
                                         (1 - p) * l2tab[256 - m]);
                    }

                if (len < best_len[k]) {
                    best_len[k]      = len;
                    best_state[i][k] = j;
                }

                /* advance the state distribution by one coded symbol */
                for (m = 0; m < 256; m++)
                    if (occ[m]) {
                        newocc[one_state[m]]             += occ[m] * p;
                        newocc[256 - one_state[256 - m]] += occ[m] * (1 - p);
                    }
                memcpy(occ, newocc, sizeof(occ));
            }
        }
    }
}
/**
 * Write a signed value with the range coder, exp-Golomb-like.
 * Layout of the 32 per-context states: 0 = zero flag, 1..10 = unary
 * exponent, 11..21 = sign, 22..31 = mantissa bits.
 * When rc_stat/rc_stat2 are non-NULL (2-pass stats collection), every
 * binary decision is additionally counted in them.
 */
static av_always_inline av_flatten void put_symbol_inline(RangeCoder *c,
                                                          uint8_t *state, int v,
                                                          int is_signed,
                                                          uint64_t rc_stat[256][2],
                                                          uint64_t rc_stat2[32][2])
{
    int i;

/* shadow put_rac so each call optionally updates the 2-pass statistics */
#define put_rac(C, S, B)                \
    do {                                \
        if (rc_stat) {                  \
            rc_stat[*(S)][B]++;         \
            rc_stat2[(S) - state][B]++; \
        }                               \
        put_rac(C, S, B);               \
    } while (0)

    if (v) {
        const int a = FFABS(v);
        const int e = av_log2(a);
        put_rac(c, state + 0, 0);
        if (e <= 9) {
            for (i = 0; i < e; i++)
                put_rac(c, state + 1 + i, 1); // 1..10
            put_rac(c, state + 1 + i, 0);

            for (i = e - 1; i >= 0; i--)
                put_rac(c, state + 22 + i, (a >> i) & 1); // 22..31

            if (is_signed)
                put_rac(c, state + 11 + e, v < 0); // 11..21
        } else {
            /* exponent >= 10: clamp the context index, not the coded value */
            for (i = 0; i < e; i++)
                put_rac(c, state + 1 + FFMIN(i, 9), 1); // 1..10
            put_rac(c, state + 1 + 9, 0);

            for (i = e - 1; i >= 0; i--)
                put_rac(c, state + 22 + FFMIN(i, 9), (a >> i) & 1); // 22..31

            if (is_signed)
                put_rac(c, state + 11 + 10, v < 0); // 11..21
        }
    } else {
        put_rac(c, state + 0, 1);
    }
#undef put_rac
}
/* Out-of-line variant of put_symbol_inline without stats collection,
 * kept noinline to limit code size at the non-hot call sites. */
static av_noinline void put_symbol(RangeCoder *c, uint8_t *state,
                                   int v, int is_signed)
{
    put_symbol_inline(c, state, v, is_signed, NULL, NULL);
}
  301. static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state,
  302. int is_signed)
  303. {
  304. if (get_rac(c, state + 0))
  305. return 0;
  306. else {
  307. int i, e, a;
  308. e = 0;
  309. while (get_rac(c, state + 1 + FFMIN(e, 9))) // 1..10
  310. e++;
  311. a = 1;
  312. for (i = e - 1; i >= 0; i--)
  313. a += a + get_rac(c, state + 22 + FFMIN(i, 9)); // 22..31
  314. e = -(is_signed && get_rac(c, state + 11 + FFMIN(e, 10))); // 11..21
  315. return (a ^ e) - e;
  316. }
  317. }
/* Out-of-line wrapper around get_symbol_inline for non-hot call sites. */
static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed)
{
    return get_symbol_inline(c, state, is_signed);
}
  322. static inline void update_vlc_state(VlcState *const state, const int v)
  323. {
  324. int drift = state->drift;
  325. int count = state->count;
  326. state->error_sum += FFABS(v);
  327. drift += v;
  328. if (count == 128) { // FIXME: variable
  329. count >>= 1;
  330. drift >>= 1;
  331. state->error_sum >>= 1;
  332. }
  333. count++;
  334. if (drift <= -count) {
  335. if (state->bias > -128)
  336. state->bias--;
  337. drift += count;
  338. if (drift <= -count)
  339. drift = -count + 1;
  340. } else if (drift > 0) {
  341. if (state->bias < 127)
  342. state->bias++;
  343. drift -= count;
  344. if (drift > 0)
  345. drift = 0;
  346. }
  347. state->drift = drift;
  348. state->count = count;
  349. }
/**
 * Write one Golomb-Rice-coded residual and update the adaptive
 * per-context VLC state. Mirror of get_vlc_symbol.
 */
static inline void put_vlc_symbol(PutBitContext *pb, VlcState *const state,
                                  int v, int bits)
{
    int i, k, code;

    v = fold(v - state->bias, bits);

    /* pick the smallest k with count * 2^k >= error_sum */
    i = state->count;
    k = 0;
    while (i < state->error_sum) { // FIXME: optimize
        k++;
        i += i;
    }

    assert(k <= 8);

#if 0 // JPEG LS
    if (k == 0 && 2 * state->drift <= -state->count)
        code = v ^ (-1);
    else
        code = v;
#else
    /* branch-free bias flip when 2*drift + count < 0;
     * NOTE: relies on arithmetic right shift of negative ints */
    code = v ^ ((2 * state->drift + state->count) >> 31);
#endif

    av_dlog(NULL, "v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code,
            state->bias, state->error_sum, state->drift, state->count, k);
    set_sr_golomb(pb, code, k, 12, bits);

    update_vlc_state(state, v);
}
/**
 * Read one Golomb-Rice-coded residual and update the adaptive
 * per-context VLC state. Mirror of put_vlc_symbol.
 */
static inline int get_vlc_symbol(GetBitContext *gb, VlcState *const state,
                                 int bits)
{
    int k, i, v, ret;

    /* same Rice parameter derivation as the encoder */
    i = state->count;
    k = 0;
    while (i < state->error_sum) { // FIXME: optimize
        k++;
        i += i;
    }

    assert(k <= 8);

    v = get_sr_golomb(gb, k, 12, bits);
    av_dlog(NULL, "v:%d bias:%d error:%d drift:%d count:%d k:%d",
            v, state->bias, state->error_sum, state->drift, state->count, k);

#if 0 // JPEG LS
    if (k == 0 && 2 * state->drift <= -state->count)
        v ^= (-1);
#else
    /* undo the encoder's bias flip (arithmetic shift of negative ints) */
    v ^= ((2 * state->drift + state->count) >> 31);
#endif

    ret = fold(v + state->bias, bits);

    update_vlc_state(state, v);

    return ret;
}
  399. #if CONFIG_FFV1_ENCODER
/**
 * Encode one line of one plane.
 * sample[0] is the current line, sample[1]/sample[2] the previous lines
 * (each with a small border). Uses the range coder when s->ac is set,
 * otherwise Golomb-Rice with run-length coding of zero residuals in
 * context 0 (JPEG-LS style).
 * @return 0 on success, -1 if the output buffer might overflow
 */
static av_always_inline int encode_line(FFV1Context *s, int w,
                                        int16_t *sample[3],
                                        int plane_index, int bits)
{
    PlaneContext *const p = &s->plane[plane_index];
    RangeCoder *const c   = &s->c;
    int x;
    int run_index = s->run_index;
    int run_count = 0;
    int run_mode  = 0;

    /* conservative worst-case space check before coding the line */
    if (s->ac) {
        if (c->bytestream_end - c->bytestream < w * 20) {
            av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
            return -1;
        }
    } else {
        if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < w * 4) {
            av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
            return -1;
        }
    }

    for (x = 0; x < w; x++) {
        int diff, context;

        context = get_context(p, sample[0] + x, sample[1] + x, sample[2] + x);
        diff    = sample[0][x] - predict(sample[0] + x, sample[1] + x);

        /* contexts are stored folded; a negative context flips the residual */
        if (context < 0) {
            context = -context;
            diff    = -diff;
        }

        diff = fold(diff, bits);

        if (s->ac) {
            if (s->flags & CODEC_FLAG_PASS1) {
                /* pass 1: also collect per-decision statistics */
                put_symbol_inline(c, p->state[context], diff, 1, s->rc_stat,
                                  s->rc_stat2[p->quant_table_index][context]);
            } else {
                put_symbol_inline(c, p->state[context], diff, 1, NULL, NULL);
            }
        } else {
            /* context 0 switches into run mode */
            if (context == 0)
                run_mode = 1;

            if (run_mode) {
                if (diff) {
                    /* a non-zero residual terminates the run: emit full
                     * runs of 2^log2_run, then the partial run length */
                    while (run_count >= 1 << ff_log2_run[run_index]) {
                        run_count -= 1 << ff_log2_run[run_index];
                        run_index++;
                        put_bits(&s->pb, 1, 1);
                    }
                    put_bits(&s->pb, 1 + ff_log2_run[run_index], run_count);
                    if (run_index)
                        run_index--;
                    run_count = 0;
                    run_mode  = 0;
                    /* diff != 0 here, so shift positives down by one */
                    if (diff > 0)
                        diff--;
                } else {
                    run_count++;
                }
            }

            av_dlog(s->avctx, "count:%d index:%d, mode:%d, x:%d pos:%d\n",
                    run_count, run_index, run_mode, x,
                    (int)put_bits_count(&s->pb));

            if (run_mode == 0)
                put_vlc_symbol(&s->pb, &p->vlc_state[context], diff, bits);
        }
    }

    /* flush a run that reaches the end of the line */
    if (run_mode) {
        while (run_count >= 1 << ff_log2_run[run_index]) {
            run_count -= 1 << ff_log2_run[run_index];
            run_index++;
            put_bits(&s->pb, 1, 1);
        }
        if (run_count)
            put_bits(&s->pb, 1, 1);
    }
    s->run_index = run_index;

    return 0;
}
/**
 * Encode one plane. Keeps a ring of 2 (or 3, with the extended context
 * model) int16 sample lines, each with a 3-sample border.
 */
static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h,
                         int stride, int plane_index)
{
    int x, y, i;
    const int ring_size = s->avctx->context_model ? 3 : 2;
    int16_t *sample[3];
    s->run_index = 0;

    memset(s->sample_buffer, 0, ring_size * (w + 6) * sizeof(*s->sample_buffer));

    for (y = 0; y < h; y++) {
        /* rotate the ring: sample[0] = current, sample[1]/[2] = previous */
        for (i = 0; i < ring_size; i++)
            sample[i] = s->sample_buffer + (w + 6) * ((h + i - y) % ring_size) + 3;

        /* left border mirrors the previous line's first sample,
         * right border repeats its last sample */
        sample[0][-1] = sample[1][0];
        sample[1][w]  = sample[1][w - 1];

// { START_TIMER
        if (s->avctx->bits_per_raw_sample <= 8) {
            for (x = 0; x < w; x++)
                sample[0][x] = src[x + stride * y];
            encode_line(s, w, sample, plane_index, 8);
        } else {
            /* 16-bit input carries its significant bits in the MSBs */
            for (x = 0; x < w; x++)
                sample[0][x] = ((uint16_t *)(src + stride * y))[x] >>
                               (16 - s->avctx->bits_per_raw_sample);
            encode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
        }
// STOP_TIMER("encode line") }
    }
}
/**
 * Encode a packed 32-bit RGB frame. Applies a reversible color
 * transform (b' = b - g, r' = r - g, g' = g + (b' + r') / 4); the
 * differenced chroma planes therefore need 9 bits.
 */
static void encode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h,
                             int stride)
{
    int x, y, p, i;
    const int ring_size = s->avctx->context_model ? 3 : 2;
    int16_t *sample[3][3];
    s->run_index = 0;

    memset(s->sample_buffer, 0, ring_size * 3 * (w + 6) * sizeof(*s->sample_buffer));

    for (y = 0; y < h; y++) {
        /* rotate the per-plane line rings */
        for (i = 0; i < ring_size; i++)
            for (p = 0; p < 3; p++)
                sample[p][i] = s->sample_buffer + p * ring_size * (w + 6) +
                               ((h + i - y) % ring_size) * (w + 6) + 3;

        for (x = 0; x < w; x++) {
            int v = src[x + stride * y];
            int b = v & 0xFF;
            int g = (v >> 8) & 0xFF;
            int r = (v >> 16) & 0xFF;

            /* reversible color transform; 0x100 offsets keep b/r positive */
            b -= g;
            r -= g;
            g += (b + r) >> 2;
            b += 0x100;
            r += 0x100;

            sample[0][0][x] = g;
            sample[1][0][x] = b;
            sample[2][0][x] = r;
        }
        for (p = 0; p < 3; p++) {
            sample[p][0][-1] = sample[p][1][0];
            sample[p][1][w]  = sample[p][1][w - 1];
            /* the two chroma planes share plane context 1 */
            encode_line(s, w, sample[p], FFMIN(p, 1), 9);
        }
    }
}
  538. static void write_quant_table(RangeCoder *c, int16_t *quant_table)
  539. {
  540. int last = 0;
  541. int i;
  542. uint8_t state[CONTEXT_SIZE];
  543. memset(state, 128, sizeof(state));
  544. for (i = 1; i < 128; i++)
  545. if (quant_table[i] != quant_table[i - 1]) {
  546. put_symbol(c, state, i - last - 1, 0);
  547. last = i;
  548. }
  549. put_symbol(c, state, i - last - 1, 0);
  550. }
  551. static void write_quant_tables(RangeCoder *c,
  552. int16_t quant_table[MAX_CONTEXT_INPUTS][256])
  553. {
  554. int i;
  555. for (i = 0; i < 5; i++)
  556. write_quant_table(c, quant_table[i]);
  557. }
/**
 * Write the per-frame header with the range coder of slice context 0.
 * version < 2 repeats the full configuration; version >= 2 only stores
 * the slice layout and per-plane quant table indices (everything else
 * lives in the global extradata header).
 */
static void write_header(FFV1Context *f)
{
    uint8_t state[CONTEXT_SIZE];
    int i, j;
    RangeCoder *const c = &f->slice_context[0]->c;

    memset(state, 128, sizeof(state));

    if (f->version < 2) {
        put_symbol(c, state, f->version, 0);
        put_symbol(c, state, f->ac, 0);
        if (f->ac > 1) {
            /* custom state transition table, delta vs the built-in one */
            for (i = 1; i < 256; i++)
                put_symbol(c, state,
                           f->state_transition[i] - c->one_state[i], 1);
        }
        put_symbol(c, state, f->colorspace, 0); // YUV cs type
        if (f->version > 0)
            put_symbol(c, state, f->avctx->bits_per_raw_sample, 0);
        put_rac(c, state, 1); // chroma planes
        put_symbol(c, state, f->chroma_h_shift, 0);
        put_symbol(c, state, f->chroma_v_shift, 0);
        put_rac(c, state, 0); // no transparency plane
        write_quant_tables(c, f->quant_table);
    } else {
        put_symbol(c, state, f->slice_count, 0);
        for (i = 0; i < f->slice_count; i++) {
            FFV1Context *fs = f->slice_context[i];
            /* slice geometry expressed in slice-grid units */
            put_symbol(c, state,
                       (fs->slice_x + 1) * f->num_h_slices / f->width, 0);
            put_symbol(c, state,
                       (fs->slice_y + 1) * f->num_v_slices / f->height, 0);
            put_symbol(c, state,
                       (fs->slice_width + 1) * f->num_h_slices / f->width - 1,
                       0);
            put_symbol(c, state,
                       (fs->slice_height + 1) * f->num_v_slices / f->height - 1,
                       0);
            for (j = 0; j < f->plane_count; j++) {
                put_symbol(c, state, f->plane[j].quant_table_index, 0);
                av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
            }
        }
    }
}
  601. #endif /* CONFIG_FFV1_ENCODER */
  602. static av_cold int common_init(AVCodecContext *avctx)
  603. {
  604. FFV1Context *s = avctx->priv_data;
  605. s->avctx = avctx;
  606. s->flags = avctx->flags;
  607. ff_dsputil_init(&s->dsp, avctx);
  608. s->width = avctx->width;
  609. s->height = avctx->height;
  610. assert(s->width && s->height);
  611. // defaults
  612. s->num_h_slices = 1;
  613. s->num_v_slices = 1;
  614. return 0;
  615. }
/**
 * Lazily allocate the per-plane model state of every slice context and,
 * for the version-2 coder, load the custom state transition table into
 * each slice's range coder.
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int init_slice_state(FFV1Context *f)
{
    int i, j;

    for (i = 0; i < f->slice_count; i++) {
        FFV1Context *fs = f->slice_context[i];
        for (j = 0; j < f->plane_count; j++) {
            PlaneContext *const p = &fs->plane[j];

            if (fs->ac) {
                /* range coder: CONTEXT_SIZE states per context */
                if (!p->state)
                    p->state = av_malloc(CONTEXT_SIZE * p->context_count *
                                         sizeof(uint8_t));
                if (!p->state)
                    return AVERROR(ENOMEM);
            } else {
                /* Golomb-Rice: one VlcState per context */
                if (!p->vlc_state)
                    p->vlc_state = av_malloc(p->context_count * sizeof(VlcState));
                if (!p->vlc_state)
                    return AVERROR(ENOMEM);
            }
        }

        if (fs->ac > 1) {
            // FIXME: only redo if state_transition changed
            for (j = 1; j < 256; j++) {
                fs->c.one_state[j]        = fs->state_transition[j];
                fs->c.zero_state[256 - j] = 256 - fs->c.one_state[j];
            }
        }
    }
    return 0;
}
  646. static av_cold int init_slice_contexts(FFV1Context *f)
  647. {
  648. int i;
  649. f->slice_count = f->num_h_slices * f->num_v_slices;
  650. for (i = 0; i < f->slice_count; i++) {
  651. FFV1Context *fs = av_mallocz(sizeof(*fs));
  652. int sx = i % f->num_h_slices;
  653. int sy = i / f->num_h_slices;
  654. int sxs = f->avctx->width * sx / f->num_h_slices;
  655. int sxe = f->avctx->width * (sx + 1) / f->num_h_slices;
  656. int sys = f->avctx->height * sy / f->num_v_slices;
  657. int sye = f->avctx->height * (sy + 1) / f->num_v_slices;
  658. f->slice_context[i] = fs;
  659. memcpy(fs, f, sizeof(*fs));
  660. memset(fs->rc_stat2, 0, sizeof(fs->rc_stat2));
  661. fs->slice_width = sxe - sxs;
  662. fs->slice_height = sye - sys;
  663. fs->slice_x = sxs;
  664. fs->slice_y = sys;
  665. fs->sample_buffer = av_malloc(9 * (fs->width + 6) *
  666. sizeof(*fs->sample_buffer));
  667. if (!fs->sample_buffer)
  668. return AVERROR(ENOMEM);
  669. }
  670. return 0;
  671. }
  672. static int allocate_initial_states(FFV1Context *f)
  673. {
  674. int i;
  675. for (i = 0; i < f->quant_table_count; i++) {
  676. f->initial_states[i] = av_malloc(f->context_count[i] *
  677. sizeof(*f->initial_states[i]));
  678. if (!f->initial_states[i])
  679. return AVERROR(ENOMEM);
  680. memset(f->initial_states[i], 128,
  681. f->context_count[i] * sizeof(*f->initial_states[i]));
  682. }
  683. return 0;
  684. }
  685. #if CONFIG_FFV1_ENCODER
  686. static int write_extra_header(FFV1Context *f)
  687. {
  688. RangeCoder *const c = &f->c;
  689. uint8_t state[CONTEXT_SIZE];
  690. int i, j, k;
  691. uint8_t state2[32][CONTEXT_SIZE];
  692. memset(state2, 128, sizeof(state2));
  693. memset(state, 128, sizeof(state));
  694. f->avctx->extradata = av_malloc(f->avctx->extradata_size = 10000 +
  695. (11 * 11 * 5 * 5 * 5 + 11 * 11 * 11) * 32);
  696. ff_init_range_encoder(c, f->avctx->extradata, f->avctx->extradata_size);
  697. ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
  698. put_symbol(c, state, f->version, 0);
  699. put_symbol(c, state, f->ac, 0);
  700. if (f->ac > 1)
  701. for (i = 1; i < 256; i++)
  702. put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
  703. put_symbol(c, state, f->colorspace, 0); // YUV cs type
  704. put_symbol(c, state, f->avctx->bits_per_raw_sample, 0);
  705. put_rac(c, state, 1); // chroma planes
  706. put_symbol(c, state, f->chroma_h_shift, 0);
  707. put_symbol(c, state, f->chroma_v_shift, 0);
  708. put_rac(c, state, 0); // no transparency plane
  709. put_symbol(c, state, f->num_h_slices - 1, 0);
  710. put_symbol(c, state, f->num_v_slices - 1, 0);
  711. put_symbol(c, state, f->quant_table_count, 0);
  712. for (i = 0; i < f->quant_table_count; i++)
  713. write_quant_tables(c, f->quant_tables[i]);
  714. for (i = 0; i < f->quant_table_count; i++) {
  715. for (j = 0; j < f->context_count[i] * CONTEXT_SIZE; j++)
  716. if (f->initial_states[i] && f->initial_states[i][0][j] != 128)
  717. break;
  718. if (j < f->context_count[i] * CONTEXT_SIZE) {
  719. put_rac(c, state, 1);
  720. for (j = 0; j < f->context_count[i]; j++)
  721. for (k = 0; k < CONTEXT_SIZE; k++) {
  722. int pred = j ? f->initial_states[i][j - 1][k] : 128;
  723. put_symbol(c, state2[k],
  724. (int8_t)(f->initial_states[i][j][k] - pred), 1);
  725. }
  726. } else {
  727. put_rac(c, state, 0);
  728. }
  729. }
  730. f->avctx->extradata_size = ff_rac_terminate(c);
  731. return 0;
  732. }
  733. static int sort_stt(FFV1Context *s, uint8_t stt[256])
  734. {
  735. int i, i2, changed, print = 0;
  736. do {
  737. changed = 0;
  738. for (i = 12; i < 244; i++) {
  739. for (i2 = i + 1; i2 < 245 && i2 < i + 4; i2++) {
  740. #define COST(old, new) \
  741. s->rc_stat[old][0] * -log2((256 - (new)) / 256.0) + \
  742. s->rc_stat[old][1] * -log2((new) / 256.0)
  743. #define COST2(old, new) \
  744. COST(old, new) + COST(256 - (old), 256 - (new))
  745. double size0 = COST2(i, i) + COST2(i2, i2);
  746. double sizeX = COST2(i, i2) + COST2(i2, i);
  747. if (sizeX < size0 && i != 128 && i2 != 128) {
  748. int j;
  749. FFSWAP(int, stt[i], stt[i2]);
  750. FFSWAP(int, s->rc_stat[i][0], s->rc_stat[i2][0]);
  751. FFSWAP(int, s->rc_stat[i][1], s->rc_stat[i2][1]);
  752. if (i != 256 - i2) {
  753. FFSWAP(int, stt[256 - i], stt[256 - i2]);
  754. FFSWAP(int, s->rc_stat[256 - i][0], s->rc_stat[256 - i2][0]);
  755. FFSWAP(int, s->rc_stat[256 - i][1], s->rc_stat[256 - i2][1]);
  756. }
  757. for (j = 1; j < 256; j++) {
  758. if (stt[j] == i)
  759. stt[j] = i2;
  760. else if (stt[j] == i2)
  761. stt[j] = i;
  762. if (i != 256 - i2) {
  763. if (stt[256 - j] == 256 - i)
  764. stt[256 - j] = 256 - i2;
  765. else if (stt[256 - j] == 256 - i2)
  766. stt[256 - j] = 256 - i;
  767. }
  768. }
  769. print = changed = 1;
  770. }
  771. }
  772. }
  773. } while (changed);
  774. return print;
  775. }
  776. static av_cold int encode_init(AVCodecContext *avctx)
  777. {
  778. FFV1Context *s = avctx->priv_data;
  779. int i, j, k, m;
  780. common_init(avctx);
  781. s->version = 0;
  782. s->ac = avctx->coder_type ? 2 : 0;
  783. if (s->ac > 1)
  784. for (i = 1; i < 256; i++)
  785. s->state_transition[i] = ver2_state[i];
  786. s->plane_count = 2;
  787. for (i = 0; i < 256; i++) {
  788. s->quant_table_count = 2;
  789. if (avctx->bits_per_raw_sample <= 8) {
  790. s->quant_tables[0][0][i] = quant11[i];
  791. s->quant_tables[0][1][i] = quant11[i] * 11;
  792. s->quant_tables[0][2][i] = quant11[i] * 11 * 11;
  793. s->quant_tables[1][0][i] = quant11[i];
  794. s->quant_tables[1][1][i] = quant11[i] * 11;
  795. s->quant_tables[1][2][i] = quant5[i] * 11 * 11;
  796. s->quant_tables[1][3][i] = quant5[i] * 5 * 11 * 11;
  797. s->quant_tables[1][4][i] = quant5[i] * 5 * 5 * 11 * 11;
  798. } else {
  799. s->quant_tables[0][0][i] = quant9_10bit[i];
  800. s->quant_tables[0][1][i] = quant9_10bit[i] * 11;
  801. s->quant_tables[0][2][i] = quant9_10bit[i] * 11 * 11;
  802. s->quant_tables[1][0][i] = quant9_10bit[i];
  803. s->quant_tables[1][1][i] = quant9_10bit[i] * 11;
  804. s->quant_tables[1][2][i] = quant5_10bit[i] * 11 * 11;
  805. s->quant_tables[1][3][i] = quant5_10bit[i] * 5 * 11 * 11;
  806. s->quant_tables[1][4][i] = quant5_10bit[i] * 5 * 5 * 11 * 11;
  807. }
  808. }
  809. s->context_count[0] = (11 * 11 * 11 + 1) / 2;
  810. s->context_count[1] = (11 * 11 * 5 * 5 * 5 + 1) / 2;
  811. memcpy(s->quant_table, s->quant_tables[avctx->context_model],
  812. sizeof(s->quant_table));
  813. for (i = 0; i < s->plane_count; i++) {
  814. PlaneContext *const p = &s->plane[i];
  815. memcpy(p->quant_table, s->quant_table, sizeof(p->quant_table));
  816. p->quant_table_index = avctx->context_model;
  817. p->context_count = s->context_count[p->quant_table_index];
  818. }
  819. if (allocate_initial_states(s) < 0)
  820. return AVERROR(ENOMEM);
  821. avctx->coded_frame = &s->picture;
  822. switch (avctx->pix_fmt) {
  823. case AV_PIX_FMT_YUV444P16:
  824. case AV_PIX_FMT_YUV422P16:
  825. case AV_PIX_FMT_YUV420P16:
  826. if (avctx->bits_per_raw_sample <= 8) {
  827. av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample invalid\n");
  828. return -1;
  829. }
  830. if (!s->ac) {
  831. av_log(avctx, AV_LOG_ERROR,
  832. "bits_per_raw_sample of more than 8 needs -coder 1 currently\n");
  833. return -1;
  834. }
  835. s->version = FFMAX(s->version, 1);
  836. case AV_PIX_FMT_YUV444P:
  837. case AV_PIX_FMT_YUV422P:
  838. case AV_PIX_FMT_YUV420P:
  839. case AV_PIX_FMT_YUV411P:
  840. case AV_PIX_FMT_YUV410P:
  841. s->colorspace = 0;
  842. break;
  843. case AV_PIX_FMT_RGB32:
  844. s->colorspace = 1;
  845. break;
  846. default:
  847. av_log(avctx, AV_LOG_ERROR, "format not supported\n");
  848. return -1;
  849. }
  850. avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift,
  851. &s->chroma_v_shift);
  852. s->picture_number = 0;
  853. if (avctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
  854. for (i = 0; i < s->quant_table_count; i++) {
  855. s->rc_stat2[i] = av_mallocz(s->context_count[i] *
  856. sizeof(*s->rc_stat2[i]));
  857. if (!s->rc_stat2[i])
  858. return AVERROR(ENOMEM);
  859. }
  860. }
  861. if (avctx->stats_in) {
  862. char *p = avctx->stats_in;
  863. uint8_t best_state[256][256];
  864. int gob_count = 0;
  865. char *next;
  866. av_assert0(s->version >= 2);
  867. for (;; ) {
  868. for (j = 0; j < 256; j++)
  869. for (i = 0; i < 2; i++) {
  870. s->rc_stat[j][i] = strtol(p, &next, 0);
  871. if (next == p) {
  872. av_log(avctx, AV_LOG_ERROR,
  873. "2Pass file invalid at %d %d [%s]\n", j, i, p);
  874. return -1;
  875. }
  876. p = next;
  877. }
  878. for (i = 0; i < s->quant_table_count; i++)
  879. for (j = 0; j < s->context_count[i]; j++) {
  880. for (k = 0; k < 32; k++)
  881. for (m = 0; m < 2; m++) {
  882. s->rc_stat2[i][j][k][m] = strtol(p, &next, 0);
  883. if (next == p) {
  884. av_log(avctx, AV_LOG_ERROR,
  885. "2Pass file invalid at %d %d %d %d [%s]\n",
  886. i, j, k, m, p);
  887. return -1;
  888. }
  889. p = next;
  890. }
  891. }
  892. gob_count = strtol(p, &next, 0);
  893. if (next == p || gob_count < 0) {
  894. av_log(avctx, AV_LOG_ERROR, "2Pass file invalid\n");
  895. return -1;
  896. }
  897. p = next;
  898. while (*p == '\n' || *p == ' ')
  899. p++;
  900. if (p[0] == 0)
  901. break;
  902. }
  903. sort_stt(s, s->state_transition);
  904. find_best_state(best_state, s->state_transition);
  905. for (i = 0; i < s->quant_table_count; i++) {
  906. for (j = 0; j < s->context_count[i]; j++)
  907. for (k = 0; k < 32; k++) {
  908. double p = 128;
  909. if (s->rc_stat2[i][j][k][0] + s->rc_stat2[i][j][k][1]) {
  910. p = 256.0 * s->rc_stat2[i][j][k][1] /
  911. (s->rc_stat2[i][j][k][0] + s->rc_stat2[i][j][k][1]);
  912. }
  913. s->initial_states[i][j][k] =
  914. best_state[av_clip(round(p), 1, 255)][av_clip((s->rc_stat2[i][j][k][0] +
  915. s->rc_stat2[i][j][k][1]) /
  916. gob_count, 0, 255)];
  917. }
  918. }
  919. }
  920. if (s->version > 1) {
  921. s->num_h_slices = 2;
  922. s->num_v_slices = 2;
  923. write_extra_header(s);
  924. }
  925. if (init_slice_contexts(s) < 0)
  926. return -1;
  927. if (init_slice_state(s) < 0)
  928. return -1;
  929. #define STATS_OUT_SIZE 1024 * 1024 * 6
  930. if (avctx->flags & CODEC_FLAG_PASS1) {
  931. avctx->stats_out = av_mallocz(STATS_OUT_SIZE);
  932. for (i = 0; i < s->quant_table_count; i++)
  933. for (j = 0; j < s->slice_count; j++) {
  934. FFV1Context *sf = s->slice_context[j];
  935. av_assert0(!sf->rc_stat2[i]);
  936. sf->rc_stat2[i] = av_mallocz(s->context_count[i] *
  937. sizeof(*sf->rc_stat2[i]));
  938. if (!sf->rc_stat2[i])
  939. return AVERROR(ENOMEM);
  940. }
  941. }
  942. return 0;
  943. }
  944. #endif /* CONFIG_FFV1_ENCODER */
  945. static void clear_state(FFV1Context *f)
  946. {
  947. int i, si, j;
  948. for (si = 0; si < f->slice_count; si++) {
  949. FFV1Context *fs = f->slice_context[si];
  950. for (i = 0; i < f->plane_count; i++) {
  951. PlaneContext *p = &fs->plane[i];
  952. p->interlace_bit_state[0] = 128;
  953. p->interlace_bit_state[1] = 128;
  954. if (fs->ac) {
  955. if (f->initial_states[p->quant_table_index]) {
  956. memcpy(p->state, f->initial_states[p->quant_table_index],
  957. CONTEXT_SIZE * p->context_count);
  958. } else
  959. memset(p->state, 128, CONTEXT_SIZE * p->context_count);
  960. } else {
  961. for (j = 0; j < p->context_count; j++) {
  962. p->vlc_state[j].drift = 0;
  963. p->vlc_state[j].error_sum = 4; // FFMAX((RANGE + 32)/64, 2);
  964. p->vlc_state[j].bias = 0;
  965. p->vlc_state[j].count = 1;
  966. }
  967. }
  968. }
  969. }
  970. }
  971. #if CONFIG_FFV1_ENCODER
  972. static int encode_slice(AVCodecContext *c, void *arg)
  973. {
  974. FFV1Context *fs = *(void **)arg;
  975. FFV1Context *f = fs->avctx->priv_data;
  976. int width = fs->slice_width;
  977. int height = fs->slice_height;
  978. int x = fs->slice_x;
  979. int y = fs->slice_y;
  980. AVFrame *const p = &f->picture;
  981. if (f->colorspace == 0) {
  982. const int chroma_width = -((-width) >> f->chroma_h_shift);
  983. const int chroma_height = -((-height) >> f->chroma_v_shift);
  984. const int cx = x >> f->chroma_h_shift;
  985. const int cy = y >> f->chroma_v_shift;
  986. encode_plane(fs, p->data[0] + x + y * p->linesize[0],
  987. width, height, p->linesize[0], 0);
  988. encode_plane(fs, p->data[1] + cx + cy * p->linesize[1],
  989. chroma_width, chroma_height, p->linesize[1], 1);
  990. encode_plane(fs, p->data[2] + cx + cy * p->linesize[2],
  991. chroma_width, chroma_height, p->linesize[2], 1);
  992. } else {
  993. encode_rgb_frame(fs, (uint32_t *)(p->data[0]) +
  994. x + y * (p->linesize[0] / 4),
  995. width, height, p->linesize[0] / 4);
  996. }
  997. emms_c();
  998. return 0;
  999. }
/**
 * Encode one frame into an AVPacket.
 *
 * Flow: decide key frame, write the header on key frames, set up one
 * range coder or bit writer per slice, encode the slices in parallel,
 * then pack the slice payloads back-to-back (each non-first slice gets a
 * 24-bit trailing length), and finally emit 2-pass stats if requested.
 *
 * @param avctx      codec context
 * @param pkt        output packet; allocated here if caller passed none
 * @param pict       source frame (copied shallowly into f->picture)
 * @param got_packet set to 1 on success
 * @return 0 on success, negative AVERROR on allocation failure
 */
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    FFV1Context *f      = avctx->priv_data;
    RangeCoder *const c = &f->slice_context[0]->c;
    AVFrame *const p    = &f->picture;
    int used_count      = 0;
    uint8_t keystate    = 128;
    uint8_t *buf_p;
    int i, ret;

    /* Worst-case sized packet: lossless output can exceed the raw size. */
    if (!pkt->data &&
        (ret = av_new_packet(pkt, avctx->width * avctx->height *
                             ((8 * 2 + 1 + 1) * 4) / 8 + FF_MIN_BUFFER_SIZE)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
        return ret;
    }

    ff_init_range_encoder(c, pkt->data, pkt->size);
    ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);

    *p           = *pict;
    p->pict_type = AV_PICTURE_TYPE_I;

    if (avctx->gop_size == 0 || f->picture_number % avctx->gop_size == 0) {
        /* Key frame: signal it with one rac bit, rewrite the header and
         * reset all entropy-coder state. */
        put_rac(c, &keystate, 1);
        p->key_frame = 1;
        f->gob_count++;
        write_header(f);
        clear_state(f);
    } else {
        put_rac(c, &keystate, 0);
        p->key_frame = 0;
    }

    if (!f->ac) {
        /* VLC mode: the header was range-coded; terminate that coder and
         * continue slice 0 with a bit writer right after it. */
        used_count += ff_rac_terminate(c);
        init_put_bits(&f->slice_context[0]->pb, pkt->data + used_count,
                      pkt->size - used_count);
    } else if (f->ac > 1) {
        /* Custom state-transition table for the range coder. */
        int i;
        for (i = 1; i < 256; i++) {
            c->one_state[i]        = f->state_transition[i];
            c->zero_state[256 - i] = 256 - c->one_state[i];
        }
    }

    /* Give every remaining slice an equal share of the output buffer. */
    for (i = 1; i < f->slice_count; i++) {
        FFV1Context *fs = f->slice_context[i];
        uint8_t *start  = pkt->data + (pkt->size - used_count) * i / f->slice_count;
        int len         = pkt->size / f->slice_count;

        if (fs->ac)
            ff_init_range_encoder(&fs->c, start, len);
        else
            init_put_bits(&fs->pb, start, len);
    }
    avctx->execute(avctx, encode_slice, &f->slice_context[0], NULL,
                   f->slice_count, sizeof(void *));

    /* Compact the slices: move each one directly after the previous and
     * append its own size as a 24-bit big-endian trailer (slices > 0). */
    buf_p = pkt->data;
    for (i = 0; i < f->slice_count; i++) {
        FFV1Context *fs = f->slice_context[i];
        int bytes;

        if (fs->ac) {
            uint8_t state = 128;
            put_rac(&fs->c, &state, 0);
            bytes = ff_rac_terminate(&fs->c);
        } else {
            flush_put_bits(&fs->pb); // FIXME: nicer padding
            /* slice 0 in VLC mode also accounts for the rac-coded header */
            bytes      = used_count + (put_bits_count(&fs->pb) + 7) / 8;
            used_count = 0;
        }
        if (i > 0) {
            av_assert0(bytes < pkt->size / f->slice_count);
            memmove(buf_p, fs->ac ? fs->c.bytestream_start : fs->pb.buf, bytes);
            av_assert0(bytes < (1 << 24));
            AV_WB24(buf_p + bytes, bytes);
            bytes += 3;
        }
        buf_p += bytes;
    }

    /* 2-pass stats: dump aggregated rac counters every 32 frames. */
    if ((avctx->flags & CODEC_FLAG_PASS1) && (f->picture_number & 31) == 0) {
        int j, k, m;
        char *p   = avctx->stats_out;
        char *end = p + STATS_OUT_SIZE;

        memset(f->rc_stat, 0, sizeof(f->rc_stat));
        for (i = 0; i < f->quant_table_count; i++)
            memset(f->rc_stat2[i], 0, f->context_count[i] * sizeof(*f->rc_stat2[i]));

        /* Sum the per-slice counters into the global ones. */
        for (j = 0; j < f->slice_count; j++) {
            FFV1Context *fs = f->slice_context[j];
            for (i = 0; i < 256; i++) {
                f->rc_stat[i][0] += fs->rc_stat[i][0];
                f->rc_stat[i][1] += fs->rc_stat[i][1];
            }
            for (i = 0; i < f->quant_table_count; i++) {
                for (k = 0; k < f->context_count[i]; k++)
                    for (m = 0; m < 32; m++) {
                        f->rc_stat2[i][k][m][0] += fs->rc_stat2[i][k][m][0];
                        f->rc_stat2[i][k][m][1] += fs->rc_stat2[i][k][m][1];
                    }
            }
        }

        for (j = 0; j < 256; j++) {
            snprintf(p, end - p, "%" PRIu64 " %" PRIu64 " ",
                     f->rc_stat[j][0], f->rc_stat[j][1]);
            p += strlen(p);
        }
        snprintf(p, end - p, "\n");

        for (i = 0; i < f->quant_table_count; i++) {
            for (j = 0; j < f->context_count[i]; j++)
                for (m = 0; m < 32; m++) {
                    snprintf(p, end - p, "%" PRIu64 " %" PRIu64 " ",
                             f->rc_stat2[i][j][m][0], f->rc_stat2[i][j][m][1]);
                    p += strlen(p);
                }
        }
        snprintf(p, end - p, "%d\n", f->gob_count);
    } else if (avctx->flags & CODEC_FLAG_PASS1)
        avctx->stats_out[0] = '\0';

    f->picture_number++;
    pkt->size   = buf_p - pkt->data;
    pkt->flags |= AV_PKT_FLAG_KEY * p->key_frame;
    *got_packet = 1;

    return 0;
}
  1118. #endif /* CONFIG_FFV1_ENCODER */
/**
 * Free all codec state shared by encoder and decoder: per-slice plane
 * contexts and sample buffers, 2-pass stats, initial states, and the
 * slice contexts themselves. Always returns 0.
 */
static av_cold int common_end(AVCodecContext *avctx)
{
    FFV1Context *s = avctx->priv_data;
    int i, j;

    /* Only the decoder path holds a frame buffer that must be released. */
    if (avctx->codec->decode && s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    for (j = 0; j < s->slice_count; j++) {
        FFV1Context *fs = s->slice_context[j];
        for (i = 0; i < s->plane_count; i++) {
            PlaneContext *p = &fs->plane[i];

            av_freep(&p->state);
            av_freep(&p->vlc_state);
        }
        av_freep(&fs->sample_buffer);
    }

    av_freep(&avctx->stats_out);

    /* Per-quant-table allocations, both global and per-slice. */
    for (j = 0; j < s->quant_table_count; j++) {
        av_freep(&s->initial_states[j]);
        for (i = 0; i < s->slice_count; i++) {
            FFV1Context *sf = s->slice_context[i];
            av_freep(&sf->rc_stat2[j]);
        }
        av_freep(&s->rc_stat2[j]);
    }

    /* Freed last: the loops above still dereference the slice contexts. */
    for (i = 0; i < s->slice_count; i++)
        av_freep(&s->slice_context[i]);

    return 0;
}
/**
 * Decode one line of a plane into sample[1], using sample[0] (previous
 * line) and already-decoded pixels of sample[1] for context/prediction.
 *
 * In range-coder mode each residual is read with get_symbol_inline().
 * In Golomb-Rice mode a JPEG-LS style run mode is entered when the
 * context is 0; run lengths follow the ff_log2_run[] adaptive table.
 *
 * @param s           slice context (coder state, run index)
 * @param w           line width in samples
 * @param sample      two-line window; sample[1] is written in place
 * @param plane_index selects the PlaneContext (contexts, vlc state)
 * @param bits        bit depth; output masked to bits wide
 */
static av_always_inline void decode_line(FFV1Context *s, int w,
                                         int16_t *sample[2],
                                         int plane_index, int bits)
{
    PlaneContext *const p = &s->plane[plane_index];
    RangeCoder *const c   = &s->c;
    int x;
    int run_count = 0;
    /* run_mode: 0 = off, 1 = reading run bits, 2 = run length fixed */
    int run_mode  = 0;
    int run_index = s->run_index;

    for (x = 0; x < w; x++) {
        int diff, context, sign;

        /* Negative context means the sign of the residual is flipped. */
        context = get_context(p, sample[1] + x, sample[0] + x, sample[1] + x);
        if (context < 0) {
            context = -context;
            sign    = 1;
        } else
            sign = 0;

        av_assert2(context < p->context_count);

        if (s->ac) {
            diff = get_symbol_inline(c, p->state[context], 1);
        } else {
            if (context == 0 && run_mode == 0)
                run_mode = 1;

            if (run_mode) {
                if (run_count == 0 && run_mode == 1) {
                    if (get_bits1(&s->gb)) {
                        /* full run of 2^log2_run samples; grow the index
                         * only if the run fits in the line */
                        run_count = 1 << ff_log2_run[run_index];
                        if (x + run_count <= w)
                            run_index++;
                    } else {
                        /* partial run: explicit length, shrink the index */
                        if (ff_log2_run[run_index])
                            run_count = get_bits(&s->gb, ff_log2_run[run_index]);
                        else
                            run_count = 0;
                        if (run_index)
                            run_index--;
                        run_mode = 2;
                    }
                }
                run_count--;
                if (run_count < 0) {
                    /* run ended: the interrupting sample follows, with the
                     * zero residual remapped (diff >= 0 shifted up by 1) */
                    run_mode  = 0;
                    run_count = 0;
                    diff      = get_vlc_symbol(&s->gb, &p->vlc_state[context],
                                               bits);
                    if (diff >= 0)
                        diff++;
                } else
                    diff = 0;
            } else
                diff = get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);

            av_dlog(s->avctx, "count:%d index:%d, mode:%d, x:%d pos:%d\n",
                    run_count, run_index, run_mode, x, get_bits_count(&s->gb));
        }

        if (sign)
            diff = -diff;

        /* Reconstruct modulo 2^bits. */
        sample[1][x] = (predict(sample[1] + x, sample[0] + x) + diff) &
                       ((1 << bits) - 1);
    }
    s->run_index = run_index;
}
/**
 * Decode a full plane into src using a two-row sliding sample window.
 *
 * Samples <= 8 bits are stored as bytes; deeper samples are stored as
 * 16-bit values shifted up to the MSBs (matching the P16 pixel formats).
 *
 * @param s           slice context
 * @param src         destination for this plane (already offset to the slice)
 * @param w, h        plane dimensions in samples
 * @param stride      destination stride in bytes
 * @param plane_index context set to use (0 = luma, 1 = chroma)
 */
static void decode_plane(FFV1Context *s, uint8_t *src,
                         int w, int h, int stride, int plane_index)
{
    int x, y;
    int16_t *sample[2];

    /* Each row gets w + 6 entries; +3 skips the left padding used by the
     * neighbour accesses of the predictor/context. */
    sample[0] = s->sample_buffer + 3;
    sample[1] = s->sample_buffer + w + 6 + 3;

    s->run_index = 0;

    memset(s->sample_buffer, 0, 2 * (w + 6) * sizeof(*s->sample_buffer));

    for (y = 0; y < h; y++) {
        int16_t *temp = sample[0]; // FIXME: try a normal buffer

        /* Swap rows: last decoded line becomes the "previous" line. */
        sample[0] = sample[1];
        sample[1] = temp;

        /* Extend the previous line into the padding columns. */
        sample[1][-1] = sample[0][0];
        sample[0][w]  = sample[0][w - 1];

        // { START_TIMER
        if (s->avctx->bits_per_raw_sample <= 8) {
            decode_line(s, w, sample, plane_index, 8);
            for (x = 0; x < w; x++)
                src[x + stride * y] = sample[1][x];
        } else {
            decode_line(s, w, sample, plane_index,
                        s->avctx->bits_per_raw_sample);
            for (x = 0; x < w; x++)
                ((uint16_t *)(src + stride * y))[x] =
                    sample[1][x] << (16 - s->avctx->bits_per_raw_sample);
        }
        // STOP_TIMER("decode-line") }
    }
}
/**
 * Decode an RGB32 slice. The three channels are coded as G, B-G, R-G
 * (a JPEG2000-style reversible transform, inverted per pixel below),
 * with B and R residuals offset by 0x100 (hence 9-bit decode_line).
 *
 * @param s      slice context
 * @param src    destination, in 32-bit pixels, offset to the slice
 * @param w, h   slice dimensions
 * @param stride destination stride in 32-bit pixels
 */
static void decode_rgb_frame(FFV1Context *s, uint32_t *src,
                             int w, int h, int stride)
{
    int x, y, p;
    int16_t *sample[3][2];

    /* Three planes, two rows each, laid out in one sample buffer. */
    for (x = 0; x < 3; x++) {
        sample[x][0] = s->sample_buffer + x * 2 * (w + 6) + 3;
        sample[x][1] = s->sample_buffer + (x * 2 + 1) * (w + 6) + 3;
    }

    s->run_index = 0;

    memset(s->sample_buffer, 0, 6 * (w + 6) * sizeof(*s->sample_buffer));

    for (y = 0; y < h; y++) {
        for (p = 0; p < 3; p++) {
            int16_t *temp = sample[p][0]; // FIXME: try a normal buffer

            sample[p][0] = sample[p][1];
            sample[p][1] = temp;

            sample[p][1][-1] = sample[p][0][0];
            sample[p][0][w]  = sample[p][0][w - 1];
            /* planes 1 and 2 share context set 1; 9 bits covers the
             * offset difference signals */
            decode_line(s, w, sample[p], FFMIN(p, 1), 9);
        }
        for (x = 0; x < w; x++) {
            int g = sample[0][1][x];
            int b = sample[1][1][x];
            int r = sample[2][1][x];

            //            assert(g >= 0 && b >= 0 && r >= 0);
            //            assert(g < 256 && b < 512 && r < 512);

            /* Inverse reversible color transform, then pack BGRA. */
            b -= 0x100;
            r -= 0x100;
            g -= (b + r) >> 2;
            b += g;
            r += g;

            src[x + stride * y] = b + (g << 8) + (r << 16) + (0xFF << 24);
        }
    }
}
  1274. static int decode_slice(AVCodecContext *c, void *arg)
  1275. {
  1276. FFV1Context *fs = *(void **)arg;
  1277. FFV1Context *f = fs->avctx->priv_data;
  1278. int width = fs->slice_width;
  1279. int height = fs->slice_height;
  1280. int x = fs->slice_x;
  1281. int y = fs->slice_y;
  1282. AVFrame *const p = &f->picture;
  1283. av_assert1(width && height);
  1284. if (f->colorspace == 0) {
  1285. const int chroma_width = -((-width) >> f->chroma_h_shift);
  1286. const int chroma_height = -((-height) >> f->chroma_v_shift);
  1287. const int cx = x >> f->chroma_h_shift;
  1288. const int cy = y >> f->chroma_v_shift;
  1289. decode_plane(fs, p->data[0] + x + y * p->linesize[0],
  1290. width, height, p->linesize[0], 0);
  1291. decode_plane(fs, p->data[1] + cx + cy * p->linesize[1],
  1292. chroma_width, chroma_height, p->linesize[1], 1);
  1293. decode_plane(fs, p->data[2] + cx + cy * p->linesize[1],
  1294. chroma_width, chroma_height, p->linesize[2], 1);
  1295. } else {
  1296. decode_rgb_frame(fs,
  1297. (uint32_t *)p->data[0] + x + y * (p->linesize[0] / 4),
  1298. width, height, p->linesize[0] / 4);
  1299. }
  1300. emms_c();
  1301. return 0;
  1302. }
  1303. static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale)
  1304. {
  1305. int v;
  1306. int i = 0;
  1307. uint8_t state[CONTEXT_SIZE];
  1308. memset(state, 128, sizeof(state));
  1309. for (v = 0; i < 128; v++) {
  1310. int len = get_symbol(c, state, 0) + 1;
  1311. if (len + i > 128)
  1312. return -1;
  1313. while (len--) {
  1314. quant_table[i] = scale * v;
  1315. i++;
  1316. }
  1317. }
  1318. for (i = 1; i < 128; i++)
  1319. quant_table[256 - i] = -quant_table[i];
  1320. quant_table[128] = -quant_table[127];
  1321. return 2 * v - 1;
  1322. }
  1323. static int read_quant_tables(RangeCoder *c,
  1324. int16_t quant_table[MAX_CONTEXT_INPUTS][256])
  1325. {
  1326. int i;
  1327. int context_count = 1;
  1328. for (i = 0; i < 5; i++) {
  1329. context_count *= read_quant_table(c, quant_table[i], context_count);
  1330. if (context_count > 32768U) {
  1331. return -1;
  1332. }
  1333. }
  1334. return (context_count + 1) / 2;
  1335. }
/**
 * Parse the version >= 2 global header stored in extradata: coder type,
 * state-transition table, colorspace, slice grid, quantization tables
 * and optional custom initial range-coder states.
 *
 * @return 0 on success, -1 on invalid data, AVERROR(ENOMEM) on OOM
 */
static int read_extra_header(FFV1Context *f)
{
    RangeCoder *const c = &f->c;
    uint8_t state[CONTEXT_SIZE];
    int i, j, k;
    uint8_t state2[32][CONTEXT_SIZE];

    memset(state2, 128, sizeof(state2));
    memset(state, 128, sizeof(state));

    ff_init_range_decoder(c, f->avctx->extradata, f->avctx->extradata_size);
    ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);

    f->version = get_symbol(c, state, 0);
    f->ac      = f->avctx->coder_type = get_symbol(c, state, 0);
    if (f->ac > 1)
        /* Custom range-coder state transitions, coded as deltas. */
        for (i = 1; i < 256; i++)
            f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
    f->colorspace                 = get_symbol(c, state, 0); // YUV cs type
    f->avctx->bits_per_raw_sample = get_symbol(c, state, 0);
    /* NOTE(review): the next two rac bits (chroma planes present,
     * transparency plane) are read but their values are discarded;
     * plane_count is hard-wired to 2 below. */
    get_rac(c, state); // no chroma = false
    f->chroma_h_shift = get_symbol(c, state, 0);
    f->chroma_v_shift = get_symbol(c, state, 0);
    get_rac(c, state); // transparency plane
    f->plane_count    = 2;
    f->num_h_slices   = 1 + get_symbol(c, state, 0);
    f->num_v_slices   = 1 + get_symbol(c, state, 0);

    /* More slices than pixels in either direction cannot be valid. */
    if (f->num_h_slices > (unsigned)f->width ||
        f->num_v_slices > (unsigned)f->height) {
        av_log(f->avctx, AV_LOG_ERROR, "too many slices\n");
        return -1;
    }

    f->quant_table_count = get_symbol(c, state, 0);
    if (f->quant_table_count > (unsigned)MAX_QUANT_TABLES)
        return -1;
    for (i = 0; i < f->quant_table_count; i++) {
        f->context_count[i] = read_quant_tables(c, f->quant_tables[i]);
        if (f->context_count[i] < 0) {
            av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
            return -1;
        }
    }

    if (allocate_initial_states(f) < 0)
        return AVERROR(ENOMEM);

    /* Optional per-table custom initial states, delta-coded against the
     * previous context (128 for the first). */
    for (i = 0; i < f->quant_table_count; i++)
        if (get_rac(c, state))
            for (j = 0; j < f->context_count[i]; j++)
                for (k = 0; k < CONTEXT_SIZE; k++) {
                    int pred = j ? f->initial_states[i][j - 1][k] : 128;
                    f->initial_states[i][j][k] =
                        (pred + get_symbol(c, state2[k], 1)) & 0xFF;
                }
    return 0;
}
  1387. static int read_header(FFV1Context *f)
  1388. {
  1389. uint8_t state[CONTEXT_SIZE];
  1390. int i, j, context_count;
  1391. RangeCoder *const c = &f->slice_context[0]->c;
  1392. memset(state, 128, sizeof(state));
  1393. if (f->version < 2) {
  1394. f->version = get_symbol(c, state, 0);
  1395. f->ac = f->avctx->coder_type = get_symbol(c, state, 0);
  1396. if (f->ac > 1)
  1397. for (i = 1; i < 256; i++)
  1398. f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
  1399. f->colorspace = get_symbol(c, state, 0); // YUV cs type
  1400. if (f->version > 0)
  1401. f->avctx->bits_per_raw_sample = get_symbol(c, state, 0);
  1402. get_rac(c, state); // no chroma = false
  1403. f->chroma_h_shift = get_symbol(c, state, 0);
  1404. f->chroma_v_shift = get_symbol(c, state, 0);
  1405. get_rac(c, state); // transparency plane
  1406. f->plane_count = 2;
  1407. }
  1408. if (f->colorspace == 0) {
  1409. if (f->avctx->bits_per_raw_sample <= 8) {
  1410. switch (16 * f->chroma_h_shift + f->chroma_v_shift) {
  1411. case 0x00:
  1412. f->avctx->pix_fmt = AV_PIX_FMT_YUV444P;
  1413. break;
  1414. case 0x10:
  1415. f->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
  1416. break;
  1417. case 0x11:
  1418. f->avctx->pix_fmt = AV_PIX_FMT_YUV420P;
  1419. break;
  1420. case 0x20:
  1421. f->avctx->pix_fmt = AV_PIX_FMT_YUV411P;
  1422. break;
  1423. case 0x22:
  1424. f->avctx->pix_fmt = AV_PIX_FMT_YUV410P;
  1425. break;
  1426. default:
  1427. av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
  1428. return -1;
  1429. }
  1430. } else {
  1431. switch (16 * f->chroma_h_shift + f->chroma_v_shift) {
  1432. case 0x00:
  1433. f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
  1434. break;
  1435. case 0x10:
  1436. f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
  1437. break;
  1438. case 0x11:
  1439. f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
  1440. break;
  1441. default:
  1442. av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
  1443. return -1;
  1444. }
  1445. }
  1446. } else if (f->colorspace == 1) {
  1447. if (f->chroma_h_shift || f->chroma_v_shift) {
  1448. av_log(f->avctx, AV_LOG_ERROR,
  1449. "chroma subsampling not supported in this colorspace\n");
  1450. return -1;
  1451. }
  1452. f->avctx->pix_fmt = AV_PIX_FMT_RGB32;
  1453. } else {
  1454. av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
  1455. return -1;
  1456. }
  1457. av_dlog(f->avctx, "%d %d %d\n",
  1458. f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
  1459. if (f->version < 2) {
  1460. context_count = read_quant_tables(c, f->quant_table);
  1461. if (context_count < 0) {
  1462. av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
  1463. return -1;
  1464. }
  1465. } else {
  1466. f->slice_count = get_symbol(c, state, 0);
  1467. if (f->slice_count > (unsigned)MAX_SLICES)
  1468. return -1;
  1469. }
  1470. for (j = 0; j < f->slice_count; j++) {
  1471. FFV1Context *fs = f->slice_context[j];
  1472. fs->ac = f->ac;
  1473. if (f->version >= 2) {
  1474. fs->slice_x = get_symbol(c, state, 0) * f->width;
  1475. fs->slice_y = get_symbol(c, state, 0) * f->height;
  1476. fs->slice_width = (get_symbol(c, state, 0) + 1) * f->width + fs->slice_x;
  1477. fs->slice_height = (get_symbol(c, state, 0) + 1) * f->height + fs->slice_y;
  1478. fs->slice_x /= f->num_h_slices;
  1479. fs->slice_y /= f->num_v_slices;
  1480. fs->slice_width = fs->slice_width / f->num_h_slices - fs->slice_x;
  1481. fs->slice_height = fs->slice_height / f->num_v_slices - fs->slice_y;
  1482. if ((unsigned)fs->slice_width > f->width ||
  1483. (unsigned)fs->slice_height > f->height)
  1484. return -1;
  1485. if ((unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width ||
  1486. (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
  1487. return -1;
  1488. }
  1489. for (i = 0; i < f->plane_count; i++) {
  1490. PlaneContext *const p = &fs->plane[i];
  1491. if (f->version >= 2) {
  1492. int idx = get_symbol(c, state, 0);
  1493. if (idx > (unsigned)f->quant_table_count) {
  1494. av_log(f->avctx, AV_LOG_ERROR,
  1495. "quant_table_index out of range\n");
  1496. return -1;
  1497. }
  1498. p->quant_table_index = idx;
  1499. memcpy(p->quant_table, f->quant_tables[idx],
  1500. sizeof(p->quant_table));
  1501. context_count = f->context_count[idx];
  1502. } else {
  1503. memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
  1504. }
  1505. if (p->context_count < context_count) {
  1506. av_freep(&p->state);
  1507. av_freep(&p->vlc_state);
  1508. }
  1509. p->context_count = context_count;
  1510. }
  1511. }
  1512. return 0;
  1513. }
  1514. static av_cold int decode_init(AVCodecContext *avctx)
  1515. {
  1516. FFV1Context *f = avctx->priv_data;
  1517. common_init(avctx);
  1518. if (avctx->extradata && read_extra_header(f) < 0)
  1519. return -1;
  1520. if (init_slice_contexts(f) < 0)
  1521. return -1;
  1522. return 0;
  1523. }
/**
 * Decode one packet into an AVFrame.
 *
 * Reads the keyframe bit and (on keyframes) the header, sets up one
 * coder per slice by walking the 24-bit slice-size trailers backwards
 * from the end of the packet, then decodes all slices in parallel.
 *
 * @param data      output AVFrame (shallow copy of f->picture)
 * @param data_size set to sizeof(AVFrame) on success
 * @return consumed bytes (buf_size) on success, -1 on error
 */
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf  = avpkt->data;
    int buf_size        = avpkt->size;
    FFV1Context *f      = avctx->priv_data;
    RangeCoder *const c = &f->slice_context[0]->c;
    AVFrame *const p    = &f->picture;
    int bytes_read, i;
    uint8_t keystate    = 128;
    const uint8_t *buf_p;
    AVFrame *picture = data;

    /* release previously stored data */
    if (p->data[0])
        avctx->release_buffer(avctx, p);

    ff_init_range_decoder(c, buf, buf_size);
    ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);

    p->pict_type = AV_PICTURE_TYPE_I; // FIXME: I vs. P
    if (get_rac(c, &keystate)) {
        /* Keyframe: re-read the header and reset coder state. */
        p->key_frame = 1;
        if (read_header(f) < 0)
            return -1;
        if (init_slice_state(f) < 0)
            return -1;
        clear_state(f);
    } else {
        p->key_frame = 0;
    }

    if (f->ac > 1) {
        /* Apply the custom state-transition table to slice 0's coder. */
        int i;
        for (i = 1; i < 256; i++) {
            c->one_state[i]        = f->state_transition[i];
            c->zero_state[256 - i] = 256 - c->one_state[i];
        }
    }

    p->reference = 0;
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if (avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(avctx, AV_LOG_ERROR, "keyframe:%d coder:%d\n", p->key_frame, f->ac);

    if (!f->ac) {
        /* VLC mode: header was range-coded; bit reader starts after it. */
        bytes_read = c->bytestream - c->bytestream_start - 1;
        if (bytes_read == 0)
            av_log(avctx, AV_LOG_ERROR, "error at end of AC stream\n"); // FIXME
        init_get_bits(&f->slice_context[0]->gb, buf + bytes_read,
                      (buf_size - bytes_read) * 8);
    } else {
        bytes_read = 0; /* avoid warning */
    }

    /* Slices 1..n-1 end with a 24-bit big-endian size; walk them back
     * from the packet end. Slice 0 implicitly gets whatever remains. */
    buf_p = buf + buf_size;
    for (i = f->slice_count - 1; i > 0; i--) {
        FFV1Context *fs = f->slice_context[i];
        int v           = AV_RB24(buf_p - 3) + 3;

        if (buf_p - buf <= v) {
            av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
            return -1;
        }
        buf_p -= v;
        if (fs->ac)
            ff_init_range_decoder(&fs->c, buf_p, v);
        else
            init_get_bits(&fs->gb, buf_p, v * 8);
    }

    avctx->execute(avctx, decode_slice, &f->slice_context[0],
                   NULL, f->slice_count, sizeof(void *));
    f->picture_number++;

    *picture   = *p;
    *data_size = sizeof(AVFrame);

    return buf_size;
}
/* FFV1 decoder registration: direct rendering plus per-slice threading. */
AVCodec ff_ffv1_decoder = {
    .name           = "ffv1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FFV1,
    .priv_data_size = sizeof(FFV1Context),
    .init           = decode_init,
    .close          = common_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ |
                      CODEC_CAP_SLICE_THREADS,
    .long_name      = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
};
#if CONFIG_FFV1_ENCODER
/* FFV1 encoder registration; lists every pixel format encode_init()
 * accepts in its pix_fmt switch. */
AVCodec ff_ffv1_encoder = {
    .name           = "ffv1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FFV1,
    .priv_data_size = sizeof(FFV1Context),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = common_end,
    .capabilities   = CODEC_CAP_SLICE_THREADS,
    .pix_fmts       = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_RGB32,
        AV_PIX_FMT_NONE
    },
    .long_name      = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
};
#endif