/*
 * RoQ Video Encoder.
 *
 * Copyright (C) 2007 Vitor Sessak <vitor1001@gmail.com>
 * Copyright (C) 2004-2007 Eric Lasota
 *    Based on RoQ specs (C) 2001 Tim Ferguson
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * id RoQ encoder by Vitor. Based on the Switchblade3 library and the
 * Switchblade3 FFmpeg glue by Eric Lasota.
 */

/*
 * COSTS:
 * Level 1:
 *  SKIP - 2 bits
 *  MOTION - 2 + 8 bits
 *  CODEBOOK - 2 + 8 bits
 *  SUBDIVIDE - 2 + combined subcel cost
 *
 * Level 2:
 *  SKIP - 2 bits
 *  MOTION - 2 + 8 bits
 *  CODEBOOK - 2 + 8 bits
 *  SUBDIVIDE - 2 + 4*8 bits
 *
 * Maximum cost: 138 bits per cel
 *
 * Proper evaluation requires LCD fraction comparison, which requires
 * Squared Error (SE) loss * savings increase
 *
 * Maximum savings increase: 136 bits
 * Maximum SE loss without overflow: 31580641
 * Components in 8x8 supercel: 192
 * Maximum SE precision per component: 164482
 *     >65025, so no truncation is needed (phew)
 */
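
/*
 * Worked check of the numbers above: the 138-bit maximum is a fully
 * subdivided cel, 2 + 4*(2 + 4*8) = 138; 136 = 138 - 2 is the worst-case
 * gain over SKIP; 2^32/136 = 31580641; 31580641/192 = 164482 > 255^2 = 65025.
 */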

#include <string.h>

#include "libavutil/attributes.h"
#include "libavutil/opt.h"
#include "roqvideo.h"
#include "bytestream.h"
#include "elbg.h"
#include "internal.h"
#include "mathops.h"
#define CHROMA_BIAS 1

/**
 * Maximum number of generated 4x4 codebooks. The quake3_compat option
 * limits the actual number to 255 to work around a Quake 3 bug.
 */
#define MAX_CBS_4x4 256

#define MAX_CBS_2x2 256 ///< Maximum number of 2x2 codebooks.

/* The cast is useful when multiplying it by INT_MAX */
#define ROQ_LAMBDA_SCALE ((uint64_t) FF_LAMBDA_SCALE)

/* Macroblock support functions */
static void unpack_roq_cell(roq_cell *cell, uint8_t u[4*3])
{
    memcpy(u  , cell->y, 4);
    memset(u+4, cell->u, 4);
    memset(u+8, cell->v, 4);
}

static void unpack_roq_qcell(uint8_t cb2[], roq_qcell *qcell, uint8_t u[4*4*3])
{
    int i,cp;
    static const int offsets[4] = {0, 2, 8, 10};

    for (cp=0; cp<3; cp++)
        for (i=0; i<4; i++) {
            u[4*4*cp + offsets[i]  ] = cb2[qcell->idx[i]*2*2*3 + 4*cp  ];
            u[4*4*cp + offsets[i]+1] = cb2[qcell->idx[i]*2*2*3 + 4*cp+1];
            u[4*4*cp + offsets[i]+4] = cb2[qcell->idx[i]*2*2*3 + 4*cp+2];
            u[4*4*cp + offsets[i]+5] = cb2[qcell->idx[i]*2*2*3 + 4*cp+3];
        }
}
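
/**
 * Enlarge an unpacked 4x4 codebook entry to 8x8 by doubling each pixel.
 */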
static void enlarge_roq_mb4(uint8_t base[3*16], uint8_t u[3*64])
{
    int x,y,cp;

    for(cp=0; cp<3; cp++)
        for(y=0; y<8; y++)
            for(x=0; x<8; x++)
                *u++ = base[(y/2)*4 + (x/2) + 16*cp];
}

static inline int square(int x)
{
    return x*x;
}
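
/**
 * Return the sum of squared differences between two byte buffers.
 */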
static inline int eval_sse(const uint8_t *a, const uint8_t *b, int count)
{
    int diff=0;

    while(count--)
        diff += square(*b++ - *a++);

    return diff;
}

// FIXME Could use DSPContext.sse, but it is not so speed critical (used
// just for motion estimation).
static int block_sse(uint8_t * const *buf1, uint8_t * const *buf2, int x1, int y1,
                     int x2, int y2, const int *stride1, const int *stride2, int size)
{
    int i, k;
    int sse=0;

    for (k=0; k<3; k++) {
        int bias = (k ? CHROMA_BIAS : 4);
        for (i=0; i<size; i++)
            sse += bias*eval_sse(buf1[k] + (y1+i)*stride1[k] + x1,
                                 buf2[k] + (y2+i)*stride2[k] + x2, size);
    }

    return sse;
}

static int eval_motion_dist(RoqContext *enc, int x, int y, motion_vect vect,
                            int size)
{
    int mx=vect.d[0];
    int my=vect.d[1];

    if (mx < -7 || mx > 7)
        return INT_MAX;

    if (my < -7 || my > 7)
        return INT_MAX;

    mx += x;
    my += y;

    if ((unsigned) mx > enc->width-size || (unsigned) my > enc->height-size)
        return INT_MAX;

    return block_sse(enc->frame_to_enc->data, enc->last_frame->data, x, y,
                     mx, my,
                     enc->frame_to_enc->linesize, enc->last_frame->linesize,
                     size);
}

/**
 * @return distortion between two macroblocks
 */
static inline int squared_diff_macroblock(uint8_t a[], uint8_t b[], int size)
{
    int cp, sdiff=0;

    for(cp=0;cp<3;cp++) {
        int bias = (cp ? CHROMA_BIAS : 4);
        sdiff += bias*eval_sse(a, b, size*size);
        a += size*size;
        b += size*size;
    }

    return sdiff;
}

typedef struct
{
    int eval_dist[4];
    int best_bit_use;
    int best_coding;

    int subCels[4];
    motion_vect motion;
    int cbEntry;
} SubcelEvaluation;

typedef struct
{
    int eval_dist[4];
    int best_coding;

    SubcelEvaluation subCels[4];

    motion_vect motion;
    int cbEntry;

    int sourceX, sourceY;
} CelEvaluation;

typedef struct
{
    int numCB4;
    int numCB2;
    int usedCB2[MAX_CBS_2x2];
    int usedCB4[MAX_CBS_4x4];
    uint8_t unpacked_cb2[MAX_CBS_2x2*2*2*3];
    uint8_t unpacked_cb4[MAX_CBS_4x4*4*4*3];
    uint8_t unpacked_cb4_enlarged[MAX_CBS_4x4*8*8*3];
} RoqCodebooks;

/**
 * Temporary vars
 */
typedef struct RoqTempData
{
    CelEvaluation *cel_evals;

    int f2i4[MAX_CBS_4x4];
    int i2f4[MAX_CBS_4x4];
    int f2i2[MAX_CBS_2x2];
    int i2f2[MAX_CBS_2x2];

    int mainChunkSize;

    int numCB4;
    int numCB2;

    RoqCodebooks codebooks;

    int *closest_cb2;
    int used_option[4];
} RoqTempdata;

/**
 * Initialize cel evaluators and set their source coordinates
 */
static void create_cel_evals(RoqContext *enc, RoqTempdata *tempData)
{
    int n=0, x, y, i;

    tempData->cel_evals = av_malloc(enc->width*enc->height/64 * sizeof(CelEvaluation));

    /* Map to the ROQ quadtree order */
    for (y=0; y<enc->height; y+=16)
        for (x=0; x<enc->width; x+=16)
            for(i=0; i<4; i++) {
                tempData->cel_evals[n  ].sourceX = x + (i&1)*8;
                tempData->cel_evals[n++].sourceY = y + (i&2)*4;
            }
}

/**
 * Get macroblocks from parts of the image
 */
static void get_frame_mb(const AVFrame *frame, int x, int y, uint8_t mb[], int dim)
{
    int i, j, cp;

    for (cp=0; cp<3; cp++) {
        int stride = frame->linesize[cp];
        for (i=0; i<dim; i++)
            for (j=0; j<dim; j++)
                *mb++ = frame->data[cp][(y+i)*stride + x + j];
    }
}

/**
 * Find the codebook with the lowest distortion from an image
 */
static int index_mb(uint8_t cluster[], uint8_t cb[], int numCB,
                    int *outIndex, int dim)
{
    int i, lDiff = INT_MAX, pick=0;

    /* Diff against the others */
    for (i=0; i<numCB; i++) {
        int diff = squared_diff_macroblock(cluster, cb + i*dim*dim*3, dim);
        if (diff < lDiff) {
            lDiff = diff;
            pick = i;
        }
    }

    *outIndex = pick;
    return lDiff;
}

#define EVAL_MOTION(MOTION) \
    do { \
        diff = eval_motion_dist(enc, j, i, MOTION, blocksize); \
        \
        if (diff < lowestdiff) { \
            lowestdiff = diff; \
            bestpick = MOTION; \
        } \
    } while(0)
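
/**
 * Search for the motion vector that best predicts each block of the given
 * size from the previous frame. Candidates are the zero vector, the vectors
 * found for neighbouring and co-located blocks, followed by a greedy local
 * refinement over the eight neighbouring offsets until no improvement.
 */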
static void motion_search(RoqContext *enc, int blocksize)
{
    static const motion_vect offsets[8] = {
        {{ 0,-1}},
        {{ 0, 1}},
        {{-1, 0}},
        {{ 1, 0}},
        {{-1, 1}},
        {{ 1,-1}},
        {{-1,-1}},
        {{ 1, 1}},
    };

    int diff, lowestdiff, oldbest;
    int off[3];
    motion_vect bestpick = {{0,0}};
    int i, j, k, offset;

    motion_vect *last_motion;
    motion_vect *this_motion;
    motion_vect vect, vect2;

    int max=(enc->width/blocksize)*enc->height/blocksize;

    if (blocksize == 4) {
        last_motion = enc->last_motion4;
        this_motion = enc->this_motion4;
    } else {
        last_motion = enc->last_motion8;
        this_motion = enc->this_motion8;
    }

    for (i=0; i<enc->height; i+=blocksize)
        for (j=0; j<enc->width; j+=blocksize) {
            lowestdiff = eval_motion_dist(enc, j, i, (motion_vect) {{0,0}},
                                          blocksize);
            bestpick.d[0] = 0;
            bestpick.d[1] = 0;

            if (blocksize == 4)
                EVAL_MOTION(enc->this_motion8[(i/8)*(enc->width/8) + j/8]);

            offset = (i/blocksize)*enc->width/blocksize + j/blocksize;
            if (offset < max && offset >= 0)
                EVAL_MOTION(last_motion[offset]);

            offset++;
            if (offset < max && offset >= 0)
                EVAL_MOTION(last_motion[offset]);

            offset = (i/blocksize + 1)*enc->width/blocksize + j/blocksize;
            if (offset < max && offset >= 0)
                EVAL_MOTION(last_motion[offset]);

            off[0]= (i/blocksize)*enc->width/blocksize + j/blocksize - 1;
            off[1]= off[0] - enc->width/blocksize + 1;
            off[2]= off[1] + 1;

            if (i) {
                for(k=0; k<2; k++)
                    vect.d[k]= mid_pred(this_motion[off[0]].d[k],
                                        this_motion[off[1]].d[k],
                                        this_motion[off[2]].d[k]);

                EVAL_MOTION(vect);
                for(k=0; k<3; k++)
                    EVAL_MOTION(this_motion[off[k]]);
            } else if(j)
                EVAL_MOTION(this_motion[off[0]]);

            vect = bestpick;

            oldbest = -1;
            while (oldbest != lowestdiff) {
                oldbest = lowestdiff;
                for (k=0; k<8; k++) {
                    vect2 = vect;
                    vect2.d[0] += offsets[k].d[0];
                    vect2.d[1] += offsets[k].d[1];
                    EVAL_MOTION(vect2);
                }
                vect = bestpick;
            }
            offset = (i/blocksize)*enc->width/blocksize + j/blocksize;
            this_motion[offset] = bestpick;
        }
}

/**
 * Get distortion for all options available to a subcel
 */
static void gather_data_for_subcel(SubcelEvaluation *subcel, int x,
                                   int y, RoqContext *enc, RoqTempdata *tempData)
{
    uint8_t mb4[4*4*3];
    uint8_t mb2[2*2*3];
    int cluster_index;
    int i, best_dist;
    static const int bitsUsed[4] = {2, 10, 10, 34};

    if (enc->framesSinceKeyframe >= 1) {
        subcel->motion = enc->this_motion4[y*enc->width/16 + x/4];

        subcel->eval_dist[RoQ_ID_FCC] =
            eval_motion_dist(enc, x, y,
                             enc->this_motion4[y*enc->width/16 + x/4], 4);
    } else
        subcel->eval_dist[RoQ_ID_FCC] = INT_MAX;

    if (enc->framesSinceKeyframe >= 2)
        subcel->eval_dist[RoQ_ID_MOT] = block_sse(enc->frame_to_enc->data,
                                                  enc->current_frame->data, x,
                                                  y, x, y,
                                                  enc->frame_to_enc->linesize,
                                                  enc->current_frame->linesize,
                                                  4);
    else
        subcel->eval_dist[RoQ_ID_MOT] = INT_MAX;

    cluster_index = y*enc->width/16 + x/4;

    get_frame_mb(enc->frame_to_enc, x, y, mb4, 4);

    subcel->eval_dist[RoQ_ID_SLD] = index_mb(mb4,
                                             tempData->codebooks.unpacked_cb4,
                                             tempData->codebooks.numCB4,
                                             &subcel->cbEntry, 4);

    subcel->eval_dist[RoQ_ID_CCC] = 0;

    for(i=0;i<4;i++) {
        subcel->subCels[i] = tempData->closest_cb2[cluster_index*4+i];

        get_frame_mb(enc->frame_to_enc, x+2*(i&1),
                     y+(i&2), mb2, 2);

        subcel->eval_dist[RoQ_ID_CCC] +=
            squared_diff_macroblock(tempData->codebooks.unpacked_cb2 + subcel->subCels[i]*2*2*3, mb2, 2);
    }

    best_dist = INT_MAX;
    for (i=0; i<4; i++)
        if (ROQ_LAMBDA_SCALE*subcel->eval_dist[i] + enc->lambda*bitsUsed[i] <
            best_dist) {
            subcel->best_coding = i;
            subcel->best_bit_use = bitsUsed[i];
            best_dist = ROQ_LAMBDA_SCALE*subcel->eval_dist[i] +
                enc->lambda*bitsUsed[i];
        }
}

/**
 * Get distortion for all options available to a cel
 */
static void gather_data_for_cel(CelEvaluation *cel, RoqContext *enc,
                                RoqTempdata *tempData)
{
    uint8_t mb8[8*8*3];
    int index = cel->sourceY*enc->width/64 + cel->sourceX/8;
    int i, j, best_dist, divide_bit_use;

    int bitsUsed[4] = {2, 10, 10, 0};

    if (enc->framesSinceKeyframe >= 1) {
        cel->motion = enc->this_motion8[index];

        cel->eval_dist[RoQ_ID_FCC] =
            eval_motion_dist(enc, cel->sourceX, cel->sourceY,
                             enc->this_motion8[index], 8);
    } else
        cel->eval_dist[RoQ_ID_FCC] = INT_MAX;

    if (enc->framesSinceKeyframe >= 2)
        cel->eval_dist[RoQ_ID_MOT] = block_sse(enc->frame_to_enc->data,
                                               enc->current_frame->data,
                                               cel->sourceX, cel->sourceY,
                                               cel->sourceX, cel->sourceY,
                                               enc->frame_to_enc->linesize,
                                               enc->current_frame->linesize,8);
    else
        cel->eval_dist[RoQ_ID_MOT] = INT_MAX;

    get_frame_mb(enc->frame_to_enc, cel->sourceX, cel->sourceY, mb8, 8);

    cel->eval_dist[RoQ_ID_SLD] =
        index_mb(mb8, tempData->codebooks.unpacked_cb4_enlarged,
                 tempData->codebooks.numCB4, &cel->cbEntry, 8);

    gather_data_for_subcel(cel->subCels + 0, cel->sourceX+0, cel->sourceY+0, enc, tempData);
    gather_data_for_subcel(cel->subCels + 1, cel->sourceX+4, cel->sourceY+0, enc, tempData);
    gather_data_for_subcel(cel->subCels + 2, cel->sourceX+0, cel->sourceY+4, enc, tempData);
    gather_data_for_subcel(cel->subCels + 3, cel->sourceX+4, cel->sourceY+4, enc, tempData);

    cel->eval_dist[RoQ_ID_CCC] = 0;
    divide_bit_use = 0;
    for (i=0; i<4; i++) {
        cel->eval_dist[RoQ_ID_CCC] +=
            cel->subCels[i].eval_dist[cel->subCels[i].best_coding];
        divide_bit_use += cel->subCels[i].best_bit_use;
    }

    best_dist = INT_MAX;
    bitsUsed[3] = 2 + divide_bit_use;

    for (i=0; i<4; i++)
        if (ROQ_LAMBDA_SCALE*cel->eval_dist[i] + enc->lambda*bitsUsed[i] <
            best_dist) {
            cel->best_coding = i;
            best_dist = ROQ_LAMBDA_SCALE*cel->eval_dist[i] +
                enc->lambda*bitsUsed[i];
        }

    tempData->used_option[cel->best_coding]++;
    tempData->mainChunkSize += bitsUsed[cel->best_coding];

    if (cel->best_coding == RoQ_ID_SLD)
        tempData->codebooks.usedCB4[cel->cbEntry]++;

    if (cel->best_coding == RoQ_ID_CCC)
        for (i=0; i<4; i++) {
            if (cel->subCels[i].best_coding == RoQ_ID_SLD)
                tempData->codebooks.usedCB4[cel->subCels[i].cbEntry]++;
            else if (cel->subCels[i].best_coding == RoQ_ID_CCC)
                for (j=0; j<4; j++)
                    tempData->codebooks.usedCB2[cel->subCels[i].subCels[j]]++;
        }
}
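
/**
 * Build the forward/inverse remap tables so that only the codebook entries
 * actually referenced by the chosen codings are written to the codebook
 * chunk.
 */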
static void remap_codebooks(RoqContext *enc, RoqTempdata *tempData)
{
    int i, j, idx=0;

    /* Make remaps for the final codebook usage */
    for (i=0; i<(enc->quake3_compat ? MAX_CBS_4x4-1 : MAX_CBS_4x4); i++) {
        if (tempData->codebooks.usedCB4[i]) {
            tempData->i2f4[i] = idx;
            tempData->f2i4[idx] = i;
            for (j=0; j<4; j++)
                tempData->codebooks.usedCB2[enc->cb4x4[i].idx[j]]++;
            idx++;
        }
    }

    tempData->numCB4 = idx;

    idx = 0;
    for (i=0; i<MAX_CBS_2x2; i++) {
        if (tempData->codebooks.usedCB2[i]) {
            tempData->i2f2[i] = idx;
            tempData->f2i2[idx] = i;
            idx++;
        }
    }
    tempData->numCB2 = idx;
}

/**
 * Write codebook chunk
 */
static void write_codebooks(RoqContext *enc, RoqTempdata *tempData)
{
    int i, j;
    uint8_t **outp= &enc->out_buf;

    if (tempData->numCB2) {
        bytestream_put_le16(outp, RoQ_QUAD_CODEBOOK);
        bytestream_put_le32(outp, tempData->numCB2*6 + tempData->numCB4*4);
        bytestream_put_byte(outp, tempData->numCB4);
        bytestream_put_byte(outp, tempData->numCB2);

        for (i=0; i<tempData->numCB2; i++) {
            bytestream_put_buffer(outp, enc->cb2x2[tempData->f2i2[i]].y, 4);
            bytestream_put_byte(outp, enc->cb2x2[tempData->f2i2[i]].u);
            bytestream_put_byte(outp, enc->cb2x2[tempData->f2i2[i]].v);
        }

        for (i=0; i<tempData->numCB4; i++)
            for (j=0; j<4; j++)
                bytestream_put_byte(outp, tempData->i2f2[enc->cb4x4[tempData->f2i4[i]].idx[j]]);
    }
}
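
/**
 * Pack a motion vector into the one-byte bitstream argument: each component
 * is stored as (8 - value) in a 4-bit field, x in the high nibble and y in
 * the low nibble.
 */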
static inline uint8_t motion_arg(motion_vect mot)
{
    uint8_t ax = 8 - ((uint8_t) mot.d[0]);
    uint8_t ay = 8 - ((uint8_t) mot.d[1]);
    return ((ax&15)<<4) | (ay&15);
}
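
/**
 * Spool for the coding decisions of one chunk: type codes are packed two
 * bits at a time into a 16-bit word while their arguments are buffered, and
 * both are flushed to the output once eight type codes have been collected.
 */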
typedef struct
{
    int typeSpool;
    int typeSpoolLength;
    uint8_t argumentSpool[64];
    uint8_t *args;
    uint8_t **pout;
} CodingSpool;

/* NOTE: Typecodes must be spooled AFTER arguments!! */
static void write_typecode(CodingSpool *s, uint8_t type)
{
    s->typeSpool |= (type & 3) << (14 - s->typeSpoolLength);
    s->typeSpoolLength += 2;
    if (s->typeSpoolLength == 16) {
        bytestream_put_le16(s->pout, s->typeSpool);
        bytestream_put_buffer(s->pout, s->argumentSpool,
                              s->args - s->argumentSpool);
        s->typeSpoolLength = 0;
        s->typeSpool = 0;
        s->args = s->argumentSpool;
    }
}
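
/**
 * Write the video chunk for one frame from the per-cel evaluations and apply
 * the chosen codings to the reconstruction frame, so that it matches what a
 * decoder will see.
 */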
static void reconstruct_and_encode_image(RoqContext *enc, RoqTempdata *tempData, int w, int h, int numBlocks)
{
    int i, j, k;
    int x, y;
    int subX, subY;
    int dist=0;

    roq_qcell *qcell;
    CelEvaluation *eval;

    CodingSpool spool;

    spool.typeSpool=0;
    spool.typeSpoolLength=0;
    spool.args = spool.argumentSpool;
    spool.pout = &enc->out_buf;

    if (tempData->used_option[RoQ_ID_CCC]%2)
        tempData->mainChunkSize+=8; //FIXME

    /* Write the video chunk header */
    bytestream_put_le16(&enc->out_buf, RoQ_QUAD_VQ);
    bytestream_put_le32(&enc->out_buf, tempData->mainChunkSize/8);
    bytestream_put_byte(&enc->out_buf, 0x0);
    bytestream_put_byte(&enc->out_buf, 0x0);

    for (i=0; i<numBlocks; i++) {
        eval = tempData->cel_evals + i;

        x = eval->sourceX;
        y = eval->sourceY;
        dist += eval->eval_dist[eval->best_coding];

        switch (eval->best_coding) {
        case RoQ_ID_MOT:
            write_typecode(&spool, RoQ_ID_MOT);
            break;

        case RoQ_ID_FCC:
            bytestream_put_byte(&spool.args, motion_arg(eval->motion));

            write_typecode(&spool, RoQ_ID_FCC);
            ff_apply_motion_8x8(enc, x, y,
                                eval->motion.d[0], eval->motion.d[1]);
            break;

        case RoQ_ID_SLD:
            bytestream_put_byte(&spool.args, tempData->i2f4[eval->cbEntry]);
            write_typecode(&spool, RoQ_ID_SLD);

            qcell = enc->cb4x4 + eval->cbEntry;
            ff_apply_vector_4x4(enc, x  , y  , enc->cb2x2 + qcell->idx[0]);
            ff_apply_vector_4x4(enc, x+4, y  , enc->cb2x2 + qcell->idx[1]);
            ff_apply_vector_4x4(enc, x  , y+4, enc->cb2x2 + qcell->idx[2]);
            ff_apply_vector_4x4(enc, x+4, y+4, enc->cb2x2 + qcell->idx[3]);
            break;

        case RoQ_ID_CCC:
            write_typecode(&spool, RoQ_ID_CCC);

            for (j=0; j<4; j++) {
                subX = x + 4*(j&1);
                subY = y + 2*(j&2);

                switch(eval->subCels[j].best_coding) {
                case RoQ_ID_MOT:
                    break;

                case RoQ_ID_FCC:
                    bytestream_put_byte(&spool.args,
                                        motion_arg(eval->subCels[j].motion));

                    ff_apply_motion_4x4(enc, subX, subY,
                                        eval->subCels[j].motion.d[0],
                                        eval->subCels[j].motion.d[1]);
                    break;

                case RoQ_ID_SLD:
                    bytestream_put_byte(&spool.args,
                                        tempData->i2f4[eval->subCels[j].cbEntry]);

                    qcell = enc->cb4x4 + eval->subCels[j].cbEntry;

                    ff_apply_vector_2x2(enc, subX  , subY  ,
                                        enc->cb2x2 + qcell->idx[0]);
                    ff_apply_vector_2x2(enc, subX+2, subY  ,
                                        enc->cb2x2 + qcell->idx[1]);
                    ff_apply_vector_2x2(enc, subX  , subY+2,
                                        enc->cb2x2 + qcell->idx[2]);
                    ff_apply_vector_2x2(enc, subX+2, subY+2,
                                        enc->cb2x2 + qcell->idx[3]);
                    break;

                case RoQ_ID_CCC:
                    for (k=0; k<4; k++) {
                        int cb_idx = eval->subCels[j].subCels[k];
                        bytestream_put_byte(&spool.args,
                                            tempData->i2f2[cb_idx]);

                        ff_apply_vector_2x2(enc, subX + 2*(k&1), subY + (k&2),
                                            enc->cb2x2 + cb_idx);
                    }
                    break;
                }
                write_typecode(&spool, eval->subCels[j].best_coding);
            }
            break;
        }
    }

    /* Flush the remainder of the argument/type spool */
    while (spool.typeSpoolLength)
        write_typecode(&spool, 0x0);

#if 0
    uint8_t *fdata[3] = {enc->frame_to_enc->data[0],
                         enc->frame_to_enc->data[1],
                         enc->frame_to_enc->data[2]};
    uint8_t *cdata[3] = {enc->current_frame->data[0],
                         enc->current_frame->data[1],
                         enc->current_frame->data[2]};
    av_log(enc->avctx, AV_LOG_ERROR, "Expected distortion: %i Actual: %i\n",
           dist,
           block_sse(fdata, cdata, 0, 0, 0, 0,
                     enc->frame_to_enc->linesize,
                     enc->current_frame->linesize,
                     enc->width));  //WARNING: Square dimensions implied...
#endif
}

/**
 * Create a single YUV cell from a 2x2 section of the image
 */
static inline void frame_block_to_cell(uint8_t *block, uint8_t * const *data,
                                       int top, int left, const int *stride)
{
    int i, j, u=0, v=0;

    for (i=0; i<2; i++)
        for (j=0; j<2; j++) {
            int x = (top+i)*stride[0] + left + j;
            *block++ = data[0][x];
            x = (top+i)*stride[1] + left + j;
            u += data[1][x];
            v += data[2][x];
        }

    *block++ = (u+2)/4;
    *block++ = (v+2)/4;
}

/**
 * Create YUV clusters for the entire image
 */
static void create_clusters(const AVFrame *frame, int w, int h, uint8_t *yuvClusters)
{
    int i, j, k, l;

    for (i=0; i<h; i+=4)
        for (j=0; j<w; j+=4) {
            for (k=0; k < 2; k++)
                for (l=0; l < 2; l++)
                    frame_block_to_cell(yuvClusters + (l + 2*k)*6, frame->data,
                                        i+2*k, j+2*l, frame->linesize);
            yuvClusters += 24;
        }
}
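
/**
 * Run the ELBG vector quantizer on the subsampled YUV points to produce one
 * codebook of the requested size, converting the result back into roq_cell
 * entries with the chroma bias removed.
 */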
static void generate_codebook(RoqContext *enc, RoqTempdata *tempdata,
                              int *points, int inputCount, roq_cell *results,
                              int size, int cbsize)
{
    int i, j, k;
    int c_size = size*size/4;
    int *buf;
    int *codebook = av_malloc(6*c_size*cbsize*sizeof(int));
    int *closest_cb;

    if (size == 4)
        closest_cb = av_malloc(6*c_size*inputCount*sizeof(int));
    else
        closest_cb = tempdata->closest_cb2;

    avpriv_init_elbg(points, 6*c_size, inputCount, codebook, cbsize, 1, closest_cb, &enc->randctx);
    avpriv_do_elbg(points, 6*c_size, inputCount, codebook, cbsize, 1, closest_cb, &enc->randctx);

    if (size == 4)
        av_free(closest_cb);

    buf = codebook;
    for (i=0; i<cbsize; i++)
        for (k=0; k<c_size; k++) {
            for(j=0; j<4; j++)
                results->y[j] = *buf++;

            results->u = (*buf++ + CHROMA_BIAS/2)/CHROMA_BIAS;
            results->v = (*buf++ + CHROMA_BIAS/2)/CHROMA_BIAS;

            results++;
        }

    av_free(codebook);
}
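
/**
 * Build the 2x2 and 4x4 codebooks for the current frame: subsample the input
 * into YUV clusters, quantize them with generate_codebook(), and unpack and
 * enlarge the results for the later distortion evaluations.
 */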
static void generate_new_codebooks(RoqContext *enc, RoqTempdata *tempData)
{
    int i,j;
    RoqCodebooks *codebooks = &tempData->codebooks;
    int max = enc->width*enc->height/16;
    uint8_t mb2[3*4];
    roq_cell *results4 = av_malloc(sizeof(roq_cell)*MAX_CBS_4x4*4);
    uint8_t *yuvClusters=av_malloc(sizeof(int)*max*6*4);
    int *points = av_malloc(max*6*4*sizeof(int));
    int bias;

    /* Subsample YUV data */
    create_clusters(enc->frame_to_enc, enc->width, enc->height, yuvClusters);

    /* Cast to integer and apply chroma bias */
    for (i=0; i<max*24; i++) {
        bias = ((i%6)<4) ? 1 : CHROMA_BIAS;
        points[i] = bias*yuvClusters[i];
    }

    /* Create 4x4 codebooks */
    generate_codebook(enc, tempData, points, max, results4, 4, (enc->quake3_compat ? MAX_CBS_4x4-1 : MAX_CBS_4x4));

    codebooks->numCB4 = (enc->quake3_compat ? MAX_CBS_4x4-1 : MAX_CBS_4x4);

    tempData->closest_cb2 = av_malloc(max*4*sizeof(int));

    /* Create 2x2 codebooks */
    generate_codebook(enc, tempData, points, max*4, enc->cb2x2, 2, MAX_CBS_2x2);

    codebooks->numCB2 = MAX_CBS_2x2;

    /* Unpack 2x2 codebook clusters */
    for (i=0; i<codebooks->numCB2; i++)
        unpack_roq_cell(enc->cb2x2 + i, codebooks->unpacked_cb2 + i*2*2*3);

    /* Index all 4x4 entries to the 2x2 entries, unpack, and enlarge */
    for (i=0; i<codebooks->numCB4; i++) {
        for (j=0; j<4; j++) {
            unpack_roq_cell(&results4[4*i + j], mb2);
            index_mb(mb2, codebooks->unpacked_cb2, codebooks->numCB2,
                     &enc->cb4x4[i].idx[j], 2);
        }

        unpack_roq_qcell(codebooks->unpacked_cb2, enc->cb4x4 + i,
                         codebooks->unpacked_cb4 + i*4*4*3);
        enlarge_roq_mb4(codebooks->unpacked_cb4 + i*4*4*3,
                        codebooks->unpacked_cb4_enlarged + i*8*8*3);
    }

    av_free(yuvClusters);
    av_free(points);
    av_free(results4);
}
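
/**
 * Encode one frame: generate codebooks, run motion search, pick a coding for
 * every cel, and write the codebook and video chunks. If Quake 3
 * compatibility is enabled and the chunk would exceed 64k, the frame is
 * re-coded with a larger lambda until it fits.
 */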
static void roq_encode_video(RoqContext *enc)
{
    RoqTempdata *tempData = enc->tmpData;
    int i;

    memset(tempData, 0, sizeof(*tempData));

    create_cel_evals(enc, tempData);

    generate_new_codebooks(enc, tempData);

    if (enc->framesSinceKeyframe >= 1) {
        motion_search(enc, 8);
        motion_search(enc, 4);
    }

 retry_encode:
    for (i=0; i<enc->width*enc->height/64; i++)
        gather_data_for_cel(tempData->cel_evals + i, enc, tempData);

    /* Quake 3 can't handle chunks bigger than 65535 bytes */
    if (tempData->mainChunkSize/8 > 65535 && enc->quake3_compat) {
        av_log(enc->avctx, AV_LOG_ERROR,
               "Warning, generated a frame too big for Quake (%d > 65535), "
               "now switching to a bigger qscale value.\n",
               tempData->mainChunkSize/8);
        enc->lambda *= 1.5;
        tempData->mainChunkSize = 0;
        memset(tempData->used_option, 0, sizeof(tempData->used_option));
        memset(tempData->codebooks.usedCB4, 0,
               sizeof(tempData->codebooks.usedCB4));
        memset(tempData->codebooks.usedCB2, 0,
               sizeof(tempData->codebooks.usedCB2));

        goto retry_encode;
    }

    remap_codebooks(enc, tempData);

    write_codebooks(enc, tempData);

    reconstruct_and_encode_image(enc, tempData, enc->width, enc->height,
                                 enc->width*enc->height/64);

    enc->avctx->coded_frame = enc->current_frame;

    /* Rotate frame history */
    FFSWAP(AVFrame *, enc->current_frame, enc->last_frame);
    FFSWAP(motion_vect *, enc->last_motion4, enc->this_motion4);
    FFSWAP(motion_vect *, enc->last_motion8, enc->this_motion8);

    av_free(tempData->cel_evals);
    av_free(tempData->closest_cb2);

    enc->framesSinceKeyframe++;
}

static av_cold int roq_encode_end(AVCodecContext *avctx)
{
    RoqContext *enc = avctx->priv_data;

    av_frame_free(&enc->current_frame);
    av_frame_free(&enc->last_frame);

    av_free(enc->tmpData);
    av_free(enc->this_motion4);
    av_free(enc->last_motion4);
    av_free(enc->this_motion8);
    av_free(enc->last_motion8);

    return 0;
}

static av_cold int roq_encode_init(AVCodecContext *avctx)
{
    RoqContext *enc = avctx->priv_data;

    av_lfg_init(&enc->randctx, 1);

    enc->framesSinceKeyframe = 0;
    if ((avctx->width & 0xf) || (avctx->height & 0xf)) {
        av_log(avctx, AV_LOG_ERROR, "Dimensions must be divisible by 16\n");
        return AVERROR(EINVAL);
    }

    if (((avctx->width)&(avctx->width-1))||((avctx->height)&(avctx->height-1)))
        av_log(avctx, AV_LOG_ERROR, "Warning: dimensions not power of two\n");

    enc->width = avctx->width;
    enc->height = avctx->height;

    enc->framesSinceKeyframe = 0;
    enc->first_frame = 1;

    enc->last_frame    = av_frame_alloc();
    enc->current_frame = av_frame_alloc();
    if (!enc->last_frame || !enc->current_frame) {
        roq_encode_end(avctx);
        return AVERROR(ENOMEM);
    }

    enc->tmpData      = av_malloc(sizeof(RoqTempdata));

    enc->this_motion4 =
        av_mallocz((enc->width*enc->height/16)*sizeof(motion_vect));

    enc->last_motion4 =
        av_malloc ((enc->width*enc->height/16)*sizeof(motion_vect));

    enc->this_motion8 =
        av_mallocz((enc->width*enc->height/64)*sizeof(motion_vect));

    enc->last_motion8 =
        av_malloc ((enc->width*enc->height/64)*sizeof(motion_vect));

    return 0;
}

static void roq_write_video_info_chunk(RoqContext *enc)
{
    /* ROQ info chunk */
    bytestream_put_le16(&enc->out_buf, RoQ_INFO);

    /* Size: 8 bytes */
    bytestream_put_le32(&enc->out_buf, 8);

    /* Unused argument */
    bytestream_put_byte(&enc->out_buf, 0x00);
    bytestream_put_byte(&enc->out_buf, 0x00);

    /* Width */
    bytestream_put_le16(&enc->out_buf, enc->width);

    /* Height */
    bytestream_put_le16(&enc->out_buf, enc->height);

    /* Unused in Quake 3, mimics the output of the real encoder */
    bytestream_put_byte(&enc->out_buf, 0x08);
    bytestream_put_byte(&enc->out_buf, 0x00);
    bytestream_put_byte(&enc->out_buf, 0x04);
    bytestream_put_byte(&enc->out_buf, 0x00);
}

static int roq_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *frame, int *got_packet)
{
    RoqContext *enc = avctx->priv_data;
    int size, ret;

    enc->avctx = avctx;

    enc->frame_to_enc = frame;

    if (frame->quality)
        enc->lambda = frame->quality - 1;
    else
        enc->lambda = 2*ROQ_LAMBDA_SCALE;

    /* 138 bits max per 8x8 block +
     * 256 codebooks*(6 bytes 2x2 + 4 bytes 4x4) + 8 bytes frame header */
    size = ((enc->width * enc->height / 64) * 138 + 7) / 8 + 256 * (6 + 4) + 8;
    if ((ret = ff_alloc_packet2(avctx, pkt, size)) < 0)
        return ret;
    enc->out_buf = pkt->data;

    /* Check for I frame */
    if (enc->framesSinceKeyframe == avctx->gop_size)
        enc->framesSinceKeyframe = 0;

    if (enc->first_frame) {
        /* Alloc memory for the reconstruction data (we must know the stride
           for that) */
        if ((ret = ff_get_buffer(avctx, enc->current_frame, 0)) < 0 ||
            (ret = ff_get_buffer(avctx, enc->last_frame,    0)) < 0)
            return ret;

        /* Before the first video frame, write a "video info" chunk */
        roq_write_video_info_chunk(enc);

        enc->first_frame = 0;
    }

    /* Encode the actual frame */
    roq_encode_video(enc);

    pkt->size   = enc->out_buf - pkt->data;
    if (enc->framesSinceKeyframe == 1)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}

#define OFFSET(x) offsetof(RoqContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "quake3_compat", "Whether to respect known limitations in Quake 3 decoder", OFFSET(quake3_compat), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, VE },
    { NULL },
};

static const AVClass roq_class = {
    .class_name = "RoQ",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_roq_encoder = {
    .name           = "roqvideo",
    .long_name      = NULL_IF_CONFIG_SMALL("id RoQ video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_ROQ,
    .priv_data_size = sizeof(RoqContext),
    .init           = roq_encode_init,
    .encode2        = roq_encode_frame,
    .close          = roq_encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV444P,
                                                    AV_PIX_FMT_NONE },
    .priv_class     = &roq_class,
};