You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1128 lines
33KB

  1. /*
  2. * RoQ Video Encoder.
  3. *
  4. * Copyright (C) 2007 Vitor Sessak <vitor1001@gmail.com>
  5. * Copyright (C) 2004-2007 Eric Lasota
  6. * Based on RoQ specs (C) 2001 Tim Ferguson
  7. *
  8. * This file is part of Libav.
  9. *
  10. * Libav is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU Lesser General Public
  12. * License as published by the Free Software Foundation; either
  13. * version 2.1 of the License, or (at your option) any later version.
  14. *
  15. * Libav is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * Lesser General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU Lesser General Public
  21. * License along with Libav; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  23. */
  24. /**
  25. * @file
  26. * id RoQ encoder by Vitor. Based on the Switchblade3 library and the
  27. * Switchblade3 Libav glue by Eric Lasota.
  28. */
  29. /*
  30. * COSTS:
  31. * Level 1:
  32. * SKIP - 2 bits
  33. * MOTION - 2 + 8 bits
  34. * CODEBOOK - 2 + 8 bits
  35. * SUBDIVIDE - 2 + combined subcel cost
  36. *
  37. * Level 2:
  38. * SKIP - 2 bits
  39. * MOTION - 2 + 8 bits
  40. * CODEBOOK - 2 + 8 bits
  41. * SUBDIVIDE - 2 + 4*8 bits
  42. *
  43. * Maximum cost: 138 bits per cel
  44. *
  45. * Proper evaluation requires LCD fraction comparison, which requires
  46. * Squared Error (SE) loss * savings increase
  47. *
  48. * Maximum savings increase: 136 bits
  49. * Maximum SE loss without overflow: 31580641
  50. * Components in 8x8 supercel: 192
  51. * Maximum SE precision per component: 164482
  52. * >65025, so no truncation is needed (phew)
  53. */
  54. #include <string.h>
  55. #include "libavutil/attributes.h"
  56. #include "roqvideo.h"
  57. #include "bytestream.h"
  58. #include "elbg.h"
  59. #include "internal.h"
  60. #include "mathops.h"
  61. #define CHROMA_BIAS 1
  62. /**
  63. * Maximum number of generated 4x4 codebooks. Can't be 256 to workaround a
  64. * Quake 3 bug.
  65. */
  66. #define MAX_CBS_4x4 255
  67. #define MAX_CBS_2x2 256 ///< Maximum number of 2x2 codebooks.
  68. /* The cast is useful when multiplying it by INT_MAX */
  69. #define ROQ_LAMBDA_SCALE ((uint64_t) FF_LAMBDA_SCALE)
  70. /* Macroblock support functions */
  71. static void unpack_roq_cell(roq_cell *cell, uint8_t u[4*3])
  72. {
  73. memcpy(u , cell->y, 4);
  74. memset(u+4, cell->u, 4);
  75. memset(u+8, cell->v, 4);
  76. }
  77. static void unpack_roq_qcell(uint8_t cb2[], roq_qcell *qcell, uint8_t u[4*4*3])
  78. {
  79. int i,cp;
  80. static const int offsets[4] = {0, 2, 8, 10};
  81. for (cp=0; cp<3; cp++)
  82. for (i=0; i<4; i++) {
  83. u[4*4*cp + offsets[i] ] = cb2[qcell->idx[i]*2*2*3 + 4*cp ];
  84. u[4*4*cp + offsets[i]+1] = cb2[qcell->idx[i]*2*2*3 + 4*cp+1];
  85. u[4*4*cp + offsets[i]+4] = cb2[qcell->idx[i]*2*2*3 + 4*cp+2];
  86. u[4*4*cp + offsets[i]+5] = cb2[qcell->idx[i]*2*2*3 + 4*cp+3];
  87. }
  88. }
  89. static void enlarge_roq_mb4(uint8_t base[3*16], uint8_t u[3*64])
  90. {
  91. int x,y,cp;
  92. for(cp=0; cp<3; cp++)
  93. for(y=0; y<8; y++)
  94. for(x=0; x<8; x++)
  95. *u++ = base[(y/2)*4 + (x/2) + 16*cp];
  96. }
  97. static inline int square(int x)
  98. {
  99. return x*x;
  100. }
  101. static inline int eval_sse(const uint8_t *a, const uint8_t *b, int count)
  102. {
  103. int diff=0;
  104. while(count--)
  105. diff += square(*b++ - *a++);
  106. return diff;
  107. }
  108. // FIXME Could use DSPContext.sse, but it is not so speed critical (used
  109. // just for motion estimation).
  110. static int block_sse(uint8_t * const *buf1, uint8_t * const *buf2, int x1, int y1,
  111. int x2, int y2, const int *stride1, const int *stride2, int size)
  112. {
  113. int i, k;
  114. int sse=0;
  115. for (k=0; k<3; k++) {
  116. int bias = (k ? CHROMA_BIAS : 4);
  117. for (i=0; i<size; i++)
  118. sse += bias*eval_sse(buf1[k] + (y1+i)*stride1[k] + x1,
  119. buf2[k] + (y2+i)*stride2[k] + x2, size);
  120. }
  121. return sse;
  122. }
  123. static int eval_motion_dist(RoqContext *enc, int x, int y, motion_vect vect,
  124. int size)
  125. {
  126. int mx=vect.d[0];
  127. int my=vect.d[1];
  128. if (mx < -7 || mx > 7)
  129. return INT_MAX;
  130. if (my < -7 || my > 7)
  131. return INT_MAX;
  132. mx += x;
  133. my += y;
  134. if ((unsigned) mx > enc->width-size || (unsigned) my > enc->height-size)
  135. return INT_MAX;
  136. return block_sse(enc->frame_to_enc->data, enc->last_frame->data, x, y,
  137. mx, my,
  138. enc->frame_to_enc->linesize, enc->last_frame->linesize,
  139. size);
  140. }
  141. /**
  142. * @return distortion between two macroblocks
  143. */
  144. static inline int squared_diff_macroblock(uint8_t a[], uint8_t b[], int size)
  145. {
  146. int cp, sdiff=0;
  147. for(cp=0;cp<3;cp++) {
  148. int bias = (cp ? CHROMA_BIAS : 4);
  149. sdiff += bias*eval_sse(a, b, size*size);
  150. a += size*size;
  151. b += size*size;
  152. }
  153. return sdiff;
  154. }
/* Evaluation state for one 4x4 subcel: per-mode distortion plus the
 * arguments needed to emit whichever coding mode is chosen. */
typedef struct SubcelEvaluation {
    int eval_dist[4];    ///< distortion for each coding mode (RoQ_ID_*)
    int best_bit_use;    ///< bit cost of the selected mode
    int best_coding;     ///< selected coding mode
    int subCels[4];      ///< 2x2 codebook indices used by RoQ_ID_CCC
    motion_vect motion;  ///< motion vector used by RoQ_ID_FCC
    int cbEntry;         ///< 4x4 codebook index used by RoQ_ID_SLD
} SubcelEvaluation;
/* Evaluation state for one 8x8 cel, including its four 4x4 subcels. */
typedef struct CelEvaluation {
    int eval_dist[4];             ///< distortion for each coding mode
    int best_coding;              ///< selected coding mode
    SubcelEvaluation subCels[4];  ///< evaluations for RoQ_ID_CCC subdivision
    motion_vect motion;           ///< motion vector used by RoQ_ID_FCC
    int cbEntry;                  ///< 4x4 codebook index used by RoQ_ID_SLD
    int sourceX, sourceY;         ///< top-left pixel position of this cel
} CelEvaluation;
/* Per-frame codebook state: entry counts, usage counters for the final
 * remap, and unpacked planar copies used for distortion evaluation. */
typedef struct RoqCodebooks {
    int numCB4;                 ///< number of valid 4x4 entries
    int numCB2;                 ///< number of valid 2x2 entries
    int usedCB2[MAX_CBS_2x2];   ///< use count of each 2x2 entry this frame
    int usedCB4[MAX_CBS_4x4];   ///< use count of each 4x4 entry this frame
    uint8_t unpacked_cb2[MAX_CBS_2x2*2*2*3];           ///< planar 2x2 cells
    uint8_t unpacked_cb4[MAX_CBS_4x4*4*4*3];           ///< planar 4x4 cells
    uint8_t unpacked_cb4_enlarged[MAX_CBS_4x4*8*8*3];  ///< 4x4 cells doubled to 8x8
} RoqCodebooks;
/**
 * Temporary vars
 *
 * Scratch state for encoding a single frame; zeroed at the start of
 * roq_encode_video().  Note the typedef name is "RoqTempdata"
 * (lower-case d) while the struct tag is RoqTempData.
 */
typedef struct RoqTempData
{
    CelEvaluation *cel_evals;   ///< one evaluator per 8x8 cel, quadtree order

    int f2i4[MAX_CBS_4x4];      ///< final position -> initial 4x4 index
    int i2f4[MAX_CBS_4x4];      ///< initial 4x4 index -> final position
    int f2i2[MAX_CBS_2x2];      ///< final position -> initial 2x2 index
    int i2f2[MAX_CBS_2x2];      ///< initial 2x2 index -> final position

    int mainChunkSize;          ///< accumulated VQ chunk size in bits

    int numCB4;                 ///< 4x4 entries surviving the remap
    int numCB2;                 ///< 2x2 entries surviving the remap

    RoqCodebooks codebooks;

    int *closest_cb2;           ///< closest 2x2 entry per 2x2 input cell (from ELBG)

    int used_option[4];         ///< how many cels chose each coding mode
} RoqTempdata;
  197. /**
  198. * Initialize cel evaluators and set their source coordinates
  199. */
  200. static int create_cel_evals(RoqContext *enc, RoqTempdata *tempData)
  201. {
  202. int n=0, x, y, i;
  203. tempData->cel_evals = av_malloc(enc->width*enc->height/64 * sizeof(CelEvaluation));
  204. if (!tempData->cel_evals)
  205. return AVERROR(ENOMEM);
  206. /* Map to the ROQ quadtree order */
  207. for (y=0; y<enc->height; y+=16)
  208. for (x=0; x<enc->width; x+=16)
  209. for(i=0; i<4; i++) {
  210. tempData->cel_evals[n ].sourceX = x + (i&1)*8;
  211. tempData->cel_evals[n++].sourceY = y + (i&2)*4;
  212. }
  213. return 0;
  214. }
  215. /**
  216. * Get macroblocks from parts of the image
  217. */
  218. static void get_frame_mb(const AVFrame *frame, int x, int y, uint8_t mb[], int dim)
  219. {
  220. int i, j, cp;
  221. for (cp=0; cp<3; cp++) {
  222. int stride = frame->linesize[cp];
  223. for (i=0; i<dim; i++)
  224. for (j=0; j<dim; j++)
  225. *mb++ = frame->data[cp][(y+i)*stride + x + j];
  226. }
  227. }
  228. /**
  229. * Find the codebook with the lowest distortion from an image
  230. */
  231. static int index_mb(uint8_t cluster[], uint8_t cb[], int numCB,
  232. int *outIndex, int dim)
  233. {
  234. int i, lDiff = INT_MAX, pick=0;
  235. /* Diff against the others */
  236. for (i=0; i<numCB; i++) {
  237. int diff = squared_diff_macroblock(cluster, cb + i*dim*dim*3, dim);
  238. if (diff < lDiff) {
  239. lDiff = diff;
  240. pick = i;
  241. }
  242. }
  243. *outIndex = pick;
  244. return lDiff;
  245. }
/* Evaluate MOTION as a candidate vector for the current block and keep
 * it as the running best when it improves on the lowest distortion so
 * far.  Expansion-site contract: enc, j (x), i (y), blocksize, diff,
 * lowestdiff and bestpick must all be in scope (see motion_search()). */
#define EVAL_MOTION(MOTION) \
    do { \
        diff = eval_motion_dist(enc, j, i, MOTION, blocksize); \
        \
        if (diff < lowestdiff) { \
            lowestdiff = diff; \
            bestpick = MOTION; \
        } \
    } while(0)
/**
 * Motion search for every block of one size (8 for 8x8 cels, 4 for
 * 4x4 subcels).  Candidates per block: the zero vector, the co-located
 * 8x8 result (when searching 4x4), up to three vectors from the
 * previous frame's field, the median predictor of the three causal
 * neighbours and the neighbours themselves.  The best candidate is then
 * refined by a greedy +-1 neighbourhood descent until no improvement.
 * Results are written into enc->this_motion4 / this_motion8.
 */
static void motion_search(RoqContext *enc, int blocksize)
{
    /* The eight +-1 refinement steps around the current best vector. */
    static const motion_vect offsets[8] = {
        {{ 0,-1}},
        {{ 0, 1}},
        {{-1, 0}},
        {{ 1, 0}},
        {{-1, 1}},
        {{ 1,-1}},
        {{-1,-1}},
        {{ 1, 1}},
    };

    int diff, lowestdiff, oldbest;
    int off[3];
    motion_vect bestpick = {{0,0}};
    int i, j, k, offset;

    motion_vect *last_motion;
    motion_vect *this_motion;
    motion_vect vect, vect2;

    int max = (enc->width/blocksize)*enc->height/blocksize;

    if (blocksize == 4) {
        last_motion = enc->last_motion4;
        this_motion = enc->this_motion4;
    } else {
        last_motion = enc->last_motion8;
        this_motion = enc->this_motion8;
    }

    for (i=0; i<enc->height; i+=blocksize)
        for (j=0; j<enc->width; j+=blocksize) {
            /* Baseline: the zero vector. */
            lowestdiff = eval_motion_dist(enc, j, i, (motion_vect) {{0,0}},
                                          blocksize);
            bestpick.d[0] = 0;
            bestpick.d[1] = 0;

            /* 4x4 search reuses the co-located 8x8 result as a seed. */
            if (blocksize == 4)
                EVAL_MOTION(enc->this_motion8[(i/8)*(enc->width/8) + j/8]);

            /* Vectors from the previous frame: same block, right
             * neighbour, and the block below. */
            offset = (i/blocksize)*enc->width/blocksize + j/blocksize;
            if (offset < max && offset >= 0)
                EVAL_MOTION(last_motion[offset]);

            offset++;
            if (offset < max && offset >= 0)
                EVAL_MOTION(last_motion[offset]);

            offset = (i/blocksize + 1)*enc->width/blocksize + j/blocksize;
            if (offset < max && offset >= 0)
                EVAL_MOTION(last_motion[offset]);

            /* Causal neighbours in this frame: left, above-right, above. */
            off[0] = (i/blocksize)*enc->width/blocksize + j/blocksize - 1;
            off[1] = off[0] - enc->width/blocksize + 1;
            off[2] = off[1] + 1;

            if (i) {
                /* Median predictor of the three causal neighbours. */
                for(k=0; k<2; k++)
                    vect.d[k]= mid_pred(this_motion[off[0]].d[k],
                                        this_motion[off[1]].d[k],
                                        this_motion[off[2]].d[k]);

                EVAL_MOTION(vect);
                for(k=0; k<3; k++)
                    EVAL_MOTION(this_motion[off[k]]);
            } else if(j)
                EVAL_MOTION(this_motion[off[0]]);

            vect = bestpick;

            /* Greedy local refinement: keep stepping while improving. */
            oldbest = -1;
            while (oldbest != lowestdiff) {
                oldbest = lowestdiff;
                for (k=0; k<8; k++) {
                    vect2 = vect;
                    vect2.d[0] += offsets[k].d[0];
                    vect2.d[1] += offsets[k].d[1];
                    EVAL_MOTION(vect2);
                }
                vect = bestpick;
            }
            offset = (i/blocksize)*enc->width/blocksize + j/blocksize;
            this_motion[offset] = bestpick;
        }
}
/**
 * Get distortion for all options available to a subcel
 *
 * Fills subcel->eval_dist[] for the four coding modes (MOT/FCC need
 * enough frames since the keyframe to have a valid reference, otherwise
 * INT_MAX disables them) and selects best_coding by minimizing
 * lambda-weighted distortion plus bit cost.
 */
static void gather_data_for_subcel(SubcelEvaluation *subcel, int x,
                                   int y, RoqContext *enc, RoqTempdata *tempData)
{
    uint8_t mb4[4*4*3];
    uint8_t mb2[2*2*3];
    int cluster_index;
    int i, best_dist;

    /* Bit cost per mode: SKIP, MOTION, CODEBOOK, SUBDIVIDE (2 + 4*8). */
    static const int bitsUsed[4] = {2, 10, 10, 34};

    if (enc->framesSinceKeyframe >= 1) {
        subcel->motion = enc->this_motion4[y*enc->width/16 + x/4];

        subcel->eval_dist[RoQ_ID_FCC] =
            eval_motion_dist(enc, x, y,
                             enc->this_motion4[y*enc->width/16 + x/4], 4);
    } else
        subcel->eval_dist[RoQ_ID_FCC] = INT_MAX;

    /* SKIP compares against the reconstruction two frames back, so it
     * needs at least two frames since the keyframe. */
    if (enc->framesSinceKeyframe >= 2)
        subcel->eval_dist[RoQ_ID_MOT] = block_sse(enc->frame_to_enc->data,
                                                  enc->current_frame->data, x,
                                                  y, x, y,
                                                  enc->frame_to_enc->linesize,
                                                  enc->current_frame->linesize,
                                                  4);
    else
        subcel->eval_dist[RoQ_ID_MOT] = INT_MAX;

    cluster_index = y*enc->width/16 + x/4;

    /* CODEBOOK: best 4x4 vector-quantizer entry for this subcel. */
    get_frame_mb(enc->frame_to_enc, x, y, mb4, 4);

    subcel->eval_dist[RoQ_ID_SLD] = index_mb(mb4,
                                             tempData->codebooks.unpacked_cb4,
                                             tempData->codebooks.numCB4,
                                             &subcel->cbEntry, 4);

    /* SUBDIVIDE: code each 2x2 quarter with its ELBG-assigned entry. */
    subcel->eval_dist[RoQ_ID_CCC] = 0;

    for(i=0;i<4;i++) {
        subcel->subCels[i] = tempData->closest_cb2[cluster_index*4+i];

        get_frame_mb(enc->frame_to_enc, x+2*(i&1),
                     y+(i&2), mb2, 2);

        subcel->eval_dist[RoQ_ID_CCC] +=
            squared_diff_macroblock(tempData->codebooks.unpacked_cb2 + subcel->subCels[i]*2*2*3, mb2, 2);
    }

    /* Rate-distortion selection of the cheapest mode. */
    best_dist = INT_MAX;
    for (i=0; i<4; i++)
        if (ROQ_LAMBDA_SCALE*subcel->eval_dist[i] + enc->lambda*bitsUsed[i] <
            best_dist) {
            subcel->best_coding = i;
            subcel->best_bit_use = bitsUsed[i];
            best_dist = ROQ_LAMBDA_SCALE*subcel->eval_dist[i] +
                enc->lambda*bitsUsed[i];
        }
}
/**
 * Get distortion for all options available to a cel
 *
 * Evaluates the four coding modes for an 8x8 cel (SUBDIVIDE recurses
 * into the four 4x4 subcels), selects the cheapest one by
 * rate-distortion, and updates the per-frame accounting used for chunk
 * sizing and codebook usage.
 */
static void gather_data_for_cel(CelEvaluation *cel, RoqContext *enc,
                                RoqTempdata *tempData)
{
    uint8_t mb8[8*8*3];
    int index = cel->sourceY*enc->width/64 + cel->sourceX/8;
    int i, j, best_dist, divide_bit_use;

    /* SKIP, MOTION, CODEBOOK; SUBDIVIDE's cost is filled in below. */
    int bitsUsed[4] = {2, 10, 10, 0};

    if (enc->framesSinceKeyframe >= 1) {
        cel->motion = enc->this_motion8[index];

        cel->eval_dist[RoQ_ID_FCC] =
            eval_motion_dist(enc, cel->sourceX, cel->sourceY,
                             enc->this_motion8[index], 8);
    } else
        cel->eval_dist[RoQ_ID_FCC] = INT_MAX;

    if (enc->framesSinceKeyframe >= 2)
        cel->eval_dist[RoQ_ID_MOT] = block_sse(enc->frame_to_enc->data,
                                               enc->current_frame->data,
                                               cel->sourceX, cel->sourceY,
                                               cel->sourceX, cel->sourceY,
                                               enc->frame_to_enc->linesize,
                                               enc->current_frame->linesize,8);
    else
        cel->eval_dist[RoQ_ID_MOT] = INT_MAX;

    /* CODEBOOK at 8x8 uses the pixel-doubled 4x4 entries. */
    get_frame_mb(enc->frame_to_enc, cel->sourceX, cel->sourceY, mb8, 8);

    cel->eval_dist[RoQ_ID_SLD] =
        index_mb(mb8, tempData->codebooks.unpacked_cb4_enlarged,
                 tempData->codebooks.numCB4, &cel->cbEntry, 8);

    /* Evaluate the four 4x4 subcels for the SUBDIVIDE option. */
    gather_data_for_subcel(cel->subCels + 0, cel->sourceX+0, cel->sourceY+0, enc, tempData);
    gather_data_for_subcel(cel->subCels + 1, cel->sourceX+4, cel->sourceY+0, enc, tempData);
    gather_data_for_subcel(cel->subCels + 2, cel->sourceX+0, cel->sourceY+4, enc, tempData);
    gather_data_for_subcel(cel->subCels + 3, cel->sourceX+4, cel->sourceY+4, enc, tempData);

    cel->eval_dist[RoQ_ID_CCC] = 0;
    divide_bit_use = 0;
    for (i=0; i<4; i++) {
        cel->eval_dist[RoQ_ID_CCC] +=
            cel->subCels[i].eval_dist[cel->subCels[i].best_coding];
        divide_bit_use += cel->subCels[i].best_bit_use;
    }

    best_dist = INT_MAX;
    bitsUsed[3] = 2 + divide_bit_use;

    for (i=0; i<4; i++)
        if (ROQ_LAMBDA_SCALE*cel->eval_dist[i] + enc->lambda*bitsUsed[i] <
            best_dist) {
            cel->best_coding = i;
            best_dist = ROQ_LAMBDA_SCALE*cel->eval_dist[i] +
                enc->lambda*bitsUsed[i];
        }

    /* Accounting: chunk size, mode histogram and codebook usage counts
     * feed the later remap and the 64k chunk-size check. */
    tempData->used_option[cel->best_coding]++;
    tempData->mainChunkSize += bitsUsed[cel->best_coding];

    if (cel->best_coding == RoQ_ID_SLD)
        tempData->codebooks.usedCB4[cel->cbEntry]++;

    if (cel->best_coding == RoQ_ID_CCC)
        for (i=0; i<4; i++) {
            if (cel->subCels[i].best_coding == RoQ_ID_SLD)
                tempData->codebooks.usedCB4[cel->subCels[i].cbEntry]++;
            else if (cel->subCels[i].best_coding == RoQ_ID_CCC)
                for (j=0; j<4; j++)
                    tempData->codebooks.usedCB2[cel->subCels[i].subCels[j]]++;
        }
}
  442. static void remap_codebooks(RoqContext *enc, RoqTempdata *tempData)
  443. {
  444. int i, j, idx=0;
  445. /* Make remaps for the final codebook usage */
  446. for (i=0; i<MAX_CBS_4x4; i++) {
  447. if (tempData->codebooks.usedCB4[i]) {
  448. tempData->i2f4[i] = idx;
  449. tempData->f2i4[idx] = i;
  450. for (j=0; j<4; j++)
  451. tempData->codebooks.usedCB2[enc->cb4x4[i].idx[j]]++;
  452. idx++;
  453. }
  454. }
  455. tempData->numCB4 = idx;
  456. idx = 0;
  457. for (i=0; i<MAX_CBS_2x2; i++) {
  458. if (tempData->codebooks.usedCB2[i]) {
  459. tempData->i2f2[i] = idx;
  460. tempData->f2i2[idx] = i;
  461. idx++;
  462. }
  463. }
  464. tempData->numCB2 = idx;
  465. }
/**
 * Write codebook chunk
 *
 * Emits the RoQ_QUAD_CODEBOOK chunk: header, then the remapped 2x2
 * cells (4 luma + U + V bytes each), then the 4x4 cells as four
 * remapped 2x2 indices each.  Nothing is written when no codebook
 * entry is used this frame.
 */
static void write_codebooks(RoqContext *enc, RoqTempdata *tempData)
{
    int i, j;
    uint8_t **outp= &enc->out_buf;

    if (tempData->numCB2) {
        bytestream_put_le16(outp, RoQ_QUAD_CODEBOOK);
        /* Chunk payload size: 6 bytes per 2x2 cell, 4 per 4x4 cell. */
        bytestream_put_le32(outp, tempData->numCB2*6 + tempData->numCB4*4);
        bytestream_put_byte(outp, tempData->numCB4);
        bytestream_put_byte(outp, tempData->numCB2);

        for (i=0; i<tempData->numCB2; i++) {
            bytestream_put_buffer(outp, enc->cb2x2[tempData->f2i2[i]].y, 4);
            bytestream_put_byte(outp, enc->cb2x2[tempData->f2i2[i]].u);
            bytestream_put_byte(outp, enc->cb2x2[tempData->f2i2[i]].v);
        }

        for (i=0; i<tempData->numCB4; i++)
            for (j=0; j<4; j++)
                bytestream_put_byte(outp, tempData->i2f2[enc->cb4x4[tempData->f2i4[i]].idx[j]]);
    }
}
  488. static inline uint8_t motion_arg(motion_vect mot)
  489. {
  490. uint8_t ax = 8 - ((uint8_t) mot.d[0]);
  491. uint8_t ay = 8 - ((uint8_t) mot.d[1]);
  492. return ((ax&15)<<4) | (ay&15);
  493. }
/* Buffers 2-bit typecodes and their argument bytes so they can be
 * written in the interleaved order the RoQ bitstream requires: one
 * 16-bit typecode word followed by the arguments of those 8 codes. */
typedef struct CodingSpool {
    int typeSpool;             ///< pending typecodes packed into 16 bits
    int typeSpoolLength;       ///< bits currently held in typeSpool
    uint8_t argumentSpool[64]; ///< argument bytes pending with the typecodes
    uint8_t *args;             ///< write position inside argumentSpool
    uint8_t **pout;            ///< output stream position (enc->out_buf)
} CodingSpool;
/* NOTE: Typecodes must be spooled AFTER arguments!! */
/* Append one 2-bit typecode; once 16 bits (8 codes) have accumulated,
 * flush the typecode word (little-endian) followed by all spooled
 * argument bytes, then reset the spool. */
static void write_typecode(CodingSpool *s, uint8_t type)
{
    s->typeSpool |= (type & 3) << (14 - s->typeSpoolLength);
    s->typeSpoolLength += 2;
    if (s->typeSpoolLength == 16) {
        bytestream_put_le16(s->pout, s->typeSpool);
        bytestream_put_buffer(s->pout, s->argumentSpool,
                              s->args - s->argumentSpool);
        s->typeSpoolLength = 0;
        s->typeSpool = 0;
        s->args = s->argumentSpool;
    }
}
/**
 * Emit the RoQ_QUAD_VQ chunk for the frame and simultaneously apply
 * each chosen coding mode to the reconstruction frame so it can serve
 * as the reference for the next frame.  Typecodes and argument bytes
 * are interleaved through the CodingSpool.
 */
static void reconstruct_and_encode_image(RoqContext *enc, RoqTempdata *tempData, int w, int h, int numBlocks)
{
    int i, j, k;
    int x, y;
    int subX, subY;
    int dist=0;

    roq_qcell *qcell;
    CelEvaluation *eval;

    CodingSpool spool;

    spool.typeSpool=0;
    spool.typeSpoolLength=0;
    spool.args = spool.argumentSpool;
    spool.pout = &enc->out_buf;

    /* An odd number of subdivided cels leaves a half-filled typecode
     * word at the end; account for the padding word. */
    if (tempData->used_option[RoQ_ID_CCC]%2)
        tempData->mainChunkSize+=8; //FIXME

    /* Write the video chunk header */
    bytestream_put_le16(&enc->out_buf, RoQ_QUAD_VQ);
    bytestream_put_le32(&enc->out_buf, tempData->mainChunkSize/8);
    bytestream_put_byte(&enc->out_buf, 0x0);
    bytestream_put_byte(&enc->out_buf, 0x0);

    for (i=0; i<numBlocks; i++) {
        eval = tempData->cel_evals + i;

        x = eval->sourceX;
        y = eval->sourceY;
        dist += eval->eval_dist[eval->best_coding];

        switch (eval->best_coding) {
        case RoQ_ID_MOT:
            /* SKIP: no argument, reconstruction already holds the data. */
            write_typecode(&spool, RoQ_ID_MOT);
            break;

        case RoQ_ID_FCC:
            /* MOTION: one packed vector byte, then copy from last frame. */
            bytestream_put_byte(&spool.args, motion_arg(eval->motion));

            write_typecode(&spool, RoQ_ID_FCC);
            ff_apply_motion_8x8(enc, x, y,
                                eval->motion.d[0], eval->motion.d[1]);
            break;

        case RoQ_ID_SLD:
            /* CODEBOOK: remapped 4x4 index; paint the four 2x2 children
             * doubled to 4x4 each. */
            bytestream_put_byte(&spool.args, tempData->i2f4[eval->cbEntry]);
            write_typecode(&spool, RoQ_ID_SLD);

            qcell = enc->cb4x4 + eval->cbEntry;
            ff_apply_vector_4x4(enc, x  , y  , enc->cb2x2 + qcell->idx[0]);
            ff_apply_vector_4x4(enc, x+4, y  , enc->cb2x2 + qcell->idx[1]);
            ff_apply_vector_4x4(enc, x  , y+4, enc->cb2x2 + qcell->idx[2]);
            ff_apply_vector_4x4(enc, x+4, y+4, enc->cb2x2 + qcell->idx[3]);
            break;

        case RoQ_ID_CCC:
            /* SUBDIVIDE: recurse into the four 4x4 subcels. */
            write_typecode(&spool, RoQ_ID_CCC);

            for (j=0; j<4; j++) {
                subX = x + 4*(j&1);
                subY = y + 2*(j&2);

                switch(eval->subCels[j].best_coding) {
                case RoQ_ID_MOT:
                    break;

                case RoQ_ID_FCC:
                    bytestream_put_byte(&spool.args,
                                        motion_arg(eval->subCels[j].motion));

                    ff_apply_motion_4x4(enc, subX, subY,
                                        eval->subCels[j].motion.d[0],
                                        eval->subCels[j].motion.d[1]);
                    break;

                case RoQ_ID_SLD:
                    bytestream_put_byte(&spool.args,
                                        tempData->i2f4[eval->subCels[j].cbEntry]);

                    qcell = enc->cb4x4 + eval->subCels[j].cbEntry;
                    ff_apply_vector_2x2(enc, subX  , subY  ,
                                        enc->cb2x2 + qcell->idx[0]);
                    ff_apply_vector_2x2(enc, subX+2, subY  ,
                                        enc->cb2x2 + qcell->idx[1]);
                    ff_apply_vector_2x2(enc, subX  , subY+2,
                                        enc->cb2x2 + qcell->idx[2]);
                    ff_apply_vector_2x2(enc, subX+2, subY+2,
                                        enc->cb2x2 + qcell->idx[3]);
                    break;

                case RoQ_ID_CCC:
                    /* Four remapped 2x2 indices, one per quarter. */
                    for (k=0; k<4; k++) {
                        int cb_idx = eval->subCels[j].subCels[k];
                        bytestream_put_byte(&spool.args,
                                            tempData->i2f2[cb_idx]);

                        ff_apply_vector_2x2(enc, subX + 2*(k&1), subY + (k&2),
                                            enc->cb2x2 + cb_idx);
                    }
                    break;
                }
                write_typecode(&spool, eval->subCels[j].best_coding);
            }
            break;
        }
    }

    /* Flush the remainder of the argument/type spool */
    while (spool.typeSpoolLength)
        write_typecode(&spool, 0x0);

#if 0
    uint8_t *fdata[3] = {enc->frame_to_enc->data[0],
                         enc->frame_to_enc->data[1],
                         enc->frame_to_enc->data[2]};
    uint8_t *cdata[3] = {enc->current_frame->data[0],
                         enc->current_frame->data[1],
                         enc->current_frame->data[2]};
    av_log(enc->avctx, AV_LOG_ERROR, "Expected distortion: %i Actual: %i\n",
           dist,
           block_sse(fdata, cdata, 0, 0, 0, 0,
                     enc->frame_to_enc->linesize,
                     enc->current_frame->linesize,
                     enc->width)); //WARNING: Square dimensions implied...
#endif
}
  620. /**
  621. * Create a single YUV cell from a 2x2 section of the image
  622. */
  623. static inline void frame_block_to_cell(uint8_t *block, uint8_t * const *data,
  624. int top, int left, const int *stride)
  625. {
  626. int i, j, u=0, v=0;
  627. for (i=0; i<2; i++)
  628. for (j=0; j<2; j++) {
  629. int x = (top+i)*stride[0] + left + j;
  630. *block++ = data[0][x];
  631. x = (top+i)*stride[1] + left + j;
  632. u += data[1][x];
  633. v += data[2][x];
  634. }
  635. *block++ = (u+2)/4;
  636. *block++ = (v+2)/4;
  637. }
  638. /**
  639. * Create YUV clusters for the entire image
  640. */
  641. static void create_clusters(const AVFrame *frame, int w, int h, uint8_t *yuvClusters)
  642. {
  643. int i, j, k, l;
  644. for (i=0; i<h; i+=4)
  645. for (j=0; j<w; j+=4) {
  646. for (k=0; k < 2; k++)
  647. for (l=0; l < 2; l++)
  648. frame_block_to_cell(yuvClusters + (l + 2*k)*6, frame->data,
  649. i+2*k, j+2*l, frame->linesize);
  650. yuvClusters += 24;
  651. }
  652. }
/**
 * Run ELBG vector quantization over the input points and unpack the
 * resulting codebook into roq_cell entries (undoing CHROMA_BIAS on the
 * chroma components with rounding).
 *
 * For 4x4 codebooks a private closest-match buffer is allocated; for
 * 2x2 the result is kept in tempdata->closest_cb2 for later use by
 * gather_data_for_subcel().
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int generate_codebook(RoqContext *enc, RoqTempdata *tempdata,
                             int *points, int inputCount, roq_cell *results,
                             int size, int cbsize)
{
    int i, j, k, ret = 0;
    int c_size = size*size/4;   /* number of 2x2 cells per entry */
    int *buf;
    int *codebook = av_malloc(6*c_size*cbsize*sizeof(int));
    int *closest_cb;

    if (!codebook)
        return AVERROR(ENOMEM);

    if (size == 4) {
        closest_cb = av_malloc(6*c_size*inputCount*sizeof(int));
        if (!closest_cb) {
            ret = AVERROR(ENOMEM);
            goto out;
        }
    } else
        closest_cb = tempdata->closest_cb2;

    ret = ff_init_elbg(points, 6 * c_size, inputCount, codebook,
                       cbsize, 1, closest_cb, &enc->randctx);
    if (ret < 0)
        goto out;
    ret = ff_do_elbg(points, 6 * c_size, inputCount, codebook,
                     cbsize, 1, closest_cb, &enc->randctx);
    if (ret < 0)
        goto out;

    /* Unpack the flat int codebook into roq_cell entries. */
    buf = codebook;
    for (i=0; i<cbsize; i++)
        for (k=0; k<c_size; k++) {
            for(j=0; j<4; j++)
                results->y[j] = *buf++;

            results->u = (*buf++ + CHROMA_BIAS/2)/CHROMA_BIAS;
            results->v = (*buf++ + CHROMA_BIAS/2)/CHROMA_BIAS;
            results++;
        }
out:
    /* Only the 4x4 path owns closest_cb; the 2x2 buffer lives on in
     * tempdata and is freed by roq_encode_video(). */
    if (size == 4)
        av_free(closest_cb);
    av_free(codebook);
    return ret;
}
/**
 * Build the frame's 4x4 and 2x2 codebooks from the input image:
 * subsample into 2x2 YUV cells, apply the chroma bias, run ELBG for
 * both sizes, then unpack everything (including the pixel-doubled 8x8
 * copies) for the distortion evaluators.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int generate_new_codebooks(RoqContext *enc, RoqTempdata *tempData)
{
    int i, j, ret = 0;
    RoqCodebooks *codebooks = &tempData->codebooks;
    int max = enc->width*enc->height/16;   /* number of 4x4 input blocks */
    uint8_t mb2[3*4];
    roq_cell *results4 = av_malloc(sizeof(roq_cell)*MAX_CBS_4x4*4);
    uint8_t *yuvClusters=av_malloc(sizeof(int)*max*6*4);
    int *points = av_malloc(max*6*4*sizeof(int));
    int bias;

    if (!results4 || !yuvClusters || !points) {
        ret = AVERROR(ENOMEM);
        goto out;
    }

    /* Subsample YUV data */
    create_clusters(enc->frame_to_enc, enc->width, enc->height, yuvClusters);

    /* Cast to integer and apply chroma bias */
    for (i=0; i<max*24; i++) {
        bias = ((i%6)<4) ? 1 : CHROMA_BIAS;   /* 4 luma bytes, then U, V */
        points[i] = bias*yuvClusters[i];
    }

    /* Create 4x4 codebooks */
    if ((ret = generate_codebook(enc, tempData, points, max,
                                 results4, 4, MAX_CBS_4x4)) < 0)
        goto out;

    codebooks->numCB4 = MAX_CBS_4x4;

    tempData->closest_cb2 = av_malloc(max*4*sizeof(int));
    if (!tempData->closest_cb2) {
        ret = AVERROR(ENOMEM);
        goto out;
    }

    /* Create 2x2 codebooks */
    if ((ret = generate_codebook(enc, tempData, points, max * 4,
                                 enc->cb2x2, 2, MAX_CBS_2x2)) < 0)
        goto out;

    codebooks->numCB2 = MAX_CBS_2x2;

    /* Unpack 2x2 codebook clusters */
    for (i=0; i<codebooks->numCB2; i++)
        unpack_roq_cell(enc->cb2x2 + i, codebooks->unpacked_cb2 + i*2*2*3);

    /* Index all 4x4 entries to the 2x2 entries, unpack, and enlarge */
    for (i=0; i<codebooks->numCB4; i++) {
        for (j=0; j<4; j++) {
            unpack_roq_cell(&results4[4*i + j], mb2);
            index_mb(mb2, codebooks->unpacked_cb2, codebooks->numCB2,
                     &enc->cb4x4[i].idx[j], 2);
        }

        unpack_roq_qcell(codebooks->unpacked_cb2, enc->cb4x4 + i,
                         codebooks->unpacked_cb4 + i*4*4*3);

        enlarge_roq_mb4(codebooks->unpacked_cb4 + i*4*4*3,
                        codebooks->unpacked_cb4_enlarged + i*8*8*3);
    }
out:
    av_free(yuvClusters);
    av_free(points);
    av_free(results4);
    return ret;
}
/**
 * Encode one frame: build codebooks, run motion search (when a
 * reference exists), evaluate every cel, then write the codebook and
 * VQ chunks and update the reconstruction.  Retries with a larger
 * lambda while the VQ chunk would exceed Quake 3's 64k chunk limit.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int roq_encode_video(RoqContext *enc)
{
    RoqTempdata *tempData = enc->tmpData;
    int i, ret;

    memset(tempData, 0, sizeof(*tempData));

    ret = create_cel_evals(enc, tempData);
    if (ret < 0)
        return ret;

    ret = generate_new_codebooks(enc, tempData);
    if (ret < 0)
        return ret;

    if (enc->framesSinceKeyframe >= 1) {
        motion_search(enc, 8);
        motion_search(enc, 4);
    }

 retry_encode:
    for (i=0; i<enc->width*enc->height/64; i++)
        gather_data_for_cel(tempData->cel_evals + i, enc, tempData);

    /* Quake 3 can't handle chunks bigger than 65535 bytes */
    if (tempData->mainChunkSize/8 > 65535) {
        av_log(enc->avctx, AV_LOG_ERROR,
               "Warning, generated a frame too big (%d > 65535), "
               "try using a smaller qscale value.\n",
               tempData->mainChunkSize/8);
        enc->lambda *= 1.5;
        /* Reset the accounting gathered by gather_data_for_cel() and
         * re-evaluate everything with the harsher lambda. */
        tempData->mainChunkSize = 0;
        memset(tempData->used_option, 0, sizeof(tempData->used_option));
        memset(tempData->codebooks.usedCB4, 0,
               sizeof(tempData->codebooks.usedCB4));
        memset(tempData->codebooks.usedCB2, 0,
               sizeof(tempData->codebooks.usedCB2));

        goto retry_encode;
    }

    remap_codebooks(enc, tempData);

    write_codebooks(enc, tempData);

    reconstruct_and_encode_image(enc, tempData, enc->width, enc->height,
                                 enc->width*enc->height/64);

    /* Rotate frame history */
    FFSWAP(AVFrame *, enc->current_frame, enc->last_frame);
    FFSWAP(motion_vect *, enc->last_motion4, enc->this_motion4);
    FFSWAP(motion_vect *, enc->last_motion8, enc->this_motion8);

    av_free(tempData->cel_evals);
    av_free(tempData->closest_cb2);

    enc->framesSinceKeyframe++;

    return 0;
}
  798. static av_cold int roq_encode_end(AVCodecContext *avctx)
  799. {
  800. RoqContext *enc = avctx->priv_data;
  801. av_frame_free(&enc->current_frame);
  802. av_frame_free(&enc->last_frame);
  803. av_free(enc->tmpData);
  804. av_free(enc->this_motion4);
  805. av_free(enc->last_motion4);
  806. av_free(enc->this_motion8);
  807. av_free(enc->last_motion8);
  808. return 0;
  809. }
  810. static av_cold int roq_encode_init(AVCodecContext *avctx)
  811. {
  812. RoqContext *enc = avctx->priv_data;
  813. av_lfg_init(&enc->randctx, 1);
  814. enc->avctx = avctx;
  815. enc->framesSinceKeyframe = 0;
  816. if ((avctx->width & 0xf) || (avctx->height & 0xf)) {
  817. av_log(avctx, AV_LOG_ERROR, "Dimensions must be divisible by 16\n");
  818. return -1;
  819. }
  820. if (((avctx->width)&(avctx->width-1))||((avctx->height)&(avctx->height-1)))
  821. av_log(avctx, AV_LOG_ERROR, "Warning: dimensions not power of two\n");
  822. enc->width = avctx->width;
  823. enc->height = avctx->height;
  824. enc->framesSinceKeyframe = 0;
  825. enc->first_frame = 1;
  826. enc->last_frame = av_frame_alloc();
  827. enc->current_frame = av_frame_alloc();
  828. if (!enc->last_frame || !enc->current_frame) {
  829. roq_encode_end(avctx);
  830. return AVERROR(ENOMEM);
  831. }
  832. enc->tmpData = av_malloc(sizeof(RoqTempdata));
  833. enc->this_motion4 =
  834. av_mallocz((enc->width*enc->height/16)*sizeof(motion_vect));
  835. enc->last_motion4 =
  836. av_malloc ((enc->width*enc->height/16)*sizeof(motion_vect));
  837. enc->this_motion8 =
  838. av_mallocz((enc->width*enc->height/64)*sizeof(motion_vect));
  839. enc->last_motion8 =
  840. av_malloc ((enc->width*enc->height/64)*sizeof(motion_vect));
  841. return 0;
  842. }
/* Write the RoQ_INFO chunk carrying the frame dimensions; emitted once
 * before the first video frame. */
static void roq_write_video_info_chunk(RoqContext *enc)
{
    /* ROQ info chunk */
    bytestream_put_le16(&enc->out_buf, RoQ_INFO);

    /* Size: 8 bytes */
    bytestream_put_le32(&enc->out_buf, 8);

    /* Unused argument */
    bytestream_put_byte(&enc->out_buf, 0x00);
    bytestream_put_byte(&enc->out_buf, 0x00);

    /* Width */
    bytestream_put_le16(&enc->out_buf, enc->width);

    /* Height */
    bytestream_put_le16(&enc->out_buf, enc->height);

    /* Unused in Quake 3, mimics the output of the real encoder */
    bytestream_put_byte(&enc->out_buf, 0x08);
    bytestream_put_byte(&enc->out_buf, 0x00);
    bytestream_put_byte(&enc->out_buf, 0x04);
    bytestream_put_byte(&enc->out_buf, 0x00);
}
/**
 * Encode one frame into pkt.  Lambda is derived from the per-frame
 * quality (qscale) when set; on the very first frame the two
 * reconstruction frames are allocated and the info chunk is written.
 *
 * @return 0 on success, a negative value on failure
 */
static int roq_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *frame, int *got_packet)
{
    RoqContext *enc = avctx->priv_data;
    int size, ret;

    enc->avctx = avctx;

    enc->frame_to_enc = frame;

    if (frame->quality)
        enc->lambda = frame->quality - 1;
    else
        enc->lambda = 2*ROQ_LAMBDA_SCALE;

    /* 138 bits max per 8x8 block +
     * 256 codebooks*(6 bytes 2x2 + 4 bytes 4x4) + 8 bytes frame header */
    size = ((enc->width * enc->height / 64) * 138 + 7) / 8 + 256 * (6 + 4) + 8;
    if ((ret = ff_alloc_packet(pkt, size)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet with size %d.\n", size);
        return ret;
    }
    enc->out_buf = pkt->data;

    /* Check for I-frame */
    if (enc->framesSinceKeyframe == avctx->gop_size)
        enc->framesSinceKeyframe = 0;

    if (enc->first_frame) {
        /* Alloc memory for the reconstruction data (we must know the stride
         for that) */
        if (ff_get_buffer(avctx, enc->current_frame, 0) ||
            ff_get_buffer(avctx, enc->last_frame, 0)) {
            av_log(avctx, AV_LOG_ERROR, "  RoQ: get_buffer() failed\n");
            return -1;
        }

        /* Before the first video frame, write a "video info" chunk */
        roq_write_video_info_chunk(enc);

        enc->first_frame = 0;
    }

    /* Encode the actual frame */
    ret = roq_encode_video(enc);
    if (ret < 0)
        return ret;

    pkt->size   = enc->out_buf - pkt->data;
    if (enc->framesSinceKeyframe == 1)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
/* Encoder registration.  Only YUV444P input is accepted and 30 fps is
 * advertised as the sole supported framerate. */
AVCodec ff_roq_encoder = {
    .name                 = "roqvideo",
    .long_name            = NULL_IF_CONFIG_SMALL("id RoQ video"),
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_ROQ,
    .priv_data_size       = sizeof(RoqContext),
    .init                 = roq_encode_init,
    .encode2              = roq_encode_frame,
    .close                = roq_encode_end,
    .supported_framerates = (const AVRational[]){ {30,1}, {0,0} },
    .pix_fmts             = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV444P,
                                                          AV_PIX_FMT_NONE },
};