/*
 * RoQ Video Encoder.
 *
 * Copyright (C) 2007 Vitor Sessak <vitor1001@gmail.com>
 * Copyright (C) 2004-2007 Eric Lasota
 *    Based on RoQ specs (C) 2001 Tim Ferguson
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * id RoQ encoder by Vitor. Based on the Switchblade3 library and the
 * Switchblade3 Libav glue by Eric Lasota.
 */
/*
 * COSTS:
 * Level 1:
 *  SKIP - 2 bits
 *  MOTION - 2 + 8 bits
 *  CODEBOOK - 2 + 8 bits
 *  SUBDIVIDE - 2 + combined subcel cost
 *
 * Level 2:
 *  SKIP - 2 bits
 *  MOTION - 2 + 8 bits
 *  CODEBOOK - 2 + 8 bits
 *  SUBDIVIDE - 2 + 4*8 bits
 *
 * Maximum cost: 138 bits per cel
 *
 * Proper evaluation requires LCD fraction comparison, which requires
 * Squared Error (SE) loss * savings increase
 *
 * Maximum savings increase: 136 bits
 * Maximum SE loss without overflow: 31580641
 * Components in 8x8 supercel: 192
 * Maximum SE precision per component: 164482
 * >65025, so no truncation is needed (phew)
 */
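/*
 * The figures above follow from the cost table; spelled out:
 *   worst-case cel:   SUBDIVIDE (2) + 4 subcels, each taking the level-2
 *                     SUBDIVIDE path (2 + 4*8 = 34) -> 2 + 4*34 = 138 bits
 *   savings increase: worst case minus SKIP = 138 - 2 = 136 bits
 *   SE bound:         floor(2^32 / 136) = 31580641, so SE * savings stays
 *                     within 32 bits
 *   components:       8*8 pixels * 3 planes = 192 per 8x8 supercel
 *   per component:    floor(31580641 / 192) = 164482, which is above the
 *                     255^2 = 65025 maximum SE a single component can add
 */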
#include <string.h>

#include "libavutil/attributes.h"
#include "roqvideo.h"
#include "bytestream.h"
#include "elbg.h"
#include "internal.h"
#include "mathops.h"

#define CHROMA_BIAS 1

/**
 * Maximum number of generated 4x4 codebooks. Can't be 256 to work around a
 * Quake 3 bug.
 */
#define MAX_CBS_4x4 255

#define MAX_CBS_2x2 256 ///< Maximum number of 2x2 codebooks.

/* The cast is useful when multiplying it by INT_MAX */
#define ROQ_LAMBDA_SCALE ((uint64_t) FF_LAMBDA_SCALE)
/* Macroblock support functions */
static void unpack_roq_cell(roq_cell *cell, uint8_t u[4*3])
{
    memcpy(u  , cell->y, 4);
    memset(u+4, cell->u, 4);
    memset(u+8, cell->v, 4);
}

static void unpack_roq_qcell(uint8_t cb2[], roq_qcell *qcell, uint8_t u[4*4*3])
{
    int i,cp;
    static const int offsets[4] = {0, 2, 8, 10};

    for (cp=0; cp<3; cp++)
        for (i=0; i<4; i++) {
            u[4*4*cp + offsets[i]  ] = cb2[qcell->idx[i]*2*2*3 + 4*cp  ];
            u[4*4*cp + offsets[i]+1] = cb2[qcell->idx[i]*2*2*3 + 4*cp+1];
            u[4*4*cp + offsets[i]+4] = cb2[qcell->idx[i]*2*2*3 + 4*cp+2];
            u[4*4*cp + offsets[i]+5] = cb2[qcell->idx[i]*2*2*3 + 4*cp+3];
        }
}

static void enlarge_roq_mb4(uint8_t base[3*16], uint8_t u[3*64])
{
    int x,y,cp;

    for(cp=0; cp<3; cp++)
        for(y=0; y<8; y++)
            for(x=0; x<8; x++)
                *u++ = base[(y/2)*4 + (x/2) + 16*cp];
}

static inline int square(int x)
{
    return x*x;
}
static inline int eval_sse(const uint8_t *a, const uint8_t *b, int count)
{
    int diff=0;

    while(count--)
        diff += square(*b++ - *a++);

    return diff;
}

// FIXME Could use DSPContext.sse, but it is not so speed critical (used
// just for motion estimation).
static int block_sse(uint8_t * const *buf1, uint8_t * const *buf2, int x1, int y1,
                     int x2, int y2, const int *stride1, const int *stride2, int size)
{
    int i, k;
    int sse=0;

    for (k=0; k<3; k++) {
        int bias = (k ? CHROMA_BIAS : 4);

        for (i=0; i<size; i++)
            sse += bias*eval_sse(buf1[k] + (y1+i)*stride1[k] + x1,
                                 buf2[k] + (y2+i)*stride2[k] + x2, size);
    }

    return sse;
}
static int eval_motion_dist(RoqContext *enc, int x, int y, motion_vect vect,
                            int size)
{
    int mx=vect.d[0];
    int my=vect.d[1];

    if (mx < -7 || mx > 7)
        return INT_MAX;

    if (my < -7 || my > 7)
        return INT_MAX;

    mx += x;
    my += y;

    if ((unsigned) mx > enc->width-size || (unsigned) my > enc->height-size)
        return INT_MAX;

    return block_sse(enc->frame_to_enc->data, enc->last_frame->data, x, y,
                     mx, my,
                     enc->frame_to_enc->linesize, enc->last_frame->linesize,
                     size);
}

/**
 * @return distortion between two macroblocks
 */
static inline int squared_diff_macroblock(uint8_t a[], uint8_t b[], int size)
{
    int cp, sdiff=0;

    for(cp=0;cp<3;cp++) {
        int bias = (cp ? CHROMA_BIAS : 4);
        sdiff += bias*eval_sse(a, b, size*size);
        a += size*size;
        b += size*size;
    }

    return sdiff;
}
typedef struct SubcelEvaluation {
    int eval_dist[4];
    int best_bit_use;
    int best_coding;

    int subCels[4];
    motion_vect motion;
    int cbEntry;
} SubcelEvaluation;

typedef struct CelEvaluation {
    int eval_dist[4];
    int best_coding;

    SubcelEvaluation subCels[4];

    motion_vect motion;
    int cbEntry;

    int sourceX, sourceY;
} CelEvaluation;

typedef struct RoqCodebooks {
    int numCB4;
    int numCB2;
    int usedCB2[MAX_CBS_2x2];
    int usedCB4[MAX_CBS_4x4];
    uint8_t unpacked_cb2[MAX_CBS_2x2*2*2*3];
    uint8_t unpacked_cb4[MAX_CBS_4x4*4*4*3];
    uint8_t unpacked_cb4_enlarged[MAX_CBS_4x4*8*8*3];
} RoqCodebooks;

/**
 * Temporary vars
 */
typedef struct RoqTempData
{
    CelEvaluation *cel_evals;

    int f2i4[MAX_CBS_4x4];
    int i2f4[MAX_CBS_4x4];
    int f2i2[MAX_CBS_2x2];
    int i2f2[MAX_CBS_2x2];

    int mainChunkSize;

    int numCB4;
    int numCB2;

    RoqCodebooks codebooks;

    int *closest_cb2;
    int used_option[4];
} RoqTempdata;
/**
 * Initialize cel evaluators and set their source coordinates
 */
static int create_cel_evals(RoqContext *enc, RoqTempdata *tempData)
{
    int n=0, x, y, i;

    tempData->cel_evals = av_malloc(enc->width*enc->height/64 * sizeof(CelEvaluation));
    if (!tempData->cel_evals)
        return AVERROR(ENOMEM);

    /* Map to the ROQ quadtree order */
    for (y=0; y<enc->height; y+=16)
        for (x=0; x<enc->width; x+=16)
            for(i=0; i<4; i++) {
                tempData->cel_evals[n  ].sourceX = x + (i&1)*8;
                tempData->cel_evals[n++].sourceY = y + (i&2)*4;
            }

    return 0;
}

/**
 * Get macroblocks from parts of the image
 */
static void get_frame_mb(const AVFrame *frame, int x, int y, uint8_t mb[], int dim)
{
    int i, j, cp;

    for (cp=0; cp<3; cp++) {
        int stride = frame->linesize[cp];
        for (i=0; i<dim; i++)
            for (j=0; j<dim; j++)
                *mb++ = frame->data[cp][(y+i)*stride + x + j];
    }
}

/**
 * Find the codebook with the lowest distortion from an image
 */
static int index_mb(uint8_t cluster[], uint8_t cb[], int numCB,
                    int *outIndex, int dim)
{
    int i, lDiff = INT_MAX, pick=0;

    /* Diff against the others */
    for (i=0; i<numCB; i++) {
        int diff = squared_diff_macroblock(cluster, cb + i*dim*dim*3, dim);
        if (diff < lDiff) {
            lDiff = diff;
            pick = i;
        }
    }

    *outIndex = pick;
    return lDiff;
}
#define EVAL_MOTION(MOTION) \
    do { \
        diff = eval_motion_dist(enc, j, i, MOTION, blocksize); \
        \
        if (diff < lowestdiff) { \
            lowestdiff = diff; \
            bestpick = MOTION; \
        } \
    } while(0)

static void motion_search(RoqContext *enc, int blocksize)
{
    static const motion_vect offsets[8] = {
        {{ 0,-1}},
        {{ 0, 1}},
        {{-1, 0}},
        {{ 1, 0}},
        {{-1, 1}},
        {{ 1,-1}},
        {{-1,-1}},
        {{ 1, 1}},
    };

    int diff, lowestdiff, oldbest;
    int off[3];
    motion_vect bestpick = {{0,0}};
    int i, j, k, offset;

    motion_vect *last_motion;
    motion_vect *this_motion;
    motion_vect vect, vect2;

    int max=(enc->width/blocksize)*enc->height/blocksize;

    if (blocksize == 4) {
        last_motion = enc->last_motion4;
        this_motion = enc->this_motion4;
    } else {
        last_motion = enc->last_motion8;
        this_motion = enc->this_motion8;
    }

    for (i=0; i<enc->height; i+=blocksize)
        for (j=0; j<enc->width; j+=blocksize) {
            lowestdiff = eval_motion_dist(enc, j, i, (motion_vect) {{0,0}},
                                          blocksize);
            bestpick.d[0] = 0;
            bestpick.d[1] = 0;

            if (blocksize == 4)
                EVAL_MOTION(enc->this_motion8[(i/8)*(enc->width/8) + j/8]);

            offset = (i/blocksize)*enc->width/blocksize + j/blocksize;
            if (offset < max && offset >= 0)
                EVAL_MOTION(last_motion[offset]);

            offset++;
            if (offset < max && offset >= 0)
                EVAL_MOTION(last_motion[offset]);

            offset = (i/blocksize + 1)*enc->width/blocksize + j/blocksize;
            if (offset < max && offset >= 0)
                EVAL_MOTION(last_motion[offset]);

            off[0]= (i/blocksize)*enc->width/blocksize + j/blocksize - 1;
            off[1]= off[0] - enc->width/blocksize + 1;
            off[2]= off[1] + 1;

            if (i) {
                for(k=0; k<2; k++)
                    vect.d[k]= mid_pred(this_motion[off[0]].d[k],
                                        this_motion[off[1]].d[k],
                                        this_motion[off[2]].d[k]);

                EVAL_MOTION(vect);
                for(k=0; k<3; k++)
                    EVAL_MOTION(this_motion[off[k]]);
            } else if(j)
                EVAL_MOTION(this_motion[off[0]]);

            vect = bestpick;

            oldbest = -1;
            while (oldbest != lowestdiff) {
                oldbest = lowestdiff;
                for (k=0; k<8; k++) {
                    vect2 = vect;
                    vect2.d[0] += offsets[k].d[0];
                    vect2.d[1] += offsets[k].d[1];
                    EVAL_MOTION(vect2);
                }
                vect = bestpick;
            }
            offset = (i/blocksize)*enc->width/blocksize + j/blocksize;
            this_motion[offset] = bestpick;
        }
}
/**
 * Get distortion for all options available to a subcel
 */
static void gather_data_for_subcel(SubcelEvaluation *subcel, int x,
                                   int y, RoqContext *enc, RoqTempdata *tempData)
{
    uint8_t mb4[4*4*3];
    uint8_t mb2[2*2*3];
    int cluster_index;
    int i, best_dist;
    static const int bitsUsed[4] = {2, 10, 10, 34};

    if (enc->framesSinceKeyframe >= 1) {
        subcel->motion = enc->this_motion4[y*enc->width/16 + x/4];

        subcel->eval_dist[RoQ_ID_FCC] =
            eval_motion_dist(enc, x, y,
                             enc->this_motion4[y*enc->width/16 + x/4], 4);
    } else
        subcel->eval_dist[RoQ_ID_FCC] = INT_MAX;

    if (enc->framesSinceKeyframe >= 2)
        subcel->eval_dist[RoQ_ID_MOT] = block_sse(enc->frame_to_enc->data,
                                                  enc->current_frame->data, x,
                                                  y, x, y,
                                                  enc->frame_to_enc->linesize,
                                                  enc->current_frame->linesize,
                                                  4);
    else
        subcel->eval_dist[RoQ_ID_MOT] = INT_MAX;

    cluster_index = y*enc->width/16 + x/4;

    get_frame_mb(enc->frame_to_enc, x, y, mb4, 4);

    subcel->eval_dist[RoQ_ID_SLD] = index_mb(mb4,
                                             tempData->codebooks.unpacked_cb4,
                                             tempData->codebooks.numCB4,
                                             &subcel->cbEntry, 4);

    subcel->eval_dist[RoQ_ID_CCC] = 0;

    for(i=0;i<4;i++) {
        subcel->subCels[i] = tempData->closest_cb2[cluster_index*4+i];

        get_frame_mb(enc->frame_to_enc, x+2*(i&1),
                     y+(i&2), mb2, 2);

        subcel->eval_dist[RoQ_ID_CCC] +=
            squared_diff_macroblock(tempData->codebooks.unpacked_cb2 + subcel->subCels[i]*2*2*3, mb2, 2);
    }

    best_dist = INT_MAX;
    for (i=0; i<4; i++)
        if (ROQ_LAMBDA_SCALE*subcel->eval_dist[i] + enc->lambda*bitsUsed[i] <
            best_dist) {
            subcel->best_coding = i;
            subcel->best_bit_use = bitsUsed[i];
            best_dist = ROQ_LAMBDA_SCALE*subcel->eval_dist[i] +
                        enc->lambda*bitsUsed[i];
        }
}
/**
 * Get distortion for all options available to a cel
 */
static void gather_data_for_cel(CelEvaluation *cel, RoqContext *enc,
                                RoqTempdata *tempData)
{
    uint8_t mb8[8*8*3];
    int index = cel->sourceY*enc->width/64 + cel->sourceX/8;
    int i, j, best_dist, divide_bit_use;

    int bitsUsed[4] = {2, 10, 10, 0};

    if (enc->framesSinceKeyframe >= 1) {
        cel->motion = enc->this_motion8[index];

        cel->eval_dist[RoQ_ID_FCC] =
            eval_motion_dist(enc, cel->sourceX, cel->sourceY,
                             enc->this_motion8[index], 8);
    } else
        cel->eval_dist[RoQ_ID_FCC] = INT_MAX;

    if (enc->framesSinceKeyframe >= 2)
        cel->eval_dist[RoQ_ID_MOT] = block_sse(enc->frame_to_enc->data,
                                               enc->current_frame->data,
                                               cel->sourceX, cel->sourceY,
                                               cel->sourceX, cel->sourceY,
                                               enc->frame_to_enc->linesize,
                                               enc->current_frame->linesize,8);
    else
        cel->eval_dist[RoQ_ID_MOT] = INT_MAX;

    get_frame_mb(enc->frame_to_enc, cel->sourceX, cel->sourceY, mb8, 8);

    cel->eval_dist[RoQ_ID_SLD] =
        index_mb(mb8, tempData->codebooks.unpacked_cb4_enlarged,
                 tempData->codebooks.numCB4, &cel->cbEntry, 8);

    gather_data_for_subcel(cel->subCels + 0, cel->sourceX+0, cel->sourceY+0, enc, tempData);
    gather_data_for_subcel(cel->subCels + 1, cel->sourceX+4, cel->sourceY+0, enc, tempData);
    gather_data_for_subcel(cel->subCels + 2, cel->sourceX+0, cel->sourceY+4, enc, tempData);
    gather_data_for_subcel(cel->subCels + 3, cel->sourceX+4, cel->sourceY+4, enc, tempData);

    cel->eval_dist[RoQ_ID_CCC] = 0;
    divide_bit_use = 0;
    for (i=0; i<4; i++) {
        cel->eval_dist[RoQ_ID_CCC] +=
            cel->subCels[i].eval_dist[cel->subCels[i].best_coding];
        divide_bit_use += cel->subCels[i].best_bit_use;
    }

    best_dist = INT_MAX;
    bitsUsed[3] = 2 + divide_bit_use;

    for (i=0; i<4; i++)
        if (ROQ_LAMBDA_SCALE*cel->eval_dist[i] + enc->lambda*bitsUsed[i] <
            best_dist) {
            cel->best_coding = i;
            best_dist = ROQ_LAMBDA_SCALE*cel->eval_dist[i] +
                        enc->lambda*bitsUsed[i];
        }

    tempData->used_option[cel->best_coding]++;
    tempData->mainChunkSize += bitsUsed[cel->best_coding];

    if (cel->best_coding == RoQ_ID_SLD)
        tempData->codebooks.usedCB4[cel->cbEntry]++;

    if (cel->best_coding == RoQ_ID_CCC)
        for (i=0; i<4; i++) {
            if (cel->subCels[i].best_coding == RoQ_ID_SLD)
                tempData->codebooks.usedCB4[cel->subCels[i].cbEntry]++;
            else if (cel->subCels[i].best_coding == RoQ_ID_CCC)
                for (j=0; j<4; j++)
                    tempData->codebooks.usedCB2[cel->subCels[i].subCels[j]]++;
        }
}
static void remap_codebooks(RoqContext *enc, RoqTempdata *tempData)
{
    int i, j, idx=0;

    /* Make remaps for the final codebook usage */
    for (i=0; i<MAX_CBS_4x4; i++) {
        if (tempData->codebooks.usedCB4[i]) {
            tempData->i2f4[i] = idx;
            tempData->f2i4[idx] = i;
            for (j=0; j<4; j++)
                tempData->codebooks.usedCB2[enc->cb4x4[i].idx[j]]++;
            idx++;
        }
    }

    tempData->numCB4 = idx;

    idx = 0;
    for (i=0; i<MAX_CBS_2x2; i++) {
        if (tempData->codebooks.usedCB2[i]) {
            tempData->i2f2[i] = idx;
            tempData->f2i2[idx] = i;
            idx++;
        }
    }
    tempData->numCB2 = idx;
}
/**
 * Write codebook chunk
 */
static void write_codebooks(RoqContext *enc, RoqTempdata *tempData)
{
    int i, j;
    uint8_t **outp= &enc->out_buf;

    if (tempData->numCB2) {
        bytestream_put_le16(outp, RoQ_QUAD_CODEBOOK);
        bytestream_put_le32(outp, tempData->numCB2*6 + tempData->numCB4*4);
        bytestream_put_byte(outp, tempData->numCB4);
        bytestream_put_byte(outp, tempData->numCB2);

        for (i=0; i<tempData->numCB2; i++) {
            bytestream_put_buffer(outp, enc->cb2x2[tempData->f2i2[i]].y, 4);
            bytestream_put_byte(outp, enc->cb2x2[tempData->f2i2[i]].u);
            bytestream_put_byte(outp, enc->cb2x2[tempData->f2i2[i]].v);
        }

        for (i=0; i<tempData->numCB4; i++)
            for (j=0; j<4; j++)
                bytestream_put_byte(outp, tempData->i2f2[enc->cb4x4[tempData->f2i4[i]].idx[j]]);
    }
}
static inline uint8_t motion_arg(motion_vect mot)
{
    uint8_t ax = 8 - ((uint8_t) mot.d[0]);
    uint8_t ay = 8 - ((uint8_t) mot.d[1]);
    return ((ax&15)<<4) | (ay&15);
}

typedef struct CodingSpool {
    int typeSpool;
    int typeSpoolLength;
    uint8_t argumentSpool[64];
    uint8_t *args;
    uint8_t **pout;
} CodingSpool;

/* NOTE: Typecodes must be spooled AFTER arguments!! */
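/*
 * (Assumed stream layout, per the RoQ specs credited above: each group of
 * eight 2-bit typecodes is packed into one little-endian 16-bit word, and
 * the argument bytes for those eight cels follow that word. Hence the
 * spool: arguments are buffered first, and once the 16-bit typecode word
 * fills up, write_typecode() flushes the word followed by the buffered
 * arguments.)
 */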
static void write_typecode(CodingSpool *s, uint8_t type)
{
    s->typeSpool |= (type & 3) << (14 - s->typeSpoolLength);
    s->typeSpoolLength += 2;
    if (s->typeSpoolLength == 16) {
        bytestream_put_le16(s->pout, s->typeSpool);
        bytestream_put_buffer(s->pout, s->argumentSpool,
                              s->args - s->argumentSpool);
        s->typeSpoolLength = 0;
        s->typeSpool = 0;
        s->args = s->argumentSpool;
    }
}
static void reconstruct_and_encode_image(RoqContext *enc, RoqTempdata *tempData, int w, int h, int numBlocks)
{
    int i, j, k;
    int x, y;
    int subX, subY;
    int dist=0;

    roq_qcell *qcell;
    CelEvaluation *eval;

    CodingSpool spool;

    spool.typeSpool=0;
    spool.typeSpoolLength=0;
    spool.args = spool.argumentSpool;
    spool.pout = &enc->out_buf;

    if (tempData->used_option[RoQ_ID_CCC]%2)
        tempData->mainChunkSize+=8; //FIXME

    /* Write the video chunk header */
    bytestream_put_le16(&enc->out_buf, RoQ_QUAD_VQ);
    bytestream_put_le32(&enc->out_buf, tempData->mainChunkSize/8);
    bytestream_put_byte(&enc->out_buf, 0x0);
    bytestream_put_byte(&enc->out_buf, 0x0);

    for (i=0; i<numBlocks; i++) {
        eval = tempData->cel_evals + i;

        x = eval->sourceX;
        y = eval->sourceY;
        dist += eval->eval_dist[eval->best_coding];

        switch (eval->best_coding) {
        case RoQ_ID_MOT:
            write_typecode(&spool, RoQ_ID_MOT);
            break;

        case RoQ_ID_FCC:
            bytestream_put_byte(&spool.args, motion_arg(eval->motion));

            write_typecode(&spool, RoQ_ID_FCC);
            ff_apply_motion_8x8(enc, x, y,
                                eval->motion.d[0], eval->motion.d[1]);
            break;

        case RoQ_ID_SLD:
            bytestream_put_byte(&spool.args, tempData->i2f4[eval->cbEntry]);
            write_typecode(&spool, RoQ_ID_SLD);

            qcell = enc->cb4x4 + eval->cbEntry;
            ff_apply_vector_4x4(enc, x  , y  , enc->cb2x2 + qcell->idx[0]);
            ff_apply_vector_4x4(enc, x+4, y  , enc->cb2x2 + qcell->idx[1]);
            ff_apply_vector_4x4(enc, x  , y+4, enc->cb2x2 + qcell->idx[2]);
            ff_apply_vector_4x4(enc, x+4, y+4, enc->cb2x2 + qcell->idx[3]);
            break;

        case RoQ_ID_CCC:
            write_typecode(&spool, RoQ_ID_CCC);

            for (j=0; j<4; j++) {
                subX = x + 4*(j&1);
                subY = y + 2*(j&2);

                switch(eval->subCels[j].best_coding) {
                case RoQ_ID_MOT:
                    break;

                case RoQ_ID_FCC:
                    bytestream_put_byte(&spool.args,
                                        motion_arg(eval->subCels[j].motion));

                    ff_apply_motion_4x4(enc, subX, subY,
                                        eval->subCels[j].motion.d[0],
                                        eval->subCels[j].motion.d[1]);
                    break;

                case RoQ_ID_SLD:
                    bytestream_put_byte(&spool.args,
                                        tempData->i2f4[eval->subCels[j].cbEntry]);

                    qcell = enc->cb4x4 + eval->subCels[j].cbEntry;

                    ff_apply_vector_2x2(enc, subX  , subY  ,
                                        enc->cb2x2 + qcell->idx[0]);
                    ff_apply_vector_2x2(enc, subX+2, subY  ,
                                        enc->cb2x2 + qcell->idx[1]);
                    ff_apply_vector_2x2(enc, subX  , subY+2,
                                        enc->cb2x2 + qcell->idx[2]);
                    ff_apply_vector_2x2(enc, subX+2, subY+2,
                                        enc->cb2x2 + qcell->idx[3]);
                    break;

                case RoQ_ID_CCC:
                    for (k=0; k<4; k++) {
                        int cb_idx = eval->subCels[j].subCels[k];
                        bytestream_put_byte(&spool.args,
                                            tempData->i2f2[cb_idx]);

                        ff_apply_vector_2x2(enc, subX + 2*(k&1), subY + (k&2),
                                            enc->cb2x2 + cb_idx);
                    }
                    break;
                }
                write_typecode(&spool, eval->subCels[j].best_coding);
            }
            break;
        }
    }

    /* Flush the remainder of the argument/type spool */
    while (spool.typeSpoolLength)
        write_typecode(&spool, 0x0);
}
/**
 * Create a single YUV cell from a 2x2 section of the image
 */
static inline void frame_block_to_cell(uint8_t *block, uint8_t * const *data,
                                       int top, int left, const int *stride)
{
    int i, j, u=0, v=0;

    for (i=0; i<2; i++)
        for (j=0; j<2; j++) {
            int x = (top+i)*stride[0] + left + j;
            *block++ = data[0][x];
            x = (top+i)*stride[1] + left + j;
            u += data[1][x];
            v += data[2][x];
        }

    *block++ = (u+2)/4;
    *block++ = (v+2)/4;
}

/**
 * Create YUV clusters for the entire image
 */
static void create_clusters(const AVFrame *frame, int w, int h, uint8_t *yuvClusters)
{
    int i, j, k, l;

    for (i=0; i<h; i+=4)
        for (j=0; j<w; j+=4) {
            for (k=0; k < 2; k++)
                for (l=0; l < 2; l++)
                    frame_block_to_cell(yuvClusters + (l + 2*k)*6, frame->data,
                                        i+2*k, j+2*l, frame->linesize);
            yuvClusters += 24;
        }
}
static int generate_codebook(RoqContext *enc, RoqTempdata *tempdata,
                             int *points, int inputCount, roq_cell *results,
                             int size, int cbsize)
{
    int i, j, k, ret = 0;
    int c_size = size*size/4;
    int *buf;
    int *codebook = av_malloc(6*c_size*cbsize*sizeof(int));
    int *closest_cb;

    if (!codebook)
        return AVERROR(ENOMEM);

    if (size == 4) {
        closest_cb = av_malloc(6*c_size*inputCount*sizeof(int));
        if (!closest_cb) {
            ret = AVERROR(ENOMEM);
            goto out;
        }
    } else
        closest_cb = tempdata->closest_cb2;

    ret = ff_init_elbg(points, 6 * c_size, inputCount, codebook,
                       cbsize, 1, closest_cb, &enc->randctx);
    if (ret < 0)
        goto out;
    ret = ff_do_elbg(points, 6 * c_size, inputCount, codebook,
                     cbsize, 1, closest_cb, &enc->randctx);
    if (ret < 0)
        goto out;

    buf = codebook;
    for (i=0; i<cbsize; i++)
        for (k=0; k<c_size; k++) {
            for(j=0; j<4; j++)
                results->y[j] = *buf++;

            results->u = (*buf++ + CHROMA_BIAS/2)/CHROMA_BIAS;
            results->v = (*buf++ + CHROMA_BIAS/2)/CHROMA_BIAS;

            results++;
        }
out:
    if (size == 4)
        av_free(closest_cb);
    av_free(codebook);
    return ret;
}
static int generate_new_codebooks(RoqContext *enc, RoqTempdata *tempData)
{
    int i, j, ret = 0;
    RoqCodebooks *codebooks = &tempData->codebooks;
    int max = enc->width*enc->height/16;
    uint8_t mb2[3*4];
    roq_cell *results4 = av_malloc(sizeof(roq_cell)*MAX_CBS_4x4*4);
    uint8_t *yuvClusters=av_malloc(sizeof(int)*max*6*4);
    int *points = av_malloc(max*6*4*sizeof(int));
    int bias;

    if (!results4 || !yuvClusters || !points) {
        ret = AVERROR(ENOMEM);
        goto out;
    }

    /* Subsample YUV data */
    create_clusters(enc->frame_to_enc, enc->width, enc->height, yuvClusters);

    /* Cast to integer and apply chroma bias */
    for (i=0; i<max*24; i++) {
        bias = ((i%6)<4) ? 1 : CHROMA_BIAS;
        points[i] = bias*yuvClusters[i];
    }

    /* Create 4x4 codebooks */
    if ((ret = generate_codebook(enc, tempData, points, max,
                                 results4, 4, MAX_CBS_4x4)) < 0)
        goto out;

    codebooks->numCB4 = MAX_CBS_4x4;

    tempData->closest_cb2 = av_malloc(max*4*sizeof(int));
    if (!tempData->closest_cb2) {
        ret = AVERROR(ENOMEM);
        goto out;
    }

    /* Create 2x2 codebooks */
    if ((ret = generate_codebook(enc, tempData, points, max * 4,
                                 enc->cb2x2, 2, MAX_CBS_2x2)) < 0)
        goto out;

    codebooks->numCB2 = MAX_CBS_2x2;

    /* Unpack 2x2 codebook clusters */
    for (i=0; i<codebooks->numCB2; i++)
        unpack_roq_cell(enc->cb2x2 + i, codebooks->unpacked_cb2 + i*2*2*3);

    /* Index all 4x4 entries to the 2x2 entries, unpack, and enlarge */
    for (i=0; i<codebooks->numCB4; i++) {
        for (j=0; j<4; j++) {
            unpack_roq_cell(&results4[4*i + j], mb2);
            index_mb(mb2, codebooks->unpacked_cb2, codebooks->numCB2,
                     &enc->cb4x4[i].idx[j], 2);
        }

        unpack_roq_qcell(codebooks->unpacked_cb2, enc->cb4x4 + i,
                         codebooks->unpacked_cb4 + i*4*4*3);
        enlarge_roq_mb4(codebooks->unpacked_cb4 + i*4*4*3,
                        codebooks->unpacked_cb4_enlarged + i*8*8*3);
    }
out:
    av_free(yuvClusters);
    av_free(points);
    av_free(results4);
    return ret;
}
static int roq_encode_video(RoqContext *enc)
{
    RoqTempdata *tempData = enc->tmpData;
    int i, ret;

    memset(tempData, 0, sizeof(*tempData));

    ret = create_cel_evals(enc, tempData);
    if (ret < 0)
        return ret;

    ret = generate_new_codebooks(enc, tempData);
    if (ret < 0)
        return ret;

    if (enc->framesSinceKeyframe >= 1) {
        motion_search(enc, 8);
        motion_search(enc, 4);
    }

 retry_encode:
    for (i=0; i<enc->width*enc->height/64; i++)
        gather_data_for_cel(tempData->cel_evals + i, enc, tempData);

    /* Quake 3 can't handle chunks bigger than 65535 bytes */
    if (tempData->mainChunkSize/8 > 65535) {
        av_log(enc->avctx, AV_LOG_ERROR,
               "Warning, generated a frame too big (%d > 65535), "
               "try using a smaller qscale value.\n",
               tempData->mainChunkSize/8);
        enc->lambda *= 1.5;
        tempData->mainChunkSize = 0;
        memset(tempData->used_option, 0, sizeof(tempData->used_option));
        memset(tempData->codebooks.usedCB4, 0,
               sizeof(tempData->codebooks.usedCB4));
        memset(tempData->codebooks.usedCB2, 0,
               sizeof(tempData->codebooks.usedCB2));

        goto retry_encode;
    }

    remap_codebooks(enc, tempData);

    write_codebooks(enc, tempData);

    reconstruct_and_encode_image(enc, tempData, enc->width, enc->height,
                                 enc->width*enc->height/64);

    /* Rotate frame history */
    FFSWAP(AVFrame *, enc->current_frame, enc->last_frame);
    FFSWAP(motion_vect *, enc->last_motion4, enc->this_motion4);
    FFSWAP(motion_vect *, enc->last_motion8, enc->this_motion8);

    av_free(tempData->cel_evals);
    av_free(tempData->closest_cb2);

    enc->framesSinceKeyframe++;

    return 0;
}
static av_cold int roq_encode_end(AVCodecContext *avctx)
{
    RoqContext *enc = avctx->priv_data;

    av_frame_free(&enc->current_frame);
    av_frame_free(&enc->last_frame);

    av_free(enc->tmpData);
    av_free(enc->this_motion4);
    av_free(enc->last_motion4);
    av_free(enc->this_motion8);
    av_free(enc->last_motion8);

    return 0;
}
static av_cold int roq_encode_init(AVCodecContext *avctx)
{
    RoqContext *enc = avctx->priv_data;

    av_lfg_init(&enc->randctx, 1);

    enc->avctx = avctx;

    enc->framesSinceKeyframe = 0;
    if ((avctx->width & 0xf) || (avctx->height & 0xf)) {
        av_log(avctx, AV_LOG_ERROR, "Dimensions must be divisible by 16\n");
        return -1;
    }

    if (((avctx->width)&(avctx->width-1))||((avctx->height)&(avctx->height-1)))
        av_log(avctx, AV_LOG_ERROR, "Warning: dimensions not power of two\n");

    enc->width  = avctx->width;
    enc->height = avctx->height;

    enc->framesSinceKeyframe = 0;
    enc->first_frame = 1;

    enc->last_frame    = av_frame_alloc();
    enc->current_frame = av_frame_alloc();
    if (!enc->last_frame || !enc->current_frame) {
        roq_encode_end(avctx);
        return AVERROR(ENOMEM);
    }

    enc->tmpData = av_malloc(sizeof(RoqTempdata));

    enc->this_motion4 =
        av_mallocz((enc->width*enc->height/16)*sizeof(motion_vect));

    enc->last_motion4 =
        av_malloc ((enc->width*enc->height/16)*sizeof(motion_vect));

    enc->this_motion8 =
        av_mallocz((enc->width*enc->height/64)*sizeof(motion_vect));

    enc->last_motion8 =
        av_malloc ((enc->width*enc->height/64)*sizeof(motion_vect));

    return 0;
}
static void roq_write_video_info_chunk(RoqContext *enc)
{
    /* ROQ info chunk */
    bytestream_put_le16(&enc->out_buf, RoQ_INFO);

    /* Size: 8 bytes */
    bytestream_put_le32(&enc->out_buf, 8);

    /* Unused argument */
    bytestream_put_byte(&enc->out_buf, 0x00);
    bytestream_put_byte(&enc->out_buf, 0x00);

    /* Width */
    bytestream_put_le16(&enc->out_buf, enc->width);

    /* Height */
    bytestream_put_le16(&enc->out_buf, enc->height);

    /* Unused in Quake 3, mimics the output of the real encoder */
    bytestream_put_byte(&enc->out_buf, 0x08);
    bytestream_put_byte(&enc->out_buf, 0x00);
    bytestream_put_byte(&enc->out_buf, 0x04);
    bytestream_put_byte(&enc->out_buf, 0x00);
}
static int roq_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *frame, int *got_packet)
{
    RoqContext *enc = avctx->priv_data;
    int size, ret;

    enc->avctx = avctx;

    enc->frame_to_enc = frame;

    if (frame->quality)
        enc->lambda = frame->quality - 1;
    else
        enc->lambda = 2*ROQ_LAMBDA_SCALE;

    /* 138 bits max per 8x8 block +
     * 256 codebooks*(6 bytes 2x2 + 4 bytes 4x4) + 8 bytes frame header */
    size = ((enc->width * enc->height / 64) * 138 + 7) / 8 + 256 * (6 + 4) + 8;
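    /* For illustration (hypothetical 512x512 input, not from the original
     * source): (512*512/64)*138 = 565248 bits -> 70656 bytes of block data,
     * plus 256*(6+4) = 2560 bytes of codebook payload and the 8-byte chunk
     * header, i.e. a 73224-byte worst-case packet. */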
    if ((ret = ff_alloc_packet(pkt, size)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet with size %d.\n", size);
        return ret;
    }

    enc->out_buf = pkt->data;

    /* Check for I-frame */
    if (enc->framesSinceKeyframe == avctx->gop_size)
        enc->framesSinceKeyframe = 0;

    if (enc->first_frame) {
        /* Alloc memory for the reconstruction data (we must know the stride
           for that) */
        if (ff_get_buffer(avctx, enc->current_frame, 0) ||
            ff_get_buffer(avctx, enc->last_frame, 0)) {
            av_log(avctx, AV_LOG_ERROR, " RoQ: get_buffer() failed\n");
            return -1;
        }

        /* Before the first video frame, write a "video info" chunk */
        roq_write_video_info_chunk(enc);

        enc->first_frame = 0;
    }

    /* Encode the actual frame */
    ret = roq_encode_video(enc);
    if (ret < 0)
        return ret;

    pkt->size = enc->out_buf - pkt->data;
    if (enc->framesSinceKeyframe == 1)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
AVCodec ff_roq_encoder = {
    .name                 = "roqvideo",
    .long_name            = NULL_IF_CONFIG_SMALL("id RoQ video"),
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_ROQ,
    .priv_data_size       = sizeof(RoqContext),
    .init                 = roq_encode_init,
    .encode2              = roq_encode_frame,
    .close                = roq_encode_end,
    .supported_framerates = (const AVRational[]){ {30,1}, {0,0} },
    .pix_fmts             = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV444P,
                                                          AV_PIX_FMT_NONE },
};