You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1885 lines
63KB

  1. /*
  2. * QDM2 compatible decoder
  3. * Copyright (c) 2003 Ewald Snel
  4. * Copyright (c) 2005 Benjamin Larsson
  5. * Copyright (c) 2005 Alex Beregszaszi
  6. * Copyright (c) 2005 Roberto Togni
  7. *
  8. * This file is part of FFmpeg.
  9. *
  10. * FFmpeg is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU Lesser General Public
  12. * License as published by the Free Software Foundation; either
  13. * version 2.1 of the License, or (at your option) any later version.
  14. *
  15. * FFmpeg is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * Lesser General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU Lesser General Public
  21. * License along with FFmpeg; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  23. */
  24. /**
  25. * @file
  26. * QDM2 decoder
  27. * @author Ewald Snel, Benjamin Larsson, Alex Beregszaszi, Roberto Togni
  28. *
  29. * The decoder is not perfect yet, there are still some distortions
  30. * especially on files encoded with 16 or 8 subbands.
  31. */
  32. #include <math.h>
  33. #include <stddef.h>
  34. #include <stdio.h>
  35. #include "libavutil/channel_layout.h"
  36. #define BITSTREAM_READER_LE
  37. #include "avcodec.h"
  38. #include "get_bits.h"
  39. #include "bytestream.h"
  40. #include "internal.h"
  41. #include "mpegaudio.h"
  42. #include "mpegaudiodsp.h"
  43. #include "rdft.h"
  44. #include "qdm2_tablegen.h"
  45. #define QDM2_LIST_ADD(list, size, packet) \
  46. do { \
  47. if (size > 0) { \
  48. list[size - 1].next = &list[size]; \
  49. } \
  50. list[size].packet = packet; \
  51. list[size].next = NULL; \
  52. size++; \
  53. } while(0)
  54. // Result is 8, 16 or 30
  55. #define QDM2_SB_USED(sub_sampling) (((sub_sampling) >= 2) ? 30 : 8 << (sub_sampling))
  56. #define FIX_NOISE_IDX(noise_idx) \
  57. if ((noise_idx) >= 3840) \
  58. (noise_idx) -= 3840; \
  59. #define SB_DITHERING_NOISE(sb,noise_idx) (noise_table[(noise_idx)++] * sb_noise_attenuation[(sb)])
  60. #define SAMPLES_NEEDED \
  61. av_log (NULL,AV_LOG_INFO,"This file triggers some untested code. Please contact the developers.\n");
  62. #define SAMPLES_NEEDED_2(why) \
  63. av_log (NULL,AV_LOG_INFO,"This file triggers some missing code. Please contact the developers.\nPosition: %s\n",why);
  64. #define QDM2_MAX_FRAME_SIZE 512
  65. typedef int8_t sb_int8_array[2][30][64];
/**
 * Subpacket
 */
typedef struct QDM2SubPacket {
    int type;            ///< subpacket type
    unsigned int size;   ///< subpacket size
    const uint8_t *data; ///< pointer to subpacket data (points to input data buffer, it's not a private copy)
} QDM2SubPacket;

/**
 * A node in the subpacket list
 */
typedef struct QDM2SubPNode {
    QDM2SubPacket *packet;     ///< packet
    struct QDM2SubPNode *next; ///< pointer to next packet in the list, NULL if leaf node
} QDM2SubPNode;

/** Complex value used by the FFT/tone synthesis path. */
typedef struct QDM2Complex {
    float re;
    float im;
} QDM2Complex;

/** State of one synthesized FFT tone. */
typedef struct FFTTone {
    float level;          ///< tone amplitude
    QDM2Complex *complex; ///< position in the FFT buffer the tone is mixed into
    const float *table;   ///< envelope table for this tone
    int phase;            ///< current phase (fixed point)
    int phase_shift;      ///< phase increment per step
    int duration;
    short time_index;
    short cutoff;
} FFTTone;

/** One coded FFT coefficient as parsed from an FFT subpacket. */
typedef struct FFTCoefficient {
    int16_t sub_packet;   ///< index of the subpacket this coefficient came from
    uint8_t channel;
    int16_t offset;
    int16_t exp;
    uint8_t phase;
} FFTCoefficient;

/** Per-frame FFT buffer: one 256-bin complex spectrum per channel. */
typedef struct QDM2FFT {
    DECLARE_ALIGNED(32, QDM2Complex, complex)[MPA_MAX_CHANNELS][256];
} QDM2FFT;

/**
 * QDM2 decoder context
 */
typedef struct QDM2Context {
    /// Parameters from codec header, do not change during playback
    int nb_channels;         ///< number of channels
    int channels;            ///< number of channels (NOTE(review): appears to duplicate nb_channels — confirm which one callers outside this chunk use)
    int group_size;          ///< size of frame group (16 frames per group)
    int fft_size;            ///< size of FFT, in complex numbers
    int checksum_size;       ///< size of data block, used also for checksum

    /// Parameters built from header parameters, do not change during playback
    int group_order;         ///< order of frame group
    int fft_order;           ///< order of FFT (actually fftorder+1)
    int frame_size;          ///< size of data frame
    int frequency_range;
    int sub_sampling;        ///< subsampling: 0=25%, 1=50%, 2=100%
    int coeff_per_sb_select; ///< selector for "num. of coeffs. per subband" tables. Can be 0, 1, 2
    int cm_table_select;     ///< selector for "coding method" tables. Can be 0, 1 (from init: 0-4)

    /// Packets and packet lists
    QDM2SubPacket sub_packets[16];      ///< the packets themselves
    QDM2SubPNode sub_packet_list_A[16]; ///< list of all packets
    QDM2SubPNode sub_packet_list_B[16]; ///< FFT packets B are on list
    int sub_packets_B;                  ///< number of packets on 'B' list
    QDM2SubPNode sub_packet_list_C[16]; ///< packets with errors?
    QDM2SubPNode sub_packet_list_D[16]; ///< DCT packets

    /// FFT and tones
    FFTTone fft_tones[1000];
    int fft_tone_start;
    int fft_tone_end;
    FFTCoefficient fft_coefs[1000];
    int fft_coefs_index;
    int fft_coefs_min_index[5];
    int fft_coefs_max_index[5];
    int fft_level_exp[6];
    RDFTContext rdft_ctx;
    QDM2FFT fft;

    /// I/O data
    const uint8_t *compressed_data;     ///< current input packet (not owned)
    int compressed_size;
    float output_buffer[QDM2_MAX_FRAME_SIZE * MPA_MAX_CHANNELS * 2];

    /// Synthesis filter
    MPADSPContext mpadsp;
    DECLARE_ALIGNED(32, float, synth_buf)[MPA_MAX_CHANNELS][512*2];
    int synth_buf_offset[MPA_MAX_CHANNELS];
    DECLARE_ALIGNED(32, float, sb_samples)[MPA_MAX_CHANNELS][128][SBLIMIT];
    DECLARE_ALIGNED(32, float, samples)[MPA_MAX_CHANNELS * MPA_FRAME_SIZE];

    /// Mixed temporary data used in decoding
    float tone_level[MPA_MAX_CHANNELS][30][64];         ///< per-sample dequantized tone level
    int8_t coding_method[MPA_MAX_CHANNELS][30][64];     ///< per-sample coding method (8/10/16/24/30/34)
    int8_t quantized_coeffs[MPA_MAX_CHANNELS][10][8];
    int8_t tone_level_idx_base[MPA_MAX_CHANNELS][30][8];
    int8_t tone_level_idx_hi1[MPA_MAX_CHANNELS][3][8][8];
    int8_t tone_level_idx_mid[MPA_MAX_CHANNELS][26][8];
    int8_t tone_level_idx_hi2[MPA_MAX_CHANNELS][26];
    int8_t tone_level_idx[MPA_MAX_CHANNELS][30][64];
    int8_t tone_level_idx_temp[MPA_MAX_CHANNELS][30][64];

    // Flags
    int has_errors;         ///< packet has errors
    int superblocktype_2_3; ///< select fft tables and some algorithm based on superblock type
    int do_synth_filter;    ///< used to perform or skip synthesis filter
    int sub_packet;
    int noise_idx;          ///< index for dithering noise table
} QDM2Context;
/* switchtable[m - 8] selects the run/case_val pair in fix_coding_method_array().
 * Non-default entries sit at m = 8, 10, 16, 24, 30 (the real coding methods,
 * indexes 0, 2, 8, 16, 22); everything else maps to the fallback case 5. */
static const int switchtable[23] = {
    0, 5, 1, 5, 5, 5, 5, 5, 2, 5, 5, 5, 5, 5, 5, 5, 3, 5, 5, 5, 5, 5, 4
};
  171. static int qdm2_get_vlc(GetBitContext *gb, const VLC *vlc, int flag, int depth)
  172. {
  173. int value;
  174. value = get_vlc2(gb, vlc->table, vlc->bits, depth);
  175. /* stage-2, 3 bits exponent escape sequence */
  176. if (value-- == 0)
  177. value = get_bits(gb, get_bits(gb, 3) + 1);
  178. /* stage-3, optional */
  179. if (flag) {
  180. int tmp;
  181. if (value >= 60) {
  182. av_log(NULL, AV_LOG_ERROR, "value %d in qdm2_get_vlc too large\n", value);
  183. return 0;
  184. }
  185. tmp= vlc_stage3_values[value];
  186. if ((value & ~3) > 0)
  187. tmp += get_bits(gb, (value >> 2));
  188. value = tmp;
  189. }
  190. return value;
  191. }
  192. static int qdm2_get_se_vlc(const VLC *vlc, GetBitContext *gb, int depth)
  193. {
  194. int value = qdm2_get_vlc(gb, vlc, 0, depth);
  195. return (value & 1) ? ((value + 1) >> 1) : -(value >> 1);
  196. }
  197. /**
  198. * QDM2 checksum
  199. *
  200. * @param data pointer to data to be checksummed
  201. * @param length data length
  202. * @param value checksum value
  203. *
  204. * @return 0 if checksum is OK
  205. */
  206. static uint16_t qdm2_packet_checksum(const uint8_t *data, int length, int value)
  207. {
  208. int i;
  209. for (i = 0; i < length; i++)
  210. value -= data[i];
  211. return (uint16_t)(value & 0xffff);
  212. }
  213. /**
  214. * Fill a QDM2SubPacket structure with packet type, size, and data pointer.
  215. *
  216. * @param gb bitreader context
  217. * @param sub_packet packet under analysis
  218. */
  219. static void qdm2_decode_sub_packet_header(GetBitContext *gb,
  220. QDM2SubPacket *sub_packet)
  221. {
  222. sub_packet->type = get_bits(gb, 8);
  223. if (sub_packet->type == 0) {
  224. sub_packet->size = 0;
  225. sub_packet->data = NULL;
  226. } else {
  227. sub_packet->size = get_bits(gb, 8);
  228. if (sub_packet->type & 0x80) {
  229. sub_packet->size <<= 8;
  230. sub_packet->size |= get_bits(gb, 8);
  231. sub_packet->type &= 0x7f;
  232. }
  233. if (sub_packet->type == 0x7f)
  234. sub_packet->type |= (get_bits(gb, 8) << 8);
  235. // FIXME: this depends on bitreader-internal data
  236. sub_packet->data = &gb->buffer[get_bits_count(gb) / 8];
  237. }
  238. av_log(NULL, AV_LOG_DEBUG, "Subpacket: type=%d size=%d start_offs=%x\n",
  239. sub_packet->type, sub_packet->size, get_bits_count(gb) / 8);
  240. }
  241. /**
  242. * Return node pointer to first packet of requested type in list.
  243. *
  244. * @param list list of subpackets to be scanned
  245. * @param type type of searched subpacket
  246. * @return node pointer for subpacket if found, else NULL
  247. */
  248. static QDM2SubPNode *qdm2_search_subpacket_type_in_list(QDM2SubPNode *list,
  249. int type)
  250. {
  251. while (list && list->packet) {
  252. if (list->packet->type == type)
  253. return list;
  254. list = list->next;
  255. }
  256. return NULL;
  257. }
  258. /**
  259. * Replace 8 elements with their average value.
  260. * Called by qdm2_decode_superblock before starting subblock decoding.
  261. *
  262. * @param q context
  263. */
  264. static void average_quantized_coeffs(QDM2Context *q)
  265. {
  266. int i, j, n, ch, sum;
  267. n = coeff_per_sb_for_avg[q->coeff_per_sb_select][QDM2_SB_USED(q->sub_sampling) - 1] + 1;
  268. for (ch = 0; ch < q->nb_channels; ch++)
  269. for (i = 0; i < n; i++) {
  270. sum = 0;
  271. for (j = 0; j < 8; j++)
  272. sum += q->quantized_coeffs[ch][i][j];
  273. sum /= 8;
  274. if (sum > 0)
  275. sum--;
  276. for (j = 0; j < 8; j++)
  277. q->quantized_coeffs[ch][i][j] = sum;
  278. }
  279. }
  280. /**
  281. * Build subband samples with noise weighted by q->tone_level.
  282. * Called by synthfilt_build_sb_samples.
  283. *
  284. * @param q context
  285. * @param sb subband index
  286. */
  287. static void build_sb_samples_from_noise(QDM2Context *q, int sb)
  288. {
  289. int ch, j;
  290. FIX_NOISE_IDX(q->noise_idx);
  291. if (!q->nb_channels)
  292. return;
  293. for (ch = 0; ch < q->nb_channels; ch++) {
  294. for (j = 0; j < 64; j++) {
  295. q->sb_samples[ch][j * 2][sb] =
  296. SB_DITHERING_NOISE(sb, q->noise_idx) * q->tone_level[ch][sb][j];
  297. q->sb_samples[ch][j * 2 + 1][sb] =
  298. SB_DITHERING_NOISE(sb, q->noise_idx) * q->tone_level[ch][sb][j];
  299. }
  300. }
  301. }
/**
 * Called while processing data from subpackets 11 and 12.
 * Used after making changes to coding_method array.
 *
 * Walks each channel's 64 entries for subband sb in runs: the run length
 * and replacement value are chosen per coding method via switchtable.
 *
 * @param sb subband index
 * @param channels number of channels
 * @param coding_method q->coding_method[0][0][0]
 * @return 0 on success, -1 if an entry below 8 (invalid method) is found
 */
static int fix_coding_method_array(int sb, int channels,
                                   sb_int8_array coding_method)
{
    int j, k;
    int ch;
    int run, case_val;

    for (ch = 0; ch < channels; ch++) {
        for (j = 0; j < 64; ) {
            /* methods < 8 cannot occur in a valid stream */
            if (coding_method[ch][sb][j] < 8)
                return -1;
            if ((coding_method[ch][sb][j] - 8) > 22) {
                run      = 1;
                case_val = 8;
            } else {
                /* map coding method to (run length, replacement value) */
                switch (switchtable[coding_method[ch][sb][j] - 8]) {
                case 0: run      = 10;
                        case_val = 10;
                        break;
                case 1: run      = 1;
                        case_val = 16;
                        break;
                case 2: run      = 5;
                        case_val = 24;
                        break;
                case 3: run      = 3;
                        case_val = 30;
                        break;
                case 4: run      = 1;
                        case_val = 30;
                        break;
                case 5: run      = 1;
                        case_val = 8;
                        break;
                default: run     = 1;
                        case_val = 8;
                        break;
                }
            }
            for (k = 0; k < run; k++) {
                if (j + k < 128) {
                    /* j+k may spill into the next subband */
                    int sbjk = sb + (j + k) / 64;
                    if (sbjk > 29) {
                        SAMPLES_NEEDED
                        continue;
                    }
                    if (coding_method[ch][sbjk][(j + k) % 64] > coding_method[ch][sb][j]) {
                        if (k > 0) {
                            SAMPLES_NEEDED
                            /* not debugged, almost never used; note the two
                             * overlapping memsets at the same address are
                             * kept as-is from the reference implementation */
                            memset(&coding_method[ch][sb][j + k], case_val,
                                   k *sizeof(int8_t));
                            memset(&coding_method[ch][sb][j + k], case_val,
                                   3 * sizeof(int8_t));
                        }
                    }
                }
            }
            j += run;
        }
    }

    return 0;
}
/**
 * Related to synthesis filter
 * Called by process_subpacket_10
 *
 * Builds q->tone_level_idx and q->tone_level from the quantized coefficients
 * and, when subpacket 10 was present, the hi1/hi2/mid correction tables.
 *
 * @param q context
 * @param flag 1 if called after getting data from subpacket 10, 0 if no subpacket 10
 */
static void fill_tone_level_array(QDM2Context *q, int flag)
{
    int i, sb, ch, sb_used;
    int tmp, tab;

    /* base tone-level index per (channel, subband, eighth): dequantize the
     * coarse coefficients, interpolating between two table rows when the
     * subband is not covered by the last coefficient row */
    for (ch = 0; ch < q->nb_channels; ch++)
        for (sb = 0; sb < 30; sb++)
            for (i = 0; i < 8; i++) {
                if ((tab = coeff_per_sb_for_dequant[q->coeff_per_sb_select][sb]) < (last_coeff[q->coeff_per_sb_select] - 1))
                    tmp = q->quantized_coeffs[ch][tab + 1][i] * dequant_table[q->coeff_per_sb_select][tab + 1][sb] +
                          q->quantized_coeffs[ch][tab][i] * dequant_table[q->coeff_per_sb_select][tab][sb];
                else
                    tmp = q->quantized_coeffs[ch][tab][i] * dequant_table[q->coeff_per_sb_select][tab][sb];
                if (tmp < 0)
                    tmp += 0xff; /* bias negatives before the /256 */
                q->tone_level_idx_base[ch][sb][i] = (tmp / 256) & 0xff;
            }

    sb_used = QDM2_SB_USED(q->sub_sampling);

    if ((q->superblocktype_2_3 != 0) && !flag) {
        /* no subpacket 10 data: use the base index directly */
        for (sb = 0; sb < sb_used; sb++)
            for (ch = 0; ch < q->nb_channels; ch++)
                for (i = 0; i < 64; i++) {
                    q->tone_level_idx[ch][sb][i] = q->tone_level_idx_base[ch][sb][i / 8];
                    if (q->tone_level_idx[ch][sb][i] < 0)
                        q->tone_level[ch][sb][i] = 0;
                    else
                        q->tone_level[ch][sb][i] = fft_tone_level_table[0][q->tone_level_idx[ch][sb][i] & 0x3f];
                }
    } else {
        /* refine the base index with the correction tables filled from
         * subpacket 10; which corrections apply depends on the subband */
        tab = q->superblocktype_2_3 ? 0 : 1;
        for (sb = 0; sb < sb_used; sb++) {
            if ((sb >= 4) && (sb <= 23)) {
                /* middle subbands: hi1, mid and hi2 all apply */
                for (ch = 0; ch < q->nb_channels; ch++)
                    for (i = 0; i < 64; i++) {
                        tmp = q->tone_level_idx_base[ch][sb][i / 8] -
                              q->tone_level_idx_hi1[ch][sb / 8][i / 8][i % 8] -
                              q->tone_level_idx_mid[ch][sb - 4][i / 8] -
                              q->tone_level_idx_hi2[ch][sb - 4];
                        q->tone_level_idx[ch][sb][i] = tmp & 0xff;
                        if ((tmp < 0) || (!q->superblocktype_2_3 && !tmp))
                            q->tone_level[ch][sb][i] = 0;
                        else
                            q->tone_level[ch][sb][i] = fft_tone_level_table[tab][tmp & 0x3f];
                    }
            } else {
                if (sb > 4) {
                    /* high subbands (>23): hi1 (last row) and hi2 apply */
                    for (ch = 0; ch < q->nb_channels; ch++)
                        for (i = 0; i < 64; i++) {
                            tmp = q->tone_level_idx_base[ch][sb][i / 8] -
                                  q->tone_level_idx_hi1[ch][2][i / 8][i % 8] -
                                  q->tone_level_idx_hi2[ch][sb - 4];
                            q->tone_level_idx[ch][sb][i] = tmp & 0xff;
                            if ((tmp < 0) || (!q->superblocktype_2_3 && !tmp))
                                q->tone_level[ch][sb][i] = 0;
                            else
                                q->tone_level[ch][sb][i] = fft_tone_level_table[tab][tmp & 0x3f];
                        }
                } else {
                    /* lowest subbands (0-3): no corrections */
                    for (ch = 0; ch < q->nb_channels; ch++)
                        for (i = 0; i < 64; i++) {
                            tmp = q->tone_level_idx[ch][sb][i] = q->tone_level_idx_base[ch][sb][i / 8];
                            if ((tmp < 0) || (!q->superblocktype_2_3 && !tmp))
                                q->tone_level[ch][sb][i] = 0;
                            else
                                q->tone_level[ch][sb][i] = fft_tone_level_table[tab][tmp & 0x3f];
                        }
                }
            }
        }
    }
}
/**
 * Related to synthesis filter
 * Called by process_subpacket_11
 * c is built with data from subpacket 11
 * Most of this function is used only if superblock_type_2_3 == 0,
 * never seen it in samples.
 *
 * @param tone_level_idx
 * @param tone_level_idx_temp
 * @param coding_method q->coding_method[0][0][0]
 * @param nb_channels number of channels
 * @param c coming from subpacket 11, passed as 8*c
 * @param superblocktype_2_3 flag based on superblock packet type
 * @param cm_table_select q->cm_table_select
 */
static void fill_coding_method_array(sb_int8_array tone_level_idx,
                                     sb_int8_array tone_level_idx_temp,
                                     sb_int8_array coding_method,
                                     int nb_channels,
                                     int c, int superblocktype_2_3,
                                     int cm_table_select)
{
    int ch, sb, j;
    int tmp, acc, esp_40, comp;
    int add1, add2, add3, add4;
    int64_t multres;

    if (!superblocktype_2_3) {
        /* This case is untested, no samples available */
        avpriv_request_sample(NULL, "!superblocktype_2_3");
        return;
        /* NOTE: everything from here to the matching else is unreachable
         * because of the return above; kept verbatim from the reference
         * implementation for when samples become available. */
        for (ch = 0; ch < nb_channels; ch++) {
            for (sb = 0; sb < 30; sb++) {
                for (j = 1; j < 63; j++) {  // The loop only iterates to 63 so the code doesn't overflow the buffer
                    /* smooth each entry against its neighbours in the
                     * subband direction, clamping contributions at 0 */
                    add1 = tone_level_idx[ch][sb][j] - 10;
                    if (add1 < 0)
                        add1 = 0;
                    add2 = add3 = add4 = 0;
                    if (sb > 1) {
                        add2 = tone_level_idx[ch][sb - 2][j] + tone_level_idx_offset_table[sb][0] - 6;
                        if (add2 < 0)
                            add2 = 0;
                    }
                    if (sb > 0) {
                        add3 = tone_level_idx[ch][sb - 1][j] + tone_level_idx_offset_table[sb][1] - 6;
                        if (add3 < 0)
                            add3 = 0;
                    }
                    if (sb < 29) {
                        add4 = tone_level_idx[ch][sb + 1][j] + tone_level_idx_offset_table[sb][3] - 6;
                        if (add4 < 0)
                            add4 = 0;
                    }
                    tmp = tone_level_idx[ch][sb][j + 1] * 2 - add4 - add3 - add2 - add1;
                    if (tmp < 0)
                        tmp = 0;
                    tone_level_idx_temp[ch][sb][j + 1] = tmp & 0xff;
                }
                tone_level_idx_temp[ch][sb][0] = tone_level_idx_temp[ch][sb][1];
            }
        }

        /* global accumulator over all smoothed levels */
        acc = 0;
        for (ch = 0; ch < nb_channels; ch++)
            for (sb = 0; sb < 30; sb++)
                for (j = 0; j < 64; j++)
                    acc += tone_level_idx_temp[ch][sb][j];

        /* fixed-point division: esp_40 ~= (acc * 10) / 20 via the
         * 0x66666667 magic-constant multiply (compiler-style div by 5) */
        multres = 0x66666667LL * (acc * 10);
        esp_40 = (multres >> 32) / 8 + ((multres & 0xffffffff) >> 31);
        for (ch = 0; ch < nb_channels; ch++)
            for (sb = 0; sb < 30; sb++)
                for (j = 0; j < 64; j++) {
                    comp = tone_level_idx_temp[ch][sb][j] * esp_40 * 10;
                    if (comp < 0)
                        comp += 0xff;   /* bias negatives before /256 */
                    comp /= 256;        // signed shift
                    /* per-subband floors and offsets for the lowest bands */
                    switch(sb) {
                    case 0:
                        if (comp < 30)
                            comp = 30;
                        comp += 15;
                        break;
                    case 1:
                        if (comp < 24)
                            comp = 24;
                        comp += 10;
                        break;
                    case 2:
                    case 3:
                    case 4:
                        if (comp < 16)
                            comp = 16;
                    }
                    /* quantize comp into a coding-method adjustment */
                    if (comp <= 5)
                        tmp = 0;
                    else if (comp <= 10)
                        tmp = 10;
                    else if (comp <= 16)
                        tmp = 16;
                    else if (comp <= 24)
                        tmp = -1;
                    else
                        tmp = 0;
                    coding_method[ch][sb][j] = ((tmp & 0xfffa) + 30 )& 0xff;
                }
        for (sb = 0; sb < 30; sb++)
            fix_coding_method_array(sb, nb_channels, coding_method);
        /* enforce per-subband-range minimum coding methods */
        for (ch = 0; ch < nb_channels; ch++)
            for (sb = 0; sb < 30; sb++)
                for (j = 0; j < 64; j++)
                    if (sb >= 10) {
                        if (coding_method[ch][sb][j] < 10)
                            coding_method[ch][sb][j] = 10;
                    } else {
                        if (sb >= 2) {
                            if (coding_method[ch][sb][j] < 16)
                                coding_method[ch][sb][j] = 16;
                        } else {
                            if (coding_method[ch][sb][j] < 30)
                                coding_method[ch][sb][j] = 30;
                        }
                    }
    } else { // superblocktype_2_3 != 0
        /* the only exercised path: fixed method per subband from table */
        for (ch = 0; ch < nb_channels; ch++)
            for (sb = 0; sb < 30; sb++)
                for (j = 0; j < 64; j++)
                    coding_method[ch][sb][j] = coding_method_table[cm_table_select][sb];
    }
}
/**
 * Called by process_subpacket_11 to process more data from subpacket 11
 * with sb 0-8.
 * Called by process_subpacket_12 to process data from subpacket 12 with
 * sb 8-sb_used.
 *
 * Decodes sb_samples for subbands [sb_min, sb_max): per (subband, channel)
 * the entries are read in runs whose coder is selected by coding_method;
 * when the bitstream runs dry, dithering noise substitutes for real data.
 *
 * @param q context
 * @param gb bitreader context
 * @param length packet length in bits
 * @param sb_min lower subband processed (sb_min included)
 * @param sb_max higher subband processed (sb_max excluded)
 * @return 0 on success, AVERROR_INVALIDDATA on a bad codeword/index
 */
static int synthfilt_build_sb_samples(QDM2Context *q, GetBitContext *gb,
                                      int length, int sb_min, int sb_max)
{
    int sb, j, k, n, ch, run, channels;
    int joined_stereo, zero_encoding;
    int type34_first;
    float type34_div = 0;
    float type34_predictor;
    float samples[10];
    int sign_bits[16] = {0};

    if (length == 0) {
        // If no data use noise
        for (sb = sb_min; sb < sb_max; sb++)
            build_sb_samples_from_noise(q, sb);

        return 0;
    }

    for (sb = sb_min; sb < sb_max; sb++) {
        channels = q->nb_channels;

        /* joined stereo: never below sb 12, always from sb 24, signalled
         * by one bit in between */
        if (q->nb_channels <= 1 || sb < 12)
            joined_stereo = 0;
        else if (sb >= 24)
            joined_stereo = 1;
        else
            joined_stereo = (get_bits_left(gb) >= 1) ? get_bits1(gb) : 0;

        if (joined_stereo) {
            if (get_bits_left(gb) >= 16)
                for (j = 0; j < 16; j++)
                    sign_bits[j] = get_bits1(gb);

            /* decode once with the stronger of the two channels' methods */
            for (j = 0; j < 64; j++)
                if (q->coding_method[1][sb][j] > q->coding_method[0][sb][j])
                    q->coding_method[0][sb][j] = q->coding_method[1][sb][j];

            if (fix_coding_method_array(sb, q->nb_channels,
                                        q->coding_method)) {
                av_log(NULL, AV_LOG_ERROR, "coding method invalid\n");
                build_sb_samples_from_noise(q, sb);
                continue;
            }
            channels = 1;
        }

        for (ch = 0; ch < channels; ch++) {
            FIX_NOISE_IDX(q->noise_idx);
            zero_encoding = (get_bits_left(gb) >= 1) ? get_bits1(gb) : 0;
            type34_predictor = 0.0;
            type34_first = 1;

            for (j = 0; j < 128; ) {
                switch (q->coding_method[ch][sb][j / 2]) {
                case 8: /* 10 samples: 5 coded even samples + 5 noise odd samples */
                    if (get_bits_left(gb) >= 10) {
                        if (zero_encoding) {
                            for (k = 0; k < 5; k++) {
                                if ((j + 2 * k) >= 128)
                                    break;
                                samples[2 * k] = get_bits1(gb) ? dequant_1bit[joined_stereo][2 * get_bits1(gb)] : 0;
                            }
                        } else {
                            n = get_bits(gb, 8);
                            if (n >= 243) {
                                av_log(NULL, AV_LOG_ERROR, "Invalid 8bit codeword\n");
                                return AVERROR_INVALIDDATA;
                            }
                            for (k = 0; k < 5; k++)
                                samples[2 * k] = dequant_1bit[joined_stereo][random_dequant_index[n][k]];
                        }
                        for (k = 0; k < 5; k++)
                            samples[2 * k + 1] = SB_DITHERING_NOISE(sb,q->noise_idx);
                    } else {
                        for (k = 0; k < 10; k++)
                            samples[k] = SB_DITHERING_NOISE(sb,q->noise_idx);
                    }
                    run = 10;
                    break;

                case 10: /* 1 sample: sign bit + fixed noise-derived offset */
                    if (get_bits_left(gb) >= 1) {
                        float f = 0.81;

                        if (get_bits1(gb))
                            f = -f;
                        f -= noise_samples[((sb + 1) * (j +5 * ch + 1)) & 127] * 9.0 / 40.0;
                        samples[0] = f;
                    } else {
                        samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);
                    }
                    run = 1;
                    break;

                case 16: /* 5 samples, 1-bit dequant or indexed table */
                    if (get_bits_left(gb) >= 10) {
                        if (zero_encoding) {
                            for (k = 0; k < 5; k++) {
                                if ((j + k) >= 128)
                                    break;
                                samples[k] = (get_bits1(gb) == 0) ? 0 : dequant_1bit[joined_stereo][2 * get_bits1(gb)];
                            }
                        } else {
                            n = get_bits (gb, 8);
                            if (n >= 243) {
                                av_log(NULL, AV_LOG_ERROR, "Invalid 8bit codeword\n");
                                return AVERROR_INVALIDDATA;
                            }
                            for (k = 0; k < 5; k++)
                                samples[k] = dequant_1bit[joined_stereo][random_dequant_index[n][k]];
                        }
                    } else {
                        for (k = 0; k < 5; k++)
                            samples[k] = SB_DITHERING_NOISE(sb,q->noise_idx);
                    }
                    run = 5;
                    break;

                case 24: /* 3 samples from a 7-bit codeword */
                    if (get_bits_left(gb) >= 7) {
                        n = get_bits(gb, 7);
                        if (n >= 125) {
                            av_log(NULL, AV_LOG_ERROR, "Invalid 7bit codeword\n");
                            return AVERROR_INVALIDDATA;
                        }
                        for (k = 0; k < 3; k++)
                            samples[k] = (random_dequant_type24[n][k] - 2.0) * 0.5;
                    } else {
                        for (k = 0; k < 3; k++)
                            samples[k] = SB_DITHERING_NOISE(sb,q->noise_idx);
                    }
                    run = 3;
                    break;

                case 30: /* 1 sample via VLC into type30_dequant */
                    if (get_bits_left(gb) >= 4) {
                        unsigned index = qdm2_get_vlc(gb, &vlc_tab_type30, 0, 1);
                        if (index >= FF_ARRAY_ELEMS(type30_dequant)) {
                            av_log(NULL, AV_LOG_ERROR, "index %d out of type30_dequant array\n", index);
                            return AVERROR_INVALIDDATA;
                        }
                        samples[0] = type30_dequant[index];
                    } else
                        samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);

                    run = 1;
                    break;

                case 34: /* 1 sample, DPCM: first value explicit, then deltas */
                    if (get_bits_left(gb) >= 7) {
                        if (type34_first) {
                            type34_div = (float)(1 << get_bits(gb, 2));
                            samples[0] = ((float)get_bits(gb, 5) - 16.0) / 15.0;
                            type34_predictor = samples[0];
                            type34_first = 0;
                        } else {
                            unsigned index = qdm2_get_vlc(gb, &vlc_tab_type34, 0, 1);
                            if (index >= FF_ARRAY_ELEMS(type34_delta)) {
                                av_log(NULL, AV_LOG_ERROR, "index %d out of type34_delta array\n", index);
                                return AVERROR_INVALIDDATA;
                            }
                            samples[0] = type34_delta[index] / type34_div + type34_predictor;
                            type34_predictor = samples[0];
                        }
                    } else {
                        samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);
                    }
                    run = 1;
                    break;

                default: /* unknown method: substitute noise */
                    samples[0] = SB_DITHERING_NOISE(sb,q->noise_idx);
                    run = 1;
                    break;
                }

                if (joined_stereo) {
                    /* write both channels; the right channel mirrors the
                     * left with a per-8-sample sign flip */
                    for (k = 0; k < run && j + k < 128; k++) {
                        q->sb_samples[0][j + k][sb] =
                            q->tone_level[0][sb][(j + k) / 2] * samples[k];
                        if (q->nb_channels == 2) {
                            if (sign_bits[(j + k) / 8])
                                q->sb_samples[1][j + k][sb] =
                                    q->tone_level[1][sb][(j + k) / 2] * -samples[k];
                            else
                                q->sb_samples[1][j + k][sb] =
                                    q->tone_level[1][sb][(j + k) / 2] * samples[k];
                        }
                    }
                } else {
                    for (k = 0; k < run; k++)
                        if ((j + k) < 128)
                            q->sb_samples[ch][j + k][sb] = q->tone_level[ch][sb][(j + k)/2] * samples[k];
                }

                j += run;
            } // j loop
        } // channel loop
    } // subband loop

    return 0;
}
  771. /**
  772. * Init the first element of a channel in quantized_coeffs with data
  773. * from packet 10 (quantized_coeffs[ch][0]).
  774. * This is similar to process_subpacket_9, but for a single channel
  775. * and for element [0]
  776. * same VLC tables as process_subpacket_9 are used.
  777. *
  778. * @param quantized_coeffs pointer to quantized_coeffs[ch][0]
  779. * @param gb bitreader context
  780. */
  781. static int init_quantized_coeffs_elem0(int8_t *quantized_coeffs,
  782. GetBitContext *gb)
  783. {
  784. int i, k, run, level, diff;
  785. if (get_bits_left(gb) < 16)
  786. return -1;
  787. level = qdm2_get_vlc(gb, &vlc_tab_level, 0, 2);
  788. quantized_coeffs[0] = level;
  789. for (i = 0; i < 7; ) {
  790. if (get_bits_left(gb) < 16)
  791. return -1;
  792. run = qdm2_get_vlc(gb, &vlc_tab_run, 0, 1) + 1;
  793. if (i + run >= 8)
  794. return -1;
  795. if (get_bits_left(gb) < 16)
  796. return -1;
  797. diff = qdm2_get_se_vlc(&vlc_tab_diff, gb, 2);
  798. for (k = 1; k <= run; k++)
  799. quantized_coeffs[i + k] = (level + ((k * diff) / run));
  800. level += diff;
  801. i += run;
  802. }
  803. return 0;
  804. }
/**
 * Related to synthesis filter, process data from packet 10
 * Init part of quantized_coeffs via function init_quantized_coeffs_elem0
 * Init tone_level_idx_hi1, tone_level_idx_hi2, tone_level_idx_mid with
 * data from packet 10
 *
 * All reads stop (break) as soon as the bitstream runs short; remaining
 * table entries keep whatever value they already had.
 *
 * @param q context
 * @param gb bitreader context
 */
static void init_tone_level_dequantization(QDM2Context *q, GetBitContext *gb)
{
    int sb, j, k, n, ch;

    /* element 0 of each channel's quantized coefficients */
    for (ch = 0; ch < q->nb_channels; ch++) {
        init_quantized_coeffs_elem0(q->quantized_coeffs[ch][0], gb);

        if (get_bits_left(gb) < 16) {
            memset(q->quantized_coeffs[ch][0], 0, 8);
            break;
        }
    }

    /* hi1 table: one presence bit per row, then 8 VLC values */
    n = q->sub_sampling + 1;

    for (sb = 0; sb < n; sb++)
        for (ch = 0; ch < q->nb_channels; ch++)
            for (j = 0; j < 8; j++) {
                if (get_bits_left(gb) < 1)
                    break;
                if (get_bits1(gb)) {
                    for (k=0; k < 8; k++) {
                        if (get_bits_left(gb) < 16)
                            break;
                        q->tone_level_idx_hi1[ch][sb][j][k] = qdm2_get_vlc(gb, &vlc_tab_tone_level_idx_hi1, 0, 2);
                    }
                } else {
                    for (k=0; k < 8; k++)
                        q->tone_level_idx_hi1[ch][sb][j][k] = 0;
                }
            }

    /* hi2 table; for low subbands the mid row is preset to -16 instead */
    n = QDM2_SB_USED(q->sub_sampling) - 4;

    for (sb = 0; sb < n; sb++)
        for (ch = 0; ch < q->nb_channels; ch++) {
            if (get_bits_left(gb) < 16)
                break;
            q->tone_level_idx_hi2[ch][sb] = qdm2_get_vlc(gb, &vlc_tab_tone_level_idx_hi2, 0, 2);
            if (sb > 19)
                q->tone_level_idx_hi2[ch][sb] -= 16;
            else
                for (j = 0; j < 8; j++)
                    q->tone_level_idx_mid[ch][sb][j] = -16;
        }

    /* mid table, offset by -32 */
    n = QDM2_SB_USED(q->sub_sampling) - 5;

    for (sb = 0; sb < n; sb++)
        for (ch = 0; ch < q->nb_channels; ch++)
            for (j = 0; j < 8; j++) {
                if (get_bits_left(gb) < 16)
                    break;
                q->tone_level_idx_mid[ch][sb][j] = qdm2_get_vlc(gb, &vlc_tab_tone_level_idx_mid, 0, 2) - 32;
            }
}
  862. /**
  863. * Process subpacket 9, init quantized_coeffs with data from it
  864. *
  865. * @param q context
  866. * @param node pointer to node with packet
  867. */
  868. static int process_subpacket_9(QDM2Context *q, QDM2SubPNode *node)
  869. {
  870. GetBitContext gb;
  871. int i, j, k, n, ch, run, level, diff;
  872. init_get_bits(&gb, node->packet->data, node->packet->size * 8);
  873. n = coeff_per_sb_for_avg[q->coeff_per_sb_select][QDM2_SB_USED(q->sub_sampling) - 1] + 1;
  874. for (i = 1; i < n; i++)
  875. for (ch = 0; ch < q->nb_channels; ch++) {
  876. level = qdm2_get_vlc(&gb, &vlc_tab_level, 0, 2);
  877. q->quantized_coeffs[ch][i][0] = level;
  878. for (j = 0; j < (8 - 1); ) {
  879. run = qdm2_get_vlc(&gb, &vlc_tab_run, 0, 1) + 1;
  880. diff = qdm2_get_se_vlc(&vlc_tab_diff, &gb, 2);
  881. if (j + run >= 8)
  882. return -1;
  883. for (k = 1; k <= run; k++)
  884. q->quantized_coeffs[ch][i][j + k] = (level + ((k * diff) / run));
  885. level += diff;
  886. j += run;
  887. }
  888. }
  889. for (ch = 0; ch < q->nb_channels; ch++)
  890. for (i = 0; i < 8; i++)
  891. q->quantized_coeffs[ch][0][i] = 0;
  892. return 0;
  893. }
  894. /**
  895. * Process subpacket 10 if not null, else
  896. *
  897. * @param q context
  898. * @param node pointer to node with packet
  899. */
  900. static void process_subpacket_10(QDM2Context *q, QDM2SubPNode *node)
  901. {
  902. GetBitContext gb;
  903. if (node) {
  904. init_get_bits(&gb, node->packet->data, node->packet->size * 8);
  905. init_tone_level_dequantization(q, &gb);
  906. fill_tone_level_array(q, 1);
  907. } else {
  908. fill_tone_level_array(q, 0);
  909. }
  910. }
  911. /**
  912. * Process subpacket 11
  913. *
  914. * @param q context
  915. * @param node pointer to node with packet
  916. */
  917. static void process_subpacket_11(QDM2Context *q, QDM2SubPNode *node)
  918. {
  919. GetBitContext gb;
  920. int length = 0;
  921. if (node) {
  922. length = node->packet->size * 8;
  923. init_get_bits(&gb, node->packet->data, length);
  924. }
  925. if (length >= 32) {
  926. int c = get_bits(&gb, 13);
  927. if (c > 3)
  928. fill_coding_method_array(q->tone_level_idx,
  929. q->tone_level_idx_temp, q->coding_method,
  930. q->nb_channels, 8 * c,
  931. q->superblocktype_2_3, q->cm_table_select);
  932. }
  933. synthfilt_build_sb_samples(q, &gb, length, 0, 8);
  934. }
  935. /**
  936. * Process subpacket 12
  937. *
  938. * @param q context
  939. * @param node pointer to node with packet
  940. */
  941. static void process_subpacket_12(QDM2Context *q, QDM2SubPNode *node)
  942. {
  943. GetBitContext gb;
  944. int length = 0;
  945. if (node) {
  946. length = node->packet->size * 8;
  947. init_get_bits(&gb, node->packet->data, length);
  948. }
  949. synthfilt_build_sb_samples(q, &gb, length, 8, QDM2_SB_USED(q->sub_sampling));
  950. }
  951. /**
  952. * Process new subpackets for synthesis filter
  953. *
  954. * @param q context
  955. * @param list list with synthesis filter packets (list D)
  956. */
  957. static void process_synthesis_subpackets(QDM2Context *q, QDM2SubPNode *list)
  958. {
  959. QDM2SubPNode *nodes[4];
  960. nodes[0] = qdm2_search_subpacket_type_in_list(list, 9);
  961. if (nodes[0])
  962. process_subpacket_9(q, nodes[0]);
  963. nodes[1] = qdm2_search_subpacket_type_in_list(list, 10);
  964. if (nodes[1])
  965. process_subpacket_10(q, nodes[1]);
  966. else
  967. process_subpacket_10(q, NULL);
  968. nodes[2] = qdm2_search_subpacket_type_in_list(list, 11);
  969. if (nodes[0] && nodes[1] && nodes[2])
  970. process_subpacket_11(q, nodes[2]);
  971. else
  972. process_subpacket_11(q, NULL);
  973. nodes[3] = qdm2_search_subpacket_type_in_list(list, 12);
  974. if (nodes[0] && nodes[1] && nodes[3])
  975. process_subpacket_12(q, nodes[3]);
  976. else
  977. process_subpacket_12(q, NULL);
  978. }
/**
 * Decode superblock, fill packet lists.
 *
 * Parses the superblock header, optionally verifies its checksum, then
 * walks the contained subpackets and distributes them into list A (all),
 * list B (FFT) and list D (synthesis filter), dispatching list D at the end.
 *
 * @param q    context
 */
static void qdm2_decode_super_block(QDM2Context *q)
{
    GetBitContext gb;
    QDM2SubPacket header, *packet;
    int i, packet_bytes, sub_packet_size, sub_packets_D;
    unsigned int next_index = 0;

    /* clear per-superblock tone level index tables */
    memset(q->tone_level_idx_hi1, 0, sizeof(q->tone_level_idx_hi1));
    memset(q->tone_level_idx_mid, 0, sizeof(q->tone_level_idx_mid));
    memset(q->tone_level_idx_hi2, 0, sizeof(q->tone_level_idx_hi2));

    q->sub_packets_B = 0;
    sub_packets_D    = 0;

    average_quantized_coeffs(q); // average elements in quantized_coeffs[max_ch][10][8]

    init_get_bits(&gb, q->compressed_data, q->compressed_size * 8);
    qdm2_decode_sub_packet_header(&gb, &header);

    /* only superblock types 2..7 are valid */
    if (header.type < 2 || header.type >= 8) {
        q->has_errors = 1;
        av_log(NULL, AV_LOG_ERROR, "bad superblock type\n");
        return;
    }

    q->superblocktype_2_3 = (header.type == 2 || header.type == 3);
    packet_bytes          = (q->compressed_size - get_bits_count(&gb) / 8);

    init_get_bits(&gb, header.data, header.size * 8);

    /* these superblock types carry a 16-bit checksum over the whole block */
    if (header.type == 2 || header.type == 4 || header.type == 5) {
        int csum = 257 * get_bits(&gb, 8);
        csum += 2 * get_bits(&gb, 8);

        csum = qdm2_packet_checksum(q->compressed_data, q->checksum_size, csum);

        if (csum != 0) {
            q->has_errors = 1;
            av_log(NULL, AV_LOG_ERROR, "bad packet checksum\n");
            return;
        }
    }

    q->sub_packet_list_B[0].packet = NULL;
    q->sub_packet_list_D[0].packet = NULL;

    /* decay FFT level exponents from the previous superblock (floor at 0) */
    for (i = 0; i < 6; i++)
        if (--q->fft_level_exp[i] < 0)
            q->fft_level_exp[i] = 0;

    for (i = 0; packet_bytes > 0; i++) {
        int j;

        if (i >= FF_ARRAY_ELEMS(q->sub_packet_list_A)) {
            SAMPLES_NEEDED_2("too many packet bytes");
            return;
        }

        q->sub_packet_list_A[i].next = NULL;

        if (i > 0) {
            q->sub_packet_list_A[i - 1].next = &q->sub_packet_list_A[i];

            /* seek to next block */
            init_get_bits(&gb, header.data, header.size * 8);
            skip_bits(&gb, next_index * 8);

            if (next_index >= header.size)
                break;
        }

        /* decode subpacket */
        packet = &q->sub_packets[i];
        qdm2_decode_sub_packet_header(&gb, packet);
        next_index      = packet->size + get_bits_count(&gb) / 8;
        /* header is 2 bytes, plus 1 extra byte when size needed 16 bits */
        sub_packet_size = ((packet->size > 0xff) ? 1 : 0) + packet->size + 2;

        if (packet->type == 0)
            break;

        if (sub_packet_size > packet_bytes) {
            /* truncated subpacket: only tolerated for types 10/11/12,
             * whose parsers accept a shortened payload */
            if (packet->type != 10 && packet->type != 11 && packet->type != 12)
                break;
            packet->size += packet_bytes - sub_packet_size;
        }

        packet_bytes -= sub_packet_size;

        /* add subpacket to 'all subpackets' list */
        q->sub_packet_list_A[i].packet = packet;

        /* add subpacket to related list */
        if (packet->type == 8) {
            SAMPLES_NEEDED_2("packet type 8");
            return;
        } else if (packet->type >= 9 && packet->type <= 12) {
            /* packets for MPEG Audio like Synthesis Filter */
            QDM2_LIST_ADD(q->sub_packet_list_D, sub_packets_D, packet);
        } else if (packet->type == 13) {
            /* raw 6-bit FFT level exponents */
            for (j = 0; j < 6; j++)
                q->fft_level_exp[j] = get_bits(&gb, 6);
        } else if (packet->type == 14) {
            /* VLC-coded FFT level exponents */
            for (j = 0; j < 6; j++)
                q->fft_level_exp[j] = qdm2_get_vlc(&gb, &fft_level_exp_vlc, 0, 2);
        } else if (packet->type == 15) {
            SAMPLES_NEEDED_2("packet type 15")
            return;
        } else if (packet->type >= 16 && packet->type < 48 &&
                   !fft_subpackets[packet->type - 16]) {
            /* packets for FFT */
            QDM2_LIST_ADD(q->sub_packet_list_B, q->sub_packets_B, packet);
        }
    } // Packet bytes loop

    if (q->sub_packet_list_D[0].packet) {
        process_synthesis_subpackets(q, q->sub_packet_list_D);
        q->do_synth_filter = 1;
    } else if (q->do_synth_filter) {
        /* no new synthesis data: keep the filter running on defaults */
        process_subpacket_10(q, NULL);
        process_subpacket_11(q, NULL);
        process_subpacket_12(q, NULL);
    }
}
  1082. static void qdm2_fft_init_coefficient(QDM2Context *q, int sub_packet,
  1083. int offset, int duration, int channel,
  1084. int exp, int phase)
  1085. {
  1086. if (q->fft_coefs_min_index[duration] < 0)
  1087. q->fft_coefs_min_index[duration] = q->fft_coefs_index;
  1088. q->fft_coefs[q->fft_coefs_index].sub_packet =
  1089. ((sub_packet >= 16) ? (sub_packet - 16) : sub_packet);
  1090. q->fft_coefs[q->fft_coefs_index].channel = channel;
  1091. q->fft_coefs[q->fft_coefs_index].offset = offset;
  1092. q->fft_coefs[q->fft_coefs_index].exp = exp;
  1093. q->fft_coefs[q->fft_coefs_index].phase = phase;
  1094. q->fft_coefs_index++;
  1095. }
/**
 * Decode FFT tones of one duration class from the bitstream and store them
 * via qdm2_fft_init_coefficient().
 *
 * @param q         context
 * @param duration  tone duration class (0 = long .. 4 = one FFT period)
 * @param gb        bitstream reader
 * @param b         selects fft_level_exp_vlc (non-zero) vs the alt table
 *
 * NOTE(review): the local_int_* names are kept from the original reverse
 * engineering; their exact semantics are not fully established. From the
 * code: local_int_4 tracks position within the group, local_int_8 is a
 * shift derived from duration, local_int_10 a wrap period, local_int_28
 * accumulates a sub_packet offset.
 */
static void qdm2_fft_decode_tones(QDM2Context *q, int duration,
                                  GetBitContext *gb, int b)
{
    int channel, stereo, phase, exp;
    int local_int_4, local_int_8, stereo_phase, local_int_10;
    int local_int_14, stereo_exp, local_int_20, local_int_28;
    int n, offset;

    local_int_4  = 0;
    local_int_28 = 0;
    local_int_20 = 2;
    local_int_8  = (4 - duration);
    local_int_10 = 1 << (q->group_order - duration - 1);
    offset       = 1;

    while (get_bits_left(gb)>0) {
        if (q->superblocktype_2_3) {
            /* escape codes 0/1 advance the group position; >= 2 encodes
             * the tone offset delta */
            while ((n = qdm2_get_vlc(gb, &vlc_tab_fft_tone_offset[local_int_8], 1, 2)) < 2) {
                if (get_bits_left(gb)<0) {
                    if(local_int_4 < q->group_size)
                        av_log(NULL, AV_LOG_ERROR, "overread in qdm2_fft_decode_tones()\n");
                    return;
                }
                offset = 1;
                if (n == 0) {
                    local_int_4  += local_int_10;
                    local_int_28 += (1 << local_int_8);
                } else {
                    local_int_4  += 8 * local_int_10;
                    local_int_28 += (8 << local_int_8);
                }
            }
            offset += (n - 2);
        } else {
            /* guard: with local_int_10 <= 2 the wrap loop below cannot
             * make progress */
            if (local_int_10 <= 2) {
                av_log(NULL, AV_LOG_ERROR, "qdm2_fft_decode_tones() stuck\n");
                return;
            }

            offset += qdm2_get_vlc(gb, &vlc_tab_fft_tone_offset[local_int_8], 1, 2);

            /* wrap offset into range, advancing the group position */
            while (offset >= (local_int_10 - 1)) {
                offset       += (1 - (local_int_10 - 1));
                local_int_4  += local_int_10;
                local_int_28 += (1 << local_int_8);
            }
        }

        if (local_int_4 >= q->group_size)
            return;

        local_int_14 = (offset >> local_int_8);
        if (local_int_14 >= FF_ARRAY_ELEMS(fft_level_index_table))
            return;

        if (q->nb_channels > 1) {
            channel = get_bits1(gb);
            stereo  = get_bits1(gb);
        } else {
            channel = 0;
            stereo  = 0;
        }

        /* tone level exponent, biased by the per-band base exponent */
        exp  = qdm2_get_vlc(gb, (b ? &fft_level_exp_vlc : &fft_level_exp_alt_vlc), 0, 2);
        exp += q->fft_level_exp[fft_level_index_table[local_int_14]];
        exp  = (exp < 0) ? 0 : exp;

        phase        = get_bits(gb, 3);
        stereo_exp   = 0;
        stereo_phase = 0;

        /* the second channel is coded as a delta against the first */
        if (stereo) {
            stereo_exp   = (exp - qdm2_get_vlc(gb, &fft_stereo_exp_vlc, 0, 1));
            stereo_phase = (phase - qdm2_get_vlc(gb, &fft_stereo_phase_vlc, 0, 1));
            if (stereo_phase < 0)
                stereo_phase += 8;
        }

        if (q->frequency_range > (local_int_14 + 1)) {
            int sub_packet = (local_int_20 + local_int_28);

            qdm2_fft_init_coefficient(q, sub_packet, offset, duration,
                                      channel, exp, phase);
            if (stereo)
                qdm2_fft_init_coefficient(q, sub_packet, offset, duration,
                                          1 - channel,
                                          stereo_exp, stereo_phase);
        }
        offset++;
    }
}
/**
 * Decode all FFT subpackets from list B, processing them in descending
 * packet-type order, then derive the max index for each duration class.
 *
 * @param q    context
 */
static void qdm2_decode_fft_packets(QDM2Context *q)
{
    int i, j, min, max, value, type, unknown_flag;
    GetBitContext gb;

    if (!q->sub_packet_list_B[0].packet)
        return;

    /* reset minimum indexes for FFT coefficients */
    q->fft_coefs_index = 0;
    for (i = 0; i < 5; i++)
        q->fft_coefs_min_index[i] = -1;

    /* process subpackets ordered by type, largest type first */
    for (i = 0, max = 256; i < q->sub_packets_B; i++) {
        QDM2SubPacket *packet = NULL;

        /* find subpacket with largest type less than max
         * (selection pass over the unsorted list) */
        for (j = 0, min = 0; j < q->sub_packets_B; j++) {
            value = q->sub_packet_list_B[j].packet->type;
            if (value > min && value < max) {
                min    = value;
                packet = q->sub_packet_list_B[j].packet;
            }
        }

        max = min;

        /* check for errors (?) */
        if (!packet)
            return;

        if (i == 0 &&
            (packet->type < 16 || packet->type >= 48 ||
             fft_subpackets[packet->type - 16]))
            return;

        /* decode FFT tones */
        init_get_bits(&gb, packet->data, packet->size * 8);

        /* types 32..47 (minus excluded ones) select the alternate
         * level-exponent table in qdm2_fft_decode_tones() */
        if (packet->type >= 32 && packet->type < 48 && !fft_subpackets[packet->type - 16])
            unknown_flag = 1;
        else
            unknown_flag = 0;

        type = packet->type;

        if ((type >= 17 && type < 24) || (type >= 33 && type < 40)) {
            /* the type's low nibble encodes a single duration class */
            int duration = q->sub_sampling + 5 - (type & 15);

            if (duration >= 0 && duration < 4)
                qdm2_fft_decode_tones(q, duration, &gb, unknown_flag);
        } else if (type == 31) {
            /* all four duration classes in sequence */
            for (j = 0; j < 4; j++)
                qdm2_fft_decode_tones(q, j, &gb, unknown_flag);
        } else if (type == 46) {
            /* inline level exponents followed by all duration classes */
            for (j = 0; j < 6; j++)
                q->fft_level_exp[j] = get_bits(&gb, 6);
            for (j = 0; j < 4; j++)
                qdm2_fft_decode_tones(q, j, &gb, unknown_flag);
        }
    } // Loop on B packets

    /* calculate maximum indexes for FFT coefficients: each class ends
     * where the next used class begins; the last ends at fft_coefs_index */
    for (i = 0, j = -1; i < 5; i++)
        if (q->fft_coefs_min_index[i] >= 0) {
            if (j >= 0)
                q->fft_coefs_max_index[j] = q->fft_coefs_min_index[i];
            j = i;
        }
    if (j >= 0)
        q->fft_coefs_max_index[j] = q->fft_coefs_index;
}
/**
 * Advance one tone by a time step: accumulate its contribution into the
 * FFT coefficient bins it points at, and requeue it if it is still alive.
 *
 * @param q     context
 * @param tone  tone state (phase, level, duration, target bins)
 */
static void qdm2_fft_generate_tone(QDM2Context *q, FFTTone *tone)
{
    float level, f[6];
    int i;
    QDM2Complex c;
    const double iscale = 2.0 * M_PI / 512.0;

    tone->phase += tone->phase_shift;

    /* calculate current level (maximum amplitude) of tone */
    level = fft_tone_envelope_table[tone->duration][tone->time_index] * tone->level;
    c.im  = level * sin(tone->phase * iscale);
    c.re  = level * cos(tone->phase * iscale);

    /* generate FFT coefficients for tone */
    if (tone->duration >= 3 || tone->cutoff >= 3) {
        /* short tones / high cutoff: energy goes into two adjacent bins */
        tone->complex[0].im += c.im;
        tone->complex[0].re += c.re;
        tone->complex[1].im -= c.im;
        tone->complex[1].re -= c.re;
    } else {
        /* longer tones: spread over 6 bins with weights derived from the
         * tone's 5-entry sample table */
        f[1] = -tone->table[4];
        f[0] = tone->table[3] - tone->table[0];
        f[2] = 1.0 - tone->table[2] - tone->table[3];
        f[3] = tone->table[1] + tone->table[4] - 1.0;
        f[4] = tone->table[0] - tone->table[1];
        f[5] = tone->table[2];

        for (i = 0; i < 2; i++) {
            tone->complex[fft_cutoff_index_table[tone->cutoff][i]].re +=
                c.re * f[i];
            tone->complex[fft_cutoff_index_table[tone->cutoff][i]].im +=
                c.im * ((tone->cutoff <= i) ? -f[i] : f[i]);
        }

        for (i = 0; i < 4; i++) {
            tone->complex[i].re += c.re * f[i + 2];
            tone->complex[i].im += c.im * f[i + 2];
        }
    }

    /* copy the tone if it has not yet died out */
    if (++tone->time_index < ((1 << (5 - tone->duration)) - 1)) {
        memcpy(&q->fft_tones[q->fft_tone_end], tone, sizeof(FFTTone));
        q->fft_tone_end = (q->fft_tone_end + 1) % 1000;
    }
}
/**
 * Build the FFT coefficient buffers for one subpacket from decoded tones:
 * apply one-period tones directly, advance tones still ringing from
 * earlier subpackets, and spawn new tones for duration classes 0..3.
 *
 * @param q           context
 * @param sub_packet  index of the subpacket being synthesized
 */
static void qdm2_fft_tone_synthesizer(QDM2Context *q, int sub_packet)
{
    int i, j, ch;
    const double iscale = 0.25 * M_PI;

    for (ch = 0; ch < q->channels; ch++) {
        memset(q->fft.complex[ch], 0, q->fft_size * sizeof(QDM2Complex));
    }

    /* apply FFT tones with duration 4 (1 FFT period) */
    if (q->fft_coefs_min_index[4] >= 0)
        for (i = q->fft_coefs_min_index[4]; i < q->fft_coefs_max_index[4]; i++) {
            float level;
            QDM2Complex c;

            /* coefficients are grouped by sub_packet; stop at the first
             * one belonging to a later subpacket */
            if (q->fft_coefs[i].sub_packet != sub_packet)
                break;

            ch    = (q->channels == 1) ? 0 : q->fft_coefs[i].channel;
            level = (q->fft_coefs[i].exp < 0) ? 0.0 : fft_tone_level_table[q->superblocktype_2_3 ? 0 : 1][q->fft_coefs[i].exp & 63];

            c.re = level * cos(q->fft_coefs[i].phase * iscale);
            c.im = level * sin(q->fft_coefs[i].phase * iscale);

            /* energy split across two adjacent bins with opposite sign */
            q->fft.complex[ch][q->fft_coefs[i].offset + 0].re += c.re;
            q->fft.complex[ch][q->fft_coefs[i].offset + 0].im += c.im;
            q->fft.complex[ch][q->fft_coefs[i].offset + 1].re -= c.re;
            q->fft.complex[ch][q->fft_coefs[i].offset + 1].im -= c.im;
        }

    /* generate existing FFT tones (the circular queue between
     * fft_tone_start and fft_tone_end) */
    for (i = q->fft_tone_end; i != q->fft_tone_start; ) {
        qdm2_fft_generate_tone(q, &q->fft_tones[q->fft_tone_start]);
        q->fft_tone_start = (q->fft_tone_start + 1) % 1000;
    }

    /* create and generate new FFT tones with duration 0 (long) to 3 (short) */
    for (i = 0; i < 4; i++)
        if (q->fft_coefs_min_index[i] >= 0) {
            for (j = q->fft_coefs_min_index[i]; j < q->fft_coefs_max_index[i]; j++) {
                int offset, four_i;
                FFTTone tone;

                if (q->fft_coefs[j].sub_packet != sub_packet)
                    break;

                four_i = (4 - i);
                offset = q->fft_coefs[j].offset >> four_i;
                ch     = (q->channels == 1) ? 0 : q->fft_coefs[j].channel;

                if (offset < q->frequency_range) {
                    if (offset < 2)
                        tone.cutoff = offset;
                    else
                        tone.cutoff = (offset >= 60) ? 3 : 2;

                    tone.level       = (q->fft_coefs[j].exp < 0) ? 0.0 : fft_tone_level_table[q->superblocktype_2_3 ? 0 : 1][q->fft_coefs[j].exp & 63];
                    tone.complex     = &q->fft.complex[ch][offset];
                    tone.table       = fft_tone_sample_table[i][q->fft_coefs[j].offset - (offset << four_i)];
                    tone.phase       = 64 * q->fft_coefs[j].phase - (offset << 8) - 128;
                    tone.phase_shift = (2 * q->fft_coefs[j].offset + 1) << (7 - four_i);
                    tone.duration    = i;
                    tone.time_index  = 0;

                    qdm2_fft_generate_tone(q, &tone);
                }
            }
            /* skip the consumed coefficients on the next call */
            q->fft_coefs_min_index[i] = j;
        }
}
  1333. static void qdm2_calculate_fft(QDM2Context *q, int channel, int sub_packet)
  1334. {
  1335. const float gain = (q->channels == 1 && q->nb_channels == 2) ? 0.5f : 1.0f;
  1336. float *out = q->output_buffer + channel;
  1337. int i;
  1338. q->fft.complex[channel][0].re *= 2.0f;
  1339. q->fft.complex[channel][0].im = 0.0f;
  1340. q->rdft_ctx.rdft_calc(&q->rdft_ctx, (FFTSample *)q->fft.complex[channel]);
  1341. /* add samples to output buffer */
  1342. for (i = 0; i < FFALIGN(q->fft_size, 8); i++) {
  1343. out[0] += q->fft.complex[channel][i].re * gain;
  1344. out[q->channels] += q->fft.complex[channel][i].im * gain;
  1345. out += 2 * q->channels;
  1346. }
  1347. }
/**
 * Run the MPEG-audio-style synthesis filter for one subpacket and mix the
 * result into the output buffer.
 *
 * @param q        context
 * @param index    subpacket number
 */
static void qdm2_synthesis_filter(QDM2Context *q, int index)
{
    int i, k, ch, sb_used, sub_sampling, dither_state = 0;

    /* copy sb_samples: zero the unused upper subbands */
    sb_used = QDM2_SB_USED(q->sub_sampling);

    for (ch = 0; ch < q->channels; ch++)
        for (i = 0; i < 8; i++)
            for (k = sb_used; k < SBLIMIT; k++)
                q->sb_samples[ch][(8 * index) + i][k] = 0;

    /* 8 filter iterations of 32 samples each per subpacket */
    for (ch = 0; ch < q->nb_channels; ch++) {
        float *samples_ptr = q->samples + ch;

        for (i = 0; i < 8; i++) {
            ff_mpa_synth_filter_float(&q->mpadsp,
                                      q->synth_buf[ch], &(q->synth_buf_offset[ch]),
                                      ff_mpa_synth_window_float, &dither_state,
                                      samples_ptr, q->nb_channels,
                                      q->sb_samples[ch][(8 * index) + i]);
            samples_ptr += 32 * q->nb_channels;
        }
    }

    /* add samples to output buffer, stepping through q->samples at the
     * sub-sampled rate and scaling up to the output range */
    sub_sampling = (4 >> q->sub_sampling);

    for (ch = 0; ch < q->channels; ch++)
        for (i = 0; i < q->frame_size; i++)
            q->output_buffer[q->channels * i + ch] += (1 << 23) * q->samples[q->nb_channels * sub_sampling * i + ch];
}
/**
 * Init static data (does not depend on specific file)
 *
 * NOTE(review): the plain 'static int done' guard is not thread-safe if
 * two decoder instances are opened concurrently — confirm whether callers
 * serialize init (FFmpeg later versions use a once-style primitive here).
 *
 * @param q context
 */
static av_cold void qdm2_init_static_data(void) {
    static int done;

    if(done)
        return;

    /* build VLC tables, synthesis window and lookup tables once */
    qdm2_init_vlc();
    ff_mpa_synth_init_float(ff_mpa_synth_window_float);
    softclip_table_init();
    rnd_table_init();
    init_noise_samples();

    done = 1;
}
/**
 * Init parameters from codec extradata
 *
 * Locates the frma/QDM2 atom inside the extradata, validates the QDCA
 * block, derives FFT/group/frame geometry and table selectors, and sets
 * up the RDFT and MPEG-audio DSP contexts.
 *
 * @return 0 on success, AVERROR_INVALIDDATA / AVERROR_PATCHWELCOME on error
 */
static av_cold int qdm2_decode_init(AVCodecContext *avctx)
{
    QDM2Context *s = avctx->priv_data;
    int tmp_val, tmp, size;
    GetByteContext gb;

    qdm2_init_static_data();

    /* extradata parsing

       Structure:
       wave {
           frma (QDM2)
           QDCA
           QDCP
       }

       32  size (including this field)
       32  tag (=frma)
       32  type (=QDM2 or QDMC)

       32  size (including this field, in bytes)
       32  tag (=QDCA) // maybe mandatory parameters
       32  unknown (=1)
       32  channels (=2)
       32  samplerate (=44100)
       32  bitrate (=96000)
       32  block size (=4096)
       32  frame size (=256) (for one channel)
       32  packet size (=1300)

       32  size (including this field, in bytes)
       32  tag (=QDCP) // maybe some tuneable parameters
       32  float1 (=1.0)
       32  zero ?
       32  float2 (=1.0)
       32  float3 (=1.0)
       32  unknown (27)
       32  unknown (8)
       32  zero ?
    */

    if (!avctx->extradata || (avctx->extradata_size < 48)) {
        av_log(avctx, AV_LOG_ERROR, "extradata missing or truncated\n");
        return AVERROR_INVALIDDATA;
    }

    bytestream2_init(&gb, avctx->extradata, avctx->extradata_size);

    /* scan byte-by-byte for the 'frma'+'QDM2' marker */
    while (bytestream2_get_bytes_left(&gb) > 8) {
        if (bytestream2_peek_be64(&gb) == (((uint64_t)MKBETAG('f','r','m','a') << 32) |
                                            (uint64_t)MKBETAG('Q','D','M','2')))
            break;
        bytestream2_skip(&gb, 1);
    }

    if (bytestream2_get_bytes_left(&gb) < 12) {
        av_log(avctx, AV_LOG_ERROR, "not enough extradata (%i)\n",
               bytestream2_get_bytes_left(&gb));
        return AVERROR_INVALIDDATA;
    }

    bytestream2_skip(&gb, 8);
    size = bytestream2_get_be32(&gb);

    if (size > bytestream2_get_bytes_left(&gb)) {
        av_log(avctx, AV_LOG_ERROR, "extradata size too small, %i < %i\n",
               bytestream2_get_bytes_left(&gb), size);
        return AVERROR_INVALIDDATA;
    }

    av_log(avctx, AV_LOG_DEBUG, "size: %d\n", size);
    if (bytestream2_get_be32(&gb) != MKBETAG('Q','D','C','A')) {
        av_log(avctx, AV_LOG_ERROR, "invalid extradata, expecting QDCA\n");
        return AVERROR_INVALIDDATA;
    }

    bytestream2_skip(&gb, 4);

    avctx->channels = s->nb_channels = s->channels = bytestream2_get_be32(&gb);
    if (s->channels <= 0 || s->channels > MPA_MAX_CHANNELS) {
        av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
        return AVERROR_INVALIDDATA;
    }
    avctx->channel_layout = avctx->channels == 2 ? AV_CH_LAYOUT_STEREO :
                                                   AV_CH_LAYOUT_MONO;

    avctx->sample_rate = bytestream2_get_be32(&gb);
    avctx->bit_rate = bytestream2_get_be32(&gb);
    s->group_size = bytestream2_get_be32(&gb);
    s->fft_size = bytestream2_get_be32(&gb);
    s->checksum_size = bytestream2_get_be32(&gb);
    if (s->checksum_size >= 1U << 28 || !s->checksum_size) {
        av_log(avctx, AV_LOG_ERROR, "data block size invalid (%u)\n", s->checksum_size);
        return AVERROR_INVALIDDATA;
    }

    s->fft_order = av_log2(s->fft_size) + 1;

    // Fail on unknown fft order
    if ((s->fft_order < 7) || (s->fft_order > 9)) {
        avpriv_request_sample(avctx, "Unknown FFT order %d", s->fft_order);
        return AVERROR_PATCHWELCOME;
    }

    // something like max decodable tones
    s->group_order = av_log2(s->group_size) + 1;
    s->frame_size = s->group_size / 16; // 16 iterations per super block

    if (s->frame_size > QDM2_MAX_FRAME_SIZE)
        return AVERROR_INVALIDDATA;

    s->sub_sampling = s->fft_order - 7;
    s->frequency_range = 255 / (1 << (2 - s->sub_sampling));

    if (s->frame_size * 4 >> s->sub_sampling > MPA_FRAME_SIZE) {
        avpriv_request_sample(avctx, "large frames");
        return AVERROR_PATCHWELCOME;
    }

    /* bitrate thresholds (per sub_sampling/channel combination) used to
     * pick the coding-method table */
    switch ((s->sub_sampling * 2 + s->channels - 1)) {
    case 0: tmp = 40; break;
    case 1: tmp = 48; break;
    case 2: tmp = 56; break;
    case 3: tmp = 72; break;
    case 4: tmp = 80; break;
    case 5: tmp = 100;break;
    default: tmp=s->sub_sampling; break;
    }
    tmp_val = 0;
    if ((tmp * 1000) < avctx->bit_rate) tmp_val = 1;
    if ((tmp * 1440) < avctx->bit_rate) tmp_val = 2;
    if ((tmp * 1760) < avctx->bit_rate) tmp_val = 3;
    if ((tmp * 2240) < avctx->bit_rate) tmp_val = 4;
    s->cm_table_select = tmp_val;

    /* coefficients-per-subband table selector, by bitrate */
    if (avctx->bit_rate <= 8000)
        s->coeff_per_sb_select = 0;
    else if (avctx->bit_rate < 16000)
        s->coeff_per_sb_select = 1;
    else
        s->coeff_per_sb_select = 2;

    if (s->fft_size != (1 << (s->fft_order - 1))) {
        av_log(avctx, AV_LOG_ERROR, "FFT size %d not power of 2.\n", s->fft_size);
        return AVERROR_INVALIDDATA;
    }

    ff_rdft_init(&s->rdft_ctx, s->fft_order, IDFT_C2R);
    ff_mpadsp_init(&s->mpadsp);

    avctx->sample_fmt = AV_SAMPLE_FMT_S16;

    return 0;
}
  1524. static av_cold int qdm2_decode_close(AVCodecContext *avctx)
  1525. {
  1526. QDM2Context *s = avctx->priv_data;
  1527. ff_rdft_end(&s->rdft_ctx);
  1528. return 0;
  1529. }
/**
 * Decode one subpacket's worth of audio (frame_size samples per channel)
 * from a compressed block into 16-bit output. Called 16 times per packet;
 * a superblock is parsed on subpacket 0 and state carries across calls.
 *
 * @param q    context
 * @param in   compressed input block (q->checksum_size bytes)
 * @param out  output buffer for frame_size * channels int16 samples
 * @return 0 on success, -1 on error
 */
static int qdm2_decode(QDM2Context *q, const uint8_t *in, int16_t *out)
{
    int ch, i;
    const int frame_size = (q->frame_size * q->channels);

    /* output_buffer holds two frames (current + lookahead) */
    if((unsigned)frame_size > FF_ARRAY_ELEMS(q->output_buffer)/2)
        return -1;

    /* select input buffer */
    q->compressed_data = in;
    q->compressed_size = q->checksum_size;

    /* copy old block, clear new block of output samples */
    memmove(q->output_buffer, &q->output_buffer[frame_size], frame_size * sizeof(float));
    memset(&q->output_buffer[frame_size], 0, frame_size * sizeof(float));

    /* decode block of QDM2 compressed data */
    if (q->sub_packet == 0) {
        q->has_errors = 0; // zero it for a new super block
        av_log(NULL,AV_LOG_DEBUG,"Superblock follows\n");
        qdm2_decode_super_block(q);
    }

    /* parse subpackets */
    if (!q->has_errors) {
        if (q->sub_packet == 2)
            qdm2_decode_fft_packets(q);

        qdm2_fft_tone_synthesizer(q, q->sub_packet);
    }

    /* sound synthesis stage 1 (FFT) */
    for (ch = 0; ch < q->channels; ch++) {
        qdm2_calculate_fft(q, ch, q->sub_packet);

        if (!q->has_errors && q->sub_packet_list_C[0].packet) {
            SAMPLES_NEEDED_2("has errors, and C list is not empty")
            return -1;
        }
    }

    /* sound synthesis stage 2 (MPEG audio like synthesis filter) */
    if (!q->has_errors && q->do_synth_filter)
        qdm2_synthesis_filter(q, q->sub_packet);

    /* 16 subpackets per superblock, then wrap */
    q->sub_packet = (q->sub_packet + 1) % 16;

    /* clip and convert output float[] to 16-bit signed samples */
    for (i = 0; i < frame_size; i++) {
        int value = (int)q->output_buffer[i];

        if (value > SOFTCLIP_THRESHOLD)
            value = (value > HARDCLIP_THRESHOLD) ? 32767 : softclip_table[ value - SOFTCLIP_THRESHOLD];
        else if (value < -SOFTCLIP_THRESHOLD)
            value = (value < -HARDCLIP_THRESHOLD) ? -32767 : -softclip_table[-value - SOFTCLIP_THRESHOLD];

        out[i] = value;
    }

    return 0;
}
  1577. static int qdm2_decode_frame(AVCodecContext *avctx, void *data,
  1578. int *got_frame_ptr, AVPacket *avpkt)
  1579. {
  1580. AVFrame *frame = data;
  1581. const uint8_t *buf = avpkt->data;
  1582. int buf_size = avpkt->size;
  1583. QDM2Context *s = avctx->priv_data;
  1584. int16_t *out;
  1585. int i, ret;
  1586. if(!buf)
  1587. return 0;
  1588. if(buf_size < s->checksum_size)
  1589. return -1;
  1590. /* get output buffer */
  1591. frame->nb_samples = 16 * s->frame_size;
  1592. if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
  1593. return ret;
  1594. out = (int16_t *)frame->data[0];
  1595. for (i = 0; i < 16; i++) {
  1596. if ((ret = qdm2_decode(s, buf, out)) < 0)
  1597. return ret;
  1598. out += s->channels * s->frame_size;
  1599. }
  1600. *got_frame_ptr = 1;
  1601. return s->checksum_size;
  1602. }
/* decoder registration entry for the QDM2 codec */
AVCodec ff_qdm2_decoder = {
    .name             = "qdm2",
    .long_name        = NULL_IF_CONFIG_SMALL("QDesign Music Codec 2"),
    .type             = AVMEDIA_TYPE_AUDIO,
    .id               = AV_CODEC_ID_QDM2,
    .priv_data_size   = sizeof(QDM2Context),
    .init             = qdm2_decode_init,
    .close            = qdm2_decode_close,
    .decode           = qdm2_decode_frame,
    .capabilities     = AV_CODEC_CAP_DR1,
};