FFmpeg 4.4.4
adpcm.c
1 /*
2  * Copyright (c) 2001-2003 The FFmpeg project
3  *
4  * first version by Francois Revol (revol@free.fr)
5  * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
6  * by Mike Melanson (melanson@pcisys.net)
7  * CD-ROM XA ADPCM codec by BERO
8  * EA ADPCM decoder by Robin Kay (komadori@myrealbox.com)
9  * EA ADPCM R1/R2/R3 decoder by Peter Ross (pross@xvid.org)
10  * EA IMA EACS decoder by Peter Ross (pross@xvid.org)
11  * EA IMA SEAD decoder by Peter Ross (pross@xvid.org)
12  * EA ADPCM XAS decoder by Peter Ross (pross@xvid.org)
13  * MAXIS EA ADPCM decoder by Robert Marston (rmarston@gmail.com)
14  * THP ADPCM decoder by Marco Gerards (mgerards@xs4all.nl)
15  * Argonaut Games ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
16  * Simon & Schuster Interactive ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
17  * Ubisoft ADPCM decoder by Zane van Iperen (zane@zanevaniperen.com)
18  * High Voltage Software ALP decoder by Zane van Iperen (zane@zanevaniperen.com)
19  * Cunning Developments decoder by Zane van Iperen (zane@zanevaniperen.com)
20  *
21  * This file is part of FFmpeg.
22  *
23  * FFmpeg is free software; you can redistribute it and/or
24  * modify it under the terms of the GNU Lesser General Public
25  * License as published by the Free Software Foundation; either
26  * version 2.1 of the License, or (at your option) any later version.
27  *
28  * FFmpeg is distributed in the hope that it will be useful,
29  * but WITHOUT ANY WARRANTY; without even the implied warranty of
30  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
31  * Lesser General Public License for more details.
32  *
33  * You should have received a copy of the GNU Lesser General Public
34  * License along with FFmpeg; if not, write to the Free Software
35  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
36  */
37 #include "avcodec.h"
38 #include "get_bits.h"
39 #include "bytestream.h"
40 #include "adpcm.h"
41 #include "adpcm_data.h"
42 #include "internal.h"
43 
44 /**
45  * @file
46  * ADPCM decoders
47  * Features and limitations:
48  *
49  * Reference documents:
50  * http://wiki.multimedia.cx/index.php?title=Category:ADPCM_Audio_Codecs
51  * http://www.pcisys.net/~melanson/codecs/simpleaudio.html [dead]
52  * http://www.geocities.com/SiliconValley/8682/aud3.txt [dead]
53  * http://openquicktime.sourceforge.net/
54  * XAnim sources (xa_codec.c) http://xanim.polter.net/
55  * http://www.cs.ucla.edu/~leec/mediabench/applications.html [dead]
56  * SoX source code http://sox.sourceforge.net/
57  *
58  * CD-ROM XA:
59  * http://ku-www.ss.titech.ac.jp/~yatsushi/xaadpcm.html [dead]
60  * vagpack & depack http://homepages.compuserve.de/bITmASTER32/psx-index.html [dead]
61  * readstr http://www.geocities.co.jp/Playtown/2004/
62  */
63 
64 /* These are for CD-ROM XA ADPCM */
65 static const int8_t xa_adpcm_table[5][2] = {
66  { 0, 0 },
67  { 60, 0 },
68  { 115, -52 },
69  { 98, -55 },
70  { 122, -60 }
71 };
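/* Prediction filter pairs (f0, f1) for CD-ROM XA ADPCM. xa_decode() below
 * applies them as (s_1*f0 + s_2*f1 + 32) >> 6, i.e. the table stores the
 * filter coefficients scaled by 64. */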
72 
73 static const int16_t ea_adpcm_table[] = {
74  0, 240, 460, 392,
75  0, 0, -208, -220,
76  0, 1, 3, 4,
77  7, 8, 10, 11,
78  0, -1, -3, -4
79 };
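/* The EA decoders below read this table in pairs: coeff1 = ea_adpcm_table[i]
 * and coeff2 = ea_adpcm_table[i + 4], where i is a 4-bit selector taken from
 * the bitstream (see e.g. the AV_CODEC_ID_ADPCM_EA case in adpcm_decode_frame). */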
80 
81 // padded to zero where table size is less than 16
82 static const int8_t swf_index_tables[4][16] = {
83  /*2*/ { -1, 2 },
84  /*3*/ { -1, -1, 2, 4 },
85  /*4*/ { -1, -1, -1, -1, 2, 4, 6, 8 },
86  /*5*/ { -1, -1, -1, -1, -1, -1, -1, -1, 1, 2, 4, 6, 8, 10, 13, 16 }
87 };
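/* One row per nibble size (2-5 bits). adpcm_swf_decode() indexes a row with
 * the delta code after clearing its sign bit, so only the first 2^(bits-1)
 * entries of each row are meaningful; the rest is the zero padding noted above. */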
88 
89 static const int8_t zork_index_table[8] = {
90  -1, -1, -1, 1, 4, 7, 10, 12,
91 };
92 
93 static const int8_t mtf_index_table[16] = {
94  8, 6, 4, 2, -1, -1, -1, -1,
95  -1, -1, -1, -1, 2, 4, 6, 8,
96 };
97 
98 /* end of tables */
99 
100 typedef struct ADPCMDecodeContext {
101  ADPCMChannelStatus status[14];
102  int vqa_version; /**< VQA version. Used for ADPCM_IMA_WS */
103  int has_status; /**< Status flag. Reset to 0 after a flush. */
104 } ADPCMDecodeContext;
105 
106 static av_cold int adpcm_decode_init(AVCodecContext * avctx)
107 {
108  ADPCMDecodeContext *c = avctx->priv_data;
109  unsigned int min_channels = 1;
110  unsigned int max_channels = 2;
111 
112  switch(avctx->codec->id) {
114  max_channels = 1;
115  break;
118  min_channels = 2;
119  break;
126  max_channels = 6;
127  break;
129  min_channels = 2;
130  max_channels = 8;
131  if (avctx->channels & 1) {
132  avpriv_request_sample(avctx, "channel count %d", avctx->channels);
133  return AVERROR_PATCHWELCOME;
134  }
135  break;
137  max_channels = 8;
138  if (avctx->channels <= 0 || avctx->block_align % (16 * avctx->channels))
139  return AVERROR_INVALIDDATA;
140  break;
144  max_channels = 14;
145  break;
146  }
147  if (avctx->channels < min_channels || avctx->channels > max_channels) {
148  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
149  return AVERROR(EINVAL);
150  }
151 
152  switch(avctx->codec->id) {
154  c->status[0].step = c->status[1].step = 511;
155  break;
157  if (avctx->bits_per_coded_sample < 2 || avctx->bits_per_coded_sample > 5)
158  return AVERROR_INVALIDDATA;
159  break;
161  if (avctx->extradata && avctx->extradata_size >= 8) {
162  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
163  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
164  }
165  break;
167  if (avctx->extradata) {
168  if (avctx->extradata_size >= 28) {
169  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 16), 18);
170  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 20), 0, 88);
171  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
172  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 8), 0, 88);
173  } else if (avctx->extradata_size >= 16) {
174  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 0), 18);
175  c->status[0].step_index = av_clip(AV_RL32(avctx->extradata + 4), 0, 88);
176  c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 8), 18);
177  c->status[1].step_index = av_clip(AV_RL32(avctx->extradata + 12), 0, 88);
178  }
179  }
180  break;
182  if (avctx->extradata && avctx->extradata_size >= 2)
183  c->vqa_version = AV_RL16(avctx->extradata);
184  break;
186  if (avctx->bits_per_coded_sample != 4 || avctx->block_align != 17 * avctx->channels)
187  return AVERROR_INVALIDDATA;
188  break;
190  if (avctx->bits_per_coded_sample != 8)
191  return AVERROR_INVALIDDATA;
192  break;
193  default:
194  break;
195  }
196 
197  switch (avctx->codec->id) {
218  break;
220  avctx->sample_fmt = c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P :
222  break;
224  avctx->sample_fmt = avctx->channels > 2 ? AV_SAMPLE_FMT_S16P :
226  break;
227  default:
228  avctx->sample_fmt = AV_SAMPLE_FMT_S16;
229  }
230 
231  return 0;
232 }
233 
234 static inline int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
235 {
236  int delta, pred, step, add;
237 
238  pred = c->predictor;
239  delta = nibble & 7;
240  step = c->step;
241  add = (delta * 2 + 1) * step;
242  if (add < 0)
243  add = add + 7;
244 
245  if ((nibble & 8) == 0)
246  pred = av_clip(pred + (add >> 3), -32767, 32767);
247  else
248  pred = av_clip(pred - (add >> 3), -32767, 32767);
249 
250  switch (delta) {
251  case 7:
252  step *= 0x99;
253  break;
254  case 6:
255  c->step = av_clip(c->step * 2, 127, 24576);
256  c->predictor = pred;
257  return pred;
258  case 5:
259  step *= 0x66;
260  break;
261  case 4:
262  step *= 0x4d;
263  break;
264  default:
265  step *= 0x39;
266  break;
267  }
268 
269  if (step < 0)
270  step += 0x3f;
271 
272  c->step = step >> 6;
273  c->step = av_clip(c->step, 127, 24576);
274  c->predictor = pred;
275  return pred;
276 }
277 
278 static inline int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
279 {
280  int step_index;
281  int predictor;
282  int sign, delta, diff, step;
283 
284  step = ff_adpcm_step_table[c->step_index];
285  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
286  step_index = av_clip(step_index, 0, 88);
287 
288  sign = nibble & 8;
289  delta = nibble & 7;
290 /* perform direct multiplication instead of the series of jumps proposed by
291  * the reference ADPCM implementation since modern CPUs can do the mults
292  * quickly enough */
293  diff = ((2 * delta + 1) * step) >> shift;
294  predictor = c->predictor;
295  if (sign) predictor -= diff;
296  else predictor += diff;
297 
298  c->predictor = av_clip_int16(predictor);
299  c->step_index = step_index;
300 
301  return (int16_t)c->predictor;
302 }
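/* Worked example, assuming the standard IMA tables from adpcm_data.c
 * (ff_adpcm_step_table[10] == 19, ff_adpcm_index_table[9] == -1) and the
 * usual shift of 3: with step_index == 10 and nibble == 0x9 (sign bit set,
 * magnitude 1), diff = ((2*1 + 1) * 19) >> 3 == 7, so the predictor drops
 * by 7 and step_index becomes 9. */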
303 
304 static inline int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int shift)
305 {
306  int step_index;
307  int predictor;
308  int sign, delta, diff, step;
309 
310  step = ff_adpcm_step_table[c->step_index];
311  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
312  step_index = av_clip(step_index, 0, 88);
313 
314  sign = nibble & 8;
315  delta = nibble & 7;
316  diff = (delta * step) >> shift;
317  predictor = c->predictor;
318  if (sign) predictor -= diff;
319  else predictor += diff;
320 
321  c->predictor = av_clip_int16(predictor);
322  c->step_index = step_index;
323 
324  return (int16_t)c->predictor;
325 }
326 
327 static inline int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble)
328 {
329  int step_index, step, delta, predictor;
330 
331  step = ff_adpcm_step_table[c->step_index];
332 
333  delta = step * (2 * nibble - 15);
334  predictor = c->predictor + delta;
335 
336  step_index = c->step_index + mtf_index_table[(unsigned)nibble];
337  c->predictor = av_clip_int16(predictor >> 4);
338  c->step_index = av_clip(step_index, 0, 88);
339 
340  return (int16_t)c->predictor;
341 }
342 
343 static inline int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
344 {
345  int step_index;
346  int predictor;
347  int step;
348 
349  nibble = sign_extend(nibble & 0xF, 4);
350 
351  step = ff_adpcm_ima_cunning_step_table[c->step_index];
352  step_index = c->step_index + ff_adpcm_ima_cunning_index_table[abs(nibble)];
353  step_index = av_clip(step_index, 0, 60);
354 
355  predictor = c->predictor + step * nibble;
356 
357  c->predictor = av_clip_int16(predictor);
358  c->step_index = step_index;
359 
360  return c->predictor;
361 }
362 
363 static inline int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb, int bps)
364 {
365  int nibble, step_index, predictor, sign, delta, diff, step, shift;
366 
367  shift = bps - 1;
368  nibble = get_bits_le(gb, bps),
369  step = ff_adpcm_step_table[c->step_index];
370  step_index = c->step_index + ff_adpcm_index_tables[bps - 2][nibble];
371  step_index = av_clip(step_index, 0, 88);
372 
373  sign = nibble & (1 << shift);
374  delta = av_mod_uintp2(nibble, shift);
375  diff = ((2 * delta + 1) * step) >> shift;
376  predictor = c->predictor;
377  if (sign) predictor -= diff;
378  else predictor += diff;
379 
380  c->predictor = av_clip_int16(predictor);
381  c->step_index = step_index;
382 
383  return (int16_t)c->predictor;
384 }
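/* Variable-width variant used for 2- to 5-bit IMA WAV data: the code is read
 * LSB-first (get_bits_le), its top bit is the sign, and the step adaptation
 * uses ff_adpcm_index_tables[bps - 2]. */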
385 
386 static inline int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble)
387 {
388  int step_index;
389  int predictor;
390  int diff, step;
391 
392  step = ff_adpcm_step_table[c->step_index];
393  step_index = c->step_index + ff_adpcm_index_table[nibble];
394  step_index = av_clip(step_index, 0, 88);
395 
396  diff = step >> 3;
397  if (nibble & 4) diff += step;
398  if (nibble & 2) diff += step >> 1;
399  if (nibble & 1) diff += step >> 2;
400 
401  if (nibble & 8)
402  predictor = c->predictor - diff;
403  else
404  predictor = c->predictor + diff;
405 
406  c->predictor = av_clip_int16(predictor);
407  c->step_index = step_index;
408 
409  return c->predictor;
410 }
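/* Unlike adpcm_ima_expand_nibble() above, the QT variant avoids the
 * multiplication: diff starts at step/8 and conditionally adds step, step/2
 * and step/4, which reproduces ((2*delta + 1) * step) / 8 with shifts and adds. */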
411 
412 static inline int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble)
413 {
414  int predictor;
415 
416  predictor = (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
417  predictor += ((nibble & 0x08)?(nibble - 0x10):(nibble)) * c->idelta;
418 
419  c->sample2 = c->sample1;
420  c->sample1 = av_clip_int16(predictor);
421  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
422  if (c->idelta < 16) c->idelta = 16;
423  if (c->idelta > INT_MAX/768) {
424  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
425  c->idelta = INT_MAX/768;
426  }
427 
428  return c->sample1;
429 }
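/* Note: the divisor here is 64 rather than the 256 used in the MS-ADPCM
 * specification, on the assumption that ff_adpcm_AdaptCoeff1/2 in adpcm_data.c
 * store the coefficients already divided by 4; check that table if this code
 * is reused elsewhere. */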
430 
431 static inline int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble)
432 {
433  int step_index, predictor, sign, delta, diff, step;
434 
435  step = ff_adpcm_oki_step_table[c->step_index];
436  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
437  step_index = av_clip(step_index, 0, 48);
438 
439  sign = nibble & 8;
440  delta = nibble & 7;
441  diff = ((2 * delta + 1) * step) >> 3;
442  predictor = c->predictor;
443  if (sign) predictor -= diff;
444  else predictor += diff;
445 
446  c->predictor = av_clip_intp2(predictor, 11);
447  c->step_index = step_index;
448 
449  return c->predictor * 16;
450 }
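/* OKI/Dialogic ADPCM works on 12-bit samples: the predictor is clipped to the
 * signed 12-bit range (av_clip_intp2(..., 11)) and scaled by 16 on output to
 * fill the 16-bit sample range. */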
451 
452 static inline int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble)
453 {
454  int sign, delta, diff;
455  int new_step;
456 
457  sign = nibble & 8;
458  delta = nibble & 7;
459 /* perform direct multiplication instead of the series of jumps proposed by
460  * the reference ADPCM implementation since modern CPUs can do the mults
461  * quickly enough */
462  diff = ((2 * delta + 1) * c->step) >> 3;
463 /* the predictor update is not so trivial: the predictor is multiplied by 254/256 before updating */
464  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
465  c->predictor = av_clip_int16(c->predictor);
466  /* calculate new step and clamp it to range 511..32767 */
467  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
468  c->step = av_clip(new_step, 511, 32767);
469 
470  return (int16_t)c->predictor;
471 }
472 
473 static inline int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble, int size, int shift)
474 {
475  int sign, delta, diff;
476 
477  sign = nibble & (1<<(size-1));
478  delta = nibble & ((1<<(size-1))-1);
479  diff = delta << (7 + c->step + shift);
480 
481  /* clamp result */
482  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384,16256);
483 
484  /* calculate new step */
485  if (delta >= (2*size - 3) && c->step < 3)
486  c->step++;
487  else if (delta == 0 && c->step > 0)
488  c->step--;
489 
490  return (int16_t) c->predictor;
491 }
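/* 'size' is the width of the code in bits (2-4): the top bit is the sign and
 * the remaining bits the magnitude, scaled by 2^(7 + step + shift). The step
 * value itself only adapts between 0 and 3. */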
492 
493 static inline int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
494 {
495  if(!c->step) {
496  c->predictor = 0;
497  c->step = 127;
498  }
499 
500  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
501  c->predictor = av_clip_int16(c->predictor);
502  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
503  c->step = av_clip(c->step, 127, 24576);
504  return c->predictor;
505 }
506 
507 static inline int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
508 {
509  c->predictor += ff_adpcm_mtaf_stepsize[c->step][nibble];
510  c->predictor = av_clip_int16(c->predictor);
511  c->step += ff_adpcm_index_table[nibble];
512  c->step = av_clip_uintp2(c->step, 5);
513  return c->predictor;
514 }
515 
516 static inline int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble)
517 {
518  int16_t index = c->step_index;
519  uint32_t lookup_sample = ff_adpcm_step_table[index];
520  int32_t sample = 0;
521 
522  if (nibble & 0x40)
523  sample += lookup_sample;
524  if (nibble & 0x20)
525  sample += lookup_sample >> 1;
526  if (nibble & 0x10)
527  sample += lookup_sample >> 2;
528  if (nibble & 0x08)
529  sample += lookup_sample >> 3;
530  if (nibble & 0x04)
531  sample += lookup_sample >> 4;
532  if (nibble & 0x02)
533  sample += lookup_sample >> 5;
534  if (nibble & 0x01)
535  sample += lookup_sample >> 6;
536  if (nibble & 0x80)
537  sample = -sample;
538 
539  sample += c->predictor;
540  sample = av_clip_int16(sample);
541 
542  index += zork_index_table[(nibble >> 4) & 7];
543  index = av_clip(index, 0, 88);
544 
545  c->predictor = sample;
546  c->step_index = index;
547 
548  return sample;
549 }
550 
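/* Decode one 128-byte CD-ROM XA sound group. The group is processed as four
 * pairs of 28-sample units: in each pair, the low nibbles of the data bytes
 * go to out0 and the high nibbles to out1 (left/right in stereo, two
 * consecutive 28-sample runs of the same channel in mono), for 224 samples
 * per group in total. */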
551 static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
552  const uint8_t *in, ADPCMChannelStatus *left,
553  ADPCMChannelStatus *right, int channels, int sample_offset)
554 {
555  int i, j;
556  int shift,filter,f0,f1;
557  int s_1,s_2;
558  int d,s,t;
559 
560  out0 += sample_offset;
561  if (channels == 1)
562  out1 = out0 + 28;
563  else
564  out1 += sample_offset;
565 
566  for(i=0;i<4;i++) {
567  shift = 12 - (in[4+i*2] & 15);
568  filter = in[4+i*2] >> 4;
569  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
570  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
571  filter=0;
572  }
573  if (shift < 0) {
574  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
575  shift = 0;
576  }
577  f0 = xa_adpcm_table[filter][0];
578  f1 = xa_adpcm_table[filter][1];
579 
580  s_1 = left->sample1;
581  s_2 = left->sample2;
582 
583  for(j=0;j<28;j++) {
584  d = in[16+i+j*4];
585 
586  t = sign_extend(d, 4);
587  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
588  s_2 = s_1;
589  s_1 = av_clip_int16(s);
590  out0[j] = s_1;
591  }
592 
593  if (channels == 2) {
594  left->sample1 = s_1;
595  left->sample2 = s_2;
596  s_1 = right->sample1;
597  s_2 = right->sample2;
598  }
599 
600  shift = 12 - (in[5+i*2] & 15);
601  filter = in[5+i*2] >> 4;
602  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table) || shift < 0) {
603  avpriv_request_sample(avctx, "unknown XA-ADPCM filter %d", filter);
604  filter=0;
605  }
606  if (shift < 0) {
607  avpriv_request_sample(avctx, "unknown XA-ADPCM shift %d", shift);
608  shift = 0;
609  }
610 
611  f0 = xa_adpcm_table[filter][0];
612  f1 = xa_adpcm_table[filter][1];
613 
614  for(j=0;j<28;j++) {
615  d = in[16+i+j*4];
616 
617  t = sign_extend(d >> 4, 4);
618  s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
619  s_2 = s_1;
620  s_1 = av_clip_int16(s);
621  out1[j] = s_1;
622  }
623 
624  if (channels == 2) {
625  right->sample1 = s_1;
626  right->sample2 = s_2;
627  } else {
628  left->sample1 = s_1;
629  left->sample2 = s_2;
630  }
631 
632  out0 += 28 * (3 - channels);
633  out1 += 28 * (3 - channels);
634  }
635 
636  return 0;
637 }
638 
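/* SWF ADPCM: a 2-bit code at the start of the packet selects the nibble size
 * (2-5 bits). Each block then carries, per channel, a raw 16-bit seed sample
 * and a 6-bit step index, followed by up to 4095 coded deltas per channel. */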
639 static void adpcm_swf_decode(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int16_t *samples)
640 {
641  ADPCMDecodeContext *c = avctx->priv_data;
642  GetBitContext gb;
643  const int8_t *table;
644  int k0, signmask, nb_bits, count;
645  int size = buf_size*8;
646  int i;
647 
648  init_get_bits(&gb, buf, size);
649 
650  //read bits & initial values
651  nb_bits = get_bits(&gb, 2)+2;
652  table = swf_index_tables[nb_bits-2];
653  k0 = 1 << (nb_bits-2);
654  signmask = 1 << (nb_bits-1);
655 
656  while (get_bits_count(&gb) <= size - 22*avctx->channels) {
657  for (i = 0; i < avctx->channels; i++) {
658  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
659  c->status[i].step_index = get_bits(&gb, 6);
660  }
661 
662  for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
663  int i;
664 
665  for (i = 0; i < avctx->channels; i++) {
666  // similar to IMA adpcm
667  int delta = get_bits(&gb, nb_bits);
668  int step = ff_adpcm_step_table[c->status[i].step_index];
669  int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
670  int k = k0;
671 
672  do {
673  if (delta & k)
674  vpdiff += step;
675  step >>= 1;
676  k >>= 1;
677  } while(k);
678  vpdiff += step;
679 
680  if (delta & signmask)
681  c->status[i].predictor -= vpdiff;
682  else
683  c->status[i].predictor += vpdiff;
684 
685  c->status[i].step_index += table[delta & (~signmask)];
686 
687  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
688  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
689 
690  *samples++ = c->status[i].predictor;
691  }
692  }
693  }
694 }
695 
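/**
 * Expand one Argonaut Games ADPCM nibble. With 'flag' set the prediction is
 * 2*sample1 - sample2 (second order), otherwise just sample1 (first order);
 * the expressions below compute this at 4x scale before the final >> 2.
 */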
696 int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble, int shift, int flag)
697 {
698  int sample = sign_extend(nibble, 4) * (1 << shift);
699 
700  if (flag)
701  sample += (8 * cs->sample1) - (4 * cs->sample2);
702  else
703  sample += 4 * cs->sample1;
704 
705  sample = av_clip_int16(sample >> 2);
706 
707  cs->sample2 = cs->sample1;
708  cs->sample1 = sample;
709 
710  return sample;
711 }
712 
713 /**
714  * Get the number of samples (per channel) that will be decoded from the packet.
715  * In one case, this is actually the maximum number of samples possible to
716  * decode with the given buf_size.
717  *
718  * @param[out] coded_samples set to the number of samples as coded in the
719  * packet, or 0 if the codec does not encode the
720  * number of samples in each frame.
721  * @param[out] approx_nb_samples set to non-zero if the number of samples
722  * returned is an approximation.
723  */
724 static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb,
725  int buf_size, int *coded_samples, int *approx_nb_samples)
726 {
727  ADPCMDecodeContext *s = avctx->priv_data;
728  int nb_samples = 0;
729  int ch = avctx->channels;
730  int has_coded_samples = 0;
731  int header_size;
732 
733  *coded_samples = 0;
734  *approx_nb_samples = 0;
735 
736  if(ch <= 0)
737  return 0;
738 
739  switch (avctx->codec->id) {
740  /* constant, only check buf_size */
742  if (buf_size < 76 * ch)
743  return 0;
744  nb_samples = 128;
745  break;
747  if (buf_size < 34 * ch)
748  return 0;
749  nb_samples = 64;
750  break;
751  /* simple 4-bit adpcm */
764  nb_samples = buf_size * 2 / ch;
765  break;
766  }
767  if (nb_samples)
768  return nb_samples;
769 
770  /* simple 4-bit adpcm, with header */
771  header_size = 0;
772  switch (avctx->codec->id) {
777  case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break;
778  case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break;
779  }
780  if (header_size > 0)
781  return (buf_size - header_size) * 2 / ch;
782 
783  /* more complex formats */
784  switch (avctx->codec->id) {
786  bytestream2_skip(gb, 4);
787  has_coded_samples = 1;
788  *coded_samples = bytestream2_get_le32u(gb);
789  nb_samples = FFMIN((buf_size - 8) * 2, *coded_samples);
790  bytestream2_seek(gb, -8, SEEK_CUR);
791  break;
793  has_coded_samples = 1;
794  *coded_samples = bytestream2_get_le32(gb);
795  *coded_samples -= *coded_samples % 28;
796  nb_samples = (buf_size - 12) / 30 * 28;
797  break;
799  has_coded_samples = 1;
800  *coded_samples = bytestream2_get_le32(gb);
801  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
802  break;
804  nb_samples = (buf_size - ch) / ch * 2;
805  break;
809  /* maximum number of samples */
810  /* has internal offsets and a per-frame switch to signal raw 16-bit */
811  has_coded_samples = 1;
812  switch (avctx->codec->id) {
814  header_size = 4 + 9 * ch;
815  *coded_samples = bytestream2_get_le32(gb);
816  break;
818  header_size = 4 + 5 * ch;
819  *coded_samples = bytestream2_get_le32(gb);
820  break;
822  header_size = 4 + 5 * ch;
823  *coded_samples = bytestream2_get_be32(gb);
824  break;
825  }
826  *coded_samples -= *coded_samples % 28;
827  nb_samples = (buf_size - header_size) * 2 / ch;
828  nb_samples -= nb_samples % 28;
829  *approx_nb_samples = 1;
830  break;
832  if (avctx->block_align > 0)
833  buf_size = FFMIN(buf_size, avctx->block_align);
834  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
835  break;
837  if (avctx->block_align > 0)
838  buf_size = FFMIN(buf_size, avctx->block_align);
839  if (buf_size < 4 * ch)
840  return AVERROR_INVALIDDATA;
841  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
842  break;
844  if (avctx->block_align > 0)
845  buf_size = FFMIN(buf_size, avctx->block_align);
846  nb_samples = (buf_size - 4 * ch) * 2 / ch;
847  break;
849  {
850  int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
851  int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
852  if (avctx->block_align > 0)
853  buf_size = FFMIN(buf_size, avctx->block_align);
854  if (buf_size < 4 * ch)
855  return AVERROR_INVALIDDATA;
856  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
857  break;
858  }
860  if (avctx->block_align > 0)
861  buf_size = FFMIN(buf_size, avctx->block_align);
862  nb_samples = (buf_size - 6 * ch) * 2 / ch;
863  break;
865  if (avctx->block_align > 0)
866  buf_size = FFMIN(buf_size, avctx->block_align);
867  nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
868  break;
872  {
873  int samples_per_byte;
874  switch (avctx->codec->id) {
875  case AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break;
876  case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break;
877  case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break;
878  }
879  if (!s->status[0].step_index) {
880  if (buf_size < ch)
881  return AVERROR_INVALIDDATA;
882  nb_samples++;
883  buf_size -= ch;
884  }
885  nb_samples += buf_size * samples_per_byte / ch;
886  break;
887  }
889  {
890  int buf_bits = buf_size * 8 - 2;
891  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
892  int block_hdr_size = 22 * ch;
893  int block_size = block_hdr_size + nbits * ch * 4095;
894  int nblocks = buf_bits / block_size;
895  int bits_left = buf_bits - nblocks * block_size;
896  nb_samples = nblocks * 4096;
897  if (bits_left >= block_hdr_size)
898  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
899  break;
900  }
903  if (avctx->extradata) {
904  nb_samples = buf_size * 14 / (8 * ch);
905  break;
906  }
907  has_coded_samples = 1;
908  bytestream2_skip(gb, 4); // channel size
909  *coded_samples = (avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE) ?
910  bytestream2_get_le32(gb) :
911  bytestream2_get_be32(gb);
912  buf_size -= 8 + 36 * ch;
913  buf_size /= ch;
914  nb_samples = buf_size / 8 * 14;
915  if (buf_size % 8 > 1)
916  nb_samples += (buf_size % 8 - 1) * 2;
917  *approx_nb_samples = 1;
918  break;
920  nb_samples = buf_size / (9 * ch) * 16;
921  break;
923  nb_samples = (buf_size / 128) * 224 / ch;
924  break;
927  nb_samples = buf_size / (16 * ch) * 28;
928  break;
930  nb_samples = buf_size / avctx->block_align * 32;
931  break;
933  nb_samples = buf_size / ch;
934  break;
935  }
936 
937  /* validate coded sample count */
938  if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples))
939  return AVERROR_INVALIDDATA;
940 
941  return nb_samples;
942 }
943 
944 static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
945  int *got_frame_ptr, AVPacket *avpkt)
946 {
947  AVFrame *frame = data;
948  const uint8_t *buf = avpkt->data;
949  int buf_size = avpkt->size;
950  ADPCMDecodeContext *c = avctx->priv_data;
951  ADPCMChannelStatus *cs;
952  int n, m, channel, i;
953  int16_t *samples;
954  int16_t **samples_p;
955  int st; /* stereo */
956  int count1, count2;
957  int nb_samples, coded_samples, approx_nb_samples, ret;
958  GetByteContext gb;
959 
960  bytestream2_init(&gb, buf, buf_size);
961  nb_samples = get_nb_samples(avctx, &gb, buf_size, &coded_samples, &approx_nb_samples);
962  if (nb_samples <= 0) {
963  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
964  return AVERROR_INVALIDDATA;
965  }
966 
967  /* get output buffer */
968  frame->nb_samples = nb_samples;
969  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
970  return ret;
971  samples = (int16_t *)frame->data[0];
972  samples_p = (int16_t **)frame->extended_data;
973 
974  /* use coded_samples when applicable */
975  /* it is always <= nb_samples, so the output buffer will be large enough */
976  if (coded_samples) {
977  if (!approx_nb_samples && coded_samples != nb_samples)
978  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
979  frame->nb_samples = nb_samples = coded_samples;
980  }
981 
982  st = avctx->channels == 2 ? 1 : 0;
983 
984  switch(avctx->codec->id) {
986  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
987  Channel data is interleaved per-chunk. */
988  for (channel = 0; channel < avctx->channels; channel++) {
989  int predictor;
990  int step_index;
991  cs = &(c->status[channel]);
992  /* (pppppp) (piiiiiii) */
993 
994  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
995  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
996  step_index = predictor & 0x7F;
997  predictor &= ~0x7F;
998 
999  if (cs->step_index == step_index) {
1000  int diff = predictor - cs->predictor;
1001  if (diff < 0)
1002  diff = - diff;
1003  if (diff > 0x7f)
1004  goto update;
1005  } else {
1006  update:
1007  cs->step_index = step_index;
1008  cs->predictor = predictor;
1009  }
1010 
1011  if (cs->step_index > 88u){
1012  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1013  channel, cs->step_index);
1014  return AVERROR_INVALIDDATA;
1015  }
1016 
1017  samples = samples_p[channel];
1018 
1019  for (m = 0; m < 64; m += 2) {
1020  int byte = bytestream2_get_byteu(&gb);
1021  samples[m ] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F);
1022  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4 );
1023  }
1024  }
1025  break;
1027  for(i=0; i<avctx->channels; i++){
1028  cs = &(c->status[i]);
1029  cs->predictor = samples_p[i][0] = sign_extend(bytestream2_get_le16u(&gb), 16);
1030 
1031  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1032  if (cs->step_index > 88u){
1033  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1034  i, cs->step_index);
1035  return AVERROR_INVALIDDATA;
1036  }
1037  }
1038 
1039  if (avctx->bits_per_coded_sample != 4) {
1040  int samples_per_block = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2];
1041  int block_size = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2];
1042  uint8_t temp[20 + AV_INPUT_BUFFER_PADDING_SIZE];
1043  GetBitContext g;
1044 
1045  for (n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
1046  for (i = 0; i < avctx->channels; i++) {
1047  int j;
1048 
1049  cs = &c->status[i];
1050  samples = &samples_p[i][1 + n * samples_per_block];
1051  for (j = 0; j < block_size; j++) {
1052  temp[j] = buf[4 * avctx->channels + block_size * n * avctx->channels +
1053  (j % 4) + (j / 4) * (avctx->channels * 4) + i * 4];
1054  }
1055  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
1056  if (ret < 0)
1057  return ret;
1058  for (m = 0; m < samples_per_block; m++) {
1059  samples[m] = adpcm_ima_wav_expand_nibble(cs, &g,
1060  avctx->bits_per_coded_sample);
1061  }
1062  }
1063  }
1064  bytestream2_skip(&gb, avctx->block_align - avctx->channels * 4);
1065  } else {
1066  for (n = 0; n < (nb_samples - 1) / 8; n++) {
1067  for (i = 0; i < avctx->channels; i++) {
1068  cs = &c->status[i];
1069  samples = &samples_p[i][1 + n * 8];
1070  for (m = 0; m < 8; m += 2) {
1071  int v = bytestream2_get_byteu(&gb);
1072  samples[m ] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1073  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1074  }
1075  }
1076  }
1077  }
1078  break;
1079  case AV_CODEC_ID_ADPCM_4XM:
1080  for (i = 0; i < avctx->channels; i++)
1081  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1082 
1083  for (i = 0; i < avctx->channels; i++) {
1084  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1085  if (c->status[i].step_index > 88u) {
1086  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1087  i, c->status[i].step_index);
1088  return AVERROR_INVALIDDATA;
1089  }
1090  }
1091 
1092  for (i = 0; i < avctx->channels; i++) {
1093  samples = (int16_t *)frame->data[i];
1094  cs = &c->status[i];
1095  for (n = nb_samples >> 1; n > 0; n--) {
1096  int v = bytestream2_get_byteu(&gb);
1097  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
1098  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
1099  }
1100  }
1101  break;
1102  case AV_CODEC_ID_ADPCM_AGM:
1103  for (i = 0; i < avctx->channels; i++)
1104  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1105  for (i = 0; i < avctx->channels; i++)
1106  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
1107 
1108  for (n = 0; n < nb_samples >> (1 - st); n++) {
1109  int v = bytestream2_get_byteu(&gb);
1110  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
1111  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4 );
1112  }
1113  break;
1114  case AV_CODEC_ID_ADPCM_MS:
1115  {
1116  int block_predictor;
1117 
1118  if (avctx->channels > 2) {
1119  for (channel = 0; channel < avctx->channels; channel++) {
1120  samples = samples_p[channel];
1121  block_predictor = bytestream2_get_byteu(&gb);
1122  if (block_predictor > 6) {
1123  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
1124  channel, block_predictor);
1125  return AVERROR_INVALIDDATA;
1126  }
1127  c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1128  c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1129  c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1130  c->status[channel].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1131  c->status[channel].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1132  *samples++ = c->status[channel].sample2;
1133  *samples++ = c->status[channel].sample1;
1134  for(n = (nb_samples - 2) >> 1; n > 0; n--) {
1135  int byte = bytestream2_get_byteu(&gb);
1136  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4 );
1137  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
1138  }
1139  }
1140  } else {
1141  block_predictor = bytestream2_get_byteu(&gb);
1142  if (block_predictor > 6) {
1143  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
1144  block_predictor);
1145  return AVERROR_INVALIDDATA;
1146  }
1147  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1148  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1149  if (st) {
1150  block_predictor = bytestream2_get_byteu(&gb);
1151  if (block_predictor > 6) {
1152  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
1153  block_predictor);
1154  return AVERROR_INVALIDDATA;
1155  }
1156  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1157  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1158  }
1159  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1160  if (st){
1161  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1162  }
1163 
1164  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1165  if (st) c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1166  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1167  if (st) c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1168 
1169  *samples++ = c->status[0].sample2;
1170  if (st) *samples++ = c->status[1].sample2;
1171  *samples++ = c->status[0].sample1;
1172  if (st) *samples++ = c->status[1].sample1;
1173  for(n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
1174  int byte = bytestream2_get_byteu(&gb);
1175  *samples++ = adpcm_ms_expand_nibble(&c->status[0 ], byte >> 4 );
1176  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
1177  }
1178  }
1179  break;
1180  }
1182  for (channel = 0; channel < avctx->channels; channel+=2) {
1183  bytestream2_skipu(&gb, 4);
1184  c->status[channel ].step = bytestream2_get_le16u(&gb) & 0x1f;
1185  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
1186  c->status[channel ].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1187  bytestream2_skipu(&gb, 2);
1188  c->status[channel + 1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1189  bytestream2_skipu(&gb, 2);
1190  for (n = 0; n < nb_samples; n+=2) {
1191  int v = bytestream2_get_byteu(&gb);
1192  samples_p[channel][n ] = adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
1193  samples_p[channel][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4 );
1194  }
1195  for (n = 0; n < nb_samples; n+=2) {
1196  int v = bytestream2_get_byteu(&gb);
1197  samples_p[channel + 1][n ] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
1198  samples_p[channel + 1][n + 1] = adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4 );
1199  }
1200  }
1201  break;
1203  for (channel = 0; channel < avctx->channels; channel++) {
1204  cs = &c->status[channel];
1205  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
1206  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1207  if (cs->step_index > 88u){
1208  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1209  channel, cs->step_index);
1210  return AVERROR_INVALIDDATA;
1211  }
1212  }
1213  for (n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1214  int v = bytestream2_get_byteu(&gb);
1215  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
1216  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1217  }
1218  break;
1220  {
1221  int last_byte = 0;
1222  int nibble;
1223  int decode_top_nibble_next = 0;
1224  int diff_channel;
1225  const int16_t *samples_end = samples + avctx->channels * nb_samples;
1226 
1227  bytestream2_skipu(&gb, 10);
1228  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1229  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1230  c->status[0].step_index = bytestream2_get_byteu(&gb);
1231  c->status[1].step_index = bytestream2_get_byteu(&gb);
1232  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u){
1233  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1234  c->status[0].step_index, c->status[1].step_index);
1235  return AVERROR_INVALIDDATA;
1236  }
1237  /* sign extend the predictors */
1238  diff_channel = c->status[1].predictor;
1239 
1240  /* DK3 ADPCM support macro */
1241 #define DK3_GET_NEXT_NIBBLE() \
1242  if (decode_top_nibble_next) { \
1243  nibble = last_byte >> 4; \
1244  decode_top_nibble_next = 0; \
1245  } else { \
1246  last_byte = bytestream2_get_byteu(&gb); \
1247  nibble = last_byte & 0x0F; \
1248  decode_top_nibble_next = 1; \
1249  }
1250 
1251  while (samples < samples_end) {
1252 
1253  /* for this algorithm, c->status[0] is the sum channel and
1254  * c->status[1] is the diff channel */
1255 
1256  /* process the first predictor of the sum channel */
1257  DK3_GET_NEXT_NIBBLE();
1258  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1259 
1260  /* process the diff channel predictor */
1261  DK3_GET_NEXT_NIBBLE();
1262  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1263 
1264  /* process the first pair of stereo PCM samples */
1265  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1266  *samples++ = c->status[0].predictor + c->status[1].predictor;
1267  *samples++ = c->status[0].predictor - c->status[1].predictor;
1268 
1269  /* process the second predictor of the sum channel */
1270  DK3_GET_NEXT_NIBBLE();
1271  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1272 
1273  /* process the second pair of stereo PCM samples */
1274  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1275  *samples++ = c->status[0].predictor + c->status[1].predictor;
1276  *samples++ = c->status[0].predictor - c->status[1].predictor;
1277  }
1278 
1279  if ((bytestream2_tell(&gb) & 1))
1280  bytestream2_skip(&gb, 1);
1281  break;
1282  }
1284  for (channel = 0; channel < avctx->channels; channel++) {
1285  cs = &c->status[channel];
1286  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1287  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1288  if (cs->step_index > 88u){
1289  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1290  channel, cs->step_index);
1291  return AVERROR_INVALIDDATA;
1292  }
1293  }
1294 
1295  for (n = nb_samples >> (1 - st); n > 0; n--) {
1296  int v1, v2;
1297  int v = bytestream2_get_byteu(&gb);
1298  /* nibbles are swapped for mono */
1299  if (st) {
1300  v1 = v >> 4;
1301  v2 = v & 0x0F;
1302  } else {
1303  v2 = v >> 4;
1304  v1 = v & 0x0F;
1305  }
1306  *samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
1307  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1308  }
1309  break;
1311  for (channel = 0; channel < avctx->channels; channel++) {
1312  cs = &c->status[channel];
1313  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1314  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1315  if (cs->step_index > 88u){
1316  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1317  channel, cs->step_index);
1318  return AVERROR_INVALIDDATA;
1319  }
1320  }
1321 
1322  for (int subframe = 0; subframe < nb_samples / 256; subframe++) {
1323  for (channel = 0; channel < avctx->channels; channel++) {
1324  samples = samples_p[channel] + 256 * subframe;
1325  for (n = 0; n < 256; n += 2) {
1326  int v = bytestream2_get_byteu(&gb);
1327  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1328  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1329  }
1330  }
1331  }
1332  break;
1334  for (channel = 0; channel < avctx->channels; channel++) {
1335  cs = &c->status[channel];
1336  samples = samples_p[channel];
1337  bytestream2_skip(&gb, 4);
1338  for (n = 0; n < nb_samples; n += 2) {
1339  int v = bytestream2_get_byteu(&gb);
1340  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
1341  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1342  }
1343  }
1344  break;
1346  for (n = nb_samples >> (1 - st); n > 0; n--) {
1347  int v = bytestream2_get_byteu(&gb);
1348  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
1349  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1350  }
1351  break;
1353  for (n = nb_samples >> (1 - st); n > 0; n--) {
1354  int v = bytestream2_get_byteu(&gb);
1355  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0], v >> 4 );
1356  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0x0F);
1357  }
1358  break;
1360  for (n = nb_samples / 2; n > 0; n--) {
1361  for (channel = 0; channel < avctx->channels; channel++) {
1362  int v = bytestream2_get_byteu(&gb);
1363  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[channel], v >> 4 );
1364  samples[st] = adpcm_ima_qt_expand_nibble(&c->status[channel], v & 0x0F);
1365  }
1366  samples += avctx->channels;
1367  }
1368  break;
1370  for (n = nb_samples / 2; n > 0; n--) {
1371  for (channel = 0; channel < avctx->channels; channel++) {
1372  int v = bytestream2_get_byteu(&gb);
1373  *samples++ = adpcm_ima_alp_expand_nibble(&c->status[channel], v >> 4 , 2);
1374  samples[st] = adpcm_ima_alp_expand_nibble(&c->status[channel], v & 0x0F, 2);
1375  }
1376  samples += avctx->channels;
1377  }
1378  break;
1380  for (channel = 0; channel < avctx->channels; channel++) {
1381  int16_t *smp = samples_p[channel];
1382  for (n = 0; n < nb_samples / 2; n++) {
1383  int v = bytestream2_get_byteu(&gb);
1384  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v & 0x0F);
1385  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v >> 4);
1386  }
1387  }
1388  break;
1390  for (n = nb_samples >> (1 - st); n > 0; n--) {
1391  int v = bytestream2_get_byteu(&gb);
1392  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4 );
1393  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1394  }
1395  break;
1397  for (channel = 0; channel < avctx->channels; channel++) {
1398  cs = &c->status[channel];
1399  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1400  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1401  if (cs->step_index > 88u){
1402  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1403  channel, cs->step_index);
1404  return AVERROR_INVALIDDATA;
1405  }
1406  }
1407  for (n = 0; n < nb_samples / 2; n++) {
1408  int byte[2];
1409 
1410  byte[0] = bytestream2_get_byteu(&gb);
1411  if (st)
1412  byte[1] = bytestream2_get_byteu(&gb);
1413  for(channel = 0; channel < avctx->channels; channel++) {
1414  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] & 0x0F, 3);
1415  }
1416  for(channel = 0; channel < avctx->channels; channel++) {
1417  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4 , 3);
1418  }
1419  }
1420  break;
1422  if (c->vqa_version == 3) {
1423  for (channel = 0; channel < avctx->channels; channel++) {
1424  int16_t *smp = samples_p[channel];
1425 
1426  for (n = nb_samples / 2; n > 0; n--) {
1427  int v = bytestream2_get_byteu(&gb);
1428  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1429  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1430  }
1431  }
1432  } else {
1433  for (n = nb_samples / 2; n > 0; n--) {
1434  for (channel = 0; channel < avctx->channels; channel++) {
1435  int v = bytestream2_get_byteu(&gb);
1436  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
1437  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1438  }
1439  samples += avctx->channels;
1440  }
1441  }
1442  bytestream2_seek(&gb, 0, SEEK_END);
1443  break;
1444  case AV_CODEC_ID_ADPCM_XA:
1445  {
1446  int16_t *out0 = samples_p[0];
1447  int16_t *out1 = samples_p[1];
1448  int samples_per_block = 28 * (3 - avctx->channels) * 4;
1449  int sample_offset = 0;
1450  int bytes_remaining;
1451  while (bytestream2_get_bytes_left(&gb) >= 128) {
1452  if ((ret = xa_decode(avctx, out0, out1, buf + bytestream2_tell(&gb),
1453  &c->status[0], &c->status[1],
1454  avctx->channels, sample_offset)) < 0)
1455  return ret;
1456  bytestream2_skipu(&gb, 128);
1457  sample_offset += samples_per_block;
1458  }
1459  /* Less than a full block of data left, e.g. when reading from
1460  * 2324 byte per sector XA; the remainder is padding */
1461  bytes_remaining = bytestream2_get_bytes_left(&gb);
1462  if (bytes_remaining > 0) {
1463  bytestream2_skip(&gb, bytes_remaining);
1464  }
1465  break;
1466  }
1468  for (i=0; i<=st; i++) {
1469  c->status[i].step_index = bytestream2_get_le32u(&gb);
1470  if (c->status[i].step_index > 88u) {
1471  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n",
1472  i, c->status[i].step_index);
1473  return AVERROR_INVALIDDATA;
1474  }
1475  }
1476  for (i=0; i<=st; i++) {
1477  c->status[i].predictor = bytestream2_get_le32u(&gb);
1478  if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
1479  return AVERROR_INVALIDDATA;
1480  }
1481 
1482  for (n = nb_samples >> (1 - st); n > 0; n--) {
1483  int byte = bytestream2_get_byteu(&gb);
1484  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1485  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1486  }
1487  break;
1489  for (n = nb_samples >> (1 - st); n > 0; n--) {
1490  int byte = bytestream2_get_byteu(&gb);
1491  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1492  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1493  }
1494  break;
1495  case AV_CODEC_ID_ADPCM_EA:
1496  {
1497  int previous_left_sample, previous_right_sample;
1498  int current_left_sample, current_right_sample;
1499  int next_left_sample, next_right_sample;
1500  int coeff1l, coeff2l, coeff1r, coeff2r;
1501  int shift_left, shift_right;
1502 
1503  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
1504  each coding 28 stereo samples. */
1505 
1506  if(avctx->channels != 2)
1507  return AVERROR_INVALIDDATA;
1508 
1509  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1510  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1511  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1512  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1513 
1514  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1515  int byte = bytestream2_get_byteu(&gb);
1516  coeff1l = ea_adpcm_table[ byte >> 4 ];
1517  coeff2l = ea_adpcm_table[(byte >> 4 ) + 4];
1518  coeff1r = ea_adpcm_table[ byte & 0x0F];
1519  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1520 
1521  byte = bytestream2_get_byteu(&gb);
1522  shift_left = 20 - (byte >> 4);
1523  shift_right = 20 - (byte & 0x0F);
1524 
1525  for (count2 = 0; count2 < 28; count2++) {
1526  byte = bytestream2_get_byteu(&gb);
1527  next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1528  next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1529 
1530  next_left_sample = (next_left_sample +
1531  (current_left_sample * coeff1l) +
1532  (previous_left_sample * coeff2l) + 0x80) >> 8;
1533  next_right_sample = (next_right_sample +
1534  (current_right_sample * coeff1r) +
1535  (previous_right_sample * coeff2r) + 0x80) >> 8;
1536 
1537  previous_left_sample = current_left_sample;
1538  current_left_sample = av_clip_int16(next_left_sample);
1539  previous_right_sample = current_right_sample;
1540  current_right_sample = av_clip_int16(next_right_sample);
1541  *samples++ = current_left_sample;
1542  *samples++ = current_right_sample;
1543  }
1544  }
1545 
1546  bytestream2_skip(&gb, 2); // Skip terminating 0x0000
1547 
1548  break;
1549  }
1551  {
1552  int coeff[2][2], shift[2];
1553 
1554  for(channel = 0; channel < avctx->channels; channel++) {
1555  int byte = bytestream2_get_byteu(&gb);
1556  for (i=0; i<2; i++)
1557  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4*i];
1558  shift[channel] = 20 - (byte & 0x0F);
1559  }
1560  for (count1 = 0; count1 < nb_samples / 2; count1++) {
1561  int byte[2];
1562 
1563  byte[0] = bytestream2_get_byteu(&gb);
1564  if (st) byte[1] = bytestream2_get_byteu(&gb);
1565  for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1566  for(channel = 0; channel < avctx->channels; channel++) {
1567  int sample = sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
1568  sample = (sample +
1569  c->status[channel].sample1 * coeff[channel][0] +
1570  c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
1571  c->status[channel].sample2 = c->status[channel].sample1;
1572  c->status[channel].sample1 = av_clip_int16(sample);
1573  *samples++ = c->status[channel].sample1;
1574  }
1575  }
1576  }
1577  bytestream2_seek(&gb, 0, SEEK_END);
1578  break;
1579  }
1580  case AV_CODEC_ID_ADPCM_EA_R1:
1581  case AV_CODEC_ID_ADPCM_EA_R2:
1582  case AV_CODEC_ID_ADPCM_EA_R3: {
1583  /* channel numbering
1584  2chan: 0=fl, 1=fr
1585  4chan: 0=fl, 1=rl, 2=fr, 3=rr
1586  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
1587  const int big_endian = avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R3;
1588  int previous_sample, current_sample, next_sample;
1589  int coeff1, coeff2;
1590  int shift;
1591  unsigned int channel;
1592  uint16_t *samplesC;
1593  int count = 0;
1594  int offsets[6];
1595 
1596  for (channel=0; channel<avctx->channels; channel++)
1597  offsets[channel] = (big_endian ? bytestream2_get_be32(&gb) :
1598  bytestream2_get_le32(&gb)) +
1599  (avctx->channels + 1) * 4;
1600 
1601  for (channel=0; channel<avctx->channels; channel++) {
1602  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
1603  samplesC = samples_p[channel];
1604 
1605  if (avctx->codec->id == AV_CODEC_ID_ADPCM_EA_R1) {
1606  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1607  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
1608  } else {
1609  current_sample = c->status[channel].predictor;
1610  previous_sample = c->status[channel].prev_sample;
1611  }
1612 
1613  for (count1 = 0; count1 < nb_samples / 28; count1++) {
1614  int byte = bytestream2_get_byte(&gb);
1615  if (byte == 0xEE) { /* only seen in R2 and R3 */
1616  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1617  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
1618 
1619  for (count2=0; count2<28; count2++)
1620  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
1621  } else {
1622  coeff1 = ea_adpcm_table[ byte >> 4 ];
1623  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
1624  shift = 20 - (byte & 0x0F);
1625 
1626  for (count2=0; count2<28; count2++) {
1627  if (count2 & 1)
1628  next_sample = (unsigned)sign_extend(byte, 4) << shift;
1629  else {
1630  byte = bytestream2_get_byte(&gb);
1631  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
1632  }
1633 
1634  next_sample += (current_sample * coeff1) +
1635  (previous_sample * coeff2);
1636  next_sample = av_clip_int16(next_sample >> 8);
1637 
1638  previous_sample = current_sample;
1639  current_sample = next_sample;
1640  *samplesC++ = current_sample;
1641  }
1642  }
1643  }
1644  if (!count) {
1645  count = count1;
1646  } else if (count != count1) {
1647  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
1648  count = FFMAX(count, count1);
1649  }
1650 
1651  if (avctx->codec->id != AV_CODEC_ID_ADPCM_EA_R1) {
1652  c->status[channel].predictor = current_sample;
1653  c->status[channel].prev_sample = previous_sample;
1654  }
1655  }
1656 
1657  frame->nb_samples = count * 28;
1658  bytestream2_seek(&gb, 0, SEEK_END);
1659  break;
1660  }
1662  for (channel=0; channel<avctx->channels; channel++) {
1663  int coeff[2][4], shift[4];
1664  int16_t *s = samples_p[channel];
1665  for (n = 0; n < 4; n++, s += 32) {
1666  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1667  for (i=0; i<2; i++)
1668  coeff[i][n] = ea_adpcm_table[(val&0x0F)+4*i];
1669  s[0] = val & ~0x0F;
1670 
1671  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1672  shift[n] = 20 - (val & 0x0F);
1673  s[1] = val & ~0x0F;
1674  }
1675 
1676  for (m=2; m<32; m+=2) {
1677  s = &samples_p[channel][m];
1678  for (n = 0; n < 4; n++, s += 32) {
1679  int level, pred;
1680  int byte = bytestream2_get_byteu(&gb);
1681 
1682  level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
1683  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1684  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1685 
1686  level = sign_extend(byte, 4) * (1 << shift[n]);
1687  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1688  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1689  }
1690  }
1691  }
1692  break;
1693  case AV_CODEC_ID_ADPCM_IMA_AMV:
1694  av_assert0(avctx->channels == 1);
1695 
1696  /*
1697  * Header format:
1698  * int16_t predictor;
1699  * uint8_t step_index;
1700  * uint8_t reserved;
1701  * uint32_t frame_size;
1702  *
1703  * Some implementations have step_index as 16-bits, but others
1704  * only use the lower 8 and store garbage in the upper 8.
1705  */
1706  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1707  c->status[0].step_index = bytestream2_get_byteu(&gb);
1708  bytestream2_skipu(&gb, 5);
1709  if (c->status[0].step_index > 88u) {
1710  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1711  c->status[0].step_index);
1712  return AVERROR_INVALIDDATA;
1713  }
1714 
1715  for (n = nb_samples >> 1; n > 0; n--) {
1716  int v = bytestream2_get_byteu(&gb);
1717 
1718  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1719  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1720  }
1721 
1722  if (nb_samples & 1) {
1723  int v = bytestream2_get_byteu(&gb);
1724  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1725 
1726  if (v & 0x0F) {
1727  /* Holds true on all the http://samples.mplayerhq.hu/amv samples. */
1728  av_log(avctx, AV_LOG_WARNING, "Last nibble set on packet with odd sample count.\n");
1729  av_log(avctx, AV_LOG_WARNING, "Sample will be skipped.\n");
1730  }
1731  }
1732  break;
1734  for (i = 0; i < avctx->channels; i++) {
1735  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1736  c->status[i].step_index = bytestream2_get_byteu(&gb);
1737  bytestream2_skipu(&gb, 1);
1738  if (c->status[i].step_index > 88u) {
1739  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1740  c->status[i].step_index);
1741  return AVERROR_INVALIDDATA;
1742  }
1743  }
1744 
1745  for (n = nb_samples >> (1 - st); n > 0; n--) {
1746  int v = bytestream2_get_byteu(&gb);
1747 
1748  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0 ], v >> 4 );
1749  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
1750  }
1751  break;
1752  case AV_CODEC_ID_ADPCM_CT:
1753  for (n = nb_samples >> (1 - st); n > 0; n--) {
1754  int v = bytestream2_get_byteu(&gb);
1755  *samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
1756  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1757  }
1758  break;
1762  if (!c->status[0].step_index) {
1763  /* the first byte is a raw sample */
1764  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1765  if (st)
1766  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
1767  c->status[0].step_index = 1;
1768  nb_samples--;
1769  }
1770  if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_4) {
1771  for (n = nb_samples >> (1 - st); n > 0; n--) {
1772  int byte = bytestream2_get_byteu(&gb);
1773  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1774  byte >> 4, 4, 0);
1775  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1776  byte & 0x0F, 4, 0);
1777  }
1778  } else if (avctx->codec->id == AV_CODEC_ID_ADPCM_SBPRO_3) {
1779  for (n = (nb_samples<<st) / 3; n > 0; n--) {
1780  int byte = bytestream2_get_byteu(&gb);
1781  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1782  byte >> 5 , 3, 0);
1783  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1784  (byte >> 2) & 0x07, 3, 0);
1785  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1786  byte & 0x03, 2, 0);
1787  }
1788  } else {
1789  for (n = nb_samples >> (2 - st); n > 0; n--) {
1790  int byte = bytestream2_get_byteu(&gb);
1791  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1792  byte >> 6 , 2, 2);
1793  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1794  (byte >> 4) & 0x03, 2, 2);
1795  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
1796  (byte >> 2) & 0x03, 2, 2);
1797  *samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
1798  byte & 0x03, 2, 2);
1799  }
1800  }
1801  break;
1802  case AV_CODEC_ID_ADPCM_SWF:
1803  adpcm_swf_decode(avctx, buf, buf_size, samples);
1804  bytestream2_seek(&gb, 0, SEEK_END);
1805  break;
1806  case AV_CODEC_ID_ADPCM_YAMAHA:
1807  for (n = nb_samples >> (1 - st); n > 0; n--) {
1808  int v = bytestream2_get_byteu(&gb);
1809  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
1810  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
1811  }
1812  break;
1813  case AV_CODEC_ID_ADPCM_AICA:
1814  for (channel = 0; channel < avctx->channels; channel++) {
1815  samples = samples_p[channel];
1816  for (n = nb_samples >> 1; n > 0; n--) {
1817  int v = bytestream2_get_byteu(&gb);
1818  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
1819  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4 );
1820  }
1821  }
1822  break;
1823  case AV_CODEC_ID_ADPCM_AFC:
1824  {
1825  int samples_per_block;
1826  int blocks;
1827 
1828  if (avctx->extradata && avctx->extradata_size == 1 && avctx->extradata[0]) {
1829  samples_per_block = avctx->extradata[0] / 16;
1830  blocks = nb_samples / avctx->extradata[0];
1831  } else {
1832  samples_per_block = nb_samples / 16;
1833  blocks = 1;
1834  }
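 /* Each run of 16 output samples is coded as one scale/coefficient-index
  * byte followed by eight nibble bytes, i.e. nine bytes per 16 samples for
  * every channel. */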
1835 
1836  for (m = 0; m < blocks; m++) {
1837  for (channel = 0; channel < avctx->channels; channel++) {
1838  int prev1 = c->status[channel].sample1;
1839  int prev2 = c->status[channel].sample2;
1840 
1841  samples = samples_p[channel] + m * 16;
1842  /* Read in every sample for this channel. */
1843  for (i = 0; i < samples_per_block; i++) {
1844  int byte = bytestream2_get_byteu(&gb);
1845  int scale = 1 << (byte >> 4);
1846  int index = byte & 0xf;
1847  int factor1 = ff_adpcm_afc_coeffs[0][index];
1848  int factor2 = ff_adpcm_afc_coeffs[1][index];
1849 
1850  /* Decode 16 samples. */
1851  for (n = 0; n < 16; n++) {
1852  int32_t sampledat;
1853 
1854  if (n & 1) {
1855  sampledat = sign_extend(byte, 4);
1856  } else {
1857  byte = bytestream2_get_byteu(&gb);
1858  sampledat = sign_extend(byte >> 4, 4);
1859  }
1860 
1861  sampledat = ((prev1 * factor1 + prev2 * factor2) >> 11) +
1862  sampledat * scale;
1863  *samples = av_clip_int16(sampledat);
1864  prev2 = prev1;
1865  prev1 = *samples++;
1866  }
1867  }
1868 
1869  c->status[channel].sample1 = prev1;
1870  c->status[channel].sample2 = prev2;
1871  }
1872  }
1873  bytestream2_seek(&gb, 0, SEEK_END);
1874  break;
1875  }
1876  case AV_CODEC_ID_ADPCM_THP:
1877  case AV_CODEC_ID_ADPCM_THP_LE:
1878  {
1879  int table[14][16];
1880  int ch;
1881 
1882 #define THP_GET16(g) \
1883  sign_extend( \
1884  avctx->codec->id == AV_CODEC_ID_ADPCM_THP_LE ? \
1885  bytestream2_get_le16u(&(g)) : \
1886  bytestream2_get_be16u(&(g)), 16)
1887 
1888  if (avctx->extradata) {
1889  GetByteContext tb;
1890  if (avctx->extradata_size < 32 * avctx->channels) {
1891  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
1892  return AVERROR_INVALIDDATA;
1893  }
1894 
1895  bytestream2_init(&tb, avctx->extradata, avctx->extradata_size);
1896  for (i = 0; i < avctx->channels; i++)
1897  for (n = 0; n < 16; n++)
1898  table[i][n] = THP_GET16(tb);
1899  } else {
1900  for (i = 0; i < avctx->channels; i++)
1901  for (n = 0; n < 16; n++)
1902  table[i][n] = THP_GET16(gb);
1903 
1904  if (!c->has_status) {
1905  /* Initialize the previous sample. */
1906  for (i = 0; i < avctx->channels; i++) {
1907  c->status[i].sample1 = THP_GET16(gb);
1908  c->status[i].sample2 = THP_GET16(gb);
1909  }
1910  c->has_status = 1;
1911  } else {
1912  bytestream2_skip(&gb, avctx->channels * 4);
1913  }
1914  }
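 /* At this point table[] holds eight coefficient pairs (16 int16_t, i.e.
  * 32 bytes) per channel. Without extradata the table is carried in-band
  * and, on the first packet only, is followed by the two history samples
  * of each channel. */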
1915 
1916  for (ch = 0; ch < avctx->channels; ch++) {
1917  samples = samples_p[ch];
1918 
1919  /* Read in every sample for this channel. */
1920  for (i = 0; i < (nb_samples + 13) / 14; i++) {
1921  int byte = bytestream2_get_byteu(&gb);
1922  int index = (byte >> 4) & 7;
1923  unsigned int exp = byte & 0x0F;
1924  int64_t factor1 = table[ch][index * 2];
1925  int64_t factor2 = table[ch][index * 2 + 1];
1926 
1927  /* Decode 14 samples. */
1928  for (n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
1929  int32_t sampledat;
1930 
1931  if (n & 1) {
1932  sampledat = sign_extend(byte, 4);
1933  } else {
1934  byte = bytestream2_get_byteu(&gb);
1935  sampledat = sign_extend(byte >> 4, 4);
1936  }
1937 
1938  sampledat = ((c->status[ch].sample1 * factor1
1939  + c->status[ch].sample2 * factor2) >> 11) + sampledat * (1 << exp);
1940  *samples = av_clip_int16(sampledat);
1941  c->status[ch].sample2 = c->status[ch].sample1;
1942  c->status[ch].sample1 = *samples++;
1943  }
1944  }
1945  }
1946  break;
1947  }
1948  case AV_CODEC_ID_ADPCM_DTK:
1949  for (channel = 0; channel < avctx->channels; channel++) {
1950  samples = samples_p[channel];
1951 
1952  /* Read in every sample for this channel. */
1953  for (i = 0; i < nb_samples / 28; i++) {
1954  int byte, header;
1955  if (channel)
1956  bytestream2_skipu(&gb, 1);
1957  header = bytestream2_get_byteu(&gb);
1958  bytestream2_skipu(&gb, 3 - channel);
1959 
1960  /* Decode 28 samples. */
1961  for (n = 0; n < 28; n++) {
1962  int32_t sampledat, prev;
1963 
1964  switch (header >> 4) {
1965  case 1:
1966  prev = (c->status[channel].sample1 * 0x3c);
1967  break;
1968  case 2:
1969  prev = (c->status[channel].sample1 * 0x73) - (c->status[channel].sample2 * 0x34);
1970  break;
1971  case 3:
1972  prev = (c->status[channel].sample1 * 0x62) - (c->status[channel].sample2 * 0x37);
1973  break;
1974  default:
1975  prev = 0;
1976  }
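 /* The fixed predictors above (60; 115,-52; 98,-55) match the first three
  * non-trivial coefficient pairs of xa_adpcm_table, applied here with a
  * rounded division by 64. */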
1977 
1978  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
1979 
1980  byte = bytestream2_get_byteu(&gb);
1981  if (!channel)
1982  sampledat = sign_extend(byte, 4);
1983  else
1984  sampledat = sign_extend(byte >> 4, 4);
1985 
1986  sampledat = ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
1987  *samples++ = av_clip_int16(sampledat >> 6);
1988  c->status[channel].sample2 = c->status[channel].sample1;
1989  c->status[channel].sample1 = sampledat;
1990  }
1991  }
1992  if (!channel)
1993  bytestream2_seek(&gb, 0, SEEK_SET);
1994  }
1995  break;
1996  case AV_CODEC_ID_ADPCM_PSX:
1997  for (int block = 0; block < avpkt->size / FFMAX(avctx->block_align, 16 * avctx->channels); block++) {
1998  int nb_samples_per_block = 28 * FFMAX(avctx->block_align, 16 * avctx->channels) / (16 * avctx->channels);
1999  for (channel = 0; channel < avctx->channels; channel++) {
2000  samples = samples_p[channel] + block * nb_samples_per_block;
2001  av_assert0((block + 1) * nb_samples_per_block <= nb_samples);
2002 
2003  /* Read in every sample for this channel. */
2004  for (i = 0; i < nb_samples_per_block / 28; i++) {
2005  int filter, shift, flag, byte;
2006 
2007  filter = bytestream2_get_byteu(&gb);
2008  shift = filter & 0xf;
2009  filter = filter >> 4;
2010  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table))
2011  return AVERROR_INVALIDDATA;
2012  flag = bytestream2_get_byteu(&gb);
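 /* A flag of 0x07 or above leaves the whole 28-sample group at zero;
  * e.g. a header byte of 0x23 selects shift 3 with filter 2. */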
2013 
2014  /* Decode 28 samples. */
2015  for (n = 0; n < 28; n++) {
2016  int sample = 0, scale;
2017 
2018  if (flag < 0x07) {
2019  if (n & 1) {
2020  scale = sign_extend(byte >> 4, 4);
2021  } else {
2022  byte = bytestream2_get_byteu(&gb);
2023  scale = sign_extend(byte, 4);
2024  }
2025 
2026  scale = scale * (1 << 12);
2027  sample = (int)((scale >> shift) + (c->status[channel].sample1 * xa_adpcm_table[filter][0] + c->status[channel].sample2 * xa_adpcm_table[filter][1]) / 64);
2028  }
2029  *samples++ = av_clip_int16(sample);
2030  c->status[channel].sample2 = c->status[channel].sample1;
2031  c->status[channel].sample1 = sample;
2032  }
2033  }
2034  }
2035  }
2036  break;
2037  case AV_CODEC_ID_ADPCM_ARGO:
2038  /*
2039  * The format of each block:
2040  * uint8_t left_control;
2041  * uint4_t left_samples[nb_samples];
2042  * ---- and if stereo ----
2043  * uint8_t right_control;
2044  * uint4_t right_samples[nb_samples];
2045  *
2046  * Format of the control byte:
2047  * MSB [SSSSRDRR] LSB
2048  * S = (Shift Amount - 2)
2049  * D = Decoder flag.
2050  * R = Reserved
2051  *
2052  * Each block relies on the previous two samples of each channel.
2053  * They should be 0 initially.
2054  */
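 /* Worked example: a control byte of 0x93 (binary 10010011) yields a shift
  * of (0x93 >> 4) + 2 = 11 and a cleared decoder flag (0x93 & 0x04 == 0). */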
2055  for (int block = 0; block < avpkt->size / avctx->block_align; block++) {
2056  for (channel = 0; channel < avctx->channels; channel++) {
2057  int control, shift;
2058 
2059  samples = samples_p[channel] + block * 32;
2060  cs = c->status + channel;
2061 
2062  /* Get the control byte and decode the samples, 2 at a time. */
2063  control = bytestream2_get_byteu(&gb);
2064  shift = (control >> 4) + 2;
2065 
2066  for (n = 0; n < 16; n++) {
2067  int sample = bytestream2_get_byteu(&gb);
2068  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 4, shift, control & 0x04);
2069  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 0, shift, control & 0x04);
2070  }
2071  }
2072  }
2073  break;
2074  case AV_CODEC_ID_ADPCM_ZORK:
2075  for (n = 0; n < nb_samples * avctx->channels; n++) {
2076  int v = bytestream2_get_byteu(&gb);
2077  *samples++ = adpcm_zork_expand_nibble(&c->status[n % avctx->channels], v);
2078  }
2079  break;
2080  case AV_CODEC_ID_ADPCM_IMA_MTF:
2081  for (n = nb_samples / 2; n > 0; n--) {
2082  for (channel = 0; channel < avctx->channels; channel++) {
2083  int v = bytestream2_get_byteu(&gb);
2084  *samples++ = adpcm_ima_mtf_expand_nibble(&c->status[channel], v >> 4);
2085  samples[st] = adpcm_ima_mtf_expand_nibble(&c->status[channel], v & 0x0F);
2086  }
2087  samples += avctx->channels;
2088  }
2089  break;
2090  default:
2091  av_assert0(0); // unsupported codec_id should not happen
2092  }
2093 
2094  if (avpkt->size && bytestream2_tell(&gb) == 0) {
2095  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
2096  return AVERROR_INVALIDDATA;
2097  }
2098 
2099  *got_frame_ptr = 1;
2100 
2101  if (avpkt->size < bytestream2_tell(&gb)) {
2102  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size, bytestream2_tell(&gb));
2103  return avpkt->size;
2104  }
2105 
2106  return bytestream2_tell(&gb);
2107 }
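/*
 * Illustrative only (not part of the original adpcm.c): a minimal caller that
 * reaches adpcm_decode_frame() through the public libavcodec API. The codec
 * name, channel count and sample rate are example values; every function used
 * here is declared in avcodec.h, which this file already includes.
 */
static AVCodecContext *open_adpcm_decoder_example(void)
{
    const AVCodec *codec = avcodec_find_decoder_by_name("adpcm_ima_qt");
    AVCodecContext *dec;

    if (!codec)
        return NULL;
    dec = avcodec_alloc_context3(codec);
    if (!dec)
        return NULL;
    dec->channels    = 2;     /* example stream parameters */
    dec->sample_rate = 44100;
    if (avcodec_open2(dec, codec, NULL) < 0) {
        avcodec_free_context(&dec);
        return NULL;
    }
    return dec;
}

static int decode_adpcm_packet_example(AVCodecContext *dec, AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec, pkt);
    if (ret < 0)
        return ret;
    /* One ADPCM packet normally produces one frame of decoded PCM. */
    return avcodec_receive_frame(dec, frame);
}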
2108 
2109 static void adpcm_flush(AVCodecContext *avctx)
2110 {
2111  ADPCMDecodeContext *c = avctx->priv_data;
2112 
2113  switch(avctx->codec_id) {
2114  case AV_CODEC_ID_ADPCM_CT:
2115  for (int channel = 0; channel < avctx->channels; channel++)
2116  c->status[channel].step = 0;
2117  break;
2118 
2119  case AV_CODEC_ID_ADPCM_IMA_APC:
2120  for (int channel = 0; channel < avctx->channels; channel++) {
2121  c->status[channel].sample1 = 0;
2122  c->status[channel].sample2 = 0;
2123  }
2124  break;
2125 
2126  case AV_CODEC_ID_ADPCM_IMA_APM:
2127  case AV_CODEC_ID_ADPCM_IMA_ALP:
2128  case AV_CODEC_ID_ADPCM_IMA_MTF:
2129  case AV_CODEC_ID_ADPCM_IMA_CUNNING:
2130  for (int channel = 0; channel < avctx->channels; channel++) {
2131  c->status[channel].predictor = 0;
2132  c->status[channel].step_index = 0;
2133  }
2134  break;
2135 
2136  default:
2137  /* Other codecs may want to handle this during decoding. */
2138  c->has_status = 0;
2139  return;
2140  }
2141 
2142  c->has_status = 1;
2143 }
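/*
 * Illustrative only (not part of the original adpcm.c): adpcm_flush() is not
 * called directly; it runs when the caller flushes the codec context, for
 * instance after a seek.
 */
static void reset_adpcm_decoder_example(AVCodecContext *dec)
{
    /* Discards buffered state and, for the codecs handled above, clears the
     * ADPCM prediction state through adpcm_flush(). */
    avcodec_flush_buffers(dec);
}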
2144 
2145 
2146 static const enum AVSampleFormat sample_fmts_s16[] = { AV_SAMPLE_FMT_S16,
2147  AV_SAMPLE_FMT_NONE };
2148 static const enum AVSampleFormat sample_fmts_s16p[] = { AV_SAMPLE_FMT_S16P,
2149  AV_SAMPLE_FMT_NONE };
2150 static const enum AVSampleFormat sample_fmts_both[] = { AV_SAMPLE_FMT_S16,
2151  AV_SAMPLE_FMT_S16P,
2152  AV_SAMPLE_FMT_NONE };
2153 
2154 #define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
2155 AVCodec ff_ ## name_ ## _decoder = { \
2156  .name = #name_, \
2157  .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
2158  .type = AVMEDIA_TYPE_AUDIO, \
2159  .id = id_, \
2160  .priv_data_size = sizeof(ADPCMDecodeContext), \
2161  .init = adpcm_decode_init, \
2162  .decode = adpcm_decode_frame, \
2163  .flush = adpcm_flush, \
2164  .capabilities = AV_CODEC_CAP_DR1, \
2165  .sample_fmts = sample_fmts_, \
2166  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE, \
2167 }
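/*
 * For reference (not in the original file): written out by hand, the
 * ADPCM_DECODER() line for adpcm_ima_qt below expands to roughly the
 * following; it is kept under #if 0 so it is not compiled a second time.
 */
#if 0
AVCodec ff_adpcm_ima_qt_decoder = {
    .name           = "adpcm_ima_qt",
    .long_name      = NULL_IF_CONFIG_SMALL("ADPCM IMA QuickTime"),
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_ADPCM_IMA_QT,
    .priv_data_size = sizeof(ADPCMDecodeContext),
    .init           = adpcm_decode_init,
    .decode         = adpcm_decode_frame,
    .flush          = adpcm_flush,
    .capabilities   = AV_CODEC_CAP_DR1,
    .sample_fmts    = sample_fmts_s16p,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
};
#endif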
2168 
2169 /* Note: Do not forget to add new entries to the Makefile as well. */
2170 ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM, sample_fmts_s16p, adpcm_4xm, "ADPCM 4X Movie");
2171 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AFC, sample_fmts_s16p, adpcm_afc, "ADPCM Nintendo Gamecube AFC");
2172 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AGM, sample_fmts_s16, adpcm_agm, "ADPCM AmuseGraphics Movie");
2173 ADPCM_DECODER(AV_CODEC_ID_ADPCM_AICA, sample_fmts_s16p, adpcm_aica, "ADPCM Yamaha AICA");
2174 ADPCM_DECODER(AV_CODEC_ID_ADPCM_ARGO, sample_fmts_s16p, adpcm_argo, "ADPCM Argonaut Games");
2175 ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology");
2176 ADPCM_DECODER(AV_CODEC_ID_ADPCM_DTK, sample_fmts_s16p, adpcm_dtk, "ADPCM Nintendo Gamecube DTK");
2177 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts");
2178 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
2179 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R1, sample_fmts_s16p, adpcm_ea_r1, "ADPCM Electronic Arts R1");
2180 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R2, sample_fmts_s16p, adpcm_ea_r2, "ADPCM Electronic Arts R2");
2181 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3, sample_fmts_s16p, adpcm_ea_r3, "ADPCM Electronic Arts R3");
2182 ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS, sample_fmts_s16p, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
2183 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV");
2184 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC");
2185 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APM, sample_fmts_s16, adpcm_ima_apm, "ADPCM IMA Ubisoft APM");
2186 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_CUNNING, sample_fmts_s16p, adpcm_ima_cunning, "ADPCM IMA Cunning Developments");
2187 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DAT4, sample_fmts_s16, adpcm_ima_dat4, "ADPCM IMA Eurocom DAT4");
2188 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
2189 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
2190 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
2191 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
2192 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
2193 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_MOFLEX, sample_fmts_s16p, adpcm_ima_moflex, "ADPCM IMA MobiClip MOFLEX");
2194 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_MTF, sample_fmts_s16, adpcm_ima_mtf, "ADPCM IMA Capcom's MT Framework");
2195 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_OKI, sample_fmts_s16, adpcm_ima_oki, "ADPCM IMA Dialogic OKI");
2196 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT, sample_fmts_s16p, adpcm_ima_qt, "ADPCM IMA QuickTime");
2197 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_RAD, sample_fmts_s16, adpcm_ima_rad, "ADPCM IMA Radical");
2198 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SSI, sample_fmts_s16, adpcm_ima_ssi, "ADPCM IMA Simon & Schuster Interactive");
2199 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
2200 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ALP, sample_fmts_s16, adpcm_ima_alp, "ADPCM IMA High Voltage Software ALP");
2201 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WAV, sample_fmts_s16p, adpcm_ima_wav, "ADPCM IMA WAV");
2202 ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS, sample_fmts_both, adpcm_ima_ws, "ADPCM IMA Westwood");
2203 ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS, sample_fmts_both, adpcm_ms, "ADPCM Microsoft");
2204 ADPCM_DECODER(AV_CODEC_ID_ADPCM_MTAF, sample_fmts_s16p, adpcm_mtaf, "ADPCM MTAF");
2205 ADPCM_DECODER(AV_CODEC_ID_ADPCM_PSX, sample_fmts_s16p, adpcm_psx, "ADPCM Playstation");
2206 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
2207 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
2208 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
2209 ADPCM_DECODER(AV_CODEC_ID_ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash");
2210 ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP_LE, sample_fmts_s16p, adpcm_thp_le, "ADPCM Nintendo THP (little-endian)");
2211 ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP, sample_fmts_s16p, adpcm_thp, "ADPCM Nintendo THP");
2212 ADPCM_DECODER(AV_CODEC_ID_ADPCM_XA, sample_fmts_s16, adpcm_xa, "ADPCM CDROM XA");
2213 ADPCM_DECODER(AV_CODEC_ID_ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha");
2214 ADPCM_DECODER(AV_CODEC_ID_ADPCM_ZORK, sample_fmts_s16, adpcm_zork, "ADPCM Zork");