#define OFFSET(x) offsetof(MCompandContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

    { "args", "set parameters for each band", OFFSET(args), AV_OPT_TYPE_STRING,
      { .str = "0.005,0.1 6 -47/-40,-34/-34,-17/-33 100 | 0.003,0.05 6 -47/-40,-34/-34,-17/-33 400 | 0.000625,0.0125 6 -47/-40,-34/-34,-15/-33 1600 | 0.0001,0.025 6 -47/-40,-34/-34,-31/-31,-0/-30 6400 | 0,0.025 6 -38/-31,-28/-28,-0/-25 22000" },
      0, 0, A },
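/* Hedged reading of the "args" format, inferred from the default value above
 * and from the parsing in config_output() below: bands are separated by '|',
 * and each band appears to carry attack/decay pairs, a soft-knee width in dB,
 * the transfer-function points as in/out dB pairs, and the band's upper
 * crossover frequency, with further optional fields (delay, initial volume,
 * gain) read afterwards. The constant below is illustrative only and not part
 * of the filter. */
static const char *example_band =
    "0.005,0.1"                /* attack and decay times in seconds       */
    " 6"                       /* soft-knee width in dB                   */
    " -47/-40,-34/-34,-17/-33" /* transfer-function points, in/out dB     */
    " 100";                    /* top crossover frequency of the band, Hz */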
    for (i = 0; i < s->nb_bands; i++) {
static void count_items(char *item_str, int *nb_items, char delimiter)
{
    char *p;

    *nb_items = 1;
    for (p = item_str; *p; p++) {
        if (*p == delimiter)
            (*nb_items)++;
    }
}
    if (delta > 0.0)
        cb->volume[ch] += delta * cb->attack_rate[ch];
    else
        cb->volume[ch] += delta * cb->decay_rate[ch];
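/* These two assignments are the core of update_volume(): delta is presumably
 * the gap between the rectified input sample and the channel's tracked
 * volume, and it is closed at the attack rate while the level rises and at
 * the decay rate while it falls -- a one-pole envelope follower with
 * asymmetric time constants. */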
    double in_log, out_log;

    if (in_lin <= s->in_min_lin)
        return s->out_min_lin;

    in_log = log(in_lin);

    for (i = 1; i < s->nb_segments; i++)
        if (in_log <= s->segments[i].x)
            break;
    cs = &s->segments[i - 1];

    out_log = cs->y + in_log * (cs->a * in_log + cs->b);
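/* Each CompandSegment stores a quadratic fitted in the natural-log domain,
 * so the lookup above evaluates out_log = y + b*in_log + a*in_log^2 for the
 * segment that brackets in_log; the function presumably finishes with
 * return exp(out_log); to hand back a linear target volume. */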
    int new_nb_items, num;
    char *saveptr = NULL;

#define S(x) s->segments[2 * ((x) + 1)]
    for (i = 0, new_nb_items = 0; i < nb_points; i++) {
        char *tstr = av_strtok(p, ",", &saveptr);

        if (!tstr || sscanf(tstr, "%lf/%lf", &S(i).x, &S(i).y) != 2) {
            av_log(ctx, AV_LOG_ERROR,
                   "Invalid and/or missing input/output value.\n");
            return AVERROR(EINVAL);
        }
        if (i && S(i - 1).x > S(i).x) {
            av_log(ctx, AV_LOG_ERROR,
                   "Transfer function input values must be increasing.\n");
            return AVERROR(EINVAL);
        }

    if (num == 0 || S(num - 1).x)
#undef S
#define S(x) s->segments[2 * (x)]

    S(0).x = S(1).x - 2 * s->curve_dB;
    for (i = 2; i < num; i++) {
        double g1 = (S(i - 1).y - S(i - 2).y) * (S(i - 0).x - S(i - 1).x);
        double g2 = (S(i - 0).y - S(i - 1).y) * (S(i - 1).x - S(i - 2).x);

        for (j = --i; j < num; j++)
    for (i = 0; i < s->nb_segments; i += 2) {
        s->segments[i].y += s->gain_dB;
#define L(x) s->segments[i - (x)]
    for (i = 4; i < s->nb_segments; i += 2) {
        double x, y, cx, cy, in1, in2, out1, out2, theta, len, r;

        L(4).b = (L(2).y - L(4).y) / (L(2).x - L(4).x);

        L(2).b = (L(0).y - L(2).y) / (L(0).x - L(2).x);

        theta = atan2(L(2).y - L(4).y, L(2).x - L(4).x);

        L(3).x = L(2).x - r * cos(theta);
        L(3).y = L(2).y - r * sin(theta);

        theta = atan2(L(0).y - L(2).y, L(0).x - L(2).x);

        x = L(2).x + r * cos(theta);
        y = L(2).y + r * sin(theta);

        cx = (L(3).x + L(2).x + x) / 3;
        cy = (L(3).y + L(2).y + y) / 3;

        in2  = L(2).x - L(3).x;
        out2 = L(2).y - L(3).y;
        L(3).a = (out2 / in2 - out1 / in1) / (in2 - in1);
        L(3).b = out1 / in1 - L(3).a * in1;
    }

    s->in_min_lin  = exp(s->segments[1].x);
    s->out_min_lin = exp(s->segments[1].y);
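/* Note that the segment x/y values live in natural-log units (hence the
 * exp() conversions just above to recover the linear thresholds). That is
 * also why the soft-knee radius computed later in config_output() is scaled
 * by M_LN10 / 20.0: that factor turns a width expressed in dB into the same
 * natural-log amplitude scale, since ln(10^(dB/20)) = dB * ln(10) / 20. */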
    y[1] = 2 * x[0] * x[1];
    y[2] = 2 * x[0] * x[2] + x[1] * x[1];
    y[3] = 2 * x[1] * x[2];
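/* Hedged completion of the routine the three lines above come from
 * (square_quadratic): squaring (x0 + x1*z^-1 + x2*z^-2) yields the quartic
 * y0..y4 below. Only y1..y3 appear in the excerpt; y0 and y4 are filled in
 * here from the algebra, and the _sketch suffix marks this as an
 * illustration rather than the filter's own function. */
static void square_quadratic_sketch(const double *x, double *y)
{
    y[0] = x[0] * x[0];
    y[1] = 2 * x[0] * x[1];
    y[2] = 2 * x[0] * x[2] + x[1] * x[1];
    y[3] = 2 * x[1] * x[2];
    y[4] = x[2] * x[2];
}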
    double Q = sqrt(.5), alpha = sin(w0) / (2*Q);

    x[0] = (1 - cos(w0))/2;
    x[2] = (1 - cos(w0))/2;
    x[3] =  (1 + cos(w0))/2;
    x[4] = -(1 + cos(w0));
    x[5] =  (1 + cos(w0))/2;

    for (norm = x[6], i = 0; i < 9; ++i)
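/* Standalone sketch of the crossover design the coefficients above belong to.
 * The name lr4_coefs and the 5-element output arrays are assumptions for
 * illustration, not the filter's API, and the denominator terms x[6..8] do
 * not appear in the excerpt -- they are taken from the RBJ cookbook here.
 * The idea: build 2nd-order Butterworth low-pass/high-pass sections at
 * w0 = 2*pi*fc/fs with Q = 1/sqrt(2), normalize by the leading denominator
 * coefficient, then square each quadratic (square_quadratic_sketch() above)
 * to obtain 4th-order Linkwitz-Riley sections sharing one denominator. */
#include <math.h>

static void lr4_coefs(double fc, double fs,
                      double lo[5], double hi[5], double den[5])
{
    double x[9], norm;
    double w0 = 2 * M_PI * fc / fs;   /* M_PI as provided by libavutil/mathematics.h */
    double Q = sqrt(.5), alpha = sin(w0) / (2*Q);
    int i;

    x[0] = (1 - cos(w0))/2;           /* low-pass numerator  */
    x[1] =  1 - cos(w0);
    x[2] = (1 - cos(w0))/2;
    x[3] =  (1 + cos(w0))/2;          /* high-pass numerator */
    x[4] = -(1 + cos(w0));
    x[5] =  (1 + cos(w0))/2;
    x[6] =  1 + alpha;                /* shared denominator  */
    x[7] = -2 * cos(w0);
    x[8] =  1 - alpha;

    for (norm = x[6], i = 0; i < 9; ++i)
        x[i] /= norm;

    square_quadratic_sketch(x,     lo);
    square_quadratic_sketch(x + 3, hi);
    square_quadratic_sketch(x + 6, den);
}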
    int ret, ch, i, k, new_nb_items, nb_bands;
    char *p = s->args, *saveptr = NULL;
    int max_delay_size = 0;

    s->nb_bands = FFMAX(1, nb_bands);
    for (i = 0, new_nb_items = 0; i < nb_bands; i++) {
        int nb_points, nb_attacks, nb_items = 0;
        char *tstr2, *tstr = av_strtok(p, "|", &saveptr);
        char *p2, *p3, *saveptr2 = NULL, *saveptr3 = NULL;

        if (!nb_attacks || nb_attacks & 1) {

        for (k = 0; k < FFMIN(nb_attacks / 2, outlink->channels); k++) {
            char *tstr3 = av_strtok(p3, ",", &saveptr3);

            sscanf(tstr3, "%lf", &s->bands[i].attack_rate[k]);
            sscanf(tstr3, "%lf", &s->bands[i].decay_rate[k]);

            if (s->bands[i].attack_rate[k] > 1.0 / outlink->sample_rate) {
                s->bands[i].attack_rate[k] = 1.0 -
                    exp(-1.0 / (outlink->sample_rate * s->bands[i].attack_rate[k]));
            } else {
                s->bands[i].attack_rate[k] = 1.0;
            }

            if (s->bands[i].decay_rate[k] > 1.0 / outlink->sample_rate) {
                s->bands[i].decay_rate[k] = 1.0 -
                    exp(-1.0 / (outlink->sample_rate * s->bands[i].decay_rate[k]));
            } else {
                s->bands[i].decay_rate[k] = 1.0;
            }
        }

        for (ch = k; ch < outlink->channels; ch++) {
            s->bands[i].attack_rate[ch] = s->bands[i].attack_rate[k - 1];
            s->bands[i].decay_rate[ch]  = s->bands[i].decay_rate[k - 1];
        }
        sscanf(tstr2, "%lf", &s->bands[i].transfer_fn.curve_dB);

        radius = s->bands[i].transfer_fn.curve_dB * M_LN10 / 20.0;

        s->bands[i].transfer_fn.nb_segments = (nb_points + 4) * 2;
        s->bands[i].transfer_fn.segments = av_calloc(s->bands[i].transfer_fn.nb_segments,
                                                     sizeof(*s->bands[i].transfer_fn.segments));
        if (!s->bands[i].transfer_fn.segments)
            return AVERROR(ENOMEM);

        new_nb_items += sscanf(tstr2, "%lf", &s->bands[i].topfreq) == 1;
        if (s->bands[i].topfreq < 0 || s->bands[i].topfreq >= outlink->sample_rate / 2) {

        if (s->bands[i].topfreq != 0) {

        sscanf(tstr2, "%lf", &s->bands[i].delay);

        double initial_volume;

        sscanf(tstr2, "%lf", &initial_volume);
        initial_volume = pow(10.0, initial_volume / 20);

        for (k = 0; k < outlink->channels; k++) {
            s->bands[i].volume[k] = initial_volume;
        }

        sscanf(tstr2, "%lf", &s->bands[i].transfer_fn.gain_dB);
    }
    s->nb_bands = new_nb_items;

    for (i = 0; max_delay_size > 0 && i < s->nb_bands; i++) {
        if (!s->bands[i].delay_buf)
            return AVERROR(ENOMEM);
    }
    s->delay_buf_size = max_delay_size;
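/* Worked example of the rate mapping above: with a 44100 Hz output and an
 * attack time of 0.005 s,
 *     attack_rate = 1 - exp(-1 / (44100 * 0.005))
 *                 = 1 - exp(-1 / 220.5) ~= 0.00452,
 * i.e. the per-sample smoothing coefficient of a follower whose time constant
 * is the requested attack time; times at or below one sample period collapse
 * to a rate of 1.0 (instant tracking). */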
#define CONVOLVE _ _ _ _

static void crossover(int ch, Crossover *p, double *ibuf, double *obuf_low,
                      double *obuf_high, size_t len)
{
    double out_low, out_high;

#define _ out_low += p->coefs[j] * p->previous[ch][p->pos + j].in \
                   - p->coefs[2*N+2 + j] * p->previous[ch][p->pos + j].out_low, j++;
        out_low = p->coefs[0] * *ibuf;
        CONVOLVE            /* expands "_" once per remaining filter tap */
        *obuf_low++ = out_low;
#undef _

#define _ out_high += p->coefs[j+N+1] * p->previous[ch][p->pos + j].in \
                    - p->coefs[2*N+2 + j] * p->previous[ch][p->pos + j].out_high, j++;
        out_high = p->coefs[N+1] * *ibuf;
        CONVOLVE
        *obuf_high++ = out_high;
#undef _
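/* What the CONVOLVE/_ macro pair above expands to, written as a plain loop.
 * This is a readable equivalent for illustration, not the filter's code, and
 * the exact history indexing is assumed: a direct-form-I IIR section whose
 * numerator taps weight stored inputs and whose denominator taps subtract
 * stored outputs, run once per sample for the low band (coefs[0..N]) and once
 * for the high band (coefs[N+1..2N+1]) against the shared denominator
 * coefficients (coefs[2N+2..]). */
static double iir_df1(const double *b, const double *a, double in,
                      const double *x_hist, const double *y_hist, int order)
{
    /* b[0..order]: numerator; a[1..order]: feedback taps, a[0] taken as 1. */
    double out = b[0] * in;
    int j;

    for (j = 1; j <= order; j++)
        out += b[j] * x_hist[j - 1] - a[j] * y_hist[j - 1];
    return out;
}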
    for (i = 0; i < len; i++) {
        double level_in_lin, level_out_lin, checkbuf;

        level_in_lin = l->volume[ch];

        if (c->delay_buf_size <= 0) {
            checkbuf = ibuf[i] * level_out_lin;

            /* delayed path: read back from the ring buffer, delay_size samples late */
                l->delay_size) % c->delay_buf_size] * level_out_lin;
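/* The delayed read-back above is what implements the band's optional "delay"
 * parameter: samples are written into a per-channel ring buffer and read out
 * delay_size samples later, so the gain derived from the current envelope is
 * applied to slightly older audio -- in effect a look-ahead that lets the
 * compressor react before a transient reaches the output. */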
    if (s->band_samples < in->nb_samples) {

        s->band_samples = in->nb_samples;
    }

    for (ch = 0; ch < outlink->channels; ch++) {
        double *a, *dst = (double *)out->extended_data[ch];

        for (band = 0, abuf = in, bbuf = s->band_buf2, cbuf = s->band_buf1; band < s->nb_bands; band++) {
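/* Band processing in filter_frame() works as a chain of crossovers: for each
 * band the running signal is split at that band's topfreq into a low part,
 * which is companded by mcompand_channel() and accumulated into the output
 * channel, and a high part that feeds the next band (a band with topfreq 0
 * skips the split); band_buf1 and band_buf2 appear to serve as the scratch
 * frames swapped between iterations. */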
669 "Multiband Compress or expand audio dynamic range."),
672 .priv_class = &mcompand_class,