FFmpeg  4.4.4
f_interleave.c
/*
 * Copyright (c) 2013 Stefano Sabatini
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * audio and video interleaver
 */

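/*
 * Illustrative usage, not part of the original source: interleave the video
 * streams of two inputs into a single, timestamp-ordered stream, e.g.
 *   ffmpeg -i INPUT0 -i INPUT1 -filter_complex "[0:v][1:v] interleave" OUTPUT
 * where INPUT0/INPUT1/OUTPUT are placeholder file names. The audio variant is
 * exposed as the "ainterleave" filter defined further below in this file.
 */
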
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/opt.h"

#include "avfilter.h"
#include "formats.h"
#include "filters.h"
#include "internal.h"
#include "audio.h"
#include "video.h"

typedef struct InterleaveContext {
    const AVClass *class;
    int nb_inputs;
    int duration_mode;
    int64_t pts;
} InterleaveContext;

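/* Modes for the "duration" option: how the end of the output stream is
 * determined from the inputs' end-of-stream events. */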
#define DURATION_LONGEST  0
#define DURATION_SHORTEST 1
#define DURATION_FIRST    2

#define OFFSET(x) offsetof(InterleaveContext, x)

#define DEFINE_OPTIONS(filt_name, flags_)                                                                                 \
static const AVOption filt_name##_options[] = {                                                                          \
   { "nb_inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \
   { "n",         "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \
   { "duration", "how to determine the end-of-stream",                                                                   \
       OFFSET(duration_mode), AV_OPT_TYPE_INT, { .i64 = DURATION_LONGEST }, 0, 2, flags_, "duration" },                  \
       { "longest",  "Duration of longest input",  0, AV_OPT_TYPE_CONST, { .i64 = DURATION_LONGEST  }, 0, 0, flags_, "duration" }, \
       { "shortest", "Duration of shortest input", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_SHORTEST }, 0, 0, flags_, "duration" }, \
       { "first",    "Duration of first input",    0, AV_OPT_TYPE_CONST, { .i64 = DURATION_FIRST    }, 0, 0, flags_, "duration" }, \
   { NULL }                                                                                                               \
}

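/*
 * activate(): among all inputs that currently have a queued frame, pick the
 * one whose next frame has the smallest timestamp (rescaled to AV_TIME_BASE_Q)
 * and forward that frame, so the output is ordered by presentation time
 * across all inputs.
 */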
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    InterleaveContext *s = ctx->priv;
    int64_t q_pts, pts = INT64_MAX;
    int i, nb_eofs = 0, input_idx = -1;
    int first_eof = 0;
    int64_t rpts;
    int status;
    int nb_inputs_with_frames = 0;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);

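    /* Count the inputs that have reached EOF and remember whether the first
     * input is among them. */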
    for (i = 0; i < ctx->nb_inputs; i++) {
        int is_eof = !!ff_inlink_acknowledge_status(ctx->inputs[i], &status, &rpts);

        nb_eofs += is_eof;
        if (i == 0)
            first_eof = is_eof;
    }

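    /* Terminate the output according to the selected duration mode:
     * shortest -> any input ended, longest -> all inputs ended,
     * first -> the first input ended. */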
    if ((nb_eofs > 0 && s->duration_mode == DURATION_SHORTEST) ||
        (nb_eofs == ctx->nb_inputs && s->duration_mode == DURATION_LONGEST) ||
        (first_eof && s->duration_mode == DURATION_FIRST)) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->pts);
        return 0;
    }

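    /* Count how many inputs currently have at least one queued frame. */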
    for (i = 0; i < ctx->nb_inputs; i++) {
        if (!ff_inlink_queued_frames(ctx->inputs[i]))
            continue;
        nb_inputs_with_frames++;
    }

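    /* Only select a frame once every input that can still deliver one has a
     * frame queued; otherwise frames could be emitted out of timestamp order. */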
    if (nb_inputs_with_frames >= ctx->nb_inputs - nb_eofs) {
        for (i = 0; i < ctx->nb_inputs; i++) {
            AVFrame *frame;

            if (ff_inlink_queued_frames(ctx->inputs[i]) == 0)
                continue;

            frame = ff_inlink_peek_frame(ctx->inputs[i], 0);
            if (frame->pts == AV_NOPTS_VALUE) {
                int ret;

                av_log(ctx, AV_LOG_WARNING,
                       "NOPTS value for input frame cannot be accepted, frame discarded\n");
                ret = ff_inlink_consume_frame(ctx->inputs[i], &frame);
                if (ret < 0)
                    return ret;
                av_frame_free(&frame);
                return AVERROR_INVALIDDATA;
            }

            q_pts = av_rescale_q(frame->pts, ctx->inputs[i]->time_base, AV_TIME_BASE_Q);
            if (q_pts < pts) {
                pts = q_pts;
                input_idx = i;
            }
        }

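        /* Consume the frame with the smallest rescaled timestamp, stamp it
         * with that timestamp and send it downstream. */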
        if (input_idx >= 0) {
            AVFrame *frame;
            int ret;

            ret = ff_inlink_consume_frame(ctx->inputs[input_idx], &frame);
            if (ret < 0)
                return ret;

            frame->pts = s->pts = pts;
            return ff_filter_frame(outlink, frame);
        }
    }

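    /* Not enough queued frames to make a decision: request more data from the
     * inputs that are empty and not yet finished. */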
    for (i = 0; i < ctx->nb_inputs; i++) {
        if (ff_inlink_queued_frames(ctx->inputs[i]))
            continue;
        if (ff_outlink_frame_wanted(outlink) &&
            !ff_outlink_get_status(ctx->inputs[i])) {
            ff_inlink_request_frame(ctx->inputs[i]);
            return 0;
        }
    }

    if (i == ctx->nb_inputs - nb_eofs && ff_outlink_frame_wanted(outlink)) {
        ff_filter_set_ready(ctx, 100);
        return 0;
    }

    return FFERROR_NOT_READY;
}

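/*
 * init(): create one input pad per requested input, with the same media type
 * as the filter's statically defined output pad.
 */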
static av_cold int init(AVFilterContext *ctx)
{
    InterleaveContext *s = ctx->priv;
    const AVFilterPad *outpad = &ctx->filter->outputs[0];
    int i, ret;

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterPad inpad = { 0 };

        inpad.name = av_asprintf("input%d", i);
        if (!inpad.name)
            return AVERROR(ENOMEM);
        inpad.type = outpad->type;

        switch (outpad->type) {
        case AVMEDIA_TYPE_VIDEO:
            inpad.get_video_buffer = ff_null_get_video_buffer; break;
        case AVMEDIA_TYPE_AUDIO:
            inpad.get_audio_buffer = ff_null_get_audio_buffer; break;
        default:
            av_assert0(0);
        }
        if ((ret = ff_insert_inpad(ctx, i, &inpad)) < 0) {
            av_freep(&inpad.name);
            return ret;
        }
    }

    return 0;
}

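/* uninit(): free the dynamically allocated input pad names. */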
static av_cold void uninit(AVFilterContext *ctx)
{
    for (int i = 0; i < ctx->nb_inputs; i++)
        av_freep(&ctx->input_pads[i].name);
}

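/*
 * config_output(): the output uses AV_TIME_BASE_Q as its time base so that
 * frames coming from inputs with different time bases can be compared and
 * interleaved directly; for video, all inputs must also agree on frame size
 * and sample aspect ratio.
 */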
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink0 = ctx->inputs[0];
    int i;

    if (outlink->type == AVMEDIA_TYPE_VIDEO) {
        outlink->time_base           = AV_TIME_BASE_Q;
        outlink->w                   = inlink0->w;
        outlink->h                   = inlink0->h;
        outlink->sample_aspect_ratio = inlink0->sample_aspect_ratio;
        outlink->format              = inlink0->format;
        outlink->frame_rate = (AVRational) {1, 0};
        for (i = 1; i < ctx->nb_inputs; i++) {
            AVFilterLink *inlink = ctx->inputs[i];

            if (outlink->w                       != inlink->w                       ||
                outlink->h                       != inlink->h                       ||
                outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num ||
                outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
                av_log(ctx, AV_LOG_ERROR, "Parameters for input link %s "
                       "(size %dx%d, SAR %d:%d) do not match the corresponding "
                       "output link parameters (%dx%d, SAR %d:%d)\n",
                       ctx->input_pads[i].name, inlink->w, inlink->h,
                       inlink->sample_aspect_ratio.num,
                       inlink->sample_aspect_ratio.den,
                       outlink->w, outlink->h,
                       outlink->sample_aspect_ratio.num,
                       outlink->sample_aspect_ratio.den);
                return AVERROR(EINVAL);
            }
        }
    }
    return 0;
}

#if CONFIG_INTERLEAVE_FILTER

DEFINE_OPTIONS(interleave, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(interleave);

static const AVFilterPad interleave_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_interleave = {
    .name        = "interleave",
    .description = NULL_IF_CONFIG_SMALL("Temporally interleave video inputs."),
    .priv_size   = sizeof(InterleaveContext),
    .init        = init,
    .uninit      = uninit,
    .activate    = activate,
    .outputs     = interleave_outputs,
    .priv_class  = &interleave_class,
    .flags       = AVFILTER_FLAG_DYNAMIC_INPUTS,
};

#endif

#if CONFIG_AINTERLEAVE_FILTER

DEFINE_OPTIONS(ainterleave, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(ainterleave);

static const AVFilterPad ainterleave_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_af_ainterleave = {
    .name        = "ainterleave",
    .description = NULL_IF_CONFIG_SMALL("Temporally interleave audio inputs."),
    .priv_size   = sizeof(InterleaveContext),
    .init        = init,
    .uninit      = uninit,
    .activate    = activate,
    .outputs     = ainterleave_outputs,
    .priv_class  = &ainterleave_class,
    .flags       = AVFILTER_FLAG_DYNAMIC_INPUTS,
};

#endif