FFmpeg 4.4.4
vf_gblur.c
/*
 * Copyright (c) 2011 Pascal Getreuer
 * Copyright (c) 2016 Paul B Mahol
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <float.h>

#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "gblur.h"
#include "internal.h"
#include "video.h"

#define OFFSET(x) offsetof(GBlurContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption gblur_options[] = {
    { "sigma",  "set sigma",            OFFSET(sigma),  AV_OPT_TYPE_FLOAT, {.dbl=0.5},  0.0, 1024, FLAGS },
    { "steps",  "set number of steps",  OFFSET(steps),  AV_OPT_TYPE_INT,   {.i64=1},      1,    6, FLAGS },
    { "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT,   {.i64=0xF},    0,  0xF, FLAGS },
    { "sigmaV", "set vertical sigma",   OFFSET(sigmaV), AV_OPT_TYPE_FLOAT, {.dbl=-1},    -1, 1024, FLAGS },
    { NULL }
};
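/*
 * Illustrative usage of the options above (added commentary; example command
 * lines, not part of the original source):
 *
 *   ffmpeg -i in.mp4 -vf "gblur=sigma=3"                  out.mp4   blur all planes
 *   ffmpeg -i in.mp4 -vf "gblur=sigma=5:sigmaV=1:steps=2" out.mp4   anisotropic blur
 *   ffmpeg -i in.mp4 -vf "gblur=sigma=10:planes=1"        out.mp4   first (luma) plane only
 *
 * sigmaV defaults to -1, which means "reuse the horizontal sigma".  Because
 * FLAGS includes AV_OPT_FLAG_RUNTIME_PARAM, these options can also be changed
 * while the filter is running via filter commands.
 */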

AVFILTER_DEFINE_CLASS(gblur);

typedef struct ThreadData {
    int height;
    int width;
} ThreadData;

static void postscale_c(float *buffer, int length,
                        float postscale, float min, float max)
{
    for (int i = 0; i < length; i++) {
        buffer[i] *= postscale;
        buffer[i] = av_clipf(buffer[i], min, max);
    }
}

static void horiz_slice_c(float *buffer, int width, int height, int steps,
                          float nu, float bscale)
{
    int step, x, y;
    float *ptr;
    for (y = 0; y < height; y++) {
        for (step = 0; step < steps; step++) {
            ptr = buffer + width * y;
            ptr[0] *= bscale;

            /* Filter rightwards */
            for (x = 1; x < width; x++)
                ptr[x] += nu * ptr[x - 1];
            ptr[x = width - 1] *= bscale;

            /* Filter leftwards */
            for (; x > 0; x--)
                ptr[x - 1] += nu * ptr[x];
        }
    }
}
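/*
 * Note (added commentary): each pass of horiz_slice_c() runs a first-order
 * recursive filter along the row in both directions, which is what
 * approximates the Gaussian here.  The rightward sweep computes, in place,
 * out[i] = in[i] + nu * out[i - 1], and the leftward sweep mirrors it.  The
 * row endpoints are pre-multiplied by bscale (1 / (1 - nu), see set_params()
 * below), which corresponds to treating the signal as constant beyond the
 * edge (the geometric series sum of nu^k).
 */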

static int filter_horizontally(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    GBlurContext *s = ctx->priv;
    ThreadData *td = arg;
    const int height = td->height;
    const int width = td->width;
    const int slice_start = (height *  jobnr   ) / nb_jobs;
    const int slice_end   = (height * (jobnr+1)) / nb_jobs;
    const float boundaryscale = s->boundaryscale;
    const int steps = s->steps;
    const float nu = s->nu;
    float *buffer = s->buffer;

    s->horiz_slice(buffer + width * slice_start, width, slice_end - slice_start,
                   steps, nu, boundaryscale);
    emms_c();
    return 0;
}

static void do_vertical_columns(float *buffer, int width, int height,
                                int column_begin, int column_end, int steps,
                                float nu, float boundaryscale, int column_step)
{
    const int numpixels = width * height;
    int i, x, k, step;
    float *ptr;
    for (x = column_begin; x < column_end;) {
        for (step = 0; step < steps; step++) {
            ptr = buffer + x;
            for (k = 0; k < column_step; k++) {
                ptr[k] *= boundaryscale;
            }
            /* Filter downwards */
            for (i = width; i < numpixels; i += width) {
                for (k = 0; k < column_step; k++) {
                    ptr[i + k] += nu * ptr[i - width + k];
                }
            }
            i = numpixels - width;

            for (k = 0; k < column_step; k++)
                ptr[i + k] *= boundaryscale;

            /* Filter upwards */
            for (; i > 0; i -= width) {
                for (k = 0; k < column_step; k++)
                    ptr[i - width + k] += nu * ptr[i + k];
            }
        }
        x += column_step;
    }
}
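/*
 * Note (added commentary): do_vertical_columns() applies the same two-way
 * recursion down and then up the image columns.  Rather than walking one
 * column at a time, the inner loops advance column_step adjacent columns
 * together so that consecutive memory locations are touched in each step;
 * filter_vertically() below uses a step of 8 for the bulk of its slice and a
 * step of 1 for the leftover columns.
 */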

static int filter_vertically(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    GBlurContext *s = ctx->priv;
    ThreadData *td = arg;
    const int height = td->height;
    const int width = td->width;
    const int slice_start = (width *  jobnr   ) / nb_jobs;
    const int slice_end   = (width * (jobnr+1)) / nb_jobs;
    const float boundaryscale = s->boundaryscaleV;
    const int steps = s->steps;
    const float nu = s->nuV;
    float *buffer = s->buffer;
    int aligned_end;

    aligned_end = slice_start + (((slice_end - slice_start) >> 3) << 3);
    /* Filter vertically along columns (process 8 columns in each step) */
    do_vertical_columns(buffer, width, height, slice_start, aligned_end,
                        steps, nu, boundaryscale, 8);

    /* Filter un-aligned columns one by one */
    do_vertical_columns(buffer, width, height, aligned_end, slice_end,
                        steps, nu, boundaryscale, 1);
    return 0;
}

static int filter_postscale(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    GBlurContext *s = ctx->priv;
    ThreadData *td = arg;
    const float max = s->flt ? FLT_MAX : (1 << s->depth) - 1;
    const float min = s->flt ? -FLT_MAX : 0.f;
    const int height = td->height;
    const int width = td->width;
    const int awidth = FFALIGN(width, 64);
    const int slice_start = (height *  jobnr   ) / nb_jobs;
    const int slice_end   = (height * (jobnr+1)) / nb_jobs;
    const float postscale = s->postscale * s->postscaleV;
    const int slice_size = slice_end - slice_start;

    s->postscale_slice(s->buffer + slice_start * awidth,
                       slice_size * awidth, postscale, min, max);

    return 0;
}
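/*
 * Note (added commentary): the horizontal and vertical passes each leave a
 * constant gain behind, so filter_postscale() multiplies every sample by
 * s->postscale * s->postscaleV and clips the result to the valid range:
 * [0, 2^depth - 1] for integer formats, and [-FLT_MAX, FLT_MAX] (effectively
 * unclipped) for float formats.
 */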

static void gaussianiir2d(AVFilterContext *ctx, int plane)
{
    GBlurContext *s = ctx->priv;
    const int width = s->planewidth[plane];
    const int height = s->planeheight[plane];
    const int nb_threads = ff_filter_get_nb_threads(ctx);
    ThreadData td;

    if (s->sigma <= 0 || s->steps < 0)
        return;

    td.width = width;
    td.height = height;
    ctx->internal->execute(ctx, filter_horizontally, &td, NULL, FFMIN(height, nb_threads));
    ctx->internal->execute(ctx, filter_vertically, &td, NULL, FFMIN(width, nb_threads));
    ctx->internal->execute(ctx, filter_postscale, &td, NULL, FFMIN(width * height, nb_threads));
}
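/*
 * Note (added commentary): gaussianiir2d() drives the three slice jobs above
 * through the filter's execute() callback.  The horizontal and postscale
 * passes split the plane by rows, the vertical pass splits it by columns, and
 * each job count is capped with FFMIN() so no worker is handed an empty
 * slice.
 */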

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12,
        AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_GBRPF32, AV_PIX_FMT_GBRAPF32, AV_PIX_FMT_GRAYF32,
        AV_PIX_FMT_NONE
    };

    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}

void ff_gblur_init(GBlurContext *s)
{
    s->horiz_slice = horiz_slice_c;
    s->postscale_slice = postscale_c;
    if (ARCH_X86)
        ff_gblur_init_x86(s);
}

static int config_input(AVFilterLink *inlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    GBlurContext *s = inlink->dst->priv;

    s->depth = desc->comp[0].depth;
    s->flt = !!(desc->flags & AV_PIX_FMT_FLAG_FLOAT);
    s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0] = s->planewidth[3] = inlink->w;
    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    s->buffer = av_malloc_array(FFALIGN(inlink->w, 64), FFALIGN(inlink->h, 64) * sizeof(*s->buffer));
    if (!s->buffer)
        return AVERROR(ENOMEM);

    if (s->sigmaV < 0) {
        s->sigmaV = s->sigma;
    }
    ff_gblur_init(s);

    return 0;
}
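/*
 * Note (added commentary): the single float work buffer is over-allocated to
 * 64-aligned dimensions.  That leaves headroom for filter_postscale(), which
 * addresses its slices with the 64-aligned row width (awidth), and for the
 * SIMD kernels installed by ff_gblur_init() / ff_gblur_init_x86().  A negative
 * sigmaV (the option default) simply inherits the horizontal sigma.
 */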

static void set_params(float sigma, int steps, float *postscale, float *boundaryscale, float *nu)
{
    double dnu, lambda;

    lambda = (sigma * sigma) / (2.0 * steps);
    dnu = (1.0 + 2.0 * lambda - sqrt(1.0 + 4.0 * lambda)) / (2.0 * lambda);
    *postscale = pow(dnu / lambda, steps);
    *boundaryscale = 1.0 / (1.0 - dnu);
    *nu = (float)dnu;
}
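/*
 * Note (added commentary): these coefficients appear to follow the
 * Alvarez-Mazorra recursive Gaussian approximation (cf. Pascal Getreuer's
 * survey of Gaussian convolution algorithms).  For a target standard
 * deviation sigma spread over `steps` passes:
 *
 *     lambda        = sigma^2 / (2 * steps)
 *     nu            = (1 + 2*lambda - sqrt(1 + 4*lambda)) / (2*lambda)
 *     postscale     = (nu / lambda)^steps
 *     boundaryscale = 1 / (1 - nu)
 *
 * nu is the feedback coefficient of the first-order passes, postscale undoes
 * the DC gain of the forward/backward passes in one dimension (the horizontal
 * and vertical factors are multiplied together in filter_postscale()), and
 * boundaryscale is the edge correction used in horiz_slice_c() and
 * do_vertical_columns().
 */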

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    GBlurContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int plane;

    set_params(s->sigma,  s->steps, &s->postscale,  &s->boundaryscale,  &s->nu);
    set_params(s->sigmaV, s->steps, &s->postscaleV, &s->boundaryscaleV, &s->nuV);

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    for (plane = 0; plane < s->nb_planes; plane++) {
        const int height = s->planeheight[plane];
        const int width = s->planewidth[plane];
        float *bptr = s->buffer;
        const uint8_t *src = in->data[plane];
        const uint16_t *src16 = (const uint16_t *)in->data[plane];
        uint8_t *dst = out->data[plane];
        uint16_t *dst16 = (uint16_t *)out->data[plane];
        int y, x;

        if (!s->sigma || !(s->planes & (1 << plane))) {
            if (out != in)
                av_image_copy_plane(out->data[plane], out->linesize[plane],
                                    in->data[plane], in->linesize[plane],
                                    width * ((s->depth + 7) / 8), height);
            continue;
        }

        if (s->flt) {
            av_image_copy_plane((uint8_t *)bptr, width * sizeof(float),
                                in->data[plane], in->linesize[plane],
                                width * sizeof(float), height);
        } else if (s->depth == 8) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    bptr[x] = src[x];
                }
                bptr += width;
                src += in->linesize[plane];
            }
        } else {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    bptr[x] = src16[x];
                }
                bptr += width;
                src16 += in->linesize[plane] / 2;
            }
        }

        gaussianiir2d(ctx, plane);

        bptr = s->buffer;
        if (s->flt) {
            av_image_copy_plane(out->data[plane], out->linesize[plane],
                                (uint8_t *)bptr, width * sizeof(float),
                                width * sizeof(float), height);
        } else if (s->depth == 8) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    dst[x] = bptr[x];
                }
                bptr += width;
                dst += out->linesize[plane];
            }
        } else {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    dst16[x] = bptr[x];
                }
                bptr += width;
                dst16 += out->linesize[plane] / 2;
            }
        }
    }

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    GBlurContext *s = ctx->priv;

    av_freep(&s->buffer);
}

static const AVFilterPad gblur_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad gblur_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_gblur = {
    .name            = "gblur",
    .description     = NULL_IF_CONFIG_SMALL("Apply Gaussian Blur filter."),
    .priv_size       = sizeof(GBlurContext),
    .priv_class      = &gblur_class,
    .uninit          = uninit,
    .query_formats   = query_formats,
    .inputs          = gblur_inputs,
    .outputs         = gblur_outputs,
    .process_command = ff_filter_process_command,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};