FFmpeg 4.4.4 - vf_super2xsai.c
/*
 * Copyright (c) 2010 Niel van der Westhuizen <nielkie@gmail.com>
 * Copyright (c) 2002 A'rpi
 * Copyright (c) 1997-2001 ZSNES Team ( zsknight@zsnes.com / _demo_@zsnes.com )
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * Super 2xSaI video filter
 * Ported from MPlayer libmpcodecs/vf_2xsai.c.
 */
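
/*
 * The filter takes no options and doubles both dimensions of its input;
 * a typical invocation is, for example:
 *
 *     ffmpeg -i input.png -vf super2xsai output.png
 */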

#include "libavutil/pixdesc.h"
#include "libavutil/intreadwrite.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

typedef struct Super2xSaIContext {
    /* masks used for two pixels interpolation */
    uint32_t hi_pixel_mask;
    uint32_t lo_pixel_mask;

    /* masks used for four pixels interpolation */
    uint32_t q_hi_pixel_mask;
    uint32_t q_lo_pixel_mask;

    int bpp; ///< bytes per pixel, pixel stride for each (packed) pixel
    int is_be;
} Super2xSaIContext;

typedef struct ThreadData {
    AVFrame *in, *out;
} ThreadData;

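/*
 * GET_RESULT() is the diagonal "vote": it yields +1 when A fails to match
 * both C and D while B matches both, -1 in the opposite case, and 0
 * otherwise. The summed votes decide which of the two diagonal colours is
 * propagated into the new pixel.
 */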
#define GET_RESULT(A, B, C, D) ((A != C || A != D) - (B != C || B != D))

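/*
 * INTERPOLATE() averages two packed pixels channel by channel without
 * unpacking them: hi_pixel_mask clears the lowest bit of every channel so
 * the shift cannot borrow across channel boundaries, and the lo_pixel_mask
 * term adds the carry back when both inputs had that low bit set.
 */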
#define INTERPOLATE(A, B) (((A & hi_pixel_mask) >> 1) + ((B & hi_pixel_mask) >> 1) + (A & B & lo_pixel_mask))

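/*
 * Q_INTERPOLATE() is the same idea for the average of four pixels: the two
 * lowest bits of every channel are masked off before the >> 2, then the
 * averaged low bits are added back in.
 */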
#define Q_INTERPOLATE(A, B, C, D) ((A & q_hi_pixel_mask) >> 2) + ((B & q_hi_pixel_mask) >> 2) + ((C & q_hi_pixel_mask) >> 2) + ((D & q_hi_pixel_mask) >> 2) \
    + ((((A & q_lo_pixel_mask) + (B & q_lo_pixel_mask) + (C & q_lo_pixel_mask) + (D & q_lo_pixel_mask)) >> 2) & q_lo_pixel_mask)

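/*
 * Slice-threaded worker: job jobnr scales input rows [slice_start, slice_end)
 * into output rows [2*slice_start, 2*slice_end).
 */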
static int super2xsai(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    Super2xSaIContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const uint8_t *src = in->data[0];
    uint8_t *dst = out->data[0];
    const int src_linesize = in->linesize[0];
    const int dst_linesize = out->linesize[0];
    const int width = in->width;
    const int height = in->height;
    unsigned int x, y;
    uint32_t color[4][4];
    const uint8_t *src_line[4];
    const int bpp = s->bpp;
    const uint32_t hi_pixel_mask = s->hi_pixel_mask;
    const uint32_t lo_pixel_mask = s->lo_pixel_mask;
    const uint32_t q_hi_pixel_mask = s->q_hi_pixel_mask;
    const uint32_t q_lo_pixel_mask = s->q_lo_pixel_mask;
    const int slice_start = (height * jobnr) / nb_jobs;
    const int slice_end = (height * (jobnr+1)) / nb_jobs;

    /* Point to the first 4 lines, first line is duplicated */
    src_line[0] = src + src_linesize*FFMAX(slice_start - 1, 0);
    src_line[1] = src + src_linesize*slice_start;
    src_line[2] = src + src_linesize*FFMIN(slice_start + 1, height-1);
    src_line[3] = src + src_linesize*FFMIN(slice_start + 2, height-1);

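/* Each READ_COLORn() loads one packed pixel (n bytes wide) at pixel index
 * 'off'; the 16-bit variant honours the source endianness via s->is_be. */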
#define READ_COLOR4(dst, src_line, off) dst = *((const uint32_t *)src_line + off)
#define READ_COLOR3(dst, src_line, off) dst = AV_RL24 (src_line + 3*off)
#define READ_COLOR2(dst, src_line, off) dst = s->is_be ? AV_RB16(src_line + 2 * off) : AV_RL16(src_line + 2 * off)

    for (y = slice_start; y < slice_end; y++) {
        uint8_t *dst_line[2];

        dst_line[0] = dst + dst_linesize*2*y;
        dst_line[1] = dst + dst_linesize*(2*y+1);

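        /*
         * color[i][j] caches the source pixel at row y-1+i, column x-1+j
         * (clamped at the frame borders), i.e. the 4x4 neighbourhood around
         * the pixel being scaled, which is color[1][1]. Prime it for x = 0
         * with the left column duplicated.
         */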
        switch (bpp) {
        case 4:
            READ_COLOR4(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR4(color[0][2], src_line[0], 1); READ_COLOR4(color[0][3], src_line[0], 2);
            READ_COLOR4(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR4(color[1][2], src_line[1], 1); READ_COLOR4(color[1][3], src_line[1], 2);
            READ_COLOR4(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR4(color[2][2], src_line[2], 1); READ_COLOR4(color[2][3], src_line[2], 2);
            READ_COLOR4(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR4(color[3][2], src_line[3], 1); READ_COLOR4(color[3][3], src_line[3], 2);
            break;
        case 3:
            READ_COLOR3(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR3(color[0][2], src_line[0], 1); READ_COLOR3(color[0][3], src_line[0], 2);
            READ_COLOR3(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR3(color[1][2], src_line[1], 1); READ_COLOR3(color[1][3], src_line[1], 2);
            READ_COLOR3(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR3(color[2][2], src_line[2], 1); READ_COLOR3(color[2][3], src_line[2], 2);
            READ_COLOR3(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR3(color[3][2], src_line[3], 1); READ_COLOR3(color[3][3], src_line[3], 2);
            break;
        default:
            READ_COLOR2(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR2(color[0][2], src_line[0], 1); READ_COLOR2(color[0][3], src_line[0], 2);
            READ_COLOR2(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR2(color[1][2], src_line[1], 1); READ_COLOR2(color[1][3], src_line[1], 2);
            READ_COLOR2(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR2(color[2][2], src_line[2], 1); READ_COLOR2(color[2][3], src_line[2], 2);
            READ_COLOR2(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR2(color[3][2], src_line[3], 1); READ_COLOR2(color[3][3], src_line[3], 2);
        }

        for (x = 0; x < width; x++) {
            uint32_t product1a, product1b, product2a, product2b;

//---------------------------------------  B0 B1 B2 B3    0  1  2  3
//                                         4  5* 6  S2 -> 4  5* 6  7
//                                         1  2  3  S1    8  9 10 11
//                                         A0 A1 A2 A3   12 13 14 15
//--------------------------------------
            if (color[2][1] == color[1][2] && color[1][1] != color[2][2]) {
                product2b = color[2][1];
                product1b = product2b;
            } else if (color[1][1] == color[2][2] && color[2][1] != color[1][2]) {
                product2b = color[1][1];
                product1b = product2b;
            } else if (color[1][1] == color[2][2] && color[2][1] == color[1][2]) {
                int r = 0;

                r += GET_RESULT(color[1][2], color[1][1], color[1][0], color[3][1]);
                r += GET_RESULT(color[1][2], color[1][1], color[2][0], color[0][1]);
                r += GET_RESULT(color[1][2], color[1][1], color[3][2], color[2][3]);
                r += GET_RESULT(color[1][2], color[1][1], color[0][2], color[1][3]);

                if (r > 0)
                    product1b = color[1][2];
                else if (r < 0)
                    product1b = color[1][1];
                else
                    product1b = INTERPOLATE(color[1][1], color[1][2]);

                product2b = product1b;
            } else {
                if (color[1][2] == color[2][2] && color[2][2] == color[3][1] && color[2][1] != color[3][2] && color[2][2] != color[3][0])
                    product2b = Q_INTERPOLATE(color[2][2], color[2][2], color[2][2], color[2][1]);
                else if (color[1][1] == color[2][1] && color[2][1] == color[3][2] && color[3][1] != color[2][2] && color[2][1] != color[3][3])
                    product2b = Q_INTERPOLATE(color[2][1], color[2][1], color[2][1], color[2][2]);
                else
                    product2b = INTERPOLATE(color[2][1], color[2][2]);

                if (color[1][2] == color[2][2] && color[1][2] == color[0][1] && color[1][1] != color[0][2] && color[1][2] != color[0][0])
                    product1b = Q_INTERPOLATE(color[1][2], color[1][2], color[1][2], color[1][1]);
                else if (color[1][1] == color[2][1] && color[1][1] == color[0][2] && color[0][1] != color[1][2] && color[1][1] != color[0][3])
                    product1b = Q_INTERPOLATE(color[1][2], color[1][1], color[1][1], color[1][1]);
                else
                    product1b = INTERPOLATE(color[1][1], color[1][2]);
            }

            if (color[1][1] == color[2][2] && color[2][1] != color[1][2] && color[1][0] == color[1][1] && color[1][1] != color[3][2])
                product2a = INTERPOLATE(color[2][1], color[1][1]);
            else if (color[1][1] == color[2][0] && color[1][2] == color[1][1] && color[1][0] != color[2][1] && color[1][1] != color[3][0])
                product2a = INTERPOLATE(color[2][1], color[1][1]);
            else
                product2a = color[2][1];

            if (color[2][1] == color[1][2] && color[1][1] != color[2][2] && color[2][0] == color[2][1] && color[2][1] != color[0][2])
                product1a = INTERPOLATE(color[2][1], color[1][1]);
            else if (color[1][0] == color[2][1] && color[2][2] == color[2][1] && color[2][0] != color[1][1] && color[2][1] != color[0][0])
                product1a = INTERPOLATE(color[2][1], color[1][1]);
            else
                product1a = color[1][1];

            /* Set the calculated pixels */
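            /* For 16bpp input the two new horizontal pixels are packed into a
             * single 32-bit store that matches the source endianness. */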
            switch (bpp) {
            case 4:
                AV_WN32A(dst_line[0] + x * 8, product1a);
                AV_WN32A(dst_line[0] + x * 8 + 4, product1b);
                AV_WN32A(dst_line[1] + x * 8, product2a);
                AV_WN32A(dst_line[1] + x * 8 + 4, product2b);
                break;
            case 3:
                AV_WL24(dst_line[0] + x * 6, product1a);
                AV_WL24(dst_line[0] + x * 6 + 3, product1b);
                AV_WL24(dst_line[1] + x * 6, product2a);
                AV_WL24(dst_line[1] + x * 6 + 3, product2b);
                break;
            default: // bpp = 2
                if (s->is_be) {
                    AV_WB32(dst_line[0] + x * 4, product1a | (product1b << 16));
                    AV_WB32(dst_line[1] + x * 4, product2a | (product2b << 16));
                } else {
                    AV_WL32(dst_line[0] + x * 4, product1a | (product1b << 16));
                    AV_WL32(dst_line[1] + x * 4, product2a | (product2b << 16));
                }
            }

            /* Move color matrix forward */
            color[0][0] = color[0][1]; color[0][1] = color[0][2]; color[0][2] = color[0][3];
            color[1][0] = color[1][1]; color[1][1] = color[1][2]; color[1][2] = color[1][3];
            color[2][0] = color[2][1]; color[2][1] = color[2][2]; color[2][2] = color[2][3];
            color[3][0] = color[3][1]; color[3][1] = color[3][2]; color[3][2] = color[3][3];

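            /* Refill the rightmost window column with source pixel x+3 (the
             * next iteration's column 3); near the right edge the previous
             * value is simply kept. */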
            if (x < width - 3) {
                x += 3;
                switch (bpp) {
                case 4:
                    READ_COLOR4(color[0][3], src_line[0], x);
                    READ_COLOR4(color[1][3], src_line[1], x);
                    READ_COLOR4(color[2][3], src_line[2], x);
                    READ_COLOR4(color[3][3], src_line[3], x);
                    break;
                case 3:
                    READ_COLOR3(color[0][3], src_line[0], x);
                    READ_COLOR3(color[1][3], src_line[1], x);
                    READ_COLOR3(color[2][3], src_line[2], x);
                    READ_COLOR3(color[3][3], src_line[3], x);
                    break;
                default: /* case 2 */
                    READ_COLOR2(color[0][3], src_line[0], x);
                    READ_COLOR2(color[1][3], src_line[1], x);
                    READ_COLOR2(color[2][3], src_line[2], x);
                    READ_COLOR2(color[3][3], src_line[3], x);
                }
                x -= 3;
            }
        }

        /* We're done with one line, so we shift the source lines up */
        src_line[0] = src_line[1];
        src_line[1] = src_line[2];
        src_line[2] = src_line[3];

        /* Read next line */
        src_line[3] = src_line[2];
        if (y < height - 3)
            src_line[3] += src_linesize;
    } // y loop

    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR,
        AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
        AV_PIX_FMT_RGB565BE, AV_PIX_FMT_BGR565BE, AV_PIX_FMT_RGB555BE, AV_PIX_FMT_BGR555BE,
        AV_PIX_FMT_RGB565LE, AV_PIX_FMT_BGR565LE, AV_PIX_FMT_RGB555LE, AV_PIX_FMT_BGR555LE,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

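/*
 * config_input() selects the per-format bit masks used by INTERPOLATE() and
 * Q_INTERPOLATE(). For 8-bit-per-channel packed RGB the masks cover whole
 * bytes (0xFE/0x01 per channel); for RGB565 and RGB555 they mark the low one
 * or two bits of each 5/6-bit field (e.g. 0xF7DE clears bits 0, 5 and 11 of a
 * 565 pixel). The big-endian cases set is_be and then fall through to share
 * the little-endian mask setup. The 16-bit masks are replicated into both
 * halves of the 32-bit constant, although only the low half carries a pixel
 * here.
 */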
static int config_input(AVFilterLink *inlink)
{
    Super2xSaIContext *s = inlink->dst->priv;

    s->hi_pixel_mask = 0xFEFEFEFE;
    s->lo_pixel_mask = 0x01010101;
    s->q_hi_pixel_mask = 0xFCFCFCFC;
    s->q_lo_pixel_mask = 0x03030303;
    s->bpp = 4;

    switch (inlink->format) {
    case AV_PIX_FMT_RGB24:
    case AV_PIX_FMT_BGR24:
        s->bpp = 3;
        break;

    case AV_PIX_FMT_RGB565BE:
    case AV_PIX_FMT_BGR565BE:
        s->is_be = 1;
    case AV_PIX_FMT_RGB565LE:
    case AV_PIX_FMT_BGR565LE:
        s->hi_pixel_mask = 0xF7DEF7DE;
        s->lo_pixel_mask = 0x08210821;
        s->q_hi_pixel_mask = 0xE79CE79C;
        s->q_lo_pixel_mask = 0x18631863;
        s->bpp = 2;
        break;

    case AV_PIX_FMT_BGR555BE:
    case AV_PIX_FMT_RGB555BE:
        s->is_be = 1;
    case AV_PIX_FMT_BGR555LE:
    case AV_PIX_FMT_RGB555LE:
        s->hi_pixel_mask = 0x7BDE7BDE;
        s->lo_pixel_mask = 0x04210421;
        s->q_hi_pixel_mask = 0x739C739C;
        s->q_lo_pixel_mask = 0x0C630C63;
        s->bpp = 2;
        break;
    }

    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterLink *inlink = outlink->src->inputs[0];

    outlink->w = inlink->w*2;
    outlink->h = inlink->h*2;

    av_log(inlink->dst, AV_LOG_VERBOSE, "fmt:%s size:%dx%d -> size:%dx%d\n",
           av_get_pix_fmt_name(inlink->format),
           inlink->w, inlink->h, outlink->w, outlink->h);

    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ThreadData td;
    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);
    out->width = outlink->w;
    out->height = outlink->h;

    td.in = in, td.out = out;
    ctx->internal->execute(ctx, super2xsai, &td, NULL, FFMIN(in->height, ff_filter_get_nb_threads(ctx)));

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static const AVFilterPad super2xsai_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad super2xsai_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_super2xsai = {
    .name          = "super2xsai",
    .description   = NULL_IF_CONFIG_SMALL("Scale the input by 2x using the Super2xSaI pixel art algorithm."),
    .priv_size     = sizeof(Super2xSaIContext),
    .query_formats = query_formats,
    .inputs        = super2xsai_inputs,
    .outputs       = super2xsai_outputs,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,
};