    /* MonochromeContext members: bit-depth specific worker callbacks */
    int (*do_slice)(AVFilterContext *s, void *arg,
                    int jobnr, int nb_jobs);
    int (*clear_uv)(AVFilterContext *s, void *arg,
                    int jobnr, int nb_jobs);
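These callbacks are what filter_frame() hands to the filter's threaded execute helper, one job per slice. The body of filter_frame() is not part of this extract; the following is only a hedged sketch of the usual dispatch pattern:

/* Sketch only (assumption): typical slice-threaded dispatch. Newer trees use
 * ff_filter_execute(); older ones call ctx->internal->execute() directly. */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    MonochromeContext *s = ctx->priv;
    const int nb_jobs = FFMIN(frame->height, ff_filter_get_nb_threads(ctx));

    ff_filter_execute(ctx, s->do_slice, frame, NULL, nb_jobs);  /* rewrite the luma plane */
    ff_filter_execute(ctx, s->clear_uv, frame, NULL, nb_jobs);  /* set chroma to neutral  */

    return ff_filter_frame(ctx->outputs[0], frame);
}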
/* envelope(const float x): piecewise weighting around the knee value beta */
const float beta = 0.6f;

if (x < beta) {
    const float tmp = fabsf(x / beta - 1.f);        /* ... */
} else {
    const float tmp = (1.f - x) / (1.f - beta);     /* ... */
}
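The two return expressions are elided above. As an illustration only, an envelope of this shape is commonly completed with a parabolic rise below the knee and a smooth fall above it; the completion below is an assumption, not taken from this extract:

/* Illustration only (assumed completion, needs <math.h> for fabsf()):
 * rises from 0 at x == 0 to 1 at x == beta, then falls smoothly to 0 at x == 1. */
static float envelope_sketch(const float x)
{
    const float beta = 0.6f;

    if (x < beta) {
        const float tmp = fabsf(x / beta - 1.f);
        return 1.f - tmp * tmp;
    } else {
        const float tmp = (1.f - x) / (1.f - beta);
        return tmp * tmp * (3.f - 2.f * tmp);
    }
}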
/* body of the per-pixel macro shared by both slice functions (the #define line is elided) */
    const int cx = x >> subw;                \
    float y = yptr[x] * imax;                \
    float u = uptr[cx] * imax - .5f;         \
    float v = vptr[cx] * imax - .5f;         \
    float tt, t, ny;                         \
                                             \
    ny = filter(b, r, u, v, size);           \
    tt = envelope(y);                        \
    t = tt + (1.f - tt) * ihigh;             \
    ny = (1.f - t) * y + t * ny * y;
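Read as plain arithmetic, the kernel blends the original luma y toward the colour-filtered luma ny * y, with the blend factor t driven by the luma envelope and the `high` option. A hypothetical helper restating the same math for a single normalized sample (not part of the original file):

/* Hypothetical restatement of the macro above for one sample
 * (y in [0,1], u and v in [-0.5, 0.5]). */
static float monochrome_pixel(float y, float u, float v,
                              float b, float r, float size, float ihigh)
{
    float ny = filter(b, r, u, v, size);  /* chroma-dependent weight from filter() */
    float tt = envelope(y);               /* luma envelope */
    float t  = tt + (1.f - tt) * ihigh;   /* goes from 1 (high = 0) toward tt as high -> 1 */

    return (1.f - t) * y + t * ny * y;    /* blend original luma with the filtered luma */
}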
static int monochrome_slice8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    MonochromeContext *s = ctx->priv;
    AVFrame *frame = arg;
    const int depth = s->depth;
    const int subw = s->subw;
    const int subh = s->subh;
    const float max = (1 << depth) - 1;
    const float imax = 1.f / max;
    /* ... frame width/height elided ... */
    const int slice_start = (height * jobnr) / nb_jobs;
    /* ... slice_end, plane strides and the uint8_t luma row pointer elided ... */
    const float ihigh = 1.f - s->high;
    const float size = 1.f / s->size;
    const float b = s->b * .5f;
    const float r = s->r * .5f;

    for (int y = slice_start; y < slice_end; y++) {
        const int cy = y >> subh;
        /* ... chroma row pointers for this line elided ... */
        for (int x = 0; x < width; x++) {
            /* ... per-pixel kernel (macro above), clipped store to the luma plane ... */
        }
    }

    return 0;
}
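The slice bounds follow the stock libavfilter pattern: job jobnr of nb_jobs handles the rows from height * jobnr / nb_jobs up to, but not including, height * (jobnr + 1) / nb_jobs. The slice_end line itself is elided from this extract, but the loop above uses it; a small illustration of the split, using a hypothetical helper:

/* Illustration only (hypothetical helper, needs <stdio.h>): each of nb_jobs
 * workers gets a contiguous, non-overlapping band of rows. */
static void print_slices(int height, int nb_jobs)
{
    for (int jobnr = 0; jobnr < nb_jobs; jobnr++) {
        const int slice_start = (height *  jobnr)      / nb_jobs;
        const int slice_end   = (height * (jobnr + 1)) / nb_jobs;
        printf("job %d: rows [%d, %d)\n", jobnr, slice_start, slice_end);
    }
}
/* print_slices(1080, 4) -> [0,270) [270,540) [540,810) [810,1080) */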
static int monochrome_slice16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    MonochromeContext *s = ctx->priv;
    AVFrame *frame = arg;
    const int depth = s->depth;
    const int subw = s->subw;
    const int subh = s->subh;
    const float max = (1 << depth) - 1;
    const float imax = 1.f / max;
    /* ... frame width/height elided ... */
    const int slice_start = (height * jobnr) / nb_jobs;
    /* ... slice_end and the per-plane strides elided ... */
    uint16_t *yptr = (uint16_t *)frame->data[0] + slice_start * ylinesize;
    const float ihigh = 1.f - s->high;
    const float size = 1.f / s->size;
    const float b = s->b * .5f;
    const float r = s->r * .5f;

    for (int y = slice_start; y < slice_end; y++) {
        const int cy = y >> subh;
        uint16_t *uptr = (uint16_t *)frame->data[1] + cy * ulinesize;
        uint16_t *vptr = (uint16_t *)frame->data[2] + cy * vlinesize;

        for (int x = 0; x < width; x++) {
            /* ... per-pixel kernel (macro above), clipped store to the luma plane ... */
        }
    }

    return 0;
}
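Because frame->data[] is cast to uint16_t * before the row offset is added, the elided stride variables in the 16-bit paths have to be measured in samples rather than bytes. A hedged guess at those elided lines:

/* Assumption (not shown in this extract): strides converted from bytes
 * (AVFrame.linesize[]) to uint16_t elements before the pointer math above. */
const int ylinesize = frame->linesize[0] / 2;
const int ulinesize = frame->linesize[1] / 2;
const int vlinesize = frame->linesize[2] / 2;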
static int clear_slice8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    MonochromeContext *s = ctx->priv;
    AVFrame *frame = arg;
    const int depth = s->depth;
    const int half = 1 << (depth - 1);
    const int subw = s->subw;
    const int subh = s->subh;
    /* ... chroma plane width/height elided ... */
    const int slice_start = (height * jobnr) / nb_jobs;
    /* ... slice_end and the chroma strides elided ... */

    for (int y = slice_start; y < slice_end; y++) {
        /* ... fill one U and one V row of this slice with half ... */
    }

    return 0;
}
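In the clear functions the loop bounds are chroma-plane dimensions, i.e. the frame size right-shifted by subw/subh with rounding up so that odd frame sizes are fully covered. A hedged guess at the shape of the elided width/height lines:

/* Assumption (not shown in this extract): AV_CEIL_RSHIFT rounds the shift up,
 * e.g. a 1919x1079 4:2:0 frame -> 960x540 chroma plane. */
const int width  = AV_CEIL_RSHIFT(frame->width,  subw);
const int height = AV_CEIL_RSHIFT(frame->height, subh);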
static int clear_slice16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    MonochromeContext *s = ctx->priv;
    AVFrame *frame = arg;
    const int depth = s->depth;
    const int half = 1 << (depth - 1);
    const int subw = s->subw;
    const int subh = s->subh;
    /* ... chroma plane width/height elided ... */
    const int slice_start = (height * jobnr) / nb_jobs;
    /* ... slice_end and the chroma strides elided ... */

    for (int y = slice_start; y < slice_end; y++) {
        uint16_t *uptr = (uint16_t *)frame->data[1] + y * ulinesize;
        uint16_t *vptr = (uint16_t *)frame->data[2] + y * vlinesize;

        for (int x = 0; x < width; x++) {
            /* ... uptr[x] = half; vptr[x] = half; ... */
        }
    }

    return 0;
}
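Writing half = 1 << (depth - 1) into both chroma planes sets them to the neutral "no colour" value for the given bit depth, which is what actually makes the output monochrome. For instance:

/* Neutral chroma value per bit depth:
 *   depth  8 -> half = 128
 *   depth 10 -> half = 512
 *   depth 12 -> half = 2048 */
const int half = 1 << (depth - 1);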
static av_cold int config_input(AVFilterLink *inlink)
{
    MonochromeContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    s->depth = desc->comp[0].depth;
    /* ... do_slice / clear_uv callback selection elided ... */
    s->subw = desc->log2_chroma_w;
    s->subh = desc->log2_chroma_h;

    return 0;
}
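The elided assignments in config_input() are where the bit-depth specific workers get bound to the do_slice/clear_uv callbacks. A hedged sketch of that selection:

/* Sketch (assumption): pick the 8- or 16-bit implementations shown above. */
s->do_slice = s->depth <= 8 ? monochrome_slice8 : monochrome_slice16;
s->clear_uv = s->depth <= 8 ? clear_slice8      : clear_slice16;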
#define OFFSET(x) offsetof(MonochromeContext, x)
#define VF AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
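OFFSET() maps an option name onto a MonochromeContext field and VF marks it as a runtime-settable video filtering parameter. A hypothetical monochrome_options[] entry built from them (the name, default and range here are illustrative, not taken from this extract):

/* Illustrative entry only; the real table is monochrome_options[] (not shown here). */
{ "size", "set the color filter size", OFFSET(size),
  AV_OPT_TYPE_FLOAT, { .dbl = 1 }, .1, 10, VF },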
AVFilter ff_vf_monochrome = {
    .name       = "monochrome",
    /* ... */
    .priv_class = &monochrome_class,
    /* ... */
};
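Only .name and .priv_class survive in this extract; the other fields of ff_vf_monochrome are elided. A hedged sketch of a complete initializer, in which every value beyond those two fields is an assumption:

/* Sketch only: every field except .name and .priv_class is an assumption. */
AVFilter ff_vf_monochrome = {
    .name            = "monochrome",
    .description     = NULL_IF_CONFIG_SMALL("Convert video to gray using custom color filter."),
    .priv_size       = sizeof(MonochromeContext),
    .priv_class      = &monochrome_class,
    .query_formats   = query_formats,
    .inputs          = monochrome_inputs,
    .outputs         = monochrome_outputs,
    .process_command = ff_filter_process_command,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};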
Symbols defined in this file (libavfilter/vf_monochrome.c):

AVFilter ff_vf_monochrome
AVFILTER_DEFINE_CLASS(monochrome)
static const AVOption monochrome_options[]
static const AVFilterPad monochrome_inputs[]
static const AVFilterPad monochrome_outputs[]
static float envelope(const float x)
static float filter(float b, float r, float u, float v, float size)
static int monochrome_slice8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
static int monochrome_slice16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
static int clear_slice8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
static int clear_slice16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
static av_cold int query_formats(AVFilterContext *ctx)
static av_cold int config_input(AVFilterLink *inlink)