FFmpeg  4.4.4
rawdec.c
/*
 * Raw Video Decoder
 * Copyright (c) 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Raw Video Decoder
 */

#include "avcodec.h"
#include "bswapdsp.h"
#include "decode.h"
#include "get_bits.h"
#include "internal.h"
#include "raw.h"
#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"

typedef struct RawVideoContext {
    AVClass *av_class;
    AVBufferRef *palette;
    int frame_size;  /* size of the frame in bytes */
    int flip;
    int is_1_2_4_8_bpp; // 1, 2, 4 and 8 bpp in avi/mov, 1 and 8 bpp in nut
    int is_mono;
    int is_pal8;
    int is_nut_mono;
    int is_nut_pal8;
    int is_yuv2;
    int is_lt_16bpp; // 16bpp pixfmt and bits_per_coded_sample < 16
    int tff;

    BswapDSPContext bbdsp;
    void *bitstream_buf;
    unsigned int bitstream_buf_size;
} RawVideoContext;

static const AVOption options[]={
{"top", "top field first", offsetof(RawVideoContext, tff), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, AV_OPT_FLAG_DECODING_PARAM|AV_OPT_FLAG_VIDEO_PARAM},
{NULL}
};
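
/*
 * Note (not part of the original source): the private "top" option above is
 * normally supplied by API users as a decoder option. An illustrative sketch:
 *
 *     AVDictionary *opts = NULL;
 *     av_dict_set(&opts, "top", "1", 0);   // force top-field-first
 *     avcodec_open2(avctx, codec, &opts);
 */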

static const AVClass rawdec_class = {
    .class_name = "rawdec",
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static av_cold int raw_init_decoder(AVCodecContext *avctx)
{
    RawVideoContext *context = avctx->priv_data;
    const AVPixFmtDescriptor *desc;

    ff_bswapdsp_init(&context->bbdsp);

    if (   avctx->codec_tag == MKTAG('r','a','w',' ')
        || avctx->codec_tag == MKTAG('N','O','1','6'))
        avctx->pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_mov,
                                             avctx->bits_per_coded_sample);
    else if (avctx->codec_tag == MKTAG('W', 'R', 'A', 'W'))
        avctx->pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_avi,
                                             avctx->bits_per_coded_sample);
    else if (avctx->codec_tag && (avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0))
        avctx->pix_fmt = avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, avctx->codec_tag);
    else if (avctx->pix_fmt == AV_PIX_FMT_NONE && avctx->bits_per_coded_sample)
        avctx->pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_avi,
                                             avctx->bits_per_coded_sample);

    desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    if (!desc) {
        av_log(avctx, AV_LOG_ERROR, "Invalid pixel format.\n");
        return AVERROR(EINVAL);
    }

    if (desc->flags & (AV_PIX_FMT_FLAG_PAL | FF_PSEUDOPAL)) {
        context->palette = av_buffer_alloc(AVPALETTE_SIZE);
        if (!context->palette)
            return AVERROR(ENOMEM);
#if FF_API_PSEUDOPAL
        if (desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
            avpriv_set_systematic_pal2((uint32_t*)context->palette->data, avctx->pix_fmt);
#endif
        else {
            memset(context->palette->data, 0, AVPALETTE_SIZE);
            if (avctx->bits_per_coded_sample == 1)
                memset(context->palette->data, 0xff, 4);
        }
    }

    if ((avctx->extradata_size >= 9 &&
         !memcmp(avctx->extradata + avctx->extradata_size - 9, "BottomUp", 9)) ||
        avctx->codec_tag == MKTAG('c','y','u','v') ||
        avctx->codec_tag == MKTAG(3, 0, 0, 0) ||
        avctx->codec_tag == MKTAG('W','R','A','W'))
        context->flip = 1;

    if (avctx->pix_fmt == AV_PIX_FMT_MONOWHITE ||
        avctx->pix_fmt == AV_PIX_FMT_MONOBLACK)
        context->is_mono = 1;
    else if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
        context->is_pal8 = 1;

    if (avctx->codec_tag == MKTAG('B','1','W','0') ||
        avctx->codec_tag == MKTAG('B','0','W','1'))
        context->is_nut_mono = 1;
    else if (avctx->codec_tag == MKTAG('P','A','L',8))
        context->is_nut_pal8 = 1;

    if (avctx->codec_tag == AV_RL32("yuv2") &&
        avctx->pix_fmt == AV_PIX_FMT_YUYV422)
        context->is_yuv2 = 1;

    return 0;
}

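/*
 * Vertical flip without copying: point data[0] at the last row and negate the
 * linesize, so consumers walk the image bottom-up.
 */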
static void flip(AVCodecContext *avctx, AVFrame *frame)
{
    frame->data[0] += frame->linesize[0] * (avctx->height - 1);
    frame->linesize[0] *= -1;
}

/*
 * Scale sample to 16-bit resolution
 */
#define SCALE16(x, bits) (((x) << (16 - (bits))) | ((x) >> (2 * (bits) - 16)))
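/*
 * Worked example (illustrative, not from the original source): the high bits
 * are replicated into the low bits so that full scale maps to full scale.
 * For 10-bit input:
 *     SCALE16(0x3FF, 10) = (0x3FF << 6) | (0x3FF >> 4) = 0xFFC0 | 0x3F = 0xFFFF
 *     SCALE16(0x200, 10) = (0x200 << 6) | (0x200 >> 4) = 0x8000 | 0x20 = 0x8020
 */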

/**
 * Scale buffer to 16 bits per coded sample resolution
 */
#define MKSCALE16(name, r16, w16) \
static void name(AVCodecContext *avctx, uint8_t * dst, const uint8_t *buf, int buf_size, int packed) \
{ \
    int i; \
    if (!packed) { \
        for (i = 0; i + 1 < buf_size; i += 2) \
            w16(dst + i, SCALE16(r16(buf + i), avctx->bits_per_coded_sample)); \
    } else { \
        GetBitContext gb; \
        init_get_bits(&gb, buf, buf_size * 8); \
        for (i = 0; i < avctx->width * avctx->height; i++) { \
            int sample = get_bits(&gb, avctx->bits_per_coded_sample); \
            w16(dst + i*2, SCALE16(sample, avctx->bits_per_coded_sample)); \
        } \
    } \
}

MKSCALE16(scale16be, AV_RB16, AV_WB16)
MKSCALE16(scale16le, AV_RL16, AV_WL16)

static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
                      AVPacket *avpkt)
{
    const AVPixFmtDescriptor *desc;
    RawVideoContext *context = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int linesize_align = 4;
    int stride;
    int res, len;
    int need_copy;

    AVFrame *frame = data;

    if (avctx->width <= 0) {
        av_log(avctx, AV_LOG_ERROR, "width is not set\n");
        return AVERROR_INVALIDDATA;
    }
    if (avctx->height <= 0) {
        av_log(avctx, AV_LOG_ERROR, "height is not set\n");
        return AVERROR_INVALIDDATA;
    }

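    /* Derive the input stride: NUT mono packs 8 pixels per byte, NUT pal8 is
     * one byte per pixel, otherwise the stride is inferred from the packet
     * size (raw video carries one frame per packet). */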
    if (context->is_nut_mono)
        stride = avctx->width / 8 + (avctx->width & 7 ? 1 : 0);
    else if (context->is_nut_pal8)
        stride = avctx->width;
    else
        stride = avpkt->size / avctx->height;

    av_log(avctx, AV_LOG_DEBUG, "PACKET SIZE: %d, STRIDE: %d\n", avpkt->size, stride);

    if (stride == 0 || avpkt->size < stride * avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "Packet too small (%d)\n", avpkt->size);
        return AVERROR_INVALIDDATA;
    }

    desc = av_pix_fmt_desc_get(avctx->pix_fmt);

    if ((avctx->bits_per_coded_sample == 8 || avctx->bits_per_coded_sample == 4 ||
         avctx->bits_per_coded_sample == 2 || avctx->bits_per_coded_sample == 1 ||
         (avctx->bits_per_coded_sample == 0 && (context->is_nut_pal8 || context->is_mono)) ) &&
        (context->is_mono || context->is_pal8) &&
        (!avctx->codec_tag || avctx->codec_tag == MKTAG('r','a','w',' ') ||
         context->is_nut_mono || context->is_nut_pal8)) {
        context->is_1_2_4_8_bpp = 1;
        if (context->is_mono) {
            int row_bytes = avctx->width / 8 + (avctx->width & 7 ? 1 : 0);
            context->frame_size = av_image_get_buffer_size(avctx->pix_fmt,
                                                           FFALIGN(row_bytes, 16) * 8,
                                                           avctx->height, 1);
        } else
            context->frame_size = av_image_get_buffer_size(avctx->pix_fmt,
                                                           FFALIGN(avctx->width, 16),
                                                           avctx->height, 1);
    } else {
        context->is_lt_16bpp = av_get_bits_per_pixel(desc) == 16 && avctx->bits_per_coded_sample > 8 && avctx->bits_per_coded_sample < 16;
        context->frame_size = av_image_get_buffer_size(avctx->pix_fmt, avctx->width,
                                                       avctx->height, 1);
    }
    if (context->frame_size < 0)
        return context->frame_size;

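    /* A private copy of the packet data is needed when the packet is not
     * reference-counted or when samples must be repacked: 1/2/4/8 bpp
     * unpacking, the 'yuv2' chroma adjustment, or expanding <16 bpp samples
     * to 16 bits. Otherwise the frame simply references the packet buffer. */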
    need_copy = !avpkt->buf || context->is_1_2_4_8_bpp || context->is_yuv2 || context->is_lt_16bpp;

    frame->pict_type        = AV_PICTURE_TYPE_I;
    frame->key_frame        = 1;

    res = ff_decode_frame_props(avctx, frame);
    if (res < 0)
        return res;

    frame->pkt_pos      = avctx->internal->last_pkt_props->pos;
    frame->pkt_duration = avctx->internal->last_pkt_props->duration;

    if (context->tff >= 0) {
        frame->interlaced_frame = 1;
        frame->top_field_first  = context->tff;
    }

    if ((res = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
        return res;

    if (need_copy)
        frame->buf[0] = av_buffer_alloc(FFMAX(context->frame_size, buf_size));
    else
        frame->buf[0] = av_buffer_ref(avpkt->buf);
    if (!frame->buf[0])
        return AVERROR(ENOMEM);

    // 1, 2, 4 and 8 bpp in avi/mov, 1 and 8 bpp in nut
    if (context->is_1_2_4_8_bpp) {
        int i, j, row_pix = 0;
        uint8_t *dst = frame->buf[0]->data;
        buf_size = context->frame_size - (context->is_pal8 ? AVPALETTE_SIZE : 0);
        if (avctx->bits_per_coded_sample == 8 || context->is_nut_pal8 || context->is_mono) {
            int pix_per_byte = context->is_mono ? 8 : 1;
            for (i = 0, j = 0; j < buf_size && i<avpkt->size; i++, j++) {
                dst[j] = buf[i];
                row_pix += pix_per_byte;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 16 - (j % 16) - 1;
                    row_pix = 0;
                }
            }
        } else if (avctx->bits_per_coded_sample == 4) {
            for (i = 0, j = 0; 2 * j + 1 < buf_size && i<avpkt->size; i++, j++) {
                dst[2 * j + 0] = buf[i] >> 4;
                dst[2 * j + 1] = buf[i] & 15;
                row_pix += 2;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 8 - (j % 8) - 1;
                    row_pix = 0;
                }
            }
        } else if (avctx->bits_per_coded_sample == 2) {
            for (i = 0, j = 0; 4 * j + 3 < buf_size && i<avpkt->size; i++, j++) {
                dst[4 * j + 0] = buf[i] >> 6;
                dst[4 * j + 1] = buf[i] >> 4 & 3;
                dst[4 * j + 2] = buf[i] >> 2 & 3;
                dst[4 * j + 3] = buf[i] & 3;
                row_pix += 4;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 4 - (j % 4) - 1;
                    row_pix = 0;
                }
            }
        } else {
            av_assert0(avctx->bits_per_coded_sample == 1);
            for (i = 0, j = 0; 8 * j + 7 < buf_size && i<avpkt->size; i++, j++) {
                dst[8 * j + 0] = buf[i] >> 7;
                dst[8 * j + 1] = buf[i] >> 6 & 1;
                dst[8 * j + 2] = buf[i] >> 5 & 1;
                dst[8 * j + 3] = buf[i] >> 4 & 1;
                dst[8 * j + 4] = buf[i] >> 3 & 1;
                dst[8 * j + 5] = buf[i] >> 2 & 1;
                dst[8 * j + 6] = buf[i] >> 1 & 1;
                dst[8 * j + 7] = buf[i] & 1;
                row_pix += 8;
                if (row_pix >= avctx->width) {
                    i += stride - (i % stride) - 1;
                    j += 2 - (j % 2) - 1;
                    row_pix = 0;
                }
            }
        }
        linesize_align = 16;
        buf = dst;
    } else if (context->is_lt_16bpp) {
        uint8_t *dst = frame->buf[0]->data;
        int packed = (avctx->codec_tag & 0xFFFFFF) == MKTAG('B','I','T', 0);
        int swap   =  avctx->codec_tag >> 24;

        if (packed && swap) {
            av_fast_padded_malloc(&context->bitstream_buf, &context->bitstream_buf_size, buf_size);
            if (!context->bitstream_buf)
                return AVERROR(ENOMEM);
            if (swap == 16)
                context->bbdsp.bswap16_buf(context->bitstream_buf, (const uint16_t*)buf, buf_size / 2);
            else if (swap == 32)
                context->bbdsp.bswap_buf(context->bitstream_buf, (const uint32_t*)buf, buf_size / 4);
            else
                return AVERROR_INVALIDDATA;
            buf = context->bitstream_buf;
        }

        if (desc->flags & AV_PIX_FMT_FLAG_BE)
            scale16be(avctx, dst, buf, buf_size, packed);
        else
            scale16le(avctx, dst, buf, buf_size, packed);

        buf = dst;
    } else if (need_copy) {
        memcpy(frame->buf[0]->data, buf, buf_size);
        buf = frame->buf[0]->data;
    }

    if (avctx->codec_tag == MKTAG('A', 'V', '1', 'x') ||
        avctx->codec_tag == MKTAG('A', 'V', 'u', 'p'))
        buf += buf_size - context->frame_size;

    len = context->frame_size - (avctx->pix_fmt==AV_PIX_FMT_PAL8 ? AVPALETTE_SIZE : 0);
    if (buf_size < len && ((avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0) || !need_copy)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid buffer size, packet size %d < expected frame_size %d\n", buf_size, len);
        av_buffer_unref(&frame->buf[0]);
        return AVERROR(EINVAL);
    }

    if ((res = av_image_fill_arrays(frame->data, frame->linesize,
                                    buf, avctx->pix_fmt,
                                    avctx->width, avctx->height, 1)) < 0) {
        av_buffer_unref(&frame->buf[0]);
        return res;
    }

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        buffer_size_t pal_size;
        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE,
                                                     &pal_size);
        int ret;

        if (pal && pal_size != AVPALETTE_SIZE) {
            av_log(avctx, AV_LOG_ERROR, "Palette size %d is wrong\n", pal_size);
            pal = NULL;
        }

        if (!context->palette)
            context->palette = av_buffer_alloc(AVPALETTE_SIZE);
        if (!context->palette) {
            av_buffer_unref(&frame->buf[0]);
            return AVERROR(ENOMEM);
        }
        ret = av_buffer_make_writable(&context->palette);
        if (ret < 0) {
            av_buffer_unref(&frame->buf[0]);
            return ret;
        }

        if (pal) {
            memcpy(context->palette->data, pal, AVPALETTE_SIZE);
            frame->palette_has_changed = 1;
        } else if (context->is_nut_pal8) {
            int vid_size = avctx->width * avctx->height;
            int pal_size = avpkt->size - vid_size;

            if (avpkt->size > vid_size && pal_size <= AVPALETTE_SIZE) {
                pal = avpkt->data + vid_size;
                memcpy(context->palette->data, pal, pal_size);
                frame->palette_has_changed = 1;
            }
        }
    }

    if ((avctx->pix_fmt==AV_PIX_FMT_RGB24    ||
         avctx->pix_fmt==AV_PIX_FMT_BGR24    ||
         avctx->pix_fmt==AV_PIX_FMT_GRAY8    ||
         avctx->pix_fmt==AV_PIX_FMT_RGB555LE ||
         avctx->pix_fmt==AV_PIX_FMT_RGB555BE ||
         avctx->pix_fmt==AV_PIX_FMT_RGB565LE ||
         avctx->pix_fmt==AV_PIX_FMT_MONOWHITE ||
         avctx->pix_fmt==AV_PIX_FMT_MONOBLACK ||
         avctx->pix_fmt==AV_PIX_FMT_PAL8) &&
        FFALIGN(frame->linesize[0], linesize_align) * avctx->height <= buf_size)
        frame->linesize[0] = FFALIGN(frame->linesize[0], linesize_align);

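    /* 'NV12' packets large enough to contain row-padded planes: widen the luma
     * and chroma linesizes to the alignment and advance the chroma pointer
     * past the padded luma plane. */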
    if (avctx->pix_fmt == AV_PIX_FMT_NV12 && avctx->codec_tag == MKTAG('N', 'V', '1', '2') &&
        FFALIGN(frame->linesize[0], linesize_align) * avctx->height +
        FFALIGN(frame->linesize[1], linesize_align) * ((avctx->height + 1) / 2) <= buf_size) {
        int la0 = FFALIGN(frame->linesize[0], linesize_align);
        frame->data[1] += (la0 - frame->linesize[0]) * avctx->height;
        frame->linesize[0] = la0;
        frame->linesize[1] = FFALIGN(frame->linesize[1], linesize_align);
    }

    if ((avctx->pix_fmt == AV_PIX_FMT_PAL8 && buf_size < context->frame_size) ||
        (desc->flags & FF_PSEUDOPAL)) {
        frame->buf[1] = av_buffer_ref(context->palette);
        if (!frame->buf[1]) {
            av_buffer_unref(&frame->buf[0]);
            return AVERROR(ENOMEM);
        }
        frame->data[1] = frame->buf[1]->data;
    }

    if (avctx->pix_fmt == AV_PIX_FMT_BGR24 &&
        ((frame->linesize[0] + 3) & ~3) * avctx->height <= buf_size)
        frame->linesize[0] = (frame->linesize[0] + 3) & ~3;

    if (context->flip)
        flip(avctx, frame);

    if (avctx->codec_tag == MKTAG('Y', 'V', '1', '2') ||
        avctx->codec_tag == MKTAG('Y', 'V', '1', '6') ||
        avctx->codec_tag == MKTAG('Y', 'V', '2', '4') ||
        avctx->codec_tag == MKTAG('Y', 'V', 'U', '9'))
        FFSWAP(uint8_t *, frame->data[1], frame->data[2]);

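    /* 'I420' packets whose planes were laid out with width and height rounded
     * up by one (odd-dimension content): shift the chroma pointers to where
     * the U and V planes actually start in that layout. */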
    if (avctx->codec_tag == AV_RL32("I420") && (avctx->width+1)*(avctx->height+1) * 3/2 == buf_size) {
        frame->data[1] = frame->data[1] + (avctx->width+1)*(avctx->height+1) -avctx->width*avctx->height;
        frame->data[2] = frame->data[2] + ((avctx->width+1)*(avctx->height+1) -avctx->width*avctx->height)*5/4;
    }

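    /* 'yuv2' carries chroma with the opposite signedness convention to
     * YUYV422; toggling the top bit of every chroma byte (the odd offsets)
     * converts it to the expected unsigned representation. */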
    if (avctx->codec_tag == AV_RL32("yuv2") &&
        avctx->pix_fmt == AV_PIX_FMT_YUYV422) {
        int x, y;
        uint8_t *line = frame->data[0];
        for (y = 0; y < avctx->height; y++) {
            for (x = 0; x < avctx->width; x++)
                line[2 * x + 1] ^= 0x80;
            line += frame->linesize[0];
        }
    }

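    /* 'b64a' stores 16-bit components with alpha first; rotating each
     * big-endian 64-bit pixel left by 16 bits moves alpha to the end,
     * matching the RGBA64BE output format negotiated at init. */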
    if (avctx->codec_tag == AV_RL32("b64a") &&
        avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
        uint8_t *dst = frame->data[0];
        uint64_t v;
        int x, y;
        for (y = 0; y < avctx->height; y++) {
            for (x = 0; x >> 3 < avctx->width; x += 8) {
                v = AV_RB64(&dst[x]);
                AV_WB64(&dst[x], v << 16 | v >> 48);
            }
            dst += frame->linesize[0];
        }
    }

    if (avctx->field_order > AV_FIELD_PROGRESSIVE) { /* we have interlaced material flagged in container */
        frame->interlaced_frame = 1;
        if (avctx->field_order == AV_FIELD_TT || avctx->field_order == AV_FIELD_TB)
            frame->top_field_first = 1;
    }

    *got_frame = 1;
    return buf_size;
}

static av_cold int raw_close_decoder(AVCodecContext *avctx)
{
    RawVideoContext *context = avctx->priv_data;

    av_buffer_unref(&context->palette);
    av_freep(&context->bitstream_buf);
    return 0;
}

AVCodec ff_rawvideo_decoder = {
    .name           = "rawvideo",
    .long_name      = NULL_IF_CONFIG_SMALL("raw video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_RAWVIDEO,
    .priv_data_size = sizeof(RawVideoContext),
    .init           = raw_init_decoder,
    .close          = raw_close_decoder,
    .decode         = raw_decode,
    .priv_class     = &rawdec_class,
    .capabilities   = AV_CODEC_CAP_PARAM_CHANGE,
};
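
As a rough usage sketch (not part of rawdec.c): feeding one packet of raw pixels through this decoder with the public libavcodec API might look like the following. The width, height and pixel format are illustrative assumptions and must match the actual input; error handling is abbreviated.

#include <libavcodec/avcodec.h>

/* Minimal sketch: decode one raw YUV420P frame with the rawvideo decoder. */
static int decode_one_raw_frame(const uint8_t *pixels, int size)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_RAWVIDEO);
    AVCodecContext *ctx  = avcodec_alloc_context3(codec);
    AVPacket *pkt        = av_packet_alloc();
    AVFrame *frm         = av_frame_alloc();
    int ret;

    ctx->width   = 320;                 /* illustrative geometry */
    ctx->height  = 240;
    ctx->pix_fmt = AV_PIX_FMT_YUV420P;  /* must match the raw data */

    if ((ret = avcodec_open2(ctx, codec, NULL)) < 0)
        goto end;

    pkt->data = (uint8_t *)pixels;      /* one full frame per packet */
    pkt->size = size;

    if ((ret = avcodec_send_packet(ctx, pkt)) >= 0)
        ret = avcodec_receive_frame(ctx, frm);

end:
    av_frame_free(&frm);
    av_packet_free(&pkt);
    avcodec_free_context(&ctx);
    return ret;
}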