const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
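/* ff_hevc_pel_weight maps a prediction block width (2..64, the only widths
 * HEVC allows) to the index used to pick the matching qpel/epel DSP function. */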
    int log2_min_cb_size = sps->log2_min_cb_size;
    int pic_size_in_ctb  = ((width  >> log2_min_cb_size) + 1) *
                           ((height >> log2_min_cb_size) + 1);
    int ctb_count        = sps->ctb_width * sps->ctb_height;
    int min_pu_size      = sps->min_pu_width * sps->min_pu_height;

    s->bs_width  = (width  >> 2) + 1;
    s->bs_height = (height >> 2) + 1;

    if (!s->sao || !s->deblock)

    if (!s->skip_flag || !s->tab_ct_depth)

    if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)

                                          sizeof(*s->tab_slice_address));
                                  sizeof(*s->qp_y_tab));
    if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)

    if (!s->horizontal_bs || !s->vertical_bs)

    if (!s->tab_mvf_pool || !s->rpl_tab_pool)
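/* pic_arrays_init() (fragment above): the per-picture arrays are sized from the
 * SPS geometry -- CTB count, minimum CB/PU/TU grids and the 4x4-granularity
 * deblocking boundary-strength maps -- and each group of allocations is
 * checked before the next one is attempted. */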
    uint8_t luma_weight_l0_flag[16];
    uint8_t chroma_weight_l0_flag[16];
    uint8_t luma_weight_l1_flag[16];
    uint8_t chroma_weight_l1_flag[16];
    int luma_log2_weight_denom;

    if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) {
        av_log(s->avctx, AV_LOG_ERROR,
               "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom);

    s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3);
    if (s->ps.sps->chroma_format_idc != 0) {
        int64_t chroma_log2_weight_denom = luma_log2_weight_denom + (int64_t)get_se_golomb(gb);
        if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "chroma_log2_weight_denom %"PRId64" is invalid\n", chroma_log2_weight_denom);

        s->sh.chroma_log2_weight_denom = chroma_log2_weight_denom;

    for (i = 0; i < s->sh.nb_refs[L0]; i++) {
        if (!luma_weight_l0_flag[i]) {
            s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
            s->sh.luma_offset_l0[i] = 0;

    if (s->ps.sps->chroma_format_idc != 0) {
        for (i = 0; i < s->sh.nb_refs[L0]; i++)
        for (i = 0; i < s->sh.nb_refs[L0]; i++)
            chroma_weight_l0_flag[i] = 0;

    for (i = 0; i < s->sh.nb_refs[L0]; i++) {
        if (luma_weight_l0_flag[i]) {
            if ((int8_t)delta_luma_weight_l0 != delta_luma_weight_l0)
            s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;

        if (chroma_weight_l0_flag[i]) {
            for (j = 0; j < 2; j++) {
                if ((int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0
                    || delta_chroma_offset_l0 < -(1 << 17) || delta_chroma_offset_l0 > (1 << 17)) {

                s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
                s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
                                                        >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);

            s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
            s->sh.chroma_offset_l0[i][0] = 0;
            s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
            s->sh.chroma_offset_l0[i][1] = 0;

    for (i = 0; i < s->sh.nb_refs[L1]; i++) {
        if (!luma_weight_l1_flag[i]) {
            s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
            s->sh.luma_offset_l1[i] = 0;

    if (s->ps.sps->chroma_format_idc != 0) {
        for (i = 0; i < s->sh.nb_refs[L1]; i++)
        for (i = 0; i < s->sh.nb_refs[L1]; i++)
            chroma_weight_l1_flag[i] = 0;

    for (i = 0; i < s->sh.nb_refs[L1]; i++) {
        if (luma_weight_l1_flag[i]) {
            if ((int8_t)delta_luma_weight_l1 != delta_luma_weight_l1)
            s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;

        if (chroma_weight_l1_flag[i]) {
            for (j = 0; j < 2; j++) {
                if ((int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1
                    || delta_chroma_offset_l1 < -(1 << 17) || delta_chroma_offset_l1 > (1 << 17)) {

                s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
                s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
                                                        >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);

            s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
            s->sh.chroma_offset_l1[i][0] = 0;
            s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
            s->sh.chroma_offset_l1[i][1] = 0;
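/* pred_weight_table() (fragment above): explicit weighted-prediction parameters
 * for both reference lists. When the per-reference weight flag is 0, the weight
 * defaults to 1 << log2_weight_denom and the offset to 0 (no weighting). The
 * chroma offset is rebuilt as in the spec,
 *   offset = Clip3(-128, 127, delta - ((128 * weight) >> denom) + 128),
 * and the parsed deltas are range-checked so out-of-range streams are rejected
 * instead of silently wrapping. */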
    int max_poc_lsb     = 1 << sps->log2_max_poc_lsb;
    int prev_delta_msb  = 0;
    unsigned int nb_sps = 0, nb_sh;

    if (!sps->long_term_ref_pics_present_flag)

    if (sps->num_long_term_ref_pics_sps > 0)

        if (nb_sps > sps->num_long_term_ref_pics_sps)

            if (sps->num_long_term_ref_pics_sps > 1)

            rps->poc[i]  = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
            rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];

            if (i && i != nb_sps)
                delta += prev_delta_msb;

            poc = rps->poc[i] + s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;

            prev_delta_msb = delta;
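/* decode_lt_rps() (fragment above): long-term reference picture set. Entries
 * either index the long-term pictures signalled in the SPS or are coded in the
 * slice header; when an MSB delta is present it accumulates across consecutive
 * entries, and the full POC is reconstructed as
 *   poc_lsb + current_poc - delta * MaxPocLsb - slice_pic_order_cnt_lsb. */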
    unsigned int num = 0, den = 0;

    avctx->width  = sps->width  - ow->left_offset - ow->right_offset;
    avctx->height = sps->height - ow->top_offset  - ow->bottom_offset;

    avctx->profile = sps->ptl.general_ptl.profile_idc;
    avctx->level   = sps->ptl.general_ptl.level_idc;

    if (sps->vui.video_signal_type_present_flag)

    if (sps->vui.colour_description_present_flag) {

    if (sps->chroma_format_idc == 1) {
        if (sps->vui.chroma_loc_info_present_flag) {
            if (sps->vui.chroma_sample_loc_type_top_field <= 5)

    if (vps->vps_timing_info_present_flag) {
        num = vps->vps_num_units_in_tick;
        den = vps->vps_time_scale;
    } else if (sps->vui.vui_timing_info_present_flag) {
        num = sps->vui.vui_num_units_in_tick;
        den = sps->vui.vui_time_scale;

    if (num != 0 && den != 0)

    if (s->sei.a53_caption.buf_ref)

    if (s->sei.alternative_transfer.present &&

        avctx->color_trc = s->sei.alternative_transfer.preferred_transfer_characteristics;
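/* export_stream_params() (fragment above): pushes SPS/VPS/VUI values out to the
 * AVCodecContext -- cropped dimensions, profile/level, colour description and
 * chroma sample location, and the framerate (VPS timing information takes
 * precedence over VUI timing) -- plus a few SEI-driven overrides such as the
 * alternative transfer characteristics. */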
#define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
                     CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
                     CONFIG_HEVC_NVDEC_HWACCEL + \
                     CONFIG_HEVC_VAAPI_HWACCEL + \
                     CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
                     CONFIG_HEVC_VDPAU_HWACCEL)

    switch (sps->pix_fmt) {
#if CONFIG_HEVC_DXVA2_HWACCEL
#if CONFIG_HEVC_D3D11VA_HWACCEL
#if CONFIG_HEVC_VAAPI_HWACCEL
#if CONFIG_HEVC_VDPAU_HWACCEL
#if CONFIG_HEVC_NVDEC_HWACCEL
#if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
#if CONFIG_HEVC_DXVA2_HWACCEL
#if CONFIG_HEVC_D3D11VA_HWACCEL
#if CONFIG_HEVC_VAAPI_HWACCEL
#if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
#if CONFIG_HEVC_VDPAU_HWACCEL
#if CONFIG_HEVC_NVDEC_HWACCEL
#if CONFIG_HEVC_VDPAU_HWACCEL
#if CONFIG_HEVC_NVDEC_HWACCEL
#if CONFIG_HEVC_VAAPI_HWACCEL
#if CONFIG_HEVC_VDPAU_HWACCEL
#if CONFIG_HEVC_NVDEC_HWACCEL

    *fmt++ = sps->pix_fmt;
    for (i = 0; i < 3; i++) {

    if (sps->sao_enabled && !s->avctx->hwaccel) {
        int c_count = (sps->chroma_format_idc != 0) ? 3 : 1;

        for (c_idx = 0; c_idx < c_count; c_idx++) {
            int w = sps->width  >> sps->hshift[c_idx];
            int h = sps->height >> sps->vshift[c_idx];
            s->sao_pixel_buffer_h[c_idx] =
            s->sao_pixel_buffer_v[c_idx] =
            if (!s->sao_pixel_buffer_h[c_idx] ||
                !s->sao_pixel_buffer_v[c_idx])

    s->ps.vps = (HEVCVPS*)s->ps.vps_list[s->ps.sps->vps_id]->data;

    for (i = 0; i < 3; i++) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Two slices reporting being the first in the same frame.\n");

        s->seq_decode = (s->seq_decode + 1) & 0xff;

        if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) {
            const HEVCSPS *last_sps = s->ps.sps;

                if (sps->width != last_sps->width || sps->height != last_sps->height ||
                    sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering !=
                    last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering)

            s->seq_decode = (s->seq_decode + 1) & 0xff;

        int slice_address_length;

        if (s->ps.pps->dependent_slice_segments_enabled_flag)
                                            s->ps.sps->ctb_height);
                   "Invalid slice segment address: %u.\n",

        s->slice_initialized = 0;

        s->slice_initialized = 0;

        for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)

        if (s->ps.pps->output_flag_present_flag)

        if (s->ps.sps->separate_colour_plane_flag)

                       "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);

            int numbits, rps_idx;

            if (!s->ps.sps->nb_st_rps) {

            rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;

        if (s->ps.sps->sps_temporal_mvp_enabled_flag)

        s->sh.short_term_rps = NULL;

        if (s->ps.sps->sao_enabled) {
            if (s->ps.sps->chroma_format_idc) {

            sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
                sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;

            if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {

            if (s->ps.pps->cabac_init_present_flag)

                       "Invalid collocated_ref_idx: %d.\n",

                   "Invalid number of merging MVP candidates: %d.\n",

        if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {

        if (s->ps.pps->chroma_qp_offset_list_enabled_flag)

        if (s->ps.pps->deblocking_filter_control_present_flag) {
            int deblocking_filter_override_flag = 0;

            if (s->ps.pps->deblocking_filter_override_enabled_flag)
                deblocking_filter_override_flag = get_bits1(gb);

            if (deblocking_filter_override_flag) {
                if (beta_offset_div2 < -6 || beta_offset_div2 > 6 ||
                    tc_offset_div2   < -6 || tc_offset_div2   > 6) {
                           "Invalid deblock filter offsets: %d, %d\n",
                           beta_offset_div2, tc_offset_div2);

        if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&

    } else if (!s->slice_initialized) {

    if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);

            if (offset_len < 1 || offset_len > 32) {

        if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) {
            s->enable_parallel_tiles = 0;
            s->threads_number = 1;
            s->enable_parallel_tiles = 0;
            s->enable_parallel_tiles = 0;

    if (s->ps.pps->slice_header_extension_present_flag) {
        for (i = 0; i < length; i++)

        sh->slice_qp < -s->ps.sps->qp_bd_offset) {
               "The slice_qp %d is outside the valid range "
               -s->ps.sps->qp_bd_offset);

    if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {

    s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;

    if (!s->ps.pps->cu_qp_delta_enabled_flag)
        s->HEVClc->qp_y = s->sh.slice_qp;

    s->slice_initialized = 1;
    s->HEVClc->tu.cu_qp_offset_cb = 0;
    s->HEVClc->tu.cu_qp_offset_cr = 0;
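/* hls_slice_header() (fragment above): parses the slice segment header --
 * parameter-set activation and resolution-change detection, slice address,
 * reference list sizes, RPS selection, SAO/deblocking controls, chroma QP
 * offsets and WPP/tile entry points -- validating every range-limited syntax
 * element before it is used, then seeds the local context (first_qp_group,
 * qp_y, cu_qp_offset_cb/cr) for the slice data that follows. */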
#define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])

#define SET_SAO(elem, value)                            \
    if (!sao_merge_up_flag && !sao_merge_left_flag)     \
    else if (sao_merge_left_flag)                       \
        sao->elem = CTB(s->sao, rx-1, ry).elem;         \
    else if (sao_merge_up_flag)                         \
        sao->elem = CTB(s->sao, rx, ry-1).elem;         \
    int sao_merge_left_flag = 0;
    int sao_merge_up_flag   = 0;

    if (s->sh.slice_sample_adaptive_offset_flag[0] ||
        s->sh.slice_sample_adaptive_offset_flag[1]) {

        if (ry > 0 && !sao_merge_left_flag) {

    for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
        int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
                                                 s->ps.pps->log2_sao_offset_scale_chroma;

        if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {

        for (i = 0; i < 4; i++)

        for (i = 0; i < 4; i++) {

        } else if (c_idx != 2) {

        for (i = 0; i < 4; i++) {

            sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
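/* hls_sao_param() (fragment above): per-CTB SAO parameters. The SET_SAO macro
 * above either decodes a value or copies it from the left/up neighbour when the
 * corresponding merge flag is set; the decoded offsets are finally scaled by
 * the PPS log2_sao_offset_scale values used for high bit depths. */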
    if (log2_res_scale_abs_plus1 != 0) {
                               (1 - 2 * res_scale_sign_flag);
                              int xBase, int yBase, int cb_xBase, int cb_yBase,
                              int log2_cb_size, int log2_trafo_size,
                              int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)

    const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];

        int trafo_size = 1 << log2_trafo_size;

        s->hpc.intra_pred[log2_trafo_size - 2](s, x0, y0, 0);

    if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
        (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {

        int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
                         (s->ps.sps->chroma_format_idc == 2 &&
                          (cbf_cb[1] || cbf_cr[1]));

                       "The cu_qp_delta %d is outside the valid range "
                       -(26 + s->ps.sps->qp_bd_offset / 2),
                        (25 + s->ps.sps->qp_bd_offset / 2));

        if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma &&

            if (cu_chroma_qp_offset_flag) {
                int cu_chroma_qp_offset_idx = 0;
                if (s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) {
                           "cu_chroma_qp_offset_idx not yet tested.\n");

        if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
            int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
            int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
            lc->tu.cross_pf = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma &&

            for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
                    s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 1);
                                                log2_trafo_size_c, scan_idx_c, 1);
                    ptrdiff_t stride = s->frame->linesize[1];
                    int hshift = s->ps.sps->hshift[1];
                    int vshift = s->ps.sps->vshift[1];
                    int size = 1 << log2_trafo_size_c;
                                          ((x0 >> hshift) << s->ps.sps->pixel_shift)];
                    s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);

            for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
                    s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 2);
                                                log2_trafo_size_c, scan_idx_c, 2);
                    ptrdiff_t stride = s->frame->linesize[2];
                    int hshift = s->ps.sps->hshift[2];
                    int vshift = s->ps.sps->vshift[2];
                    int size = 1 << log2_trafo_size_c;
                                          ((x0 >> hshift) << s->ps.sps->pixel_shift)];
                    s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);

        } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) {
            int trafo_size_h = 1 << (log2_trafo_size + 1);
            int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
            for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
                                                trafo_size_h, trafo_size_v);
                    s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 1);
                                                log2_trafo_size, scan_idx_c, 1);
            for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
                                                trafo_size_h, trafo_size_v);
                    s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 2);
                                                log2_trafo_size, scan_idx_c, 2);

        if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) {
            int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
            int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
            s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 1);
            s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 2);
            if (s->ps.sps->chroma_format_idc == 2) {
                                                trafo_size_h, trafo_size_v);
                s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 1);
                s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 2);
        } else if (blk_idx == 3) {
            int trafo_size_h = 1 << (log2_trafo_size + 1);
            int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
                                                trafo_size_h, trafo_size_v);
            s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 1);
            s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 2);
            if (s->ps.sps->chroma_format_idc == 2) {
                                                trafo_size_h, trafo_size_v);
                s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 1);
                s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 2);
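/* hls_transform_unit() (fragment above): decodes the residual for one TU. Luma
 * intra prediction runs first, QP deltas and per-CU chroma QP offsets are
 * parsed once per quantisation group, then each chroma plane is predicted and
 * its residual added -- twice per plane for 4:2:2, where one luma TB covers two
 * chroma TBs, and once for the whole 8x8 area at blk_idx == 3 when the luma TB
 * is 4x4. */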
    int cb_size          = 1 << log2_cb_size;
    int log2_min_pu_size = s->ps.sps->log2_min_pu_size;

    int min_pu_width     = s->ps.sps->min_pu_width;
    int x_end = FFMIN(x0 + cb_size, s->ps.sps->width);
    int y_end = FFMIN(y0 + cb_size, s->ps.sps->height);

    for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
        for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
            s->is_pcm[i + j * min_pu_width] = 2;
                              int xBase, int yBase, int cb_xBase, int cb_yBase,
                              int log2_cb_size, int log2_trafo_size,
                              int trafo_depth, int blk_idx,
                              const int *base_cbf_cb, const int *base_cbf_cr)

    cbf_cb[0] = base_cbf_cb[0];
    cbf_cb[1] = base_cbf_cb[1];
    cbf_cr[0] = base_cbf_cr[0];
    cbf_cr[1] = base_cbf_cr[1];

        if (trafo_depth == 1) {
            if (s->ps.sps->chroma_format_idc == 3) {

    if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
        log2_trafo_size >  s->ps.sps->log2_min_tb_size    &&
        trafo_depth     < lc->cu.max_trafo_depth          &&

        int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&

        split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||

    if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
        if (trafo_depth == 0 || cbf_cb[0]) {
            if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {

        if (trafo_depth == 0 || cbf_cr[0]) {
            if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {

    if (split_transform_flag) {
        const int trafo_size_split = 1 << (log2_trafo_size - 1);
        const int x1 = x0 + trafo_size_split;
        const int y1 = y0 + trafo_size_split;

#define SUBDIVIDE(x, y, idx)                                                    \
    ret = hls_transform_tree(s, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size, \
                             log2_trafo_size - 1, trafo_depth + 1, idx,         \

        int min_tu_size      = 1 << s->ps.sps->log2_min_tb_size;
        int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
        int min_tu_width     = s->ps.sps->min_tb_width;

            cbf_cb[0] || cbf_cr[0] ||
            (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {

                                     log2_cb_size, log2_trafo_size,
                                     blk_idx, cbf_luma, cbf_cb, cbf_cr);

        for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
            for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
                int x_tu = (x0 + j) >> log2_min_tu_size;
                int y_tu = (y0 + i) >> log2_min_tu_size;
                s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;

        if (!s->sh.disable_deblocking_filter_flag) {
            if (s->ps.pps->transquant_bypass_enable_flag &&
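/* hls_transform_tree() (fragment above): recursive residual quadtree walk.
 * Chroma CBFs are inherited from the parent level, split_transform_flag is
 * inferred when the size bounds or an inter split force it, SUBDIVIDE recurses
 * into the four quadrants, and leaves call hls_transform_unit() and then mark
 * cbf_luma and the deblocking boundary strengths for the covered min-TU grid. */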
    int cb_size   = 1 << log2_cb_size;
    ptrdiff_t stride0 = s->frame->linesize[0];
    ptrdiff_t stride1 = s->frame->linesize[1];
    ptrdiff_t stride2 = s->frame->linesize[2];
    uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
    uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
    uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];

    int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
                 (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
                  ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
                 s->ps.sps->pcm.bit_depth_chroma;

    if (!s->sh.disable_deblocking_filter_flag)

    s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
    if (s->ps.sps->chroma_format_idc) {
        s->hevcdsp.put_pcm(dst1, stride1,
                           cb_size >> s->ps.sps->hshift[1],
                           cb_size >> s->ps.sps->vshift[1],
                           &gb, s->ps.sps->pcm.bit_depth_chroma);
        s->hevcdsp.put_pcm(dst2, stride2,
                           cb_size >> s->ps.sps->hshift[2],
                           cb_size >> s->ps.sps->vshift[2],
                           &gb, s->ps.sps->pcm.bit_depth_chroma);
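/* hls_pcm_sample() (fragment above): raw PCM coding units. The payload size in
 * bits is the luma area times pcm.bit_depth plus both chroma areas times
 * pcm.bit_depth_chroma; the samples are copied straight into the frame with
 * hevcdsp.put_pcm(), bypassing prediction and transform entirely. */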
                    int block_w, int block_h, int luma_weight, int luma_offset)

    ptrdiff_t srcstride = ref->linesize[0];
    int pic_width       = s->ps.sps->width;
    int pic_height      = s->ps.sps->height;

    int weight_flag     = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
                          (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);

    x_off += mv->x >> 2;
    y_off += mv->y >> 2;
    src   += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));

                                 edge_emu_stride, srcstride,
                                 pic_width, pic_height);

        srcstride = edge_emu_stride;

        s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
                                                      block_h, mx, my, block_w);

        s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride,
                                                        block_h, s->sh.luma_log2_weight_denom,
                                                        luma_weight, luma_offset, mx, my, block_w);
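/* luma_mc_uni() (fragment above): uni-directional luma motion compensation.
 * The MV is in quarter-pel units, so mv >> 2 gives the integer offset and the
 * low two bits give the fractional phase passed to the qpel filter; blocks that
 * reach outside the picture go through the edge emulation buffer, and the
 * weighted DSP variant is selected when explicit weighted prediction is
 * enabled for the slice type. */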
                       AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
                       int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)

    ptrdiff_t src0stride = ref0->linesize[0];
    ptrdiff_t src1stride = ref1->linesize[0];
    int pic_width        = s->ps.sps->width;
    int pic_height       = s->ps.sps->height;
    int mx0              = mv0->x & 3;
    int my0              = mv0->y & 3;
    int mx1              = mv1->x & 3;
    int my1              = mv1->y & 3;
    int weight_flag      = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
                           (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
    int x_off0           = x_off + (mv0->x >> 2);
    int y_off0           = y_off + (mv0->y >> 2);
    int x_off1           = x_off + (mv1->x >> 2);
    int y_off1           = y_off + (mv1->y >> 2);

    uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
    uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);

                                 edge_emu_stride, src0stride,
                                 pic_width, pic_height);

        src0stride = edge_emu_stride;

                                 edge_emu_stride, src1stride,
                                 pic_width, pic_height);

        src1stride = edge_emu_stride;

    s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride,
                                                block_h, mx0, my0, block_w);

        s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
                                                       block_h, mx1, my1, block_w);

        s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
                                                         block_h, s->sh.luma_log2_weight_denom,
                                                         s->sh.luma_weight_l0[current_mv->ref_idx[0]],
                                                         s->sh.luma_weight_l1[current_mv->ref_idx[1]],
                                                         s->sh.luma_offset_l0[current_mv->ref_idx[0]],
                                                         s->sh.luma_offset_l1[current_mv->ref_idx[1]],
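/* luma_mc_bi() (fragment above): bi-directional luma motion compensation. The
 * L0 prediction is interpolated into the intermediate buffer lc->tmp, then the
 * L1 call averages it with the L1 prediction, or applies the per-list weights
 * and offsets when weighted bi-prediction is in use. */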
                          ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist,
                          int x_off, int y_off, int block_w, int block_h,
                          struct MvField *current_mv, int chroma_weight, int chroma_offset)

    int pic_width   = s->ps.sps->width  >> s->ps.sps->hshift[1];
    int pic_height  = s->ps.sps->height >> s->ps.sps->vshift[1];
    const Mv *mv    = &current_mv->mv[reflist];
    int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
                      (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);

    int hshift      = s->ps.sps->hshift[1];
    int vshift      = s->ps.sps->vshift[1];

    intptr_t _mx    = mx << (1 - hshift);
    intptr_t _my    = my << (1 - vshift);

    x_off += mv->x >> (2 + hshift);
    y_off += mv->y >> (2 + vshift);
    src0  += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));

                             (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
                                 edge_emu_stride, srcstride,
                                 pic_width, pic_height);

        srcstride = edge_emu_stride;

        s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
                                                      block_h, _mx, _my, block_w);

        s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
                                                        block_h, s->sh.chroma_log2_weight_denom,
                                                        chroma_weight, chroma_offset, _mx, _my, block_w);
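/* chroma_mc_uni() (fragment above): uni-directional chroma motion compensation.
 * The luma MV is reused: the integer part is mv >> (2 + chroma shift) and the
 * remaining fractional bits are rescaled (mx << (1 - hshift)) so the epel
 * filter always receives a consistent eighth-pel phase regardless of the
 * chroma subsampling. */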
                         int x_off, int y_off, int block_w, int block_h,
                         struct MvField *current_mv, int cidx)

    ptrdiff_t src1stride = ref0->linesize[cidx+1];
    ptrdiff_t src2stride = ref1->linesize[cidx+1];
    int weight_flag      = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
                           (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
    int pic_width        = s->ps.sps->width  >> s->ps.sps->hshift[1];
    int pic_height       = s->ps.sps->height >> s->ps.sps->vshift[1];
    Mv *mv0              = &current_mv->mv[0];
    Mv *mv1              = &current_mv->mv[1];
    int hshift = s->ps.sps->hshift[1];
    int vshift = s->ps.sps->vshift[1];

    intptr_t _mx0 = mx0 << (1 - hshift);
    intptr_t _my0 = my0 << (1 - vshift);
    intptr_t _mx1 = mx1 << (1 - hshift);
    intptr_t _my1 = my1 << (1 - vshift);

    int x_off0 = x_off + (mv0->x >> (2 + hshift));
    int y_off0 = y_off + (mv0->y >> (2 + vshift));
    int x_off1 = x_off + (mv1->x >> (2 + hshift));
    int y_off1 = y_off + (mv1->y >> (2 + vshift));

    src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
    src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);

                             (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
                                 edge_emu_stride, src1stride,
                                 pic_width, pic_height);

        src1stride = edge_emu_stride;

                             (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
                                 edge_emu_stride, src2stride,
                                 pic_width, pic_height);

        src2stride = edge_emu_stride;

    s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride,
                                                block_h, _mx0, _my0, block_w);

        s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
                                                       src2, src2stride, lc->tmp,
                                                       block_h, _mx1, _my1, block_w);

        s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
                                                         src2, src2stride, lc->tmp,
                                                         s->sh.chroma_log2_weight_denom,
                                                         s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx],
                                                         s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx],
                                                         s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx],
                                                         s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx],
                                                         _mx1, _my1, block_w);
                                  int nPbH, int log2_cb_size, int part_idx,

    if (inter_pred_idc != PRED_L1) {
        if (s->sh.nb_refs[L0])

                                 part_idx, merge_idx, mv, mvp_flag, 0);

    if (inter_pred_idc != PRED_L0) {
        if (s->sh.nb_refs[L1])

        if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {

                                 part_idx, merge_idx, mv, mvp_flag, 1);
                                int log2_cb_size, int partIdx, int idx)

#define POS(c_idx, x, y)                                                                    \
    &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] +  \
                           (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]

    struct MvField current_mv = {{{ 0 }}};

    int min_pu_width = s->ps.sps->min_pu_width;

    MvField *tab_mvf = s->ref->tab_mvf;

    int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
    int min_cb_width     = s->ps.sps->min_cb_width;
    int x_cb             = x0 >> log2_min_cb_size;
    int y_cb             = y0 >> log2_min_cb_size;

    int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb);

        if (s->sh.max_num_merge_cand > 1)

                                   partIdx, merge_idx, &current_mv);

                                   partIdx, merge_idx, &current_mv);

    x_pu = x0 >> s->ps.sps->log2_min_pu_size;
    y_pu = y0 >> s->ps.sps->log2_min_pu_size;

    for (j = 0; j < nPbH >> s->ps.sps->log2_min_pu_size; j++)
        for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++)
            tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;

        ref0 = refPicList[0].ref[current_mv.ref_idx[0]];

        ref1 = refPicList[1].ref[current_mv.ref_idx[1]];

        int x0_c = x0 >> s->ps.sps->hshift[1];
        int y0_c = y0 >> s->ps.sps->vshift[1];
        int nPbW_c = nPbW >> s->ps.sps->hshift[1];
        int nPbH_c = nPbH >> s->ps.sps->vshift[1];

                    &current_mv.mv[0], x0, y0, nPbW, nPbH,
                    s->sh.luma_weight_l0[current_mv.ref_idx[0]],
                    s->sh.luma_offset_l0[current_mv.ref_idx[0]]);

        if (s->ps.sps->chroma_format_idc) {
                          0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
                          s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]);
                          0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
                          s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]);

        int x0_c = x0 >> s->ps.sps->hshift[1];
        int y0_c = y0 >> s->ps.sps->vshift[1];
        int nPbW_c = nPbW >> s->ps.sps->hshift[1];
        int nPbH_c = nPbH >> s->ps.sps->vshift[1];

                    &current_mv.mv[1], x0, y0, nPbW, nPbH,
                    s->sh.luma_weight_l1[current_mv.ref_idx[1]],
                    s->sh.luma_offset_l1[current_mv.ref_idx[1]]);

        if (s->ps.sps->chroma_format_idc) {
            chroma_mc_uni(s, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1],
                          1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
                          s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]);

            chroma_mc_uni(s, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2],
                          1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
                          s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]);

        int x0_c = x0 >> s->ps.sps->hshift[1];
        int y0_c = y0 >> s->ps.sps->vshift[1];
        int nPbW_c = nPbW >> s->ps.sps->hshift[1];
        int nPbH_c = nPbH >> s->ps.sps->vshift[1];

                   &current_mv.mv[0], x0, y0, nPbW, nPbH,
                   ref1->frame, &current_mv.mv[1], &current_mv);

        if (s->ps.sps->chroma_format_idc) {
                         x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);

                         x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
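/* hls_prediction_unit() (fragment above): decodes one PU. Merge or AMVP mode
 * fills current_mv, which is copied into tab_mvf for every min-PU the PU covers
 * (it is needed later for MV prediction and deblocking); depending on which
 * prediction flags are set, the luma/chroma uni- or bi-directional MC helpers
 * above are called with the per-list weights and offsets from the slice header. */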
                                int prev_intra_luma_pred_flag)

    int x_pu         = x0 >> s->ps.sps->log2_min_pu_size;
    int y_pu         = y0 >> s->ps.sps->log2_min_pu_size;
    int min_pu_width = s->ps.sps->min_pu_width;
    int size_in_pus  = pu_size >> s->ps.sps->log2_min_pu_size;

                   s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
                   s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC;

    int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size);

    MvField *tab_mvf = s->ref->tab_mvf;
    int intra_pred_mode;

    if ((y0 - 1) < y_ctb)

    if (cand_left == cand_up) {
        if (cand_left < 2) {

            candidate[0] = cand_left;
            candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
            candidate[2] = 2 + ((cand_left - 2 + 1) & 31);

        candidate[0] = cand_left;
        candidate[1] = cand_up;

    if (prev_intra_luma_pred_flag) {
        intra_pred_mode = candidate[lc->pu.mpm_idx];

        if (candidate[0] > candidate[1])
        if (candidate[0] > candidate[2])
        if (candidate[1] > candidate[2])

        for (i = 0; i < 3; i++)
            if (intra_pred_mode >= candidate[i])

    for (i = 0; i < size_in_pus; i++) {
        memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
               intra_pred_mode, size_in_pus);

        for (j = 0; j < size_in_pus; j++) {

    return intra_pred_mode;
                         int log2_cb_size, int ct_depth)

    int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size;
    int x_cb   = x0 >> s->ps.sps->log2_min_cb_size;
    int y_cb   = y0 >> s->ps.sps->log2_min_cb_size;

    for (y = 0; y < length; y++)
        memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb],

     0,  1,  2,  2,  2,  2,  3,  5,  7,  8, 10, 12, 13, 15, 17, 18, 19, 20,
    21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
    static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
    uint8_t prev_intra_luma_pred_flag[4];
    int pb_size = (1 << log2_cb_size) >> split;
    int side    = split + 1;

    for (i = 0; i < side; i++)
        for (j = 0; j < side; j++)

    for (i = 0; i < side; i++) {
        for (j = 0; j < side; j++) {
            if (prev_intra_luma_pred_flag[2 * i + j])

                                             prev_intra_luma_pred_flag[2 * i + j]);

    if (s->ps.sps->chroma_format_idc == 3) {
        for (i = 0; i < side; i++) {
            for (j = 0; j < side; j++) {
                if (chroma_mode != 4) {

    } else if (s->ps.sps->chroma_format_idc == 2) {
        if (chroma_mode != 4) {
            mode_idx = intra_chroma_table[chroma_mode];

    } else if (s->ps.sps->chroma_format_idc != 0) {
        if (chroma_mode != 4) {
    int pb_size      = 1 << log2_cb_size;
    int size_in_pus  = pb_size >> s->ps.sps->log2_min_pu_size;
    int min_pu_width = s->ps.sps->min_pu_width;
    MvField *tab_mvf = s->ref->tab_mvf;
    int x_pu         = x0 >> s->ps.sps->log2_min_pu_size;
    int y_pu         = y0 >> s->ps.sps->log2_min_pu_size;

    if (size_in_pus == 0)

    for (j = 0; j < size_in_pus; j++)
        memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);

        for (j = 0; j < size_in_pus; j++)
            for (k = 0; k < size_in_pus; k++)
    int cb_size          = 1 << log2_cb_size;

    int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
    int length           = cb_size >> log2_min_cb_size;
    int min_cb_width     = s->ps.sps->min_cb_width;
    int x_cb             = x0 >> log2_min_cb_size;
    int y_cb             = y0 >> log2_min_cb_size;
    int idx              = log2_cb_size - 2;
    int qp_block_mask    = (1 << (s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;

    for (x = 0; x < 4; x++)

    if (s->ps.pps->transquant_bypass_enable_flag) {

        x = y_cb * min_cb_width + x_cb;
        for (y = 0; y < length; y++) {
            memset(&s->skip_flag[x], skip_flag, length);

        x = y_cb * min_cb_width + x_cb;
        for (y = 0; y < length; y++) {
            memset(&s->skip_flag[x], 0, length);

            if (!s->sh.disable_deblocking_filter_flag)

            log2_cb_size == s->ps.sps->log2_min_cb_size) {

                log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size &&
                log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {

                if (s->ps.sps->pcm.loop_filter_disable_flag)

                hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);

                hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);

                hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
                hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
                hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);

            int rqt_root_cbf = 1;

                const static int cbf[2] = { 0 };
                               s->ps.sps->max_transform_hierarchy_depth_inter;
                                         log2_cb_size, 0, 0, cbf, cbf);

                if (!s->sh.disable_deblocking_filter_flag)

    x = y_cb * min_cb_width + x_cb;
    for (y = 0; y < length; y++) {
        memset(&s->qp_y_tab[x], lc->qp_y, length);

    if (((x0 + (1 << log2_cb_size)) & qp_block_mask) == 0 &&
        ((y0 + (1 << log2_cb_size)) & qp_block_mask) == 0) {
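/* hls_coding_unit() (fragment above): one CU. Skip flags are propagated into
 * s->skip_flag for the whole CU, PCM CUs go through hls_pcm_sample(), otherwise
 * the PUs are decoded according to the part mode (the asymmetric AMP splits are
 * visible in the 1/4 + 3/4 hls_prediction_unit() calls) and, when rqt_root_cbf
 * is set, the residual quadtree follows. Finally qp_y is stored for the CU and
 * the QP predictor is updated at quantisation-group boundaries. */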
                               int log2_cb_size, int cb_depth)

    const int cb_size = 1 << log2_cb_size;

    if (x0 + cb_size <= s->ps.sps->width  &&
        y0 + cb_size <= s->ps.sps->height &&
        log2_cb_size > s->ps.sps->log2_min_cb_size) {

        split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size);

    if (s->ps.pps->cu_qp_delta_enabled_flag &&
        log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) {

    if (s->sh.cu_chroma_qp_offset_enabled_flag &&
        log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) {

        int qp_block_mask = (1 << (s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
        const int cb_size_split = cb_size >> 1;
        const int x1 = x0 + cb_size_split;
        const int y1 = y0 + cb_size_split;

        if (more_data && x1 < s->ps.sps->width) {

        if (more_data && y1 < s->ps.sps->height) {

        if (more_data && x1 < s->ps.sps->width &&
            y1 < s->ps.sps->height) {

        if (((x0 + (1 << log2_cb_size)) & qp_block_mask) == 0 &&
            ((y0 + (1 << log2_cb_size)) & qp_block_mask) == 0)

        return ((x1 + cb_size_split) < s->ps.sps->width ||
                (y1 + cb_size_split) < s->ps.sps->height);

        if ((!((x0 + cb_size) %
               (1 << (s->ps.sps->log2_ctb_size))) ||
             (x0 + cb_size >= s->ps.sps->width)) &&
            (!((y0 + cb_size) %
               (1 << (s->ps.sps->log2_ctb_size))) ||
             (y0 + cb_size >= s->ps.sps->height))) {

            return !end_of_slice_flag;
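/* hls_coding_quadtree() (fragment above): recursive CTB quadtree. split_cu is
 * forced when the CU would cross the picture border, QP-delta and chroma QP
 * offset coding are re-armed at each quantisation group, and the return value
 * tells the caller whether more CTB data follows; end_of_slice_flag is only
 * checked for CUs that end on a CTB or picture boundary. */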
    int ctb_size          = 1 << s->ps.sps->log2_ctb_size;
    int ctb_addr_rs       = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
    int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;

    s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;

    if (s->ps.pps->entropy_coding_sync_enabled_flag) {
        if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
    } else if (s->ps.pps->tiles_enabled_flag) {
        if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
            int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
            lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);

    if (s->ps.pps->tiles_enabled_flag) {
        if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
        if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
        if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
        if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])

    if (ctb_addr_in_slice <= 0)
    if (ctb_addr_in_slice < s->ps.sps->ctb_width)

    lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
    lc->ctb_up_left_flag  = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
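/* hls_decode_neighbour() (fragment above): recomputes, for the CTB about to be
 * decoded, which neighbours are available for prediction. Availability is
 * dropped whenever the neighbour lies in a different tile or a different slice
 * (compared through tile_id and tab_slice_address), which is what keeps tiles
 * and slices independently decodable. */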
    int ctb_size    = 1 << s->ps.sps->log2_ctb_size;

    int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];

    if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {

    if (s->sh.dependent_slice_segment_flag) {
        int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
        if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {

    while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
        int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];

        x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
        y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;

            s->tab_slice_address[ctb_addr_rs] = -1;

        hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);

        s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
        s->deblock[ctb_addr_rs].tc_offset   = s->sh.tc_offset;
        s->filter_slice_edges[ctb_addr_rs]  = s->sh.slice_loop_filter_across_slices_enabled_flag;

        if (more_data < 0) {
            s->tab_slice_address[ctb_addr_rs] = -1;

    if (x_ctb + ctb_size >= s->ps.sps->width &&
        y_ctb + ctb_size >= s->ps.sps->height)
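/* hls_decode_entry() (fragment above): the single-threaded slice decoding loop.
 * Each iteration converts the tile-scan CTB address to raster scan, decodes the
 * SAO parameters and the CTB quadtree, records the per-CTB deblocking offsets,
 * and runs the in-loop filters for the completed CTB; on a decode error the
 * CTB is marked with slice address -1 so later passes skip it. */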
    int ctb_size    = 1 << s1->ps.sps->log2_ctb_size;

    int *ctb_row_p  = input_ctb_row;
    int ctb_row     = ctb_row_p[job];
    int ctb_addr_rs = s1->sh.slice_ctb_addr_rs + ctb_row * ((s1->ps.sps->width + ctb_size - 1) >> s1->ps.sps->log2_ctb_size);
    int ctb_addr_ts = s1->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
    int thread      = ctb_row % s1->threads_number;

    s = s1->sList[self_id];

        ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);

    while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
        int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
        int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;

        hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);

        if (more_data < 0) {

        if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {

        if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height) {

        ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];

        if (x_ctb >= s->ps.sps->width) {

    s->tab_slice_address[ctb_addr_rs] = -1;
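/* hls_decode_entry_wpp() (fragment above): one job per CTB row for wavefront
 * parallel processing. Each row gets its own GetBitContext positioned at the
 * entry-point offset recorded in the slice header; synchronisation with the row
 * above (which must stay ahead and provides the propagated CABAC state) is
 * handled around this loop so rows can be decoded concurrently. */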
    int length = nal->size;

    int *ret = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));

    int64_t startheader, cmpt = 0;

    if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
               s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
               s->ps.sps->ctb_width, s->ps.sps->ctb_height

    for (i = 1; i < s->threads_number; i++) {
        if (s->sList[i] && s->HEVClcList[i])

        if (!s->sList[i] || !s->HEVClcList[i]) {

        s->sList[i]->HEVClc = s->HEVClcList[i];

    for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {

    for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
        offset += (s->sh.entry_point_offset[i - 1] - cmpt);
        for (j = 0, cmpt = 0, startheader = offset
             + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) {

        s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;

    if (s->sh.num_entry_point_offsets != 0) {
        offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;

    s->sh.size[s->sh.num_entry_point_offsets - 1]   = length - offset;
    s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;

    for (i = 1; i < s->threads_number; i++) {
        s->sList[i]->HEVClc->first_qp_group = 1;
        s->sList[i]->HEVClc->qp_y = s->sList[0]->HEVClc->qp_y;

        s->sList[i]->HEVClc = s->HEVClcList[i];

    for (i = 0; i <= s->sh.num_entry_point_offsets; i++) {

    if (s->ps.pps->entropy_coding_sync_enabled_flag)

    for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
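/* hls_slice_data_wpp() (fragment above): sets up wavefront decoding. The entry
 * point offsets from the slice header are converted into per-row (offset, size)
 * pairs, correcting for the emulation-prevention bytes removed when the NAL
 * unit was unescaped (nal->skipped_bytes), then one decoding context per thread
 * is prepared and the rows are dispatched as jobs. */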
    if (s->sei.frame_packing.present &&
        s->sei.frame_packing.arrangement_type >= 3 &&
        s->sei.frame_packing.arrangement_type <= 5 &&
        s->sei.frame_packing.content_interpretation_type > 0 &&
        s->sei.frame_packing.content_interpretation_type < 3) {

        switch (s->sei.frame_packing.arrangement_type) {
            if (s->sei.frame_packing.quincunx_subsampling)

        if (s->sei.frame_packing.content_interpretation_type == 2)

        if (s->sei.frame_packing.arrangement_type == 5) {
            if (s->sei.frame_packing.current_frame_is_frame0_flag)

    if (s->sei.display_orientation.present &&
        (s->sei.display_orientation.anticlockwise_rotation ||
         s->sei.display_orientation.hflip || s->sei.display_orientation.vflip)) {
        double angle = s->sei.display_orientation.anticlockwise_rotation * 360 / (double) (1 << 16);

                                  s->sei.display_orientation.hflip,
                                  s->sei.display_orientation.vflip);

    if (s->sei.mastering_display.present > 0 &&
        s->sei.mastering_display.present--;

    if (s->sei.mastering_display.present) {
        const int mapping[3] = {2, 0, 1};
        const int chroma_den = 50000;
        const int luma_den   = 10000;

        for (i = 0; i < 3; i++) {
            const int j = mapping[i];

        metadata->white_point[0].num = s->sei.mastering_display.white_point[0];
        metadata->white_point[1].num = s->sei.mastering_display.white_point[1];

               "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
               "min_luminance=%f, max_luminance=%f\n",

    if (s->sei.content_light.present > 0 &&
        s->sei.content_light.present--;

    if (s->sei.content_light.present) {

        metadata->MaxCLL  = s->sei.content_light.max_content_light_level;
        metadata->MaxFALL = s->sei.content_light.max_pic_average_light_level;

    if (s->sei.a53_caption.buf_ref) {

    for (int i = 0; i < s->sei.unregistered.nb_buf_ref; i++) {

    s->sei.unregistered.nb_buf_ref = 0;

    if (s->sei.timecode.present) {
                                         sizeof(uint32_t) * 4);

        tc_sd = (uint32_t*)tcside->data;
        tc_sd[0] = s->sei.timecode.num_clock_ts;

        for (int i = 0; i < tc_sd[0]; i++) {
            int drop = s->sei.timecode.cnt_dropped_flag[i];
            int hh   = s->sei.timecode.hours_value[i];
            int mm   = s->sei.timecode.minutes_value[i];
            int ss   = s->sei.timecode.seconds_value[i];
            int ff   = s->sei.timecode.n_frames[i];

        s->sei.timecode.num_clock_ts = 0;

    if (s->sei.dynamic_hdr_plus.info) {
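/* set_side_data() (fragment above): translates the parsed SEI messages into
 * AVFrame side data -- stereo 3D frame packing, display orientation, mastering
 * display colour volume (chromaticities in 0.00002 units and luminance in
 * 0.0001 cd/m^2, hence the 50000/10000 denominators), content light level,
 * A53 captions, unregistered user data, S12M timecodes and HDR10+ metadata. */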
    int pic_size_in_ctb = ((s->ps.sps->width  >> s->ps.sps->log2_min_cb_size) + 1) *
                          ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1);

    memset(s->horizontal_bs, 0, s->bs_width * s->bs_height);
    memset(s->vertical_bs,   0, s->bs_width * s->bs_height);
    memset(s->cbf_luma,      0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height);
    memset(s->is_pcm,        0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1));
    memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address));

    s->first_nal_type = s->nal_unit_type;

    if (s->ps.pps->tiles_enabled_flag)
        lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size;

    s->frame->pict_type = 3 - s->sh.slice_type;

    if (!s->avctx->hwaccel)
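/* hevc_frame_start() (fragment above): per-frame state reset. Note the
 * pict_type mapping: HEVC slice types are B=0, P=1, I=2, so 3 - slice_type
 * yields the AV_PICTURE_TYPE_{I,P,B} values (1, 2, 3) of the first slice. */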
    int ctb_addr_ts, ret;

    s->nal_unit_type = nal->type;

    switch (s->nal_unit_type) {
        if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
            ret = s->avctx->hwaccel->decode_params(s->avctx,

        if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
            ret = s->avctx->hwaccel->decode_params(s->avctx,
                                       s->apply_defdispwin);

        if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
            ret = s->avctx->hwaccel->decode_params(s->avctx,

        if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
            ret = s->avctx->hwaccel->decode_params(s->avctx,

        if (s->sh.first_slice_in_pic_flag) {
            if (s->max_ra == INT_MAX) {

                    s->max_ra = INT_MIN;

                s->poc <= s->max_ra) {

                    s->max_ra = INT_MIN;

        } else if (!s->ref) {

        if (s->nal_unit_type != s->first_nal_type) {
                   "Non-matching NAL types of the VCL NALUs: %d %d\n",
                   s->first_nal_type, s->nal_unit_type);

        if (!s->sh.dependent_slice_segment_flag &&
                       "Error constructing the reference lists for the current slice.\n");

        if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
            ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);

        if (s->avctx->hwaccel) {

            if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)

            if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {

            if (ctb_addr_ts < 0) {
                s->seq_decode = (s->seq_decode + 1) & 0xff;
                s->max_ra = INT_MAX;

               "Skipping NAL unit %d\n", s->nal_unit_type);
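/* decode_nal_unit() (fragment above): dispatch on NAL type. Parameter sets and
 * SEI are also forwarded to the hwaccel's decode_params() hook when present;
 * for VCL NAL units the slice header is parsed, RASL output suppression after a
 * random access point is enforced through s->max_ra, the reference lists are
 * built on the first (non-dependent) slice segment, and the slice data is then
 * decoded either by the hwaccel or by the software entry/WPP paths above. */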
    int eos_at_start = 1;

    s->last_eos = s->eos;

                                s->nal_length_size, s->avctx->codec_id, 1, 0);
               "Error splitting the input into NAL units.\n");

    for (i = 0; i < s->pkt.nb_nals; i++) {

    for (i = 0; i < s->pkt.nb_nals; i++) {

        if (ret >= 0 && s->overlap > 2)
                   "Error parsing NAL unit #%d.\n", i);

    for (i = 0; i < 16; i++)
    pixel_shift = desc->comp[0].depth > 8;

    if (pixel_shift && !s->checksum_buf) {
        if (!s->checksum_buf)

        int width  = s->avctx->coded_width;
        int height = s->avctx->coded_height;

        for (j = 0; j < h; j++) {
                s->bdsp.bswap16_buf((uint16_t *)s->checksum_buf,
                                    (const uint16_t *)src, w);
                src = s->checksum_buf;

        if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) {
                                   &s->nal_length_size, s->avctx->err_recognition,
                                   s->apply_defdispwin, s->avctx);

        if (first && s->ps.sps_list[i]) {

                                            &new_extradata_size);
    if (new_extradata && new_extradata_size > 0) {

               "hardware accelerator failed to decode picture\n");

        s->sei.picture_hash.is_md5) {

    s->sei.picture_hash.is_md5 = 0;

    if (s->is_decoded) {

    if (s->output_frame->buf[0]) {
    if (src->hwaccel_picture_private) {

    for (i = 0; i < 3; i++) {

    if (s->HEVClcList && s->sList) {
        for (i = 1; i < s->threads_number; i++) {

    if (!s->HEVClc || !s->HEVClcList || !s->sList)
    s->HEVClcList[0] = s->HEVClc;

    if (!s->cabac_state)

    if (!s->output_frame)

        if (!s->DPB[i].frame)
        s->DPB[i].tf.f = s->DPB[i].frame;

    s->max_ra = INT_MAX;

    s->context_initialized = 1;
    if (!s->context_initialized) {

        if (s0->DPB[i].frame->buf[0]) {

    if (s->ps.sps != s0->ps.sps)

    if (s->ps.sps != s0->ps.sps)

    s->seq_decode = s0->seq_decode;
    s->seq_output = s0->seq_output;
    s->pocTid0    = s0->pocTid0;
    s->max_ra     = s0->max_ra;

    s->no_rasl_output_flag = s0->no_rasl_output_flag;

    s->is_nalff        = s0->is_nalff;
    s->nal_length_size = s0->nal_length_size;

    s->threads_number = s0->threads_number;
    s->threads_type   = s0->threads_type;

        s->seq_decode = (s->seq_decode + 1) & 0xff;
        s->max_ra = INT_MAX;

    for (i = 0; i < s->sei.unregistered.nb_buf_ref; i++)
    s->sei.unregistered.nb_buf_ref = 0;

    if (s0->sei.unregistered.nb_buf_ref) {
                                  s0->sei.unregistered.nb_buf_ref,
                                  sizeof(*s->sei.unregistered.buf_ref));

        for (i = 0; i < s0->sei.unregistered.nb_buf_ref; i++) {
            if (!s->sei.unregistered.buf_ref[i])
            s->sei.unregistered.nb_buf_ref++;

    s->sei.frame_packing        = s0->sei.frame_packing;
    s->sei.display_orientation  = s0->sei.display_orientation;
    s->sei.mastering_display    = s0->sei.mastering_display;
    s->sei.content_light        = s0->sei.content_light;
    s->sei.alternative_transfer = s0->sei.alternative_transfer;

    s->threads_number = 1;

    s->enable_parallel_tiles = 0;
    s->sei.picture_timing.picture_struct = 0;

    s->max_ra = INT_MAX;
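/* hevc_update_thread_context() (fragment above): for frame-threaded decoding
 * the per-thread context is synchronised from the previous thread -- DPB
 * references, active parameter sets, POC/RASL state and the persistent SEI
 * messages -- so every thread starts from the same decoder state. */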
#define OFFSET(x) offsetof(HEVCContext, x)
#define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)

    { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
    { "strict-displaywin", "strictly apply default display window size", OFFSET(apply_defdispwin),

#if CONFIG_HEVC_DXVA2_HWACCEL
#if CONFIG_HEVC_D3D11VA_HWACCEL
#if CONFIG_HEVC_D3D11VA2_HWACCEL
#if CONFIG_HEVC_NVDEC_HWACCEL
#if CONFIG_HEVC_VAAPI_HWACCEL
#if CONFIG_HEVC_VDPAU_HWACCEL
#if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
static void flush(AVCodecContext *avctx)
static double val(void *priv, double ch)
static char * split(char *message, char delim)
Macro definitions for various function/variable attributes.
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_THREAD_FRAME
Decode more than one frame at once.
#define AV_EF_CRCCHECK
Verify checksums embedded in the bitstream (could be of either encoded or decoded data,...
#define AV_EF_EXPLODE
abort decoding on minor error detection
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, buffer_size_t *size)
static av_cold int init(AVCodecContext *avctx)
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Context Adaptive Binary Arithmetic Coder inline functions.
static av_unused const uint8_t * skip_bytes(CABACContext *c, int n)
Skip n bytes and reset the decoder.
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
static int FUNC() vps(CodedBitstreamContext *ctx, RWContext *rw, H265RawVPS *current)
#define ss(width, name, subs,...)
common internal and external API header
#define FFSWAP(type, a, b)
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
static enum AVPixelFormat pix_fmt
#define atomic_store(object, desired)
#define atomic_load(object)
#define atomic_init(obj, value)
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
static int get_bits_left(GetBitContext *gb)
static unsigned int get_bits1(GetBitContext *s)
static void skip_bits(GetBitContext *s, int n)
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static av_always_inline int get_bitsz(GetBitContext *s, int n)
Read 0-25 bits.
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
@ AVDISCARD_ALL
discard all
@ AVDISCARD_NONKEY
discard all frames except keyframes
@ AVDISCARD_BIDIR
discard all bidirectional frames
@ AVDISCARD_NONINTRA
discard all non intra frames
@ AVDISCARD_NONREF
discard all non reference
@ AV_PKT_DATA_NEW_EXTRADATA
The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format that the extradata buffer was...
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
AVBufferRef * av_buffer_allocz(buffer_size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
int av_buffer_replace(AVBufferRef **pdst, AVBufferRef *src)
Ensure dst refers to the same data as src.
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
AVBufferPool * av_buffer_pool_init(buffer_size_t size, AVBufferRef *(*alloc)(buffer_size_t size))
Allocate and initialize a buffer pool.
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, buffer_size_t size)
Add a new side data to a frame.
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
@ AV_FRAME_DATA_SEI_UNREGISTERED
User data unregistered metadata associated with a video frame.
@ AV_FRAME_DATA_DYNAMIC_HDR_PLUS
HDR dynamic metadata associated with a video frame.
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define AV_LOG_WARNING
Something somehow does not look correct.
#define AV_LOG_INFO
Standard information.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
const char * av_default_item_name(void *ptr)
Return the context name.
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
static double av_q2d(AVRational a)
Convert an AVRational to a double.
void av_md5_init(AVMD5 *ctx)
Initialize MD5 hashing.
struct AVMD5 * av_md5_alloc(void)
Allocate an AVMD5 context.
void av_md5_final(AVMD5 *ctx, uint8_t *dst)
Finish hashing and output digest value.
void av_md5_update(AVMD5 *ctx, const uint8_t *src, int len)
Update hash value.
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array through a pointer to a pointer.
void * av_mallocz_array(size_t nmemb, size_t size)
Allocate a memory block for an array with av_mallocz().
#define LIBAVUTIL_VERSION_INT
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
@ AV_STEREO3D_FRAMESEQUENCE
Views are alternated temporally.
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
@ AV_STEREO3D_VIEW_RIGHT
Frame contains only the right view.
@ AV_STEREO3D_VIEW_LEFT
Frame contains only the left view.
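Attaching stereo packing information to a frame could look like this sketch; the chosen type/flags are arbitrary, and AV_STEREO3D_VIEW_PACKED is assumed to exist alongside the two single-view values listed above:

    #include <libavutil/stereo3d.h>
    #include <libavutil/error.h>

    static int stereo_demo(AVFrame *frame)
    {
        AVStereo3D *s3d = av_stereo3d_create_side_data(frame);
        if (!s3d)
            return AVERROR(ENOMEM);
        s3d->type  = AV_STEREO3D_TOPBOTTOM;
        s3d->flags = AV_STEREO3D_FLAG_INVERT;
        s3d->view  = AV_STEREO3D_VIEW_PACKED;   /* both views in one surface */
        return 0;
    }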
int ff_h2645_packet_split(H2645Packet *pkt, const uint8_t *buf, int length, void *logctx, int is_nalff, int nal_length_size, enum AVCodecID codec_id, int small_padding, int use_ref)
Split an input packet into NAL units.
void ff_h2645_packet_uninit(H2645Packet *pkt)
Free all the allocated memory in the packet.
int ff_hevc_inter_pred_idc_decode(HEVCContext *s, int nPbW, int nPbH)
int ff_hevc_intra_chroma_pred_mode_decode(HEVCContext *s)
int ff_hevc_cu_transquant_bypass_flag_decode(HEVCContext *s)
int ff_hevc_sao_offset_abs_decode(HEVCContext *s)
int ff_hevc_split_coding_unit_flag_decode(HEVCContext *s, int ct_depth, int x0, int y0)
int ff_hevc_res_scale_sign_flag(HEVCContext *s, int idx)
int ff_hevc_rem_intra_luma_pred_mode_decode(HEVCContext *s)
int ff_hevc_no_residual_syntax_flag_decode(HEVCContext *s)
int ff_hevc_mvp_lx_flag_decode(HEVCContext *s)
int ff_hevc_pred_mode_decode(HEVCContext *s)
int ff_hevc_prev_intra_luma_pred_flag_decode(HEVCContext *s)
int ff_hevc_cu_qp_delta_abs(HEVCContext *s)
void ff_hevc_hls_residual_coding(HEVCContext *s, int x0, int y0, int log2_trafo_size, enum ScanType scan_idx, int c_idx)
int ff_hevc_split_transform_flag_decode(HEVCContext *s, int log2_trafo_size)
int ff_hevc_cu_chroma_qp_offset_flag(HEVCContext *s)
int ff_hevc_log2_res_scale_abs(HEVCContext *s, int idx)
int ff_hevc_cbf_luma_decode(HEVCContext *s, int trafo_depth)
int ff_hevc_part_mode_decode(HEVCContext *s, int log2_cb_size)
int ff_hevc_cu_chroma_qp_offset_idx(HEVCContext *s)
void ff_hevc_save_states(HEVCContext *s, int ctb_addr_ts)
int ff_hevc_cabac_init(HEVCContext *s, int ctb_addr_ts, int thread)
int ff_hevc_sao_band_position_decode(HEVCContext *s)
int ff_hevc_ref_idx_lx_decode(HEVCContext *s, int num_ref_idx_lx)
int ff_hevc_skip_flag_decode(HEVCContext *s, int x0, int y0, int x_cb, int y_cb)
int ff_hevc_sao_type_idx_decode(HEVCContext *s)
int ff_hevc_sao_offset_sign_decode(HEVCContext *s)
int ff_hevc_cbf_cb_cr_decode(HEVCContext *s, int trafo_depth)
int ff_hevc_end_of_slice_flag_decode(HEVCContext *s)
int ff_hevc_sao_merge_flag_decode(HEVCContext *s)
int ff_hevc_merge_idx_decode(HEVCContext *s)
int ff_hevc_merge_flag_decode(HEVCContext *s)
int ff_hevc_sao_eo_class_decode(HEVCContext *s)
int ff_hevc_pcm_flag_decode(HEVCContext *s)
int ff_hevc_mpm_idx_decode(HEVCContext *s)
void ff_hevc_hls_mvd_coding(HEVCContext *s, int x0, int y0, int log2_cb_size)
int ff_hevc_cu_qp_delta_sign_flag(HEVCContext *s)
void ff_hevc_deblocking_boundary_strengths(HEVCContext *s, int x0, int y0, int log2_trafo_size)
void ff_hevc_set_qPy(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
void ff_hevc_hls_filters(HEVCContext *s, int x_ctb, int y_ctb, int ctb_size)
void ff_hevc_hls_filter(HEVCContext *s, int x, int y, int ctb_size)
void ff_hevc_set_neighbour_available(HEVCContext *s, int x0, int y0, int nPbW, int nPbH)
void ff_hevc_luma_mv_merge_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv)
void ff_hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv, int mvp_lx_flag, int LX)
int ff_hevc_decode_extradata(const uint8_t *data, int size, HEVCParamSets *ps, HEVCSEI *sei, int *is_nalff, int *nal_length_size, int err_recognition, int apply_defdispwin, void *logctx)
int ff_hevc_decode_nal_pps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps)
void ff_hevc_ps_uninit(HEVCParamSets *ps)
int ff_hevc_decode_nal_vps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps)
int ff_hevc_decode_short_term_rps(GetBitContext *gb, AVCodecContext *avctx, ShortTermRPS *rps, const HEVCSPS *sps, int is_slice_header)
int ff_hevc_decode_nal_sps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps, int apply_defdispwin)
int ff_hevc_compute_poc(const HEVCSPS *sps, int pocTid0, int poc_lsb, int nal_unit_type)
Compute POC of the current frame and return it.
int ff_hevc_output_frame(HEVCContext *s, AVFrame *out, int flush)
Find next frame in output order and put a reference to it in frame.
int ff_hevc_frame_nb_refs(const HEVCContext *s)
Get the number of candidate references for the current frame.
void ff_hevc_flush_dpb(HEVCContext *s)
Drop all frames currently in DPB.
int ff_hevc_set_new_ref(HEVCContext *s, AVFrame **frame, int poc)
void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags)
void ff_hevc_bump_frame(HEVCContext *s)
int ff_hevc_frame_rps(HEVCContext *s)
Construct the reference picture sets for the current frame.
void ff_hevc_clear_refs(HEVCContext *s)
Mark all frames in DPB as unused for reference.
int ff_hevc_slice_rpl(HEVCContext *s)
Construct the reference picture list(s) for the current slice.
int ff_hevc_decode_nal_sei(GetBitContext *gb, void *logctx, HEVCSEI *s, const HEVCParamSets *ps, int type)
void ff_hevc_reset_sei(HEVCSEI *s)
Reset SEI values that are stored on the Context.
#define BOUNDARY_UPPER_SLICE
#define BOUNDARY_LEFT_TILE
#define QPEL_EXTRA_BEFORE
static av_always_inline int ff_hevc_nal_is_nonref(enum HEVCNALUnitType type)
#define EPEL_EXTRA_BEFORE
#define SAMPLE_CTB(tab, x, y)
#define BOUNDARY_UPPER_TILE
#define EDGE_EMU_BUFFER_STRIDE
#define BOUNDARY_LEFT_SLICE
void ff_hevc_dsp_init(HEVCDSPContext *hevcdsp, int bit_depth)
void ff_hevc_pred_init(HEVCPredContext *hpc, int bit_depth)
#define HWACCEL_DXVA2(codec)
#define HWACCEL_VDPAU(codec)
#define HWACCEL_NVDEC(codec)
#define HWACCEL_VAAPI(codec)
#define HWACCEL_D3D11VA(codec)
#define HWACCEL_D3D11VA2(codec)
static const int8_t mv[256][2]
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
static av_cold int hevc_init_context(AVCodecContext *avctx)
static int hls_transform_unit(HEVCContext *s, int x0, int y0, int xBase, int yBase, int cb_xBase, int cb_yBase, int log2_cb_size, int log2_trafo_size, int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size)
static void chroma_mc_bi(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, AVFrame *ref0, AVFrame *ref1, int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx)
8.5.3.2.2.2 Chroma sample bidirectional interpolation process
static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
static av_cold int hevc_decode_init(AVCodecContext *avctx)
static void luma_mc_bi(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride, AVFrame *ref0, const Mv *mv0, int x_off, int y_off, int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
8.5.3.2.2.1 Luma sample bidirectional interpolation process
static const AVClass hevc_decoder_class
static const AVOption options[]
static int hevc_frame_start(HEVCContext *s)
static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
static av_cold int hevc_decode_free(AVCodecContext *avctx)
static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output, AVPacket *avpkt)
static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
static int hls_slice_header(HEVCContext *s)
static void luma_mc_uni(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride, AVFrame *ref, const Mv *mv, int x_off, int y_off, int block_w, int block_h, int luma_weight, int luma_offset)
8.5.3.2.2.1 Luma sample unidirectional interpolation process
static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
static void pic_arrays_free(HEVCContext *s)
NOTE: Each function hls_foo corresponds to the function foo in the specification (HLS stands for High ...
static int export_stream_params_from_sei(HEVCContext *s)
const uint8_t ff_hevc_pel_weight[65]
static int pred_weight_table(HEVCContext *s, GetBitContext *gb)
static int hls_pcm_sample(HEVCContext *s, int x0, int y0, int log2_cb_size)
static void chroma_mc_uni(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist, int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset)
8.5.3.2.2.2 Chroma sample uniprediction interpolation process
#define SET_SAO(elem, value)
static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
static int hls_coding_quadtree(HEVCContext *s, int x0, int y0, int log2_cb_size, int cb_depth)
static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int job, int self_id)
static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
static void hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv)
static int verify_md5(HEVCContext *s, AVFrame *frame)
static void print_md5(void *log_ctx, int level, uint8_t md5[16])
static int set_sps(HEVCContext *s, const HEVCSPS *sps, enum AVPixelFormat pix_fmt)
static int luma_intra_pred_mode(HEVCContext *s, int x0, int y0, int pu_size, int prev_intra_luma_pred_flag)
8.4.1
static int hls_slice_data(HEVCContext *s)
static void intra_prediction_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
static int set_side_data(HEVCContext *s)
static void hls_prediction_unit(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int partIdx, int idx)
#define SUBDIVIDE(x, y, idx)
static av_always_inline void set_ct_depth(HEVCContext *s, int x0, int y0, int log2_cb_size, int ct_depth)
static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb, int ctb_addr_ts)
static int hls_cross_component_pred(HEVCContext *s, int idx)
static void hevc_decode_flush(AVCodecContext *avctx)
static void hls_sao_param(HEVCContext *s, int rx, int ry)
static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
static const uint8_t tab_mode_idx[]
static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
static int hls_transform_tree(HEVCContext *s, int x0, int y0, int xBase, int yBase, int cb_xBase, int cb_yBase, int log2_cb_size, int log2_trafo_size, int trafo_depth, int blk_idx, const int *base_cbf_cb, const int *base_cbf_cr)
static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
static void hevc_await_progress(HEVCContext *s, HEVCFrame *ref, const Mv *mv, int y0, int height)
static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
static void intra_prediction_unit_default_value(HEVCContext *s, int x0, int y0, int log2_cb_size)
static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
common internal API header
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
static enum AVPixelFormat pix_fmts[]
Public header for MD5 hash function implementation.
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
#define AV_PIX_FMT_YUV444P12
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
@ AVCHROMA_LOC_UNSPECIFIED
#define AV_PIX_FMT_YUV420P10
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
@ AVCOL_RANGE_JPEG
Full range content.
#define AV_PIX_FMT_YUV420P12
AVPixelFormat
Pixel format.
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
@ AV_PIX_FMT_YUV422P10LE
planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
#define AV_PIX_FMT_YUV444P10
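The pixel-format descriptor API above can be used to query subsampling and bit depth; the comments assume AV_PIX_FMT_YUV420P10 as queried in this sketch:

    #include <libavutil/pixdesc.h>

    static int bit_depth_demo(void)
    {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(AV_PIX_FMT_YUV420P10);
        if (!desc)
            return -1;
        /* log2_chroma_w/h are both 1 for 4:2:0; comp[0].depth is 10 here. */
        return desc->comp[0].depth;
    }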
const AVProfile ff_hevc_profiles[]
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
void ff_reset_entries(AVCodecContext *avctx)
int ff_alloc_entries(AVCodecContext *avctx, int count)
void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift)
void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n)
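A heavily simplified sketch of the frame-threading progress protocol: the thread decoding a frame reports how many luma rows are finished, and a later thread waits on its reference before reading from it. This assumes libavcodec's internal thread.h header and a ThreadFrame embedded in the frame structure:

    #include "thread.h"   /* libavcodec internal header (assumption) */

    static void rows_done(ThreadFrame *tf, int last_row)
    {
        ff_thread_report_progress(tf, last_row, 0);
    }

    static void wait_for_reference(ThreadFrame *ref_tf, int needed_row)
    {
        ff_thread_await_progress(ref_tf, needed_row, 0);
    }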
#define FF_ARRAY_ELEMS(a)
A reference to a data buffer.
uint8_t * data
The data buffer.
Describe the class of an AVClass context structure.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
main external API structure.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int width
picture width / height.
enum AVColorRange color_range
MPEG vs JPEG YUV range.
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
int active_thread_type
Which multithreading methods are in use by the codec.
int has_b_frames
Size of the frame reordering buffer in the decoder.
enum AVColorSpace colorspace
YUV colorspace type.
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
int coded_width
Bitstream width / height, may be different from width/height e.g.
struct AVCodecInternal * internal
Private context used for internal data.
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it.
const char * name
Name of the codec implementation.
Content light level needed to transmit HDR over HDMI (CTA-861.3).
unsigned MaxFALL
Max average light level per frame (cd/m^2).
unsigned MaxCLL
Max content light level (cd/m^2).
Structure to hold side data for an AVFrame.
This structure describes decoded (raw) audio or video data.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
This structure stores compressed data.
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
enum AVStereo3DType type
How views are packed within the video.
int flags
Additional information about the frame packing.
enum AVStereo3DView view
Determines which views are packed.
enum PredMode pred_mode
PredMode.
uint8_t intra_split_flag
IntraSplitFlag.
uint8_t max_trafo_depth
MaxTrafoDepth.
enum PartMode part_mode
PartMode.
uint8_t cu_transquant_bypass_flag
int temporal_id
HEVC only, nuh_temporal_id_plus_1 - 1.
void * hwaccel_picture_private
uint8_t flags
A combination of HEVC_FRAME_FLAG_*.
uint16_t sequence
A sequence counter, so that old frames are output first after a POC reset.
AVBufferRef * rpl_tab_buf
AVBufferRef * tab_mvf_buf
AVBufferRef * hwaccel_priv_buf
uint8_t edge_emu_buffer[(MAX_PB_SIZE + 7) * EDGE_EMU_BUFFER_STRIDE * 2]
int16_t tmp[MAX_PB_SIZE * MAX_PB_SIZE]
uint8_t ctb_up_right_flag
uint8_t edge_emu_buffer2[(MAX_PB_SIZE + 7) * EDGE_EMU_BUFFER_STRIDE * 2]
AVBufferRef * vps_list[HEVC_MAX_VPS_COUNT]
uint8_t poc_msb_present[32]
int16_t x
horizontal component of motion vector
int16_t y
vertical component of motion vector
uint8_t intra_pred_mode_c[4]
uint8_t intra_pred_mode[4]
int rem_intra_luma_pred_mode
struct HEVCFrame * ref[HEVC_MAX_REFS]
int offset_abs[3][4]
sao_offset_abs
int eo_class[3]
sao_eo_class
uint8_t type_idx[3]
sao_type_idx
int16_t offset_val[3][5]
SaoOffsetVal.
int offset_sign[3][4]
sao_offset_sign
#define av_malloc_array(a, b)
static void error(const char *err)
static int ref[MAX_W * MAX_W]
uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff)
Convert SEI info to SMPTE 12M binary representation.
char * av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field)
Get the timecode string from the SMPTE timecode format.
#define AV_TIMECODE_STR_SIZE
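The timecode helpers above convert between the packed SMPTE ST 12-1 form (as carried in AV_FRAME_DATA_S12M_TIMECODE side data) and a printable string; a hedged sketch with arbitrary values:

    #include <libavutil/timecode.h>

    static void timecode_demo(void)
    {
        char buf[AV_TIMECODE_STR_SIZE];
        AVRational rate = { 30000, 1001 };                /* 29.97 fps */

        /* Pack 01:02:03;04 (drop-frame) into the 32-bit SMPTE representation... */
        uint32_t tc = av_timecode_get_smpte(rate, 1, 1, 2, 3, 4);
        /* ...and format it back into a human-readable string. */
        av_timecode_make_smpte_tc_string2(buf, rate, tc, 0, 0);
    }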
static const uint8_t offset[127][2]