hevcdec.c
1 /*
2  * HEVC video Decoder
3  *
4  * Copyright (C) 2012 - 2013 Guillaume Martres
5  * Copyright (C) 2012 - 2013 Mickael Raulet
6  * Copyright (C) 2012 - 2013 Gildas Cocherel
7  * Copyright (C) 2012 - 2013 Wassim Hamidouche
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/common.h"
28 #include "libavutil/display.h"
30 #include "libavutil/internal.h"
32 #include "libavutil/md5.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/stereo3d.h"
36 #include "libavutil/timecode.h"
37 
38 #include "bswapdsp.h"
39 #include "bytestream.h"
40 #include "cabac_functions.h"
41 #include "golomb.h"
42 #include "hevc.h"
43 #include "hevc_data.h"
44 #include "hevc_parse.h"
45 #include "hevcdec.h"
46 #include "hwconfig.h"
47 #include "profiles.h"
48 
49 const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
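/* The table above maps a prediction-block width to an index into the hevcdsp
 * put_hevc_* function arrays: e.g. a width of 8 selects entry 3 and a width
 * of 64 selects entry 9. Unlisted widths default to 0, but in practice only
 * the listed PU widths occur. */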
50 
51 /**
52  * NOTE: Each function hls_foo corresponds to the function foo in the
53  * specification (HLS stands for High Level Syntax).
54  */
55 
56 /**
57  * Section 5.7
58  */
59 
60 /* free everything allocated by pic_arrays_init() */
61 static void pic_arrays_free(HEVCContext *s)
62 {
63  av_freep(&s->sao);
64  av_freep(&s->deblock);
65 
66  av_freep(&s->skip_flag);
67  av_freep(&s->tab_ct_depth);
68 
69  av_freep(&s->tab_ipm);
70  av_freep(&s->cbf_luma);
71  av_freep(&s->is_pcm);
72 
73  av_freep(&s->qp_y_tab);
74  av_freep(&s->tab_slice_address);
75  av_freep(&s->filter_slice_edges);
76 
77  av_freep(&s->horizontal_bs);
78  av_freep(&s->vertical_bs);
79 
80  av_freep(&s->sh.entry_point_offset);
81  av_freep(&s->sh.size);
82  av_freep(&s->sh.offset);
83 
84  av_buffer_pool_uninit(&s->tab_mvf_pool);
85  av_buffer_pool_uninit(&s->rpl_tab_pool);
86 }
87 
88 /* allocate arrays that depend on frame dimensions */
89 static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
90 {
91  int log2_min_cb_size = sps->log2_min_cb_size;
92  int width = sps->width;
93  int height = sps->height;
94  int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) *
95  ((height >> log2_min_cb_size) + 1);
96  int ctb_count = sps->ctb_width * sps->ctb_height;
97  int min_pu_size = sps->min_pu_width * sps->min_pu_height;
98 
99  s->bs_width = (width >> 2) + 1;
100  s->bs_height = (height >> 2) + 1;
101 
102  s->sao = av_calloc(ctb_count, sizeof(*s->sao));
103  s->deblock = av_calloc(ctb_count, sizeof(*s->deblock));
104  if (!s->sao || !s->deblock)
105  goto fail;
106 
107  s->skip_flag = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
108  s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
109  if (!s->skip_flag || !s->tab_ct_depth)
110  goto fail;
111 
112  s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height);
113  s->tab_ipm = av_mallocz(min_pu_size);
114  s->is_pcm = av_malloc_array(sps->min_pu_width + 1, sps->min_pu_height + 1);
115  if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
116  goto fail;
117 
118  s->filter_slice_edges = av_mallocz(ctb_count);
119  s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
120  sizeof(*s->tab_slice_address));
121  s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
122  sizeof(*s->qp_y_tab));
123  if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)
124  goto fail;
125 
126  s->horizontal_bs = av_calloc(s->bs_width, s->bs_height);
127  s->vertical_bs = av_calloc(s->bs_width, s->bs_height);
128  if (!s->horizontal_bs || !s->vertical_bs)
129  goto fail;
130 
131  s->tab_mvf_pool = av_buffer_pool_init(min_pu_size * sizeof(MvField),
132  av_buffer_allocz);
133  s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab),
134  av_buffer_allocz);
135  if (!s->tab_mvf_pool || !s->rpl_tab_pool)
136  goto fail;
137 
138  return 0;
139 
140 fail:
141  pic_arrays_free(s);
142  return AVERROR(ENOMEM);
143 }
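/* Rough sizing example for the arrays above, assuming a 1920x1080 stream with
 * 64x64 CTBs and an 8x8 minimum coding block (i.e. a 4x4 minimum PU):
 *   pic_size_in_ctb  = (1920>>3 + 1) * (1080>>3 + 1) = 241 * 136 = 32776
 *   ctb_count        = 30 * 17 = 510
 *   min_pu_size      = 480 * 270 = 129600
 *   bs_width/height  = (1920>>2)+1 = 481 and (1080>>2)+1 = 271
 * so the boundary-strength maps track deblocking edges on a 4-sample grid. */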
144 
145 static int pred_weight_table(HEVCContext *s, GetBitContext *gb)
146 {
147  int i = 0;
148  int j = 0;
149  uint8_t luma_weight_l0_flag[16];
150  uint8_t chroma_weight_l0_flag[16];
151  uint8_t luma_weight_l1_flag[16];
152  uint8_t chroma_weight_l1_flag[16];
153  int luma_log2_weight_denom;
154 
155  luma_log2_weight_denom = get_ue_golomb_long(gb);
156  if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) {
157  av_log(s->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom);
158  return AVERROR_INVALIDDATA;
159  }
160  s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3);
161  if (s->ps.sps->chroma_format_idc != 0) {
162  int64_t chroma_log2_weight_denom = luma_log2_weight_denom + (int64_t)get_se_golomb(gb);
163  if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) {
164  av_log(s->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %"PRId64" is invalid\n", chroma_log2_weight_denom);
165  return AVERROR_INVALIDDATA;
166  }
167  s->sh.chroma_log2_weight_denom = chroma_log2_weight_denom;
168  }
169 
170  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
171  luma_weight_l0_flag[i] = get_bits1(gb);
172  if (!luma_weight_l0_flag[i]) {
173  s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
174  s->sh.luma_offset_l0[i] = 0;
175  }
176  }
177  if (s->ps.sps->chroma_format_idc != 0) {
178  for (i = 0; i < s->sh.nb_refs[L0]; i++)
179  chroma_weight_l0_flag[i] = get_bits1(gb);
180  } else {
181  for (i = 0; i < s->sh.nb_refs[L0]; i++)
182  chroma_weight_l0_flag[i] = 0;
183  }
184  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
185  if (luma_weight_l0_flag[i]) {
186  int delta_luma_weight_l0 = get_se_golomb(gb);
187  if ((int8_t)delta_luma_weight_l0 != delta_luma_weight_l0)
188  return AVERROR_INVALIDDATA;
189  s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
190  s->sh.luma_offset_l0[i] = get_se_golomb(gb);
191  }
192  if (chroma_weight_l0_flag[i]) {
193  for (j = 0; j < 2; j++) {
194  int delta_chroma_weight_l0 = get_se_golomb(gb);
195  int delta_chroma_offset_l0 = get_se_golomb(gb);
196 
197  if ( (int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0
198  || delta_chroma_offset_l0 < -(1<<17) || delta_chroma_offset_l0 > (1<<17)) {
199  return AVERROR_INVALIDDATA;
200  }
201 
202  s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
203  s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
204  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
205  }
206  } else {
207  s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
208  s->sh.chroma_offset_l0[i][0] = 0;
209  s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
210  s->sh.chroma_offset_l0[i][1] = 0;
211  }
212  }
213  if (s->sh.slice_type == HEVC_SLICE_B) {
214  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
215  luma_weight_l1_flag[i] = get_bits1(gb);
216  if (!luma_weight_l1_flag[i]) {
217  s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
218  s->sh.luma_offset_l1[i] = 0;
219  }
220  }
221  if (s->ps.sps->chroma_format_idc != 0) {
222  for (i = 0; i < s->sh.nb_refs[L1]; i++)
223  chroma_weight_l1_flag[i] = get_bits1(gb);
224  } else {
225  for (i = 0; i < s->sh.nb_refs[L1]; i++)
226  chroma_weight_l1_flag[i] = 0;
227  }
228  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
229  if (luma_weight_l1_flag[i]) {
230  int delta_luma_weight_l1 = get_se_golomb(gb);
231  if ((int8_t)delta_luma_weight_l1 != delta_luma_weight_l1)
232  return AVERROR_INVALIDDATA;
233  s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
234  s->sh.luma_offset_l1[i] = get_se_golomb(gb);
235  }
236  if (chroma_weight_l1_flag[i]) {
237  for (j = 0; j < 2; j++) {
238  int delta_chroma_weight_l1 = get_se_golomb(gb);
239  int delta_chroma_offset_l1 = get_se_golomb(gb);
240 
241  if ( (int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1
242  || delta_chroma_offset_l1 < -(1<<17) || delta_chroma_offset_l1 > (1<<17)) {
243  return AVERROR_INVALIDDATA;
244  }
245 
246  s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
247  s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
248  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
249  }
250  } else {
251  s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
252  s->sh.chroma_offset_l1[i][0] = 0;
253  s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
254  s->sh.chroma_offset_l1[i][1] = 0;
255  }
256  }
257  }
258  return 0;
259 }
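/* The weights and offsets parsed above feed the weighted-prediction DSP
 * functions (put_hevc_qpel_uni_w / put_hevc_epel_uni_w and the *_bi_w
 * variants used further down). Roughly, following the spec, a uni-predicted
 * sample becomes
 *   clip(((w * p + (1 << (denom - 1))) >> denom) + offset)
 * with w = luma_weight_lX[i], denom = luma_log2_weight_denom and
 * offset = luma_offset_lX[i]; chroma is handled analogously. */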
260 
261 static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
262 {
263  const HEVCSPS *sps = s->ps.sps;
264  int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
265  int prev_delta_msb = 0;
266  unsigned int nb_sps = 0, nb_sh;
267  int i;
268 
269  rps->nb_refs = 0;
270  if (!sps->long_term_ref_pics_present_flag)
271  return 0;
272 
273  if (sps->num_long_term_ref_pics_sps > 0)
274  nb_sps = get_ue_golomb_long(gb);
275  nb_sh = get_ue_golomb_long(gb);
276 
277  if (nb_sps > sps->num_long_term_ref_pics_sps)
278  return AVERROR_INVALIDDATA;
279  if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc))
280  return AVERROR_INVALIDDATA;
281 
282  rps->nb_refs = nb_sh + nb_sps;
283 
284  for (i = 0; i < rps->nb_refs; i++) {
285 
286  if (i < nb_sps) {
287  uint8_t lt_idx_sps = 0;
288 
289  if (sps->num_long_term_ref_pics_sps > 1)
290  lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps));
291 
292  rps->poc[i] = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
293  rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
294  } else {
295  rps->poc[i] = get_bits(gb, sps->log2_max_poc_lsb);
296  rps->used[i] = get_bits1(gb);
297  }
298 
299  rps->poc_msb_present[i] = get_bits1(gb);
300  if (rps->poc_msb_present[i]) {
301  int64_t delta = get_ue_golomb_long(gb);
302  int64_t poc;
303 
304  if (i && i != nb_sps)
305  delta += prev_delta_msb;
306 
307  poc = rps->poc[i] + s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;
308  if (poc != (int32_t)poc)
309  return AVERROR_INVALIDDATA;
310  rps->poc[i] = poc;
311  prev_delta_msb = delta;
312  }
313  }
314 
315  return 0;
316 }
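/* Example of the POC MSB reconstruction above: with log2_max_poc_lsb = 8
 * (max_poc_lsb = 256), current s->poc = 300 (so pic_order_cnt_lsb = 44),
 * a signalled lsb of 40 and delta = 1, the long-term picture resolves to
 *   poc = 40 + 300 - 1*256 - 44 = 40,
 * i.e. the picture one LSB cycle back whose lsb was 40. */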
317 
318 static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
319 {
320  AVCodecContext *avctx = s->avctx;
321  const HEVCParamSets *ps = &s->ps;
322  const HEVCVPS *vps = (const HEVCVPS*)ps->vps_list[sps->vps_id]->data;
323  const HEVCWindow *ow = &sps->output_window;
324  unsigned int num = 0, den = 0;
325 
326  avctx->pix_fmt = sps->pix_fmt;
327  avctx->coded_width = sps->width;
328  avctx->coded_height = sps->height;
329  avctx->width = sps->width - ow->left_offset - ow->right_offset;
330  avctx->height = sps->height - ow->top_offset - ow->bottom_offset;
331  avctx->has_b_frames = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics;
332  avctx->profile = sps->ptl.general_ptl.profile_idc;
333  avctx->level = sps->ptl.general_ptl.level_idc;
334 
335  ff_set_sar(avctx, sps->vui.sar);
336 
337  if (sps->vui.video_signal_type_present_flag)
338  avctx->color_range = sps->vui.video_full_range_flag ? AVCOL_RANGE_JPEG
339  : AVCOL_RANGE_MPEG;
340  else
341  avctx->color_range = AVCOL_RANGE_MPEG;
342 
343  if (sps->vui.colour_description_present_flag) {
344  avctx->color_primaries = sps->vui.colour_primaries;
345  avctx->color_trc = sps->vui.transfer_characteristic;
346  avctx->colorspace = sps->vui.matrix_coeffs;
347  } else {
348  avctx->color_primaries = AVCOL_PRI_UNSPECIFIED;
349  avctx->color_trc = AVCOL_TRC_UNSPECIFIED;
350  avctx->colorspace = AVCOL_SPC_UNSPECIFIED;
351  }
352 
353  avctx->chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED;
354  if (sps->chroma_format_idc == 1) {
355  if (sps->vui.chroma_loc_info_present_flag) {
356  if (sps->vui.chroma_sample_loc_type_top_field <= 5)
357  avctx->chroma_sample_location = sps->vui.chroma_sample_loc_type_top_field + 1;
358  } else
359  avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
360  }
361 
362  if (vps->vps_timing_info_present_flag) {
363  num = vps->vps_num_units_in_tick;
364  den = vps->vps_time_scale;
365  } else if (sps->vui.vui_timing_info_present_flag) {
366  num = sps->vui.vui_num_units_in_tick;
367  den = sps->vui.vui_time_scale;
368  }
369 
370  if (num != 0 && den != 0)
371  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
372  num, den, 1 << 30);
373 }
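/* Example of the timing export above: vui_num_units_in_tick = 1001 and
 * vui_time_scale = 60000 yield avctx->framerate = 60000/1001 (~59.94 fps);
 * num and den are swapped in the av_reduce() call because the tick pair
 * describes a frame duration rather than a rate. */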
374 
375 static int export_stream_params_from_sei(HEVCContext *s)
376 {
377  AVCodecContext *avctx = s->avctx;
378 
379  if (s->sei.a53_caption.buf_ref)
380  s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
381 
382  if (s->sei.alternative_transfer.present &&
383  av_color_transfer_name(s->sei.alternative_transfer.preferred_transfer_characteristics) &&
384  s->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
385  avctx->color_trc = s->sei.alternative_transfer.preferred_transfer_characteristics;
386  }
387 
388  if (s->sei.film_grain_characteristics.present)
389  avctx->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
390 
391  return 0;
392 }
393 
394 static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
395 {
396 #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
397  CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
398  CONFIG_HEVC_NVDEC_HWACCEL + \
399  CONFIG_HEVC_VAAPI_HWACCEL + \
400  CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
401  CONFIG_HEVC_VDPAU_HWACCEL)
402  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
403 
404  switch (sps->pix_fmt) {
405  case AV_PIX_FMT_YUV420P:
406  case AV_PIX_FMT_YUVJ420P:
407 #if CONFIG_HEVC_DXVA2_HWACCEL
408  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
409 #endif
410 #if CONFIG_HEVC_D3D11VA_HWACCEL
411  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
412  *fmt++ = AV_PIX_FMT_D3D11;
413 #endif
414 #if CONFIG_HEVC_VAAPI_HWACCEL
415  *fmt++ = AV_PIX_FMT_VAAPI;
416 #endif
417 #if CONFIG_HEVC_VDPAU_HWACCEL
418  *fmt++ = AV_PIX_FMT_VDPAU;
419 #endif
420 #if CONFIG_HEVC_NVDEC_HWACCEL
421  *fmt++ = AV_PIX_FMT_CUDA;
422 #endif
423 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
424  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
425 #endif
426  break;
427  case AV_PIX_FMT_YUV420P10:
428 #if CONFIG_HEVC_DXVA2_HWACCEL
429  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
430 #endif
431 #if CONFIG_HEVC_D3D11VA_HWACCEL
432  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
433  *fmt++ = AV_PIX_FMT_D3D11;
434 #endif
435 #if CONFIG_HEVC_VAAPI_HWACCEL
436  *fmt++ = AV_PIX_FMT_VAAPI;
437 #endif
438 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
439  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
440 #endif
441 #if CONFIG_HEVC_VDPAU_HWACCEL
442  *fmt++ = AV_PIX_FMT_VDPAU;
443 #endif
444 #if CONFIG_HEVC_NVDEC_HWACCEL
445  *fmt++ = AV_PIX_FMT_CUDA;
446 #endif
447  break;
448  case AV_PIX_FMT_YUV444P:
449 #if CONFIG_HEVC_VDPAU_HWACCEL
450  *fmt++ = AV_PIX_FMT_VDPAU;
451 #endif
452 #if CONFIG_HEVC_NVDEC_HWACCEL
453  *fmt++ = AV_PIX_FMT_CUDA;
454 #endif
455 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
456  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
457 #endif
458  break;
459  case AV_PIX_FMT_YUV422P:
460  case AV_PIX_FMT_YUV422P10LE:
461 #if CONFIG_HEVC_VAAPI_HWACCEL
462  *fmt++ = AV_PIX_FMT_VAAPI;
463 #endif
464 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
465  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
466 #endif
467  break;
468  case AV_PIX_FMT_YUV444P10:
469 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
470  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
471 #endif
472  case AV_PIX_FMT_YUV420P12:
473  case AV_PIX_FMT_YUV444P12:
474 #if CONFIG_HEVC_VDPAU_HWACCEL
475  *fmt++ = AV_PIX_FMT_VDPAU;
476 #endif
477 #if CONFIG_HEVC_NVDEC_HWACCEL
478  *fmt++ = AV_PIX_FMT_CUDA;
479 #endif
480  break;
481  }
482 
483  *fmt++ = sps->pix_fmt;
484  *fmt = AV_PIX_FMT_NONE;
485 
486  return ff_thread_get_format(s->avctx, pix_fmts);
487 }
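/* The candidate list built above is ordered: hardware pixel formats first
 * (guarded by the CONFIG_*_HWACCEL blocks), then the software format from the
 * SPS, terminated by AV_PIX_FMT_NONE. ff_thread_get_format() hands it to the
 * user's get_format callback, which picks the first entry it can actually
 * use, so software decoding remains the fallback. */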
488 
489 static int set_sps(HEVCContext *s, const HEVCSPS *sps,
490  enum AVPixelFormat pix_fmt)
491 {
492  int ret, i;
493 
494  pic_arrays_free(s);
495  s->ps.sps = NULL;
496  s->ps.vps = NULL;
497 
498  if (!sps)
499  return 0;
500 
501  ret = pic_arrays_init(s, sps);
502  if (ret < 0)
503  goto fail;
504 
505  export_stream_params(s, sps);
506 
507  s->avctx->pix_fmt = pix_fmt;
508 
509  ff_hevc_pred_init(&s->hpc, sps->bit_depth);
510  ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth);
511  ff_videodsp_init (&s->vdsp, sps->bit_depth);
512 
513  for (i = 0; i < 3; i++) {
514  av_freep(&s->sao_pixel_buffer_h[i]);
515  av_freep(&s->sao_pixel_buffer_v[i]);
516  }
517 
518  if (sps->sao_enabled && !s->avctx->hwaccel) {
519  int c_count = (sps->chroma_format_idc != 0) ? 3 : 1;
520  int c_idx;
521 
522  for(c_idx = 0; c_idx < c_count; c_idx++) {
523  int w = sps->width >> sps->hshift[c_idx];
524  int h = sps->height >> sps->vshift[c_idx];
525  s->sao_pixel_buffer_h[c_idx] =
526  av_malloc((w * 2 * sps->ctb_height) <<
527  sps->pixel_shift);
528  s->sao_pixel_buffer_v[c_idx] =
529  av_malloc((h * 2 * sps->ctb_width) <<
530  sps->pixel_shift);
531  if (!s->sao_pixel_buffer_h[c_idx] ||
532  !s->sao_pixel_buffer_v[c_idx])
533  goto fail;
534  }
535  }
536 
537  s->ps.sps = sps;
538  s->ps.vps = (HEVCVPS*) s->ps.vps_list[s->ps.sps->vps_id]->data;
539 
540  return 0;
541 
542 fail:
543  pic_arrays_free(s);
544  for (i = 0; i < 3; i++) {
545  av_freep(&s->sao_pixel_buffer_h[i]);
546  av_freep(&s->sao_pixel_buffer_v[i]);
547  }
548  s->ps.sps = NULL;
549  return ret;
550 }
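/* Note on the SAO buffers allocated above: for each plane, the "h" buffer
 * keeps two rows of saved boundary samples per CTB row (w * 2 * ctb_height
 * samples) and the "v" buffer two columns per CTB column (h * 2 * ctb_width),
 * both scaled by pixel_shift for bit depths above 8, so the SAO code can read
 * copies of CTB-boundary samples after the originals have been filtered. */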
551 
552 static int hls_slice_header(HEVCContext *s)
553 {
554  GetBitContext *gb = &s->HEVClc->gb;
555  SliceHeader *sh = &s->sh;
556  int i, ret;
557 
558  // Coded parameters
559  sh->first_slice_in_pic_flag = get_bits1(gb);
560  if (s->ref && sh->first_slice_in_pic_flag) {
561  av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n");
562  return 1; // This slice will be skipped later, do not corrupt state
563  }
564 
565  if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) {
566  s->seq_decode = (s->seq_decode + 1) & 0xff;
567  s->max_ra = INT_MAX;
568  if (IS_IDR(s))
569  ff_hevc_clear_refs(s);
570  }
571  sh->no_output_of_prior_pics_flag = 0;
572  if (IS_IRAP(s))
573  sh->no_output_of_prior_pics_flag = get_bits1(gb);
574 
575  sh->pps_id = get_ue_golomb_long(gb);
576  if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) {
577  av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
578  return AVERROR_INVALIDDATA;
579  }
580  if (!sh->first_slice_in_pic_flag &&
581  s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) {
582  av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n");
583  return AVERROR_INVALIDDATA;
584  }
585  s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data;
586  if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1)
587  sh->no_output_of_prior_pics_flag = 1;
588 
589  if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) {
590  const HEVCSPS *sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data;
591  const HEVCSPS *last_sps = s->ps.sps;
592  enum AVPixelFormat pix_fmt;
593 
594  if (last_sps && IS_IRAP(s) && s->nal_unit_type != HEVC_NAL_CRA_NUT) {
595  if (sps->width != last_sps->width || sps->height != last_sps->height ||
596  sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering !=
597  last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering)
598  sh->no_output_of_prior_pics_flag = 0;
599  }
600  ff_hevc_clear_refs(s);
601 
602  ret = set_sps(s, sps, sps->pix_fmt);
603  if (ret < 0)
604  return ret;
605 
606  pix_fmt = get_format(s, sps);
607  if (pix_fmt < 0)
608  return pix_fmt;
609  s->avctx->pix_fmt = pix_fmt;
610 
611  s->seq_decode = (s->seq_decode + 1) & 0xff;
612  s->max_ra = INT_MAX;
613  }
614 
615  ret = export_stream_params_from_sei(s);
616  if (ret < 0)
617  return ret;
618 
619  sh->dependent_slice_segment_flag = 0;
620  if (!sh->first_slice_in_pic_flag) {
621  int slice_address_length;
622 
623  if (s->ps.pps->dependent_slice_segments_enabled_flag)
624  sh->dependent_slice_segment_flag = get_bits1(gb);
625 
626  slice_address_length = av_ceil_log2(s->ps.sps->ctb_width *
627  s->ps.sps->ctb_height);
628  sh->slice_segment_addr = get_bitsz(gb, slice_address_length);
629  if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
630  av_log(s->avctx, AV_LOG_ERROR,
631  "Invalid slice segment address: %u.\n",
632  sh->slice_segment_addr);
633  return AVERROR_INVALIDDATA;
634  }
635 
636  if (!sh->dependent_slice_segment_flag) {
637  sh->slice_addr = sh->slice_segment_addr;
638  s->slice_idx++;
639  }
640  } else {
641  sh->slice_segment_addr = sh->slice_addr = 0;
642  s->slice_idx = 0;
643  s->slice_initialized = 0;
644  }
645 
646  if (!sh->dependent_slice_segment_flag) {
647  s->slice_initialized = 0;
648 
649  for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)
650  skip_bits(gb, 1); // slice_reserved_undetermined_flag[]
651 
652  sh->slice_type = get_ue_golomb_long(gb);
653  if (!(sh->slice_type == HEVC_SLICE_I ||
654  sh->slice_type == HEVC_SLICE_P ||
655  sh->slice_type == HEVC_SLICE_B)) {
656  av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
657  sh->slice_type);
658  return AVERROR_INVALIDDATA;
659  }
660  if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I) {
661  av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n");
662  return AVERROR_INVALIDDATA;
663  }
664 
665  // when flag is not present, picture is inferred to be output
666  sh->pic_output_flag = 1;
667  if (s->ps.pps->output_flag_present_flag)
668  sh->pic_output_flag = get_bits1(gb);
669 
670  if (s->ps.sps->separate_colour_plane_flag)
671  sh->colour_plane_id = get_bits(gb, 2);
672 
673  if (!IS_IDR(s)) {
674  int poc, pos;
675 
676  sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb);
677  poc = ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type);
678  if (!sh->first_slice_in_pic_flag && poc != s->poc) {
679  av_log(s->avctx, AV_LOG_WARNING,
680  "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
681  if (s->avctx->err_recognition & AV_EF_EXPLODE)
682  return AVERROR_INVALIDDATA;
683  poc = s->poc;
684  }
685  s->poc = poc;
686 
688  pos = get_bits_left(gb);
689  if (!sh->short_term_ref_pic_set_sps_flag) {
690  ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1);
691  if (ret < 0)
692  return ret;
693 
694  sh->short_term_rps = &sh->slice_rps;
695  } else {
696  int numbits, rps_idx;
697 
698  if (!s->ps.sps->nb_st_rps) {
699  av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n");
700  return AVERROR_INVALIDDATA;
701  }
702 
703  numbits = av_ceil_log2(s->ps.sps->nb_st_rps);
704  rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
705  sh->short_term_rps = &s->ps.sps->st_rps[rps_idx];
706  }
707  sh->short_term_ref_pic_set_size = pos - get_bits_left(gb);
708 
709  pos = get_bits_left(gb);
710  ret = decode_lt_rps(s, &sh->long_term_rps, gb);
711  if (ret < 0) {
712  av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n");
713  if (s->avctx->err_recognition & AV_EF_EXPLODE)
714  return AVERROR_INVALIDDATA;
715  }
716  sh->long_term_ref_pic_set_size = pos - get_bits_left(gb);
717 
718  if (s->ps.sps->sps_temporal_mvp_enabled_flag)
719  sh->slice_temporal_mvp_enabled_flag = get_bits1(gb);
720  else
721  sh->slice_temporal_mvp_enabled_flag = 0;
722  } else {
723  s->sh.short_term_rps = NULL;
724  s->poc = 0;
725  }
726 
727  /* 8.3.1 */
728  if (sh->first_slice_in_pic_flag && s->temporal_id == 0 &&
729  s->nal_unit_type != HEVC_NAL_TRAIL_N &&
730  s->nal_unit_type != HEVC_NAL_TSA_N &&
731  s->nal_unit_type != HEVC_NAL_STSA_N &&
732  s->nal_unit_type != HEVC_NAL_RADL_N &&
733  s->nal_unit_type != HEVC_NAL_RADL_R &&
734  s->nal_unit_type != HEVC_NAL_RASL_N &&
735  s->nal_unit_type != HEVC_NAL_RASL_R)
736  s->pocTid0 = s->poc;
737 
738  if (s->ps.sps->sao_enabled) {
739  sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb);
740  if (s->ps.sps->chroma_format_idc) {
741  sh->slice_sample_adaptive_offset_flag[1] =
742  sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb);
743  }
744  } else {
745  sh->slice_sample_adaptive_offset_flag[0] = 0;
746  sh->slice_sample_adaptive_offset_flag[1] = 0;
747  sh->slice_sample_adaptive_offset_flag[2] = 0;
748  }
749 
750  sh->nb_refs[L0] = sh->nb_refs[L1] = 0;
751  if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) {
752  int nb_refs;
753 
754  sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
755  if (sh->slice_type == HEVC_SLICE_B)
756  sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;
757 
758  if (get_bits1(gb)) { // num_ref_idx_active_override_flag
759  sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1;
760  if (sh->slice_type == HEVC_SLICE_B)
761  sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1;
762  }
763  if (sh->nb_refs[L0] > HEVC_MAX_REFS || sh->nb_refs[L1] > HEVC_MAX_REFS) {
764  av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n",
765  sh->nb_refs[L0], sh->nb_refs[L1]);
766  return AVERROR_INVALIDDATA;
767  }
768 
769  sh->rpl_modification_flag[0] = 0;
770  sh->rpl_modification_flag[1] = 0;
771  nb_refs = ff_hevc_frame_nb_refs(s);
772  if (!nb_refs) {
773  av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n");
774  return AVERROR_INVALIDDATA;
775  }
776 
777  if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {
778  sh->rpl_modification_flag[0] = get_bits1(gb);
779  if (sh->rpl_modification_flag[0]) {
780  for (i = 0; i < sh->nb_refs[L0]; i++)
781  sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs));
782  }
783 
784  if (sh->slice_type == HEVC_SLICE_B) {
785  sh->rpl_modification_flag[1] = get_bits1(gb);
786  if (sh->rpl_modification_flag[1] == 1)
787  for (i = 0; i < sh->nb_refs[L1]; i++)
788  sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs));
789  }
790  }
791 
792  if (sh->slice_type == HEVC_SLICE_B)
793  sh->mvd_l1_zero_flag = get_bits1(gb);
794 
795  if (s->ps.pps->cabac_init_present_flag)
796  sh->cabac_init_flag = get_bits1(gb);
797  else
798  sh->cabac_init_flag = 0;
799 
800  sh->collocated_ref_idx = 0;
801  if (sh->slice_temporal_mvp_enabled_flag) {
802  sh->collocated_list = L0;
803  if (sh->slice_type == HEVC_SLICE_B)
804  sh->collocated_list = !get_bits1(gb);
805 
806  if (sh->nb_refs[sh->collocated_list] > 1) {
807  sh->collocated_ref_idx = get_ue_golomb_long(gb);
808  if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) {
809  av_log(s->avctx, AV_LOG_ERROR,
810  "Invalid collocated_ref_idx: %d.\n",
811  sh->collocated_ref_idx);
812  return AVERROR_INVALIDDATA;
813  }
814  }
815  }
816 
817  if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) ||
818  (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) {
819  int ret = pred_weight_table(s, gb);
820  if (ret < 0)
821  return ret;
822  }
823 
824  sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb);
825  if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) {
826  av_log(s->avctx, AV_LOG_ERROR,
827  "Invalid number of merging MVP candidates: %d.\n",
828  sh->max_num_merge_cand);
829  return AVERROR_INVALIDDATA;
830  }
831  }
832 
833  sh->slice_qp_delta = get_se_golomb(gb);
834 
835  if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {
836  sh->slice_cb_qp_offset = get_se_golomb(gb);
837  sh->slice_cr_qp_offset = get_se_golomb(gb);
838  if (sh->slice_cb_qp_offset < -12 || sh->slice_cb_qp_offset > 12 ||
839  sh->slice_cr_qp_offset < -12 || sh->slice_cr_qp_offset > 12) {
840  av_log(s->avctx, AV_LOG_ERROR, "Invalid slice cx qp offset.\n");
841  return AVERROR_INVALIDDATA;
842  }
843  } else {
844  sh->slice_cb_qp_offset = 0;
845  sh->slice_cr_qp_offset = 0;
846  }
847 
848  if (s->ps.pps->chroma_qp_offset_list_enabled_flag)
849  sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb);
850  else
851  sh->cu_chroma_qp_offset_enabled_flag = 0;
852 
853  if (s->ps.pps->deblocking_filter_control_present_flag) {
854  int deblocking_filter_override_flag = 0;
855 
856  if (s->ps.pps->deblocking_filter_override_enabled_flag)
857  deblocking_filter_override_flag = get_bits1(gb);
858 
859  if (deblocking_filter_override_flag) {
860  sh->disable_deblocking_filter_flag = get_bits1(gb);
861  if (!sh->disable_deblocking_filter_flag) {
862  int beta_offset_div2 = get_se_golomb(gb);
863  int tc_offset_div2 = get_se_golomb(gb);
864  if (beta_offset_div2 < -6 || beta_offset_div2 > 6 ||
865  tc_offset_div2 < -6 || tc_offset_div2 > 6) {
866  av_log(s->avctx, AV_LOG_ERROR,
867  "Invalid deblock filter offsets: %d, %d\n",
868  beta_offset_div2, tc_offset_div2);
869  return AVERROR_INVALIDDATA;
870  }
871  sh->beta_offset = beta_offset_div2 * 2;
872  sh->tc_offset = tc_offset_div2 * 2;
873  }
874  } else {
875  sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf;
876  sh->beta_offset = s->ps.pps->beta_offset;
877  sh->tc_offset = s->ps.pps->tc_offset;
878  }
879  } else {
880  sh->disable_deblocking_filter_flag = 0;
881  sh->beta_offset = 0;
882  sh->tc_offset = 0;
883  }
884 
885  if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&
886  (sh->slice_sample_adaptive_offset_flag[0] ||
887  sh->slice_sample_adaptive_offset_flag[1] ||
888  !sh->disable_deblocking_filter_flag)) {
889  sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb);
890  } else {
891  sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag;
892  }
893  } else if (!s->slice_initialized) {
894  av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n");
895  return AVERROR_INVALIDDATA;
896  }
897 
898  sh->num_entry_point_offsets = 0;
899  if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
900  unsigned num_entry_point_offsets = get_ue_golomb_long(gb);
901  // It would be possible to bound this tighter but this here is simpler
902  if (num_entry_point_offsets > get_bits_left(gb)) {
903  av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);
904  return AVERROR_INVALIDDATA;
905  }
906 
907  sh->num_entry_point_offsets = num_entry_point_offsets;
908  if (sh->num_entry_point_offsets > 0) {
909  int offset_len = get_ue_golomb_long(gb) + 1;
910 
911  if (offset_len < 1 || offset_len > 32) {
912  sh->num_entry_point_offsets = 0;
913  av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len);
914  return AVERROR_INVALIDDATA;
915  }
916 
917  av_freep(&sh->entry_point_offset);
918  av_freep(&sh->offset);
919  av_freep(&sh->size);
920  sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned));
921  sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
922  sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
923  if (!sh->entry_point_offset || !sh->offset || !sh->size) {
924  sh->num_entry_point_offsets = 0;
925  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
926  return AVERROR(ENOMEM);
927  }
928  for (i = 0; i < sh->num_entry_point_offsets; i++) {
929  unsigned val = get_bits_long(gb, offset_len);
930  sh->entry_point_offset[i] = val + 1; // +1; // +1 to get the size
931  }
932  if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) {
933  s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here
934  s->threads_number = 1;
935  } else
936  s->enable_parallel_tiles = 0;
937  } else
938  s->enable_parallel_tiles = 0;
939  }
940 
941  if (s->ps.pps->slice_header_extension_present_flag) {
942  unsigned int length = get_ue_golomb_long(gb);
943  if (length*8LL > get_bits_left(gb)) {
944  av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n");
945  return AVERROR_INVALIDDATA;
946  }
947  for (i = 0; i < length; i++)
948  skip_bits(gb, 8); // slice_header_extension_data_byte
949  }
950 
951  // Inferred parameters
952  sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta;
953  if (sh->slice_qp > 51 ||
954  sh->slice_qp < -s->ps.sps->qp_bd_offset) {
955  av_log(s->avctx, AV_LOG_ERROR,
956  "The slice_qp %d is outside the valid range "
957  "[%d, 51].\n",
958  sh->slice_qp,
959  -s->ps.sps->qp_bd_offset);
960  return AVERROR_INVALIDDATA;
961  }
962 
963  s->sh.slice_ctb_addr_rs = s->sh.slice_segment_addr;
964 
965  if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
966  av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
967  return AVERROR_INVALIDDATA;
968  }
969 
970  if (get_bits_left(gb) < 0) {
971  av_log(s->avctx, AV_LOG_ERROR,
972  "Overread slice header by %d bits\n", -get_bits_left(gb));
973  return AVERROR_INVALIDDATA;
974  }
975 
976  s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;
977 
978  if (!s->ps.pps->cu_qp_delta_enabled_flag)
979  s->HEVClc->qp_y = s->sh.slice_qp;
980 
981  s->slice_initialized = 1;
982  s->HEVClc->tu.cu_qp_offset_cb = 0;
983  s->HEVClc->tu.cu_qp_offset_cr = 0;
984 
985  return 0;
986 }
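/* Worked example for the slice_qp derivation above: with
 * pic_init_qp_minus26 = -4 and slice_qp_delta = 3, slice_qp = 26 - 4 + 3 = 25.
 * The lower bound -qp_bd_offset comes from the SPS bit depth (6 * (bit_depth
 * - 8) per the spec), so for 8-bit content the valid range is simply
 * [0, 51]. */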
987 
988 #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
989 
990 #define SET_SAO(elem, value) \
991 do { \
992  if (!sao_merge_up_flag && !sao_merge_left_flag) \
993  sao->elem = value; \
994  else if (sao_merge_left_flag) \
995  sao->elem = CTB(s->sao, rx-1, ry).elem; \
996  else if (sao_merge_up_flag) \
997  sao->elem = CTB(s->sao, rx, ry-1).elem; \
998  else \
999  sao->elem = 0; \
1000 } while (0)
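/* SET_SAO implements the SAO merge semantics: if the left-merge flag is set,
 * the whole parameter is copied from the CTB at (rx-1, ry); if the up-merge
 * flag is set, from (rx, ry-1); otherwise the freshly decoded value is
 * stored. The final "else 0" branch is unreachable in practice, since the
 * up-merge flag is only decoded when the left-merge flag is 0; it is kept as
 * a safe default. */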
1001 
1002 static void hls_sao_param(HEVCContext *s, int rx, int ry)
1003 {
1004  HEVCLocalContext *lc = s->HEVClc;
1005  int sao_merge_left_flag = 0;
1006  int sao_merge_up_flag = 0;
1007  SAOParams *sao = &CTB(s->sao, rx, ry);
1008  int c_idx, i;
1009 
1010  if (s->sh.slice_sample_adaptive_offset_flag[0] ||
1011  s->sh.slice_sample_adaptive_offset_flag[1]) {
1012  if (rx > 0) {
1013  if (lc->ctb_left_flag)
1014  sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(s);
1015  }
1016  if (ry > 0 && !sao_merge_left_flag) {
1017  if (lc->ctb_up_flag)
1018  sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(s);
1019  }
1020  }
1021 
1022  for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
1023  int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
1024  s->ps.pps->log2_sao_offset_scale_chroma;
1025 
1026  if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
1027  sao->type_idx[c_idx] = SAO_NOT_APPLIED;
1028  continue;
1029  }
1030 
1031  if (c_idx == 2) {
1032  sao->type_idx[2] = sao->type_idx[1];
1033  sao->eo_class[2] = sao->eo_class[1];
1034  } else {
1035  SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(s));
1036  }
1037 
1038  if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
1039  continue;
1040 
1041  for (i = 0; i < 4; i++)
1042  SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(s));
1043 
1044  if (sao->type_idx[c_idx] == SAO_BAND) {
1045  for (i = 0; i < 4; i++) {
1046  if (sao->offset_abs[c_idx][i]) {
1047  SET_SAO(offset_sign[c_idx][i],
1048  ff_hevc_sao_offset_sign_decode(s));
1049  } else {
1050  sao->offset_sign[c_idx][i] = 0;
1051  }
1052  }
1053  SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(s));
1054  } else if (c_idx != 2) {
1055  SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(s));
1056  }
1057 
1058  // Inferred parameters
1059  sao->offset_val[c_idx][0] = 0;
1060  for (i = 0; i < 4; i++) {
1061  sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i];
1062  if (sao->type_idx[c_idx] == SAO_EDGE) {
1063  if (i > 1)
1064  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1065  } else if (sao->offset_sign[c_idx][i]) {
1066  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1067  }
1068  sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
1069  }
1070  }
1071 }
1072 
1073 #undef SET_SAO
1074 #undef CTB
1075 
1076 static int hls_cross_component_pred(HEVCContext *s, int idx) {
1077  HEVCLocalContext *lc = s->HEVClc;
1078  int log2_res_scale_abs_plus1 = ff_hevc_log2_res_scale_abs(s, idx);
1079 
1080  if (log2_res_scale_abs_plus1 != 0) {
1081  int res_scale_sign_flag = ff_hevc_res_scale_sign_flag(s, idx);
1082  lc->tu.res_scale_val = (1 << (log2_res_scale_abs_plus1 - 1)) *
1083  (1 - 2 * res_scale_sign_flag);
1084  } else {
1085  lc->tu.res_scale_val = 0;
1086  }
1087 
1088 
1089  return 0;
1090 }
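/* Example for the cross-component prediction scaling above: with
 * log2_res_scale_abs_plus1 = 3 and res_scale_sign_flag = 1,
 * res_scale_val = (1 << 2) * (1 - 2) = -4. The chroma residual later gets
 * (res_scale_val * luma_residual) >> 3 added to it; see the cross_pf
 * handling in hls_transform_unit() below. */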
1091 
1092 static int hls_transform_unit(HEVCContext *s, int x0, int y0,
1093  int xBase, int yBase, int cb_xBase, int cb_yBase,
1094  int log2_cb_size, int log2_trafo_size,
1095  int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
1096 {
1097  HEVCLocalContext *lc = s->HEVClc;
1098  const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
1099  int i;
1100 
1101  if (lc->cu.pred_mode == MODE_INTRA) {
1102  int trafo_size = 1 << log2_trafo_size;
1103  ff_hevc_set_neighbour_available(s, x0, y0, trafo_size, trafo_size);
1104 
1105  s->hpc.intra_pred[log2_trafo_size - 2](s, x0, y0, 0);
1106  }
1107 
1108  if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
1109  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1110  int scan_idx = SCAN_DIAG;
1111  int scan_idx_c = SCAN_DIAG;
1112  int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
1113  (s->ps.sps->chroma_format_idc == 2 &&
1114  (cbf_cb[1] || cbf_cr[1]));
1115 
1116  if (s->ps.pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) {
1117  lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(s);
1118  if (lc->tu.cu_qp_delta != 0)
1119  if (ff_hevc_cu_qp_delta_sign_flag(s) == 1)
1120  lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta;
1121  lc->tu.is_cu_qp_delta_coded = 1;
1122 
1123  if (lc->tu.cu_qp_delta < -(26 + s->ps.sps->qp_bd_offset / 2) ||
1124  lc->tu.cu_qp_delta > (25 + s->ps.sps->qp_bd_offset / 2)) {
1125  av_log(s->avctx, AV_LOG_ERROR,
1126  "The cu_qp_delta %d is outside the valid range "
1127  "[%d, %d].\n",
1128  lc->tu.cu_qp_delta,
1129  -(26 + s->ps.sps->qp_bd_offset / 2),
1130  (25 + s->ps.sps->qp_bd_offset / 2));
1131  return AVERROR_INVALIDDATA;
1132  }
1133 
1134  ff_hevc_set_qPy(s, cb_xBase, cb_yBase, log2_cb_size);
1135  }
1136 
1137  if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma &&
1138  !lc->tu.is_cu_chroma_qp_offset_coded) {
1139  int cu_chroma_qp_offset_flag = ff_hevc_cu_chroma_qp_offset_flag(s);
1140  if (cu_chroma_qp_offset_flag) {
1141  int cu_chroma_qp_offset_idx = 0;
1142  if (s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) {
1143  cu_chroma_qp_offset_idx = ff_hevc_cu_chroma_qp_offset_idx(s);
1144  av_log(s->avctx, AV_LOG_ERROR,
1145  "cu_chroma_qp_offset_idx not yet tested.\n");
1146  }
1147  lc->tu.cu_qp_offset_cb = s->ps.pps->cb_qp_offset_list[cu_chroma_qp_offset_idx];
1148  lc->tu.cu_qp_offset_cr = s->ps.pps->cr_qp_offset_list[cu_chroma_qp_offset_idx];
1149  } else {
1150  lc->tu.cu_qp_offset_cb = 0;
1151  lc->tu.cu_qp_offset_cr = 0;
1152  }
1153  lc->tu.is_cu_chroma_qp_offset_coded = 1;
1154  }
1155 
1156  if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) {
1157  if (lc->tu.intra_pred_mode >= 6 &&
1158  lc->tu.intra_pred_mode <= 14) {
1159  scan_idx = SCAN_VERT;
1160  } else if (lc->tu.intra_pred_mode >= 22 &&
1161  lc->tu.intra_pred_mode <= 30) {
1162  scan_idx = SCAN_HORIZ;
1163  }
1164 
1165  if (lc->tu.intra_pred_mode_c >= 6 &&
1166  lc->tu.intra_pred_mode_c <= 14) {
1167  scan_idx_c = SCAN_VERT;
1168  } else if (lc->tu.intra_pred_mode_c >= 22 &&
1169  lc->tu.intra_pred_mode_c <= 30) {
1170  scan_idx_c = SCAN_HORIZ;
1171  }
1172  }
1173 
1174  lc->tu.cross_pf = 0;
1175 
1176  if (cbf_luma)
1177  ff_hevc_hls_residual_coding(s, x0, y0, log2_trafo_size, scan_idx, 0);
1178  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1179  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1180  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1181  lc->tu.cross_pf = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma &&
1182  (lc->cu.pred_mode == MODE_INTER ||
1183  (lc->tu.chroma_mode_c == 4)));
1184 
1185  if (lc->tu.cross_pf) {
1186  hls_cross_component_pred(s, 0);
1187  }
1188  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1189  if (lc->cu.pred_mode == MODE_INTRA) {
1190  ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1191  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 1);
1192  }
1193  if (cbf_cb[i])
1194  ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
1195  log2_trafo_size_c, scan_idx_c, 1);
1196  else
1197  if (lc->tu.cross_pf) {
1198  ptrdiff_t stride = s->frame->linesize[1];
1199  int hshift = s->ps.sps->hshift[1];
1200  int vshift = s->ps.sps->vshift[1];
1201  int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1202  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1203  int size = 1 << log2_trafo_size_c;
1204 
1205  uint8_t *dst = &s->frame->data[1][(y0 >> vshift) * stride +
1206  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1207  for (i = 0; i < (size * size); i++) {
1208  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1209  }
1210  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1211  }
1212  }
1213 
1214  if (lc->tu.cross_pf) {
1215  hls_cross_component_pred(s, 1);
1216  }
1217  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1218  if (lc->cu.pred_mode == MODE_INTRA) {
1219  ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1220  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 2);
1221  }
1222  if (cbf_cr[i])
1223  ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
1224  log2_trafo_size_c, scan_idx_c, 2);
1225  else
1226  if (lc->tu.cross_pf) {
1227  ptrdiff_t stride = s->frame->linesize[2];
1228  int hshift = s->ps.sps->hshift[2];
1229  int vshift = s->ps.sps->vshift[2];
1230  int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1231  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1232  int size = 1 << log2_trafo_size_c;
1233 
1234  uint8_t *dst = &s->frame->data[2][(y0 >> vshift) * stride +
1235  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1236  for (i = 0; i < (size * size); i++) {
1237  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1238  }
1239  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1240  }
1241  }
1242  } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) {
1243  int trafo_size_h = 1 << (log2_trafo_size + 1);
1244  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1245  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1246  if (lc->cu.pred_mode == MODE_INTRA) {
1247  ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
1248  trafo_size_h, trafo_size_v);
1249  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 1);
1250  }
1251  if (cbf_cb[i])
1252  ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
1253  log2_trafo_size, scan_idx_c, 1);
1254  }
1255  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1256  if (lc->cu.pred_mode == MODE_INTRA) {
1257  ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
1258  trafo_size_h, trafo_size_v);
1259  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 2);
1260  }
1261  if (cbf_cr[i])
1262  ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
1263  log2_trafo_size, scan_idx_c, 2);
1264  }
1265  }
1266  } else if (s->ps.sps->chroma_format_idc && lc->cu.pred_mode == MODE_INTRA) {
1267  if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) {
1268  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1269  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1270  ff_hevc_set_neighbour_available(s, x0, y0, trafo_size_h, trafo_size_v);
1271  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 1);
1272  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 2);
1273  if (s->ps.sps->chroma_format_idc == 2) {
1274  ff_hevc_set_neighbour_available(s, x0, y0 + (1 << log2_trafo_size_c),
1275  trafo_size_h, trafo_size_v);
1276  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 1);
1277  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 2);
1278  }
1279  } else if (blk_idx == 3) {
1280  int trafo_size_h = 1 << (log2_trafo_size + 1);
1281  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1282  ff_hevc_set_neighbour_available(s, xBase, yBase,
1283  trafo_size_h, trafo_size_v);
1284  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 1);
1285  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 2);
1286  if (s->ps.sps->chroma_format_idc == 2) {
1287  ff_hevc_set_neighbour_available(s, xBase, yBase + (1 << (log2_trafo_size)),
1288  trafo_size_h, trafo_size_v);
1289  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 1);
1290  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 2);
1291  }
1292  }
1293  }
1294 
1295  return 0;
1296 }
1297 
1298 static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size)
1299 {
1300  int cb_size = 1 << log2_cb_size;
1301  int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
1302 
1303  int min_pu_width = s->ps.sps->min_pu_width;
1304  int x_end = FFMIN(x0 + cb_size, s->ps.sps->width);
1305  int y_end = FFMIN(y0 + cb_size, s->ps.sps->height);
1306  int i, j;
1307 
1308  for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
1309  for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
1310  s->is_pcm[i + j * min_pu_width] = 2;
1311 }
1312 
1313 static int hls_transform_tree(HEVCContext *s, int x0, int y0,
1314  int xBase, int yBase, int cb_xBase, int cb_yBase,
1315  int log2_cb_size, int log2_trafo_size,
1316  int trafo_depth, int blk_idx,
1317  const int *base_cbf_cb, const int *base_cbf_cr)
1318 {
1319  HEVCLocalContext *lc = s->HEVClc;
1320  uint8_t split_transform_flag;
1321  int cbf_cb[2];
1322  int cbf_cr[2];
1323  int ret;
1324 
1325  cbf_cb[0] = base_cbf_cb[0];
1326  cbf_cb[1] = base_cbf_cb[1];
1327  cbf_cr[0] = base_cbf_cr[0];
1328  cbf_cr[1] = base_cbf_cr[1];
1329 
1330  if (lc->cu.intra_split_flag) {
1331  if (trafo_depth == 1) {
1332  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[blk_idx];
1333  if (s->ps.sps->chroma_format_idc == 3) {
1334  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[blk_idx];
1335  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[blk_idx];
1336  } else {
1337  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1338  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1339  }
1340  }
1341  } else {
1342  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[0];
1343  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1344  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1345  }
1346 
1347  if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
1348  log2_trafo_size > s->ps.sps->log2_min_tb_size &&
1349  trafo_depth < lc->cu.max_trafo_depth &&
1350  !(lc->cu.intra_split_flag && trafo_depth == 0)) {
1351  split_transform_flag = ff_hevc_split_transform_flag_decode(s, log2_trafo_size);
1352  } else {
1353  int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&
1354  lc->cu.pred_mode == MODE_INTER &&
1355  lc->cu.part_mode != PART_2Nx2N &&
1356  trafo_depth == 0;
1357 
1358  split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||
1359  (lc->cu.intra_split_flag && trafo_depth == 0) ||
1360  inter_split;
1361  }
1362 
1363  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1364  if (trafo_depth == 0 || cbf_cb[0]) {
1365  cbf_cb[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1366  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1367  cbf_cb[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1368  }
1369  }
1370 
1371  if (trafo_depth == 0 || cbf_cr[0]) {
1372  cbf_cr[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1373  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1374  cbf_cr[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1375  }
1376  }
1377  }
1378 
1379  if (split_transform_flag) {
1380  const int trafo_size_split = 1 << (log2_trafo_size - 1);
1381  const int x1 = x0 + trafo_size_split;
1382  const int y1 = y0 + trafo_size_split;
1383 
1384 #define SUBDIVIDE(x, y, idx) \
1385 do { \
1386  ret = hls_transform_tree(s, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size, \
1387  log2_trafo_size - 1, trafo_depth + 1, idx, \
1388  cbf_cb, cbf_cr); \
1389  if (ret < 0) \
1390  return ret; \
1391 } while (0)
1392 
1393  SUBDIVIDE(x0, y0, 0);
1394  SUBDIVIDE(x1, y0, 1);
1395  SUBDIVIDE(x0, y1, 2);
1396  SUBDIVIDE(x1, y1, 3);
1397 
1398 #undef SUBDIVIDE
1399  } else {
1400  int min_tu_size = 1 << s->ps.sps->log2_min_tb_size;
1401  int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
1402  int min_tu_width = s->ps.sps->min_tb_width;
1403  int cbf_luma = 1;
1404 
1405  if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 ||
1406  cbf_cb[0] || cbf_cr[0] ||
1407  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1408  cbf_luma = ff_hevc_cbf_luma_decode(s, trafo_depth);
1409  }
1410 
1411  ret = hls_transform_unit(s, x0, y0, xBase, yBase, cb_xBase, cb_yBase,
1412  log2_cb_size, log2_trafo_size,
1413  blk_idx, cbf_luma, cbf_cb, cbf_cr);
1414  if (ret < 0)
1415  return ret;
1416  // TODO: store cbf_luma somewhere else
1417  if (cbf_luma) {
1418  int i, j;
1419  for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
1420  for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
1421  int x_tu = (x0 + j) >> log2_min_tu_size;
1422  int y_tu = (y0 + i) >> log2_min_tu_size;
1423  s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
1424  }
1425  }
1426  if (!s->sh.disable_deblocking_filter_flag) {
1427  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_trafo_size);
1428  if (s->ps.pps->transquant_bypass_enable_flag &&
1429  lc->cu.cu_transquant_bypass_flag)
1430  set_deblocking_bypass(s, x0, y0, log2_trafo_size);
1431  }
1432  }
1433  return 0;
1434 }
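/* Summary of the split decision above: split_transform_flag is only coded
 * when the current TU may legally be either split or kept. Otherwise the
 * split is inferred: forced when log2_trafo_size exceeds log2_max_trafo_size,
 * when an intra NxN CU is at depth 0 (intra_split_flag), or for the
 * inter_split case where max_transform_hierarchy_depth_inter is 0 and the
 * inter CU is not partitioned as 2Nx2N. */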
1435 
1436 static int hls_pcm_sample(HEVCContext *s, int x0, int y0, int log2_cb_size)
1437 {
1438  HEVCLocalContext *lc = s->HEVClc;
1439  GetBitContext gb;
1440  int cb_size = 1 << log2_cb_size;
1441  ptrdiff_t stride0 = s->frame->linesize[0];
1442  ptrdiff_t stride1 = s->frame->linesize[1];
1443  ptrdiff_t stride2 = s->frame->linesize[2];
1444  uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
1445  uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
1446  uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];
1447 
1448  int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
1449  (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
1450  ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
1451  s->ps.sps->pcm.bit_depth_chroma;
1452  const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);
1453  int ret;
1454 
1455  if (!s->sh.disable_deblocking_filter_flag)
1456  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
1457 
1458  ret = init_get_bits(&gb, pcm, length);
1459  if (ret < 0)
1460  return ret;
1461 
1462  s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
1463  if (s->ps.sps->chroma_format_idc) {
1464  s->hevcdsp.put_pcm(dst1, stride1,
1465  cb_size >> s->ps.sps->hshift[1],
1466  cb_size >> s->ps.sps->vshift[1],
1467  &gb, s->ps.sps->pcm.bit_depth_chroma);
1468  s->hevcdsp.put_pcm(dst2, stride2,
1469  cb_size >> s->ps.sps->hshift[2],
1470  cb_size >> s->ps.sps->vshift[2],
1471  &gb, s->ps.sps->pcm.bit_depth_chroma);
1472  }
1473 
1474  return 0;
1475 }
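/* Worked example for the PCM length computed above: 4:2:0, cb_size = 16,
 * pcm.bit_depth = 8 and pcm.bit_depth_chroma = 8 give
 *   16*16*8 + (8*8 + 8*8)*8 = 2048 + 1024 = 3072 bits (384 bytes),
 * which is what skip_bytes() consumes from the CABAC bytestream before the
 * samples are re-read through the local GetBitContext. */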
1476 
1477 /**
1478  * 8.5.3.2.2.1 Luma sample unidirectional interpolation process
1479  *
1480  * @param s HEVC decoding context
1481  * @param dst target buffer for block data at block position
1482  * @param dststride stride of the dst buffer
1483  * @param ref reference picture buffer at origin (0, 0)
1484  * @param mv motion vector (relative to block position) to get pixel data from
1485  * @param x_off horizontal position of block from origin (0, 0)
1486  * @param y_off vertical position of block from origin (0, 0)
1487  * @param block_w width of block
1488  * @param block_h height of block
1489  * @param luma_weight weighting factor applied to the luma prediction
1490  * @param luma_offset additive offset applied to the luma prediction value
1491  */
1492 
1493 static void luma_mc_uni(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
1494  AVFrame *ref, const Mv *mv, int x_off, int y_off,
1495  int block_w, int block_h, int luma_weight, int luma_offset)
1496 {
1497  HEVCLocalContext *lc = s->HEVClc;
1498  uint8_t *src = ref->data[0];
1499  ptrdiff_t srcstride = ref->linesize[0];
1500  int pic_width = s->ps.sps->width;
1501  int pic_height = s->ps.sps->height;
1502  int mx = mv->x & 3;
1503  int my = mv->y & 3;
1504  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1505  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1506  int idx = ff_hevc_pel_weight[block_w];
1507 
1508  x_off += mv->x >> 2;
1509  y_off += mv->y >> 2;
1510  src += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1511 
1512  if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER ||
1513  x_off >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1514  y_off >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1515  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1516  int offset = QPEL_EXTRA_BEFORE * srcstride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1517  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1518 
1519  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset,
1520  edge_emu_stride, srcstride,
1521  block_w + QPEL_EXTRA,
1522  block_h + QPEL_EXTRA,
1523  x_off - QPEL_EXTRA_BEFORE, y_off - QPEL_EXTRA_BEFORE,
1524  pic_width, pic_height);
1525  src = lc->edge_emu_buffer + buf_offset;
1526  srcstride = edge_emu_stride;
1527  }
1528 
1529  if (!weight_flag)
1530  s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
1531  block_h, mx, my, block_w);
1532  else
1533  s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride,
1534  block_h, s->sh.luma_log2_weight_denom,
1535  luma_weight, luma_offset, mx, my, block_w);
1536 }
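/* Note on the luma MC above: motion vectors have quarter-pel precision, so
 * mv >> 2 gives the integer displacement and mv & 3 the fractional phase that
 * selects the qpel filter. When the 8-tap filter footprint (3 samples before
 * and 4 after the block, hence the QPEL_EXTRA_* margins) would read outside
 * the picture, emulated_edge_mc() copies a padded patch into edge_emu_buffer
 * and the interpolation runs on that copy instead. */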
1537 
1538 /**
1539  * 8.5.3.2.2.1 Luma sample bidirectional interpolation process
1540  *
1541  * @param s HEVC decoding context
1542  * @param dst target buffer for block data at block position
1543  * @param dststride stride of the dst buffer
1544  * @param ref0 reference picture0 buffer at origin (0, 0)
1545  * @param mv0 motion vector0 (relative to block position) to get pixel data from
1546  * @param x_off horizontal position of block from origin (0, 0)
1547  * @param y_off vertical position of block from origin (0, 0)
1548  * @param block_w width of block
1549  * @param block_h height of block
1550  * @param ref1 reference picture1 buffer at origin (0, 0)
1551  * @param mv1 motion vector1 (relative to block position) to get pixel data from
1552  * @param current_mv current motion vector structure
1553  */
1554  static void luma_mc_bi(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
1555  AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
1556  int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
1557 {
1558  HEVCLocalContext *lc = s->HEVClc;
1559  ptrdiff_t src0stride = ref0->linesize[0];
1560  ptrdiff_t src1stride = ref1->linesize[0];
1561  int pic_width = s->ps.sps->width;
1562  int pic_height = s->ps.sps->height;
1563  int mx0 = mv0->x & 3;
1564  int my0 = mv0->y & 3;
1565  int mx1 = mv1->x & 3;
1566  int my1 = mv1->y & 3;
1567  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1568  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1569  int x_off0 = x_off + (mv0->x >> 2);
1570  int y_off0 = y_off + (mv0->y >> 2);
1571  int x_off1 = x_off + (mv1->x >> 2);
1572  int y_off1 = y_off + (mv1->y >> 2);
1573  int idx = ff_hevc_pel_weight[block_w];
1574 
1575  uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1576  uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1577 
1578  if (x_off0 < QPEL_EXTRA_BEFORE || y_off0 < QPEL_EXTRA_AFTER ||
1579  x_off0 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1580  y_off0 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1581  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1582  int offset = QPEL_EXTRA_BEFORE * src0stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1583  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1584 
1585  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset,
1586  edge_emu_stride, src0stride,
1587  block_w + QPEL_EXTRA,
1588  block_h + QPEL_EXTRA,
1589  x_off0 - QPEL_EXTRA_BEFORE, y_off0 - QPEL_EXTRA_BEFORE,
1590  pic_width, pic_height);
1591  src0 = lc->edge_emu_buffer + buf_offset;
1592  src0stride = edge_emu_stride;
1593  }
1594 
1595  if (x_off1 < QPEL_EXTRA_BEFORE || y_off1 < QPEL_EXTRA_AFTER ||
1596  x_off1 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1597  y_off1 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1598  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1599  int offset = QPEL_EXTRA_BEFORE * src1stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1600  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1601 
1602  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src1 - offset,
1603  edge_emu_stride, src1stride,
1604  block_w + QPEL_EXTRA,
1605  block_h + QPEL_EXTRA,
1606  x_off1 - QPEL_EXTRA_BEFORE, y_off1 - QPEL_EXTRA_BEFORE,
1607  pic_width, pic_height);
1608  src1 = lc->edge_emu_buffer2 + buf_offset;
1609  src1stride = edge_emu_stride;
1610  }
1611 
1612  s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride,
1613  block_h, mx0, my0, block_w);
1614  if (!weight_flag)
1615  s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1616  block_h, mx1, my1, block_w);
1617  else
1618  s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1619  block_h, s->sh.luma_log2_weight_denom,
1620  s->sh.luma_weight_l0[current_mv->ref_idx[0]],
1621  s->sh.luma_weight_l1[current_mv->ref_idx[1]],
1622  s->sh.luma_offset_l0[current_mv->ref_idx[0]],
1623  s->sh.luma_offset_l1[current_mv->ref_idx[1]],
1624  mx1, my1, block_w);
1625 
1626 }
1627 
1628 /**
1629  * 8.5.3.2.2.2 Chroma sample uniprediction interpolation process
1630  * @param s HEVC decoding context
1631  * @param dst0 target buffer for block data at block position (one chroma plane)
1632  * @param dststride stride of the dst0 buffer
1633  * @param src0 reference picture plane buffer at origin (0, 0)
1634  * @param srcstride stride of the reference plane buffer
1635  * @param reflist reference picture list index (0 for L0, 1 for L1)
1636  * @param x_off horizontal position of block from origin (0, 0)
1637  * @param y_off vertical position of block from origin (0, 0)
1638  * @param block_w width of block
1639  * @param block_h height of block
1640  * @param current_mv current motion vector structure providing the chroma MV
1641  * @param chroma_weight weighting factor applied to the chroma prediction
1642  * @param chroma_offset additive offset applied to the chroma prediction value
1643  */
1644 
1645 static void chroma_mc_uni(HEVCContext *s, uint8_t *dst0,
1646  ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist,
1647  int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset)
1648 {
1649  HEVCLocalContext *lc = s->HEVClc;
1650  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1651  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1652  const Mv *mv = &current_mv->mv[reflist];
1653  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1654  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1655  int idx = ff_hevc_pel_weight[block_w];
1656  int hshift = s->ps.sps->hshift[1];
1657  int vshift = s->ps.sps->vshift[1];
1658  intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift);
1659  intptr_t my = av_mod_uintp2(mv->y, 2 + vshift);
1660  intptr_t _mx = mx << (1 - hshift);
1661  intptr_t _my = my << (1 - vshift);
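 /* Chroma motion vectors carry 2 + hshift fractional bits (eighth-pel for
  * 4:2:0); av_mod_uintp2() extracts the fractional phase and the shift by
  * (1 - hshift) / (1 - vshift) rescales it to the eighth-pel phase expected by
  * the epel filters. */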
1662 
1663  x_off += mv->x >> (2 + hshift);
1664  y_off += mv->y >> (2 + vshift);
1665  src0 += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1666 
1667  if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER ||
1668  x_off >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1669  y_off >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1670  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1671  int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift));
1672  int buf_offset0 = EPEL_EXTRA_BEFORE *
1673  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1674  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset0,
1675  edge_emu_stride, srcstride,
1676  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1677  x_off - EPEL_EXTRA_BEFORE,
1678  y_off - EPEL_EXTRA_BEFORE,
1679  pic_width, pic_height);
1680 
1681  src0 = lc->edge_emu_buffer + buf_offset0;
1682  srcstride = edge_emu_stride;
1683  }
1684  if (!weight_flag)
1685  s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1686  block_h, _mx, _my, block_w);
1687  else
1688  s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1689  block_h, s->sh.chroma_log2_weight_denom,
1690  chroma_weight, chroma_offset, _mx, _my, block_w);
1691 }
1692 
1693 /**
1694  * 8.5.3.2.2.2 Chroma sample bidirectional interpolation process
1695  *
1696  * @param s HEVC decoding context
1697  * @param dst0 target buffer for block data at block position
1698  * @param dststride stride of the dst buffer
1699  * @param ref0 reference picture0 buffer at origin (0, 0)
1700  * @param ref1 reference picture1 buffer at origin (0, 0)
1701  * @param x_off horizontal position of block from origin (0, 0)
1702  * @param y_off vertical position of block from origin (0, 0)
1703  * @param block_w width of block
1704  * @param block_h height of block
1705  * @param current_mv current motion vector structure; the list 0 and list 1
1706  * motion vectors and reference indices are taken from it, and the explicit
1707  * weights are looked up in the slice header using those indices
1708  * @param cidx chroma component (0 for Cb, 1 for Cr)
1709  */
1710 static void chroma_mc_bi(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, AVFrame *ref0, AVFrame *ref1,
1711  int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx)
1712 {
1713  HEVCLocalContext *lc = s->HEVClc;
1714  uint8_t *src1 = ref0->data[cidx+1];
1715  uint8_t *src2 = ref1->data[cidx+1];
1716  ptrdiff_t src1stride = ref0->linesize[cidx+1];
1717  ptrdiff_t src2stride = ref1->linesize[cidx+1];
1718  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1719  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1720  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1721  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1722  Mv *mv0 = &current_mv->mv[0];
1723  Mv *mv1 = &current_mv->mv[1];
1724  int hshift = s->ps.sps->hshift[1];
1725  int vshift = s->ps.sps->vshift[1];
1726 
1727  intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift);
1728  intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift);
1729  intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift);
1730  intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift);
1731  intptr_t _mx0 = mx0 << (1 - hshift);
1732  intptr_t _my0 = my0 << (1 - vshift);
1733  intptr_t _mx1 = mx1 << (1 - hshift);
1734  intptr_t _my1 = my1 << (1 - vshift);
1735 
1736  int x_off0 = x_off + (mv0->x >> (2 + hshift));
1737  int y_off0 = y_off + (mv0->y >> (2 + vshift));
1738  int x_off1 = x_off + (mv1->x >> (2 + hshift));
1739  int y_off1 = y_off + (mv1->y >> (2 + vshift));
1740  int idx = ff_hevc_pel_weight[block_w];
1741  src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1742  src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1743 
1744  if (x_off0 < EPEL_EXTRA_BEFORE || y_off0 < EPEL_EXTRA_AFTER ||
1745  x_off0 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1746  y_off0 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1747  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1748  int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->ps.sps->pixel_shift));
1749  int buf_offset1 = EPEL_EXTRA_BEFORE *
1750  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1751 
1752  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1,
1753  edge_emu_stride, src1stride,
1754  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1755  x_off0 - EPEL_EXTRA_BEFORE,
1756  y_off0 - EPEL_EXTRA_BEFORE,
1757  pic_width, pic_height);
1758 
1759  src1 = lc->edge_emu_buffer + buf_offset1;
1760  src1stride = edge_emu_stride;
1761  }
1762 
1763  if (x_off1 < EPEL_EXTRA_BEFORE || y_off1 < EPEL_EXTRA_AFTER ||
1764  x_off1 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1765  y_off1 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1766  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1767  int offset1 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->ps.sps->pixel_shift));
1768  int buf_offset1 = EPEL_EXTRA_BEFORE *
1769  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1770 
1771  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src2 - offset1,
1772  edge_emu_stride, src2stride,
1773  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1774  x_off1 - EPEL_EXTRA_BEFORE,
1775  y_off1 - EPEL_EXTRA_BEFORE,
1776  pic_width, pic_height);
1777 
1778  src2 = lc->edge_emu_buffer2 + buf_offset1;
1779  src2stride = edge_emu_stride;
1780  }
1781 
1782  s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride,
1783  block_h, _mx0, _my0, block_w);
1784  if (!weight_flag)
1785  s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1786  src2, src2stride, lc->tmp,
1787  block_h, _mx1, _my1, block_w);
1788  else
1789  s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1790  src2, src2stride, lc->tmp,
1791  block_h,
1792  s->sh.chroma_log2_weight_denom,
1793  s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx],
1794  s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx],
1795  s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx],
1796  s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx],
1797  _mx1, _my1, block_w);
1798 }
1799 
1800 static void hevc_await_progress(HEVCContext *s, HEVCFrame *ref,
1801  const Mv *mv, int y0, int height)
1802 {
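 /* With frame threading, wait until the reference frame has reconstructed the
  * rows this prediction reads; the margin added below the block is a
  * conservative bound for the interpolation taps and in-loop filtering. */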
1803  if (s->threads_type == FF_THREAD_FRAME ) {
1804  int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
1805 
1806  ff_thread_await_progress(&ref->tf, y, 0);
1807  }
1808 }
1809 
1810 static void hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
1811  int nPbH, int log2_cb_size, int part_idx,
1812  int merge_idx, MvField *mv)
1813 {
1814  HEVCLocalContext *lc = s->HEVClc;
1815  enum InterPredIdc inter_pred_idc = PRED_L0;
1816  int mvp_flag;
1817 
1818  ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH);
1819  mv->pred_flag = 0;
1820  if (s->sh.slice_type == HEVC_SLICE_B)
1821  inter_pred_idc = ff_hevc_inter_pred_idc_decode(s, nPbW, nPbH);
1822 
1823  if (inter_pred_idc != PRED_L1) {
1824  if (s->sh.nb_refs[L0])
1825  mv->ref_idx[0]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L0]);
1826 
1827  mv->pred_flag = PF_L0;
1828  ff_hevc_hls_mvd_coding(s, x0, y0, 0);
1829  mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
1830  ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1831  part_idx, merge_idx, mv, mvp_flag, 0);
1832  mv->mv[0].x += lc->pu.mvd.x;
1833  mv->mv[0].y += lc->pu.mvd.y;
1834  }
1835 
1836  if (inter_pred_idc != PRED_L0) {
1837  if (s->sh.nb_refs[L1])
1838  mv->ref_idx[1]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L1]);
1839 
1840  if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {
1841  AV_ZERO32(&lc->pu.mvd);
1842  } else {
1843  ff_hevc_hls_mvd_coding(s, x0, y0, 1);
1844  }
1845 
1846  mv->pred_flag += PF_L1;
1847  mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
1848  ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1849  part_idx, merge_idx, mv, mvp_flag, 1);
1850  mv->mv[1].x += lc->pu.mvd.x;
1851  mv->mv[1].y += lc->pu.mvd.y;
1852  }
1853 }
1854 
1855 static void hls_prediction_unit(HEVCContext *s, int x0, int y0,
1856  int nPbW, int nPbH,
1857  int log2_cb_size, int partIdx, int idx)
1858 {
1859 #define POS(c_idx, x, y) \
1860  &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
1861  (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
1862  HEVCLocalContext *lc = s->HEVClc;
1863  int merge_idx = 0;
1864  struct MvField current_mv = {{{ 0 }}};
1865 
1866  int min_pu_width = s->ps.sps->min_pu_width;
1867 
1868  MvField *tab_mvf = s->ref->tab_mvf;
1869  RefPicList *refPicList = s->ref->refPicList;
1870  HEVCFrame *ref0 = NULL, *ref1 = NULL;
1871  uint8_t *dst0 = POS(0, x0, y0);
1872  uint8_t *dst1 = POS(1, x0, y0);
1873  uint8_t *dst2 = POS(2, x0, y0);
1874  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
1875  int min_cb_width = s->ps.sps->min_cb_width;
1876  int x_cb = x0 >> log2_min_cb_size;
1877  int y_cb = y0 >> log2_min_cb_size;
1878  int x_pu, y_pu;
1879  int i, j;
1880 
1881  int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb);
1882 
1883  if (!skip_flag)
1884  lc->pu.merge_flag = ff_hevc_merge_flag_decode(s);
1885 
1886  if (skip_flag || lc->pu.merge_flag) {
1887  if (s->sh.max_num_merge_cand > 1)
1888  merge_idx = ff_hevc_merge_idx_decode(s);
1889  else
1890  merge_idx = 0;
1891 
1892  ff_hevc_luma_mv_merge_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1893  partIdx, merge_idx, &current_mv);
1894  } else {
1895  hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1896  partIdx, merge_idx, &current_mv);
1897  }
1898 
1899  x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1900  y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1901 
1902  for (j = 0; j < nPbH >> s->ps.sps->log2_min_pu_size; j++)
1903  for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++)
1904  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
1905 
1906  if (current_mv.pred_flag & PF_L0) {
1907  ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
1908  if (!ref0)
1909  return;
1910  hevc_await_progress(s, ref0, &current_mv.mv[0], y0, nPbH);
1911  }
1912  if (current_mv.pred_flag & PF_L1) {
1913  ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
1914  if (!ref1)
1915  return;
1916  hevc_await_progress(s, ref1, &current_mv.mv[1], y0, nPbH);
1917  }
1918 
1919  if (current_mv.pred_flag == PF_L0) {
1920  int x0_c = x0 >> s->ps.sps->hshift[1];
1921  int y0_c = y0 >> s->ps.sps->vshift[1];
1922  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1923  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1924 
1925  luma_mc_uni(s, dst0, s->frame->linesize[0], ref0->frame,
1926  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1927  s->sh.luma_weight_l0[current_mv.ref_idx[0]],
1928  s->sh.luma_offset_l0[current_mv.ref_idx[0]]);
1929 
1930  if (s->ps.sps->chroma_format_idc) {
1931  chroma_mc_uni(s, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1],
1932  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1933  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]);
1934  chroma_mc_uni(s, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2],
1935  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1936  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]);
1937  }
1938  } else if (current_mv.pred_flag == PF_L1) {
1939  int x0_c = x0 >> s->ps.sps->hshift[1];
1940  int y0_c = y0 >> s->ps.sps->vshift[1];
1941  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1942  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1943 
1944  luma_mc_uni(s, dst0, s->frame->linesize[0], ref1->frame,
1945  &current_mv.mv[1], x0, y0, nPbW, nPbH,
1946  s->sh.luma_weight_l1[current_mv.ref_idx[1]],
1947  s->sh.luma_offset_l1[current_mv.ref_idx[1]]);
1948 
1949  if (s->ps.sps->chroma_format_idc) {
1950  chroma_mc_uni(s, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1],
1951  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1952  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]);
1953 
1954  chroma_mc_uni(s, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2],
1955  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1956  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]);
1957  }
1958  } else if (current_mv.pred_flag == PF_BI) {
1959  int x0_c = x0 >> s->ps.sps->hshift[1];
1960  int y0_c = y0 >> s->ps.sps->vshift[1];
1961  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1962  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1963 
1964  luma_mc_bi(s, dst0, s->frame->linesize[0], ref0->frame,
1965  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1966  ref1->frame, &current_mv.mv[1], &current_mv);
1967 
1968  if (s->ps.sps->chroma_format_idc) {
1969  chroma_mc_bi(s, dst1, s->frame->linesize[1], ref0->frame, ref1->frame,
1970  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);
1971 
1972  chroma_mc_bi(s, dst2, s->frame->linesize[2], ref0->frame, ref1->frame,
1973  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
1974  }
1975  }
1976 }
1977 
1978 /**
1979  * 8.4.1
1980  */
1981 static int luma_intra_pred_mode(HEVCContext *s, int x0, int y0, int pu_size,
1982  int prev_intra_luma_pred_flag)
1983 {
1984  HEVCLocalContext *lc = s->HEVClc;
1985  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1986  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1987  int min_pu_width = s->ps.sps->min_pu_width;
1988  int size_in_pus = pu_size >> s->ps.sps->log2_min_pu_size;
1989  int x0b = av_mod_uintp2(x0, s->ps.sps->log2_ctb_size);
1990  int y0b = av_mod_uintp2(y0, s->ps.sps->log2_ctb_size);
1991 
1992  int cand_up = (lc->ctb_up_flag || y0b) ?
1993  s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
1994  int cand_left = (lc->ctb_left_flag || x0b) ?
1995  s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC;
1996 
1997  int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size);
1998 
1999  MvField *tab_mvf = s->ref->tab_mvf;
2000  int intra_pred_mode;
2001  int candidate[3];
2002  int i, j;
2003 
2004  // intra_pred_mode prediction does not cross vertical CTB boundaries
2005  if ((y0 - 1) < y_ctb)
2006  cand_up = INTRA_DC;
2007 
2008  if (cand_left == cand_up) {
2009  if (cand_left < 2) {
2010  candidate[0] = INTRA_PLANAR;
2011  candidate[1] = INTRA_DC;
2012  candidate[2] = INTRA_ANGULAR_26;
2013  } else {
2014  candidate[0] = cand_left;
2015  candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
2016  candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
2017  }
2018  } else {
2019  candidate[0] = cand_left;
2020  candidate[1] = cand_up;
2021  if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) {
2022  candidate[2] = INTRA_PLANAR;
2023  } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) {
2024  candidate[2] = INTRA_DC;
2025  } else {
2026  candidate[2] = INTRA_ANGULAR_26;
2027  }
2028  }
2029 
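 /* Most-probable-mode signalling: candidate[0..2] are the three MPMs derived
  * from the left and upper neighbours. If the coded mode is not an MPM,
  * rem_intra_luma_pred_mode indexes the remaining 32 modes and is mapped back
  * to the full 0..34 range by comparison with the sorted candidates. */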
2030  if (prev_intra_luma_pred_flag) {
2031  intra_pred_mode = candidate[lc->pu.mpm_idx];
2032  } else {
2033  if (candidate[0] > candidate[1])
2034  FFSWAP(uint8_t, candidate[0], candidate[1]);
2035  if (candidate[0] > candidate[2])
2036  FFSWAP(uint8_t, candidate[0], candidate[2]);
2037  if (candidate[1] > candidate[2])
2038  FFSWAP(uint8_t, candidate[1], candidate[2]);
2039 
2040  intra_pred_mode = lc->pu.rem_intra_luma_pred_mode;
2041  for (i = 0; i < 3; i++)
2042  if (intra_pred_mode >= candidate[i])
2043  intra_pred_mode++;
2044  }
2045 
2046  /* write the intra prediction units into the mv array */
2047  if (!size_in_pus)
2048  size_in_pus = 1;
2049  for (i = 0; i < size_in_pus; i++) {
2050  memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
2051  intra_pred_mode, size_in_pus);
2052 
2053  for (j = 0; j < size_in_pus; j++) {
2054  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag = PF_INTRA;
2055  }
2056  }
2057 
2058  return intra_pred_mode;
2059 }
2060 
2061 static av_always_inline void set_ct_depth(HEVCContext *s, int x0, int y0,
2062  int log2_cb_size, int ct_depth)
2063 {
2064  int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size;
2065  int x_cb = x0 >> s->ps.sps->log2_min_cb_size;
2066  int y_cb = y0 >> s->ps.sps->log2_min_cb_size;
2067  int y;
2068 
2069  for (y = 0; y < length; y++)
2070  memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb],
2071  ct_depth, length);
2072 }
2073 
2074 static const uint8_t tab_mode_idx[] = {
2075  0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20,
2076  21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
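 /* Remapping of the derived chroma intra prediction mode for 4:2:2 content
  * (chroma_format_idc == 2), cf. the corresponding mode-mapping table in the
  * HEVC specification. */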
2077 
2078 static void intra_prediction_unit(HEVCContext *s, int x0, int y0,
2079  int log2_cb_size)
2080 {
2081  HEVCLocalContext *lc = s->HEVClc;
2082  static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
2083  uint8_t prev_intra_luma_pred_flag[4];
2084  int split = lc->cu.part_mode == PART_NxN;
2085  int pb_size = (1 << log2_cb_size) >> split;
2086  int side = split + 1;
2087  int chroma_mode;
2088  int i, j;
2089 
2090  for (i = 0; i < side; i++)
2091  for (j = 0; j < side; j++)
2092  prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(s);
2093 
2094  for (i = 0; i < side; i++) {
2095  for (j = 0; j < side; j++) {
2096  if (prev_intra_luma_pred_flag[2 * i + j])
2097  lc->pu.mpm_idx = ff_hevc_mpm_idx_decode(s);
2098  else
2099  lc->pu.rem_intra_luma_pred_mode = ff_hevc_rem_intra_luma_pred_mode_decode(s);
2100 
2101  lc->pu.intra_pred_mode[2 * i + j] =
2102  luma_intra_pred_mode(s, x0 + pb_size * j, y0 + pb_size * i, pb_size,
2103  prev_intra_luma_pred_flag[2 * i + j]);
2104  }
2105  }
2106 
2107  if (s->ps.sps->chroma_format_idc == 3) {
2108  for (i = 0; i < side; i++) {
2109  for (j = 0; j < side; j++) {
2110  lc->pu.chroma_mode_c[2 * i + j] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
2111  if (chroma_mode != 4) {
2112  if (lc->pu.intra_pred_mode[2 * i + j] == intra_chroma_table[chroma_mode])
2113  lc->pu.intra_pred_mode_c[2 * i + j] = 34;
2114  else
2115  lc->pu.intra_pred_mode_c[2 * i + j] = intra_chroma_table[chroma_mode];
2116  } else {
2117  lc->pu.intra_pred_mode_c[2 * i + j] = lc->pu.intra_pred_mode[2 * i + j];
2118  }
2119  }
2120  }
2121  } else if (s->ps.sps->chroma_format_idc == 2) {
2122  int mode_idx;
2123  lc->pu.chroma_mode_c[0] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
2124  if (chroma_mode != 4) {
2125  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2126  mode_idx = 34;
2127  else
2128  mode_idx = intra_chroma_table[chroma_mode];
2129  } else {
2130  mode_idx = lc->pu.intra_pred_mode[0];
2131  }
2132  lc->pu.intra_pred_mode_c[0] = tab_mode_idx[mode_idx];
2133  } else if (s->ps.sps->chroma_format_idc != 0) {
2134  chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
2135  if (chroma_mode != 4) {
2136  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2137  lc->pu.intra_pred_mode_c[0] = 34;
2138  else
2139  lc->pu.intra_pred_mode_c[0] = intra_chroma_table[chroma_mode];
2140  } else {
2141  lc->pu.intra_pred_mode_c[0] = lc->pu.intra_pred_mode[0];
2142  }
2143  }
2144 }
2145 
2146 static void intra_prediction_unit_default_value(HEVCContext *s,
2147  int x0, int y0,
2148  int log2_cb_size)
2149 {
2150  HEVCLocalContext *lc = s->HEVClc;
2151  int pb_size = 1 << log2_cb_size;
2152  int size_in_pus = pb_size >> s->ps.sps->log2_min_pu_size;
2153  int min_pu_width = s->ps.sps->min_pu_width;
2154  MvField *tab_mvf = s->ref->tab_mvf;
2155  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2156  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
2157  int j, k;
2158 
2159  if (size_in_pus == 0)
2160  size_in_pus = 1;
2161  for (j = 0; j < size_in_pus; j++)
2162  memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
2163  if (lc->cu.pred_mode == MODE_INTRA)
2164  for (j = 0; j < size_in_pus; j++)
2165  for (k = 0; k < size_in_pus; k++)
2166  tab_mvf[(y_pu + j) * min_pu_width + x_pu + k].pred_flag = PF_INTRA;
2167 }
2168 
2169 static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
2170 {
2171  int cb_size = 1 << log2_cb_size;
2172  HEVCLocalContext *lc = s->HEVClc;
2173  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
2174  int length = cb_size >> log2_min_cb_size;
2175  int min_cb_width = s->ps.sps->min_cb_width;
2176  int x_cb = x0 >> log2_min_cb_size;
2177  int y_cb = y0 >> log2_min_cb_size;
2178  int idx = log2_cb_size - 2;
2179  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
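 /* qp_block_mask delimits quantization groups: cu_qp_delta may be coded once
  * per group of size ctb_size >> diff_cu_qp_delta_depth, and qPy_pred is
  * refreshed below whenever a CU completes such a group. */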
2180  int x, y, ret;
2181 
2182  lc->cu.x = x0;
2183  lc->cu.y = y0;
2184  lc->cu.pred_mode = MODE_INTRA;
2185  lc->cu.part_mode = PART_2Nx2N;
2186  lc->cu.intra_split_flag = 0;
2187 
2188  SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0;
2189  for (x = 0; x < 4; x++)
2190  lc->pu.intra_pred_mode[x] = 1;
2191  if (s->ps.pps->transquant_bypass_enable_flag) {
2192  lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(s);
2193  if (lc->cu.cu_transquant_bypass_flag)
2194  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2195  } else
2196  lc->cu.cu_transquant_bypass_flag = 0;
2197 
2198  if (s->sh.slice_type != HEVC_SLICE_I) {
2199  uint8_t skip_flag = ff_hevc_skip_flag_decode(s, x0, y0, x_cb, y_cb);
2200 
2201  x = y_cb * min_cb_width + x_cb;
2202  for (y = 0; y < length; y++) {
2203  memset(&s->skip_flag[x], skip_flag, length);
2204  x += min_cb_width;
2205  }
2206  lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER;
2207  } else {
2208  x = y_cb * min_cb_width + x_cb;
2209  for (y = 0; y < length; y++) {
2210  memset(&s->skip_flag[x], 0, length);
2211  x += min_cb_width;
2212  }
2213  }
2214 
2215  if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) {
2216  hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2217  intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2218 
2219  if (!s->sh.disable_deblocking_filter_flag)
2220  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
2221  } else {
2222  int pcm_flag = 0;
2223 
2224  if (s->sh.slice_type != HEVC_SLICE_I)
2225  lc->cu.pred_mode = ff_hevc_pred_mode_decode(s);
2226  if (lc->cu.pred_mode != MODE_INTRA ||
2227  log2_cb_size == s->ps.sps->log2_min_cb_size) {
2228  lc->cu.part_mode = ff_hevc_part_mode_decode(s, log2_cb_size);
2229  lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN &&
2230  lc->cu.pred_mode == MODE_INTRA;
2231  }
2232 
2233  if (lc->cu.pred_mode == MODE_INTRA) {
2234  if (lc->cu.part_mode == PART_2Nx2N && s->ps.sps->pcm_enabled_flag &&
2235  log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size &&
2236  log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
2237  pcm_flag = ff_hevc_pcm_flag_decode(s);
2238  }
2239  if (pcm_flag) {
2240  intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2241  ret = hls_pcm_sample(s, x0, y0, log2_cb_size);
2242  if (s->ps.sps->pcm.loop_filter_disable_flag)
2243  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2244 
2245  if (ret < 0)
2246  return ret;
2247  } else {
2248  intra_prediction_unit(s, x0, y0, log2_cb_size);
2249  }
2250  } else {
2251  intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2252  switch (lc->cu.part_mode) {
2253  case PART_2Nx2N:
2254  hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2255  break;
2256  case PART_2NxN:
2257  hls_prediction_unit(s, x0, y0, cb_size, cb_size / 2, log2_cb_size, 0, idx);
2258  hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1, idx);
2259  break;
2260  case PART_Nx2N:
2261  hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size, log2_cb_size, 0, idx - 1);
2262  hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
2263  break;
2264  case PART_2NxnU:
2265  hls_prediction_unit(s, x0, y0, cb_size, cb_size / 4, log2_cb_size, 0, idx);
2266  hls_prediction_unit(s, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
2267  break;
2268  case PART_2NxnD:
2269  hls_prediction_unit(s, x0, y0, cb_size, cb_size * 3 / 4, log2_cb_size, 0, idx);
2270  hls_prediction_unit(s, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx);
2271  break;
2272  case PART_nLx2N:
2273  hls_prediction_unit(s, x0, y0, cb_size / 4, cb_size, log2_cb_size, 0, idx - 2);
2274  hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
2275  break;
2276  case PART_nRx2N:
2277  hls_prediction_unit(s, x0, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0, idx - 2);
2278  hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);
2279  break;
2280  case PART_NxN:
2281  hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size / 2, log2_cb_size, 0, idx - 1);
2282  hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
2283  hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
2284  hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
2285  break;
2286  }
2287  }
2288 
2289  if (!pcm_flag) {
2290  int rqt_root_cbf = 1;
2291 
2292  if (lc->cu.pred_mode != MODE_INTRA &&
2293  !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) {
2294  rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(s);
2295  }
2296  if (rqt_root_cbf) {
2297  const static int cbf[2] = { 0 };
2298  lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ?
2299  s->ps.sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag :
2300  s->ps.sps->max_transform_hierarchy_depth_inter;
2301  ret = hls_transform_tree(s, x0, y0, x0, y0, x0, y0,
2302  log2_cb_size,
2303  log2_cb_size, 0, 0, cbf, cbf);
2304  if (ret < 0)
2305  return ret;
2306  } else {
2307  if (!s->sh.disable_deblocking_filter_flag)
2308  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
2309  }
2310  }
2311  }
2312 
2313  if (s->ps.pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0)
2314  ff_hevc_set_qPy(s, x0, y0, log2_cb_size);
2315 
2316  x = y_cb * min_cb_width + x_cb;
2317  for (y = 0; y < length; y++) {
2318  memset(&s->qp_y_tab[x], lc->qp_y, length);
2319  x += min_cb_width;
2320  }
2321 
2322  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2323  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
2324  lc->qPy_pred = lc->qp_y;
2325  }
2326 
2327  set_ct_depth(s, x0, y0, log2_cb_size, lc->ct_depth);
2328 
2329  return 0;
2330 }
2331 
2332 static int hls_coding_quadtree(HEVCContext *s, int x0, int y0,
2333  int log2_cb_size, int cb_depth)
2334 {
2335  HEVCLocalContext *lc = s->HEVClc;
2336  const int cb_size = 1 << log2_cb_size;
2337  int ret;
2338  int split_cu;
2339 
2340  lc->ct_depth = cb_depth;
2341  if (x0 + cb_size <= s->ps.sps->width &&
2342  y0 + cb_size <= s->ps.sps->height &&
2343  log2_cb_size > s->ps.sps->log2_min_cb_size) {
2344  split_cu = ff_hevc_split_coding_unit_flag_decode(s, cb_depth, x0, y0);
2345  } else {
2346  split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size);
2347  }
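 /* When the CU would extend beyond the picture, split_cu_flag is not coded:
  * splitting is inferred as long as the CU is larger than the minimum coding
  * block size. */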
2348  if (s->ps.pps->cu_qp_delta_enabled_flag &&
2349  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) {
2350  lc->tu.is_cu_qp_delta_coded = 0;
2351  lc->tu.cu_qp_delta = 0;
2352  }
2353 
2354  if (s->sh.cu_chroma_qp_offset_enabled_flag &&
2355  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) {
2356  lc->tu.is_cu_chroma_qp_offset_coded = 0;
2357  }
2358 
2359  if (split_cu) {
2360  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2361  const int cb_size_split = cb_size >> 1;
2362  const int x1 = x0 + cb_size_split;
2363  const int y1 = y0 + cb_size_split;
2364 
2365  int more_data = 0;
2366 
2367  more_data = hls_coding_quadtree(s, x0, y0, log2_cb_size - 1, cb_depth + 1);
2368  if (more_data < 0)
2369  return more_data;
2370 
2371  if (more_data && x1 < s->ps.sps->width) {
2372  more_data = hls_coding_quadtree(s, x1, y0, log2_cb_size - 1, cb_depth + 1);
2373  if (more_data < 0)
2374  return more_data;
2375  }
2376  if (more_data && y1 < s->ps.sps->height) {
2377  more_data = hls_coding_quadtree(s, x0, y1, log2_cb_size - 1, cb_depth + 1);
2378  if (more_data < 0)
2379  return more_data;
2380  }
2381  if (more_data && x1 < s->ps.sps->width &&
2382  y1 < s->ps.sps->height) {
2383  more_data = hls_coding_quadtree(s, x1, y1, log2_cb_size - 1, cb_depth + 1);
2384  if (more_data < 0)
2385  return more_data;
2386  }
2387 
2388  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2389  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
2390  lc->qPy_pred = lc->qp_y;
2391 
2392  if (more_data)
2393  return ((x1 + cb_size_split) < s->ps.sps->width ||
2394  (y1 + cb_size_split) < s->ps.sps->height);
2395  else
2396  return 0;
2397  } else {
2398  ret = hls_coding_unit(s, x0, y0, log2_cb_size);
2399  if (ret < 0)
2400  return ret;
2401  if ((!((x0 + cb_size) %
2402  (1 << (s->ps.sps->log2_ctb_size))) ||
2403  (x0 + cb_size >= s->ps.sps->width)) &&
2404  (!((y0 + cb_size) %
2405  (1 << (s->ps.sps->log2_ctb_size))) ||
2406  (y0 + cb_size >= s->ps.sps->height))) {
2407  int end_of_slice_flag = ff_hevc_end_of_slice_flag_decode(s);
2408  return !end_of_slice_flag;
2409  } else {
2410  return 1;
2411  }
2412  }
2413 
2414  return 0;
2415 }
2416 
2417 static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb,
2418  int ctb_addr_ts)
2419 {
2420  HEVCLocalContext *lc = s->HEVClc;
2421  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2422  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2423  int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
2424 
2425  s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;
2426 
2427  if (s->ps.pps->entropy_coding_sync_enabled_flag) {
2428  if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
2429  lc->first_qp_group = 1;
2430  lc->end_of_tiles_x = s->ps.sps->width;
2431  } else if (s->ps.pps->tiles_enabled_flag) {
2432  if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
2433  int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
2434  lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
2435  lc->first_qp_group = 1;
2436  }
2437  } else {
2438  lc->end_of_tiles_x = s->ps.sps->width;
2439  }
2440 
2441  lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height);
2442 
2443  lc->boundary_flags = 0;
2444  if (s->ps.pps->tiles_enabled_flag) {
2445  if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
2446  lc->boundary_flags |= BOUNDARY_LEFT_TILE;
2447  if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
2448  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2449  if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
2450  lc->boundary_flags |= BOUNDARY_UPPER_TILE;
2451  if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
2452  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2453  } else {
2454  if (ctb_addr_in_slice <= 0)
2455  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2456  if (ctb_addr_in_slice < s->ps.sps->ctb_width)
2457  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2458  }
2459 
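 /* Neighbour availability combines picture position with slice and tile
  * boundaries; these flags gate the use of the left/up/up-right/up-left CTBs
  * for prediction and context derivation. */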
2460  lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
2461  lc->ctb_up_flag = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
2462  lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
2463  lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
2464 }
2465 
2466 static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
2467 {
2468  HEVCContext *s = avctxt->priv_data;
2469  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2470  int more_data = 1;
2471  int x_ctb = 0;
2472  int y_ctb = 0;
2473  int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
2474  int ret;
2475 
2476  if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {
2477  av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n");
2478  return AVERROR_INVALIDDATA;
2479  }
2480 
2481  if (s->sh.dependent_slice_segment_flag) {
2482  int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
2483  if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {
2484  av_log(s->avctx, AV_LOG_ERROR, "Previous slice segment missing\n");
2485  return AVERROR_INVALIDDATA;
2486  }
2487  }
2488 
2489  while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2490  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2491 
2492  x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2493  y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2494  hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
2495 
2496  ret = ff_hevc_cabac_init(s, ctb_addr_ts, 0);
2497  if (ret < 0) {
2498  s->tab_slice_address[ctb_addr_rs] = -1;
2499  return ret;
2500  }
2501 
2502  hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2503 
2504  s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
2505  s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset;
2506  s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag;
2507 
2508  more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2509  if (more_data < 0) {
2510  s->tab_slice_address[ctb_addr_rs] = -1;
2511  return more_data;
2512  }
2513 
2514 
2515  ctb_addr_ts++;
2516  ff_hevc_save_states(s, ctb_addr_ts);
2517  ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
2518  }
2519 
2520  if (x_ctb + ctb_size >= s->ps.sps->width &&
2521  y_ctb + ctb_size >= s->ps.sps->height)
2522  ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
2523 
2524  return ctb_addr_ts;
2525 }
2526 
2527 static int hls_slice_data(HEVCContext *s)
2528 {
2529  int arg[2];
2530  int ret[2];
2531 
2532  arg[0] = 0;
2533  arg[1] = 1;
2534 
2535  s->avctx->execute(s->avctx, hls_decode_entry, arg, ret , 1, sizeof(int));
2536  return ret[0];
2537 }
2538 static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int job, int self_id)
2539 {
2540  HEVCContext *s1 = avctxt->priv_data, *s;
2541  HEVCLocalContext *lc;
2542  int ctb_size = 1<< s1->ps.sps->log2_ctb_size;
2543  int more_data = 1;
2544  int *ctb_row_p = input_ctb_row;
2545  int ctb_row = ctb_row_p[job];
2546  int ctb_addr_rs = s1->sh.slice_ctb_addr_rs + ctb_row * ((s1->ps.sps->width + ctb_size - 1) >> s1->ps.sps->log2_ctb_size);
2547  int ctb_addr_ts = s1->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
2548  int thread = ctb_row % s1->threads_number;
2549  int ret;
2550 
2551  s = s1->sList[self_id];
2552  lc = s->HEVClc;
2553 
2554  if(ctb_row) {
2555  ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
2556  if (ret < 0)
2557  goto error;
2558  ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]);
2559  }
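 /* Rows after the first start CABAC decoding at their slice-header entry
  * point; context state propagated from the row above (per the WPP rules) is
  * loaded in ff_hevc_cabac_init(), and ff_thread_await_progress2() below keeps
  * this row sufficiently behind the one above it. */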
2560 
2561  while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2562  int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2563  int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2564 
2565  hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
2566 
2567  ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP);
2568 
2569  if (atomic_load(&s1->wpp_err)) {
2570  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2571  return 0;
2572  }
2573 
2574  ret = ff_hevc_cabac_init(s, ctb_addr_ts, thread);
2575  if (ret < 0)
2576  goto error;
2577  hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2578  more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2579 
2580  if (more_data < 0) {
2581  ret = more_data;
2582  goto error;
2583  }
2584 
2585  ctb_addr_ts++;
2586 
2587  ff_hevc_save_states(s, ctb_addr_ts);
2588  ff_thread_report_progress2(s->avctx, ctb_row, thread, 1);
2589  ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
2590 
2591  if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
2592  atomic_store(&s1->wpp_err, 1);
2593  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2594  return 0;
2595  }
2596 
2597  if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) {
2598  ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
2599  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2600  return ctb_addr_ts;
2601  }
2602  ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2603  x_ctb+=ctb_size;
2604 
2605  if(x_ctb >= s->ps.sps->width) {
2606  break;
2607  }
2608  }
2609  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2610 
2611  return 0;
2612 error:
2613  s->tab_slice_address[ctb_addr_rs] = -1;
2614  atomic_store(&s1->wpp_err, 1);
2615  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2616  return ret;
2617 }
2618 
2619 static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
2620 {
2621  const uint8_t *data = nal->data;
2622  int length = nal->size;
2623  HEVCLocalContext *lc = s->HEVClc;
2624  int *ret = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
2625  int *arg = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
2626  int64_t offset;
2627  int64_t startheader, cmpt = 0;
2628  int i, j, res = 0;
2629 
2630  if (!ret || !arg) {
2631  av_free(ret);
2632  av_free(arg);
2633  return AVERROR(ENOMEM);
2634  }
2635 
2636  if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
2637  av_log(s->avctx, AV_LOG_ERROR, "WPP ctb addresses are wrong (%d %d %d %d)\n",
2638  s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
2639  s->ps.sps->ctb_width, s->ps.sps->ctb_height
2640  );
2641  res = AVERROR_INVALIDDATA;
2642  goto error;
2643  }
2644 
2645  ff_alloc_entries(s->avctx, s->sh.num_entry_point_offsets + 1);
2646 
2647  for (i = 1; i < s->threads_number; i++) {
2648  if (s->sList[i] && s->HEVClcList[i])
2649  continue;
2650  av_freep(&s->sList[i]);
2651  av_freep(&s->HEVClcList[i]);
2652  s->sList[i] = av_malloc(sizeof(HEVCContext));
2653  s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext));
2654  if (!s->sList[i] || !s->HEVClcList[i]) {
2655  res = AVERROR(ENOMEM);
2656  goto error;
2657  }
2658  memcpy(s->sList[i], s, sizeof(HEVCContext));
2659  s->sList[i]->HEVClc = s->HEVClcList[i];
2660  }
2661 
2662  offset = (lc->gb.index >> 3);
2663 
2664  for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
2665  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2666  startheader--;
2667  cmpt++;
2668  }
2669  }
2670 
2671  for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
2672  offset += (s->sh.entry_point_offset[i - 1] - cmpt);
2673  for (j = 0, cmpt = 0, startheader = offset
2674  + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) {
2675  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2676  startheader--;
2677  cmpt++;
2678  }
2679  }
2680  s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;
2681  s->sh.offset[i - 1] = offset;
2682 
2683  }
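 /* The slice-header entry_point_offset values count bytes of the escaped NAL
  * unit, while s->data points at the unescaped buffer, so the emulation
  * prevention bytes skipped in each segment (cmpt) are subtracted from the
  * sizes and offsets. */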
2684  if (s->sh.num_entry_point_offsets != 0) {
2685  offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;
2686  if (length < offset) {
2687  av_log(s->avctx, AV_LOG_ERROR, "entry_point_offset table is corrupted\n");
2688  res = AVERROR_INVALIDDATA;
2689  goto error;
2690  }
2691  s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset;
2692  s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;
2693 
2694  }
2695  s->data = data;
2696 
2697  for (i = 1; i < s->threads_number; i++) {
2698  s->sList[i]->HEVClc->first_qp_group = 1;
2699  s->sList[i]->HEVClc->qp_y = s->sList[0]->HEVClc->qp_y;
2700  memcpy(s->sList[i], s, sizeof(HEVCContext));
2701  s->sList[i]->HEVClc = s->HEVClcList[i];
2702  }
2703 
2704  atomic_store(&s->wpp_err, 0);
2705  ff_reset_entries(s->avctx);
2706 
2707  for (i = 0; i <= s->sh.num_entry_point_offsets; i++) {
2708  arg[i] = i;
2709  ret[i] = 0;
2710  }
2711 
2712  if (s->ps.pps->entropy_coding_sync_enabled_flag)
2713  s->avctx->execute2(s->avctx, hls_decode_entry_wpp, arg, ret, s->sh.num_entry_point_offsets + 1);
2714 
2715  for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
2716  res += ret[i];
2717 error:
2718  av_free(ret);
2719  av_free(arg);
2720  return res;
2721 }
2722 
2723 static int set_side_data(HEVCContext *s)
2724 {
2725  AVFrame *out = s->ref->frame;
2726  int ret;
2727 
2728  if (s->sei.frame_packing.present &&
2729  s->sei.frame_packing.arrangement_type >= 3 &&
2730  s->sei.frame_packing.arrangement_type <= 5 &&
2731  s->sei.frame_packing.content_interpretation_type > 0 &&
2732  s->sei.frame_packing.content_interpretation_type < 3) {
2733  AVStereo3D *stereo = av_stereo3d_create_side_data(out);
2734  if (!stereo)
2735  return AVERROR(ENOMEM);
2736 
2737  switch (s->sei.frame_packing.arrangement_type) {
2738  case 3:
2739  if (s->sei.frame_packing.quincunx_subsampling)
2740  stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2741  else
2742  stereo->type = AV_STEREO3D_SIDEBYSIDE;
2743  break;
2744  case 4:
2745  stereo->type = AV_STEREO3D_TOPBOTTOM;
2746  break;
2747  case 5:
2748  stereo->type = AV_STEREO3D_FRAMESEQUENCE;
2749  break;
2750  }
2751 
2752  if (s->sei.frame_packing.content_interpretation_type == 2)
2753  stereo->flags = AV_STEREO3D_FLAG_INVERT;
2754 
2755  if (s->sei.frame_packing.arrangement_type == 5) {
2756  if (s->sei.frame_packing.current_frame_is_frame0_flag)
2757  stereo->view = AV_STEREO3D_VIEW_LEFT;
2758  else
2759  stereo->view = AV_STEREO3D_VIEW_RIGHT;
2760  }
2761  }
2762 
2763  if (s->sei.display_orientation.present &&
2764  (s->sei.display_orientation.anticlockwise_rotation ||
2765  s->sei.display_orientation.hflip || s->sei.display_orientation.vflip)) {
2766  double angle = s->sei.display_orientation.anticlockwise_rotation * 360 / (double) (1 << 16);
2767  AVFrameSideData *rotation = av_frame_new_side_data(out,
2768  AV_FRAME_DATA_DISPLAYMATRIX,
2769  sizeof(int32_t) * 9);
2770  if (!rotation)
2771  return AVERROR(ENOMEM);
2772 
2773  /* av_display_rotation_set() expects the angle in the clockwise
2774  * direction, hence the first minus.
2775  * The below code applies the flips after the rotation, yet
2776  * the H.2645 specs require flipping to be applied first.
2777  * Because of R O(phi) = O(-phi) R (where R is flipping around
2778  * an arbitrary axis and O(phi) is the proper rotation by phi)
2779  * we can create display matrices as desired by negating
2780  * the degree once for every flip applied. */
2781  angle = -angle * (1 - 2 * !!s->sei.display_orientation.hflip)
2782  * (1 - 2 * !!s->sei.display_orientation.vflip);
2783  av_display_rotation_set((int32_t *)rotation->data, angle);
2784  av_display_matrix_flip((int32_t *)rotation->data,
2785  s->sei.display_orientation.hflip,
2786  s->sei.display_orientation.vflip);
2787  }
2788 
2789  // Decrement the mastering display flag when an IRAP frame has no_rasl_output_flag=1
2790  // so the side data persists for the entire coded video sequence.
2791  if (s->sei.mastering_display.present > 0 &&
2792  IS_IRAP(s) && s->no_rasl_output_flag) {
2793  s->sei.mastering_display.present--;
2794  }
2795  if (s->sei.mastering_display.present) {
2796  // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b
2797  const int mapping[3] = {2, 0, 1};
2798  const int chroma_den = 50000;
2799  const int luma_den = 10000;
2800  int i;
2801  AVMasteringDisplayMetadata *metadata =
2802  av_mastering_display_metadata_create_side_data(out);
2803  if (!metadata)
2804  return AVERROR(ENOMEM);
2805 
2806  for (i = 0; i < 3; i++) {
2807  const int j = mapping[i];
2808  metadata->display_primaries[i][0].num = s->sei.mastering_display.display_primaries[j][0];
2809  metadata->display_primaries[i][0].den = chroma_den;
2810  metadata->display_primaries[i][1].num = s->sei.mastering_display.display_primaries[j][1];
2811  metadata->display_primaries[i][1].den = chroma_den;
2812  }
2813  metadata->white_point[0].num = s->sei.mastering_display.white_point[0];
2814  metadata->white_point[0].den = chroma_den;
2815  metadata->white_point[1].num = s->sei.mastering_display.white_point[1];
2816  metadata->white_point[1].den = chroma_den;
2817 
2818  metadata->max_luminance.num = s->sei.mastering_display.max_luminance;
2819  metadata->max_luminance.den = luma_den;
2820  metadata->min_luminance.num = s->sei.mastering_display.min_luminance;
2821  metadata->min_luminance.den = luma_den;
2822  metadata->has_luminance = 1;
2823  metadata->has_primaries = 1;
2824 
2825  av_log(s->avctx, AV_LOG_DEBUG, "Mastering Display Metadata:\n");
2826  av_log(s->avctx, AV_LOG_DEBUG,
2827  "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
2828  av_q2d(metadata->display_primaries[0][0]),
2829  av_q2d(metadata->display_primaries[0][1]),
2830  av_q2d(metadata->display_primaries[1][0]),
2831  av_q2d(metadata->display_primaries[1][1]),
2832  av_q2d(metadata->display_primaries[2][0]),
2833  av_q2d(metadata->display_primaries[2][1]),
2834  av_q2d(metadata->white_point[0]), av_q2d(metadata->white_point[1]));
2835  av_log(s->avctx, AV_LOG_DEBUG,
2836  "min_luminance=%f, max_luminance=%f\n",
2837  av_q2d(metadata->min_luminance), av_q2d(metadata->max_luminance));
2838  }
2839  // Decrement the content light level flag when an IRAP frame has no_rasl_output_flag=1
2840  // so the side data persists for the entire coded video sequence.
2841  if (s->sei.content_light.present > 0 &&
2842  IS_IRAP(s) && s->no_rasl_output_flag) {
2843  s->sei.content_light.present--;
2844  }
2845  if (s->sei.content_light.present) {
2846  AVContentLightMetadata *metadata =
2847  av_content_light_metadata_create_side_data(out);
2848  if (!metadata)
2849  return AVERROR(ENOMEM);
2850  metadata->MaxCLL = s->sei.content_light.max_content_light_level;
2851  metadata->MaxFALL = s->sei.content_light.max_pic_average_light_level;
2852 
2853  av_log(s->avctx, AV_LOG_DEBUG, "Content Light Level Metadata:\n");
2854  av_log(s->avctx, AV_LOG_DEBUG, "MaxCLL=%d, MaxFALL=%d\n",
2855  metadata->MaxCLL, metadata->MaxFALL);
2856  }
2857 
2858  if (s->sei.a53_caption.buf_ref) {
2859  HEVCSEIA53Caption *a53 = &s->sei.a53_caption;
2860 
2861  AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, a53->buf_ref);
2862  if (!sd)
2863  av_buffer_unref(&a53->buf_ref);
2864  a53->buf_ref = NULL;
2865  }
2866 
2867  for (int i = 0; i < s->sei.unregistered.nb_buf_ref; i++) {
2868  HEVCSEIUnregistered *unreg = &s->sei.unregistered;
2869 
2870  if (unreg->buf_ref[i]) {
2871  AVFrameSideData *sd = av_frame_new_side_data_from_buf(out,
2872  AV_FRAME_DATA_SEI_UNREGISTERED,
2873  unreg->buf_ref[i]);
2874  if (!sd)
2875  av_buffer_unref(&unreg->buf_ref[i]);
2876  unreg->buf_ref[i] = NULL;
2877  }
2878  }
2879  s->sei.unregistered.nb_buf_ref = 0;
2880 
2881  if (s->sei.timecode.present) {
2882  uint32_t *tc_sd;
2883  char tcbuf[AV_TIMECODE_STR_SIZE];
2884  AVFrameSideData *tcside = av_frame_new_side_data(out, AV_FRAME_DATA_S12M_TIMECODE,
2885  sizeof(uint32_t) * 4);
2886  if (!tcside)
2887  return AVERROR(ENOMEM);
2888 
2889  tc_sd = (uint32_t*)tcside->data;
2890  tc_sd[0] = s->sei.timecode.num_clock_ts;
2891 
2892  for (int i = 0; i < tc_sd[0]; i++) {
2893  int drop = s->sei.timecode.cnt_dropped_flag[i];
2894  int hh = s->sei.timecode.hours_value[i];
2895  int mm = s->sei.timecode.minutes_value[i];
2896  int ss = s->sei.timecode.seconds_value[i];
2897  int ff = s->sei.timecode.n_frames[i];
2898 
2899  tc_sd[i + 1] = av_timecode_get_smpte(s->avctx->framerate, drop, hh, mm, ss, ff);
2900  av_timecode_make_smpte_tc_string2(tcbuf, s->avctx->framerate, tc_sd[i + 1], 0, 0);
2901  av_dict_set(&out->metadata, "timecode", tcbuf, 0);
2902  }
2903 
2904  s->sei.timecode.num_clock_ts = 0;
2905  }
2906 
2907  if (s->sei.film_grain_characteristics.present) {
2908  HEVCSEIFilmGrainCharacteristics *fgc = &s->sei.film_grain_characteristics;
2909  AVFilmGrainParams *fgp = av_film_grain_params_create_side_data(out);
2910  if (!fgp)
2911  return AVERROR(ENOMEM);
2912 
2913  fgp->type = AV_FILM_GRAIN_PARAMS_H274;
2914  fgp->seed = s->ref->poc; /* no poc_offset in HEVC */
2915 
2916  fgp->codec.h274.model_id = fgc->model_id;
2917  if (fgc->separate_colour_description_present_flag) {
2918  fgp->codec.h274.bit_depth_luma = fgc->bit_depth_luma;
2919  fgp->codec.h274.bit_depth_chroma = fgc->bit_depth_chroma;
2920  fgp->codec.h274.color_range = fgc->full_range + 1;
2921  fgp->codec.h274.color_primaries = fgc->color_primaries;
2922  fgp->codec.h274.color_trc = fgc->transfer_characteristics;
2923  fgp->codec.h274.color_space = fgc->matrix_coeffs;
2924  } else {
2925  const HEVCSPS *sps = s->ps.sps;
2926  const VUI *vui = &sps->vui;
2927  fgp->codec.h274.bit_depth_luma = sps->bit_depth;
2928  fgp->codec.h274.bit_depth_chroma = sps->bit_depth_chroma;
2929  if (vui->video_signal_type_present_flag)
2930  fgp->codec.h274.color_range = vui->video_full_range_flag + 1;
2931  else
2932  fgp->codec.h274.color_range = AVCOL_RANGE_UNSPECIFIED;
2933  if (vui->colour_description_present_flag) {
2934  fgp->codec.h274.color_primaries = vui->colour_primaries;
2935  fgp->codec.h274.color_trc = vui->transfer_characteristic;
2936  fgp->codec.h274.color_space = vui->matrix_coeffs;
2937  } else {
2938  fgp->codec.h274.color_primaries = AVCOL_PRI_UNSPECIFIED;
2939  fgp->codec.h274.color_trc = AVCOL_TRC_UNSPECIFIED;
2940  fgp->codec.h274.color_space = AVCOL_SPC_UNSPECIFIED;
2941  }
2942  }
2943  fgp->codec.h274.blending_mode_id = fgc->blending_mode_id;
2944  fgp->codec.h274.log2_scale_factor = fgc->log2_scale_factor;
2945 
2946  memcpy(&fgp->codec.h274.component_model_present, &fgc->comp_model_present_flag,
2947  sizeof(fgp->codec.h274.component_model_present));
2948  memcpy(&fgp->codec.h274.num_intensity_intervals, &fgc->num_intensity_intervals,
2949  sizeof(fgp->codec.h274.num_intensity_intervals));
2950  memcpy(&fgp->codec.h274.num_model_values, &fgc->num_model_values,
2951  sizeof(fgp->codec.h274.num_model_values));
2952  memcpy(&fgp->codec.h274.intensity_interval_lower_bound, &fgc->intensity_interval_lower_bound,
2953  sizeof(fgp->codec.h274.intensity_interval_lower_bound));
2954  memcpy(&fgp->codec.h274.intensity_interval_upper_bound, &fgc->intensity_interval_upper_bound,
2955  sizeof(fgp->codec.h274.intensity_interval_upper_bound));
2956  memcpy(&fgp->codec.h274.comp_model_value, &fgc->comp_model_value,
2957  sizeof(fgp->codec.h274.comp_model_value));
2958 
2959  fgc->present = fgc->persistence_flag;
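 /* The film grain SEI only persists for subsequent pictures while its
  * persistence_flag is set. */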
2960  }
2961 
2962  if (s->sei.dynamic_hdr_plus.info) {
2963  AVBufferRef *info_ref = av_buffer_ref(s->sei.dynamic_hdr_plus.info);
2964  if (!info_ref)
2965  return AVERROR(ENOMEM);
2966 
2967  if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_PLUS, info_ref)) {
2968  av_buffer_unref(&info_ref);
2969  return AVERROR(ENOMEM);
2970  }
2971  }
2972 
2973  if (s->rpu_buf) {
2974  AVFrameSideData *rpu = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DOVI_RPU_BUFFER, s->rpu_buf);
2975  if (!rpu)
2976  return AVERROR(ENOMEM);
2977 
2978  s->rpu_buf = NULL;
2979  }
2980 
2981  if ((ret = ff_dovi_attach_side_data(&s->dovi_ctx, out)) < 0)
2982  return ret;
2983 
2984  return 0;
2985 }
2986 
2987 static int hevc_frame_start(HEVCContext *s)
2988 {
2989  HEVCLocalContext *lc = s->HEVClc;
2990  int pic_size_in_ctb = ((s->ps.sps->width >> s->ps.sps->log2_min_cb_size) + 1) *
2991  ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1);
2992  int ret;
2993 
2994  memset(s->horizontal_bs, 0, s->bs_width * s->bs_height);
2995  memset(s->vertical_bs, 0, s->bs_width * s->bs_height);
2996  memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height);
2997  memset(s->is_pcm, 0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1));
2998  memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address));
2999 
3000  s->is_decoded = 0;
3001  s->first_nal_type = s->nal_unit_type;
3002 
3003  s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos);
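 /* no_rasl_output_flag is set for IDR and BLA pictures and for a CRA that
  * directly follows an end of sequence; set_side_data() uses it to decide when
  * persistent SEI (mastering display, content light) should expire. */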
3004 
3005  if (s->ps.pps->tiles_enabled_flag)
3006  lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size;
3007 
3008  ret = ff_hevc_set_new_ref(s, &s->frame, s->poc);
3009  if (ret < 0)
3010  goto fail;
3011 
3012  ret = ff_hevc_frame_rps(s);
3013  if (ret < 0) {
3014  av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n");
3015  goto fail;
3016  }
3017 
3018  s->ref->frame->key_frame = IS_IRAP(s);
3019 
3020  s->ref->needs_fg = s->sei.film_grain_characteristics.present &&
3021  !(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) &&
3022  !s->avctx->hwaccel;
3023 
3024  if (s->ref->needs_fg) {
3025  s->ref->frame_grain->format = s->ref->frame->format;
3026  s->ref->frame_grain->width = s->ref->frame->width;
3027  s->ref->frame_grain->height = s->ref->frame->height;
3028  if ((ret = ff_thread_get_buffer(s->avctx, &s->ref->tf_grain, 0)) < 0)
3029  goto fail;
3030  }
3031 
3032  ret = set_side_data(s);
3033  if (ret < 0)
3034  goto fail;
3035 
3036  s->frame->pict_type = 3 - s->sh.slice_type;
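 /* HEVC_SLICE_B/P/I are 0/1/2, so 3 - slice_type maps to
  * AV_PICTURE_TYPE_B/P/I. */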
3037 
3038  if (!IS_IRAP(s))
3039  ff_hevc_bump_frame(s);
3040 
3041  av_frame_unref(s->output_frame);
3042  ret = ff_hevc_output_frame(s, s->output_frame, 0);
3043  if (ret < 0)
3044  goto fail;
3045 
3046  if (!s->avctx->hwaccel)
3047  ff_thread_finish_setup(s->avctx);
3048 
3049  return 0;
3050 
3051 fail:
3052  if (s->ref)
3053  ff_hevc_unref_frame(s, s->ref, ~0);
3054  s->ref = NULL;
3055  return ret;
3056 }
3057 
3058 static int hevc_frame_end(HEVCContext *s)
3059 {
3060  HEVCFrame *out = s->ref;
3061  const AVFrameSideData *sd;
3062  int ret;
3063 
3064  if (out->needs_fg) {
3065  sd = av_frame_get_side_data(out->frame, AV_FRAME_DATA_FILM_GRAIN_PARAMS);
3066  av_assert0(out->frame_grain->buf[0] && sd);
3067  ret = ff_h274_apply_film_grain(out->frame_grain, out->frame, &s->h274db,
3068  (AVFilmGrainParams *) sd->data);
3069 
3070  if (ret < 0) {
3071  av_log(s->avctx, AV_LOG_WARNING, "Failed synthesizing film "
3072  "grain, ignoring: %s\n", av_err2str(ret));
3073  out->needs_fg = 0;
3074  }
3075  }
3076 
3077  return 0;
3078 }
3079 
3080 static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
3081 {
3082  HEVCLocalContext *lc = s->HEVClc;
3083  GetBitContext *gb = &lc->gb;
3084  int ctb_addr_ts, ret;
3085 
3086  *gb = nal->gb;
3087  s->nal_unit_type = nal->type;
3088  s->temporal_id = nal->temporal_id;
3089 
3090  switch (s->nal_unit_type) {
3091  case HEVC_NAL_VPS:
3092  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3093  ret = s->avctx->hwaccel->decode_params(s->avctx,
3094  nal->type,
3095  nal->raw_data,
3096  nal->raw_size);
3097  if (ret < 0)
3098  goto fail;
3099  }
3100  ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
3101  if (ret < 0)
3102  goto fail;
3103  break;
3104  case HEVC_NAL_SPS:
3105  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3106  ret = s->avctx->hwaccel->decode_params(s->avctx,
3107  nal->type,
3108  nal->raw_data,
3109  nal->raw_size);
3110  if (ret < 0)
3111  goto fail;
3112  }
3113  ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
3114  s->apply_defdispwin);
3115  if (ret < 0)
3116  goto fail;
3117  break;
3118  case HEVC_NAL_PPS:
3119  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3120  ret = s->avctx->hwaccel->decode_params(s->avctx,
3121  nal->type,
3122  nal->raw_data,
3123  nal->raw_size);
3124  if (ret < 0)
3125  goto fail;
3126  }
3127  ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
3128  if (ret < 0)
3129  goto fail;
3130  break;
3131  case HEVC_NAL_SEI_PREFIX:
3132  case HEVC_NAL_SEI_SUFFIX:
3133  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3134  ret = s->avctx->hwaccel->decode_params(s->avctx,
3135  nal->type,
3136  nal->raw_data,
3137  nal->raw_size);
3138  if (ret < 0)
3139  goto fail;
3140  }
3141  ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type);
3142  if (ret < 0)
3143  goto fail;
3144  break;
3145  case HEVC_NAL_TRAIL_R:
3146  case HEVC_NAL_TRAIL_N:
3147  case HEVC_NAL_TSA_N:
3148  case HEVC_NAL_TSA_R:
3149  case HEVC_NAL_STSA_N:
3150  case HEVC_NAL_STSA_R:
3151  case HEVC_NAL_BLA_W_LP:
3152  case HEVC_NAL_BLA_W_RADL:
3153  case HEVC_NAL_BLA_N_LP:
3154  case HEVC_NAL_IDR_W_RADL:
3155  case HEVC_NAL_IDR_N_LP:
3156  case HEVC_NAL_CRA_NUT:
3157  case HEVC_NAL_RADL_N:
3158  case HEVC_NAL_RADL_R:
3159  case HEVC_NAL_RASL_N:
3160  case HEVC_NAL_RASL_R:
3161  ret = hls_slice_header(s);
3162  if (ret < 0)
3163  return ret;
3164  if (ret == 1) {
3165  ret = AVERROR_INVALIDDATA;
3166  goto fail;
3167  }
3168 
3169 
3170  if (
3171  (s->avctx->skip_frame >= AVDISCARD_BIDIR && s->sh.slice_type == HEVC_SLICE_B) ||
3172  (s->avctx->skip_frame >= AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) ||
3173  (s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) {
3174  break;
3175  }
3176 
3177  if (s->sh.first_slice_in_pic_flag) {
3178  if (s->max_ra == INT_MAX) {
3179  if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) {
3180  s->max_ra = s->poc;
3181  } else {
3182  if (IS_IDR(s))
3183  s->max_ra = INT_MIN;
3184  }
3185  }
3186 
3187  if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) &&
3188  s->poc <= s->max_ra) {
3189  s->is_decoded = 0;
3190  break;
3191  } else {
3192  if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra)
3193  s->max_ra = INT_MIN;
3194  }
3195 
3196  s->overlap ++;
3197  ret = hevc_frame_start(s);
3198  if (ret < 0)
3199  return ret;
3200  } else if (!s->ref) {
3201  av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
3202  goto fail;
3203  }
3204 
3205  if (s->nal_unit_type != s->first_nal_type) {
3206  av_log(s->avctx, AV_LOG_ERROR,
3207  "Non-matching NAL types of the VCL NALUs: %d %d\n",
3208  s->first_nal_type, s->nal_unit_type);
3209  return AVERROR_INVALIDDATA;
3210  }
3211 
3212  if (!s->sh.dependent_slice_segment_flag &&
3213  s->sh.slice_type != HEVC_SLICE_I) {
3214  ret = ff_hevc_slice_rpl(s);
3215  if (ret < 0) {
3216  av_log(s->avctx, AV_LOG_WARNING,
3217  "Error constructing the reference lists for the current slice.\n");
3218  goto fail;
3219  }
3220  }
3221 
3222  if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
3223  ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);
3224  if (ret < 0)
3225  goto fail;
3226  }
3227 
3228  if (s->avctx->hwaccel) {
3229  ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size);
3230  if (ret < 0)
3231  goto fail;
3232  } else {
3233  if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
3234  ctb_addr_ts = hls_slice_data_wpp(s, nal);
3235  else
3236  ctb_addr_ts = hls_slice_data(s);
3237  if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
3238  ret = hevc_frame_end(s);
3239  if (ret < 0)
3240  goto fail;
3241  s->is_decoded = 1;
3242  }
3243 
3244  if (ctb_addr_ts < 0) {
3245  ret = ctb_addr_ts;
3246  goto fail;
3247  }
3248  }
3249  break;
3250  case HEVC_NAL_EOS_NUT:
3251  case HEVC_NAL_EOB_NUT:
3252  s->seq_decode = (s->seq_decode + 1) & 0xff;
3253  s->max_ra = INT_MAX;
3254  break;
3255  case HEVC_NAL_AUD:
3256  case HEVC_NAL_FD_NUT:
3257  case HEVC_NAL_UNSPEC62:
3258  break;
3259  default:
3260  av_log(s->avctx, AV_LOG_INFO,
3261  "Skipping NAL unit %d\n", s->nal_unit_type);
3262  }
3263 
3264  return 0;
3265 fail:
3266  if (s->avctx->err_recognition & AV_EF_EXPLODE)
3267  return ret;
3268  return 0;
3269 }
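The slice cases above gate decoding on the caller's skip_frame setting before any frame state is touched. The stand-alone sketch below mirrors that mapping; the helper name and its boolean parameters are illustrative, only the AVDISCARD_* values come from libavcodec.

#include <libavcodec/avcodec.h>   /* enum AVDiscard, AVDISCARD_* */

/* Illustrative helper mirroring the checks in decode_nal_unit():
 * returns 1 if a slice should be dropped for the given skip level. */
static int slice_should_be_skipped(enum AVDiscard skip_frame,
                                   int is_b_slice, int is_intra_slice,
                                   int is_irap_picture)
{
    if (skip_frame >= AVDISCARD_BIDIR    && is_b_slice)
        return 1;                         /* drop B slices */
    if (skip_frame >= AVDISCARD_NONINTRA && !is_intra_slice)
        return 1;                         /* keep only intra slices */
    if (skip_frame >= AVDISCARD_NONKEY   && !is_irap_picture)
        return 1;                         /* keep only IRAP pictures */
    return 0;
}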
3270 
3271 static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
3272 {
3273  int i, ret = 0;
3274  int eos_at_start = 1;
3275 
3276  s->ref = NULL;
3277  s->last_eos = s->eos;
3278  s->eos = 0;
3279  s->overlap = 0;
3280 
3281  /* split the input packet into NAL units, so we know the upper bound on the
3282  * number of slices in the frame */
3283  ret = ff_h2645_packet_split(&s->pkt, buf, length, s->avctx, s->is_nalff,
3284  s->nal_length_size, s->avctx->codec_id, 1, 0);
3285  if (ret < 0) {
3286  av_log(s->avctx, AV_LOG_ERROR,
3287  "Error splitting the input into NAL units.\n");
3288  return ret;
3289  }
3290 
3291  for (i = 0; i < s->pkt.nb_nals; i++) {
3292  if (s->pkt.nals[i].type == HEVC_NAL_EOB_NUT ||
3293  s->pkt.nals[i].type == HEVC_NAL_EOS_NUT) {
3294  if (eos_at_start) {
3295  s->last_eos = 1;
3296  } else {
3297  s->eos = 1;
3298  }
3299  } else {
3300  eos_at_start = 0;
3301  }
3302  }
3303 
3304  /*
3305  * Check for RPU delimiter.
3306  *
3307  * Dolby Vision RPUs masquerade as unregistered NALs of type 62.
3308  *
3309  * We have to do this check here and create the rpu buffer, since RPUs are appended
3310  * to the end of an AU; they are the last non-EOB/EOS NAL in the AU.
3311  */
3312  if (s->pkt.nb_nals > 1 && s->pkt.nals[s->pkt.nb_nals - 1].type == HEVC_NAL_UNSPEC62 &&
3313  s->pkt.nals[s->pkt.nb_nals - 1].size > 2 && !s->pkt.nals[s->pkt.nb_nals - 1].nuh_layer_id
3314  && !s->pkt.nals[s->pkt.nb_nals - 1].temporal_id) {
3315  H2645NAL *nal = &s->pkt.nals[s->pkt.nb_nals - 1];
3316  if (s->rpu_buf) {
3317  av_buffer_unref(&s->rpu_buf);
3318  av_log(s->avctx, AV_LOG_WARNING, "Multiple Dolby Vision RPUs found in one AU. Skipping previous.\n");
3319  }
3320 
3321  s->rpu_buf = av_buffer_alloc(nal->raw_size - 2);
3322  if (!s->rpu_buf)
3323  return AVERROR(ENOMEM);
3324  memcpy(s->rpu_buf->data, nal->raw_data + 2, nal->raw_size - 2);
3325 
3326  ret = ff_dovi_rpu_parse(&s->dovi_ctx, nal->data + 2, nal->size - 2);
3327  if (ret < 0) {
3328  av_buffer_unref(&s->rpu_buf);
3329  av_log(s->avctx, AV_LOG_WARNING, "Error parsing DOVI NAL unit.\n");
3330  /* ignore */
3331  }
3332  }
3333 
3334  /* decode the NAL units */
3335  for (i = 0; i < s->pkt.nb_nals; i++) {
3336  H2645NAL *nal = &s->pkt.nals[i];
3337 
3338  if (s->avctx->skip_frame >= AVDISCARD_ALL ||
3339  (s->avctx->skip_frame >= AVDISCARD_NONREF
3340  && ff_hevc_nal_is_nonref(nal->type)) || nal->nuh_layer_id > 0)
3341  continue;
3342 
3343  ret = decode_nal_unit(s, nal);
3344  if (ret >= 0 && s->overlap > 2)
3345  ret = AVERROR_INVALIDDATA;
3346  if (ret < 0) {
3347  av_log(s->avctx, AV_LOG_WARNING,
3348  "Error parsing NAL unit #%d.\n", i);
3349  goto fail;
3350  }
3351  }
3352 
3353 fail:
3354  if (s->ref && s->threads_type == FF_THREAD_FRAME)
3355  ff_thread_report_progress(&s->ref->tf, INT_MAX, 0);
3356 
3357  return ret;
3358 }
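decode_nal_units() makes two passes over the packet: a first scan only classifies EOS/EOB units (units seen before any other NAL close the previous sequence, later ones close the current one), and a second pass feeds each unit to decode_nal_unit(). A self-contained sketch of that first-pass classification, operating on a plain array of NAL type values; 36 and 37 are the EOS_NUT/EOB_NUT types from the H.265 spec, the function name is ours.

#include <stddef.h>

enum { EOS_NUT = 36, EOB_NUT = 37 };   /* H.265 Table 7-1 */

/* Sketch of the pre-scan: EOS/EOB before any other NAL terminates the
 * previous coded video sequence, EOS/EOB after real data terminates the
 * current one. */
static void classify_eos(const int *nal_types, size_t nb_nals,
                         int *last_eos, int *eos)
{
    int eos_at_start = 1;
    *last_eos = *eos = 0;
    for (size_t i = 0; i < nb_nals; i++) {
        if (nal_types[i] == EOS_NUT || nal_types[i] == EOB_NUT) {
            if (eos_at_start)
                *last_eos = 1;
            else
                *eos = 1;
        } else {
            eos_at_start = 0;
        }
    }
}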
3359 
3360 static void print_md5(void *log_ctx, int level, uint8_t md5[16])
3361 {
3362  int i;
3363  for (i = 0; i < 16; i++)
3364  av_log(log_ctx, level, "%02"PRIx8, md5[i]);
3365 }
3366 
3367 static int verify_md5(HEVCContext *s, AVFrame *frame)
3368 {
3369  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
3370  int pixel_shift;
3371  int i, j;
3372 
3373  if (!desc)
3374  return AVERROR(EINVAL);
3375 
3376  pixel_shift = desc->comp[0].depth > 8;
3377 
3378  av_log(s->avctx, AV_LOG_DEBUG, "Verifying checksum for frame with POC %d: ",
3379  s->poc);
3380 
3381  /* the checksums are LE, so we have to byteswap for >8bpp formats
3382  * on BE arches */
3383 #if HAVE_BIGENDIAN
3384  if (pixel_shift && !s->checksum_buf) {
3385  av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size,
3386  FFMAX3(frame->linesize[0], frame->linesize[1],
3387  frame->linesize[2]));
3388  if (!s->checksum_buf)
3389  return AVERROR(ENOMEM);
3390  }
3391 #endif
3392 
3393  for (i = 0; frame->data[i]; i++) {
3394  int width = s->avctx->coded_width;
3395  int height = s->avctx->coded_height;
3396  int w = (i == 1 || i == 2) ? (width >> desc->log2_chroma_w) : width;
3397  int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height;
3398  uint8_t md5[16];
3399 
3400  av_md5_init(s->md5_ctx);
3401  for (j = 0; j < h; j++) {
3402  const uint8_t *src = frame->data[i] + j * frame->linesize[i];
3403 #if HAVE_BIGENDIAN
3404  if (pixel_shift) {
3405  s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf,
3406  (const uint16_t *) src, w);
3407  src = s->checksum_buf;
3408  }
3409 #endif
3410  av_md5_update(s->md5_ctx, src, w << pixel_shift);
3411  }
3412  av_md5_final(s->md5_ctx, md5);
3413 
3414  if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) {
3415  av_log (s->avctx, AV_LOG_DEBUG, "plane %d - correct ", i);
3416  print_md5(s->avctx, AV_LOG_DEBUG, md5);
3417  av_log (s->avctx, AV_LOG_DEBUG, "; ");
3418  } else {
3419  av_log (s->avctx, AV_LOG_ERROR, "mismatching checksum of plane %d - ", i);
3420  print_md5(s->avctx, AV_LOG_ERROR, md5);
3421  av_log (s->avctx, AV_LOG_ERROR, " != ");
3422  print_md5(s->avctx, AV_LOG_ERROR, s->sei.picture_hash.md5[i]);
3423  av_log (s->avctx, AV_LOG_ERROR, "\n");
3424  return AVERROR_INVALIDDATA;
3425  }
3426  }
3427 
3428  av_log(s->avctx, AV_LOG_DEBUG, "\n");
3429 
3430  return 0;
3431 }
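verify_md5() hashes every plane row by row, so only the coded width of each line is covered and the allocation padding between rows (linesize minus width) never enters the hash compared against the picture_hash SEI. A minimal sketch of that per-plane hashing using the same libavutil MD5 API; the helper name and signature are ours.

#include <stdint.h>
#include <libavutil/md5.h>
#include <libavutil/mem.h>

/* Hypothetical helper: MD5 of one picture plane, hashing width_bytes of each
 * row so that the row padding is excluded, as in verify_md5() above.
 * Returns 0 on success, -1 on allocation failure. */
static int md5_plane(const uint8_t *data, int linesize,
                     int width_bytes, int height, uint8_t out[16])
{
    struct AVMD5 *ctx = av_md5_alloc();
    if (!ctx)
        return -1;
    av_md5_init(ctx);
    for (int y = 0; y < height; y++)
        av_md5_update(ctx, data + y * (ptrdiff_t)linesize, width_bytes);
    av_md5_final(ctx, out);
    av_free(ctx);
    return 0;
}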
3432 
3433 static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
3434 {
3435  int ret, i;
3436 
3437  ret = ff_hevc_decode_extradata(buf, length, &s->ps, &s->sei, &s->is_nalff,
3438  &s->nal_length_size, s->avctx->err_recognition,
3439  s->apply_defdispwin, s->avctx);
3440  if (ret < 0)
3441  return ret;
3442 
3443  /* export stream parameters from the first SPS */
3444  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3445  if (first && s->ps.sps_list[i]) {
3446  const HEVCSPS *sps = (const HEVCSPS*)s->ps.sps_list[i]->data;
3447  export_stream_params(s, sps);
3448  break;
3449  }
3450  }
3451 
3452  /* export stream parameters from SEI */
3453  ret = export_stream_params_from_sei(s);
3454  if (ret < 0)
3455  return ret;
3456 
3457  return 0;
3458 }
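hevc_decode_extradata() accepts both MP4-style "hvcC" configuration records and raw Annex B parameter sets; ff_hevc_decode_extradata() detects which form it was given and records the result in s->is_nalff and s->nal_length_size. The sketch below is a simplified stand-in for that detection, not the exact test FFmpeg uses: it only looks for an Annex B start code and treats everything else as hvcC.

#include <stdint.h>
#include <stddef.h>

/* Simplified stand-in: raw Annex B extradata begins with a 00 00 01 or
 * 00 00 00 01 start code; anything else is assumed to be an hvcC record. */
static int extradata_is_annexb(const uint8_t *data, size_t size)
{
    if (size >= 3 && data[0] == 0 && data[1] == 0 && data[2] == 1)
        return 1;
    if (size >= 4 && data[0] == 0 && data[1] == 0 && data[2] == 0 && data[3] == 1)
        return 1;
    return 0;
}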
3459 
3460 static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output,
3461  AVPacket *avpkt)
3462 {
3463  int ret;
3464  uint8_t *sd;
3465  size_t sd_size;
3466  HEVCContext *s = avctx->priv_data;
3467 
3468  if (!avpkt->size) {
3469  ret = ff_hevc_output_frame(s, data, 1);
3470  if (ret < 0)
3471  return ret;
3472 
3473  *got_output = ret;
3474  return 0;
3475  }
3476 
3477  sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &sd_size);
3478  if (sd && sd_size > 0) {
3479  ret = hevc_decode_extradata(s, sd, sd_size, 0);
3480  if (ret < 0)
3481  return ret;
3482  }
3483 
3484  sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_DOVI_CONF, &sd_size);
3485  if (sd && sd_size > 0)
3486  ff_dovi_update_cfg(&s->dovi_ctx, (AVDOVIDecoderConfigurationRecord *) sd);
3487 
3488  s->ref = NULL;
3489  ret = decode_nal_units(s, avpkt->data, avpkt->size);
3490  if (ret < 0)
3491  return ret;
3492 
3493  if (avctx->hwaccel) {
3494  if (s->ref && (ret = avctx->hwaccel->end_frame(avctx)) < 0) {
3495  av_log(avctx, AV_LOG_ERROR,
3496  "hardware accelerator failed to decode picture\n");
3497  ff_hevc_unref_frame(s, s->ref, ~0);
3498  return ret;
3499  }
3500  } else {
3501  /* verify the SEI checksum */
3502  if (avctx->err_recognition & AV_EF_CRCCHECK && s->is_decoded &&
3503  s->sei.picture_hash.is_md5) {
3504  ret = verify_md5(s, s->ref->frame);
3505  if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
3506  ff_hevc_unref_frame(s, s->ref, ~0);
3507  return ret;
3508  }
3509  }
3510  }
3511  s->sei.picture_hash.is_md5 = 0;
3512 
3513  if (s->is_decoded) {
3514  av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc);
3515  s->is_decoded = 0;
3516  }
3517 
3518  if (s->output_frame->buf[0]) {
3519  av_frame_move_ref(data, s->output_frame);
3520  *got_output = 1;
3521  }
3522 
3523  return avpkt->size;
3524 }
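hevc_decode_frame() is the decoder's internal AVCodec.decode callback; applications do not call it directly but go through the generic packet/frame API, which drives it and hands back the reordered picture taken from s->output_frame. A minimal caller-side sketch, error handling trimmed and the function name ours:

#include <libavcodec/avcodec.h>

/* Send one packet and drain any frames it made available; pkt == NULL
 * flushes the decoder at end of stream. */
static int decode_one_packet(AVCodecContext *avctx, const AVPacket *pkt,
                             AVFrame *frame)
{
    int ret = avcodec_send_packet(avctx, pkt);
    if (ret < 0)
        return ret;
    while ((ret = avcodec_receive_frame(avctx, frame)) >= 0) {
        /* consume frame->data[] / frame->pts here ... */
        av_frame_unref(frame);
    }
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}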
3525 
3526 static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
3527 {
3528  int ret;
3529 
3530  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
3531  if (ret < 0)
3532  return ret;
3533 
3534  if (src->needs_fg) {
3535  ret = ff_thread_ref_frame(&dst->tf_grain, &src->tf_grain);
3536  if (ret < 0)
3537  return ret;
3538  dst->needs_fg = 1;
3539  }
3540 
3541  dst->tab_mvf_buf = av_buffer_ref(src->tab_mvf_buf);
3542  if (!dst->tab_mvf_buf)
3543  goto fail;
3544  dst->tab_mvf = src->tab_mvf;
3545 
3546  dst->rpl_tab_buf = av_buffer_ref(src->rpl_tab_buf);
3547  if (!dst->rpl_tab_buf)
3548  goto fail;
3549  dst->rpl_tab = src->rpl_tab;
3550 
3551  dst->rpl_buf = av_buffer_ref(src->rpl_buf);
3552  if (!dst->rpl_buf)
3553  goto fail;
3554 
3555  dst->poc = src->poc;
3556  dst->ctb_count = src->ctb_count;
3557  dst->flags = src->flags;
3558  dst->sequence = src->sequence;
3559 
3560  if (src->hwaccel_picture_private) {
3561  dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
3562  if (!dst->hwaccel_priv_buf)
3563  goto fail;
3564  dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
3565  }
3566 
3567  return 0;
3568 fail:
3569  ff_hevc_unref_frame(s, dst, ~0);
3570  return AVERROR(ENOMEM);
3571 }
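hevc_ref_frame() duplicates a DPB entry by taking new references to the shared buffers (motion-vector table, reference-list tables, hwaccel state) rather than copying them, and unwinds on failure so no reference is leaked. The same AVBufferRef pattern shown in isolation; the Owner type and function names are illustrative.

#include <libavutil/buffer.h>
#include <libavutil/error.h>

typedef struct Owner { AVBufferRef *buf; } Owner;

/* Give dst its own reference to the buffer src already holds. */
static int owner_ref(Owner *dst, const Owner *src)
{
    dst->buf = av_buffer_ref(src->buf);  /* new ref, same underlying data */
    if (!dst->buf)
        return AVERROR(ENOMEM);
    return 0;
}

/* Drop the reference; the data is freed only when the last ref goes away. */
static void owner_unref(Owner *o)
{
    av_buffer_unref(&o->buf);
}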
3572 
3573 static av_cold int hevc_decode_free(AVCodecContext *avctx)
3574 {
3575  HEVCContext *s = avctx->priv_data;
3576  int i;
3577 
3578  pic_arrays_free(s);
3579 
3580  ff_dovi_ctx_unref(&s->dovi_ctx);
3581  av_buffer_unref(&s->rpu_buf);
3582 
3583  av_freep(&s->md5_ctx);
3584 
3585  av_freep(&s->cabac_state);
3586 
3587  for (i = 0; i < 3; i++) {
3588  av_freep(&s->sao_pixel_buffer_h[i]);
3589  av_freep(&s->sao_pixel_buffer_v[i]);
3590  }
3591  av_frame_free(&s->output_frame);
3592 
3593  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3594  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3595  av_frame_free(&s->DPB[i].frame);
3596  av_frame_free(&s->DPB[i].frame_grain);
3597  }
3598 
3599  ff_hevc_ps_uninit(&s->ps);
3600 
3601  av_freep(&s->sh.entry_point_offset);
3602  av_freep(&s->sh.offset);
3603  av_freep(&s->sh.size);
3604 
3605  if (s->HEVClcList && s->sList) {
3606  for (i = 1; i < s->threads_number; i++) {
3607  av_freep(&s->HEVClcList[i]);
3608  av_freep(&s->sList[i]);
3609  }
3610  }
3611  av_freep(&s->HEVClc);
3612  av_freep(&s->HEVClcList);
3613  av_freep(&s->sList);
3614 
3615  ff_h2645_packet_uninit(&s->pkt);
3616 
3617  ff_hevc_reset_sei(&s->sei);
3618 
3619  return 0;
3620 }
3621 
3622 static av_cold int hevc_init_context(AVCodecContext *avctx)
3623 {
3624  HEVCContext *s = avctx->priv_data;
3625  int i;
3626 
3627  s->avctx = avctx;
3628 
3629  s->HEVClc = av_mallocz(sizeof(HEVCLocalContext));
3630  s->HEVClcList = av_mallocz(sizeof(HEVCLocalContext*) * s->threads_number);
3631  s->sList = av_mallocz(sizeof(HEVCContext*) * s->threads_number);
3632  if (!s->HEVClc || !s->HEVClcList || !s->sList)
3633  goto fail;
3634  s->HEVClcList[0] = s->HEVClc;
3635  s->sList[0] = s;
3636 
3637  s->cabac_state = av_malloc(HEVC_CONTEXTS);
3638  if (!s->cabac_state)
3639  goto fail;
3640 
3641  s->output_frame = av_frame_alloc();
3642  if (!s->output_frame)
3643  goto fail;
3644 
3645  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3646  s->DPB[i].frame = av_frame_alloc();
3647  if (!s->DPB[i].frame)
3648  goto fail;
3649  s->DPB[i].tf.f = s->DPB[i].frame;
3650 
3651  s->DPB[i].frame_grain = av_frame_alloc();
3652  if (!s->DPB[i].frame_grain)
3653  goto fail;
3654  s->DPB[i].tf_grain.f = s->DPB[i].frame_grain;
3655  }
3656 
3657  s->max_ra = INT_MAX;
3658 
3659  s->md5_ctx = av_md5_alloc();
3660  if (!s->md5_ctx)
3661  goto fail;
3662 
3663  ff_bswapdsp_init(&s->bdsp);
3664 
3665  s->dovi_ctx.logctx = avctx;
3666  s->context_initialized = 1;
3667  s->eos = 0;
3668 
3669  ff_hevc_reset_sei(&s->sei);
3670 
3671  return 0;
3672 
3673 fail:
3674  hevc_decode_free(avctx);
3675  return AVERROR(ENOMEM);
3676 }
3677 
3678 #if HAVE_THREADS
3679 static int hevc_update_thread_context(AVCodecContext *dst,
3680  const AVCodecContext *src)
3681 {
3682  HEVCContext *s = dst->priv_data;
3683  HEVCContext *s0 = src->priv_data;
3684  int i, ret;
3685 
3686  if (!s->context_initialized) {
3687  ret = hevc_init_context(dst);
3688  if (ret < 0)
3689  return ret;
3690  }
3691 
3692  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3693  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3694  if (s0->DPB[i].frame->buf[0]) {
3695  ret = hevc_ref_frame(s, &s->DPB[i], &s0->DPB[i]);
3696  if (ret < 0)
3697  return ret;
3698  }
3699  }
3700 
3701  if (s->ps.sps != s0->ps.sps)
3702  s->ps.sps = NULL;
3703  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++) {
3704  ret = av_buffer_replace(&s->ps.vps_list[i], s0->ps.vps_list[i]);
3705  if (ret < 0)
3706  return ret;
3707  }
3708 
3709  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3710  ret = av_buffer_replace(&s->ps.sps_list[i], s0->ps.sps_list[i]);
3711  if (ret < 0)
3712  return ret;
3713  }
3714 
3715  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++) {
3716  ret = av_buffer_replace(&s->ps.pps_list[i], s0->ps.pps_list[i]);
3717  if (ret < 0)
3718  return ret;
3719  }
3720 
3721  if (s->ps.sps != s0->ps.sps)
3722  if ((ret = set_sps(s, s0->ps.sps, src->pix_fmt)) < 0)
3723  return ret;
3724 
3725  s->seq_decode = s0->seq_decode;
3726  s->seq_output = s0->seq_output;
3727  s->pocTid0 = s0->pocTid0;
3728  s->max_ra = s0->max_ra;
3729  s->eos = s0->eos;
3730  s->no_rasl_output_flag = s0->no_rasl_output_flag;
3731 
3732  s->is_nalff = s0->is_nalff;
3733  s->nal_length_size = s0->nal_length_size;
3734 
3735  s->threads_number = s0->threads_number;
3736  s->threads_type = s0->threads_type;
3737 
3738  if (s0->eos) {
3739  s->seq_decode = (s->seq_decode + 1) & 0xff;
3740  s->max_ra = INT_MAX;
3741  }
3742 
3743  ret = av_buffer_replace(&s->sei.a53_caption.buf_ref, s0->sei.a53_caption.buf_ref);
3744  if (ret < 0)
3745  return ret;
3746 
3747  for (i = 0; i < s->sei.unregistered.nb_buf_ref; i++)
3748  av_buffer_unref(&s->sei.unregistered.buf_ref[i]);
3749  s->sei.unregistered.nb_buf_ref = 0;
3750 
3751  if (s0->sei.unregistered.nb_buf_ref) {
3752  ret = av_reallocp_array(&s->sei.unregistered.buf_ref,
3753  s0->sei.unregistered.nb_buf_ref,
3754  sizeof(*s->sei.unregistered.buf_ref));
3755  if (ret < 0)
3756  return ret;
3757 
3758  for (i = 0; i < s0->sei.unregistered.nb_buf_ref; i++) {
3759  s->sei.unregistered.buf_ref[i] = av_buffer_ref(s0->sei.unregistered.buf_ref[i]);
3760  if (!s->sei.unregistered.buf_ref[i])
3761  return AVERROR(ENOMEM);
3762  s->sei.unregistered.nb_buf_ref++;
3763  }
3764  }
3765 
3766  ret = av_buffer_replace(&s->sei.dynamic_hdr_plus.info, s0->sei.dynamic_hdr_plus.info);
3767  if (ret < 0)
3768  return ret;
3769 
3770  ret = av_buffer_replace(&s->rpu_buf, s0->rpu_buf);
3771  if (ret < 0)
3772  return ret;
3773 
3774  ret = ff_dovi_ctx_replace(&s->dovi_ctx, &s0->dovi_ctx);
3775  if (ret < 0)
3776  return ret;
3777 
3778  s->sei.frame_packing = s0->sei.frame_packing;
3779  s->sei.display_orientation = s0->sei.display_orientation;
3780  s->sei.mastering_display = s0->sei.mastering_display;
3781  s->sei.content_light = s0->sei.content_light;
3782  s->sei.alternative_transfer = s0->sei.alternative_transfer;
3783 
3784  ret = export_stream_params_from_sei(s);
3785  if (ret < 0)
3786  return ret;
3787 
3788  return 0;
3789 }
3790 #endif
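hevc_update_thread_context() mirrors the VPS/SPS/PPS lists and several SEI buffers into the destination thread context with av_buffer_replace(), which drops the old destination reference and creates a new reference to the source buffer (or simply unrefs the destination when the source is NULL). A small sketch of that contract over an array of slots; the array length and function name are illustrative.

#include <libavutil/buffer.h>

/* Mirror one list of reference-counted parameter sets: after the call each
 * dst[i] refers to the same data as src[i], without copying the payload. */
static int sync_ps_list(AVBufferRef *dst[16], AVBufferRef * const src[16])
{
    for (int i = 0; i < 16; i++) {
        int ret = av_buffer_replace(&dst[i], src[i]);
        if (ret < 0)
            return ret;   /* allocation failure */
    }
    return 0;
}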
3791 
3792 static av_cold int hevc_decode_init(AVCodecContext *avctx)
3793 {
3794  HEVCContext *s = avctx->priv_data;
3795  int ret;
3796 
3797  if(avctx->active_thread_type & FF_THREAD_SLICE)
3798  s->threads_number = avctx->thread_count;
3799  else
3800  s->threads_number = 1;
3801 
3802  if((avctx->active_thread_type & FF_THREAD_FRAME) && avctx->thread_count > 1)
3803  s->threads_type = FF_THREAD_FRAME;
3804  else
3805  s->threads_type = FF_THREAD_SLICE;
3806 
3807  ret = hevc_init_context(avctx);
3808  if (ret < 0)
3809  return ret;
3810 
3811  s->enable_parallel_tiles = 0;
3812  s->sei.picture_timing.picture_struct = 0;
3813  s->eos = 1;
3814 
3815  atomic_init(&s->wpp_err, 0);
3816 
3817  if (!avctx->internal->is_copy) {
3818  if (avctx->extradata_size > 0 && avctx->extradata) {
3819  ret = hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size, 1);
3820  if (ret < 0) {
3821  return ret;
3822  }
3823  }
3824  }
3825 
3826  return 0;
3827 }
3828 
3829 static void hevc_decode_flush(AVCodecContext *avctx)
3830 {
3831  HEVCContext *s = avctx->priv_data;
3832  ff_hevc_flush_dpb(s);
3833  ff_hevc_reset_sei(&s->sei);
3834  ff_dovi_ctx_flush(&s->dovi_ctx);
3835  av_buffer_unref(&s->rpu_buf);
3836  s->max_ra = INT_MAX;
3837  s->eos = 1;
3838 }
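hevc_decode_flush() is the decoder's flush callback: it resets the buffered SEI, Dolby Vision RPU and random-access state so decoding can restart cleanly. From the application side it is reached through avcodec_flush_buffers(), typically right after a seek. A caller-side sketch; the function name and parameters are placeholders.

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Seek the demuxer, then drop all buffered decoder state; the flush call is
 * what ends up invoking the decoder's flush callback above. */
static int seek_and_flush(AVFormatContext *fmt, AVCodecContext *dec,
                          int stream_index, int64_t ts)
{
    int ret = av_seek_frame(fmt, stream_index, ts, AVSEEK_FLAG_BACKWARD);
    if (ret < 0)
        return ret;
    avcodec_flush_buffers(dec);
    return 0;
}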
3839 
3840 #define OFFSET(x) offsetof(HEVCContext, x)
3841 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
3842 
3843 static const AVOption options[] = {
3844  { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
3845  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3846  { "strict-displaywin", "strictly apply default display window size", OFFSET(apply_defdispwin),
3847  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3848  { NULL },
3849 };
3850 
3851 static const AVClass hevc_decoder_class = {
3852  .class_name = "HEVC decoder",
3853  .item_name = av_default_item_name,
3854  .option = options,
3855  .version = LIBAVUTIL_VERSION_INT,
3856 };
3857 
3858 const AVCodec ff_hevc_decoder = {
3859  .name = "hevc",
3860  .long_name = NULL_IF_CONFIG_SMALL("HEVC (High Efficiency Video Coding)"),
3861  .type = AVMEDIA_TYPE_VIDEO,
3862  .id = AV_CODEC_ID_HEVC,
3863  .priv_data_size = sizeof(HEVCContext),
3864  .priv_class = &hevc_decoder_class,
3865  .init = hevc_decode_init,
3866  .close = hevc_decode_free,
3867  .decode = hevc_decode_frame,
3868  .flush = hevc_decode_flush,
3869  .update_thread_context = ONLY_IF_THREADS_ENABLED(hevc_update_thread_context),
3870  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
3871  AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
3872  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_EXPORTS_CROPPING |
3873  FF_CODEC_CAP_ALLOCATE_PROGRESS,
3874  .profiles = NULL_IF_CONFIG_SMALL(ff_hevc_profiles),
3875  .hw_configs = (const AVCodecHWConfigInternal *const []) {
3876 #if CONFIG_HEVC_DXVA2_HWACCEL
3877  HWACCEL_DXVA2(hevc),
3878 #endif
3879 #if CONFIG_HEVC_D3D11VA_HWACCEL
3880  HWACCEL_D3D11VA(hevc),
3881 #endif
3882 #if CONFIG_HEVC_D3D11VA2_HWACCEL
3883  HWACCEL_D3D11VA2(hevc),
3884 #endif
3885 #if CONFIG_HEVC_NVDEC_HWACCEL
3886  HWACCEL_NVDEC(hevc),
3887 #endif
3888 #if CONFIG_HEVC_VAAPI_HWACCEL
3889  HWACCEL_VAAPI(hevc),
3890 #endif
3891 #if CONFIG_HEVC_VDPAU_HWACCEL
3892  HWACCEL_VDPAU(hevc),
3893 #endif
3894 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
3895  HWACCEL_VIDEOTOOLBOX(hevc),
3896 #endif
3897  NULL
3898  },
3899 };
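The options[] table above declares apply_defdispwin (and its strict-displaywin alias) as decoder-private AVOptions, so callers enable them through an options dictionary when opening the codec. A sketch assuming the FFmpeg 5.x public API; the wrapper function name is ours.

#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

/* Open the HEVC decoder with the private "apply_defdispwin" option enabled.
 * Returns an opened context, or NULL on failure. */
static AVCodecContext *open_hevc_with_defdispwin(void)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_HEVC);
    AVCodecContext *ctx  = codec ? avcodec_alloc_context3(codec) : NULL;
    AVDictionary *opts   = NULL;
    if (!ctx)
        return NULL;
    av_dict_set(&opts, "apply_defdispwin", "1", 0);
    if (avcodec_open2(ctx, codec, &opts) < 0)
        avcodec_free_context(&ctx);   /* leaves ctx == NULL on failure */
    av_dict_free(&opts);
    return ctx;
}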
Definition: error.h:121
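For illustration (a hedged sketch, not taken from this file): the macro is meant to be expanded in place, typically inside a logging call:

#include "libavutil/error.h"
#include "libavutil/log.h"

static void log_decode_error(void *logctx, int err)
{
    /* Use av_err2str() directly as an argument, per its documented contract. */
    av_log(logctx, AV_LOG_ERROR, "decoding failed: %s\n", av_err2str(err));
}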
INTRA_PLANAR
@ INTRA_PLANAR
Definition: hevcdec.h:174
HEVCFrame::rpl_buf
AVBufferRef * rpl_buf
Definition: hevcdec.h:410
ff_hevc_decode_nal_sps
int ff_hevc_decode_nal_sps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps, int apply_defdispwin)
Definition: hevc_ps.c:1250
PART_2NxnD
@ PART_2NxnD
Definition: hevcdec.h:149
ff_hevc_cabac_init
int ff_hevc_cabac_init(HEVCContext *s, int ctb_addr_ts, int thread)
Definition: hevc_cabac.c:511
size
int size
Definition: twinvq_data.h:10344
HEVC_NAL_BLA_W_LP
@ HEVC_NAL_BLA_W_LP
Definition: hevc.h:45
SCAN_VERT
@ SCAN_VERT
Definition: hevcdec.h:228
VUI::transfer_characteristic
uint8_t transfer_characteristic
Definition: hevc_ps.h:60
ff_hevc_compute_poc
int ff_hevc_compute_poc(const HEVCSPS *sps, int pocTid0, int poc_lsb, int nal_unit_type)
Compute POC of the current frame and return it.
Definition: hevc_ps.c:1763
H2645NAL::gb
GetBitContext gb
Definition: h2645_parse.h:47
intra_prediction_unit_default_value
static void intra_prediction_unit_default_value(HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:2146
SliceHeader::collocated_ref_idx
unsigned int collocated_ref_idx
Definition: hevcdec.h:289
SliceHeader::entry_point_offset
unsigned * entry_point_offset
Definition: hevcdec.h:302
VUI
Definition: hevc_ps.h:49
H2645NAL
Definition: h2645_parse.h:34
hevc_await_progress
static void hevc_await_progress(HEVCContext *s, HEVCFrame *ref, const Mv *mv, int y0, int height)
Definition: hevcdec.c:1800
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:411
ff_hevc_decode_nal_vps
int ff_hevc_decode_nal_vps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps)
Definition: hevc_ps.c:458
pic_arrays_free
static void pic_arrays_free(HEVCContext *s)
NOTE: Each function hls_foo corresponds to the function foo in the specification (HLS stands for High ...

Definition: hevcdec.c:61
AVFrameSideData::data
uint8_t * data
Definition: frame.h:225
TransformUnit::chroma_mode_c
int chroma_mode_c
Definition: hevcdec.h:377
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1452
AVFilmGrainParams
This structure describes how to handle film grain synthesis in video for specific codecs.
Definition: film_grain_params.h:216
GetBitContext::index
int index
Definition: get_bits.h:68
SliceHeader::short_term_ref_pic_set_sps_flag
int short_term_ref_pic_set_sps_flag
Definition: hevcdec.h:267
AVCHROMA_LOC_UNSPECIFIED
@ AVCHROMA_LOC_UNSPECIFIED
Definition: pixfmt.h:617
SliceHeader::no_output_of_prior_pics_flag
uint8_t no_output_of_prior_pics_flag
Definition: hevcdec.h:276
SliceHeader::max_num_merge_cand
unsigned int max_num_merge_cand
5 - 5_minus_max_num_merge_cand
Definition: hevcdec.h:300
AVCodecHWConfigInternal
Definition: hwconfig.h:29
MvField
Definition: hevcdec.h:344
QPEL_EXTRA
#define QPEL_EXTRA
Definition: hevcdec.h:67
PF_L1
@ PF_L1
Definition: hevcdec.h:169
ff_hevc_unref_frame
void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags)
Definition: hevc_refs.c:32
split
static char * split(char *message, char delim)
Definition: af_channelmap.c:81
get_format
static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
Definition: hevcdec.c:394
ff_h2645_packet_split
int ff_h2645_packet_split(H2645Packet *pkt, const uint8_t *buf, int length, void *logctx, int is_nalff, int nal_length_size, enum AVCodecID codec_id, int small_padding, int use_ref)
Split an input packet into NAL units.
Definition: h2645_parse.c:391
height
#define height
av_reallocp_array
int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array through a pointer to a pointer.
Definition: mem.c:232
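A small, generic sketch (names are placeholders) of the pointer-to-pointer contract: on failure the array is freed and the pointer reset to NULL, so no separate cleanup is needed:

#include "libavutil/mem.h"

static int grow_offset_table(unsigned **offsets, size_t new_count)
{
    int ret = av_reallocp_array(offsets, new_count, sizeof(**offsets));
    if (ret < 0)
        return ret;   /* *offsets has already been freed and set to NULL */
    return 0;
}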
hevc_frame_end
static int hevc_frame_end(HEVCContext *s)
Definition: hevcdec.c:3058
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:117
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:79
ff_hevc_pcm_flag_decode
int ff_hevc_pcm_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:749
av_content_light_metadata_create_side_data
AVContentLightMetadata * av_content_light_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVContentLightMetadata and add it to the frame.
Definition: mastering_display_metadata.c:55
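A hedged sketch of exporting content-light-level values onto an output frame, in the spirit of what set_side_data() does from the corresponding SEI; the numbers are placeholders:

#include "libavutil/error.h"
#include "libavutil/mastering_display_metadata.h"

static int export_light_level(AVFrame *frame)
{
    AVContentLightMetadata *clm = av_content_light_metadata_create_side_data(frame);
    if (!clm)
        return AVERROR(ENOMEM);
    clm->MaxCLL  = 1000; /* max content light level, cd/m^2 */
    clm->MaxFALL = 400;  /* max frame-average light level, cd/m^2 */
    return 0;
}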
offset
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this, just let it vf offset
Definition: writing_filters.txt:86
ff_hevc_cbf_cb_cr_decode
int ff_hevc_cbf_cb_cr_decode(HEVCContext *s, int trafo_depth)
Definition: hevc_cabac.c:878
attributes.h
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:303
av_buffer_alloc
AVBufferRef * av_buffer_alloc(size_t size)
Allocate an AVBuffer of the given size using av_malloc().
Definition: buffer.c:77
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
AVFilmGrainParams::h274
AVFilmGrainH274Params h274
Definition: film_grain_params.h:237
hls_slice_data
static int hls_slice_data(HEVCContext *s)
Definition: hevcdec.c:2527
TransformUnit::cu_qp_offset_cb
int8_t cu_qp_offset_cb
Definition: hevcdec.h:380
pic_arrays_init
static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
Definition: hevcdec.c:89
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
HEVCFrame::rpl_tab_buf
AVBufferRef * rpl_tab_buf
Definition: hevcdec.h:409
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
MvField::pred_flag
int8_t pred_flag
Definition: hevcdec.h:347
HEVCLocalContext::ct_depth
int ct_depth
Definition: hevcdec.h:456
src0
#define src0
Definition: h264pred.c:139
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1451
ff_init_cabac_decoder
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:165
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
PART_nRx2N
@ PART_nRx2N
Definition: hevcdec.h:151
EPEL_EXTRA_BEFORE
#define EPEL_EXTRA_BEFORE
Definition: hevcdec.h:62
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:272
SliceHeader::slice_cb_qp_offset
int slice_cb_qp_offset
Definition: hevcdec.h:292
SliceHeader
Definition: hevcdec.h:249
HEVCFrame::frame
AVFrame * frame
Definition: hevcdec.h:396
HEVC_NAL_TRAIL_R
@ HEVC_NAL_TRAIL_R
Definition: hevc.h:30
src1
#define src1
Definition: h264pred.c:140
hls_decode_entry
static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
Definition: hevcdec.c:2466
ff_hevc_inter_pred_idc_decode
int ff_hevc_inter_pred_idc_decode(HEVCContext *s, int nPbW, int nPbH)
Definition: hevc_cabac.c:804
ff_hevc_cu_qp_delta_sign_flag
int ff_hevc_cu_qp_delta_sign_flag(HEVCContext *s)
Definition: hevc_cabac.c:667
hevc_frame_start
static int hevc_frame_start(HEVCContext *s)
Definition: hevcdec.c:2987
av_md5_init
void av_md5_init(AVMD5 *ctx)
Initialize MD5 hashing.
Definition: md5.c:141
ff_h274_apply_film_grain
int ff_h274_apply_film_grain(AVFrame *out_frame, const AVFrame *in_frame, H274FilmGrainDatabase *database, const AVFilmGrainParams *params)
Definition: h274.c:217
SliceHeader::slice_sample_adaptive_offset_flag
uint8_t slice_sample_adaptive_offset_flag[3]
Definition: hevcdec.h:281
ff_hevc_decoder
const AVCodec ff_hevc_decoder
Definition: hevcdec.c:3858
AVDISCARD_NONINTRA
@ AVDISCARD_NONINTRA
discard all non intra frames
Definition: defs.h:52
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
av_timecode_make_smpte_tc_string2
char * av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field)
Get the timecode string from the SMPTE timecode format.
Definition: timecode.c:136
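For illustration only (the timecode value is whatever the caller supplies): formatting a 32-bit SMPTE timecode into a caller-provided buffer of AV_TIMECODE_STR_SIZE bytes:

#include "libavutil/timecode.h"

static const char *smpte_tc_to_string(char buf[AV_TIMECODE_STR_SIZE],
                                      AVRational rate, uint32_t tcsmpte)
{
    /* prevent_df = 0, skip_field = 0 */
    return av_timecode_make_smpte_tc_string2(buf, rate, tcsmpte, 0, 0);
}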
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1822
HEVCFrame
Definition: hevcdec.h:395
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:484
av_packet_get_side_data
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, size_t *size)
Get side information from packet.
Definition: avpacket.c:253
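A minimal sketch (assuming the demuxer attached the side data) of querying packet side data such as the Dolby Vision configuration record:

#include "libavcodec/packet.h"

static int has_dovi_config(const AVPacket *pkt)
{
    size_t size = 0;
    const uint8_t *data = av_packet_get_side_data(pkt, AV_PKT_DATA_DOVI_CONF, &size);
    return data && size > 0;
}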
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:50
HEVCLocalContext::gb
GetBitContext gb
Definition: hevcdec.h:434
ff_hevc_cbf_luma_decode
int ff_hevc_cbf_luma_decode(HEVCContext *s, int trafo_depth)
Definition: hevc_cabac.c:883
internal.h
EPEL_EXTRA_AFTER
#define EPEL_EXTRA_AFTER
Definition: hevcdec.h:63
AVFilmGrainH274Params::num_intensity_intervals
uint16_t num_intensity_intervals[3]
Specifies the number of intensity intervals for which a specific set of model values has been estimat...
Definition: film_grain_params.h:176
HEVCFrame::ctb_count
int ctb_count
Definition: hevcdec.h:404
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
display.h
SliceHeader::offset
int * offset
Definition: hevcdec.h:303
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:79
common.h
HEVCFrame::sequence
uint16_t sequence
A sequence counter, so that old frames are output first after a POC reset.
Definition: hevcdec.h:419
SliceHeader::mvd_l1_zero_flag
uint8_t mvd_l1_zero_flag
Definition: hevcdec.h:282
delta
float delta
Definition: vorbis_enc_data.h:430
md5.h
AV_CODEC_ID_HEVC
@ AV_CODEC_ID_HEVC
Definition: codec_id.h:224
ff_hevc_bump_frame
void ff_hevc_bump_frame(HEVCContext *s)
Definition: hevc_refs.c:241
av_always_inline
#define av_always_inline
Definition: attributes.h:49
HEVC_SLICE_P
@ HEVC_SLICE_P
Definition: hevc.h:97
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:462
PF_L0
@ PF_L0
Definition: hevcdec.h:168
EDGE_EMU_BUFFER_STRIDE
#define EDGE_EMU_BUFFER_STRIDE
Definition: hevcdec.h:69
tab_mode_idx
static const uint8_t tab_mode_idx[]
Definition: hevcdec.c:2074
cabac_functions.h
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:435
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:263
HEVCLocalContext::qp_y
int8_t qp_y
Definition: hevcdec.h:437
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:209
update_thread_context
the pkt_dts and pkt_pts fields in AVFrame will work as usual. Restrictions on codecs whose streams don't reset across will not work because their bitstreams cannot be decoded in parallel. The contents of buffers must not be read before, as well as code calling up to before the decode process starts. Call have update_thread_context() run it in the next thread. Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little speed gain at this point but it should work. If there are inter-frame dependencies
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:974
AVMasteringDisplayMetadata
Mastering display metadata capable of representing the color volume of the display used to master the...
Definition: mastering_display_metadata.h:38
HEVC_NAL_TSA_R
@ HEVC_NAL_TSA_R
Definition: hevc.h:32
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:526
SliceHeader::list_entry_lx
unsigned int list_entry_lx[2][32]
Definition: hevcdec.h:273
AVFilmGrainH274Params::color_primaries
enum AVColorPrimaries color_primaries
Definition: film_grain_params.h:150
AVCodecContext::height
int height
Definition: avcodec.h:556
HEVCSEIA53Caption::buf_ref
AVBufferRef * buf_ref
Definition: hevc_sei.h:61
hevc_decode_extradata
static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
Definition: hevcdec.c:3433
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:593
av_md5_final
void av_md5_final(AVMD5 *ctx, uint8_t *dst)
Finish hashing and output digest value.
Definition: md5.c:186
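A self-contained sketch of the alloc/init/update/final cycle used for decoded-picture-hash style checksums; the buffer and length are whatever the caller supplies:

#include "libavutil/error.h"
#include "libavutil/md5.h"
#include "libavutil/mem.h"

static int md5_digest(const uint8_t *data, size_t len, uint8_t digest[16])
{
    struct AVMD5 *md5 = av_md5_alloc();
    if (!md5)
        return AVERROR(ENOMEM);
    av_md5_init(md5);
    av_md5_update(md5, data, len);
    av_md5_final(md5, digest);
    av_freep(&md5);
    return 0;
}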
hevc_decode_init
static av_cold int hevc_decode_init(AVCodecContext *avctx)
Definition: hevcdec.c:3792
HEVCFrame::poc
int poc
Definition: hevcdec.h:405
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:580
AVFilmGrainH274Params::intensity_interval_lower_bound
uint8_t intensity_interval_lower_bound[3][256]
Specifies the lower bounds of each intensity interval for which the set of model values applies for the...
Definition: film_grain_params.h:188
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:271
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:77
hevc.h
ff_hevc_cu_chroma_qp_offset_idx
int ff_hevc_cu_chroma_qp_offset_idx(HEVCContext *s)
Definition: hevc_cabac.c:677
SAOParams
Definition: hevcdsp.h:34
SliceHeader::short_term_rps
const ShortTermRPS * short_term_rps
Definition: hevcdec.h:270
ff_dovi_rpu_parse
int ff_dovi_rpu_parse(DOVIContext *s, const uint8_t *rpu, size_t rpu_size)
Parse the contents of a Dovi RPU NAL and update the parsed values in the DOVIContext struct.
Definition: dovi_rpu.c:194
HEVC_NAL_VPS
@ HEVC_NAL_VPS
Definition: hevc.h:61
SliceHeader::cu_chroma_qp_offset_enabled_flag
uint8_t cu_chroma_qp_offset_enabled_flag
Definition: hevcdec.h:295
HEVC_NAL_IDR_W_RADL
@ HEVC_NAL_IDR_W_RADL
Definition: hevc.h:48
ff_hevc_cu_chroma_qp_offset_flag
int ff_hevc_cu_chroma_qp_offset_flag(HEVCContext *s)
Definition: hevc_cabac.c:672
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
ret
ret
Definition: filter_design.txt:187
AV_PKT_DATA_DOVI_CONF
@ AV_PKT_DATA_DOVI_CONF
DOVI configuration ref: dolby-vision-bitstreams-within-the-iso-base-media-file-format-v2....
Definition: packet.h:283
H2645NAL::raw_data
const uint8_t * raw_data
Definition: h2645_parse.h:45
ff_hevc_reset_sei
void ff_hevc_reset_sei(HEVCSEI *s)
Reset SEI values that are stored on the Context.
Definition: hevc_sei.c:553
VUI::colour_description_present_flag
int colour_description_present_flag
Definition: hevc_ps.h:58
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame. For filters that do not use the this method is called when a frame is wanted on an output. For a it should directly call filter_frame on the corresponding output. For a if there are queued frames already one of these frames should be pushed. If the filter should request a frame on one of its repeatedly until at least one frame has been pushed. Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
PRED_L1
@ PRED_L1
Definition: hevcdec.h:162
PredictionUnit::mvd
Mv mvd
Definition: hevcdec.h:363
SliceHeader::disable_deblocking_filter_flag
uint8_t disable_deblocking_filter_flag
slice_header_disable_deblocking_filter_flag
Definition: hevcdec.h:285
ff_hevc_dsp_init
void ff_hevc_dsp_init(HEVCDSPContext *hevcdsp, int bit_depth)
Definition: hevcdsp.c:126
HEVCLocalContext::edge_emu_buffer2
uint8_t edge_emu_buffer2[(MAX_PB_SIZE+7) *EDGE_EMU_BUFFER_STRIDE *2]
Definition: hevcdec.h:453
AV_EF_CRCCHECK
#define AV_EF_CRCCHECK
Verify checksums embedded in the bitstream (could be of either encoded or decoded data,...
Definition: avcodec.h:1332
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
hevc_init_context
static av_cold int hevc_init_context(AVCodecContext *avctx)
Definition: hevcdec.c:3622
pos
unsigned int pos
Definition: spdifenc.c:412
hevc_luma_mv_mvp_mode
static void hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv)
Definition: hevcdec.c:1810
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual. Restrictions on codecs whose streams don't reset across will not work because their bitstreams cannot be decoded in parallel. The contents of buffers must not be read before, as well as code calling up to before the decode process starts. Call ff_thread_finish_setup() afterwards. If some code can't be moved
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:408
HEVC_NAL_EOS_NUT
@ HEVC_NAL_EOS_NUT
Definition: hevc.h:65
ff_hevc_rem_intra_luma_pred_mode_decode
int ff_hevc_rem_intra_luma_pred_mode_decode(HEVCContext *s)
Definition: hevc_cabac.c:767
ff_hevc_frame_nb_refs
int ff_hevc_frame_nb_refs(const HEVCContext *s)
Get the number of candidate references for the current frame.
Definition: hevc_refs.c:511
hls_prediction_unit
static void hls_prediction_unit(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int partIdx, int idx)
Definition: hevcdec.c:1855
HEVCLocalContext::boundary_flags
int boundary_flags
Definition: hevcdec.h:467
HEVCSEIFilmGrainCharacteristics::comp_model_value
int16_t comp_model_value[3][256][6]
Definition: hevc_sei.h:129
HEVC_NAL_TRAIL_N
@ HEVC_NAL_TRAIL_N
Definition: hevc.h:29
LongTermRPS
Definition: hevcdec.h:231
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:101
SliceHeader::slice_type
enum HEVCSliceType slice_type
Definition: hevcdec.h:257
ff_hevc_flush_dpb
void ff_hevc_flush_dpb(HEVCContext *s)
Drop all frames currently in DPB.
Definition: hevc_refs.c:77
HEVC_NAL_AUD
@ HEVC_NAL_AUD
Definition: hevc.h:64
AV_FRAME_DATA_DYNAMIC_HDR_PLUS
@ AV_FRAME_DATA_DYNAMIC_HDR_PLUS
HDR dynamic metadata associated with a video frame.
Definition: frame.h:158
AVCodecContext
main external API structure.
Definition: avcodec.h:383
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1459
SliceHeader::slice_qp
int8_t slice_qp
Definition: hevcdec.h:307
hls_coding_quadtree
static int hls_coding_quadtree(HEVCContext *s, int x0, int y0, int log2_cb_size, int cb_depth)
Definition: hevcdec.c:2332
AV_FILM_GRAIN_PARAMS_H274
@ AV_FILM_GRAIN_PARAMS_H274
The union is valid when interpreted as AVFilmGrainH274Params (codec.h274)
Definition: film_grain_params.h:35
av_mastering_display_metadata_create_side_data
AVMasteringDisplayMetadata * av_mastering_display_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVMasteringDisplayMetadata and add it to the frame.
Definition: mastering_display_metadata.c:32
SUBDIVIDE
#define SUBDIVIDE(x, y, idx)
PredictionUnit::merge_flag
uint8_t merge_flag
Definition: hevcdec.h:364
av_md5_alloc
struct AVMD5 * av_md5_alloc(void)
Allocate an AVMD5 context.
Definition: md5.c:48
AV_PKT_DATA_NEW_EXTRADATA
@ AV_PKT_DATA_NEW_EXTRADATA
The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format that the extradata buffer was...
Definition: packet.h:55
AVRational::den
int den
Denominator.
Definition: rational.h:60
pred_weight_table
static int pred_weight_table(HEVCContext *s, GetBitContext *gb)
Definition: hevcdec.c:145
SliceHeader::slice_cr_qp_offset
int slice_cr_qp_offset
Definition: hevcdec.h:293
export_stream_params_from_sei
static int export_stream_params_from_sei(HEVCContext *s)
Definition: hevcdec.c:375
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
HEVCContext
Definition: hevcdec.h:470
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1525
CodingUnit::pred_mode
enum PredMode pred_mode
PredMode.
Definition: hevcdec.h:330
SliceHeader::pic_order_cnt_lsb
int pic_order_cnt_lsb
Definition: hevcdec.h:259
HEVCSEIFilmGrainCharacteristics::transfer_characteristics
int transfer_characteristics
Definition: hevc_sei.h:120
HEVCLocalContext::qPy_pred
int qPy_pred
Definition: hevcdec.h:440
ff_thread_get_format
FF_DISABLE_DEPRECATION_WARNINGS enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
Definition: pthread_frame.c:1020
HEVCFrame::tab_mvf_buf
AVBufferRef * tab_mvf_buf
Definition: hevcdec.h:408
AVFilmGrainH274Params::log2_scale_factor
int log2_scale_factor
Specifies a scale factor used in the film grain characterization equations.
Definition: film_grain_params.h:165
SCAN_DIAG
@ SCAN_DIAG
Definition: hevcdec.h:226
SliceHeader::rpl_modification_flag
uint8_t rpl_modification_flag[2]
Definition: hevcdec.h:275
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:82
AVMasteringDisplayMetadata::min_luminance
AVRational min_luminance
Min luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:52
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:1824
av_md5_update
void av_md5_update(AVMD5 *ctx, const uint8_t *src, size_t len)
Update hash value.
Definition: md5.c:151
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVFilmGrainH274Params::num_model_values
uint8_t num_model_values[3]
Specifies the number of model values present for each intensity interval in which the film grain has ...
Definition: film_grain_params.h:182
HEVCLocalContext::tu
TransformUnit tu
Definition: hevcdec.h:442
hls_cross_component_pred
static int hls_cross_component_pred(HEVCContext *s, int idx)
Definition: hevcdec.c:1076
hls_slice_header
static int hls_slice_header(HEVCContext *s)
Definition: hevcdec.c:552
CodingUnit::y
int y
Definition: hevcdec.h:328
HEVCFrame::tf_grain
ThreadFrame tf_grain
Definition: hevcdec.h:399
set_side_data
static int set_side_data(HEVCContext *s)
Definition: hevcdec.c:2723
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:571
HEVCSEIFilmGrainCharacteristics::num_model_values
uint8_t num_model_values[3]
Definition: hevc_sei.h:126
desc
const char * desc
Definition: libsvtav1.c:79
Mv
Definition: hevcdec.h:339
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
HEVC_NAL_SPS
@ HEVC_NAL_SPS
Definition: hevc.h:62
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
PRED_L0
@ PRED_L0
Definition: hevcdec.h:161
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
get_bitsz
static av_always_inline int get_bitsz(GetBitContext *s, int n)
Read 0-25 bits.
Definition: get_bits.h:416
HEVCVPS
Definition: hevc_ps.h:123
VUI::video_signal_type_present_flag
int video_signal_type_present_flag
Definition: hevc_ps.h:55
mastering_display_metadata.h
get_ue_golomb_long
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:106
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: internal.h:77
ff_hevc_sao_band_position_decode
int ff_hevc_sao_band_position_decode(HEVCContext *s)
Definition: hevc_cabac.c:583
EPEL_EXTRA
#define EPEL_EXTRA
Definition: hevcdec.h:64
ff_hevc_part_mode_decode
int ff_hevc_part_mode_decode(HEVCContext *s, int log2_cb_size)
Definition: hevc_cabac.c:712
s0
#define s0
Definition: regdef.h:37
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
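A hedged sketch of tagging a frame with stereo packing information, similar in spirit to frame-packing SEI handling; the chosen type here is illustrative:

#include "libavutil/error.h"
#include "libavutil/stereo3d.h"

static int tag_top_bottom(AVFrame *frame, int invert)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
    if (!stereo)
        return AVERROR(ENOMEM);
    stereo->type = AV_STEREO3D_TOPBOTTOM;
    if (invert)
        stereo->flags |= AV_STEREO3D_FLAG_INVERT;
    return 0;
}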
HEVCSPS
Definition: hevc_ps.h:153
AVFilmGrainH274Params::color_trc
enum AVColorTransferCharacteristic color_trc
Definition: film_grain_params.h:151
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:223
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
HEVCPPS
Definition: hevc_ps.h:249
CodingUnit::part_mode
enum PartMode part_mode
PartMode.
Definition: hevcdec.h:331
AVStereo3D::view
enum AVStereo3DView view
Determines which views are packed.
Definition: stereo3d.h:190
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
SliceHeader::tc_offset
int tc_offset
tc_offset_div2 * 2
Definition: hevcdec.h:298
LongTermRPS::nb_refs
uint8_t nb_refs
Definition: hevcdec.h:235
AVFilmGrainParams::codec
union AVFilmGrainParams::@294 codec
Additional fields may be added both here and in any structure included.
AVPacket
This structure stores compressed data.
Definition: packet.h:350
AVContentLightMetadata::MaxFALL
unsigned MaxFALL
Max average light level per frame (cd/m^2).
Definition: mastering_display_metadata.h:107
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:410
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:241
TransformUnit::cross_pf
uint8_t cross_pf
Definition: hevcdec.h:382
SAOParams::offset_val
int16_t offset_val[3][5]
SaoOffsetVal.
Definition: hevcdsp.h:42
HEVCLocalContext::cu
CodingUnit cu
Definition: hevcdec.h:457
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
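As an illustrative sketch only (the key name is an assumption, not a guarantee of this decoder's behaviour): attaching a string value to a frame's metadata dictionary:

#include "libavutil/dict.h"
#include "libavutil/frame.h"

static int set_timecode_metadata(AVFrame *frame, const char *tc_string)
{
    /* flags = 0: both key and value are copied into the dictionary. */
    return av_dict_set(&frame->metadata, "timecode", tc_string, 0);
}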
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:560
SliceHeader::pps_id
unsigned int pps_id
address (in raster order) of the first block in the current slice segment
Definition: hevcdec.h:250
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:556
int32_t
int32_t
Definition: audioconvert.c:56
bytestream.h
ff_hevc_decode_short_term_rps
int ff_hevc_decode_short_term_rps(GetBitContext *gb, AVCodecContext *avctx, ShortTermRPS *rps, const HEVCSPS *sps, int is_slice_header)
Definition: hevc_ps.c:119
PredictionUnit::mpm_idx
int mpm_idx
Definition: hevcdec.h:360
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:362
VUI::video_full_range_flag
int video_full_range_flag
Definition: hevc_ps.h:57
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
HEVC_NAL_FD_NUT
@ HEVC_NAL_FD_NUT
Definition: hevc.h:67
PredictionUnit::chroma_mode_c
uint8_t chroma_mode_c[4]
Definition: hevcdec.h:366
AVFilmGrainH274Params::bit_depth_chroma
int bit_depth_chroma
Specifies the bit depth used for the chroma components.
Definition: film_grain_params.h:147
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
skip_bytes
static const av_unused uint8_t * skip_bytes(CABACContext *c, int n)
Skip n bytes and reset the decoder.
Definition: cabac_functions.h:201
PredictionUnit::intra_pred_mode
uint8_t intra_pred_mode[4]
Definition: hevcdec.h:362
ff_hevc_decode_nal_pps
int ff_hevc_decode_nal_pps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps)
Definition: hevc_ps.c:1499
TransformUnit::is_cu_chroma_qp_offset_coded
uint8_t is_cu_chroma_qp_offset_coded
Definition: hevcdec.h:379
hls_transform_tree
static int hls_transform_tree(HEVCContext *s, int x0, int y0, int xBase, int yBase, int cb_xBase, int cb_yBase, int log2_cb_size, int log2_trafo_size, int trafo_depth, int blk_idx, const int *base_cbf_cb, const int *base_cbf_cr)
Definition: hevcdec.c:1313
h
h
Definition: vp9dsp_template.c:2038
HEVCSEIFilmGrainCharacteristics::bit_depth_luma
int bit_depth_luma
Definition: hevc_sei.h:116
BOUNDARY_LEFT_SLICE
#define BOUNDARY_LEFT_SLICE
Definition: hevcdec.h:461
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:176
SliceHeader::slice_qp_delta
int slice_qp_delta
Definition: hevcdec.h:291
SliceHeader::slice_addr
unsigned int slice_addr
Definition: hevcdec.h:255
ff_hevc_log2_res_scale_abs
int ff_hevc_log2_res_scale_abs(HEVCContext *s, int idx)
Definition: hevc_cabac.c:903
HEVC_NAL_EOB_NUT
@ HEVC_NAL_EOB_NUT
Definition: hevc.h:66
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
TransformUnit::intra_pred_mode_c
int intra_pred_mode_c
Definition: hevcdec.h:376
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:50
HEVC_NAL_SEI_PREFIX
@ HEVC_NAL_SEI_PREFIX
Definition: hevc.h:68
int
int
Definition: ffmpeg_filter.c:153
HEVCSEIFilmGrainCharacteristics
Definition: hevc_sei.h:112
HEVCLocalContext::end_of_tiles_y
int end_of_tiles_y
Definition: hevcdec.h:449
AVFilmGrainParams::type
enum AVFilmGrainParamsType type
Specifies the codec for which this structure is valid.
Definition: film_grain_params.h:220
CodingUnit::intra_split_flag
uint8_t intra_split_flag
IntraSplitFlag.
Definition: hevcdec.h:334
ff_hevc_end_of_slice_flag_decode
int ff_hevc_end_of_slice_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:615
intra_prediction_unit
static void intra_prediction_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:2078
SHIFT_CTB_WPP
#define SHIFT_CTB_WPP
Definition: hevcdec.h:48
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:3027
luma_mc_uni
static void luma_mc_uni(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride, AVFrame *ref, const Mv *mv, int x_off, int y_off, int block_w, int block_h, int luma_weight, int luma_offset)
8.5.3.2.2.1 Luma sample unidirectional interpolation process
Definition: hevcdec.c:1493
PART_2NxN
@ PART_2NxN
Definition: hevcdec.h:145
HEVCParamSets::vps_list
AVBufferRef * vps_list[HEVC_MAX_VPS_COUNT]
Definition: hevc_ps.h:328
ff_dovi_attach_side_data
int ff_dovi_attach_side_data(DOVIContext *s, AVFrame *frame)
Attach the decoded AVDOVIMetadata as side data to an AVFrame.
Definition: dovi_rpu.c:91
AV_CODEC_EXPORT_DATA_FILM_GRAIN
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
Definition: avcodec.h:356
SliceHeader::long_term_rps
LongTermRPS long_term_rps
Definition: hevcdec.h:272
HEVCSEIFilmGrainCharacteristics::persistence_flag
int persistence_flag
Definition: hevc_sei.h:130
HEVCLocalContext::cc
CABACContext cc
Definition: hevcdec.h:435
TransformUnit::cu_qp_offset_cr
int8_t cu_qp_offset_cr
Definition: hevcdec.h:381
ff_alloc_entries
int ff_alloc_entries(AVCodecContext *avctx, int count)
Definition: pthread_slice.c:201
options
static const AVOption options[]
Definition: hevcdec.c:3843
AVDOVIDecoderConfigurationRecord
Definition: dovi_meta.h:52
HEVC_CONTEXTS
#define HEVC_CONTEXTS
Definition: hevcdec.h:54
HEVCParamSets
Definition: hevc_ps.h:327