FFmpeg
hevcdec.c
1 /*
2  * HEVC video Decoder
3  *
4  * Copyright (C) 2012 - 2013 Guillaume Martres
5  * Copyright (C) 2012 - 2013 Mickael Raulet
6  * Copyright (C) 2012 - 2013 Gildas Cocherel
7  * Copyright (C) 2012 - 2013 Wassim Hamidouche
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25 
26 #include "config_components.h"
27 
28 #include "libavutil/attributes.h"
29 #include "libavutil/avstring.h"
30 #include "libavutil/common.h"
31 #include "libavutil/display.h"
33 #include "libavutil/internal.h"
35 #include "libavutil/md5.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/timecode.h"
39 
40 #include "bswapdsp.h"
41 #include "bytestream.h"
42 #include "cabac_functions.h"
43 #include "codec_internal.h"
44 #include "decode.h"
45 #include "golomb.h"
46 #include "hevc.h"
47 #include "hevc_data.h"
48 #include "hevc_parse.h"
49 #include "hevcdec.h"
50 #include "hwconfig.h"
51 #include "internal.h"
52 #include "profiles.h"
53 #include "thread.h"
54 #include "threadframe.h"
55 
56 static const uint8_t hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
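/* Editor's note: hevc_pel_weight maps a prediction-block width in luma samples
 * (2..64) to the index used to select the matching put_hevc_qpel/epel DSP
 * function; widths not named in the designated initializer stay 0. For example,
 * a 16-wide block uses hevc_pel_weight[16] == 5. */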
57 
58 /**
59  * NOTE: Each function hls_foo corresponds to the function foo in the
60  * specification (HLS stands for High Level Syntax).
61  */
62 
63 /**
64  * Section 5.7
65  */
66 
67 /* free everything allocated by pic_arrays_init() */
68 static void pic_arrays_free(HEVCContext *s)
69 {
70  av_freep(&s->sao);
71  av_freep(&s->deblock);
72 
73  av_freep(&s->skip_flag);
74  av_freep(&s->tab_ct_depth);
75 
76  av_freep(&s->tab_ipm);
77  av_freep(&s->cbf_luma);
78  av_freep(&s->is_pcm);
79 
80  av_freep(&s->qp_y_tab);
81  av_freep(&s->tab_slice_address);
82  av_freep(&s->filter_slice_edges);
83 
84  av_freep(&s->horizontal_bs);
85  av_freep(&s->vertical_bs);
86 
87  av_freep(&s->sh.entry_point_offset);
88  av_freep(&s->sh.size);
89  av_freep(&s->sh.offset);
90 
91  av_buffer_pool_uninit(&s->tab_mvf_pool);
92  av_buffer_pool_uninit(&s->rpl_tab_pool);
93 }
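/* Editor's note: everything released above is sized from the SPS geometry and is
 * allocated again by pic_arrays_init() below whenever a new SPS (and thus a new
 * frame size / CTB grid) becomes active. */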
94 
95 /* allocate arrays that depend on frame dimensions */
96 static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
97 {
98  int log2_min_cb_size = sps->log2_min_cb_size;
99  int width = sps->width;
100  int height = sps->height;
101  int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) *
102  ((height >> log2_min_cb_size) + 1);
103  int ctb_count = sps->ctb_width * sps->ctb_height;
104  int min_pu_size = sps->min_pu_width * sps->min_pu_height;
105 
106  s->bs_width = (width >> 2) + 1;
107  s->bs_height = (height >> 2) + 1;
108 
109  s->sao = av_calloc(ctb_count, sizeof(*s->sao));
110  s->deblock = av_calloc(ctb_count, sizeof(*s->deblock));
111  if (!s->sao || !s->deblock)
112  goto fail;
113 
114  s->skip_flag = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
115  s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
116  if (!s->skip_flag || !s->tab_ct_depth)
117  goto fail;
118 
119  s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height);
120  s->tab_ipm = av_mallocz(min_pu_size);
121  s->is_pcm = av_malloc_array(sps->min_pu_width + 1, sps->min_pu_height + 1);
122  if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
123  goto fail;
124 
125  s->filter_slice_edges = av_mallocz(ctb_count);
126  s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
127  sizeof(*s->tab_slice_address));
128  s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
129  sizeof(*s->qp_y_tab));
130  if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)
131  goto fail;
132 
133  s->horizontal_bs = av_calloc(s->bs_width, s->bs_height);
134  s->vertical_bs = av_calloc(s->bs_width, s->bs_height);
135  if (!s->horizontal_bs || !s->vertical_bs)
136  goto fail;
137 
138  s->tab_mvf_pool = av_buffer_pool_init(min_pu_size * sizeof(MvField),
139  av_buffer_allocz);
140  s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab),
141  av_buffer_allocz);
142  if (!s->tab_mvf_pool || !s->rpl_tab_pool)
143  goto fail;
144 
145  return 0;
146 
147 fail:
148  pic_arrays_free(s);
149  return AVERROR(ENOMEM);
150 }
151 
152 static int pred_weight_table(HEVCContext *s, GetBitContext *gb)
153 {
154  int i = 0;
155  int j = 0;
156  uint8_t luma_weight_l0_flag[16];
157  uint8_t chroma_weight_l0_flag[16];
158  uint8_t luma_weight_l1_flag[16];
159  uint8_t chroma_weight_l1_flag[16];
160  int luma_log2_weight_denom;
161 
162  luma_log2_weight_denom = get_ue_golomb_long(gb);
163  if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) {
164  av_log(s->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom);
165  return AVERROR_INVALIDDATA;
166  }
167  s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3);
168  if (s->ps.sps->chroma_format_idc != 0) {
169  int64_t chroma_log2_weight_denom = luma_log2_weight_denom + (int64_t)get_se_golomb(gb);
170  if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) {
171  av_log(s->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %"PRId64" is invalid\n", chroma_log2_weight_denom);
172  return AVERROR_INVALIDDATA;
173  }
174  s->sh.chroma_log2_weight_denom = chroma_log2_weight_denom;
175  }
176 
177  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
178  luma_weight_l0_flag[i] = get_bits1(gb);
179  if (!luma_weight_l0_flag[i]) {
180  s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
181  s->sh.luma_offset_l0[i] = 0;
182  }
183  }
184  if (s->ps.sps->chroma_format_idc != 0) {
185  for (i = 0; i < s->sh.nb_refs[L0]; i++)
186  chroma_weight_l0_flag[i] = get_bits1(gb);
187  } else {
188  for (i = 0; i < s->sh.nb_refs[L0]; i++)
189  chroma_weight_l0_flag[i] = 0;
190  }
191  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
192  if (luma_weight_l0_flag[i]) {
193  int delta_luma_weight_l0 = get_se_golomb(gb);
194  if ((int8_t)delta_luma_weight_l0 != delta_luma_weight_l0)
195  return AVERROR_INVALIDDATA;
196  s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
197  s->sh.luma_offset_l0[i] = get_se_golomb(gb);
198  }
199  if (chroma_weight_l0_flag[i]) {
200  for (j = 0; j < 2; j++) {
201  int delta_chroma_weight_l0 = get_se_golomb(gb);
202  int delta_chroma_offset_l0 = get_se_golomb(gb);
203 
204  if ( (int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0
205  || delta_chroma_offset_l0 < -(1<<17) || delta_chroma_offset_l0 > (1<<17)) {
206  return AVERROR_INVALIDDATA;
207  }
208 
209  s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
210  s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
211  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
212  }
213  } else {
214  s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
215  s->sh.chroma_offset_l0[i][0] = 0;
216  s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
217  s->sh.chroma_offset_l0[i][1] = 0;
218  }
219  }
220  if (s->sh.slice_type == HEVC_SLICE_B) {
221  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
222  luma_weight_l1_flag[i] = get_bits1(gb);
223  if (!luma_weight_l1_flag[i]) {
224  s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
225  s->sh.luma_offset_l1[i] = 0;
226  }
227  }
228  if (s->ps.sps->chroma_format_idc != 0) {
229  for (i = 0; i < s->sh.nb_refs[L1]; i++)
230  chroma_weight_l1_flag[i] = get_bits1(gb);
231  } else {
232  for (i = 0; i < s->sh.nb_refs[L1]; i++)
233  chroma_weight_l1_flag[i] = 0;
234  }
235  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
236  if (luma_weight_l1_flag[i]) {
237  int delta_luma_weight_l1 = get_se_golomb(gb);
238  if ((int8_t)delta_luma_weight_l1 != delta_luma_weight_l1)
239  return AVERROR_INVALIDDATA;
240  s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
241  s->sh.luma_offset_l1[i] = get_se_golomb(gb);
242  }
243  if (chroma_weight_l1_flag[i]) {
244  for (j = 0; j < 2; j++) {
245  int delta_chroma_weight_l1 = get_se_golomb(gb);
246  int delta_chroma_offset_l1 = get_se_golomb(gb);
247 
248  if ( (int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1
249  || delta_chroma_offset_l1 < -(1<<17) || delta_chroma_offset_l1 > (1<<17)) {
250  return AVERROR_INVALIDDATA;
251  }
252 
253  s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
254  s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
255  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
256  }
257  } else {
258  s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
259  s->sh.chroma_offset_l1[i][0] = 0;
260  s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
261  s->sh.chroma_offset_l1[i][1] = 0;
262  }
263  }
264  }
265  return 0;
266 }
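/* Editor's note: in pred_weight_table() above, each weight is reconstructed as
 * (1 << log2_weight_denom) + signalled delta, and the chroma offsets fold in the
 * (128 * weight >> denom) term before being clipped to [-128, 127]; the L1 loop
 * is only parsed for B slices. */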
267 
268 static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
269 {
270  const HEVCSPS *sps = s->ps.sps;
271  int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
272  int prev_delta_msb = 0;
273  unsigned int nb_sps = 0, nb_sh;
274  int i;
275 
276  rps->nb_refs = 0;
277  if (!sps->long_term_ref_pics_present_flag)
278  return 0;
279 
280  if (sps->num_long_term_ref_pics_sps > 0)
281  nb_sps = get_ue_golomb_long(gb);
282  nb_sh = get_ue_golomb_long(gb);
283 
284  if (nb_sps > sps->num_long_term_ref_pics_sps)
285  return AVERROR_INVALIDDATA;
286  if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc))
287  return AVERROR_INVALIDDATA;
288 
289  rps->nb_refs = nb_sh + nb_sps;
290 
291  for (i = 0; i < rps->nb_refs; i++) {
292 
293  if (i < nb_sps) {
294  uint8_t lt_idx_sps = 0;
295 
296  if (sps->num_long_term_ref_pics_sps > 1)
297  lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps));
298 
299  rps->poc[i] = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
300  rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
301  } else {
302  rps->poc[i] = get_bits(gb, sps->log2_max_poc_lsb);
303  rps->used[i] = get_bits1(gb);
304  }
305 
306  rps->poc_msb_present[i] = get_bits1(gb);
307  if (rps->poc_msb_present[i]) {
308  int64_t delta = get_ue_golomb_long(gb);
309  int64_t poc;
310 
311  if (i && i != nb_sps)
312  delta += prev_delta_msb;
313 
314  poc = rps->poc[i] + s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;
315  if (poc != (int32_t)poc)
316  return AVERROR_INVALIDDATA;
317  rps->poc[i] = poc;
318  prev_delta_msb = delta;
319  }
320  }
321 
322  return 0;
323 }
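/* Editor's note: decode_lt_rps() above fills the long-term RPS from two sources:
 * entries signalled in the SPS (indexed by lt_idx_sps) and entries coded directly
 * in the slice header. When the POC MSB is present, the full POC is rebuilt from
 * the current POC, the accumulated MSB delta and pic_order_cnt_lsb, and rejected
 * if it does not fit in 32 bits. */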
324 
325 static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
326 {
327  AVCodecContext *avctx = s->avctx;
328  const HEVCParamSets *ps = &s->ps;
329  const HEVCVPS *vps = (const HEVCVPS*)ps->vps_list[sps->vps_id]->data;
330  const HEVCWindow *ow = &sps->output_window;
331  unsigned int num = 0, den = 0;
332 
333  avctx->pix_fmt = sps->pix_fmt;
334  avctx->coded_width = sps->width;
335  avctx->coded_height = sps->height;
336  avctx->width = sps->width - ow->left_offset - ow->right_offset;
337  avctx->height = sps->height - ow->top_offset - ow->bottom_offset;
338  avctx->has_b_frames = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics;
339  avctx->profile = sps->ptl.general_ptl.profile_idc;
340  avctx->level = sps->ptl.general_ptl.level_idc;
341 
342  ff_set_sar(avctx, sps->vui.common.sar);
343 
344  if (sps->vui.common.video_signal_type_present_flag)
345  avctx->color_range = sps->vui.common.video_full_range_flag ? AVCOL_RANGE_JPEG
346  : AVCOL_RANGE_MPEG;
347  else
348  avctx->color_range = AVCOL_RANGE_MPEG;
349 
350  if (sps->vui.common.colour_description_present_flag) {
351  avctx->color_primaries = sps->vui.common.colour_primaries;
352  avctx->color_trc = sps->vui.common.transfer_characteristics;
353  avctx->colorspace = sps->vui.common.matrix_coeffs;
354  } else {
355  avctx->color_primaries = AVCOL_PRI_UNSPECIFIED;
356  avctx->color_trc = AVCOL_TRC_UNSPECIFIED;
357  avctx->colorspace = AVCOL_SPC_UNSPECIFIED;
358  }
359 
360  avctx->chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED;
361  if (sps->chroma_format_idc == 1) {
362  if (sps->vui.common.chroma_loc_info_present_flag) {
363  if (sps->vui.common.chroma_sample_loc_type_top_field <= 5)
364  avctx->chroma_sample_location = sps->vui.common.chroma_sample_loc_type_top_field + 1;
365  } else
366  avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
367  }
368 
369  if (vps->vps_timing_info_present_flag) {
370  num = vps->vps_num_units_in_tick;
371  den = vps->vps_time_scale;
372  } else if (sps->vui.vui_timing_info_present_flag) {
373  num = sps->vui.vui_num_units_in_tick;
374  den = sps->vui.vui_time_scale;
375  }
376 
377  if (num != 0 && den != 0)
378  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
379  num, den, 1 << 30);
380 }
381 
382 static int export_stream_params_from_sei(HEVCContext *s)
383 {
384  AVCodecContext *avctx = s->avctx;
385 
386  if (s->sei.common.a53_caption.buf_ref)
387  s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
388 
389  if (s->sei.common.alternative_transfer.present &&
390  av_color_transfer_name(s->sei.common.alternative_transfer.preferred_transfer_characteristics) &&
391  s->sei.common.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
392  avctx->color_trc = s->sei.common.alternative_transfer.preferred_transfer_characteristics;
393  }
394 
395  if (s->sei.common.film_grain_characteristics.present)
396  avctx->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
397 
398  return 0;
399 }
400 
401 static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
402 {
403 #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
404  CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
405  CONFIG_HEVC_NVDEC_HWACCEL + \
406  CONFIG_HEVC_VAAPI_HWACCEL + \
407  CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
408  CONFIG_HEVC_VDPAU_HWACCEL)
409  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
410 
411  switch (sps->pix_fmt) {
412  case AV_PIX_FMT_YUV420P:
413  case AV_PIX_FMT_YUVJ420P:
414 #if CONFIG_HEVC_DXVA2_HWACCEL
415  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
416 #endif
417 #if CONFIG_HEVC_D3D11VA_HWACCEL
418  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
419  *fmt++ = AV_PIX_FMT_D3D11;
420 #endif
421 #if CONFIG_HEVC_VAAPI_HWACCEL
422  *fmt++ = AV_PIX_FMT_VAAPI;
423 #endif
424 #if CONFIG_HEVC_VDPAU_HWACCEL
425  *fmt++ = AV_PIX_FMT_VDPAU;
426 #endif
427 #if CONFIG_HEVC_NVDEC_HWACCEL
428  *fmt++ = AV_PIX_FMT_CUDA;
429 #endif
430 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
431  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
432 #endif
433  break;
434  case AV_PIX_FMT_YUV420P10:
435 #if CONFIG_HEVC_DXVA2_HWACCEL
436  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
437 #endif
438 #if CONFIG_HEVC_D3D11VA_HWACCEL
439  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
440  *fmt++ = AV_PIX_FMT_D3D11;
441 #endif
442 #if CONFIG_HEVC_VAAPI_HWACCEL
443  *fmt++ = AV_PIX_FMT_VAAPI;
444 #endif
445 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
446  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
447 #endif
448 #if CONFIG_HEVC_VDPAU_HWACCEL
449  *fmt++ = AV_PIX_FMT_VDPAU;
450 #endif
451 #if CONFIG_HEVC_NVDEC_HWACCEL
452  *fmt++ = AV_PIX_FMT_CUDA;
453 #endif
454  break;
455  case AV_PIX_FMT_YUV444P:
456 #if CONFIG_HEVC_VAAPI_HWACCEL
457  *fmt++ = AV_PIX_FMT_VAAPI;
458 #endif
459 #if CONFIG_HEVC_VDPAU_HWACCEL
460  *fmt++ = AV_PIX_FMT_VDPAU;
461 #endif
462 #if CONFIG_HEVC_NVDEC_HWACCEL
463  *fmt++ = AV_PIX_FMT_CUDA;
464 #endif
465 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
466  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
467 #endif
468  break;
469  case AV_PIX_FMT_YUV422P:
470  case AV_PIX_FMT_YUV422P10LE:
471 #if CONFIG_HEVC_VAAPI_HWACCEL
472  *fmt++ = AV_PIX_FMT_VAAPI;
473 #endif
474 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
475  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
476 #endif
477  break;
479 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
480  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
481 #endif
484 #if CONFIG_HEVC_VAAPI_HWACCEL
485  *fmt++ = AV_PIX_FMT_VAAPI;
486 #endif
487 #if CONFIG_HEVC_VDPAU_HWACCEL
488  *fmt++ = AV_PIX_FMT_VDPAU;
489 #endif
490 #if CONFIG_HEVC_NVDEC_HWACCEL
491  *fmt++ = AV_PIX_FMT_CUDA;
492 #endif
493  break;
495 #if CONFIG_HEVC_VAAPI_HWACCEL
496  *fmt++ = AV_PIX_FMT_VAAPI;
497 #endif
498  break;
499  }
500 
501  *fmt++ = sps->pix_fmt;
502  *fmt = AV_PIX_FMT_NONE;
503 
504  return ff_thread_get_format(s->avctx, pix_fmts);
505 }
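/* Editor's note: get_format() above assembles an AV_PIX_FMT_NONE-terminated list
 * of candidate formats, hardware pixel formats first (each guarded by its
 * CONFIG_HEVC_*_HWACCEL macro) and the native software format last, then lets
 * ff_thread_get_format() and the user's get_format callback choose one. */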
506 
507 static int set_sps(HEVCContext *s, const HEVCSPS *sps,
508  enum AVPixelFormat pix_fmt)
509 {
510  int ret, i;
511 
512  pic_arrays_free(s);
513  s->ps.sps = NULL;
514  s->ps.vps = NULL;
515 
516  if (!sps)
517  return 0;
518 
519  ret = pic_arrays_init(s, sps);
520  if (ret < 0)
521  goto fail;
522 
523  export_stream_params(s, sps);
524 
525  s->avctx->pix_fmt = pix_fmt;
526 
527  ff_hevc_pred_init(&s->hpc, sps->bit_depth);
528  ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth);
529  ff_videodsp_init (&s->vdsp, sps->bit_depth);
530 
531  for (i = 0; i < 3; i++) {
532  av_freep(&s->sao_pixel_buffer_h[i]);
533  av_freep(&s->sao_pixel_buffer_v[i]);
534  }
535 
536  if (sps->sao_enabled && !s->avctx->hwaccel) {
537  int c_count = (sps->chroma_format_idc != 0) ? 3 : 1;
538  int c_idx;
539 
540  for(c_idx = 0; c_idx < c_count; c_idx++) {
541  int w = sps->width >> sps->hshift[c_idx];
542  int h = sps->height >> sps->vshift[c_idx];
543  s->sao_pixel_buffer_h[c_idx] =
544  av_malloc((w * 2 * sps->ctb_height) <<
545  sps->pixel_shift);
546  s->sao_pixel_buffer_v[c_idx] =
547  av_malloc((h * 2 * sps->ctb_width) <<
548  sps->pixel_shift);
549  if (!s->sao_pixel_buffer_h[c_idx] ||
550  !s->sao_pixel_buffer_v[c_idx])
551  goto fail;
552  }
553  }
554 
555  s->ps.sps = sps;
556  s->ps.vps = (HEVCVPS*) s->ps.vps_list[s->ps.sps->vps_id]->data;
557 
558  return 0;
559 
560 fail:
561  pic_arrays_free(s);
562  for (i = 0; i < 3; i++) {
563  av_freep(&s->sao_pixel_buffer_h[i]);
564  av_freep(&s->sao_pixel_buffer_v[i]);
565  }
566  s->ps.sps = NULL;
567  return ret;
568 }
569 
570 static int hls_slice_header(HEVCContext *s)
571 {
572  GetBitContext *gb = &s->HEVClc->gb;
573  SliceHeader *sh = &s->sh;
574  int i, ret;
575 
576  // Coded parameters
577  sh->first_slice_in_pic_flag = get_bits1(gb);
578  if (s->ref && sh->first_slice_in_pic_flag) {
579  av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n");
580  return 1; // This slice will be skipped later, do not corrupt state
581  }
582 
583  if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) {
584  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
585  s->max_ra = INT_MAX;
586  if (IS_IDR(s))
587  ff_hevc_clear_refs(s);
588  }
589  sh->no_output_of_prior_pics_flag = 0;
590  if (IS_IRAP(s))
591  sh->no_output_of_prior_pics_flag = get_bits1(gb);
592 
593  sh->pps_id = get_ue_golomb_long(gb);
594  if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) {
595  av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
596  return AVERROR_INVALIDDATA;
597  }
598  if (!sh->first_slice_in_pic_flag &&
599  s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) {
600  av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n");
601  return AVERROR_INVALIDDATA;
602  }
603  s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data;
604  if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1)
605  sh->no_output_of_prior_pics_flag = 1;
606 
607  if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) {
608  const HEVCSPS *sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data;
609  enum AVPixelFormat pix_fmt;
610 
612 
613  ret = set_sps(s, sps, sps->pix_fmt);
614  if (ret < 0)
615  return ret;
616 
617  pix_fmt = get_format(s, sps);
618  if (pix_fmt < 0)
619  return pix_fmt;
620  s->avctx->pix_fmt = pix_fmt;
621 
622  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
623  s->max_ra = INT_MAX;
624  }
625 
626  ret = export_stream_params_from_sei(s);
627  if (ret < 0)
628  return ret;
629 
630  sh->dependent_slice_segment_flag = 0;
631  if (!sh->first_slice_in_pic_flag) {
632  int slice_address_length;
633 
634  if (s->ps.pps->dependent_slice_segments_enabled_flag)
635  sh->dependent_slice_segment_flag = get_bits1(gb);
636 
637  slice_address_length = av_ceil_log2(s->ps.sps->ctb_width *
638  s->ps.sps->ctb_height);
639  sh->slice_segment_addr = get_bitsz(gb, slice_address_length);
640  if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
641  av_log(s->avctx, AV_LOG_ERROR,
642  "Invalid slice segment address: %u.\n",
643  sh->slice_segment_addr);
644  return AVERROR_INVALIDDATA;
645  }
646 
647  if (!sh->dependent_slice_segment_flag) {
648  sh->slice_addr = sh->slice_segment_addr;
649  s->slice_idx++;
650  }
651  } else {
652  sh->slice_segment_addr = sh->slice_addr = 0;
653  s->slice_idx = 0;
654  s->slice_initialized = 0;
655  }
656 
657  if (!sh->dependent_slice_segment_flag) {
658  s->slice_initialized = 0;
659 
660  for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)
661  skip_bits(gb, 1); // slice_reserved_undetermined_flag[]
662 
663  sh->slice_type = get_ue_golomb_long(gb);
664  if (!(sh->slice_type == HEVC_SLICE_I ||
665  sh->slice_type == HEVC_SLICE_P ||
666  sh->slice_type == HEVC_SLICE_B)) {
667  av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
668  sh->slice_type);
669  return AVERROR_INVALIDDATA;
670  }
671  if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I) {
672  av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n");
673  return AVERROR_INVALIDDATA;
674  }
675 
676  // when flag is not present, picture is inferred to be output
677  sh->pic_output_flag = 1;
678  if (s->ps.pps->output_flag_present_flag)
679  sh->pic_output_flag = get_bits1(gb);
680 
681  if (s->ps.sps->separate_colour_plane_flag)
682  sh->colour_plane_id = get_bits(gb, 2);
683 
684  if (!IS_IDR(s)) {
685  int poc, pos;
686 
687  sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb);
688  poc = ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type);
689  if (!sh->first_slice_in_pic_flag && poc != s->poc) {
690  av_log(s->avctx, AV_LOG_WARNING,
691  "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
692  if (s->avctx->err_recognition & AV_EF_EXPLODE)
693  return AVERROR_INVALIDDATA;
694  poc = s->poc;
695  }
696  s->poc = poc;
697 
698  sh->short_term_ref_pic_set_sps_flag = get_bits1(gb);
699  pos = get_bits_left(gb);
700  if (!sh->short_term_ref_pic_set_sps_flag) {
701  ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1);
702  if (ret < 0)
703  return ret;
704 
705  sh->short_term_rps = &sh->slice_rps;
706  } else {
707  int numbits, rps_idx;
708 
709  if (!s->ps.sps->nb_st_rps) {
710  av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n");
711  return AVERROR_INVALIDDATA;
712  }
713 
714  numbits = av_ceil_log2(s->ps.sps->nb_st_rps);
715  rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
716  sh->short_term_rps = &s->ps.sps->st_rps[rps_idx];
717  }
718  sh->short_term_ref_pic_set_size = pos - get_bits_left(gb);
719 
720  pos = get_bits_left(gb);
721  ret = decode_lt_rps(s, &sh->long_term_rps, gb);
722  if (ret < 0) {
723  av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n");
724  if (s->avctx->err_recognition & AV_EF_EXPLODE)
725  return AVERROR_INVALIDDATA;
726  }
727  sh->long_term_ref_pic_set_size = pos - get_bits_left(gb);
728 
729  if (s->ps.sps->sps_temporal_mvp_enabled_flag)
730  sh->slice_temporal_mvp_enabled_flag = get_bits1(gb);
731  else
732  sh->slice_temporal_mvp_enabled_flag = 0;
733  } else {
734  s->sh.short_term_rps = NULL;
735  s->poc = 0;
736  }
737 
738  /* 8.3.1 */
739  if (sh->first_slice_in_pic_flag && s->temporal_id == 0 &&
740  s->nal_unit_type != HEVC_NAL_TRAIL_N &&
741  s->nal_unit_type != HEVC_NAL_TSA_N &&
742  s->nal_unit_type != HEVC_NAL_STSA_N &&
743  s->nal_unit_type != HEVC_NAL_RADL_N &&
744  s->nal_unit_type != HEVC_NAL_RADL_R &&
745  s->nal_unit_type != HEVC_NAL_RASL_N &&
746  s->nal_unit_type != HEVC_NAL_RASL_R)
747  s->pocTid0 = s->poc;
748 
749  if (s->ps.sps->sao_enabled) {
750  sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb);
751  if (s->ps.sps->chroma_format_idc) {
752  sh->slice_sample_adaptive_offset_flag[1] =
753  sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb);
754  }
755  } else {
756  sh->slice_sample_adaptive_offset_flag[0] = 0;
757  sh->slice_sample_adaptive_offset_flag[1] = 0;
758  sh->slice_sample_adaptive_offset_flag[2] = 0;
759  }
760 
761  sh->nb_refs[L0] = sh->nb_refs[L1] = 0;
762  if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) {
763  int nb_refs;
764 
765  sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
766  if (sh->slice_type == HEVC_SLICE_B)
767  sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;
768 
769  if (get_bits1(gb)) { // num_ref_idx_active_override_flag
770  sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1;
771  if (sh->slice_type == HEVC_SLICE_B)
772  sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1;
773  }
774  if (sh->nb_refs[L0] > HEVC_MAX_REFS || sh->nb_refs[L1] > HEVC_MAX_REFS) {
775  av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n",
776  sh->nb_refs[L0], sh->nb_refs[L1]);
777  return AVERROR_INVALIDDATA;
778  }
779 
780  sh->rpl_modification_flag[0] = 0;
781  sh->rpl_modification_flag[1] = 0;
782  nb_refs = ff_hevc_frame_nb_refs(s);
783  if (!nb_refs) {
784  av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n");
785  return AVERROR_INVALIDDATA;
786  }
787 
788  if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {
789  sh->rpl_modification_flag[0] = get_bits1(gb);
790  if (sh->rpl_modification_flag[0]) {
791  for (i = 0; i < sh->nb_refs[L0]; i++)
792  sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs));
793  }
794 
795  if (sh->slice_type == HEVC_SLICE_B) {
796  sh->rpl_modification_flag[1] = get_bits1(gb);
797  if (sh->rpl_modification_flag[1] == 1)
798  for (i = 0; i < sh->nb_refs[L1]; i++)
799  sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs));
800  }
801  }
802 
803  if (sh->slice_type == HEVC_SLICE_B)
804  sh->mvd_l1_zero_flag = get_bits1(gb);
805 
806  if (s->ps.pps->cabac_init_present_flag)
807  sh->cabac_init_flag = get_bits1(gb);
808  else
809  sh->cabac_init_flag = 0;
810 
811  sh->collocated_ref_idx = 0;
812  if (sh->slice_temporal_mvp_enabled_flag) {
813  sh->collocated_list = L0;
814  if (sh->slice_type == HEVC_SLICE_B)
815  sh->collocated_list = !get_bits1(gb);
816 
817  if (sh->nb_refs[sh->collocated_list] > 1) {
818  sh->collocated_ref_idx = get_ue_golomb_long(gb);
819  if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) {
820  av_log(s->avctx, AV_LOG_ERROR,
821  "Invalid collocated_ref_idx: %d.\n",
822  sh->collocated_ref_idx);
823  return AVERROR_INVALIDDATA;
824  }
825  }
826  }
827 
828  if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) ||
829  (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) {
830  int ret = pred_weight_table(s, gb);
831  if (ret < 0)
832  return ret;
833  }
834 
835  sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb);
836  if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) {
837  av_log(s->avctx, AV_LOG_ERROR,
838  "Invalid number of merging MVP candidates: %d.\n",
839  sh->max_num_merge_cand);
840  return AVERROR_INVALIDDATA;
841  }
842  }
843 
844  sh->slice_qp_delta = get_se_golomb(gb);
845 
846  if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {
847  sh->slice_cb_qp_offset = get_se_golomb(gb);
848  sh->slice_cr_qp_offset = get_se_golomb(gb);
849  if (sh->slice_cb_qp_offset < -12 || sh->slice_cb_qp_offset > 12 ||
850  sh->slice_cr_qp_offset < -12 || sh->slice_cr_qp_offset > 12) {
851  av_log(s->avctx, AV_LOG_ERROR, "Invalid slice cx qp offset.\n");
852  return AVERROR_INVALIDDATA;
853  }
854  } else {
855  sh->slice_cb_qp_offset = 0;
856  sh->slice_cr_qp_offset = 0;
857  }
858 
859  if (s->ps.pps->chroma_qp_offset_list_enabled_flag)
860  sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb);
861  else
862  sh->cu_chroma_qp_offset_enabled_flag = 0;
863 
864  if (s->ps.pps->deblocking_filter_control_present_flag) {
865  int deblocking_filter_override_flag = 0;
866 
867  if (s->ps.pps->deblocking_filter_override_enabled_flag)
868  deblocking_filter_override_flag = get_bits1(gb);
869 
870  if (deblocking_filter_override_flag) {
871  sh->disable_deblocking_filter_flag = get_bits1(gb);
872  if (!sh->disable_deblocking_filter_flag) {
873  int beta_offset_div2 = get_se_golomb(gb);
874  int tc_offset_div2 = get_se_golomb(gb) ;
875  if (beta_offset_div2 < -6 || beta_offset_div2 > 6 ||
876  tc_offset_div2 < -6 || tc_offset_div2 > 6) {
877  av_log(s->avctx, AV_LOG_ERROR,
878  "Invalid deblock filter offsets: %d, %d\n",
879  beta_offset_div2, tc_offset_div2);
880  return AVERROR_INVALIDDATA;
881  }
882  sh->beta_offset = beta_offset_div2 * 2;
883  sh->tc_offset = tc_offset_div2 * 2;
884  }
885  } else {
886  sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf;
887  sh->beta_offset = s->ps.pps->beta_offset;
888  sh->tc_offset = s->ps.pps->tc_offset;
889  }
890  } else {
891  sh->disable_deblocking_filter_flag = 0;
892  sh->beta_offset = 0;
893  sh->tc_offset = 0;
894  }
895 
896  if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&
897  (sh->slice_sample_adaptive_offset_flag[0] ||
898  sh->slice_sample_adaptive_offset_flag[1] ||
899  !sh->disable_deblocking_filter_flag)) {
900  sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb);
901  } else {
902  sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag;
903  }
904  } else if (!s->slice_initialized) {
905  av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n");
906  return AVERROR_INVALIDDATA;
907  }
908 
909  sh->num_entry_point_offsets = 0;
910  if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
911  unsigned num_entry_point_offsets = get_ue_golomb_long(gb);
912  // It would be possible to bound this tighter, but this is simpler
913  if (num_entry_point_offsets > get_bits_left(gb)) {
914  av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);
915  return AVERROR_INVALIDDATA;
916  }
917 
918  sh->num_entry_point_offsets = num_entry_point_offsets;
919  if (sh->num_entry_point_offsets > 0) {
920  int offset_len = get_ue_golomb_long(gb) + 1;
921 
922  if (offset_len < 1 || offset_len > 32) {
923  sh->num_entry_point_offsets = 0;
924  av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len);
925  return AVERROR_INVALIDDATA;
926  }
927 
928  av_freep(&sh->entry_point_offset);
929  av_freep(&sh->offset);
930  av_freep(&sh->size);
931  sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned));
932  sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
933  sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
934  if (!sh->entry_point_offset || !sh->offset || !sh->size) {
935  sh->num_entry_point_offsets = 0;
936  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
937  return AVERROR(ENOMEM);
938  }
939  for (i = 0; i < sh->num_entry_point_offsets; i++) {
940  unsigned val = get_bits_long(gb, offset_len);
941  sh->entry_point_offset[i] = val + 1; // +1 to get the size
942  }
943  if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) {
944  s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here
945  s->threads_number = 1;
946  } else
947  s->enable_parallel_tiles = 0;
948  } else
949  s->enable_parallel_tiles = 0;
950  }
951 
952  if (s->ps.pps->slice_header_extension_present_flag) {
953  unsigned int length = get_ue_golomb_long(gb);
954  if (length*8LL > get_bits_left(gb)) {
955  av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n");
956  return AVERROR_INVALIDDATA;
957  }
958  for (i = 0; i < length; i++)
959  skip_bits(gb, 8); // slice_header_extension_data_byte
960  }
961 
962  // Inferred parameters
963  sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta;
964  if (sh->slice_qp > 51 ||
965  sh->slice_qp < -s->ps.sps->qp_bd_offset) {
966  av_log(s->avctx, AV_LOG_ERROR,
967  "The slice_qp %d is outside the valid range "
968  "[%d, 51].\n",
969  sh->slice_qp,
970  -s->ps.sps->qp_bd_offset);
971  return AVERROR_INVALIDDATA;
972  }
973 
974  sh->slice_ctb_addr_rs = sh->slice_segment_addr;
975 
976  if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
977  av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
978  return AVERROR_INVALIDDATA;
979  }
980 
981  if (get_bits_left(gb) < 0) {
982  av_log(s->avctx, AV_LOG_ERROR,
983  "Overread slice header by %d bits\n", -get_bits_left(gb));
984  return AVERROR_INVALIDDATA;
985  }
986 
987  s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;
988 
989  if (!s->ps.pps->cu_qp_delta_enabled_flag)
990  s->HEVClc->qp_y = s->sh.slice_qp;
991 
992  s->slice_initialized = 1;
993  s->HEVClc->tu.cu_qp_offset_cb = 0;
994  s->HEVClc->tu.cu_qp_offset_cr = 0;
995 
996  return 0;
997 }
998 
999 #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
1000 
1001 #define SET_SAO(elem, value) \
1002 do { \
1003  if (!sao_merge_up_flag && !sao_merge_left_flag) \
1004  sao->elem = value; \
1005  else if (sao_merge_left_flag) \
1006  sao->elem = CTB(s->sao, rx-1, ry).elem; \
1007  else if (sao_merge_up_flag) \
1008  sao->elem = CTB(s->sao, rx, ry-1).elem; \
1009  else \
1010  sao->elem = 0; \
1011 } while (0)
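/* Editor's note: SET_SAO() either stores a freshly decoded value or, when one of
 * the merge flags is set, copies the corresponding field from the left (rx-1) or
 * above (rx, ry-1) CTB via the CTB() addressing macro, mirroring the sao_merge
 * semantics of the slice data syntax. */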
1012 
1013 static void hls_sao_param(HEVCLocalContext *lc, int rx, int ry)
1014 {
1015  const HEVCContext *const s = lc->parent;
1016  int sao_merge_left_flag = 0;
1017  int sao_merge_up_flag = 0;
1018  SAOParams *sao = &CTB(s->sao, rx, ry);
1019  int c_idx, i;
1020 
1021  if (s->sh.slice_sample_adaptive_offset_flag[0] ||
1022  s->sh.slice_sample_adaptive_offset_flag[1]) {
1023  if (rx > 0) {
1024  if (lc->ctb_left_flag)
1025  sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(lc);
1026  }
1027  if (ry > 0 && !sao_merge_left_flag) {
1028  if (lc->ctb_up_flag)
1029  sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(lc);
1030  }
1031  }
1032 
1033  for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
1034  int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
1035  s->ps.pps->log2_sao_offset_scale_chroma;
1036 
1037  if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
1038  sao->type_idx[c_idx] = SAO_NOT_APPLIED;
1039  continue;
1040  }
1041 
1042  if (c_idx == 2) {
1043  sao->type_idx[2] = sao->type_idx[1];
1044  sao->eo_class[2] = sao->eo_class[1];
1045  } else {
1046  SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(lc));
1047  }
1048 
1049  if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
1050  continue;
1051 
1052  for (i = 0; i < 4; i++)
1053  SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(lc));
1054 
1055  if (sao->type_idx[c_idx] == SAO_BAND) {
1056  for (i = 0; i < 4; i++) {
1057  if (sao->offset_abs[c_idx][i]) {
1058  SET_SAO(offset_sign[c_idx][i],
1059  ff_hevc_sao_offset_sign_decode(lc));
1060  } else {
1061  sao->offset_sign[c_idx][i] = 0;
1062  }
1063  }
1064  SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(lc));
1065  } else if (c_idx != 2) {
1066  SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(lc));
1067  }
1068 
1069  // Inferred parameters
1070  sao->offset_val[c_idx][0] = 0;
1071  for (i = 0; i < 4; i++) {
1072  sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i];
1073  if (sao->type_idx[c_idx] == SAO_EDGE) {
1074  if (i > 1)
1075  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1076  } else if (sao->offset_sign[c_idx][i]) {
1077  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1078  }
1079  sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
1080  }
1081  }
1082 }
1083 
1084 #undef SET_SAO
1085 #undef CTB
1086 
1087 static int hls_cross_component_pred(HEVCLocalContext *lc, int idx)
1088 {
1089  int log2_res_scale_abs_plus1 = ff_hevc_log2_res_scale_abs(lc, idx);
1090 
1091  if (log2_res_scale_abs_plus1 != 0) {
1092  int res_scale_sign_flag = ff_hevc_res_scale_sign_flag(lc, idx);
1093  lc->tu.res_scale_val = (1 << (log2_res_scale_abs_plus1 - 1)) *
1094  (1 - 2 * res_scale_sign_flag);
1095  } else {
1096  lc->tu.res_scale_val = 0;
1097  }
1098 
1099 
1100  return 0;
1101 }
1102 
1103 static int hls_transform_unit(HEVCLocalContext *lc, int x0, int y0,
1104  int xBase, int yBase, int cb_xBase, int cb_yBase,
1105  int log2_cb_size, int log2_trafo_size,
1106  int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
1107 {
1108  const HEVCContext *const s = lc->parent;
1109  const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
1110  int i;
1111 
1112  if (lc->cu.pred_mode == MODE_INTRA) {
1113  int trafo_size = 1 << log2_trafo_size;
1114  ff_hevc_set_neighbour_available(lc, x0, y0, trafo_size, trafo_size);
1115 
1116  s->hpc.intra_pred[log2_trafo_size - 2](lc, x0, y0, 0);
1117  }
1118 
1119  if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
1120  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1121  int scan_idx = SCAN_DIAG;
1122  int scan_idx_c = SCAN_DIAG;
1123  int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
1124  (s->ps.sps->chroma_format_idc == 2 &&
1125  (cbf_cb[1] || cbf_cr[1]));
1126 
1127  if (s->ps.pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) {
1128  lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(lc);
1129  if (lc->tu.cu_qp_delta != 0)
1130  if (ff_hevc_cu_qp_delta_sign_flag(lc) == 1)
1131  lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta;
1132  lc->tu.is_cu_qp_delta_coded = 1;
1133 
1134  if (lc->tu.cu_qp_delta < -(26 + s->ps.sps->qp_bd_offset / 2) ||
1135  lc->tu.cu_qp_delta > (25 + s->ps.sps->qp_bd_offset / 2)) {
1136  av_log(s->avctx, AV_LOG_ERROR,
1137  "The cu_qp_delta %d is outside the valid range "
1138  "[%d, %d].\n",
1139  lc->tu.cu_qp_delta,
1140  -(26 + s->ps.sps->qp_bd_offset / 2),
1141  (25 + s->ps.sps->qp_bd_offset / 2));
1142  return AVERROR_INVALIDDATA;
1143  }
1144 
1145  ff_hevc_set_qPy(lc, cb_xBase, cb_yBase, log2_cb_size);
1146  }
1147 
1148  if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma &&
1149  !lc->cu.cu_transquant_bypass_flag) {
1150  int cu_chroma_qp_offset_flag = ff_hevc_cu_chroma_qp_offset_flag(lc);
1151  if (cu_chroma_qp_offset_flag) {
1152  int cu_chroma_qp_offset_idx = 0;
1153  if (s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) {
1154  cu_chroma_qp_offset_idx = ff_hevc_cu_chroma_qp_offset_idx(lc);
1155  av_log(s->avctx, AV_LOG_ERROR,
1156  "cu_chroma_qp_offset_idx not yet tested.\n");
1157  }
1158  lc->tu.cu_qp_offset_cb = s->ps.pps->cb_qp_offset_list[cu_chroma_qp_offset_idx];
1159  lc->tu.cu_qp_offset_cr = s->ps.pps->cr_qp_offset_list[cu_chroma_qp_offset_idx];
1160  } else {
1161  lc->tu.cu_qp_offset_cb = 0;
1162  lc->tu.cu_qp_offset_cr = 0;
1163  }
1165  }
1166 
1167  if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) {
1168  if (lc->tu.intra_pred_mode >= 6 &&
1169  lc->tu.intra_pred_mode <= 14) {
1170  scan_idx = SCAN_VERT;
1171  } else if (lc->tu.intra_pred_mode >= 22 &&
1172  lc->tu.intra_pred_mode <= 30) {
1173  scan_idx = SCAN_HORIZ;
1174  }
1175 
1176  if (lc->tu.intra_pred_mode_c >= 6 &&
1177  lc->tu.intra_pred_mode_c <= 14) {
1178  scan_idx_c = SCAN_VERT;
1179  } else if (lc->tu.intra_pred_mode_c >= 22 &&
1180  lc->tu.intra_pred_mode_c <= 30) {
1181  scan_idx_c = SCAN_HORIZ;
1182  }
1183  }
1184 
1185  lc->tu.cross_pf = 0;
1186 
1187  if (cbf_luma)
1188  ff_hevc_hls_residual_coding(lc, x0, y0, log2_trafo_size, scan_idx, 0);
1189  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1190  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1191  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1192  lc->tu.cross_pf = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma &&
1193  (lc->cu.pred_mode == MODE_INTER ||
1194  (lc->tu.chroma_mode_c == 4)));
1195 
1196  if (lc->tu.cross_pf) {
1197  hls_cross_component_pred(lc, 0);
1198  }
1199  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1200  if (lc->cu.pred_mode == MODE_INTRA) {
1201  ff_hevc_set_neighbour_available(lc, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1202  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (i << log2_trafo_size_c), 1);
1203  }
1204  if (cbf_cb[i])
1205  ff_hevc_hls_residual_coding(lc, x0, y0 + (i << log2_trafo_size_c),
1206  log2_trafo_size_c, scan_idx_c, 1);
1207  else
1208  if (lc->tu.cross_pf) {
1209  ptrdiff_t stride = s->frame->linesize[1];
1210  int hshift = s->ps.sps->hshift[1];
1211  int vshift = s->ps.sps->vshift[1];
1212  const int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1213  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1214  int size = 1 << log2_trafo_size_c;
1215 
1216  uint8_t *dst = &s->frame->data[1][(y0 >> vshift) * stride +
1217  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1218  for (i = 0; i < (size * size); i++) {
1219  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1220  }
1221  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1222  }
1223  }
1224 
1225  if (lc->tu.cross_pf) {
1226  hls_cross_component_pred(lc, 1);
1227  }
1228  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1229  if (lc->cu.pred_mode == MODE_INTRA) {
1230  ff_hevc_set_neighbour_available(lc, x0, y0 + (i << log2_trafo_size_c),
1231  trafo_size_h, trafo_size_v);
1232  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (i << log2_trafo_size_c), 2);
1233  }
1234  if (cbf_cr[i])
1235  ff_hevc_hls_residual_coding(lc, x0, y0 + (i << log2_trafo_size_c),
1236  log2_trafo_size_c, scan_idx_c, 2);
1237  else
1238  if (lc->tu.cross_pf) {
1239  ptrdiff_t stride = s->frame->linesize[2];
1240  int hshift = s->ps.sps->hshift[2];
1241  int vshift = s->ps.sps->vshift[2];
1242  const int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1243  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1244  int size = 1 << log2_trafo_size_c;
1245 
1246  uint8_t *dst = &s->frame->data[2][(y0 >> vshift) * stride +
1247  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1248  for (i = 0; i < (size * size); i++) {
1249  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1250  }
1251  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1252  }
1253  }
1254  } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) {
1255  int trafo_size_h = 1 << (log2_trafo_size + 1);
1256  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1257  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1258  if (lc->cu.pred_mode == MODE_INTRA) {
1259  ff_hevc_set_neighbour_available(lc, xBase, yBase + (i << log2_trafo_size),
1260  trafo_size_h, trafo_size_v);
1261  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (i << log2_trafo_size), 1);
1262  }
1263  if (cbf_cb[i])
1264  ff_hevc_hls_residual_coding(lc, xBase, yBase + (i << log2_trafo_size),
1265  log2_trafo_size, scan_idx_c, 1);
1266  }
1267  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1268  if (lc->cu.pred_mode == MODE_INTRA) {
1269  ff_hevc_set_neighbour_available(lc, xBase, yBase + (i << log2_trafo_size),
1270  trafo_size_h, trafo_size_v);
1271  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (i << log2_trafo_size), 2);
1272  }
1273  if (cbf_cr[i])
1274  ff_hevc_hls_residual_coding(lc, xBase, yBase + (i << log2_trafo_size),
1275  log2_trafo_size, scan_idx_c, 2);
1276  }
1277  }
1278  } else if (s->ps.sps->chroma_format_idc && lc->cu.pred_mode == MODE_INTRA) {
1279  if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) {
1280  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1281  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1282  ff_hevc_set_neighbour_available(lc, x0, y0, trafo_size_h, trafo_size_v);
1283  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0, 1);
1284  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0, 2);
1285  if (s->ps.sps->chroma_format_idc == 2) {
1286  ff_hevc_set_neighbour_available(lc, x0, y0 + (1 << log2_trafo_size_c),
1287  trafo_size_h, trafo_size_v);
1288  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (1 << log2_trafo_size_c), 1);
1289  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (1 << log2_trafo_size_c), 2);
1290  }
1291  } else if (blk_idx == 3) {
1292  int trafo_size_h = 1 << (log2_trafo_size + 1);
1293  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1294  ff_hevc_set_neighbour_available(lc, xBase, yBase,
1295  trafo_size_h, trafo_size_v);
1296  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase, 1);
1297  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase, 2);
1298  if (s->ps.sps->chroma_format_idc == 2) {
1299  ff_hevc_set_neighbour_available(lc, xBase, yBase + (1 << log2_trafo_size),
1300  trafo_size_h, trafo_size_v);
1301  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (1 << log2_trafo_size), 1);
1302  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (1 << log2_trafo_size), 2);
1303  }
1304  }
1305  }
1306 
1307  return 0;
1308 }
1309 
1310 static void set_deblocking_bypass(const HEVCContext *s, int x0, int y0, int log2_cb_size)
1311 {
1312  int cb_size = 1 << log2_cb_size;
1313  int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
1314 
1315  int min_pu_width = s->ps.sps->min_pu_width;
1316  int x_end = FFMIN(x0 + cb_size, s->ps.sps->width);
1317  int y_end = FFMIN(y0 + cb_size, s->ps.sps->height);
1318  int i, j;
1319 
1320  for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
1321  for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
1322  s->is_pcm[i + j * min_pu_width] = 2;
1323 }
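/* Editor's note: set_deblocking_bypass() above marks every minimum-PU position
 * covered by the coding block (clipped to the picture size with FFMIN) with the
 * value 2 in is_pcm, so the in-loop filter stage can recognise
 * cu_transquant_bypass blocks, similarly to PCM blocks, and skip filtering them
 * where required. */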
1324 
1325 static int hls_transform_tree(HEVCLocalContext *lc, int x0, int y0,
1326  int xBase, int yBase, int cb_xBase, int cb_yBase,
1327  int log2_cb_size, int log2_trafo_size,
1328  int trafo_depth, int blk_idx,
1329  const int *base_cbf_cb, const int *base_cbf_cr)
1330 {
1331  const HEVCContext *const s = lc->parent;
1332  uint8_t split_transform_flag;
1333  int cbf_cb[2];
1334  int cbf_cr[2];
1335  int ret;
1336 
1337  cbf_cb[0] = base_cbf_cb[0];
1338  cbf_cb[1] = base_cbf_cb[1];
1339  cbf_cr[0] = base_cbf_cr[0];
1340  cbf_cr[1] = base_cbf_cr[1];
1341 
1342  if (lc->cu.intra_split_flag) {
1343  if (trafo_depth == 1) {
1344  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[blk_idx];
1345  if (s->ps.sps->chroma_format_idc == 3) {
1346  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[blk_idx];
1347  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[blk_idx];
1348  } else {
1349  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1350  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1351  }
1352  }
1353  } else {
1354  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[0];
1355  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1356  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1357  }
1358 
1359  if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
1360  log2_trafo_size > s->ps.sps->log2_min_tb_size &&
1361  trafo_depth < lc->cu.max_trafo_depth &&
1362  !(lc->cu.intra_split_flag && trafo_depth == 0)) {
1363  split_transform_flag = ff_hevc_split_transform_flag_decode(lc, log2_trafo_size);
1364  } else {
1365  int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&
1366  lc->cu.pred_mode == MODE_INTER &&
1367  lc->cu.part_mode != PART_2Nx2N &&
1368  trafo_depth == 0;
1369 
1370  split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||
1371  (lc->cu.intra_split_flag && trafo_depth == 0) ||
1372  inter_split;
1373  }
1374 
1375  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1376  if (trafo_depth == 0 || cbf_cb[0]) {
1377  cbf_cb[0] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1378  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1379  cbf_cb[1] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1380  }
1381  }
1382 
1383  if (trafo_depth == 0 || cbf_cr[0]) {
1384  cbf_cr[0] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1385  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1386  cbf_cr[1] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1387  }
1388  }
1389  }
1390 
1391  if (split_transform_flag) {
1392  const int trafo_size_split = 1 << (log2_trafo_size - 1);
1393  const int x1 = x0 + trafo_size_split;
1394  const int y1 = y0 + trafo_size_split;
1395 
1396 #define SUBDIVIDE(x, y, idx) \
1397 do { \
1398  ret = hls_transform_tree(lc, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size,\
1399  log2_trafo_size - 1, trafo_depth + 1, idx, \
1400  cbf_cb, cbf_cr); \
1401  if (ret < 0) \
1402  return ret; \
1403 } while (0)
1404 
1405  SUBDIVIDE(x0, y0, 0);
1406  SUBDIVIDE(x1, y0, 1);
1407  SUBDIVIDE(x0, y1, 2);
1408  SUBDIVIDE(x1, y1, 3);
1409 
1410 #undef SUBDIVIDE
1411  } else {
1412  int min_tu_size = 1 << s->ps.sps->log2_min_tb_size;
1413  int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
1414  int min_tu_width = s->ps.sps->min_tb_width;
1415  int cbf_luma = 1;
1416 
1417  if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 ||
1418  cbf_cb[0] || cbf_cr[0] ||
1419  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1420  cbf_luma = ff_hevc_cbf_luma_decode(lc, trafo_depth);
1421  }
1422 
1423  ret = hls_transform_unit(lc, x0, y0, xBase, yBase, cb_xBase, cb_yBase,
1424  log2_cb_size, log2_trafo_size,
1425  blk_idx, cbf_luma, cbf_cb, cbf_cr);
1426  if (ret < 0)
1427  return ret;
1428  // TODO: store cbf_luma somewhere else
1429  if (cbf_luma) {
1430  int i, j;
1431  for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
1432  for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
1433  int x_tu = (x0 + j) >> log2_min_tu_size;
1434  int y_tu = (y0 + i) >> log2_min_tu_size;
1435  s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
1436  }
1437  }
1438  if (!s->sh.disable_deblocking_filter_flag) {
1439  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_trafo_size);
1440  if (s->ps.pps->transquant_bypass_enable_flag &&
1441  lc->cu.cu_transquant_bypass_flag)
1442  set_deblocking_bypass(s, x0, y0, log2_trafo_size);
1443  }
1444  }
1445  return 0;
1446 }
1447 
1448 static int hls_pcm_sample(HEVCLocalContext *lc, int x0, int y0, int log2_cb_size)
1449 {
1450  const HEVCContext *const s = lc->parent;
1451  GetBitContext gb;
1452  int cb_size = 1 << log2_cb_size;
1453  ptrdiff_t stride0 = s->frame->linesize[0];
1454  ptrdiff_t stride1 = s->frame->linesize[1];
1455  ptrdiff_t stride2 = s->frame->linesize[2];
1456  uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
1457  uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
1458  uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];
1459 
1460  int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
1461  (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
1462  ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
1463  s->ps.sps->pcm.bit_depth_chroma;
1464  const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);
1465  int ret;
1466 
1467  if (!s->sh.disable_deblocking_filter_flag)
1468  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_cb_size);
1469 
1470  ret = init_get_bits(&gb, pcm, length);
1471  if (ret < 0)
1472  return ret;
1473 
1474  s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
1475  if (s->ps.sps->chroma_format_idc) {
1476  s->hevcdsp.put_pcm(dst1, stride1,
1477  cb_size >> s->ps.sps->hshift[1],
1478  cb_size >> s->ps.sps->vshift[1],
1479  &gb, s->ps.sps->pcm.bit_depth_chroma);
1480  s->hevcdsp.put_pcm(dst2, stride2,
1481  cb_size >> s->ps.sps->hshift[2],
1482  cb_size >> s->ps.sps->vshift[2],
1483  &gb, s->ps.sps->pcm.bit_depth_chroma);
1484  }
1485 
1486  return 0;
1487 }
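/* Editor's note: PCM sample data is byte-aligned in the bitstream, so
 * hls_pcm_sample() above advances the CABAC engine past it with skip_bytes(),
 * re-reads the same bytes through a local GetBitContext, and copies the raw luma
 * (and, when chroma is present, chroma) samples into the frame via put_pcm(). */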
1488 
1489 /**
1490  * 8.5.3.2.2.1 Luma sample unidirectional interpolation process
1491  *
1492  * @param lc HEVC local decoding context
1493  * @param dst target buffer for block data at block position
1494  * @param dststride stride of the dst buffer
1495  * @param ref reference picture buffer at origin (0, 0)
1496  * @param mv motion vector (relative to block position) to get pixel data from
1497  * @param x_off horizontal position of block from origin (0, 0)
1498  * @param y_off vertical position of block from origin (0, 0)
1499  * @param block_w width of block
1500  * @param block_h height of block
1501  * @param luma_weight weighting factor applied to the luma prediction
1502  * @param luma_offset additive offset applied to the luma prediction value
1503  */
1504 
1505 static void luma_mc_uni(HEVCLocalContext *lc, uint8_t *dst, ptrdiff_t dststride,
1506  const AVFrame *ref, const Mv *mv, int x_off, int y_off,
1507  int block_w, int block_h, int luma_weight, int luma_offset)
1508 {
1509  const HEVCContext *const s = lc->parent;
1510  const uint8_t *src = ref->data[0];
1511  ptrdiff_t srcstride = ref->linesize[0];
1512  int pic_width = s->ps.sps->width;
1513  int pic_height = s->ps.sps->height;
1514  int mx = mv->x & 3;
1515  int my = mv->y & 3;
1516  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1517  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1518  int idx = hevc_pel_weight[block_w];
1519 
1520  x_off += mv->x >> 2;
1521  y_off += mv->y >> 2;
1522  src += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1523 
1524  if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER ||
1525  x_off >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1526  y_off >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1527  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1528  int offset = QPEL_EXTRA_BEFORE * srcstride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1529  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1530 
1531  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset,
1532  edge_emu_stride, srcstride,
1533  block_w + QPEL_EXTRA,
1534  block_h + QPEL_EXTRA,
1535  x_off - QPEL_EXTRA_BEFORE, y_off - QPEL_EXTRA_BEFORE,
1536  pic_width, pic_height);
1537  src = lc->edge_emu_buffer + buf_offset;
1538  srcstride = edge_emu_stride;
1539  }
1540 
1541  if (!weight_flag)
1542  s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
1543  block_h, mx, my, block_w);
1544  else
1545  s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride,
1546  block_h, s->sh.luma_log2_weight_denom,
1547  luma_weight, luma_offset, mx, my, block_w);
1548 }
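/* Editor's note: in luma_mc_uni() above, when the block plus the QPEL_EXTRA
 * filter margin would read outside the picture, the source is first copied into
 * lc->edge_emu_buffer by emulated_edge_mc() and the interpolation then runs on
 * that padded copy; the weighted variant of the DSP call is chosen whenever
 * weighted (bi)prediction is enabled for the slice type. */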
1549 
1550 /**
1551  * 8.5.3.2.2.1 Luma sample bidirectional interpolation process
1552  *
1553  * @param lc HEVC local decoding context
1554  * @param dst target buffer for block data at block position
1555  * @param dststride stride of the dst buffer
1556  * @param ref0 reference picture0 buffer at origin (0, 0)
1557  * @param mv0 motion vector0 (relative to block position) to get pixel data from
1558  * @param x_off horizontal position of block from origin (0, 0)
1559  * @param y_off vertical position of block from origin (0, 0)
1560  * @param block_w width of block
1561  * @param block_h height of block
1562  * @param ref1 reference picture1 buffer at origin (0, 0)
1563  * @param mv1 motion vector1 (relative to block position) to get pixel data from
1564  * @param current_mv current motion vector structure
1565  */
1566  static void luma_mc_bi(HEVCLocalContext *lc, uint8_t *dst, ptrdiff_t dststride,
1567  const AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
1568  int block_w, int block_h, const AVFrame *ref1,
1569  const Mv *mv1, struct MvField *current_mv)
1570 {
1571  const HEVCContext *const s = lc->parent;
1572  ptrdiff_t src0stride = ref0->linesize[0];
1573  ptrdiff_t src1stride = ref1->linesize[0];
1574  int pic_width = s->ps.sps->width;
1575  int pic_height = s->ps.sps->height;
1576  int mx0 = mv0->x & 3;
1577  int my0 = mv0->y & 3;
1578  int mx1 = mv1->x & 3;
1579  int my1 = mv1->y & 3;
1580  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1581  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1582  int x_off0 = x_off + (mv0->x >> 2);
1583  int y_off0 = y_off + (mv0->y >> 2);
1584  int x_off1 = x_off + (mv1->x >> 2);
1585  int y_off1 = y_off + (mv1->y >> 2);
1586  int idx = hevc_pel_weight[block_w];
1587 
1588  const uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1589  const uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1590 
1591  if (x_off0 < QPEL_EXTRA_BEFORE || y_off0 < QPEL_EXTRA_AFTER ||
1592  x_off0 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1593  y_off0 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1594  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1595  int offset = QPEL_EXTRA_BEFORE * src0stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1596  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1597 
1598  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset,
1599  edge_emu_stride, src0stride,
1600  block_w + QPEL_EXTRA,
1601  block_h + QPEL_EXTRA,
1602  x_off0 - QPEL_EXTRA_BEFORE, y_off0 - QPEL_EXTRA_BEFORE,
1603  pic_width, pic_height);
1604  src0 = lc->edge_emu_buffer + buf_offset;
1605  src0stride = edge_emu_stride;
1606  }
1607 
1608  if (x_off1 < QPEL_EXTRA_BEFORE || y_off1 < QPEL_EXTRA_AFTER ||
1609  x_off1 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1610  y_off1 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1611  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1612  int offset = QPEL_EXTRA_BEFORE * src1stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1613  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1614 
1615  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src1 - offset,
1616  edge_emu_stride, src1stride,
1617  block_w + QPEL_EXTRA,
1618  block_h + QPEL_EXTRA,
1619  x_off1 - QPEL_EXTRA_BEFORE, y_off1 - QPEL_EXTRA_BEFORE,
1620  pic_width, pic_height);
1621  src1 = lc->edge_emu_buffer2 + buf_offset;
1622  src1stride = edge_emu_stride;
1623  }
1624 
1625  s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride,
1626  block_h, mx0, my0, block_w);
1627  if (!weight_flag)
1628  s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1629  block_h, mx1, my1, block_w);
1630  else
1631  s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1632  block_h, s->sh.luma_log2_weight_denom,
1633  s->sh.luma_weight_l0[current_mv->ref_idx[0]],
1634  s->sh.luma_weight_l1[current_mv->ref_idx[1]],
1635  s->sh.luma_offset_l0[current_mv->ref_idx[0]],
1636  s->sh.luma_offset_l1[current_mv->ref_idx[1]],
1637  mx1, my1, block_w);
1638 
1639 }
1640 
1641 /**
1642  * 8.5.3.2.2.2 Chroma sample uniprediction interpolation process
1643  *
1644  * @param lc HEVC local decoding context
1645  * @param dst0 target buffer for block data at block position
1646  * @param dststride stride of the dst0 buffer
1647  * @param src0 reference chroma plane buffer at origin (0, 0)
1648  * @param srcstride stride of the src0 buffer
1649  * @param reflist reference picture list (L0 or L1) the motion vector is taken from
1650  * @param x_off horizontal position of block from origin (0, 0)
1651  * @param y_off vertical position of block from origin (0, 0)
1652  * @param block_w width of block
1653  * @param block_h height of block
1654  * @param current_mv current motion vector structure
1655  * @param chroma_weight weighting factor applied to the chroma prediction
1656  * @param chroma_offset additive offset applied to the chroma prediction value
1657  */
1658 static void chroma_mc_uni(HEVCLocalContext *lc, uint8_t *dst0,
1659  ptrdiff_t dststride, const uint8_t *src0, ptrdiff_t srcstride, int reflist,
1660  int x_off, int y_off, int block_w, int block_h,
1661  const struct MvField *current_mv, int chroma_weight, int chroma_offset)
1662 {
1663  const HEVCContext *const s = lc->parent;
1664  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1665  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1666  const Mv *mv = &current_mv->mv[reflist];
1667  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1668  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1669  int idx = hevc_pel_weight[block_w];
1670  int hshift = s->ps.sps->hshift[1];
1671  int vshift = s->ps.sps->vshift[1];
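 /* Chroma motion vectors inherit the luma quarter-pel vector but address a
  * subsampled plane, so they effectively carry 2 + hshift fractional bits
  * (eighth-pel for 4:2:0).  av_mod_uintp2() keeps the fractional phase and the
  * shift below yields the integer chroma offset; the phase is then rescaled to
  * eighths for the 4-tap EPEL filter.  E.g. with 4:2:0 and mv->x = 11:
  * phase = 11 & 7 = 3, integer offset = 11 >> 3 = 1. */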
1672  intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift);
1673  intptr_t my = av_mod_uintp2(mv->y, 2 + vshift);
1674  intptr_t _mx = mx << (1 - hshift);
1675  intptr_t _my = my << (1 - vshift);
1676 
1677  x_off += mv->x >> (2 + hshift);
1678  y_off += mv->y >> (2 + vshift);
1679  src0 += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1680 
1681  if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER ||
1682  x_off >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1683  y_off >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1684  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1685  int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift));
1686  int buf_offset0 = EPEL_EXTRA_BEFORE *
1687  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1688  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset0,
1689  edge_emu_stride, srcstride,
1690  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1691  x_off - EPEL_EXTRA_BEFORE,
1692  y_off - EPEL_EXTRA_BEFORE,
1693  pic_width, pic_height);
1694 
1695  src0 = lc->edge_emu_buffer + buf_offset0;
1696  srcstride = edge_emu_stride;
1697  }
1698  if (!weight_flag)
1699  s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1700  block_h, _mx, _my, block_w);
1701  else
1702  s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1703  block_h, s->sh.chroma_log2_weight_denom,
1704  chroma_weight, chroma_offset, _mx, _my, block_w);
1705 }
1706 
1707 /**
1708  * 8.5.3.2.2.2 Chroma sample bidirectional interpolation process
1709  *
1710  * @param lc HEVC local decoding context
1711  * @param dst0 target buffer for block data at block position
1712  * @param dststride stride of the dst0 buffer
1713  * @param ref0 reference picture0 buffer at origin (0, 0)
1714  * @param ref1 reference picture1 buffer at origin (0, 0)
1715  * @param x_off horizontal position of block from origin (0, 0)
1716  * @param y_off vertical position of block from origin (0, 0)
1717  * @param block_w width of block
1718  * @param block_h height of block
1719  * @param current_mv current motion vector structure, carrying the L0 and L1
1720  * motion vectors and the reference indices used to select the
1721  * per-list weights and offsets for this block
1722  * @param cidx chroma component (0 for cb, 1 for cr)
1723  */
1724 static void chroma_mc_bi(HEVCLocalContext *lc, uint8_t *dst0, ptrdiff_t dststride,
1725  const AVFrame *ref0, const AVFrame *ref1,
1726  int x_off, int y_off, int block_w, int block_h, const MvField *current_mv, int cidx)
1727 {
1728  const HEVCContext *const s = lc->parent;
1729  const uint8_t *src1 = ref0->data[cidx+1];
1730  const uint8_t *src2 = ref1->data[cidx+1];
1731  ptrdiff_t src1stride = ref0->linesize[cidx+1];
1732  ptrdiff_t src2stride = ref1->linesize[cidx+1];
1733  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1734  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1735  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1736  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1737  const Mv *const mv0 = &current_mv->mv[0];
1738  const Mv *const mv1 = &current_mv->mv[1];
1739  int hshift = s->ps.sps->hshift[1];
1740  int vshift = s->ps.sps->vshift[1];
1741 
1742  intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift);
1743  intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift);
1744  intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift);
1745  intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift);
1746  intptr_t _mx0 = mx0 << (1 - hshift);
1747  intptr_t _my0 = my0 << (1 - vshift);
1748  intptr_t _mx1 = mx1 << (1 - hshift);
1749  intptr_t _my1 = my1 << (1 - vshift);
1750 
1751  int x_off0 = x_off + (mv0->x >> (2 + hshift));
1752  int y_off0 = y_off + (mv0->y >> (2 + vshift));
1753  int x_off1 = x_off + (mv1->x >> (2 + hshift));
1754  int y_off1 = y_off + (mv1->y >> (2 + vshift));
1755  int idx = hevc_pel_weight[block_w];
1756  src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1757  src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1758 
1759  if (x_off0 < EPEL_EXTRA_BEFORE || y_off0 < EPEL_EXTRA_AFTER ||
1760  x_off0 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1761  y_off0 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1762  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1763  int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->ps.sps->pixel_shift));
1764  int buf_offset1 = EPEL_EXTRA_BEFORE *
1765  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1766 
1767  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1,
1768  edge_emu_stride, src1stride,
1769  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1770  x_off0 - EPEL_EXTRA_BEFORE,
1771  y_off0 - EPEL_EXTRA_BEFORE,
1772  pic_width, pic_height);
1773 
1774  src1 = lc->edge_emu_buffer + buf_offset1;
1775  src1stride = edge_emu_stride;
1776  }
1777 
1778  if (x_off1 < EPEL_EXTRA_BEFORE || y_off1 < EPEL_EXTRA_AFTER ||
1779  x_off1 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1780  y_off1 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1781  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1782  int offset1 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->ps.sps->pixel_shift));
1783  int buf_offset1 = EPEL_EXTRA_BEFORE *
1784  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1785 
1786  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src2 - offset1,
1787  edge_emu_stride, src2stride,
1788  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1789  x_off1 - EPEL_EXTRA_BEFORE,
1790  y_off1 - EPEL_EXTRA_BEFORE,
1791  pic_width, pic_height);
1792 
1793  src2 = lc->edge_emu_buffer2 + buf_offset1;
1794  src2stride = edge_emu_stride;
1795  }
1796 
1797  s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride,
1798  block_h, _mx0, _my0, block_w);
1799  if (!weight_flag)
1800  s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1801  src2, src2stride, lc->tmp,
1802  block_h, _mx1, _my1, block_w);
1803  else
1804  s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1805  src2, src2stride, lc->tmp,
1806  block_h,
1807  s->sh.chroma_log2_weight_denom,
1808  s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx],
1809  s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx],
1810  s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx],
1811  s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx],
1812  _mx1, _my1, block_w);
1813 }
1814 
1815 static void hevc_await_progress(const HEVCContext *s, const HEVCFrame *ref,
1816  const Mv *mv, int y0, int height)
1817 {
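  /* With frame threading, make sure the reference picture has been decoded at
   * least down to the lowest row this prediction will read, plus a small
   * safety margin for the sub-pel interpolation taps. */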
1818  if (s->threads_type == FF_THREAD_FRAME ) {
1819  int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
1820 
1821  ff_thread_await_progress(&ref->tf, y, 0);
1822  }
1823 }
1824 
1825 static void hevc_luma_mv_mvp_mode(HEVCLocalContext *lc, int x0, int y0, int nPbW,
1826  int nPbH, int log2_cb_size, int part_idx,
1827  int merge_idx, MvField *mv)
1828 {
1829  const HEVCContext *const s = lc->parent;
1830  enum InterPredIdc inter_pred_idc = PRED_L0;
1831  int mvp_flag;
1832 
1833  ff_hevc_set_neighbour_available(lc, x0, y0, nPbW, nPbH);
1834  mv->pred_flag = 0;
1835  if (s->sh.slice_type == HEVC_SLICE_B)
1836  inter_pred_idc = ff_hevc_inter_pred_idc_decode(lc, nPbW, nPbH);
1837 
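 /* AMVP (non-merge) mode: for each reference list that is actually used,
  * decode the reference index, the motion vector difference and the MVP
  * candidate flag, then reconstruct the final vector as predictor + MVD. */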
1838  if (inter_pred_idc != PRED_L1) {
1839  if (s->sh.nb_refs[L0])
1840  mv->ref_idx[0]= ff_hevc_ref_idx_lx_decode(lc, s->sh.nb_refs[L0]);
1841 
1842  mv->pred_flag = PF_L0;
1843  ff_hevc_hls_mvd_coding(lc, x0, y0, 0);
1844  mvp_flag = ff_hevc_mvp_lx_flag_decode(lc);
1845  ff_hevc_luma_mv_mvp_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1846  part_idx, merge_idx, mv, mvp_flag, 0);
1847  mv->mv[0].x += lc->pu.mvd.x;
1848  mv->mv[0].y += lc->pu.mvd.y;
1849  }
1850 
1851  if (inter_pred_idc != PRED_L0) {
1852  if (s->sh.nb_refs[L1])
1853  mv->ref_idx[1]= ff_hevc_ref_idx_lx_decode(lc, s->sh.nb_refs[L1]);
1854 
1855  if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {
1856  AV_ZERO32(&lc->pu.mvd);
1857  } else {
1858  ff_hevc_hls_mvd_coding(lc, x0, y0, 1);
1859  }
1860 
1861  mv->pred_flag += PF_L1;
1862  mvp_flag = ff_hevc_mvp_lx_flag_decode(lc);
1863  ff_hevc_luma_mv_mvp_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1864  part_idx, merge_idx, mv, mvp_flag, 1);
1865  mv->mv[1].x += lc->pu.mvd.x;
1866  mv->mv[1].y += lc->pu.mvd.y;
1867  }
1868 }
1869 
1870 static void hls_prediction_unit(HEVCLocalContext *lc, int x0, int y0,
1871  int nPbW, int nPbH,
1872  int log2_cb_size, int partIdx, int idx)
1873 {
1874 #define POS(c_idx, x, y) \
1875  &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
1876  (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
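 /* POS(c_idx, x, y) resolves to the address of sample (x, y) in plane c_idx of
  * the current frame, accounting for chroma subsampling and the
  * bytes-per-sample shift. */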
1877  const HEVCContext *const s = lc->parent;
1878  int merge_idx = 0;
1879  struct MvField current_mv = {{{ 0 }}};
1880 
1881  int min_pu_width = s->ps.sps->min_pu_width;
1882 
1883  MvField *tab_mvf = s->ref->tab_mvf;
1884  const RefPicList *refPicList = s->ref->refPicList;
1885  const HEVCFrame *ref0 = NULL, *ref1 = NULL;
1886  uint8_t *dst0 = POS(0, x0, y0);
1887  uint8_t *dst1 = POS(1, x0, y0);
1888  uint8_t *dst2 = POS(2, x0, y0);
1889  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
1890  int min_cb_width = s->ps.sps->min_cb_width;
1891  int x_cb = x0 >> log2_min_cb_size;
1892  int y_cb = y0 >> log2_min_cb_size;
1893  int x_pu, y_pu;
1894  int i, j;
1895 
1896  int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb);
1897 
1898  if (!skip_flag)
1899  lc->pu.merge_flag = ff_hevc_merge_flag_decode(lc);
1900 
1901  if (skip_flag || lc->pu.merge_flag) {
1902  if (s->sh.max_num_merge_cand > 1)
1903  merge_idx = ff_hevc_merge_idx_decode(lc);
1904  else
1905  merge_idx = 0;
1906 
1907  ff_hevc_luma_mv_merge_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1908  partIdx, merge_idx, &current_mv);
1909  } else {
1910  hevc_luma_mv_mvp_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1911  partIdx, merge_idx, &current_mv);
1912  }
1913 
1914  x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1915  y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1916 
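 /* Store the reconstructed motion data for every 4x4 (minimum PU) position
  * covered by this prediction unit; later PUs, temporal MV prediction and the
  * deblocking filter read their neighbours' motion from this array. */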
1917  for (j = 0; j < nPbH >> s->ps.sps->log2_min_pu_size; j++)
1918  for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++)
1919  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
1920 
1921  if (current_mv.pred_flag & PF_L0) {
1922  ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
1923  if (!ref0)
1924  return;
1925  hevc_await_progress(s, ref0, &current_mv.mv[0], y0, nPbH);
1926  }
1927  if (current_mv.pred_flag & PF_L1) {
1928  ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
1929  if (!ref1)
1930  return;
1931  hevc_await_progress(s, ref1, &current_mv.mv[1], y0, nPbH);
1932  }
1933 
1934  if (current_mv.pred_flag == PF_L0) {
1935  int x0_c = x0 >> s->ps.sps->hshift[1];
1936  int y0_c = y0 >> s->ps.sps->vshift[1];
1937  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1938  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1939 
1940  luma_mc_uni(lc, dst0, s->frame->linesize[0], ref0->frame,
1941  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1942  s->sh.luma_weight_l0[current_mv.ref_idx[0]],
1943  s->sh.luma_offset_l0[current_mv.ref_idx[0]]);
1944 
1945  if (s->ps.sps->chroma_format_idc) {
1946  chroma_mc_uni(lc, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1],
1947  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1948  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]);
1949  chroma_mc_uni(lc, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2],
1950  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1951  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]);
1952  }
1953  } else if (current_mv.pred_flag == PF_L1) {
1954  int x0_c = x0 >> s->ps.sps->hshift[1];
1955  int y0_c = y0 >> s->ps.sps->vshift[1];
1956  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1957  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1958 
1959  luma_mc_uni(lc, dst0, s->frame->linesize[0], ref1->frame,
1960  &current_mv.mv[1], x0, y0, nPbW, nPbH,
1961  s->sh.luma_weight_l1[current_mv.ref_idx[1]],
1962  s->sh.luma_offset_l1[current_mv.ref_idx[1]]);
1963 
1964  if (s->ps.sps->chroma_format_idc) {
1965  chroma_mc_uni(lc, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1],
1966  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1967  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]);
1968 
1969  chroma_mc_uni(lc, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2],
1970  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1971  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]);
1972  }
1973  } else if (current_mv.pred_flag == PF_BI) {
1974  int x0_c = x0 >> s->ps.sps->hshift[1];
1975  int y0_c = y0 >> s->ps.sps->vshift[1];
1976  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1977  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1978 
1979  luma_mc_bi(lc, dst0, s->frame->linesize[0], ref0->frame,
1980  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1981  ref1->frame, &current_mv.mv[1], &current_mv);
1982 
1983  if (s->ps.sps->chroma_format_idc) {
1984  chroma_mc_bi(lc, dst1, s->frame->linesize[1], ref0->frame, ref1->frame,
1985  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);
1986 
1987  chroma_mc_bi(lc, dst2, s->frame->linesize[2], ref0->frame, ref1->frame,
1988  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
1989  }
1990  }
1991 }
1992 
1993 /**
1994  * 8.4.1
1995  */
1996 static int luma_intra_pred_mode(HEVCLocalContext *lc, int x0, int y0, int pu_size,
1997  int prev_intra_luma_pred_flag)
1998 {
1999  const HEVCContext *const s = lc->parent;
2000  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2001  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
2002  int min_pu_width = s->ps.sps->min_pu_width;
2003  int size_in_pus = pu_size >> s->ps.sps->log2_min_pu_size;
2004  int x0b = av_mod_uintp2(x0, s->ps.sps->log2_ctb_size);
2005  int y0b = av_mod_uintp2(y0, s->ps.sps->log2_ctb_size);
2006 
2007  int cand_up = (lc->ctb_up_flag || y0b) ?
2008  s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
2009  int cand_left = (lc->ctb_left_flag || x0b) ?
2010  s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC;
2011 
2012  int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size);
2013 
2014  MvField *tab_mvf = s->ref->tab_mvf;
2015  int intra_pred_mode;
2016  int candidate[3];
2017  int i, j;
2018 
2019  // intra_pred_mode prediction does not cross vertical CTB boundaries
2020  if ((y0 - 1) < y_ctb)
2021  cand_up = INTRA_DC;
2022 
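 /* Build the list of three most probable modes (MPMs): when both neighbours
  * use the same angular mode the list is that mode plus its two adjacent
  * angular modes (wrapping within the angular range), otherwise it is
  * {left, up} plus the first of PLANAR, DC and vertical (26) that is not
  * already present. */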
2023  if (cand_left == cand_up) {
2024  if (cand_left < 2) {
2025  candidate[0] = INTRA_PLANAR;
2026  candidate[1] = INTRA_DC;
2027  candidate[2] = INTRA_ANGULAR_26;
2028  } else {
2029  candidate[0] = cand_left;
2030  candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
2031  candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
2032  }
2033  } else {
2034  candidate[0] = cand_left;
2035  candidate[1] = cand_up;
2036  if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) {
2037  candidate[2] = INTRA_PLANAR;
2038  } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) {
2039  candidate[2] = INTRA_DC;
2040  } else {
2041  candidate[2] = INTRA_ANGULAR_26;
2042  }
2043  }
2044 
2045  if (prev_intra_luma_pred_flag) {
2046  intra_pred_mode = candidate[lc->pu.mpm_idx];
2047  } else {
2048  if (candidate[0] > candidate[1])
2049  FFSWAP(uint8_t, candidate[0], candidate[1]);
2050  if (candidate[0] > candidate[2])
2051  FFSWAP(uint8_t, candidate[0], candidate[2]);
2052  if (candidate[1] > candidate[2])
2053  FFSWAP(uint8_t, candidate[1], candidate[2]);
2054 
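  /* rem_intra_luma_pred_mode indexes the 32 modes left after removing the
   * three (sorted) MPM candidates, so it is bumped once for every candidate it
   * is not below.  Example: candidates {0, 1, 26} and rem mode 3 give
   * 3 -> 4 -> 5, i.e. decoded mode 5. */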
2055  intra_pred_mode = lc->pu.rem_intra_luma_pred_mode;
2056  for (i = 0; i < 3; i++)
2057  if (intra_pred_mode >= candidate[i])
2058  intra_pred_mode++;
2059  }
2060 
2061  /* write the intra prediction units into the mv array */
2062  if (!size_in_pus)
2063  size_in_pus = 1;
2064  for (i = 0; i < size_in_pus; i++) {
2065  memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
2066  intra_pred_mode, size_in_pus);
2067 
2068  for (j = 0; j < size_in_pus; j++) {
2069  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag = PF_INTRA;
2070  }
2071  }
2072 
2073  return intra_pred_mode;
2074 }
2075 
2076 static av_always_inline void set_ct_depth(const HEVCContext *s, int x0, int y0,
2077  int log2_cb_size, int ct_depth)
2078 {
2079  int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size;
2080  int x_cb = x0 >> s->ps.sps->log2_min_cb_size;
2081  int y_cb = y0 >> s->ps.sps->log2_min_cb_size;
2082  int y;
2083 
2084  for (y = 0; y < length; y++)
2085  memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb],
2086  ct_depth, length);
2087 }
2088 
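/* Remapping of the derived chroma intra prediction mode for 4:2:2 material
 * (chroma_format_idc == 2), corresponding to the 4:2:2 mode-mapping table in
 * the HEVC specification. */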
2089 static const uint8_t tab_mode_idx[] = {
2090  0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20,
2091  21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
2092 
2093 static void intra_prediction_unit(HEVCLocalContext *lc, int x0, int y0,
2094  int log2_cb_size)
2095 {
2096  const HEVCContext *const s = lc->parent;
2097  static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
2098  uint8_t prev_intra_luma_pred_flag[4];
2099  int split = lc->cu.part_mode == PART_NxN;
2100  int pb_size = (1 << log2_cb_size) >> split;
2101  int side = split + 1;
2102  int chroma_mode;
2103  int i, j;
2104 
2105  for (i = 0; i < side; i++)
2106  for (j = 0; j < side; j++)
2107  prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(lc);
2108 
2109  for (i = 0; i < side; i++) {
2110  for (j = 0; j < side; j++) {
2111  if (prev_intra_luma_pred_flag[2 * i + j])
2112  lc->pu.mpm_idx = ff_hevc_mpm_idx_decode(lc);
2113  else
2114  lc->pu.rem_intra_luma_pred_mode = ff_hevc_rem_intra_luma_pred_mode_decode(lc);
2115 
2116  lc->pu.intra_pred_mode[2 * i + j] =
2117  luma_intra_pred_mode(lc, x0 + pb_size * j, y0 + pb_size * i, pb_size,
2118  prev_intra_luma_pred_flag[2 * i + j]);
2119  }
2120  }
2121 
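 /* Chroma intra mode: intra_chroma_pred_mode values 0..3 select planar,
  * vertical (26), horizontal (10) or DC via intra_chroma_table[], while 4
  * means "derived", i.e. reuse the luma mode.  An explicit choice that
  * collides with the luma mode is replaced by angular mode 34, and for 4:2:2
  * the result is additionally remapped through tab_mode_idx[]. */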
2122  if (s->ps.sps->chroma_format_idc == 3) {
2123  for (i = 0; i < side; i++) {
2124  for (j = 0; j < side; j++) {
2125  lc->pu.chroma_mode_c[2 * i + j] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(lc);
2126  if (chroma_mode != 4) {
2127  if (lc->pu.intra_pred_mode[2 * i + j] == intra_chroma_table[chroma_mode])
2128  lc->pu.intra_pred_mode_c[2 * i + j] = 34;
2129  else
2130  lc->pu.intra_pred_mode_c[2 * i + j] = intra_chroma_table[chroma_mode];
2131  } else {
2132  lc->pu.intra_pred_mode_c[2 * i + j] = lc->pu.intra_pred_mode[2 * i + j];
2133  }
2134  }
2135  }
2136  } else if (s->ps.sps->chroma_format_idc == 2) {
2137  int mode_idx;
2138  lc->pu.chroma_mode_c[0] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(lc);
2139  if (chroma_mode != 4) {
2140  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2141  mode_idx = 34;
2142  else
2143  mode_idx = intra_chroma_table[chroma_mode];
2144  } else {
2145  mode_idx = lc->pu.intra_pred_mode[0];
2146  }
2147  lc->pu.intra_pred_mode_c[0] = tab_mode_idx[mode_idx];
2148  } else if (s->ps.sps->chroma_format_idc != 0) {
2149  chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(lc);
2150  if (chroma_mode != 4) {
2151  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2152  lc->pu.intra_pred_mode_c[0] = 34;
2153  else
2154  lc->pu.intra_pred_mode_c[0] = intra_chroma_table[chroma_mode];
2155  } else {
2156  lc->pu.intra_pred_mode_c[0] = lc->pu.intra_pred_mode[0];
2157  }
2158  }
2159 }
2160 
2161 static void intra_prediction_unit_default_value(HEVCLocalContext *lc,
2162  int x0, int y0,
2163  int log2_cb_size)
2164 {
2165  const HEVCContext *const s = lc->parent;
2166  int pb_size = 1 << log2_cb_size;
2167  int size_in_pus = pb_size >> s->ps.sps->log2_min_pu_size;
2168  int min_pu_width = s->ps.sps->min_pu_width;
2169  MvField *tab_mvf = s->ref->tab_mvf;
2170  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2171  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
2172  int j, k;
2173 
2174  if (size_in_pus == 0)
2175  size_in_pus = 1;
2176  for (j = 0; j < size_in_pus; j++)
2177  memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
2178  if (lc->cu.pred_mode == MODE_INTRA)
2179  for (j = 0; j < size_in_pus; j++)
2180  for (k = 0; k < size_in_pus; k++)
2181  tab_mvf[(y_pu + j) * min_pu_width + x_pu + k].pred_flag = PF_INTRA;
2182 }
2183 
2184 static int hls_coding_unit(HEVCLocalContext *lc, const HEVCContext *s, int x0, int y0, int log2_cb_size)
2185 {
2186  int cb_size = 1 << log2_cb_size;
2187  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
2188  int length = cb_size >> log2_min_cb_size;
2189  int min_cb_width = s->ps.sps->min_cb_width;
2190  int x_cb = x0 >> log2_min_cb_size;
2191  int y_cb = y0 >> log2_min_cb_size;
2192  int idx = log2_cb_size - 2;
2193  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2194  int x, y, ret;
2195 
2196  lc->cu.x = x0;
2197  lc->cu.y = y0;
2198  lc->cu.pred_mode = MODE_INTRA;
2199  lc->cu.part_mode = PART_2Nx2N;
2200  lc->cu.intra_split_flag = 0;
2201 
2202  SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0;
2203  for (x = 0; x < 4; x++)
2204  lc->pu.intra_pred_mode[x] = 1;
2205  if (s->ps.pps->transquant_bypass_enable_flag) {
2206  lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(lc);
2207  if (lc->cu.cu_transquant_bypass_flag)
2208  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2209  } else
2210  lc->cu.cu_transquant_bypass_flag = 0;
2211 
2212  if (s->sh.slice_type != HEVC_SLICE_I) {
2213  uint8_t skip_flag = ff_hevc_skip_flag_decode(lc, x0, y0, x_cb, y_cb);
2214 
2215  x = y_cb * min_cb_width + x_cb;
2216  for (y = 0; y < length; y++) {
2217  memset(&s->skip_flag[x], skip_flag, length);
2218  x += min_cb_width;
2219  }
2220  lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER;
2221  } else {
2222  x = y_cb * min_cb_width + x_cb;
2223  for (y = 0; y < length; y++) {
2224  memset(&s->skip_flag[x], 0, length);
2225  x += min_cb_width;
2226  }
2227  }
2228 
2229  if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) {
2230  hls_prediction_unit(lc, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2231  intra_prediction_unit_default_value(lc, x0, y0, log2_cb_size);
2232 
2233  if (!s->sh.disable_deblocking_filter_flag)
2234  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_cb_size);
2235  } else {
2236  int pcm_flag = 0;
2237 
2238  if (s->sh.slice_type != HEVC_SLICE_I)
2239  lc->cu.pred_mode = ff_hevc_pred_mode_decode(lc);
2240  if (lc->cu.pred_mode != MODE_INTRA ||
2241  log2_cb_size == s->ps.sps->log2_min_cb_size) {
2242  lc->cu.part_mode = ff_hevc_part_mode_decode(lc, log2_cb_size);
2243  lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN &&
2244  lc->cu.pred_mode == MODE_INTRA;
2245  }
2246 
2247  if (lc->cu.pred_mode == MODE_INTRA) {
2248  if (lc->cu.part_mode == PART_2Nx2N && s->ps.sps->pcm_enabled_flag &&
2249  log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size &&
2250  log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
2251  pcm_flag = ff_hevc_pcm_flag_decode(lc);
2252  }
2253  if (pcm_flag) {
2254  intra_prediction_unit_default_value(lc, x0, y0, log2_cb_size);
2255  ret = hls_pcm_sample(lc, x0, y0, log2_cb_size);
2256  if (s->ps.sps->pcm.loop_filter_disable_flag)
2257  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2258 
2259  if (ret < 0)
2260  return ret;
2261  } else {
2262  intra_prediction_unit(lc, x0, y0, log2_cb_size);
2263  }
2264  } else {
2265  intra_prediction_unit_default_value(lc, x0, y0, log2_cb_size);
2266  switch (lc->cu.part_mode) {
2267  case PART_2Nx2N:
2268  hls_prediction_unit(lc, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2269  break;
2270  case PART_2NxN:
2271  hls_prediction_unit(lc, x0, y0, cb_size, cb_size / 2, log2_cb_size, 0, idx);
2272  hls_prediction_unit(lc, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1, idx);
2273  break;
2274  case PART_Nx2N:
2275  hls_prediction_unit(lc, x0, y0, cb_size / 2, cb_size, log2_cb_size, 0, idx - 1);
2276  hls_prediction_unit(lc, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
2277  break;
2278  case PART_2NxnU:
2279  hls_prediction_unit(lc, x0, y0, cb_size, cb_size / 4, log2_cb_size, 0, idx);
2280  hls_prediction_unit(lc, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
2281  break;
2282  case PART_2NxnD:
2283  hls_prediction_unit(lc, x0, y0, cb_size, cb_size * 3 / 4, log2_cb_size, 0, idx);
2284  hls_prediction_unit(lc, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx);
2285  break;
2286  case PART_nLx2N:
2287  hls_prediction_unit(lc, x0, y0, cb_size / 4, cb_size, log2_cb_size, 0, idx - 2);
2288  hls_prediction_unit(lc, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
2289  break;
2290  case PART_nRx2N:
2291  hls_prediction_unit(lc, x0, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0, idx - 2);
2292  hls_prediction_unit(lc, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);
2293  break;
2294  case PART_NxN:
2295  hls_prediction_unit(lc, x0, y0, cb_size / 2, cb_size / 2, log2_cb_size, 0, idx - 1);
2296  hls_prediction_unit(lc, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
2297  hls_prediction_unit(lc, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
2298  hls_prediction_unit(lc, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
2299  break;
2300  }
2301  }
2302 
2303  if (!pcm_flag) {
2304  int rqt_root_cbf = 1;
2305 
2306  if (lc->cu.pred_mode != MODE_INTRA &&
2307  !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) {
2308  rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(lc);
2309  }
2310  if (rqt_root_cbf) {
2311  const static int cbf[2] = { 0 };
2312  lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ?
2313  s->ps.sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag :
2314  s->ps.sps->max_transform_hierarchy_depth_inter;
2315  ret = hls_transform_tree(lc, x0, y0, x0, y0, x0, y0,
2316  log2_cb_size,
2317  log2_cb_size, 0, 0, cbf, cbf);
2318  if (ret < 0)
2319  return ret;
2320  } else {
2321  if (!s->sh.disable_deblocking_filter_flag)
2322  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_cb_size);
2323  }
2324  }
2325  }
2326 
2327  if (s->ps.pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0)
2328  ff_hevc_set_qPy(lc, x0, y0, log2_cb_size);
2329 
2330  x = y_cb * min_cb_width + x_cb;
2331  for (y = 0; y < length; y++) {
2332  memset(&s->qp_y_tab[x], lc->qp_y, length);
2333  x += min_cb_width;
2334  }
2335 
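 /* If this CU ends exactly on a quantization-group boundary (group size set by
  * diff_cu_qp_delta_depth), refresh the QP predictor for the next group. */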
2336  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2337  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
2338  lc->qPy_pred = lc->qp_y;
2339  }
2340 
2341  set_ct_depth(s, x0, y0, log2_cb_size, lc->ct_depth);
2342 
2343  return 0;
2344 }
2345 
2346 static int hls_coding_quadtree(HEVCLocalContext *lc, int x0, int y0,
2347  int log2_cb_size, int cb_depth)
2348 {
2349  const HEVCContext *const s = lc->parent;
2350  const int cb_size = 1 << log2_cb_size;
2351  int ret;
2352  int split_cu;
2353 
2354  lc->ct_depth = cb_depth;
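 /* A split flag is only coded for CUs that lie entirely inside the picture and
  * are larger than the minimum CB size; CUs crossing the picture border are
  * split implicitly until the minimum size is reached. */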
2355  if (x0 + cb_size <= s->ps.sps->width &&
2356  y0 + cb_size <= s->ps.sps->height &&
2357  log2_cb_size > s->ps.sps->log2_min_cb_size) {
2358  split_cu = ff_hevc_split_coding_unit_flag_decode(lc, cb_depth, x0, y0);
2359  } else {
2360  split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size);
2361  }
2362  if (s->ps.pps->cu_qp_delta_enabled_flag &&
2363  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) {
2364  lc->tu.is_cu_qp_delta_coded = 0;
2365  lc->tu.cu_qp_delta = 0;
2366  }
2367 
2368  if (s->sh.cu_chroma_qp_offset_enabled_flag &&
2369  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) {
2370  lc->tu.is_cu_chroma_qp_offset_coded = 0;
2371  }
2372 
2373  if (split_cu) {
2374  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2375  const int cb_size_split = cb_size >> 1;
2376  const int x1 = x0 + cb_size_split;
2377  const int y1 = y0 + cb_size_split;
2378 
2379  int more_data = 0;
2380 
2381  more_data = hls_coding_quadtree(lc, x0, y0, log2_cb_size - 1, cb_depth + 1);
2382  if (more_data < 0)
2383  return more_data;
2384 
2385  if (more_data && x1 < s->ps.sps->width) {
2386  more_data = hls_coding_quadtree(lc, x1, y0, log2_cb_size - 1, cb_depth + 1);
2387  if (more_data < 0)
2388  return more_data;
2389  }
2390  if (more_data && y1 < s->ps.sps->height) {
2391  more_data = hls_coding_quadtree(lc, x0, y1, log2_cb_size - 1, cb_depth + 1);
2392  if (more_data < 0)
2393  return more_data;
2394  }
2395  if (more_data && x1 < s->ps.sps->width &&
2396  y1 < s->ps.sps->height) {
2397  more_data = hls_coding_quadtree(lc, x1, y1, log2_cb_size - 1, cb_depth + 1);
2398  if (more_data < 0)
2399  return more_data;
2400  }
2401 
2402  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2403  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
2404  lc->qPy_pred = lc->qp_y;
2405 
2406  if (more_data)
2407  return ((x1 + cb_size_split) < s->ps.sps->width ||
2408  (y1 + cb_size_split) < s->ps.sps->height);
2409  else
2410  return 0;
2411  } else {
2412  ret = hls_coding_unit(lc, s, x0, y0, log2_cb_size);
2413  if (ret < 0)
2414  return ret;
2415  if ((!((x0 + cb_size) %
2416  (1 << (s->ps.sps->log2_ctb_size))) ||
2417  (x0 + cb_size >= s->ps.sps->width)) &&
2418  (!((y0 + cb_size) %
2419  (1 << (s->ps.sps->log2_ctb_size))) ||
2420  (y0 + cb_size >= s->ps.sps->height))) {
2421  int end_of_slice_flag = ff_hevc_end_of_slice_flag_decode(lc);
2422  return !end_of_slice_flag;
2423  } else {
2424  return 1;
2425  }
2426  }
2427 
2428  return 0;
2429 }
2430 
2431 static void hls_decode_neighbour(HEVCLocalContext *lc, int x_ctb, int y_ctb,
2432  int ctb_addr_ts)
2433 {
2434  const HEVCContext *const s = lc->parent;
2435  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2436  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2437  int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
2438 
2439  s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;
2440 
2441  if (s->ps.pps->entropy_coding_sync_enabled_flag) {
2442  if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
2443  lc->first_qp_group = 1;
2444  lc->end_of_tiles_x = s->ps.sps->width;
2445  } else if (s->ps.pps->tiles_enabled_flag) {
2446  if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
2447  int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
2448  lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
2449  lc->first_qp_group = 1;
2450  }
2451  } else {
2452  lc->end_of_tiles_x = s->ps.sps->width;
2453  }
2454 
2455  lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height);
2456 
2457  lc->boundary_flags = 0;
2458  if (s->ps.pps->tiles_enabled_flag) {
2459  if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
2460  lc->boundary_flags |= BOUNDARY_LEFT_TILE;
2461  if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
2462  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2463  if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
2464  lc->boundary_flags |= BOUNDARY_UPPER_TILE;
2465  if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
2466  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2467  } else {
2468  if (ctb_addr_in_slice <= 0)
2469  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2470  if (ctb_addr_in_slice < s->ps.sps->ctb_width)
2471  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2472  }
2473 
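 /* A neighbouring CTB may only be used for prediction and CABAC context
  * derivation if it lies inside the picture and is not separated from the
  * current CTB by a slice or tile boundary. */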
2474  lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
2475  lc->ctb_up_flag = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
2476  lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
2477  lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
2478 }
2479 
2480 static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
2481 {
2482  HEVCContext *s = avctxt->priv_data;
2483  HEVCLocalContext *const lc = s->HEVClc;
2484  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2485  int more_data = 1;
2486  int x_ctb = 0;
2487  int y_ctb = 0;
2488  int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
2489  int ret;
2490 
2491  if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {
2492  av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n");
2493  return AVERROR_INVALIDDATA;
2494  }
2495 
2496  if (s->sh.dependent_slice_segment_flag) {
2497  int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
2498  if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {
2499  av_log(s->avctx, AV_LOG_ERROR, "Previous slice segment missing\n");
2500  return AVERROR_INVALIDDATA;
2501  }
2502  }
2503 
2504  while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2505  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2506 
2507  x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2508  y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2509  hls_decode_neighbour(lc, x_ctb, y_ctb, ctb_addr_ts);
2510 
2511  ret = ff_hevc_cabac_init(lc, ctb_addr_ts);
2512  if (ret < 0) {
2513  s->tab_slice_address[ctb_addr_rs] = -1;
2514  return ret;
2515  }
2516 
2517  hls_sao_param(lc, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2518 
2519  s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
2520  s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset;
2521  s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag;
2522 
2523  more_data = hls_coding_quadtree(lc, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2524  if (more_data < 0) {
2525  s->tab_slice_address[ctb_addr_rs] = -1;
2526  return more_data;
2527  }
2528 
2529 
2530  ctb_addr_ts++;
2531  ff_hevc_save_states(lc, ctb_addr_ts);
2532  ff_hevc_hls_filters(lc, x_ctb, y_ctb, ctb_size);
2533  }
2534 
2535  if (x_ctb + ctb_size >= s->ps.sps->width &&
2536  y_ctb + ctb_size >= s->ps.sps->height)
2537  ff_hevc_hls_filter(lc, x_ctb, y_ctb, ctb_size);
2538 
2539  return ctb_addr_ts;
2540 }
2541 
2542 static int hls_slice_data(HEVCContext *s)
2543 {
2544  int arg[2];
2545  int ret[2];
2546 
2547  arg[0] = 0;
2548  arg[1] = 1;
2549 
2550  s->avctx->execute(s->avctx, hls_decode_entry, arg, ret , 1, sizeof(int));
2551  return ret[0];
2552 }
2553 static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *hevc_lclist,
2554  int job, int self_id)
2555 {
2556  HEVCLocalContext *lc = ((HEVCLocalContext**)hevc_lclist)[self_id];
2557  const HEVCContext *const s = lc->parent;
2558  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2559  int more_data = 1;
2560  int ctb_row = job;
2561  int ctb_addr_rs = s->sh.slice_ctb_addr_rs + ctb_row * ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size);
2562  int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
2563  int thread = ctb_row % s->threads_number;
2564  int ret;
2565 
2566  if(ctb_row) {
2567  ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
2568  if (ret < 0)
2569  goto error;
2570  ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]);
2571  }
2572 
2573  while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2574  int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2575  int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2576 
2577  hls_decode_neighbour(lc, x_ctb, y_ctb, ctb_addr_ts);
2578 
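  /* Wavefront dependency: wait until the row above has advanced far enough
   * that the CTBs this one depends on (up and up-right, which also provide
   * the inherited CABAC state) are fully decoded. */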
2579  ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP);
2580 
2581  /* atomic_load's prototype requires a pointer to non-const atomic variable
2582  * (due to implementations via mutexes, where reads involve writes).
2583  * Of course, casting const away here is nevertheless safe. */
2584  if (atomic_load((atomic_int*)&s->wpp_err)) {
2585  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2586  return 0;
2587  }
2588 
2589  ret = ff_hevc_cabac_init(lc, ctb_addr_ts);
2590  if (ret < 0)
2591  goto error;
2592  hls_sao_param(lc, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2593  more_data = hls_coding_quadtree(lc, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2594 
2595  if (more_data < 0) {
2596  ret = more_data;
2597  goto error;
2598  }
2599 
2600  ctb_addr_ts++;
2601 
2602  ff_hevc_save_states(lc, ctb_addr_ts);
2603  ff_thread_report_progress2(s->avctx, ctb_row, thread, 1);
2604  ff_hevc_hls_filters(lc, x_ctb, y_ctb, ctb_size);
2605 
2606  if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
2607  /* Casting const away here is safe, because it is an atomic operation. */
2608  atomic_store((atomic_int*)&s->wpp_err, 1);
2609  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2610  return 0;
2611  }
2612 
2613  if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) {
2614  ff_hevc_hls_filter(lc, x_ctb, y_ctb, ctb_size);
2615  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2616  return ctb_addr_ts;
2617  }
2618  ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2619  x_ctb+=ctb_size;
2620 
2621  if(x_ctb >= s->ps.sps->width) {
2622  break;
2623  }
2624  }
2625  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2626 
2627  return 0;
2628 error:
2629  s->tab_slice_address[ctb_addr_rs] = -1;
2630  /* Casting const away here is safe, because it is an atomic operation. */
2631  atomic_store((atomic_int*)&s->wpp_err, 1);
2632  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2633  return ret;
2634 }
2635 
2636 static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
2637 {
2638  const uint8_t *data = nal->data;
2639  int length = nal->size;
2640  HEVCLocalContext *lc = s->HEVClc;
2641  int *ret;
2642  int64_t offset;
2643  int64_t startheader, cmpt = 0;
2644  int i, j, res = 0;
2645 
2646  if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
2647  av_log(s->avctx, AV_LOG_ERROR, "WPP ctb addresses are wrong (%d %d %d %d)\n",
2648  s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
2649  s->ps.sps->ctb_width, s->ps.sps->ctb_height
2650  );
2651  return AVERROR_INVALIDDATA;
2652  }
2653 
2654  for (i = 1; i < s->threads_number; i++) {
2655  if (s->HEVClcList[i])
2656  continue;
2657  s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext));
2658  if (!s->HEVClcList[i])
2659  return AVERROR(ENOMEM);
2660  s->HEVClcList[i]->logctx = s->avctx;
2661  s->HEVClcList[i]->parent = s;
2662  s->HEVClcList[i]->common_cabac_state = &s->cabac;
2663  }
2664 
2665  offset = (lc->gb.index >> 3);
2666 
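 /* The entry point offsets signalled in the slice header count bytes that
  * still include the emulation prevention bytes; ff_h2645_packet_split has
  * already removed those, so each substream size is corrected by the number
  * of removed bytes (cmpt) that fall inside it. */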
2667  for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
2668  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2669  startheader--;
2670  cmpt++;
2671  }
2672  }
2673 
2674  for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
2675  offset += (s->sh.entry_point_offset[i - 1] - cmpt);
2676  for (j = 0, cmpt = 0, startheader = offset
2677  + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) {
2678  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2679  startheader--;
2680  cmpt++;
2681  }
2682  }
2683  s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;
2684  s->sh.offset[i - 1] = offset;
2685 
2686  }
2687  if (s->sh.num_entry_point_offsets != 0) {
2688  offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;
2689  if (length < offset) {
2690  av_log(s->avctx, AV_LOG_ERROR, "entry_point_offset table is corrupted\n");
2691  return AVERROR_INVALIDDATA;
2692  }
2693  s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset;
2694  s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;
2695 
2696  }
2697  s->data = data;
2698 
2699  for (i = 1; i < s->threads_number; i++) {
2700  s->HEVClcList[i]->first_qp_group = 1;
2701  s->HEVClcList[i]->qp_y = s->HEVClc->qp_y;
2702  }
2703 
2704  atomic_store(&s->wpp_err, 0);
2705  res = ff_slice_thread_allocz_entries(s->avctx, s->sh.num_entry_point_offsets + 1);
2706  if (res < 0)
2707  return res;
2708 
2709  ret = av_calloc(s->sh.num_entry_point_offsets + 1, sizeof(*ret));
2710  if (!ret)
2711  return AVERROR(ENOMEM);
2712 
2713  if (s->ps.pps->entropy_coding_sync_enabled_flag)
2714  s->avctx->execute2(s->avctx, hls_decode_entry_wpp, s->HEVClcList, ret, s->sh.num_entry_point_offsets + 1);
2715 
2716  for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
2717  res += ret[i];
2718 
2719  av_free(ret);
2720  return res;
2721 }
2722 
2723 static int set_side_data(HEVCContext *s)
2724 {
2725  AVFrame *out = s->ref->frame;
2726  int ret;
2727 
2728  // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
2729  // so the side data persists for the entire coded video sequence.
2730  if (s->sei.mastering_display.present > 0 &&
2731  IS_IRAP(s) && s->no_rasl_output_flag) {
2732  s->sei.mastering_display.present--;
2733  }
2734  if (s->sei.mastering_display.present) {
2735  // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b
2736  const int mapping[3] = {2, 0, 1};
2737  const int chroma_den = 50000;
2738  const int luma_den = 10000;
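  /* The mastering display SEI carries primaries/white point in increments of
   * 0.00002 and luminance in increments of 0.0001 cd/m^2, hence these fixed
   * denominators. */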
2739  int i;
2740  AVMasteringDisplayMetadata *metadata =
2741  av_mastering_display_metadata_create_side_data(out);
2742  if (!metadata)
2743  return AVERROR(ENOMEM);
2744 
2745  for (i = 0; i < 3; i++) {
2746  const int j = mapping[i];
2747  metadata->display_primaries[i][0].num = s->sei.mastering_display.display_primaries[j][0];
2748  metadata->display_primaries[i][0].den = chroma_den;
2749  metadata->display_primaries[i][1].num = s->sei.mastering_display.display_primaries[j][1];
2750  metadata->display_primaries[i][1].den = chroma_den;
2751  }
2752  metadata->white_point[0].num = s->sei.mastering_display.white_point[0];
2753  metadata->white_point[0].den = chroma_den;
2754  metadata->white_point[1].num = s->sei.mastering_display.white_point[1];
2755  metadata->white_point[1].den = chroma_den;
2756 
2757  metadata->max_luminance.num = s->sei.mastering_display.max_luminance;
2758  metadata->max_luminance.den = luma_den;
2759  metadata->min_luminance.num = s->sei.mastering_display.min_luminance;
2760  metadata->min_luminance.den = luma_den;
2761  metadata->has_luminance = 1;
2762  metadata->has_primaries = 1;
2763 
2764  av_log(s->avctx, AV_LOG_DEBUG, "Mastering Display Metadata:\n");
2765  av_log(s->avctx, AV_LOG_DEBUG,
2766  "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
2767  av_q2d(metadata->display_primaries[0][0]),
2768  av_q2d(metadata->display_primaries[0][1]),
2769  av_q2d(metadata->display_primaries[1][0]),
2770  av_q2d(metadata->display_primaries[1][1]),
2771  av_q2d(metadata->display_primaries[2][0]),
2772  av_q2d(metadata->display_primaries[2][1]),
2773  av_q2d(metadata->white_point[0]), av_q2d(metadata->white_point[1]));
2774  av_log(s->avctx, AV_LOG_DEBUG,
2775  "min_luminance=%f, max_luminance=%f\n",
2776  av_q2d(metadata->min_luminance), av_q2d(metadata->max_luminance));
2777  }
2778  // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
2779  // so the side data persists for the entire coded video sequence.
2780  if (s->sei.content_light.present > 0 &&
2781  IS_IRAP(s) && s->no_rasl_output_flag) {
2782  s->sei.content_light.present--;
2783  }
2784  if (s->sei.content_light.present) {
2785  AVContentLightMetadata *metadata =
2786  av_content_light_metadata_create_side_data(out);
2787  if (!metadata)
2788  return AVERROR(ENOMEM);
2789  metadata->MaxCLL = s->sei.content_light.max_content_light_level;
2790  metadata->MaxFALL = s->sei.content_light.max_pic_average_light_level;
2791 
2792  av_log(s->avctx, AV_LOG_DEBUG, "Content Light Level Metadata:\n");
2793  av_log(s->avctx, AV_LOG_DEBUG, "MaxCLL=%d, MaxFALL=%d\n",
2794  metadata->MaxCLL, metadata->MaxFALL);
2795  }
2796 
2797  ret = ff_h2645_sei_to_frame(out, &s->sei.common, AV_CODEC_ID_HEVC, NULL,
2798  &s->ps.sps->vui.common,
2799  s->ps.sps->bit_depth, s->ps.sps->bit_depth_chroma,
2800  s->ref->poc /* no poc_offset in HEVC */);
2801  if (ret < 0)
2802  return ret;
2803 
2804  if (s->sei.timecode.present) {
2805  uint32_t *tc_sd;
2806  char tcbuf[AV_TIMECODE_STR_SIZE];
2807  AVFrameSideData *tcside = av_frame_new_side_data(out, AV_FRAME_DATA_S12M_TIMECODE,
2808  sizeof(uint32_t) * 4);
2809  if (!tcside)
2810  return AVERROR(ENOMEM);
2811 
2812  tc_sd = (uint32_t*)tcside->data;
2813  tc_sd[0] = s->sei.timecode.num_clock_ts;
2814 
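  /* Pack each clock timestamp from the time code SEI into a 32-bit SMPTE
   * ST 12-1 timecode; AV_FRAME_DATA_S12M_TIMECODE stores the count followed by
   * up to three such values. */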
2815  for (int i = 0; i < tc_sd[0]; i++) {
2816  int drop = s->sei.timecode.cnt_dropped_flag[i];
2817  int hh = s->sei.timecode.hours_value[i];
2818  int mm = s->sei.timecode.minutes_value[i];
2819  int ss = s->sei.timecode.seconds_value[i];
2820  int ff = s->sei.timecode.n_frames[i];
2821 
2822  tc_sd[i + 1] = av_timecode_get_smpte(s->avctx->framerate, drop, hh, mm, ss, ff);
2823  av_timecode_make_smpte_tc_string2(tcbuf, s->avctx->framerate, tc_sd[i + 1], 0, 0);
2824  av_dict_set(&out->metadata, "timecode", tcbuf, 0);
2825  }
2826 
2827  s->sei.timecode.num_clock_ts = 0;
2828  }
2829 
2830  if (s->sei.common.dynamic_hdr_plus.info) {
2831  AVBufferRef *info_ref = av_buffer_ref(s->sei.common.dynamic_hdr_plus.info);
2832  if (!info_ref)
2833  return AVERROR(ENOMEM);
2834 
2835  if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_PLUS, info_ref)) {
2836  av_buffer_unref(&info_ref);
2837  return AVERROR(ENOMEM);
2838  }
2839  }
2840 
2841  if (s->rpu_buf) {
2842  AVFrameSideData *rpu = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DOVI_RPU_BUFFER, s->rpu_buf);
2843  if (!rpu)
2844  return AVERROR(ENOMEM);
2845 
2846  s->rpu_buf = NULL;
2847  }
2848 
2849  if ((ret = ff_dovi_attach_side_data(&s->dovi_ctx, out)) < 0)
2850  return ret;
2851 
2852  if (s->sei.common.dynamic_hdr_vivid.info) {
2853  AVBufferRef *info_ref = av_buffer_ref(s->sei.common.dynamic_hdr_vivid.info);
2854  if (!info_ref)
2855  return AVERROR(ENOMEM);
2856 
2857  if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_VIVID, info_ref)) {
2858  av_buffer_unref(&info_ref);
2859  return AVERROR(ENOMEM);
2860  }
2861  }
2862 
2863  return 0;
2864 }
2865 
2866 static int hevc_frame_start(HEVCContext *s)
2867 {
2868  HEVCLocalContext *lc = s->HEVClc;
2869  int pic_size_in_ctb = ((s->ps.sps->width >> s->ps.sps->log2_min_cb_size) + 1) *
2870  ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1);
2871  int ret;
2872 
2873  memset(s->horizontal_bs, 0, s->bs_width * s->bs_height);
2874  memset(s->vertical_bs, 0, s->bs_width * s->bs_height);
2875  memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height);
2876  memset(s->is_pcm, 0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1));
2877  memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address));
2878 
2879  s->is_decoded = 0;
2880  s->first_nal_type = s->nal_unit_type;
2881 
2882  s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos);
2883 
2884  if (s->ps.pps->tiles_enabled_flag)
2885  lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size;
2886 
2887  ret = ff_hevc_set_new_ref(s, &s->frame, s->poc);
2888  if (ret < 0)
2889  goto fail;
2890 
2891  ret = ff_hevc_frame_rps(s);
2892  if (ret < 0) {
2893  av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n");
2894  goto fail;
2895  }
2896 
2897  s->ref->frame->key_frame = IS_IRAP(s);
2898 
2899  s->ref->needs_fg = s->sei.common.film_grain_characteristics.present &&
2900  !(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) &&
2901  !s->avctx->hwaccel;
2902 
2903  if (s->ref->needs_fg) {
2904  s->ref->frame_grain->format = s->ref->frame->format;
2905  s->ref->frame_grain->width = s->ref->frame->width;
2906  s->ref->frame_grain->height = s->ref->frame->height;
2907  if ((ret = ff_thread_get_buffer(s->avctx, s->ref->frame_grain, 0)) < 0)
2908  goto fail;
2909  }
2910 
2911  ret = set_side_data(s);
2912  if (ret < 0)
2913  goto fail;
2914 
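 /* HEVC_SLICE_B/P/I are 0/1/2 while AV_PICTURE_TYPE_I/P/B are 1/2/3, so the
  * slice type maps to the picture type as 3 - slice_type. */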
2915  s->frame->pict_type = 3 - s->sh.slice_type;
2916 
2917  if (!IS_IRAP(s))
2918  ff_hevc_bump_frame(s);
2919 
2920  av_frame_unref(s->output_frame);
2921  ret = ff_hevc_output_frame(s, s->output_frame, 0);
2922  if (ret < 0)
2923  goto fail;
2924 
2925  if (!s->avctx->hwaccel)
2926  ff_thread_finish_setup(s->avctx);
2927 
2928  return 0;
2929 
2930 fail:
2931  if (s->ref)
2932  ff_hevc_unref_frame(s, s->ref, ~0);
2933  s->ref = NULL;
2934  return ret;
2935 }
2936 
2937 static int hevc_frame_end(HEVCContext *s)
2938 {
2939  HEVCFrame *out = s->ref;
2940  const AVFrameSideData *sd;
2941  int ret;
2942 
2943  if (out->needs_fg) {
2944  sd = av_frame_get_side_data(out->frame, AV_FRAME_DATA_FILM_GRAIN_PARAMS);
2945  av_assert0(out->frame_grain->buf[0] && sd);
2946  ret = ff_h274_apply_film_grain(out->frame_grain, out->frame, &s->h274db,
2947  (AVFilmGrainParams *) sd->data);
2948 
2949  if (ret < 0) {
2950  av_log(s->avctx, AV_LOG_WARNING, "Failed synthesizing film "
2951  "grain, ignoring: %s\n", av_err2str(ret));
2952  out->needs_fg = 0;
2953  }
2954  }
2955 
2956  return 0;
2957 }
2958 
2959 static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
2960 {
2961  HEVCLocalContext *lc = s->HEVClc;
2962  GetBitContext *gb = &lc->gb;
2963  int ctb_addr_ts, ret;
2964 
2965  *gb = nal->gb;
2966  s->nal_unit_type = nal->type;
2967  s->temporal_id = nal->temporal_id;
2968 
2969  switch (s->nal_unit_type) {
2970  case HEVC_NAL_VPS:
2971  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
2972  ret = s->avctx->hwaccel->decode_params(s->avctx,
2973  nal->type,
2974  nal->raw_data,
2975  nal->raw_size);
2976  if (ret < 0)
2977  goto fail;
2978  }
2979  ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
2980  if (ret < 0)
2981  goto fail;
2982  break;
2983  case HEVC_NAL_SPS:
2984  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
2985  ret = s->avctx->hwaccel->decode_params(s->avctx,
2986  nal->type,
2987  nal->raw_data,
2988  nal->raw_size);
2989  if (ret < 0)
2990  goto fail;
2991  }
2992  ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
2993  s->apply_defdispwin);
2994  if (ret < 0)
2995  goto fail;
2996  break;
2997  case HEVC_NAL_PPS:
2998  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
2999  ret = s->avctx->hwaccel->decode_params(s->avctx,
3000  nal->type,
3001  nal->raw_data,
3002  nal->raw_size);
3003  if (ret < 0)
3004  goto fail;
3005  }
3006  ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
3007  if (ret < 0)
3008  goto fail;
3009  break;
3010  case HEVC_NAL_SEI_PREFIX:
3011  case HEVC_NAL_SEI_SUFFIX:
3012  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3013  ret = s->avctx->hwaccel->decode_params(s->avctx,
3014  nal->type,
3015  nal->raw_data,
3016  nal->raw_size);
3017  if (ret < 0)
3018  goto fail;
3019  }
3020  ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type);
3021  if (ret < 0)
3022  goto fail;
3023  break;
3024  case HEVC_NAL_TRAIL_R:
3025  case HEVC_NAL_TRAIL_N:
3026  case HEVC_NAL_TSA_N:
3027  case HEVC_NAL_TSA_R:
3028  case HEVC_NAL_STSA_N:
3029  case HEVC_NAL_STSA_R:
3030  case HEVC_NAL_BLA_W_LP:
3031  case HEVC_NAL_BLA_W_RADL:
3032  case HEVC_NAL_BLA_N_LP:
3033  case HEVC_NAL_IDR_W_RADL:
3034  case HEVC_NAL_IDR_N_LP:
3035  case HEVC_NAL_CRA_NUT:
3036  case HEVC_NAL_RADL_N:
3037  case HEVC_NAL_RADL_R:
3038  case HEVC_NAL_RASL_N:
3039  case HEVC_NAL_RASL_R:
3040  ret = hls_slice_header(s);
3041  if (ret < 0)
3042  return ret;
3043  if (ret == 1) {
3044  ret = AVERROR_INVALIDDATA;
3045  goto fail;
3046  }
3047 
3048 
3049  if (
3050  (s->avctx->skip_frame >= AVDISCARD_BIDIR && s->sh.slice_type == HEVC_SLICE_B) ||
3051  (s->avctx->skip_frame >= AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) ||
3052  (s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) {
3053  break;
3054  }
3055 
3056  if (s->sh.first_slice_in_pic_flag) {
3057  if (s->max_ra == INT_MAX) {
3058  if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) {
3059  s->max_ra = s->poc;
3060  } else {
3061  if (IS_IDR(s))
3062  s->max_ra = INT_MIN;
3063  }
3064  }
3065 
3066  if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) &&
3067  s->poc <= s->max_ra) {
3068  s->is_decoded = 0;
3069  break;
3070  } else {
3071  if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra)
3072  s->max_ra = INT_MIN;
3073  }
3074 
3075  s->overlap ++;
3076  ret = hevc_frame_start(s);
3077  if (ret < 0)
3078  return ret;
3079  } else if (!s->ref) {
3080  av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
3081  goto fail;
3082  }
3083 
3084  if (s->nal_unit_type != s->first_nal_type) {
3085  av_log(s->avctx, AV_LOG_ERROR,
3086  "Non-matching NAL types of the VCL NALUs: %d %d\n",
3087  s->first_nal_type, s->nal_unit_type);
3088  return AVERROR_INVALIDDATA;
3089  }
3090 
3091  if (!s->sh.dependent_slice_segment_flag &&
3092  s->sh.slice_type != HEVC_SLICE_I) {
3093  ret = ff_hevc_slice_rpl(s);
3094  if (ret < 0) {
3095  av_log(s->avctx, AV_LOG_WARNING,
3096  "Error constructing the reference lists for the current slice.\n");
3097  goto fail;
3098  }
3099  }
3100 
3101  if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
3102  ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);
3103  if (ret < 0)
3104  goto fail;
3105  }
3106 
3107  if (s->avctx->hwaccel) {
3108  ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size);
3109  if (ret < 0)
3110  goto fail;
3111  } else {
3112  if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
3113  ctb_addr_ts = hls_slice_data_wpp(s, nal);
3114  else
3115  ctb_addr_ts = hls_slice_data(s);
3116  if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
3117  ret = hevc_frame_end(s);
3118  if (ret < 0)
3119  goto fail;
3120  s->is_decoded = 1;
3121  }
3122 
3123  if (ctb_addr_ts < 0) {
3124  ret = ctb_addr_ts;
3125  goto fail;
3126  }
3127  }
3128  break;
3129  case HEVC_NAL_EOS_NUT:
3130  case HEVC_NAL_EOB_NUT:
3131  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
3132  s->max_ra = INT_MAX;
3133  break;
3134  case HEVC_NAL_AUD:
3135  case HEVC_NAL_FD_NUT:
3136  case HEVC_NAL_UNSPEC62:
3137  break;
3138  default:
3139  av_log(s->avctx, AV_LOG_INFO,
3140  "Skipping NAL unit %d\n", s->nal_unit_type);
3141  }
3142 
3143  return 0;
3144 fail:
3145  if (s->avctx->err_recognition & AV_EF_EXPLODE)
3146  return ret;
3147  return 0;
3148 }
3149 
3150 static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
3151 {
3152  int i, ret = 0;
3153  int eos_at_start = 1;
3154 
3155  s->ref = NULL;
3156  s->last_eos = s->eos;
3157  s->eos = 0;
3158  s->overlap = 0;
3159 
3160  /* split the input packet into NAL units, so we know the upper bound on the
3161  * number of slices in the frame */
3162  ret = ff_h2645_packet_split(&s->pkt, buf, length, s->avctx, s->is_nalff,
3163  s->nal_length_size, s->avctx->codec_id, 1, 0);
3164  if (ret < 0) {
3165  av_log(s->avctx, AV_LOG_ERROR,
3166  "Error splitting the input into NAL units.\n");
3167  return ret;
3168  }
3169 
3170  for (i = 0; i < s->pkt.nb_nals; i++) {
3171  if (s->pkt.nals[i].type == HEVC_NAL_EOB_NUT ||
3172  s->pkt.nals[i].type == HEVC_NAL_EOS_NUT) {
3173  if (eos_at_start) {
3174  s->last_eos = 1;
3175  } else {
3176  s->eos = 1;
3177  }
3178  } else {
3179  eos_at_start = 0;
3180  }
3181  }
3182 
3183  /*
3184  * Check for RPU delimiter.
3185  *
3186  * Dolby Vision RPUs masquerade as unregistered NALs of type 62.
3187  *
3188  * We have to do this check here and create the rpu buffer, since RPUs are appended
3189  * to the end of an AU; they are the last non-EOB/EOS NAL in the AU.
3190  */
3191  if (s->pkt.nb_nals > 1 && s->pkt.nals[s->pkt.nb_nals - 1].type == HEVC_NAL_UNSPEC62 &&
3192  s->pkt.nals[s->pkt.nb_nals - 1].size > 2 && !s->pkt.nals[s->pkt.nb_nals - 1].nuh_layer_id
3193  && !s->pkt.nals[s->pkt.nb_nals - 1].temporal_id) {
3194  H2645NAL *nal = &s->pkt.nals[s->pkt.nb_nals - 1];
3195  if (s->rpu_buf) {
3196  av_buffer_unref(&s->rpu_buf);
3197  av_log(s->avctx, AV_LOG_WARNING, "Multiple Dolby Vision RPUs found in one AU. Skipping previous.\n");
3198  }
3199 
3200  s->rpu_buf = av_buffer_alloc(nal->raw_size - 2);
3201  if (!s->rpu_buf)
3202  return AVERROR(ENOMEM);
3203  memcpy(s->rpu_buf->data, nal->raw_data + 2, nal->raw_size - 2);
3204 
3205  ret = ff_dovi_rpu_parse(&s->dovi_ctx, nal->data + 2, nal->size - 2);
3206  if (ret < 0) {
3207  av_buffer_unref(&s->rpu_buf);
3208  av_log(s->avctx, AV_LOG_WARNING, "Error parsing DOVI NAL unit.\n");
3209  /* ignore */
3210  }
3211  }
3212 
3213  /* decode the NAL units */
3214  for (i = 0; i < s->pkt.nb_nals; i++) {
3215  H2645NAL *nal = &s->pkt.nals[i];
3216 
3217  if (s->avctx->skip_frame >= AVDISCARD_ALL ||
3218  (s->avctx->skip_frame >= AVDISCARD_NONREF
3219  && ff_hevc_nal_is_nonref(nal->type)) || nal->nuh_layer_id > 0)
3220  continue;
3221 
3222  ret = decode_nal_unit(s, nal);
3223  if (ret >= 0 && s->overlap > 2)
3224  ret = AVERROR_INVALIDDATA;
3225  if (ret < 0) {
3226  av_log(s->avctx, AV_LOG_WARNING,
3227  "Error parsing NAL unit #%d.\n", i);
3228  goto fail;
3229  }
3230  }
3231 
3232 fail:
3233  if (s->ref && s->threads_type == FF_THREAD_FRAME)
3234  ff_thread_report_progress(&s->ref->tf, INT_MAX, 0);
3235 
3236  return ret;
3237 }
3238 
3239 static int verify_md5(HEVCContext *s, AVFrame *frame)
3240 {
3241  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
3242  char msg_buf[4 * (50 + 2 * 2 * 16 /* MD5-size */)];
3243  int pixel_shift;
3244  int err = 0;
3245  int i, j;
3246 
3247  if (!desc)
3248  return AVERROR(EINVAL);
3249 
3250  pixel_shift = desc->comp[0].depth > 8;
3251 
3252  /* the checksums are LE, so we have to byteswap for >8bpp formats
3253  * on BE arches */
3254 #if HAVE_BIGENDIAN
3255  if (pixel_shift && !s->checksum_buf) {
3256  av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size,
3257  FFMAX3(frame->linesize[0], frame->linesize[1],
3258  frame->linesize[2]));
3259  if (!s->checksum_buf)
3260  return AVERROR(ENOMEM);
3261  }
3262 #endif
3263 
3264  msg_buf[0] = '\0';
3265  for (i = 0; frame->data[i]; i++) {
3266  int width = s->avctx->coded_width;
3267  int height = s->avctx->coded_height;
3268  int w = (i == 1 || i == 2) ? (width >> desc->log2_chroma_w) : width;
3269  int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height;
3270  uint8_t md5[16];
3271 
3272  av_md5_init(s->md5_ctx);
3273  for (j = 0; j < h; j++) {
3274  const uint8_t *src = frame->data[i] + j * frame->linesize[i];
3275 #if HAVE_BIGENDIAN
3276  if (pixel_shift) {
3277  s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf,
3278  (const uint16_t *) src, w);
3279  src = s->checksum_buf;
3280  }
3281 #endif
3282  av_md5_update(s->md5_ctx, src, w << pixel_shift);
3283  }
3284  av_md5_final(s->md5_ctx, md5);
3285 
3286 #define MD5_PRI "%016" PRIx64 "%016" PRIx64
3287 #define MD5_PRI_ARG(buf) AV_RB64(buf), AV_RB64((const uint8_t*)(buf) + 8)
3288 
3289  if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) {
3290  av_strlcatf(msg_buf, sizeof(msg_buf),
3291  "plane %d - correct " MD5_PRI "; ",
3292  i, MD5_PRI_ARG(md5));
3293  } else {
3294  av_strlcatf(msg_buf, sizeof(msg_buf),
3295  "mismatching checksum of plane %d - " MD5_PRI " != " MD5_PRI "; ",
3296  i, MD5_PRI_ARG(md5), MD5_PRI_ARG(s->sei.picture_hash.md5[i]));
3297  err = AVERROR_INVALIDDATA;
3298  }
3299  }
3300 
3301  av_log(s->avctx, err < 0 ? AV_LOG_ERROR : AV_LOG_DEBUG,
3302  "Verifying checksum for frame with POC %d: %s\n",
3303  s->poc, msg_buf);
3304 
3305  return err;
3306 }
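verify_md5() recomputes an MD5 over every plane of the decoded frame, row by row, and compares it with the decoded-picture-hash SEI carried in the bitstream. A minimal sketch of the same per-plane hashing with libavutil's MD5 helpers, assuming an 8-bit planar frame (illustrative; hash_plane_sketch is not a real helper):

#include "libavutil/error.h"
#include "libavutil/frame.h"
#include "libavutil/md5.h"
#include "libavutil/mem.h"

static int hash_plane_sketch(const AVFrame *frame, int plane, int w, int h,
                             uint8_t out[16])
{
    struct AVMD5 *md5 = av_md5_alloc();
    if (!md5)
        return AVERROR(ENOMEM);
    av_md5_init(md5);
    for (int y = 0; y < h; y++)   /* hash only the visible width of each row, not the full stride */
        av_md5_update(md5, frame->data[plane] + y * frame->linesize[plane], w);
    av_md5_final(md5, out);
    av_free(md5);
    return 0;
}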
3307 
3308 static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
3309 {
3310  int ret, i;
3311 
3312  ret = ff_hevc_decode_extradata(buf, length, &s->ps, &s->sei, &s->is_nalff,
3313  &s->nal_length_size, s->avctx->err_recognition,
3314  s->apply_defdispwin, s->avctx);
3315  if (ret < 0)
3316  return ret;
3317 
3318  /* export stream parameters from the first SPS */
3319  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3320  if (first && s->ps.sps_list[i]) {
3321  const HEVCSPS *sps = (const HEVCSPS*)s->ps.sps_list[i]->data;
3322  export_stream_params(s, sps);
3323  break;
3324  }
3325  }
3326 
3327  /* export stream parameters from SEI */
3328  ret = export_stream_params_from_sei(s);
3329  if (ret < 0)
3330  return ret;
3331 
3332  return 0;
3333 }
3334 
3335 static int hevc_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
3336  int *got_output, AVPacket *avpkt)
3337 {
3338  int ret;
3339  uint8_t *sd;
3340  size_t sd_size;
3341  HEVCContext *s = avctx->priv_data;
3342 
3343  if (!avpkt->size) {
3344  ret = ff_hevc_output_frame(s, rframe, 1);
3345  if (ret < 0)
3346  return ret;
3347 
3348  *got_output = ret;
3349  return 0;
3350  }
3351 
3352  sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &sd_size);
3353  if (sd && sd_size > 0) {
3354  ret = hevc_decode_extradata(s, sd, sd_size, 0);
3355  if (ret < 0)
3356  return ret;
3357  }
3358 
3359  sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_DOVI_CONF, &sd_size);
3360  if (sd && sd_size > 0)
3361  ff_dovi_update_cfg(&s->dovi_ctx, (AVDOVIDecoderConfigurationRecord *) sd);
3362 
3363  s->ref = NULL;
3364  ret = decode_nal_units(s, avpkt->data, avpkt->size);
3365  if (ret < 0)
3366  return ret;
3367 
3368  if (avctx->hwaccel) {
3369  if (s->ref && (ret = avctx->hwaccel->end_frame(avctx)) < 0) {
3370  av_log(avctx, AV_LOG_ERROR,
3371  "hardware accelerator failed to decode picture\n");
3372  ff_hevc_unref_frame(s, s->ref, ~0);
3373  return ret;
3374  }
3375  } else {
3376  /* verify the SEI checksum */
3377  if (avctx->err_recognition & AV_EF_CRCCHECK && s->ref && s->is_decoded &&
3378  s->sei.picture_hash.is_md5) {
3379  ret = verify_md5(s, s->ref->frame);
3380  if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
3381  ff_hevc_unref_frame(s, s->ref, ~0);
3382  return ret;
3383  }
3384  }
3385  }
3386  s->sei.picture_hash.is_md5 = 0;
3387 
3388  if (s->is_decoded) {
3389  av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc);
3390  s->is_decoded = 0;
3391  }
3392 
3393  if (s->output_frame->buf[0]) {
3394  av_frame_move_ref(rframe, s->output_frame);
3395  *got_output = 1;
3396  }
3397 
3398  return avpkt->size;
3399 }
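hevc_decode_frame() is the decode callback registered in the FFCodec table below; applications never call it directly but drive it through the public send/receive API. A minimal sketch of that calling loop (public libavcodec API; error handling trimmed):

#include "libavcodec/avcodec.h"

static int decode_packet_sketch(AVCodecContext *dec, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec, pkt);        /* pkt == NULL drains the decoder */
    if (ret < 0)
        return ret;
    while ((ret = avcodec_receive_frame(dec, frame)) >= 0) {
        /* use frame->data / frame->pts here, then release the reference */
        av_frame_unref(frame);
    }
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}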
3400 
3401 static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
3402 {
3403  int ret;
3404 
3405  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
3406  if (ret < 0)
3407  return ret;
3408 
3409  if (src->needs_fg) {
3410  ret = av_frame_ref(dst->frame_grain, src->frame_grain);
3411  if (ret < 0)
3412  return ret;
3413  dst->needs_fg = 1;
3414  }
3415 
3416  dst->tab_mvf_buf = av_buffer_ref(src->tab_mvf_buf);
3417  if (!dst->tab_mvf_buf)
3418  goto fail;
3419  dst->tab_mvf = src->tab_mvf;
3420 
3421  dst->rpl_tab_buf = av_buffer_ref(src->rpl_tab_buf);
3422  if (!dst->rpl_tab_buf)
3423  goto fail;
3424  dst->rpl_tab = src->rpl_tab;
3425 
3426  dst->rpl_buf = av_buffer_ref(src->rpl_buf);
3427  if (!dst->rpl_buf)
3428  goto fail;
3429 
3430  dst->poc = src->poc;
3431  dst->ctb_count = src->ctb_count;
3432  dst->flags = src->flags;
3433  dst->sequence = src->sequence;
3434 
3435  if (src->hwaccel_picture_private) {
3436  dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
3437  if (!dst->hwaccel_priv_buf)
3438  goto fail;
3439  dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
3440  }
3441 
3442  return 0;
3443 fail:
3444  ff_hevc_unref_frame(s, dst, ~0);
3445  return AVERROR(ENOMEM);
3446 }
3447 
3448 static av_cold int hevc_decode_free(AVCodecContext *avctx)
3449 {
3450  HEVCContext *s = avctx->priv_data;
3451  int i;
3452 
3453  pic_arrays_free(s);
3454 
3455  ff_dovi_ctx_unref(&s->dovi_ctx);
3456  av_buffer_unref(&s->rpu_buf);
3457 
3458  av_freep(&s->md5_ctx);
3459 
3460  for (i = 0; i < 3; i++) {
3461  av_freep(&s->sao_pixel_buffer_h[i]);
3462  av_freep(&s->sao_pixel_buffer_v[i]);
3463  }
3464  av_frame_free(&s->output_frame);
3465 
3466  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3467  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3468  av_frame_free(&s->DPB[i].frame);
3469  av_frame_free(&s->DPB[i].frame_grain);
3470  }
3471 
3472  ff_hevc_ps_uninit(&s->ps);
3473 
3474  av_freep(&s->sh.entry_point_offset);
3475  av_freep(&s->sh.offset);
3476  av_freep(&s->sh.size);
3477 
3478  if (s->HEVClcList) {
3479  for (i = 1; i < s->threads_number; i++) {
3480  av_freep(&s->HEVClcList[i]);
3481  }
3482  }
3483  av_freep(&s->HEVClc);
3484  av_freep(&s->HEVClcList);
3485 
3486  ff_h2645_packet_uninit(&s->pkt);
3487 
3488  ff_hevc_reset_sei(&s->sei);
3489 
3490  return 0;
3491 }
3492 
3493 static av_cold int hevc_init_context(AVCodecContext *avctx)
3494 {
3495  HEVCContext *s = avctx->priv_data;
3496  int i;
3497 
3498  s->avctx = avctx;
3499 
3500  s->HEVClc = av_mallocz(sizeof(HEVCLocalContext));
3501  s->HEVClcList = av_mallocz(sizeof(HEVCLocalContext*) * s->threads_number);
3502  if (!s->HEVClc || !s->HEVClcList)
3503  return AVERROR(ENOMEM);
3504  s->HEVClc->parent = s;
3505  s->HEVClc->logctx = avctx;
3506  s->HEVClc->common_cabac_state = &s->cabac;
3507  s->HEVClcList[0] = s->HEVClc;
3508 
3509  s->output_frame = av_frame_alloc();
3510  if (!s->output_frame)
3511  return AVERROR(ENOMEM);
3512 
3513  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3514  s->DPB[i].frame = av_frame_alloc();
3515  if (!s->DPB[i].frame)
3516  return AVERROR(ENOMEM);
3517  s->DPB[i].tf.f = s->DPB[i].frame;
3518 
3519  s->DPB[i].frame_grain = av_frame_alloc();
3520  if (!s->DPB[i].frame_grain)
3521  return AVERROR(ENOMEM);
3522  }
3523 
3524  s->max_ra = INT_MAX;
3525 
3526  s->md5_ctx = av_md5_alloc();
3527  if (!s->md5_ctx)
3528  return AVERROR(ENOMEM);
3529 
3530  ff_bswapdsp_init(&s->bdsp);
3531 
3532  s->dovi_ctx.logctx = avctx;
3533  s->eos = 0;
3534 
3535  ff_hevc_reset_sei(&s->sei);
3536 
3537  return 0;
3538 }
3539 
3540 #if HAVE_THREADS
3541 static int hevc_update_thread_context(AVCodecContext *dst,
3542  const AVCodecContext *src)
3543 {
3544  HEVCContext *s = dst->priv_data;
3545  HEVCContext *s0 = src->priv_data;
3546  int i, ret;
3547 
3548  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3549  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3550  if (s0->DPB[i].frame->buf[0]) {
3551  ret = hevc_ref_frame(s, &s->DPB[i], &s0->DPB[i]);
3552  if (ret < 0)
3553  return ret;
3554  }
3555  }
3556 
3557  if (s->ps.sps != s0->ps.sps)
3558  s->ps.sps = NULL;
3559  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++) {
3560  ret = av_buffer_replace(&s->ps.vps_list[i], s0->ps.vps_list[i]);
3561  if (ret < 0)
3562  return ret;
3563  }
3564 
3565  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3566  ret = av_buffer_replace(&s->ps.sps_list[i], s0->ps.sps_list[i]);
3567  if (ret < 0)
3568  return ret;
3569  }
3570 
3571  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++) {
3572  ret = av_buffer_replace(&s->ps.pps_list[i], s0->ps.pps_list[i]);
3573  if (ret < 0)
3574  return ret;
3575  }
3576 
3577  if (s->ps.sps != s0->ps.sps)
3578  if ((ret = set_sps(s, s0->ps.sps, src->pix_fmt)) < 0)
3579  return ret;
3580 
3581  s->seq_decode = s0->seq_decode;
3582  s->seq_output = s0->seq_output;
3583  s->pocTid0 = s0->pocTid0;
3584  s->max_ra = s0->max_ra;
3585  s->eos = s0->eos;
3586  s->no_rasl_output_flag = s0->no_rasl_output_flag;
3587 
3588  s->is_nalff = s0->is_nalff;
3589  s->nal_length_size = s0->nal_length_size;
3590 
3591  s->threads_number = s0->threads_number;
3592  s->threads_type = s0->threads_type;
3593 
3594  if (s0->eos) {
3595  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
3596  s->max_ra = INT_MAX;
3597  }
3598 
3599  ret = ff_h2645_sei_ctx_replace(&s->sei.common, &s0->sei.common);
3600  if (ret < 0)
3601  return ret;
3602 
3603  ret = av_buffer_replace(&s->sei.common.dynamic_hdr_plus.info,
3604  s0->sei.common.dynamic_hdr_plus.info);
3605  if (ret < 0)
3606  return ret;
3607 
3608  ret = av_buffer_replace(&s->rpu_buf, s0->rpu_buf);
3609  if (ret < 0)
3610  return ret;
3611 
3612  ret = ff_dovi_ctx_replace(&s->dovi_ctx, &s0->dovi_ctx);
3613  if (ret < 0)
3614  return ret;
3615 
3616  ret = av_buffer_replace(&s->sei.common.dynamic_hdr_vivid.info,
3617  s0->sei.common.dynamic_hdr_vivid.info);
3618  if (ret < 0)
3619  return ret;
3620 
3621  s->sei.common.frame_packing = s0->sei.common.frame_packing;
3622  s->sei.common.display_orientation = s0->sei.common.display_orientation;
3623  s->sei.common.alternative_transfer = s0->sei.common.alternative_transfer;
3624  s->sei.mastering_display = s0->sei.mastering_display;
3625  s->sei.content_light = s0->sei.content_light;
3626 
3627  ret = export_stream_params_from_sei(s);
3628  if (ret < 0)
3629  return ret;
3630 
3631  return 0;
3632 }
3633 #endif
3634 
3635 static av_cold int hevc_decode_init(AVCodecContext *avctx)
3636 {
3637  HEVCContext *s = avctx->priv_data;
3638  int ret;
3639 
3640  if (avctx->active_thread_type & FF_THREAD_SLICE) {
3641  s->threads_number = avctx->thread_count;
3642  ret = ff_slice_thread_init_progress(avctx);
3643  if (ret < 0)
3644  return ret;
3645  } else
3646  s->threads_number = 1;
3647 
3648  if((avctx->active_thread_type & FF_THREAD_FRAME) && avctx->thread_count > 1)
3649  s->threads_type = FF_THREAD_FRAME;
3650  else
3651  s->threads_type = FF_THREAD_SLICE;
3652 
3653  ret = hevc_init_context(avctx);
3654  if (ret < 0)
3655  return ret;
3656 
3657  s->enable_parallel_tiles = 0;
3658  s->sei.picture_timing.picture_struct = 0;
3659  s->eos = 1;
3660 
3661  atomic_init(&s->wpp_err, 0);
3662 
3663  if (!avctx->internal->is_copy) {
3664  if (avctx->extradata_size > 0 && avctx->extradata) {
3665  ret = hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size, 1);
3666  if (ret < 0) {
3667  return ret;
3668  }
3669  }
3670  }
3671 
3672  return 0;
3673 }
3674 
3675 static void hevc_decode_flush(AVCodecContext *avctx)
3676 {
3677  HEVCContext *s = avctx->priv_data;
3678  ff_hevc_flush_dpb(s);
3679  ff_hevc_reset_sei(&s->sei);
3680  ff_dovi_ctx_flush(&s->dovi_ctx);
3681  av_buffer_unref(&s->rpu_buf);
3682  s->max_ra = INT_MAX;
3683  s->eos = 1;
3684 }
3685 
3686 #define OFFSET(x) offsetof(HEVCContext, x)
3687 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
3688 
3689 static const AVOption options[] = {
3690  { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
3691  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3692  { "strict-displaywin", "stricly apply default display window size", OFFSET(apply_defdispwin),
3693  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3694  { NULL },
3695 };
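Both entries map to the same apply_defdispwin field, so either name enables cropping to the VUI default display window. A minimal sketch of setting the option from application code when opening the decoder (illustrative; open_hevc_sketch is not a real API):

#include "libavcodec/avcodec.h"
#include "libavutil/dict.h"

static int open_hevc_sketch(AVCodecContext **out)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_HEVC);
    AVCodecContext *ctx  = avcodec_alloc_context3(codec);
    AVDictionary *opts   = NULL;
    int ret;

    if (!ctx)
        return AVERROR(ENOMEM);
    av_dict_set(&opts, "apply_defdispwin", "1", 0);  /* private option declared above */
    ret = avcodec_open2(ctx, codec, &opts);
    av_dict_free(&opts);
    if (ret < 0) {
        avcodec_free_context(&ctx);
        return ret;
    }
    *out = ctx;
    return 0;
}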
3696 
3697 static const AVClass hevc_decoder_class = {
3698  .class_name = "HEVC decoder",
3699  .item_name = av_default_item_name,
3700  .option = options,
3701  .version = LIBAVUTIL_VERSION_INT,
3702 };
3703 
3705  .p.name = "hevc",
3706  CODEC_LONG_NAME("HEVC (High Efficiency Video Coding)"),
3707  .p.type = AVMEDIA_TYPE_VIDEO,
3708  .p.id = AV_CODEC_ID_HEVC,
3709  .priv_data_size = sizeof(HEVCContext),
3710  .p.priv_class = &hevc_decoder_class,
3711  .init = hevc_decode_init,
3712  .close = hevc_decode_free,
3713  FF_CODEC_DECODE_CB(hevc_decode_frame),
3714  .flush = hevc_decode_flush,
3715  UPDATE_THREAD_CONTEXT(hevc_update_thread_context),
3716  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
3717  AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
3718  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3719  FF_CODEC_CAP_ALLOCATE_PROGRESS | FF_CODEC_CAP_INIT_CLEANUP,
3720  .p.profiles = NULL_IF_CONFIG_SMALL(ff_hevc_profiles),
3721  .hw_configs = (const AVCodecHWConfigInternal *const []) {
3722 #if CONFIG_HEVC_DXVA2_HWACCEL
3723  HWACCEL_DXVA2(hevc),
3724 #endif
3725 #if CONFIG_HEVC_D3D11VA_HWACCEL
3726  HWACCEL_D3D11VA(hevc),
3727 #endif
3728 #if CONFIG_HEVC_D3D11VA2_HWACCEL
3729  HWACCEL_D3D11VA2(hevc),
3730 #endif
3731 #if CONFIG_HEVC_NVDEC_HWACCEL
3732  HWACCEL_NVDEC(hevc),
3733 #endif
3734 #if CONFIG_HEVC_VAAPI_HWACCEL
3735  HWACCEL_VAAPI(hevc),
3736 #endif
3737 #if CONFIG_HEVC_VDPAU_HWACCEL
3738  HWACCEL_VDPAU(hevc),
3739 #endif
3740 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
3741  HWACCEL_VIDEOTOOLBOX(hevc),
3742 #endif
3743  NULL
3744  },
3745 };
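The hw_configs list above advertises the hardware accelerations this decoder was built with; an application opts into one by attaching a device context and choosing the matching hardware pixel format in get_format. A minimal VAAPI-flavoured sketch (illustrative; attach_vaapi_sketch is not a real API):

#include "libavcodec/avcodec.h"
#include "libavutil/hwcontext.h"

static enum AVPixelFormat pick_vaapi_sketch(AVCodecContext *ctx,
                                            const enum AVPixelFormat *fmts)
{
    for (const enum AVPixelFormat *p = fmts; *p != AV_PIX_FMT_NONE; p++)
        if (*p == AV_PIX_FMT_VAAPI)       /* offered only when the VAAPI hwaccel is usable */
            return *p;
    return fmts[0];                       /* otherwise fall back to the decoder's first offered format */
}

static int attach_vaapi_sketch(AVCodecContext *ctx)
{
    AVBufferRef *device = NULL;
    int ret = av_hwdevice_ctx_create(&device, AV_HWDEVICE_TYPE_VAAPI, NULL, NULL, 0);
    if (ret < 0)
        return ret;
    ctx->hw_device_ctx = device;          /* hand our reference over to the codec context */
    ctx->get_format    = pick_vaapi_sketch;
    return 0;
}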
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:31
ff_hevc_sao_offset_sign_decode
int ff_hevc_sao_offset_sign_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:606
verify_md5
static int verify_md5(HEVCContext *s, AVFrame *frame)
Definition: hevcdec.c:3239
hwconfig.h
AVMasteringDisplayMetadata::has_primaries
int has_primaries
Flag indicating whether the display primaries (and white point) are set.
Definition: mastering_display_metadata.h:62
MD5_PRI
#define MD5_PRI
HEVC_NAL_RADL_N
@ HEVC_NAL_RADL_N
Definition: hevc.h:35
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1405
SliceHeader::beta_offset
int beta_offset
beta_offset_div2 * 2
Definition: hevcdec.h:300
bswapdsp.h
L1
F H1 F F H1 F F F F H1<-F-------F-------F v v v H2 H3 H2 ^ ^ ^ F-------F-------F-> H1<-F-------F-------F|||||||||F H1 F|||||||||F H1 Funavailable fullpel samples(outside the picture for example) shall be equalto the closest available fullpel sampleSmaller pel interpolation:--------------------------if diag_mc is set then points which lie on a line between 2 vertically, horizontally or diagonally adjacent halfpel points shall be interpolatedlinearly with rounding to nearest and halfway values rounded up.points which lie on 2 diagonals at the same time should only use the onediagonal not containing the fullpel point F--> O q O<--h1-> O q O<--F v \/v \/v O O O O O O O|/|\|q q q q q|/|\|O O O O O O O ^/\ ^/\ ^ h2--> O q O<--h3-> O q O<--h2 v \/v \/v O O O O O O O|\|/|q q q q q|\|/|O O O O O O O ^/\ ^/\ ^ F--> O q O<--h1-> O q O<--Fthe remaining points shall be bilinearly interpolated from theup to 4 surrounding halfpel and fullpel points, again rounding should be tonearest and halfway values rounded upcompliant Snow decoders MUST support 1-1/8 pel luma and 1/2-1/16 pel chromainterpolation at leastOverlapped block motion compensation:-------------------------------------FIXMELL band prediction:===================Each sample in the LL0 subband is predicted by the median of the left, top andleft+top-topleft samples, samples outside the subband shall be considered tobe 0. To reverse this prediction in the decoder apply the following.for(y=0;y< height;y++){ for(x=0;x< width;x++){ sample[y][x]+=median(sample[y-1][x], sample[y][x-1], sample[y-1][x]+sample[y][x-1]-sample[y-1][x-1]);}}sample[-1][ *]=sample[ *][-1]=0;width, height here are the width and height of the LL0 subband not of the finalvideoDequantization:===============FIXMEWavelet Transform:==================Snow supports 2 wavelet transforms, the symmetric biorthogonal 5/3 integertransform and an integer approximation of the symmetric biorthogonal 9/7daubechies wavelet.2D IDWT(inverse discrete wavelet transform) --------------------------------------------The 2D IDWT applies a 2D filter recursively, each time combining the4 lowest frequency subbands into a single subband until only 1 subbandremains.The 2D filter is done by first applying a 1D filter in the vertical directionand then applying it in the horizontal one. --------------- --------------- --------------- ---------------|LL0|HL0|||||||||||||---+---|HL1||L0|H0|HL1||LL1|HL1|||||LH0|HH0|||||||||||||-------+-------|-> L1 H1 LH1 HH1 LH1 HH1 LH1 HH1 L1
Definition: snow.txt:554
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
Definition: buffer.c:280
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
HEVCLocalContext
Definition: hevcdec.h:432
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:253
HEVCFrame::flags
uint8_t flags
A combination of HEVC_FRAME_FLAG_*.
Definition: hevcdec.h:429
AVMasteringDisplayMetadata::max_luminance
AVRational max_luminance
Max luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:57
HWACCEL_MAX
#define HWACCEL_MAX
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
HEVCFrame::tf
ThreadFrame tf
Definition: hevcdec.h:404
ff_hevc_hls_residual_coding
void ff_hevc_hls_residual_coding(HEVCLocalContext *lc, int x0, int y0, int log2_trafo_size, enum ScanType scan_idx, int c_idx)
Definition: hevc_cabac.c:1039
ff_hevc_skip_flag_decode
int ff_hevc_skip_flag_decode(HEVCLocalContext *lc, int x0, int y0, int x_cb, int y_cb)
Definition: hevc_cabac.c:628
HEVCFrame::hwaccel_priv_buf
AVBufferRef * hwaccel_priv_buf
Definition: hevcdec.h:417
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
ff_hevc_cu_chroma_qp_offset_idx
int ff_hevc_cu_chroma_qp_offset_idx(HEVCLocalContext *lc)
Definition: hevc_cabac.c:681
av_clip
#define av_clip
Definition: common.h:95
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
ff_hevc_pcm_flag_decode
int ff_hevc_pcm_flag_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:755
set_deblocking_bypass
static void set_deblocking_bypass(const HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:1310
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:664
ff_hevc_pred_init
void ff_hevc_pred_init(HEVCPredContext *hpc, int bit_depth)
Definition: hevcpred.c:43
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_dovi_ctx_unref
void ff_dovi_ctx_unref(DOVIContext *s)
Completely reset a DOVIContext, preserving only logctx.
Definition: dovi_rpu.c:43
chroma_mc_uni
static void chroma_mc_uni(HEVCLocalContext *lc, uint8_t *dst0, ptrdiff_t dststride, const uint8_t *src0, ptrdiff_t srcstride, int reflist, int x_off, int y_off, int block_w, int block_h, const struct MvField *current_mv, int chroma_weight, int chroma_offset)
8.5.3.2.2.2 Chroma sample uniprediction interpolation process
Definition: hevcdec.c:1658
hevc_decode_flush
static void hevc_decode_flush(AVCodecContext *avctx)
Definition: hevcdec.c:3675
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1002
PART_NxN
@ PART_NxN
Definition: hevcdec.h:145
decode_nal_unit
static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
Definition: hevcdec.c:2959
out
FILE * out
Definition: movenc.c:54
ff_thread_get_format
#define ff_thread_get_format
Definition: thread.h:65
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:682
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:670
SAO_BAND
@ SAO_BAND
Definition: hevcdec.h:211
ff_hevc_profiles
const AVProfile ff_hevc_profiles[]
Definition: profiles.c:83
ff_hevc_pred_mode_decode
int ff_hevc_pred_mode_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:692
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2888
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:119
ff_h2645_sei_to_frame
int ff_h2645_sei_to_frame(AVFrame *frame, H2645SEI *sei, enum AVCodecID codec_id, AVCodecContext *avctx, const H2645VUI *vui, unsigned bit_depth_luma, unsigned bit_depth_chroma, int seed)
Definition: h2645_sei.c:459
AVMasteringDisplayMetadata::display_primaries
AVRational display_primaries[3][2]
CIE 1931 xy chromaticity coords of color primaries (r, g, b order).
Definition: mastering_display_metadata.h:42
src1
const pixel * src1
Definition: h264pred_template.c:421
AVMasteringDisplayMetadata::has_luminance
int has_luminance
Flag indicating whether the luminance (min_ and max_) have been set.
Definition: mastering_display_metadata.h:67
set_ct_depth
static av_always_inline void set_ct_depth(const HEVCContext *s, int x0, int y0, int log2_cb_size, int ct_depth)
Definition: hevcdec.c:2076
HEVC_MAX_REFS
@ HEVC_MAX_REFS
Definition: hevc.h:119
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1382
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:411
HEVCLocalContext::ctb_up_flag
uint8_t ctb_up_flag
Definition: hevcdec.h:465
HEVCFrame::needs_fg
int needs_fg
Definition: hevcdec.h:405
mv
static const int8_t mv[256][2]
Definition: 4xm.c:80
SliceHeader::num_entry_point_offsets
int num_entry_point_offsets
Definition: hevcdec.h:308
HEVC_NAL_STSA_N
@ HEVC_NAL_STSA_N
Definition: hevc.h:33
HEVCFrame::frame_grain
AVFrame * frame_grain
Definition: hevcdec.h:403
AV_FRAME_DATA_FILM_GRAIN_PARAMS
@ AV_FRAME_DATA_FILM_GRAIN_PARAMS
Film grain parameters for a frame, described by AVFilmGrainParams.
Definition: frame.h:184
PART_2NxnU
@ PART_2NxnU
Definition: hevcdec.h:146
ff_hevc_luma_mv_mvp_mode
void ff_hevc_luma_mv_mvp_mode(HEVCLocalContext *lc, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv, int mvp_lx_flag, int LX)
Definition: hevc_mvs.c:583
AV_FRAME_DATA_S12M_TIMECODE
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:152
av_mod_uintp2
#define av_mod_uintp2
Definition: common.h:122
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:99
AVContentLightMetadata::MaxCLL
unsigned MaxCLL
Max content light level (cd/m^2).
Definition: mastering_display_metadata.h:102
luma_intra_pred_mode
static int luma_intra_pred_mode(HEVCLocalContext *lc, int x0, int y0, int pu_size, int prev_intra_luma_pred_flag)
8.4.1
Definition: hevcdec.c:1996
H2645NAL::nuh_layer_id
int nuh_layer_id
Definition: h2645_parse.h:67
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
pixdesc.h
HEVCFrame::tab_mvf
MvField * tab_mvf
Definition: hevcdec.h:406
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:995
TransformUnit::cu_qp_delta
int cu_qp_delta
Definition: hevcdec.h:373
HEVC_NAL_TSA_N
@ HEVC_NAL_TSA_N
Definition: hevc.h:31
ff_hevc_cu_transquant_bypass_flag_decode
int ff_hevc_cu_transquant_bypass_flag_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:623
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:67
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:661
HEVCFrame::hwaccel_picture_private
void * hwaccel_picture_private
Definition: hevcdec.h:418
HEVC_MAX_PPS_COUNT
@ HEVC_MAX_PPS_COUNT
Definition: hevc.h:114
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:374
PAR
#define PAR
Definition: hevcdec.c:3687
INTRA_DC
@ INTRA_DC
Definition: hevcdec.h:173
AVOption
AVOption.
Definition: opt.h:251
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:69
ff_h2645_packet_uninit
void ff_h2645_packet_uninit(H2645Packet *pkt)
Free all the allocated memory in the packet.
Definition: h2645_parse.c:528
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:561
hevc_decode_free
static av_cold int hevc_decode_free(AVCodecContext *avctx)
Definition: hevcdec.c:3448
data
const char data[16]
Definition: mxf.c:146
Mv::y
int16_t y
vertical component of motion vector
Definition: hevcdec.h:344
AV_FRAME_DATA_DOVI_RPU_BUFFER
@ AV_FRAME_DATA_DOVI_RPU_BUFFER
Dolby Vision RPU raw data, suitable for passing to x265 or other libraries.
Definition: frame.h:197
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:459
SAO_EDGE
@ SAO_EDGE
Definition: hevcdec.h:212
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
SliceHeader::slice_temporal_mvp_enabled_flag
uint8_t slice_temporal_mvp_enabled_flag
Definition: hevcdec.h:280
MvField::mv
Mv mv[2]
Definition: hevcdec.h:348
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:247
TransformUnit::is_cu_qp_delta_coded
uint8_t is_cu_qp_delta_coded
Definition: hevcdec.h:381
FFCodec
Definition: codec_internal.h:127
HEVC_NAL_RASL_N
@ HEVC_NAL_RASL_N
Definition: hevc.h:37
ff_hevc_intra_chroma_pred_mode_decode
int ff_hevc_intra_chroma_pred_mode_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:783
HEVC_NAL_STSA_R
@ HEVC_NAL_STSA_R
Definition: hevc.h:34
MODE_INTRA
@ MODE_INTRA
Definition: hevcdec.h:154
HEVC_NAL_BLA_W_RADL
@ HEVC_NAL_BLA_W_RADL
Definition: hevc.h:46
SliceHeader::slice_loop_filter_across_slices_enabled_flag
uint8_t slice_loop_filter_across_slices_enabled_flag
Definition: hevcdec.h:289
SAOParams::offset_sign
int offset_sign[3][4]
sao_offset_sign
Definition: hevcdsp.h:36
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
export_stream_params
static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
Definition: hevcdec.c:325
HEVCLocalContext::ctb_up_left_flag
uint8_t ctb_up_left_flag
Definition: hevcdec.h:467
ff_dovi_ctx_replace
int ff_dovi_ctx_replace(DOVIContext *s, const DOVIContext *s0)
Definition: dovi_rpu.c:64
H2645NAL::temporal_id
int temporal_id
HEVC only, nuh_temporal_id_plus_1 - 1.
Definition: h2645_parse.h:62
av_timecode_get_smpte
uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff)
Convert sei info to SMPTE 12M binary representation.
Definition: timecode.c:69
RefPicList
Definition: hevcdec.h:241
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:104
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:493
thread.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
OFFSET
#define OFFSET(x)
Definition: hevcdec.c:3686
PF_INTRA
@ PF_INTRA
Definition: hevcdec.h:165
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:351
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
ff_hevc_cu_qp_delta_sign_flag
int ff_hevc_cu_qp_delta_sign_flag(HEVCLocalContext *lc)
Definition: hevc_cabac.c:671
hls_decode_neighbour
static void hls_decode_neighbour(HEVCLocalContext *lc, int x_ctb, int y_ctb, int ctb_addr_ts)
Definition: hevcdec.c:2431
MODE_SKIP
@ MODE_SKIP
Definition: hevcdec.h:155
HEVCLocalContext::end_of_tiles_x
int end_of_tiles_x
Definition: hevcdec.h:468
AVContentLightMetadata
Content light level needed by to transmit HDR over HDMI (CTA-861.3).
Definition: mastering_display_metadata.h:98
CodingUnit::x
int x
Definition: hevcdec.h:330
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:371
BOUNDARY_LEFT_TILE
#define BOUNDARY_LEFT_TILE
Definition: hevcdec.h:482
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:1750
golomb.h
exp golomb vlc stuff
AVCodecInternal::is_copy
int is_copy
When using frame-threaded decoding, this field is set for the first worker thread (e....
Definition: internal.h:57
PART_2Nx2N
@ PART_2Nx2N
Definition: hevcdec.h:142
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:325
SET_SAO
#define SET_SAO(elem, value)
Definition: hevcdec.c:1001
HEVCLocalContext::ctb_up_right_flag
uint8_t ctb_up_right_flag
Definition: hevcdec.h:466
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
ff_hevc_clear_refs
void ff_hevc_clear_refs(HEVCContext *s)
Mark all frames in DPB as unused for reference.
Definition: hevc_refs.c:68
PRED_BI
@ PRED_BI
Definition: hevcdec.h:161
ff_hevc_log2_res_scale_abs
int ff_hevc_log2_res_scale_abs(HEVCLocalContext *lc, int idx)
Definition: hevc_cabac.c:909
ff_hevc_hls_mvd_coding
void ff_hevc_hls_mvd_coding(HEVCLocalContext *lc, int x0, int y0, int log2_cb_size)
Definition: hevc_cabac.c:1549
luma_mc_uni
static void luma_mc_uni(HEVCLocalContext *lc, uint8_t *dst, ptrdiff_t dststride, const AVFrame *ref, const Mv *mv, int x_off, int y_off, int block_w, int block_h, int luma_weight, int luma_offset)
8.5.3.2.2.1 Luma sample unidirectional interpolation process
Definition: hevcdec.c:1505
av_ceil_log2
#define av_ceil_log2
Definition: common.h:92
fail
#define fail()
Definition: checkasm.h:134
PredictionUnit::intra_pred_mode_c
uint8_t intra_pred_mode_c[4]
Definition: hevcdec.h:368
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1502
md5
struct AVMD5 * md5
Definition: movenc.c:56
InterPredIdc
InterPredIdc
Definition: hevcdec.h:158
MODE_INTER
@ MODE_INTER
Definition: hevcdec.h:153
ff_hevc_hls_filter
void ff_hevc_hls_filter(HEVCLocalContext *lc, int x, int y, int ctb_size)
Definition: hevc_filter.c:851
timecode.h
GetBitContext
Definition: get_bits.h:107
HEVCLocalContext::pu
PredictionUnit pu
Definition: hevcdec.h:478
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
ff_hevc_cu_chroma_qp_offset_flag
int ff_hevc_cu_chroma_qp_offset_flag(HEVCLocalContext *lc)
Definition: hevc_cabac.c:676
decode_lt_rps
static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
Definition: hevcdec.c:268
TransformUnit::res_scale_val
int res_scale_val
Definition: hevcdec.h:375
SliceHeader::short_term_ref_pic_set_size
int short_term_ref_pic_set_size
Definition: hevcdec.h:271
hevc_decoder_class
static const AVClass hevc_decoder_class
Definition: hevcdec.c:3697
val
static double val(void *priv, double ch)
Definition: aeval.c:77
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:75
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
ff_hevc_output_frame
int ff_hevc_output_frame(HEVCContext *s, AVFrame *out, int flush)
Find next frame in output order and put a reference to it in frame.
Definition: hevc_refs.c:186
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:613
SliceHeader::long_term_ref_pic_set_size
int long_term_ref_pic_set_size
Definition: hevcdec.h:274
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:260
CTB
#define CTB(tab, x, y)
Definition: hevcdec.c:999
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
ff_hevc_decode_nal_sei
int ff_hevc_decode_nal_sei(GetBitContext *gb, void *logctx, HEVCSEI *s, const HEVCParamSets *ps, enum HEVCNALUnitType type)
Definition: hevc_sei.c:278
AVRational::num
int num
Numerator.
Definition: rational.h:59
intra_prediction_unit
static void intra_prediction_unit(HEVCLocalContext *lc, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:2093
HEVC_NAL_UNSPEC62
@ HEVC_NAL_UNSPEC62
Definition: hevc.h:91
ff_hevc_deblocking_boundary_strengths
void ff_hevc_deblocking_boundary_strengths(HEVCLocalContext *lc, int x0, int y0, int log2_trafo_size)
Definition: hevc_filter.c:723
SliceHeader::slice_segment_addr
unsigned int slice_segment_addr
address (in raster order) of the first block in the current slice
Definition: hevcdec.h:256
hevc_parse.h
MvField::ref_idx
int8_t ref_idx[2]
Definition: hevcdec.h:349
SAOParams::eo_class
int eo_class[3]
sao_eo_class
Definition: hevcdsp.h:40
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:87
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:462
hevc_luma_mv_mvp_mode
static void hevc_luma_mv_mvp_mode(HEVCLocalContext *lc, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv)
Definition: hevcdec.c:1825
ff_thread_report_progress2
void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n)
Definition: pthread_slice.c:210
first
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But first
Definition: rate_distortion.txt:12
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:988
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
QPEL_EXTRA_AFTER
#define QPEL_EXTRA_AFTER
Definition: hevcdec.h:64
HEVC_NAL_BLA_N_LP
@ HEVC_NAL_BLA_N_LP
Definition: hevc.h:47
SAOParams::type_idx
uint8_t type_idx[3]
sao_type_idx
Definition: hevcdsp.h:44
film_grain_params.h
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
TransformUnit::intra_pred_mode
int intra_pred_mode
Definition: hevcdec.h:378
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:524
HEVC_NAL_RADL_R
@ HEVC_NAL_RADL_R
Definition: hevc.h:36
hls_prediction_unit
static void hls_prediction_unit(HEVCLocalContext *lc, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int partIdx, int idx)
Definition: hevcdec.c:1870
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:544
SliceHeader::cabac_init_flag
uint8_t cabac_init_flag
Definition: hevcdec.h:287
H2645NAL::size
int size
Definition: h2645_parse.h:36
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:528
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:721
hls_transform_unit
static int hls_transform_unit(HEVCLocalContext *lc, int x0, int y0, int xBase, int yBase, int cb_xBase, int cb_yBase, int log2_cb_size, int log2_trafo_size, int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
Definition: hevcdec.c:1103
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:306
QPEL_EXTRA_BEFORE
#define QPEL_EXTRA_BEFORE
Definition: hevcdec.h:63
ff_hevc_rem_intra_luma_pred_mode_decode
int ff_hevc_rem_intra_luma_pred_mode_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:773
ff_hevc_sao_merge_flag_decode
int ff_hevc_sao_merge_flag_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:571
AVMasteringDisplayMetadata::white_point
AVRational white_point[2]
CIE 1931 xy chromaticity coords of white point.
Definition: mastering_display_metadata.h:47
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
HEVCLocalContext::parent
const struct HEVCContext * parent
Definition: hevcdec.h:440
s
#define s(width, name)
Definition: cbs_vp9.c:256
ff_thread_await_progress2
void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift)
Definition: pthread_slice.c:222
SAO_NOT_APPLIED
@ SAO_NOT_APPLIED
Definition: hevcdec.h:210
set_sps
static int set_sps(HEVCContext *s, const HEVCSPS *sps, enum AVPixelFormat pix_fmt)
Definition: hevcdec.c:507
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
ff_hevc_nal_is_nonref
static av_always_inline int ff_hevc_nal_is_nonref(enum HEVCNALUnitType type)
Definition: hevcdec.h:661
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
ff_hevc_set_new_ref
int ff_hevc_set_new_ref(HEVCContext *s, AVFrame **frame, int poc)
Definition: hevc_refs.c:137
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
SliceHeader::slice_rps
ShortTermRPS slice_rps
Definition: hevcdec.h:272
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
decode.h
IS_IDR
#define IS_IDR(s)
Definition: hevcdec.h:75
H2645NAL::data
const uint8_t * data
Definition: h2645_parse.h:35
ff_hevc_slice_rpl
int ff_hevc_slice_rpl(HEVCContext *s)
Construct the reference picture list(s) for the current slice.
Definition: hevc_refs.c:312
RefPicList::ref
struct HEVCFrame * ref[HEVC_MAX_REFS]
Definition: hevcdec.h:242
H2645NAL::skipped_bytes_pos
int * skipped_bytes_pos
Definition: h2645_parse.h:71
HEVC_SLICE_I
@ HEVC_SLICE_I
Definition: hevc.h:98
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
SliceHeader::size
int * size
Definition: hevcdec.h:307
ff_hevc_cabac_init
int ff_hevc_cabac_init(HEVCLocalContext *lc, int ctb_addr_ts)
Definition: hevc_cabac.c:512
ff_hevc_set_neighbour_available
void ff_hevc_set_neighbour_available(HEVCLocalContext *lc, int x0, int y0, int nPbW, int nPbH)
Definition: hevc_mvs.c:43
SliceHeader::collocated_list
uint8_t collocated_list
Definition: hevcdec.h:290
atomic_load
#define atomic_load(object)
Definition: stdatomic.h:93
AVCOL_PRI_UNSPECIFIED
@ AVCOL_PRI_UNSPECIFIED
Definition: pixfmt.h:536
AVDISCARD_BIDIR
@ AVDISCARD_BIDIR
discard all bidirectional frames
Definition: defs.h:73
get_se_golomb
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:239
INTRA_ANGULAR_26
@ INTRA_ANGULAR_26
Definition: hevcdec.h:198
H2645NAL::type
int type
NAL unit type.
Definition: h2645_parse.h:52
CodingUnit::max_trafo_depth
uint8_t max_trafo_depth
MaxTrafoDepth.
Definition: hevcdec.h:338
AV_FRAME_DATA_DYNAMIC_HDR_VIVID
@ AV_FRAME_DATA_DYNAMIC_HDR_VIVID
HDR Vivid dynamic metadata associated with a video frame.
Definition: frame.h:211
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
SliceHeader::slice_ctb_addr_rs
int slice_ctb_addr_rs
Definition: hevcdec.h:326
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:896
FF_CODEC_PROPERTY_FILM_GRAIN
#define FF_CODEC_PROPERTY_FILM_GRAIN
Definition: avcodec.h:1854
arg
const char * arg
Definition: jacosubdec.c:67
if
if(ret)
Definition: filter_design.txt:179
HEVC_NAL_IDR_N_LP
@ HEVC_NAL_IDR_N_LP
Definition: hevc.h:49
SliceHeader::pic_output_flag
uint8_t pic_output_flag
Definition: hevcdec.h:266
ff_hevc_cbf_cb_cr_decode
int ff_hevc_cbf_cb_cr_decode(HEVCLocalContext *lc, int trafo_depth)
Definition: hevc_cabac.c:884
hls_slice_data_wpp
static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
Definition: hevcdec.c:2636
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:107
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:76
threadframe.h
PredictionUnit::rem_intra_luma_pred_mode
int rem_intra_luma_pred_mode
Definition: hevcdec.h:364
H2645NAL::raw_size
int raw_size
Definition: h2645_parse.h:44
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
IS_BLA
#define IS_BLA(s)
Definition: hevcdec.h:76
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_bswapdsp_init
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
HEVC_SLICE_B
@ HEVC_SLICE_B
Definition: hevc.h:96
NULL
#define NULL
Definition: coverity.c:32
hevc_ref_frame
static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
Definition: hevcdec.c:3401
HEVC_SEQUENCE_COUNTER_MASK
#define HEVC_SEQUENCE_COUNTER_MASK
Definition: hevcdec.h:398
hls_coding_unit
static int hls_coding_unit(HEVCLocalContext *lc, const HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:2184
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1009
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
HEVCLocalContext::tmp
int16_t tmp[MAX_PB_SIZE *MAX_PB_SIZE]
Definition: hevcdec.h:474
ff_hevc_ps_uninit
void ff_hevc_ps_uninit(HEVCParamSets *ps)
Definition: hevc_ps.c:1687
HEVC_NAL_PPS
@ HEVC_NAL_PPS
Definition: hevc.h:63
LongTermRPS::poc
int poc[32]
Definition: hevcdec.h:235
AVCHROMA_LOC_LEFT
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Definition: pixfmt.h:682
CodingUnit::cu_transquant_bypass_flag
uint8_t cu_transquant_bypass_flag
Definition: hevcdec.h:339
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:461
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
HEVCLocalContext::first_qp_group
uint8_t first_qp_group
Definition: hevcdec.h:437
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AVHWAccel::end_frame
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:2176
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:378
ff_dovi_update_cfg
void ff_dovi_update_cfg(DOVIContext *s, const AVDOVIDecoderConfigurationRecord *cfg)
Read the contents of an AVDOVIDecoderConfigurationRecord (usually provided by stream side data) and u...
Definition: dovi_rpu.c:83
profiles.h
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:322
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:106
L0
#define L0
Definition: hevcdec.h:57
HEVCFrame::rpl_tab
RefPicListTab ** rpl_tab
Definition: hevcdec.h:408
LongTermRPS::poc_msb_present
uint8_t poc_msb_present[32]
Definition: hevcdec.h:236
HEVC_NAL_SEI_SUFFIX
@ HEVC_NAL_SEI_SUFFIX
Definition: hevc.h:69
ff_hevc_sao_band_position_decode
int ff_hevc_sao_band_position_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:586
HEVC_NAL_CRA_NUT
@ HEVC_NAL_CRA_NUT
Definition: hevc.h:50
av_frame_new_side_data_from_buf
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:638
hevc_pel_weight
static const uint8_t hevc_pel_weight[65]
Definition: hevcdec.c:56
PART_Nx2N
@ PART_Nx2N
Definition: hevcdec.h:144
RefPicListTab
Definition: hevcdec.h:248
ff_hevc_split_coding_unit_flag_decode
int ff_hevc_split_coding_unit_flag_decode(HEVCLocalContext *lc, int ct_depth, int x0, int y0)
Definition: hevc_cabac.c:697
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:281
BOUNDARY_UPPER_TILE
#define BOUNDARY_UPPER_TILE
Definition: hevcdec.h:484
vps
static int FUNC() vps(CodedBitstreamContext *ctx, RWContext *rw, H265RawVPS *current)
Definition: cbs_h265_syntax_template.c:423
ff_hevc_decode_extradata
int ff_hevc_decode_extradata(const uint8_t *data, int size, HEVCParamSets *ps, HEVCSEI *sei, int *is_nalff, int *nal_length_size, int err_recognition, int apply_defdispwin, void *logctx)
Definition: hevc_parse.c:80
AV_EF_CRCCHECK
#define AV_EF_CRCCHECK
Verify checksums embedded in the bitstream (could be of either encoded or decoded data,...
Definition: defs.h:48
SliceHeader::nb_refs
unsigned int nb_refs[2]
Definition: hevcdec.h:282
Mv::x
int16_t x
horizontal component of motion vector
Definition: hevcdec.h:343
ff_slice_thread_init_progress
int av_cold ff_slice_thread_init_progress(AVCodecContext *avctx)
Definition: pthread_slice.c:179
AVCodecContext::level
int level
level
Definition: avcodec.h:1691
hls_sao_param
static void hls_sao_param(HEVCLocalContext *lc, int rx, int ry)
Definition: hevcdec.c:1013
HEVC_NAL_RASL_R
@ HEVC_NAL_RASL_R
Definition: hevc.h:38
PF_BI
@ PF_BI
Definition: hevcdec.h:168
ff_hevc_no_residual_syntax_flag_decode
int ff_hevc_no_residual_syntax_flag_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:841
SAMPLE_CTB
#define SAMPLE_CTB(tab, x, y)
Definition: hevcdec.h:73
HEVCWindow
Definition: hevc_ps.h:43
SCAN_HORIZ
@ SCAN_HORIZ
Definition: hevcdec.h:225
hevc_data.h
ff_hevc_frame_rps
int ff_hevc_frame_rps(HEVCContext *s)
Construct the reference picture sets for the current frame.
Definition: hevc_refs.c:464
HEVCLocalContext::edge_emu_buffer
uint8_t edge_emu_buffer[(MAX_PB_SIZE+7) *EDGE_EMU_BUFFER_STRIDE *2]
Definition: hevcdec.h:471
hevc_await_progress
static void hevc_await_progress(const HEVCContext *s, const HEVCFrame *ref, const Mv *mv, int y0, int height)
Definition: hevcdec.c:1815
IS_IRAP
#define IS_IRAP(s)
Definition: hevcdec.h:78
LongTermRPS::used
uint8_t used[32]
Definition: hevcdec.h:237
SliceHeader::colour_plane_id
uint8_t colour_plane_id
RPS coded in the slice header itself is stored here.
Definition: hevcdec.h:267
PART_nLx2N
@ PART_nLx2N
Definition: hevcdec.h:148
SliceHeader::dependent_slice_segment_flag
uint8_t dependent_slice_segment_flag
Definition: hevcdec.h:265
POS
#define POS(c_idx, x, y)
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:75
SliceHeader::first_slice_in_pic_flag
uint8_t first_slice_in_pic_flag
Definition: hevcdec.h:264
HEVCLocalContext::ctb_left_flag
uint8_t ctb_left_flag
Definition: hevcdec.h:464
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ff_hevc_res_scale_sign_flag
int ff_hevc_res_scale_sign_flag(HEVCLocalContext *lc, int idx)
Definition: hevc_cabac.c:919
ff_dovi_ctx_flush
void ff_dovi_ctx_flush(DOVIContext *s)
Partially reset the internal state.
Definition: dovi_rpu.c:53
ff_hevc_merge_idx_decode
int ff_hevc_merge_idx_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:794
AVPacket::size
int size
Definition: packet.h:375
BOUNDARY_UPPER_SLICE
#define BOUNDARY_UPPER_SLICE
Definition: hevcdec.h:483
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:115
hevcdec.h
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:344
decode_nal_units
static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
Definition: hevcdec.c:3150
codec_internal.h
SAOParams::offset_abs
int offset_abs[3][4]
sao_offset_abs
Definition: hevcdsp.h:35
AV_PIX_FMT_YUV422P10LE
@ AV_PIX_FMT_YUV422P10LE
planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
Definition: pixfmt.h:151
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
INTRA_PLANAR
@ INTRA_PLANAR
Definition: hevcdec.h:172
HEVCFrame::rpl_buf
AVBufferRef * rpl_buf
Definition: hevcdec.h:415
ff_hevc_decode_nal_sps
int ff_hevc_decode_nal_sps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps, int apply_defdispwin)
Definition: hevc_ps.c:1190
PART_2NxnD
@ PART_2NxnD
Definition: hevcdec.h:147
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:464
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
size
int size
Definition: twinvq_data.h:10344
HEVC_NAL_BLA_W_LP
@ HEVC_NAL_BLA_W_LP
Definition: hevc.h:45
SCAN_VERT
@ SCAN_VERT
Definition: hevcdec.h:226
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: codec_internal.h:69
ff_hevc_compute_poc
int ff_hevc_compute_poc(const HEVCSPS *sps, int pocTid0, int poc_lsb, int nal_unit_type)
Compute POC of the current frame and return it.
Definition: hevc_ps.c:1703
H2645NAL::gb
GetBitContext gb
Definition: h2645_parse.h:47
SliceHeader::collocated_ref_idx
unsigned int collocated_ref_idx
Definition: hevcdec.h:292
SliceHeader::entry_point_offset
unsigned * entry_point_offset
Definition: hevcdec.h:305
H2645NAL
Definition: h2645_parse.h:34
ff_hevc_cbf_luma_decode
int ff_hevc_cbf_luma_decode(HEVCLocalContext *lc, int trafo_depth)
Definition: hevc_cabac.c:889
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:466
ff_hevc_decode_nal_vps
int ff_hevc_decode_nal_vps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps)
Definition: hevc_ps.c:436
pic_arrays_free
static void pic_arrays_free(HEVCContext *s)
NOTE: Each function hls_foo correspond to the function foo in the specification (HLS stands for High ...
Definition: hevcdec.c:68
ff_hevc_luma_mv_merge_mode
void ff_hevc_luma_mv_merge_mode(HEVCLocalContext *lc, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv)
Definition: hevc_mvs.c:480
AVFrameSideData::data
uint8_t * data
Definition: frame.h:238
TransformUnit::chroma_mode_c
int chroma_mode_c
Definition: hevcdec.h:380
ff_hevc_prev_intra_luma_pred_flag_decode
int ff_hevc_prev_intra_luma_pred_flag_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:760
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1514
AVFilmGrainParams
This structure describes how to handle film grain synthesis in video for specific codecs.
Definition: film_grain_params.h:216
GetBitContext::index
int index
Definition: get_bits.h:109
SliceHeader::short_term_ref_pic_set_sps_flag
int short_term_ref_pic_set_sps_flag
Definition: hevcdec.h:270
AVCHROMA_LOC_UNSPECIFIED
@ AVCHROMA_LOC_UNSPECIFIED
Definition: pixfmt.h:681
SliceHeader::no_output_of_prior_pics_flag
uint8_t no_output_of_prior_pics_flag
Definition: hevcdec.h:279
SliceHeader::max_num_merge_cand
unsigned int max_num_merge_cand
5 - 5_minus_max_num_merge_cand
Definition: hevcdec.h:303
AVCodecHWConfigInternal
Definition: hwconfig.h:29
MvField
Definition: hevcdec.h:347
QPEL_EXTRA
#define QPEL_EXTRA
Definition: hevcdec.h:65
ff_hevc_end_of_slice_flag_decode
int ff_hevc_end_of_slice_flag_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:618
PF_L1
@ PF_L1
Definition: hevcdec.h:167
ff_hevc_unref_frame
void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags)
Definition: hevc_refs.c:31
intra_prediction_unit_default_value
static void intra_prediction_unit_default_value(HEVCLocalContext *lc, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:2161
split
static char * split(char *message, char delim)
Definition: af_channelmap.c:81
get_format
static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
Definition: hevcdec.c:401
ff_h2645_packet_split
int ff_h2645_packet_split(H2645Packet *pkt, const uint8_t *buf, int length, void *logctx, int is_nalff, int nal_length_size, enum AVCodecID codec_id, int small_padding, int use_ref)
Split an input packet into NAL units.
Definition: h2645_parse.c:396
height
#define height
hevc_frame_end
static int hevc_frame_end(HEVCContext *s)
Definition: hevcdec.c:2937
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:111
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:79
av_content_light_metadata_create_side_data
AVContentLightMetadata * av_content_light_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVContentLightMetadata and add it to the frame.
Definition: mastering_display_metadata.c:55
offset
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be. vf offset
Definition: writing_filters.txt:86
attributes.h
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:333
av_buffer_alloc
AVBufferRef * av_buffer_alloc(size_t size)
Allocate an AVBuffer of the given size using av_malloc().
Definition: buffer.c:77
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
hls_slice_data
static int hls_slice_data(HEVCContext *s)
Definition: hevcdec.c:2542
TransformUnit::cu_qp_offset_cb
int8_t cu_qp_offset_cb
Definition: hevcdec.h:383
pic_arrays_init
static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
Definition: hevcdec.c:96
HEVCFrame::rpl_tab_buf
AVBufferRef * rpl_tab_buf
Definition: hevcdec.h:414
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
MvField::pred_flag
int8_t pred_flag
Definition: hevcdec.h:350
HEVCLocalContext::ct_depth
int ct_depth
Definition: hevcdec.h:476
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1513
ff_init_cabac_decoder
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:162
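A minimal sketch of initializing the CABAC engine over byte-aligned slice data; data and size are placeholders for the slice-data pointer and length.
CABACContext cc;
int ret = ff_init_cabac_decoder(&cc, data, size);
if (ret < 0)
    return ret;    /* the buffer could not be used for CABAC decoding */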
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
PART_nRx2N
@ PART_nRx2N
Definition: hevcdec.h:149
EPEL_EXTRA_BEFORE
#define EPEL_EXTRA_BEFORE
Definition: hevcdec.h:60
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:302
SliceHeader::slice_cb_qp_offset
int slice_cb_qp_offset
Definition: hevcdec.h:295
SliceHeader
Definition: hevcdec.h:252
HEVCFrame::frame
AVFrame * frame
Definition: hevcdec.h:402
HEVC_NAL_TRAIL_R
@ HEVC_NAL_TRAIL_R
Definition: hevc.h:30
hls_decode_entry
static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
Definition: hevcdec.c:2480
hevc_frame_start
static int hevc_frame_start(HEVCContext *s)
Definition: hevcdec.c:2866
av_md5_init
void av_md5_init(AVMD5 *ctx)
Initialize MD5 hashing.
Definition: md5.c:141
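The av_md5_alloc(), av_md5_update() and av_md5_final() entries listed elsewhere in this table combine with av_md5_init() as in the hedged sketch below, which hashes a single buffer; the helper name is made up for illustration.
static int md5_of_buffer_sketch(const uint8_t *data, size_t len, uint8_t digest[16])
{
    struct AVMD5 *ctx = av_md5_alloc();      /* context with the right size and alignment */
    if (!ctx)
        return AVERROR(ENOMEM);
    av_md5_init(ctx);                        /* reset the hashing state  */
    av_md5_update(ctx, data, len);           /* may be called repeatedly */
    av_md5_final(ctx, digest);               /* write the 16-byte digest */
    av_free(ctx);
    return 0;
}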
ff_h274_apply_film_grain
int ff_h274_apply_film_grain(AVFrame *out_frame, const AVFrame *in_frame, H274FilmGrainDatabase *database, const AVFilmGrainParams *params)
Definition: h274.c:217
SliceHeader::slice_sample_adaptive_offset_flag
uint8_t slice_sample_adaptive_offset_flag[3]
Definition: hevcdec.h:284
AVDISCARD_NONINTRA
@ AVDISCARD_NONINTRA
discard all non intra frames
Definition: defs.h:74
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
av_timecode_make_smpte_tc_string2
char * av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field)
Get the timecode string from the SMPTE timecode format.
Definition: timecode.c:138
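A hedged usage sketch, assuming AV_TIMECODE_STR_SIZE from libavutil/timecode.h for the output buffer and an SMPTE 12M value already extracted from a timecode SEI message.
static void log_timecode_sketch(AVCodecContext *avctx, uint32_t tcsmpte)
{
    char tcbuf[AV_TIMECODE_STR_SIZE];
    av_timecode_make_smpte_tc_string2(tcbuf, avctx->framerate, tcsmpte,
                                      0 /* prevent_df */, 0 /* skip_field */);
    av_log(avctx, AV_LOG_DEBUG, "timecode: %s\n", tcbuf);
}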
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1851
HEVCFrame
Definition: hevcdec.h:401
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:527
av_packet_get_side_data
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, size_t *size)
Get side information from packet.
Definition: avpacket.c:251
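A minimal sketch of a side-data lookup, here for the AV_PKT_DATA_NEW_EXTRADATA type also listed in this table; what to do with the payload is left as a comment.
static void check_new_extradata_sketch(const AVPacket *avpkt)
{
    size_t sd_size = 0;
    uint8_t *sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &sd_size);
    if (sd && sd_size > 0) {
        /* the packet carries replacement extradata; reparse parameter sets here */
    }
}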
HEVCLocalContext::gb
GetBitContext gb
Definition: hevcdec.h:442
internal.h
EPEL_EXTRA_AFTER
#define EPEL_EXTRA_AFTER
Definition: hevcdec.h:61
HEVCFrame::ctb_count
int ctb_count
Definition: hevcdec.h:409
src2
const pixel * src2
Definition: h264pred_template.c:422
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
display.h
SliceHeader::offset
int * offset
Definition: hevcdec.h:306
common.h
HEVCFrame::sequence
uint16_t sequence
A sequence counter, so that old frames are output first after a POC reset.
Definition: hevcdec.h:424
SliceHeader::mvd_l1_zero_flag
uint8_t mvd_l1_zero_flag
Definition: hevcdec.h:285
delta
float delta
Definition: vorbis_enc_data.h:430
md5.h
AV_CODEC_ID_HEVC
@ AV_CODEC_ID_HEVC
Definition: codec_id.h:226
ff_hevc_bump_frame
void ff_hevc_bump_frame(HEVCContext *s)
Definition: hevc_refs.c:254
av_always_inline
#define av_always_inline
Definition: attributes.h:49
HEVC_SLICE_P
@ HEVC_SLICE_P
Definition: hevc.h:97
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:507
PF_L0
@ PF_L0
Definition: hevcdec.h:166
EDGE_EMU_BUFFER_STRIDE
#define EDGE_EMU_BUFFER_STRIDE
Definition: hevcdec.h:67
tab_mode_idx
static const uint8_t tab_mode_idx[]
Definition: hevcdec.c:2089
cabac_functions.h
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:478
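Together with av_frame_move_ref() above, a minimal sketch of handing a decoded frame to the caller, assuming dst and src are AVFrame pointers owned by the caller:
av_frame_unref(dst);           /* drop whatever dst referenced before           */
av_frame_move_ref(dst, src);   /* dst takes over src's buffers and src is reset */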
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
ff_hevc_sao_eo_class_decode
int ff_hevc_sao_eo_class_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:611
HEVCLocalContext::qp_y
int8_t qp_y
Definition: hevcdec.h:457
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:191
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
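Combined with av_buffer_alloc() above and av_buffer_unref() for cleanup, a hedged sketch of reference-counted buffer handling:
static int buffer_replace_sketch(void)
{
    AVBufferRef *a = av_buffer_alloc(1024);  /* new reference-counted buffer */
    AVBufferRef *b = NULL;
    int ret;
    if (!a)
        return AVERROR(ENOMEM);
    ret = av_buffer_replace(&b, a);          /* b now references the same data  */
    if (ret < 0) {
        av_buffer_unref(&a);
        return ret;
    }
    av_buffer_unref(&a);                     /* data stays alive through b      */
    av_buffer_unref(&b);                     /* last reference gone, data freed */
    return 0;
}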
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1016
AVMasteringDisplayMetadata
Mastering display metadata capable of representing the color volume of the display used to master the...
Definition: mastering_display_metadata.h:38
HEVC_NAL_TSA_R
@ HEVC_NAL_TSA_R
Definition: hevc.h:32
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:590
ff_hevc_cu_qp_delta_abs
int ff_hevc_cu_qp_delta_abs(HEVCLocalContext *lc)
Definition: hevc_cabac.c:644
SliceHeader::list_entry_lx
unsigned int list_entry_lx[2][32]
Definition: hevcdec.h:276
AVCodecContext::height
int height
Definition: avcodec.h:598
hevc_decode_extradata
static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
Definition: hevcdec.c:3308
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:635
av_md5_final
void av_md5_final(AVMD5 *ctx, uint8_t *dst)
Finish hashing and output digest value.
Definition: md5.c:186
hevc_decode_init
static av_cold int hevc_decode_init(AVCodecContext *avctx)
Definition: hevcdec.c:3635
HEVCFrame::poc
int poc
Definition: hevcdec.h:410
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:644
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:77
hevc.h
SAOParams
Definition: hevcdsp.h:34
SliceHeader::short_term_rps
const ShortTermRPS * short_term_rps
Definition: hevcdec.h:273
stride
#define stride
Definition: h264pred_template.c:537
ff_dovi_rpu_parse
int ff_dovi_rpu_parse(DOVIContext *s, const uint8_t *rpu, size_t rpu_size)
Parse the contents of a Dovi RPU NAL and update the parsed values in the DOVIContext struct.
Definition: dovi_rpu.c:194
HEVC_NAL_VPS
@ HEVC_NAL_VPS
Definition: hevc.h:61
SliceHeader::cu_chroma_qp_offset_enabled_flag
uint8_t cu_chroma_qp_offset_enabled_flag
Definition: hevcdec.h:298
HEVC_NAL_IDR_W_RADL
@ HEVC_NAL_IDR_W_RADL
Definition: hevc.h:48
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
ret
ret
Definition: filter_design.txt:187
AV_PKT_DATA_DOVI_CONF
@ AV_PKT_DATA_DOVI_CONF
DOVI configuration ref: dolby-vision-bitstreams-within-the-iso-base-media-file-format-v2....
Definition: packet.h:284
H2645NAL::raw_data
const uint8_t * raw_data
Definition: h2645_parse.h:45
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
frame
these buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly, until at least one frame has been pushed. Return, or at least make progress towards producing a frame.
Definition: filter_design.txt:264
PRED_L1
@ PRED_L1
Definition: hevcdec.h:160
PredictionUnit::mvd
Mv mvd
Definition: hevcdec.h:366
SliceHeader::disable_deblocking_filter_flag
uint8_t disable_deblocking_filter_flag
slice_header_disable_deblocking_filter_flag
Definition: hevcdec.h:288
ff_hevc_dsp_init
void ff_hevc_dsp_init(HEVCDSPContext *hevcdsp, int bit_depth)
Definition: hevcdsp.c:126
HEVCLocalContext::edge_emu_buffer2
uint8_t edge_emu_buffer2[(MAX_PB_SIZE+7) *EDGE_EMU_BUFFER_STRIDE *2]
Definition: hevcdec.h:473
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
hevc_init_context
static av_cold int hevc_init_context(AVCodecContext *avctx)
Definition: hevcdec.c:3493
ff_hevc_save_states
void ff_hevc_save_states(HEVCLocalContext *lc, int ctb_addr_ts)
Definition: hevc_cabac.c:450
pos
unsigned int pos
Definition: spdifenc.c:413
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual. Restrictions on codecs whose streams don't reset across frames will not work, because their bitstreams cannot be decoded in parallel. The contents of buffers must not be read before progress has been reported on them. Move such code, as well as code calling get_format(), up to before the decode process starts; call ff_thread_finish_setup() afterwards. If some code can't be moved
chroma_mc_bi
static void chroma_mc_bi(HEVCLocalContext *lc, uint8_t *dst0, ptrdiff_t dststride, const AVFrame *ref0, const AVFrame *ref1, int x_off, int y_off, int block_w, int block_h, const MvField *current_mv, int cidx)
8.5.3.2.2.2 Chroma sample bidirectional interpolation process
Definition: hevcdec.c:1724
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:463
HEVC_NAL_EOS_NUT
@ HEVC_NAL_EOS_NUT
Definition: hevc.h:65
ff_hevc_frame_nb_refs
int ff_hevc_frame_nb_refs(const HEVCContext *s)
Get the number of candidate references for the current frame.
Definition: hevc_refs.c:526
HEVCLocalContext::boundary_flags
int boundary_flags
Definition: hevcdec.h:487
ff_slice_thread_allocz_entries
int ff_slice_thread_allocz_entries(AVCodecContext *avctx, int count)
Definition: pthread_slice.c:240
U
#define U(x)
Definition: vpx_arith.h:37
HEVC_NAL_TRAIL_N
@ HEVC_NAL_TRAIL_N
Definition: hevc.h:29
hls_decode_entry_wpp
static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *hevc_lclist, int job, int self_id)
Definition: hevcdec.c:2553
LongTermRPS
Definition: hevcdec.h:234
SliceHeader::slice_type
enum HEVCSliceType slice_type
Definition: hevcdec.h:260
ff_hevc_flush_dpb
void ff_hevc_flush_dpb(HEVCContext *s)
Drop all frames currently in DPB.
Definition: hevc_refs.c:77
hls_coding_quadtree
static int hls_coding_quadtree(HEVCLocalContext *lc, int x0, int y0, int log2_cb_size, int cb_depth)
Definition: hevcdec.c:2346
HEVC_NAL_AUD
@ HEVC_NAL_AUD
Definition: hevc.h:64
AV_FRAME_DATA_DYNAMIC_HDR_PLUS
@ AV_FRAME_DATA_DYNAMIC_HDR_PLUS
HDR dynamic metadata associated with a video frame.
Definition: frame.h:159
AVCodecContext
main external API structure.
Definition: avcodec.h:426
ff_hevc_hls_filters
void ff_hevc_hls_filters(HEVCLocalContext *lc, int x_ctb, int y_ctb, int ctb_size)
Definition: hevc_filter.c:888
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1521
ff_hevc_mvp_lx_flag_decode
int ff_hevc_mvp_lx_flag_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:836
SliceHeader::slice_qp
int8_t slice_qp
Definition: hevcdec.h:310
av_mastering_display_metadata_create_side_data
AVMasteringDisplayMetadata * av_mastering_display_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVMasteringDisplayMetadata and add it to the frame.
Definition: mastering_display_metadata.c:32
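A hedged sketch of attaching mastering-display metadata to an output frame; the luminance values are placeholders, not values read from any bitstream.
static int attach_mastering_sketch(AVFrame *frame)
{
    AVMasteringDisplayMetadata *md = av_mastering_display_metadata_create_side_data(frame);
    if (!md)
        return AVERROR(ENOMEM);
    md->min_luminance = av_make_q(50, 10000);        /* 0.005 cd/m^2, placeholder */
    md->max_luminance = av_make_q(10000000, 10000);  /* 1000 cd/m^2, placeholder  */
    md->has_luminance = 1;
    return 0;
}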
SUBDIVIDE
#define SUBDIVIDE(x, y, idx)
PredictionUnit::merge_flag
uint8_t merge_flag
Definition: hevcdec.h:367
av_md5_alloc
struct AVMD5 * av_md5_alloc(void)
Allocate an AVMD5 context.
Definition: md5.c:48
AV_PKT_DATA_NEW_EXTRADATA
@ AV_PKT_DATA_NEW_EXTRADATA
The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format that the extradata buffer was...
Definition: packet.h:56
AVRational::den
int den
Denominator.
Definition: rational.h:60
pred_weight_table
static int pred_weight_table(HEVCContext *s, GetBitContext *gb)
Definition: hevcdec.c:152
SliceHeader::slice_cr_qp_offset
int slice_cr_qp_offset
Definition: hevcdec.h:296
export_stream_params_from_sei
static int export_stream_params_from_sei(HEVCContext *s)
Definition: hevcdec.c:382
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
HEVCContext
Definition: hevcdec.h:490
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1565
CodingUnit::pred_mode
enum PredMode pred_mode
PredMode.
Definition: hevcdec.h:333
SliceHeader::pic_order_cnt_lsb
int pic_order_cnt_lsb
Definition: hevcdec.h:262
HEVCLocalContext::qPy_pred
int qPy_pred
Definition: hevcdec.h:460
HEVCFrame::tab_mvf_buf
AVBufferRef * tab_mvf_buf
Definition: hevcdec.h:413
SCAN_DIAG
@ SCAN_DIAG
Definition: hevcdec.h:224
SliceHeader::rpl_modification_flag
uint8_t rpl_modification_flag[2]
Definition: hevcdec.h:278
ff_hevc_mpm_idx_decode
int ff_hevc_mpm_idx_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:765
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
ff_hevc_set_qPy
void ff_hevc_set_qPy(HEVCLocalContext *lc, int xBase, int yBase, int log2_cb_size)
Definition: hevc_filter.c:119
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
AVMasteringDisplayMetadata::min_luminance
AVRational min_luminance
Min luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:52
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:1853
hevc_decode_frame
static int hevc_decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_output, AVPacket *avpkt)
Definition: hevcdec.c:3335
av_md5_update
void av_md5_update(AVMD5 *ctx, const uint8_t *src, size_t len)
Update hash value.
Definition: md5.c:151
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
ff_hevc_inter_pred_idc_decode
int ff_hevc_inter_pred_idc_decode(HEVCLocalContext *lc, int nPbW, int nPbH)
Definition: hevc_cabac.c:810
HEVCLocalContext::tu
TransformUnit tu
Definition: hevcdec.h:462
hls_slice_header
static int hls_slice_header(HEVCContext *s)
Definition: hevcdec.c:570
ff_hevc_part_mode_decode
int ff_hevc_part_mode_decode(HEVCLocalContext *lc, int log2_cb_size)
Definition: hevc_cabac.c:718
CodingUnit::y
int y
Definition: hevcdec.h:331
src0
const pixel *const src0
Definition: h264pred_template.c:420
set_side_data
static int set_side_data(HEVCContext *s)
Definition: hevcdec.c:2723
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:613
desc
const char * desc
Definition: libsvtav1.c:83
Mv
Definition: hevcdec.h:342
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
HEVC_NAL_SPS
@ HEVC_NAL_SPS
Definition: hevc.h:62
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
PRED_L0
@ PRED_L0
Definition: hevcdec.h:159
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
get_bitsz
static av_always_inline int get_bitsz(GetBitContext *s, int n)
Read 0-25 bits.
Definition: get_bits.h:341
ff_hevc_split_transform_flag_decode
int ff_hevc_split_transform_flag_decode(HEVCLocalContext *lc, int log2_trafo_size)
Definition: hevc_cabac.c:879
HEVCVPS
Definition: hevc_ps.h:110
mastering_display_metadata.h
get_ue_golomb_long
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:104
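For reference, the Exp-Golomb code this helper reads can be sketched with the get_bitsz() entry from earlier in this table (which tolerates a zero-length read); this is a conceptual sketch, not the optimized FFmpeg implementation, and codes longer than 25 suffix bits are not handled here.
static unsigned ue_golomb_sketch(GetBitContext *gb)
{
    int leading = 0;
    /* count leading zero bits up to the terminating 1 bit */
    while (get_bits_left(gb) > 0 && !get_bits1(gb))
        leading++;
    /* value = 2^leading - 1 plus the next 'leading' bits */
    return ((1U << leading) - 1) + get_bitsz(gb, leading);
}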
EPEL_EXTRA
#define EPEL_EXTRA
Definition: hevcdec.h:62
ff_h2645_sei_ctx_replace
int ff_h2645_sei_ctx_replace(H2645SEI *dst, const H2645SEI *src)
Definition: h2645_sei.c:420
s0
#define s0
Definition: regdef.h:37
HEVCSPS
Definition: hevc_ps.h:140
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:236
ff_hevc_sao_offset_abs_decode
int ff_hevc_sao_offset_abs_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:596
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
HEVCPPS
Definition: hevc_ps.h:236
CodingUnit::part_mode
enum PartMode part_mode
PartMode.
Definition: hevcdec.h:334
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
SliceHeader::tc_offset
int tc_offset
tc_offset_div2 * 2
Definition: hevcdec.h:301
ff_hevc_reset_sei
static void ff_hevc_reset_sei(HEVCSEI *sei)
Reset SEI values that are stored on the Context.
Definition: hevc_sei.h:122
LongTermRPS::nb_refs
uint8_t nb_refs
Definition: hevcdec.h:238
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVContentLightMetadata::MaxFALL
unsigned MaxFALL
Max average light level per frame (cd/m^2).
Definition: mastering_display_metadata.h:107
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:453
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:244
TransformUnit::cross_pf
uint8_t cross_pf
Definition: hevcdec.h:385
SAOParams::offset_val
int16_t offset_val[3][5]
SaoOffsetVal.
Definition: hevcdsp.h:42
HEVCLocalContext::cu
CodingUnit cu
Definition: hevcdec.h:477
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:555
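A minimal sketch of the reuse pattern this helper enables; the size tracker persists across calls (typically in a context struct) so the buffer only grows when needed.
static int ensure_scratch_sketch(uint8_t **scratch, unsigned *scratch_size, size_t needed)
{
    av_fast_malloc(scratch, scratch_size, needed);  /* reallocates only if needed > *scratch_size */
    if (!*scratch)
        return AVERROR(ENOMEM);                     /* on failure the old buffer has been freed   */
    return 0;
}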
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
SliceHeader::pps_id
unsigned int pps_id
address (in raster order) of the first block in the current slice segment
Definition: hevcdec.h:253
ff_hevc_decoder
const FFCodec ff_hevc_decoder
Definition: hevcdec.c:3704
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:598
int32_t
int32_t
Definition: audioconvert.c:56
bytestream.h
hls_pcm_sample
static int hls_pcm_sample(HEVCLocalContext *lc, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:1448
ff_hevc_decode_short_term_rps
int ff_hevc_decode_short_term_rps(GetBitContext *gb, AVCodecContext *avctx, ShortTermRPS *rps, const HEVCSPS *sps, int is_slice_header)
Definition: hevc_ps.c:100
PredictionUnit::mpm_idx
int mpm_idx
Definition: hevcdec.h:363
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:375
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
HEVC_NAL_FD_NUT
@ HEVC_NAL_FD_NUT
Definition: hevc.h:67
PredictionUnit::chroma_mode_c
uint8_t chroma_mode_c[4]
Definition: hevcdec.h:369
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
skip_bytes
static const av_unused uint8_t * skip_bytes(CABACContext *c, int n)
Skip n bytes and reset the decoder.
Definition: cabac_functions.h:203
PredictionUnit::intra_pred_mode
uint8_t intra_pred_mode[4]
Definition: hevcdec.h:365
ff_hevc_decode_nal_pps
int ff_hevc_decode_nal_pps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps)
Definition: hevc_ps.c:1439
TransformUnit::is_cu_chroma_qp_offset_coded
uint8_t is_cu_chroma_qp_offset_coded
Definition: hevcdec.h:382
h
h
Definition: vp9dsp_template.c:2038
BOUNDARY_LEFT_SLICE
#define BOUNDARY_LEFT_SLICE
Definition: hevcdec.h:481
SliceHeader::slice_qp_delta
int slice_qp_delta
Definition: hevcdec.h:294
SliceHeader::slice_addr
unsigned int slice_addr
Definition: hevcdec.h:258
avstring.h
HEVC_NAL_EOB_NUT
@ HEVC_NAL_EOB_NUT
Definition: hevc.h:66
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
ff_hevc_merge_flag_decode
int ff_hevc_merge_flag_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:805
TransformUnit::intra_pred_mode_c
int intra_pred_mode_c
Definition: hevcdec.h:379
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:72
HEVC_NAL_SEI_PREFIX
@ HEVC_NAL_SEI_PREFIX
Definition: hevc.h:68
MD5_PRI_ARG
#define MD5_PRI_ARG(buf)
int
int
Definition: ffmpeg_filter.c:156
HEVCLocalContext::end_of_tiles_y
int end_of_tiles_y
Definition: hevcdec.h:469
luma_mc_bi
static void luma_mc_bi(HEVCLocalContext *lc, uint8_t *dst, ptrdiff_t dststride, const AVFrame *ref0, const Mv *mv0, int x_off, int y_off, int block_w, int block_h, const AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
8.5.3.2.2.1 Luma sample bidirectional interpolation process
Definition: hevcdec.c:1566
CodingUnit::intra_split_flag
uint8_t intra_split_flag
IntraSplitFlag.
Definition: hevcdec.h:337
SHIFT_CTB_WPP
#define SHIFT_CTB_WPP
Definition: hevcdec.h:46
ff_hevc_ref_idx_lx_decode
int ff_hevc_ref_idx_lx_decode(HEVCLocalContext *lc, int num_ref_idx_lx)
Definition: hevc_cabac.c:820
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:3243
PART_2NxN
@ PART_2NxN
Definition: hevcdec.h:143
HEVCParamSets::vps_list
AVBufferRef * vps_list[HEVC_MAX_VPS_COUNT]
Definition: hevc_ps.h:315
ff_dovi_attach_side_data
int ff_dovi_attach_side_data(DOVIContext *s, AVFrame *frame)
Attach the decoded AVDOVIMetadata as side data to an AVFrame.
Definition: dovi_rpu.c:91
AV_CODEC_EXPORT_DATA_FILM_GRAIN
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
Definition: avcodec.h:399
SliceHeader::long_term_rps
LongTermRPS long_term_rps
Definition: hevcdec.h:275
hls_transform_tree
static int hls_transform_tree(HEVCLocalContext *lc, int x0, int y0, int xBase, int yBase, int cb_xBase, int cb_yBase, int log2_cb_size, int log2_trafo_size, int trafo_depth, int blk_idx, const int *base_cbf_cb, const int *base_cbf_cr)
Definition: hevcdec.c:1325
HEVCLocalContext::cc
CABACContext cc
Definition: hevcdec.h:443
TransformUnit::cu_qp_offset_cr
int8_t cu_qp_offset_cr
Definition: hevcdec.h:384
ff_hevc_sao_type_idx_decode
int ff_hevc_sao_type_idx_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:576
hls_cross_component_pred
static int hls_cross_component_pred(HEVCLocalContext *lc, int idx)
Definition: hevcdec.c:1087
options
static const AVOption options[]
Definition: hevcdec.c:3689
AVDOVIDecoderConfigurationRecord
Definition: dovi_meta.h:52
HEVCParamSets
Definition: hevc_ps.h:314