hevcdec.c
1 /*
2  * HEVC video Decoder
3  *
4  * Copyright (C) 2012 - 2013 Guillaume Martres
5  * Copyright (C) 2012 - 2013 Mickael Raulet
6  * Copyright (C) 2012 - 2013 Gildas Cocherel
7  * Copyright (C) 2012 - 2013 Wassim Hamidouche
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25 
26 #include "config_components.h"
27 
28 #include "libavutil/attributes.h"
29 #include "libavutil/avstring.h"
30 #include "libavutil/common.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/md5.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/timecode.h"
37 
38 #include "bswapdsp.h"
39 #include "cabac_functions.h"
40 #include "codec_internal.h"
41 #include "decode.h"
42 #include "golomb.h"
43 #include "hevc.h"
44 #include "hevc_parse.h"
45 #include "hevcdec.h"
46 #include "hwaccel_internal.h"
47 #include "hwconfig.h"
48 #include "internal.h"
49 #include "profiles.h"
50 #include "refstruct.h"
51 #include "thread.h"
52 #include "threadframe.h"
53 
54 static const uint8_t hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
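/* hevc_pel_weight[] maps a prediction block width in samples to the index used
 * to select the matching put_hevc_qpel/epel entry from HEVCDSPContext
 * (e.g. hevc_pel_weight[16] == 5); see the idx lookups in luma_mc_uni() and chroma_mc_uni(). */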
55 
56 /**
57  * NOTE: Each function hls_foo corresponds to the function foo in the
58  * specification (HLS stands for High Level Syntax).
59  */
60 
61 /**
62  * Section 5.7
63  */
64 
65 /* free everything allocated by pic_arrays_init() */
66 static void pic_arrays_free(HEVCContext *s)
67 {
68  av_freep(&s->sao);
69  av_freep(&s->deblock);
70 
71  av_freep(&s->skip_flag);
72  av_freep(&s->tab_ct_depth);
73 
74  av_freep(&s->tab_ipm);
75  av_freep(&s->cbf_luma);
76  av_freep(&s->is_pcm);
77 
78  av_freep(&s->qp_y_tab);
79  av_freep(&s->tab_slice_address);
80  av_freep(&s->filter_slice_edges);
81 
82  av_freep(&s->horizontal_bs);
83  av_freep(&s->vertical_bs);
84 
85  av_freep(&s->sh.entry_point_offset);
86  av_freep(&s->sh.size);
87  av_freep(&s->sh.offset);
88 
89  av_buffer_pool_uninit(&s->tab_mvf_pool);
90  av_buffer_pool_uninit(&s->rpl_tab_pool);
91 }
92 
93 /* allocate arrays that depend on frame dimensions */
94 static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
95 {
96  int log2_min_cb_size = sps->log2_min_cb_size;
97  int width = sps->width;
98  int height = sps->height;
99  int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) *
100  ((height >> log2_min_cb_size) + 1);
101  int ctb_count = sps->ctb_width * sps->ctb_height;
102  int min_pu_size = sps->min_pu_width * sps->min_pu_height;
103 
104  s->bs_width = (width >> 2) + 1;
105  s->bs_height = (height >> 2) + 1;
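/* Deblocking boundary-strength maps are kept at 4-sample granularity, hence the
 * division by 4 (with one extra row/column). */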
106 
107  s->sao = av_calloc(ctb_count, sizeof(*s->sao));
108  s->deblock = av_calloc(ctb_count, sizeof(*s->deblock));
109  if (!s->sao || !s->deblock)
110  goto fail;
111 
112  s->skip_flag = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
113  s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
114  if (!s->skip_flag || !s->tab_ct_depth)
115  goto fail;
116 
117  s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height);
118  s->tab_ipm = av_mallocz(min_pu_size);
119  s->is_pcm = av_malloc_array(sps->min_pu_width + 1, sps->min_pu_height + 1);
120  if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
121  goto fail;
122 
123  s->filter_slice_edges = av_mallocz(ctb_count);
124  s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
125  sizeof(*s->tab_slice_address));
126  s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
127  sizeof(*s->qp_y_tab));
128  if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)
129  goto fail;
130 
131  s->horizontal_bs = av_calloc(s->bs_width, s->bs_height);
132  s->vertical_bs = av_calloc(s->bs_width, s->bs_height);
133  if (!s->horizontal_bs || !s->vertical_bs)
134  goto fail;
135 
136  s->tab_mvf_pool = av_buffer_pool_init(min_pu_size * sizeof(MvField),
137  av_buffer_allocz);
138  s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab),
139  av_buffer_allocz);
140  if (!s->tab_mvf_pool || !s->rpl_tab_pool)
141  goto fail;
142 
143  return 0;
144 
145 fail:
146  pic_arrays_free(s);
147  return AVERROR(ENOMEM);
148 }
149 
150 static int pred_weight_table(HEVCContext *s, GetBitContext *gb)
151 {
152  int i = 0;
153  int j = 0;
154  uint8_t luma_weight_l0_flag[16];
155  uint8_t chroma_weight_l0_flag[16];
156  uint8_t luma_weight_l1_flag[16];
157  uint8_t chroma_weight_l1_flag[16];
158  int luma_log2_weight_denom;
159 
160  luma_log2_weight_denom = get_ue_golomb_long(gb);
161  if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) {
162  av_log(s->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom);
163  return AVERROR_INVALIDDATA;
164  }
165  s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3);
166  if (s->ps.sps->chroma_format_idc != 0) {
167  int64_t chroma_log2_weight_denom = luma_log2_weight_denom + (int64_t)get_se_golomb(gb);
168  if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) {
169  av_log(s->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %"PRId64" is invalid\n", chroma_log2_weight_denom);
170  return AVERROR_INVALIDDATA;
171  }
172  s->sh.chroma_log2_weight_denom = chroma_log2_weight_denom;
173  }
174 
175  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
176  luma_weight_l0_flag[i] = get_bits1(gb);
177  if (!luma_weight_l0_flag[i]) {
178  s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
179  s->sh.luma_offset_l0[i] = 0;
180  }
181  }
182  if (s->ps.sps->chroma_format_idc != 0) {
183  for (i = 0; i < s->sh.nb_refs[L0]; i++)
184  chroma_weight_l0_flag[i] = get_bits1(gb);
185  } else {
186  for (i = 0; i < s->sh.nb_refs[L0]; i++)
187  chroma_weight_l0_flag[i] = 0;
188  }
189  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
190  if (luma_weight_l0_flag[i]) {
191  int delta_luma_weight_l0 = get_se_golomb(gb);
192  if ((int8_t)delta_luma_weight_l0 != delta_luma_weight_l0)
193  return AVERROR_INVALIDDATA;
194  s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
195  s->sh.luma_offset_l0[i] = get_se_golomb(gb);
196  }
197  if (chroma_weight_l0_flag[i]) {
198  for (j = 0; j < 2; j++) {
199  int delta_chroma_weight_l0 = get_se_golomb(gb);
200  int delta_chroma_offset_l0 = get_se_golomb(gb);
201 
202  if ( (int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0
203  || delta_chroma_offset_l0 < -(1<<17) || delta_chroma_offset_l0 > (1<<17)) {
204  return AVERROR_INVALIDDATA;
205  }
206 
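/* The chroma offset is signalled as a delta from the offset implied by the
 * weight; reconstruct it here and clip the result to the 8-bit range [-128, 127]. */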
207  s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
208  s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
209  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
210  }
211  } else {
212  s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
213  s->sh.chroma_offset_l0[i][0] = 0;
214  s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
215  s->sh.chroma_offset_l0[i][1] = 0;
216  }
217  }
218  if (s->sh.slice_type == HEVC_SLICE_B) {
219  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
220  luma_weight_l1_flag[i] = get_bits1(gb);
221  if (!luma_weight_l1_flag[i]) {
222  s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
223  s->sh.luma_offset_l1[i] = 0;
224  }
225  }
226  if (s->ps.sps->chroma_format_idc != 0) {
227  for (i = 0; i < s->sh.nb_refs[L1]; i++)
228  chroma_weight_l1_flag[i] = get_bits1(gb);
229  } else {
230  for (i = 0; i < s->sh.nb_refs[L1]; i++)
231  chroma_weight_l1_flag[i] = 0;
232  }
233  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
234  if (luma_weight_l1_flag[i]) {
235  int delta_luma_weight_l1 = get_se_golomb(gb);
236  if ((int8_t)delta_luma_weight_l1 != delta_luma_weight_l1)
237  return AVERROR_INVALIDDATA;
238  s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
239  s->sh.luma_offset_l1[i] = get_se_golomb(gb);
240  }
241  if (chroma_weight_l1_flag[i]) {
242  for (j = 0; j < 2; j++) {
243  int delta_chroma_weight_l1 = get_se_golomb(gb);
244  int delta_chroma_offset_l1 = get_se_golomb(gb);
245 
246  if ( (int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1
247  || delta_chroma_offset_l1 < -(1<<17) || delta_chroma_offset_l1 > (1<<17)) {
248  return AVERROR_INVALIDDATA;
249  }
250 
251  s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
252  s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
253  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
254  }
255  } else {
256  s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
257  s->sh.chroma_offset_l1[i][0] = 0;
258  s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
259  s->sh.chroma_offset_l1[i][1] = 0;
260  }
261  }
262  }
263  return 0;
264 }
265 
266 static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
267 {
268  const HEVCSPS *sps = s->ps.sps;
269  int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
270  int prev_delta_msb = 0;
271  unsigned int nb_sps = 0, nb_sh;
272  int i;
273 
274  rps->nb_refs = 0;
275  if (!sps->long_term_ref_pics_present_flag)
276  return 0;
277 
278  if (sps->num_long_term_ref_pics_sps > 0)
279  nb_sps = get_ue_golomb_long(gb);
280  nb_sh = get_ue_golomb_long(gb);
281 
282  if (nb_sps > sps->num_long_term_ref_pics_sps)
283  return AVERROR_INVALIDDATA;
284  if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc))
285  return AVERROR_INVALIDDATA;
286 
287  rps->nb_refs = nb_sh + nb_sps;
288 
289  for (i = 0; i < rps->nb_refs; i++) {
290 
291  if (i < nb_sps) {
292  uint8_t lt_idx_sps = 0;
293 
294  if (sps->num_long_term_ref_pics_sps > 1)
295  lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps));
296 
297  rps->poc[i] = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
298  rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
299  } else {
300  rps->poc[i] = get_bits(gb, sps->log2_max_poc_lsb);
301  rps->used[i] = get_bits1(gb);
302  }
303 
304  rps->poc_msb_present[i] = get_bits1(gb);
305  if (rps->poc_msb_present[i]) {
306  int64_t delta = get_ue_golomb_long(gb);
307  int64_t poc;
308 
309  if (i && i != nb_sps)
310  delta += prev_delta_msb;
311 
312  poc = rps->poc[i] + s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;
313  if (poc != (int32_t)poc)
314  return AVERROR_INVALIDDATA;
315  rps->poc[i] = poc;
316  prev_delta_msb = delta;
317  }
318  }
319 
320  return 0;
321 }
322 
323 static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
324 {
325  AVCodecContext *avctx = s->avctx;
326  const HEVCParamSets *ps = &s->ps;
327  const HEVCVPS *vps = ps->vps_list[sps->vps_id];
328  const HEVCWindow *ow = &sps->output_window;
329  unsigned int num = 0, den = 0;
330 
331  avctx->pix_fmt = sps->pix_fmt;
332  avctx->coded_width = sps->width;
333  avctx->coded_height = sps->height;
334  avctx->width = sps->width - ow->left_offset - ow->right_offset;
335  avctx->height = sps->height - ow->top_offset - ow->bottom_offset;
336  avctx->has_b_frames = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics;
337  avctx->profile = sps->ptl.general_ptl.profile_idc;
338  avctx->level = sps->ptl.general_ptl.level_idc;
339 
340  ff_set_sar(avctx, sps->vui.common.sar);
341 
342  if (sps->vui.common.video_signal_type_present_flag)
343  avctx->color_range = sps->vui.common.video_full_range_flag ? AVCOL_RANGE_JPEG
344  : AVCOL_RANGE_MPEG;
345  else
346  avctx->color_range = AVCOL_RANGE_MPEG;
347 
348  if (sps->vui.common.colour_description_present_flag) {
349  avctx->color_primaries = sps->vui.common.colour_primaries;
350  avctx->color_trc = sps->vui.common.transfer_characteristics;
351  avctx->colorspace = sps->vui.common.matrix_coeffs;
352  } else {
353  avctx->color_primaries = AVCOL_PRI_UNSPECIFIED;
354  avctx->color_trc = AVCOL_TRC_UNSPECIFIED;
355  avctx->colorspace = AVCOL_SPC_UNSPECIFIED;
356  }
357 
358  avctx->chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED;
359  if (sps->chroma_format_idc == 1) {
360  if (sps->vui.common.chroma_loc_info_present_flag) {
361  if (sps->vui.common.chroma_sample_loc_type_top_field <= 5)
362  avctx->chroma_sample_location = sps->vui.common.chroma_sample_loc_type_top_field + 1;
363  } else
364  avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
365  }
366 
367  if (vps->vps_timing_info_present_flag) {
368  num = vps->vps_num_units_in_tick;
369  den = vps->vps_time_scale;
370  } else if (sps->vui.vui_timing_info_present_flag) {
371  num = sps->vui.vui_num_units_in_tick;
372  den = sps->vui.vui_time_scale;
373  }
374 
375  if (num != 0 && den != 0)
376  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
377  num, den, 1 << 30);
378 }
379 
380 static int export_stream_params_from_sei(HEVCContext *s)
381 {
382  AVCodecContext *avctx = s->avctx;
383 
384  if (s->sei.common.a53_caption.buf_ref)
385  s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
386 
387  if (s->sei.common.alternative_transfer.present &&
388  av_color_transfer_name(s->sei.common.alternative_transfer.preferred_transfer_characteristics) &&
389  s->sei.common.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
390  avctx->color_trc = s->sei.common.alternative_transfer.preferred_transfer_characteristics;
391  }
392 
393  if (s->sei.common.film_grain_characteristics.present)
394  avctx->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
395 
396  return 0;
397 }
398 
399 static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
400 {
401 #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
402  CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
403  CONFIG_HEVC_NVDEC_HWACCEL + \
404  CONFIG_HEVC_VAAPI_HWACCEL + \
405  CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
406  CONFIG_HEVC_VDPAU_HWACCEL + \
407  CONFIG_HEVC_VULKAN_HWACCEL)
408  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
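/* HWACCEL_MAX + 2 leaves room for the software pixel format and the
 * AV_PIX_FMT_NONE terminator appended after the switch below. */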
409 
410  switch (sps->pix_fmt) {
411  case AV_PIX_FMT_YUV420P:
412  case AV_PIX_FMT_YUVJ420P:
413 #if CONFIG_HEVC_DXVA2_HWACCEL
414  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
415 #endif
416 #if CONFIG_HEVC_D3D11VA_HWACCEL
417  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
418  *fmt++ = AV_PIX_FMT_D3D11;
419 #endif
420 #if CONFIG_HEVC_VAAPI_HWACCEL
421  *fmt++ = AV_PIX_FMT_VAAPI;
422 #endif
423 #if CONFIG_HEVC_VDPAU_HWACCEL
424  *fmt++ = AV_PIX_FMT_VDPAU;
425 #endif
426 #if CONFIG_HEVC_NVDEC_HWACCEL
427  *fmt++ = AV_PIX_FMT_CUDA;
428 #endif
429 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
430  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
431 #endif
432 #if CONFIG_HEVC_VULKAN_HWACCEL
433  *fmt++ = AV_PIX_FMT_VULKAN;
434 #endif
435  break;
436  case AV_PIX_FMT_YUV420P10:
437 #if CONFIG_HEVC_DXVA2_HWACCEL
438  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
439 #endif
440 #if CONFIG_HEVC_D3D11VA_HWACCEL
441  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
442  *fmt++ = AV_PIX_FMT_D3D11;
443 #endif
444 #if CONFIG_HEVC_VAAPI_HWACCEL
445  *fmt++ = AV_PIX_FMT_VAAPI;
446 #endif
447 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
448  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
449 #endif
450 #if CONFIG_HEVC_VULKAN_HWACCEL
451  *fmt++ = AV_PIX_FMT_VULKAN;
452 #endif
453 #if CONFIG_HEVC_VDPAU_HWACCEL
454  *fmt++ = AV_PIX_FMT_VDPAU;
455 #endif
456 #if CONFIG_HEVC_NVDEC_HWACCEL
457  *fmt++ = AV_PIX_FMT_CUDA;
458 #endif
459  break;
460  case AV_PIX_FMT_YUV444P:
461 #if CONFIG_HEVC_VAAPI_HWACCEL
462  *fmt++ = AV_PIX_FMT_VAAPI;
463 #endif
464 #if CONFIG_HEVC_VDPAU_HWACCEL
465  *fmt++ = AV_PIX_FMT_VDPAU;
466 #endif
467 #if CONFIG_HEVC_NVDEC_HWACCEL
468  *fmt++ = AV_PIX_FMT_CUDA;
469 #endif
470 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
471  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
472 #endif
473 #if CONFIG_HEVC_VULKAN_HWACCEL
474  *fmt++ = AV_PIX_FMT_VULKAN;
475 #endif
476  break;
477  case AV_PIX_FMT_YUV422P:
478  case AV_PIX_FMT_YUV422P10LE:
479 #if CONFIG_HEVC_VAAPI_HWACCEL
480  *fmt++ = AV_PIX_FMT_VAAPI;
481 #endif
482 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
483  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
484 #endif
485 #if CONFIG_HEVC_VULKAN_HWACCEL
486  *fmt++ = AV_PIX_FMT_VULKAN;
487 #endif
488  break;
490 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
491  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
492 #endif
493  /* NOTE: fallthrough */
496 #if CONFIG_HEVC_VAAPI_HWACCEL
497  *fmt++ = AV_PIX_FMT_VAAPI;
498 #endif
499 #if CONFIG_HEVC_VDPAU_HWACCEL
500  *fmt++ = AV_PIX_FMT_VDPAU;
501 #endif
502 #if CONFIG_HEVC_VULKAN_HWACCEL
503  *fmt++ = AV_PIX_FMT_VULKAN;
504 #endif
505 #if CONFIG_HEVC_NVDEC_HWACCEL
506  *fmt++ = AV_PIX_FMT_CUDA;
507 #endif
508  break;
510 #if CONFIG_HEVC_VAAPI_HWACCEL
511  *fmt++ = AV_PIX_FMT_VAAPI;
512 #endif
513 #if CONFIG_HEVC_VULKAN_HWACCEL
514  *fmt++ = AV_PIX_FMT_VULKAN;
515 #endif
516  break;
517  }
518 
519  *fmt++ = sps->pix_fmt;
520  *fmt = AV_PIX_FMT_NONE;
521 
522  return ff_get_format(s->avctx, pix_fmts);
523 }
524 
525 static int set_sps(HEVCContext *s, const HEVCSPS *sps,
526  enum AVPixelFormat pix_fmt)
527 {
528  int ret, i;
529 
530  pic_arrays_free(s);
531  s->ps.sps = NULL;
532  s->ps.vps = NULL;
533 
534  if (!sps)
535  return 0;
536 
537  ret = pic_arrays_init(s, sps);
538  if (ret < 0)
539  goto fail;
540 
541  export_stream_params(s, sps);
542 
543  s->avctx->pix_fmt = pix_fmt;
544 
545  ff_hevc_pred_init(&s->hpc, sps->bit_depth);
546  ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth);
547  ff_videodsp_init (&s->vdsp, sps->bit_depth);
548 
549  for (i = 0; i < 3; i++) {
550  av_freep(&s->sao_pixel_buffer_h[i]);
551  av_freep(&s->sao_pixel_buffer_v[i]);
552  }
553 
554  if (sps->sao_enabled && !s->avctx->hwaccel) {
555  int c_count = (sps->chroma_format_idc != 0) ? 3 : 1;
556  int c_idx;
557 
558  for(c_idx = 0; c_idx < c_count; c_idx++) {
559  int w = sps->width >> sps->hshift[c_idx];
560  int h = sps->height >> sps->vshift[c_idx];
561  s->sao_pixel_buffer_h[c_idx] =
562  av_malloc((w * 2 * sps->ctb_height) <<
563  sps->pixel_shift);
564  s->sao_pixel_buffer_v[c_idx] =
565  av_malloc((h * 2 * sps->ctb_width) <<
566  sps->pixel_shift);
567  if (!s->sao_pixel_buffer_h[c_idx] ||
568  !s->sao_pixel_buffer_v[c_idx])
569  goto fail;
570  }
571  }
572 
573  s->ps.sps = sps;
574  s->ps.vps = s->ps.vps_list[s->ps.sps->vps_id];
575 
576  return 0;
577 
578 fail:
579  pic_arrays_free(s);
580  for (i = 0; i < 3; i++) {
581  av_freep(&s->sao_pixel_buffer_h[i]);
582  av_freep(&s->sao_pixel_buffer_v[i]);
583  }
584  s->ps.sps = NULL;
585  return ret;
586 }
587 
588 static int hls_slice_header(HEVCContext *s)
589 {
590  GetBitContext *gb = &s->HEVClc->gb;
591  SliceHeader *sh = &s->sh;
592  int i, ret;
593 
594  // Coded parameters
595  sh->first_slice_in_pic_flag = get_bits1(gb);
596  if (s->ref && sh->first_slice_in_pic_flag) {
597  av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n");
598  return 1; // This slice will be skipped later, do not corrupt state
599  }
600 
601  if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) {
602  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
603  s->max_ra = INT_MAX;
604  if (IS_IDR(s))
605  ff_hevc_clear_refs(s);
606  }
607  sh->no_output_of_prior_pics_flag = 0;
608  if (IS_IRAP(s))
609  sh->no_output_of_prior_pics_flag = get_bits1(gb);
610 
611  sh->pps_id = get_ue_golomb_long(gb);
612  if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) {
613  av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
614  return AVERROR_INVALIDDATA;
615  }
616  if (!sh->first_slice_in_pic_flag &&
617  s->ps.pps != s->ps.pps_list[sh->pps_id]) {
618  av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n");
619  return AVERROR_INVALIDDATA;
620  }
621  s->ps.pps = s->ps.pps_list[sh->pps_id];
622  if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1)
623  sh->no_output_of_prior_pics_flag = 1;
624 
625  if (s->ps.sps != s->ps.sps_list[s->ps.pps->sps_id]) {
626  const HEVCSPS *sps = s->ps.sps_list[s->ps.pps->sps_id];
627  enum AVPixelFormat pix_fmt;
628 
629  ff_hevc_clear_refs(s);
630 
631  ret = set_sps(s, sps, sps->pix_fmt);
632  if (ret < 0)
633  return ret;
634 
635  pix_fmt = get_format(s, sps);
636  if (pix_fmt < 0)
637  return pix_fmt;
638  s->avctx->pix_fmt = pix_fmt;
639 
640  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
641  s->max_ra = INT_MAX;
642  }
643 
645  if (ret < 0)
646  return ret;
647 
648  sh->dependent_slice_segment_flag = 0;
649  if (!sh->first_slice_in_pic_flag) {
650  int slice_address_length;
651 
652  if (s->ps.pps->dependent_slice_segments_enabled_flag)
653  sh->dependent_slice_segment_flag = get_bits1(gb);
654 
655  slice_address_length = av_ceil_log2(s->ps.sps->ctb_width *
656  s->ps.sps->ctb_height);
657  sh->slice_segment_addr = get_bitsz(gb, slice_address_length);
658  if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
659  av_log(s->avctx, AV_LOG_ERROR,
660  "Invalid slice segment address: %u.\n",
661  sh->slice_segment_addr);
662  return AVERROR_INVALIDDATA;
663  }
664 
665  if (!sh->dependent_slice_segment_flag) {
666  sh->slice_addr = sh->slice_segment_addr;
667  s->slice_idx++;
668  }
669  } else {
670  sh->slice_segment_addr = sh->slice_addr = 0;
671  s->slice_idx = 0;
672  s->slice_initialized = 0;
673  }
674 
675  if (!sh->dependent_slice_segment_flag) {
676  s->slice_initialized = 0;
677 
678  for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)
679  skip_bits(gb, 1); // slice_reserved_undetermined_flag[]
680 
681  sh->slice_type = get_ue_golomb_long(gb);
682  if (!(sh->slice_type == HEVC_SLICE_I ||
683  sh->slice_type == HEVC_SLICE_P ||
684  sh->slice_type == HEVC_SLICE_B)) {
685  av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
686  sh->slice_type);
687  return AVERROR_INVALIDDATA;
688  }
689  if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I &&
690  !s->ps.pps->pps_curr_pic_ref_enabled_flag) {
691  av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n");
692  return AVERROR_INVALIDDATA;
693  }
694 
695  // when flag is not present, picture is inferred to be output
696  sh->pic_output_flag = 1;
697  if (s->ps.pps->output_flag_present_flag)
698  sh->pic_output_flag = get_bits1(gb);
699 
700  if (s->ps.sps->separate_colour_plane_flag)
701  sh->colour_plane_id = get_bits(gb, 2);
702 
703  if (!IS_IDR(s)) {
704  int poc, pos;
705 
706  sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb);
707  poc = ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type);
708  if (!sh->first_slice_in_pic_flag && poc != s->poc) {
709  av_log(s->avctx, AV_LOG_WARNING,
710  "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
711  if (s->avctx->err_recognition & AV_EF_EXPLODE)
712  return AVERROR_INVALIDDATA;
713  poc = s->poc;
714  }
715  s->poc = poc;
716 
718  pos = get_bits_left(gb);
720  ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1);
721  if (ret < 0)
722  return ret;
723 
724  sh->short_term_rps = &sh->slice_rps;
725  } else {
726  int numbits, rps_idx;
727 
728  if (!s->ps.sps->nb_st_rps) {
729  av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n");
730  return AVERROR_INVALIDDATA;
731  }
732 
733  numbits = av_ceil_log2(s->ps.sps->nb_st_rps);
734  rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
735  sh->short_term_rps = &s->ps.sps->st_rps[rps_idx];
736  }
737  sh->short_term_ref_pic_set_size = pos - get_bits_left(gb);
738 
739  pos = get_bits_left(gb);
740  ret = decode_lt_rps(s, &sh->long_term_rps, gb);
741  if (ret < 0) {
742  av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n");
743  if (s->avctx->err_recognition & AV_EF_EXPLODE)
744  return AVERROR_INVALIDDATA;
745  }
746  sh->long_term_ref_pic_set_size = pos - get_bits_left(gb);
747 
748  if (s->ps.sps->sps_temporal_mvp_enabled_flag)
749  sh->slice_temporal_mvp_enabled_flag = get_bits1(gb);
750  else
751  sh->slice_temporal_mvp_enabled_flag = 0;
752  } else {
753  s->poc = 0;
754  sh->pic_order_cnt_lsb = 0;
757  sh->short_term_rps = NULL;
760  }
761 
762  /* 8.3.1 */
763  if (sh->first_slice_in_pic_flag && s->temporal_id == 0 &&
764  s->nal_unit_type != HEVC_NAL_TRAIL_N &&
765  s->nal_unit_type != HEVC_NAL_TSA_N &&
766  s->nal_unit_type != HEVC_NAL_STSA_N &&
767  s->nal_unit_type != HEVC_NAL_RADL_N &&
768  s->nal_unit_type != HEVC_NAL_RADL_R &&
769  s->nal_unit_type != HEVC_NAL_RASL_N &&
770  s->nal_unit_type != HEVC_NAL_RASL_R)
771  s->pocTid0 = s->poc;
772 
773  if (s->ps.sps->sao_enabled) {
774  sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb);
775  if (s->ps.sps->chroma_format_idc) {
776  sh->slice_sample_adaptive_offset_flag[1] =
777  sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb);
778  }
779  } else {
780  sh->slice_sample_adaptive_offset_flag[0] = 0;
781  sh->slice_sample_adaptive_offset_flag[1] = 0;
782  sh->slice_sample_adaptive_offset_flag[2] = 0;
783  }
784 
785  sh->nb_refs[L0] = sh->nb_refs[L1] = 0;
786  if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) {
787  int nb_refs;
788 
789  sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
790  if (sh->slice_type == HEVC_SLICE_B)
791  sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;
792 
793  if (get_bits1(gb)) { // num_ref_idx_active_override_flag
794  sh->nb_refs[L0] = get_ue_golomb_31(gb) + 1;
795  if (sh->slice_type == HEVC_SLICE_B)
796  sh->nb_refs[L1] = get_ue_golomb_31(gb) + 1;
797  }
798  if (sh->nb_refs[L0] >= HEVC_MAX_REFS || sh->nb_refs[L1] >= HEVC_MAX_REFS) {
799  av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n",
800  sh->nb_refs[L0], sh->nb_refs[L1]);
801  return AVERROR_INVALIDDATA;
802  }
803 
804  sh->rpl_modification_flag[0] = 0;
805  sh->rpl_modification_flag[1] = 0;
806  nb_refs = ff_hevc_frame_nb_refs(s);
807  if (!nb_refs) {
808  av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n");
809  return AVERROR_INVALIDDATA;
810  }
811 
812  if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {
813  sh->rpl_modification_flag[0] = get_bits1(gb);
814  if (sh->rpl_modification_flag[0]) {
815  for (i = 0; i < sh->nb_refs[L0]; i++)
816  sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs));
817  }
818 
819  if (sh->slice_type == HEVC_SLICE_B) {
820  sh->rpl_modification_flag[1] = get_bits1(gb);
821  if (sh->rpl_modification_flag[1] == 1)
822  for (i = 0; i < sh->nb_refs[L1]; i++)
823  sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs));
824  }
825  }
826 
827  if (sh->slice_type == HEVC_SLICE_B)
828  sh->mvd_l1_zero_flag = get_bits1(gb);
829 
830  if (s->ps.pps->cabac_init_present_flag)
831  sh->cabac_init_flag = get_bits1(gb);
832  else
833  sh->cabac_init_flag = 0;
834 
835  sh->collocated_ref_idx = 0;
836  if (sh->slice_temporal_mvp_enabled_flag) {
837  sh->collocated_list = L0;
838  if (sh->slice_type == HEVC_SLICE_B)
839  sh->collocated_list = !get_bits1(gb);
840 
841  if (sh->nb_refs[sh->collocated_list] > 1) {
842  sh->collocated_ref_idx = get_ue_golomb_long(gb);
843  if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) {
844  av_log(s->avctx, AV_LOG_ERROR,
845  "Invalid collocated_ref_idx: %d.\n",
846  sh->collocated_ref_idx);
847  return AVERROR_INVALIDDATA;
848  }
849  }
850  }
851 
852  if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) ||
853  (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) {
854  int ret = pred_weight_table(s, gb);
855  if (ret < 0)
856  return ret;
857  }
858 
859  sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb);
860  if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) {
861  av_log(s->avctx, AV_LOG_ERROR,
862  "Invalid number of merging MVP candidates: %d.\n",
863  sh->max_num_merge_cand);
864  return AVERROR_INVALIDDATA;
865  }
866 
867  // Syntax in 7.3.6.1
868  if (s->ps.sps->motion_vector_resolution_control_idc == 2)
869  sh->use_integer_mv_flag = get_bits1(gb);
870  else
871  // Inferred to be equal to motion_vector_resolution_control_idc if not present
872  sh->use_integer_mv_flag = s->ps.sps->motion_vector_resolution_control_idc;
873 
874  }
875 
876  sh->slice_qp_delta = get_se_golomb(gb);
877 
878  if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {
879  sh->slice_cb_qp_offset = get_se_golomb(gb);
880  sh->slice_cr_qp_offset = get_se_golomb(gb);
881  if (sh->slice_cb_qp_offset < -12 || sh->slice_cb_qp_offset > 12 ||
882  sh->slice_cr_qp_offset < -12 || sh->slice_cr_qp_offset > 12) {
883  av_log(s->avctx, AV_LOG_ERROR, "Invalid slice cx qp offset.\n");
884  return AVERROR_INVALIDDATA;
885  }
886  } else {
887  sh->slice_cb_qp_offset = 0;
888  sh->slice_cr_qp_offset = 0;
889  }
890 
891  if (s->ps.pps->pps_slice_act_qp_offsets_present_flag) {
892  sh->slice_act_y_qp_offset = get_se_golomb(gb);
893  sh->slice_act_cb_qp_offset = get_se_golomb(gb);
894  sh->slice_act_cr_qp_offset = get_se_golomb(gb);
895  }
896 
897  if (s->ps.pps->chroma_qp_offset_list_enabled_flag)
898  sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb);
899  else
900  sh->cu_chroma_qp_offset_enabled_flag = 0;
901 
902  if (s->ps.pps->deblocking_filter_control_present_flag) {
903  int deblocking_filter_override_flag = 0;
904 
905  if (s->ps.pps->deblocking_filter_override_enabled_flag)
906  deblocking_filter_override_flag = get_bits1(gb);
907 
908  if (deblocking_filter_override_flag) {
909  sh->disable_deblocking_filter_flag = get_bits1(gb);
910  if (!sh->disable_deblocking_filter_flag) {
911  int beta_offset_div2 = get_se_golomb(gb);
912  int tc_offset_div2 = get_se_golomb(gb) ;
913  if (beta_offset_div2 < -6 || beta_offset_div2 > 6 ||
914  tc_offset_div2 < -6 || tc_offset_div2 > 6) {
915  av_log(s->avctx, AV_LOG_ERROR,
916  "Invalid deblock filter offsets: %d, %d\n",
917  beta_offset_div2, tc_offset_div2);
918  return AVERROR_INVALIDDATA;
919  }
920  sh->beta_offset = beta_offset_div2 * 2;
921  sh->tc_offset = tc_offset_div2 * 2;
922  }
923  } else {
924  sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf;
925  sh->beta_offset = s->ps.pps->beta_offset;
926  sh->tc_offset = s->ps.pps->tc_offset;
927  }
928  } else {
929  sh->disable_deblocking_filter_flag = 0;
930  sh->beta_offset = 0;
931  sh->tc_offset = 0;
932  }
933 
934  if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&
935  (sh->slice_sample_adaptive_offset_flag[0] ||
936  sh->slice_sample_adaptive_offset_flag[1] ||
937  !sh->disable_deblocking_filter_flag)) {
938  sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb);
939  } else {
940  sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag;
941  }
942  } else if (!s->slice_initialized) {
943  av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n");
944  return AVERROR_INVALIDDATA;
945  }
946 
947  sh->num_entry_point_offsets = 0;
948  if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
949  unsigned num_entry_point_offsets = get_ue_golomb_long(gb);
950  // It would be possible to bound this tighter, but this is simpler
951  if (num_entry_point_offsets > get_bits_left(gb)) {
952  av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);
953  return AVERROR_INVALIDDATA;
954  }
955 
956  sh->num_entry_point_offsets = num_entry_point_offsets;
957  if (sh->num_entry_point_offsets > 0) {
958  int offset_len = get_ue_golomb_long(gb) + 1;
959 
960  if (offset_len < 1 || offset_len > 32) {
961  sh->num_entry_point_offsets = 0;
962  av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len);
963  return AVERROR_INVALIDDATA;
964  }
965 
966  av_freep(&sh->entry_point_offset);
967  av_freep(&sh->offset);
968  av_freep(&sh->size);
969  sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned));
970  sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
971  sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
972  if (!sh->entry_point_offset || !sh->offset || !sh->size) {
973  sh->num_entry_point_offsets = 0;
974  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
975  return AVERROR(ENOMEM);
976  }
977  for (i = 0; i < sh->num_entry_point_offsets; i++) {
978  unsigned val = get_bits_long(gb, offset_len);
979  sh->entry_point_offset[i] = val + 1; // +1 to get the size
980  }
981  if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) {
982  s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here
983  s->threads_number = 1;
984  } else
985  s->enable_parallel_tiles = 0;
986  } else
987  s->enable_parallel_tiles = 0;
988  }
989 
990  if (s->ps.pps->slice_header_extension_present_flag) {
991  unsigned int length = get_ue_golomb_long(gb);
992  if (length*8LL > get_bits_left(gb)) {
993  av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n");
994  return AVERROR_INVALIDDATA;
995  }
996  for (i = 0; i < length; i++)
997  skip_bits(gb, 8); // slice_header_extension_data_byte
998  }
999 
1000  // Inferred parameters
1001  sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta;
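/* SliceQpY = 26 + init_qp_minus26 + slice_qp_delta; the check below enforces
 * the valid range [-QpBdOffsetY, 51]. */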
1002  if (sh->slice_qp > 51 ||
1003  sh->slice_qp < -s->ps.sps->qp_bd_offset) {
1004  av_log(s->avctx, AV_LOG_ERROR,
1005  "The slice_qp %d is outside the valid range "
1006  "[%d, 51].\n",
1007  sh->slice_qp,
1008  -s->ps.sps->qp_bd_offset);
1009  return AVERROR_INVALIDDATA;
1010  }
1011 
1012  s->sh.slice_ctb_addr_rs = s->sh.slice_segment_addr;
1013 
1014  if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
1015  av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
1016  return AVERROR_INVALIDDATA;
1017  }
1018 
1019  if (get_bits_left(gb) < 0) {
1020  av_log(s->avctx, AV_LOG_ERROR,
1021  "Overread slice header by %d bits\n", -get_bits_left(gb));
1022  return AVERROR_INVALIDDATA;
1023  }
1024 
1025  s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;
1026 
1027  if (!s->ps.pps->cu_qp_delta_enabled_flag)
1028  s->HEVClc->qp_y = s->sh.slice_qp;
1029 
1030  s->slice_initialized = 1;
1031  s->HEVClc->tu.cu_qp_offset_cb = 0;
1032  s->HEVClc->tu.cu_qp_offset_cr = 0;
1033 
1034  return 0;
1035 }
1036 
1037 #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
1038 
1039 #define SET_SAO(elem, value) \
1040 do { \
1041  if (!sao_merge_up_flag && !sao_merge_left_flag) \
1042  sao->elem = value; \
1043  else if (sao_merge_left_flag) \
1044  sao->elem = CTB(s->sao, rx-1, ry).elem; \
1045  else if (sao_merge_up_flag) \
1046  sao->elem = CTB(s->sao, rx, ry-1).elem; \
1047  else \
1048  sao->elem = 0; \
1049 } while (0)
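/* SET_SAO() implements the SAO merge semantics: when sao_merge_left_flag or
 * sao_merge_up_flag is set, the parameter is copied from the left or above CTB
 * via the CTB() raster-order lookup; otherwise the freshly decoded value is used. */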
1050 
1051 static void hls_sao_param(HEVCLocalContext *lc, int rx, int ry)
1052 {
1053  const HEVCContext *const s = lc->parent;
1054  int sao_merge_left_flag = 0;
1055  int sao_merge_up_flag = 0;
1056  SAOParams *sao = &CTB(s->sao, rx, ry);
1057  int c_idx, i;
1058 
1059  if (s->sh.slice_sample_adaptive_offset_flag[0] ||
1060  s->sh.slice_sample_adaptive_offset_flag[1]) {
1061  if (rx > 0) {
1062  if (lc->ctb_left_flag)
1063  sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(lc);
1064  }
1065  if (ry > 0 && !sao_merge_left_flag) {
1066  if (lc->ctb_up_flag)
1067  sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(lc);
1068  }
1069  }
1070 
1071  for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
1072  int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
1073  s->ps.pps->log2_sao_offset_scale_chroma;
1074 
1075  if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
1076  sao->type_idx[c_idx] = SAO_NOT_APPLIED;
1077  continue;
1078  }
1079 
1080  if (c_idx == 2) {
1081  sao->type_idx[2] = sao->type_idx[1];
1082  sao->eo_class[2] = sao->eo_class[1];
1083  } else {
1084  SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(lc));
1085  }
1086 
1087  if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
1088  continue;
1089 
1090  for (i = 0; i < 4; i++)
1091  SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(lc));
1092 
1093  if (sao->type_idx[c_idx] == SAO_BAND) {
1094  for (i = 0; i < 4; i++) {
1095  if (sao->offset_abs[c_idx][i]) {
1096  SET_SAO(offset_sign[c_idx][i],
1097  ff_hevc_sao_offset_sign_decode(lc));
1098  } else {
1099  sao->offset_sign[c_idx][i] = 0;
1100  }
1101  }
1102  SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(lc));
1103  } else if (c_idx != 2) {
1104  SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(lc));
1105  }
1106 
1107  // Inferred parameters
1108  sao->offset_val[c_idx][0] = 0;
1109  for (i = 0; i < 4; i++) {
1110  sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i];
1111  if (sao->type_idx[c_idx] == SAO_EDGE) {
1112  if (i > 1)
1113  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1114  } else if (sao->offset_sign[c_idx][i]) {
1115  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1116  }
1117  sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
1118  }
1119  }
1120 }
1121 
1122 #undef SET_SAO
1123 #undef CTB
1124 
1125 static int hls_cross_component_pred(HEVCLocalContext *lc, int idx)
1126 {
1127  int log2_res_scale_abs_plus1 = ff_hevc_log2_res_scale_abs(lc, idx);
1128 
1129  if (log2_res_scale_abs_plus1 != 0) {
1130  int res_scale_sign_flag = ff_hevc_res_scale_sign_flag(lc, idx);
1131  lc->tu.res_scale_val = (1 << (log2_res_scale_abs_plus1 - 1)) *
1132  (1 - 2 * res_scale_sign_flag);
1133  } else {
1134  lc->tu.res_scale_val = 0;
1135  }
1136 
1137 
1138  return 0;
1139 }
1140 
1141 static int hls_transform_unit(HEVCLocalContext *lc, int x0, int y0,
1142  int xBase, int yBase, int cb_xBase, int cb_yBase,
1143  int log2_cb_size, int log2_trafo_size,
1144  int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
1145 {
1146  const HEVCContext *const s = lc->parent;
1147  const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
1148  int i;
1149 
1150  if (lc->cu.pred_mode == MODE_INTRA) {
1151  int trafo_size = 1 << log2_trafo_size;
1152  ff_hevc_set_neighbour_available(lc, x0, y0, trafo_size, trafo_size);
1153 
1154  s->hpc.intra_pred[log2_trafo_size - 2](lc, x0, y0, 0);
1155  }
1156 
1157  if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
1158  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1159  int scan_idx = SCAN_DIAG;
1160  int scan_idx_c = SCAN_DIAG;
1161  int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
1162  (s->ps.sps->chroma_format_idc == 2 &&
1163  (cbf_cb[1] || cbf_cr[1]));
1164 
1165  if (s->ps.pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) {
1166  lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(lc);
1167  if (lc->tu.cu_qp_delta != 0)
1168  if (ff_hevc_cu_qp_delta_sign_flag(lc) == 1)
1169  lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta;
1170  lc->tu.is_cu_qp_delta_coded = 1;
1171 
1172  if (lc->tu.cu_qp_delta < -(26 + s->ps.sps->qp_bd_offset / 2) ||
1173  lc->tu.cu_qp_delta > (25 + s->ps.sps->qp_bd_offset / 2)) {
1174  av_log(s->avctx, AV_LOG_ERROR,
1175  "The cu_qp_delta %d is outside the valid range "
1176  "[%d, %d].\n",
1177  lc->tu.cu_qp_delta,
1178  -(26 + s->ps.sps->qp_bd_offset / 2),
1179  (25 + s->ps.sps->qp_bd_offset / 2));
1180  return AVERROR_INVALIDDATA;
1181  }
1182 
1183  ff_hevc_set_qPy(lc, cb_xBase, cb_yBase, log2_cb_size);
1184  }
1185 
1186  if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma &&
1187  !lc->cu.cu_transquant_bypass_flag && !lc->tu.is_cu_chroma_qp_offset_coded) {
1188  int cu_chroma_qp_offset_flag = ff_hevc_cu_chroma_qp_offset_flag(lc);
1189  if (cu_chroma_qp_offset_flag) {
1190  int cu_chroma_qp_offset_idx = 0;
1191  if (s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) {
1192  cu_chroma_qp_offset_idx = ff_hevc_cu_chroma_qp_offset_idx(lc);
1193  av_log(s->avctx, AV_LOG_ERROR,
1194  "cu_chroma_qp_offset_idx not yet tested.\n");
1195  }
1196  lc->tu.cu_qp_offset_cb = s->ps.pps->cb_qp_offset_list[cu_chroma_qp_offset_idx];
1197  lc->tu.cu_qp_offset_cr = s->ps.pps->cr_qp_offset_list[cu_chroma_qp_offset_idx];
1198  } else {
1199  lc->tu.cu_qp_offset_cb = 0;
1200  lc->tu.cu_qp_offset_cr = 0;
1201  }
1202  lc->tu.is_cu_chroma_qp_offset_coded = 1;
1203  }
1204 
1205  if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) {
1206  if (lc->tu.intra_pred_mode >= 6 &&
1207  lc->tu.intra_pred_mode <= 14) {
1208  scan_idx = SCAN_VERT;
1209  } else if (lc->tu.intra_pred_mode >= 22 &&
1210  lc->tu.intra_pred_mode <= 30) {
1211  scan_idx = SCAN_HORIZ;
1212  }
1213 
1214  if (lc->tu.intra_pred_mode_c >= 6 &&
1215  lc->tu.intra_pred_mode_c <= 14) {
1216  scan_idx_c = SCAN_VERT;
1217  } else if (lc->tu.intra_pred_mode_c >= 22 &&
1218  lc->tu.intra_pred_mode_c <= 30) {
1219  scan_idx_c = SCAN_HORIZ;
1220  }
1221  }
1222 
1223  lc->tu.cross_pf = 0;
1224 
1225  if (cbf_luma)
1226  ff_hevc_hls_residual_coding(lc, x0, y0, log2_trafo_size, scan_idx, 0);
1227  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1228  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1229  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1230  lc->tu.cross_pf = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma &&
1231  (lc->cu.pred_mode == MODE_INTER ||
1232  (lc->tu.chroma_mode_c == 4)));
1233 
1234  if (lc->tu.cross_pf) {
1235  hls_cross_component_pred(lc, 0);
1236  }
1237  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1238  if (lc->cu.pred_mode == MODE_INTRA) {
1239  ff_hevc_set_neighbour_available(lc, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1240  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (i << log2_trafo_size_c), 1);
1241  }
1242  if (cbf_cb[i])
1243  ff_hevc_hls_residual_coding(lc, x0, y0 + (i << log2_trafo_size_c),
1244  log2_trafo_size_c, scan_idx_c, 1);
1245  else
1246  if (lc->tu.cross_pf) {
1247  ptrdiff_t stride = s->frame->linesize[1];
1248  int hshift = s->ps.sps->hshift[1];
1249  int vshift = s->ps.sps->vshift[1];
1250  const int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1251  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1252  int size = 1 << log2_trafo_size_c;
1253 
1254  uint8_t *dst = &s->frame->data[1][(y0 >> vshift) * stride +
1255  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1256  for (i = 0; i < (size * size); i++) {
1257  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1258  }
1259  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1260  }
1261  }
1262 
1263  if (lc->tu.cross_pf) {
1264  hls_cross_component_pred(lc, 1);
1265  }
1266  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1267  if (lc->cu.pred_mode == MODE_INTRA) {
1268  ff_hevc_set_neighbour_available(lc, x0, y0 + (i << log2_trafo_size_c),
1269  trafo_size_h, trafo_size_v);
1270  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (i << log2_trafo_size_c), 2);
1271  }
1272  if (cbf_cr[i])
1273  ff_hevc_hls_residual_coding(lc, x0, y0 + (i << log2_trafo_size_c),
1274  log2_trafo_size_c, scan_idx_c, 2);
1275  else
1276  if (lc->tu.cross_pf) {
1277  ptrdiff_t stride = s->frame->linesize[2];
1278  int hshift = s->ps.sps->hshift[2];
1279  int vshift = s->ps.sps->vshift[2];
1280  const int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1281  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1282  int size = 1 << log2_trafo_size_c;
1283 
1284  uint8_t *dst = &s->frame->data[2][(y0 >> vshift) * stride +
1285  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1286  for (i = 0; i < (size * size); i++) {
1287  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1288  }
1289  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1290  }
1291  }
1292  } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) {
1293  int trafo_size_h = 1 << (log2_trafo_size + 1);
1294  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1295  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1296  if (lc->cu.pred_mode == MODE_INTRA) {
1297  ff_hevc_set_neighbour_available(lc, xBase, yBase + (i << log2_trafo_size),
1298  trafo_size_h, trafo_size_v);
1299  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (i << log2_trafo_size), 1);
1300  }
1301  if (cbf_cb[i])
1302  ff_hevc_hls_residual_coding(lc, xBase, yBase + (i << log2_trafo_size),
1303  log2_trafo_size, scan_idx_c, 1);
1304  }
1305  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1306  if (lc->cu.pred_mode == MODE_INTRA) {
1307  ff_hevc_set_neighbour_available(lc, xBase, yBase + (i << log2_trafo_size),
1308  trafo_size_h, trafo_size_v);
1309  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (i << log2_trafo_size), 2);
1310  }
1311  if (cbf_cr[i])
1312  ff_hevc_hls_residual_coding(lc, xBase, yBase + (i << log2_trafo_size),
1313  log2_trafo_size, scan_idx_c, 2);
1314  }
1315  }
1316  } else if (s->ps.sps->chroma_format_idc && lc->cu.pred_mode == MODE_INTRA) {
1317  if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) {
1318  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1319  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1320  ff_hevc_set_neighbour_available(lc, x0, y0, trafo_size_h, trafo_size_v);
1321  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0, 1);
1322  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0, 2);
1323  if (s->ps.sps->chroma_format_idc == 2) {
1324  ff_hevc_set_neighbour_available(lc, x0, y0 + (1 << log2_trafo_size_c),
1325  trafo_size_h, trafo_size_v);
1326  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (1 << log2_trafo_size_c), 1);
1327  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (1 << log2_trafo_size_c), 2);
1328  }
1329  } else if (blk_idx == 3) {
1330  int trafo_size_h = 1 << (log2_trafo_size + 1);
1331  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1332  ff_hevc_set_neighbour_available(lc, xBase, yBase,
1333  trafo_size_h, trafo_size_v);
1334  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase, 1);
1335  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase, 2);
1336  if (s->ps.sps->chroma_format_idc == 2) {
1337  ff_hevc_set_neighbour_available(lc, xBase, yBase + (1 << log2_trafo_size),
1338  trafo_size_h, trafo_size_v);
1339  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (1 << log2_trafo_size), 1);
1340  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (1 << log2_trafo_size), 2);
1341  }
1342  }
1343  }
1344 
1345  return 0;
1346 }
1347 
1348 static void set_deblocking_bypass(const HEVCContext *s, int x0, int y0, int log2_cb_size)
1349 {
1350  int cb_size = 1 << log2_cb_size;
1351  int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
1352 
1353  int min_pu_width = s->ps.sps->min_pu_width;
1354  int x_end = FFMIN(x0 + cb_size, s->ps.sps->width);
1355  int y_end = FFMIN(y0 + cb_size, s->ps.sps->height);
1356  int i, j;
1357 
1358  for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
1359  for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
1360  s->is_pcm[i + j * min_pu_width] = 2;
1361 }
1362 
1363 static int hls_transform_tree(HEVCLocalContext *lc, int x0, int y0,
1364  int xBase, int yBase, int cb_xBase, int cb_yBase,
1365  int log2_cb_size, int log2_trafo_size,
1366  int trafo_depth, int blk_idx,
1367  const int *base_cbf_cb, const int *base_cbf_cr)
1368 {
1369  const HEVCContext *const s = lc->parent;
1370  uint8_t split_transform_flag;
1371  int cbf_cb[2];
1372  int cbf_cr[2];
1373  int ret;
1374 
1375  cbf_cb[0] = base_cbf_cb[0];
1376  cbf_cb[1] = base_cbf_cb[1];
1377  cbf_cr[0] = base_cbf_cr[0];
1378  cbf_cr[1] = base_cbf_cr[1];
1379 
1380  if (lc->cu.intra_split_flag) {
1381  if (trafo_depth == 1) {
1382  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[blk_idx];
1383  if (s->ps.sps->chroma_format_idc == 3) {
1384  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[blk_idx];
1385  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[blk_idx];
1386  } else {
1387  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1388  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1389  }
1390  }
1391  } else {
1392  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[0];
1393  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1394  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1395  }
1396 
1397  if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
1398  log2_trafo_size > s->ps.sps->log2_min_tb_size &&
1399  trafo_depth < lc->cu.max_trafo_depth &&
1400  !(lc->cu.intra_split_flag && trafo_depth == 0)) {
1401  split_transform_flag = ff_hevc_split_transform_flag_decode(lc, log2_trafo_size);
1402  } else {
1403  int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&
1404  lc->cu.pred_mode == MODE_INTER &&
1405  lc->cu.part_mode != PART_2Nx2N &&
1406  trafo_depth == 0;
1407 
1408  split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||
1409  (lc->cu.intra_split_flag && trafo_depth == 0) ||
1410  inter_split;
1411  }
1412 
1413  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1414  if (trafo_depth == 0 || cbf_cb[0]) {
1415  cbf_cb[0] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1416  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1417  cbf_cb[1] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1418  }
1419  }
1420 
1421  if (trafo_depth == 0 || cbf_cr[0]) {
1422  cbf_cr[0] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1423  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1424  cbf_cr[1] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1425  }
1426  }
1427  }
1428 
1429  if (split_transform_flag) {
1430  const int trafo_size_split = 1 << (log2_trafo_size - 1);
1431  const int x1 = x0 + trafo_size_split;
1432  const int y1 = y0 + trafo_size_split;
1433 
1434 #define SUBDIVIDE(x, y, idx) \
1435 do { \
1436  ret = hls_transform_tree(lc, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size,\
1437  log2_trafo_size - 1, trafo_depth + 1, idx, \
1438  cbf_cb, cbf_cr); \
1439  if (ret < 0) \
1440  return ret; \
1441 } while (0)
1442 
1443  SUBDIVIDE(x0, y0, 0);
1444  SUBDIVIDE(x1, y0, 1);
1445  SUBDIVIDE(x0, y1, 2);
1446  SUBDIVIDE(x1, y1, 3);
1447 
1448 #undef SUBDIVIDE
1449  } else {
1450  int min_tu_size = 1 << s->ps.sps->log2_min_tb_size;
1451  int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
1452  int min_tu_width = s->ps.sps->min_tb_width;
1453  int cbf_luma = 1;
1454 
1455  if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 ||
1456  cbf_cb[0] || cbf_cr[0] ||
1457  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1458  cbf_luma = ff_hevc_cbf_luma_decode(lc, trafo_depth);
1459  }
1460 
1461  ret = hls_transform_unit(lc, x0, y0, xBase, yBase, cb_xBase, cb_yBase,
1462  log2_cb_size, log2_trafo_size,
1463  blk_idx, cbf_luma, cbf_cb, cbf_cr);
1464  if (ret < 0)
1465  return ret;
1466  // TODO: store cbf_luma somewhere else
1467  if (cbf_luma) {
1468  int i, j;
1469  for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
1470  for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
1471  int x_tu = (x0 + j) >> log2_min_tu_size;
1472  int y_tu = (y0 + i) >> log2_min_tu_size;
1473  s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
1474  }
1475  }
1476  if (!s->sh.disable_deblocking_filter_flag) {
1477  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_trafo_size);
1478  if (s->ps.pps->transquant_bypass_enable_flag &&
1479  lc->cu.cu_transquant_bypass_flag)
1480  set_deblocking_bypass(s, x0, y0, log2_trafo_size);
1481  }
1482  }
1483  return 0;
1484 }
1485 
1486 static int hls_pcm_sample(HEVCLocalContext *lc, int x0, int y0, int log2_cb_size)
1487 {
1488  const HEVCContext *const s = lc->parent;
1489  GetBitContext gb;
1490  int cb_size = 1 << log2_cb_size;
1491  ptrdiff_t stride0 = s->frame->linesize[0];
1492  ptrdiff_t stride1 = s->frame->linesize[1];
1493  ptrdiff_t stride2 = s->frame->linesize[2];
1494  uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
1495  uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
1496  uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];
1497 
1498  int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
1499  (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
1500  ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
1501  s->ps.sps->pcm.bit_depth_chroma;
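/* 'length' is the size in bits of the raw IPCM payload: luma samples at
 * pcm.bit_depth plus both chroma planes at pcm.bit_depth_chroma. It is rounded
 * up to whole bytes when skipping past it in the CABAC bytestream below. */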
1502  const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);
1503  int ret;
1504 
1505  if (!s->sh.disable_deblocking_filter_flag)
1506  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_cb_size);
1507 
1508  ret = init_get_bits(&gb, pcm, length);
1509  if (ret < 0)
1510  return ret;
1511 
1512  s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
1513  if (s->ps.sps->chroma_format_idc) {
1514  s->hevcdsp.put_pcm(dst1, stride1,
1515  cb_size >> s->ps.sps->hshift[1],
1516  cb_size >> s->ps.sps->vshift[1],
1517  &gb, s->ps.sps->pcm.bit_depth_chroma);
1518  s->hevcdsp.put_pcm(dst2, stride2,
1519  cb_size >> s->ps.sps->hshift[2],
1520  cb_size >> s->ps.sps->vshift[2],
1521  &gb, s->ps.sps->pcm.bit_depth_chroma);
1522  }
1523 
1524  return 0;
1525 }
1526 
1527 /**
1528  * 8.5.3.2.2.1 Luma sample unidirectional interpolation process
1529  *
1530  * @param s HEVC decoding context
1531  * @param dst target buffer for block data at block position
1532  * @param dststride stride of the dst buffer
1533  * @param ref reference picture buffer at origin (0, 0)
1534  * @param mv motion vector (relative to block position) to get pixel data from
1535  * @param x_off horizontal position of block from origin (0, 0)
1536  * @param y_off vertical position of block from origin (0, 0)
1537  * @param block_w width of block
1538  * @param block_h height of block
1539  * @param luma_weight weighting factor applied to the luma prediction
1540  * @param luma_offset additive offset applied to the luma prediction value
1541  */
1542 
1543 static void luma_mc_uni(HEVCLocalContext *lc, uint8_t *dst, ptrdiff_t dststride,
1544  const AVFrame *ref, const Mv *mv, int x_off, int y_off,
1545  int block_w, int block_h, int luma_weight, int luma_offset)
1546 {
1547  const HEVCContext *const s = lc->parent;
1548  const uint8_t *src = ref->data[0];
1549  ptrdiff_t srcstride = ref->linesize[0];
1550  int pic_width = s->ps.sps->width;
1551  int pic_height = s->ps.sps->height;
1552  int mx = mv->x & 3;
1553  int my = mv->y & 3;
1554  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1555  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1556  int idx = hevc_pel_weight[block_w];
1557 
1558  x_off += mv->x >> 2;
1559  y_off += mv->y >> 2;
1560  src += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1561 
1562  if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER ||
1563  x_off >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1564  y_off >= pic_height - block_h - QPEL_EXTRA_AFTER ||
1565  ref == s->frame) {
1566  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1567  int offset = QPEL_EXTRA_BEFORE * srcstride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1568  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1569 
1570  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset,
1571  edge_emu_stride, srcstride,
1572  block_w + QPEL_EXTRA,
1573  block_h + QPEL_EXTRA,
1574  x_off - QPEL_EXTRA_BEFORE, y_off - QPEL_EXTRA_BEFORE,
1575  pic_width, pic_height);
1576  src = lc->edge_emu_buffer + buf_offset;
1577  srcstride = edge_emu_stride;
1578  }
1579 
1580  if (!weight_flag)
1581  s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
1582  block_h, mx, my, block_w);
1583  else
1584  s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride,
1585  block_h, s->sh.luma_log2_weight_denom,
1586  luma_weight, luma_offset, mx, my, block_w);
1587 }
1588 
1589 /**
1590  * 8.5.3.2.2.1 Luma sample bidirectional interpolation process
1591  *
1592  * @param s HEVC decoding context
1593  * @param dst target buffer for block data at block position
1594  * @param dststride stride of the dst buffer
1595  * @param ref0 reference picture0 buffer at origin (0, 0)
1596  * @param mv0 motion vector0 (relative to block position) to get pixel data from
1597  * @param x_off horizontal position of block from origin (0, 0)
1598  * @param y_off vertical position of block from origin (0, 0)
1599  * @param block_w width of block
1600  * @param block_h height of block
1601  * @param ref1 reference picture1 buffer at origin (0, 0)
1602  * @param mv1 motion vector1 (relative to block position) to get pixel data from
1603  * @param current_mv current motion vector structure
1604  */
1605  static void luma_mc_bi(HEVCLocalContext *lc, uint8_t *dst, ptrdiff_t dststride,
1606  const AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
1607  int block_w, int block_h, const AVFrame *ref1,
1608  const Mv *mv1, struct MvField *current_mv)
1609 {
1610  const HEVCContext *const s = lc->parent;
1611  ptrdiff_t src0stride = ref0->linesize[0];
1612  ptrdiff_t src1stride = ref1->linesize[0];
1613  int pic_width = s->ps.sps->width;
1614  int pic_height = s->ps.sps->height;
1615  int mx0 = mv0->x & 3;
1616  int my0 = mv0->y & 3;
1617  int mx1 = mv1->x & 3;
1618  int my1 = mv1->y & 3;
1619  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1620  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1621  int x_off0 = x_off + (mv0->x >> 2);
1622  int y_off0 = y_off + (mv0->y >> 2);
1623  int x_off1 = x_off + (mv1->x >> 2);
1624  int y_off1 = y_off + (mv1->y >> 2);
1625  int idx = hevc_pel_weight[block_w];
1626 
1627  const uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1628  const uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1629 
1630  if (x_off0 < QPEL_EXTRA_BEFORE || y_off0 < QPEL_EXTRA_AFTER ||
1631  x_off0 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1632  y_off0 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1633  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1634  int offset = QPEL_EXTRA_BEFORE * src0stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1635  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1636 
1637  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset,
1638  edge_emu_stride, src0stride,
1639  block_w + QPEL_EXTRA,
1640  block_h + QPEL_EXTRA,
1641  x_off0 - QPEL_EXTRA_BEFORE, y_off0 - QPEL_EXTRA_BEFORE,
1642  pic_width, pic_height);
1643  src0 = lc->edge_emu_buffer + buf_offset;
1644  src0stride = edge_emu_stride;
1645  }
1646 
1647  if (x_off1 < QPEL_EXTRA_BEFORE || y_off1 < QPEL_EXTRA_AFTER ||
1648  x_off1 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1649  y_off1 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1650  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1651  int offset = QPEL_EXTRA_BEFORE * src1stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1652  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1653 
1654  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src1 - offset,
1655  edge_emu_stride, src1stride,
1656  block_w + QPEL_EXTRA,
1657  block_h + QPEL_EXTRA,
1658  x_off1 - QPEL_EXTRA_BEFORE, y_off1 - QPEL_EXTRA_BEFORE,
1659  pic_width, pic_height);
1660  src1 = lc->edge_emu_buffer2 + buf_offset;
1661  src1stride = edge_emu_stride;
1662  }
1663 
1664  s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride,
1665  block_h, mx0, my0, block_w);
1666  if (!weight_flag)
1667  s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1668  block_h, mx1, my1, block_w);
1669  else
1670  s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1671  block_h, s->sh.luma_log2_weight_denom,
1672  s->sh.luma_weight_l0[current_mv->ref_idx[0]],
1673  s->sh.luma_weight_l1[current_mv->ref_idx[1]],
1674  s->sh.luma_offset_l0[current_mv->ref_idx[0]],
1675  s->sh.luma_offset_l1[current_mv->ref_idx[1]],
1676  mx1, my1, block_w);
1677 
1678 }
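
/* Editorial sketch (not part of the decoder): luma motion vectors are stored
 * in quarter-sample units, so the low two bits select the interpolation
 * filter phase and the remaining bits give the integer displacement added to
 * the block position, as done at the top of luma_mc_bi(). The helper name is
 * hypothetical. */
static inline void qpel_mv_split_sketch(const Mv *mv, int x_off, int y_off,
                                        int *x_int, int *y_int,
                                        int *x_frac, int *y_frac)
{
    *x_frac = mv->x & 3;            /* fractional phase, 0..3 */
    *y_frac = mv->y & 3;
    *x_int  = x_off + (mv->x >> 2); /* integer displacement in luma samples */
    *y_int  = y_off + (mv->y >> 2);
}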
1679 
1680 /**
1681  * 8.5.3.2.2.2 Chroma sample uniprediction interpolation process
1682  *
1683  * @param lc HEVC local context
1684  * @param dst0 target buffer for block data at block position
1685  * @param dststride stride of the dst0 buffer
1686  * @param src0 reference picture plane buffer at origin (0, 0)
1687  * @param srcstride stride of the src0 buffer
1688  * @param reflist reference picture list index (L0 or L1) used to pick the MV from current_mv
1689  * @param x_off horizontal position of block from origin (0, 0)
1690  * @param y_off vertical position of block from origin (0, 0)
1691  * @param block_w width of block
1692  * @param block_h height of block
1693  * @param chroma_weight weighting factor applied to the chroma prediction
1694  * @param chroma_offset additive offset applied to the chroma prediction value
1695  */
1696 
1697 static void chroma_mc_uni(HEVCLocalContext *lc, uint8_t *dst0,
1698  ptrdiff_t dststride, const uint8_t *src0, ptrdiff_t srcstride, int reflist,
1699  int x_off, int y_off, int block_w, int block_h,
1700  const struct MvField *current_mv, int chroma_weight, int chroma_offset)
1701 {
1702  const HEVCContext *const s = lc->parent;
1703  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1704  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1705  const Mv *mv = &current_mv->mv[reflist];
1706  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1707  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1708  int idx = hevc_pel_weight[block_w];
1709  int hshift = s->ps.sps->hshift[1];
1710  int vshift = s->ps.sps->vshift[1];
1711  intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift);
1712  intptr_t my = av_mod_uintp2(mv->y, 2 + vshift);
1713  intptr_t _mx = mx << (1 - hshift);
1714  intptr_t _my = my << (1 - vshift);
1715  int emu = src0 == s->frame->data[1] || src0 == s->frame->data[2];
1716 
1717  x_off += mv->x >> (2 + hshift);
1718  y_off += mv->y >> (2 + vshift);
1719  src0 += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1720 
1721  if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER ||
1722  x_off >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1723  y_off >= pic_height - block_h - EPEL_EXTRA_AFTER ||
1724  emu) {
1725  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1726  int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift));
1727  int buf_offset0 = EPEL_EXTRA_BEFORE *
1728  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1729  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset0,
1730  edge_emu_stride, srcstride,
1731  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1732  x_off - EPEL_EXTRA_BEFORE,
1733  y_off - EPEL_EXTRA_BEFORE,
1734  pic_width, pic_height);
1735 
1736  src0 = lc->edge_emu_buffer + buf_offset0;
1737  srcstride = edge_emu_stride;
1738  }
1739  if (!weight_flag)
1740  s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1741  block_h, _mx, _my, block_w);
1742  else
1743  s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1744  block_h, s->sh.chroma_log2_weight_denom,
1745  chroma_weight, chroma_offset, _mx, _my, block_w);
1746 }
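
/* Editorial sketch (not part of the decoder): with chroma subsampling the
 * chroma MV gains one extra fractional bit per subsampled dimension (eighth
 * sample for 4:2:0), so the fractional part is taken modulo (2 + shift) bits
 * and the integer part is shifted by (2 + shift), mirroring chroma_mc_uni().
 * The helper name is hypothetical. */
static inline void epel_mv_split_sketch(const Mv *mv, int hshift, int vshift,
                                        int *x_int, int *y_int,
                                        int *x_frac, int *y_frac)
{
    *x_frac = av_mod_uintp2(mv->x, 2 + hshift); /* 0..7 for 4:2:0 */
    *y_frac = av_mod_uintp2(mv->y, 2 + vshift);
    *x_int  = mv->x >> (2 + hshift);            /* displacement in chroma samples */
    *y_int  = mv->y >> (2 + vshift);
}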
1747 
1748 /**
1749  * 8.5.3.2.2.2 Chroma sample bidirectional interpolation process
1750  *
1751  * @param lc HEVC local context
1752  * @param dst0 target buffer for block data at block position
1753  * @param dststride stride of the dst buffer
1754  * @param ref0 reference picture0 buffer at origin (0, 0)
1755  * @param mv0 motion vector0 (relative to block position) to get pixel data from
1756  * @param x_off horizontal position of block from origin (0, 0)
1757  * @param y_off vertical position of block from origin (0, 0)
1758  * @param block_w width of block
1759  * @param block_h height of block
1760  * @param ref1 reference picture1 buffer at origin (0, 0)
1761  * @param mv1 motion vector1 (relative to block position) to get pixel data from
1762  * @param current_mv current motion vector structure
1763  * @param cidx chroma component(cb, cr)
1764  */
1765 static void chroma_mc_bi(HEVCLocalContext *lc, uint8_t *dst0, ptrdiff_t dststride,
1766  const AVFrame *ref0, const AVFrame *ref1,
1767  int x_off, int y_off, int block_w, int block_h, const MvField *current_mv, int cidx)
1768 {
1769  const HEVCContext *const s = lc->parent;
1770  const uint8_t *src1 = ref0->data[cidx+1];
1771  const uint8_t *src2 = ref1->data[cidx+1];
1772  ptrdiff_t src1stride = ref0->linesize[cidx+1];
1773  ptrdiff_t src2stride = ref1->linesize[cidx+1];
1774  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1775  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1776  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1777  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1778  const Mv *const mv0 = &current_mv->mv[0];
1779  const Mv *const mv1 = &current_mv->mv[1];
1780  int hshift = s->ps.sps->hshift[1];
1781  int vshift = s->ps.sps->vshift[1];
1782 
1783  intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift);
1784  intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift);
1785  intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift);
1786  intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift);
1787  intptr_t _mx0 = mx0 << (1 - hshift);
1788  intptr_t _my0 = my0 << (1 - vshift);
1789  intptr_t _mx1 = mx1 << (1 - hshift);
1790  intptr_t _my1 = my1 << (1 - vshift);
1791 
1792  int x_off0 = x_off + (mv0->x >> (2 + hshift));
1793  int y_off0 = y_off + (mv0->y >> (2 + vshift));
1794  int x_off1 = x_off + (mv1->x >> (2 + hshift));
1795  int y_off1 = y_off + (mv1->y >> (2 + vshift));
1796  int idx = hevc_pel_weight[block_w];
1797  src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1798  src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1799 
1800  if (x_off0 < EPEL_EXTRA_BEFORE || y_off0 < EPEL_EXTRA_AFTER ||
1801  x_off0 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1802  y_off0 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1803  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1804  int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->ps.sps->pixel_shift));
1805  int buf_offset1 = EPEL_EXTRA_BEFORE *
1806  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1807 
1808  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1,
1809  edge_emu_stride, src1stride,
1810  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1811  x_off0 - EPEL_EXTRA_BEFORE,
1812  y_off0 - EPEL_EXTRA_BEFORE,
1813  pic_width, pic_height);
1814 
1815  src1 = lc->edge_emu_buffer + buf_offset1;
1816  src1stride = edge_emu_stride;
1817  }
1818 
1819  if (x_off1 < EPEL_EXTRA_BEFORE || y_off1 < EPEL_EXTRA_AFTER ||
1820  x_off1 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1821  y_off1 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1822  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1823  int offset1 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->ps.sps->pixel_shift));
1824  int buf_offset1 = EPEL_EXTRA_BEFORE *
1825  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1826 
1827  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src2 - offset1,
1828  edge_emu_stride, src2stride,
1829  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1830  x_off1 - EPEL_EXTRA_BEFORE,
1831  y_off1 - EPEL_EXTRA_BEFORE,
1832  pic_width, pic_height);
1833 
1834  src2 = lc->edge_emu_buffer2 + buf_offset1;
1835  src2stride = edge_emu_stride;
1836  }
1837 
1838  s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride,
1839  block_h, _mx0, _my0, block_w);
1840  if (!weight_flag)
1841  s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1842  src2, src2stride, lc->tmp,
1843  block_h, _mx1, _my1, block_w);
1844  else
1845  s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1846  src2, src2stride, lc->tmp,
1847  block_h,
1848  s->sh.chroma_log2_weight_denom,
1849  s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx],
1850  s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx],
1851  s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx],
1852  s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx],
1853  _mx1, _my1, block_w);
1854 }
1855 
1856 static void hevc_await_progress(const HEVCContext *s, const HEVCFrame *ref,
1857  const Mv *mv, int y0, int height)
1858 {
1859  if (s->threads_type == FF_THREAD_FRAME ) {
1860  int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
1861 
1862  ff_thread_await_progress(&ref->tf, y, 0);
1863  }
1864 }
1865 
1866 static void hevc_luma_mv_mvp_mode(HEVCLocalContext *lc, int x0, int y0, int nPbW,
1867  int nPbH, int log2_cb_size, int part_idx,
1868  int merge_idx, MvField *mv)
1869 {
1870  const HEVCContext *const s = lc->parent;
1871  enum InterPredIdc inter_pred_idc = PRED_L0;
1872  int mvp_flag;
1873 
1874  ff_hevc_set_neighbour_available(lc, x0, y0, nPbW, nPbH);
1875  mv->pred_flag = 0;
1876  if (s->sh.slice_type == HEVC_SLICE_B)
1877  inter_pred_idc = ff_hevc_inter_pred_idc_decode(lc, nPbW, nPbH);
1878 
1879  if (inter_pred_idc != PRED_L1) {
1880  if (s->sh.nb_refs[L0])
1881  mv->ref_idx[0]= ff_hevc_ref_idx_lx_decode(lc, s->sh.nb_refs[L0]);
1882 
1883  mv->pred_flag = PF_L0;
1884  ff_hevc_hls_mvd_coding(lc, x0, y0, 0);
1885  mvp_flag = ff_hevc_mvp_lx_flag_decode(lc);
1886  ff_hevc_luma_mv_mvp_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1887  part_idx, merge_idx, mv, mvp_flag, 0);
1888  mv->mv[0].x += lc->pu.mvd.x;
1889  mv->mv[0].y += lc->pu.mvd.y;
1890  }
1891 
1892  if (inter_pred_idc != PRED_L0) {
1893  if (s->sh.nb_refs[L1])
1894  mv->ref_idx[1]= ff_hevc_ref_idx_lx_decode(lc, s->sh.nb_refs[L1]);
1895 
1896  if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {
1897  AV_ZERO32(&lc->pu.mvd);
1898  } else {
1899  ff_hevc_hls_mvd_coding(lc, x0, y0, 1);
1900  }
1901 
1902  mv->pred_flag += PF_L1;
1903  mvp_flag = ff_hevc_mvp_lx_flag_decode(lc);
1904  ff_hevc_luma_mv_mvp_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1905  part_idx, merge_idx, mv, mvp_flag, 1);
1906  mv->mv[1].x += lc->pu.mvd.x;
1907  mv->mv[1].y += lc->pu.mvd.y;
1908  }
1909 }
1910 
1911 static void hls_prediction_unit(HEVCLocalContext *lc, int x0, int y0,
1912  int nPbW, int nPbH,
1913  int log2_cb_size, int partIdx, int idx)
1914 {
1915 #define POS(c_idx, x, y) \
1916  &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
1917  (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
1918  const HEVCContext *const s = lc->parent;
1919  int merge_idx = 0;
1920  struct MvField current_mv = {{{ 0 }}};
1921 
1922  int min_pu_width = s->ps.sps->min_pu_width;
1923 
1924  MvField *tab_mvf = s->ref->tab_mvf;
1925  const RefPicList *refPicList = s->ref->refPicList;
1926  const HEVCFrame *ref0 = NULL, *ref1 = NULL;
1927  uint8_t *dst0 = POS(0, x0, y0);
1928  uint8_t *dst1 = POS(1, x0, y0);
1929  uint8_t *dst2 = POS(2, x0, y0);
1930  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
1931  int min_cb_width = s->ps.sps->min_cb_width;
1932  int x_cb = x0 >> log2_min_cb_size;
1933  int y_cb = y0 >> log2_min_cb_size;
1934  int x_pu, y_pu;
1935  int i, j;
1936 
1937  int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb);
1938 
1939  if (!skip_flag)
1940  lc->pu.merge_flag = ff_hevc_merge_flag_decode(lc);
1941 
1942  if (skip_flag || lc->pu.merge_flag) {
1943  if (s->sh.max_num_merge_cand > 1)
1944  merge_idx = ff_hevc_merge_idx_decode(lc);
1945  else
1946  merge_idx = 0;
1947 
1948  ff_hevc_luma_mv_merge_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1949  partIdx, merge_idx, &current_mv);
1950  } else {
1951  hevc_luma_mv_mvp_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1952  partIdx, merge_idx, &current_mv);
1953  }
1954 
1955  x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1956  y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1957 
1958  for (j = 0; j < nPbH >> s->ps.sps->log2_min_pu_size; j++)
1959  for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++)
1960  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
1961 
1962  if (current_mv.pred_flag & PF_L0) {
1963  ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
1964  if (!ref0 || !ref0->frame->data[0])
1965  return;
1966  hevc_await_progress(s, ref0, &current_mv.mv[0], y0, nPbH);
1967  }
1968  if (current_mv.pred_flag & PF_L1) {
1969  ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
1970  if (!ref1 || !ref1->frame->data[0])
1971  return;
1972  hevc_await_progress(s, ref1, &current_mv.mv[1], y0, nPbH);
1973  }
1974 
1975  if (current_mv.pred_flag == PF_L0) {
1976  int x0_c = x0 >> s->ps.sps->hshift[1];
1977  int y0_c = y0 >> s->ps.sps->vshift[1];
1978  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1979  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1980 
1981  luma_mc_uni(lc, dst0, s->frame->linesize[0], ref0->frame,
1982  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1983  s->sh.luma_weight_l0[current_mv.ref_idx[0]],
1984  s->sh.luma_offset_l0[current_mv.ref_idx[0]]);
1985 
1986  if (s->ps.sps->chroma_format_idc) {
1987  chroma_mc_uni(lc, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1],
1988  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1989  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]);
1990  chroma_mc_uni(lc, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2],
1991  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1992  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]);
1993  }
1994  } else if (current_mv.pred_flag == PF_L1) {
1995  int x0_c = x0 >> s->ps.sps->hshift[1];
1996  int y0_c = y0 >> s->ps.sps->vshift[1];
1997  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1998  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1999 
2000  luma_mc_uni(lc, dst0, s->frame->linesize[0], ref1->frame,
2001  &current_mv.mv[1], x0, y0, nPbW, nPbH,
2002  s->sh.luma_weight_l1[current_mv.ref_idx[1]],
2003  s->sh.luma_offset_l1[current_mv.ref_idx[1]]);
2004 
2005  if (s->ps.sps->chroma_format_idc) {
2006  chroma_mc_uni(lc, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1],
2007  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
2008  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]);
2009 
2010  chroma_mc_uni(lc, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2],
2011  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
2012  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]);
2013  }
2014  } else if (current_mv.pred_flag == PF_BI) {
2015  int x0_c = x0 >> s->ps.sps->hshift[1];
2016  int y0_c = y0 >> s->ps.sps->vshift[1];
2017  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
2018  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
2019 
2020  luma_mc_bi(lc, dst0, s->frame->linesize[0], ref0->frame,
2021  &current_mv.mv[0], x0, y0, nPbW, nPbH,
2022  ref1->frame, &current_mv.mv[1], &current_mv);
2023 
2024  if (s->ps.sps->chroma_format_idc) {
2025  chroma_mc_bi(lc, dst1, s->frame->linesize[1], ref0->frame, ref1->frame,
2026  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);
2027 
2028  chroma_mc_bi(lc, dst2, s->frame->linesize[2], ref0->frame, ref1->frame,
2029  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
2030  }
2031  }
2032 }
2033 
2034 /**
2035  * 8.4.1
2036  */
2037 static int luma_intra_pred_mode(HEVCLocalContext *lc, int x0, int y0, int pu_size,
2038  int prev_intra_luma_pred_flag)
2039 {
2040  const HEVCContext *const s = lc->parent;
2041  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2042  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
2043  int min_pu_width = s->ps.sps->min_pu_width;
2044  int size_in_pus = pu_size >> s->ps.sps->log2_min_pu_size;
2045  int x0b = av_mod_uintp2(x0, s->ps.sps->log2_ctb_size);
2046  int y0b = av_mod_uintp2(y0, s->ps.sps->log2_ctb_size);
2047 
2048  int cand_up = (lc->ctb_up_flag || y0b) ?
2049  s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
2050  int cand_left = (lc->ctb_left_flag || x0b) ?
2051  s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC;
2052 
2053  int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size);
2054 
2055  MvField *tab_mvf = s->ref->tab_mvf;
2056  int intra_pred_mode;
2057  int candidate[3];
2058  int i, j;
2059 
2060  // intra_pred_mode prediction does not cross vertical CTB boundaries
2061  if ((y0 - 1) < y_ctb)
2062  cand_up = INTRA_DC;
2063 
2064  if (cand_left == cand_up) {
2065  if (cand_left < 2) {
2066  candidate[0] = INTRA_PLANAR;
2067  candidate[1] = INTRA_DC;
2068  candidate[2] = INTRA_ANGULAR_26;
2069  } else {
2070  candidate[0] = cand_left;
2071  candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
2072  candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
2073  }
2074  } else {
2075  candidate[0] = cand_left;
2076  candidate[1] = cand_up;
2077  if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) {
2078  candidate[2] = INTRA_PLANAR;
2079  } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) {
2080  candidate[2] = INTRA_DC;
2081  } else {
2082  candidate[2] = INTRA_ANGULAR_26;
2083  }
2084  }
2085 
2086  if (prev_intra_luma_pred_flag) {
2087  intra_pred_mode = candidate[lc->pu.mpm_idx];
2088  } else {
2089  if (candidate[0] > candidate[1])
2090  FFSWAP(uint8_t, candidate[0], candidate[1]);
2091  if (candidate[0] > candidate[2])
2092  FFSWAP(uint8_t, candidate[0], candidate[2]);
2093  if (candidate[1] > candidate[2])
2094  FFSWAP(uint8_t, candidate[1], candidate[2]);
2095 
2096  intra_pred_mode = lc->pu.rem_intra_luma_pred_mode;
2097  for (i = 0; i < 3; i++)
2098  if (intra_pred_mode >= candidate[i])
2099  intra_pred_mode++;
2100  }
2101 
2102  /* write the intra prediction units into the mv array */
2103  if (!size_in_pus)
2104  size_in_pus = 1;
2105  for (i = 0; i < size_in_pus; i++) {
2106  memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
2107  intra_pred_mode, size_in_pus);
2108 
2109  for (j = 0; j < size_in_pus; j++) {
2110  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag = PF_INTRA;
2111  }
2112  }
2113 
2114  return intra_pred_mode;
2115 }
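
/* Editorial sketch (not part of the decoder): when the signalled luma mode is
 * not one of the three most-probable modes, rem_intra_luma_pred_mode indexes
 * the 32 remaining modes; sorting the candidate list and bumping the index
 * past each candidate recovers the actual mode, as in the loop above. The
 * helper name is hypothetical. */
static inline int rem_intra_mode_sketch(int rem_mode, int cand[3])
{
    int mode = rem_mode;

    if (cand[0] > cand[1]) FFSWAP(int, cand[0], cand[1]);
    if (cand[0] > cand[2]) FFSWAP(int, cand[0], cand[2]);
    if (cand[1] > cand[2]) FFSWAP(int, cand[1], cand[2]);

    for (int i = 0; i < 3; i++)
        if (mode >= cand[i])
            mode++;

    return mode;
}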
2116 
2117 static av_always_inline void set_ct_depth(const HEVCContext *s, int x0, int y0,
2118  int log2_cb_size, int ct_depth)
2119 {
2120  int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size;
2121  int x_cb = x0 >> s->ps.sps->log2_min_cb_size;
2122  int y_cb = y0 >> s->ps.sps->log2_min_cb_size;
2123  int y;
2124 
2125  for (y = 0; y < length; y++)
2126  memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb],
2127  ct_depth, length);
2128 }
2129 
2130 static const uint8_t tab_mode_idx[] = {
2131  0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20,
2132  21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
2133 
2134 static void intra_prediction_unit(HEVCLocalContext *lc, int x0, int y0,
2135  int log2_cb_size)
2136 {
2137  const HEVCContext *const s = lc->parent;
2138  static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
2139  uint8_t prev_intra_luma_pred_flag[4];
2140  int split = lc->cu.part_mode == PART_NxN;
2141  int pb_size = (1 << log2_cb_size) >> split;
2142  int side = split + 1;
2143  int chroma_mode;
2144  int i, j;
2145 
2146  for (i = 0; i < side; i++)
2147  for (j = 0; j < side; j++)
2148  prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(lc);
2149 
2150  for (i = 0; i < side; i++) {
2151  for (j = 0; j < side; j++) {
2152  if (prev_intra_luma_pred_flag[2 * i + j])
2153  lc->pu.mpm_idx = ff_hevc_mpm_idx_decode(lc);
2154  else
2155  lc->pu.rem_intra_luma_pred_mode = ff_hevc_rem_intra_luma_pred_mode_decode(lc);
2156 
2157  lc->pu.intra_pred_mode[2 * i + j] =
2158  luma_intra_pred_mode(lc, x0 + pb_size * j, y0 + pb_size * i, pb_size,
2159  prev_intra_luma_pred_flag[2 * i + j]);
2160  }
2161  }
2162 
2163  if (s->ps.sps->chroma_format_idc == 3) {
2164  for (i = 0; i < side; i++) {
2165  for (j = 0; j < side; j++) {
2166  lc->pu.chroma_mode_c[2 * i + j] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(lc);
2167  if (chroma_mode != 4) {
2168  if (lc->pu.intra_pred_mode[2 * i + j] == intra_chroma_table[chroma_mode])
2169  lc->pu.intra_pred_mode_c[2 * i + j] = 34;
2170  else
2171  lc->pu.intra_pred_mode_c[2 * i + j] = intra_chroma_table[chroma_mode];
2172  } else {
2173  lc->pu.intra_pred_mode_c[2 * i + j] = lc->pu.intra_pred_mode[2 * i + j];
2174  }
2175  }
2176  }
2177  } else if (s->ps.sps->chroma_format_idc == 2) {
2178  int mode_idx;
2179  lc->pu.chroma_mode_c[0] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(lc);
2180  if (chroma_mode != 4) {
2181  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2182  mode_idx = 34;
2183  else
2184  mode_idx = intra_chroma_table[chroma_mode];
2185  } else {
2186  mode_idx = lc->pu.intra_pred_mode[0];
2187  }
2188  lc->pu.intra_pred_mode_c[0] = tab_mode_idx[mode_idx];
2189  } else if (s->ps.sps->chroma_format_idc != 0) {
2190  chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(lc);
2191  if (chroma_mode != 4) {
2192  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2193  lc->pu.intra_pred_mode_c[0] = 34;
2194  else
2195  lc->pu.intra_pred_mode_c[0] = intra_chroma_table[chroma_mode];
2196  } else {
2197  lc->pu.intra_pred_mode_c[0] = lc->pu.intra_pred_mode[0];
2198  }
2199  }
2200 }
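
/* Editorial sketch (not part of the decoder): for 4:2:0 and 4:4:4,
 * intra_chroma_pred_mode 4 means "copy the luma mode"; values 0..3 map to
 * planar/vertical/horizontal/DC, and a collision with the luma mode is
 * replaced by angular mode 34, as in the table-driven code above (4:2:2 adds
 * the tab_mode_idx[] remapping). The helper name is hypothetical. */
static inline int chroma_mode_sketch(int chroma_mode, int luma_mode)
{
    static const uint8_t table[4] = { 0, 26, 10, 1 };

    if (chroma_mode == 4)
        return luma_mode;
    return luma_mode == table[chroma_mode] ? 34 : table[chroma_mode];
}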
2201 
2202 static void intra_prediction_unit_default_value(HEVCLocalContext *lc,
2203  int x0, int y0,
2204  int log2_cb_size)
2205 {
2206  const HEVCContext *const s = lc->parent;
2207  int pb_size = 1 << log2_cb_size;
2208  int size_in_pus = pb_size >> s->ps.sps->log2_min_pu_size;
2209  int min_pu_width = s->ps.sps->min_pu_width;
2210  MvField *tab_mvf = s->ref->tab_mvf;
2211  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2212  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
2213  int j, k;
2214 
2215  if (size_in_pus == 0)
2216  size_in_pus = 1;
2217  for (j = 0; j < size_in_pus; j++)
2218  memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
2219  if (lc->cu.pred_mode == MODE_INTRA)
2220  for (j = 0; j < size_in_pus; j++)
2221  for (k = 0; k < size_in_pus; k++)
2222  tab_mvf[(y_pu + j) * min_pu_width + x_pu + k].pred_flag = PF_INTRA;
2223 }
2224 
2225 static int hls_coding_unit(HEVCLocalContext *lc, const HEVCContext *s, int x0, int y0, int log2_cb_size)
2226 {
2227  int cb_size = 1 << log2_cb_size;
2228  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
2229  int length = cb_size >> log2_min_cb_size;
2230  int min_cb_width = s->ps.sps->min_cb_width;
2231  int x_cb = x0 >> log2_min_cb_size;
2232  int y_cb = y0 >> log2_min_cb_size;
2233  int idx = log2_cb_size - 2;
2234  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2235  int x, y, ret;
2236 
2237  lc->cu.x = x0;
2238  lc->cu.y = y0;
2239  lc->cu.pred_mode = MODE_INTRA;
2240  lc->cu.part_mode = PART_2Nx2N;
2241  lc->cu.intra_split_flag = 0;
2242 
2243  SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0;
2244  for (x = 0; x < 4; x++)
2245  lc->pu.intra_pred_mode[x] = 1;
2246  if (s->ps.pps->transquant_bypass_enable_flag) {
2247  lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(lc);
2248  if (lc->cu.cu_transquant_bypass_flag)
2249  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2250  } else
2251  lc->cu.cu_transquant_bypass_flag = 0;
2252 
2253  if (s->sh.slice_type != HEVC_SLICE_I) {
2254  uint8_t skip_flag = ff_hevc_skip_flag_decode(lc, x0, y0, x_cb, y_cb);
2255 
2256  x = y_cb * min_cb_width + x_cb;
2257  for (y = 0; y < length; y++) {
2258  memset(&s->skip_flag[x], skip_flag, length);
2259  x += min_cb_width;
2260  }
2261  lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER;
2262  } else {
2263  x = y_cb * min_cb_width + x_cb;
2264  for (y = 0; y < length; y++) {
2265  memset(&s->skip_flag[x], 0, length);
2266  x += min_cb_width;
2267  }
2268  }
2269 
2270  if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) {
2271  hls_prediction_unit(lc, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2272  intra_prediction_unit_default_value(lc, x0, y0, log2_cb_size);
2273 
2274  if (!s->sh.disable_deblocking_filter_flag)
2275  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_cb_size);
2276  } else {
2277  int pcm_flag = 0;
2278 
2279  if (s->sh.slice_type != HEVC_SLICE_I)
2280  lc->cu.pred_mode = ff_hevc_pred_mode_decode(lc);
2281  if (lc->cu.pred_mode != MODE_INTRA ||
2282  log2_cb_size == s->ps.sps->log2_min_cb_size) {
2283  lc->cu.part_mode = ff_hevc_part_mode_decode(lc, log2_cb_size);
2284  lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN &&
2285  lc->cu.pred_mode == MODE_INTRA;
2286  }
2287 
2288  if (lc->cu.pred_mode == MODE_INTRA) {
2289  if (lc->cu.part_mode == PART_2Nx2N && s->ps.sps->pcm_enabled_flag &&
2290  log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size &&
2291  log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
2292  pcm_flag = ff_hevc_pcm_flag_decode(lc);
2293  }
2294  if (pcm_flag) {
2295  intra_prediction_unit_default_value(lc, x0, y0, log2_cb_size);
2296  ret = hls_pcm_sample(lc, x0, y0, log2_cb_size);
2297  if (s->ps.sps->pcm.loop_filter_disable_flag)
2298  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2299 
2300  if (ret < 0)
2301  return ret;
2302  } else {
2303  intra_prediction_unit(lc, x0, y0, log2_cb_size);
2304  }
2305  } else {
2306  intra_prediction_unit_default_value(lc, x0, y0, log2_cb_size);
2307  switch (lc->cu.part_mode) {
2308  case PART_2Nx2N:
2309  hls_prediction_unit(lc, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2310  break;
2311  case PART_2NxN:
2312  hls_prediction_unit(lc, x0, y0, cb_size, cb_size / 2, log2_cb_size, 0, idx);
2313  hls_prediction_unit(lc, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1, idx);
2314  break;
2315  case PART_Nx2N:
2316  hls_prediction_unit(lc, x0, y0, cb_size / 2, cb_size, log2_cb_size, 0, idx - 1);
2317  hls_prediction_unit(lc, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
2318  break;
2319  case PART_2NxnU:
2320  hls_prediction_unit(lc, x0, y0, cb_size, cb_size / 4, log2_cb_size, 0, idx);
2321  hls_prediction_unit(lc, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
2322  break;
2323  case PART_2NxnD:
2324  hls_prediction_unit(lc, x0, y0, cb_size, cb_size * 3 / 4, log2_cb_size, 0, idx);
2325  hls_prediction_unit(lc, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx);
2326  break;
2327  case PART_nLx2N:
2328  hls_prediction_unit(lc, x0, y0, cb_size / 4, cb_size, log2_cb_size, 0, idx - 2);
2329  hls_prediction_unit(lc, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
2330  break;
2331  case PART_nRx2N:
2332  hls_prediction_unit(lc, x0, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0, idx - 2);
2333  hls_prediction_unit(lc, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);
2334  break;
2335  case PART_NxN:
2336  hls_prediction_unit(lc, x0, y0, cb_size / 2, cb_size / 2, log2_cb_size, 0, idx - 1);
2337  hls_prediction_unit(lc, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
2338  hls_prediction_unit(lc, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
2339  hls_prediction_unit(lc, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
2340  break;
2341  }
2342  }
2343 
2344  if (!pcm_flag) {
2345  int rqt_root_cbf = 1;
2346 
2347  if (lc->cu.pred_mode != MODE_INTRA &&
2348  !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) {
2349  rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(lc);
2350  }
2351  if (rqt_root_cbf) {
2352  const static int cbf[2] = { 0 };
2353  lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ?
2354  s->ps.sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag :
2355  s->ps.sps->max_transform_hierarchy_depth_inter;
2356  ret = hls_transform_tree(lc, x0, y0, x0, y0, x0, y0,
2357  log2_cb_size,
2358  log2_cb_size, 0, 0, cbf, cbf);
2359  if (ret < 0)
2360  return ret;
2361  } else {
2362  if (!s->sh.disable_deblocking_filter_flag)
2363  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_cb_size);
2364  }
2365  }
2366  }
2367 
2368  if (s->ps.pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0)
2369  ff_hevc_set_qPy(lc, x0, y0, log2_cb_size);
2370 
2371  x = y_cb * min_cb_width + x_cb;
2372  for (y = 0; y < length; y++) {
2373  memset(&s->qp_y_tab[x], lc->qp_y, length);
2374  x += min_cb_width;
2375  }
2376 
2377  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2378  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
2379  lc->qPy_pred = lc->qp_y;
2380  }
2381 
2382  set_ct_depth(s, x0, y0, log2_cb_size, lc->ct_depth);
2383 
2384  return 0;
2385 }
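
/* Editorial sketch (not part of the decoder): the asymmetric inter partitions
 * split a CB into a quarter-size PU and a three-quarter-size PU; e.g.
 * PART_2NxnU stacks a cb_size x cb_size/4 PU on top of a cb_size x
 * 3*cb_size/4 PU, matching the switch in hls_coding_unit(). The helper name
 * is hypothetical. */
static inline void part_2nxnu_geometry_sketch(int x0, int y0, int cb_size,
                                              int pu[2][4] /* x, y, w, h */)
{
    pu[0][0] = x0; pu[0][1] = y0;               pu[0][2] = cb_size; pu[0][3] = cb_size / 4;
    pu[1][0] = x0; pu[1][1] = y0 + cb_size / 4; pu[1][2] = cb_size; pu[1][3] = cb_size * 3 / 4;
}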
2386 
2387 static int hls_coding_quadtree(HEVCLocalContext *lc, int x0, int y0,
2388  int log2_cb_size, int cb_depth)
2389 {
2390  const HEVCContext *const s = lc->parent;
2391  const int cb_size = 1 << log2_cb_size;
2392  int ret;
2393  int split_cu;
2394 
2395  lc->ct_depth = cb_depth;
2396  if (x0 + cb_size <= s->ps.sps->width &&
2397  y0 + cb_size <= s->ps.sps->height &&
2398  log2_cb_size > s->ps.sps->log2_min_cb_size) {
2399  split_cu = ff_hevc_split_coding_unit_flag_decode(lc, cb_depth, x0, y0);
2400  } else {
2401  split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size);
2402  }
2403  if (s->ps.pps->cu_qp_delta_enabled_flag &&
2404  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) {
2405  lc->tu.is_cu_qp_delta_coded = 0;
2406  lc->tu.cu_qp_delta = 0;
2407  }
2408 
2409  if (s->sh.cu_chroma_qp_offset_enabled_flag &&
2410  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) {
2411  lc->tu.is_cu_chroma_qp_offset_coded = 0;
2412  }
2413 
2414  if (split_cu) {
2415  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2416  const int cb_size_split = cb_size >> 1;
2417  const int x1 = x0 + cb_size_split;
2418  const int y1 = y0 + cb_size_split;
2419 
2420  int more_data = 0;
2421 
2422  more_data = hls_coding_quadtree(lc, x0, y0, log2_cb_size - 1, cb_depth + 1);
2423  if (more_data < 0)
2424  return more_data;
2425 
2426  if (more_data && x1 < s->ps.sps->width) {
2427  more_data = hls_coding_quadtree(lc, x1, y0, log2_cb_size - 1, cb_depth + 1);
2428  if (more_data < 0)
2429  return more_data;
2430  }
2431  if (more_data && y1 < s->ps.sps->height) {
2432  more_data = hls_coding_quadtree(lc, x0, y1, log2_cb_size - 1, cb_depth + 1);
2433  if (more_data < 0)
2434  return more_data;
2435  }
2436  if (more_data && x1 < s->ps.sps->width &&
2437  y1 < s->ps.sps->height) {
2438  more_data = hls_coding_quadtree(lc, x1, y1, log2_cb_size - 1, cb_depth + 1);
2439  if (more_data < 0)
2440  return more_data;
2441  }
2442 
2443  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2444  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
2445  lc->qPy_pred = lc->qp_y;
2446 
2447  if (more_data)
2448  return ((x1 + cb_size_split) < s->ps.sps->width ||
2449  (y1 + cb_size_split) < s->ps.sps->height);
2450  else
2451  return 0;
2452  } else {
2453  ret = hls_coding_unit(lc, s, x0, y0, log2_cb_size);
2454  if (ret < 0)
2455  return ret;
2456  if ((!((x0 + cb_size) %
2457  (1 << (s->ps.sps->log2_ctb_size))) ||
2458  (x0 + cb_size >= s->ps.sps->width)) &&
2459  (!((y0 + cb_size) %
2460  (1 << (s->ps.sps->log2_ctb_size))) ||
2461  (y0 + cb_size >= s->ps.sps->height))) {
2462  int end_of_slice_flag = ff_hevc_end_of_slice_flag_decode(lc);
2463  return !end_of_slice_flag;
2464  } else {
2465  return 1;
2466  }
2467  }
2468 
2469  return 0;
2470 }
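
/* Editorial sketch (not part of the decoder): split_cu_flag is only coded
 * when the whole CB lies inside the picture; otherwise a split is implied as
 * long as the CB is still larger than the minimum CB size, which is the test
 * at the top of hls_coding_quadtree(). The helper name is hypothetical. */
static inline int split_implied_sketch(int x0, int y0, int log2_cb_size,
                                       int pic_width, int pic_height,
                                       int log2_min_cb_size)
{
    int cb_size = 1 << log2_cb_size;
    int fits    = x0 + cb_size <= pic_width && y0 + cb_size <= pic_height;

    return !fits && log2_cb_size > log2_min_cb_size;
}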
2471 
2472 static void hls_decode_neighbour(HEVCLocalContext *lc, int x_ctb, int y_ctb,
2473  int ctb_addr_ts)
2474 {
2475  const HEVCContext *const s = lc->parent;
2476  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2477  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2478  int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
2479 
2480  s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;
2481 
2482  if (s->ps.pps->entropy_coding_sync_enabled_flag) {
2483  if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
2484  lc->first_qp_group = 1;
2485  lc->end_of_tiles_x = s->ps.sps->width;
2486  } else if (s->ps.pps->tiles_enabled_flag) {
2487  if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
2488  int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
2489  lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
2490  lc->first_qp_group = 1;
2491  }
2492  } else {
2493  lc->end_of_tiles_x = s->ps.sps->width;
2494  }
2495 
2496  lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height);
2497 
2498  lc->boundary_flags = 0;
2499  if (s->ps.pps->tiles_enabled_flag) {
2500  if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
2501  lc->boundary_flags |= BOUNDARY_LEFT_TILE;
2502  if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
2503  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2504  if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
2505  lc->boundary_flags |= BOUNDARY_UPPER_TILE;
2506  if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
2507  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2508  } else {
2509  if (ctb_addr_in_slice <= 0)
2510  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2511  if (ctb_addr_in_slice < s->ps.sps->ctb_width)
2512  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2513  }
2514 
2515  lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
2516  lc->ctb_up_flag = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
2517  lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
2518  lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
2519 }
2520 
2521 static int hls_decode_entry(AVCodecContext *avctxt, void *arg)
2522 {
2523  HEVCContext *s = avctxt->priv_data;
2524  HEVCLocalContext *const lc = s->HEVClc;
2525  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2526  int more_data = 1;
2527  int x_ctb = 0;
2528  int y_ctb = 0;
2529  int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
2530  int ret;
2531 
2532  if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {
2533  av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n");
2534  return AVERROR_INVALIDDATA;
2535  }
2536 
2537  if (s->sh.dependent_slice_segment_flag) {
2538  int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
2539  if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {
2540  av_log(s->avctx, AV_LOG_ERROR, "Previous slice segment missing\n");
2541  return AVERROR_INVALIDDATA;
2542  }
2543  }
2544 
2545  while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2546  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2547 
2548  x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2549  y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2550  hls_decode_neighbour(lc, x_ctb, y_ctb, ctb_addr_ts);
2551 
2552  ret = ff_hevc_cabac_init(lc, ctb_addr_ts);
2553  if (ret < 0) {
2554  s->tab_slice_address[ctb_addr_rs] = -1;
2555  return ret;
2556  }
2557 
2558  hls_sao_param(lc, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2559 
2560  s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
2561  s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset;
2562  s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag;
2563 
2564  more_data = hls_coding_quadtree(lc, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2565  if (more_data < 0) {
2566  s->tab_slice_address[ctb_addr_rs] = -1;
2567  return more_data;
2568  }
2569 
2570 
2571  ctb_addr_ts++;
2572  ff_hevc_save_states(lc, ctb_addr_ts);
2573  ff_hevc_hls_filters(lc, x_ctb, y_ctb, ctb_size);
2574  }
2575 
2576  if (x_ctb + ctb_size >= s->ps.sps->width &&
2577  y_ctb + ctb_size >= s->ps.sps->height)
2578  ff_hevc_hls_filter(lc, x_ctb, y_ctb, ctb_size);
2579 
2580  return ctb_addr_ts;
2581 }
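
/* Editorial sketch (not part of the decoder): a raster-scan CTB address is
 * turned into sample coordinates by splitting it into column and row of the
 * picture-wide CTB grid, exactly as at the top of the decode loop above. The
 * helper name is hypothetical. */
static inline void ctb_addr_rs_to_xy_sketch(int ctb_addr_rs, int pic_width,
                                            int log2_ctb_size,
                                            int *x_ctb, int *y_ctb)
{
    int ctb_size  = 1 << log2_ctb_size;
    int ctb_width = (pic_width + ctb_size - 1) >> log2_ctb_size;

    *x_ctb = (ctb_addr_rs % ctb_width) << log2_ctb_size;
    *y_ctb = (ctb_addr_rs / ctb_width) << log2_ctb_size;
}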
2582 
2583 static int hls_slice_data(HEVCContext *s)
2584 {
2585  int ret = 0;
2586 
2587  s->avctx->execute(s->avctx, hls_decode_entry, NULL, &ret , 1, 0);
2588  return ret;
2589 }
2590 static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *hevc_lclist,
2591  int job, int self_id)
2592 {
2593  HEVCLocalContext *lc = ((HEVCLocalContext**)hevc_lclist)[self_id];
2594  const HEVCContext *const s = lc->parent;
2595  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2596  int more_data = 1;
2597  int ctb_row = job;
2598  int ctb_addr_rs = s->sh.slice_ctb_addr_rs + ctb_row * ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size);
2599  int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
2600  int thread = ctb_row % s->threads_number;
2601  int ret;
2602 
2603  if(ctb_row) {
2604  ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
2605  if (ret < 0)
2606  goto error;
2607  ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]);
2608  }
2609 
2610  while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2611  int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2612  int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2613 
2614  hls_decode_neighbour(lc, x_ctb, y_ctb, ctb_addr_ts);
2615 
2616  ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP);
2617 
2618  /* atomic_load's prototype requires a pointer to non-const atomic variable
2619  * (due to implementations via mutexes, where reads involve writes).
2620  * Of course, casting const away here is nevertheless safe. */
2621  if (atomic_load((atomic_int*)&s->wpp_err)) {
2622  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2623  return 0;
2624  }
2625 
2626  ret = ff_hevc_cabac_init(lc, ctb_addr_ts);
2627  if (ret < 0)
2628  goto error;
2629  hls_sao_param(lc, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2630  more_data = hls_coding_quadtree(lc, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2631 
2632  if (more_data < 0) {
2633  ret = more_data;
2634  goto error;
2635  }
2636 
2637  ctb_addr_ts++;
2638 
2639  ff_hevc_save_states(lc, ctb_addr_ts);
2640  ff_thread_report_progress2(s->avctx, ctb_row, thread, 1);
2641  ff_hevc_hls_filters(lc, x_ctb, y_ctb, ctb_size);
2642 
2643  if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
2644  /* Casting const away here is safe, because it is an atomic operation. */
2645  atomic_store((atomic_int*)&s->wpp_err, 1);
2646  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2647  return 0;
2648  }
2649 
2650  if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) {
2651  ff_hevc_hls_filter(lc, x_ctb, y_ctb, ctb_size);
2652  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2653  return ctb_addr_ts;
2654  }
2655  ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2656  x_ctb+=ctb_size;
2657 
2658  if(x_ctb >= s->ps.sps->width) {
2659  break;
2660  }
2661  }
2662  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2663 
2664  return 0;
2665 error:
2666  s->tab_slice_address[ctb_addr_rs] = -1;
2667  /* Casting const away here is safe, because it is an atomic operation. */
2668  atomic_store((atomic_int*)&s->wpp_err, 1);
2669  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2670  return ret;
2671 }
2672 
2673 static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
2674 {
2675  const uint8_t *data = nal->data;
2676  int length = nal->size;
2677  HEVCLocalContext *lc = s->HEVClc;
2678  int *ret;
2679  int64_t offset;
2680  int64_t startheader, cmpt = 0;
2681  int i, j, res = 0;
2682 
2683  if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
2684  av_log(s->avctx, AV_LOG_ERROR, "WPP ctb addresses are wrong (%d %d %d %d)\n",
2685  s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
2686  s->ps.sps->ctb_width, s->ps.sps->ctb_height
2687  );
2688  return AVERROR_INVALIDDATA;
2689  }
2690 
2691  for (i = 1; i < s->threads_number; i++) {
2692  if (s->HEVClcList[i])
2693  continue;
2694  s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext));
2695  if (!s->HEVClcList[i])
2696  return AVERROR(ENOMEM);
2697  s->HEVClcList[i]->logctx = s->avctx;
2698  s->HEVClcList[i]->parent = s;
2699  s->HEVClcList[i]->common_cabac_state = &s->cabac;
2700  }
2701 
2702  offset = (lc->gb.index >> 3);
2703 
2704  for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
2705  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2706  startheader--;
2707  cmpt++;
2708  }
2709  }
2710 
2711  for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
2712  offset += (s->sh.entry_point_offset[i - 1] - cmpt);
2713  for (j = 0, cmpt = 0, startheader = offset
2714  + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) {
2715  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2716  startheader--;
2717  cmpt++;
2718  }
2719  }
2720  s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;
2721  s->sh.offset[i - 1] = offset;
2722 
2723  }
2724  if (s->sh.num_entry_point_offsets != 0) {
2725  offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;
2726  if (length < offset) {
2727  av_log(s->avctx, AV_LOG_ERROR, "entry_point_offset table is corrupted\n");
2728  return AVERROR_INVALIDDATA;
2729  }
2730  s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset;
2731  s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;
2732 
2733  }
2734  s->data = data;
2735 
2736  for (i = 1; i < s->threads_number; i++) {
2737  s->HEVClcList[i]->first_qp_group = 1;
2738  s->HEVClcList[i]->qp_y = s->HEVClc->qp_y;
2739  }
2740 
2741  atomic_store(&s->wpp_err, 0);
2742  res = ff_slice_thread_allocz_entries(s->avctx, s->sh.num_entry_point_offsets + 1);
2743  if (res < 0)
2744  return res;
2745 
2746  ret = av_calloc(s->sh.num_entry_point_offsets + 1, sizeof(*ret));
2747  if (!ret)
2748  return AVERROR(ENOMEM);
2749 
2750  if (s->ps.pps->entropy_coding_sync_enabled_flag)
2751  s->avctx->execute2(s->avctx, hls_decode_entry_wpp, s->HEVClcList, ret, s->sh.num_entry_point_offsets + 1);
2752 
2753  for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
2754  res += ret[i];
2755 
2756  av_free(ret);
2757  return res;
2758 }
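
/* Editorial sketch (not part of the decoder): the slice header's entry point
 * offsets count bytes of the escaped bitstream, so the emulation-prevention
 * bytes stripped inside each substream (recorded in skipped_bytes_pos[]) have
 * to be subtracted to obtain offsets into the unescaped buffer. This
 * simplified helper (hypothetical name) counts them over a fixed window,
 * whereas the loops above also shrink the window as they count. */
static inline int count_skipped_bytes_sketch(const int *skipped_bytes_pos,
                                             int nb_skipped,
                                             int64_t start, int64_t end)
{
    int cmpt = 0;

    for (int j = 0; j < nb_skipped; j++)
        if (skipped_bytes_pos[j] >= start && skipped_bytes_pos[j] < end)
            cmpt++;

    return cmpt;
}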
2759 
2760 static int set_side_data(HEVCContext *s)
2761 {
2762  AVFrame *out = s->ref->frame;
2763  int ret;
2764 
2765  // Decrement the mastering display and content light level present counters
2766  // when an IRAP frame has no_rasl_output_flag=1, so that the side data persists
2767  // for the entire coded video sequence.
2768  if (IS_IRAP(s) && s->no_rasl_output_flag) {
2769  if (s->sei.common.mastering_display.present > 0)
2770  s->sei.common.mastering_display.present--;
2771 
2772  if (s->sei.common.content_light.present > 0)
2773  s->sei.common.content_light.present--;
2774  }
2775 
2776  ret = ff_h2645_sei_to_frame(out, &s->sei.common, AV_CODEC_ID_HEVC, NULL,
2777  &s->ps.sps->vui.common,
2778  s->ps.sps->bit_depth, s->ps.sps->bit_depth_chroma,
2779  s->ref->poc /* no poc_offset in HEVC */);
2780  if (ret < 0)
2781  return ret;
2782 
2783  if (s->sei.timecode.present) {
2784  uint32_t *tc_sd;
2785  char tcbuf[AV_TIMECODE_STR_SIZE];
2786  AVFrameSideData *tcside = av_frame_new_side_data(out, AV_FRAME_DATA_S12M_TIMECODE,
2787  sizeof(uint32_t) * 4);
2788  if (!tcside)
2789  return AVERROR(ENOMEM);
2790 
2791  tc_sd = (uint32_t*)tcside->data;
2792  tc_sd[0] = s->sei.timecode.num_clock_ts;
2793 
2794  for (int i = 0; i < tc_sd[0]; i++) {
2795  int drop = s->sei.timecode.cnt_dropped_flag[i];
2796  int hh = s->sei.timecode.hours_value[i];
2797  int mm = s->sei.timecode.minutes_value[i];
2798  int ss = s->sei.timecode.seconds_value[i];
2799  int ff = s->sei.timecode.n_frames[i];
2800 
2801  tc_sd[i + 1] = av_timecode_get_smpte(s->avctx->framerate, drop, hh, mm, ss, ff);
2802  av_timecode_make_smpte_tc_string2(tcbuf, s->avctx->framerate, tc_sd[i + 1], 0, 0);
2803  av_dict_set(&out->metadata, "timecode", tcbuf, 0);
2804  }
2805 
2806  s->sei.timecode.num_clock_ts = 0;
2807  }
2808 
2809  if (s->sei.common.dynamic_hdr_plus.info) {
2810  AVBufferRef *info_ref = av_buffer_ref(s->sei.common.dynamic_hdr_plus.info);
2811  if (!info_ref)
2812  return AVERROR(ENOMEM);
2813 
2814  if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_PLUS, info_ref)) {
2815  av_buffer_unref(&info_ref);
2816  return AVERROR(ENOMEM);
2817  }
2818  }
2819 
2820  if (s->rpu_buf) {
2821  AVFrameSideData *rpu = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DOVI_RPU_BUFFER, s->rpu_buf);
2822  if (!rpu)
2823  return AVERROR(ENOMEM);
2824 
2825  s->rpu_buf = NULL;
2826  }
2827 
2828  if ((ret = ff_dovi_attach_side_data(&s->dovi_ctx, out)) < 0)
2829  return ret;
2830 
2831  if (s->sei.common.dynamic_hdr_vivid.info) {
2832  AVBufferRef *info_ref = av_buffer_ref(s->sei.common.dynamic_hdr_vivid.info);
2833  if (!info_ref)
2834  return AVERROR(ENOMEM);
2835 
2836  if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_VIVID, info_ref)) {
2837  av_buffer_unref(&info_ref);
2838  return AVERROR(ENOMEM);
2839  }
2840  }
2841 
2842  return 0;
2843 }
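
/* Editorial sketch (not part of the decoder): the S12M timecode side data
 * written above is an array of uint32_t in which element 0 holds the number
 * of timecodes and each following element is one SMPTE timecode packed with
 * av_timecode_get_smpte(). The helper name is hypothetical. */
static inline void s12m_fill_sketch(uint32_t *tc_sd, AVRational rate,
                                    int drop, int hh, int mm, int ss, int ff)
{
    tc_sd[0] = 1;
    tc_sd[1] = av_timecode_get_smpte(rate, drop, hh, mm, ss, ff);
}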
2844 
2845 static int hevc_frame_start(HEVCContext *s)
2846 {
2847  HEVCLocalContext *lc = s->HEVClc;
2848  int pic_size_in_ctb = ((s->ps.sps->width >> s->ps.sps->log2_min_cb_size) + 1) *
2849  ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1);
2850  int ret;
2851 
2852  memset(s->horizontal_bs, 0, s->bs_width * s->bs_height);
2853  memset(s->vertical_bs, 0, s->bs_width * s->bs_height);
2854  memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height);
2855  memset(s->is_pcm, 0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1));
2856  memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address));
2857 
2858  s->is_decoded = 0;
2859  s->first_nal_type = s->nal_unit_type;
2860 
2861  s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos);
2862 
2863  if (s->ps.pps->tiles_enabled_flag)
2864  lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size;
2865 
2866  ret = ff_hevc_set_new_ref(s, &s->frame, s->poc);
2867  if (ret < 0)
2868  goto fail;
2869 
2870  ret = ff_hevc_frame_rps(s);
2871  if (ret < 0) {
2872  av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n");
2873  goto fail;
2874  }
2875 
2876  if (IS_IRAP(s))
2877  s->ref->frame->flags |= AV_FRAME_FLAG_KEY;
2878  else
2879  s->ref->frame->flags &= ~AV_FRAME_FLAG_KEY;
2880 
2881  s->ref->needs_fg = s->sei.common.film_grain_characteristics.present &&
2882  !(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) &&
2883  !s->avctx->hwaccel;
2884 
2885  if (s->ref->needs_fg &&
2886  !ff_h274_film_grain_params_supported(s->sei.common.film_grain_characteristics.model_id,
2887  s->ref->frame->format)) {
2888  av_log_once(s->avctx, AV_LOG_WARNING, AV_LOG_DEBUG, &s->film_grain_warning_shown,
2889  "Unsupported film grain parameters. Ignoring film grain.\n");
2890  s->ref->needs_fg = 0;
2891  }
2892 
2893  if (s->ref->needs_fg) {
2894  s->ref->frame_grain->format = s->ref->frame->format;
2895  s->ref->frame_grain->width = s->ref->frame->width;
2896  s->ref->frame_grain->height = s->ref->frame->height;
2897  if ((ret = ff_thread_get_buffer(s->avctx, s->ref->frame_grain, 0)) < 0)
2898  goto fail;
2899  }
2900 
2901  ret = set_side_data(s);
2902  if (ret < 0)
2903  goto fail;
2904 
2905  s->frame->pict_type = 3 - s->sh.slice_type;
2906 
2907  if (!IS_IRAP(s))
2908  ff_hevc_bump_frame(s);
2909 
2910  av_frame_unref(s->output_frame);
2911  ret = ff_hevc_output_frame(s, s->output_frame, 0);
2912  if (ret < 0)
2913  goto fail;
2914 
2915  if (!s->avctx->hwaccel)
2916  ff_thread_finish_setup(s->avctx);
2917 
2918  return 0;
2919 
2920 fail:
2921  if (s->ref)
2922  ff_hevc_unref_frame(s->ref, ~0);
2923  s->ref = s->collocated_ref = NULL;
2924  return ret;
2925 }
2926 
2927 static int hevc_frame_end(HEVCContext *s)
2928 {
2929  HEVCFrame *out = s->ref;
2930  const AVFrameSideData *sd;
2931  av_unused int ret;
2932 
2933  if (out->needs_fg) {
2934  sd = av_frame_get_side_data(out->frame, AV_FRAME_DATA_FILM_GRAIN_PARAMS);
2935  av_assert0(out->frame_grain->buf[0] && sd);
2936  ret = ff_h274_apply_film_grain(out->frame_grain, out->frame, &s->h274db,
2937  (AVFilmGrainParams *) sd->data);
2938  av_assert1(ret >= 0);
2939  }
2940 
2941  return 0;
2942 }
2943 
2944 static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
2945 {
2946  HEVCLocalContext *lc = s->HEVClc;
2947  GetBitContext *gb = &lc->gb;
2948  int ctb_addr_ts, ret;
2949 
2950  *gb = nal->gb;
2951  s->nal_unit_type = nal->type;
2952  s->temporal_id = nal->temporal_id;
2953 
2954  switch (s->nal_unit_type) {
2955  case HEVC_NAL_VPS:
2956  if (FF_HW_HAS_CB(s->avctx, decode_params)) {
2957  ret = FF_HW_CALL(s->avctx, decode_params,
2958  nal->type, nal->raw_data, nal->raw_size);
2959  if (ret < 0)
2960  goto fail;
2961  }
2962  ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
2963  if (ret < 0)
2964  goto fail;
2965  break;
2966  case HEVC_NAL_SPS:
2967  if (FF_HW_HAS_CB(s->avctx, decode_params)) {
2968  ret = FF_HW_CALL(s->avctx, decode_params,
2969  nal->type, nal->raw_data, nal->raw_size);
2970  if (ret < 0)
2971  goto fail;
2972  }
2973  ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
2974  s->apply_defdispwin);
2975  if (ret < 0)
2976  goto fail;
2977  break;
2978  case HEVC_NAL_PPS:
2979  if (FF_HW_HAS_CB(s->avctx, decode_params)) {
2980  ret = FF_HW_CALL(s->avctx, decode_params,
2981  nal->type, nal->raw_data, nal->raw_size);
2982  if (ret < 0)
2983  goto fail;
2984  }
2985  ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
2986  if (ret < 0)
2987  goto fail;
2988  break;
2989  case HEVC_NAL_SEI_PREFIX:
2990  case HEVC_NAL_SEI_SUFFIX:
2991  if (FF_HW_HAS_CB(s->avctx, decode_params)) {
2992  ret = FF_HW_CALL(s->avctx, decode_params,
2993  nal->type, nal->raw_data, nal->raw_size);
2994  if (ret < 0)
2995  goto fail;
2996  }
2997  ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type);
2998  if (ret < 0)
2999  goto fail;
3000  break;
3001  case HEVC_NAL_TRAIL_R:
3002  case HEVC_NAL_TRAIL_N:
3003  case HEVC_NAL_TSA_N:
3004  case HEVC_NAL_TSA_R:
3005  case HEVC_NAL_STSA_N:
3006  case HEVC_NAL_STSA_R:
3007  case HEVC_NAL_BLA_W_LP:
3008  case HEVC_NAL_BLA_W_RADL:
3009  case HEVC_NAL_BLA_N_LP:
3010  case HEVC_NAL_IDR_W_RADL:
3011  case HEVC_NAL_IDR_N_LP:
3012  case HEVC_NAL_CRA_NUT:
3013  case HEVC_NAL_RADL_N:
3014  case HEVC_NAL_RADL_R:
3015  case HEVC_NAL_RASL_N:
3016  case HEVC_NAL_RASL_R:
3017  ret = hls_slice_header(s);
3018  if (ret < 0)
3019  return ret;
3020  if (ret == 1) {
3021  ret = AVERROR_INVALIDDATA;
3022  goto fail;
3023  }
3024 
3025 
3026  if (
3027  (s->avctx->skip_frame >= AVDISCARD_BIDIR && s->sh.slice_type == HEVC_SLICE_B) ||
3028  (s->avctx->skip_frame >= AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) ||
3029  (s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) {
3030  break;
3031  }
3032 
3033  if (s->sh.first_slice_in_pic_flag) {
3034  if (s->max_ra == INT_MAX) {
3035  if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) {
3036  s->max_ra = s->poc;
3037  } else {
3038  if (IS_IDR(s))
3039  s->max_ra = INT_MIN;
3040  }
3041  }
3042 
3043  if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) &&
3044  s->poc <= s->max_ra) {
3045  s->is_decoded = 0;
3046  break;
3047  } else {
3048  if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra)
3049  s->max_ra = INT_MIN;
3050  }
3051 
3052  s->overlap ++;
3053  ret = hevc_frame_start(s);
3054  if (ret < 0)
3055  return ret;
3056  } else if (!s->ref) {
3057  av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
3058  goto fail;
3059  }
3060 
3061  if (s->nal_unit_type != s->first_nal_type) {
3062  av_log(s->avctx, AV_LOG_ERROR,
3063  "Non-matching NAL types of the VCL NALUs: %d %d\n",
3064  s->first_nal_type, s->nal_unit_type);
3065  return AVERROR_INVALIDDATA;
3066  }
3067 
3068  if (!s->sh.dependent_slice_segment_flag &&
3069  s->sh.slice_type != HEVC_SLICE_I) {
3070  ret = ff_hevc_slice_rpl(s);
3071  if (ret < 0) {
3072  av_log(s->avctx, AV_LOG_WARNING,
3073  "Error constructing the reference lists for the current slice.\n");
3074  goto fail;
3075  }
3076  }
3077 
3078  if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
3079  ret = FF_HW_CALL(s->avctx, start_frame, NULL, 0);
3080  if (ret < 0)
3081  goto fail;
3082  }
3083 
3084  if (s->avctx->hwaccel) {
3085  ret = FF_HW_CALL(s->avctx, decode_slice, nal->raw_data, nal->raw_size);
3086  if (ret < 0)
3087  goto fail;
3088  } else {
3089  if (s->avctx->profile == AV_PROFILE_HEVC_SCC) {
3090  av_log(s->avctx, AV_LOG_ERROR,
3091  "SCC profile is not yet implemented in hevc native decoder.\n");
3092  ret = AVERROR_PATCHWELCOME;
3093  goto fail;
3094  }
3095 
3096  if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
3097  ctb_addr_ts = hls_slice_data_wpp(s, nal);
3098  else
3099  ctb_addr_ts = hls_slice_data(s);
3100  if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
3101  ret = hevc_frame_end(s);
3102  if (ret < 0)
3103  goto fail;
3104  s->is_decoded = 1;
3105  }
3106 
3107  if (ctb_addr_ts < 0) {
3108  ret = ctb_addr_ts;
3109  goto fail;
3110  }
3111  }
3112  break;
3113  case HEVC_NAL_EOS_NUT:
3114  case HEVC_NAL_EOB_NUT:
3115  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
3116  s->max_ra = INT_MAX;
3117  break;
3118  case HEVC_NAL_AUD:
3119  case HEVC_NAL_FD_NUT:
3120  case HEVC_NAL_UNSPEC62:
3121  break;
3122  default:
3123  av_log(s->avctx, AV_LOG_INFO,
3124  "Skipping NAL unit %d\n", s->nal_unit_type);
3125  }
3126 
3127  return 0;
3128 fail:
3129  if (s->avctx->err_recognition & AV_EF_EXPLODE)
3130  return ret;
3131  return 0;
3132 }
3133 
3134 static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
3135 {
3136  int i, ret = 0;
3137  int eos_at_start = 1;
3138 
3139  s->ref = s->collocated_ref = NULL;
3140  s->last_eos = s->eos;
3141  s->eos = 0;
3142  s->overlap = 0;
3143 
3144  /* split the input packet into NAL units, so we know the upper bound on the
3145  * number of slices in the frame */
3146  ret = ff_h2645_packet_split(&s->pkt, buf, length, s->avctx, s->is_nalff,
3147  s->nal_length_size, s->avctx->codec_id, 1, 0);
3148  if (ret < 0) {
3149  av_log(s->avctx, AV_LOG_ERROR,
3150  "Error splitting the input into NAL units.\n");
3151  return ret;
3152  }
3153 
3154  for (i = 0; i < s->pkt.nb_nals; i++) {
3155  if (s->pkt.nals[i].type == HEVC_NAL_EOB_NUT ||
3156  s->pkt.nals[i].type == HEVC_NAL_EOS_NUT) {
3157  if (eos_at_start) {
3158  s->last_eos = 1;
3159  } else {
3160  s->eos = 1;
3161  }
3162  } else {
3163  eos_at_start = 0;
3164  }
3165  }
3166 
3167  /*
3168  * Check for RPU delimiter.
3169  *
3170  * Dolby Vision RPUs masquerade as unregistered NALs of type 62.
3171  *
3172  * We have to do this check here and create the rpu buffer, since RPUs are appended
3173  * to the end of an AU; they are the last non-EOB/EOS NAL in the AU.
3174  */
3175  if (s->pkt.nb_nals > 1 && s->pkt.nals[s->pkt.nb_nals - 1].type == HEVC_NAL_UNSPEC62 &&
3176  s->pkt.nals[s->pkt.nb_nals - 1].size > 2 && !s->pkt.nals[s->pkt.nb_nals - 1].nuh_layer_id
3177  && !s->pkt.nals[s->pkt.nb_nals - 1].temporal_id) {
3178  H2645NAL *nal = &s->pkt.nals[s->pkt.nb_nals - 1];
3179  if (s->rpu_buf) {
3180  av_buffer_unref(&s->rpu_buf);
3181  av_log(s->avctx, AV_LOG_WARNING, "Multiple Dolby Vision RPUs found in one AU. Skipping previous.\n");
3182  }
3183 
3184  s->rpu_buf = av_buffer_alloc(nal->raw_size - 2);
3185  if (!s->rpu_buf)
3186  return AVERROR(ENOMEM);
3187  memcpy(s->rpu_buf->data, nal->raw_data + 2, nal->raw_size - 2);
3188 
3189  ret = ff_dovi_rpu_parse(&s->dovi_ctx, nal->data + 2, nal->size - 2);
3190  if (ret < 0) {
3191  av_buffer_unref(&s->rpu_buf);
3192  av_log(s->avctx, AV_LOG_WARNING, "Error parsing DOVI NAL unit.\n");
3193  /* ignore */
3194  }
3195  }
3196 
3197  /* decode the NAL units */
3198  for (i = 0; i < s->pkt.nb_nals; i++) {
3199  H2645NAL *nal = &s->pkt.nals[i];
3200 
3201  if (s->avctx->skip_frame >= AVDISCARD_ALL ||
3202  (s->avctx->skip_frame >= AVDISCARD_NONREF
3203  && ff_hevc_nal_is_nonref(nal->type)) || nal->nuh_layer_id > 0)
3204  continue;
3205 
3206  ret = decode_nal_unit(s, nal);
3207  if (ret >= 0 && s->overlap > 2)
3208  ret = AVERROR_INVALIDDATA;
3209  if (ret < 0) {
3210  av_log(s->avctx, AV_LOG_WARNING,
3211  "Error parsing NAL unit #%d.\n", i);
3212  goto fail;
3213  }
3214  }
3215 
3216 fail:
3217  if (s->ref && s->threads_type == FF_THREAD_FRAME)
3218  ff_thread_report_progress(&s->ref->tf, INT_MAX, 0);
3219 
3220  return ret;
3221 }
3222 
3223  static int verify_md5(HEVCContext *s, AVFrame *frame)
3224  {
3225  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
3226  char msg_buf[4 * (50 + 2 * 2 * 16 /* MD5-size */)];
3227  int pixel_shift;
3228  int err = 0;
3229  int i, j;
3230 
3231  if (!desc)
3232  return AVERROR(EINVAL);
3233 
3234  pixel_shift = desc->comp[0].depth > 8;
3235 
3236  /* the checksums are LE, so we have to byteswap for >8bpp formats
3237  * on BE arches */
3238 #if HAVE_BIGENDIAN
3239  if (pixel_shift && !s->checksum_buf) {
3240  av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size,
3241  FFMAX3(frame->linesize[0], frame->linesize[1],
3242  frame->linesize[2]));
3243  if (!s->checksum_buf)
3244  return AVERROR(ENOMEM);
3245  }
3246 #endif
3247 
3248  msg_buf[0] = '\0';
3249  for (i = 0; frame->data[i]; i++) {
3250  int width = s->avctx->coded_width;
3251  int height = s->avctx->coded_height;
3252  int w = (i == 1 || i == 2) ? (width >> desc->log2_chroma_w) : width;
3253  int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height;
3254  uint8_t md5[16];
3255 
3256  av_md5_init(s->md5_ctx);
3257  for (j = 0; j < h; j++) {
3258  const uint8_t *src = frame->data[i] + j * frame->linesize[i];
3259 #if HAVE_BIGENDIAN
3260  if (pixel_shift) {
3261  s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf,
3262  (const uint16_t *) src, w);
3263  src = s->checksum_buf;
3264  }
3265 #endif
3266  av_md5_update(s->md5_ctx, src, w << pixel_shift);
3267  }
3268  av_md5_final(s->md5_ctx, md5);
3269 
3270 #define MD5_PRI "%016" PRIx64 "%016" PRIx64
3271 #define MD5_PRI_ARG(buf) AV_RB64(buf), AV_RB64((const uint8_t*)(buf) + 8)
3272 
3273  if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) {
3274  av_strlcatf(msg_buf, sizeof(msg_buf),
3275  "plane %d - correct " MD5_PRI "; ",
3276  i, MD5_PRI_ARG(md5));
3277  } else {
3278  av_strlcatf(msg_buf, sizeof(msg_buf),
3279  "mismatching checksum of plane %d - " MD5_PRI " != " MD5_PRI "; ",
3280  i, MD5_PRI_ARG(md5), MD5_PRI_ARG(s->sei.picture_hash.md5[i]));
3281  err = AVERROR_INVALIDDATA;
3282  }
3283  }
3284 
3285  av_log(s->avctx, err < 0 ? AV_LOG_ERROR : AV_LOG_DEBUG,
3286  "Verifying checksum for frame with POC %d: %s\n",
3287  s->poc, msg_buf);
3288 
3289  return err;
3290 }
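/*
 * Illustrative sketch (not part of the original hevcdec.c source): how an
 * application could recompute the per-plane MD5 that verify_md5() above
 * compares against the decoded-picture-hash SEI. The helper name and the
 * 8-bit-per-sample assumption are made up for the example; real code must
 * honour bit depth and chroma subsampling exactly as verify_md5() does.
 */
#include <stdint.h>
#include "libavutil/frame.h"
#include "libavutil/md5.h"
#include "libavutil/mem.h"

static void example_plane_md5(const AVFrame *frame, int plane, int w, int h,
                              uint8_t out[16])
{
    struct AVMD5 *ctx = av_md5_alloc();
    if (!ctx)
        return;
    av_md5_init(ctx);
    /* hash each row separately so linesize padding never enters the digest */
    for (int y = 0; y < h; y++)
        av_md5_update(ctx, frame->data[plane] + y * frame->linesize[plane], w);
    av_md5_final(ctx, out);
    av_free(ctx);
}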
3291 
3292 static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
3293 {
3294  int ret, i;
3295 
3296  ret = ff_hevc_decode_extradata(buf, length, &s->ps, &s->sei, &s->is_nalff,
3297  &s->nal_length_size, s->avctx->err_recognition,
3298  s->apply_defdispwin, s->avctx);
3299  if (ret < 0)
3300  return ret;
3301 
3302  /* export stream parameters from the first SPS */
3303  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3304  if (first && s->ps.sps_list[i]) {
3305  const HEVCSPS *sps = s->ps.sps_list[i];
3306  export_stream_params(s, sps);
3307  break;
3308  }
3309  }
3310 
3311  /* export stream parameters from SEI */
3312  ret = export_stream_params_from_sei(s);
3313  if (ret < 0)
3314  return ret;
3315 
3316  return 0;
3317 }
3318 
3319 static int hevc_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
3320  int *got_output, AVPacket *avpkt)
3321 {
3322  int ret;
3323  uint8_t *sd;
3324  size_t sd_size;
3325  HEVCContext *s = avctx->priv_data;
3326 
3327  if (!avpkt->size) {
3328  ret = ff_hevc_output_frame(s, rframe, 1);
3329  if (ret < 0)
3330  return ret;
3331 
3332  *got_output = ret;
3333  return 0;
3334  }
3335 
3336  sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &sd_size);
3337  if (sd && sd_size > 0) {
3338  ret = hevc_decode_extradata(s, sd, sd_size, 0);
3339  if (ret < 0)
3340  return ret;
3341  }
3342 
3343  sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_DOVI_CONF, &sd_size);
3344  if (sd && sd_size > 0) {
3345  int old = s->dovi_ctx.dv_profile;
3346 
3347  ff_dovi_update_cfg(&s->dovi_ctx, (AVDOVIDecoderConfigurationRecord *) sd);
3348  if (old)
3349  av_log(avctx, AV_LOG_DEBUG,
3350  "New DOVI configuration record from input packet (profile %d -> %u).\n",
3351  old, s->dovi_ctx.dv_profile);
3352  }
3353 
3354  s->ref = s->collocated_ref = NULL;
3355  ret = decode_nal_units(s, avpkt->data, avpkt->size);
3356  if (ret < 0)
3357  return ret;
3358 
3359  if (avctx->hwaccel) {
3360  if (s->ref && (ret = FF_HW_SIMPLE_CALL(avctx, end_frame)) < 0) {
3361  av_log(avctx, AV_LOG_ERROR,
3362  "hardware accelerator failed to decode picture\n");
3363  ff_hevc_unref_frame(s->ref, ~0);
3364  return ret;
3365  }
3366  } else {
3367  /* verify the SEI checksum */
3368  if (avctx->err_recognition & AV_EF_CRCCHECK && s->ref && s->is_decoded &&
3369  s->sei.picture_hash.is_md5) {
3370  ret = verify_md5(s, s->ref->frame);
3371  if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
3372  ff_hevc_unref_frame(s->ref, ~0);
3373  return ret;
3374  }
3375  }
3376  }
3377  s->sei.picture_hash.is_md5 = 0;
3378 
3379  if (s->is_decoded) {
3380  av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc);
3381  s->is_decoded = 0;
3382  }
3383 
3384  if (s->output_frame->buf[0]) {
3385  av_frame_move_ref(rframe, s->output_frame);
3386  *got_output = 1;
3387  }
3388 
3389  return avpkt->size;
3390 }
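/*
 * Illustrative sketch (not part of the original hevcdec.c source):
 * hevc_decode_frame() above is not called directly by applications; it is
 * reached through the public send/receive decode API. A minimal caller,
 * with the helper name invented for the example, might look like this.
 */
#include "libavcodec/avcodec.h"

static int example_decode_one(AVCodecContext *dec, const AVPacket *pkt,
                              AVFrame *out)
{
    /* hand the compressed access unit to the decoder */
    int ret = avcodec_send_packet(dec, pkt);
    if (ret < 0)
        return ret;
    /* fetch a decoded picture; EAGAIN just means more input is needed */
    ret = avcodec_receive_frame(dec, out);
    return ret == AVERROR(EAGAIN) ? 0 : ret;
}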
3391 
3392  static int hevc_ref_frame(HEVCFrame *dst, HEVCFrame *src)
3393  {
3394  int ret;
3395 
3396  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
3397  if (ret < 0)
3398  return ret;
3399 
3400  if (src->needs_fg) {
3401  ret = av_frame_ref(dst->frame_grain, src->frame_grain);
3402  if (ret < 0)
3403  return ret;
3404  dst->needs_fg = 1;
3405  }
3406 
3407  dst->tab_mvf_buf = av_buffer_ref(src->tab_mvf_buf);
3408  if (!dst->tab_mvf_buf)
3409  goto fail;
3410  dst->tab_mvf = src->tab_mvf;
3411 
3412  dst->rpl_tab_buf = av_buffer_ref(src->rpl_tab_buf);
3413  if (!dst->rpl_tab_buf)
3414  goto fail;
3415  dst->rpl_tab = src->rpl_tab;
3416 
3417  dst->rpl = ff_refstruct_ref(src->rpl);
3418  dst->nb_rpl_elems = src->nb_rpl_elems;
3419 
3420  dst->poc = src->poc;
3421  dst->ctb_count = src->ctb_count;
3422  dst->flags = src->flags;
3423  dst->sequence = src->sequence;
3424 
3425  ff_refstruct_replace(&dst->hwaccel_picture_private,
3426  src->hwaccel_picture_private);
3427 
3428  return 0;
3429 fail:
3430  ff_hevc_unref_frame(dst, ~0);
3431  return AVERROR(ENOMEM);
3432 }
3433 
3434  static av_cold int hevc_decode_free(AVCodecContext *avctx)
3435  {
3436  HEVCContext *s = avctx->priv_data;
3437  int i;
3438 
3439  pic_arrays_free(s);
3440 
3441  ff_dovi_ctx_unref(&s->dovi_ctx);
3442  av_buffer_unref(&s->rpu_buf);
3443 
3444  av_freep(&s->md5_ctx);
3445 
3446  for (i = 0; i < 3; i++) {
3447  av_freep(&s->sao_pixel_buffer_h[i]);
3448  av_freep(&s->sao_pixel_buffer_v[i]);
3449  }
3450  av_frame_free(&s->output_frame);
3451 
3452  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3453  ff_hevc_unref_frame(&s->DPB[i], ~0);
3454  av_frame_free(&s->DPB[i].frame);
3455  av_frame_free(&s->DPB[i].frame_grain);
3456  }
3457 
3458  ff_hevc_ps_uninit(&s->ps);
3459 
3460  av_freep(&s->sh.entry_point_offset);
3461  av_freep(&s->sh.offset);
3462  av_freep(&s->sh.size);
3463 
3464  if (s->HEVClcList) {
3465  for (i = 1; i < s->threads_number; i++) {
3466  av_freep(&s->HEVClcList[i]);
3467  }
3468  }
3469  av_freep(&s->HEVClc);
3470  av_freep(&s->HEVClcList);
3471 
3472  ff_h2645_packet_uninit(&s->pkt);
3473 
3474  ff_hevc_reset_sei(&s->sei);
3475 
3476  return 0;
3477 }
3478 
3479  static av_cold int hevc_init_context(AVCodecContext *avctx)
3480  {
3481  HEVCContext *s = avctx->priv_data;
3482  int i;
3483 
3484  s->avctx = avctx;
3485 
3486  s->HEVClc = av_mallocz(sizeof(HEVCLocalContext));
3487  s->HEVClcList = av_mallocz(sizeof(HEVCLocalContext*) * s->threads_number);
3488  if (!s->HEVClc || !s->HEVClcList)
3489  return AVERROR(ENOMEM);
3490  s->HEVClc->parent = s;
3491  s->HEVClc->logctx = avctx;
3492  s->HEVClc->common_cabac_state = &s->cabac;
3493  s->HEVClcList[0] = s->HEVClc;
3494 
3495  s->output_frame = av_frame_alloc();
3496  if (!s->output_frame)
3497  return AVERROR(ENOMEM);
3498 
3499  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3500  s->DPB[i].frame = av_frame_alloc();
3501  if (!s->DPB[i].frame)
3502  return AVERROR(ENOMEM);
3503  s->DPB[i].tf.f = s->DPB[i].frame;
3504 
3505  s->DPB[i].frame_grain = av_frame_alloc();
3506  if (!s->DPB[i].frame_grain)
3507  return AVERROR(ENOMEM);
3508  }
3509 
3510  s->max_ra = INT_MAX;
3511 
3512  s->md5_ctx = av_md5_alloc();
3513  if (!s->md5_ctx)
3514  return AVERROR(ENOMEM);
3515 
3516  ff_bswapdsp_init(&s->bdsp);
3517 
3518  s->dovi_ctx.logctx = avctx;
3519  s->eos = 0;
3520 
3521  ff_hevc_reset_sei(&s->sei);
3522 
3523  return 0;
3524 }
3525 
3526 #if HAVE_THREADS
3527 static int hevc_update_thread_context(AVCodecContext *dst,
3528  const AVCodecContext *src)
3529 {
3530  HEVCContext *s = dst->priv_data;
3531  HEVCContext *s0 = src->priv_data;
3532  int i, ret;
3533 
3534  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3535  ff_hevc_unref_frame(&s->DPB[i], ~0);
3536  if (s0->DPB[i].frame->buf[0]) {
3537  ret = hevc_ref_frame(&s->DPB[i], &s0->DPB[i]);
3538  if (ret < 0)
3539  return ret;
3540  }
3541  }
3542 
3543  if (s->ps.sps != s0->ps.sps)
3544  s->ps.sps = NULL;
3545  for (int i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++)
3546  ff_refstruct_replace(&s->ps.vps_list[i], s0->ps.vps_list[i]);
3547 
3548  for (int i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++)
3549  ff_refstruct_replace(&s->ps.sps_list[i], s0->ps.sps_list[i]);
3550 
3551  for (int i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++)
3552  ff_refstruct_replace(&s->ps.pps_list[i], s0->ps.pps_list[i]);
3553 
3554  if (s->ps.sps != s0->ps.sps)
3555  if ((ret = set_sps(s, s0->ps.sps, src->pix_fmt)) < 0)
3556  return ret;
3557 
3558  s->seq_decode = s0->seq_decode;
3559  s->seq_output = s0->seq_output;
3560  s->pocTid0 = s0->pocTid0;
3561  s->max_ra = s0->max_ra;
3562  s->eos = s0->eos;
3563  s->no_rasl_output_flag = s0->no_rasl_output_flag;
3564 
3565  s->is_nalff = s0->is_nalff;
3566  s->nal_length_size = s0->nal_length_size;
3567 
3568  s->threads_number = s0->threads_number;
3569  s->threads_type = s0->threads_type;
3570 
3571  s->film_grain_warning_shown = s0->film_grain_warning_shown;
3572 
3573  if (s0->eos) {
3574  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
3575  s->max_ra = INT_MAX;
3576  }
3577 
3578  ret = ff_h2645_sei_ctx_replace(&s->sei.common, &s0->sei.common);
3579  if (ret < 0)
3580  return ret;
3581 
3582  ret = av_buffer_replace(&s->sei.common.dynamic_hdr_plus.info,
3583  s0->sei.common.dynamic_hdr_plus.info);
3584  if (ret < 0)
3585  return ret;
3586 
3587  ret = av_buffer_replace(&s->rpu_buf, s0->rpu_buf);
3588  if (ret < 0)
3589  return ret;
3590 
3591  ff_dovi_ctx_replace(&s->dovi_ctx, &s0->dovi_ctx);
3592 
3593  ret = av_buffer_replace(&s->sei.common.dynamic_hdr_vivid.info,
3594  s0->sei.common.dynamic_hdr_vivid.info);
3595  if (ret < 0)
3596  return ret;
3597 
3598  s->sei.common.frame_packing = s0->sei.common.frame_packing;
3599  s->sei.common.display_orientation = s0->sei.common.display_orientation;
3600  s->sei.common.alternative_transfer = s0->sei.common.alternative_transfer;
3601  s->sei.common.mastering_display = s0->sei.common.mastering_display;
3602  s->sei.common.content_light = s0->sei.common.content_light;
3603 
3604  ret = export_stream_params_from_sei(s);
3605  if (ret < 0)
3606  return ret;
3607 
3608  return 0;
3609 }
3610 #endif
3611 
3612  static av_cold int hevc_decode_init(AVCodecContext *avctx)
3613  {
3614  HEVCContext *s = avctx->priv_data;
3615  int ret;
3616 
3617  if (avctx->active_thread_type & FF_THREAD_SLICE) {
3618  s->threads_number = avctx->thread_count;
3619  ret = ff_slice_thread_init_progress(avctx);
3620  if (ret < 0)
3621  return ret;
3622  } else
3623  s->threads_number = 1;
3624 
3625  if((avctx->active_thread_type & FF_THREAD_FRAME) && avctx->thread_count > 1)
3626  s->threads_type = FF_THREAD_FRAME;
3627  else
3628  s->threads_type = FF_THREAD_SLICE;
3629 
3630  ret = hevc_init_context(avctx);
3631  if (ret < 0)
3632  return ret;
3633 
3634  s->enable_parallel_tiles = 0;
3635  s->sei.picture_timing.picture_struct = 0;
3636  s->eos = 1;
3637 
3638  atomic_init(&s->wpp_err, 0);
3639 
3640  if (!avctx->internal->is_copy) {
3641  const AVPacketSideData *sd;
3642 
3643  if (avctx->extradata_size > 0 && avctx->extradata) {
3644  ret = hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size, 1);
3645  if (ret < 0) {
3646  return ret;
3647  }
3648  }
3649 
3650  sd = ff_get_coded_side_data(avctx, AV_PKT_DATA_DOVI_CONF);
3651  if (sd && sd->size > 0)
3652  ff_dovi_update_cfg(&s->dovi_ctx, (AVDOVIDecoderConfigurationRecord *) sd->data);
3653  }
3654 
3655  return 0;
3656 }
3657 
3658  static void hevc_decode_flush(AVCodecContext *avctx)
3659  {
3660  HEVCContext *s = avctx->priv_data;
3661  ff_hevc_flush_dpb(s);
3662  ff_hevc_reset_sei(&s->sei);
3663  ff_dovi_ctx_flush(&s->dovi_ctx);
3664  av_buffer_unref(&s->rpu_buf);
3665  s->max_ra = INT_MAX;
3666  s->eos = 1;
3667 
3668  if (FF_HW_HAS_CB(avctx, flush))
3669  FF_HW_SIMPLE_CALL(avctx, flush);
3670 }
3671 
3672 #define OFFSET(x) offsetof(HEVCContext, x)
3673 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
3674 
3675 static const AVOption options[] = {
3676  { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
3677  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3678  { "strict-displaywin", "strictly apply default display window size", OFFSET(apply_defdispwin),
3679  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3680  { NULL },
3681 };
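/*
 * Illustrative sketch (not part of the original hevcdec.c source): the
 * private options declared above are reachable through the normal AVOption
 * machinery, for example via a dictionary passed to avcodec_open2(). The
 * helper name is invented for the example.
 */
#include "libavcodec/avcodec.h"
#include "libavutil/dict.h"

static int example_open_with_defdispwin(AVCodecContext *dec)
{
    AVDictionary *opts = NULL;
    int ret;

    /* ask the decoder to crop output to the VUI default display window */
    av_dict_set(&opts, "apply_defdispwin", "1", 0);
    ret = avcodec_open2(dec, avcodec_find_decoder(AV_CODEC_ID_HEVC), &opts);
    av_dict_free(&opts);
    return ret;
}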
3682 
3683 static const AVClass hevc_decoder_class = {
3684  .class_name = "HEVC decoder",
3685  .item_name = av_default_item_name,
3686  .option = options,
3687  .version = LIBAVUTIL_VERSION_INT,
3688 };
3689 
3690  const FFCodec ff_hevc_decoder = {
3691  .p.name = "hevc",
3692  CODEC_LONG_NAME("HEVC (High Efficiency Video Coding)"),
3693  .p.type = AVMEDIA_TYPE_VIDEO,
3694  .p.id = AV_CODEC_ID_HEVC,
3695  .priv_data_size = sizeof(HEVCContext),
3696  .p.priv_class = &hevc_decoder_class,
3697  .init = hevc_decode_init,
3698  .close = hevc_decode_free,
3699  FF_CODEC_DECODE_CB(hevc_decode_frame),
3700  .flush = hevc_decode_flush,
3701  UPDATE_THREAD_CONTEXT(hevc_update_thread_context),
3702  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
3703  AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
3704  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3705  FF_CODEC_CAP_ALLOCATE_PROGRESS | FF_CODEC_CAP_INIT_CLEANUP,
3706  .p.profiles = NULL_IF_CONFIG_SMALL(ff_hevc_profiles),
3707  .hw_configs = (const AVCodecHWConfigInternal *const []) {
3708 #if CONFIG_HEVC_DXVA2_HWACCEL
3709  HWACCEL_DXVA2(hevc),
3710 #endif
3711 #if CONFIG_HEVC_D3D11VA_HWACCEL
3712  HWACCEL_D3D11VA(hevc),
3713 #endif
3714 #if CONFIG_HEVC_D3D11VA2_HWACCEL
3715  HWACCEL_D3D11VA2(hevc),
3716 #endif
3717 #if CONFIG_HEVC_NVDEC_HWACCEL
3718  HWACCEL_NVDEC(hevc),
3719 #endif
3720 #if CONFIG_HEVC_VAAPI_HWACCEL
3721  HWACCEL_VAAPI(hevc),
3722 #endif
3723 #if CONFIG_HEVC_VDPAU_HWACCEL
3724  HWACCEL_VDPAU(hevc),
3725 #endif
3726 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
3727  HWACCEL_VIDEOTOOLBOX(hevc),
3728 #endif
3729 #if CONFIG_HEVC_VULKAN_HWACCEL
3730  HWACCEL_VULKAN(hevc),
3731 #endif
3732  NULL
3733  },
3734 };